diff --git a/.ccw/personal/coding-style.md b/.ccw/personal/coding-style.md new file mode 100644 index 00000000..4d11e746 --- /dev/null +++ b/.ccw/personal/coding-style.md @@ -0,0 +1,27 @@ +--- +title: "Personal Coding Style" +dimension: personal +category: general +keywords: + - style + - preference +readMode: optional +priority: medium +--- + +# Personal Coding Style + +## Preferences + +- Describe your preferred coding style here +- Example: verbose variable names vs terse, functional vs imperative + +## Patterns I Prefer + +- List patterns you reach for most often +- Example: builder pattern, factory functions, tagged unions + +## Things I Avoid + +- List anti-patterns or approaches you dislike +- Example: deep inheritance hierarchies, magic strings diff --git a/.ccw/personal/tool-preferences.md b/.ccw/personal/tool-preferences.md new file mode 100644 index 00000000..3eadc0d5 --- /dev/null +++ b/.ccw/personal/tool-preferences.md @@ -0,0 +1,25 @@ +--- +title: "Tool Preferences" +dimension: personal +category: general +keywords: + - tool + - cli + - editor +readMode: optional +priority: low +--- + +# Tool Preferences + +## Editor + +- Preferred editor and key extensions/plugins + +## CLI Tools + +- Preferred shell, package manager, build tools + +## Debugging + +- Preferred debugging approach and tools diff --git a/.ccw/specs/architecture-constraints.md b/.ccw/specs/architecture-constraints.md index e90a073b..7bf09a4a 100644 --- a/.ccw/specs/architecture-constraints.md +++ b/.ccw/specs/architecture-constraints.md @@ -1,3 +1,13 @@ +--- +title: Architecture Constraints +readMode: optional +priority: medium +category: general +scope: project +dimension: specs +keywords: [architecture, constraint, schema, compatibility, portability, design, arch] +--- + # Architecture Constraints ## Schema Evolution diff --git a/.ccw/specs/coding-conventions.md b/.ccw/specs/coding-conventions.md index a3503317..a98054bf 100644 --- a/.ccw/specs/coding-conventions.md +++ 
b/.ccw/specs/coding-conventions.md @@ -1,3 +1,13 @@ +--- +title: Coding Conventions +readMode: optional +priority: medium +category: general +scope: project +dimension: specs +keywords: [coding, convention, style, naming, pattern, navigation, schema, error-handling, implementation, validation, clarity, doc] +--- + # Coding Conventions ## Navigation & Path Handling @@ -9,6 +19,7 @@ ## Document Generation - [architecture] For document generation systems, adopt Layer 3→2→1 pattern (components → features → indexes) for efficient incremental updates. (learned: 2026-03-07) +- [tools] When commands need to generate files with deterministic paths and frontmatter, use dedicated ccw tool endpoints (`ccw tool exec`) instead of raw `ccw cli -p` calls. Endpoints control output path, file naming, and structural metadata; CLI tools only generate prose content. (learned: 2026-03-09) ## Implementation Quality diff --git a/.claude/commands/ddd/doc-generate.md b/.claude/commands/ddd/doc-generate.md index 01af3fd0..9b2819a2 100644 --- a/.claude/commands/ddd/doc-generate.md +++ b/.claude/commands/ddd/doc-generate.md @@ -48,8 +48,9 @@ doc-index.json → tech-registry/*.md (L3) → feature-maps/*.md (L2) → _index ├── tech-registry/ ← Component documentation (Layer 3) │ ├── _index.md │ └── {component-slug}.md -└── sessions/ - └── _index.md ← Planning sessions index (Layer 1) +└── planning/ ← Planning sessions (Layer 1) + ├── _index.md ← Planning sessions index + └── {task-slug}-{date}/ ← Individual session folders ``` ## Phase 1: Load & Validate @@ -87,147 +88,82 @@ IF docs already exist AND NOT --force: Ask user (unless -y → overwrite) ``` -## Phase 2: Layer 3 — Component Documentation +## Phase 2: Layer 3 -- Component Documentation -For each component in `technicalComponents[]`: +For each component in `technicalComponents[]`, call the generate_ddd_docs endpoint: ```bash -ccw cli -p "PURPOSE: Generate component documentation for {component.name} -TASK: -• Document component purpose and 
responsibility -• List exported symbols (classes, functions, types) -• Document dependencies (internal and external) -• Include code examples for key APIs -• Document integration points with other components -MODE: write -CONTEXT: @{component.codeLocations[].path} -EXPECTED: Markdown file with: Overview, API Reference, Dependencies, Usage Examples -CONSTRAINTS: Focus on public API | Include type signatures -" --tool gemini --mode write --cd .workflow/.doc-index/tech-registry/ +for COMPONENT_ID in "${technicalComponents[@]}"; do + ccw tool exec generate_ddd_docs '{"strategy":"component","entityId":"'"$COMPONENT_ID"'","tool":"gemini"}' +done ``` +The endpoint handles: +- Loading the component entity from doc-index.json +- Building YAML frontmatter (layer: 3, component_id, name, type, features, code_locations, generated_at) +- Constructing the CLI prompt with code context paths +- **Including Change History section**: Pull related entries from `doc-index.json.actions[]` where `affectedComponents` includes this component ID. Display as timeline (date, action type, description) +- Writing output to `.workflow/.doc-index/tech-registry/{slug}.md` +- Tool fallback (gemini -> qwen -> codex) on failure + Output: `.workflow/.doc-index/tech-registry/{component-slug}.md` -Frontmatter: -```markdown ---- -layer: 3 -component_id: tech-{slug} -name: ComponentName -type: service|controller|model|... 
-features: [feat-auth] -code_locations: - - path: src/services/auth.ts - symbols: [AuthService, AuthService.login] -generated_at: ISO8601 ---- -``` +## Phase 3: Layer 2 -- Feature Documentation -Sections: Responsibility, Code Locations, Related Requirements, Architecture Decisions, Dependencies (in/out) - -## Phase 3: Layer 2 — Feature Documentation - -For each feature in `features[]`: +For each feature in `features[]`, call the generate_ddd_docs endpoint: ```bash -ccw cli -p "PURPOSE: Generate feature documentation for {feature.name} -TASK: -• Describe feature purpose and business value -• List requirements (from requirementIds) -• Document components involved (from techComponentIds) -• Include architecture decisions (from adrIds) -• Provide integration guide -MODE: write -CONTEXT: @.workflow/.doc-index/tech-registry/{related-components}.md -EXPECTED: Markdown file with: Overview, Requirements, Components, Architecture, Integration -CONSTRAINTS: Reference Layer 3 component docs | Business-focused language -" --tool gemini --mode write --cd .workflow/.doc-index/feature-maps/ +for FEATURE_ID in "${features[@]}"; do + ccw tool exec generate_ddd_docs '{"strategy":"feature","entityId":"'"$FEATURE_ID"'","tool":"gemini"}' +done ``` +The endpoint handles: +- Loading the feature entity from doc-index.json +- Building YAML frontmatter (layer: 2, feature_id, name, epic_id, status, requirements, components, tags, generated_at) +- Constructing the CLI prompt referencing Layer 3 component docs +- **Including Change History section**: Pull related entries from `doc-index.json.actions[]` where `affectedFeatures` includes this feature ID. 
Display as timeline (date, action type, description) +- Writing output to `.workflow/.doc-index/feature-maps/{slug}.md` +- Tool fallback (gemini -> qwen -> codex) on failure + Output: `.workflow/.doc-index/feature-maps/{feature-slug}.md` -Frontmatter: -```markdown ---- -layer: 2 -feature_id: feat-{slug} -name: Feature Name -epic_id: EPIC-NNN|null -status: implemented|in-progress|planned|partial -requirements: [REQ-001, REQ-002] -components: [tech-auth-service, tech-user-model] -depends_on_layer3: [tech-auth-service, tech-user-model] -tags: [auth, security] -generated_at: ISO8601 ---- -``` - -Sections: Overview, Requirements (with mapping status), Technical Components, Architecture Decisions, Change History - -## Phase 4: Layer 1 — Index & Overview Documentation +## Phase 4: Layer 1 -- Index & Overview Documentation ### 4.1 Index Documents -Generate catalog files: +Generate catalog files for each subdirectory: -- **feature-maps/_index.md** — Feature overview table with status -- **tech-registry/_index.md** — Component registry table with types -- **action-logs/_index.md** — Action history table (empty initially for new projects) +```bash +# Feature maps index +ccw tool exec generate_ddd_docs '{"strategy":"index","entityId":"feature-maps","tool":"gemini"}' + +# Tech registry index +ccw tool exec generate_ddd_docs '{"strategy":"index","entityId":"tech-registry","tool":"gemini"}' + +# Action logs index +ccw tool exec generate_ddd_docs '{"strategy":"index","entityId":"action-logs","tool":"gemini"}' + +# Planning sessions index +ccw tool exec generate_ddd_docs '{"strategy":"index","entityId":"planning","tool":"gemini"}' +``` + +Or generate all indexes at once (omit entityId): + +```bash +ccw tool exec generate_ddd_docs '{"strategy":"index","tool":"gemini"}' +``` ### 4.2 README.md (unless --skip-overview) ```bash -ccw cli -p "PURPOSE: Generate project README with overview and navigation -TASK: -• Project summary and purpose -• Quick start guide -• Navigation to features, 
components, and architecture -• Link to doc-index.json -MODE: write -CONTEXT: @.workflow/.doc-index/doc-index.json @.workflow/.doc-index/feature-maps/_index.md -EXPECTED: README.md with: Overview, Quick Start, Navigation, Links -CONSTRAINTS: High-level only | Entry point for new developers -" --tool gemini --mode write --cd .workflow/.doc-index/ +ccw tool exec generate_ddd_docs '{"strategy":"overview","tool":"gemini"}' ``` ### 4.3 ARCHITECTURE.md (unless --skip-overview) ```bash -ccw cli -p "PURPOSE: Generate architecture overview document -TASK: -• System design overview -• Component relationships and dependencies -• Key architecture decisions (from ADRs) -• Technology stack -MODE: write -CONTEXT: @.workflow/.doc-index/doc-index.json @.workflow/.doc-index/tech-registry/*.md -EXPECTED: ARCHITECTURE.md with: System Design, Component Diagram, ADRs, Tech Stack -CONSTRAINTS: Architecture-focused | Reference component docs for details -" --tool gemini --mode write --cd .workflow/.doc-index/ -``` - -### 4.4 sessions/_index.md (unless --skip-overview) - -```bash -ccw cli -p "PURPOSE: Generate planning sessions index -TASK: -• List all planning session folders chronologically -• Link to each session's plan.json -• Show session status and task count -MODE: write -CONTEXT: @.workflow/.doc-index/planning/*/plan.json -EXPECTED: sessions/_index.md with: Session List, Links, Status -CONSTRAINTS: Chronological order | Link to session folders -" --tool gemini --mode write --cd .workflow/.doc-index/sessions/ -``` - -Layer 1 frontmatter: -```markdown ---- -layer: 1 -depends_on_layer2: [feat-auth, feat-orders] -generated_at: ISO8601 ---- +ccw tool exec generate_ddd_docs '{"strategy":"overview","entityId":"architecture","tool":"gemini"}' ``` ## Phase 5: SCHEMA.md (unless --skip-schema) @@ -235,17 +171,7 @@ generated_at: ISO8601 ### 5.1 Generate Schema Documentation ```bash -ccw cli -p "PURPOSE: Document doc-index.json schema structure and versioning -TASK: -• Document current schema 
structure (all fields) -• Define versioning policy (semver: major.minor) -• Document migration protocol for version upgrades -• Provide examples for each schema section -MODE: write -CONTEXT: @.workflow/.doc-index/doc-index.json -EXPECTED: SCHEMA.md with: Schema Structure, Versioning Policy, Migration Protocol, Examples -CONSTRAINTS: Complete field documentation | Clear migration steps -" --tool gemini --mode write --cd .workflow/.doc-index/ +ccw tool exec generate_ddd_docs '{"strategy":"schema","tool":"gemini"}' ``` ### 5.2 Versioning Policy @@ -284,7 +210,7 @@ Total: {N} documents generated | `-y, --yes` | Auto-confirm all decisions | | `--layer <3\|2\|1\|all>` | Generate specific layer only (default: all) | | `--force` | Overwrite existing documents | -| `--skip-overview` | Skip README.md, ARCHITECTURE.md, sessions/_index.md | +| `--skip-overview` | Skip README.md, ARCHITECTURE.md, planning/_index.md | | `--skip-schema` | Skip SCHEMA.md generation | ## Integration Points @@ -293,3 +219,4 @@ Total: {N} documents generated - **Called by**: `/ddd:scan` (after index assembly), `/ddd:index-build` (after index assembly) - **Standalone**: Can be run independently on any project with existing doc-index.json - **Output**: Complete document tree in `.workflow/.doc-index/` +- **Endpoint**: `ccw tool exec generate_ddd_docs` handles prompt construction, frontmatter, tool fallback, and file creation diff --git a/.claude/commands/ddd/doc-refresh.md b/.claude/commands/ddd/doc-refresh.md index b4e4ad8c..14963459 100644 --- a/.claude/commands/ddd/doc-refresh.md +++ b/.claude/commands/ddd/doc-refresh.md @@ -163,7 +163,7 @@ ccw cli -p "PURPOSE: Update project overview docs after feature changes TASK: • Update README.md feature list • Update ARCHITECTURE.md if new components added -• Update sessions/_index.md with new planning sessions +• Update planning/_index.md with new planning sessions MODE: write CONTEXT: @.workflow/.doc-index/feature-maps/*.md 
@.workflow/.doc-index/doc-index.json EXPECTED: Updated overview docs with current project state diff --git a/.claude/commands/ddd/sync.md b/.claude/commands/ddd/sync.md index 9b49b28a..f8ec6b00 100644 --- a/.claude/commands/ddd/sync.md +++ b/.claude/commands/ddd/sync.md @@ -37,11 +37,42 @@ After completing a development task, synchronize the document index with actual - `doc-index.json` must exist - Git repository with committed or staged changes +## Phase 0: Consistency Validation + +Before processing changes, verify that `doc-index.json` entries are consistent with actual code state. + +### 0.1 Validate Code Locations + +For each `technicalComponents[].codeLocations[]`: +- Verify file exists on disk +- If file was deleted/moved → flag for removal or update +- If file exists → verify listed `symbols[]` still exist (quick grep/AST check) + +### 0.2 Validate Symbols + +For components with `codeLocations[].symbols[]`: +- Check each symbol still exists in the referenced file +- Detect new exported symbols not yet tracked +- Report: `{N} stale symbols, {N} untracked symbols` + +### 0.3 Validation Report + +``` +Consistency Check: + Components validated: {N} + Files verified: {N} + Stale references: {N} (files missing or symbols removed) + Untracked symbols: {N} (new exports not in index) +``` + +If stale references found: warn and auto-fix during Phase 3 updates. +If `--dry-run`: report only, no fixes. 
+ ## Phase 1: Change Detection -### 0.1 Schema Version Check (TASK-006) +### 1.0.1 Schema Version Check -Before processing changes, verify doc-index schema compatibility: +Before processing changes, verify doc-index.json schema compatibility: ```javascript const docIndex = JSON.parse(Read('.workflow/.doc-index/doc-index.json')); @@ -201,6 +232,7 @@ For each affected component in `doc-index.json`: - Update `codeLocations` if file paths or line ranges changed - Update `symbols` if new exports were added - Add new `actionIds` entry +- **Auto-update `responsibility`**: If symbols changed (new methods/exports added or removed), re-infer responsibility from current symbols list using Gemini analysis. This prevents stale descriptions (e.g., responsibility still says "登录、注册" after adding logout support) ### 3.2 Register New Components diff --git a/.claude/commands/workflow/session/sync.md b/.claude/commands/workflow/session/sync.md index 5163d3ef..68f926ec 100644 --- a/.claude/commands/workflow/session/sync.md +++ b/.claude/commands/workflow/session/sync.md @@ -65,11 +65,14 @@ Analyze context and produce two update payloads. Use LLM reasoning (current agen ```javascript // ── Guidelines extraction ── // Scan git diff + session for: -// - New patterns adopted → convention -// - Restrictions discovered → constraint -// - Surprises / gotchas → learning +// - Debugging experiences → bug +// - Reusable code patterns → pattern +// - Architecture/design decisions → decision +// - Conventions, constraints, insights → rule // -// Output: array of { type, category, text } +// Output: array of { type, tag, text } +// type: 'bug' | 'pattern' | 'decision' | 'rule' +// tag: domain tag (api, routing, schema, security, etc.) // RULE: Only extract genuinely reusable insights. Skip trivial/obvious items. // RULE: Deduplicate against existing guidelines before adding. 
@@ -118,7 +121,7 @@ console.log(` ── Sync Preview ── Guidelines (${guidelineUpdates.length} items): -${guidelineUpdates.map(g => ` [${g.type}/${g.category}] ${g.text}`).join('\n') || ' (none)'} +${guidelineUpdates.map(g => ` [${g.type}:${g.tag}] ${g.text}`).join('\n') || ' (none)'} Tech [${detectCategory(summary)}]: ${techEntry.title} @@ -137,26 +140,102 @@ if (!autoYes) { ## Step 4: Write ```javascript -// ── Update specs/*.md ── -// Uses .ccw/specs/ directory (same as frontend/backend spec-index-builder) -if (guidelineUpdates.length > 0) { - // Map guideline types to spec files - const specFileMap = { - convention: '.ccw/specs/coding-conventions.md', - constraint: '.ccw/specs/architecture-constraints.md', - learning: '.ccw/specs/coding-conventions.md' // learnings appended to conventions +const matter = require('gray-matter') // YAML frontmatter parser + +// ── Frontmatter check & repair helper ── +// Ensures target spec file has valid YAML frontmatter with keywords +// Uses gray-matter for robust parsing (handles malformed frontmatter, missing fields) +function ensureFrontmatter(filePath, tag, type) { + const titleMap = { + 'coding-conventions': 'Coding Conventions', + 'architecture-constraints': 'Architecture Constraints', + 'learnings': 'Learnings', + 'quality-rules': 'Quality Rules' + } + const basename = filePath.split('/').pop().replace('.md', '') + const title = titleMap[basename] || basename + const defaultFm = { + title, + readMode: 'optional', + priority: 'medium', + scope: 'project', + dimension: 'specs', + keywords: [tag, type] } + if (!file_exists(filePath)) { + // Case A: Create new file with frontmatter + Write(filePath, matter.stringify(`\n# ${title}\n\n`, defaultFm)) + return + } + + const raw = Read(filePath) + let parsed + try { + parsed = matter(raw) + } catch { + parsed = { data: {}, content: raw } + } + + const hasFrontmatter = raw.trimStart().startsWith('---') + + if (!hasFrontmatter) { + // Case B: File exists but no frontmatter → prepend 
+ Write(filePath, matter.stringify(raw, defaultFm)) + return + } + + // Case C: Frontmatter exists → ensure keywords include current tag + const existingKeywords = parsed.data.keywords || [] + const newKeywords = [...new Set([...existingKeywords, tag, type])] + + if (newKeywords.length !== existingKeywords.length) { + parsed.data.keywords = newKeywords + Write(filePath, matter.stringify(parsed.content, parsed.data)) + } +} + +// ── Update specs/*.md ── +// Uses .ccw/specs/ directory - unified [type:tag] entry format +if (guidelineUpdates.length > 0) { + // Map knowledge types to spec files + const specFileMap = { + bug: '.ccw/specs/learnings.md', + pattern: '.ccw/specs/coding-conventions.md', + decision: '.ccw/specs/architecture-constraints.md', + rule: null // determined by content below + } + + const date = new Date().toISOString().split('T')[0] + const needsDate = { bug: true, pattern: true, decision: true, rule: false } + for (const g of guidelineUpdates) { - const targetFile = specFileMap[g.type] + // For rule type, route by content and tag + let targetFile = specFileMap[g.type] + if (!targetFile) { + const isQuality = /\b(test|coverage|lint|eslint|质量|测试覆盖|pre-commit|tsc|type.check)\b/i.test(g.text) + || ['testing', 'quality', 'lint'].includes(g.tag) + const isConstraint = /\b(禁止|no|never|must not|forbidden|不得|不允许)\b/i.test(g.text) + if (isQuality) { + targetFile = '.ccw/specs/quality-rules.md' + } else if (isConstraint) { + targetFile = '.ccw/specs/architecture-constraints.md' + } else { + targetFile = '.ccw/specs/coding-conventions.md' + } + } + + // Ensure frontmatter exists and keywords are up-to-date + ensureFrontmatter(targetFile, g.tag, g.type) + const existing = Read(targetFile) - const ruleText = g.type === 'learning' - ? `- [${g.category}] ${g.text} (learned: ${new Date().toISOString().split('T')[0]})` - : `- [${g.category}] ${g.text}` + const entryLine = needsDate[g.type] + ? 
`- [${g.type}:${g.tag}] ${g.text} (${date})` + : `- [${g.type}:${g.tag}] ${g.text}` // Deduplicate: skip if text already in file if (!existing.includes(g.text)) { - const newContent = existing.trimEnd() + '\n' + ruleText + '\n' + const newContent = existing.trimEnd() + '\n' + entryLine + '\n' Write(targetFile, newContent) } } @@ -198,4 +277,5 @@ Write(techPath, JSON.stringify(tech, null, 2)) ## Related Commands - `/workflow:spec:setup` - Initialize project with specs scaffold -- `/workflow:spec:add` - Interactive wizard to create individual specs with scope selection +- `/workflow:spec:add` - Add knowledge entries (bug/pattern/decision/rule) with unified [type:tag] format +- `/workflow:spec:load` - Interactive spec loader with keyword/type/tag filtering diff --git a/.claude/commands/workflow/spec/add.md b/.claude/commands/workflow/spec/add.md index 6d32a82f..4b371a38 100644 --- a/.claude/commands/workflow/spec/add.md +++ b/.claude/commands/workflow/spec/add.md @@ -1,125 +1,210 @@ --- name: add -description: Add specs, conventions, constraints, or learnings to project guidelines interactively or automatically -argument-hint: "[-y|--yes] [--type ] [--category ] [--dimension ] [--scope ] [--interactive] \"rule text\"" +description: Add knowledge entries (bug fixes, code patterns, decisions, rules) to project specs interactively or automatically +argument-hint: "[-y|--yes] [--type ] [--tag ] [--dimension ] [--scope ] [--interactive] \"summary text\"" examples: - /workflow:spec:add "Use functional components for all React code" - - /workflow:spec:add -y "No direct DB access from controllers" --type constraint - - /workflow:spec:add --scope global --dimension personal + - /workflow:spec:add -y "No direct DB access from controllers" --type rule + - /workflow:spec:add --type bug --tag api "API 返回 502 Bad Gateway" + - /workflow:spec:add --type pattern --tag routing "添加新 API 路由标准流程" + - /workflow:spec:add --type decision --tag db "选用 PostgreSQL 作为主数据库" - /workflow:spec:add 
--interactive - - /workflow:spec:add "Cache invalidation requires event sourcing" --type learning --category architecture --- ## Auto Mode -When `--yes` or `-y`: Auto-categorize and add guideline without confirmation. +When `--yes` or `-y`: Auto-categorize and add entry without confirmation. # Spec Add Command (/workflow:spec:add) ## Overview -Unified command for adding specs one at a time. Supports both interactive wizard mode and direct CLI mode. +Unified command for adding structured knowledge entries one at a time. Supports 4 knowledge types with optional extended fields for complex entries (bug debugging, code patterns, architecture decisions). **Key Features**: -- Supports both project specs and personal specs -- Scope selection (global vs project) for personal specs -- Category-based organization for workflow stages -- Interactive wizard mode with smart defaults -- Direct CLI mode with auto-detection of type and category +- 4 knowledge types: `bug`, `pattern`, `decision`, `rule` +- Unified entry format: `- [type:tag] summary (date)` +- Extended fields for complex types (bug/pattern/decision) +- Interactive wizard with type-specific field prompts +- Direct CLI mode with auto-detection +- Backward compatible: `[tag]` = `[rule:tag]` shorthand - Auto-confirm mode (`-y`/`--yes`) for scripted usage +## Knowledge Type System + +| Type | Purpose | Format | Target File | +|------|---------|--------|-------------| +| `bug` | Debugging experience (symptoms → cause → fix) | Extended | `learnings.md` | +| `pattern` | Reusable code patterns / reference implementations | Extended | `coding-conventions.md` | +| `decision` | Architecture / design decisions (ADR-lite) | Extended | `architecture-constraints.md` | +| `rule` | Hard constraints, conventions, general insights | Simple (single line) | By content (conventions / constraints) | + +### Extended Fields Per Type + +**bug** (core: 原因, 修复 | optional: 症状, 参考): +```markdown +- [bug:api] API 返回 502 Bad Gateway (2026-03-06) + 
- 原因: 路由处理器未在 server.ts 路由分发中注册 + - 修复: 在路由分发逻辑中导入并调用 app.use(newRouter) + - 参考: src/server.ts:45 +``` + +**pattern** (core: 场景, 代码 | optional: 步骤): +```markdown +- [pattern:routing] 添加新 API 路由标准流程 (2026-03-06) + - 场景: Express 应用新增业务接口 + - 步骤: 1.创建 routes/xxx.ts → 2.server.ts import → 3.app.use() 挂载 + - 代码: + ```typescript + if (pathname.startsWith('/api/xxx')) { + if (await handleXxxRoutes(routeContext)) return; + } + ``` +``` + +**decision** (core: 决策, 理由 | optional: 背景, 备选, 状态): +```markdown +- [decision:db] 选用 PostgreSQL 作为主数据库 (2026-03-01) + - 决策: 使用 PostgreSQL 15 + - 理由: JSONB 支持完善,PostGIS 扩展成熟 + - 备选: MySQL(JSON弱) / SQLite(不适合并发) + - 状态: accepted +``` + +**rule** (no extended fields): +```markdown +- [rule:security] 禁止在代码中硬编码密钥或密码 +``` + +### Entry Format Specification + +``` +Entry Line: - [type:tag] 摘要描述 (YYYY-MM-DD) +Extended: - key: value +Code Block: ```lang + code here + ``` +``` + +- **`type`**: Required. One of `bug`, `pattern`, `decision`, `rule` +- **`tag`**: Required. Domain tag (api, routing, schema, react, security, etc.) +- **`(date)`**: Required for bug/pattern/decision. Optional for rule. +- **Backward compat**: `- [tag] text` = `- [rule:tag] text` + +### Parsing Regex + +```javascript +// Entry line extraction +/^- \[(\w+):([\w-]+)\] (.*?)(?: \((\d{4}-\d{2}-\d{2})\))?$/ + +// Extended field extraction (per indented line) +/^\s{4}-\s([\w-]+):\s?(.*)/ +``` + ## Use Cases -1. **During Session**: Capture important decisions as they're made -2. **After Session**: Reflect on lessons learned before archiving -3. **Proactive**: Add team conventions or architectural rules -4. **Interactive**: Guided wizard for adding rules with full control over dimension, scope, and category +1. **Bug Fix**: Capture debugging experience immediately after fixing a bug +2. **Code Pattern**: Record reusable coding patterns discovered during implementation +3. **Architecture Decision**: Document important technical decisions with rationale +4. 
**Rule/Convention**: Add team conventions or hard constraints +5. **Interactive**: Guided wizard with type-specific field prompts ## Usage ```bash -/workflow:spec:add # Interactive wizard (all prompts) -/workflow:spec:add --interactive # Explicit interactive wizard -/workflow:spec:add "Use async/await instead of callbacks" # Direct mode (auto-detect type) -/workflow:spec:add -y "No direct DB access" --type constraint # Auto-confirm, skip confirmation -/workflow:spec:add --scope global --dimension personal # Create global personal spec (interactive) -/workflow:spec:add --dimension specs --category exploration # Project spec in exploration category (interactive) +/workflow:spec:add # Interactive wizard +/workflow:spec:add --interactive # Explicit interactive wizard +/workflow:spec:add "Use async/await instead of callbacks" # Direct mode (auto-detect → rule) +/workflow:spec:add --type bug --tag api "API 返回 502" # Bug with tag +/workflow:spec:add --type pattern --tag react "带状态函数组件" # Pattern with tag +/workflow:spec:add --type decision --tag db "选用 PostgreSQL" # Decision with tag +/workflow:spec:add -y "No direct DB access" --type rule --tag arch # Auto-confirm rule +/workflow:spec:add --scope global --dimension personal # Global personal spec ``` ## Parameters | Parameter | Type | Required | Default | Description | |-----------|------|----------|---------|-------------| -| `rule` | string | Yes (unless `--interactive`) | - | The rule, convention, or insight to add | -| `--type` | enum | No | auto-detect | Type: `convention`, `constraint`, `learning` | -| `--category` | string | No | auto-detect / `general` | Category for organization (see categories below) | +| `summary` | string | Yes (unless `--interactive`) | - | Summary text for the knowledge entry | +| `--type` | enum | No | auto-detect | Type: `bug`, `pattern`, `decision`, `rule` | +| `--tag` | string | No | auto-detect | Domain tag (api, routing, schema, react, security, etc.) 
| | `--dimension` | enum | No | Interactive | `specs` (project) or `personal` | | `--scope` | enum | No | `project` | `global` or `project` (only for personal dimension) | -| `--interactive` | flag | No | - | Launch full guided wizard for adding rules | +| `--interactive` | flag | No | - | Launch full guided wizard | | `-y` / `--yes` | flag | No | - | Auto-categorize and add without confirmation | -### Type Categories +### Legacy Parameter Mapping -**convention** - Coding style preferences (goes to `conventions` section) -- Subcategories: `coding_style`, `naming_patterns`, `file_structure`, `documentation` +For backward compatibility, old parameter values are internally mapped: -**constraint** - Hard rules that must not be violated (goes to `constraints` section) -- Subcategories: `architecture`, `tech_stack`, `performance`, `security` +| Old Parameter | Old Value | Maps To | +|---------------|-----------|---------| +| `--type` | `convention` | `rule` | +| `--type` | `constraint` | `rule` | +| `--type` | `learning` | `bug` (if has cause/fix indicators) or `rule` (otherwise) | +| `--category` | `` | `--tag ` | -**learning** - Session-specific insights (goes to `learnings` array) -- Subcategories: `architecture`, `performance`, `security`, `testing`, `process`, `other` +### Suggested Tags -### Workflow Stage Categories (for `--category`) +| Domain | Tags | +|--------|------| +| Backend | `api`, `routing`, `db`, `auth`, `middleware` | +| Frontend | `react`, `ui`, `state`, `css`, `a11y` | +| Infra | `deploy`, `ci`, `docker`, `perf`, `build` | +| Quality | `security`, `testing`, `lint`, `typing` | +| Architecture | `arch`, `schema`, `migration`, `pattern` | -| Category | Use Case | Example Rules | -|----------|----------|---------------| -| `general` | Applies to all stages | "Use TypeScript strict mode" | -| `exploration` | Code exploration, debugging | "Always trace the call stack before modifying" | -| `planning` | Task planning, requirements | "Break down tasks into 
2-hour chunks" | -| `execution` | Implementation, testing | "Run tests after each file modification" | +Tags are freeform — any `[\w-]+` value is accepted. ## Execution Process ``` Input Parsing: - |- Parse: rule text (positional argument, optional if --interactive) - |- Parse: --type (convention|constraint|learning) - |- Parse: --category (subcategory) + |- Parse: summary text (positional argument, optional if --interactive) + |- Parse: --type (bug|pattern|decision|rule) + |- Parse: --tag (domain tag) |- Parse: --dimension (specs|personal) |- Parse: --scope (global|project) |- Parse: --interactive (flag) +- Parse: -y / --yes (flag) -Step 1: Parse Input +Step 1: Parse Input (with legacy mapping) Step 2: Determine Mode - |- If --interactive OR no rule text → Full Interactive Wizard (Path A) - +- If rule text provided → Direct Mode (Path B) + |- If --interactive OR no summary text → Full Interactive Wizard (Path A) + +- If summary text provided → Direct Mode (Path B) Path A: Interactive Wizard |- Step A1: Ask dimension (if not specified) |- Step A2: Ask scope (if personal + scope not specified) - |- Step A3: Ask category (if not specified) - |- Step A4: Ask type (convention|constraint|learning) - |- Step A5: Ask content (rule text) + |- Step A3: Ask type (bug|pattern|decision|rule) + |- Step A4: Ask tag (domain tag) + |- Step A5: Ask summary (entry text) + |- Step A6: Ask extended fields (if bug/pattern/decision) +- Continue to Step 3 Path B: Direct Mode |- Step B1: Auto-detect type (if not specified) using detectType() - |- Step B2: Auto-detect category (if not specified) using detectCategory() + |- Step B2: Auto-detect tag (if not specified) using detectTag() |- Step B3: Default dimension to 'specs' if not specified +- Continue to Step 3 Step 3: Determine Target File - |- specs dimension → .ccw/specs/coding-conventions.md or architecture-constraints.md - +- personal dimension → ~/.ccw/personal/ or .ccw/personal/ + |- bug → .ccw/specs/learnings.md + |- pattern → 
.ccw/specs/coding-conventions.md + |- decision → .ccw/specs/architecture-constraints.md + |- rule → .ccw/specs/coding-conventions.md or architecture-constraints.md + +- personal → ~/.ccw/personal/ or .ccw/personal/ -Step 4: Validate and Write Spec +Step 4: Build Entry (entry line + extended fields) + +Step 5: Validate and Write |- Ensure target directory and file exist |- Check for duplicates - |- Append rule to appropriate section + |- Append entry to file +- Run ccw spec rebuild -Step 5: Display Confirmation +Step 6: Display Confirmation +- If -y/--yes: Minimal output +- Otherwise: Full confirmation with location details ``` @@ -137,21 +222,21 @@ const argsLower = args.toLowerCase() const autoConfirm = argsLower.includes('--yes') || argsLower.includes('-y') const isInteractive = argsLower.includes('--interactive') -// Extract named parameters +// Extract named parameters (support both new and legacy names) const hasType = argsLower.includes('--type') -const hasCategory = argsLower.includes('--category') +const hasTag = argsLower.includes('--tag') || argsLower.includes('--category') const hasDimension = argsLower.includes('--dimension') const hasScope = argsLower.includes('--scope') let type = hasType ? args.match(/--type\s+(\w+)/i)?.[1]?.toLowerCase() : null -let category = hasCategory ? args.match(/--category\s+(\w+)/i)?.[1]?.toLowerCase() : null +let tag = hasTag ? args.match(/--(?:tag|category)\s+([\w-]+)/i)?.[1]?.toLowerCase() : null let dimension = hasDimension ? args.match(/--dimension\s+(\w+)/i)?.[1]?.toLowerCase() : null let scope = hasScope ? 
args.match(/--scope\s+(\w+)/i)?.[1]?.toLowerCase() : null -// Extract rule text (everything before flags, or quoted string) -let ruleText = args +// Extract summary text (everything before flags, or quoted string) +let summaryText = args .replace(/--type\s+\w+/gi, '') - .replace(/--category\s+\w+/gi, '') + .replace(/--(?:tag|category)\s+[\w-]+/gi, '') .replace(/--dimension\s+\w+/gi, '') .replace(/--scope\s+\w+/gi, '') .replace(/--interactive/gi, '') @@ -160,6 +245,17 @@ let ruleText = args .replace(/^["']|["']$/g, '') .trim() +// Legacy type mapping +if (type) { + const legacyMap = { 'convention': 'rule', 'constraint': 'rule' } + if (legacyMap[type]) { + type = legacyMap[type] + } else if (type === 'learning') { + // Defer to detectType() for finer classification + type = null + } +} + // Validate values if (scope && !['global', 'project'].includes(scope)) { console.log("Invalid scope. Use 'global' or 'project'.") @@ -169,28 +265,17 @@ if (dimension && !['specs', 'personal'].includes(dimension)) { console.log("Invalid dimension. Use 'specs' or 'personal'.") return } -if (type && !['convention', 'constraint', 'learning'].includes(type)) { - console.log("Invalid type. Use 'convention', 'constraint', or 'learning'.") +if (type && !['bug', 'pattern', 'decision', 'rule'].includes(type)) { + console.log("Invalid type. Use 'bug', 'pattern', 'decision', or 'rule'.") return } -if (category) { - const validCategories = [ - 'general', 'exploration', 'planning', 'execution', - 'coding_style', 'naming_patterns', 'file_structure', 'documentation', - 'architecture', 'tech_stack', 'performance', 'security', - 'testing', 'process', 'other' - ] - if (!validCategories.includes(category)) { - console.log(`Invalid category. 
Valid categories: ${validCategories.join(', ')}`) - return - } -} +// Tags are freeform [\w-]+, no validation needed ``` ### Step 2: Determine Mode ```javascript -const useInteractiveWizard = isInteractive || !ruleText +const useInteractiveWizard = isInteractive || !summaryText ``` ### Path A: Interactive Wizard @@ -206,11 +291,11 @@ if (!dimension) { options: [ { label: "Project Spec", - description: "Coding conventions, constraints, quality rules for this project (stored in .ccw/specs/)" + description: "Knowledge entries for this project (stored in .ccw/specs/)" }, { label: "Personal Spec", - description: "Personal preferences and constraints that follow you across projects (stored in ~/.ccw/specs/personal/ or .ccw/specs/personal/)" + description: "Personal preferences across projects (stored in ~/.ccw/personal/)" } ] }] @@ -230,11 +315,11 @@ if (dimension === 'personal' && !scope) { options: [ { label: "Global (Recommended)", - description: "Apply to ALL projects (~/.ccw/specs/personal/)" + description: "Apply to ALL projects (~/.ccw/personal/)" }, { label: "Project-only", - description: "Apply only to this project (.ccw/specs/personal/)" + description: "Apply only to this project (.ccw/personal/)" } ] }] @@ -243,89 +328,153 @@ if (dimension === 'personal' && !scope) { } ``` -**If category not specified**: -```javascript -if (!category) { - const categoryAnswer = AskUserQuestion({ - questions: [{ - question: "Which workflow stage does this spec apply to?", - header: "Category", - multiSelect: false, - options: [ - { - label: "General (Recommended)", - description: "Applies to all stages (default)" - }, - { - label: "Exploration", - description: "Code exploration, analysis, debugging" - }, - { - label: "Planning", - description: "Task planning, requirements gathering" - }, - { - label: "Execution", - description: "Implementation, testing, deployment" - } - ] - }] - }) - const categoryLabel = categoryAnswer.answers["Category"] - category = 
categoryLabel.includes("General") ? "general" - : categoryLabel.includes("Exploration") ? "exploration" - : categoryLabel.includes("Planning") ? "planning" - : "execution" -} -``` - **Ask type (if not specified)**: ```javascript if (!type) { const typeAnswer = AskUserQuestion({ questions: [{ - question: "What type of rule is this?", + question: "What type of knowledge entry is this?", header: "Type", multiSelect: false, options: [ { - label: "Convention", - description: "Coding style preference (e.g., use functional components)" + label: "Bug", + description: "Debugging experience: symptoms, root cause, fix (e.g., API 502 caused by...)" }, { - label: "Constraint", - description: "Hard rule that must not be violated (e.g., no direct DB access)" + label: "Pattern", + description: "Reusable code pattern or reference implementation (e.g., adding API routes)" }, { - label: "Learning", - description: "Insight or lesson learned (e.g., cache invalidation needs events)" + label: "Decision", + description: "Architecture or design decision with rationale (e.g., chose PostgreSQL because...)" + }, + { + label: "Rule", + description: "Hard constraint, convention, or general insight (e.g., no direct DB access)" } ] }] }) const typeLabel = typeAnswer.answers["Type"] - type = typeLabel.includes("Convention") ? "convention" - : typeLabel.includes("Constraint") ? "constraint" - : "learning" + type = typeLabel.includes("Bug") ? "bug" + : typeLabel.includes("Pattern") ? "pattern" + : typeLabel.includes("Decision") ? 
"decision" + : "rule" } ``` -**Ask content (rule text)**: +**Ask tag (if not specified)**: ```javascript -if (!ruleText) { - const contentAnswer = AskUserQuestion({ +if (!tag) { + const tagAnswer = AskUserQuestion({ questions: [{ - question: "Enter the rule or guideline text:", - header: "Content", + question: "What domain does this entry belong to?", + header: "Tag", multiSelect: false, options: [ - { label: "Custom rule", description: "Type your own rule using the 'Other' option below" }, - { label: "Skip", description: "Cancel adding a spec" } + { label: "api", description: "API endpoints, HTTP, REST, routing" }, + { label: "arch", description: "Architecture, design patterns, module structure" }, + { label: "security", description: "Authentication, authorization, input validation" }, + { label: "perf", description: "Performance, caching, optimization" } ] }] }) - if (contentAnswer.answers["Content"] === "Skip") return - ruleText = contentAnswer.answers["Content"] + tag = tagAnswer.answers["Tag"].toLowerCase().replace(/\s+/g, '-') +} +``` + +**Ask summary (entry text)**: +```javascript +if (!summaryText) { + const contentAnswer = AskUserQuestion({ + questions: [{ + question: "Enter the summary text for this entry:", + header: "Summary", + multiSelect: false, + options: [ + { label: "Custom text", description: "Type your summary using the 'Other' option below" }, + { label: "Skip", description: "Cancel adding an entry" } + ] + }] + }) + if (contentAnswer.answers["Summary"] === "Skip") return + summaryText = contentAnswer.answers["Summary"] +} +``` + +**Ask extended fields (if bug/pattern/decision)**: +```javascript +let extendedFields = {} + +if (type === 'bug') { + // Core fields: 原因, 修复 + const bugAnswer = AskUserQuestion({ + questions: [ + { + question: "Root cause of the bug (原因):", + header: "Cause", + multiSelect: false, + options: [ + { label: "Enter cause", description: "Type root cause via 'Other' option" }, + { label: "Skip", description: "Add later by 
editing the file" } + ] + }, + { + question: "How was it fixed (修复):", + header: "Fix", + multiSelect: false, + options: [ + { label: "Enter fix", description: "Type fix description via 'Other' option" }, + { label: "Skip", description: "Add later by editing the file" } + ] + } + ] + }) + if (bugAnswer.answers["Cause"] !== "Skip") extendedFields['原因'] = bugAnswer.answers["Cause"] + if (bugAnswer.answers["Fix"] !== "Skip") extendedFields['修复'] = bugAnswer.answers["Fix"] + +} else if (type === 'pattern') { + // Core field: 场景 + const patternAnswer = AskUserQuestion({ + questions: [{ + question: "When should this pattern be used (场景):", + header: "UseCase", + multiSelect: false, + options: [ + { label: "Enter use case", description: "Type applicable scenario via 'Other' option" }, + { label: "Skip", description: "Add later by editing the file" } + ] + }] + }) + if (patternAnswer.answers["UseCase"] !== "Skip") extendedFields['场景'] = patternAnswer.answers["UseCase"] + +} else if (type === 'decision') { + // Core fields: 决策, 理由 + const decisionAnswer = AskUserQuestion({ + questions: [ + { + question: "What was decided (决策):", + header: "Decision", + multiSelect: false, + options: [ + { label: "Enter decision", description: "Type the decision via 'Other' option" }, + { label: "Skip", description: "Add later by editing the file" } + ] + }, + { + question: "Why was this chosen (理由):", + header: "Rationale", + multiSelect: false, + options: [ + { label: "Enter rationale", description: "Type the reasoning via 'Other' option" }, + { label: "Skip", description: "Add later by editing the file" } + ] + } + ] + }) + if (decisionAnswer.answers["Decision"] !== "Skip") extendedFields['决策'] = decisionAnswer.answers["Decision"] + if (decisionAnswer.answers["Rationale"] !== "Skip") extendedFields['理由'] = decisionAnswer.answers["Rationale"] } ``` @@ -333,48 +482,55 @@ if (!ruleText) { **Auto-detect type if not specified**: ```javascript -function detectType(ruleText) { - const text = 
ruleText.toLowerCase(); +function detectType(text) { + const t = text.toLowerCase() - // Constraint indicators - if (/\b(no|never|must not|forbidden|prohibited|always must)\b/.test(text)) { - return 'constraint'; + // Bug indicators + if (/\b(bug|fix|错误|报错|502|404|500|crash|失败|异常|undefined|null pointer)\b/.test(t)) { + return 'bug' } - // Learning indicators - if (/\b(learned|discovered|realized|found that|turns out)\b/.test(text)) { - return 'learning'; + // Pattern indicators + if (/\b(pattern|模式|模板|标准流程|how to|步骤|参考)\b/.test(t)) { + return 'pattern' } - // Default to convention - return 'convention'; + // Decision indicators + if (/\b(决定|选用|采用|decision|chose|选择|替代|vs|比较)\b/.test(t)) { + return 'decision' + } + + // Default to rule + return 'rule' } -function detectCategory(ruleText, type) { - const text = ruleText.toLowerCase(); +function detectTag(text) { + const t = text.toLowerCase() - if (type === 'constraint' || type === 'learning') { - if (/\b(architecture|layer|module|dependency|circular)\b/.test(text)) return 'architecture'; - if (/\b(security|auth|permission|sanitize|xss|sql)\b/.test(text)) return 'security'; - if (/\b(performance|cache|lazy|async|sync|slow)\b/.test(text)) return 'performance'; - if (/\b(test|coverage|mock|stub)\b/.test(text)) return 'testing'; - } + if (/\b(api|http|rest|endpoint|路由|routing|proxy)\b/.test(t)) return 'api' + if (/\b(security|auth|permission|密钥|xss|sql|注入)\b/.test(t)) return 'security' + if (/\b(database|db|sql|postgres|mysql|mongo|数据库)\b/.test(t)) return 'db' + if (/\b(react|component|hook|组件|jsx|tsx)\b/.test(t)) return 'react' + if (/\b(performance|perf|cache|缓存|slow|慢|优化)\b/.test(t)) return 'perf' + if (/\b(test|testing|jest|vitest|测试|coverage)\b/.test(t)) return 'testing' + if (/\b(architecture|arch|layer|模块|module|依赖)\b/.test(t)) return 'arch' + if (/\b(build|webpack|vite|compile|构建|打包)\b/.test(t)) return 'build' + if (/\b(deploy|ci|cd|docker|部署)\b/.test(t)) return 'deploy' + if 
(/\b(style|naming|命名|格式|lint|eslint)\b/.test(t)) return 'style' + if (/\b(schema|migration|迁移|版本)\b/.test(t)) return 'schema' + if (/\b(error|exception|错误处理|异常处理)\b/.test(t)) return 'error' + if (/\b(ui|css|layout|样式|界面)\b/.test(t)) return 'ui' + if (/\b(file|path|路径|目录|文件)\b/.test(t)) return 'file' + if (/\b(doc|comment|文档|注释)\b/.test(t)) return 'doc' - if (type === 'convention') { - if (/\b(name|naming|prefix|suffix|camel|pascal)\b/.test(text)) return 'naming_patterns'; - if (/\b(file|folder|directory|structure|organize)\b/.test(text)) return 'file_structure'; - if (/\b(doc|comment|jsdoc|readme)\b/.test(text)) return 'documentation'; - return 'coding_style'; - } - - return type === 'constraint' ? 'tech_stack' : 'other'; + return 'general' } if (!type) { - type = detectType(ruleText) + type = detectType(summaryText) } -if (!category) { - category = detectCategory(ruleText, type) +if (!tag) { + tag = detectTag(summaryText) } if (!dimension) { dimension = 'specs' // Default to project specs in direct mode @@ -402,51 +558,66 @@ Bash('ccw spec rebuild') const path = require('path') const os = require('os') -const isConvention = type === 'convention' -const isConstraint = type === 'constraint' -const isLearning = type === 'learning' - let targetFile let targetDir if (dimension === 'specs') { - // Project specs - use .ccw/specs/ (same as frontend/backend spec-index-builder) targetDir = '.ccw/specs' - if (isConstraint) { + + if (type === 'bug') { + targetFile = path.join(targetDir, 'learnings.md') + } else if (type === 'decision') { targetFile = path.join(targetDir, 'architecture-constraints.md') - } else { + } else if (type === 'pattern') { targetFile = path.join(targetDir, 'coding-conventions.md') + } else { + // rule: route by content and tag + const isConstraint = /\b(禁止|no|never|must not|forbidden|不得|不允许)\b/i.test(summaryText) + const isQuality = /\b(test|coverage|lint|eslint|质量|测试覆盖|pre-commit|tsc|type.check)\b/i.test(summaryText) + || ['testing', 'quality', 
'lint'].includes(tag) + if (isQuality) { + targetFile = path.join(targetDir, 'quality-rules.md') + } else if (isConstraint) { + targetFile = path.join(targetDir, 'architecture-constraints.md') + } else { + targetFile = path.join(targetDir, 'coding-conventions.md') + } } } else { - // Personal specs - use .ccw/personal/ (same as backend spec-index-builder) + // Personal specs if (scope === 'global') { targetDir = path.join(os.homedir(), '.ccw', 'personal') } else { targetDir = path.join('.ccw', 'personal') } - // Create type-based filename - const typePrefix = isConstraint ? 'constraints' : isLearning ? 'learnings' : 'conventions' - targetFile = path.join(targetDir, `${typePrefix}.md`) + // Type-based filename + const fileMap = { bug: 'learnings', pattern: 'conventions', decision: 'constraints', rule: 'conventions' } + targetFile = path.join(targetDir, `${fileMap[type]}.md`) } ``` ### Step 5: Build Entry ```javascript -function buildEntry(rule, type, category, sessionId) { - if (type === 'learning') { - return { - date: new Date().toISOString().split('T')[0], - session_id: sessionId || null, - insight: rule, - category: category, - context: null - }; +function buildEntry(summary, type, tag, extendedFields) { + const date = new Date().toISOString().split('T')[0] + const needsDate = ['bug', 'pattern', 'decision'].includes(type) + + // Entry line + let entry = `- [${type}:${tag}] ${summary}` + if (needsDate) { + entry += ` (${date})` } - // For conventions and constraints, just return the rule string - return rule; + // Extended fields (indented with 4 spaces) + if (extendedFields && Object.keys(extendedFields).length > 0) { + for (const [key, value] of Object.entries(extendedFields)) { + entry += `\n - ${key}: ${value}` + } + } + + return entry } ``` @@ -454,56 +625,108 @@ function buildEntry(rule, type, category, sessionId) { ```javascript const fs = require('fs') +const matter = require('gray-matter') // YAML frontmatter parser // Ensure directory exists if 
(!fs.existsSync(targetDir)) { fs.mkdirSync(targetDir, { recursive: true }) } -// Check if file exists -const fileExists = fs.existsSync(targetFile) +// ── Frontmatter check & repair ── +// Handles 3 cases: +// A) File doesn't exist → create with frontmatter +// B) File exists but no frontmatter → prepend frontmatter +// C) File exists with frontmatter → ensure keywords include current tag -if (!fileExists) { - // Create new file with frontmatter - const frontmatter = `--- -title: ${dimension === 'specs' ? 'Project' : 'Personal'} ${isConstraint ? 'Constraints' : isLearning ? 'Learnings' : 'Conventions'} +const titleMap = { + 'coding-conventions': 'Coding Conventions', + 'architecture-constraints': 'Architecture Constraints', + 'learnings': 'Learnings', + 'quality-rules': 'Quality Rules', + 'conventions': 'Personal Conventions', + 'constraints': 'Personal Constraints' +} + +function ensureFrontmatter(filePath, dim, sc, t, ty) { + const basename = path.basename(filePath, '.md') + const title = titleMap[basename] || basename + + if (!fs.existsSync(filePath)) { + // Case A: Create new file with frontmatter + const content = `--- +title: ${title} readMode: optional priority: medium -category: ${category} -scope: ${dimension === 'personal' ? scope : 'project'} -dimension: ${dimension} -keywords: [${category}, ${isConstraint ? 'constraint' : isLearning ? 'learning' : 'convention'}] +scope: ${dim === 'personal' ? sc : 'project'} +dimension: ${dim} +keywords: [${t}, ${ty}] --- -# ${dimension === 'specs' ? 'Project' : 'Personal'} ${isConstraint ? 'Constraints' : isLearning ? 
'Learnings' : 'Conventions'} +# ${title} ` - fs.writeFileSync(targetFile, frontmatter, 'utf8') + fs.writeFileSync(filePath, content, 'utf8') + return + } + + // File exists — read and check frontmatter + const raw = fs.readFileSync(filePath, 'utf8') + let parsed + try { + parsed = matter(raw) + } catch { + parsed = { data: {}, content: raw } + } + + const hasFrontmatter = raw.trimStart().startsWith('---') + + if (!hasFrontmatter) { + // Case B: File exists but no frontmatter → prepend + const fm = `--- +title: ${title} +readMode: optional +priority: medium +scope: ${dim === 'personal' ? sc : 'project'} +dimension: ${dim} +keywords: [${t}, ${ty}] +--- + +` + fs.writeFileSync(filePath, fm + raw, 'utf8') + return + } + + // Case C: Frontmatter exists → ensure keywords include current tag + const existingKeywords = parsed.data.keywords || [] + const newKeywords = [...new Set([...existingKeywords, t, ty])] + + if (newKeywords.length !== existingKeywords.length) { + // Keywords changed — update frontmatter + parsed.data.keywords = newKeywords + const updated = matter.stringify(parsed.content, parsed.data) + fs.writeFileSync(filePath, updated, 'utf8') + } } +ensureFrontmatter(targetFile, dimension, scope, tag, type) + // Read existing content let content = fs.readFileSync(targetFile, 'utf8') -// Deduplicate: skip if rule text already exists in the file -if (content.includes(ruleText)) { +// Deduplicate: skip if summary text already exists in the file +if (content.includes(summaryText)) { console.log(` -Rule already exists in ${targetFile} -Text: "${ruleText}" +Entry already exists in ${targetFile} +Text: "${summaryText}" `) return } -// Format the new rule based on type -let newRule -if (isLearning) { - const entry = buildEntry(ruleText, type, category) - newRule = `- [learning/${category}] ${entry.insight} (${entry.date})` -} else { - newRule = `- [${category}] ${ruleText}` -} +// Build the entry +const newEntry = buildEntry(summaryText, type, tag, extendedFields) -// 
Append the rule -content = content.trimEnd() + '\n' + newRule + '\n' +// Append the entry +content = content.trimEnd() + '\n' + newEntry + '\n' fs.writeFileSync(targetFile, content, 'utf8') // Rebuild spec index @@ -514,23 +737,24 @@ Bash('ccw spec rebuild') **If `-y`/`--yes` (auto mode)**: ``` -Spec added: [${type}/${category}] "${ruleText}" -> ${targetFile} +Spec added: [${type}:${tag}] "${summaryText}" -> ${targetFile} ``` **Otherwise (full confirmation)**: ``` -Spec created successfully +Entry created successfully +Type: ${type} +Tag: ${tag} +Summary: "${summaryText}" Dimension: ${dimension} Scope: ${dimension === 'personal' ? scope : 'project'} -Category: ${category} -Type: ${type} -Rule: "${ruleText}" +${Object.keys(extendedFields).length > 0 ? `Extended fields: ${Object.keys(extendedFields).join(', ')}` : ''} Location: ${targetFile} Use 'ccw spec list' to view all specs -Use 'ccw spec load --category ${category}' to load specs by category +Tip: Edit ${targetFile} to add code examples or additional details ``` ## Target File Resolution @@ -538,24 +762,25 @@ Use 'ccw spec load --category ${category}' to load specs by category ### Project Specs (dimension: specs) ``` .ccw/specs/ -|- coding-conventions.md <- conventions, learnings -|- architecture-constraints.md <- constraints -+- quality-rules.md <- quality rules +|- coding-conventions.md <- pattern, rule (conventions) +|- architecture-constraints.md <- decision, rule (constraints) +|- learnings.md <- bug (debugging experience) ++- quality-rules.md <- quality rules ``` ### Personal Specs (dimension: personal) ``` # Global (~/.ccw/personal/) ~/.ccw/personal/ -|- conventions.md <- personal conventions (all projects) -|- constraints.md <- personal constraints (all projects) -+- learnings.md <- personal learnings (all projects) +|- conventions.md <- pattern, rule (all projects) +|- constraints.md <- decision, rule (all projects) ++- learnings.md <- bug (all projects) # Project-local (.ccw/personal/) .ccw/personal/ 
-|- conventions.md <- personal conventions (this project only) -|- constraints.md <- personal constraints (this project only) -+- learnings.md <- personal learnings (this project only) +|- conventions.md <- pattern, rule (this project only) +|- constraints.md <- decision, rule (this project only) ++- learnings.md <- bug (this project only) ``` ## Examples @@ -563,74 +788,99 @@ Use 'ccw spec load --category ${category}' to load specs by category ### Interactive Wizard ```bash /workflow:spec:add --interactive -# Prompts for: dimension -> scope (if personal) -> category -> type -> content +# Prompts for: dimension -> scope (if personal) -> type -> tag -> summary -> extended fields ``` -### Add a Convention (Direct) +### Add a Bug Fix Experience ```bash -/workflow:spec:add "Use async/await instead of callbacks" --type convention --category coding_style +/workflow:spec:add --type bug --tag api "API 返回 502 Bad Gateway" +``` + +Result in `.ccw/specs/learnings.md`: +```markdown +- [bug:api] API 返回 502 Bad Gateway (2026-03-09) +``` + +With interactive extended fields: +```markdown +- [bug:api] API 返回 502 Bad Gateway (2026-03-09) + - 原因: 路由处理器未在 server.ts 路由分发中注册 + - 修复: 在路由分发逻辑中导入并调用 app.use(newRouter) +``` + +### Add a Code Pattern +```bash +/workflow:spec:add --type pattern --tag routing "添加新 API 路由标准流程" ``` Result in `.ccw/specs/coding-conventions.md`: ```markdown -- [coding_style] Use async/await instead of callbacks +- [pattern:routing] 添加新 API 路由标准流程 (2026-03-09) + - 场景: Express 应用新增业务接口 ``` -### Add an Architectural Constraint (Direct) +### Add an Architecture Decision ```bash -/workflow:spec:add "No direct DB access from controllers" --type constraint --category architecture +/workflow:spec:add --type decision --tag db "选用 PostgreSQL 作为主数据库" ``` Result in `.ccw/specs/architecture-constraints.md`: ```markdown -- [architecture] No direct DB access from controllers +- [decision:db] 选用 PostgreSQL 作为主数据库 (2026-03-09) + - 决策: 使用 PostgreSQL 15 + - 理由: JSONB 支持完善,PostGIS 
扩展成熟 ``` -### Capture a Learning (Direct, Auto-detect) +### Add a Rule (Direct, Auto-detect) ```bash -/workflow:spec:add "Cache invalidation requires event sourcing for consistency" --type learning +/workflow:spec:add "Use async/await instead of callbacks" ``` Result in `.ccw/specs/coding-conventions.md`: ```markdown -- [learning/architecture] Cache invalidation requires event sourcing for consistency (2026-03-06) +- [rule:style] Use async/await instead of callbacks ``` -### Auto-confirm Mode +### Add a Constraint Rule ```bash -/workflow:spec:add -y "No direct DB access from controllers" --type constraint -# Auto-detects category as 'architecture', writes without confirmation prompt +/workflow:spec:add -y "No direct DB access from controllers" --type rule --tag arch ``` -### Personal Spec (Global) +Result in `.ccw/specs/architecture-constraints.md`: +```markdown +- [rule:arch] No direct DB access from controllers +``` + +### Legacy Compatibility ```bash -/workflow:spec:add --scope global --dimension personal --type convention "Prefer descriptive variable names" +# Old syntax still works +/workflow:spec:add "No ORM allowed" --type constraint --category architecture +# Internally maps to: --type rule --tag architecture +``` + +Result: +```markdown +- [rule:architecture] No ORM allowed +``` + +### Personal Spec +```bash +/workflow:spec:add --scope global --dimension personal --type rule --tag style "Prefer descriptive variable names" ``` Result in `~/.ccw/personal/conventions.md`: ```markdown -- [general] Prefer descriptive variable names -``` - -### Personal Spec (Project) -```bash -/workflow:spec:add --scope project --dimension personal --type constraint "No ORM in this project" -``` - -Result in `.ccw/personal/constraints.md`: -```markdown -- [general] No ORM in this project +- [rule:style] Prefer descriptive variable names ``` ## Error Handling -- **Duplicate Rule**: Warn and skip if exact rule text already exists in target file -- **Invalid Category**: Suggest 
valid categories for the type +- **Duplicate Entry**: Warn and skip if summary text already exists in target file +- **Invalid Type**: Exit with error - must be 'bug', 'pattern', 'decision', or 'rule' - **Invalid Scope**: Exit with error - must be 'global' or 'project' - **Invalid Dimension**: Exit with error - must be 'specs' or 'personal' -- **Invalid Type**: Exit with error - must be 'convention', 'constraint', or 'learning' +- **Legacy Type**: Auto-map convention→rule, constraint→rule, learning→auto-detect - **File not writable**: Check permissions, suggest manual creation -- **Invalid path**: Exit with error message - **File Corruption**: Backup existing file before modification ## Related Commands diff --git a/.claude/commands/workflow/spec/load.md b/.claude/commands/workflow/spec/load.md new file mode 100644 index 00000000..cbd36589 --- /dev/null +++ b/.claude/commands/workflow/spec/load.md @@ -0,0 +1,392 @@ +--- +name: load +description: Interactive spec loader - ask what user needs, then load relevant specs by keyword routing +argument-hint: "[--all] [--type ] [--tag ] [\"keyword query\"]" +examples: + - /workflow:spec:load + - /workflow:spec:load "api routing" + - /workflow:spec:load --type bug + - /workflow:spec:load --all + - /workflow:spec:load --tag security +--- + +# Spec Load Command (/workflow:spec:load) + +## Overview + +Interactive entry point for loading and browsing project specs. Asks the user what they need, then routes to the appropriate spec content based on keywords, type filters, or tag filters. + +**Design**: Menu-driven → keyword match → load & display. No file modifications. + +**Note**: This command may be called by other workflow commands. Upon completion, return immediately to continue the calling workflow. 
+ +## Usage +```bash +/workflow:spec:load # Interactive menu +/workflow:spec:load "api routing" # Direct keyword search +/workflow:spec:load --type bug # Filter by knowledge type +/workflow:spec:load --tag security # Filter by domain tag +/workflow:spec:load --all # Load all specs +``` + +## Execution Process + +``` +Input Parsing: + ├─ Parse --all flag → loadAll = true | false + ├─ Parse --type (bug|pattern|decision|rule) + ├─ Parse --tag (domain tag) + └─ Parse keyword query (positional text) + +Decision: + ├─ --all → Load all specs (Path C) + ├─ --type or --tag or keyword → Direct filter (Path B) + └─ No args → Interactive menu (Path A) + +Path A: Interactive Menu + ├─ Step A1: Ask user intent + ├─ Step A2: Route to action + └─ Step A3: Display results + +Path B: Direct Filter + ├─ Step B1: Build filter from args + ├─ Step B2: Search specs + └─ Step B3: Display results + +Path C: Load All + └─ Display all spec contents + +Output: + └─ Formatted spec entries matching user query +``` + +## Implementation + +### Step 1: Parse Input + +```javascript +const args = $ARGUMENTS +const argsLower = args.toLowerCase() + +// let (not const): Path A's "Browse all specs" choice reassigns loadAll +let loadAll = argsLower.includes('--all') +const hasType = argsLower.includes('--type') +const hasTag = argsLower.includes('--tag') + +let type = hasType ? args.match(/--type\s+(\w+)/i)?.[1]?.toLowerCase() : null +let tag = hasTag ? args.match(/--tag\s+([\w-]+)/i)?.[1]?.toLowerCase() : null + +// Extract keyword query (everything that's not a flag) +let keyword = args + .replace(/--type\s+\w+/gi, '') + .replace(/--tag\s+[\w-]+/gi, '') + .replace(/--all/gi, '') + .replace(/^["']|["']$/g, '') + .trim() + +// Validate type +if (type && !['bug', 'pattern', 'decision', 'rule'].includes(type)) { + console.log("Invalid type. 
Use 'bug', 'pattern', 'decision', or 'rule'.") + return +} +``` + +### Step 2: Determine Mode + +```javascript +const useInteractive = !loadAll && !hasType && !hasTag && !keyword +``` + +### Path A: Interactive Menu + +```javascript +if (useInteractive) { + const answer = AskUserQuestion({ + questions: [{ + question: "What specs would you like to load?", + header: "Action", + multiSelect: false, + options: [ + { + label: "Browse all specs", + description: "Load and display all project spec entries" + }, + { + label: "Search by keyword", + description: "Find specs matching a keyword (e.g., api, security, routing)" + }, + { + label: "View bug experiences", + description: "Load all [bug:*] debugging experience entries" + }, + { + label: "View code patterns", + description: "Load all [pattern:*] reusable code pattern entries" + } + ] + }] + }) + + const choice = answer.answers["Action"] + + if (choice === "Browse all specs") { + loadAll = true + } else if (choice === "View bug experiences") { + type = "bug" + } else if (choice === "View code patterns") { + type = "pattern" + } else if (choice === "Search by keyword") { + // Ask for keyword + const kwAnswer = AskUserQuestion({ + questions: [{ + question: "Enter keyword(s) to search for:", + header: "Keyword", + multiSelect: false, + options: [ + { label: "api", description: "API endpoints, HTTP, REST, routing" }, + { label: "security", description: "Authentication, authorization, input validation" }, + { label: "arch", description: "Architecture, design patterns, module structure" }, + { label: "perf", description: "Performance, caching, optimization" } + ] + }] + }) + keyword = kwAnswer.answers["Keyword"].toLowerCase() + } else { + // "Other" — user typed custom input, use as keyword + keyword = choice.toLowerCase() + } +} +``` + +### Step 3: Load Spec Files + +```javascript +// Discover all spec files +const specFiles = [ + '.ccw/specs/coding-conventions.md', + '.ccw/specs/architecture-constraints.md', + 
'.ccw/specs/learnings.md', + '.ccw/specs/quality-rules.md' +] + +// Also check personal specs +const personalFiles = [ + '~/.ccw/personal/conventions.md', + '~/.ccw/personal/constraints.md', + '~/.ccw/personal/learnings.md', + '.ccw/personal/conventions.md', + '.ccw/personal/constraints.md', + '.ccw/personal/learnings.md' +] + +// Read all existing spec files +const allEntries = [] + +for (const file of [...specFiles, ...personalFiles]) { + if (!file_exists(file)) continue + const content = Read(file) + + // Extract entries using unified format regex + // Entry line: - [type:tag] summary (date) + // Extended: - key: value + // NOTE: key class is [^:\s]+ (not \w) so non-ASCII keys like 原因/修复/场景 match + const lines = content.split('\n') + let currentEntry = null + + for (const line of lines) { + const entryMatch = line.match(/^- \[(\w+):([\w-]+)\] (.*?)(?:\s+\((\d{4}-\d{2}-\d{2})\))?$/) + if (entryMatch) { + if (currentEntry) allEntries.push(currentEntry) + currentEntry = { + type: entryMatch[1], + tag: entryMatch[2], + summary: entryMatch[3], + date: entryMatch[4] || null, + extended: {}, + source: file, + raw: line + } + } else if (currentEntry && /^\s{4}- ([^:\s]+):\s?(.*)/.test(line)) { + const fieldMatch = line.match(/^\s{4}- ([^:\s]+):\s?(.*)/) + currentEntry.extended[fieldMatch[1]] = fieldMatch[2] + } else if (currentEntry && !/^\s{4}/.test(line) && line.trim() !== '') { + // Non-indented non-empty line = end of current entry + allEntries.push(currentEntry) + currentEntry = null + } + + // Also handle legacy format: - [tag] text (learned: date) + const legacyMatch = line.match(/^- \[([\w-]+)\] (.+?)(?:\s+\(learned: (\d{4}-\d{2}-\d{2})\))?$/) + if (!entryMatch && legacyMatch) { + if (currentEntry) allEntries.push(currentEntry) + currentEntry = { + type: 'rule', + tag: legacyMatch[1], + summary: legacyMatch[2], + date: legacyMatch[3] || null, + extended: {}, + source: file, + raw: line, + legacy: true + } + } + } + if (currentEntry) allEntries.push(currentEntry) +} +``` + +### Step 4: Filter Entries + +```javascript +let filtered = 
allEntries + +// Filter by type +if (type) { + filtered = filtered.filter(e => e.type === type) +} + +// Filter by tag +if (tag) { + filtered = filtered.filter(e => e.tag === tag) +} + +// Filter by keyword (search in tag, summary, and extended fields) +if (keyword) { + const kw = keyword.toLowerCase() + const kwTerms = kw.split(/\s+/) + + filtered = filtered.filter(e => { + const searchText = [ + e.type, e.tag, e.summary, + ...Object.values(e.extended) + ].join(' ').toLowerCase() + + return kwTerms.every(term => searchText.includes(term)) + }) +} + +// If --all, keep everything (no filter) +``` + +### Step 5: Display Results + +```javascript +if (filtered.length === 0) { + const filterDesc = [] + if (type) filterDesc.push(`type=${type}`) + if (tag) filterDesc.push(`tag=${tag}`) + if (keyword) filterDesc.push(`keyword="${keyword}"`) + + console.log(` +No specs found matching: ${filterDesc.join(', ') || '(all)'} + +Available spec files: +${specFiles.filter(f => file_exists(f)).map(f => ` - ${f}`).join('\n') || ' (none)'} + +Suggestions: +- Use /workflow:spec:setup to initialize specs +- Use /workflow:spec:add to add new entries +- Use /workflow:spec:load --all to see everything +`) + return +} + +// Group by source file +const grouped = {} +for (const entry of filtered) { + if (!grouped[entry.source]) grouped[entry.source] = [] + grouped[entry.source].push(entry) +} + +// Display +console.log(` +## Specs Loaded (${filtered.length} entries) +${type ? `Type: ${type}` : ''}${tag ? ` Tag: ${tag}` : ''}${keyword ? ` Keyword: "${keyword}"` : ''} +`) + +for (const [source, entries] of Object.entries(grouped)) { + console.log(`### ${source}`) + console.log('') + + for (const entry of entries) { + // Render entry + const datePart = entry.date ? 
` (${entry.date})` : '' + console.log(`- [${entry.type}:${entry.tag}] ${entry.summary}${datePart}`) + + // Render extended fields + for (const [key, value] of Object.entries(entry.extended)) { + console.log(` - ${key}: ${value}`) + } + } + console.log('') +} + +// Summary footer +const typeCounts = {} +for (const e of filtered) { + typeCounts[e.type] = (typeCounts[e.type] || 0) + 1 +} +const typeBreakdown = Object.entries(typeCounts) + .map(([t, c]) => `${t}: ${c}`) + .join(', ') + +console.log(`---`) +console.log(`Total: ${filtered.length} entries (${typeBreakdown})`) +console.log(`Sources: ${Object.keys(grouped).join(', ')}`) +``` + +## Examples + +### Interactive Browse +```bash +/workflow:spec:load +# → Menu: "What specs would you like to load?" +# → User selects "Browse all specs" +# → Displays all entries grouped by file +``` + +### Keyword Search +```bash +/workflow:spec:load "api routing" +# → Filters entries where tag/summary/extended contains "api" AND "routing" +# → Displays matching entries +``` + +### Type Filter +```bash +/workflow:spec:load --type bug +# → Shows all [bug:*] entries from learnings.md +``` + +### Tag Filter +```bash +/workflow:spec:load --tag security +# → Shows all [*:security] entries across all spec files +``` + +### Combined Filters +```bash +/workflow:spec:load --type rule --tag api +# → Shows all [rule:api] entries +``` + +### Load All +```bash +/workflow:spec:load --all +# → Displays every entry from every spec file +``` + +## Error Handling + +| Error | Resolution | +|-------|------------| +| No spec files found | Suggest `/workflow:spec:setup` to initialize | +| No matching entries | Show available files and suggest alternatives | +| Invalid type | Exit with valid type list | +| Corrupt entry format | Skip unparseable lines, continue loading | + +## Related Commands + +- `/workflow:spec:setup` - Initialize project with specs scaffold +- `/workflow:spec:add` - Add knowledge entries (bug/pattern/decision/rule) with unified 
[type:tag] format +- `/workflow:session:sync` - Quick-sync session work to specs and project-tech +- `ccw spec list` - View spec file index +- `ccw spec load` - CLI-level spec loading (used by hooks) diff --git a/.claude/commands/workflow/spec/setup.md b/.claude/commands/workflow/spec/setup.md index be592142..6c4aa08e 100644 --- a/.claude/commands/workflow/spec/setup.md +++ b/.claude/commands/workflow/spec/setup.md @@ -471,70 +471,129 @@ For each category of collected answers, append rules to the corresponding spec M - Round 5 (quality): `category: execution` (testing phase) ```javascript +const matter = require('gray-matter') // YAML frontmatter parser + +// ── Frontmatter check & repair helper ── +// Ensures target spec file has valid YAML frontmatter with keywords +// Uses gray-matter for robust parsing (handles malformed frontmatter, missing fields) +function ensureSpecFrontmatter(filePath, extraKeywords = []) { + const titleMap = { + 'coding-conventions': 'Coding Conventions', + 'architecture-constraints': 'Architecture Constraints', + 'learnings': 'Learnings', + 'quality-rules': 'Quality Rules' + } + const basename = filePath.split('/').pop().replace('.md', '') + const title = titleMap[basename] || basename + const defaultKw = filePath.includes('conventions') ? 'convention' + : filePath.includes('constraints') ? 
'constraint' : 'quality' + const defaultFm = { + title, + readMode: 'optional', + priority: 'medium', + category: 'general', + scope: 'project', + dimension: 'specs', + keywords: [...new Set([defaultKw, ...extraKeywords])] + } + + if (!file_exists(filePath)) { + // Case A: Create new file with frontmatter + const specDir = path.dirname(filePath) + if (!fs.existsSync(specDir)) { + fs.mkdirSync(specDir, { recursive: true }) + } + Write(filePath, matter.stringify(`\n# ${title}\n\n`, defaultFm)) + return + } + + const raw = Read(filePath) + let parsed + try { + parsed = matter(raw) + } catch { + parsed = { data: {}, content: raw } + } + + const hasFrontmatter = raw.trimStart().startsWith('---') + + if (!hasFrontmatter) { + // Case B: File exists but no frontmatter → prepend + Write(filePath, matter.stringify(raw, defaultFm)) + return + } + + // Case C: Frontmatter exists → ensure keywords include extras + const existingKeywords = parsed.data.keywords || [] + const newKeywords = [...new Set([...existingKeywords, defaultKw, ...extraKeywords])] + + if (newKeywords.length !== existingKeywords.length) { + parsed.data.keywords = newKeywords + Write(filePath, matter.stringify(parsed.content, parsed.data)) + } +} + // Helper: append rules to a spec MD file with category support // Uses .ccw/specs/ directory (same as frontend/backend spec-index-builder) function appendRulesToSpecFile(filePath, rules, defaultCategory = 'general') { if (rules.length === 0) return - // Ensure .ccw/specs/ directory exists - const specDir = path.dirname(filePath) - if (!fs.existsSync(specDir)) { - fs.mkdirSync(specDir, { recursive: true }) - } + // Extract domain tags from rules for keyword accumulation + const ruleTags = rules + .map(r => r.match(/\[[\w]+:([\w-]+)\]/)?.[1]) + .filter(Boolean) - // Check if file exists - if (!file_exists(filePath)) { - // Create file with frontmatter including category - const frontmatter = `--- -title: ${filePath.includes('conventions') ? 
'Coding Conventions' : filePath.includes('constraints') ? 'Architecture Constraints' : 'Quality Rules'} -readMode: optional -priority: medium -category: ${defaultCategory} -scope: project -dimension: specs -keywords: [${defaultCategory}, ${filePath.includes('conventions') ? 'convention' : filePath.includes('constraints') ? 'constraint' : 'quality'}] ---- - -# ${filePath.includes('conventions') ? 'Coding Conventions' : filePath.includes('constraints') ? 'Architecture Constraints' : 'Quality Rules'} - -` - Write(filePath, frontmatter) - } + // Ensure frontmatter exists and keywords include rule tags + ensureSpecFrontmatter(filePath, [...new Set(ruleTags)]) const existing = Read(filePath) - // Append new rules as markdown list items after existing content - const newContent = existing.trimEnd() + '\n' + rules.map(r => `- ${r}`).join('\n') + '\n' + // Append new rules as markdown list items - rules are already in [type:tag] format from caller + const newContent = existing.trimEnd() + '\n' + rules.map(r => { + // If rule already has - prefix or [type:tag] format, use as-is + if (/^- /.test(r)) return r + if (/^\[[\w]+:[\w-]+\]/.test(r)) return `- ${r}` + return `- [rule:${defaultCategory}] ${r}` + }).join('\n') + '\n' Write(filePath, newContent) } -// Write conventions (general category) - use .ccw/specs/ (same as frontend/backend) -appendRulesToSpecFile('.ccw/specs/coding-conventions.md', - [...newCodingStyle, ...newNamingPatterns, ...newFileStructure, ...newDocumentation], - 'general') +// Helper: infer domain tag from rule content +function inferTag(text) { + const t = text.toLowerCase() + if (/\b(api|http|rest|endpoint|routing)\b/.test(t)) return 'api' + if (/\b(security|auth|permission|xss|sql|sanitize)\b/.test(t)) return 'security' + if (/\b(database|db|sql|postgres|mysql)\b/.test(t)) return 'db' + if (/\b(react|component|hook|jsx|tsx)\b/.test(t)) return 'react' + if (/\b(performance|cache|lazy|async|slow)\b/.test(t)) return 'perf' + if 
(/\b(test|coverage|mock|jest|vitest)\b/.test(t)) return 'testing' + if (/\b(architecture|layer|module|dependency)\b/.test(t)) return 'arch' + if (/\b(naming|camel|pascal|prefix|suffix)\b/.test(t)) return 'naming' + if (/\b(file|folder|directory|structure)\b/.test(t)) return 'file' + if (/\b(doc|comment|jsdoc|readme)\b/.test(t)) return 'doc' + if (/\b(build|webpack|vite|compile)\b/.test(t)) return 'build' + if (/\b(deploy|ci|cd|docker)\b/.test(t)) return 'deploy' + if (/\b(lint|eslint|prettier|format)\b/.test(t)) return 'lint' + if (/\b(type|typescript|strict|any)\b/.test(t)) return 'typing' + return 'style' // fallback for coding conventions +} -// Write constraints (planning category) +// Write conventions - infer domain tags from content +appendRulesToSpecFile('.ccw/specs/coding-conventions.md', + [...newCodingStyle, ...newNamingPatterns, ...newFileStructure, ...newDocumentation] + .map(r => /^\[[\w]+:[\w-]+\]/.test(r) ? r : `[rule:${inferTag(r)}] ${r}`), + 'style') + +// Write constraints - infer domain tags from content appendRulesToSpecFile('.ccw/specs/architecture-constraints.md', - [...newArchitecture, ...newTechStack, ...newPerformance, ...newSecurity], - 'planning') + [...newArchitecture, ...newTechStack, ...newPerformance, ...newSecurity] + .map(r => /^\[[\w]+:[\w-]+\]/.test(r) ? 
r : `[rule:${inferTag(r)}] ${r}`), + 'arch') // Write quality rules (execution category) if (newQualityRules.length > 0) { const qualityPath = '.ccw/specs/quality-rules.md' - if (!file_exists(qualityPath)) { - Write(qualityPath, `--- -title: Quality Rules -readMode: required -priority: high -category: execution -scope: project -dimension: specs -keywords: [execution, quality, testing, coverage, lint] ---- - -# Quality Rules - -`) - } + // ensureSpecFrontmatter handles create/repair/keyword-update + ensureSpecFrontmatter(qualityPath, ['quality', 'testing', 'coverage', 'lint']) appendRulesToSpecFile(qualityPath, newQualityRules.map(q => `${q.rule} (scope: ${q.scope}, enforced by: ${q.enforced_by})`), 'execution') @@ -644,7 +703,8 @@ Next steps: ## Related Commands -- `/workflow:spec:add` - Interactive wizard to create individual specs with scope selection +- `/workflow:spec:add` - Add knowledge entries (bug/pattern/decision/rule) with unified [type:tag] format +- `/workflow:spec:load` - Interactive spec loader with keyword/type/tag filtering - `/workflow:session:sync` - Quick-sync session work to specs and project-tech - `workflow-plan` skill - Start planning with initialized project context - `/workflow:status --project` - View project state and guidelines diff --git a/ccw/src/tools/generate-ddd-docs.ts b/ccw/src/tools/generate-ddd-docs.ts new file mode 100644 index 00000000..60f0987a --- /dev/null +++ b/ccw/src/tools/generate-ddd-docs.ts @@ -0,0 +1,671 @@ +/** + * Generate DDD Docs Tool + * Generate DDD documentation from doc-index.json with deterministic output paths. 
+ * Supports 5 strategies: component (L3), feature (L2), index, overview, schema
+ */
+
+import { z } from 'zod';
+import type { ToolSchema, ToolResult } from '../types/tool.js';
+import { existsSync, readFileSync, mkdirSync, writeFileSync, unlinkSync } from 'fs';
+import { join, resolve, dirname } from 'path';
+import { execSync } from 'child_process';
+import { tmpdir } from 'os';
+import { getSecondaryModel } from './cli-config-manager.js';
+
+// Default doc-index path relative to project root
+const DEFAULT_DOC_INDEX_PATH = '.workflow/.doc-index/doc-index.json';
+
+// Define Zod schema for validation
+const ParamsSchema = z.object({
+  strategy: z.enum(['component', 'feature', 'index', 'overview', 'schema']),
+  entityId: z.string().optional(),
+  docIndexPath: z.string().default(DEFAULT_DOC_INDEX_PATH),
+  tool: z.enum(['gemini', 'qwen', 'codex']).default('gemini'),
+  model: z.string().optional(),
+});
+
+type Params = z.infer<typeof ParamsSchema>;
+
+interface ToolOutput {
+  success: boolean;
+  strategy: string;
+  entity_id?: string;
+  output_path: string;
+  tool: string;
+  model?: string;
+  duration_seconds?: number;
+  message?: string;
+  error?: string;
+}
+
+// --- doc-index.json type definitions ---
+
+interface CodeLocation {
+  path: string;
+  symbols?: string[];
+  lineRange?: [number, number];
+}
+
+interface TechnicalComponent {
+  id: string;
+  name: string;
+  type: string;
+  responsibility?: string;
+  adrId?: string | null;
+  docPath?: string;
+  codeLocations?: CodeLocation[];
+  dependsOn?: string[];
+  featureIds?: string[];
+  actionIds?: string[];
+}
+
+interface Feature {
+  id: string;
+  name: string;
+  epicId?: string | null;
+  status?: string;
+  docPath?: string;
+  requirementIds?: string[];
+  techComponentIds?: string[];
+  tags?: string[];
+}
+
+interface DocIndex {
+  version?: string;
+  schema_version?: string;
+  project?: string;
+  build_path?: string;
+  last_updated?: string;
+  features?: Feature[];
+  technicalComponents?: TechnicalComponent[];
+  requirements?: 
Array<{ id: string; title?: string; priority?: string }>; + architectureDecisions?: Array<{ id: string; title?: string; componentIds?: string[] }>; + actions?: Array<{ id: string; description?: string; type?: string; timestamp?: string; affectedComponents?: string[]; affectedFeatures?: string[] }>; + glossary?: Array<{ id: string; term: string; definition?: string }>; + [key: string]: unknown; +} + +// --- Core functions --- + +/** + * Load and parse doc-index.json + */ +function loadDocIndex(indexPath: string): DocIndex { + const absPath = resolve(process.cwd(), indexPath); + if (!existsSync(absPath)) { + throw new Error(`doc-index.json not found at: ${absPath}. Run /ddd:scan or /ddd:index-build first.`); + } + const raw = readFileSync(absPath, 'utf8'); + return JSON.parse(raw) as DocIndex; +} + +/** + * Calculate deterministic output path based on strategy and entityId. + * All paths are relative to the doc-index directory. + */ +function calculateDddOutputPath( + strategy: string, + entityId: string | undefined, + docIndexDir: string +): string { + switch (strategy) { + case 'component': { + if (!entityId) throw new Error('entityId is required for component strategy'); + // tech-{slug} -> {slug}.md + const slug = entityId.replace(/^tech-/, ''); + return join(docIndexDir, 'tech-registry', `${slug}.md`); + } + case 'feature': { + if (!entityId) throw new Error('entityId is required for feature strategy'); + // feat-{slug} -> {slug}.md + const slug = entityId.replace(/^feat-/, ''); + return join(docIndexDir, 'feature-maps', `${slug}.md`); + } + case 'index': + // Generate _index.md files - entityId determines which subdirectory + if (entityId) { + return join(docIndexDir, entityId, '_index.md'); + } + // Default: generate all index files (return the doc-index dir itself) + return docIndexDir; + case 'overview': + if (entityId === 'architecture') { + return join(docIndexDir, 'ARCHITECTURE.md'); + } + return join(docIndexDir, 'README.md'); + case 'schema': + return 
join(docIndexDir, 'SCHEMA.md'); + default: + throw new Error(`Unknown strategy: ${strategy}`); + } +} + +/** + * Build YAML frontmatter string from entity metadata + */ +function buildFrontmatter( + strategy: string, + entity: TechnicalComponent | Feature | null, + docIndex: DocIndex +): string { + const now = new Date().toISOString(); + + switch (strategy) { + case 'component': { + const comp = entity as TechnicalComponent; + if (!comp) return ''; + const featureIds = comp.featureIds || []; + const codeLocations = (comp.codeLocations || []).map(loc => { + const symbolsStr = loc.symbols && loc.symbols.length > 0 + ? `\n symbols: [${loc.symbols.join(', ')}]` + : ''; + return ` - path: ${loc.path}${symbolsStr}`; + }).join('\n'); + + return [ + '---', + 'layer: 3', + `component_id: ${comp.id}`, + `name: ${comp.name}`, + `type: ${comp.type || 'unknown'}`, + `features: [${featureIds.join(', ')}]`, + codeLocations ? `code_locations:\n${codeLocations}` : 'code_locations: []', + `generated_at: ${now}`, + '---', + ].join('\n'); + } + + case 'feature': { + const feat = entity as Feature; + if (!feat) return ''; + const reqIds = feat.requirementIds || []; + const techIds = feat.techComponentIds || []; + const tags = feat.tags || []; + + return [ + '---', + 'layer: 2', + `feature_id: ${feat.id}`, + `name: ${feat.name}`, + `epic_id: ${feat.epicId || 'null'}`, + `status: ${feat.status || 'planned'}`, + `requirements: [${reqIds.join(', ')}]`, + `components: [${techIds.join(', ')}]`, + `depends_on_layer3: [${techIds.join(', ')}]`, + `tags: [${tags.join(', ')}]`, + `generated_at: ${now}`, + '---', + ].join('\n'); + } + + case 'index': + case 'overview': { + const featureIds = (docIndex.features || []).map(f => f.id); + return [ + '---', + 'layer: 1', + `depends_on_layer2: [${featureIds.join(', ')}]`, + `generated_at: ${now}`, + '---', + ].join('\n'); + } + + case 'schema': + return [ + '---', + `schema_version: ${docIndex.schema_version || docIndex.version || '1.0'}`, + 
`generated_at: ${now}`, + '---', + ].join('\n'); + + default: + return ''; + } +} + +/** + * Build CLI prompt combining frontmatter, content instructions, and code context + */ +function buildDddPrompt( + strategy: string, + entity: TechnicalComponent | Feature | null, + frontmatter: string, + docIndex: DocIndex, + outputPath: string +): string { + const absOutputPath = resolve(process.cwd(), outputPath); + + switch (strategy) { + case 'component': { + const comp = entity as TechnicalComponent; + const contextPaths = (comp.codeLocations || []).map(loc => `@${loc.path}`).join(' '); + // Build change history from actions + const compActions = (docIndex.actions || []) + .filter(a => (a.affectedComponents || []).includes(comp.id)) + .map(a => `- ${a.timestamp?.split('T')[0] || 'unknown'} | ${a.type || 'change'} | ${a.description || a.id}`) + .join('\n'); + const changeHistoryBlock = compActions + ? `\n\nChange History (include as "## Change History" section):\n${compActions}` + : ''; + return `PURPOSE: Generate component documentation for ${comp.name} +TASK: +- Document component purpose and responsibility +- List exported symbols (classes, functions, types) +- Document dependencies (internal and external) +- Include code examples for key APIs +- Document integration points with other components +- Include change history timeline +MODE: write +CONTEXT: ${contextPaths || '@**/*'} +EXPECTED: Markdown file with: Overview, API Reference, Dependencies, Usage Examples, Change History +CONSTRAINTS: Focus on public API | Include type signatures + +OUTPUT FILE: ${absOutputPath} + +The file MUST start with this exact frontmatter: + +${frontmatter} + +Sections to include after frontmatter: +- Responsibility +- Code Locations +- Related Requirements +- Architecture Decisions +- Dependencies (in/out) +- Change History${changeHistoryBlock}`; + } + + case 'feature': { + const feat = entity as Feature; + const techIds = feat.techComponentIds || []; + const componentDocs = techIds + 
.map(id => { + const slug = id.replace(/^tech-/, ''); + return `@.workflow/.doc-index/tech-registry/${slug}.md`; + }) + .join(' '); + // Build change history from actions + const featActions = (docIndex.actions || []) + .filter(a => (a.affectedFeatures || []).includes(feat.id)) + .map(a => `- ${a.timestamp?.split('T')[0] || 'unknown'} | ${a.type || 'change'} | ${a.description || a.id}`) + .join('\n'); + const featChangeHistoryBlock = featActions + ? `\n\nChange History (include as "## Change History" section):\n${featActions}` + : ''; + return `PURPOSE: Generate feature documentation for ${feat.name} +TASK: +- Describe feature purpose and business value +- List requirements (from requirementIds) +- Document components involved (from techComponentIds) +- Include architecture decisions (from adrIds) +- Provide integration guide +- Include change history timeline +MODE: write +CONTEXT: ${componentDocs || '@.workflow/.doc-index/tech-registry/*.md'} +EXPECTED: Markdown file with: Overview, Requirements, Components, Architecture, Integration, Change History +CONSTRAINTS: Reference Layer 3 component docs | Business-focused language + +OUTPUT FILE: ${absOutputPath} + +The file MUST start with this exact frontmatter: + +${frontmatter} + +Sections to include after frontmatter: +- Overview +- Requirements (with mapping status) +- Technical Components +- Architecture Decisions +- Change History${featChangeHistoryBlock}`; + } + + case 'index': { + const docIndexDir = dirname(resolve(process.cwd(), outputPath)); + const parentDir = dirname(docIndexDir); + return `PURPOSE: Generate index document for ${docIndexDir} +TASK: +- List all entries in this directory with brief descriptions +- Create a navigable catalog with links to each document +- Include status/type columns where applicable +MODE: write +CONTEXT: @${parentDir}/doc-index.json +EXPECTED: Markdown index file with: table of entries, descriptions, links +CONSTRAINTS: Catalog format | Link to sibling documents + +OUTPUT 
FILE: ${absOutputPath} + +The file MUST start with this exact frontmatter: + +${frontmatter}`; + } + + case 'overview': { + const isArchitecture = outputPath.endsWith('ARCHITECTURE.md'); + if (isArchitecture) { + return `PURPOSE: Generate architecture overview document +TASK: +- System design overview +- Component relationships and dependencies +- Key architecture decisions (from ADRs) +- Technology stack +MODE: write +CONTEXT: @.workflow/.doc-index/doc-index.json @.workflow/.doc-index/tech-registry/*.md +EXPECTED: ARCHITECTURE.md with: System Design, Component Diagram, ADRs, Tech Stack +CONSTRAINTS: Architecture-focused | Reference component docs for details + +OUTPUT FILE: ${absOutputPath} + +The file MUST start with this exact frontmatter: + +${frontmatter}`; + } + return `PURPOSE: Generate project README with overview and navigation +TASK: +- Project summary and purpose +- Quick start guide +- Navigation to features, components, and architecture +- Link to doc-index.json +MODE: write +CONTEXT: @.workflow/.doc-index/doc-index.json @.workflow/.doc-index/feature-maps/_index.md +EXPECTED: README.md with: Overview, Quick Start, Navigation, Links +CONSTRAINTS: High-level only | Entry point for new developers + +OUTPUT FILE: ${absOutputPath} + +The file MUST start with this exact frontmatter: + +${frontmatter}`; + } + + case 'schema': { + return `PURPOSE: Document doc-index.json schema structure and versioning +TASK: +- Document current schema structure (all fields) +- Define versioning policy (semver: major.minor) +- Document migration protocol for version upgrades +- Provide examples for each schema section +MODE: write +CONTEXT: @.workflow/.doc-index/doc-index.json +EXPECTED: SCHEMA.md with: Schema Structure, Versioning Policy, Migration Protocol, Examples +CONSTRAINTS: Complete field documentation | Clear migration steps + +OUTPUT FILE: ${absOutputPath} + +The file MUST start with this exact frontmatter: + +${frontmatter}`; + } + + default: + throw new 
Error(`Unknown strategy: ${strategy}`); + } +} + +/** + * Create temporary prompt file and return path + */ +function createPromptFile(prompt: string): string { + const timestamp = Date.now(); + const randomSuffix = Math.random().toString(36).substring(2, 8); + const promptFile = join(tmpdir(), `ddd-docs-prompt-${timestamp}-${randomSuffix}.txt`); + writeFileSync(promptFile, prompt, 'utf8'); + return promptFile; +} + +/** + * Build CLI command using stdin piping + */ +function buildCliCommand(tool: string, promptFile: string, model: string): string { + const normalizedPath = promptFile.replace(/\\/g, '/'); + const isWindows = process.platform === 'win32'; + + const catCmd = isWindows ? `Get-Content -Raw "${normalizedPath}" | ` : `cat "${normalizedPath}" | `; + const modelFlag = model ? ` -m "${model}"` : ''; + + switch (tool) { + case 'qwen': + return `${catCmd}qwen${modelFlag} --yolo`; + case 'codex': + if (isWindows) { + return `codex --full-auto exec (Get-Content -Raw "${normalizedPath}")${modelFlag} --skip-git-repo-check -s danger-full-access`; + } + return `codex --full-auto exec "$(cat "${normalizedPath}")"${modelFlag} --skip-git-repo-check -s danger-full-access`; + case 'gemini': + default: + return `${catCmd}gemini${modelFlag} --yolo`; + } +} + +/** + * Resolve entity from doc-index based on strategy and entityId + */ +function resolveEntity( + strategy: string, + entityId: string | undefined, + docIndex: DocIndex +): TechnicalComponent | Feature | null { + if (strategy === 'component') { + if (!entityId) throw new Error('entityId is required for component strategy'); + const comp = (docIndex.technicalComponents || []).find(c => c.id === entityId); + if (!comp) throw new Error(`Component not found in doc-index: ${entityId}`); + return comp; + } + + if (strategy === 'feature') { + if (!entityId) throw new Error('entityId is required for feature strategy'); + const feat = (docIndex.features || []).find(f => f.id === entityId); + if (!feat) throw new 
Error(`Feature not found in doc-index: ${entityId}`); + return feat; + } + + // index, overview, schema do not require a specific entity + return null; +} + +/** + * For the index strategy, generate _index.md for multiple directories + */ +function getIndexTargets(entityId: string | undefined): string[] { + if (entityId) { + return [entityId]; + } + // Default: all standard subdirectories + return ['feature-maps', 'tech-registry', 'action-logs', 'planning']; +} + +// Tool schema for MCP +export const schema: ToolSchema = { + name: 'generate_ddd_docs', + description: `Generate DDD documentation from doc-index.json with deterministic output paths. + +Strategies: +- component: Layer 3 technical component doc (tech-registry/{slug}.md) +- feature: Layer 2 feature map doc (feature-maps/{slug}.md) +- index: Layer 1 _index.md catalog files for subdirectories +- overview: Layer 1 README.md or ARCHITECTURE.md +- schema: SCHEMA.md documenting doc-index.json structure + +Requires doc-index.json from /ddd:scan or /ddd:index-build. +Output: .workflow/.doc-index/...`, + inputSchema: { + type: 'object', + properties: { + strategy: { + type: 'string', + enum: ['component', 'feature', 'index', 'overview', 'schema'], + description: 'Document generation strategy: component (L3), feature (L2), index, overview, schema (L1)' + }, + entityId: { + type: 'string', + description: 'Entity ID from doc-index.json (required for component/feature, optional for index/overview). For overview: "architecture" to generate ARCHITECTURE.md, omit for README.md. For index: subdirectory name or omit for all.' 
+    },
+    docIndexPath: {
+      type: 'string',
+      description: 'Path to doc-index.json (default: .workflow/.doc-index/doc-index.json)',
+      default: '.workflow/.doc-index/doc-index.json'
+    },
+    tool: {
+      type: 'string',
+      enum: ['gemini', 'qwen', 'codex'],
+      description: 'CLI tool to use (default: gemini)',
+      default: 'gemini'
+    },
+    model: {
+      type: 'string',
+      description: 'Model name (optional, uses tool defaults)'
+    }
+  },
+  required: ['strategy']
+  }
+};
+
+// Handler function
+export async function handler(params: Record<string, unknown>): Promise<ToolResult<ToolOutput>> {
+  const parsed = ParamsSchema.safeParse(params);
+  if (!parsed.success) {
+    return { success: false, error: `Invalid params: ${parsed.error.message}` };
+  }
+
+  const { strategy, entityId, docIndexPath, tool, model } = parsed.data;
+
+  try {
+    // Load doc-index.json
+    const docIndex = loadDocIndex(docIndexPath);
+    const docIndexDir = dirname(resolve(process.cwd(), docIndexPath));
+
+    // Resolve model
+    let actualModel = model || '';
+    if (!actualModel) {
+      try {
+        actualModel = getSecondaryModel(process.cwd(), tool);
+      } catch {
+        actualModel = '';
+      }
+    }
+
+    // Handle index strategy separately (may generate multiple files)
+    if (strategy === 'index') {
+      const targets = getIndexTargets(entityId);
+      const results: string[] = [];
+
+      for (const target of targets) {
+        const outputPath = join(docIndexDir, target, '_index.md');
+        const absOutputDir = dirname(resolve(process.cwd(), outputPath));
+
+        // Ensure directory exists
+        mkdirSync(absOutputDir, { recursive: true });
+
+        const frontmatter = buildFrontmatter('index', null, docIndex);
+        const prompt = buildDddPrompt('index', null, frontmatter, docIndex, outputPath);
+        const promptFile = createPromptFile(prompt);
+        const command = buildCliCommand(tool, promptFile, actualModel);
+
+        console.log(`[DDD] Generating index: ${target}/_index.md`);
+
+        try {
+          const startTime = Date.now();
+          execSync(command, {
+            cwd: docIndexDir,
+            encoding: 'utf8',
+            stdio: 'inherit',
+            timeout: 600000,
+            shell: 
process.platform === 'win32' ? 'powershell.exe' : '/bin/bash' + }); + const duration = Math.round((Date.now() - startTime) / 1000); + results.push(`${target}/_index.md (${duration}s)`); + } finally { + try { unlinkSync(promptFile); } catch { /* ignore */ } + } + } + + return { + success: true, + result: { + success: true, + strategy, + entity_id: entityId, + output_path: docIndexDir, + tool, + model: actualModel, + message: `Generated index files: ${results.join(', ')}` + } + }; + } + + // Single-file strategies: component, feature, overview, schema + const entity = resolveEntity(strategy, entityId, docIndex); + const outputPath = calculateDddOutputPath(strategy, entityId, docIndexDir); + const absOutputDir = dirname(resolve(process.cwd(), outputPath)); + + // Ensure output directory exists + mkdirSync(absOutputDir, { recursive: true }); + + // Build frontmatter and prompt + const frontmatter = buildFrontmatter(strategy, entity, docIndex); + const prompt = buildDddPrompt(strategy, entity, frontmatter, docIndex, outputPath); + + // Create temp prompt file + const promptFile = createPromptFile(prompt); + + // Build CLI command + const command = buildCliCommand(tool, promptFile, actualModel); + + console.log(`[DDD] Generating ${strategy}: ${outputPath}`); + console.log(`[DDD] Tool: ${tool} | Model: ${actualModel || 'default'}`); + + try { + const startTime = Date.now(); + + execSync(command, { + cwd: docIndexDir, + encoding: 'utf8', + stdio: 'inherit', + timeout: 600000, + shell: process.platform === 'win32' ? 
'powershell.exe' : '/bin/bash' + }); + + const duration = Math.round((Date.now() - startTime) / 1000); + + // Cleanup + try { unlinkSync(promptFile); } catch { /* ignore */ } + + console.log(`[DDD] Completed in ${duration}s: ${outputPath}`); + + return { + success: true, + result: { + success: true, + strategy, + entity_id: entityId, + output_path: outputPath, + tool, + model: actualModel, + duration_seconds: duration, + message: `Documentation generated successfully in ${duration}s` + } + }; + } catch (error) { + // Cleanup on error + try { unlinkSync(promptFile); } catch { /* ignore */ } + + // Tool fallback: gemini -> qwen -> codex + const fallbackChain = ['gemini', 'qwen', 'codex']; + const currentIdx = fallbackChain.indexOf(tool); + if (currentIdx >= 0 && currentIdx < fallbackChain.length - 1) { + const nextTool = fallbackChain[currentIdx + 1]; + console.log(`[DDD] ${tool} failed, falling back to ${nextTool}`); + return handler({ ...params, tool: nextTool }); + } + + return { + success: false, + error: `Documentation generation failed: ${(error as Error).message}` + }; + } + } catch (error) { + return { + success: false, + error: `Tool execution failed: ${(error as Error).message}` + }; + } +} diff --git a/ccw/src/tools/index.ts b/ccw/src/tools/index.ts index 028ac9d1..4fdc1ca3 100644 --- a/ccw/src/tools/index.ts +++ b/ccw/src/tools/index.ts @@ -14,6 +14,7 @@ import * as classifyFoldersMod from './classify-folders.js'; import * as detectChangedModulesMod from './detect-changed-modules.js'; import * as discoverDesignFilesMod from './discover-design-files.js'; import * as generateModuleDocsMod from './generate-module-docs.js'; +import * as generateDddDocsMod from './generate-ddd-docs.js'; import * as convertTokensToCssMod from './convert-tokens-to-css.js'; import * as sessionManagerMod from './session-manager.js'; import * as cliExecutorMod from './cli-executor.js'; @@ -358,6 +359,7 @@ registerTool(toLegacyTool(classifyFoldersMod)); 
registerTool(toLegacyTool(detectChangedModulesMod)); registerTool(toLegacyTool(discoverDesignFilesMod)); registerTool(toLegacyTool(generateModuleDocsMod)); +registerTool(toLegacyTool(generateDddDocsMod)); registerTool(toLegacyTool(convertTokensToCssMod)); registerTool(toLegacyTool(sessionManagerMod)); registerTool(toLegacyTool(cliExecutorMod));