mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-03-11 17:21:03 +08:00
Compare commits
23 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1cd96b90e8 | ||
|
|
efbbaff834 | ||
|
|
1ada08f073 | ||
|
|
65ff5f54cb | ||
|
|
c50d9b21dc | ||
|
|
38d1987f41 | ||
|
|
d29dabf0a9 | ||
|
|
2d723644ea | ||
|
|
9fb13ed6b0 | ||
|
|
b4ad8c7b80 | ||
|
|
6f9dc836c3 | ||
|
|
663620955c | ||
|
|
cbd1813ea7 | ||
|
|
b2fc2f60f1 | ||
|
|
3341a2e772 | ||
|
|
61ea9d47a6 | ||
|
|
f3ae78f95e | ||
|
|
334f82eaad | ||
|
|
1c1a4afd23 | ||
|
|
c014c0568a | ||
|
|
62d8aa3623 | ||
|
|
9aa07e8d01 | ||
|
|
4254eeeaa7 |
27
.ccw/personal/coding-style.md
Normal file
27
.ccw/personal/coding-style.md
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
---
|
||||||
|
title: "Personal Coding Style"
|
||||||
|
dimension: personal
|
||||||
|
category: general
|
||||||
|
keywords:
|
||||||
|
- style
|
||||||
|
- preference
|
||||||
|
readMode: optional
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
# Personal Coding Style
|
||||||
|
|
||||||
|
## Preferences
|
||||||
|
|
||||||
|
- Describe your preferred coding style here
|
||||||
|
- Example: verbose variable names vs terse, functional vs imperative
|
||||||
|
|
||||||
|
## Patterns I Prefer
|
||||||
|
|
||||||
|
- List patterns you reach for most often
|
||||||
|
- Example: builder pattern, factory functions, tagged unions
|
||||||
|
|
||||||
|
## Things I Avoid
|
||||||
|
|
||||||
|
- List anti-patterns or approaches you dislike
|
||||||
|
- Example: deep inheritance hierarchies, magic strings
|
||||||
25
.ccw/personal/tool-preferences.md
Normal file
25
.ccw/personal/tool-preferences.md
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
---
|
||||||
|
title: "Tool Preferences"
|
||||||
|
dimension: personal
|
||||||
|
category: general
|
||||||
|
keywords:
|
||||||
|
- tool
|
||||||
|
- cli
|
||||||
|
- editor
|
||||||
|
readMode: optional
|
||||||
|
priority: low
|
||||||
|
---
|
||||||
|
|
||||||
|
# Tool Preferences
|
||||||
|
|
||||||
|
## Editor
|
||||||
|
|
||||||
|
- Preferred editor and key extensions/plugins
|
||||||
|
|
||||||
|
## CLI Tools
|
||||||
|
|
||||||
|
- Preferred shell, package manager, build tools
|
||||||
|
|
||||||
|
## Debugging
|
||||||
|
|
||||||
|
- Preferred debugging approach and tools
|
||||||
@@ -1,3 +1,13 @@
|
|||||||
|
---
|
||||||
|
title: Architecture Constraints
|
||||||
|
readMode: optional
|
||||||
|
priority: medium
|
||||||
|
category: general
|
||||||
|
scope: project
|
||||||
|
dimension: specs
|
||||||
|
keywords: [architecture, constraint, schema, compatibility, portability, design, arch]
|
||||||
|
---
|
||||||
|
|
||||||
# Architecture Constraints
|
# Architecture Constraints
|
||||||
|
|
||||||
## Schema Evolution
|
## Schema Evolution
|
||||||
|
|||||||
@@ -1,3 +1,13 @@
|
|||||||
|
---
|
||||||
|
title: Coding Conventions
|
||||||
|
readMode: optional
|
||||||
|
priority: medium
|
||||||
|
category: general
|
||||||
|
scope: project
|
||||||
|
dimension: specs
|
||||||
|
keywords: [coding, convention, style, naming, pattern, navigation, schema, error-handling, implementation, validation, clarity, doc]
|
||||||
|
---
|
||||||
|
|
||||||
# Coding Conventions
|
# Coding Conventions
|
||||||
|
|
||||||
## Navigation & Path Handling
|
## Navigation & Path Handling
|
||||||
@@ -9,6 +19,7 @@
|
|||||||
## Document Generation
|
## Document Generation
|
||||||
|
|
||||||
- [architecture] For document generation systems, adopt Layer 3→2→1 pattern (components → features → indexes) for efficient incremental updates. (learned: 2026-03-07)
|
- [architecture] For document generation systems, adopt Layer 3→2→1 pattern (components → features → indexes) for efficient incremental updates. (learned: 2026-03-07)
|
||||||
|
- [tools] When commands need to generate files with deterministic paths and frontmatter, use dedicated ccw tool endpoints (`ccw tool exec`) instead of raw `ccw cli -p` calls. Endpoints control output path, file naming, and structural metadata; CLI tools only generate prose content. (learned: 2026-03-09)
|
||||||
|
|
||||||
## Implementation Quality
|
## Implementation Quality
|
||||||
|
|
||||||
|
|||||||
@@ -8,12 +8,12 @@ description: |
|
|||||||
|
|
||||||
Examples:
|
Examples:
|
||||||
- Context: Coordinator spawns analyst worker
|
- Context: Coordinator spawns analyst worker
|
||||||
user: "role: analyst\nrole_spec: .claude/skills/team-lifecycle/role-specs/analyst.md\nsession: .workflow/.team/TLS-xxx"
|
user: "role: analyst\nrole_spec: ~ or <project>/.claude/skills/team-lifecycle/role-specs/analyst.md\nsession: .workflow/.team/TLS-xxx"
|
||||||
assistant: "Loading role spec, discovering RESEARCH-* tasks, executing Phase 2-4 domain logic"
|
assistant: "Loading role spec, discovering RESEARCH-* tasks, executing Phase 2-4 domain logic"
|
||||||
commentary: Agent parses prompt, loads role spec, runs built-in Phase 1 then role-specific Phase 2-4 then built-in Phase 5
|
commentary: Agent parses prompt, loads role spec, runs built-in Phase 1 then role-specific Phase 2-4 then built-in Phase 5
|
||||||
|
|
||||||
- Context: Coordinator spawns writer worker with inner loop
|
- Context: Coordinator spawns writer worker with inner loop
|
||||||
user: "role: writer\nrole_spec: .claude/skills/team-lifecycle/role-specs/writer.md\ninner_loop: true"
|
user: "role: writer\nrole_spec: ~ or <project>/.claude/skills/team-lifecycle/role-specs/writer.md\ninner_loop: true"
|
||||||
assistant: "Loading role spec, processing all DRAFT-* tasks in inner loop"
|
assistant: "Loading role spec, processing all DRAFT-* tasks in inner loop"
|
||||||
commentary: Agent detects inner_loop=true, loops Phase 1-5 for each same-prefix task
|
commentary: Agent detects inner_loop=true, loops Phase 1-5 for each same-prefix task
|
||||||
color: green
|
color: green
|
||||||
|
|||||||
@@ -48,8 +48,9 @@ doc-index.json → tech-registry/*.md (L3) → feature-maps/*.md (L2) → _index
|
|||||||
├── tech-registry/ ← Component documentation (Layer 3)
|
├── tech-registry/ ← Component documentation (Layer 3)
|
||||||
│ ├── _index.md
|
│ ├── _index.md
|
||||||
│ └── {component-slug}.md
|
│ └── {component-slug}.md
|
||||||
└── sessions/
|
└── planning/ ← Planning sessions (Layer 1)
|
||||||
└── _index.md ← Planning sessions index (Layer 1)
|
├── _index.md ← Planning sessions index
|
||||||
|
└── {task-slug}-{date}/ ← Individual session folders
|
||||||
```
|
```
|
||||||
|
|
||||||
## Phase 1: Load & Validate
|
## Phase 1: Load & Validate
|
||||||
@@ -87,147 +88,82 @@ IF docs already exist AND NOT --force:
|
|||||||
Ask user (unless -y → overwrite)
|
Ask user (unless -y → overwrite)
|
||||||
```
|
```
|
||||||
|
|
||||||
## Phase 2: Layer 3 — Component Documentation
|
## Phase 2: Layer 3 -- Component Documentation
|
||||||
|
|
||||||
For each component in `technicalComponents[]`:
|
For each component in `technicalComponents[]`, call the generate_ddd_docs endpoint:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
ccw cli -p "PURPOSE: Generate component documentation for {component.name}
|
for COMPONENT_ID in "${technicalComponents[@]}"; do
|
||||||
TASK:
|
ccw tool exec generate_ddd_docs '{"strategy":"component","entityId":"'"$COMPONENT_ID"'","tool":"gemini"}'
|
||||||
• Document component purpose and responsibility
|
done
|
||||||
• List exported symbols (classes, functions, types)
|
|
||||||
• Document dependencies (internal and external)
|
|
||||||
• Include code examples for key APIs
|
|
||||||
• Document integration points with other components
|
|
||||||
MODE: write
|
|
||||||
CONTEXT: @{component.codeLocations[].path}
|
|
||||||
EXPECTED: Markdown file with: Overview, API Reference, Dependencies, Usage Examples
|
|
||||||
CONSTRAINTS: Focus on public API | Include type signatures
|
|
||||||
" --tool gemini --mode write --cd .workflow/.doc-index/tech-registry/
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
The endpoint handles:
|
||||||
|
- Loading the component entity from doc-index.json
|
||||||
|
- Building YAML frontmatter (layer: 3, component_id, name, type, features, code_locations, generated_at)
|
||||||
|
- Constructing the CLI prompt with code context paths
|
||||||
|
- **Including Change History section**: Pull related entries from `doc-index.json.actions[]` where `affectedComponents` includes this component ID. Display as timeline (date, action type, description)
|
||||||
|
- Writing output to `.workflow/.doc-index/tech-registry/{slug}.md`
|
||||||
|
- Tool fallback (gemini -> qwen -> codex) on failure
|
||||||
|
|
||||||
Output: `.workflow/.doc-index/tech-registry/{component-slug}.md`
|
Output: `.workflow/.doc-index/tech-registry/{component-slug}.md`
|
||||||
|
|
||||||
Frontmatter:
|
## Phase 3: Layer 2 -- Feature Documentation
|
||||||
```markdown
|
|
||||||
---
|
|
||||||
layer: 3
|
|
||||||
component_id: tech-{slug}
|
|
||||||
name: ComponentName
|
|
||||||
type: service|controller|model|...
|
|
||||||
features: [feat-auth]
|
|
||||||
code_locations:
|
|
||||||
- path: src/services/auth.ts
|
|
||||||
symbols: [AuthService, AuthService.login]
|
|
||||||
generated_at: ISO8601
|
|
||||||
---
|
|
||||||
```
|
|
||||||
|
|
||||||
Sections: Responsibility, Code Locations, Related Requirements, Architecture Decisions, Dependencies (in/out)
|
For each feature in `features[]`, call the generate_ddd_docs endpoint:
|
||||||
|
|
||||||
## Phase 3: Layer 2 — Feature Documentation
|
|
||||||
|
|
||||||
For each feature in `features[]`:
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
ccw cli -p "PURPOSE: Generate feature documentation for {feature.name}
|
for FEATURE_ID in "${features[@]}"; do
|
||||||
TASK:
|
ccw tool exec generate_ddd_docs '{"strategy":"feature","entityId":"'"$FEATURE_ID"'","tool":"gemini"}'
|
||||||
• Describe feature purpose and business value
|
done
|
||||||
• List requirements (from requirementIds)
|
|
||||||
• Document components involved (from techComponentIds)
|
|
||||||
• Include architecture decisions (from adrIds)
|
|
||||||
• Provide integration guide
|
|
||||||
MODE: write
|
|
||||||
CONTEXT: @.workflow/.doc-index/tech-registry/{related-components}.md
|
|
||||||
EXPECTED: Markdown file with: Overview, Requirements, Components, Architecture, Integration
|
|
||||||
CONSTRAINTS: Reference Layer 3 component docs | Business-focused language
|
|
||||||
" --tool gemini --mode write --cd .workflow/.doc-index/feature-maps/
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
The endpoint handles:
|
||||||
|
- Loading the feature entity from doc-index.json
|
||||||
|
- Building YAML frontmatter (layer: 2, feature_id, name, epic_id, status, requirements, components, tags, generated_at)
|
||||||
|
- Constructing the CLI prompt referencing Layer 3 component docs
|
||||||
|
- **Including Change History section**: Pull related entries from `doc-index.json.actions[]` where `affectedFeatures` includes this feature ID. Display as timeline (date, action type, description)
|
||||||
|
- Writing output to `.workflow/.doc-index/feature-maps/{slug}.md`
|
||||||
|
- Tool fallback (gemini -> qwen -> codex) on failure
|
||||||
|
|
||||||
Output: `.workflow/.doc-index/feature-maps/{feature-slug}.md`
|
Output: `.workflow/.doc-index/feature-maps/{feature-slug}.md`
|
||||||
|
|
||||||
Frontmatter:
|
## Phase 4: Layer 1 -- Index & Overview Documentation
|
||||||
```markdown
|
|
||||||
---
|
|
||||||
layer: 2
|
|
||||||
feature_id: feat-{slug}
|
|
||||||
name: Feature Name
|
|
||||||
epic_id: EPIC-NNN|null
|
|
||||||
status: implemented|in-progress|planned|partial
|
|
||||||
requirements: [REQ-001, REQ-002]
|
|
||||||
components: [tech-auth-service, tech-user-model]
|
|
||||||
depends_on_layer3: [tech-auth-service, tech-user-model]
|
|
||||||
tags: [auth, security]
|
|
||||||
generated_at: ISO8601
|
|
||||||
---
|
|
||||||
```
|
|
||||||
|
|
||||||
Sections: Overview, Requirements (with mapping status), Technical Components, Architecture Decisions, Change History
|
|
||||||
|
|
||||||
## Phase 4: Layer 1 — Index & Overview Documentation
|
|
||||||
|
|
||||||
### 4.1 Index Documents
|
### 4.1 Index Documents
|
||||||
|
|
||||||
Generate catalog files:
|
Generate catalog files for each subdirectory:
|
||||||
|
|
||||||
- **feature-maps/_index.md** — Feature overview table with status
|
```bash
|
||||||
- **tech-registry/_index.md** — Component registry table with types
|
# Feature maps index
|
||||||
- **action-logs/_index.md** — Action history table (empty initially for new projects)
|
ccw tool exec generate_ddd_docs '{"strategy":"index","entityId":"feature-maps","tool":"gemini"}'
|
||||||
|
|
||||||
|
# Tech registry index
|
||||||
|
ccw tool exec generate_ddd_docs '{"strategy":"index","entityId":"tech-registry","tool":"gemini"}'
|
||||||
|
|
||||||
|
# Action logs index
|
||||||
|
ccw tool exec generate_ddd_docs '{"strategy":"index","entityId":"action-logs","tool":"gemini"}'
|
||||||
|
|
||||||
|
# Planning sessions index
|
||||||
|
ccw tool exec generate_ddd_docs '{"strategy":"index","entityId":"planning","tool":"gemini"}'
|
||||||
|
```
|
||||||
|
|
||||||
|
Or generate all indexes at once (omit entityId):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ccw tool exec generate_ddd_docs '{"strategy":"index","tool":"gemini"}'
|
||||||
|
```
|
||||||
|
|
||||||
### 4.2 README.md (unless --skip-overview)
|
### 4.2 README.md (unless --skip-overview)
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
ccw cli -p "PURPOSE: Generate project README with overview and navigation
|
ccw tool exec generate_ddd_docs '{"strategy":"overview","tool":"gemini"}'
|
||||||
TASK:
|
|
||||||
• Project summary and purpose
|
|
||||||
• Quick start guide
|
|
||||||
• Navigation to features, components, and architecture
|
|
||||||
• Link to doc-index.json
|
|
||||||
MODE: write
|
|
||||||
CONTEXT: @.workflow/.doc-index/doc-index.json @.workflow/.doc-index/feature-maps/_index.md
|
|
||||||
EXPECTED: README.md with: Overview, Quick Start, Navigation, Links
|
|
||||||
CONSTRAINTS: High-level only | Entry point for new developers
|
|
||||||
" --tool gemini --mode write --cd .workflow/.doc-index/
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### 4.3 ARCHITECTURE.md (unless --skip-overview)
|
### 4.3 ARCHITECTURE.md (unless --skip-overview)
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
ccw cli -p "PURPOSE: Generate architecture overview document
|
ccw tool exec generate_ddd_docs '{"strategy":"overview","entityId":"architecture","tool":"gemini"}'
|
||||||
TASK:
|
|
||||||
• System design overview
|
|
||||||
• Component relationships and dependencies
|
|
||||||
• Key architecture decisions (from ADRs)
|
|
||||||
• Technology stack
|
|
||||||
MODE: write
|
|
||||||
CONTEXT: @.workflow/.doc-index/doc-index.json @.workflow/.doc-index/tech-registry/*.md
|
|
||||||
EXPECTED: ARCHITECTURE.md with: System Design, Component Diagram, ADRs, Tech Stack
|
|
||||||
CONSTRAINTS: Architecture-focused | Reference component docs for details
|
|
||||||
" --tool gemini --mode write --cd .workflow/.doc-index/
|
|
||||||
```
|
|
||||||
|
|
||||||
### 4.4 sessions/_index.md (unless --skip-overview)
|
|
||||||
|
|
||||||
```bash
|
|
||||||
ccw cli -p "PURPOSE: Generate planning sessions index
|
|
||||||
TASK:
|
|
||||||
• List all planning session folders chronologically
|
|
||||||
• Link to each session's plan.json
|
|
||||||
• Show session status and task count
|
|
||||||
MODE: write
|
|
||||||
CONTEXT: @.workflow/.doc-index/planning/*/plan.json
|
|
||||||
EXPECTED: sessions/_index.md with: Session List, Links, Status
|
|
||||||
CONSTRAINTS: Chronological order | Link to session folders
|
|
||||||
" --tool gemini --mode write --cd .workflow/.doc-index/sessions/
|
|
||||||
```
|
|
||||||
|
|
||||||
Layer 1 frontmatter:
|
|
||||||
```markdown
|
|
||||||
---
|
|
||||||
layer: 1
|
|
||||||
depends_on_layer2: [feat-auth, feat-orders]
|
|
||||||
generated_at: ISO8601
|
|
||||||
---
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Phase 5: SCHEMA.md (unless --skip-schema)
|
## Phase 5: SCHEMA.md (unless --skip-schema)
|
||||||
@@ -235,17 +171,7 @@ generated_at: ISO8601
|
|||||||
### 5.1 Generate Schema Documentation
|
### 5.1 Generate Schema Documentation
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
ccw cli -p "PURPOSE: Document doc-index.json schema structure and versioning
|
ccw tool exec generate_ddd_docs '{"strategy":"schema","tool":"gemini"}'
|
||||||
TASK:
|
|
||||||
• Document current schema structure (all fields)
|
|
||||||
• Define versioning policy (semver: major.minor)
|
|
||||||
• Document migration protocol for version upgrades
|
|
||||||
• Provide examples for each schema section
|
|
||||||
MODE: write
|
|
||||||
CONTEXT: @.workflow/.doc-index/doc-index.json
|
|
||||||
EXPECTED: SCHEMA.md with: Schema Structure, Versioning Policy, Migration Protocol, Examples
|
|
||||||
CONSTRAINTS: Complete field documentation | Clear migration steps
|
|
||||||
" --tool gemini --mode write --cd .workflow/.doc-index/
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### 5.2 Versioning Policy
|
### 5.2 Versioning Policy
|
||||||
@@ -284,7 +210,7 @@ Total: {N} documents generated
|
|||||||
| `-y, --yes` | Auto-confirm all decisions |
|
| `-y, --yes` | Auto-confirm all decisions |
|
||||||
| `--layer <3\|2\|1\|all>` | Generate specific layer only (default: all) |
|
| `--layer <3\|2\|1\|all>` | Generate specific layer only (default: all) |
|
||||||
| `--force` | Overwrite existing documents |
|
| `--force` | Overwrite existing documents |
|
||||||
| `--skip-overview` | Skip README.md, ARCHITECTURE.md, sessions/_index.md |
|
| `--skip-overview` | Skip README.md, ARCHITECTURE.md, planning/_index.md |
|
||||||
| `--skip-schema` | Skip SCHEMA.md generation |
|
| `--skip-schema` | Skip SCHEMA.md generation |
|
||||||
|
|
||||||
## Integration Points
|
## Integration Points
|
||||||
@@ -293,3 +219,4 @@ Total: {N} documents generated
|
|||||||
- **Called by**: `/ddd:scan` (after index assembly), `/ddd:index-build` (after index assembly)
|
- **Called by**: `/ddd:scan` (after index assembly), `/ddd:index-build` (after index assembly)
|
||||||
- **Standalone**: Can be run independently on any project with existing doc-index.json
|
- **Standalone**: Can be run independently on any project with existing doc-index.json
|
||||||
- **Output**: Complete document tree in `.workflow/.doc-index/`
|
- **Output**: Complete document tree in `.workflow/.doc-index/`
|
||||||
|
- **Endpoint**: `ccw tool exec generate_ddd_docs` handles prompt construction, frontmatter, tool fallback, and file creation
|
||||||
|
|||||||
@@ -163,7 +163,7 @@ ccw cli -p "PURPOSE: Update project overview docs after feature changes
|
|||||||
TASK:
|
TASK:
|
||||||
• Update README.md feature list
|
• Update README.md feature list
|
||||||
• Update ARCHITECTURE.md if new components added
|
• Update ARCHITECTURE.md if new components added
|
||||||
• Update sessions/_index.md with new planning sessions
|
• Update planning/_index.md with new planning sessions
|
||||||
MODE: write
|
MODE: write
|
||||||
CONTEXT: @.workflow/.doc-index/feature-maps/*.md @.workflow/.doc-index/doc-index.json
|
CONTEXT: @.workflow/.doc-index/feature-maps/*.md @.workflow/.doc-index/doc-index.json
|
||||||
EXPECTED: Updated overview docs with current project state
|
EXPECTED: Updated overview docs with current project state
|
||||||
|
|||||||
@@ -37,11 +37,42 @@ After completing a development task, synchronize the document index with actual
|
|||||||
- `doc-index.json` must exist
|
- `doc-index.json` must exist
|
||||||
- Git repository with committed or staged changes
|
- Git repository with committed or staged changes
|
||||||
|
|
||||||
|
## Phase 0: Consistency Validation
|
||||||
|
|
||||||
|
Before processing changes, verify that `doc-index.json` entries are consistent with actual code state.
|
||||||
|
|
||||||
|
### 0.1 Validate Code Locations
|
||||||
|
|
||||||
|
For each `technicalComponents[].codeLocations[]`:
|
||||||
|
- Verify file exists on disk
|
||||||
|
- If file was deleted/moved → flag for removal or update
|
||||||
|
- If file exists → verify listed `symbols[]` still exist (quick grep/AST check)
|
||||||
|
|
||||||
|
### 0.2 Validate Symbols
|
||||||
|
|
||||||
|
For components with `codeLocations[].symbols[]`:
|
||||||
|
- Check each symbol still exists in the referenced file
|
||||||
|
- Detect new exported symbols not yet tracked
|
||||||
|
- Report: `{N} stale symbols, {N} untracked symbols`
|
||||||
|
|
||||||
|
### 0.3 Validation Report
|
||||||
|
|
||||||
|
```
|
||||||
|
Consistency Check:
|
||||||
|
Components validated: {N}
|
||||||
|
Files verified: {N}
|
||||||
|
Stale references: {N} (files missing or symbols removed)
|
||||||
|
Untracked symbols: {N} (new exports not in index)
|
||||||
|
```
|
||||||
|
|
||||||
|
If stale references found: warn and auto-fix during Phase 3 updates.
|
||||||
|
If `--dry-run`: report only, no fixes.
|
||||||
|
|
||||||
## Phase 1: Change Detection
|
## Phase 1: Change Detection
|
||||||
|
|
||||||
### 0.1 Schema Version Check (TASK-006)
|
### 1.0.1 Schema Version Check
|
||||||
|
|
||||||
Before processing changes, verify doc-index schema compatibility:
|
Before processing changes, verify doc-index.json schema compatibility:
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
const docIndex = JSON.parse(Read('.workflow/.doc-index/doc-index.json'));
|
const docIndex = JSON.parse(Read('.workflow/.doc-index/doc-index.json'));
|
||||||
@@ -201,6 +232,7 @@ For each affected component in `doc-index.json`:
|
|||||||
- Update `codeLocations` if file paths or line ranges changed
|
- Update `codeLocations` if file paths or line ranges changed
|
||||||
- Update `symbols` if new exports were added
|
- Update `symbols` if new exports were added
|
||||||
- Add new `actionIds` entry
|
- Add new `actionIds` entry
|
||||||
|
- **Auto-update `responsibility`**: If symbols changed (new methods/exports added or removed), re-infer responsibility from current symbols list using Gemini analysis. This prevents stale descriptions (e.g., responsibility still says "登录、注册" after adding logout support)
|
||||||
|
|
||||||
### 3.2 Register New Components
|
### 3.2 Register New Components
|
||||||
|
|
||||||
|
|||||||
@@ -332,21 +332,22 @@ CONSTRAINTS: Focus on ${dimensions.join(', ')}
|
|||||||
|
|
||||||
5. **Interactive Recommendation Review** (skip in auto mode):
|
5. **Interactive Recommendation Review** (skip in auto mode):
|
||||||
|
|
||||||
Walk through each recommendation one-by-one for user confirmation:
|
Present all recommendations, then batch-confirm via **single AskUserQuestion call** (up to 4 questions):
|
||||||
|
|
||||||
```
|
```
|
||||||
For each recommendation (ordered by priority high→medium→low):
|
1. Display all recommendations with numbering (action, rationale, priority, steps[])
|
||||||
1. Present: action, rationale, priority, steps[] (numbered sub-steps)
|
2. Single AskUserQuestion call — one question per recommendation (max 4, ordered by priority high→medium→low):
|
||||||
2. AskUserQuestion (single-select, header: "建议#N"):
|
Each question (single-select, header: "建议#N"):
|
||||||
- **确认** (label: "确认", desc: "Accept as-is") → review_status = "accepted"
|
- **确认** (label: "确认", desc: "Accept as-is") → review_status = "accepted"
|
||||||
- **修改** (label: "修改", desc: "Adjust scope/steps") → record modification → review_status = "modified"
|
- **修改** (label: "修改", desc: "Adjust scope/steps") → review_status = "modified"
|
||||||
- **删除** (label: "删除", desc: "Not needed") → record reason → review_status = "rejected"
|
- **删除** (label: "删除", desc: "Not needed") → review_status = "rejected"
|
||||||
- **跳过审议** (label: "跳过审议", desc: "Accept all remaining") → break loop
|
3. If >4 recommendations: batch in groups of 4 with additional AskUserQuestion calls
|
||||||
3. Record review decision to discussion.md Decision Log
|
4. For "修改" selections: follow up to capture modification details
|
||||||
4. Update conclusions.json recommendation.review_status
|
5. Record all review decisions to discussion.md Decision Log
|
||||||
|
6. Update conclusions.json recommendation.review_status for each
|
||||||
```
|
```
|
||||||
|
|
||||||
**After review loop**: Display summary of reviewed recommendations:
|
**After review**: Display summary of reviewed recommendations:
|
||||||
- Accepted: N items | Modified: N items | Rejected: N items
|
- Accepted: N items | Modified: N items | Rejected: N items
|
||||||
- Only accepted/modified recommendations proceed to next step
|
- Only accepted/modified recommendations proceed to next step
|
||||||
|
|
||||||
|
|||||||
@@ -65,11 +65,14 @@ Analyze context and produce two update payloads. Use LLM reasoning (current agen
|
|||||||
```javascript
|
```javascript
|
||||||
// ── Guidelines extraction ──
|
// ── Guidelines extraction ──
|
||||||
// Scan git diff + session for:
|
// Scan git diff + session for:
|
||||||
// - New patterns adopted → convention
|
// - Debugging experiences → bug
|
||||||
// - Restrictions discovered → constraint
|
// - Reusable code patterns → pattern
|
||||||
// - Surprises / gotchas → learning
|
// - Architecture/design decisions → decision
|
||||||
|
// - Conventions, constraints, insights → rule
|
||||||
//
|
//
|
||||||
// Output: array of { type, category, text }
|
// Output: array of { type, tag, text }
|
||||||
|
// type: 'bug' | 'pattern' | 'decision' | 'rule'
|
||||||
|
// tag: domain tag (api, routing, schema, security, etc.)
|
||||||
// RULE: Only extract genuinely reusable insights. Skip trivial/obvious items.
|
// RULE: Only extract genuinely reusable insights. Skip trivial/obvious items.
|
||||||
// RULE: Deduplicate against existing guidelines before adding.
|
// RULE: Deduplicate against existing guidelines before adding.
|
||||||
|
|
||||||
@@ -118,7 +121,7 @@ console.log(`
|
|||||||
── Sync Preview ──
|
── Sync Preview ──
|
||||||
|
|
||||||
Guidelines (${guidelineUpdates.length} items):
|
Guidelines (${guidelineUpdates.length} items):
|
||||||
${guidelineUpdates.map(g => ` [${g.type}/${g.category}] ${g.text}`).join('\n') || ' (none)'}
|
${guidelineUpdates.map(g => ` [${g.type}:${g.tag}] ${g.text}`).join('\n') || ' (none)'}
|
||||||
|
|
||||||
Tech [${detectCategory(summary)}]:
|
Tech [${detectCategory(summary)}]:
|
||||||
${techEntry.title}
|
${techEntry.title}
|
||||||
@@ -137,26 +140,102 @@ if (!autoYes) {
|
|||||||
## Step 4: Write
|
## Step 4: Write
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
// ── Update specs/*.md ──
|
const matter = require('gray-matter') // YAML frontmatter parser
|
||||||
// Uses .ccw/specs/ directory (same as frontend/backend spec-index-builder)
|
|
||||||
if (guidelineUpdates.length > 0) {
|
// ── Frontmatter check & repair helper ──
|
||||||
// Map guideline types to spec files
|
// Ensures target spec file has valid YAML frontmatter with keywords
|
||||||
const specFileMap = {
|
// Uses gray-matter for robust parsing (handles malformed frontmatter, missing fields)
|
||||||
convention: '.ccw/specs/coding-conventions.md',
|
function ensureFrontmatter(filePath, tag, type) {
|
||||||
constraint: '.ccw/specs/architecture-constraints.md',
|
const titleMap = {
|
||||||
learning: '.ccw/specs/coding-conventions.md' // learnings appended to conventions
|
'coding-conventions': 'Coding Conventions',
|
||||||
|
'architecture-constraints': 'Architecture Constraints',
|
||||||
|
'learnings': 'Learnings',
|
||||||
|
'quality-rules': 'Quality Rules'
|
||||||
|
}
|
||||||
|
const basename = filePath.split('/').pop().replace('.md', '')
|
||||||
|
const title = titleMap[basename] || basename
|
||||||
|
const defaultFm = {
|
||||||
|
title,
|
||||||
|
readMode: 'optional',
|
||||||
|
priority: 'medium',
|
||||||
|
scope: 'project',
|
||||||
|
dimension: 'specs',
|
||||||
|
keywords: [tag, type]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (!file_exists(filePath)) {
|
||||||
|
// Case A: Create new file with frontmatter
|
||||||
|
Write(filePath, matter.stringify(`\n# ${title}\n\n`, defaultFm))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
const raw = Read(filePath)
|
||||||
|
let parsed
|
||||||
|
try {
|
||||||
|
parsed = matter(raw)
|
||||||
|
} catch {
|
||||||
|
parsed = { data: {}, content: raw }
|
||||||
|
}
|
||||||
|
|
||||||
|
const hasFrontmatter = raw.trimStart().startsWith('---')
|
||||||
|
|
||||||
|
if (!hasFrontmatter) {
|
||||||
|
// Case B: File exists but no frontmatter → prepend
|
||||||
|
Write(filePath, matter.stringify(raw, defaultFm))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Case C: Frontmatter exists → ensure keywords include current tag
|
||||||
|
const existingKeywords = parsed.data.keywords || []
|
||||||
|
const newKeywords = [...new Set([...existingKeywords, tag, type])]
|
||||||
|
|
||||||
|
if (newKeywords.length !== existingKeywords.length) {
|
||||||
|
parsed.data.keywords = newKeywords
|
||||||
|
Write(filePath, matter.stringify(parsed.content, parsed.data))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Update specs/*.md ──
|
||||||
|
// Uses .ccw/specs/ directory - unified [type:tag] entry format
|
||||||
|
if (guidelineUpdates.length > 0) {
|
||||||
|
// Map knowledge types to spec files
|
||||||
|
const specFileMap = {
|
||||||
|
bug: '.ccw/specs/learnings.md',
|
||||||
|
pattern: '.ccw/specs/coding-conventions.md',
|
||||||
|
decision: '.ccw/specs/architecture-constraints.md',
|
||||||
|
rule: null // determined by content below
|
||||||
|
}
|
||||||
|
|
||||||
|
const date = new Date().toISOString().split('T')[0]
|
||||||
|
const needsDate = { bug: true, pattern: true, decision: true, rule: false }
|
||||||
|
|
||||||
for (const g of guidelineUpdates) {
|
for (const g of guidelineUpdates) {
|
||||||
const targetFile = specFileMap[g.type]
|
// For rule type, route by content and tag
|
||||||
|
let targetFile = specFileMap[g.type]
|
||||||
|
if (!targetFile) {
|
||||||
|
const isQuality = /\b(test|coverage|lint|eslint|质量|测试覆盖|pre-commit|tsc|type.check)\b/i.test(g.text)
|
||||||
|
|| ['testing', 'quality', 'lint'].includes(g.tag)
|
||||||
|
const isConstraint = /\b(禁止|no|never|must not|forbidden|不得|不允许)\b/i.test(g.text)
|
||||||
|
if (isQuality) {
|
||||||
|
targetFile = '.ccw/specs/quality-rules.md'
|
||||||
|
} else if (isConstraint) {
|
||||||
|
targetFile = '.ccw/specs/architecture-constraints.md'
|
||||||
|
} else {
|
||||||
|
targetFile = '.ccw/specs/coding-conventions.md'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure frontmatter exists and keywords are up-to-date
|
||||||
|
ensureFrontmatter(targetFile, g.tag, g.type)
|
||||||
|
|
||||||
const existing = Read(targetFile)
|
const existing = Read(targetFile)
|
||||||
const ruleText = g.type === 'learning'
|
const entryLine = needsDate[g.type]
|
||||||
? `- [${g.category}] ${g.text} (learned: ${new Date().toISOString().split('T')[0]})`
|
? `- [${g.type}:${g.tag}] ${g.text} (${date})`
|
||||||
: `- [${g.category}] ${g.text}`
|
: `- [${g.type}:${g.tag}] ${g.text}`
|
||||||
|
|
||||||
// Deduplicate: skip if text already in file
|
// Deduplicate: skip if text already in file
|
||||||
if (!existing.includes(g.text)) {
|
if (!existing.includes(g.text)) {
|
||||||
const newContent = existing.trimEnd() + '\n' + ruleText + '\n'
|
const newContent = existing.trimEnd() + '\n' + entryLine + '\n'
|
||||||
Write(targetFile, newContent)
|
Write(targetFile, newContent)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -198,4 +277,5 @@ Write(techPath, JSON.stringify(tech, null, 2))
|
|||||||
## Related Commands
|
## Related Commands
|
||||||
|
|
||||||
- `/workflow:spec:setup` - Initialize project with specs scaffold
|
- `/workflow:spec:setup` - Initialize project with specs scaffold
|
||||||
- `/workflow:spec:add` - Interactive wizard to create individual specs with scope selection
|
- `/workflow:spec:add` - Add knowledge entries (bug/pattern/decision/rule) with unified [type:tag] format
|
||||||
|
- `/workflow:spec:load` - Interactive spec loader with keyword/type/tag filtering
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
392
.claude/commands/workflow/spec/load.md
Normal file
392
.claude/commands/workflow/spec/load.md
Normal file
@@ -0,0 +1,392 @@
|
|||||||
|
---
|
||||||
|
name: load
|
||||||
|
description: Interactive spec loader - ask what user needs, then load relevant specs by keyword routing
|
||||||
|
argument-hint: "[--all] [--type <bug|pattern|decision|rule>] [--tag <tag>] [\"keyword query\"]"
|
||||||
|
examples:
|
||||||
|
- /workflow:spec:load
|
||||||
|
- /workflow:spec:load "api routing"
|
||||||
|
- /workflow:spec:load --type bug
|
||||||
|
- /workflow:spec:load --all
|
||||||
|
- /workflow:spec:load --tag security
|
||||||
|
---
|
||||||
|
|
||||||
|
# Spec Load Command (/workflow:spec:load)
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
Interactive entry point for loading and browsing project specs. Asks the user what they need, then routes to the appropriate spec content based on keywords, type filters, or tag filters.
|
||||||
|
|
||||||
|
**Design**: Menu-driven → keyword match → load & display. No file modifications.
|
||||||
|
|
||||||
|
**Note**: This command may be called by other workflow commands. Upon completion, return immediately to continue the calling workflow.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
```bash
|
||||||
|
/workflow:spec:load # Interactive menu
|
||||||
|
/workflow:spec:load "api routing" # Direct keyword search
|
||||||
|
/workflow:spec:load --type bug # Filter by knowledge type
|
||||||
|
/workflow:spec:load --tag security # Filter by domain tag
|
||||||
|
/workflow:spec:load --all # Load all specs
|
||||||
|
```
|
||||||
|
|
||||||
|
## Execution Process
|
||||||
|
|
||||||
|
```
|
||||||
|
Input Parsing:
|
||||||
|
├─ Parse --all flag → loadAll = true | false
|
||||||
|
├─ Parse --type (bug|pattern|decision|rule)
|
||||||
|
├─ Parse --tag (domain tag)
|
||||||
|
└─ Parse keyword query (positional text)
|
||||||
|
|
||||||
|
Decision:
|
||||||
|
├─ --all → Load all specs (Path C)
|
||||||
|
├─ --type or --tag or keyword → Direct filter (Path B)
|
||||||
|
└─ No args → Interactive menu (Path A)
|
||||||
|
|
||||||
|
Path A: Interactive Menu
|
||||||
|
├─ Step A1: Ask user intent
|
||||||
|
├─ Step A2: Route to action
|
||||||
|
└─ Step A3: Display results
|
||||||
|
|
||||||
|
Path B: Direct Filter
|
||||||
|
├─ Step B1: Build filter from args
|
||||||
|
├─ Step B2: Search specs
|
||||||
|
└─ Step B3: Display results
|
||||||
|
|
||||||
|
Path C: Load All
|
||||||
|
└─ Display all spec contents
|
||||||
|
|
||||||
|
Output:
|
||||||
|
└─ Formatted spec entries matching user query
|
||||||
|
```
|
||||||
|
|
||||||
|
## Implementation
|
||||||
|
|
||||||
|
### Step 1: Parse Input
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const args = $ARGUMENTS
|
||||||
|
const argsLower = args.toLowerCase()
|
||||||
|
|
||||||
|
const loadAll = argsLower.includes('--all')
|
||||||
|
const hasType = argsLower.includes('--type')
|
||||||
|
const hasTag = argsLower.includes('--tag')
|
||||||
|
|
||||||
|
let type = hasType ? args.match(/--type\s+(\w+)/i)?.[1]?.toLowerCase() : null
|
||||||
|
let tag = hasTag ? args.match(/--tag\s+([\w-]+)/i)?.[1]?.toLowerCase() : null
|
||||||
|
|
||||||
|
// Extract keyword query (everything that's not a flag)
|
||||||
|
let keyword = args
|
||||||
|
.replace(/--type\s+\w+/gi, '')
|
||||||
|
.replace(/--tag\s+[\w-]+/gi, '')
|
||||||
|
.replace(/--all/gi, '')
|
||||||
|
.replace(/^["']|["']$/g, '')
|
||||||
|
.trim()
|
||||||
|
|
||||||
|
// Validate type
|
||||||
|
if (type && !['bug', 'pattern', 'decision', 'rule'].includes(type)) {
|
||||||
|
console.log("Invalid type. Use 'bug', 'pattern', 'decision', or 'rule'.")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Determine Mode
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const useInteractive = !loadAll && !hasType && !hasTag && !keyword
|
||||||
|
```
|
||||||
|
|
||||||
|
### Path A: Interactive Menu
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (useInteractive) {
|
||||||
|
const answer = AskUserQuestion({
|
||||||
|
questions: [{
|
||||||
|
question: "What specs would you like to load?",
|
||||||
|
header: "Action",
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{
|
||||||
|
label: "Browse all specs",
|
||||||
|
description: "Load and display all project spec entries"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
label: "Search by keyword",
|
||||||
|
description: "Find specs matching a keyword (e.g., api, security, routing)"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
label: "View bug experiences",
|
||||||
|
description: "Load all [bug:*] debugging experience entries"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
label: "View code patterns",
|
||||||
|
description: "Load all [pattern:*] reusable code pattern entries"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}]
|
||||||
|
})
|
||||||
|
|
||||||
|
const choice = answer.answers["Action"]
|
||||||
|
|
||||||
|
if (choice === "Browse all specs") {
|
||||||
|
loadAll = true
|
||||||
|
} else if (choice === "View bug experiences") {
|
||||||
|
type = "bug"
|
||||||
|
} else if (choice === "View code patterns") {
|
||||||
|
type = "pattern"
|
||||||
|
} else if (choice === "Search by keyword") {
|
||||||
|
// Ask for keyword
|
||||||
|
const kwAnswer = AskUserQuestion({
|
||||||
|
questions: [{
|
||||||
|
question: "Enter keyword(s) to search for:",
|
||||||
|
header: "Keyword",
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{ label: "api", description: "API endpoints, HTTP, REST, routing" },
|
||||||
|
{ label: "security", description: "Authentication, authorization, input validation" },
|
||||||
|
{ label: "arch", description: "Architecture, design patterns, module structure" },
|
||||||
|
{ label: "perf", description: "Performance, caching, optimization" }
|
||||||
|
]
|
||||||
|
}]
|
||||||
|
})
|
||||||
|
keyword = kwAnswer.answers["Keyword"].toLowerCase()
|
||||||
|
} else {
|
||||||
|
// "Other" — user typed custom input, use as keyword
|
||||||
|
keyword = choice.toLowerCase()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Load Spec Files
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Discover all spec files
|
||||||
|
const specFiles = [
|
||||||
|
'.ccw/specs/coding-conventions.md',
|
||||||
|
'.ccw/specs/architecture-constraints.md',
|
||||||
|
'.ccw/specs/learnings.md',
|
||||||
|
'.ccw/specs/quality-rules.md'
|
||||||
|
]
|
||||||
|
|
||||||
|
// Also check personal specs
|
||||||
|
const personalFiles = [
|
||||||
|
'~/.ccw/personal/conventions.md',
|
||||||
|
'~/.ccw/personal/constraints.md',
|
||||||
|
'~/.ccw/personal/learnings.md',
|
||||||
|
'.ccw/personal/conventions.md',
|
||||||
|
'.ccw/personal/constraints.md',
|
||||||
|
'.ccw/personal/learnings.md'
|
||||||
|
]
|
||||||
|
|
||||||
|
// Read all existing spec files
|
||||||
|
const allEntries = []
|
||||||
|
|
||||||
|
for (const file of [...specFiles, ...personalFiles]) {
|
||||||
|
if (!file_exists(file)) continue
|
||||||
|
const content = Read(file)
|
||||||
|
|
||||||
|
// Extract entries using unified format regex
|
||||||
|
// Entry line: - [type:tag] summary (date)
|
||||||
|
// Extended: - key: value
|
||||||
|
const lines = content.split('\n')
|
||||||
|
let currentEntry = null
|
||||||
|
|
||||||
|
for (const line of lines) {
|
||||||
|
const entryMatch = line.match(/^- \[(\w+):([\w-]+)\] (.*?)(?:\s+\((\d{4}-\d{2}-\d{2})\))?$/)
|
||||||
|
if (entryMatch) {
|
||||||
|
if (currentEntry) allEntries.push(currentEntry)
|
||||||
|
currentEntry = {
|
||||||
|
type: entryMatch[1],
|
||||||
|
tag: entryMatch[2],
|
||||||
|
summary: entryMatch[3],
|
||||||
|
date: entryMatch[4] || null,
|
||||||
|
extended: {},
|
||||||
|
source: file,
|
||||||
|
raw: line
|
||||||
|
}
|
||||||
|
} else if (currentEntry && /^\s{4}- ([\w-]+):\s?(.*)/.test(line)) {
|
||||||
|
const fieldMatch = line.match(/^\s{4}- ([\w-]+):\s?(.*)/)
|
||||||
|
currentEntry.extended[fieldMatch[1]] = fieldMatch[2]
|
||||||
|
} else if (currentEntry && !/^\s{4}/.test(line) && line.trim() !== '') {
|
||||||
|
// Non-indented non-empty line = end of current entry
|
||||||
|
allEntries.push(currentEntry)
|
||||||
|
currentEntry = null
|
||||||
|
}
|
||||||
|
|
||||||
|
// Also handle legacy format: - [tag] text (learned: date)
|
||||||
|
const legacyMatch = line.match(/^- \[([\w-]+)\] (.+?)(?:\s+\(learned: (\d{4}-\d{2}-\d{2})\))?$/)
|
||||||
|
if (!entryMatch && legacyMatch) {
|
||||||
|
if (currentEntry) allEntries.push(currentEntry)
|
||||||
|
currentEntry = {
|
||||||
|
type: 'rule',
|
||||||
|
tag: legacyMatch[1],
|
||||||
|
summary: legacyMatch[2],
|
||||||
|
date: legacyMatch[3] || null,
|
||||||
|
extended: {},
|
||||||
|
source: file,
|
||||||
|
raw: line,
|
||||||
|
legacy: true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (currentEntry) allEntries.push(currentEntry)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4: Filter Entries
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
let filtered = allEntries
|
||||||
|
|
||||||
|
// Filter by type
|
||||||
|
if (type) {
|
||||||
|
filtered = filtered.filter(e => e.type === type)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Filter by tag
|
||||||
|
if (tag) {
|
||||||
|
filtered = filtered.filter(e => e.tag === tag)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Filter by keyword (search in tag, summary, and extended fields)
|
||||||
|
if (keyword) {
|
||||||
|
const kw = keyword.toLowerCase()
|
||||||
|
const kwTerms = kw.split(/\s+/)
|
||||||
|
|
||||||
|
filtered = filtered.filter(e => {
|
||||||
|
const searchText = [
|
||||||
|
e.type, e.tag, e.summary,
|
||||||
|
...Object.values(e.extended)
|
||||||
|
].join(' ').toLowerCase()
|
||||||
|
|
||||||
|
return kwTerms.every(term => searchText.includes(term))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// If --all, keep everything (no filter)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 5: Display Results
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (filtered.length === 0) {
|
||||||
|
const filterDesc = []
|
||||||
|
if (type) filterDesc.push(`type=${type}`)
|
||||||
|
if (tag) filterDesc.push(`tag=${tag}`)
|
||||||
|
if (keyword) filterDesc.push(`keyword="${keyword}"`)
|
||||||
|
|
||||||
|
console.log(`
|
||||||
|
No specs found matching: ${filterDesc.join(', ') || '(all)'}
|
||||||
|
|
||||||
|
Available spec files:
|
||||||
|
${specFiles.filter(f => file_exists(f)).map(f => ` - ${f}`).join('\n') || ' (none)'}
|
||||||
|
|
||||||
|
Suggestions:
|
||||||
|
- Use /workflow:spec:setup to initialize specs
|
||||||
|
- Use /workflow:spec:add to add new entries
|
||||||
|
- Use /workflow:spec:load --all to see everything
|
||||||
|
`)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Group by source file
|
||||||
|
const grouped = {}
|
||||||
|
for (const entry of filtered) {
|
||||||
|
if (!grouped[entry.source]) grouped[entry.source] = []
|
||||||
|
grouped[entry.source].push(entry)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Display
|
||||||
|
console.log(`
|
||||||
|
## Specs Loaded (${filtered.length} entries)
|
||||||
|
${type ? `Type: ${type}` : ''}${tag ? ` Tag: ${tag}` : ''}${keyword ? ` Keyword: "${keyword}"` : ''}
|
||||||
|
`)
|
||||||
|
|
||||||
|
for (const [source, entries] of Object.entries(grouped)) {
|
||||||
|
console.log(`### ${source}`)
|
||||||
|
console.log('')
|
||||||
|
|
||||||
|
for (const entry of entries) {
|
||||||
|
// Render entry
|
||||||
|
const datePart = entry.date ? ` (${entry.date})` : ''
|
||||||
|
console.log(`- [${entry.type}:${entry.tag}] ${entry.summary}${datePart}`)
|
||||||
|
|
||||||
|
// Render extended fields
|
||||||
|
for (const [key, value] of Object.entries(entry.extended)) {
|
||||||
|
console.log(` - ${key}: ${value}`)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
console.log('')
|
||||||
|
}
|
||||||
|
|
||||||
|
// Summary footer
|
||||||
|
const typeCounts = {}
|
||||||
|
for (const e of filtered) {
|
||||||
|
typeCounts[e.type] = (typeCounts[e.type] || 0) + 1
|
||||||
|
}
|
||||||
|
const typeBreakdown = Object.entries(typeCounts)
|
||||||
|
.map(([t, c]) => `${t}: ${c}`)
|
||||||
|
.join(', ')
|
||||||
|
|
||||||
|
console.log(`---`)
|
||||||
|
console.log(`Total: ${filtered.length} entries (${typeBreakdown})`)
|
||||||
|
console.log(`Sources: ${Object.keys(grouped).join(', ')}`)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### Interactive Browse
|
||||||
|
```bash
|
||||||
|
/workflow:spec:load
|
||||||
|
# → Menu: "What specs would you like to load?"
|
||||||
|
# → User selects "Browse all specs"
|
||||||
|
# → Displays all entries grouped by file
|
||||||
|
```
|
||||||
|
|
||||||
|
### Keyword Search
|
||||||
|
```bash
|
||||||
|
/workflow:spec:load "api routing"
|
||||||
|
# → Filters entries where tag/summary/extended contains "api" AND "routing"
|
||||||
|
# → Displays matching entries
|
||||||
|
```
|
||||||
|
|
||||||
|
### Type Filter
|
||||||
|
```bash
|
||||||
|
/workflow:spec:load --type bug
|
||||||
|
# → Shows all [bug:*] entries from learnings.md
|
||||||
|
```
|
||||||
|
|
||||||
|
### Tag Filter
|
||||||
|
```bash
|
||||||
|
/workflow:spec:load --tag security
|
||||||
|
# → Shows all [*:security] entries across all spec files
|
||||||
|
```
|
||||||
|
|
||||||
|
### Combined Filters
|
||||||
|
```bash
|
||||||
|
/workflow:spec:load --type rule --tag api
|
||||||
|
# → Shows all [rule:api] entries
|
||||||
|
```
|
||||||
|
|
||||||
|
### Load All
|
||||||
|
```bash
|
||||||
|
/workflow:spec:load --all
|
||||||
|
# → Displays every entry from every spec file
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
| Error | Resolution |
|
||||||
|
|-------|------------|
|
||||||
|
| No spec files found | Suggest `/workflow:spec:setup` to initialize |
|
||||||
|
| No matching entries | Show available files and suggest alternatives |
|
||||||
|
| Invalid type | Exit with valid type list |
|
||||||
|
| Corrupt entry format | Skip unparseable lines, continue loading |
|
||||||
|
|
||||||
|
## Related Commands
|
||||||
|
|
||||||
|
- `/workflow:spec:setup` - Initialize project with specs scaffold
|
||||||
|
- `/workflow:spec:add` - Add knowledge entries (bug/pattern/decision/rule) with unified [type:tag] format
|
||||||
|
- `/workflow:session:sync` - Quick-sync session work to specs and project-tech
|
||||||
|
- `ccw spec list` - View spec file index
|
||||||
|
- `ccw spec load` - CLI-level spec loading (used by hooks)
|
||||||
@@ -471,70 +471,129 @@ For each category of collected answers, append rules to the corresponding spec M
|
|||||||
- Round 5 (quality): `category: execution` (testing phase)
|
- Round 5 (quality): `category: execution` (testing phase)
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
|
const matter = require('gray-matter') // YAML frontmatter parser
|
||||||
|
|
||||||
|
// ── Frontmatter check & repair helper ──
|
||||||
|
// Ensures target spec file has valid YAML frontmatter with keywords
|
||||||
|
// Uses gray-matter for robust parsing (handles malformed frontmatter, missing fields)
|
||||||
|
function ensureSpecFrontmatter(filePath, extraKeywords = []) {
|
||||||
|
const titleMap = {
|
||||||
|
'coding-conventions': 'Coding Conventions',
|
||||||
|
'architecture-constraints': 'Architecture Constraints',
|
||||||
|
'learnings': 'Learnings',
|
||||||
|
'quality-rules': 'Quality Rules'
|
||||||
|
}
|
||||||
|
const basename = filePath.split('/').pop().replace('.md', '')
|
||||||
|
const title = titleMap[basename] || basename
|
||||||
|
const defaultKw = filePath.includes('conventions') ? 'convention'
|
||||||
|
: filePath.includes('constraints') ? 'constraint' : 'quality'
|
||||||
|
const defaultFm = {
|
||||||
|
title,
|
||||||
|
readMode: 'optional',
|
||||||
|
priority: 'medium',
|
||||||
|
category: 'general',
|
||||||
|
scope: 'project',
|
||||||
|
dimension: 'specs',
|
||||||
|
keywords: [...new Set([defaultKw, ...extraKeywords])]
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!file_exists(filePath)) {
|
||||||
|
// Case A: Create new file with frontmatter
|
||||||
|
const specDir = path.dirname(filePath)
|
||||||
|
if (!fs.existsSync(specDir)) {
|
||||||
|
fs.mkdirSync(specDir, { recursive: true })
|
||||||
|
}
|
||||||
|
Write(filePath, matter.stringify(`\n# ${title}\n\n`, defaultFm))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
const raw = Read(filePath)
|
||||||
|
let parsed
|
||||||
|
try {
|
||||||
|
parsed = matter(raw)
|
||||||
|
} catch {
|
||||||
|
parsed = { data: {}, content: raw }
|
||||||
|
}
|
||||||
|
|
||||||
|
const hasFrontmatter = raw.trimStart().startsWith('---')
|
||||||
|
|
||||||
|
if (!hasFrontmatter) {
|
||||||
|
// Case B: File exists but no frontmatter → prepend
|
||||||
|
Write(filePath, matter.stringify(raw, defaultFm))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Case C: Frontmatter exists → ensure keywords include extras
|
||||||
|
const existingKeywords = parsed.data.keywords || []
|
||||||
|
const newKeywords = [...new Set([...existingKeywords, defaultKw, ...extraKeywords])]
|
||||||
|
|
||||||
|
if (newKeywords.length !== existingKeywords.length) {
|
||||||
|
parsed.data.keywords = newKeywords
|
||||||
|
Write(filePath, matter.stringify(parsed.content, parsed.data))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Helper: append rules to a spec MD file with category support
|
// Helper: append rules to a spec MD file with category support
|
||||||
// Uses .ccw/specs/ directory (same as frontend/backend spec-index-builder)
|
// Uses .ccw/specs/ directory (same as frontend/backend spec-index-builder)
|
||||||
function appendRulesToSpecFile(filePath, rules, defaultCategory = 'general') {
|
function appendRulesToSpecFile(filePath, rules, defaultCategory = 'general') {
|
||||||
if (rules.length === 0) return
|
if (rules.length === 0) return
|
||||||
|
|
||||||
// Ensure .ccw/specs/ directory exists
|
// Extract domain tags from rules for keyword accumulation
|
||||||
const specDir = path.dirname(filePath)
|
const ruleTags = rules
|
||||||
if (!fs.existsSync(specDir)) {
|
.map(r => r.match(/\[[\w]+:([\w-]+)\]/)?.[1])
|
||||||
fs.mkdirSync(specDir, { recursive: true })
|
.filter(Boolean)
|
||||||
}
|
|
||||||
|
|
||||||
// Check if file exists
|
// Ensure frontmatter exists and keywords include rule tags
|
||||||
if (!file_exists(filePath)) {
|
ensureSpecFrontmatter(filePath, [...new Set(ruleTags)])
|
||||||
// Create file with frontmatter including category
|
|
||||||
const frontmatter = `---
|
|
||||||
title: ${filePath.includes('conventions') ? 'Coding Conventions' : filePath.includes('constraints') ? 'Architecture Constraints' : 'Quality Rules'}
|
|
||||||
readMode: optional
|
|
||||||
priority: medium
|
|
||||||
category: ${defaultCategory}
|
|
||||||
scope: project
|
|
||||||
dimension: specs
|
|
||||||
keywords: [${defaultCategory}, ${filePath.includes('conventions') ? 'convention' : filePath.includes('constraints') ? 'constraint' : 'quality'}]
|
|
||||||
---
|
|
||||||
|
|
||||||
# ${filePath.includes('conventions') ? 'Coding Conventions' : filePath.includes('constraints') ? 'Architecture Constraints' : 'Quality Rules'}
|
|
||||||
|
|
||||||
`
|
|
||||||
Write(filePath, frontmatter)
|
|
||||||
}
|
|
||||||
|
|
||||||
const existing = Read(filePath)
|
const existing = Read(filePath)
|
||||||
// Append new rules as markdown list items after existing content
|
// Append new rules as markdown list items - rules are already in [type:tag] format from caller
|
||||||
const newContent = existing.trimEnd() + '\n' + rules.map(r => `- ${r}`).join('\n') + '\n'
|
const newContent = existing.trimEnd() + '\n' + rules.map(r => {
|
||||||
|
// If rule already has - prefix or [type:tag] format, use as-is
|
||||||
|
if (/^- /.test(r)) return r
|
||||||
|
if (/^\[[\w]+:[\w-]+\]/.test(r)) return `- ${r}`
|
||||||
|
return `- [rule:${defaultCategory}] ${r}`
|
||||||
|
}).join('\n') + '\n'
|
||||||
Write(filePath, newContent)
|
Write(filePath, newContent)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Write conventions (general category) - use .ccw/specs/ (same as frontend/backend)
|
// Helper: infer domain tag from rule content
|
||||||
appendRulesToSpecFile('.ccw/specs/coding-conventions.md',
|
function inferTag(text) {
|
||||||
[...newCodingStyle, ...newNamingPatterns, ...newFileStructure, ...newDocumentation],
|
const t = text.toLowerCase()
|
||||||
'general')
|
if (/\b(api|http|rest|endpoint|routing)\b/.test(t)) return 'api'
|
||||||
|
if (/\b(security|auth|permission|xss|sql|sanitize)\b/.test(t)) return 'security'
|
||||||
|
if (/\b(database|db|sql|postgres|mysql)\b/.test(t)) return 'db'
|
||||||
|
if (/\b(react|component|hook|jsx|tsx)\b/.test(t)) return 'react'
|
||||||
|
if (/\b(performance|cache|lazy|async|slow)\b/.test(t)) return 'perf'
|
||||||
|
if (/\b(test|coverage|mock|jest|vitest)\b/.test(t)) return 'testing'
|
||||||
|
if (/\b(architecture|layer|module|dependency)\b/.test(t)) return 'arch'
|
||||||
|
if (/\b(naming|camel|pascal|prefix|suffix)\b/.test(t)) return 'naming'
|
||||||
|
if (/\b(file|folder|directory|structure)\b/.test(t)) return 'file'
|
||||||
|
if (/\b(doc|comment|jsdoc|readme)\b/.test(t)) return 'doc'
|
||||||
|
if (/\b(build|webpack|vite|compile)\b/.test(t)) return 'build'
|
||||||
|
if (/\b(deploy|ci|cd|docker)\b/.test(t)) return 'deploy'
|
||||||
|
if (/\b(lint|eslint|prettier|format)\b/.test(t)) return 'lint'
|
||||||
|
if (/\b(type|typescript|strict|any)\b/.test(t)) return 'typing'
|
||||||
|
return 'style' // fallback for coding conventions
|
||||||
|
}
|
||||||
|
|
||||||
// Write constraints (planning category)
|
// Write conventions - infer domain tags from content
|
||||||
|
appendRulesToSpecFile('.ccw/specs/coding-conventions.md',
|
||||||
|
[...newCodingStyle, ...newNamingPatterns, ...newFileStructure, ...newDocumentation]
|
||||||
|
.map(r => /^\[[\w]+:[\w-]+\]/.test(r) ? r : `[rule:${inferTag(r)}] ${r}`),
|
||||||
|
'style')
|
||||||
|
|
||||||
|
// Write constraints - infer domain tags from content
|
||||||
appendRulesToSpecFile('.ccw/specs/architecture-constraints.md',
|
appendRulesToSpecFile('.ccw/specs/architecture-constraints.md',
|
||||||
[...newArchitecture, ...newTechStack, ...newPerformance, ...newSecurity],
|
[...newArchitecture, ...newTechStack, ...newPerformance, ...newSecurity]
|
||||||
'planning')
|
.map(r => /^\[[\w]+:[\w-]+\]/.test(r) ? r : `[rule:${inferTag(r)}] ${r}`),
|
||||||
|
'arch')
|
||||||
|
|
||||||
// Write quality rules (execution category)
|
// Write quality rules (execution category)
|
||||||
if (newQualityRules.length > 0) {
|
if (newQualityRules.length > 0) {
|
||||||
const qualityPath = '.ccw/specs/quality-rules.md'
|
const qualityPath = '.ccw/specs/quality-rules.md'
|
||||||
if (!file_exists(qualityPath)) {
|
// ensureSpecFrontmatter handles create/repair/keyword-update
|
||||||
Write(qualityPath, `---
|
ensureSpecFrontmatter(qualityPath, ['quality', 'testing', 'coverage', 'lint'])
|
||||||
title: Quality Rules
|
|
||||||
readMode: required
|
|
||||||
priority: high
|
|
||||||
category: execution
|
|
||||||
scope: project
|
|
||||||
dimension: specs
|
|
||||||
keywords: [execution, quality, testing, coverage, lint]
|
|
||||||
---
|
|
||||||
|
|
||||||
# Quality Rules
|
|
||||||
|
|
||||||
`)
|
|
||||||
}
|
|
||||||
appendRulesToSpecFile(qualityPath,
|
appendRulesToSpecFile(qualityPath,
|
||||||
newQualityRules.map(q => `${q.rule} (scope: ${q.scope}, enforced by: ${q.enforced_by})`),
|
newQualityRules.map(q => `${q.rule} (scope: ${q.scope}, enforced by: ${q.enforced_by})`),
|
||||||
'execution')
|
'execution')
|
||||||
@@ -644,7 +703,8 @@ Next steps:
|
|||||||
|
|
||||||
## Related Commands
|
## Related Commands
|
||||||
|
|
||||||
- `/workflow:spec:add` - Interactive wizard to create individual specs with scope selection
|
- `/workflow:spec:add` - Add knowledge entries (bug/pattern/decision/rule) with unified [type:tag] format
|
||||||
|
- `/workflow:spec:load` - Interactive spec loader with keyword/type/tag filtering
|
||||||
- `/workflow:session:sync` - Quick-sync session work to specs and project-tech
|
- `/workflow:session:sync` - Quick-sync session work to specs and project-tech
|
||||||
- `workflow-plan` skill - Start planning with initialized project context
|
- `workflow-plan` skill - Start planning with initialized project context
|
||||||
- `/workflow:status --project` - View project state and guidelines
|
- `/workflow:status --project` - View project state and guidelines
|
||||||
|
|||||||
@@ -289,7 +289,7 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "init-guidelines",
|
"name": "init-guidelines",
|
||||||
"command": "/workflow:init-guidelines",
|
"command": "/workflow:spec:setup -guidelines",
|
||||||
"description": "Interactive wizard to fill specs/*.md based on project analysis",
|
"description": "Interactive wizard to fill specs/*.md based on project analysis",
|
||||||
"arguments": "[--reset]",
|
"arguments": "[--reset]",
|
||||||
"category": "workflow",
|
"category": "workflow",
|
||||||
@@ -300,7 +300,7 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "init-specs",
|
"name": "init-specs",
|
||||||
"command": "/workflow:init-specs",
|
"command": "/workflow:spec:setup -specs",
|
||||||
"description": "Interactive wizard to create individual specs or personal constraints with scope selection",
|
"description": "Interactive wizard to create individual specs or personal constraints with scope selection",
|
||||||
"arguments": "[--scope <global|project>] [--dimension <specs|personal>] [--category <general|exploration|planning|execution>]",
|
"arguments": "[--scope <global|project>] [--dimension <specs|personal>] [--category <general|exploration|planning|execution>]",
|
||||||
"category": "workflow",
|
"category": "workflow",
|
||||||
@@ -311,7 +311,7 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "init",
|
"name": "init",
|
||||||
"command": "/workflow:init",
|
"command": "/workflow:spec:setup ",
|
||||||
"description": "Initialize project-level state with intelligent project analysis using cli-explore-agent",
|
"description": "Initialize project-level state with intelligent project analysis using cli-explore-agent",
|
||||||
"arguments": "[--regenerate] [--skip-specs]",
|
"arguments": "[--regenerate] [--skip-specs]",
|
||||||
"category": "workflow",
|
"category": "workflow",
|
||||||
|
|||||||
@@ -276,7 +276,7 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "init-guidelines",
|
"name": "init-guidelines",
|
||||||
"command": "/workflow:init-guidelines",
|
"command": "/workflow:spec:setup -guidelines",
|
||||||
"description": "Interactive wizard to fill specs/*.md based on project analysis",
|
"description": "Interactive wizard to fill specs/*.md based on project analysis",
|
||||||
"arguments": "[--reset]",
|
"arguments": "[--reset]",
|
||||||
"category": "workflow",
|
"category": "workflow",
|
||||||
@@ -287,7 +287,7 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "init-specs",
|
"name": "init-specs",
|
||||||
"command": "/workflow:init-specs",
|
"command": "/workflow:spec:setup -specs",
|
||||||
"description": "Interactive wizard to create individual specs or personal constraints with scope selection",
|
"description": "Interactive wizard to create individual specs or personal constraints with scope selection",
|
||||||
"arguments": "[--scope <global|project>] [--dimension <specs|personal>] [--category <general|exploration|planning|execution>]",
|
"arguments": "[--scope <global|project>] [--dimension <specs|personal>] [--category <general|exploration|planning|execution>]",
|
||||||
"category": "workflow",
|
"category": "workflow",
|
||||||
@@ -298,7 +298,7 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "init",
|
"name": "init",
|
||||||
"command": "/workflow:init",
|
"command": "/workflow:spec:setup ",
|
||||||
"description": "Initialize project-level state with intelligent project analysis using cli-explore-agent",
|
"description": "Initialize project-level state with intelligent project analysis using cli-explore-agent",
|
||||||
"arguments": "[--regenerate] [--skip-specs]",
|
"arguments": "[--regenerate] [--skip-specs]",
|
||||||
"category": "workflow",
|
"category": "workflow",
|
||||||
|
|||||||
@@ -298,7 +298,7 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "init-guidelines",
|
"name": "init-guidelines",
|
||||||
"command": "/workflow:init-guidelines",
|
"command": "/workflow:spec:setup -guidelines",
|
||||||
"description": "Interactive wizard to fill specs/*.md based on project analysis",
|
"description": "Interactive wizard to fill specs/*.md based on project analysis",
|
||||||
"arguments": "[--reset]",
|
"arguments": "[--reset]",
|
||||||
"category": "workflow",
|
"category": "workflow",
|
||||||
@@ -309,7 +309,7 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "init-specs",
|
"name": "init-specs",
|
||||||
"command": "/workflow:init-specs",
|
"command": "/workflow:spec:setup -specs",
|
||||||
"description": "Interactive wizard to create individual specs or personal constraints with scope selection",
|
"description": "Interactive wizard to create individual specs or personal constraints with scope selection",
|
||||||
"arguments": "[--scope <global|project>] [--dimension <specs|personal>] [--category <general|exploration|planning|execution>]",
|
"arguments": "[--scope <global|project>] [--dimension <specs|personal>] [--category <general|exploration|planning|execution>]",
|
||||||
"category": "workflow",
|
"category": "workflow",
|
||||||
@@ -320,7 +320,7 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "init",
|
"name": "init",
|
||||||
"command": "/workflow:init",
|
"command": "/workflow:spec:setup ",
|
||||||
"description": "Initialize project-level state with intelligent project analysis using cli-explore-agent",
|
"description": "Initialize project-level state with intelligent project analysis using cli-explore-agent",
|
||||||
"arguments": "[--regenerate] [--skip-specs]",
|
"arguments": "[--regenerate] [--skip-specs]",
|
||||||
"category": "workflow",
|
"category": "workflow",
|
||||||
|
|||||||
@@ -145,7 +145,7 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "init-guidelines",
|
"name": "init-guidelines",
|
||||||
"command": "/workflow:init-guidelines",
|
"command": "/workflow:spec:setup -guidelines",
|
||||||
"description": "Interactive wizard to fill specs/*.md based on project analysis",
|
"description": "Interactive wizard to fill specs/*.md based on project analysis",
|
||||||
"arguments": "[--reset]",
|
"arguments": "[--reset]",
|
||||||
"category": "workflow",
|
"category": "workflow",
|
||||||
@@ -156,7 +156,7 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "init-specs",
|
"name": "init-specs",
|
||||||
"command": "/workflow:init-specs",
|
"command": "/workflow:spec:setup -specs",
|
||||||
"description": "Interactive wizard to create individual specs or personal constraints with scope selection",
|
"description": "Interactive wizard to create individual specs or personal constraints with scope selection",
|
||||||
"arguments": "[--scope <global|project>] [--dimension <specs|personal>] [--category <general|exploration|planning|execution>]",
|
"arguments": "[--scope <global|project>] [--dimension <specs|personal>] [--category <general|exploration|planning|execution>]",
|
||||||
"category": "workflow",
|
"category": "workflow",
|
||||||
@@ -167,7 +167,7 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "init",
|
"name": "init",
|
||||||
"command": "/workflow:init",
|
"command": "/workflow:spec:setup ",
|
||||||
"description": "Initialize project-level state with intelligent project analysis using cli-explore-agent",
|
"description": "Initialize project-level state with intelligent project analysis using cli-explore-agent",
|
||||||
"arguments": "[--regenerate] [--skip-specs]",
|
"arguments": "[--regenerate] [--skip-specs]",
|
||||||
"category": "workflow",
|
"category": "workflow",
|
||||||
|
|||||||
382
.claude/skills/skill-iter-tune/SKILL.md
Normal file
382
.claude/skills/skill-iter-tune/SKILL.md
Normal file
@@ -0,0 +1,382 @@
|
|||||||
|
---
|
||||||
|
name: skill-iter-tune
|
||||||
|
description: Iterative skill tuning via execute-evaluate-improve feedback loop. Uses ccw cli Claude to execute skill, Gemini to evaluate quality, and Agent to apply improvements. Iterates until quality threshold or max iterations. Triggers on "skill iter tune", "iterative skill tuning", "tune skill".
|
||||||
|
allowed-tools: Skill, Agent, AskUserQuestion, TaskCreate, TaskUpdate, TaskList, Read, Write, Edit, Bash, Glob, Grep
|
||||||
|
---
|
||||||
|
|
||||||
|
# Skill Iter Tune
|
||||||
|
|
||||||
|
Iterative skill refinement through execute-evaluate-improve feedback loops. Each iteration runs the skill via Claude, evaluates output via Gemini, and applies improvements via Agent.
|
||||||
|
|
||||||
|
## Architecture Overview
|
||||||
|
|
||||||
|
```
|
||||||
|
┌──────────────────────────────────────────────────────────────────────────┐
|
||||||
|
│ Skill Iter Tune Orchestrator (SKILL.md) │
|
||||||
|
│ → Parse input → Setup workspace → Iteration Loop → Final Report │
|
||||||
|
└────────────────────────────┬─────────────────────────────────────────────┘
|
||||||
|
│
|
||||||
|
┌───────────────────┼───────────────────────────────────┐
|
||||||
|
↓ ↓ ↓
|
||||||
|
┌──────────┐ ┌─────────────────────────────┐ ┌──────────┐
|
||||||
|
│ Phase 1 │ │ Iteration Loop (2→3→4) │ │ Phase 5 │
|
||||||
|
│ Setup │ │ ┌─────┐ ┌─────┐ ┌─────┐ │ │ Report │
|
||||||
|
│ │─────→│ │ P2 │→ │ P3 │→ │ P4 │ │────→│ │
|
||||||
|
│ Backup + │ │ │Exec │ │Eval │ │Impr │ │ │ History │
|
||||||
|
│ Init │ │ └─────┘ └─────┘ └─────┘ │ │ Summary │
|
||||||
|
└──────────┘ │ ↑ │ │ └──────────┘
|
||||||
|
│ └───────────────┘ │
|
||||||
|
│ (if score < threshold │
|
||||||
|
│ AND iter < max) │
|
||||||
|
└─────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### Chain Mode Extension
|
||||||
|
|
||||||
|
```
|
||||||
|
Chain Mode (execution_mode === "chain"):
|
||||||
|
|
||||||
|
Phase 2 runs per-skill in chain_order:
|
||||||
|
Skill A → ccw cli → artifacts/skill-A/
|
||||||
|
↓ (artifacts as input)
|
||||||
|
Skill B → ccw cli → artifacts/skill-B/
|
||||||
|
↓ (artifacts as input)
|
||||||
|
Skill C → ccw cli → artifacts/skill-C/
|
||||||
|
|
||||||
|
Phase 3 evaluates entire chain output + per-skill scores
|
||||||
|
Phase 4 improves weakest skill(s) in chain
|
||||||
|
```
|
||||||
|
|
||||||
|
## Key Design Principles
|
||||||
|
|
||||||
|
1. **Iteration Loop**: Phases 2-3-4 repeat until quality threshold, max iterations, or convergence
|
||||||
|
2. **Two-Tool Pipeline**: Claude (write/execute) + Gemini (analyze/evaluate) = complementary perspectives
|
||||||
|
3. **Pure Orchestrator**: SKILL.md coordinates only — execution detail lives in phase files
|
||||||
|
4. **Progressive Phase Loading**: Phase docs read only when that phase executes
|
||||||
|
5. **Skill Versioning**: Each iteration snapshots skill state before execution
|
||||||
|
6. **Convergence Detection**: Stop early if score stalls (no improvement in 2 consecutive iterations)
|
||||||
|
|
||||||
|
## Interactive Preference Collection
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// ★ Auto mode detection
|
||||||
|
const autoYes = /\b(-y|--yes)\b/.test($ARGUMENTS)
|
||||||
|
|
||||||
|
if (autoYes) {
|
||||||
|
workflowPreferences = {
|
||||||
|
autoYes: true,
|
||||||
|
maxIterations: 5,
|
||||||
|
qualityThreshold: 80,
|
||||||
|
executionMode: 'single'
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
const prefResponse = AskUserQuestion({
|
||||||
|
questions: [
|
||||||
|
{
|
||||||
|
question: "选择迭代调优配置:",
|
||||||
|
header: "Tune Config",
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{ label: "Quick (3 iter, 70)", description: "快速迭代,适合小幅改进" },
|
||||||
|
{ label: "Standard (5 iter, 80) (Recommended)", description: "平衡方案,适合多数场景" },
|
||||||
|
{ label: "Thorough (8 iter, 90)", description: "深度优化,适合生产级 skill" }
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
})
|
||||||
|
|
||||||
|
const configMap = {
|
||||||
|
"Quick": { maxIterations: 3, qualityThreshold: 70 },
|
||||||
|
"Standard": { maxIterations: 5, qualityThreshold: 80 },
|
||||||
|
"Thorough": { maxIterations: 8, qualityThreshold: 90 }
|
||||||
|
}
|
||||||
|
const selected = Object.keys(configMap).find(k =>
|
||||||
|
prefResponse["Tune Config"].startsWith(k)
|
||||||
|
) || "Standard"
|
||||||
|
workflowPreferences = { autoYes: false, ...configMap[selected] }
|
||||||
|
|
||||||
|
// ★ Mode selection: chain vs single
|
||||||
|
const modeResponse = AskUserQuestion({
|
||||||
|
questions: [{
|
||||||
|
question: "选择调优模式:",
|
||||||
|
header: "Tune Mode",
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{ label: "Single Skill (Recommended)", description: "独立调优每个 skill,适合单一 skill 优化" },
|
||||||
|
{ label: "Skill Chain", description: "按链序执行,前一个 skill 的产出作为后一个的输入" }
|
||||||
|
]
|
||||||
|
}]
|
||||||
|
});
|
||||||
|
workflowPreferences.executionMode = modeResponse["Tune Mode"].startsWith("Skill Chain")
|
||||||
|
? "chain" : "single";
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Input Processing
|
||||||
|
|
||||||
|
```
|
||||||
|
$ARGUMENTS → Parse:
|
||||||
|
├─ Skill path(s): first arg, comma-separated for multiple
|
||||||
|
│ e.g., ".claude/skills/my-skill" or "my-skill" (auto-prefixed)
|
||||||
|
│ Chain mode: order preserved as chain_order
|
||||||
|
├─ Test scenario: --scenario "description" or remaining text
|
||||||
|
└─ Flags: --max-iterations=N, --threshold=N, -y/--yes
|
||||||
|
```
|
||||||
|
|
||||||
|
## Execution Flow
|
||||||
|
|
||||||
|
> **⚠️ COMPACT DIRECTIVE**: Context compression MUST check TodoWrite phase status.
|
||||||
|
> The phase currently marked `in_progress` is the active execution phase — preserve its FULL content.
|
||||||
|
> Only compress phases marked `completed` or `pending`.
|
||||||
|
|
||||||
|
### Phase 1: Setup (one-time)
|
||||||
|
|
||||||
|
Read and execute: `Ref: phases/01-setup.md`
|
||||||
|
|
||||||
|
- Parse skill paths, validate existence
|
||||||
|
- Create workspace at `.workflow/.scratchpad/skill-iter-tune-{ts}/`
|
||||||
|
- Backup original skill files
|
||||||
|
- Initialize iteration-state.json
|
||||||
|
|
||||||
|
Output: `workDir`, `targetSkills[]`, `testScenario`, initialized state
|
||||||
|
|
||||||
|
### Iteration Loop
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Orchestrator iteration loop
|
||||||
|
while (true) {
|
||||||
|
// Increment iteration
|
||||||
|
state.current_iteration++;
|
||||||
|
state.iterations.push({
|
||||||
|
round: state.current_iteration,
|
||||||
|
status: 'pending',
|
||||||
|
execution: null,
|
||||||
|
evaluation: null,
|
||||||
|
improvement: null
|
||||||
|
});
|
||||||
|
|
||||||
|
// Update TodoWrite
|
||||||
|
TaskUpdate(iterationTask, {
|
||||||
|
subject: `Iteration ${state.current_iteration}/${state.max_iterations}`,
|
||||||
|
status: 'in_progress',
|
||||||
|
activeForm: `Running iteration ${state.current_iteration}`
|
||||||
|
});
|
||||||
|
|
||||||
|
// === Phase 2: Execute ===
|
||||||
|
// Read: phases/02-execute.md
|
||||||
|
// Single mode: one ccw cli call for all skills
|
||||||
|
// Chain mode: sequential ccw cli per skill in chain_order, passing artifacts
|
||||||
|
// Snapshot skill → construct prompt → ccw cli --tool claude --mode write
|
||||||
|
// Collect artifacts
|
||||||
|
|
||||||
|
// === Phase 3: Evaluate ===
|
||||||
|
// Read: phases/03-evaluate.md
|
||||||
|
// Construct eval prompt → ccw cli --tool gemini --mode analysis
|
||||||
|
// Parse score → write iteration-N-eval.md → check termination
|
||||||
|
|
||||||
|
// Check termination
|
||||||
|
if (shouldTerminate(state)) {
|
||||||
|
break; // → Phase 5
|
||||||
|
}
|
||||||
|
|
||||||
|
// === Phase 4: Improve ===
|
||||||
|
// Read: phases/04-improve.md
|
||||||
|
// Agent applies suggestions → write iteration-N-changes.md
|
||||||
|
|
||||||
|
// Update TodoWrite with score
|
||||||
|
// Continue loop
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 2: Execute Skill (per iteration)
|
||||||
|
|
||||||
|
Read and execute: `Ref: phases/02-execute.md`
|
||||||
|
|
||||||
|
- Snapshot skill → `iteration-{N}/skill-snapshot/`
|
||||||
|
- Build execution prompt from skill content + test scenario
|
||||||
|
- Execute: `ccw cli -p "..." --tool claude --mode write --cd "${iterDir}/artifacts"`
|
||||||
|
- Collect artifacts
|
||||||
|
|
||||||
|
### Phase 3: Evaluate Quality (per iteration)
|
||||||
|
|
||||||
|
Read and execute: `Ref: phases/03-evaluate.md`
|
||||||
|
|
||||||
|
- Build evaluation prompt with skill + artifacts + criteria + history
|
||||||
|
- Execute: `ccw cli -p "..." --tool gemini --mode analysis`
|
||||||
|
- Parse 5-dimension score (Clarity, Completeness, Correctness, Effectiveness, Efficiency)
|
||||||
|
- Write `iteration-{N}-eval.md`
|
||||||
|
- Check termination: score >= threshold | iter >= max | convergence | error limit
|
||||||
|
|
||||||
|
### Phase 4: Apply Improvements (per iteration, skipped on termination)
|
||||||
|
|
||||||
|
Read and execute: `Ref: phases/04-improve.md`
|
||||||
|
|
||||||
|
- Read evaluation suggestions
|
||||||
|
- Launch general-purpose Agent to apply changes
|
||||||
|
- Write `iteration-{N}-changes.md`
|
||||||
|
- Update state
|
||||||
|
|
||||||
|
### Phase 5: Final Report (one-time)
|
||||||
|
|
||||||
|
Read and execute: `Ref: phases/05-report.md`
|
||||||
|
|
||||||
|
- Generate comprehensive report with score progression table
|
||||||
|
- Write `final-report.md`
|
||||||
|
- Display summary to user
|
||||||
|
|
||||||
|
**Phase Reference Documents** (read on-demand when phase executes):
|
||||||
|
|
||||||
|
| Phase | Document | Purpose | Compact |
|
||||||
|
|-------|----------|---------|---------|
|
||||||
|
| 1 | [phases/01-setup.md](phases/01-setup.md) | Initialize workspace and state | TodoWrite 驱动 |
|
||||||
|
| 2 | [phases/02-execute.md](phases/02-execute.md) | Execute skill via ccw cli Claude | TodoWrite 驱动 + 🔄 sentinel |
|
||||||
|
| 3 | [phases/03-evaluate.md](phases/03-evaluate.md) | Evaluate via ccw cli Gemini | TodoWrite 驱动 + 🔄 sentinel |
|
||||||
|
| 4 | [phases/04-improve.md](phases/04-improve.md) | Apply improvements via Agent | TodoWrite 驱动 + 🔄 sentinel |
|
||||||
|
| 5 | [phases/05-report.md](phases/05-report.md) | Generate final report | TodoWrite 驱动 |
|
||||||
|
|
||||||
|
**Compact Rules**:
|
||||||
|
1. **TodoWrite `in_progress`** → 保留完整内容,禁止压缩
|
||||||
|
2. **TodoWrite `completed`** → 可压缩为摘要
|
||||||
|
3. **🔄 sentinel fallback** → 若 compact 后仅存 sentinel 而无完整 Step 协议,立即 `Read()` 恢复
|
||||||
|
|
||||||
|
## Core Rules
|
||||||
|
|
||||||
|
1. **Start Immediately**: First action is preference collection → Phase 1 setup
|
||||||
|
2. **Progressive Loading**: Read phase doc ONLY when that phase is about to execute
|
||||||
|
3. **Snapshot Before Execute**: Always snapshot skill state before each iteration
|
||||||
|
4. **Background CLI**: ccw cli runs in background, wait for hook callback before proceeding
|
||||||
|
5. **Parse Every Output**: Extract structured JSON from CLI outputs for state updates
|
||||||
|
6. **DO NOT STOP**: Continuous iteration until termination condition met
|
||||||
|
7. **Single State Source**: `iteration-state.json` is the only source of truth
|
||||||
|
|
||||||
|
## Data Flow
|
||||||
|
|
||||||
|
```
|
||||||
|
User Input (skill paths + test scenario)
|
||||||
|
↓ (+ execution_mode + chain_order if chain mode)
|
||||||
|
↓
|
||||||
|
Phase 1: Setup
|
||||||
|
↓ workDir, targetSkills[], testScenario, iteration-state.json
|
||||||
|
↓
|
||||||
|
┌─→ Phase 2: Execute (ccw cli claude)
|
||||||
|
│ ↓ artifacts/ (skill execution output)
|
||||||
|
│ ↓
|
||||||
|
│ Phase 3: Evaluate (ccw cli gemini)
|
||||||
|
│ ↓ score, dimensions[], suggestions[], iteration-N-eval.md
|
||||||
|
│ ↓
|
||||||
|
│ [Terminate?]─── YES ──→ Phase 5: Report → final-report.md
|
||||||
|
│ ↓ NO
|
||||||
|
│ ↓
|
||||||
|
│ Phase 4: Improve (Agent)
|
||||||
|
│ ↓ modified skill files, iteration-N-changes.md
|
||||||
|
│ ↓
|
||||||
|
└───┘ next iteration
|
||||||
|
```
|
||||||
|
|
||||||
|
## TodoWrite Pattern
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Initial state
|
||||||
|
TaskCreate({ subject: "Phase 1: Setup workspace", activeForm: "Setting up workspace" })
|
||||||
|
TaskCreate({ subject: "Iteration Loop", activeForm: "Running iterations" })
|
||||||
|
TaskCreate({ subject: "Phase 5: Final Report", activeForm: "Generating report" })
|
||||||
|
|
||||||
|
// Chain mode: create per-skill tracking tasks
|
||||||
|
if (state.execution_mode === 'chain') {
|
||||||
|
for (const skillName of state.chain_order) {
|
||||||
|
TaskCreate({
|
||||||
|
subject: `Chain: ${skillName}`,
|
||||||
|
activeForm: `Tracking ${skillName}`,
|
||||||
|
description: `Skill chain member position ${state.chain_order.indexOf(skillName) + 1}`
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// During iteration N
|
||||||
|
// Single mode: one score per iteration (existing behavior)
|
||||||
|
// Chain mode: per-skill status updates
|
||||||
|
if (state.execution_mode === 'chain') {
|
||||||
|
// After each skill executes in Phase 2:
|
||||||
|
TaskUpdate(chainSkillTask, {
|
||||||
|
subject: `Chain: ${skillName} — Iter ${N} executed`,
|
||||||
|
activeForm: `${skillName} iteration ${N}`
|
||||||
|
})
|
||||||
|
// After Phase 3 evaluates:
|
||||||
|
TaskUpdate(chainSkillTask, {
|
||||||
|
subject: `Chain: ${skillName} — Score ${chainScores[skillName]}/100`,
|
||||||
|
activeForm: `${skillName} scored`
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
// Single mode (existing)
|
||||||
|
TaskCreate({
|
||||||
|
subject: `Iteration ${N}: Score ${score}/100`,
|
||||||
|
activeForm: `Iteration ${N} complete`,
|
||||||
|
description: `Strengths: ... | Weaknesses: ... | Suggestions: ${count}`
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Completed — collapse
|
||||||
|
TaskUpdate(iterLoop, {
|
||||||
|
subject: `Iteration Loop (${totalIters} iters, final: ${finalScore})`,
|
||||||
|
status: 'completed'
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
## Termination Logic
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function shouldTerminate(state) {
|
||||||
|
// 1. Quality threshold met
|
||||||
|
if (state.latest_score >= state.quality_threshold) {
|
||||||
|
return { terminate: true, reason: 'quality_threshold_met' };
|
||||||
|
}
|
||||||
|
// 2. Max iterations reached
|
||||||
|
if (state.current_iteration >= state.max_iterations) {
|
||||||
|
return { terminate: true, reason: 'max_iterations_reached' };
|
||||||
|
}
|
||||||
|
// 3. Convergence: ≤2 points improvement over last 2 iterations
|
||||||
|
if (state.score_trend.length >= 3) {
|
||||||
|
const last3 = state.score_trend.slice(-3);
|
||||||
|
if (last3[2] - last3[0] <= 2) {
|
||||||
|
state.converged = true;
|
||||||
|
return { terminate: true, reason: 'convergence_detected' };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// 4. Error limit
|
||||||
|
if (state.error_count >= state.max_errors) {
|
||||||
|
return { terminate: true, reason: 'error_limit_reached' };
|
||||||
|
}
|
||||||
|
return { terminate: false };
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
| Phase | Error | Recovery |
|
||||||
|
|-------|-------|----------|
|
||||||
|
| 2: Execute | CLI timeout/crash | Retry once with simplified prompt, then skip |
|
||||||
|
| 3: Evaluate | CLI fails | Retry once, then use score 50 with warning |
|
||||||
|
| 3: Evaluate | JSON parse fails | Extract score heuristically, save raw output |
|
||||||
|
| 4: Improve | Agent fails | Rollback from `iteration-{N}/skill-snapshot/` |
|
||||||
|
| Any | 3+ consecutive errors | Terminate with error report |
|
||||||
|
|
||||||
|
**Error Budget**: Each phase gets 1 retry. 3 consecutive failed iterations triggers termination.
|
||||||
|
|
||||||
|
## Coordinator Checklist
|
||||||
|
|
||||||
|
### Pre-Phase Actions
|
||||||
|
- [ ] Read iteration-state.json for current state
|
||||||
|
- [ ] Verify workspace directory exists
|
||||||
|
- [ ] Check error count hasn't exceeded limit
|
||||||
|
|
||||||
|
### Per-Iteration Actions
|
||||||
|
- [ ] Increment current_iteration in state
|
||||||
|
- [ ] Create iteration-{N} subdirectory
|
||||||
|
- [ ] Update TodoWrite with iteration status
|
||||||
|
- [ ] After Phase 3: check termination before Phase 4
|
||||||
|
- [ ] After Phase 4: write state, proceed to next iteration
|
||||||
|
|
||||||
|
### Post-Workflow Actions
|
||||||
|
- [ ] Execute Phase 5 (Report)
|
||||||
|
- [ ] Display final summary to user
|
||||||
|
- [ ] Update all TodoWrite tasks to completed
|
||||||
144
.claude/skills/skill-iter-tune/phases/01-setup.md
Normal file
144
.claude/skills/skill-iter-tune/phases/01-setup.md
Normal file
@@ -0,0 +1,144 @@
|
|||||||
|
# Phase 1: Setup
|
||||||
|
|
||||||
|
Initialize workspace, backup skills, parse inputs.
|
||||||
|
|
||||||
|
## Objective
|
||||||
|
|
||||||
|
- Parse skill path(s) and test scenario from user input
|
||||||
|
- Validate all skill paths exist and contain SKILL.md
|
||||||
|
- Create isolated workspace directory structure
|
||||||
|
- Backup original skill files
|
||||||
|
- Initialize iteration-state.json
|
||||||
|
|
||||||
|
## Execution
|
||||||
|
|
||||||
|
### Step 1.1: Parse Input
|
||||||
|
|
||||||
|
Parse `$ARGUMENTS` to extract skill paths and test scenario.
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Parse skill paths (first argument or comma-separated)
|
||||||
|
const args = $ARGUMENTS.trim();
|
||||||
|
const pathMatch = args.match(/^([^\s]+)/);
|
||||||
|
const rawPaths = pathMatch ? pathMatch[1].split(',') : [];
|
||||||
|
|
||||||
|
// Parse test scenario
|
||||||
|
const scenarioMatch = args.match(/(?:--scenario|--test)\s+"([^"]+)"/);
|
||||||
|
const scenarioText = scenarioMatch ? scenarioMatch[1] : args.replace(rawPaths.join(','), '').trim();
|
||||||
|
|
||||||
|
// Record chain order (preserves input order for chain mode)
|
||||||
|
const chainOrder = rawPaths.map(p => p.startsWith('.claude/') ? p.split('/').pop() : p);
|
||||||
|
|
||||||
|
// If no scenario, ask user
|
||||||
|
if (!scenarioText) {
|
||||||
|
const response = AskUserQuestion({
|
||||||
|
questions: [{
|
||||||
|
question: "Please describe the test scenario for evaluating this skill:",
|
||||||
|
header: "Test Scenario",
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{ label: "General quality test", description: "Evaluate overall skill quality with a generic task" },
|
||||||
|
{ label: "Specific scenario", description: "I'll describe a specific test case" }
|
||||||
|
]
|
||||||
|
}]
|
||||||
|
});
|
||||||
|
// Use response to construct testScenario
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 1.2: Validate Skill Paths
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const targetSkills = [];
|
||||||
|
for (const rawPath of rawPaths) {
|
||||||
|
const skillPath = rawPath.startsWith('.claude/') ? rawPath : `.claude/skills/${rawPath}`;
|
||||||
|
|
||||||
|
// Validate SKILL.md exists
|
||||||
|
const skillFiles = Glob(`${skillPath}/SKILL.md`);
|
||||||
|
if (skillFiles.length === 0) {
|
||||||
|
throw new Error(`Skill not found at: ${skillPath} -- SKILL.md missing`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect all skill files
|
||||||
|
const allFiles = Glob(`${skillPath}/**/*.md`);
|
||||||
|
targetSkills.push({
|
||||||
|
name: skillPath.split('/').pop(),
|
||||||
|
path: skillPath,
|
||||||
|
files: allFiles.map(f => f.replace(skillPath + '/', '')),
|
||||||
|
primary_file: 'SKILL.md'
|
||||||
|
});
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 1.3: Create Workspace
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const ts = Date.now();
|
||||||
|
const workDir = `.workflow/.scratchpad/skill-iter-tune-${ts}`;
|
||||||
|
|
||||||
|
Bash(`mkdir -p "${workDir}/backups" "${workDir}/iterations"`);
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 1.4: Backup Original Skills
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
for (const skill of targetSkills) {
|
||||||
|
Bash(`cp -r "${skill.path}" "${workDir}/backups/${skill.name}"`);
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 1.5: Initialize State
|
||||||
|
|
||||||
|
Write `iteration-state.json` with initial state:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const initialState = {
|
||||||
|
status: 'running',
|
||||||
|
started_at: new Date().toISOString(),
|
||||||
|
updated_at: new Date().toISOString(),
|
||||||
|
target_skills: targetSkills,
|
||||||
|
test_scenario: {
|
||||||
|
description: scenarioText,
|
||||||
|
// Parse --requirements and --input-args from $ARGUMENTS if provided
|
||||||
|
// e.g., --requirements "clear output,no errors" --input-args "my-skill --scenario test"
|
||||||
|
requirements: parseListArg(args, '--requirements') || [],
|
||||||
|
input_args: parseStringArg(args, '--input-args') || '',
|
||||||
|
success_criteria: parseStringArg(args, '--success-criteria') || 'Produces correct, high-quality output'
|
||||||
|
},
|
||||||
|
execution_mode: workflowPreferences.executionMode || 'single',
|
||||||
|
chain_order: workflowPreferences.executionMode === 'chain'
|
||||||
|
? targetSkills.map(s => s.name)
|
||||||
|
: [],
|
||||||
|
current_iteration: 0,
|
||||||
|
max_iterations: workflowPreferences.maxIterations,
|
||||||
|
quality_threshold: workflowPreferences.qualityThreshold,
|
||||||
|
latest_score: 0,
|
||||||
|
score_trend: [],
|
||||||
|
converged: false,
|
||||||
|
iterations: [],
|
||||||
|
errors: [],
|
||||||
|
error_count: 0,
|
||||||
|
max_errors: 3,
|
||||||
|
work_dir: workDir,
|
||||||
|
backup_dir: `${workDir}/backups`
|
||||||
|
};
|
||||||
|
|
||||||
|
Write(`${workDir}/iteration-state.json`, JSON.stringify(initialState, null, 2));
|
||||||
|
|
||||||
|
// Chain mode: create per-skill tracking tasks
|
||||||
|
if (initialState.execution_mode === 'chain') {
|
||||||
|
for (const skill of targetSkills) {
|
||||||
|
TaskCreate({
|
||||||
|
subject: `Chain: ${skill.name}`,
|
||||||
|
activeForm: `Tracking ${skill.name}`,
|
||||||
|
description: `Skill chain member: ${skill.path} | Position: ${targetSkills.indexOf(skill) + 1}/${targetSkills.length}`
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output
|
||||||
|
|
||||||
|
- **Variables**: `workDir`, `targetSkills[]`, `testScenario`, `chainOrder` (chain mode)
|
||||||
|
- **Files**: `iteration-state.json`, `backups/` directory with skill copies
|
||||||
|
- **TodoWrite**: Mark Phase 1 completed, start Iteration Loop. Chain mode: per-skill tracking tasks created
|
||||||
292
.claude/skills/skill-iter-tune/phases/02-execute.md
Normal file
292
.claude/skills/skill-iter-tune/phases/02-execute.md
Normal file
@@ -0,0 +1,292 @@
|
|||||||
|
# Phase 2: Execute Skill
|
||||||
|
|
||||||
|
> **COMPACT SENTINEL [Phase 2: Execute]**
|
||||||
|
> This phase contains 4 execution steps (Step 2.1 -- 2.4).
|
||||||
|
> If you can read this sentinel but cannot find the full Step protocol below, context has been compressed.
|
||||||
|
> Recovery: `Read("phases/02-execute.md")`
|
||||||
|
|
||||||
|
Execute the target skill against the test scenario using `ccw cli --tool claude --mode write`. Claude receives the full skill definition and simulates producing its expected output artifacts.
|
||||||
|
|
||||||
|
## Objective
|
||||||
|
|
||||||
|
- Snapshot current skill version before execution
|
||||||
|
- Construct execution prompt with full skill content + test scenario
|
||||||
|
- Execute via ccw cli Claude
|
||||||
|
- Collect output artifacts
|
||||||
|
|
||||||
|
## Execution
|
||||||
|
|
||||||
|
### Step 2.1: Snapshot Current Skill
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const N = state.current_iteration;
|
||||||
|
const iterDir = `${state.work_dir}/iterations/iteration-${N}`;
|
||||||
|
Bash(`mkdir -p "${iterDir}/skill-snapshot" "${iterDir}/artifacts"`);
|
||||||
|
|
||||||
|
// Chain mode: create per-skill artifact directories
|
||||||
|
if (state.execution_mode === 'chain') {
|
||||||
|
for (const skillName of state.chain_order) {
|
||||||
|
Bash(`mkdir -p "${iterDir}/artifacts/${skillName}"`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshot current skill state (so we can compare/rollback)
|
||||||
|
for (const skill of state.target_skills) {
|
||||||
|
Bash(`cp -r "${skill.path}" "${iterDir}/skill-snapshot/${skill.name}"`);
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2.2: Construct Execution Prompt (Single Mode)
|
||||||
|
|
||||||
|
Read the execute-prompt template and substitute variables.
|
||||||
|
|
||||||
|
> Skip to Step 2.2b if `state.execution_mode === 'chain'`.
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Ref: templates/execute-prompt.md
|
||||||
|
|
||||||
|
// Build skillContent by reading only executable skill files (SKILL.md, phases/, specs/)
|
||||||
|
// Exclude README.md, docs/, and other non-executable files to save tokens
|
||||||
|
const skillContent = state.target_skills.map(skill => {
|
||||||
|
const skillMd = Read(`${skill.path}/SKILL.md`);
|
||||||
|
const phaseFiles = Glob(`${skill.path}/phases/*.md`).sort().map(f => ({
|
||||||
|
relativePath: f.replace(skill.path + '/', ''),
|
||||||
|
content: Read(f)
|
||||||
|
}));
|
||||||
|
const specFiles = Glob(`${skill.path}/specs/*.md`).map(f => ({
|
||||||
|
relativePath: f.replace(skill.path + '/', ''),
|
||||||
|
content: Read(f)
|
||||||
|
}));
|
||||||
|
|
||||||
|
return `### File: SKILL.md\n${skillMd}\n\n` +
|
||||||
|
phaseFiles.map(f => `### File: ${f.relativePath}\n${f.content}`).join('\n\n') +
|
||||||
|
(specFiles.length > 0 ? '\n\n' + specFiles.map(f => `### File: ${f.relativePath}\n${f.content}`).join('\n\n') : '');
|
||||||
|
}).join('\n\n---\n\n');
|
||||||
|
|
||||||
|
// Construct full prompt using template
|
||||||
|
const executePrompt = `PURPOSE: Simulate executing the following workflow skill against a test scenario. Produce all expected output artifacts as if the skill were invoked with the given input.
|
||||||
|
|
||||||
|
SKILL CONTENT:
|
||||||
|
${skillContent}
|
||||||
|
|
||||||
|
TEST SCENARIO:
|
||||||
|
Description: ${state.test_scenario.description}
|
||||||
|
Input Arguments: ${state.test_scenario.input_args}
|
||||||
|
Requirements: ${state.test_scenario.requirements.join('; ')}
|
||||||
|
Success Criteria: ${state.test_scenario.success_criteria}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
1. Study the complete skill structure (SKILL.md + all phase files)
|
||||||
|
2. Follow the skill execution flow sequentially
|
||||||
|
3. For each phase, produce the artifacts that phase would generate
|
||||||
|
4. Write all output artifacts to the current working directory
|
||||||
|
5. Create a manifest.json listing all produced artifacts
|
||||||
|
|
||||||
|
MODE: write
|
||||||
|
CONTEXT: @**/*
|
||||||
|
EXPECTED: All artifacts written to disk + manifest.json
|
||||||
|
CONSTRAINTS: Follow skill flow exactly, produce realistic output, not placeholders`;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2.3: Execute via ccw cli
|
||||||
|
|
||||||
|
> **CHECKPOINT**: Before executing CLI, verify:
|
||||||
|
> 1. This phase is TodoWrite `in_progress`
|
||||||
|
> 2. `iterDir/artifacts/` directory exists
|
||||||
|
> 3. Prompt is properly escaped
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function escapeForShell(str) {
|
||||||
|
return str.replace(/"/g, '\\"').replace(/\$/g, '\\$').replace(/`/g, '\\`');
|
||||||
|
}
|
||||||
|
|
||||||
|
const cliCommand = `ccw cli -p "${escapeForShell(executePrompt)}" --tool claude --mode write --cd "${iterDir}/artifacts"`;
|
||||||
|
|
||||||
|
// Execute in background, wait for hook callback
|
||||||
|
Bash({
|
||||||
|
command: cliCommand,
|
||||||
|
run_in_background: true,
|
||||||
|
timeout: 600000 // 10 minutes max
|
||||||
|
});
|
||||||
|
|
||||||
|
// STOP HERE -- wait for hook callback to resume
|
||||||
|
// After callback, verify artifacts were produced
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2.2b: Chain Execution Path
|
||||||
|
|
||||||
|
> Skip this step if `state.execution_mode === 'single'`.
|
||||||
|
|
||||||
|
In chain mode, execute each skill sequentially. Each skill receives the previous skill's artifacts as input context.
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Chain execution: iterate through chain_order
|
||||||
|
let previousArtifacts = ''; // Accumulates upstream output
|
||||||
|
|
||||||
|
for (let i = 0; i < state.chain_order.length; i++) {
|
||||||
|
const skillName = state.chain_order[i];
|
||||||
|
const skill = state.target_skills.find(s => s.name === skillName);
|
||||||
|
const skillArtifactDir = `${iterDir}/artifacts/${skillName}`;
|
||||||
|
|
||||||
|
// Build this skill's content
|
||||||
|
const skillMd = Read(`${skill.path}/SKILL.md`);
|
||||||
|
const phaseFiles = Glob(`${skill.path}/phases/*.md`).sort().map(f => ({
|
||||||
|
relativePath: f.replace(skill.path + '/', ''),
|
||||||
|
content: Read(f)
|
||||||
|
}));
|
||||||
|
const specFiles = Glob(`${skill.path}/specs/*.md`).map(f => ({
|
||||||
|
relativePath: f.replace(skill.path + '/', ''),
|
||||||
|
content: Read(f)
|
||||||
|
}));
|
||||||
|
|
||||||
|
const singleSkillContent = `### File: SKILL.md\n${skillMd}\n\n` +
|
||||||
|
phaseFiles.map(f => `### File: ${f.relativePath}\n${f.content}`).join('\n\n') +
|
||||||
|
(specFiles.length > 0 ? '\n\n' + specFiles.map(f => `### File: ${f.relativePath}\n${f.content}`).join('\n\n') : '');
|
||||||
|
|
||||||
|
// Build chain context from previous skill's artifacts
|
||||||
|
const chainInputContext = previousArtifacts
|
||||||
|
? `\nPREVIOUS CHAIN OUTPUT (from upstream skill "${state.chain_order[i - 1]}"):\n${previousArtifacts}\n\nIMPORTANT: Use the above output as input context for this skill's execution.\n`
|
||||||
|
: '';
|
||||||
|
|
||||||
|
// Construct per-skill execution prompt
|
||||||
|
// Ref: templates/execute-prompt.md
|
||||||
|
const chainPrompt = `PURPOSE: Simulate executing the following workflow skill against a test scenario. Produce all expected output artifacts.
|
||||||
|
|
||||||
|
SKILL CONTENT (${skillName} — chain position ${i + 1}/${state.chain_order.length}):
|
||||||
|
${singleSkillContent}
|
||||||
|
${chainInputContext}
|
||||||
|
TEST SCENARIO:
|
||||||
|
Description: ${state.test_scenario.description}
|
||||||
|
Input Arguments: ${state.test_scenario.input_args}
|
||||||
|
Requirements: ${state.test_scenario.requirements.join('; ')}
|
||||||
|
Success Criteria: ${state.test_scenario.success_criteria}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
1. Study the complete skill structure
|
||||||
|
2. Follow the skill execution flow sequentially
|
||||||
|
3. Produce all expected artifacts
|
||||||
|
4. Write output to the current working directory
|
||||||
|
5. Create manifest.json listing all produced artifacts
|
||||||
|
|
||||||
|
MODE: write
|
||||||
|
CONTEXT: @**/*
|
||||||
|
CONSTRAINTS: Follow skill flow exactly, produce realistic output`;
|
||||||
|
|
||||||
|
function escapeForShell(str) {
|
||||||
|
return str.replace(/"/g, '\\"').replace(/\$/g, '\\$').replace(/`/g, '\\`');
|
||||||
|
}
|
||||||
|
|
||||||
|
const cliCommand = `ccw cli -p "${escapeForShell(chainPrompt)}" --tool claude --mode write --cd "${skillArtifactDir}"`;
|
||||||
|
|
||||||
|
// Execute in background
|
||||||
|
Bash({
|
||||||
|
command: cliCommand,
|
||||||
|
run_in_background: true,
|
||||||
|
timeout: 600000
|
||||||
|
});
|
||||||
|
|
||||||
|
// STOP -- wait for hook callback
|
||||||
|
|
||||||
|
// After callback: collect artifacts for next skill in chain
|
||||||
|
const artifacts = Glob(`${skillArtifactDir}/**/*`);
|
||||||
|
const skillSuccess = artifacts.length > 0;
|
||||||
|
|
||||||
|
if (skillSuccess) {
|
||||||
|
previousArtifacts = artifacts.slice(0, 10).map(f => {
|
||||||
|
const relPath = f.replace(skillArtifactDir + '/', '');
|
||||||
|
const content = Read(f, { limit: 100 });
|
||||||
|
return `--- ${relPath} ---\n${content}`;
|
||||||
|
}).join('\n\n');
|
||||||
|
} else {
|
||||||
|
// Mid-chain failure: keep previous artifacts for downstream skills
|
||||||
|
// Log warning but continue chain — downstream skills receive last successful output
|
||||||
|
state.errors.push({
|
||||||
|
phase: 'execute',
|
||||||
|
message: `Chain skill "${skillName}" (position ${i + 1}) produced no artifacts. Downstream skills will receive upstream output from "${state.chain_order[i - 1] || 'none'}" instead.`,
|
||||||
|
timestamp: new Date().toISOString()
|
||||||
|
});
|
||||||
|
state.error_count++;
|
||||||
|
// previousArtifacts remains from last successful skill (or empty if first)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update per-skill TodoWrite
|
||||||
|
// TaskUpdate chain skill task with execution status
|
||||||
|
|
||||||
|
// Record per-skill execution
|
||||||
|
if (!state.iterations[N - 1].execution.chain_executions) {
|
||||||
|
state.iterations[N - 1].execution.chain_executions = [];
|
||||||
|
}
|
||||||
|
state.iterations[N - 1].execution.chain_executions.push({
|
||||||
|
skill_name: skillName,
|
||||||
|
cli_command: cliCommand,
|
||||||
|
artifacts_dir: skillArtifactDir,
|
||||||
|
success: skillSuccess
|
||||||
|
});
|
||||||
|
|
||||||
|
// Check error budget: abort chain if too many consecutive failures
|
||||||
|
if (state.error_count >= 3) {
|
||||||
|
state.errors.push({
|
||||||
|
phase: 'execute',
|
||||||
|
message: `Chain execution aborted at skill "${skillName}" — error limit reached (${state.error_count} errors).`,
|
||||||
|
timestamp: new Date().toISOString()
|
||||||
|
});
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2.4: Collect Artifacts
|
||||||
|
|
||||||
|
After CLI completes (hook callback received):
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// List produced artifacts
|
||||||
|
const artifactFiles = Glob(`${iterDir}/artifacts/**/*`);
|
||||||
|
|
||||||
|
// Chain mode: check per-skill artifacts
|
||||||
|
if (state.execution_mode === 'chain') {
|
||||||
|
const chainSuccess = state.iterations[N - 1].execution.chain_executions?.every(e => e.success) ?? false;
|
||||||
|
state.iterations[N - 1].execution.success = chainSuccess;
|
||||||
|
state.iterations[N - 1].execution.artifacts_dir = `${iterDir}/artifacts`;
|
||||||
|
} else {
|
||||||
|
|
||||||
|
if (artifactFiles.length === 0) {
|
||||||
|
// Execution produced nothing -- record error
|
||||||
|
state.iterations[N - 1].execution = {
|
||||||
|
cli_command: cliCommand,
|
||||||
|
started_at: new Date().toISOString(),
|
||||||
|
completed_at: new Date().toISOString(),
|
||||||
|
artifacts_dir: `${iterDir}/artifacts`,
|
||||||
|
success: false
|
||||||
|
};
|
||||||
|
state.error_count++;
|
||||||
|
// Continue to Phase 3 anyway -- Gemini can evaluate the skill even without artifacts
|
||||||
|
} else {
|
||||||
|
state.iterations[N - 1].execution = {
|
||||||
|
cli_command: cliCommand,
|
||||||
|
started_at: new Date().toISOString(),
|
||||||
|
completed_at: new Date().toISOString(),
|
||||||
|
artifacts_dir: `${iterDir}/artifacts`,
|
||||||
|
success: true
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
} // end single mode branch
|
||||||
|
|
||||||
|
// Update state
|
||||||
|
Write(`${state.work_dir}/iteration-state.json`, JSON.stringify(state, null, 2));
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
| Error | Recovery |
|
||||||
|
|-------|----------|
|
||||||
|
| CLI timeout (10min) | Record failure, continue to Phase 3 without artifacts |
|
||||||
|
| CLI crash | Retry once with simplified prompt (SKILL.md only, no phase files) |
|
||||||
|
| No artifacts produced | Continue to Phase 3, evaluation focuses on skill definition quality |
|
||||||
|
|
||||||
|
## Output
|
||||||
|
|
||||||
|
- **Files**: `iteration-{N}/skill-snapshot/`, `iteration-{N}/artifacts/`
|
||||||
|
- **State**: `iterations[N-1].execution` updated
|
||||||
|
- **Next**: Phase 3 (Evaluate)
|
||||||
312
.claude/skills/skill-iter-tune/phases/03-evaluate.md
Normal file
312
.claude/skills/skill-iter-tune/phases/03-evaluate.md
Normal file
@@ -0,0 +1,312 @@
|
|||||||
|
# Phase 3: Evaluate Quality
|
||||||
|
|
||||||
|
> **COMPACT SENTINEL [Phase 3: Evaluate]**
|
||||||
|
> This phase contains 5 execution steps (Step 3.1 -- 3.5).
|
||||||
|
> If you can read this sentinel but cannot find the full Step protocol below, context has been compressed.
|
||||||
|
> Recovery: `Read("phases/03-evaluate.md")`
|
||||||
|
|
||||||
|
Evaluate skill quality using `ccw cli --tool gemini --mode analysis`. Gemini scores the skill across 5 dimensions and provides improvement suggestions.
|
||||||
|
|
||||||
|
## Objective
|
||||||
|
|
||||||
|
- Construct evaluation prompt with skill + artifacts + criteria
|
||||||
|
- Execute via ccw cli Gemini
|
||||||
|
- Parse multi-dimensional score
|
||||||
|
- Write iteration-{N}-eval.md
|
||||||
|
- Check termination conditions
|
||||||
|
|
||||||
|
## Execution
|
||||||
|
|
||||||
|
### Step 3.1: Prepare Evaluation Context
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const N = state.current_iteration;
|
||||||
|
const iterDir = `${state.work_dir}/iterations/iteration-${N}`;
|
||||||
|
|
||||||
|
// Read evaluation criteria
|
||||||
|
// Ref: specs/evaluation-criteria.md
|
||||||
|
const evaluationCriteria = Read('.claude/skills/skill-iter-tune/specs/evaluation-criteria.md');
|
||||||
|
|
||||||
|
// Build skillContent (same pattern as Phase 02 — only executable files)
|
||||||
|
const skillContent = state.target_skills.map(skill => {
|
||||||
|
const skillMd = Read(`${skill.path}/SKILL.md`);
|
||||||
|
const phaseFiles = Glob(`${skill.path}/phases/*.md`).sort().map(f => ({
|
||||||
|
relativePath: f.replace(skill.path + '/', ''),
|
||||||
|
content: Read(f)
|
||||||
|
}));
|
||||||
|
const specFiles = Glob(`${skill.path}/specs/*.md`).map(f => ({
|
||||||
|
relativePath: f.replace(skill.path + '/', ''),
|
||||||
|
content: Read(f)
|
||||||
|
}));
|
||||||
|
return `### File: SKILL.md\n${skillMd}\n\n` +
|
||||||
|
phaseFiles.map(f => `### File: ${f.relativePath}\n${f.content}`).join('\n\n') +
|
||||||
|
(specFiles.length > 0 ? '\n\n' + specFiles.map(f => `### File: ${f.relativePath}\n${f.content}`).join('\n\n') : '');
|
||||||
|
}).join('\n\n---\n\n');
|
||||||
|
|
||||||
|
// Build artifacts summary
|
||||||
|
let artifactsSummary = 'No artifacts produced (execution may have failed)';
|
||||||
|
|
||||||
|
if (state.execution_mode === 'chain') {
|
||||||
|
// Chain mode: group artifacts by skill
|
||||||
|
const chainSummaries = state.chain_order.map(skillName => {
|
||||||
|
const skillArtifactDir = `${iterDir}/artifacts/${skillName}`;
|
||||||
|
const files = Glob(`${skillArtifactDir}/**/*`);
|
||||||
|
if (files.length === 0) return `### ${skillName} (no artifacts)`;
|
||||||
|
const filesSummary = files.map(f => {
|
||||||
|
const relPath = f.replace(`${skillArtifactDir}/`, '');
|
||||||
|
const content = Read(f, { limit: 200 });
|
||||||
|
return `--- ${relPath} ---\n${content}`;
|
||||||
|
}).join('\n\n');
|
||||||
|
return `### ${skillName} (chain position ${state.chain_order.indexOf(skillName) + 1})\n${filesSummary}`;
|
||||||
|
});
|
||||||
|
artifactsSummary = chainSummaries.join('\n\n---\n\n');
|
||||||
|
} else {
|
||||||
|
// Single mode (existing)
|
||||||
|
const artifactFiles = Glob(`${iterDir}/artifacts/**/*`);
|
||||||
|
if (artifactFiles.length > 0) {
|
||||||
|
artifactsSummary = artifactFiles.map(f => {
|
||||||
|
const relPath = f.replace(`${iterDir}/artifacts/`, '');
|
||||||
|
const content = Read(f, { limit: 200 });
|
||||||
|
return `--- ${relPath} ---\n${content}`;
|
||||||
|
}).join('\n\n');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build previous evaluation context
|
||||||
|
const previousEvalContext = state.iterations.filter(i => i.evaluation).length > 0
|
||||||
|
? `PREVIOUS ITERATIONS:\n` + state.iterations.filter(i => i.evaluation).map(iter =>
|
||||||
|
`Iteration ${iter.round}: Score ${iter.evaluation.score}\n` +
|
||||||
|
` Applied: ${iter.improvement?.changes_applied?.map(c => c.summary).join('; ') || 'none'}\n` +
|
||||||
|
` Weaknesses: ${iter.evaluation.weaknesses?.slice(0, 3).join('; ') || 'none'}`
|
||||||
|
).join('\n') + '\nIMPORTANT: Focus on NEW issues not yet addressed.'
|
||||||
|
: '';
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3.2: Construct Evaluation Prompt
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Ref: templates/eval-prompt.md
|
||||||
|
const evalPrompt = `PURPOSE: Evaluate the quality of a workflow skill by examining its definition and produced artifacts.
|
||||||
|
|
||||||
|
SKILL DEFINITION:
|
||||||
|
${skillContent}
|
||||||
|
|
||||||
|
TEST SCENARIO:
|
||||||
|
${state.test_scenario.description}
|
||||||
|
Requirements: ${state.test_scenario.requirements.join('; ')}
|
||||||
|
Success Criteria: ${state.test_scenario.success_criteria}
|
||||||
|
|
||||||
|
ARTIFACTS PRODUCED:
|
||||||
|
${artifactsSummary}
|
||||||
|
|
||||||
|
EVALUATION CRITERIA:
|
||||||
|
${evaluationCriteria}
|
||||||
|
|
||||||
|
${previousEvalContext}
|
||||||
|
|
||||||
|
${state.execution_mode === 'chain' ? `
|
||||||
|
CHAIN CONTEXT:
|
||||||
|
This skill chain contains ${state.chain_order.length} skills executed in order:
|
||||||
|
${state.chain_order.map((s, i) => `${i+1}. ${s}`).join('\n')}
|
||||||
|
Current evaluation covers the entire chain output.
|
||||||
|
Please provide per-skill quality scores in an additional "chain_scores" field: { "${state.chain_order[0]}": <score>, ... }
|
||||||
|
` : ''}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
1. Score each dimension (Clarity 0.20, Completeness 0.25, Correctness 0.25, Effectiveness 0.20, Efficiency 0.10) on 0-100
|
||||||
|
2. Calculate weighted composite score
|
||||||
|
3. List top 3 strengths
|
||||||
|
4. List top 3-5 weaknesses with file:section references
|
||||||
|
5. Provide 3-5 prioritized improvement suggestions with concrete changes
|
||||||
|
|
||||||
|
EXPECTED OUTPUT (strict JSON, no markdown):
|
||||||
|
{
|
||||||
|
"composite_score": <0-100>,
|
||||||
|
"dimensions": [
|
||||||
|
{"name":"Clarity","id":"clarity","score":<0-100>,"weight":0.20,"feedback":"..."},
|
||||||
|
{"name":"Completeness","id":"completeness","score":<0-100>,"weight":0.25,"feedback":"..."},
|
||||||
|
{"name":"Correctness","id":"correctness","score":<0-100>,"weight":0.25,"feedback":"..."},
|
||||||
|
{"name":"Effectiveness","id":"effectiveness","score":<0-100>,"weight":0.20,"feedback":"..."},
|
||||||
|
{"name":"Efficiency","id":"efficiency","score":<0-100>,"weight":0.10,"feedback":"..."}
|
||||||
|
],
|
||||||
|
"strengths": ["...", "...", "..."],
|
||||||
|
"weaknesses": ["...with file:section ref...", "..."],
|
||||||
|
"suggestions": [
|
||||||
|
{"priority":"high|medium|low","target_file":"...","description":"...","rationale":"...","code_snippet":"..."}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
CONSTRAINTS: Be rigorous, reference exact files, focus on highest-impact changes, output ONLY JSON`;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3.3: Execute via ccw cli Gemini
|
||||||
|
|
||||||
|
> **CHECKPOINT**: Verify evaluation prompt is properly constructed before CLI execution.
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Shell escape utility (same as Phase 02)
|
||||||
|
function escapeForShell(str) {
|
||||||
|
return str.replace(/"/g, '\\"').replace(/\$/g, '\\$').replace(/`/g, '\\`');
|
||||||
|
}
|
||||||
|
|
||||||
|
const skillPath = state.target_skills[0].path; // Primary skill for --cd
|
||||||
|
|
||||||
|
const cliCommand = `ccw cli -p "${escapeForShell(evalPrompt)}" --tool gemini --mode analysis --cd "${skillPath}"`;
|
||||||
|
|
||||||
|
// Execute in background
|
||||||
|
Bash({
|
||||||
|
command: cliCommand,
|
||||||
|
run_in_background: true,
|
||||||
|
timeout: 300000 // 5 minutes
|
||||||
|
});
|
||||||
|
|
||||||
|
// STOP -- wait for hook callback
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3.4: Parse Score and Write Eval File
|
||||||
|
|
||||||
|
After CLI completes:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Parse JSON from Gemini output
|
||||||
|
// The output may contain markdown wrapping -- extract JSON
|
||||||
|
const rawOutput = /* CLI output from callback */;
|
||||||
|
const jsonMatch = rawOutput.match(/\{[\s\S]*\}/);
|
||||||
|
let evaluation;
|
||||||
|
|
||||||
|
if (jsonMatch) {
|
||||||
|
try {
|
||||||
|
evaluation = JSON.parse(jsonMatch[0]);
|
||||||
|
// Extract chain_scores if present
|
||||||
|
if (state.execution_mode === 'chain' && evaluation.chain_scores) {
|
||||||
|
state.iterations[N - 1].evaluation.chain_scores = evaluation.chain_scores;
|
||||||
|
}
|
||||||
|
} catch (e) {
|
||||||
|
// Fallback: try to extract score heuristically
|
||||||
|
const scoreMatch = rawOutput.match(/"composite_score"\s*:\s*(\d+)/);
|
||||||
|
evaluation = {
|
||||||
|
composite_score: scoreMatch ? parseInt(scoreMatch[1]) : 50,
|
||||||
|
dimensions: [],
|
||||||
|
strengths: [],
|
||||||
|
weaknesses: ['Evaluation output parsing failed -- raw output saved'],
|
||||||
|
suggestions: []
|
||||||
|
};
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
evaluation = {
|
||||||
|
composite_score: 50,
|
||||||
|
dimensions: [],
|
||||||
|
strengths: [],
|
||||||
|
weaknesses: ['No structured evaluation output -- defaulting to 50'],
|
||||||
|
suggestions: []
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write iteration-N-eval.md
|
||||||
|
const evalReport = `# Iteration ${N} Evaluation
|
||||||
|
|
||||||
|
**Composite Score**: ${evaluation.composite_score}/100
|
||||||
|
**Date**: ${new Date().toISOString()}
|
||||||
|
|
||||||
|
## Dimension Scores
|
||||||
|
|
||||||
|
| Dimension | Score | Weight | Feedback |
|
||||||
|
|-----------|-------|--------|----------|
|
||||||
|
${(evaluation.dimensions || []).map(d =>
|
||||||
|
`| ${d.name} | ${d.score} | ${d.weight} | ${d.feedback} |`
|
||||||
|
).join('\n')}
|
||||||
|
|
||||||
|
${(state.execution_mode === 'chain' && evaluation.chain_scores) ? `
|
||||||
|
## Chain Scores
|
||||||
|
|
||||||
|
| Skill | Score | Chain Position |
|
||||||
|
|-------|-------|----------------|
|
||||||
|
${state.chain_order.map((s, i) => `| ${s} | ${evaluation.chain_scores[s] || '-'} | ${i + 1} |`).join('\n')}
|
||||||
|
` : ''}
|
||||||
|
|
||||||
|
## Strengths
|
||||||
|
${(evaluation.strengths || []).map(s => `- ${s}`).join('\n')}
|
||||||
|
|
||||||
|
## Weaknesses
|
||||||
|
${(evaluation.weaknesses || []).map(w => `- ${w}`).join('\n')}
|
||||||
|
|
||||||
|
## Improvement Suggestions
|
||||||
|
${(evaluation.suggestions || []).map((s, i) =>
|
||||||
|
`### ${i + 1}. [${s.priority}] ${s.description}\n- **Target**: ${s.target_file}\n- **Rationale**: ${s.rationale}\n${s.code_snippet ? `- **Suggested**:\n\`\`\`\n${s.code_snippet}\n\`\`\`` : ''}`
|
||||||
|
).join('\n\n')}
|
||||||
|
`;
|
||||||
|
|
||||||
|
Write(`${iterDir}/iteration-${N}-eval.md`, evalReport);
|
||||||
|
|
||||||
|
// Update state
|
||||||
|
state.iterations[N - 1].evaluation = {
|
||||||
|
score: evaluation.composite_score,
|
||||||
|
dimensions: evaluation.dimensions || [],
|
||||||
|
strengths: evaluation.strengths || [],
|
||||||
|
weaknesses: evaluation.weaknesses || [],
|
||||||
|
suggestions: evaluation.suggestions || [],
|
||||||
|
chain_scores: evaluation.chain_scores || null,
|
||||||
|
eval_file: `${iterDir}/iteration-${N}-eval.md`
|
||||||
|
};
|
||||||
|
state.latest_score = evaluation.composite_score;
|
||||||
|
state.score_trend.push(evaluation.composite_score);
|
||||||
|
|
||||||
|
Write(`${state.work_dir}/iteration-state.json`, JSON.stringify(state, null, 2));
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3.5: Check Termination
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function shouldTerminate(state) {
|
||||||
|
// 1. Quality threshold met
|
||||||
|
if (state.latest_score >= state.quality_threshold) {
|
||||||
|
return { terminate: true, reason: 'quality_threshold_met' };
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. Max iterations reached
|
||||||
|
if (state.current_iteration >= state.max_iterations) {
|
||||||
|
return { terminate: true, reason: 'max_iterations_reached' };
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3. Convergence: no improvement in last 2 iterations
|
||||||
|
if (state.score_trend.length >= 3) {
|
||||||
|
const last3 = state.score_trend.slice(-3);
|
||||||
|
const improvement = last3[2] - last3[0];
|
||||||
|
if (improvement <= 2) {
|
||||||
|
state.converged = true;
|
||||||
|
return { terminate: true, reason: 'convergence_detected' };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 4. Error limit
|
||||||
|
if (state.error_count >= state.max_errors) {
|
||||||
|
return { terminate: true, reason: 'error_limit_reached' };
|
||||||
|
}
|
||||||
|
|
||||||
|
return { terminate: false };
|
||||||
|
}
|
||||||
|
|
||||||
|
const termination = shouldTerminate(state);
|
||||||
|
if (termination.terminate) {
|
||||||
|
state.termination_reason = termination.reason;
|
||||||
|
Write(`${state.work_dir}/iteration-state.json`, JSON.stringify(state, null, 2));
|
||||||
|
// Skip Phase 4, go directly to Phase 5 (Report)
|
||||||
|
} else {
|
||||||
|
// Continue to Phase 4 (Improve)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
| Error | Recovery |
|
||||||
|
|-------|----------|
|
||||||
|
| CLI timeout | Retry once, if still fails use score 50 with warning |
|
||||||
|
| JSON parse failure | Extract score heuristically, save raw output |
|
||||||
|
| No output | Default score 50, note in weaknesses |
|
||||||
|
|
||||||
|
## Output
|
||||||
|
|
||||||
|
- **Files**: `iteration-{N}-eval.md`
|
||||||
|
- **State**: `iterations[N-1].evaluation`, `latest_score`, `score_trend` updated
|
||||||
|
- **Decision**: terminate -> Phase 5, continue -> Phase 4
|
||||||
|
- **TodoWrite**: Update current iteration score display
|
||||||
186
.claude/skills/skill-iter-tune/phases/04-improve.md
Normal file
186
.claude/skills/skill-iter-tune/phases/04-improve.md
Normal file
@@ -0,0 +1,186 @@
|
|||||||
|
# Phase 4: Apply Improvements
|
||||||
|
|
||||||
|
> **COMPACT SENTINEL [Phase 4: Improve]**
|
||||||
|
> This phase contains 4 execution steps (Step 4.1 -- 4.4).
|
||||||
|
> If you can read this sentinel but cannot find the full Step protocol below, context has been compressed.
|
||||||
|
> Recovery: `Read("phases/04-improve.md")`
|
||||||
|
|
||||||
|
Apply targeted improvements to skill files based on evaluation suggestions. Uses a general-purpose Agent to make changes, ensuring only suggested modifications are applied.
|
||||||
|
|
||||||
|
## Objective
|
||||||
|
|
||||||
|
- Read evaluation suggestions from current iteration
|
||||||
|
- Launch Agent to apply improvements in priority order
|
||||||
|
- Document all changes made
|
||||||
|
- Update iteration state
|
||||||
|
|
||||||
|
## Execution
|
||||||
|
|
||||||
|
### Step 4.1: Prepare Improvement Context
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const N = state.current_iteration;
|
||||||
|
const iterDir = `${state.work_dir}/iterations/iteration-${N}`;
|
||||||
|
const evaluation = state.iterations[N - 1].evaluation;
|
||||||
|
|
||||||
|
// Verify we have suggestions to apply
|
||||||
|
if (!evaluation.suggestions || evaluation.suggestions.length === 0) {
|
||||||
|
// No suggestions -- skip improvement, mark iteration complete
|
||||||
|
state.iterations[N - 1].improvement = {
|
||||||
|
changes_applied: [],
|
||||||
|
changes_file: null,
|
||||||
|
improvement_rationale: 'No suggestions provided by evaluation'
|
||||||
|
};
|
||||||
|
state.iterations[N - 1].status = 'completed';
|
||||||
|
Write(`${state.work_dir}/iteration-state.json`, JSON.stringify(state, null, 2));
|
||||||
|
// -> Return to orchestrator for next iteration
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build file inventory for agent context
|
||||||
|
const skillFileInventory = state.target_skills.map(skill => {
|
||||||
|
return `Skill: ${skill.name} (${skill.path})\nFiles:\n` +
|
||||||
|
skill.files.map(f => ` - ${f}`).join('\n');
|
||||||
|
}).join('\n\n');
|
||||||
|
|
||||||
|
// Chain mode: add chain relationship context
|
||||||
|
const chainContext = state.execution_mode === 'chain'
|
||||||
|
? `\nChain Order: ${state.chain_order.join(' -> ')}\n` +
|
||||||
|
`Chain Scores: ${state.chain_order.map(s =>
|
||||||
|
`${s}: ${state.iterations[N-1].evaluation?.chain_scores?.[s] || 'N/A'}`
|
||||||
|
).join(', ')}\n` +
|
||||||
|
`Weakest Link: ${state.chain_order.reduce((min, s) => {
|
||||||
|
const score = state.iterations[N-1].evaluation?.chain_scores?.[s] || 100;
|
||||||
|
return score < (state.iterations[N-1].evaluation?.chain_scores?.[min] || 100) ? s : min;
|
||||||
|
}, state.chain_order[0])}`
|
||||||
|
: '';
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4.2: Launch Improvement Agent
|
||||||
|
|
||||||
|
> **CHECKPOINT**: Before launching agent, verify:
|
||||||
|
> 1. evaluation.suggestions is non-empty
|
||||||
|
> 2. All target_file paths in suggestions are valid
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const suggestionsText = evaluation.suggestions.map((s, i) =>
|
||||||
|
`${i + 1}. [${s.priority.toUpperCase()}] ${s.description}\n` +
|
||||||
|
` Target: ${s.target_file}\n` +
|
||||||
|
` Rationale: ${s.rationale}\n` +
|
||||||
|
(s.code_snippet ? ` Suggested change:\n ${s.code_snippet}\n` : '')
|
||||||
|
).join('\n');
|
||||||
|
|
||||||
|
Agent({
|
||||||
|
subagent_type: 'general-purpose',
|
||||||
|
run_in_background: false,
|
||||||
|
description: `Apply skill improvements iteration ${N}`,
|
||||||
|
prompt: `## Task: Apply Targeted Improvements to Skill Files
|
||||||
|
|
||||||
|
You are improving a workflow skill based on evaluation feedback. Apply ONLY the suggested changes -- do not refactor, add features, or "improve" beyond what is explicitly suggested.
|
||||||
|
|
||||||
|
## Current Score: ${evaluation.score}/100
|
||||||
|
Dimension breakdown:
|
||||||
|
${evaluation.dimensions.map(d => `- ${d.name}: ${d.score}/100`).join('\n')}
|
||||||
|
|
||||||
|
## Skill File Inventory
|
||||||
|
${skillFileInventory}
|
||||||
|
|
||||||
|
${chainContext ? `## Chain Context\n${chainContext}\n\nPrioritize improvements on the weakest skill in the chain. Also consider interface compatibility between adjacent skills in the chain.\n` : ''}
|
||||||
|
|
||||||
|
## Improvement Suggestions (apply in priority order)
|
||||||
|
${suggestionsText}
|
||||||
|
|
||||||
|
## Rules
|
||||||
|
1. Read each target file BEFORE modifying it
|
||||||
|
2. Apply ONLY the suggested changes -- no unsolicited modifications
|
||||||
|
3. If a suggestion's target_file doesn't exist, skip it and note in summary
|
||||||
|
4. If a suggestion conflicts with existing patterns, adapt it to fit (note adaptation)
|
||||||
|
5. Preserve existing code style, naming conventions, and structure
|
||||||
|
6. After all changes, write a change summary to: ${iterDir}/iteration-${N}-changes.md
|
||||||
|
|
||||||
|
## Changes Summary Format (write to ${iterDir}/iteration-${N}-changes.md)
|
||||||
|
|
||||||
|
# Iteration ${N} Changes
|
||||||
|
|
||||||
|
## Applied Suggestions
|
||||||
|
- [high] description: what was changed in which file
|
||||||
|
- [medium] description: what was changed in which file
|
||||||
|
|
||||||
|
## Files Modified
|
||||||
|
- path/to/file.md: brief description of changes
|
||||||
|
|
||||||
|
## Skipped Suggestions (if any)
|
||||||
|
- description: reason for skipping
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
- Any adaptations or considerations
|
||||||
|
|
||||||
|
## Success Criteria
|
||||||
|
- All high-priority suggestions applied
|
||||||
|
- Medium-priority suggestions applied if feasible
|
||||||
|
- Low-priority suggestions applied if trivial
|
||||||
|
- Changes summary written to ${iterDir}/iteration-${N}-changes.md
|
||||||
|
`
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4.3: Verify Changes
|
||||||
|
|
||||||
|
After agent completes:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Verify changes summary was written
|
||||||
|
const changesFile = `${iterDir}/iteration-${N}-changes.md`;
|
||||||
|
const changesExist = Glob(changesFile).length > 0;
|
||||||
|
|
||||||
|
if (!changesExist) {
|
||||||
|
// Agent didn't write summary -- create a minimal one
|
||||||
|
Write(changesFile, `# Iteration ${N} Changes\n\n## Notes\nAgent completed but did not produce changes summary.\n`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read changes summary to extract applied changes
|
||||||
|
const changesContent = Read(changesFile);
|
||||||
|
|
||||||
|
// Parse applied changes (heuristic: count lines starting with "- [")
|
||||||
|
const appliedMatches = changesContent.match(/^- \[.+?\]/gm) || [];
|
||||||
|
const changes_applied = appliedMatches.map(m => ({
|
||||||
|
summary: m.replace(/^- /, ''),
|
||||||
|
file: '' // Extracted from context
|
||||||
|
}));
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4.4: Update State
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
state.iterations[N - 1].improvement = {
|
||||||
|
changes_applied: changes_applied,
|
||||||
|
changes_file: changesFile,
|
||||||
|
improvement_rationale: `Applied ${changes_applied.length} improvements based on evaluation score ${evaluation.score}`
|
||||||
|
};
|
||||||
|
state.iterations[N - 1].status = 'completed';
|
||||||
|
state.updated_at = new Date().toISOString();
|
||||||
|
|
||||||
|
// Also update the skill files list in case new files were created
|
||||||
|
for (const skill of state.target_skills) {
|
||||||
|
skill.files = Glob(`${skill.path}/**/*.md`).map(f => f.replace(skill.path + '/', ''));
|
||||||
|
}
|
||||||
|
|
||||||
|
Write(`${state.work_dir}/iteration-state.json`, JSON.stringify(state, null, 2));
|
||||||
|
|
||||||
|
// -> Return to orchestrator for next iteration (Phase 2) or termination check
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
| Error | Recovery |
|
||||||
|
|-------|----------|
|
||||||
|
| Agent fails to complete | Rollback from skill-snapshot: `cp -r "${iterDir}/skill-snapshot/${skill.name}/*" "${skill.path}/"` |
|
||||||
|
| Agent corrupts files | Same rollback from snapshot |
|
||||||
|
| Changes summary missing | Create minimal summary, continue |
|
||||||
|
| target_file not found | Agent skips suggestion, notes in summary |
|
||||||
|
|
||||||
|
## Output
|
||||||
|
|
||||||
|
- **Files**: `iteration-{N}-changes.md`, modified skill files
|
||||||
|
- **State**: `iterations[N-1].improvement` and `.status` updated
|
||||||
|
- **Next**: Return to orchestrator, begin next iteration (Phase 2) or terminate
|
||||||
166
.claude/skills/skill-iter-tune/phases/05-report.md
Normal file
166
.claude/skills/skill-iter-tune/phases/05-report.md
Normal file
@@ -0,0 +1,166 @@
|
|||||||
|
# Phase 5: Final Report
|
||||||
|
|
||||||
|
> **COMPACT SENTINEL [Phase 5: Report]**
|
||||||
|
> This phase contains 4 execution steps (Step 5.1 -- 5.4).
|
||||||
|
> If you can read this sentinel but cannot find the full Step protocol below, context has been compressed.
|
||||||
|
> Recovery: `Read("phases/05-report.md")`
|
||||||
|
|
||||||
|
Generate comprehensive iteration history report and display results to user.
|
||||||
|
|
||||||
|
## Objective
|
||||||
|
|
||||||
|
- Read complete iteration state
|
||||||
|
- Generate formatted final report with score progression
|
||||||
|
- Write final-report.md
|
||||||
|
- Display summary to user
|
||||||
|
|
||||||
|
## Execution
|
||||||
|
|
||||||
|
### Step 5.1: Read Complete State
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const state = JSON.parse(Read(`${state.work_dir}/iteration-state.json`));
|
||||||
|
state.status = 'completed';
|
||||||
|
state.updated_at = new Date().toISOString();
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 5.2: Generate Report
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Determine outcome
|
||||||
|
const outcomeMap = {
|
||||||
|
quality_threshold_met: 'PASSED -- Quality threshold reached',
|
||||||
|
max_iterations_reached: 'MAX ITERATIONS -- Threshold not reached',
|
||||||
|
convergence_detected: 'CONVERGED -- Score stopped improving',
|
||||||
|
error_limit_reached: 'FAILED -- Too many errors'
|
||||||
|
};
|
||||||
|
const outcome = outcomeMap[state.termination_reason] || 'COMPLETED';
|
||||||
|
|
||||||
|
// Build score progression table
|
||||||
|
const scoreTable = state.iterations
|
||||||
|
.filter(i => i.evaluation)
|
||||||
|
.map(i => {
|
||||||
|
const dims = i.evaluation.dimensions || [];
|
||||||
|
const dimScores = ['clarity', 'completeness', 'correctness', 'effectiveness', 'efficiency']
|
||||||
|
.map(id => {
|
||||||
|
const dim = dims.find(d => d.id === id);
|
||||||
|
return dim ? dim.score : '-';
|
||||||
|
});
|
||||||
|
return `| ${i.round} | ${i.evaluation.score} | ${dimScores.join(' | ')} |`;
|
||||||
|
}).join('\n');
|
||||||
|
|
||||||
|
// Build iteration details
|
||||||
|
const iterationDetails = state.iterations.map(iter => {
|
||||||
|
const evalSection = iter.evaluation
|
||||||
|
? `**Score**: ${iter.evaluation.score}/100\n` +
|
||||||
|
`**Strengths**: ${iter.evaluation.strengths?.join(', ') || 'N/A'}\n` +
|
||||||
|
`**Weaknesses**: ${iter.evaluation.weaknesses?.slice(0, 3).join(', ') || 'N/A'}`
|
||||||
|
: '**Evaluation**: Skipped or failed';
|
||||||
|
|
||||||
|
const changesSection = iter.improvement
|
||||||
|
? `**Changes Applied**: ${iter.improvement.changes_applied?.length || 0}\n` +
|
||||||
|
(iter.improvement.changes_applied?.map(c => ` - ${c.summary}`).join('\n') || ' None')
|
||||||
|
: '**Improvements**: None';
|
||||||
|
|
||||||
|
return `### Iteration ${iter.round}\n${evalSection}\n${changesSection}`;
|
||||||
|
}).join('\n\n');
|
||||||
|
|
||||||
|
const report = `# Skill Iter Tune -- Final Report
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
| Field | Value |
|
||||||
|
|-------|-------|
|
||||||
|
| **Target Skills** | ${state.target_skills.map(s => s.name).join(', ')} |
|
||||||
|
| **Execution Mode** | ${state.execution_mode} |
|
||||||
|
${state.execution_mode === 'chain' ? `| **Chain Order** | ${state.chain_order.join(' -> ')} |` : ''}
|
||||||
|
| **Test Scenario** | ${state.test_scenario.description} |
|
||||||
|
| **Iterations** | ${state.iterations.length} |
|
||||||
|
| **Initial Score** | ${state.score_trend[0] || 'N/A'} |
|
||||||
|
| **Final Score** | ${state.latest_score}/100 |
|
||||||
|
| **Quality Threshold** | ${state.quality_threshold} |
|
||||||
|
| **Outcome** | ${outcome} |
|
||||||
|
| **Started** | ${state.started_at} |
|
||||||
|
| **Completed** | ${state.updated_at} |
|
||||||
|
|
||||||
|
## Score Progression
|
||||||
|
|
||||||
|
| Iter | Composite | Clarity | Completeness | Correctness | Effectiveness | Efficiency |
|
||||||
|
|------|-----------|---------|--------------|-------------|---------------|------------|
|
||||||
|
${scoreTable}
|
||||||
|
|
||||||
|
**Trend**: ${state.score_trend.join(' -> ')}
|
||||||
|
|
||||||
|
${state.execution_mode === 'chain' ? `
|
||||||
|
## Chain Score Progression
|
||||||
|
|
||||||
|
| Iter | ${state.chain_order.join(' | ')} |
|
||||||
|
|------|${state.chain_order.map(() => '------').join('|')}|
|
||||||
|
${state.iterations.filter(i => i.evaluation?.chain_scores).map(i => {
|
||||||
|
const scores = state.chain_order.map(s => i.evaluation.chain_scores[s] || '-');
|
||||||
|
return `| ${i.round} | ${scores.join(' | ')} |`;
|
||||||
|
}).join('\n')}
|
||||||
|
` : ''}
|
||||||
|
|
||||||
|
## Iteration Details
|
||||||
|
|
||||||
|
${iterationDetails}
|
||||||
|
|
||||||
|
## Remaining Weaknesses
|
||||||
|
|
||||||
|
${state.iterations.length > 0 && state.iterations[state.iterations.length - 1].evaluation
|
||||||
|
? state.iterations[state.iterations.length - 1].evaluation.weaknesses?.map(w => `- ${w}`).join('\n') || 'None identified'
|
||||||
|
: 'No evaluation data available'}
|
||||||
|
|
||||||
|
## Artifact Locations
|
||||||
|
|
||||||
|
| Path | Description |
|
||||||
|
|------|-------------|
|
||||||
|
| \`${state.work_dir}/iteration-state.json\` | Complete state history |
|
||||||
|
| \`${state.work_dir}/iterations/iteration-{N}/iteration-{N}-eval.md\` | Per-iteration evaluations |
|
||||||
|
| \`${state.work_dir}/iterations/iteration-{N}/iteration-{N}-changes.md\` | Per-iteration change logs |
|
||||||
|
| \`${state.work_dir}/final-report.md\` | This report |
|
||||||
|
| \`${state.backup_dir}/\` | Original skill backups |
|
||||||
|
|
||||||
|
## Restore Original
|
||||||
|
|
||||||
|
To revert all changes and restore the original skill files:
|
||||||
|
|
||||||
|
\`\`\`bash
|
||||||
|
${state.target_skills.map(s => `cp -r "${state.backup_dir}/${s.name}"/* "${s.path}/"`).join('\n')}
|
||||||
|
\`\`\`
|
||||||
|
`;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 5.3: Write Report and Update State
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Write(`${state.work_dir}/final-report.md`, report);
|
||||||
|
|
||||||
|
state.status = 'completed';
|
||||||
|
Write(`${state.work_dir}/iteration-state.json`, JSON.stringify(state, null, 2));
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 5.4: Display Summary to User
|
||||||
|
|
||||||
|
Output to user:
|
||||||
|
|
||||||
|
```
|
||||||
|
Skill Iter Tune Complete!
|
||||||
|
|
||||||
|
Target: {skill names}
|
||||||
|
Iterations: {count}
|
||||||
|
Score: {initial} -> {final} ({outcome})
|
||||||
|
Threshold: {threshold}
|
||||||
|
|
||||||
|
Score trend: {score1} -> {score2} -> ... -> {scoreN}
|
||||||
|
|
||||||
|
Full report: {workDir}/final-report.md
|
||||||
|
Backups: {backupDir}/
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output
|
||||||
|
|
||||||
|
- **Files**: `final-report.md`
|
||||||
|
- **State**: `status = completed`
|
||||||
|
- **Next**: Workflow complete. Return control to user.
|
||||||
63
.claude/skills/skill-iter-tune/specs/evaluation-criteria.md
Normal file
63
.claude/skills/skill-iter-tune/specs/evaluation-criteria.md
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
# Evaluation Criteria
|
||||||
|
|
||||||
|
Skill 质量评估标准,由 Phase 03 (Evaluate) 引用。Gemini 按此标准对 skill 产出物进行多维度评分。
|
||||||
|
|
||||||
|
## Dimensions
|
||||||
|
|
||||||
|
| Dimension | Weight | ID | Description |
|
||||||
|
|-----------|--------|----|-------------|
|
||||||
|
| Clarity | 0.20 | clarity | 指令清晰无歧义,结构良好,易于遵循。Phase 文件有明确的 Step 划分、输入输出说明 |
|
||||||
|
| Completeness | 0.25 | completeness | 覆盖所有必要阶段、边界情况、错误处理。没有遗漏关键执行路径 |
|
||||||
|
| Correctness | 0.25 | correctness | 逻辑正确,数据流一致,Phase 间无矛盾。State schema 与实际使用匹配 |
|
||||||
|
| Effectiveness | 0.20 | effectiveness | 在给定测试场景下能产出高质量输出。产物满足用户需求和成功标准 |
|
||||||
|
| Efficiency | 0.10 | efficiency | 无冗余内容,上下文使用合理,不浪费 token。Phase 职责清晰无重叠 |
|
||||||
|
|
||||||
|
## Scoring Guide
|
||||||
|
|
||||||
|
| Range | Level | Description |
|
||||||
|
|-------|-------|-------------|
|
||||||
|
| 90-100 | Excellent | 生产级别,几乎无改进空间 |
|
||||||
|
| 80-89 | Good | 可投入使用,仅需微调 |
|
||||||
|
| 70-79 | Adequate | 功能可用,有明显可改进区域 |
|
||||||
|
| 60-69 | Needs Work | 存在影响产出质量的显著问题 |
|
||||||
|
| 0-59 | Poor | 结构或逻辑存在根本性问题 |
|
||||||
|
|
||||||
|
## Composite Score Calculation
|
||||||
|
|
||||||
|
```
|
||||||
|
composite = sum(dimension.score * dimension.weight)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output JSON Schema
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"composite_score": 75,
|
||||||
|
"dimensions": [
|
||||||
|
{ "name": "Clarity", "id": "clarity", "score": 80, "weight": 0.20, "feedback": "..." },
|
||||||
|
{ "name": "Completeness", "id": "completeness", "score": 70, "weight": 0.25, "feedback": "..." },
|
||||||
|
{ "name": "Correctness", "id": "correctness", "score": 78, "weight": 0.25, "feedback": "..." },
|
||||||
|
{ "name": "Effectiveness", "id": "effectiveness", "score": 72, "weight": 0.20, "feedback": "..." },
|
||||||
|
{ "name": "Efficiency", "id": "efficiency", "score": 85, "weight": 0.10, "feedback": "..." }
|
||||||
|
],
|
||||||
|
"strengths": ["...", "...", "..."],
|
||||||
|
"weaknesses": ["...", "...", "..."],
|
||||||
|
"suggestions": [
|
||||||
|
{
|
||||||
|
"priority": "high",
|
||||||
|
"target_file": "phases/02-execute.md",
|
||||||
|
"description": "Add explicit error handling for CLI timeout",
|
||||||
|
"rationale": "Current phase has no recovery path when CLI execution exceeds timeout",
|
||||||
|
"code_snippet": "optional suggested replacement code"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Evaluation Focus by Iteration
|
||||||
|
|
||||||
|
| Iteration | Primary Focus |
|
||||||
|
|-----------|--------------|
|
||||||
|
| 1 | 全面评估,建立 baseline |
|
||||||
|
| 2-3 | 重点关注上一轮 weaknesses 是否改善,避免重复已解决的问题 |
|
||||||
|
| 4+ | 精细化改进,关注 Effectiveness 和 Efficiency |
|
||||||
134
.claude/skills/skill-iter-tune/templates/eval-prompt.md
Normal file
134
.claude/skills/skill-iter-tune/templates/eval-prompt.md
Normal file
@@ -0,0 +1,134 @@
|
|||||||
|
# Evaluation Prompt Template
|
||||||
|
|
||||||
|
Phase 03 使用此模板构造 ccw cli 提示词,让 Gemini 按多维度评估 skill 质量。
|
||||||
|
|
||||||
|
## Template
|
||||||
|
|
||||||
|
```
|
||||||
|
PURPOSE: Evaluate the quality of a workflow skill by examining both its definition files and the artifacts it produced when executed against a test scenario. Provide a structured multi-dimensional score with actionable improvement suggestions.
|
||||||
|
|
||||||
|
SKILL DEFINITION:
|
||||||
|
${skillContent}
|
||||||
|
|
||||||
|
TEST SCENARIO:
|
||||||
|
${testScenario.description}
|
||||||
|
Requirements: ${testScenario.requirements}
|
||||||
|
Success Criteria: ${testScenario.success_criteria}
|
||||||
|
|
||||||
|
ARTIFACTS PRODUCED:
|
||||||
|
${artifactsSummary}
|
||||||
|
|
||||||
|
EVALUATION CRITERIA:
|
||||||
|
${evaluationCriteria}
|
||||||
|
|
||||||
|
${previousEvalContext}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
1. Read all skill definition files and produced artifacts carefully
|
||||||
|
2. Score each dimension on 0-100 based on the evaluation criteria:
|
||||||
|
- Clarity (weight 0.20): Instructions unambiguous, well-structured, easy to follow
|
||||||
|
- Completeness (weight 0.25): All phases, edge cases, error handling covered
|
||||||
|
- Correctness (weight 0.25): Logic sound, data flow consistent, no contradictions
|
||||||
|
- Effectiveness (weight 0.20): Produces high-quality output for the test scenario
|
||||||
|
- Efficiency (weight 0.10): Minimal redundancy, appropriate context usage
|
||||||
|
3. Calculate weighted composite score
|
||||||
|
4. List top 3 strengths
|
||||||
|
5. List top 3-5 weaknesses with specific file:section references
|
||||||
|
6. Provide 3-5 prioritized improvement suggestions with concrete changes
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
|
||||||
|
EXPECTED OUTPUT FORMAT (strict JSON, no markdown wrapping):
|
||||||
|
{
|
||||||
|
"composite_score": <number 0-100>,
|
||||||
|
"dimensions": [
|
||||||
|
{ "name": "Clarity", "id": "clarity", "score": <0-100>, "weight": 0.20, "feedback": "<specific feedback>" },
|
||||||
|
{ "name": "Completeness", "id": "completeness", "score": <0-100>, "weight": 0.25, "feedback": "<specific feedback>" },
|
||||||
|
{ "name": "Correctness", "id": "correctness", "score": <0-100>, "weight": 0.25, "feedback": "<specific feedback>" },
|
||||||
|
{ "name": "Effectiveness", "id": "effectiveness", "score": <0-100>, "weight": 0.20, "feedback": "<specific feedback>" },
|
||||||
|
{ "name": "Efficiency", "id": "efficiency", "score": <0-100>, "weight": 0.10, "feedback": "<specific feedback>" }
|
||||||
|
],
|
||||||
|
"strengths": ["<strength 1>", "<strength 2>", "<strength 3>"],
|
||||||
|
"weaknesses": ["<weakness 1 with file:section reference>", "..."],
|
||||||
|
"suggestions": [
|
||||||
|
{
|
||||||
|
"priority": "high|medium|low",
|
||||||
|
"target_file": "<relative path to skill file>",
|
||||||
|
"description": "<what to change>",
|
||||||
|
"rationale": "<why this improves quality>",
|
||||||
|
"code_snippet": "<optional: suggested replacement content>"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"chain_scores": {
|
||||||
|
"<skill_name>": "<number 0-100, per-skill score — only present in chain mode>"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
CONSTRAINTS:
|
||||||
|
- Be rigorous and specific — reference exact file paths and sections
|
||||||
|
- Each suggestion MUST include a target_file that maps to a skill file
|
||||||
|
- Focus suggestions on highest-impact changes first
|
||||||
|
- Do NOT suggest changes already addressed in previous iterations
|
||||||
|
- Output ONLY the JSON object, no surrounding text or markdown
|
||||||
|
```
|
||||||
|
|
||||||
|
## Variable Substitution
|
||||||
|
|
||||||
|
| Variable | Source | Description |
|
||||||
|
|----------|--------|-------------|
|
||||||
|
| `${skillContent}` | Same as execute-prompt.md | 完整 skill 文件内容 |
|
||||||
|
| `${testScenario.*}` | iteration-state.json | 测试场景信息 |
|
||||||
|
| `${artifactsSummary}` | Phase 03 reads artifacts/ dir | 产出物文件列表 + 内容摘要 |
|
||||||
|
| `${evaluationCriteria}` | specs/evaluation-criteria.md | 评分标准全文 |
|
||||||
|
| `${previousEvalContext}` | 历史迭代记录 | 前几轮评估摘要(避免重复建议) |
|
||||||
|
| `${chainContext}` | Phase 03 constructs | chain 模式下的链上下文信息 |
|
||||||
|
|
||||||
|
## previousEvalContext Construction
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Build context from prior iterations to avoid repeating suggestions
|
||||||
|
const previousEvalContext = state.iterations.length > 0
|
||||||
|
? `PREVIOUS ITERATIONS (context for avoiding duplicate suggestions):
|
||||||
|
${state.iterations.map(iter => `
|
||||||
|
Iteration ${iter.round}: Score ${iter.evaluation?.score || 'N/A'}
|
||||||
|
Applied changes: ${iter.improvement?.changes_applied?.map(c => c.summary).join('; ') || 'none'}
|
||||||
|
Remaining weaknesses: ${iter.evaluation?.weaknesses?.slice(0, 3).join('; ') || 'none'}
|
||||||
|
`).join('')}
|
||||||
|
IMPORTANT: Focus on NEW issues or issues NOT adequately addressed in previous improvements.`
|
||||||
|
: '';
|
||||||
|
```
|
||||||
|
|
||||||
|
## chainContext Construction
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Build chain context for evaluation (chain mode only)
|
||||||
|
const chainContext = state.execution_mode === 'chain'
|
||||||
|
? `CHAIN CONTEXT:
|
||||||
|
This skill chain contains ${state.chain_order.length} skills executed in order:
|
||||||
|
${state.chain_order.map((s, i) => `${i+1}. ${s}`).join('\n')}
|
||||||
|
Current evaluation covers the entire chain output.
|
||||||
|
Please provide per-skill quality scores in an additional "chain_scores" field.`
|
||||||
|
: '';
|
||||||
|
```
|
||||||
|
|
||||||
|
## artifactsSummary Construction
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Read manifest.json if available, otherwise list files
|
||||||
|
const manifestPath = `${iterDir}/artifacts/manifest.json`;
|
||||||
|
let artifactsSummary;
|
||||||
|
|
||||||
|
if (fileExists(manifestPath)) {
|
||||||
|
const manifest = JSON.parse(Read(manifestPath));
|
||||||
|
artifactsSummary = manifest.artifacts.map(a =>
|
||||||
|
`- ${a.path}: ${a.description} (Phase ${a.phase})`
|
||||||
|
).join('\n');
|
||||||
|
} else {
|
||||||
|
// Fallback: list all files with first 200 lines each
|
||||||
|
const files = Glob(`${iterDir}/artifacts/**/*`);
|
||||||
|
artifactsSummary = files.map(f => {
|
||||||
|
const content = Read(f, { limit: 200 });
|
||||||
|
return `--- ${f.replace(iterDir + '/artifacts/', '')} ---\n${content}`;
|
||||||
|
}).join('\n\n');
|
||||||
|
}
|
||||||
|
```
|
||||||
97
.claude/skills/skill-iter-tune/templates/execute-prompt.md
Normal file
97
.claude/skills/skill-iter-tune/templates/execute-prompt.md
Normal file
@@ -0,0 +1,97 @@
|
|||||||
|
# Execute Prompt Template
|
||||||
|
|
||||||
|
Phase 02 使用此模板构造 ccw cli 提示词,让 Claude 模拟执行 skill 并产出所有预期产物。
|
||||||
|
|
||||||
|
## Template
|
||||||
|
|
||||||
|
```
|
||||||
|
PURPOSE: Simulate executing the following workflow skill against a test scenario. Produce all expected output artifacts as if the skill were invoked with the given input. This is for evaluating skill quality.
|
||||||
|
|
||||||
|
SKILL CONTENT:
|
||||||
|
${skillContent}
|
||||||
|
|
||||||
|
TEST SCENARIO:
|
||||||
|
Description: ${testScenario.description}
|
||||||
|
Input Arguments: ${testScenario.input_args}
|
||||||
|
Requirements: ${testScenario.requirements}
|
||||||
|
Success Criteria: ${testScenario.success_criteria}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
1. Study the complete skill structure (SKILL.md + all phase files)
|
||||||
|
2. Follow the skill's execution flow sequentially (Phase 1 → Phase N)
|
||||||
|
3. For each phase, produce the artifacts that phase would generate
|
||||||
|
4. Write all output artifacts to the current working directory
|
||||||
|
5. Create a manifest.json listing all produced artifacts with descriptions
|
||||||
|
|
||||||
|
MODE: write
|
||||||
|
|
||||||
|
CONTEXT: @**/*
|
||||||
|
|
||||||
|
EXPECTED:
|
||||||
|
- All artifacts the skill would produce for this test scenario
|
||||||
|
- Each artifact in its correct relative path
|
||||||
|
- A manifest.json at root: { "artifacts": [{ "path": "...", "description": "...", "phase": N }] }
|
||||||
|
|
||||||
|
CONSTRAINTS:
|
||||||
|
- Follow the skill execution flow exactly — do not skip or reorder phases
|
||||||
|
- Produce realistic, high-quality output (not placeholder content)
|
||||||
|
- If the skill requires user interaction (AskUserQuestion), use reasonable defaults
|
||||||
|
- If the skill invokes external tools/CLIs, document what would be called but produce expected output directly
|
||||||
|
```
|
||||||
|
|
||||||
|
## Variable Substitution
|
||||||
|
|
||||||
|
| Variable | Source | Description |
|
||||||
|
|----------|--------|-------------|
|
||||||
|
| `${skillContent}` | Phase 02 reads all skill files | 完整 SKILL.md + phase 文件内容,用 markdown headers 分隔 |
|
||||||
|
| `${testScenario.description}` | iteration-state.json | 用户描述的测试场景 |
|
||||||
|
| `${testScenario.input_args}` | iteration-state.json | 模拟传给 skill 的参数 |
|
||||||
|
| `${testScenario.requirements}` | iteration-state.json | 质量要求列表 |
|
||||||
|
| `${testScenario.success_criteria}` | iteration-state.json | 成功标准定义 |
|
||||||
|
|
||||||
|
## Chain Mode Extension
|
||||||
|
|
||||||
|
When running in chain mode, the template is invoked once per skill in `chain_order`. Each invocation includes:
|
||||||
|
|
||||||
|
### Additional Variable
|
||||||
|
|
||||||
|
| Variable | Source | Description |
|
||||||
|
|----------|--------|-------------|
|
||||||
|
| `${previousChainOutput}` | Phase 02 chain loop | 前序 skill 的 artifacts 摘要 (chain 模式下非首个 skill) |
|
||||||
|
|
||||||
|
### Chain Prompt Modification
|
||||||
|
|
||||||
|
When `execution_mode === 'chain'`, the prompt includes:
|
||||||
|
|
||||||
|
```
|
||||||
|
PREVIOUS CHAIN OUTPUT (from upstream skill "${previousSkillName}"):
|
||||||
|
${previousChainOutput}
|
||||||
|
|
||||||
|
IMPORTANT: Use the above output as input context for this skill's execution.
|
||||||
|
```
|
||||||
|
|
||||||
|
This section is only added for skills at position 2+ in the chain. The first skill in the chain receives no upstream context.
|
||||||
|
|
||||||
|
## skillContent Construction
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Read only executable skill files and format with consistent headers
|
||||||
|
const skillMd = Read(`${skill.path}/SKILL.md`);
|
||||||
|
const phaseFiles = Glob(`${skill.path}/phases/*.md`).map(f => ({
|
||||||
|
relativePath: f.replace(skill.path + '/', ''),
|
||||||
|
content: Read(f)
|
||||||
|
}));
|
||||||
|
const specFiles = Glob(`${skill.path}/specs/*.md`).map(f => ({
|
||||||
|
relativePath: f.replace(skill.path + '/', ''),
|
||||||
|
content: Read(f)
|
||||||
|
}));
|
||||||
|
|
||||||
|
const skillContent = `
|
||||||
|
### File: SKILL.md
|
||||||
|
${skillMd}
|
||||||
|
|
||||||
|
${phaseFiles.map(f => `### File: ${f.relativePath}\n${f.content}`).join('\n\n')}
|
||||||
|
|
||||||
|
${specFiles.length > 0 ? specFiles.map(f => `### File: ${f.relativePath}\n${f.content}`).join('\n\n') : ''}
|
||||||
|
`.trim();
|
||||||
|
```
|
||||||
@@ -66,7 +66,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-arch-opt/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-arch-opt/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: arch-opt
|
team_name: arch-opt
|
||||||
|
|||||||
@@ -95,7 +95,7 @@ Find ready tasks, spawn workers, STOP.
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-arch-opt/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-arch-opt/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: arch-opt
|
team_name: arch-opt
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
"team_name": "arch-opt",
|
"team_name": "arch-opt",
|
||||||
"team_display_name": "Architecture Optimization",
|
"team_display_name": "Architecture Optimization",
|
||||||
"skill_name": "team-arch-opt",
|
"skill_name": "team-arch-opt",
|
||||||
"skill_path": ".claude/skills/team-arch-opt/",
|
"skill_path": "~ or <project>/.claude/skills/team-arch-opt/",
|
||||||
"pipeline_type": "Linear with Review-Fix Cycle (Parallel-Capable)",
|
"pipeline_type": "Linear with Review-Fix Cycle (Parallel-Capable)",
|
||||||
"completion_action": "interactive",
|
"completion_action": "interactive",
|
||||||
"has_inline_discuss": true,
|
"has_inline_discuss": true,
|
||||||
|
|||||||
@@ -65,7 +65,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-brainstorm/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-brainstorm/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: brainstorm
|
team_name: brainstorm
|
||||||
@@ -89,7 +89,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: ideator
|
role: ideator
|
||||||
role_spec: .claude/skills/team-brainstorm/roles/ideator/role.md
|
role_spec: ~ or <project>/.claude/skills/team-brainstorm/roles/ideator/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: brainstorm
|
team_name: brainstorm
|
||||||
|
|||||||
@@ -91,7 +91,7 @@ Find ready tasks, spawn workers, STOP.
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-brainstorm/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-brainstorm/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: brainstorm
|
team_name: brainstorm
|
||||||
|
|||||||
@@ -32,6 +32,18 @@ Universal team coordination skill: analyze task -> generate role-specs -> dispat
|
|||||||
ccw cli --mode write - code generation and modification
|
ccw cli --mode write - code generation and modification
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Shared Constants
|
||||||
|
|
||||||
|
| Constant | Value |
|
||||||
|
|----------|-------|
|
||||||
|
| Session prefix | `TC` |
|
||||||
|
| Session path | `.workflow/.team/TC-<slug>-<date>/` |
|
||||||
|
| Worker agent | `team-worker` |
|
||||||
|
| Message bus | `mcp__ccw-tools__team_msg(session_id=<session-id>, ...)` |
|
||||||
|
| CLI analysis | `ccw cli --mode analysis` |
|
||||||
|
| CLI write | `ccw cli --mode write` |
|
||||||
|
| Max roles | 5 |
|
||||||
|
|
||||||
## Role Router
|
## Role Router
|
||||||
|
|
||||||
This skill is **coordinator-only**. Workers do NOT invoke this skill -- they are spawned as `team-worker` agents directly.
|
This skill is **coordinator-only**. Workers do NOT invoke this skill -- they are spawned as `team-worker` agents directly.
|
||||||
@@ -85,6 +97,9 @@ User provides task description
|
|||||||
|---------|--------|
|
|---------|--------|
|
||||||
| `check` / `status` | Output execution status graph, no advancement |
|
| `check` / `status` | Output execution status graph, no advancement |
|
||||||
| `resume` / `continue` | Check worker states, advance next step |
|
| `resume` / `continue` | Check worker states, advance next step |
|
||||||
|
| `revise <TASK-ID> [feedback]` | Revise specific task with optional feedback |
|
||||||
|
| `feedback <text>` | Inject feedback into active pipeline |
|
||||||
|
| `improve [dimension]` | Auto-improve weakest quality dimension |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -150,6 +165,17 @@ AskUserQuestion({
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## Specs Reference
|
||||||
|
|
||||||
|
| Spec | Purpose |
|
||||||
|
|------|---------|
|
||||||
|
| [specs/pipelines.md](specs/pipelines.md) | Dynamic pipeline model, task naming, dependency graph |
|
||||||
|
| [specs/role-spec-template.md](specs/role-spec-template.md) | Template for dynamic role-spec generation |
|
||||||
|
| [specs/quality-gates.md](specs/quality-gates.md) | Quality thresholds and scoring dimensions |
|
||||||
|
| [specs/knowledge-transfer.md](specs/knowledge-transfer.md) | Context transfer protocols between roles |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## Session Directory
|
## Session Directory
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -16,6 +16,20 @@ Parse user task description -> detect required capabilities -> build dependency
|
|||||||
|
|
||||||
If task context requires codebase knowledge, set `needs_research: true`. Phase 2 will spawn researcher worker.
|
If task context requires codebase knowledge, set `needs_research: true`. Phase 2 will spawn researcher worker.
|
||||||
|
|
||||||
|
## When to Use
|
||||||
|
|
||||||
|
| Trigger | Condition |
|
||||||
|
|---------|-----------|
|
||||||
|
| New task | Coordinator Phase 1 receives task description |
|
||||||
|
| Re-analysis | User provides revised requirements |
|
||||||
|
| Adapt | handleAdapt extends analysis for new capability |
|
||||||
|
|
||||||
|
## Strategy
|
||||||
|
|
||||||
|
- **Delegation**: Inline execution (coordinator processes directly)
|
||||||
|
- **Mode**: Text-level analysis only (no codebase reading)
|
||||||
|
- **Output**: `<session>/task-analysis.json`
|
||||||
|
|
||||||
## Phase 2: Context Loading
|
## Phase 2: Context Loading
|
||||||
|
|
||||||
| Input | Source | Required |
|
| Input | Source | Required |
|
||||||
|
|||||||
@@ -4,6 +4,20 @@
|
|||||||
|
|
||||||
Create task chains from dynamic dependency graphs. Builds pipelines from the task-analysis.json produced by Phase 1. Workers are spawned as team-worker agents with role-spec paths.
|
Create task chains from dynamic dependency graphs. Builds pipelines from the task-analysis.json produced by Phase 1. Workers are spawned as team-worker agents with role-spec paths.
|
||||||
|
|
||||||
|
## When to Use
|
||||||
|
|
||||||
|
| Trigger | Condition |
|
||||||
|
|---------|-----------|
|
||||||
|
| After analysis | Phase 1 complete, task-analysis.json exists |
|
||||||
|
| After adapt | handleAdapt created new roles, needs new tasks |
|
||||||
|
| Re-dispatch | Pipeline restructuring (rare) |
|
||||||
|
|
||||||
|
## Strategy
|
||||||
|
|
||||||
|
- **Delegation**: Inline execution (coordinator processes directly)
|
||||||
|
- **Inputs**: task-analysis.json + team-session.json
|
||||||
|
- **Output**: TaskCreate calls with dependency chains
|
||||||
|
|
||||||
## Phase 2: Context Loading
|
## Phase 2: Context Loading
|
||||||
|
|
||||||
| Input | Source | Required |
|
| Input | Source | Required |
|
||||||
|
|||||||
@@ -4,6 +4,22 @@
|
|||||||
|
|
||||||
Event-driven pipeline coordination with Spawn-and-Stop pattern. Role names are read from `team-session.json#roles`. Workers are spawned as `team-worker` agents with role-spec paths. Includes `handleComplete` for pipeline completion action and `handleAdapt` for mid-pipeline capability gap handling.
|
Event-driven pipeline coordination with Spawn-and-Stop pattern. Role names are read from `team-session.json#roles`. Workers are spawned as `team-worker` agents with role-spec paths. Includes `handleComplete` for pipeline completion action and `handleAdapt` for mid-pipeline capability gap handling.
|
||||||
|
|
||||||
|
## When to Use
|
||||||
|
|
||||||
|
| Trigger | Condition |
|
||||||
|
|---------|-----------|
|
||||||
|
| Worker callback | Message contains [role-name] from session roles |
|
||||||
|
| User command | "check", "status", "resume", "continue" |
|
||||||
|
| Capability gap | Worker reports capability_gap |
|
||||||
|
| Pipeline spawn | After dispatch, initial spawn needed |
|
||||||
|
| Pipeline complete | All tasks done |
|
||||||
|
|
||||||
|
## Strategy
|
||||||
|
|
||||||
|
- **Delegation**: Inline execution with handler routing
|
||||||
|
- **Beat model**: ONE_STEP_PER_INVOCATION — one handler then STOP
|
||||||
|
- **Workers**: Spawned as team-worker via Agent() in background
|
||||||
|
|
||||||
## Constants
|
## Constants
|
||||||
|
|
||||||
| Constant | Value | Description |
|
| Constant | Value | Description |
|
||||||
|
|||||||
@@ -1,3 +1,7 @@
|
|||||||
|
---
|
||||||
|
role: coordinator
|
||||||
|
---
|
||||||
|
|
||||||
# Coordinator Role
|
# Coordinator Role
|
||||||
|
|
||||||
Orchestrate the team-coordinate workflow: task analysis, dynamic role-spec generation, task dispatching, progress monitoring, session state, and completion action. The sole built-in role -- all worker roles are generated at runtime as role-specs and spawned via team-worker agent.
|
Orchestrate the team-coordinate workflow: task analysis, dynamic role-spec generation, task dispatching, progress monitoring, session state, and completion action. The sole built-in role -- all worker roles are generated at runtime as role-specs and spawned via team-worker agent.
|
||||||
@@ -33,6 +37,30 @@ Orchestrate the team-coordinate workflow: task analysis, dynamic role-spec gener
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## Message Types
|
||||||
|
|
||||||
|
| Type | Direction | Trigger |
|
||||||
|
|------|-----------|---------|
|
||||||
|
| state_update | outbound | Session init, pipeline progress |
|
||||||
|
| task_unblocked | outbound | Task ready for execution |
|
||||||
|
| fast_advance | inbound | Worker skipped coordinator |
|
||||||
|
| capability_gap | inbound | Worker needs new capability |
|
||||||
|
| error | inbound | Worker failure |
|
||||||
|
| impl_complete | inbound | Worker task done |
|
||||||
|
| consensus_blocked | inbound | Discussion verdict conflict |
|
||||||
|
|
||||||
|
## Message Bus Protocol
|
||||||
|
|
||||||
|
All coordinator state changes MUST be logged to team_msg BEFORE SendMessage:
|
||||||
|
|
||||||
|
1. `team_msg(operation="log", ...)` — log the event
|
||||||
|
2. `SendMessage(...)` — communicate to worker/user
|
||||||
|
3. `TaskUpdate(...)` — update task state
|
||||||
|
|
||||||
|
Read state before every handler: `team_msg(operation="get_state", session_id=<session-id>)`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## Command Execution Protocol
|
## Command Execution Protocol
|
||||||
|
|
||||||
When coordinator needs to execute a command (analyze-task, dispatch, monitor):
|
When coordinator needs to execute a command (analyze-task, dispatch, monitor):
|
||||||
@@ -52,6 +80,20 @@ Phase 1 needs task analysis
|
|||||||
-> Continue to Phase 2
|
-> Continue to Phase 2
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Toolbox
|
||||||
|
|
||||||
|
| Tool | Type | Purpose |
|
||||||
|
|------|------|---------|
|
||||||
|
| commands/analyze-task.md | Command | Task analysis and role design |
|
||||||
|
| commands/dispatch.md | Command | Task chain creation |
|
||||||
|
| commands/monitor.md | Command | Pipeline monitoring and handlers |
|
||||||
|
| team-worker | Subagent | Worker spawning |
|
||||||
|
| TeamCreate / TeamDelete | System | Team lifecycle |
|
||||||
|
| TaskCreate / TaskList / TaskGet / TaskUpdate | System | Task lifecycle |
|
||||||
|
| team_msg | System | Message bus operations |
|
||||||
|
| SendMessage | System | Inter-agent communication |
|
||||||
|
| AskUserQuestion | System | User interaction |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Entry Router
|
## Entry Router
|
||||||
|
|||||||
111
.claude/skills/team-coordinate/specs/knowledge-transfer.md
Normal file
111
.claude/skills/team-coordinate/specs/knowledge-transfer.md
Normal file
@@ -0,0 +1,111 @@
|
|||||||
|
# Knowledge Transfer Protocols
|
||||||
|
|
||||||
|
## 1. Transfer Channels
|
||||||
|
|
||||||
|
| Channel | Scope | Mechanism | When to Use |
|
||||||
|
|---------|-------|-----------|-------------|
|
||||||
|
| **Artifacts** | Producer -> Consumer | Write to `<session>/artifacts/<name>.md`, consumer reads in Phase 2 | Structured deliverables (reports, plans, specs) |
|
||||||
|
| **State Updates** | Cross-role | `team_msg(operation="log", type="state_update", data={...})` / `team_msg(operation="get_state", session_id=<session-id>)` | Key findings, decisions, metadata (small, structured data) |
|
||||||
|
| **Wisdom** | Cross-task | Append to `<session>/wisdom/{learnings,decisions,conventions,issues}.md` | Patterns, conventions, risks discovered during execution |
|
||||||
|
| **Context Accumulator** | Intra-role (inner loop) | In-memory array, passed to each subsequent task in same-prefix loop | Prior task summaries within same role's inner loop |
|
||||||
|
| **Exploration Cache** | Cross-role | `<session>/explorations/cache-index.json` + per-angle JSON | Codebase discovery results, prevents duplicate exploration |
|
||||||
|
|
||||||
|
## 2. Context Loading Protocol (Phase 2)
|
||||||
|
|
||||||
|
Every role MUST load context in this order before starting work.
|
||||||
|
|
||||||
|
| Step | Action | Required |
|
||||||
|
|------|--------|----------|
|
||||||
|
| 1 | Extract session path from task description | Yes |
|
||||||
|
| 2 | `team_msg(operation="get_state", session_id=<session-id>)` | Yes |
|
||||||
|
| 3 | Read artifact files from upstream state's `ref` paths | Yes |
|
||||||
|
| 4 | Read `<session>/wisdom/*.md` if exists | Yes |
|
||||||
|
| 5 | Check `<session>/explorations/cache-index.json` before new exploration | If exploring |
|
||||||
|
| 6 | For inner_loop roles: load context_accumulator from prior tasks | If inner_loop |
|
||||||
|
|
||||||
|
**Loading rules**:
|
||||||
|
- Never skip step 2 -- state contains key decisions and findings
|
||||||
|
- If `ref` path in state does not exist, log warning and continue
|
||||||
|
- Wisdom files are append-only -- read all entries, newest last
|
||||||
|
|
||||||
|
## 3. Context Publishing Protocol (Phase 4)
|
||||||
|
|
||||||
|
| Step | Action | Required |
|
||||||
|
|------|--------|----------|
|
||||||
|
| 1 | Write deliverable to `<session>/artifacts/<task-id>-<name>.md` | Yes |
|
||||||
|
| 2 | Send `team_msg(type="state_update")` with payload (see schema below) | Yes |
|
||||||
|
| 3 | Append wisdom entries for learnings, decisions, issues found | If applicable |
|
||||||
|
|
||||||
|
## 4. State Update Schema
|
||||||
|
|
||||||
|
Sent via `team_msg(type="state_update")` on task completion.
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"status": "task_complete",
|
||||||
|
"task_id": "<TASK-NNN>",
|
||||||
|
"ref": "<session>/artifacts/<filename>",
|
||||||
|
"key_findings": [
|
||||||
|
"Finding 1",
|
||||||
|
"Finding 2"
|
||||||
|
],
|
||||||
|
"decisions": [
|
||||||
|
"Decision with rationale"
|
||||||
|
],
|
||||||
|
"files_modified": [
|
||||||
|
"path/to/file.ts"
|
||||||
|
],
|
||||||
|
"verification": "self-validated | peer-reviewed | tested"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Field rules**:
|
||||||
|
- `ref`: Always an artifact path, never inline content
|
||||||
|
- `key_findings`: Max 5 items, each under 100 chars
|
||||||
|
- `decisions`: Include rationale, not just the choice
|
||||||
|
- `files_modified`: Only for implementation tasks
|
||||||
|
- `verification`: One of `self-validated`, `peer-reviewed`, `tested`
|
||||||
|
|
||||||
|
**Write state** (namespaced by role):
|
||||||
|
```
|
||||||
|
team_msg(operation="log", session_id=<session-id>, from=<role>, type="state_update", data={
|
||||||
|
"<role_name>": { "key_findings": [...], "scope": "..." }
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
**Read state**:
|
||||||
|
```
|
||||||
|
team_msg(operation="get_state", session_id=<session-id>)
|
||||||
|
// Returns merged state from all state_update messages
|
||||||
|
```
|
||||||
|
|
||||||
|
## 5. Exploration Cache Protocol
|
||||||
|
|
||||||
|
Prevents redundant research across tasks and discussion rounds.
|
||||||
|
|
||||||
|
| Step | Action |
|
||||||
|
|------|--------|
|
||||||
|
| 1 | Read `<session>/explorations/cache-index.json` |
|
||||||
|
| 2 | If angle already explored, read cached result from `explore-<angle>.json` |
|
||||||
|
| 3 | If not cached, perform exploration |
|
||||||
|
| 4 | Write result to `<session>/explorations/explore-<angle>.json` |
|
||||||
|
| 5 | Update `cache-index.json` with new entry |
|
||||||
|
|
||||||
|
**cache-index.json format**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"entries": [
|
||||||
|
{
|
||||||
|
"angle": "competitor-analysis",
|
||||||
|
"file": "explore-competitor-analysis.json",
|
||||||
|
"created_by": "RESEARCH-001",
|
||||||
|
"timestamp": "2026-01-15T10:30:00Z"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Rules**:
|
||||||
|
- Cache key is the exploration `angle` (normalized to kebab-case)
|
||||||
|
- Cache entries never expire within a session
|
||||||
|
- Any role can read cached explorations; only the creator updates them
|
||||||
@@ -81,3 +81,17 @@ message_types:
|
|||||||
## Specs Reference
|
## Specs Reference
|
||||||
|
|
||||||
- [role-spec-template.md](role-spec-template.md) — Template for generating dynamic role-specs
|
- [role-spec-template.md](role-spec-template.md) — Template for generating dynamic role-specs
|
||||||
|
- [quality-gates.md](quality-gates.md) — Quality thresholds and scoring dimensions
|
||||||
|
- [knowledge-transfer.md](knowledge-transfer.md) — Context transfer protocols between roles
|
||||||
|
|
||||||
|
## Quality Gate Integration
|
||||||
|
|
||||||
|
Dynamic pipelines reference quality thresholds from [specs/quality-gates.md](quality-gates.md).
|
||||||
|
|
||||||
|
| Gate Point | Trigger | Criteria Source |
|
||||||
|
|------------|---------|----------------|
|
||||||
|
| After artifact production | Producer role Phase 4 | Behavioral Traits in role-spec |
|
||||||
|
| After validation tasks | Tester/analyst completion | quality-gates.md thresholds |
|
||||||
|
| Pipeline completion | All tasks done | Aggregate scoring |
|
||||||
|
|
||||||
|
Issue classification: Error (blocks) > Warning (proceed with justification) > Info (log for future).
|
||||||
|
|||||||
112
.claude/skills/team-coordinate/specs/quality-gates.md
Normal file
112
.claude/skills/team-coordinate/specs/quality-gates.md
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
# Quality Gates
|
||||||
|
|
||||||
|
## 1. Quality Thresholds
|
||||||
|
|
||||||
|
| Result | Score | Action |
|
||||||
|
|--------|-------|--------|
|
||||||
|
| Pass | >= 80% | Report completed |
|
||||||
|
| Review | 60-79% | Report completed with warnings |
|
||||||
|
| Fail | < 60% | Retry Phase 3 (max 2 retries) |
|
||||||
|
|
||||||
|
## 2. Scoring Dimensions
|
||||||
|
|
||||||
|
| Dimension | Weight | Criteria |
|
||||||
|
|-----------|--------|----------|
|
||||||
|
| Completeness | 25% | All required outputs present with substantive content |
|
||||||
|
| Consistency | 25% | Terminology, formatting, cross-references are uniform |
|
||||||
|
| Accuracy | 25% | Outputs are factually correct and verifiable against sources |
|
||||||
|
| Depth | 25% | Sufficient detail for downstream consumers to act on deliverables |
|
||||||
|
|
||||||
|
**Score** = weighted average of all dimensions (0-100 per dimension).
|
||||||
|
|
||||||
|
## 3. Dynamic Role Quality Checks
|
||||||
|
|
||||||
|
Quality checks vary by `output_type` (from task-analysis.json role metadata).
|
||||||
|
|
||||||
|
### output_type: artifact
|
||||||
|
|
||||||
|
| Check | Pass Criteria |
|
||||||
|
|-------|---------------|
|
||||||
|
| Artifact exists | File written to `<session>/artifacts/` |
|
||||||
|
| Content non-empty | Substantive content, not just headers |
|
||||||
|
| Format correct | Expected format (MD, JSON) matches deliverable |
|
||||||
|
| Cross-references | All references to upstream artifacts resolve |
|
||||||
|
|
||||||
|
### output_type: codebase
|
||||||
|
|
||||||
|
| Check | Pass Criteria |
|
||||||
|
|-------|---------------|
|
||||||
|
| Files modified | Claimed files actually changed (Read to confirm) |
|
||||||
|
| Syntax valid | No syntax errors in modified files |
|
||||||
|
| No regressions | Existing functionality preserved |
|
||||||
|
| Summary artifact | Implementation summary written to artifacts/ |
|
||||||
|
|
||||||
|
### output_type: mixed
|
||||||
|
|
||||||
|
All checks from both `artifact` and `codebase` apply.
|
||||||
|
|
||||||
|
## 4. Verification Protocol
|
||||||
|
|
||||||
|
Derived from Behavioral Traits in [role-spec-template.md](role-spec-template.md).
|
||||||
|
|
||||||
|
| Step | Action | Required |
|
||||||
|
|------|--------|----------|
|
||||||
|
| 1 | Verify all claimed files exist via Read | Yes |
|
||||||
|
| 2 | Confirm artifact written to `<session>/artifacts/` | Yes |
|
||||||
|
| 3 | Check verification summary fields present | Yes |
|
||||||
|
| 4 | Score against quality dimensions | Yes |
|
||||||
|
| 5 | Apply threshold -> Pass/Review/Fail | Yes |
|
||||||
|
|
||||||
|
**On Fail**: Retry Phase 3 (max 2 retries). After 2 retries, report `partial_completion`.
|
||||||
|
|
||||||
|
**On Review**: Proceed with warnings logged to `<session>/wisdom/issues.md`.
|
||||||
|
|
||||||
|
## 5. Code Review Dimensions
|
||||||
|
|
||||||
|
For REVIEW-* or validation tasks during implementation pipelines.
|
||||||
|
|
||||||
|
### Quality
|
||||||
|
|
||||||
|
| Check | Severity |
|
||||||
|
|-------|----------|
|
||||||
|
| Empty catch blocks | Error |
|
||||||
|
| `as any` type casts | Warning |
|
||||||
|
| `@ts-ignore` / `@ts-expect-error` | Warning |
|
||||||
|
| `console.log` in production code | Warning |
|
||||||
|
| Unused imports/variables | Info |
|
||||||
|
|
||||||
|
### Security
|
||||||
|
|
||||||
|
| Check | Severity |
|
||||||
|
|-------|----------|
|
||||||
|
| Hardcoded secrets/credentials | Error |
|
||||||
|
| SQL injection vectors | Error |
|
||||||
|
| `eval()` or `Function()` usage | Error |
|
||||||
|
| `innerHTML` assignment | Warning |
|
||||||
|
| Missing input validation | Warning |
|
||||||
|
|
||||||
|
### Architecture
|
||||||
|
|
||||||
|
| Check | Severity |
|
||||||
|
|-------|----------|
|
||||||
|
| Circular dependencies | Error |
|
||||||
|
| Deep cross-boundary imports (3+ levels) | Warning |
|
||||||
|
| Files > 500 lines | Warning |
|
||||||
|
| Functions > 50 lines | Info |
|
||||||
|
|
||||||
|
### Requirements Coverage
|
||||||
|
|
||||||
|
| Check | Severity |
|
||||||
|
|-------|----------|
|
||||||
|
| Core functionality implemented | Error if missing |
|
||||||
|
| Acceptance criteria covered | Error if missing |
|
||||||
|
| Edge cases handled | Warning |
|
||||||
|
| Error states handled | Warning |
|
||||||
|
|
||||||
|
## 6. Issue Classification
|
||||||
|
|
||||||
|
| Class | Label | Action |
|
||||||
|
|-------|-------|--------|
|
||||||
|
| Error | Must fix | Blocks progression, must resolve before proceeding |
|
||||||
|
| Warning | Should fix | Should resolve, can proceed with justification |
|
||||||
|
| Info | Nice to have | Optional improvement, log for future |
|
||||||
@@ -46,6 +46,7 @@ message_types:
|
|||||||
| `prefix` | Yes | Task prefix to filter (e.g., RESEARCH, DRAFT, IMPL) |
|
| `prefix` | Yes | Task prefix to filter (e.g., RESEARCH, DRAFT, IMPL) |
|
||||||
| `inner_loop` | Yes | Whether team-worker loops through same-prefix tasks |
|
| `inner_loop` | Yes | Whether team-worker loops through same-prefix tasks |
|
||||||
| `CLI tools` | No | Array of CLI tool types this role may call |
|
| `CLI tools` | No | Array of CLI tool types this role may call |
|
||||||
|
| `output_tag` | Yes | Output tag for all messages, e.g., `[researcher]` |
|
||||||
| `message_types` | Yes | Message type mapping for team_msg |
|
| `message_types` | Yes | Message type mapping for team_msg |
|
||||||
| `message_types.success` | Yes | Type string for successful completion |
|
| `message_types.success` | Yes | Type string for successful completion |
|
||||||
| `message_types.error` | Yes | Type string for errors (usually "error") |
|
| `message_types.error` | Yes | Type string for errors (usually "error") |
|
||||||
@@ -63,6 +64,29 @@ message_types:
|
|||||||
| `<placeholder>` notation | Use angle brackets for variable substitution |
|
| `<placeholder>` notation | Use angle brackets for variable substitution |
|
||||||
| Reference CLI tools by name | team-worker resolves invocation from its delegation templates |
|
| Reference CLI tools by name | team-worker resolves invocation from its delegation templates |
|
||||||
|
|
||||||
|
## Generated Role-Spec Structure
|
||||||
|
|
||||||
|
Every generated role-spec MUST include these blocks:
|
||||||
|
|
||||||
|
### Identity Block (mandatory — first section of generated spec)
|
||||||
|
|
||||||
|
```
|
||||||
|
Tag: [<role_name>] | Prefix: <PREFIX>-*
|
||||||
|
Responsibility: <one-line from task analysis>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Boundaries Block (mandatory — after Identity)
|
||||||
|
|
||||||
|
```
|
||||||
|
### MUST
|
||||||
|
- <3-5 rules derived from task analysis>
|
||||||
|
|
||||||
|
### MUST NOT
|
||||||
|
- Execute work outside assigned prefix
|
||||||
|
- Modify artifacts from other roles
|
||||||
|
- Skip Phase 4 verification
|
||||||
|
```
|
||||||
|
|
||||||
## Behavioral Traits
|
## Behavioral Traits
|
||||||
|
|
||||||
All dynamically generated role-specs MUST embed these traits into Phase 4. Coordinator copies this section verbatim into every generated role-spec as a Phase 4 appendix.
|
All dynamically generated role-specs MUST embed these traits into Phase 4. Coordinator copies this section verbatim into every generated role-spec as a Phase 4 appendix.
|
||||||
@@ -93,6 +117,11 @@ Phase 4 must produce a verification summary with these fields:
|
|||||||
- Still fails → report `partial_completion` with details, NOT `completed`
|
- Still fails → report `partial_completion` with details, NOT `completed`
|
||||||
- Update shared state via `team_msg(operation="log", type="state_update", data={...})` after verification passes
|
- Update shared state via `team_msg(operation="log", type="state_update", data={...})` after verification passes
|
||||||
|
|
||||||
|
Quality thresholds from [specs/quality-gates.md](quality-gates.md):
|
||||||
|
- Pass >= 80%: report completed
|
||||||
|
- Review 60-79%: report completed with warnings
|
||||||
|
- Fail < 60%: retry Phase 3 (max 2)
|
||||||
|
|
||||||
### Error Protocol
|
### Error Protocol
|
||||||
|
|
||||||
- Primary approach fails → try alternative (different CLI tool / different tool)
|
- Primary approach fails → try alternative (different CLI tool / different tool)
|
||||||
@@ -139,48 +168,25 @@ Coordinator MAY reference these patterns when composing Phase 2-4 content for a
|
|||||||
|
|
||||||
## Knowledge Transfer Protocol
|
## Knowledge Transfer Protocol
|
||||||
|
|
||||||
How context flows between roles. Coordinator MUST reference this when composing Phase 2 of any role-spec.
|
Full protocol: [specs/knowledge-transfer.md](knowledge-transfer.md)
|
||||||
|
|
||||||
### Transfer Channels
|
Generated role-specs Phase 2 MUST declare which upstream sources to load.
|
||||||
|
Generated role-specs Phase 4 MUST include state update and artifact publishing.
|
||||||
|
|
||||||
| Channel | Scope | Mechanism | When to Use |
|
---
|
||||||
|---------|-------|-----------|-------------|
|
|
||||||
| **Artifacts** | Producer -> Consumer | Write to `<session>/artifacts/<name>.md`, consumer reads in Phase 2 | Structured deliverables (reports, plans, specs) |
|
|
||||||
| **State Updates** | Cross-role | `team_msg(operation="log", type="state_update", data={...})` / `team_msg(operation="get_state", session_id=<session-id>)` | Key findings, decisions, metadata (small, structured data) |
|
|
||||||
| **Wisdom** | Cross-task | Append to `<session>/wisdom/{learnings,decisions,conventions,issues}.md` | Patterns, conventions, risks discovered during execution |
|
|
||||||
| **context_accumulator** | Intra-role (inner loop) | In-memory array, passed to each subsequent task in same-prefix loop | Prior task summaries within same role's inner loop |
|
|
||||||
| **Exploration cache** | Cross-role | `<session>/explorations/cache-index.json` + per-angle JSON | Codebase discovery results, prevents duplicate exploration |
|
|
||||||
|
|
||||||
### Phase 2 Context Loading (role-spec must specify)
|
## Generated Role-Spec Validation
|
||||||
|
|
||||||
Every generated role-spec Phase 2 MUST declare which upstream sources to load:
|
Coordinator verifies before writing each role-spec:
|
||||||
|
|
||||||
```
|
| Check | Criteria |
|
||||||
1. Extract session path from task description
|
|-------|----------|
|
||||||
2. Read upstream artifacts: <list which artifacts from which upstream role>
|
| Frontmatter complete | All required fields present (role, prefix, inner_loop, output_tag, message_types, CLI tools) |
|
||||||
3. Read cross-role state via `team_msg(operation="get_state", session_id=<session-id>)`
|
| Identity block | Tag, prefix, responsibility defined |
|
||||||
4. Load wisdom files for accumulated knowledge
|
| Boundaries | MUST and MUST NOT rules present |
|
||||||
5. For inner_loop roles: load context_accumulator from prior tasks
|
| Phase 2 | Context loading sources specified |
|
||||||
6. Check exploration cache before running new explorations
|
| Phase 3 | Execution goal clear, not prescriptive about tools |
|
||||||
```
|
| Phase 4 | Behavioral Traits copied verbatim |
|
||||||
|
| Error Handling | Table with 3+ scenarios |
|
||||||
### State Update Convention
|
| Line count | Target ~80 lines (max 120) |
|
||||||
|
| No built-in overlap | No Phase 1/5, no message bus code, no consensus handling |
|
||||||
Cross-role state is managed via `team_msg` state updates instead of a separate file:
|
|
||||||
|
|
||||||
- **Write state**: `team_msg(operation="log", session_id=<session-id>, from=<role>, type="state_update", data={ "<role_name>": { ... } })`
|
|
||||||
- **Read state**: `team_msg(operation="get_state", session_id=<session-id>)`
|
|
||||||
- **Namespaced keys**: Each role writes under its own namespace key in `data`
|
|
||||||
- **Small data only**: Key findings, decision summaries, metadata. NOT full documents
|
|
||||||
- **State stored in**: `.msg/meta.json` (auto-managed by team_msg)
|
|
||||||
- **Example write**:
|
|
||||||
```
|
|
||||||
team_msg(operation="log", session_id="TC-auth-2026-03-03", from="researcher", type="state_update", data={
|
|
||||||
"researcher": { "key_findings": [...], "scope": "..." }
|
|
||||||
})
|
|
||||||
```
|
|
||||||
- **Example read**:
|
|
||||||
```
|
|
||||||
team_msg(operation="get_state", session_id="TC-auth-2026-03-03")
|
|
||||||
// Returns merged state from all state_update messages
|
|
||||||
```
|
|
||||||
|
|||||||
@@ -32,7 +32,7 @@ Generate complete team skills following the team-lifecycle-v4 architecture: SKIL
|
|||||||
## Key Design Principles
|
## Key Design Principles
|
||||||
|
|
||||||
1. **v4 Architecture Compliance**: Generated skills follow team-lifecycle-v4 pattern — SKILL.md = pure router, beat model = coordinator-only, unified structure (roles/ + specs/ + templates/)
|
1. **v4 Architecture Compliance**: Generated skills follow team-lifecycle-v4 pattern — SKILL.md = pure router, beat model = coordinator-only, unified structure (roles/ + specs/ + templates/)
|
||||||
2. **Golden Sample Reference**: Uses `team-lifecycle-v4` as reference implementation at `.claude/skills/team-lifecycle-v4/`
|
2. **Golden Sample Reference**: Uses `team-lifecycle-v4` as reference implementation at `~ or <project>/.claude/skills/team-lifecycle-v4/`
|
||||||
3. **Intelligent Commands Distribution**: Auto-determines which roles need `commands/` (2+ commands) vs inline logic (1 command)
|
3. **Intelligent Commands Distribution**: Auto-determines which roles need `commands/` (2+ commands) vs inline logic (1 command)
|
||||||
4. **team-worker Compatibility**: Role.md files include correct YAML frontmatter for team-worker agent parsing
|
4. **team-worker Compatibility**: Role.md files include correct YAML frontmatter for team-worker agent parsing
|
||||||
|
|
||||||
@@ -76,7 +76,7 @@ Return:
|
|||||||
|
|
||||||
## Golden Sample
|
## Golden Sample
|
||||||
|
|
||||||
Generated skills follow the architecture of `.claude/skills/team-lifecycle-v4/`:
|
Generated skills follow the architecture of `~ or <project>/.claude/skills/team-lifecycle-v4/`:
|
||||||
|
|
||||||
```
|
```
|
||||||
.claude/skills/<skill-name>/
|
.claude/skills/<skill-name>/
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ Generate all role files, specs, and templates based on `teamConfig` and the gene
|
|||||||
|
|
||||||
## Golden Sample Reference
|
## Golden Sample Reference
|
||||||
|
|
||||||
Read the golden sample at `.claude/skills/team-lifecycle-v4/` for each file type before generating. This ensures pattern fidelity.
|
Read the golden sample at `~ or <project>/.claude/skills/team-lifecycle-v4/` for each file type before generating. This ensures pattern fidelity.
|
||||||
|
|
||||||
## Step 3.1: Generate Coordinator
|
## Step 3.1: Generate Coordinator
|
||||||
|
|
||||||
@@ -305,7 +305,7 @@ For each additional spec in `teamConfig.specs` (beyond pipelines), generate doma
|
|||||||
|
|
||||||
For each template in `teamConfig.templates`:
|
For each template in `teamConfig.templates`:
|
||||||
|
|
||||||
1. Check if golden sample has matching template at `.claude/skills/team-lifecycle-v4/templates/`
|
1. Check if golden sample has matching template at `~ or <project>/.claude/skills/team-lifecycle-v4/templates/`
|
||||||
2. If exists: copy and adapt for new domain
|
2. If exists: copy and adapt for new domain
|
||||||
3. If not: generate domain-appropriate template structure
|
3. If not: generate domain-appropriate template structure
|
||||||
|
|
||||||
|
|||||||
@@ -193,7 +193,7 @@ Agent({
|
|||||||
name: "<role>",
|
name: "<role>",
|
||||||
team_name: "<team_name>",
|
team_name: "<team_name>",
|
||||||
prompt: `role: <role>
|
prompt: `role: <role>
|
||||||
role_spec: .claude/skills/team-edict/role-specs/<role>.md
|
role_spec: ~ or <project>/.claude/skills/team-edict/role-specs/<role>.md
|
||||||
session: <session_path>
|
session: <session_path>
|
||||||
session_id: <session_id>
|
session_id: <session_id>
|
||||||
team_name: <team_name>
|
team_name: <team_name>
|
||||||
|
|||||||
@@ -24,7 +24,7 @@ team_msg(operation="log", session_id=<session_id>, from="xingbu",
|
|||||||
|
|
||||||
1. 读取当前任务(QA-* task description)
|
1. 读取当前任务(QA-* task description)
|
||||||
2. 读取 `<session_path>/plan/dispatch-plan.md` 获取验收标准
|
2. 读取 `<session_path>/plan/dispatch-plan.md` 获取验收标准
|
||||||
3. 读取 `.claude/skills/team-edict/specs/quality-gates.md` 获取质量门标准
|
3. 读取 `~ or <project>/.claude/skills/team-edict/specs/quality-gates.md` 获取质量门标准
|
||||||
4. 读取被测部门(通常为工部)的产出报告
|
4. 读取被测部门(通常为工部)的产出报告
|
||||||
|
|
||||||
## Phase 3: 质量审查
|
## Phase 3: 质量审查
|
||||||
|
|||||||
@@ -18,7 +18,7 @@
|
|||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
// Phase 0/1 启动时执行
|
// Phase 0/1 启动时执行
|
||||||
Read(".claude/skills/team-edict/specs/team-config.json") // 加载路由规则和artifact路径
|
Read("~ or <project>/.claude/skills/team-edict/specs/team-config.json") // 加载路由规则和artifact路径
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
@@ -106,7 +106,7 @@ Read(".claude/skills/team-edict/specs/team-config.json") // 加载路由规则
|
|||||||
name: "zhongshu",
|
name: "zhongshu",
|
||||||
team_name: <team_name>,
|
team_name: <team_name>,
|
||||||
prompt: `role: zhongshu
|
prompt: `role: zhongshu
|
||||||
role_spec: .claude/skills/team-edict/role-specs/zhongshu.md
|
role_spec: ~ or <project>/.claude/skills/team-edict/role-specs/zhongshu.md
|
||||||
session: <session_path>
|
session: <session_path>
|
||||||
session_id: <session_id>
|
session_id: <session_id>
|
||||||
team_name: <team_name>
|
team_name: <team_name>
|
||||||
@@ -138,7 +138,7 @@ inner_loop: false`,
|
|||||||
name: "menxia",
|
name: "menxia",
|
||||||
team_name: <team_name>,
|
team_name: <team_name>,
|
||||||
prompt: `role: menxia
|
prompt: `role: menxia
|
||||||
role_spec: .claude/skills/team-edict/role-specs/menxia.md
|
role_spec: ~ or <project>/.claude/skills/team-edict/role-specs/menxia.md
|
||||||
session: <session_path>
|
session: <session_path>
|
||||||
session_id: <session_id>
|
session_id: <session_id>
|
||||||
team_name: <team_name>
|
team_name: <team_name>
|
||||||
@@ -177,7 +177,7 @@ inner_loop: false`,
|
|||||||
name: "shangshu",
|
name: "shangshu",
|
||||||
team_name: <team_name>,
|
team_name: <team_name>,
|
||||||
prompt: `role: shangshu
|
prompt: `role: shangshu
|
||||||
role_spec: .claude/skills/team-edict/role-specs/shangshu.md
|
role_spec: ~ or <project>/.claude/skills/team-edict/role-specs/shangshu.md
|
||||||
session: <session_path>
|
session: <session_path>
|
||||||
session_id: <session_id>
|
session_id: <session_id>
|
||||||
team_name: <team_name>
|
team_name: <team_name>
|
||||||
|
|||||||
@@ -99,7 +99,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-frontend-debug/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-frontend-debug/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: <team-name>
|
team_name: <team-name>
|
||||||
|
|||||||
@@ -29,7 +29,7 @@ EXPECTED: <artifact path> + <quality criteria>
|
|||||||
CONSTRAINTS: <scope limits>
|
CONSTRAINTS: <scope limits>
|
||||||
---
|
---
|
||||||
InnerLoop: <true|false>
|
InnerLoop: <true|false>
|
||||||
RoleSpec: .claude/skills/team-frontend-debug/roles/<role>/role.md
|
RoleSpec: ~ or <project>/.claude/skills/team-frontend-debug/roles/<role>/role.md
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
@@ -55,7 +55,7 @@ EXPECTED: <session>/artifacts/TEST-001-report.md + <session>/artifacts/TEST-001-
|
|||||||
CONSTRAINTS: Use Chrome DevTools MCP only | Do not modify any code | Test all listed features
|
CONSTRAINTS: Use Chrome DevTools MCP only | Do not modify any code | Test all listed features
|
||||||
---
|
---
|
||||||
InnerLoop: true
|
InnerLoop: true
|
||||||
RoleSpec: .claude/skills/team-frontend-debug/roles/tester/role.md
|
RoleSpec: ~ or <project>/.claude/skills/team-frontend-debug/roles/tester/role.md
|
||||||
```
|
```
|
||||||
|
|
||||||
### ANALYZE-001 (Test Mode): Analyze Discovered Issues
|
### ANALYZE-001 (Test Mode): Analyze Discovered Issues
|
||||||
@@ -75,7 +75,7 @@ EXPECTED: <session>/artifacts/ANALYZE-001-rca.md with root causes for all issues
|
|||||||
CONSTRAINTS: Read-only analysis | Skip low-severity warnings unless user requests
|
CONSTRAINTS: Read-only analysis | Skip low-severity warnings unless user requests
|
||||||
---
|
---
|
||||||
InnerLoop: false
|
InnerLoop: false
|
||||||
RoleSpec: .claude/skills/team-frontend-debug/roles/analyzer/role.md
|
RoleSpec: ~ or <project>/.claude/skills/team-frontend-debug/roles/analyzer/role.md
|
||||||
```
|
```
|
||||||
|
|
||||||
**Conditional**: If TEST-001 reports zero issues → skip ANALYZE-001, FIX-001, VERIFY-001. Pipeline completes.
|
**Conditional**: If TEST-001 reports zero issues → skip ANALYZE-001, FIX-001, VERIFY-001. Pipeline completes.
|
||||||
@@ -96,7 +96,7 @@ EXPECTED: Modified source files + <session>/artifacts/FIX-001-changes.md
|
|||||||
CONSTRAINTS: Minimal changes per issue | Follow existing code style
|
CONSTRAINTS: Minimal changes per issue | Follow existing code style
|
||||||
---
|
---
|
||||||
InnerLoop: true
|
InnerLoop: true
|
||||||
RoleSpec: .claude/skills/team-frontend-debug/roles/fixer/role.md
|
RoleSpec: ~ or <project>/.claude/skills/team-frontend-debug/roles/fixer/role.md
|
||||||
```
|
```
|
||||||
|
|
||||||
### VERIFY-001 (Test Mode): Re-Test After Fix
|
### VERIFY-001 (Test Mode): Re-Test After Fix
|
||||||
@@ -117,7 +117,7 @@ EXPECTED: <session>/artifacts/VERIFY-001-report.md with pass/fail per previously
|
|||||||
CONSTRAINTS: Only re-test failed scenarios | Use Chrome DevTools MCP only
|
CONSTRAINTS: Only re-test failed scenarios | Use Chrome DevTools MCP only
|
||||||
---
|
---
|
||||||
InnerLoop: false
|
InnerLoop: false
|
||||||
RoleSpec: .claude/skills/team-frontend-debug/roles/verifier/role.md
|
RoleSpec: ~ or <project>/.claude/skills/team-frontend-debug/roles/verifier/role.md
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
@@ -143,7 +143,7 @@ EXPECTED: <session>/evidence/ directory with all captures + reproduction report
|
|||||||
CONSTRAINTS: Use Chrome DevTools MCP only | Do not modify any code
|
CONSTRAINTS: Use Chrome DevTools MCP only | Do not modify any code
|
||||||
---
|
---
|
||||||
InnerLoop: false
|
InnerLoop: false
|
||||||
RoleSpec: .claude/skills/team-frontend-debug/roles/reproducer/role.md
|
RoleSpec: ~ or <project>/.claude/skills/team-frontend-debug/roles/reproducer/role.md
|
||||||
```
|
```
|
||||||
|
|
||||||
### ANALYZE-001 (Debug Mode): Root Cause Analysis
|
### ANALYZE-001 (Debug Mode): Root Cause Analysis
|
||||||
@@ -164,7 +164,7 @@ EXPECTED: <session>/artifacts/ANALYZE-001-rca.md with root cause, file:line, fix
|
|||||||
CONSTRAINTS: Read-only analysis | Request more evidence if inconclusive
|
CONSTRAINTS: Read-only analysis | Request more evidence if inconclusive
|
||||||
---
|
---
|
||||||
InnerLoop: false
|
InnerLoop: false
|
||||||
RoleSpec: .claude/skills/team-frontend-debug/roles/analyzer/role.md
|
RoleSpec: ~ or <project>/.claude/skills/team-frontend-debug/roles/analyzer/role.md
|
||||||
```
|
```
|
||||||
|
|
||||||
### FIX-001 (Debug Mode): Code Fix
|
### FIX-001 (Debug Mode): Code Fix
|
||||||
@@ -183,7 +183,7 @@ EXPECTED: Modified source files + <session>/artifacts/FIX-001-changes.md
|
|||||||
CONSTRAINTS: Minimal changes | Follow existing code style | No breaking changes
|
CONSTRAINTS: Minimal changes | Follow existing code style | No breaking changes
|
||||||
---
|
---
|
||||||
InnerLoop: true
|
InnerLoop: true
|
||||||
RoleSpec: .claude/skills/team-frontend-debug/roles/fixer/role.md
|
RoleSpec: ~ or <project>/.claude/skills/team-frontend-debug/roles/fixer/role.md
|
||||||
```
|
```
|
||||||
|
|
||||||
### VERIFY-001 (Debug Mode): Fix Verification
|
### VERIFY-001 (Debug Mode): Fix Verification
|
||||||
@@ -203,7 +203,7 @@ EXPECTED: <session>/artifacts/VERIFY-001-report.md with pass/fail verdict
|
|||||||
CONSTRAINTS: Use Chrome DevTools MCP only | Same steps as reproduction
|
CONSTRAINTS: Use Chrome DevTools MCP only | Same steps as reproduction
|
||||||
---
|
---
|
||||||
InnerLoop: false
|
InnerLoop: false
|
||||||
RoleSpec: .claude/skills/team-frontend-debug/roles/verifier/role.md
|
RoleSpec: ~ or <project>/.claude/skills/team-frontend-debug/roles/verifier/role.md
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
@@ -219,7 +219,7 @@ TASK: <specific evidence requests from Analyzer>
|
|||||||
CONTEXT: Session + Analyzer request
|
CONTEXT: Session + Analyzer request
|
||||||
---
|
---
|
||||||
InnerLoop: false
|
InnerLoop: false
|
||||||
RoleSpec: .claude/skills/team-frontend-debug/roles/reproducer/role.md
|
RoleSpec: ~ or <project>/.claude/skills/team-frontend-debug/roles/reproducer/role.md
|
||||||
```
|
```
|
||||||
|
|
||||||
### FIX-002 (Either Mode): Re-Fix After Failed Verification
|
### FIX-002 (Either Mode): Re-Fix After Failed Verification
|
||||||
@@ -231,7 +231,7 @@ TASK: Review VERIFY-001 failure details, apply corrective fix
|
|||||||
CONTEXT: Session + VERIFY-001-report.md
|
CONTEXT: Session + VERIFY-001-report.md
|
||||||
---
|
---
|
||||||
InnerLoop: true
|
InnerLoop: true
|
||||||
RoleSpec: .claude/skills/team-frontend-debug/roles/fixer/role.md
|
RoleSpec: ~ or <project>/.claude/skills/team-frontend-debug/roles/fixer/role.md
|
||||||
```
|
```
|
||||||
|
|
||||||
## Conditional Skip Rules
|
## Conditional Skip Rules
|
||||||
|
|||||||
@@ -66,7 +66,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-frontend/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-frontend/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: frontend
|
team_name: frontend
|
||||||
|
|||||||
@@ -129,7 +129,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-frontend/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-frontend/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: frontend
|
team_name: frontend
|
||||||
|
|||||||
@@ -67,7 +67,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-issue/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-issue/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: issue
|
team_name: issue
|
||||||
@@ -89,7 +89,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-issue/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-issue/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: issue
|
team_name: issue
|
||||||
|
|||||||
@@ -101,7 +101,7 @@ Find ready tasks, spawn workers, STOP.
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-issue/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-issue/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: issue
|
team_name: issue
|
||||||
@@ -133,7 +133,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-issue/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-issue/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: issue
|
team_name: issue
|
||||||
|
|||||||
@@ -66,7 +66,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-iterdev/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-iterdev/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: iterdev
|
team_name: iterdev
|
||||||
|
|||||||
@@ -101,7 +101,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-iterdev/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-iterdev/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: iterdev
|
team_name: iterdev
|
||||||
|
|||||||
@@ -71,7 +71,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-lifecycle-v4/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-lifecycle-v4/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: <team-name>
|
team_name: <team-name>
|
||||||
@@ -98,7 +98,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: supervisor
|
role: supervisor
|
||||||
role_spec: .claude/skills/team-lifecycle-v4/roles/supervisor/role.md
|
role_spec: ~ or <project>/.claude/skills/team-lifecycle-v4/roles/supervisor/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: <team-name>
|
team_name: <team-name>
|
||||||
|
|||||||
@@ -29,7 +29,7 @@ EXPECTED: <artifact path> + <quality criteria>
|
|||||||
CONSTRAINTS: <scope limits>
|
CONSTRAINTS: <scope limits>
|
||||||
---
|
---
|
||||||
InnerLoop: <true|false>
|
InnerLoop: <true|false>
|
||||||
RoleSpec: .claude/skills/team-lifecycle-v4/roles/<role>/role.md
|
RoleSpec: ~ or <project>/.claude/skills/team-lifecycle-v4/roles/<role>/role.md
|
||||||
```
|
```
|
||||||
|
|
||||||
## InnerLoop Flag Rules
|
## InnerLoop Flag Rules
|
||||||
@@ -45,7 +45,7 @@ CHECKPOINT tasks are dispatched like regular tasks but handled differently at sp
|
|||||||
- Owner: supervisor
|
- Owner: supervisor
|
||||||
- **NOT spawned as team-worker** — coordinator wakes the resident supervisor via SendMessage
|
- **NOT spawned as team-worker** — coordinator wakes the resident supervisor via SendMessage
|
||||||
- If `supervision: false` in team-session.json, skip creating CHECKPOINT tasks entirely
|
- If `supervision: false` in team-session.json, skip creating CHECKPOINT tasks entirely
|
||||||
- RoleSpec in description: `.claude/skills/team-lifecycle-v4/roles/supervisor/role.md`
|
- RoleSpec in description: `~ or <project>/.claude/skills/team-lifecycle-v4/roles/supervisor/role.md`
|
||||||
|
|
||||||
## Dependency Validation
|
## Dependency Validation
|
||||||
|
|
||||||
|
|||||||
@@ -78,7 +78,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-perf-opt/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-perf-opt/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: perf-opt
|
team_name: perf-opt
|
||||||
|
|||||||
@@ -73,7 +73,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-perf-opt/role-specs/<role>.md
|
role_spec: ~ or <project>/.claude/skills/team-perf-opt/role-specs/<role>.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: perf-opt
|
team_name: perf-opt
|
||||||
|
|||||||
@@ -112,7 +112,7 @@ Execute `commands/dispatch.md` inline (Command Execution Protocol).
|
|||||||
### Initial Spawn
|
### Initial Spawn
|
||||||
|
|
||||||
Find first unblocked task and spawn its worker using SKILL.md Worker Spawn Template with:
|
Find first unblocked task and spawn its worker using SKILL.md Worker Spawn Template with:
|
||||||
- `role_spec: .claude/skills/team-perf-opt/roles/<role>/role.md`
|
- `role_spec: ~ or <project>/.claude/skills/team-perf-opt/roles/<role>/role.md`
|
||||||
- `team_name: perf-opt`
|
- `team_name: perf-opt`
|
||||||
|
|
||||||
**STOP** after spawning. Wait for worker callback.
|
**STOP** after spawning. Wait for worker callback.
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
"team_name": "perf-opt",
|
"team_name": "perf-opt",
|
||||||
"team_display_name": "Performance Optimization",
|
"team_display_name": "Performance Optimization",
|
||||||
"skill_name": "team-perf-opt",
|
"skill_name": "team-perf-opt",
|
||||||
"skill_path": ".claude/skills/team-perf-opt/",
|
"skill_path": "~ or <project>/.claude/skills/team-perf-opt/",
|
||||||
"worker_agent": "team-worker",
|
"worker_agent": "team-worker",
|
||||||
"pipeline_type": "Linear with Review-Fix Cycle (Parallel-Capable)",
|
"pipeline_type": "Linear with Review-Fix Cycle (Parallel-Capable)",
|
||||||
"completion_action": "interactive",
|
"completion_action": "interactive",
|
||||||
|
|||||||
@@ -65,7 +65,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-planex/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-planex/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: planex
|
team_name: planex
|
||||||
|
|||||||
@@ -125,7 +125,7 @@ Collect task states from TaskList()
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-planex/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-planex/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: <team-name>
|
team_name: <team-name>
|
||||||
|
|||||||
@@ -68,7 +68,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-quality-assurance/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-quality-assurance/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: quality-assurance
|
team_name: quality-assurance
|
||||||
|
|||||||
@@ -30,7 +30,7 @@ EXPECTED: <artifact path> + <quality criteria>
|
|||||||
CONSTRAINTS: <scope limits>
|
CONSTRAINTS: <scope limits>
|
||||||
---
|
---
|
||||||
InnerLoop: <true|false>
|
InnerLoop: <true|false>
|
||||||
RoleSpec: .claude/skills/team-quality-assurance/roles/<role>/role.md
|
RoleSpec: ~ or <project>/.claude/skills/team-quality-assurance/roles/<role>/role.md
|
||||||
```
|
```
|
||||||
|
|
||||||
## Pipeline Task Registry
|
## Pipeline Task Registry
|
||||||
|
|||||||
@@ -59,7 +59,7 @@ EXPECTED: Fixed test files | Improved coverage
|
|||||||
CONSTRAINTS: Only modify test files | No source changes
|
CONSTRAINTS: Only modify test files | No source changes
|
||||||
---
|
---
|
||||||
InnerLoop: false
|
InnerLoop: false
|
||||||
RoleSpec: .claude/skills/team-quality-assurance/roles/generator/role.md"
|
RoleSpec: ~ or <project>/.claude/skills/team-quality-assurance/roles/generator/role.md"
|
||||||
})
|
})
|
||||||
TaskCreate({
|
TaskCreate({
|
||||||
subject: "QARUN-gc-<round>: Re-execute <layer> (GC #<round>)",
|
subject: "QARUN-gc-<round>: Re-execute <layer> (GC #<round>)",
|
||||||
@@ -72,7 +72,7 @@ EXPECTED: <session>/results/run-<layer>-gc-<round>.json
|
|||||||
CONSTRAINTS: Read-only execution
|
CONSTRAINTS: Read-only execution
|
||||||
---
|
---
|
||||||
InnerLoop: false
|
InnerLoop: false
|
||||||
RoleSpec: .claude/skills/team-quality-assurance/roles/executor/role.md",
|
RoleSpec: ~ or <project>/.claude/skills/team-quality-assurance/roles/executor/role.md",
|
||||||
blockedBy: ["QAGEN-fix-<round>"]
|
blockedBy: ["QAGEN-fix-<round>"]
|
||||||
})
|
})
|
||||||
```
|
```
|
||||||
@@ -149,7 +149,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-quality-assurance/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-quality-assurance/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: quality-assurance
|
team_name: quality-assurance
|
||||||
|
|||||||
@@ -66,7 +66,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-review/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-review/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: review
|
team_name: review
|
||||||
|
|||||||
@@ -30,7 +30,7 @@ EXPECTED: <artifact path> + <quality criteria>
|
|||||||
CONSTRAINTS: <scope limits>
|
CONSTRAINTS: <scope limits>
|
||||||
---
|
---
|
||||||
InnerLoop: <true|false>
|
InnerLoop: <true|false>
|
||||||
RoleSpec: .claude/skills/team-review/roles/<role>/role.md
|
RoleSpec: ~ or <project>/.claude/skills/team-review/roles/<role>/role.md
|
||||||
```
|
```
|
||||||
|
|
||||||
## Pipeline Task Registry
|
## Pipeline Task Registry
|
||||||
|
|||||||
@@ -24,9 +24,9 @@ Event-driven pipeline coordination. Beat model: coordinator wake -> process -> s
|
|||||||
|
|
||||||
| Prefix | Role | Role Spec | inner_loop |
|
| Prefix | Role | Role Spec | inner_loop |
|
||||||
|--------|------|-----------|------------|
|
|--------|------|-----------|------------|
|
||||||
| SCAN-* | scanner | `.claude/skills/team-review/roles/scanner/role.md` | false |
|
| SCAN-* | scanner | `~ or <project>/.claude/skills/team-review/roles/scanner/role.md` | false |
|
||||||
| REV-* | reviewer | `.claude/skills/team-review/roles/reviewer/role.md` | false |
|
| REV-* | reviewer | `~ or <project>/.claude/skills/team-review/roles/reviewer/role.md` | false |
|
||||||
| FIX-* | fixer | `.claude/skills/team-review/roles/fixer/role.md` | true |
|
| FIX-* | fixer | `~ or <project>/.claude/skills/team-review/roles/fixer/role.md` | true |
|
||||||
|
|
||||||
## handleCallback
|
## handleCallback
|
||||||
|
|
||||||
@@ -123,7 +123,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-review/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-review/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: review
|
team_name: review
|
||||||
|
|||||||
@@ -72,7 +72,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-roadmap-dev/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-roadmap-dev/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: roadmap-dev
|
team_name: roadmap-dev
|
||||||
@@ -148,7 +148,7 @@ AskUserQuestion({
|
|||||||
|----------|------------|
|
|----------|------------|
|
||||||
| Unknown --role value | Error with role registry list |
|
| Unknown --role value | Error with role registry list |
|
||||||
| Role file not found | Error with expected path (roles/{name}/role.md) |
|
| Role file not found | Error with expected path (roles/{name}/role.md) |
|
||||||
| project-tech.json missing | Coordinator invokes /workflow:init |
|
| project-tech.json missing | Coordinator invokes /workflow:spec:setup |
|
||||||
| Phase verification fails with gaps | Coordinator triggers gap closure loop (max 3 iterations) |
|
| Phase verification fails with gaps | Coordinator triggers gap closure loop (max 3 iterations) |
|
||||||
| Max gap closure iterations (3) | Report to user, ask for guidance |
|
| Max gap closure iterations (3) | Report to user, ask for guidance |
|
||||||
| Worker crash | Respawn worker, reassign task |
|
| Worker crash | Respawn worker, reassign task |
|
||||||
|
|||||||
@@ -15,9 +15,9 @@ Handle all coordinator monitoring events for the roadmap-dev pipeline using the
|
|||||||
|
|
||||||
| Prefix | Role | Role Spec | inner_loop |
|
| Prefix | Role | Role Spec | inner_loop |
|
||||||
|--------|------|-----------|------------|
|
|--------|------|-----------|------------|
|
||||||
| PLAN | planner | `.claude/skills/team-roadmap-dev/roles/planner/role.md` | true (cli_tools: gemini --mode analysis) |
|
| PLAN | planner | `~ or <project>/.claude/skills/team-roadmap-dev/roles/planner/role.md` | true (cli_tools: gemini --mode analysis) |
|
||||||
| EXEC | executor | `.claude/skills/team-roadmap-dev/roles/executor/role.md` | true (cli_tools: gemini --mode write) |
|
| EXEC | executor | `~ or <project>/.claude/skills/team-roadmap-dev/roles/executor/role.md` | true (cli_tools: gemini --mode write) |
|
||||||
| VERIFY | verifier | `.claude/skills/team-roadmap-dev/roles/verifier/role.md` | true |
|
| VERIFY | verifier | `~ or <project>/.claude/skills/team-roadmap-dev/roles/verifier/role.md` | true |
|
||||||
|
|
||||||
### Pipeline Structure
|
### Pipeline Structure
|
||||||
|
|
||||||
@@ -247,7 +247,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-roadmap-dev/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-roadmap-dev/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: roadmap-dev
|
team_name: roadmap-dev
|
||||||
|
|||||||
@@ -284,7 +284,7 @@ Delegate to `commands/monitor.md`:
|
|||||||
|
|
||||||
| Scenario | Resolution |
|
| Scenario | Resolution |
|
||||||
|----------|------------|
|
|----------|------------|
|
||||||
| project-tech.json missing | Invoke /workflow:init automatically |
|
| project-tech.json missing | Invoke /workflow:spec:setup automatically |
|
||||||
| User cancels roadmap discussion | Save session state, exit gracefully |
|
| User cancels roadmap discussion | Save session state, exit gracefully |
|
||||||
| Planner fails | Retry once, then ask user for guidance |
|
| Planner fails | Retry once, then ask user for guidance |
|
||||||
| Executor fails on plan | Mark plan as failed, continue with next |
|
| Executor fails on plan | Mark plan as failed, continue with next |
|
||||||
|
|||||||
@@ -88,6 +88,6 @@ Phase N: PLAN-N01 --> EXEC-N01 --> VERIFY-N01
|
|||||||
|
|
||||||
| Prefix | Role | Role Spec | Inner Loop |
|
| Prefix | Role | Role Spec | Inner Loop |
|
||||||
|--------|------|-----------|------------|
|
|--------|------|-----------|------------|
|
||||||
| PLAN | planner | `.claude/skills/team-roadmap-dev/roles/planner/role.md` | true |
|
| PLAN | planner | `~ or <project>/.claude/skills/team-roadmap-dev/roles/planner/role.md` | true |
|
||||||
| EXEC | executor | `.claude/skills/team-roadmap-dev/roles/executor/role.md` | true |
|
| EXEC | executor | `~ or <project>/.claude/skills/team-roadmap-dev/roles/executor/role.md` | true |
|
||||||
| VERIFY | verifier | `.claude/skills/team-roadmap-dev/roles/verifier/role.md` | true |
|
| VERIFY | verifier | `~ or <project>/.claude/skills/team-roadmap-dev/roles/verifier/role.md` | true |
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
"team_name": "roadmap-dev",
|
"team_name": "roadmap-dev",
|
||||||
"team_display_name": "Roadmap Dev",
|
"team_display_name": "Roadmap Dev",
|
||||||
"skill_name": "team-roadmap-dev",
|
"skill_name": "team-roadmap-dev",
|
||||||
"skill_path": ".claude/skills/team-roadmap-dev/",
|
"skill_path": "~ or <project>/.claude/skills/team-roadmap-dev/",
|
||||||
"design_source": "roadmap-driven development workflow design (2026-02-24)",
|
"design_source": "roadmap-driven development workflow design (2026-02-24)",
|
||||||
"pipeline_type": "Phased",
|
"pipeline_type": "Phased",
|
||||||
"pipeline": {
|
"pipeline": {
|
||||||
@@ -85,7 +85,7 @@
|
|||||||
"init_prerequisite": {
|
"init_prerequisite": {
|
||||||
"required_files": [".workflow/project-tech.json"],
|
"required_files": [".workflow/project-tech.json"],
|
||||||
"optional_files": [".workflow/specs/*.md"],
|
"optional_files": [".workflow/specs/*.md"],
|
||||||
"init_command": "/workflow:init"
|
"init_command": "/workflow:spec:setup "
|
||||||
},
|
},
|
||||||
"_metadata": {
|
"_metadata": {
|
||||||
"created_at": "2026-02-24",
|
"created_at": "2026-02-24",
|
||||||
|
|||||||
@@ -68,7 +68,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-tech-debt/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-tech-debt/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: tech-debt
|
team_name: tech-debt
|
||||||
|
|||||||
@@ -123,7 +123,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-tech-debt/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-tech-debt/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: tech-debt
|
team_name: tech-debt
|
||||||
|
|||||||
@@ -67,7 +67,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-testing/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-testing/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: testing
|
team_name: testing
|
||||||
|
|||||||
@@ -31,7 +31,7 @@ EXPECTED: <deliverable path> + <quality criteria>
|
|||||||
CONSTRAINTS: <scope limits, focus areas>
|
CONSTRAINTS: <scope limits, focus areas>
|
||||||
---
|
---
|
||||||
InnerLoop: <true|false>
|
InnerLoop: <true|false>
|
||||||
RoleSpec: .claude/skills/team-testing/roles/<role>/role.md
|
RoleSpec: ~ or <project>/.claude/skills/team-testing/roles/<role>/role.md
|
||||||
```
|
```
|
||||||
|
|
||||||
## Pipeline Task Registry
|
## Pipeline Task Registry
|
||||||
|
|||||||
@@ -25,10 +25,10 @@ Event-driven pipeline coordination. Beat model: coordinator wake -> process -> s
|
|||||||
|
|
||||||
| Prefix | Role | Role Spec | inner_loop |
|
| Prefix | Role | Role Spec | inner_loop |
|
||||||
|--------|------|-----------|------------|
|
|--------|------|-----------|------------|
|
||||||
| STRATEGY-* | strategist | `.claude/skills/team-testing/roles/strategist/role.md` | false |
|
| STRATEGY-* | strategist | `~ or <project>/.claude/skills/team-testing/roles/strategist/role.md` | false |
|
||||||
| TESTGEN-* | generator | `.claude/skills/team-testing/roles/generator/role.md` | true |
|
| TESTGEN-* | generator | `~ or <project>/.claude/skills/team-testing/roles/generator/role.md` | true |
|
||||||
| TESTRUN-* | executor | `.claude/skills/team-testing/roles/executor/role.md` | true |
|
| TESTRUN-* | executor | `~ or <project>/.claude/skills/team-testing/roles/executor/role.md` | true |
|
||||||
| TESTANA-* | analyst | `.claude/skills/team-testing/roles/analyst/role.md` | false |
|
| TESTANA-* | analyst | `~ or <project>/.claude/skills/team-testing/roles/analyst/role.md` | false |
|
||||||
|
|
||||||
## handleCallback
|
## handleCallback
|
||||||
|
|
||||||
@@ -68,7 +68,7 @@ EXPECTED: Revised test files in <session>/tests/<layer>/
|
|||||||
CONSTRAINTS: Only modify test files
|
CONSTRAINTS: Only modify test files
|
||||||
---
|
---
|
||||||
InnerLoop: true
|
InnerLoop: true
|
||||||
RoleSpec: .claude/skills/team-testing/roles/generator/role.md"
|
RoleSpec: ~ or <project>/.claude/skills/team-testing/roles/generator/role.md"
|
||||||
})
|
})
|
||||||
TaskCreate({
|
TaskCreate({
|
||||||
subject: "TESTRUN-<layer>-fix-<round>: Re-execute <layer> (GC #<round>)",
|
subject: "TESTRUN-<layer>-fix-<round>: Re-execute <layer> (GC #<round>)",
|
||||||
@@ -80,7 +80,7 @@ CONTEXT:
|
|||||||
EXPECTED: <session>/results/run-<N>-gc.json
|
EXPECTED: <session>/results/run-<N>-gc.json
|
||||||
---
|
---
|
||||||
InnerLoop: true
|
InnerLoop: true
|
||||||
RoleSpec: .claude/skills/team-testing/roles/executor/role.md",
|
RoleSpec: ~ or <project>/.claude/skills/team-testing/roles/executor/role.md",
|
||||||
blockedBy: ["TESTGEN-<layer>-fix-<round>"]
|
blockedBy: ["TESTGEN-<layer>-fix-<round>"]
|
||||||
})
|
})
|
||||||
```
|
```
|
||||||
@@ -150,7 +150,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-testing/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-testing/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: testing
|
team_name: testing
|
||||||
|
|||||||
@@ -67,7 +67,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-uidesign/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-uidesign/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: uidesign
|
team_name: uidesign
|
||||||
|
|||||||
@@ -127,7 +127,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-uidesign/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-uidesign/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: uidesign
|
team_name: uidesign
|
||||||
|
|||||||
@@ -75,7 +75,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-ultra-analyze/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-ultra-analyze/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: ultra-analyze
|
team_name: ultra-analyze
|
||||||
|
|||||||
@@ -211,10 +211,10 @@ Find and spawn the next ready tasks.
|
|||||||
|
|
||||||
| Task Prefix | Role | Role Spec |
|
| Task Prefix | Role | Role Spec |
|
||||||
|-------------|------|-----------|
|
|-------------|------|-----------|
|
||||||
| `EXPLORE-*` | explorer | `.claude/skills/team-ultra-analyze/role-specs/explorer.md` |
|
| `EXPLORE-*` | explorer | `~ or <project>/.claude/skills/team-ultra-analyze/role-specs/explorer.md` |
|
||||||
| `ANALYZE-*` | analyst | `.claude/skills/team-ultra-analyze/role-specs/analyst.md` |
|
| `ANALYZE-*` | analyst | `~ or <project>/.claude/skills/team-ultra-analyze/role-specs/analyst.md` |
|
||||||
| `DISCUSS-*` | discussant | `.claude/skills/team-ultra-analyze/role-specs/discussant.md` |
|
| `DISCUSS-*` | discussant | `~ or <project>/.claude/skills/team-ultra-analyze/role-specs/discussant.md` |
|
||||||
| `SYNTH-*` | synthesizer | `.claude/skills/team-ultra-analyze/role-specs/synthesizer.md` |
|
| `SYNTH-*` | synthesizer | `~ or <project>/.claude/skills/team-ultra-analyze/role-specs/synthesizer.md` |
|
||||||
|
|
||||||
3. Spawn team-worker for each ready task:
|
3. Spawn team-worker for each ready task:
|
||||||
|
|
||||||
@@ -227,7 +227,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-ultra-analyze/role-specs/<role>.md
|
role_spec: ~ or <project>/.claude/skills/team-ultra-analyze/role-specs/<role>.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: ultra-analyze
|
team_name: ultra-analyze
|
||||||
|
|||||||
@@ -152,7 +152,7 @@ Execute `commands/dispatch.md` inline (Command Execution Protocol):
|
|||||||
### Initial Spawn
|
### Initial Spawn
|
||||||
|
|
||||||
Find first unblocked tasks and spawn their workers. Use SKILL.md Worker Spawn Template with:
|
Find first unblocked tasks and spawn their workers. Use SKILL.md Worker Spawn Template with:
|
||||||
- `role_spec: .claude/skills/team-ultra-analyze/roles/<role>/role.md`
|
- `role_spec: ~ or <project>/.claude/skills/team-ultra-analyze/roles/<role>/role.md`
|
||||||
- `team_name: ultra-analyze`
|
- `team_name: ultra-analyze`
|
||||||
- `inner_loop: false`
|
- `inner_loop: false`
|
||||||
|
|
||||||
|
|||||||
@@ -76,7 +76,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-ux-improve/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-ux-improve/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: ux-improve
|
team_name: ux-improve
|
||||||
|
|||||||
@@ -102,7 +102,7 @@ Agent({
|
|||||||
run_in_background: true,
|
run_in_background: true,
|
||||||
prompt: `## Role Assignment
|
prompt: `## Role Assignment
|
||||||
role: <role>
|
role: <role>
|
||||||
role_spec: .claude/skills/team-ux-improve/roles/<role>/role.md
|
role_spec: ~ or <project>/.claude/skills/team-ux-improve/roles/<role>/role.md
|
||||||
session: <session-folder>
|
session: <session-folder>
|
||||||
session_id: <session-id>
|
session_id: <session-id>
|
||||||
team_name: ux-improve
|
team_name: ux-improve
|
||||||
|
|||||||
@@ -76,7 +76,7 @@ TEXT-LEVEL ONLY. No source code reading.
|
|||||||
├── explorations/
|
├── explorations/
|
||||||
└── wisdom/contributions/
|
└── wisdom/contributions/
|
||||||
```
|
```
|
||||||
3. **Wisdom Initialization**: Copy `.claude/skills/team-ux-improve/wisdom/` to `<session>/wisdom/`
|
3. **Wisdom Initialization**: Copy `~ or <project>/.claude/skills/team-ux-improve/wisdom/` to `<session>/wisdom/`
|
||||||
4. Initialize `.msg/meta.json` via team_msg state_update with pipeline metadata
|
4. Initialize `.msg/meta.json` via team_msg state_update with pipeline metadata
|
||||||
5. TeamCreate(team_name="ux-improve")
|
5. TeamCreate(team_name="ux-improve")
|
||||||
6. Do NOT spawn workers yet - deferred to Phase 4
|
6. Do NOT spawn workers yet - deferred to Phase 4
|
||||||
@@ -110,7 +110,7 @@ Delegate to `commands/monitor.md#handleSpawnNext`:
|
|||||||
|
|
||||||
3. **Wisdom Consolidation**: Check `<session>/wisdom/contributions/` for worker contributions
|
3. **Wisdom Consolidation**: Check `<session>/wisdom/contributions/` for worker contributions
|
||||||
- If contributions exist -> AskUserQuestion to merge to permanent wisdom
|
- If contributions exist -> AskUserQuestion to merge to permanent wisdom
|
||||||
- If approved -> copy to `.claude/skills/team-ux-improve/wisdom/`
|
- If approved -> copy to `~ or <project>/.claude/skills/team-ux-improve/wisdom/`
|
||||||
|
|
||||||
4. Calculate: completed_tasks, total_issues_found, issues_fixed, test_pass_rate
|
4. Calculate: completed_tasks, total_issues_found, issues_fixed, test_pass_rate
|
||||||
5. Output pipeline summary with [coordinator] prefix
|
5. Output pipeline summary with [coordinator] prefix
|
||||||
|
|||||||
@@ -44,7 +44,7 @@ UX improvement pipeline modes and task registry.
|
|||||||
|
|
||||||
## Wisdom System
|
## Wisdom System
|
||||||
|
|
||||||
Workers contribute learnings to `<session>/wisdom/contributions/`. On pipeline completion, coordinator asks user to merge approved contributions to permanent wisdom at `.claude/skills/team-ux-improve/wisdom/`.
|
Workers contribute learnings to `<session>/wisdom/contributions/`. On pipeline completion, coordinator asks user to merge approved contributions to permanent wisdom at `~ or <project>/.claude/skills/team-ux-improve/wisdom/`.
|
||||||
|
|
||||||
| Directory | Purpose |
|
| Directory | Purpose |
|
||||||
|-----------|---------|
|
|-----------|---------|
|
||||||
|
|||||||
@@ -4,7 +4,7 @@
|
|||||||
"team_display_name": "UX Improve",
|
"team_display_name": "UX Improve",
|
||||||
"team_purpose": "Systematically discover and fix UI/UX interaction issues including unresponsive buttons, missing feedback, and state refresh problems",
|
"team_purpose": "Systematically discover and fix UI/UX interaction issues including unresponsive buttons, missing feedback, and state refresh problems",
|
||||||
"skill_name": "team-ux-improve",
|
"skill_name": "team-ux-improve",
|
||||||
"skill_path": ".claude/skills/team-ux-improve/",
|
"skill_path": "~ or <project>/.claude/skills/team-ux-improve/",
|
||||||
"worker_agent": "team-worker",
|
"worker_agent": "team-worker",
|
||||||
"pipeline_type": "Standard",
|
"pipeline_type": "Standard",
|
||||||
"completion_action": "interactive",
|
"completion_action": "interactive",
|
||||||
|
|||||||
110
.codex/skills/spec-generator/README.md
Normal file
110
.codex/skills/spec-generator/README.md
Normal file
@@ -0,0 +1,110 @@
|
|||||||
|
# Spec Generator
|
||||||
|
|
||||||
|
Structured specification document generator producing a complete document chain (Product Brief -> PRD -> Architecture -> Epics).
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Via workflow command
|
||||||
|
/workflow:spec "Build a task management system"
|
||||||
|
/workflow:spec -y "User auth with OAuth2" # Auto mode
|
||||||
|
/workflow:spec -c "task management" # Resume session
|
||||||
|
```
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
```
|
||||||
|
spec-generator/
|
||||||
|
|- SKILL.md # Entry point: metadata + architecture + flow
|
||||||
|
|- phases/
|
||||||
|
| |- 01-discovery.md # Seed analysis + codebase exploration + spec type selection
|
||||||
|
| |- 01-5-requirement-clarification.md # Interactive requirement expansion
|
||||||
|
| |- 02-product-brief.md # Multi-CLI product brief + glossary generation
|
||||||
|
| |- 03-requirements.md # PRD with MoSCoW priorities + RFC 2119 constraints
|
||||||
|
| |- 04-architecture.md # Architecture + state machine + config model + observability
|
||||||
|
| |- 05-epics-stories.md # Epic/Story decomposition
|
||||||
|
| |- 06-readiness-check.md # Quality validation + handoff + iterate option
|
||||||
|
| |- 06-5-auto-fix.md # Auto-fix loop for readiness issues (max 2 iterations)
|
||||||
|
| |- 07-issue-export.md # Issue creation from Epics + export report
|
||||||
|
|- specs/
|
||||||
|
| |- document-standards.md # Format, frontmatter, naming rules
|
||||||
|
| |- quality-gates.md # Per-phase quality criteria + iteration tracking
|
||||||
|
| |- glossary-template.json # Terminology glossary schema
|
||||||
|
|- templates/
|
||||||
|
| |- product-brief.md # Product brief template (+ Concepts & Non-Goals)
|
||||||
|
| |- requirements-prd.md # PRD template
|
||||||
|
| |- architecture-doc.md # Architecture template (+ state machine, config, observability)
|
||||||
|
| |- epics-template.md # Epic/Story template (+ versioning)
|
||||||
|
| |- profiles/ # Spec type specialization profiles
|
||||||
|
| |- service-profile.md # Service spec: lifecycle, observability, trust
|
||||||
|
| |- api-profile.md # API spec: endpoints, auth, rate limiting
|
||||||
|
| |- library-profile.md # Library spec: public API, examples, compatibility
|
||||||
|
|- README.md # This file
|
||||||
|
```
|
||||||
|
|
||||||
|
## 7-Phase Pipeline
|
||||||
|
|
||||||
|
| Phase | Name | Output | CLI Tools | Key Features |
|
||||||
|
|-------|------|--------|-----------|-------------|
|
||||||
|
| 1 | Discovery | spec-config.json | Gemini (analysis) | Spec type selection |
|
||||||
|
| 1.5 | Req Expansion | refined-requirements.json | Gemini (analysis) | Multi-round interactive |
|
||||||
|
| 2 | Product Brief *(Agent)* | product-brief.md, glossary.json | Gemini + Codex + Claude (parallel) | Terminology glossary |
|
||||||
|
| 3 | Requirements *(Agent)* | requirements/ | Gemini + **Codex review** | RFC 2119, data model |
|
||||||
|
| 4 | Architecture *(Agent)* | architecture/ | Gemini + Codex (sequential) | State machine, config, observability |
|
||||||
|
| 5 | Epics & Stories *(Agent)* | epics/ | Gemini + **Codex review** | Glossary consistency |
|
||||||
|
| 6 | Readiness Check | readiness-report.md, spec-summary.md | Gemini + **Codex** (parallel) | Per-requirement verification |
|
||||||
|
| 6.5 | Auto-Fix *(Agent)* | Updated phase docs | Gemini (analysis) | Max 2 iterations |
|
||||||
|
| 7 | Issue Export | issue-export-report.md | ccw issue create | Epic→Issue mapping, wave assignment |
|
||||||
|
|
||||||
|
## Runtime Output
|
||||||
|
|
||||||
|
```
|
||||||
|
.workflow/.spec/SPEC-{slug}-{YYYY-MM-DD}/
|
||||||
|
|- spec-config.json # Session state
|
||||||
|
|- discovery-context.json # Codebase context (optional)
|
||||||
|
|- refined-requirements.json # Phase 1.5 (requirement expansion)
|
||||||
|
|- glossary.json # Phase 2 (terminology)
|
||||||
|
|- product-brief.md # Phase 2
|
||||||
|
|- requirements/ # Phase 3 (directory)
|
||||||
|
| |- _index.md
|
||||||
|
| |- REQ-*.md
|
||||||
|
| └── NFR-*.md
|
||||||
|
|- architecture/ # Phase 4 (directory)
|
||||||
|
| |- _index.md
|
||||||
|
| └── ADR-*.md
|
||||||
|
|- epics/ # Phase 5 (directory)
|
||||||
|
| |- _index.md
|
||||||
|
| └── EPIC-*.md
|
||||||
|
|- readiness-report.md # Phase 6
|
||||||
|
|- spec-summary.md # Phase 6
|
||||||
|
└── issue-export-report.md # Phase 7 (issue export)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Flags
|
||||||
|
|
||||||
|
- `-y|--yes`: Auto mode - skip all interactive confirmations
|
||||||
|
- `-c|--continue`: Resume from last completed phase
|
||||||
|
|
||||||
|
Spec type is selected interactively in Phase 1 (defaults to `service` in auto mode)
|
||||||
|
Available types: `service`, `api`, `library`, `platform`
|
||||||
|
|
||||||
|
## Handoff
|
||||||
|
|
||||||
|
After Phase 6, choose execution path:
|
||||||
|
- `Export Issues (Phase 7)` - Create issues per Epic with spec links → team-planex
|
||||||
|
- `workflow-lite-plan` - Execute per Epic
|
||||||
|
- `workflow:req-plan-with-file` - Roadmap decomposition
|
||||||
|
- `workflow-plan` - Full planning
|
||||||
|
- `Iterate & improve` - Re-run failed phases (max 2 iterations)
|
||||||
|
|
||||||
|
## Design Principles
|
||||||
|
|
||||||
|
- **Document chain**: Each phase builds on previous outputs
|
||||||
|
- **Multi-perspective**: Gemini/Codex/Claude provide different viewpoints
|
||||||
|
- **Template-driven**: Consistent format via templates + frontmatter
|
||||||
|
- **Resumable**: spec-config.json tracks completed phases
|
||||||
|
- **Pure documentation**: No code generation - clean handoff to execution workflows
|
||||||
|
- **Type-specialized**: Profiles adapt templates to service/api/library/platform requirements
|
||||||
|
- **Iterative quality**: Phase 6.5 auto-fix repairs issues, max 2 iterations before handoff
|
||||||
|
- **Terminology-first**: glossary.json ensures consistent terminology across all documents
|
||||||
|
- **Agent-delegated**: Heavy document phases (2-5, 6.5) run in doc-generator agents to minimize main context usage
|
||||||
425
.codex/skills/spec-generator/SKILL.md
Normal file
425
.codex/skills/spec-generator/SKILL.md
Normal file
@@ -0,0 +1,425 @@
|
|||||||
|
---
|
||||||
|
name: spec-generator
|
||||||
|
description: Specification generator - 7 phase document chain producing product brief, PRD, architecture, epics, and issues. Agent-delegated heavy phases (2-5, 6.5) with Codex review gates. Triggers on "generate spec", "create specification", "spec generator", "workflow:spec".
|
||||||
|
allowed-tools: Agent, AskUserQuestion, TaskCreate, TaskUpdate, TaskList, Read, Write, Edit, Bash, Glob, Grep, Skill
|
||||||
|
---
|
||||||
|
|
||||||
|
# Spec Generator
|
||||||
|
|
||||||
|
Structured specification document generator producing a complete specification package (Product Brief, PRD, Architecture, Epics, Issues) through 7 sequential phases with multi-CLI analysis, Codex review gates, and interactive refinement. Heavy document phases are delegated to `doc-generator` agents to minimize main context usage. **Document generation only** - execution handoff via issue export to team-planex or existing workflows.
|
||||||
|
|
||||||
|
## Architecture Overview
|
||||||
|
|
||||||
|
```
|
||||||
|
Phase 0: Specification Study (Read specs/ + templates/ - mandatory prerequisite) [Inline]
|
||||||
|
|
|
||||||
|
Phase 1: Discovery -> spec-config.json + discovery-context.json [Inline]
|
||||||
|
| (includes spec_type selection)
|
||||||
|
Phase 1.5: Req Expansion -> refined-requirements.json [Inline]
|
||||||
|
| (interactive discussion + CLI gap analysis)
|
||||||
|
Phase 2: Product Brief -> product-brief.md + glossary.json [Agent]
|
||||||
|
| (3-CLI parallel + synthesis)
|
||||||
|
Phase 3: Requirements (PRD) -> requirements/ (_index.md + REQ-*.md + NFR-*.md) [Agent]
|
||||||
|
| (Gemini + Codex review)
|
||||||
|
Phase 4: Architecture -> architecture/ (_index.md + ADR-*.md) [Agent]
|
||||||
|
| (Gemini + Codex review)
|
||||||
|
Phase 5: Epics & Stories -> epics/ (_index.md + EPIC-*.md) [Agent]
|
||||||
|
| (Gemini + Codex review)
|
||||||
|
Phase 6: Readiness Check -> readiness-report.md + spec-summary.md [Inline]
|
||||||
|
| (Gemini + Codex dual validation + per-req verification)
|
||||||
|
├── Pass (>=80%): Handoff or Phase 7
|
||||||
|
├── Review (60-79%): Handoff with caveats or Phase 7
|
||||||
|
└── Fail (<60%): Phase 6.5 Auto-Fix (max 2 iterations)
|
||||||
|
|
|
||||||
|
Phase 6.5: Auto-Fix -> Updated Phase 2-5 documents [Agent]
|
||||||
|
|
|
||||||
|
└── Re-run Phase 6 validation
|
||||||
|
|
|
||||||
|
Phase 7: Issue Export -> issue-export-report.md [Inline]
|
||||||
|
(Epic→Issue mapping, ccw issue create, wave assignment)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Key Design Principles
|
||||||
|
|
||||||
|
1. **Document Chain**: Each phase builds on previous outputs, creating a traceable specification chain from idea to executable issues
|
||||||
|
2. **Agent-Delegated**: Heavy document phases (2-5, 6.5) run in `doc-generator` agents, keeping main context lean (summaries only)
|
||||||
|
3. **Multi-Perspective Analysis**: CLI tools (Gemini/Codex/Claude) provide product, technical, and user perspectives in parallel
|
||||||
|
4. **Codex Review Gates**: Phases 3, 5, 6 include Codex CLI review for quality validation before output
|
||||||
|
5. **Interactive by Default**: Each phase offers user confirmation points; `-y` flag enables full auto mode
|
||||||
|
6. **Resumable Sessions**: `spec-config.json` tracks completed phases; `-c` flag resumes from last checkpoint
|
||||||
|
7. **Template-Driven**: All documents generated from standardized templates with YAML frontmatter
|
||||||
|
8. **Pure Documentation**: No code generation or execution - clean handoff via issue export to execution workflows
|
||||||
|
9. **Spec Type Specialization**: Templates adapt to spec type (service/api/library/platform) via profiles for domain-specific depth
|
||||||
|
10. **Iterative Quality**: Phase 6.5 auto-fix loop repairs issues found in readiness check (max 2 iterations)
|
||||||
|
11. **Terminology Consistency**: glossary.json generated in Phase 2, injected into all subsequent phases
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Mandatory Prerequisites
|
||||||
|
|
||||||
|
> **Do NOT skip**: Before performing any operations, you **must** completely read the following documents. Proceeding without reading the specifications will result in outputs that do not meet quality standards.
|
||||||
|
|
||||||
|
### Specification Documents (Required Reading)
|
||||||
|
|
||||||
|
| Document | Purpose | Priority |
|
||||||
|
|----------|---------|----------|
|
||||||
|
| [specs/document-standards.md](specs/document-standards.md) | Document format, frontmatter, naming conventions | **P0 - Must read before execution** |
|
||||||
|
| [specs/quality-gates.md](specs/quality-gates.md) | Per-phase quality gate criteria and scoring | **P0 - Must read before execution** |
|
||||||
|
|
||||||
|
### Template Files (Must read before generation)
|
||||||
|
|
||||||
|
| Document | Purpose |
|
||||||
|
|----------|---------|
|
||||||
|
| [templates/product-brief.md](templates/product-brief.md) | Product brief document template |
|
||||||
|
| [templates/requirements-prd.md](templates/requirements-prd.md) | PRD document template |
|
||||||
|
| [templates/architecture-doc.md](templates/architecture-doc.md) | Architecture document template |
|
||||||
|
| [templates/epics-template.md](templates/epics-template.md) | Epic/Story document template |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Execution Flow
|
||||||
|
|
||||||
|
```
|
||||||
|
Input Parsing:
|
||||||
|
|- Parse $ARGUMENTS: extract idea/topic, flags (-y, -c, -m)
|
||||||
|
|- Detect mode: new | continue
|
||||||
|
|- If continue: read spec-config.json, resume from first incomplete phase
|
||||||
|
|- If new: proceed to Phase 1
|
||||||
|
|
||||||
|
Phase 1: Discovery & Seed Analysis
|
||||||
|
|- Ref: phases/01-discovery.md
|
||||||
|
|- Generate session ID: SPEC-{slug}-{YYYY-MM-DD}
|
||||||
|
|- Parse input (text or file reference)
|
||||||
|
|- Gemini CLI seed analysis (problem, users, domain, dimensions)
|
||||||
|
|- Codebase exploration (conditional, if project detected)
|
||||||
|
|- Spec type selection: service|api|library|platform (interactive, -y defaults to service)
|
||||||
|
|- User confirmation (interactive, -y skips)
|
||||||
|
|- Output: spec-config.json, discovery-context.json (optional)
|
||||||
|
|
||||||
|
Phase 1.5: Requirement Expansion & Clarification
|
||||||
|
|- Ref: phases/01-5-requirement-clarification.md
|
||||||
|
|- CLI gap analysis: completeness scoring, missing dimensions detection
|
||||||
|
|- Multi-round interactive discussion (max 5 rounds)
|
||||||
|
| |- Round 1: present gap analysis + expansion suggestions
|
||||||
|
| |- Round N: follow-up refinement based on user responses
|
||||||
|
|- User final confirmation of requirements
|
||||||
|
|- Auto mode (-y): CLI auto-expansion without interaction
|
||||||
|
|- Output: refined-requirements.json
|
||||||
|
|
||||||
|
Phase 2: Product Brief [AGENT: doc-generator]
|
||||||
|
|- Delegate to Task(subagent_type="doc-generator")
|
||||||
|
|- Agent reads: phases/02-product-brief.md
|
||||||
|
|- Agent executes: 3 parallel CLI analyses + synthesis + glossary generation
|
||||||
|
|- Agent writes: product-brief.md, glossary.json
|
||||||
|
|- Agent returns: JSON summary {files_created, quality_notes, key_decisions}
|
||||||
|
|- Orchestrator validates: files exist, spec-config.json updated
|
||||||
|
|
||||||
|
Phase 3: Requirements / PRD [AGENT: doc-generator]
|
||||||
|
|- Delegate to Task(subagent_type="doc-generator")
|
||||||
|
|- Agent reads: phases/03-requirements.md
|
||||||
|
|- Agent executes: Gemini expansion + Codex review (Step 2.5) + priority sorting
|
||||||
|
|- Agent writes: requirements/ directory (_index.md + REQ-*.md + NFR-*.md)
|
||||||
|
|- Agent returns: JSON summary {files_created, codex_review_integrated, key_decisions}
|
||||||
|
|- Orchestrator validates: directory exists, file count matches
|
||||||
|
|
||||||
|
Phase 4: Architecture [AGENT: doc-generator]
|
||||||
|
|- Delegate to Task(subagent_type="doc-generator")
|
||||||
|
|- Agent reads: phases/04-architecture.md
|
||||||
|
|- Agent executes: Gemini analysis + Codex review + codebase mapping
|
||||||
|
|- Agent writes: architecture/ directory (_index.md + ADR-*.md)
|
||||||
|
|- Agent returns: JSON summary {files_created, codex_review_rating, key_decisions}
|
||||||
|
|- Orchestrator validates: directory exists, ADR files present
|
||||||
|
|
||||||
|
Phase 5: Epics & Stories [AGENT: doc-generator]
|
||||||
|
|- Delegate to Task(subagent_type="doc-generator")
|
||||||
|
|- Agent reads: phases/05-epics-stories.md
|
||||||
|
|- Agent executes: Gemini decomposition + Codex review (Step 2.5) + validation
|
||||||
|
|- Agent writes: epics/ directory (_index.md + EPIC-*.md)
|
||||||
|
|- Agent returns: JSON summary {files_created, codex_review_integrated, mvp_epic_count}
|
||||||
|
|- Orchestrator validates: directory exists, MVP epics present
|
||||||
|
|
||||||
|
Phase 6: Readiness Check [INLINE + ENHANCED]
|
||||||
|
|- Ref: phases/06-readiness-check.md
|
||||||
|
|- Gemini CLI: cross-document validation (completeness, consistency, traceability)
|
||||||
|
|- Codex CLI: technical depth review (ADR quality, data model, security, observability)
|
||||||
|
|- Per-requirement verification: iterate all REQ-*.md / NFR-*.md
|
||||||
|
| |- Check: AC exists + testable, Brief trace, Story coverage, Arch coverage
|
||||||
|
| |- Generate: Per-Requirement Verification table
|
||||||
|
|- Merge dual CLI scores into quality report
|
||||||
|
|- Output: readiness-report.md, spec-summary.md
|
||||||
|
|- Handoff options: Phase 7 (issue export), lite-plan, req-plan, plan, iterate
|
||||||
|
|
||||||
|
Phase 6.5: Auto-Fix (conditional) [AGENT: doc-generator]
|
||||||
|
|- Delegate to Task(subagent_type="doc-generator")
|
||||||
|
|- Agent reads: phases/06-5-auto-fix.md + readiness-report.md
|
||||||
|
|- Agent executes: fix affected Phase 2-5 documents
|
||||||
|
|- Agent returns: JSON summary {files_modified, issues_fixed, phases_touched}
|
||||||
|
|- Re-run Phase 6 validation
|
||||||
|
|- Max 2 iterations, then force handoff
|
||||||
|
|
||||||
|
Phase 7: Issue Export [INLINE]
|
||||||
|
|- Ref: phases/07-issue-export.md
|
||||||
|
|- Read EPIC-*.md files, assign waves (MVP→wave-1, others→wave-2)
|
||||||
|
|- Create issues via ccw issue create (one per Epic)
|
||||||
|
|- Map Epic dependencies to issue dependencies
|
||||||
|
|- Generate issue-export-report.md
|
||||||
|
|- Update spec-config.json with issue_ids
|
||||||
|
|- Handoff: team-planex, wave-1 only, view issues, done
|
||||||
|
|
||||||
|
Complete: Full specification package with issues ready for execution
|
||||||
|
|
||||||
|
Phase 6/7 → Handoff Bridge (conditional, based on user selection):
|
||||||
|
├─ team-planex: Execute issues via coordinated team workflow
|
||||||
|
├─ lite-plan: Extract first MVP Epic description → direct text input
|
||||||
|
├─ plan / req-plan: Create WFS session + .brainstorming/ bridge files
|
||||||
|
│ ├─ guidance-specification.md (synthesized from spec outputs)
|
||||||
|
│ ├─ feature-specs/feature-index.json (Epic → Feature mapping)
|
||||||
|
│ └─ feature-specs/F-{num}-{slug}.md (one per Epic)
|
||||||
|
└─ context-search-agent auto-discovers .brainstorming/
|
||||||
|
→ context-package.json.brainstorm_artifacts populated
|
||||||
|
→ action-planning-agent consumes: guidance_spec (P1) → feature_index (P2)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Directory Setup
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Session ID generation
|
||||||
|
const slug = topic.toLowerCase().replace(/[^a-z0-9\u4e00-\u9fff]+/g, '-').slice(0, 40);
|
||||||
|
const date = new Date().toISOString().slice(0, 10);
|
||||||
|
const sessionId = `SPEC-${slug}-${date}`;
|
||||||
|
const workDir = `.workflow/.spec/${sessionId}`;
|
||||||
|
|
||||||
|
Bash(`mkdir -p "${workDir}"`);
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
.workflow/.spec/SPEC-{slug}-{YYYY-MM-DD}/
|
||||||
|
├── spec-config.json # Session configuration + phase state
|
||||||
|
├── discovery-context.json # Codebase exploration results (optional)
|
||||||
|
├── refined-requirements.json # Phase 1.5: Confirmed requirements after discussion
|
||||||
|
├── glossary.json # Phase 2: Terminology glossary for cross-doc consistency
|
||||||
|
├── product-brief.md # Phase 2: Product brief
|
||||||
|
├── requirements/ # Phase 3: Detailed PRD (directory)
|
||||||
|
│ ├── _index.md # Summary, MoSCoW table, traceability, links
|
||||||
|
│ ├── REQ-NNN-{slug}.md # Individual functional requirement
|
||||||
|
│ └── NFR-{type}-NNN-{slug}.md # Individual non-functional requirement
|
||||||
|
├── architecture/ # Phase 4: Architecture decisions (directory)
|
||||||
|
│ ├── _index.md # Overview, components, tech stack, links
|
||||||
|
│ └── ADR-NNN-{slug}.md # Individual Architecture Decision Record
|
||||||
|
├── epics/ # Phase 5: Epic/Story breakdown (directory)
|
||||||
|
│ ├── _index.md # Epic table, dependency map, MVP scope
|
||||||
|
│ └── EPIC-NNN-{slug}.md # Individual Epic with Stories
|
||||||
|
├── readiness-report.md # Phase 6: Quality report (+ per-req verification table)
|
||||||
|
├── spec-summary.md # Phase 6: One-page executive summary
|
||||||
|
└── issue-export-report.md # Phase 7: Issue mapping table + spec links
|
||||||
|
```
|
||||||
|
|
||||||
|
## State Management
|
||||||
|
|
||||||
|
**spec-config.json** serves as core state file:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"session_id": "SPEC-xxx-2026-02-11",
|
||||||
|
"seed_input": "User input text",
|
||||||
|
"input_type": "text",
|
||||||
|
"timestamp": "ISO8601",
|
||||||
|
"mode": "interactive",
|
||||||
|
"complexity": "moderate",
|
||||||
|
"depth": "standard",
|
||||||
|
"focus_areas": [],
|
||||||
|
"spec_type": "service",
|
||||||
|
"iteration_count": 0,
|
||||||
|
"iteration_history": [],
|
||||||
|
"seed_analysis": {
|
||||||
|
"problem_statement": "...",
|
||||||
|
"target_users": [],
|
||||||
|
"domain": "...",
|
||||||
|
"constraints": [],
|
||||||
|
"dimensions": []
|
||||||
|
},
|
||||||
|
"has_codebase": false,
|
||||||
|
"refined_requirements_file": "refined-requirements.json",
|
||||||
|
"issue_ids": [],
|
||||||
|
"issues_created": 0,
|
||||||
|
"phasesCompleted": [
|
||||||
|
{ "phase": 1, "name": "discovery", "output_file": "spec-config.json", "completed_at": "ISO8601" },
|
||||||
|
{ "phase": 1.5, "name": "requirement-clarification", "output_file": "refined-requirements.json", "discussion_rounds": 2, "completed_at": "ISO8601" },
|
||||||
|
{ "phase": 3, "name": "requirements", "output_dir": "requirements/", "output_index": "requirements/_index.md", "file_count": 8, "completed_at": "ISO8601" }
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Resume mechanism**: `-c|--continue` flag reads `spec-config.json.phasesCompleted`, resumes from first incomplete phase.
|
||||||
|
|
||||||
|
## Core Rules
|
||||||
|
|
||||||
|
1. **Start Immediately**: First action is TaskCreate initialization, then Phase 0 (spec study), then Phase 1
|
||||||
|
2. **Progressive Phase Loading**: Read phase docs ONLY when that phase is about to execute
|
||||||
|
3. **Auto-Continue**: All phases run autonomously; check TaskList to execute next pending phase
|
||||||
|
4. **Parse Every Output**: Extract required data from each phase for next phase context
|
||||||
|
5. **DO NOT STOP**: Continuous 7-phase pipeline until all phases complete or user exits
|
||||||
|
6. **Respect -y Flag**: When auto mode, skip all AskUserQuestion calls, use recommended defaults
|
||||||
|
7. **Respect -c Flag**: When continue mode, load spec-config.json and resume from checkpoint
|
||||||
|
8. **Inject Glossary**: From Phase 3 onward, inject glossary.json terms into every CLI prompt
|
||||||
|
9. **Load Profile**: Read templates/profiles/{spec_type}-profile.md and inject requirements into Phase 2-5 prompts
|
||||||
|
10. **Iterate on Failure**: When Phase 6 score < 60%, auto-trigger Phase 6.5 (max 2 iterations)
|
||||||
|
11. **Agent Delegation**: Phase 2-5 and 6.5 MUST be delegated to `doc-generator` agents via Task tool — never execute inline
|
||||||
|
12. **Lean Context**: Orchestrator only sees agent return summaries (JSON), never the full document content
|
||||||
|
13. **Validate Agent Output**: After each agent returns, verify files exist on disk and spec-config.json was updated
|
||||||
|
|
||||||
|
## Agent Delegation Protocol
|
||||||
|
|
||||||
|
For Phase 2-5 and 6.5, the orchestrator delegates to a `doc-generator` agent via the Task tool. The orchestrator builds a lean context envelope — passing only paths, never file content.
|
||||||
|
|
||||||
|
### Context Envelope Template
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Task({
|
||||||
|
subagent_type: "doc-generator",
|
||||||
|
run_in_background: false,
|
||||||
|
description: `Spec Phase ${N}: ${phaseName}`,
|
||||||
|
prompt: `
|
||||||
|
## Spec Generator - Phase ${N}: ${phaseName}
|
||||||
|
|
||||||
|
### Session
|
||||||
|
- ID: ${sessionId}
|
||||||
|
- Work Dir: ${workDir}
|
||||||
|
- Auto Mode: ${autoMode}
|
||||||
|
- Spec Type: ${specType}
|
||||||
|
|
||||||
|
### Input (read from disk)
|
||||||
|
${inputFilesList} // Only file paths — agent reads content itself
|
||||||
|
|
||||||
|
### Instructions
|
||||||
|
Read: ${skillDir}/phases/${phaseFile} // Agent reads the phase doc for full instructions
|
||||||
|
Apply template: ${skillDir}/templates/${templateFile}
|
||||||
|
|
||||||
|
### Glossary (Phase 3+ only)
|
||||||
|
Read: ${workDir}/glossary.json
|
||||||
|
|
||||||
|
### Output
|
||||||
|
Write files to: ${workDir}/${outputPath}
|
||||||
|
Update: ${workDir}/spec-config.json (phasesCompleted)
|
||||||
|
Return: JSON summary { files_created, quality_notes, key_decisions }
|
||||||
|
`
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
### Orchestrator Post-Agent Validation
|
||||||
|
|
||||||
|
After each agent returns:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// 1. Parse agent return summary
|
||||||
|
const summary = JSON.parse(agentResult);
|
||||||
|
|
||||||
|
// 2. Validate files exist
|
||||||
|
summary.files_created.forEach(file => {
|
||||||
|
const exists = Glob(`${workDir}/${file}`);
|
||||||
|
if (!exists.length) throw new Error(`Agent claimed to create ${file} but file not found`);
|
||||||
|
});
|
||||||
|
|
||||||
|
// 3. Verify spec-config.json updated
|
||||||
|
const config = JSON.parse(Read(`${workDir}/spec-config.json`));
|
||||||
|
const phaseComplete = config.phasesCompleted.some(p => p.phase === N);
|
||||||
|
if (!phaseComplete) throw new Error(`Agent did not update phasesCompleted for Phase ${N}`);
|
||||||
|
|
||||||
|
// 4. Store summary for downstream context (do NOT read full documents)
|
||||||
|
phasesSummaries[N] = summary;
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Reference Documents by Phase
|
||||||
|
|
||||||
|
### Phase 1: Discovery
|
||||||
|
| Document | Purpose | When to Use |
|
||||||
|
|----------|---------|-------------|
|
||||||
|
| [phases/01-discovery.md](phases/01-discovery.md) | Seed analysis and session setup | Phase start |
|
||||||
|
| [templates/profiles/](templates/profiles/) | Spec type profiles | Spec type selection |
|
||||||
|
| [specs/document-standards.md](specs/document-standards.md) | Frontmatter format for spec-config.json | Config generation |
|
||||||
|
|
||||||
|
### Phase 1.5: Requirement Expansion & Clarification
|
||||||
|
| Document | Purpose | When to Use |
|
||||||
|
|----------|---------|-------------|
|
||||||
|
| [phases/01-5-requirement-clarification.md](phases/01-5-requirement-clarification.md) | Interactive requirement discussion workflow | Phase start |
|
||||||
|
| [specs/quality-gates.md](specs/quality-gates.md) | Quality criteria for refined requirements | Validation |
|
||||||
|
|
||||||
|
### Phase 2: Product Brief
|
||||||
|
| Document | Purpose | When to Use |
|
||||||
|
|----------|---------|-------------|
|
||||||
|
| [phases/02-product-brief.md](phases/02-product-brief.md) | Multi-CLI analysis orchestration | Phase start |
|
||||||
|
| [templates/product-brief.md](templates/product-brief.md) | Document template | Document generation |
|
||||||
|
| [specs/glossary-template.json](specs/glossary-template.json) | Glossary schema | Glossary generation |
|
||||||
|
|
||||||
|
### Phase 3: Requirements
|
||||||
|
| Document | Purpose | When to Use |
|
||||||
|
|----------|---------|-------------|
|
||||||
|
| [phases/03-requirements.md](phases/03-requirements.md) | PRD generation workflow | Phase start |
|
||||||
|
| [templates/requirements-prd.md](templates/requirements-prd.md) | Document template | Document generation |
|
||||||
|
|
||||||
|
### Phase 4: Architecture
|
||||||
|
| Document | Purpose | When to Use |
|
||||||
|
|----------|---------|-------------|
|
||||||
|
| [phases/04-architecture.md](phases/04-architecture.md) | Architecture decision workflow | Phase start |
|
||||||
|
| [templates/architecture-doc.md](templates/architecture-doc.md) | Document template | Document generation |
|
||||||
|
|
||||||
|
### Phase 5: Epics & Stories
|
||||||
|
| Document | Purpose | When to Use |
|
||||||
|
|----------|---------|-------------|
|
||||||
|
| [phases/05-epics-stories.md](phases/05-epics-stories.md) | Epic/Story decomposition | Phase start |
|
||||||
|
| [templates/epics-template.md](templates/epics-template.md) | Document template | Document generation |
|
||||||
|
|
||||||
|
### Phase 6: Readiness Check
|
||||||
|
| Document | Purpose | When to Use |
|
||||||
|
|----------|---------|-------------|
|
||||||
|
| [phases/06-readiness-check.md](phases/06-readiness-check.md) | Cross-document validation | Phase start |
|
||||||
|
| [specs/quality-gates.md](specs/quality-gates.md) | Quality scoring criteria | Validation |
|
||||||
|
|
||||||
|
### Phase 6.5: Auto-Fix
|
||||||
|
| Document | Purpose | When to Use |
|
||||||
|
|----------|---------|-------------|
|
||||||
|
| [phases/06-5-auto-fix.md](phases/06-5-auto-fix.md) | Auto-fix workflow for readiness issues | When Phase 6 score < 60% |
|
||||||
|
| [specs/quality-gates.md](specs/quality-gates.md) | Iteration exit criteria | Validation |
|
||||||
|
|
||||||
|
### Phase 7: Issue Export
|
||||||
|
| Document | Purpose | When to Use |
|
||||||
|
|----------|---------|-------------|
|
||||||
|
| [phases/07-issue-export.md](phases/07-issue-export.md) | Epic→Issue mapping and export | Phase start |
|
||||||
|
| [specs/quality-gates.md](specs/quality-gates.md) | Issue export quality criteria | Validation |
|
||||||
|
|
||||||
|
### Debugging & Troubleshooting
|
||||||
|
| Issue | Solution Document |
|
||||||
|
|-------|-------------------|
|
||||||
|
| Phase execution failed | Refer to the relevant Phase documentation |
|
||||||
|
| Output does not meet expectations | [specs/quality-gates.md](specs/quality-gates.md) |
|
||||||
|
| Document format issues | [specs/document-standards.md](specs/document-standards.md) |
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
| Phase | Error | Blocking? | Action |
|
||||||
|
|-------|-------|-----------|--------|
|
||||||
|
| Phase 1 | Empty input | Yes | Error and exit |
|
||||||
|
| Phase 1 | CLI seed analysis fails | No | Use basic parsing fallback |
|
||||||
|
| Phase 1.5 | Gap analysis CLI fails | No | Skip to user questions with basic prompts |
|
||||||
|
| Phase 1.5 | User skips discussion | No | Proceed with seed_analysis as-is |
|
||||||
|
| Phase 1.5 | Max rounds reached (5) | No | Force confirmation with current state |
|
||||||
|
| Phase 2 | Single CLI perspective fails | No | Continue with available perspectives |
|
||||||
|
| Phase 2 | All CLI calls fail | No | Generate basic brief from seed analysis |
|
||||||
|
| Phase 3 | Gemini CLI fails | No | Use codex fallback |
|
||||||
|
| Phase 4 | Architecture review fails | No | Skip review, proceed with initial analysis |
|
||||||
|
| Phase 5 | Story generation fails | No | Generate epics without detailed stories |
|
||||||
|
| Phase 6 | Validation CLI fails | No | Generate partial report with available data |
|
||||||
|
| Phase 6.5 | Auto-fix CLI fails | No | Log failure, proceed to handoff with Review status |
|
||||||
|
| Phase 6.5 | Max iterations reached | No | Force handoff, report remaining issues |
|
||||||
|
| Phase 7 | ccw issue create fails for one Epic | No | Log error, continue with remaining Epics |
|
||||||
|
| Phase 7 | No EPIC files found | Yes | Error and return to Phase 5 |
|
||||||
|
| Phase 7 | All issue creations fail | Yes | Error with CLI diagnostic, suggest manual creation |
|
||||||
|
| Phase 2-5 | Agent fails to return | Yes | Retry once, then fall back to inline execution |
|
||||||
|
| Phase 2-5 | Agent returns incomplete files | No | Log gaps, attempt inline completion for missing files |
|
||||||
|
|
||||||
|
### CLI Fallback Chain
|
||||||
|
|
||||||
|
Gemini -> Codex -> Claude -> degraded mode (local analysis only)
|
||||||
@@ -0,0 +1,404 @@
|
|||||||
|
# Phase 1.5: Requirement Expansion & Clarification
|
||||||
|
|
||||||
|
在进入正式文档生成前,通过多轮交互讨论对原始需求进行深度挖掘、扩展和确认。
|
||||||
|
|
||||||
|
## Objective
|
||||||
|
|
||||||
|
- 识别原始需求中的模糊点、遗漏和潜在风险
|
||||||
|
- 通过 CLI 辅助分析需求完整性,生成深度探测问题
|
||||||
|
- 支持多轮交互讨论,逐步细化需求
|
||||||
|
- 生成经用户确认的 `refined-requirements.json` 作为后续阶段的高质量输入
|
||||||
|
|
||||||
|
## Input
|
||||||
|
|
||||||
|
- Dependency: `{workDir}/spec-config.json` (Phase 1 output)
|
||||||
|
- Optional: `{workDir}/discovery-context.json` (codebase context)
|
||||||
|
|
||||||
|
## Execution Steps
|
||||||
|
|
||||||
|
### Step 1: Load Phase 1 Context
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const specConfig = JSON.parse(Read(`${workDir}/spec-config.json`));
|
||||||
|
const { seed_analysis, seed_input, focus_areas, has_codebase, depth } = specConfig;
|
||||||
|
|
||||||
|
let discoveryContext = null;
|
||||||
|
if (has_codebase) {
|
||||||
|
try {
|
||||||
|
discoveryContext = JSON.parse(Read(`${workDir}/discovery-context.json`));
|
||||||
|
} catch (e) { /* proceed without */ }
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: CLI Gap Analysis & Question Generation
|
||||||
|
|
||||||
|
调用 Gemini CLI 分析原始需求的完整性,识别模糊点并生成探测问题。
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: 深度分析用户的初始需求,识别模糊点、遗漏和需要澄清的领域。
|
||||||
|
Success: 生成 3-5 个高质量的探测问题,覆盖功能范围、边界条件、非功能性需求、用户场景等维度。
|
||||||
|
|
||||||
|
ORIGINAL SEED INPUT:
|
||||||
|
${seed_input}
|
||||||
|
|
||||||
|
SEED ANALYSIS:
|
||||||
|
${JSON.stringify(seed_analysis, null, 2)}
|
||||||
|
|
||||||
|
FOCUS AREAS: ${focus_areas.join(', ')}
|
||||||
|
${discoveryContext ? `
|
||||||
|
CODEBASE CONTEXT:
|
||||||
|
- Existing patterns: ${discoveryContext.existing_patterns?.slice(0,5).join(', ') || 'none'}
|
||||||
|
- Tech stack: ${JSON.stringify(discoveryContext.tech_stack || {})}
|
||||||
|
` : ''}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
1. 评估当前需求描述的完整性(1-10 分,列出缺失维度)
|
||||||
|
2. 识别 3-5 个关键模糊区域,每个区域包含:
|
||||||
|
- 模糊点描述(为什么不清楚)
|
||||||
|
- 1-2 个开放式探测问题
|
||||||
|
- 1-2 个扩展建议(基于领域最佳实践)
|
||||||
|
3. 检查以下维度是否有遗漏:
|
||||||
|
- 功能范围边界(什么在范围内/外?)
|
||||||
|
- 核心用户场景和流程
|
||||||
|
- 非功能性需求(性能、安全、可用性、可扩展性)
|
||||||
|
- 集成点和外部依赖
|
||||||
|
- 数据模型和存储需求
|
||||||
|
- 错误处理和异常场景
|
||||||
|
4. 基于领域经验提供需求扩展建议
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: JSON output:
|
||||||
|
{
|
||||||
|
\"completeness_score\": 7,
|
||||||
|
\"missing_dimensions\": [\"Performance requirements\", \"Error handling\"],
|
||||||
|
\"clarification_areas\": [
|
||||||
|
{
|
||||||
|
\"area\": \"Scope boundary\",
|
||||||
|
\"rationale\": \"Input does not clarify...\",
|
||||||
|
\"questions\": [\"Question 1?\", \"Question 2?\"],
|
||||||
|
\"suggestions\": [\"Suggestion 1\", \"Suggestion 2\"]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
\"expansion_recommendations\": [
|
||||||
|
{
|
||||||
|
\"category\": \"Non-functional\",
|
||||||
|
\"recommendation\": \"Consider adding...\",
|
||||||
|
\"priority\": \"high|medium|low\"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
CONSTRAINTS: 问题必须是开放式的,建议必须具体可执行,使用用户输入的语言
|
||||||
|
" --tool gemini --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
});
|
||||||
|
// Wait for CLI result before continuing
|
||||||
|
```
|
||||||
|
|
||||||
|
解析 CLI 输出为结构化数据:
|
||||||
|
```javascript
|
||||||
|
const gapAnalysis = {
|
||||||
|
completeness_score: 0,
|
||||||
|
missing_dimensions: [],
|
||||||
|
clarification_areas: [],
|
||||||
|
expansion_recommendations: []
|
||||||
|
};
|
||||||
|
// Parse from CLI output
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Interactive Discussion Loop
|
||||||
|
|
||||||
|
核心多轮交互循环。每轮:展示分析结果 → 用户回应 → 更新需求状态 → 判断是否继续。
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Initialize requirement state
|
||||||
|
let requirementState = {
|
||||||
|
problem_statement: seed_analysis.problem_statement,
|
||||||
|
target_users: seed_analysis.target_users,
|
||||||
|
domain: seed_analysis.domain,
|
||||||
|
constraints: seed_analysis.constraints,
|
||||||
|
confirmed_features: [],
|
||||||
|
non_functional_requirements: [],
|
||||||
|
boundary_conditions: [],
|
||||||
|
integration_points: [],
|
||||||
|
key_assumptions: [],
|
||||||
|
discussion_rounds: 0
|
||||||
|
};
|
||||||
|
|
||||||
|
let discussionLog = [];
|
||||||
|
let userSatisfied = false;
|
||||||
|
|
||||||
|
// === Round 1: Present gap analysis results ===
|
||||||
|
// Display completeness_score, clarification_areas, expansion_recommendations
|
||||||
|
// Then ask user to respond
|
||||||
|
|
||||||
|
while (!userSatisfied && requirementState.discussion_rounds < 5) {
|
||||||
|
requirementState.discussion_rounds++;
|
||||||
|
|
||||||
|
if (requirementState.discussion_rounds === 1) {
|
||||||
|
// --- First round: present initial gap analysis ---
|
||||||
|
// Format questions and suggestions from gapAnalysis for display
|
||||||
|
// Present as a structured summary to the user
|
||||||
|
|
||||||
|
AskUserQuestion({
|
||||||
|
questions: [
|
||||||
|
{
|
||||||
|
question: buildDiscussionPrompt(gapAnalysis, requirementState),
|
||||||
|
header: "Req Expand",
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{ label: "I'll answer", description: "I have answers/feedback to provide (type in 'Other')" },
|
||||||
|
{ label: "Accept all suggestions", description: "Accept all expansion recommendations as-is" },
|
||||||
|
{ label: "Skip to generation", description: "Requirements are clear enough, proceed directly" }
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
// --- Subsequent rounds: refine based on user feedback ---
|
||||||
|
// Call CLI with accumulated context for follow-up analysis
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: 基于用户最新回应,更新需求理解,识别剩余模糊点。
|
||||||
|
|
||||||
|
CURRENT REQUIREMENT STATE:
|
||||||
|
${JSON.stringify(requirementState, null, 2)}
|
||||||
|
|
||||||
|
DISCUSSION HISTORY:
|
||||||
|
${JSON.stringify(discussionLog, null, 2)}
|
||||||
|
|
||||||
|
USER'S LATEST RESPONSE:
|
||||||
|
${lastUserResponse}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
1. 将用户回应整合到需求状态中
|
||||||
|
2. 识别 1-3 个仍需澄清或可扩展的领域
|
||||||
|
3. 生成后续问题(如有必要)
|
||||||
|
4. 如果需求已充分,输出最终需求摘要
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: JSON output:
|
||||||
|
{
|
||||||
|
\"updated_fields\": { /* fields to merge into requirementState */ },
|
||||||
|
\"status\": \"need_more_discussion\" | \"ready_for_confirmation\",
|
||||||
|
\"follow_up\": {
|
||||||
|
\"remaining_areas\": [{\"area\": \"...\", \"questions\": [\"...\"]}],
|
||||||
|
\"summary\": \"...\"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
CONSTRAINTS: 避免重复已回答的问题,聚焦未覆盖的领域
|
||||||
|
" --tool gemini --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
});
|
||||||
|
// Wait for CLI result, parse and continue
|
||||||
|
|
||||||
|
// If status === "ready_for_confirmation", break to confirmation step
|
||||||
|
// If status === "need_more_discussion", present follow-up questions
|
||||||
|
|
||||||
|
AskUserQuestion({
|
||||||
|
questions: [
|
||||||
|
{
|
||||||
|
question: buildFollowUpPrompt(followUpAnalysis, requirementState),
|
||||||
|
header: "Follow-up",
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{ label: "I'll answer", description: "I have more feedback (type in 'Other')" },
|
||||||
|
{ label: "Looks good", description: "Requirements are sufficiently clear now" },
|
||||||
|
{ label: "Accept suggestions", description: "Accept remaining suggestions" }
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process user response
|
||||||
|
// - "Skip to generation" / "Looks good" → userSatisfied = true
|
||||||
|
// - "Accept all suggestions" → merge suggestions into requirementState, userSatisfied = true
|
||||||
|
// - "I'll answer" (with Other text) → record in discussionLog, continue loop
|
||||||
|
// - User selects Other with custom text → parse and record
|
||||||
|
|
||||||
|
discussionLog.push({
|
||||||
|
round: requirementState.discussion_rounds,
|
||||||
|
agent_prompt: currentPrompt,
|
||||||
|
user_response: userResponse,
|
||||||
|
timestamp: new Date().toISOString()
|
||||||
|
});
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Helper: Build Discussion Prompt
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function buildDiscussionPrompt(gapAnalysis, state) {
|
||||||
|
let prompt = `## Requirement Analysis Results\n\n`;
|
||||||
|
prompt += `**Completeness Score**: ${gapAnalysis.completeness_score}/10\n`;
|
||||||
|
|
||||||
|
if (gapAnalysis.missing_dimensions.length > 0) {
|
||||||
|
prompt += `**Missing Dimensions**: ${gapAnalysis.missing_dimensions.join(', ')}\n\n`;
|
||||||
|
}
|
||||||
|
|
||||||
|
prompt += `### Key Questions\n\n`;
|
||||||
|
gapAnalysis.clarification_areas.forEach((area, i) => {
|
||||||
|
prompt += `**${i+1}. ${area.area}**\n`;
|
||||||
|
prompt += ` ${area.rationale}\n`;
|
||||||
|
area.questions.forEach(q => { prompt += ` - ${q}\n`; });
|
||||||
|
if (area.suggestions.length > 0) {
|
||||||
|
prompt += ` Suggestions: ${area.suggestions.join('; ')}\n`;
|
||||||
|
}
|
||||||
|
prompt += `\n`;
|
||||||
|
});
|
||||||
|
|
||||||
|
if (gapAnalysis.expansion_recommendations.length > 0) {
|
||||||
|
prompt += `### Expansion Recommendations\n\n`;
|
||||||
|
gapAnalysis.expansion_recommendations.forEach(rec => {
|
||||||
|
prompt += `- [${rec.priority}] **${rec.category}**: ${rec.recommendation}\n`;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
prompt += `\nPlease answer the questions above, or choose an option below.`;
|
||||||
|
return prompt;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4: Auto Mode Handling
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (autoMode) {
|
||||||
|
// Skip interactive discussion
|
||||||
|
// CLI generates default requirement expansion based on seed_analysis
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: 基于种子分析自动生成需求扩展,无需用户交互。
|
||||||
|
|
||||||
|
SEED ANALYSIS:
|
||||||
|
${JSON.stringify(seed_analysis, null, 2)}
|
||||||
|
|
||||||
|
SEED INPUT: ${seed_input}
|
||||||
|
DEPTH: ${depth}
|
||||||
|
${discoveryContext ? `CODEBASE: ${JSON.stringify(discoveryContext.tech_stack || {})}` : ''}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
1. 基于领域最佳实践,自动扩展功能需求清单
|
||||||
|
2. 推断合理的非功能性需求
|
||||||
|
3. 识别明显的边界条件
|
||||||
|
4. 列出关键假设
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: JSON output matching refined-requirements.json schema
|
||||||
|
CONSTRAINTS: 保守推断,只添加高置信度的扩展
|
||||||
|
" --tool gemini --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
});
|
||||||
|
// Parse output directly into refined-requirements.json
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 5: Generate Requirement Confirmation Summary
|
||||||
|
|
||||||
|
在写入文件前,向用户展示最终的需求确认摘要(非 auto mode)。
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (!autoMode) {
|
||||||
|
// Build confirmation summary from requirementState
|
||||||
|
const summary = buildConfirmationSummary(requirementState);
|
||||||
|
|
||||||
|
AskUserQuestion({
|
||||||
|
questions: [
|
||||||
|
{
|
||||||
|
question: `## Requirement Confirmation\n\n${summary}\n\nConfirm and proceed to specification generation?`,
|
||||||
|
header: "Confirm",
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{ label: "Confirm & proceed", description: "Requirements confirmed, start spec generation" },
|
||||||
|
{ label: "Need adjustments", description: "Go back and refine further" }
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
});
|
||||||
|
|
||||||
|
// If "Need adjustments" → loop back to Step 3
|
||||||
|
// If "Confirm & proceed" → continue to Step 6
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 6: Write refined-requirements.json
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const refinedRequirements = {
|
||||||
|
session_id: specConfig.session_id,
|
||||||
|
phase: "1.5",
|
||||||
|
generated_at: new Date().toISOString(),
|
||||||
|
source: autoMode ? "auto-expansion" : "interactive-discussion",
|
||||||
|
discussion_rounds: requirementState.discussion_rounds,
|
||||||
|
|
||||||
|
// Core requirement content
|
||||||
|
clarified_problem_statement: requirementState.problem_statement,
|
||||||
|
confirmed_target_users: requirementState.target_users.map(u =>
|
||||||
|
typeof u === 'string' ? { name: u, needs: [], pain_points: [] } : u
|
||||||
|
),
|
||||||
|
confirmed_domain: requirementState.domain,
|
||||||
|
|
||||||
|
confirmed_features: requirementState.confirmed_features.map(f => ({
|
||||||
|
name: f.name,
|
||||||
|
description: f.description,
|
||||||
|
acceptance_criteria: f.acceptance_criteria || [],
|
||||||
|
edge_cases: f.edge_cases || [],
|
||||||
|
priority: f.priority || "unset"
|
||||||
|
})),
|
||||||
|
|
||||||
|
non_functional_requirements: requirementState.non_functional_requirements.map(nfr => ({
|
||||||
|
type: nfr.type, // Performance, Security, Usability, Scalability, etc.
|
||||||
|
details: nfr.details,
|
||||||
|
measurable_criteria: nfr.measurable_criteria || ""
|
||||||
|
})),
|
||||||
|
|
||||||
|
boundary_conditions: {
|
||||||
|
in_scope: requirementState.boundary_conditions.filter(b => b.scope === 'in'),
|
||||||
|
out_of_scope: requirementState.boundary_conditions.filter(b => b.scope === 'out'),
|
||||||
|
constraints: requirementState.constraints
|
||||||
|
},
|
||||||
|
|
||||||
|
integration_points: requirementState.integration_points,
|
||||||
|
key_assumptions: requirementState.key_assumptions,
|
||||||
|
|
||||||
|
// Traceability
|
||||||
|
discussion_log: autoMode ? [] : discussionLog
|
||||||
|
};
|
||||||
|
|
||||||
|
Write(`${workDir}/refined-requirements.json`, JSON.stringify(refinedRequirements, null, 2));
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 7: Update spec-config.json
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
specConfig.refined_requirements_file = "refined-requirements.json";
|
||||||
|
specConfig.phasesCompleted.push({
|
||||||
|
phase: 1.5,
|
||||||
|
name: "requirement-clarification",
|
||||||
|
output_file: "refined-requirements.json",
|
||||||
|
discussion_rounds: requirementState.discussion_rounds,
|
||||||
|
completed_at: new Date().toISOString()
|
||||||
|
});
|
||||||
|
|
||||||
|
Write(`${workDir}/spec-config.json`, JSON.stringify(specConfig, null, 2));
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output
|
||||||
|
|
||||||
|
- **File**: `refined-requirements.json`
|
||||||
|
- **Format**: JSON
|
||||||
|
- **Updated**: `spec-config.json` (added `refined_requirements_file` field and phase 1.5 to `phasesCompleted`)
|
||||||
|
|
||||||
|
## Quality Checklist
|
||||||
|
|
||||||
|
- [ ] Problem statement refined (>= 30 characters, more specific than seed)
|
||||||
|
- [ ] At least 2 confirmed features with descriptions
|
||||||
|
- [ ] At least 1 non-functional requirement identified
|
||||||
|
- [ ] Boundary conditions defined (in-scope + out-of-scope)
|
||||||
|
- [ ] Key assumptions listed (>= 1)
|
||||||
|
- [ ] Discussion rounds recorded (>= 1 in interactive mode)
|
||||||
|
- [ ] User explicitly confirmed requirements (non-auto mode)
|
||||||
|
- [ ] `refined-requirements.json` written with valid JSON
|
||||||
|
- [ ] `spec-config.json` updated with phase 1.5 completion
|
||||||
|
|
||||||
|
## Next Phase
|
||||||
|
|
||||||
|
Proceed to [Phase 2: Product Brief](02-product-brief.md). Phase 2 should load `refined-requirements.json` as primary input instead of relying solely on `spec-config.json.seed_analysis`.
|
||||||
257
.codex/skills/spec-generator/phases/01-discovery.md
Normal file
257
.codex/skills/spec-generator/phases/01-discovery.md
Normal file
@@ -0,0 +1,257 @@
|
|||||||
|
# Phase 1: Discovery
|
||||||
|
|
||||||
|
Parse input, analyze the seed idea, optionally explore codebase, establish session configuration.
|
||||||
|
|
||||||
|
## Objective
|
||||||
|
|
||||||
|
- Generate session ID and create output directory
|
||||||
|
- Parse user input (text description or file reference)
|
||||||
|
- Analyze seed via Gemini CLI to extract problem space dimensions
|
||||||
|
- Conditionally explore codebase for existing patterns and constraints
|
||||||
|
- Gather user preferences (depth, focus areas) via interactive confirmation
|
||||||
|
- Write `spec-config.json` as the session state file
|
||||||
|
|
||||||
|
## Input
|
||||||
|
|
||||||
|
- Dependency: `$ARGUMENTS` (user input from command)
|
||||||
|
- Flags: `-y` (auto mode), `-c` (continue mode)
|
||||||
|
|
||||||
|
## Execution Steps
|
||||||
|
|
||||||
|
### Step 1: Session Initialization
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Parse arguments
|
||||||
|
const args = $ARGUMENTS;
|
||||||
|
const autoMode = args.includes('-y') || args.includes('--yes');
|
||||||
|
const continueMode = args.includes('-c') || args.includes('--continue');
|
||||||
|
|
||||||
|
// Extract the idea/topic (remove flags)
|
||||||
|
const idea = args.replace(/(-y|--yes|-c|--continue)\s*/g, '').trim();
|
||||||
|
|
||||||
|
// Generate session ID
|
||||||
|
const slug = idea.toLowerCase()
|
||||||
|
.replace(/[^a-z0-9\u4e00-\u9fff]+/g, '-')
|
||||||
|
.replace(/^-|-$/g, '')
|
||||||
|
.slice(0, 40);
|
||||||
|
const date = new Date().toISOString().slice(0, 10);
|
||||||
|
const sessionId = `SPEC-${slug}-${date}`;
|
||||||
|
const workDir = `.workflow/.spec/${sessionId}`;
|
||||||
|
|
||||||
|
// Check for continue mode
|
||||||
|
if (continueMode) {
|
||||||
|
// Find existing session
|
||||||
|
const existingSessions = Glob('.workflow/.spec/SPEC-*/spec-config.json');
|
||||||
|
// If slug matches an existing session, load it and resume
|
||||||
|
// Read spec-config.json, find first incomplete phase, jump to that phase
|
||||||
|
return; // Resume logic handled by orchestrator
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create output directory
|
||||||
|
Bash(`mkdir -p "${workDir}"`);
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Input Parsing
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Determine input type
|
||||||
|
if (idea.startsWith('@') || idea.endsWith('.md') || idea.endsWith('.txt')) {
|
||||||
|
// File reference - read and extract content
|
||||||
|
const filePath = idea.replace(/^@/, '');
|
||||||
|
const fileContent = Read(filePath);
|
||||||
|
// Use file content as the seed
|
||||||
|
inputType = 'file';
|
||||||
|
seedInput = fileContent;
|
||||||
|
} else {
|
||||||
|
// Direct text description
|
||||||
|
inputType = 'text';
|
||||||
|
seedInput = idea;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Seed Analysis via Gemini CLI
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: Analyze this seed idea/requirement to extract structured problem space dimensions.
|
||||||
|
Success: Clear problem statement, target users, domain identification, 3-5 exploration dimensions.
|
||||||
|
|
||||||
|
SEED INPUT:
|
||||||
|
${seedInput}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
- Extract a clear problem statement (what problem does this solve?)
|
||||||
|
- Identify target users (who benefits?)
|
||||||
|
- Determine the domain (technical, business, consumer, etc.)
|
||||||
|
- List constraints (budget, time, technical, regulatory)
|
||||||
|
- Generate 3-5 exploration dimensions (key areas to investigate)
|
||||||
|
- Assess complexity: simple (1-2 components), moderate (3-5 components), complex (6+ components)
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: JSON output with fields: problem_statement, target_users[], domain, constraints[], dimensions[], complexity
|
||||||
|
CONSTRAINTS: Be specific and actionable, not vague
|
||||||
|
" --tool gemini --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
});
|
||||||
|
// Wait for CLI result before continuing
|
||||||
|
```
|
||||||
|
|
||||||
|
Parse the CLI output into structured `seedAnalysis`:
|
||||||
|
```javascript
|
||||||
|
const seedAnalysis = {
|
||||||
|
problem_statement: "...",
|
||||||
|
target_users: ["..."],
|
||||||
|
domain: "...",
|
||||||
|
constraints: ["..."],
|
||||||
|
dimensions: ["..."]
|
||||||
|
};
|
||||||
|
const complexity = "moderate"; // from CLI output
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4: Codebase Exploration (Conditional)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Detect if running inside a project with code
|
||||||
|
const hasCodebase = Glob('**/*.{ts,js,py,java,go,rs}').length > 0
|
||||||
|
|| Glob('package.json').length > 0
|
||||||
|
|| Glob('Cargo.toml').length > 0;
|
||||||
|
|
||||||
|
if (hasCodebase) {
|
||||||
|
Agent({
|
||||||
|
subagent_type: "cli-explore-agent",
|
||||||
|
run_in_background: false,
|
||||||
|
description: `Explore codebase for spec: ${slug}`,
|
||||||
|
prompt: `
|
||||||
|
## Spec Generator Context
|
||||||
|
Topic: ${seedInput}
|
||||||
|
Dimensions: ${seedAnalysis.dimensions.join(', ')}
|
||||||
|
Session: ${workDir}
|
||||||
|
|
||||||
|
## MANDATORY FIRST STEPS
|
||||||
|
1. Search for code related to topic keywords
|
||||||
|
2. Read project config files (package.json, pyproject.toml, etc.) if they exist
|
||||||
|
|
||||||
|
## Exploration Focus
|
||||||
|
- Identify existing implementations related to the topic
|
||||||
|
- Find patterns that could inform architecture decisions
|
||||||
|
- Map current architecture constraints
|
||||||
|
- Locate integration points and dependencies
|
||||||
|
|
||||||
|
## Output
|
||||||
|
Write findings to: ${workDir}/discovery-context.json
|
||||||
|
|
||||||
|
Schema:
|
||||||
|
{
|
||||||
|
"relevant_files": [{"path": "...", "relevance": "high|medium|low", "rationale": "..."}],
|
||||||
|
"existing_patterns": ["pattern descriptions"],
|
||||||
|
"architecture_constraints": ["constraint descriptions"],
|
||||||
|
"integration_points": ["integration point descriptions"],
|
||||||
|
"tech_stack": {"languages": [], "frameworks": [], "databases": []},
|
||||||
|
"_metadata": { "exploration_type": "spec-discovery", "timestamp": "ISO8601" }
|
||||||
|
}
|
||||||
|
`
|
||||||
|
});
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 5: User Confirmation (Interactive)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (!autoMode) {
|
||||||
|
// Confirm problem statement and select depth
|
||||||
|
AskUserQuestion({
|
||||||
|
questions: [
|
||||||
|
{
|
||||||
|
question: `Problem statement: "${seedAnalysis.problem_statement}" - Is this accurate?`,
|
||||||
|
header: "Problem",
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{ label: "Accurate", description: "Proceed with this problem statement" },
|
||||||
|
{ label: "Needs adjustment", description: "I'll refine the problem statement" }
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
question: "What specification depth do you need?",
|
||||||
|
header: "Depth",
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{ label: "Light", description: "Quick overview - key decisions only" },
|
||||||
|
{ label: "Standard (Recommended)", description: "Balanced detail for most projects" },
|
||||||
|
{ label: "Comprehensive", description: "Maximum detail for complex/critical projects" }
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
question: "Which areas should we focus on?",
|
||||||
|
header: "Focus",
|
||||||
|
multiSelect: true,
|
||||||
|
options: seedAnalysis.dimensions.map(d => ({ label: d, description: `Explore ${d} in depth` }))
|
||||||
|
},
|
||||||
|
{
|
||||||
|
question: "What type of specification is this?",
|
||||||
|
header: "Spec Type",
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{ label: "Service (Recommended)", description: "Long-running service with lifecycle, state machine, observability" },
|
||||||
|
{ label: "API", description: "REST/GraphQL API with endpoints, auth, rate limiting" },
|
||||||
|
{ label: "Library/SDK", description: "Reusable package with public API surface, examples" },
|
||||||
|
{ label: "Platform", description: "Multi-component system, uses Service profile" }
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
// Auto mode defaults
|
||||||
|
depth = "standard";
|
||||||
|
focusAreas = seedAnalysis.dimensions;
|
||||||
|
specType = "service"; // default for auto mode
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 6: Write spec-config.json
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const specConfig = {
|
||||||
|
session_id: sessionId,
|
||||||
|
seed_input: seedInput,
|
||||||
|
input_type: inputType,
|
||||||
|
timestamp: new Date().toISOString(),
|
||||||
|
mode: autoMode ? "auto" : "interactive",
|
||||||
|
complexity: complexity,
|
||||||
|
depth: depth,
|
||||||
|
focus_areas: focusAreas,
|
||||||
|
seed_analysis: seedAnalysis,
|
||||||
|
has_codebase: hasCodebase,
|
||||||
|
spec_type: specType, // "service" | "api" | "library" | "platform"
|
||||||
|
iteration_count: 0,
|
||||||
|
iteration_history: [],
|
||||||
|
phasesCompleted: [
|
||||||
|
{
|
||||||
|
phase: 1,
|
||||||
|
name: "discovery",
|
||||||
|
output_file: "spec-config.json",
|
||||||
|
completed_at: new Date().toISOString()
|
||||||
|
}
|
||||||
|
]
|
||||||
|
};
|
||||||
|
|
||||||
|
Write(`${workDir}/spec-config.json`, JSON.stringify(specConfig, null, 2));
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output
|
||||||
|
|
||||||
|
- **File**: `spec-config.json`
|
||||||
|
- **File**: `discovery-context.json` (optional, if codebase detected)
|
||||||
|
- **Format**: JSON
|
||||||
|
|
||||||
|
## Quality Checklist
|
||||||
|
|
||||||
|
- [ ] Session ID matches `SPEC-{slug}-{date}` format
|
||||||
|
- [ ] Problem statement exists and is >= 20 characters
|
||||||
|
- [ ] Target users identified (>= 1)
|
||||||
|
- [ ] 3-5 exploration dimensions generated
|
||||||
|
- [ ] spec-config.json written with all required fields
|
||||||
|
- [ ] Output directory created
|
||||||
|
|
||||||
|
## Next Phase
|
||||||
|
|
||||||
|
Proceed to [Phase 2: Product Brief](02-product-brief.md) with the generated spec-config.json.
|
||||||
298
.codex/skills/spec-generator/phases/02-product-brief.md
Normal file
298
.codex/skills/spec-generator/phases/02-product-brief.md
Normal file
@@ -0,0 +1,298 @@
|
|||||||
|
# Phase 2: Product Brief
|
||||||
|
|
||||||
|
> **Execution Mode: Agent Delegated**
|
||||||
|
> This phase is executed by a `doc-generator` agent. The orchestrator (SKILL.md) passes session context via the Task tool. The agent reads this file for instructions, executes all steps, writes output files, and returns a JSON summary.
|
||||||
|
|
||||||
|
Generate a product brief through multi-perspective CLI analysis, establishing "what" and "why".
|
||||||
|
|
||||||
|
## Objective
|
||||||
|
|
||||||
|
- Read Phase 1 outputs (spec-config.json, discovery-context.json)
|
||||||
|
- Launch 3 parallel CLI analyses from product, technical, and user perspectives
|
||||||
|
- Synthesize convergent themes and conflicting views
|
||||||
|
- Optionally refine with user input
|
||||||
|
- Generate product-brief.md using template
|
||||||
|
|
||||||
|
## Input
|
||||||
|
|
||||||
|
- Dependency: `{workDir}/spec-config.json`
|
||||||
|
- Primary: `{workDir}/refined-requirements.json` (Phase 1.5 output, preferred over raw seed_analysis)
|
||||||
|
- Optional: `{workDir}/discovery-context.json`
|
||||||
|
- Config: `{workDir}/spec-config.json`
|
||||||
|
- Template: `templates/product-brief.md`
|
||||||
|
|
||||||
|
## Execution Steps
|
||||||
|
|
||||||
|
### Step 1: Load Phase 1 Context
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const specConfig = JSON.parse(Read(`${workDir}/spec-config.json`));
|
||||||
|
const { seed_analysis, seed_input, has_codebase, depth, focus_areas } = specConfig;
|
||||||
|
|
||||||
|
// Load refined requirements (Phase 1.5 output) - preferred over raw seed_analysis
|
||||||
|
let refinedReqs = null;
|
||||||
|
try {
|
||||||
|
refinedReqs = JSON.parse(Read(`${workDir}/refined-requirements.json`));
|
||||||
|
} catch (e) {
|
||||||
|
// No refined requirements, fall back to seed_analysis
|
||||||
|
}
|
||||||
|
|
||||||
|
let discoveryContext = null;
|
||||||
|
if (has_codebase) {
|
||||||
|
try {
|
||||||
|
discoveryContext = JSON.parse(Read(`${workDir}/discovery-context.json`));
|
||||||
|
} catch (e) {
|
||||||
|
// No discovery context available, proceed without
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build shared context string for CLI prompts
|
||||||
|
// Prefer refined requirements over raw seed_analysis
|
||||||
|
const problem = refinedReqs?.clarified_problem_statement || seed_analysis.problem_statement;
|
||||||
|
const users = refinedReqs?.confirmed_target_users?.map(u => u.name || u).join(', ')
|
||||||
|
|| seed_analysis.target_users.join(', ');
|
||||||
|
const domain = refinedReqs?.confirmed_domain || seed_analysis.domain;
|
||||||
|
const constraints = refinedReqs?.boundary_conditions?.constraints?.join(', ')
|
||||||
|
|| seed_analysis.constraints.join(', ');
|
||||||
|
const features = refinedReqs?.confirmed_features?.map(f => f.name).join(', ') || '';
|
||||||
|
const nfrs = refinedReqs?.non_functional_requirements?.map(n => `${n.type}: ${n.details}`).join('; ') || '';
|
||||||
|
|
||||||
|
const sharedContext = `
|
||||||
|
SEED: ${seed_input}
|
||||||
|
PROBLEM: ${problem}
|
||||||
|
TARGET USERS: ${users}
|
||||||
|
DOMAIN: ${domain}
|
||||||
|
CONSTRAINTS: ${constraints}
|
||||||
|
FOCUS AREAS: ${focus_areas.join(', ')}
|
||||||
|
${features ? `CONFIRMED FEATURES: ${features}` : ''}
|
||||||
|
${nfrs ? `NON-FUNCTIONAL REQUIREMENTS: ${nfrs}` : ''}
|
||||||
|
${discoveryContext ? `
|
||||||
|
CODEBASE CONTEXT:
|
||||||
|
- Existing patterns: ${discoveryContext.existing_patterns?.slice(0,5).join(', ') || 'none'}
|
||||||
|
- Architecture constraints: ${discoveryContext.architecture_constraints?.slice(0,3).join(', ') || 'none'}
|
||||||
|
- Tech stack: ${JSON.stringify(discoveryContext.tech_stack || {})}
|
||||||
|
` : ''}`;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Multi-CLI Parallel Analysis (3 perspectives)
|
||||||
|
|
||||||
|
Launch 3 CLI calls in parallel:
|
||||||
|
|
||||||
|
**Product Perspective (Gemini)**:
|
||||||
|
```javascript
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: Product analysis for specification - identify market fit, user value, and success criteria.
|
||||||
|
Success: Clear vision, measurable goals, competitive positioning.
|
||||||
|
|
||||||
|
${sharedContext}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
- Define product vision (1-3 sentences, aspirational)
|
||||||
|
- Analyze market/competitive landscape
|
||||||
|
- Define 3-5 measurable success metrics
|
||||||
|
- Identify scope boundaries (in-scope vs out-of-scope)
|
||||||
|
- Assess user value proposition
|
||||||
|
- List assumptions that need validation
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: Structured product analysis with: vision, goals with metrics, scope, competitive positioning, assumptions
|
||||||
|
CONSTRAINTS: Focus on 'what' and 'why', not 'how'
|
||||||
|
" --tool gemini --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
**Technical Perspective (Codex)**:
|
||||||
|
```javascript
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: Technical feasibility analysis for specification - assess implementation viability and constraints.
|
||||||
|
Success: Clear technical constraints, integration complexity, technology recommendations.
|
||||||
|
|
||||||
|
${sharedContext}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
- Assess technical feasibility of the core concept
|
||||||
|
- Identify technical constraints and blockers
|
||||||
|
- Evaluate integration complexity with existing systems
|
||||||
|
- Recommend technology approach (high-level)
|
||||||
|
- Identify technical risks and dependencies
|
||||||
|
- Estimate complexity: simple/moderate/complex
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: Technical analysis with: feasibility assessment, constraints, integration complexity, tech recommendations, risks
|
||||||
|
CONSTRAINTS: Focus on feasibility and constraints, not detailed architecture
|
||||||
|
" --tool codex --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
**User Perspective (Claude)**:
|
||||||
|
```javascript
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: User experience analysis for specification - understand user journeys, pain points, and UX considerations.
|
||||||
|
Success: Clear user personas, journey maps, UX requirements.
|
||||||
|
|
||||||
|
${sharedContext}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
- Elaborate user personas with goals and frustrations
|
||||||
|
- Map primary user journey (happy path)
|
||||||
|
- Identify key pain points in current experience
|
||||||
|
- Define UX success criteria
|
||||||
|
- List accessibility and usability considerations
|
||||||
|
- Suggest interaction patterns
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: User analysis with: personas, journey map, pain points, UX criteria, interaction recommendations
|
||||||
|
CONSTRAINTS: Focus on user needs and experience, not implementation
|
||||||
|
" --tool claude --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
});
|
||||||
|
|
||||||
|
// STOP: Wait for all 3 CLI results before continuing
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Synthesize Perspectives
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// After receiving all 3 CLI results:
|
||||||
|
// Extract convergent themes (all agree)
|
||||||
|
// Identify conflicting views (need resolution)
|
||||||
|
// Note unique contributions from each perspective
|
||||||
|
|
||||||
|
const synthesis = {
|
||||||
|
convergent_themes: [], // themes all 3 perspectives agree on
|
||||||
|
conflicts: [], // areas where perspectives differ
|
||||||
|
product_insights: [], // unique from product perspective
|
||||||
|
technical_insights: [], // unique from technical perspective
|
||||||
|
user_insights: [] // unique from user perspective
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4: Interactive Refinement (Optional)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (!autoMode) {
|
||||||
|
// Present synthesis summary to user
|
||||||
|
// AskUserQuestion with:
|
||||||
|
// - Confirm vision statement
|
||||||
|
// - Resolve any conflicts between perspectives
|
||||||
|
// - Adjust scope if needed
|
||||||
|
AskUserQuestion({
|
||||||
|
questions: [
|
||||||
|
{
|
||||||
|
question: "Review the synthesized product brief. Any adjustments needed?",
|
||||||
|
header: "Review",
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{ label: "Looks good", description: "Proceed to PRD generation" },
|
||||||
|
{ label: "Adjust scope", description: "Narrow or expand the scope" },
|
||||||
|
{ label: "Revise vision", description: "Refine the vision statement" }
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
});
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 5: Generate product-brief.md
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Read template
|
||||||
|
const template = Read('templates/product-brief.md');
|
||||||
|
|
||||||
|
// Fill template with synthesized content
|
||||||
|
// Apply document-standards.md formatting rules
|
||||||
|
// Write with YAML frontmatter
|
||||||
|
|
||||||
|
const frontmatter = `---
|
||||||
|
session_id: ${specConfig.session_id}
|
||||||
|
phase: 2
|
||||||
|
document_type: product-brief
|
||||||
|
status: ${autoMode ? 'complete' : 'draft'}
|
||||||
|
generated_at: ${new Date().toISOString()}
|
||||||
|
stepsCompleted: ["load-context", "multi-cli-analysis", "synthesis", "generation"]
|
||||||
|
version: 1
|
||||||
|
dependencies:
|
||||||
|
- spec-config.json
|
||||||
|
---`;
|
||||||
|
|
||||||
|
// Combine frontmatter + filled template content
|
||||||
|
Write(`${workDir}/product-brief.md`, `${frontmatter}\n\n${filledContent}`);
|
||||||
|
|
||||||
|
// Update spec-config.json
|
||||||
|
specConfig.phasesCompleted.push({
|
||||||
|
phase: 2,
|
||||||
|
name: "product-brief",
|
||||||
|
output_file: "product-brief.md",
|
||||||
|
completed_at: new Date().toISOString()
|
||||||
|
});
|
||||||
|
Write(`${workDir}/spec-config.json`, JSON.stringify(specConfig, null, 2));
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 5.5: Generate glossary.json
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Extract terminology from product brief and CLI analysis
|
||||||
|
// Generate structured glossary for cross-document consistency
|
||||||
|
|
||||||
|
const glossary = {
|
||||||
|
session_id: specConfig.session_id,
|
||||||
|
terms: [
|
||||||
|
// Extract from product brief content:
|
||||||
|
// - Key domain nouns from problem statement
|
||||||
|
// - User persona names
|
||||||
|
// - Technical terms from multi-perspective synthesis
|
||||||
|
// Each term should have:
|
||||||
|
// { term: "...", definition: "...", aliases: [], first_defined_in: "product-brief.md", category: "core|technical|business" }
|
||||||
|
]
|
||||||
|
};
|
||||||
|
|
||||||
|
Write(`${workDir}/glossary.json`, JSON.stringify(glossary, null, 2));
|
||||||
|
```
|
||||||
|
|
||||||
|
**Glossary Injection**: In all subsequent phase prompts, inject the following into the CONTEXT section:
|
||||||
|
```
|
||||||
|
TERMINOLOGY GLOSSARY (use these terms consistently):
|
||||||
|
${JSON.stringify(glossary.terms, null, 2)}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output
|
||||||
|
|
||||||
|
- **File**: `product-brief.md`
|
||||||
|
- **Format**: Markdown with YAML frontmatter
|
||||||
|
|
||||||
|
## Quality Checklist
|
||||||
|
|
||||||
|
- [ ] Vision statement: clear, 1-3 sentences
|
||||||
|
- [ ] Problem statement: specific and measurable
|
||||||
|
- [ ] Target users: >= 1 persona with needs
|
||||||
|
- [ ] Goals: >= 2 with measurable metrics
|
||||||
|
- [ ] Scope: in-scope and out-of-scope defined
|
||||||
|
- [ ] Multi-perspective synthesis included
|
||||||
|
- [ ] YAML frontmatter valid
|
||||||
|
|
||||||
|
## Next Phase
|
||||||
|
|
||||||
|
Proceed to [Phase 3: Requirements](03-requirements.md) with the generated product-brief.md.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Agent Return Summary
|
||||||
|
|
||||||
|
When executed as a delegated agent, return the following JSON summary to the orchestrator:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"phase": 2,
|
||||||
|
"status": "complete",
|
||||||
|
"files_created": ["product-brief.md", "glossary.json"],
|
||||||
|
"quality_notes": ["list of any quality concerns or deviations"],
|
||||||
|
"key_decisions": ["list of significant synthesis decisions made"]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The orchestrator will:
|
||||||
|
1. Validate that listed files exist on disk
|
||||||
|
2. Read `spec-config.json` to confirm `phasesCompleted` was updated
|
||||||
|
3. Store the summary for downstream phase context
|
||||||
248
.codex/skills/spec-generator/phases/03-requirements.md
Normal file
248
.codex/skills/spec-generator/phases/03-requirements.md
Normal file
@@ -0,0 +1,248 @@
|
|||||||
|
# Phase 3: Requirements (PRD)
|
||||||
|
|
||||||
|
> **Execution Mode: Agent Delegated**
|
||||||
|
> This phase is executed by a `doc-generator` agent. The orchestrator (SKILL.md) passes session context via the Task tool. The agent reads this file for instructions, executes all steps, writes output files, and returns a JSON summary.
|
||||||
|
|
||||||
|
Generate a detailed Product Requirements Document with functional/non-functional requirements, acceptance criteria, and MoSCoW prioritization.
|
||||||
|
|
||||||
|
## Objective
|
||||||
|
|
||||||
|
- Read product-brief.md and extract goals, scope, constraints
|
||||||
|
- Expand each goal into functional requirements with acceptance criteria
|
||||||
|
- Generate non-functional requirements
|
||||||
|
- Apply MoSCoW priority labels (user input or auto)
|
||||||
|
- Generate requirements.md using template
|
||||||
|
|
||||||
|
## Input
|
||||||
|
|
||||||
|
- Dependency: `{workDir}/product-brief.md`
|
||||||
|
- Config: `{workDir}/spec-config.json`
|
||||||
|
- Template: `templates/requirements-prd.md` (directory structure: `_index.md` + `REQ-*.md` + `NFR-*.md`)
|
||||||
|
|
||||||
|
## Execution Steps
|
||||||
|
|
||||||
|
### Step 1: Load Phase 2 Context
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const specConfig = JSON.parse(Read(`${workDir}/spec-config.json`));
|
||||||
|
const productBrief = Read(`${workDir}/product-brief.md`);
|
||||||
|
|
||||||
|
// Extract key sections from product brief
|
||||||
|
// - Goals & Success Metrics table
|
||||||
|
// - Scope (in-scope items)
|
||||||
|
// - Target Users (personas)
|
||||||
|
// - Constraints
|
||||||
|
// - Technical perspective insights
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Requirements Expansion via Gemini CLI
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: Generate detailed functional and non-functional requirements from product brief.
|
||||||
|
Success: Complete PRD with testable acceptance criteria for every requirement.
|
||||||
|
|
||||||
|
PRODUCT BRIEF CONTEXT:
|
||||||
|
${productBrief}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
- For each goal in the product brief, generate 3-7 functional requirements
|
||||||
|
- Each requirement must have:
|
||||||
|
- Unique ID: REQ-NNN (zero-padded)
|
||||||
|
- Clear title
|
||||||
|
- Detailed description
|
||||||
|
- User story: As a [persona], I want [action] so that [benefit]
|
||||||
|
- 2-4 specific, testable acceptance criteria
|
||||||
|
- Generate non-functional requirements:
|
||||||
|
- Performance (response times, throughput)
|
||||||
|
- Security (authentication, authorization, data protection)
|
||||||
|
- Scalability (user load, data volume)
|
||||||
|
- Usability (accessibility, learnability)
|
||||||
|
- Assign initial MoSCoW priority based on:
|
||||||
|
- Must: Core functionality, cannot launch without
|
||||||
|
- Should: Important but has workaround
|
||||||
|
- Could: Nice-to-have, enhances experience
|
||||||
|
- Won't: Explicitly deferred
|
||||||
|
- Use RFC 2119 keywords (MUST, SHOULD, MAY, MUST NOT, SHOULD NOT) to define behavioral constraints for each requirement. Example: 'The system MUST return a 401 response within 100ms for invalid tokens.'
|
||||||
|
- For each core domain entity referenced in requirements, define its data model: fields, types, constraints, and relationships to other entities
|
||||||
|
- Maintain terminology consistency with the glossary below:
|
||||||
|
TERMINOLOGY GLOSSARY:
|
||||||
|
\${glossary ? JSON.stringify(glossary.terms, null, 2) : 'N/A - generate terms inline'}
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: Structured requirements with: ID, title, description, user story, acceptance criteria, priority, traceability to goals
|
||||||
|
CONSTRAINTS: Every requirement must be specific enough to estimate and test. No vague requirements like 'system should be fast'.
|
||||||
|
" --tool gemini --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
});
|
||||||
|
|
||||||
|
// Wait for CLI result
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2.5: Codex Requirements Review
|
||||||
|
|
||||||
|
After receiving Gemini expansion results, validate requirements quality via Codex CLI before proceeding:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: Critical review of generated requirements - validate quality, testability, and scope alignment.
|
||||||
|
Success: Actionable feedback on requirement quality with specific issues identified.
|
||||||
|
|
||||||
|
GENERATED REQUIREMENTS:
|
||||||
|
${geminiRequirementsOutput.slice(0, 5000)}
|
||||||
|
|
||||||
|
PRODUCT BRIEF SCOPE:
|
||||||
|
${productBrief.slice(0, 2000)}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
- Verify every acceptance criterion is specific, measurable, and testable (not vague like 'should be fast')
|
||||||
|
- Validate RFC 2119 keyword usage: MUST/SHOULD/MAY used correctly per RFC 2119 semantics
|
||||||
|
- Check scope containment: no requirement exceeds the product brief's defined scope boundaries
|
||||||
|
- Assess data model completeness: all referenced entities have field-level definitions
|
||||||
|
- Identify duplicate or overlapping requirements
|
||||||
|
- Rate overall requirements quality: 1-5 with justification
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: Requirements review with: per-requirement feedback, testability assessment, scope violations, data model gaps, quality rating
|
||||||
|
CONSTRAINTS: Be genuinely critical. Focus on requirements that would block implementation if left vague.
|
||||||
|
" --tool codex --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
});
|
||||||
|
|
||||||
|
// Wait for Codex review result
|
||||||
|
// Integrate feedback into requirements before writing files:
|
||||||
|
// - Fix vague acceptance criteria flagged by Codex
|
||||||
|
// - Correct RFC 2119 keyword misuse
|
||||||
|
// - Remove or flag requirements that exceed brief scope
|
||||||
|
// - Fill data model gaps identified by Codex
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: User Priority Sorting (Interactive)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (!autoMode) {
|
||||||
|
// Present requirements grouped by initial priority
|
||||||
|
// Allow user to adjust MoSCoW labels
|
||||||
|
AskUserQuestion({
|
||||||
|
questions: [
|
||||||
|
{
|
||||||
|
question: "Review the Must-Have requirements. Any that should be reprioritized?",
|
||||||
|
header: "Must-Have",
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{ label: "All correct", description: "Must-have requirements are accurate" },
|
||||||
|
{ label: "Too many", description: "Some should be Should/Could" },
|
||||||
|
{ label: "Missing items", description: "Some Should requirements should be Must" }
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
question: "What is the target MVP scope?",
|
||||||
|
header: "MVP Scope",
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{ label: "Must-Have only (Recommended)", description: "MVP includes only Must requirements" },
|
||||||
|
{ label: "Must + key Should", description: "Include critical Should items in MVP" },
|
||||||
|
{ label: "Comprehensive", description: "Include all Must and Should" }
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
});
|
||||||
|
// Apply user adjustments to priorities
|
||||||
|
} else {
|
||||||
|
// Auto mode: accept CLI-suggested priorities as-is
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4: Generate requirements/ directory
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Read template
|
||||||
|
const template = Read('templates/requirements-prd.md');
|
||||||
|
|
||||||
|
// Create requirements directory
|
||||||
|
Bash(`mkdir -p "${workDir}/requirements"`);
|
||||||
|
|
||||||
|
const status = autoMode ? 'complete' : 'draft';
|
||||||
|
const timestamp = new Date().toISOString();
|
||||||
|
|
||||||
|
// Parse CLI output into structured requirements
|
||||||
|
const funcReqs = parseFunctionalRequirements(cliOutput); // [{id, slug, title, priority, ...}]
|
||||||
|
const nfReqs = parseNonFunctionalRequirements(cliOutput); // [{id, type, slug, title, ...}]
|
||||||
|
|
||||||
|
// Step 4a: Write individual REQ-*.md files (one per functional requirement)
|
||||||
|
funcReqs.forEach(req => {
|
||||||
|
// Use REQ-NNN-{slug}.md template from templates/requirements-prd.md
|
||||||
|
// Fill: id, title, priority, description, user_story, acceptance_criteria, traces
|
||||||
|
Write(`${workDir}/requirements/REQ-${req.id}-${req.slug}.md`, reqContent);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Step 4b: Write individual NFR-*.md files (one per non-functional requirement)
|
||||||
|
nfReqs.forEach(nfr => {
|
||||||
|
// Use NFR-{type}-NNN-{slug}.md template from templates/requirements-prd.md
|
||||||
|
// Fill: id, type, category, title, requirement, metric, target, traces
|
||||||
|
Write(`${workDir}/requirements/NFR-${nfr.type}-${nfr.id}-${nfr.slug}.md`, nfrContent);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Step 4c: Write _index.md (summary + links to all individual files)
|
||||||
|
// Use _index.md template from templates/requirements-prd.md
|
||||||
|
// Fill: summary table, functional req links table, NFR links tables,
|
||||||
|
// data requirements, integration requirements, traceability matrix
|
||||||
|
Write(`${workDir}/requirements/_index.md`, indexContent);
|
||||||
|
|
||||||
|
// Update spec-config.json
|
||||||
|
specConfig.phasesCompleted.push({
|
||||||
|
phase: 3,
|
||||||
|
name: "requirements",
|
||||||
|
output_dir: "requirements/",
|
||||||
|
output_index: "requirements/_index.md",
|
||||||
|
file_count: funcReqs.length + nfReqs.length + 1,
|
||||||
|
completed_at: timestamp
|
||||||
|
});
|
||||||
|
Write(`${workDir}/spec-config.json`, JSON.stringify(specConfig, null, 2));
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output
|
||||||
|
|
||||||
|
- **Directory**: `requirements/`
|
||||||
|
- `_index.md` — Summary, MoSCoW table, traceability matrix, links
|
||||||
|
- `REQ-NNN-{slug}.md` — Individual functional requirement (per requirement)
|
||||||
|
- `NFR-{type}-NNN-{slug}.md` — Individual non-functional requirement (per NFR)
|
||||||
|
- **Format**: Markdown with YAML frontmatter, cross-linked via relative paths
|
||||||
|
|
||||||
|
## Quality Checklist
|
||||||
|
|
||||||
|
- [ ] Functional requirements: >= 3 with REQ-NNN IDs, each in own file
|
||||||
|
- [ ] Every requirement file has >= 1 acceptance criterion
|
||||||
|
- [ ] Every requirement has MoSCoW priority tag in frontmatter
|
||||||
|
- [ ] Non-functional requirements: >= 1, each in own file
|
||||||
|
- [ ] User stories present for Must-have requirements
|
||||||
|
- [ ] `_index.md` links to all individual requirement files
|
||||||
|
- [ ] Traceability links to product-brief.md goals
|
||||||
|
- [ ] All files have valid YAML frontmatter
|
||||||
|
|
||||||
|
## Next Phase
|
||||||
|
|
||||||
|
Proceed to [Phase 4: Architecture](04-architecture.md) with the generated `requirements/` directory (`requirements/_index.md` plus individual requirement files).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Agent Return Summary
|
||||||
|
|
||||||
|
When executed as a delegated agent, return the following JSON summary to the orchestrator:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"phase": 3,
|
||||||
|
"status": "complete",
|
||||||
|
"files_created": ["requirements/_index.md", "requirements/REQ-001-*.md", "..."],
|
||||||
|
"file_count": 0,
|
||||||
|
"codex_review_integrated": true,
|
||||||
|
"quality_notes": ["list of quality concerns or Codex feedback items addressed"],
|
||||||
|
"key_decisions": ["MoSCoW priority rationale", "scope adjustments from Codex review"]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The orchestrator will:
|
||||||
|
1. Validate that `requirements/` directory exists with `_index.md` and individual files
|
||||||
|
2. Read `spec-config.json` to confirm `phasesCompleted` was updated
|
||||||
|
3. Store the summary for downstream phase context
|
||||||
274
.codex/skills/spec-generator/phases/04-architecture.md
Normal file
274
.codex/skills/spec-generator/phases/04-architecture.md
Normal file
@@ -0,0 +1,274 @@
|
|||||||
|
# Phase 4: Architecture
|
||||||
|
|
||||||
|
> **Execution Mode: Agent Delegated**
|
||||||
|
> This phase is executed by a `doc-generator` agent. The orchestrator (SKILL.md) passes session context via the Task tool. The agent reads this file for instructions, executes all steps, writes output files, and returns a JSON summary.
|
||||||
|
|
||||||
|
Generate technical architecture decisions, component design, and technology selections based on requirements.
|
||||||
|
|
||||||
|
## Objective
|
||||||
|
|
||||||
|
- Analyze requirements to identify core components and system architecture
|
||||||
|
- Generate Architecture Decision Records (ADRs) with alternatives
|
||||||
|
- Map architecture to existing codebase (if applicable)
|
||||||
|
- Challenge architecture via Codex CLI review
|
||||||
|
- Generate architecture.md using template
|
||||||
|
|
||||||
|
## Input
|
||||||
|
|
||||||
|
- Dependency: `{workDir}/requirements/_index.md` (and individual `REQ-*.md` files)
|
||||||
|
- Reference: `{workDir}/product-brief.md`
|
||||||
|
- Optional: `{workDir}/discovery-context.json`
|
||||||
|
- Config: `{workDir}/spec-config.json`
|
||||||
|
- Template: `templates/architecture-doc.md`
|
||||||
|
|
||||||
|
## Execution Steps
|
||||||
|
|
||||||
|
### Step 1: Load Phase 2-3 Context
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const specConfig = JSON.parse(Read(`${workDir}/spec-config.json`));
|
||||||
|
const productBrief = Read(`${workDir}/product-brief.md`);
|
||||||
|
const requirements = Read(`${workDir}/requirements/_index.md`);
|
||||||
|
|
||||||
|
let discoveryContext = null;
|
||||||
|
if (specConfig.has_codebase) {
|
||||||
|
try {
|
||||||
|
discoveryContext = JSON.parse(Read(`${workDir}/discovery-context.json`));
|
||||||
|
} catch (e) { /* no context */ }
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load glossary for terminology consistency
|
||||||
|
let glossary = null;
|
||||||
|
try {
|
||||||
|
glossary = JSON.parse(Read(`${workDir}/glossary.json`));
|
||||||
|
} catch (e) { /* proceed without */ }
|
||||||
|
|
||||||
|
// Load spec type profile for specialized sections
|
||||||
|
const specType = specConfig.spec_type || 'service';
|
||||||
|
let profile = null;
|
||||||
|
try {
|
||||||
|
profile = Read(`templates/profiles/${specType}-profile.md`);
|
||||||
|
} catch (e) { /* use base template only */ }
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Architecture Analysis via Gemini CLI
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: Generate technical architecture for the specified requirements.
|
||||||
|
Success: Complete component architecture, tech stack, and ADRs with justified decisions.
|
||||||
|
|
||||||
|
PRODUCT BRIEF (summary):
|
||||||
|
${productBrief.slice(0, 3000)}
|
||||||
|
|
||||||
|
REQUIREMENTS:
|
||||||
|
${requirements.slice(0, 5000)}
|
||||||
|
|
||||||
|
${discoveryContext ? `EXISTING CODEBASE:
|
||||||
|
- Tech stack: ${JSON.stringify(discoveryContext.tech_stack || {})}
|
||||||
|
- Existing patterns: ${discoveryContext.existing_patterns?.slice(0,5).join('; ') || 'none'}
|
||||||
|
- Architecture constraints: ${discoveryContext.architecture_constraints?.slice(0,3).join('; ') || 'none'}
|
||||||
|
` : ''}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
- Define system architecture style (monolith, microservices, serverless, etc.) with justification
|
||||||
|
- Identify core components and their responsibilities
|
||||||
|
- Create component interaction diagram (Mermaid graph TD format)
|
||||||
|
- Specify technology stack: languages, frameworks, databases, infrastructure
|
||||||
|
- Generate 2-4 Architecture Decision Records (ADRs):
|
||||||
|
- Each ADR: context, decision, 2-3 alternatives with pros/cons, consequences
|
||||||
|
- Focus on: data storage, API design, authentication, key technical choices
|
||||||
|
- Define data model: key entities and relationships (Mermaid erDiagram format)
|
||||||
|
- Identify security architecture: auth, authorization, data protection
|
||||||
|
- List API endpoints (high-level)
|
||||||
|
${discoveryContext ? '- Map new components to existing codebase modules' : ''}
|
||||||
|
- For each core entity with a lifecycle, create an ASCII state machine diagram showing:
|
||||||
|
- All states and transitions
|
||||||
|
- Trigger events for each transition
|
||||||
|
- Side effects of transitions
|
||||||
|
- Error states and recovery paths
|
||||||
|
- Define a Configuration Model: list all configurable fields with name, type, default value, constraint, and description
|
||||||
|
- Define Error Handling strategy:
|
||||||
|
- Classify errors (transient/permanent/degraded)
|
||||||
|
- Per-component error behavior using RFC 2119 keywords
|
||||||
|
- Recovery mechanisms
|
||||||
|
- Define Observability requirements:
|
||||||
|
- Key metrics (name, type: counter/gauge/histogram, labels)
|
||||||
|
- Structured log format and key log events
|
||||||
|
- Health check endpoints
|
||||||
|
\${profile ? \`
|
||||||
|
SPEC TYPE PROFILE REQUIREMENTS (\${specType}):
|
||||||
|
\${profile}
|
||||||
|
\` : ''}
|
||||||
|
\${glossary ? \`
|
||||||
|
TERMINOLOGY GLOSSARY (use consistently):
|
||||||
|
\${JSON.stringify(glossary.terms, null, 2)}
|
||||||
|
\` : ''}
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: Complete architecture with: style justification, component diagram, tech stack table, ADRs, data model, security controls, API overview
|
||||||
|
CONSTRAINTS: Architecture must support all Must-have requirements. Prefer proven technologies over cutting-edge.
|
||||||
|
" --tool gemini --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
});
|
||||||
|
|
||||||
|
// Wait for CLI result
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Architecture Review via Codex CLI
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// After receiving Gemini analysis, challenge it with Codex
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: Critical review of proposed architecture - identify weaknesses and risks.
|
||||||
|
Success: Actionable feedback with specific concerns and improvement suggestions.
|
||||||
|
|
||||||
|
PROPOSED ARCHITECTURE:
|
||||||
|
${geminiArchitectureOutput.slice(0, 5000)}
|
||||||
|
|
||||||
|
REQUIREMENTS CONTEXT:
|
||||||
|
${requirements.slice(0, 2000)}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
- Challenge each ADR: are the alternatives truly the best options?
|
||||||
|
- Identify scalability bottlenecks in the component design
|
||||||
|
- Assess security gaps: authentication, authorization, data protection
|
||||||
|
- Evaluate technology choices: maturity, community support, fit
|
||||||
|
- Check for over-engineering or under-engineering
|
||||||
|
- Verify architecture covers all Must-have requirements
|
||||||
|
- Rate overall architecture quality: 1-5 with justification
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: Architecture review with: per-ADR feedback, scalability concerns, security gaps, technology risks, quality rating
|
||||||
|
CONSTRAINTS: Be genuinely critical, not just validating. Focus on actionable improvements.
|
||||||
|
" --tool codex --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
});
|
||||||
|
|
||||||
|
// Wait for CLI result
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4: Interactive ADR Decisions (Optional)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (!autoMode) {
|
||||||
|
// Present ADRs with review feedback to user
|
||||||
|
// For each ADR where review raised concerns:
|
||||||
|
AskUserQuestion({
|
||||||
|
questions: [
|
||||||
|
{
|
||||||
|
question: "Architecture review raised concerns. How should we proceed?",
|
||||||
|
header: "ADR Review",
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{ label: "Accept as-is", description: "Architecture is sound, proceed" },
|
||||||
|
{ label: "Incorporate feedback", description: "Adjust ADRs based on review" },
|
||||||
|
{ label: "Simplify", description: "Reduce complexity, fewer components" }
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
});
|
||||||
|
// Apply user decisions to architecture
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 5: Codebase Integration Mapping (Conditional)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (specConfig.has_codebase && discoveryContext) {
|
||||||
|
// Map new architecture components to existing code
|
||||||
|
const integrationMapping = discoveryContext.relevant_files.map(f => ({
|
||||||
|
new_component: "...", // matched from architecture
|
||||||
|
existing_module: f.path,
|
||||||
|
integration_type: "Extend|Replace|New",
|
||||||
|
notes: f.rationale
|
||||||
|
}));
|
||||||
|
// Include in architecture document
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 6: Generate architecture/ directory
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const template = Read('templates/architecture-doc.md');
|
||||||
|
|
||||||
|
// Create architecture directory
|
||||||
|
Bash(`mkdir -p "${workDir}/architecture"`);
|
||||||
|
|
||||||
|
const status = autoMode ? 'complete' : 'draft';
|
||||||
|
const timestamp = new Date().toISOString();
|
||||||
|
|
||||||
|
// Parse CLI outputs into structured ADRs
|
||||||
|
const adrs = parseADRs(geminiArchitectureOutput, codexReviewOutput); // [{id, slug, title, ...}]
|
||||||
|
|
||||||
|
// Step 6a: Write individual ADR-*.md files (one per decision)
|
||||||
|
adrs.forEach(adr => {
|
||||||
|
// Use ADR-NNN-{slug}.md template from templates/architecture-doc.md
|
||||||
|
// Fill: id, title, status, context, decision, alternatives, consequences, traces
|
||||||
|
Write(`${workDir}/architecture/ADR-${adr.id}-${adr.slug}.md`, adrContent);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Step 6b: Write _index.md (overview + components + tech stack + links to ADRs)
|
||||||
|
// Use _index.md template from templates/architecture-doc.md
|
||||||
|
// Fill: system overview, component diagram, tech stack, ADR links table,
|
||||||
|
// data model, API design, security controls, infrastructure, codebase integration
|
||||||
|
Write(`${workDir}/architecture/_index.md`, indexContent);
|
||||||
|
|
||||||
|
// Update spec-config.json
|
||||||
|
specConfig.phasesCompleted.push({
|
||||||
|
phase: 4,
|
||||||
|
name: "architecture",
|
||||||
|
output_dir: "architecture/",
|
||||||
|
output_index: "architecture/_index.md",
|
||||||
|
file_count: adrs.length + 1,
|
||||||
|
completed_at: timestamp
|
||||||
|
});
|
||||||
|
Write(`${workDir}/spec-config.json`, JSON.stringify(specConfig, null, 2));
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output
|
||||||
|
|
||||||
|
- **Directory**: `architecture/`
|
||||||
|
- `_index.md` — Overview, component diagram, tech stack, data model, security, links
|
||||||
|
- `ADR-NNN-{slug}.md` — Individual Architecture Decision Record (per ADR)
|
||||||
|
- **Format**: Markdown with YAML frontmatter, cross-linked to requirements via relative paths
|
||||||
|
|
||||||
|
## Quality Checklist
|
||||||
|
|
||||||
|
- [ ] Component diagram present in `_index.md` (Mermaid or ASCII)
|
||||||
|
- [ ] Tech stack specified (languages, frameworks, key libraries)
|
||||||
|
- [ ] >= 1 ADR file with alternatives considered
|
||||||
|
- [ ] Each ADR file lists >= 2 options
|
||||||
|
- [ ] `_index.md` ADR table links to all individual ADR files
|
||||||
|
- [ ] Integration points identified
|
||||||
|
- [ ] Data model described
|
||||||
|
- [ ] Codebase mapping present (if has_codebase)
|
||||||
|
- [ ] All files have valid YAML frontmatter
|
||||||
|
- [ ] ADR files link back to requirement files
|
||||||
|
|
||||||
|
## Next Phase
|
||||||
|
|
||||||
|
Proceed to [Phase 5: Epics & Stories](05-epics-stories.md) with the generated `architecture/` directory (`architecture/_index.md` plus individual ADR files).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Agent Return Summary
|
||||||
|
|
||||||
|
When executed as a delegated agent, return the following JSON summary to the orchestrator:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"phase": 4,
|
||||||
|
"status": "complete",
|
||||||
|
"files_created": ["architecture/_index.md", "architecture/ADR-001-*.md", "..."],
|
||||||
|
"file_count": 0,
|
||||||
|
"codex_review_rating": 0,
|
||||||
|
"quality_notes": ["list of quality concerns or review feedback addressed"],
|
||||||
|
"key_decisions": ["architecture style choice", "key ADR decisions"]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The orchestrator will:
|
||||||
|
1. Validate that `architecture/` directory exists with `_index.md` and ADR files
|
||||||
|
2. Read `spec-config.json` to confirm `phasesCompleted` was updated
|
||||||
|
3. Store the summary for downstream phase context
|
||||||
241
.codex/skills/spec-generator/phases/05-epics-stories.md
Normal file
241
.codex/skills/spec-generator/phases/05-epics-stories.md
Normal file
@@ -0,0 +1,241 @@
|
|||||||
|
# Phase 5: Epics & Stories
|
||||||
|
|
||||||
|
> **Execution Mode: Agent Delegated**
|
||||||
|
> This phase is executed by a `doc-generator` agent. The orchestrator (SKILL.md) passes session context via the Task tool. The agent reads this file for instructions, executes all steps, writes output files, and returns a JSON summary.
|
||||||
|
|
||||||
|
Decompose the specification into executable Epics and Stories with dependency mapping.
|
||||||
|
|
||||||
|
## Objective
|
||||||
|
|
||||||
|
- Group requirements into 3-7 logical Epics
|
||||||
|
- Tag MVP subset of Epics
|
||||||
|
- Generate 2-5 Stories per Epic in standard user story format
|
||||||
|
- Map cross-Epic dependencies (Mermaid diagram)
|
||||||
|
- Generate epics.md using template
|
||||||
|
|
||||||
|
## Input
|
||||||
|
|
||||||
|
- Dependency: `{workDir}/requirements/_index.md`, `{workDir}/architecture/_index.md` (and individual files)
|
||||||
|
- Reference: `{workDir}/product-brief.md`
|
||||||
|
- Config: `{workDir}/spec-config.json`
|
||||||
|
- Template: `templates/epics-template.md` (directory structure: `_index.md` + `EPIC-*.md`)
|
||||||
|
|
||||||
|
## Execution Steps
|
||||||
|
|
||||||
|
### Step 1: Load Phase 2-4 Context
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const specConfig = JSON.parse(Read(`${workDir}/spec-config.json`));
|
||||||
|
const productBrief = Read(`${workDir}/product-brief.md`);
|
||||||
|
const requirements = Read(`${workDir}/requirements.md`);
|
||||||
|
const architecture = Read(`${workDir}/architecture.md`);
|
||||||
|
|
||||||
|
let glossary = null;
|
||||||
|
try {
|
||||||
|
glossary = JSON.parse(Read(`${workDir}/glossary.json`));
|
||||||
|
} catch (e) { /* proceed without */ }
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Epic Decomposition via Gemini CLI
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: Decompose requirements into executable Epics and Stories for implementation planning.
|
||||||
|
Success: 3-7 Epics with prioritized Stories, dependency map, and MVP subset clearly defined.
|
||||||
|
|
||||||
|
PRODUCT BRIEF (summary):
|
||||||
|
${productBrief.slice(0, 2000)}
|
||||||
|
|
||||||
|
REQUIREMENTS:
|
||||||
|
${requirements.slice(0, 5000)}
|
||||||
|
|
||||||
|
ARCHITECTURE (summary):
|
||||||
|
${architecture.slice(0, 3000)}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
- Group requirements into 3-7 logical Epics:
|
||||||
|
- Each Epic: EPIC-NNN ID, title, description, priority (Must/Should/Could)
|
||||||
|
- Group by functional domain or user journey stage
|
||||||
|
- Tag MVP Epics (minimum set for initial release)
|
||||||
|
|
||||||
|
- For each Epic, generate 2-5 Stories:
|
||||||
|
- Each Story: STORY-{EPIC}-NNN ID, title
|
||||||
|
- User story format: As a [persona], I want [action] so that [benefit]
|
||||||
|
- 2-4 acceptance criteria per story (testable)
|
||||||
|
- Relative size estimate: S/M/L/XL
|
||||||
|
- Trace to source requirement(s): REQ-NNN
|
||||||
|
|
||||||
|
- Create dependency map:
|
||||||
|
- Cross-Epic dependencies (which Epics block others)
|
||||||
|
- Mermaid graph LR format
|
||||||
|
- Recommended execution order with rationale
|
||||||
|
|
||||||
|
- Define MVP:
|
||||||
|
- Which Epics are in MVP
|
||||||
|
- MVP definition of done (3-5 criteria)
|
||||||
|
- What is explicitly deferred post-MVP
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: Structured output with: Epic list (ID, title, priority, MVP flag), Stories per Epic (ID, user story, AC, size, trace), dependency Mermaid diagram, execution order, MVP definition
|
||||||
|
CONSTRAINTS:
|
||||||
|
- Every Must-have requirement must appear in at least one Story
|
||||||
|
- Stories must be small enough to implement independently (no XL stories in MVP)
|
||||||
|
- Dependencies should be minimized across Epics
|
||||||
|
\${glossary ? \`- Maintain terminology consistency with glossary: \${glossary.terms.map(t => t.term).join(', ')}\` : ''}
|
||||||
|
" --tool gemini --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
});
|
||||||
|
|
||||||
|
// Wait for CLI result
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2.5: Codex Epics Review
|
||||||
|
|
||||||
|
After receiving Gemini decomposition results, validate epic/story quality via Codex CLI:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: Critical review of epic/story decomposition - validate coverage, sizing, and dependency structure.
|
||||||
|
Success: Actionable feedback on epic quality with specific issues identified.
|
||||||
|
|
||||||
|
GENERATED EPICS AND STORIES:
|
||||||
|
${geminiEpicsOutput.slice(0, 5000)}
|
||||||
|
|
||||||
|
REQUIREMENTS (Must-Have):
|
||||||
|
${mustHaveRequirements.slice(0, 2000)}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
- Verify Must-Have requirement coverage: every Must requirement appears in at least one Story
|
||||||
|
- Check MVP story sizing: no XL stories in MVP epics (too large to implement independently)
|
||||||
|
- Validate dependency graph: no circular dependencies between Epics
|
||||||
|
- Assess acceptance criteria: every Story AC is specific and testable
|
||||||
|
- Verify traceability: Stories trace back to specific REQ-NNN IDs
|
||||||
|
- Check Epic granularity: 3-7 epics (not too few/many), 2-5 stories each
|
||||||
|
- Rate overall decomposition quality: 1-5 with justification
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: Epic review with: coverage gaps, oversized stories, dependency issues, traceability gaps, quality rating
|
||||||
|
CONSTRAINTS: Focus on issues that would block execution planning. Be specific about which Story/Epic has problems.
|
||||||
|
" --tool codex --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
});
|
||||||
|
|
||||||
|
// Wait for Codex review result
|
||||||
|
// Integrate feedback into epics before writing files:
|
||||||
|
// - Add missing Stories for uncovered Must requirements
|
||||||
|
// - Split XL stories in MVP epics into smaller units
|
||||||
|
// - Fix dependency cycles identified by Codex
|
||||||
|
// - Improve vague acceptance criteria
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Interactive Validation (Optional)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (!autoMode) {
|
||||||
|
// Present Epic overview table and dependency diagram
|
||||||
|
AskUserQuestion({
|
||||||
|
questions: [
|
||||||
|
{
|
||||||
|
question: "Review the Epic breakdown. Any adjustments needed?",
|
||||||
|
header: "Epics",
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{ label: "Looks good", description: "Epic structure is appropriate" },
|
||||||
|
{ label: "Merge epics", description: "Some epics should be combined" },
|
||||||
|
{ label: "Split epic", description: "An epic is too large, needs splitting" },
|
||||||
|
{ label: "Adjust MVP", description: "Change which epics are in MVP" }
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
});
|
||||||
|
// Apply user adjustments
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4: Generate epics/ directory
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const template = Read('templates/epics-template.md');
|
||||||
|
|
||||||
|
// Create epics directory
|
||||||
|
Bash(`mkdir -p "${workDir}/epics"`);
|
||||||
|
|
||||||
|
const status = autoMode ? 'complete' : 'draft';
|
||||||
|
const timestamp = new Date().toISOString();
|
||||||
|
|
||||||
|
// Parse CLI output into structured Epics
|
||||||
|
const epicsList = parseEpics(cliOutput); // [{id, slug, title, priority, mvp, size, stories[], reqs[], adrs[], deps[]}]
|
||||||
|
|
||||||
|
// Step 4a: Write individual EPIC-*.md files (one per Epic, stories included)
|
||||||
|
epicsList.forEach(epic => {
|
||||||
|
// Use EPIC-NNN-{slug}.md template from templates/epics-template.md
|
||||||
|
// Fill: id, title, priority, mvp, size, description, requirements links,
|
||||||
|
// architecture links, dependency links, stories with user stories + AC
|
||||||
|
Write(`${workDir}/epics/EPIC-${epic.id}-${epic.slug}.md`, epicContent);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Step 4b: Write _index.md (overview + dependency map + MVP scope + traceability)
|
||||||
|
// Use _index.md template from templates/epics-template.md
|
||||||
|
// Fill: epic overview table (with links), dependency Mermaid diagram,
|
||||||
|
// execution order, MVP scope, traceability matrix, estimation summary
|
||||||
|
Write(`${workDir}/epics/_index.md`, indexContent);
|
||||||
|
|
||||||
|
// Update spec-config.json
|
||||||
|
specConfig.phasesCompleted.push({
|
||||||
|
phase: 5,
|
||||||
|
name: "epics-stories",
|
||||||
|
output_dir: "epics/",
|
||||||
|
output_index: "epics/_index.md",
|
||||||
|
file_count: epicsList.length + 1,
|
||||||
|
completed_at: timestamp
|
||||||
|
});
|
||||||
|
Write(`${workDir}/spec-config.json`, JSON.stringify(specConfig, null, 2));
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output
|
||||||
|
|
||||||
|
- **Directory**: `epics/`
|
||||||
|
- `_index.md` — Overview table, dependency map, MVP scope, traceability matrix, links
|
||||||
|
- `EPIC-NNN-{slug}.md` — Individual Epic with Stories (per Epic)
|
||||||
|
- **Format**: Markdown with YAML frontmatter, cross-linked to requirements and architecture via relative paths
|
||||||
|
|
||||||
|
## Quality Checklist
|
||||||
|
|
||||||
|
- [ ] 3-7 Epic files with EPIC-NNN IDs
|
||||||
|
- [ ] >= 1 Epic tagged as MVP in frontmatter
|
||||||
|
- [ ] 2-5 Stories per Epic file
|
||||||
|
- [ ] Stories use "As a...I want...So that..." format
|
||||||
|
- [ ] `_index.md` has cross-Epic dependency map (Mermaid)
|
||||||
|
- [ ] `_index.md` links to all individual Epic files
|
||||||
|
- [ ] Relative sizing (S/M/L/XL) per Story
|
||||||
|
- [ ] Epic files link to requirement files and ADR files
|
||||||
|
- [ ] All files have valid YAML frontmatter
|
||||||
|
|
||||||
|
## Next Phase
|
||||||
|
|
||||||
|
Proceed to [Phase 6: Readiness Check](06-readiness-check.md) to validate the complete specification package.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Agent Return Summary
|
||||||
|
|
||||||
|
When executed as a delegated agent, return the following JSON summary to the orchestrator:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"phase": 5,
|
||||||
|
"status": "complete",
|
||||||
|
"files_created": ["epics/_index.md", "epics/EPIC-001-*.md", "..."],
|
||||||
|
"file_count": 0,
|
||||||
|
"codex_review_integrated": true,
|
||||||
|
"mvp_epic_count": 0,
|
||||||
|
"total_story_count": 0,
|
||||||
|
"quality_notes": ["list of quality concerns or Codex feedback items addressed"],
|
||||||
|
"key_decisions": ["MVP scope decisions", "dependency resolution choices"]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The orchestrator will:
|
||||||
|
1. Validate that `epics/` directory exists with `_index.md` and EPIC files
|
||||||
|
2. Read `spec-config.json` to confirm `phasesCompleted` was updated
|
||||||
|
3. Store the summary for downstream phase context
|
||||||
172
.codex/skills/spec-generator/phases/06-5-auto-fix.md
Normal file
172
.codex/skills/spec-generator/phases/06-5-auto-fix.md
Normal file
@@ -0,0 +1,172 @@
|
|||||||
|
# Phase 6.5: Auto-Fix
|
||||||
|
|
||||||
|
> **Execution Mode: Agent Delegated**
|
||||||
|
> This phase is executed by a `doc-generator` agent when triggered by the orchestrator after Phase 6 identifies issues. The agent reads this file for instructions, applies fixes to affected documents, and returns a JSON summary.
|
||||||
|
|
||||||
|
Automatically repair specification issues identified in Phase 6 Readiness Check.
|
||||||
|
|
||||||
|
## Objective
|
||||||
|
|
||||||
|
- Parse readiness-report.md to extract Error and Warning items
|
||||||
|
- Group issues by originating Phase (2-5)
|
||||||
|
- Re-generate affected sections with error context injected into CLI prompts
|
||||||
|
- Re-run Phase 6 validation after fixes
|
||||||
|
|
||||||
|
## Input
|
||||||
|
|
||||||
|
- Dependency: `{workDir}/readiness-report.md` (Phase 6 output)
|
||||||
|
- Config: `{workDir}/spec-config.json` (with iteration_count)
|
||||||
|
- All Phase 2-5 outputs
|
||||||
|
|
||||||
|
## Execution Steps
|
||||||
|
|
||||||
|
### Step 1: Parse Readiness Report
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const readinessReport = Read(`${workDir}/readiness-report.md`);
|
||||||
|
const specConfig = JSON.parse(Read(`${workDir}/spec-config.json`));
|
||||||
|
|
||||||
|
// Load glossary for terminology consistency during fixes
|
||||||
|
let glossary = null;
|
||||||
|
try {
|
||||||
|
glossary = JSON.parse(Read(`${workDir}/glossary.json`));
|
||||||
|
} catch (e) { /* proceed without */ }
|
||||||
|
|
||||||
|
// Extract issues from readiness report
|
||||||
|
// Parse Error and Warning severity items
|
||||||
|
// Group by originating phase:
|
||||||
|
// Phase 2 issues: vision, problem statement, scope, personas
|
||||||
|
// Phase 3 issues: requirements, acceptance criteria, priority, traceability
|
||||||
|
// Phase 4 issues: architecture, ADRs, tech stack, data model, state machine
|
||||||
|
// Phase 5 issues: epics, stories, dependencies, MVP scope
|
||||||
|
|
||||||
|
const issuesByPhase = {
|
||||||
|
2: [], // product brief issues
|
||||||
|
3: [], // requirements issues
|
||||||
|
4: [], // architecture issues
|
||||||
|
5: [] // epics issues
|
||||||
|
};
|
||||||
|
|
||||||
|
// Parse structured issues from report
|
||||||
|
// Each issue: { severity: "Error"|"Warning", description: "...", location: "file:section" }
|
||||||
|
|
||||||
|
// Map phase numbers to output files
|
||||||
|
const phaseOutputFile = {
|
||||||
|
2: 'product-brief.md',
|
||||||
|
3: 'requirements/_index.md',
|
||||||
|
4: 'architecture/_index.md',
|
||||||
|
5: 'epics/_index.md'
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Fix Affected Phases (Sequential)
|
||||||
|
|
||||||
|
For each phase with issues (in order 2 -> 3 -> 4 -> 5):
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
for (const [phase, issues] of Object.entries(issuesByPhase)) {
|
||||||
|
if (issues.length === 0) continue;
|
||||||
|
|
||||||
|
const errorContext = issues.map(i => `[${i.severity}] ${i.description} (at ${i.location})`).join('\n');
|
||||||
|
|
||||||
|
// Read current phase output
|
||||||
|
const currentOutput = Read(`${workDir}/${phaseOutputFile[phase]}`);
|
||||||
|
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: Fix specification issues identified in readiness check for Phase ${phase}.
|
||||||
|
Success: All listed issues resolved while maintaining consistency with other documents.
|
||||||
|
|
||||||
|
CURRENT DOCUMENT:
|
||||||
|
${currentOutput.slice(0, 5000)}
|
||||||
|
|
||||||
|
ISSUES TO FIX:
|
||||||
|
${errorContext}
|
||||||
|
|
||||||
|
${glossary ? `GLOSSARY (maintain consistency):
|
||||||
|
${JSON.stringify(glossary.terms, null, 2)}` : ''}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
- Address each listed issue specifically
|
||||||
|
- Maintain all existing content that is not flagged
|
||||||
|
- Ensure terminology consistency with glossary
|
||||||
|
- Preserve YAML frontmatter and cross-references
|
||||||
|
- Use RFC 2119 keywords for behavioral requirements
|
||||||
|
- Increment document version number
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: Corrected document content addressing all listed issues
|
||||||
|
CONSTRAINTS: Minimal changes - only fix flagged issues, do not restructure unflagged sections
|
||||||
|
" --tool gemini --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
});
|
||||||
|
|
||||||
|
// Wait for result, apply fixes to document
|
||||||
|
// Update document version in frontmatter
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Update State
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
specConfig.phasesCompleted.push({
|
||||||
|
phase: 6.5,
|
||||||
|
name: "auto-fix",
|
||||||
|
iteration: specConfig.iteration_count,
|
||||||
|
phases_fixed: Object.keys(issuesByPhase).filter(p => issuesByPhase[p].length > 0),
|
||||||
|
completed_at: new Date().toISOString()
|
||||||
|
});
|
||||||
|
Write(`${workDir}/spec-config.json`, JSON.stringify(specConfig, null, 2));
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4: Re-run Phase 6 Validation
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Re-execute Phase 6: Readiness Check
|
||||||
|
// This creates a new readiness-report.md
|
||||||
|
// If still Fail and iteration_count < 2: loop back to Step 1
|
||||||
|
// If Pass or iteration_count >= 2: proceed to handoff
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output
|
||||||
|
|
||||||
|
- **Updated**: Phase 2-5 documents (only affected ones)
|
||||||
|
- **Updated**: `spec-config.json` (iteration tracking)
|
||||||
|
- **Triggers**: Phase 6 re-validation
|
||||||
|
|
||||||
|
## Quality Checklist
|
||||||
|
|
||||||
|
- [ ] All Error-severity issues addressed
|
||||||
|
- [ ] Warning-severity issues attempted (best effort)
|
||||||
|
- [ ] Document versions incremented for modified files
|
||||||
|
- [ ] Terminology consistency maintained
|
||||||
|
- [ ] Cross-references still valid after fixes
|
||||||
|
- [ ] Iteration count not exceeded (max 2)
|
||||||
|
|
||||||
|
## Next Phase
|
||||||
|
|
||||||
|
Re-run [Phase 6: Readiness Check](06-readiness-check.md) to validate fixes.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Agent Return Summary
|
||||||
|
|
||||||
|
When executed as a delegated agent, return the following JSON summary to the orchestrator:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"phase": 6.5,
|
||||||
|
"status": "complete",
|
||||||
|
"files_modified": ["list of files that were updated"],
|
||||||
|
"issues_fixed": {
|
||||||
|
"errors": 0,
|
||||||
|
"warnings": 0
|
||||||
|
},
|
||||||
|
"quality_notes": ["list of fix decisions and remaining concerns"],
|
||||||
|
"phases_touched": [2, 3, 4, 5]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The orchestrator will:
|
||||||
|
1. Validate that listed files were actually modified (check version increment)
|
||||||
|
2. Update `spec-config.json` iteration tracking
|
||||||
|
3. Re-trigger Phase 6 validation
|
||||||
581
.codex/skills/spec-generator/phases/06-readiness-check.md
Normal file
581
.codex/skills/spec-generator/phases/06-readiness-check.md
Normal file
@@ -0,0 +1,581 @@
|
|||||||
|
# Phase 6: Readiness Check
|
||||||
|
|
||||||
|
Validate the complete specification package, generate quality report and executive summary, provide execution handoff options.
|
||||||
|
|
||||||
|
## Objective
|
||||||
|
|
||||||
|
- Cross-document validation: completeness, consistency, traceability, depth
|
||||||
|
- Generate quality scores per dimension
|
||||||
|
- Produce readiness-report.md with issue list and traceability matrix
|
||||||
|
- Produce spec-summary.md as one-page executive summary
|
||||||
|
- Update all document frontmatter to `status: complete`
|
||||||
|
- Present handoff options to execution workflows
|
||||||
|
|
||||||
|
## Input
|
||||||
|
|
||||||
|
- All Phase 2-5 outputs: `product-brief.md`, `requirements/_index.md` (+ `REQ-*.md`, `NFR-*.md`), `architecture/_index.md` (+ `ADR-*.md`), `epics/_index.md` (+ `EPIC-*.md`)
|
||||||
|
- Config: `{workDir}/spec-config.json`
|
||||||
|
- Reference: `specs/quality-gates.md`
|
||||||
|
|
||||||
|
## Execution Steps
|
||||||
|
|
||||||
|
### Step 1: Load All Documents
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const specConfig = JSON.parse(Read(`${workDir}/spec-config.json`));
|
||||||
|
const productBrief = Read(`${workDir}/product-brief.md`);
|
||||||
|
const requirementsIndex = Read(`${workDir}/requirements/_index.md`);
|
||||||
|
const architectureIndex = Read(`${workDir}/architecture/_index.md`);
|
||||||
|
const epicsIndex = Read(`${workDir}/epics/_index.md`);
|
||||||
|
const qualityGates = Read('specs/quality-gates.md');
|
||||||
|
|
||||||
|
// Load individual files for deep validation
|
||||||
|
const reqFiles = Glob(`${workDir}/requirements/REQ-*.md`);
|
||||||
|
const nfrFiles = Glob(`${workDir}/requirements/NFR-*.md`);
|
||||||
|
const adrFiles = Glob(`${workDir}/architecture/ADR-*.md`);
|
||||||
|
const epicFiles = Glob(`${workDir}/epics/EPIC-*.md`);
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Cross-Document Validation via Gemini CLI
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: Validate specification package for completeness, consistency, traceability, and depth.
|
||||||
|
Success: Comprehensive quality report with scores, issues, and traceability matrix.
|
||||||
|
|
||||||
|
DOCUMENTS TO VALIDATE:
|
||||||
|
|
||||||
|
=== PRODUCT BRIEF ===
|
||||||
|
${productBrief.slice(0, 3000)}
|
||||||
|
|
||||||
|
=== REQUIREMENTS INDEX (${reqFiles.length} REQ + ${nfrFiles.length} NFR files) ===
|
||||||
|
${requirementsIndex.slice(0, 3000)}
|
||||||
|
|
||||||
|
=== ARCHITECTURE INDEX (${adrFiles.length} ADR files) ===
|
||||||
|
${architectureIndex.slice(0, 2500)}
|
||||||
|
|
||||||
|
=== EPICS INDEX (${epicFiles.length} EPIC files) ===
|
||||||
|
${epicsIndex.slice(0, 2500)}
|
||||||
|
|
||||||
|
QUALITY CRITERIA (from quality-gates.md):
|
||||||
|
${qualityGates.slice(0, 2000)}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
Perform 4-dimension validation:
|
||||||
|
|
||||||
|
1. COMPLETENESS (25%):
|
||||||
|
- All required sections present in each document?
|
||||||
|
- All template fields filled with substantive content?
|
||||||
|
- Score 0-100 with specific gaps listed
|
||||||
|
|
||||||
|
2. CONSISTENCY (25%):
|
||||||
|
- Terminology uniform across documents?
|
||||||
|
- Terminology glossary compliance: all core terms used consistently per glossary.json definitions?
|
||||||
|
- No synonym drift (e.g., "user" vs "client" vs "consumer" for same concept)?
|
||||||
|
- User personas consistent?
|
||||||
|
- Scope consistent (PRD does not exceed brief)?
|
||||||
|
- Scope containment: PRD requirements do not exceed product brief's defined scope?
|
||||||
|
- Non-Goals respected: no requirement or story contradicts explicit Non-Goals?
|
||||||
|
- Tech stack references match between architecture and epics?
|
||||||
|
- Score 0-100 with inconsistencies listed
|
||||||
|
|
||||||
|
3. TRACEABILITY (25%):
|
||||||
|
- Every goal has >= 1 requirement?
|
||||||
|
- Every Must requirement has architecture coverage?
|
||||||
|
- Every Must requirement appears in >= 1 story?
|
||||||
|
- ADR choices reflected in epics?
|
||||||
|
- Build traceability matrix: Goal -> Requirement -> Architecture -> Epic/Story
|
||||||
|
- Score 0-100 with orphan items listed
|
||||||
|
|
||||||
|
4. DEPTH (25%):
|
||||||
|
- Acceptance criteria specific and testable?
|
||||||
|
- Architecture decisions justified with alternatives?
|
||||||
|
- Stories estimable by dev team?
|
||||||
|
- Score 0-100 with vague areas listed
|
||||||
|
|
||||||
|
ALSO:
|
||||||
|
- List all issues found, classified as Error/Warning/Info
|
||||||
|
- Generate overall weighted score
|
||||||
|
- Determine gate: Pass (>=80) / Review (60-79) / Fail (<60)
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: JSON-compatible output with: dimension scores, overall score, gate, issues list (severity + description + location), traceability matrix
|
||||||
|
CONSTRAINTS: Be thorough but fair. Focus on actionable issues.
|
||||||
|
" --tool gemini --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
});
|
||||||
|
|
||||||
|
// Wait for CLI result
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2b: Codex Technical Depth Review
|
||||||
|
|
||||||
|
Launch Codex review in parallel with Gemini validation for deeper technical assessment:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: Deep technical quality review of specification package - assess architectural rigor and implementation readiness.
|
||||||
|
Success: Technical quality assessment with specific actionable feedback on ADR quality, data model, security, and observability.
|
||||||
|
|
||||||
|
ARCHITECTURE INDEX:
|
||||||
|
${architectureIndex.slice(0, 3000)}
|
||||||
|
|
||||||
|
ADR FILES (summaries):
|
||||||
|
${adrFiles.map(f => Read(f).slice(0, 500)).join('\n---\n')}
|
||||||
|
|
||||||
|
REQUIREMENTS INDEX:
|
||||||
|
${requirementsIndex.slice(0, 2000)}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
- ADR Alternative Quality: Each ADR has >= 2 genuine alternatives with substantive pros/cons (not strawman options)
|
||||||
|
- Data Model Completeness: All entities referenced in requirements have field-level definitions with types and constraints
|
||||||
|
- Security Coverage: Authentication, authorization, data protection, and input validation addressed for all external interfaces
|
||||||
|
- Observability Specification: Metrics, logging, and health checks defined for service/platform types
|
||||||
|
- Error Handling: Error classification and recovery strategies defined per component
|
||||||
|
- Configuration Model: All configurable parameters documented with types, defaults, and constraints
|
||||||
|
- Rate each dimension 1-5 with specific gaps identified
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: Technical depth review with: per-dimension scores (1-5), specific gaps, improvement recommendations, overall technical readiness assessment
|
||||||
|
CONSTRAINTS: Focus on gaps that would cause implementation ambiguity. Ignore cosmetic issues.
|
||||||
|
" --tool codex --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
});
|
||||||
|
|
||||||
|
// Codex result merged with Gemini result in Step 3
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2c: Per-Requirement Verification
|
||||||
|
|
||||||
|
Iterate through all individual requirement files for fine-grained verification:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Load all requirement files
|
||||||
|
const reqFiles = Glob(`${workDir}/requirements/REQ-*.md`);
|
||||||
|
const nfrFiles = Glob(`${workDir}/requirements/NFR-*.md`);
|
||||||
|
const allReqFiles = [...reqFiles, ...nfrFiles];
|
||||||
|
|
||||||
|
// Load reference documents for cross-checking
|
||||||
|
const productBrief = Read(`${workDir}/product-brief.md`);
|
||||||
|
const epicFiles = Glob(`${workDir}/epics/EPIC-*.md`);
|
||||||
|
const adrFiles = Glob(`${workDir}/architecture/ADR-*.md`);
|
||||||
|
|
||||||
|
// Read all epic content for coverage check
|
||||||
|
const epicContents = epicFiles.map(f => ({ path: f, content: Read(f) }));
|
||||||
|
const adrContents = adrFiles.map(f => ({ path: f, content: Read(f) }));
|
||||||
|
|
||||||
|
// Per-requirement verification
|
||||||
|
const verificationResults = allReqFiles.map(reqFile => {
|
||||||
|
const content = Read(reqFile);
|
||||||
|
const reqId = extractReqId(content); // e.g., REQ-001 or NFR-PERF-001
|
||||||
|
const priority = extractPriority(content); // Must/Should/Could/Won't
|
||||||
|
|
||||||
|
// Check 1: AC exists and is testable
|
||||||
|
const hasAC = content.includes('- [ ]') || content.includes('Acceptance Criteria');
|
||||||
|
const acTestable = !content.match(/should be (fast|good|reliable|secure)/i); // No vague AC
|
||||||
|
|
||||||
|
// Check 2: Traces back to Brief goal
|
||||||
|
const tracesLinks = content.match(/product-brief\.md/);
|
||||||
|
|
||||||
|
// Check 3: Must requirements have Story coverage (search EPIC files)
|
||||||
|
let storyCoverage = priority !== 'Must' ? 'N/A' :
|
||||||
|
epicContents.some(e => e.content.includes(reqId)) ? 'Covered' : 'MISSING';
|
||||||
|
|
||||||
|
// Check 4: Must requirements have architecture coverage (search ADR files)
|
||||||
|
let archCoverage = priority !== 'Must' ? 'N/A' :
|
||||||
|
adrContents.some(a => a.content.includes(reqId)) ||
|
||||||
|
Read(`${workDir}/architecture/_index.md`).includes(reqId) ? 'Covered' : 'MISSING';
|
||||||
|
|
||||||
|
return {
|
||||||
|
req_id: reqId,
|
||||||
|
priority,
|
||||||
|
ac_exists: hasAC ? 'Yes' : 'MISSING',
|
||||||
|
ac_testable: acTestable ? 'Yes' : 'VAGUE',
|
||||||
|
brief_trace: tracesLinks ? 'Yes' : 'MISSING',
|
||||||
|
story_coverage: storyCoverage,
|
||||||
|
arch_coverage: archCoverage,
|
||||||
|
pass: hasAC && acTestable && tracesLinks &&
|
||||||
|
(priority !== 'Must' || (storyCoverage === 'Covered' && archCoverage === 'Covered'))
|
||||||
|
};
|
||||||
|
});
|
||||||
|
|
||||||
|
// Generate Per-Requirement Verification table for readiness-report.md
|
||||||
|
const verificationTable = `
|
||||||
|
## Per-Requirement Verification
|
||||||
|
|
||||||
|
| Req ID | Priority | AC Exists | AC Testable | Brief Trace | Story Coverage | Arch Coverage | Status |
|
||||||
|
|--------|----------|-----------|-------------|-------------|----------------|---------------|--------|
|
||||||
|
${verificationResults.map(r =>
|
||||||
|
`| ${r.req_id} | ${r.priority} | ${r.ac_exists} | ${r.ac_testable} | ${r.brief_trace} | ${r.story_coverage} | ${r.arch_coverage} | ${r.pass ? 'PASS' : 'FAIL'} |`
|
||||||
|
).join('\n')}
|
||||||
|
|
||||||
|
**Summary**: ${verificationResults.filter(r => r.pass).length}/${verificationResults.length} requirements pass all checks.
|
||||||
|
`;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Generate readiness-report.md
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const frontmatterReport = `---
|
||||||
|
session_id: ${specConfig.session_id}
|
||||||
|
phase: 6
|
||||||
|
document_type: readiness-report
|
||||||
|
status: complete
|
||||||
|
generated_at: ${new Date().toISOString()}
|
||||||
|
stepsCompleted: ["load-all", "cross-validation", "codex-technical-review", "per-req-verification", "scoring", "report-generation"]
|
||||||
|
version: 1
|
||||||
|
dependencies:
|
||||||
|
- product-brief.md
|
||||||
|
- requirements/_index.md
|
||||||
|
- architecture/_index.md
|
||||||
|
- epics/_index.md
|
||||||
|
---`;
|
||||||
|
|
||||||
|
// Report content from CLI validation output:
|
||||||
|
// - Quality Score Summary (4 dimensions + overall)
|
||||||
|
// - Gate Decision (Pass/Review/Fail)
|
||||||
|
// - Issue List (grouped by severity: Error, Warning, Info)
|
||||||
|
// - Traceability Matrix (Goal -> Req -> Arch -> Epic/Story)
|
||||||
|
// - Codex Technical Depth Review (per-dimension scores from Step 2b)
|
||||||
|
// - Per-Requirement Verification Table (from Step 2c)
|
||||||
|
// - Recommendations for improvement
|
||||||
|
|
||||||
|
Write(`${workDir}/readiness-report.md`, `${frontmatterReport}\n\n${reportContent}`);
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4: Generate spec-summary.md
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const frontmatterSummary = `---
|
||||||
|
session_id: ${specConfig.session_id}
|
||||||
|
phase: 6
|
||||||
|
document_type: spec-summary
|
||||||
|
status: complete
|
||||||
|
generated_at: ${new Date().toISOString()}
|
||||||
|
stepsCompleted: ["synthesis"]
|
||||||
|
version: 1
|
||||||
|
dependencies:
|
||||||
|
- product-brief.md
|
||||||
|
- requirements/_index.md
|
||||||
|
- architecture/_index.md
|
||||||
|
- epics/_index.md
|
||||||
|
- readiness-report.md
|
||||||
|
---`;
|
||||||
|
|
||||||
|
// One-page executive summary:
|
||||||
|
// - Product Name & Vision (from product-brief.md)
|
||||||
|
// - Problem & Target Users (from product-brief.md)
|
||||||
|
// - Key Requirements count (Must/Should/Could from requirements.md)
|
||||||
|
// - Architecture Style & Tech Stack (from architecture.md)
|
||||||
|
// - Epic Overview (count, MVP scope from epics.md)
|
||||||
|
// - Quality Score (from readiness-report.md)
|
||||||
|
// - Recommended Next Step
|
||||||
|
// - File manifest with links
|
||||||
|
|
||||||
|
Write(`${workDir}/spec-summary.md`, `${frontmatterSummary}\n\n${summaryContent}`);
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 5: Update All Document Status
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Update frontmatter status to 'complete' in all documents (directories + single files)
|
||||||
|
// product-brief.md is a single file
|
||||||
|
const singleFiles = ['product-brief.md'];
|
||||||
|
singleFiles.forEach(doc => {
|
||||||
|
const content = Read(`${workDir}/${doc}`);
|
||||||
|
Write(`${workDir}/${doc}`, content.replace(/status: draft/, 'status: complete'));
|
||||||
|
});
|
||||||
|
|
||||||
|
// Update all files in directories (index + individual files)
|
||||||
|
const dirFiles = [
|
||||||
|
...Glob(`${workDir}/requirements/*.md`),
|
||||||
|
...Glob(`${workDir}/architecture/*.md`),
|
||||||
|
...Glob(`${workDir}/epics/*.md`)
|
||||||
|
];
|
||||||
|
dirFiles.forEach(filePath => {
|
||||||
|
const content = Read(filePath);
|
||||||
|
if (content.includes('status: draft')) {
|
||||||
|
Write(filePath, content.replace(/status: draft/, 'status: complete'));
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Update spec-config.json
|
||||||
|
specConfig.phasesCompleted.push({
|
||||||
|
phase: 6,
|
||||||
|
name: "readiness-check",
|
||||||
|
output_file: "readiness-report.md",
|
||||||
|
completed_at: new Date().toISOString()
|
||||||
|
});
|
||||||
|
Write(`${workDir}/spec-config.json`, JSON.stringify(specConfig, null, 2));
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 6: Handoff Options
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
AskUserQuestion({
|
||||||
|
questions: [
|
||||||
|
{
|
||||||
|
question: "Specification package is complete. What would you like to do next?",
|
||||||
|
header: "Next Step",
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{
|
||||||
|
label: "Execute via lite-plan",
|
||||||
|
description: "Start implementing with /workflow-lite-plan, one Epic at a time"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
label: "Create roadmap",
|
||||||
|
description: "Generate execution roadmap with /workflow:req-plan-with-file"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
label: "Full planning",
|
||||||
|
description: "Detailed planning with /workflow-plan for the full scope"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
label: "Export Issues (Phase 7)",
|
||||||
|
description: "Create issues per Epic with spec links and wave assignment"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
label: "Iterate & improve",
|
||||||
|
description: "Re-run failed phases based on readiness report issues (max 2 iterations)"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
});
|
||||||
|
|
||||||
|
// Based on user selection, execute the corresponding handoff:
|
||||||
|
|
||||||
|
if (selection === "Execute via lite-plan") {
|
||||||
|
// lite-plan accepts a text description directly
|
||||||
|
// Read first MVP Epic from individual EPIC-*.md files
|
||||||
|
const epicFiles = Glob(`${workDir}/epics/EPIC-*.md`);
|
||||||
|
const firstMvpFile = epicFiles.find(f => {
|
||||||
|
const content = Read(f);
|
||||||
|
return content.includes('mvp: true');
|
||||||
|
});
|
||||||
|
const epicContent = Read(firstMvpFile);
|
||||||
|
const title = extractTitle(epicContent); // First # heading
|
||||||
|
const description = extractSection(epicContent, "Description");
|
||||||
|
Skill(skill="workflow-lite-plan", args=`"${title}: ${description}"`)
|
||||||
|
}
|
||||||
|
|
||||||
|
if (selection === "Full planning" || selection === "Create roadmap") {
|
||||||
|
// === Bridge: Build brainstorm_artifacts compatible structure ===
|
||||||
|
// Reads from directory-based outputs (individual files), maps to .brainstorming/ format
|
||||||
|
// for context-search-agent auto-discovery → action-planning-agent consumption.
|
||||||
|
|
||||||
|
// Step A: Read spec documents from directories
|
||||||
|
const specSummary = Read(`${workDir}/spec-summary.md`);
|
||||||
|
const productBrief = Read(`${workDir}/product-brief.md`);
|
||||||
|
const requirementsIndex = Read(`${workDir}/requirements/_index.md`);
|
||||||
|
const architectureIndex = Read(`${workDir}/architecture/_index.md`);
|
||||||
|
const epicsIndex = Read(`${workDir}/epics/_index.md`);
|
||||||
|
|
||||||
|
// Read individual EPIC files (already split — direct mapping to feature-specs)
|
||||||
|
const epicFiles = Glob(`${workDir}/epics/EPIC-*.md`);
|
||||||
|
|
||||||
|
// Step B: Build structured description from spec-summary
|
||||||
|
const structuredDesc = `GOAL: ${extractGoal(specSummary)}
|
||||||
|
SCOPE: ${extractScope(specSummary)}
|
||||||
|
CONTEXT: Generated from spec session ${specConfig.session_id}. Source: ${workDir}/`;
|
||||||
|
|
||||||
|
// Step C: Create WFS session (provides session directory + .brainstorming/)
|
||||||
|
Skill(skill="workflow:session:start", args=`--auto "${structuredDesc}"`)
|
||||||
|
// → Produces sessionId (WFS-xxx) and session directory at .workflow/active/{sessionId}/
|
||||||
|
|
||||||
|
// Step D: Create .brainstorming/ bridge files
|
||||||
|
const brainstormDir = `.workflow/active/${sessionId}/.brainstorming`;
|
||||||
|
Bash(`mkdir -p "${brainstormDir}/feature-specs"`);
|
||||||
|
|
||||||
|
// D.1: guidance-specification.md (highest priority — action-planning-agent reads first)
|
||||||
|
// Synthesized from spec-summary + product-brief + architecture/requirements indexes
|
||||||
|
Write(`${brainstormDir}/guidance-specification.md`, `
|
||||||
|
# ${specConfig.seed_analysis.problem_statement} - Confirmed Guidance Specification
|
||||||
|
|
||||||
|
**Source**: spec-generator session ${specConfig.session_id}
|
||||||
|
**Generated**: ${new Date().toISOString()}
|
||||||
|
**Spec Directory**: ${workDir}
|
||||||
|
|
||||||
|
## 1. Project Positioning & Goals
|
||||||
|
${extractSection(productBrief, "Vision")}
|
||||||
|
${extractSection(productBrief, "Goals")}
|
||||||
|
|
||||||
|
## 2. Requirements Summary
|
||||||
|
${extractSection(requirementsIndex, "Functional Requirements")}
|
||||||
|
|
||||||
|
## 3. Architecture Decisions
|
||||||
|
${extractSection(architectureIndex, "Architecture Decision Records")}
|
||||||
|
${extractSection(architectureIndex, "Technology Stack")}
|
||||||
|
|
||||||
|
## 4. Implementation Scope
|
||||||
|
${extractSection(epicsIndex, "Epic Overview")}
|
||||||
|
${extractSection(epicsIndex, "MVP Scope")}
|
||||||
|
|
||||||
|
## Feature Decomposition
|
||||||
|
${extractSection(epicsIndex, "Traceability Matrix")}
|
||||||
|
|
||||||
|
## Appendix: Source Documents
|
||||||
|
| Document | Path | Description |
|
||||||
|
|----------|------|-------------|
|
||||||
|
| Product Brief | ${workDir}/product-brief.md | Vision, goals, scope |
|
||||||
|
| Requirements | ${workDir}/requirements/ | _index.md + REQ-*.md + NFR-*.md |
|
||||||
|
| Architecture | ${workDir}/architecture/ | _index.md + ADR-*.md |
|
||||||
|
| Epics | ${workDir}/epics/ | _index.md + EPIC-*.md |
|
||||||
|
| Readiness Report | ${workDir}/readiness-report.md | Quality validation |
|
||||||
|
`);
|
||||||
|
|
||||||
|
// D.2: feature-index.json (each EPIC file mapped to a Feature)
|
||||||
|
// Path: feature-specs/feature-index.json (matches context-search-agent discovery)
|
||||||
|
// Directly read from individual EPIC-*.md files (no monolithic parsing needed)
|
||||||
|
const features = epicFiles.map(epicFile => {
|
||||||
|
const content = Read(epicFile);
|
||||||
|
const fm = parseFrontmatter(content); // Extract YAML frontmatter
|
||||||
|
const basename = path.basename(epicFile, '.md'); // EPIC-001-slug
|
||||||
|
const epicNum = fm.id.replace('EPIC-', ''); // 001
|
||||||
|
const slug = basename.replace(/^EPIC-\d+-/, ''); // slug
|
||||||
|
return {
|
||||||
|
id: `F-${epicNum}`,
|
||||||
|
slug: slug,
|
||||||
|
name: extractTitle(content),
|
||||||
|
description: extractSection(content, "Description"),
|
||||||
|
priority: fm.mvp ? "High" : "Medium",
|
||||||
|
spec_path: `${brainstormDir}/feature-specs/F-${epicNum}-${slug}.md`,
|
||||||
|
source_epic: fm.id,
|
||||||
|
source_file: epicFile
|
||||||
|
};
|
||||||
|
});
|
||||||
|
Write(`${brainstormDir}/feature-specs/feature-index.json`, JSON.stringify({
|
||||||
|
version: "1.0",
|
||||||
|
source: "spec-generator",
|
||||||
|
spec_session: specConfig.session_id,
|
||||||
|
features,
|
||||||
|
cross_cutting_specs: []
|
||||||
|
}, null, 2));
|
||||||
|
|
||||||
|
// D.3: Feature-spec files — directly adapt from individual EPIC-*.md files
|
||||||
|
// Since Epics are already individual documents, transform format directly
|
||||||
|
// Filename pattern: F-{num}-{slug}.md (matches context-search-agent glob F-*-*.md)
|
||||||
|
features.forEach(feature => {
|
||||||
|
const epicContent = Read(feature.source_file);
|
||||||
|
Write(feature.spec_path, `
|
||||||
|
# Feature Spec: ${feature.source_epic} - ${feature.name}
|
||||||
|
|
||||||
|
**Source**: ${feature.source_file}
|
||||||
|
**Priority**: ${feature.priority === "High" ? "MVP" : "Post-MVP"}
|
||||||
|
|
||||||
|
## Description
|
||||||
|
${extractSection(epicContent, "Description")}
|
||||||
|
|
||||||
|
## Stories
|
||||||
|
${extractSection(epicContent, "Stories")}
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
${extractSection(epicContent, "Requirements")}
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
${extractSection(epicContent, "Architecture")}
|
||||||
|
`);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Step E: Invoke downstream workflow
|
||||||
|
// context-search-agent will auto-discover .brainstorming/ files
|
||||||
|
// → context-package.json.brainstorm_artifacts populated
|
||||||
|
// → action-planning-agent loads guidance_specification (P1) + feature_index (P2)
|
||||||
|
if (selection === "Full planning") {
|
||||||
|
Skill(skill="workflow-plan", args=`"${structuredDesc}"`)
|
||||||
|
} else {
|
||||||
|
Skill(skill="workflow:req-plan-with-file", args=`"${extractGoal(specSummary)}"`)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (selection === "Export Issues (Phase 7)") {
|
||||||
|
// Proceed to Phase 7: Issue Export
|
||||||
|
// Read phases/07-issue-export.md and execute
|
||||||
|
}
|
||||||
|
|
||||||
|
// If user selects "Other": Export only or return to specific phase
|
||||||
|
|
||||||
|
if (selection === "Iterate & improve") {
|
||||||
|
// Check iteration count
|
||||||
|
if (specConfig.iteration_count >= 2) {
|
||||||
|
// Max iterations reached, force handoff
|
||||||
|
// Present handoff options again without iterate
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update iteration tracking
|
||||||
|
specConfig.iteration_count = (specConfig.iteration_count || 0) + 1;
|
||||||
|
specConfig.iteration_history.push({
|
||||||
|
iteration: specConfig.iteration_count,
|
||||||
|
timestamp: new Date().toISOString(),
|
||||||
|
readiness_score: overallScore,
|
||||||
|
errors_found: errorCount,
|
||||||
|
phases_to_fix: affectedPhases
|
||||||
|
});
|
||||||
|
Write(`${workDir}/spec-config.json`, JSON.stringify(specConfig, null, 2));
|
||||||
|
|
||||||
|
// Proceed to Phase 6.5: Auto-Fix
|
||||||
|
// Read phases/06-5-auto-fix.md and execute
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Helper Functions Reference (pseudocode)
|
||||||
|
|
||||||
|
The following helper functions are used in the handoff bridge. They operate on markdown content from individual spec files:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Extract title from a markdown document (first # heading)
|
||||||
|
function extractTitle(markdown) {
|
||||||
|
// Return the text after the first # heading (e.g., "# EPIC-001: Title" → "Title")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse YAML frontmatter from markdown (between --- markers)
|
||||||
|
function parseFrontmatter(markdown) {
|
||||||
|
// Return object with: id, priority, mvp, size, requirements, architecture, dependencies
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract GOAL/SCOPE from spec-summary frontmatter or ## sections
|
||||||
|
function extractGoal(specSummary) { /* Return the Vision/Goal line */ }
|
||||||
|
function extractScope(specSummary) { /* Return the Scope/MVP boundary */ }
|
||||||
|
|
||||||
|
// Extract a named ## section from a markdown document
|
||||||
|
function extractSection(markdown, sectionName) {
|
||||||
|
// Return content between ## {sectionName} and next ## heading
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output
|
||||||
|
|
||||||
|
- **File**: `readiness-report.md` - Quality validation report
|
||||||
|
- **File**: `spec-summary.md` - One-page executive summary
|
||||||
|
- **Format**: Markdown with YAML frontmatter
|
||||||
|
|
||||||
|
## Quality Checklist
|
||||||
|
|
||||||
|
- [ ] All document directories validated (product-brief, requirements/, architecture/, epics/)
|
||||||
|
- [ ] All frontmatter parseable and valid (index + individual files)
|
||||||
|
- [ ] Cross-references checked (relative links between directories)
|
||||||
|
- [ ] Overall quality score calculated
|
||||||
|
- [ ] No unresolved Error-severity issues
|
||||||
|
- [ ] Traceability matrix generated
|
||||||
|
- [ ] spec-summary.md created
|
||||||
|
- [ ] All document statuses updated to 'complete' (all files in all directories)
|
||||||
|
- [ ] Handoff options presented
|
||||||
|
|
||||||
|
## Completion
|
||||||
|
|
||||||
|
This is the final phase. The specification package is ready for execution handoff.
|
||||||
|
|
||||||
|
### Output Files Manifest
|
||||||
|
|
||||||
|
| Path | Phase | Description |
|
||||||
|
|------|-------|-------------|
|
||||||
|
| `spec-config.json` | 1 | Session configuration and state |
|
||||||
|
| `discovery-context.json` | 1 | Codebase exploration (optional) |
|
||||||
|
| `product-brief.md` | 2 | Product brief with multi-perspective synthesis |
|
||||||
|
| `requirements/` | 3 | Directory: `_index.md` + `REQ-*.md` + `NFR-*.md` |
|
||||||
|
| `architecture/` | 4 | Directory: `_index.md` + `ADR-*.md` |
|
||||||
|
| `epics/` | 5 | Directory: `_index.md` + `EPIC-*.md` |
|
||||||
|
| `readiness-report.md` | 6 | Quality validation report |
|
||||||
|
| `spec-summary.md` | 6 | One-page executive summary |
|
||||||
329
.codex/skills/spec-generator/phases/07-issue-export.md
Normal file
329
.codex/skills/spec-generator/phases/07-issue-export.md
Normal file
@@ -0,0 +1,329 @@
|
|||||||
|
# Phase 7: Issue Export
|
||||||
|
|
||||||
|
Map specification Epics to issues, create them via `ccw issue create`, and generate an export report with spec document links.
|
||||||
|
|
||||||
|
> **Execution Mode: Inline**
|
||||||
|
> This phase runs in the main orchestrator context (not delegated to agent) for direct access to `ccw issue create` CLI and interactive handoff options.
|
||||||
|
|
||||||
|
## Objective
|
||||||
|
|
||||||
|
- Read all EPIC-*.md files from Phase 5 output
|
||||||
|
- Assign waves: MVP epics → wave-1, non-MVP → wave-2
|
||||||
|
- Create one issue per Epic via `ccw issue create`
|
||||||
|
- Map Epic dependencies to issue dependencies
|
||||||
|
- Generate issue-export-report.md with mapping table and spec links
|
||||||
|
- Present handoff options for execution
|
||||||
|
|
||||||
|
## Input
|
||||||
|
|
||||||
|
- Dependency: `{workDir}/epics/_index.md` (and individual `EPIC-*.md` files)
|
||||||
|
- Reference: `{workDir}/readiness-report.md`, `{workDir}/spec-config.json`
|
||||||
|
- Reference: `{workDir}/product-brief.md`, `{workDir}/requirements/_index.md`, `{workDir}/architecture/_index.md`
|
||||||
|
|
||||||
|
## Execution Steps
|
||||||
|
|
||||||
|
### Step 1: Load Epic Files
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const specConfig = JSON.parse(Read(`${workDir}/spec-config.json`));
|
||||||
|
const epicFiles = Glob(`${workDir}/epics/EPIC-*.md`);
|
||||||
|
const epicsIndex = Read(`${workDir}/epics/_index.md`);
|
||||||
|
|
||||||
|
// Parse each Epic file
|
||||||
|
const epics = epicFiles.map(epicFile => {
|
||||||
|
const content = Read(epicFile);
|
||||||
|
const fm = parseFrontmatter(content);
|
||||||
|
const title = extractTitle(content);
|
||||||
|
const description = extractSection(content, "Description");
|
||||||
|
const stories = extractSection(content, "Stories");
|
||||||
|
const reqRefs = extractSection(content, "Requirements");
|
||||||
|
const adrRefs = extractSection(content, "Architecture");
|
||||||
|
const deps = fm.dependencies || [];
|
||||||
|
|
||||||
|
return {
|
||||||
|
file: epicFile,
|
||||||
|
id: fm.id, // e.g., EPIC-001
|
||||||
|
title,
|
||||||
|
description,
|
||||||
|
stories,
|
||||||
|
reqRefs,
|
||||||
|
adrRefs,
|
||||||
|
priority: fm.priority,
|
||||||
|
mvp: fm.mvp || false,
|
||||||
|
dependencies: deps, // other EPIC IDs this depends on
|
||||||
|
size: fm.size
|
||||||
|
};
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Wave Assignment
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const epicWaves = epics.map(epic => ({
|
||||||
|
...epic,
|
||||||
|
wave: epic.mvp ? 1 : 2
|
||||||
|
}));
|
||||||
|
|
||||||
|
// Log wave assignment
|
||||||
|
const wave1 = epicWaves.filter(e => e.wave === 1);
|
||||||
|
const wave2 = epicWaves.filter(e => e.wave === 2);
|
||||||
|
// wave-1: MVP epics (must-have, core functionality)
|
||||||
|
// wave-2: Post-MVP epics (should-have, enhancements)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Issue Creation Loop
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const createdIssues = [];
|
||||||
|
const epicToIssue = {}; // EPIC-ID -> Issue ID mapping
|
||||||
|
|
||||||
|
for (const epic of epicWaves) {
|
||||||
|
// Build issue JSON matching roadmap-with-file schema
|
||||||
|
const issueData = {
|
||||||
|
title: `[${specConfig.session_id}] ${epic.title}`,
|
||||||
|
status: "pending",
|
||||||
|
priority: epic.wave === 1 ? 2 : 3, // wave-1 = higher priority
|
||||||
|
context: `## ${epic.title}
|
||||||
|
|
||||||
|
${epic.description}
|
||||||
|
|
||||||
|
## Stories
|
||||||
|
${epic.stories}
|
||||||
|
|
||||||
|
## Spec References
|
||||||
|
- Epic: ${epic.file}
|
||||||
|
- Requirements: ${epic.reqRefs}
|
||||||
|
- Architecture: ${epic.adrRefs}
|
||||||
|
- Product Brief: ${workDir}/product-brief.md
|
||||||
|
- Full Spec: ${workDir}/`,
|
||||||
|
source: "text",
|
||||||
|
tags: [
|
||||||
|
"spec-generated",
|
||||||
|
`spec:${specConfig.session_id}`,
|
||||||
|
`wave-${epic.wave}`,
|
||||||
|
epic.mvp ? "mvp" : "post-mvp",
|
||||||
|
`epic:${epic.id}`
|
||||||
|
],
|
||||||
|
extended_context: {
|
||||||
|
notes: {
|
||||||
|
session: specConfig.session_id,
|
||||||
|
spec_dir: workDir,
|
||||||
|
source_epic: epic.id,
|
||||||
|
wave: epic.wave,
|
||||||
|
depends_on_issues: [], // Filled in Step 4
|
||||||
|
spec_documents: {
|
||||||
|
product_brief: `${workDir}/product-brief.md`,
|
||||||
|
requirements: `${workDir}/requirements/_index.md`,
|
||||||
|
architecture: `${workDir}/architecture/_index.md`,
|
||||||
|
epic: epic.file
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
lifecycle_requirements: {
|
||||||
|
test_strategy: "acceptance",
|
||||||
|
regression_scope: "affected",
|
||||||
|
acceptance_type: "manual",
|
||||||
|
commit_strategy: "per-epic"
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Create issue via ccw issue create (pipe JSON to avoid shell escaping)
|
||||||
|
const result = Bash(`echo '${JSON.stringify(issueData)}' | ccw issue create`);
|
||||||
|
|
||||||
|
// Parse returned issue ID
|
||||||
|
const issueId = JSON.parse(result).id; // e.g., ISS-20260308-001
|
||||||
|
epicToIssue[epic.id] = issueId;
|
||||||
|
|
||||||
|
createdIssues.push({
|
||||||
|
epic_id: epic.id,
|
||||||
|
epic_title: epic.title,
|
||||||
|
issue_id: issueId,
|
||||||
|
wave: epic.wave,
|
||||||
|
priority: issueData.priority,
|
||||||
|
mvp: epic.mvp
|
||||||
|
});
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4: Epic Dependency → Issue Dependency Mapping
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Map EPIC dependencies to Issue dependencies
|
||||||
|
for (const epic of epicWaves) {
|
||||||
|
if (epic.dependencies.length === 0) continue;
|
||||||
|
|
||||||
|
const issueId = epicToIssue[epic.id];
|
||||||
|
const depIssueIds = epic.dependencies
|
||||||
|
.map(depEpicId => epicToIssue[depEpicId])
|
||||||
|
.filter(Boolean);
|
||||||
|
|
||||||
|
if (depIssueIds.length > 0) {
|
||||||
|
// Update issue's extended_context.notes.depends_on_issues
|
||||||
|
// This is informational — actual dependency enforcement is in execution phase
|
||||||
|
// Note: ccw issue create already created the issue; dependency info is in the context
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 5: Generate issue-export-report.md
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const timestamp = new Date().toISOString();
|
||||||
|
|
||||||
|
const reportContent = `---
|
||||||
|
session_id: ${specConfig.session_id}
|
||||||
|
phase: 7
|
||||||
|
document_type: issue-export-report
|
||||||
|
status: complete
|
||||||
|
generated_at: ${timestamp}
|
||||||
|
stepsCompleted: ["load-epics", "wave-assignment", "issue-creation", "dependency-mapping", "report-generation"]
|
||||||
|
version: 1
|
||||||
|
dependencies:
|
||||||
|
- epics/_index.md
|
||||||
|
- readiness-report.md
|
||||||
|
---
|
||||||
|
|
||||||
|
# Issue Export Report
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
- **Session**: ${specConfig.session_id}
|
||||||
|
- **Issues Created**: ${createdIssues.length}
|
||||||
|
- **Wave 1 (MVP)**: ${wave1.length} issues
|
||||||
|
- **Wave 2 (Post-MVP)**: ${wave2.length} issues
|
||||||
|
- **Export Date**: ${timestamp}
|
||||||
|
|
||||||
|
## Issue Mapping
|
||||||
|
|
||||||
|
| Epic ID | Epic Title | Issue ID | Wave | Priority | MVP |
|
||||||
|
|---------|-----------|----------|------|----------|-----|
|
||||||
|
${createdIssues.map(i =>
|
||||||
|
`| ${i.epic_id} | ${i.epic_title} | ${i.issue_id} | ${i.wave} | ${i.priority} | ${i.mvp ? 'Yes' : 'No'} |`
|
||||||
|
).join('\n')}
|
||||||
|
|
||||||
|
## Spec Document Links
|
||||||
|
|
||||||
|
| Document | Path | Description |
|
||||||
|
|----------|------|-------------|
|
||||||
|
| Product Brief | ${workDir}/product-brief.md | Vision, goals, scope |
|
||||||
|
| Requirements | ${workDir}/requirements/_index.md | Functional + non-functional requirements |
|
||||||
|
| Architecture | ${workDir}/architecture/_index.md | Components, ADRs, tech stack |
|
||||||
|
| Epics | ${workDir}/epics/_index.md | Epic/Story breakdown |
|
||||||
|
| Readiness Report | ${workDir}/readiness-report.md | Quality validation |
|
||||||
|
| Spec Summary | ${workDir}/spec-summary.md | Executive summary |
|
||||||
|
|
||||||
|
## Dependency Map
|
||||||
|
|
||||||
|
| Issue ID | Depends On |
|
||||||
|
|----------|-----------|
|
||||||
|
${createdIssues.map(i => {
|
||||||
|
const epic = epicWaves.find(e => e.id === i.epic_id);
|
||||||
|
const deps = (epic.dependencies || []).map(d => epicToIssue[d]).filter(Boolean);
|
||||||
|
return `| ${i.issue_id} | ${deps.length > 0 ? deps.join(', ') : 'None'} |`;
|
||||||
|
}).join('\n')}
|
||||||
|
|
||||||
|
## Next Steps
|
||||||
|
|
||||||
|
1. **team-planex**: Execute all issues via coordinated team workflow
|
||||||
|
2. **Wave 1 only**: Execute MVP issues first (${wave1.length} issues)
|
||||||
|
3. **View issues**: Browse created issues via \`ccw issue list --tag spec:${specConfig.session_id}\`
|
||||||
|
4. **Manual review**: Review individual issues before execution
|
||||||
|
`;
|
||||||
|
|
||||||
|
Write(`${workDir}/issue-export-report.md`, reportContent);
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 6: Update spec-config.json
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
specConfig.issue_ids = createdIssues.map(i => i.issue_id);
|
||||||
|
specConfig.issues_created = createdIssues.length;
|
||||||
|
specConfig.phasesCompleted.push({
|
||||||
|
phase: 7,
|
||||||
|
name: "issue-export",
|
||||||
|
output_file: "issue-export-report.md",
|
||||||
|
issues_created: createdIssues.length,
|
||||||
|
wave_1_count: wave1.length,
|
||||||
|
wave_2_count: wave2.length,
|
||||||
|
completed_at: timestamp
|
||||||
|
});
|
||||||
|
Write(`${workDir}/spec-config.json`, JSON.stringify(specConfig, null, 2));
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 7: Handoff Options
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
AskUserQuestion({
|
||||||
|
questions: [
|
||||||
|
{
|
||||||
|
question: `${createdIssues.length} issues created from ${epicWaves.length} Epics. What would you like to do next?`,
|
||||||
|
header: "Next Step",
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{
|
||||||
|
label: "Execute via team-planex",
|
||||||
|
description: `Execute all ${createdIssues.length} issues with coordinated team workflow`
|
||||||
|
},
|
||||||
|
{
|
||||||
|
label: "Wave 1 only",
|
||||||
|
description: `Execute ${wave1.length} MVP issues first`
|
||||||
|
},
|
||||||
|
{
|
||||||
|
label: "View issues",
|
||||||
|
description: "Browse created issues before deciding"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
label: "Done",
|
||||||
|
description: "Export complete, handle manually"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
});
|
||||||
|
|
||||||
|
// Based on user selection:
|
||||||
|
if (selection === "Execute via team-planex") {
|
||||||
|
const issueIds = createdIssues.map(i => i.issue_id).join(',');
|
||||||
|
Skill({ skill: "team-planex", args: `--issues ${issueIds}` });
|
||||||
|
}
|
||||||
|
|
||||||
|
if (selection === "Wave 1 only") {
|
||||||
|
const wave1Ids = createdIssues.filter(i => i.wave === 1).map(i => i.issue_id).join(',');
|
||||||
|
Skill({ skill: "team-planex", args: `--issues ${wave1Ids}` });
|
||||||
|
}
|
||||||
|
|
||||||
|
if (selection === "View issues") {
|
||||||
|
Bash(`ccw issue list --tag spec:${specConfig.session_id}`);
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output
|
||||||
|
|
||||||
|
- **File**: `issue-export-report.md` — Issue mapping table + spec links + next steps
|
||||||
|
- **Updated**: `.workflow/issues/issues.jsonl` — New issue entries appended
|
||||||
|
- **Updated**: `spec-config.json` — Phase 7 completion + issue IDs
|
||||||
|
|
||||||
|
## Quality Checklist
|
||||||
|
|
||||||
|
- [ ] All MVP Epics have corresponding issues created
|
||||||
|
- [ ] All non-MVP Epics have corresponding issues created
|
||||||
|
- [ ] Issue tags include `spec-generated` and `spec:{session_id}`
|
||||||
|
- [ ] Issue `extended_context.notes.spec_documents` paths are correct
|
||||||
|
- [ ] Wave assignment matches MVP status (MVP → wave-1, non-MVP → wave-2)
|
||||||
|
- [ ] Epic dependencies mapped to issue dependency references
|
||||||
|
- [ ] `issue-export-report.md` generated with mapping table
|
||||||
|
- [ ] `spec-config.json` updated with `issue_ids` and `issues_created`
|
||||||
|
- [ ] Handoff options presented
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
| Error | Blocking? | Action |
|
||||||
|
|-------|-----------|--------|
|
||||||
|
| `ccw issue create` fails for one Epic | No | Log error, continue with remaining Epics, report partial creation |
|
||||||
|
| No EPIC files found | Yes | Error and return to Phase 5 |
|
||||||
|
| All issue creations fail | Yes | Error with CLI diagnostic, suggest manual creation |
|
||||||
|
| Dependency EPIC not found in mapping | No | Skip dependency link, log warning |
|
||||||
|
|
||||||
|
## Completion
|
||||||
|
|
||||||
|
Phase 7 is the final phase. The specification package has been fully converted to executable issues ready for team-planex or manual execution.
|
||||||
295
.codex/skills/spec-generator/specs/document-standards.md
Normal file
295
.codex/skills/spec-generator/specs/document-standards.md
Normal file
@@ -0,0 +1,295 @@
|
|||||||
|
# Document Standards
|
||||||
|
|
||||||
|
Defines format conventions, YAML frontmatter schema, naming rules, and content structure for all spec-generator outputs.
|
||||||
|
|
||||||
|
## When to Use
|
||||||
|
|
||||||
|
| Phase | Usage | Section |
|
||||||
|
|-------|-------|---------|
|
||||||
|
| All Phases | Frontmatter format | YAML Frontmatter Schema |
|
||||||
|
| All Phases | File naming | Naming Conventions |
|
||||||
|
| Phase 2-5 | Document structure | Content Structure |
|
||||||
|
| Phase 6 | Validation reference | All sections |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## YAML Frontmatter Schema
|
||||||
|
|
||||||
|
Every generated document MUST begin with YAML frontmatter:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
---
|
||||||
|
session_id: SPEC-{slug}-{YYYY-MM-DD}
|
||||||
|
phase: {1-6}
|
||||||
|
document_type: {product-brief|requirements|architecture|epics|readiness-report|spec-summary|issue-export-report}
|
||||||
|
status: draft|review|complete
|
||||||
|
generated_at: {ISO8601 timestamp}
|
||||||
|
stepsCompleted: []
|
||||||
|
version: 1
|
||||||
|
dependencies:
|
||||||
|
- {list of input documents used}
|
||||||
|
---
|
||||||
|
```
|
||||||
|
|
||||||
|
### Field Definitions
|
||||||
|
|
||||||
|
| Field | Type | Required | Description |
|
||||||
|
|-------|------|----------|-------------|
|
||||||
|
| `session_id` | string | Yes | Session identifier matching spec-config.json |
|
||||||
|
| `phase` | number | Yes | Phase number that generated this document (1-6) |
|
||||||
|
| `document_type` | string | Yes | One of: product-brief, requirements, architecture, epics, readiness-report, spec-summary, issue-export-report |
|
||||||
|
| `status` | enum | Yes | draft (initial), review (user reviewed), complete (finalized) |
|
||||||
|
| `generated_at` | string | Yes | ISO8601 timestamp of generation |
|
||||||
|
| `stepsCompleted` | array | Yes | List of step IDs completed during generation |
|
||||||
|
| `version` | number | Yes | Document version, incremented on re-generation |
|
||||||
|
| `dependencies` | array | No | List of input files this document depends on |
|
||||||
|
|
||||||
|
### Status Transitions
|
||||||
|
|
||||||
|
```
|
||||||
|
draft -> review -> complete
|
||||||
|
| ^
|
||||||
|
+-------------------+ (direct promotion in auto mode)
|
||||||
|
```
|
||||||
|
|
||||||
|
- **draft**: Initial generation, not yet user-reviewed
|
||||||
|
- **review**: User has reviewed and provided feedback
|
||||||
|
- **complete**: Finalized, ready for downstream consumption
|
||||||
|
|
||||||
|
In auto mode (`-y`), documents are promoted directly from `draft` to `complete`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Naming Conventions
|
||||||
|
|
||||||
|
### Session ID Format
|
||||||
|
|
||||||
|
```
|
||||||
|
SPEC-{slug}-{YYYY-MM-DD}
|
||||||
|
```
|
||||||
|
|
||||||
|
- **slug**: Lowercase, alphanumeric + Chinese characters, hyphens as separators, max 40 chars
|
||||||
|
- **date**: UTC+8 date in YYYY-MM-DD format
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
- `SPEC-task-management-system-2026-02-11`
|
||||||
|
- `SPEC-user-auth-oauth-2026-02-11`
|
||||||
|
|
||||||
|
### Output Files
|
||||||
|
|
||||||
|
| File | Phase | Description |
|
||||||
|
|------|-------|-------------|
|
||||||
|
| `spec-config.json` | 1 | Session configuration and state |
|
||||||
|
| `discovery-context.json` | 1 | Codebase exploration results (optional) |
|
||||||
|
| `refined-requirements.json` | 1.5 | Confirmed requirements after discussion |
|
||||||
|
| `glossary.json` | 2 | Terminology glossary for cross-document consistency |
|
||||||
|
| `product-brief.md` | 2 | Product brief document |
|
||||||
|
| `requirements.md` | 3 | PRD document |
|
||||||
|
| `architecture.md` | 4 | Architecture decisions document |
|
||||||
|
| `epics.md` | 5 | Epic/Story breakdown document |
|
||||||
|
| `readiness-report.md` | 6 | Quality validation report |
|
||||||
|
| `spec-summary.md` | 6 | One-page executive summary |
|
||||||
|
| `issue-export-report.md` | 7 | Issue export report with Epic→Issue mapping |
|
||||||
|
|
||||||
|
### Output Directory
|
||||||
|
|
||||||
|
```
|
||||||
|
.workflow/.spec/{session-id}/
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Content Structure
|
||||||
|
|
||||||
|
### Heading Hierarchy
|
||||||
|
|
||||||
|
- `#` (H1): Document title only (one per document)
|
||||||
|
- `##` (H2): Major sections
|
||||||
|
- `###` (H3): Subsections
|
||||||
|
- `####` (H4): Detail items (use sparingly)
|
||||||
|
|
||||||
|
Maximum depth: 4 levels. Prefer flat structures.
|
||||||
|
|
||||||
|
### Section Ordering
|
||||||
|
|
||||||
|
Every document follows this general pattern:
|
||||||
|
|
||||||
|
1. **YAML Frontmatter** (mandatory)
|
||||||
|
2. **Title** (H1)
|
||||||
|
3. **Executive Summary** (2-3 sentences)
|
||||||
|
4. **Core Content Sections** (H2, document-specific)
|
||||||
|
5. **Open Questions / Risks** (if applicable)
|
||||||
|
6. **References / Traceability** (links to upstream/downstream docs)
|
||||||
|
|
||||||
|
### Formatting Rules
|
||||||
|
|
||||||
|
| Element | Format | Example |
|
||||||
|
|---------|--------|---------|
|
||||||
|
| Requirements | `REQ-{NNN}` prefix | REQ-001: User login |
|
||||||
|
| Acceptance criteria | Checkbox list | `- [ ] User can log in with email` |
|
||||||
|
| Architecture decisions | `ADR-{NNN}` prefix | ADR-001: Use PostgreSQL |
|
||||||
|
| Epics | `EPIC-{NNN}` prefix | EPIC-001: Authentication |
|
||||||
|
| Stories | `STORY-{EPIC}-{NNN}` prefix | STORY-001-001: Login form |
|
||||||
|
| Priority tags | MoSCoW labels | `[Must]`, `[Should]`, `[Could]`, `[Won't]` |
|
||||||
|
| Mermaid diagrams | Fenced code blocks | ````mermaid ... ``` `` |
|
||||||
|
| Code examples | Language-tagged blocks | ````typescript ... ``` `` |
|
||||||
|
|
||||||
|
### Cross-Reference Format
|
||||||
|
|
||||||
|
Use relative references between documents:
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
See [Product Brief](product-brief.md#section-name) for details.
|
||||||
|
Derived from [REQ-001](requirements.md#req-001).
|
||||||
|
```
|
||||||
|
|
||||||
|
### Language
|
||||||
|
|
||||||
|
- Document body: Follow user's input language (Chinese or English)
|
||||||
|
- Technical identifiers: Always English (REQ-001, ADR-001, EPIC-001)
|
||||||
|
- YAML frontmatter keys: Always English
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## spec-config.json Schema
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"session_id": "string (required)",
|
||||||
|
"seed_input": "string (required) - original user input",
|
||||||
|
"input_type": "text|file (required)",
|
||||||
|
"timestamp": "ISO8601 (required)",
|
||||||
|
"mode": "interactive|auto (required)",
|
||||||
|
"complexity": "simple|moderate|complex (required)",
|
||||||
|
"depth": "light|standard|comprehensive (required)",
|
||||||
|
"focus_areas": ["string array"],
|
||||||
|
"seed_analysis": {
|
||||||
|
"problem_statement": "string",
|
||||||
|
"target_users": ["string array"],
|
||||||
|
"domain": "string",
|
||||||
|
"constraints": ["string array"],
|
||||||
|
"dimensions": ["string array - 3-5 exploration dimensions"]
|
||||||
|
},
|
||||||
|
"has_codebase": "boolean",
|
||||||
|
"spec_type": "service|api|library|platform (required) - type of specification",
|
||||||
|
"iteration_count": "number (required, default 0) - number of auto-fix iterations completed",
|
||||||
|
"iteration_history": [
|
||||||
|
{
|
||||||
|
"iteration": "number",
|
||||||
|
"timestamp": "ISO8601",
|
||||||
|
"readiness_score": "number (0-100)",
|
||||||
|
"errors_found": "number",
|
||||||
|
"phases_fixed": ["number array - phase numbers that were re-generated"]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"refined_requirements_file": "string (optional) - path to refined-requirements.json",
|
||||||
|
"phasesCompleted": [
|
||||||
|
{
|
||||||
|
"phase": "number (1-6)",
|
||||||
|
"name": "string (phase name)",
|
||||||
|
"output_file": "string (primary output file)",
|
||||||
|
"completed_at": "ISO8601"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"issue_ids": ["string array (optional) - IDs of issues created in Phase 7"],
|
||||||
|
"issues_created": "number (optional, default 0) - count of issues created in Phase 7"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## refined-requirements.json Schema
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"session_id": "string (required) - matches spec-config.json",
|
||||||
|
"phase": "1.5",
|
||||||
|
"generated_at": "ISO8601 (required)",
|
||||||
|
"source": "interactive-discussion|auto-expansion (required)",
|
||||||
|
"discussion_rounds": "number (required) - 0 for auto mode",
|
||||||
|
"clarified_problem_statement": "string (required) - refined problem statement",
|
||||||
|
"confirmed_target_users": [
|
||||||
|
{
|
||||||
|
"name": "string",
|
||||||
|
"needs": ["string array"],
|
||||||
|
"pain_points": ["string array"]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"confirmed_domain": "string",
|
||||||
|
"confirmed_features": [
|
||||||
|
{
|
||||||
|
"name": "string",
|
||||||
|
"description": "string",
|
||||||
|
"acceptance_criteria": ["string array"],
|
||||||
|
"edge_cases": ["string array"],
|
||||||
|
"priority": "must|should|could|unset"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"non_functional_requirements": [
|
||||||
|
{
|
||||||
|
"type": "Performance|Security|Usability|Scalability|Reliability|...",
|
||||||
|
"details": "string",
|
||||||
|
"measurable_criteria": "string (optional)"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"boundary_conditions": {
|
||||||
|
"in_scope": ["string array"],
|
||||||
|
"out_of_scope": ["string array"],
|
||||||
|
"constraints": ["string array"]
|
||||||
|
},
|
||||||
|
"integration_points": ["string array"],
|
||||||
|
"key_assumptions": ["string array"],
|
||||||
|
"discussion_log": [
|
||||||
|
{
|
||||||
|
"round": "number",
|
||||||
|
"agent_prompt": "string",
|
||||||
|
"user_response": "string",
|
||||||
|
"timestamp": "ISO8601"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## glossary.json Schema
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"session_id": "string (required) - matches spec-config.json",
|
||||||
|
"generated_at": "ISO8601 (required)",
|
||||||
|
"version": "number (required, default 1) - incremented on updates",
|
||||||
|
"terms": [
|
||||||
|
{
|
||||||
|
"term": "string (required) - the canonical term",
|
||||||
|
"definition": "string (required) - concise definition",
|
||||||
|
"aliases": ["string array - acceptable alternative names"],
|
||||||
|
"first_defined_in": "string (required) - source document path",
|
||||||
|
"category": "core|technical|business (required)"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Glossary Usage Rules
|
||||||
|
|
||||||
|
- Terms MUST be defined before first use in any document
|
||||||
|
- All documents MUST use the canonical term from glossary; aliases are for reference only
|
||||||
|
- Glossary is generated in Phase 2 and injected into all subsequent phase prompts
|
||||||
|
- Phase 6 validates glossary compliance across all documents
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Validation Checklist
|
||||||
|
|
||||||
|
- [ ] Every document starts with valid YAML frontmatter
|
||||||
|
- [ ] `session_id` matches across all documents in a session
|
||||||
|
- [ ] `status` field reflects current document state
|
||||||
|
- [ ] All cross-references resolve to valid targets
|
||||||
|
- [ ] Heading hierarchy is correct (no skipped levels)
|
||||||
|
- [ ] Technical identifiers use correct prefixes
|
||||||
|
- [ ] Output files are in the correct directory
|
||||||
|
- [ ] `glossary.json` created with >= 5 terms
|
||||||
|
- [ ] `spec_type` field set in spec-config.json
|
||||||
|
- [ ] All documents use glossary terms consistently
|
||||||
|
- [ ] Non-Goals section present in product brief (if applicable)
|
||||||
29
.codex/skills/spec-generator/specs/glossary-template.json
Normal file
29
.codex/skills/spec-generator/specs/glossary-template.json
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
{
|
||||||
|
"$schema": "glossary-v1",
|
||||||
|
"description": "Template for terminology glossary used across spec-generator documents",
|
||||||
|
"session_id": "",
|
||||||
|
"generated_at": "",
|
||||||
|
"version": 1,
|
||||||
|
"terms": [
|
||||||
|
{
|
||||||
|
"term": "",
|
||||||
|
"definition": "",
|
||||||
|
"aliases": [],
|
||||||
|
"first_defined_in": "product-brief.md",
|
||||||
|
"category": "core"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"_usage_notes": {
|
||||||
|
"category_values": {
|
||||||
|
"core": "Domain-specific terms central to the product (e.g., 'Workspace', 'Session')",
|
||||||
|
"technical": "Technical terms specific to the architecture (e.g., 'gRPC', 'event bus')",
|
||||||
|
"business": "Business/process terms (e.g., 'Sprint', 'SLA', 'stakeholder')"
|
||||||
|
},
|
||||||
|
"rules": [
|
||||||
|
"Terms MUST be defined before first use in any document",
|
||||||
|
"All documents MUST use the canonical 'term' field consistently",
|
||||||
|
"Aliases are for reference only - prefer canonical term in all documents",
|
||||||
|
"Phase 6 validates glossary compliance across all documents"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user