Refactor Terminal Dashboard: Remove Sessions Panel, Update Toolbar and Page Layout

- Removed the Sessions panel from the DashboardToolbar component, making the Sessions sidebar always visible.
- Updated the TerminalDashboardPage layout to include a fixed session sidebar alongside the terminal grid.
- Adjusted related imports and state management for session counts.
- Added new document standards for YAML frontmatter schema, naming conventions, and content structure.
- Introduced quality gates for spec-generator outputs, detailing scoring dimensions and per-phase criteria.
- Created templates for architecture documents, epics & stories, product briefs, and requirements PRD.
catlog22
2026-02-14 22:43:42 +08:00
parent 75558dc411
commit b7bd433263
13 changed files with 2058 additions and 61 deletions

View File

@@ -65,7 +65,12 @@ const sessionFolder = sessionMatch ? sessionMatch[1].trim() : '.workflow/.spec-t
// Parse topic from task description
const topicLines = task.description.split('\n').filter(l => !l.startsWith('Session:') && !l.startsWith('输出:') && l.trim())
const topic = topicLines[0] || task.subject.replace('RESEARCH-001: ', '')
const rawTopic = topicLines[0] || task.subject.replace('RESEARCH-001: ', '')
// Support file-reference input (consistent with spec-generator Phase 1)
const topic = (rawTopic.startsWith('@') || rawTopic.endsWith('.md') || rawTopic.endsWith('.txt'))
? Read(rawTopic.replace(/^@/, ''))
: rawTopic
// Use Gemini CLI for seed analysis
Bash({
@@ -122,6 +127,9 @@ const specConfig = {
topic: topic,
status: "research_complete",
complexity: seedAnalysis.complexity_assessment || "moderate",
depth: task.description.match(/讨论深度:\s*(.+)/)?.[1] || "standard",
focus_areas: seedAnalysis.exploration_dimensions || [],
mode: "interactive", // team mode is always interactive
phases_completed: ["discovery"],
created_at: new Date().toISOString(),
session_folder: sessionFolder,

View File

@@ -321,9 +321,184 @@ AskUserQuestion({
]
}]
})
// '新需求' (new requirement) → back to Phase 1 (reuse the team, create a new task chain)
// '交付执行' (handoff) → suggest the available execution workflows
// '关闭' (close) → send shutdown to each teammate → TeamDelete()
// === '新需求' (new requirement) → back to Phase 1 (reuse the team, create a new task chain) ===
// === '交付执行' (handoff) → Handoff logic ===
if (userChoice === '交付执行') {
AskUserQuestion({
questions: [{
question: "Select a handoff method:",
header: "Handoff",
multiSelect: false,
options: [
{ label: "lite-plan", description: "Lightweight execution, one Epic at a time" },
{ label: "full-plan", description: "Full planning (create WFS session + .brainstorming/ bridge)" },
{ label: "req-plan", description: "Requirement-level roadmap planning" },
{ label: "create-issues", description: "Create one issue per Epic" }
]
}]
})
// Read the spec documents
const specConfig = JSON.parse(Read(`${specSessionFolder}/spec-config.json`))
const specSummary = Read(`${specSessionFolder}/spec-summary.md`)
const productBrief = Read(`${specSessionFolder}/product-brief.md`)
const requirementsIndex = Read(`${specSessionFolder}/requirements/_index.md`)
const architectureIndex = Read(`${specSessionFolder}/architecture/_index.md`)
const epicsIndex = Read(`${specSessionFolder}/epics/_index.md`)
const epicFiles = Glob(`${specSessionFolder}/epics/EPIC-*.md`)
if (handoffChoice === 'lite-plan') {
// Read the first MVP Epic → invoke lite-plan
const firstMvpFile = epicFiles.find(f => {
const content = Read(f)
return content.includes('mvp: true')
})
const epicContent = Read(firstMvpFile)
const title = epicContent.match(/^#\s+(.+)/m)?.[1] || ''
const description = epicContent.match(/## Description\n([\s\S]*?)(?=\n## )/)?.[1]?.trim() || ''
Skill({ skill: "workflow:lite-plan", args: `"${title}: ${description}"` })
}
if (handoffChoice === 'full-plan' || handoffChoice === 'req-plan') {
// === Bridge: build a .brainstorming/-compatible structure ===
// Adapted from spec-generator Phase 6 Step 6
// Step A: build the structured description
const structuredDesc = `GOAL: ${specConfig.seed_analysis?.problem_statement || specConfig.topic}
SCOPE: ${specConfig.complexity} complexity
CONTEXT: Generated from spec team session ${specConfig.session_id}. Source: ${specSessionFolder}/`
// Step B: create the WFS session
Skill({ skill: "workflow:session:start", args: `--auto "${structuredDesc}"` })
// → produces sessionId (WFS-xxx) and the session directory
// Step C: create the .brainstorming/ bridge files
const brainstormDir = `.workflow/active/${sessionId}/.brainstorming`
Bash(`mkdir -p "${brainstormDir}/feature-specs"`)
// C.1: guidance-specification.md (read with highest priority by action-planning-agent)
Write(`${brainstormDir}/guidance-specification.md`, `
# ${specConfig.seed_analysis?.problem_statement || specConfig.topic} - Confirmed Guidance Specification
**Source**: spec-team session ${specConfig.session_id}
**Generated**: ${new Date().toISOString()}
**Spec Directory**: ${specSessionFolder}
## 1. Project Positioning & Goals
${extractSection(productBrief, "Vision")}
${extractSection(productBrief, "Goals")}
## 2. Requirements Summary
${extractSection(requirementsIndex, "Functional Requirements")}
## 3. Architecture Decisions
${extractSection(architectureIndex, "Architecture Decision Records")}
${extractSection(architectureIndex, "Technology Stack")}
## 4. Implementation Scope
${extractSection(epicsIndex, "Epic Overview")}
${extractSection(epicsIndex, "MVP Scope")}
## Feature Decomposition
${extractSection(epicsIndex, "Traceability Matrix")}
## Appendix: Source Documents
| Document | Path | Description |
|----------|------|-------------|
| Product Brief | ${specSessionFolder}/product-brief.md | Vision, goals, scope |
| Requirements | ${specSessionFolder}/requirements/ | _index.md + REQ-*.md + NFR-*.md |
| Architecture | ${specSessionFolder}/architecture/ | _index.md + ADR-*.md |
| Epics | ${specSessionFolder}/epics/ | _index.md + EPIC-*.md |
| Readiness Report | ${specSessionFolder}/readiness-report.md | Quality validation |
`)
// C.2: feature-index.json (EPIC → Feature mapping)
const features = epicFiles.map(epicFile => {
const content = Read(epicFile)
const fmMatch = content.match(/^---\n([\s\S]*?)\n---/)
const fm = fmMatch ? parseYAML(fmMatch[1]) : {}
const basename = epicFile.replace(/.*[/\\]/, '').replace('.md', '')
const epicNum = (fm.id || '').replace('EPIC-', '')
const slug = basename.replace(/^EPIC-\d+-/, '')
return {
id: `F-${epicNum}`, slug, name: content.match(/^#\s+(.+)/m)?.[1] || '',
priority: fm.mvp ? "High" : "Medium",
spec_path: `${brainstormDir}/feature-specs/F-${epicNum}-${slug}.md`,
source_epic: fm.id, source_file: epicFile
}
})
Write(`${brainstormDir}/feature-specs/feature-index.json`, JSON.stringify({
version: "1.0", source: "spec-team",
spec_session: specConfig.session_id, features, cross_cutting_specs: []
}, null, 2))
// C.3: feature-spec files (EPIC → F-*.md conversion)
features.forEach(feature => {
const epicContent = Read(feature.source_file)
Write(feature.spec_path, `
# Feature Spec: ${feature.source_epic} - ${feature.name}
**Source**: ${feature.source_file}
**Priority**: ${feature.priority === "High" ? "MVP" : "Post-MVP"}
## Description
${extractSection(epicContent, "Description")}
## Stories
${extractSection(epicContent, "Stories")}
## Requirements
${extractSection(epicContent, "Requirements")}
## Architecture
${extractSection(epicContent, "Architecture")}
`)
})
// Step D: invoke the downstream workflow
if (handoffChoice === 'full-plan') {
Skill({ skill: "workflow:plan", args: `"${structuredDesc}"` })
} else {
Skill({ skill: "workflow:req-plan-with-file", args: `"${specConfig.seed_analysis?.problem_statement || specConfig.topic}"` })
}
}
if (handoffChoice === 'create-issues') {
// Create one issue per EPIC file
epicFiles.forEach(epicFile => {
const content = Read(epicFile)
const title = content.match(/^#\s+(.+)/m)?.[1] || ''
const description = content.match(/## Description\n([\s\S]*?)(?=\n## )/)?.[1]?.trim() || ''
Skill({ skill: "issue:new", args: `"${title}: ${description}"` })
})
}
}
// === '关闭' (close) → send shutdown to each teammate → TeamDelete() ===
```
#### Helper Functions Reference (pseudocode)
```javascript
// Extract a named ## section from a markdown document
function extractSection(markdown, sectionName) {
// Return content between ## {sectionName} and next ## heading
const regex = new RegExp(`## ${sectionName}\\n([\\s\\S]*?)(?=\\n## |$)`)
return markdown.match(regex)?.[1]?.trim() || ''
}
// Parse YAML frontmatter string into object
function parseYAML(yamlStr) {
// Simple key-value parsing from YAML frontmatter
const result = {}
yamlStr.split('\n').forEach(line => {
const match = line.match(/^(\w+):\s*(.+)/)
if (match) result[match[1]] = match[2].replace(/^["']|["']$/g, '')
})
return result
}
```
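The simple `parseYAML` above only handles flat scalar keys; the EPIC frontmatter written in DRAFT-004 below also carries list keys (`requirements`, `architecture`, `dependencies`) and a boolean `mvp`, which the scalar parser would return as the string `"true"`/`"false"`. A slightly extended sketch (hypothetical helper, same pseudocode register, assuming flat YAML with single-level list blocks):
```javascript
// Extended frontmatter parser: scalar keys, single-level list blocks, and booleans
function parseFrontmatter(yamlStr) {
  const result = {}
  let currentKey = null
  yamlStr.split('\n').forEach(line => {
    const listItem = line.match(/^\s+-\s*(.+)/)
    if (listItem && currentKey) {
      result[currentKey].push(listItem[1].trim())   // item under the current list key
      return
    }
    const kv = line.match(/^(\w+):\s*(.*)$/)
    if (!kv) return
    const [, key, rawValue] = kv
    if (rawValue === '') {
      result[key] = []            // bare "key:" starts a list block
      currentKey = key
    } else {
      const value = rawValue.replace(/^["']|["']$/g, '')
      result[key] = value === 'true' ? true : value === 'false' ? false : value
      currentKey = null
    }
  })
  return result
}
```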
## Session File Structure

View File

@@ -101,6 +101,10 @@ if (reviewMode === 'spec') {
const sessionMatch = task.description.match(/Session:\s*(.+)/)
const sessionFolder = sessionMatch ? sessionMatch[1].trim() : ''
// Load the quality gate criteria (shared spec-generator resource)
let qualityGates = null
try { qualityGates = Read('../specs/quality-gates.md') } catch {}
// Load all spec documents
const documents = {
config: null, discoveryContext: null, productBrief: null,
@@ -227,6 +231,7 @@ if (reviewMode === 'spec') {
// Completeness (25%): all sections present with content
function scoreCompleteness(docs) {
let score = 0
const issues = []
const checks = [
{ name: 'spec-config.json', present: !!docs.config, weight: 5 },
{ name: 'discovery-context.json', present: !!docs.discoveryContext, weight: 10 },
@@ -238,8 +243,54 @@ if (reviewMode === 'spec') {
{ name: 'epics/_index.md', present: !!docs.epicsIndex, weight: 10 },
{ name: 'EPIC-* files', present: docs.epics.length > 0, weight: 5 }
]
checks.forEach(c => { if (c.present) score += c.weight })
return { score, issues: checks.filter(c => !c.present).map(c => `Missing: ${c.name}`) }
checks.forEach(c => { if (c.present) score += c.weight; else issues.push(`Missing: ${c.name}`) })
// Enhancement: section content checks (verify not only that files exist, but that key sections have substantive content)
if (docs.productBrief) {
const briefSections = ['## Vision', '## Problem Statement', '## Target Users', '## Goals', '## Scope']
const missingSections = briefSections.filter(s => !docs.productBrief.includes(s))
if (missingSections.length > 0) {
score -= missingSections.length * 3
issues.push(`Product Brief missing sections: ${missingSections.join(', ')}`)
}
}
if (docs.requirementsIndex) {
const reqSections = ['## Functional Requirements', '## Non-Functional Requirements', '## MoSCoW Summary']
const missingReqSections = reqSections.filter(s => !docs.requirementsIndex.includes(s))
if (missingReqSections.length > 0) {
score -= missingReqSections.length * 3
issues.push(`Requirements index missing sections: ${missingReqSections.join(', ')}`)
}
}
if (docs.architectureIndex) {
const archSections = ['## Architecture Decision Records', '## Technology Stack']
const missingArchSections = archSections.filter(s => !docs.architectureIndex.includes(s))
if (missingArchSections.length > 0) {
score -= missingArchSections.length * 3
issues.push(`Architecture index missing sections: ${missingArchSections.join(', ')}`)
}
if (!docs.architectureIndex.includes('```mermaid')) {
score -= 5
issues.push('Architecture index missing Mermaid component diagram')
}
}
if (docs.epicsIndex) {
const epicsSections = ['## Epic Overview', '## MVP Scope']
const missingEpicsSections = epicsSections.filter(s => !docs.epicsIndex.includes(s))
if (missingEpicsSections.length > 0) {
score -= missingEpicsSections.length * 3
issues.push(`Epics index missing sections: ${missingEpicsSections.join(', ')}`)
}
if (!docs.epicsIndex.includes('```mermaid')) {
score -= 5
issues.push('Epics index missing Mermaid dependency diagram')
}
}
return { score: Math.max(0, score), issues }
}
// Consistency (25%): terminology, format, references
@@ -391,6 +442,31 @@ version: 1
## Quality Gate: ${qualityGate}
## Per-Phase Quality Gates
${qualityGates ? `_(Applied from ../specs/quality-gates.md)_
### Phase 2 (Product Brief)
- Vision statement: ${docs.productBrief?.includes('## Vision') ? 'PASS' : 'MISSING'}
- Problem statement specificity: ${docs.productBrief?.match(/## Problem/)?.length ? 'PASS' : 'MISSING'}
- Target users >= 1: ${docs.productBrief?.includes('## Target Users') ? 'PASS' : 'MISSING'}
- Measurable goals >= 2: ${docs.productBrief?.includes('## Goals') ? 'PASS' : 'MISSING'}
### Phase 3 (Requirements)
- Functional requirements >= 3: ${docs.requirements.length >= 3 ? 'PASS' : 'FAIL (' + docs.requirements.length + ')'}
- Acceptance criteria present: ${docs.requirements.some(r => /acceptance|criteria/i.test(r)) ? 'PASS' : 'MISSING'}
- MoSCoW priority tags: ${docs.requirementsIndex?.includes('Must') ? 'PASS' : 'MISSING'}
### Phase 4 (Architecture)
- Component diagram: ${docs.architectureIndex?.includes('mermaid') ? 'PASS' : 'MISSING'}
- ADR with alternatives: ${docs.adrs.some(a => /alternative|option/i.test(a)) ? 'PASS' : 'MISSING'}
- Tech stack specified: ${docs.architectureIndex?.includes('Technology') ? 'PASS' : 'MISSING'}
### Phase 5 (Epics)
- MVP subset tagged: ${docs.epics.some(e => /mvp:\s*true/i.test(e)) ? 'PASS' : 'MISSING'}
- Dependency map: ${docs.epicsIndex?.includes('mermaid') ? 'PASS' : 'MISSING'}
- Story sizing: ${docs.epics.some(e => /\b[SMLX]{1,2}\b|Small|Medium|Large/.test(e)) ? 'PASS' : 'MISSING'}
` : '_(quality-gates.md not loaded)_'}
## Issues Found
${allSpecIssues.map(i => '- ' + i).join('\n') || 'None'}
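For reference, a minimal sketch of how the per-dimension scores could roll up into the `${qualityGate}` value rendered above. Only Completeness and Consistency (25% each) appear in this diff; the remaining dimensions, the 0-100 normalization, and the pass/conditional thresholds are assumptions, not part of the commit:
```javascript
// Hypothetical roll-up of dimension results into an overall score and gate.
// dimensionResults: [{ name, score, maxScore, weight, issues }, ...] with weights summing to 1.
function computeQualityGate(dimensionResults) {
  const overall = dimensionResults.reduce(
    (sum, d) => sum + (d.score / d.maxScore) * d.weight * 100, 0)
  const allSpecIssues = dimensionResults.flatMap(d => d.issues || [])
  const qualityGate = overall >= 85 ? 'PASS' : overall >= 70 ? 'CONDITIONAL' : 'FAIL'
  return { overall: Math.round(overall), qualityGate, allSpecIssues }
}
```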

View File

@@ -106,17 +106,564 @@ if (docType === 'epics') {
### Phase 3: Document Generation (type-specific)
Route to specific generation logic based on document type:
**Pre-steps (shared by all document types)**:
**DRAFT-001: Product Brief** — Multi-perspective analysis using 3 parallel CLI analyses (product/technical/user), then synthesize into product-brief.md with YAML frontmatter.
```javascript
// 1. Load the formatting standards
const docStandards = Read('../specs/document-standards.md')
**DRAFT-002: Requirements/PRD** — Expand requirements from Product Brief via CLI. Generate REQ-NNN functional requirements + NFR-{type}-NNN non-functional requirements with MoSCoW prioritization. Output to requirements/ directory.
// 2. Load the matching template file (paths listed under Shared Spec Resources in SKILL.md)
const templateMap = {
'product-brief': '../templates/product-brief.md',
'requirements': '../templates/requirements-prd.md',
'architecture': '../templates/architecture-doc.md',
'epics': '../templates/epics-template.md'
}
const template = Read(templateMap[docType])
**DRAFT-003: Architecture** — Design system architecture from requirements via CLI. Generate architecture/_index.md + ADR-*.md files with tech stack, component diagrams (Mermaid), and data model.
// 3. Build sharedContext
const seedAnalysis = specConfig?.seed_analysis || discoveryContext?.seed_analysis || {}
const sharedContext = `
SEED: ${specConfig?.topic || ''}
PROBLEM: ${seedAnalysis.problem_statement || ''}
TARGET USERS: ${(seedAnalysis.target_users || []).join(', ')}
DOMAIN: ${seedAnalysis.domain || ''}
CONSTRAINTS: ${(seedAnalysis.constraints || []).join(', ')}
FOCUS AREAS: ${(specConfig?.focus_areas || []).join(', ')}
${priorDocs.discoveryContext ? `
CODEBASE CONTEXT:
- Existing patterns: ${JSON.parse(priorDocs.discoveryContext).existing_patterns?.slice(0,5).join(', ') || 'none'}
- Tech stack: ${JSON.stringify(JSON.parse(priorDocs.discoveryContext).tech_stack || {})}
` : ''}`
**DRAFT-004: Epics & Stories** — Decompose requirements into EPIC-* with STORY-* user stories, cross-Epic dependency map, MVP scope definition, and execution order. Output to epics/ directory.
// 4. Route to the specific document type
```
Each uses CLI tools (gemini/codex/claude) for multi-perspective analysis, with discussion feedback integration from the preceding DISCUSS round.
#### DRAFT-001: Product Brief
Three parallel CLI analyses (product / technical / user perspectives), synthesized into product-brief.md.
```javascript
if (docType === 'product-brief') {
// === Parallel CLI analyses ===
// Product perspective (Gemini)
Bash({
command: `ccw cli -p "PURPOSE: Product analysis for specification - identify market fit, user value, and success criteria.
Success: Clear vision, measurable goals, competitive positioning.
${sharedContext}
TASK:
- Define product vision (1-3 sentences, aspirational)
- Analyze market/competitive landscape
- Define 3-5 measurable success metrics
- Identify scope boundaries (in-scope vs out-of-scope)
- Assess user value proposition
- List assumptions that need validation
MODE: analysis
EXPECTED: Structured product analysis with: vision, goals with metrics, scope, competitive positioning, assumptions
CONSTRAINTS: Focus on 'what' and 'why', not 'how'
" --tool gemini --mode analysis`,
run_in_background: true
})
// Technical perspective (Codex)
Bash({
command: `ccw cli -p "PURPOSE: Technical feasibility analysis for specification - assess implementation viability and constraints.
Success: Clear technical constraints, integration complexity, technology recommendations.
${sharedContext}
TASK:
- Assess technical feasibility of the core concept
- Identify technical constraints and blockers
- Evaluate integration complexity with existing systems
- Recommend technology approach (high-level)
- Identify technical risks and dependencies
- Estimate complexity: simple/moderate/complex
MODE: analysis
EXPECTED: Technical analysis with: feasibility assessment, constraints, integration complexity, tech recommendations, risks
CONSTRAINTS: Focus on feasibility and constraints, not detailed architecture
" --tool codex --mode analysis`,
run_in_background: true
})
// User perspective (Claude)
Bash({
command: `ccw cli -p "PURPOSE: User experience analysis for specification - understand user journeys, pain points, and UX considerations.
Success: Clear user personas, journey maps, UX requirements.
${sharedContext}
TASK:
- Elaborate user personas with goals and frustrations
- Map primary user journey (happy path)
- Identify key pain points in current experience
- Define UX success criteria
- List accessibility and usability considerations
- Suggest interaction patterns
MODE: analysis
EXPECTED: User analysis with: personas, journey map, pain points, UX criteria, interaction recommendations
CONSTRAINTS: Focus on user needs and experience, not implementation
" --tool claude --mode analysis`,
run_in_background: true
})
// STOP: Wait for all 3 CLI results
// === Synthesize the three perspectives ===
const synthesis = {
convergent_themes: [], // themes where all three perspectives agree
conflicts: [], // points of conflict between perspectives
product_insights: [], // insights unique to the product perspective
technical_insights: [], // insights unique to the technical perspective
user_insights: [] // insights unique to the user perspective
}
// === Integrate discussion feedback ===
if (discussionFeedback) {
// Extract consensus and adjustment suggestions from discuss-001-scope.md
// Fold the discussion conclusions into synthesis
}
// === Generate the document from the template ===
const frontmatter = `---
session_id: ${specConfig?.session_id || 'unknown'}
phase: 2
document_type: product-brief
status: draft
generated_at: ${new Date().toISOString()}
version: 1
dependencies:
- spec-config.json
- discovery-context.json
---`
// Fill in every template section: Vision, Problem Statement, Target Users, Goals, Scope
// Apply the document-standards.md formatting rules
Write(`${sessionFolder}/product-brief.md`, `${frontmatter}\n\n${filledContent}`)
outputPath = 'product-brief.md'
}
```
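The block above writes `filledContent` without showing how it is assembled from the loaded template. A minimal sketch, assuming `{{Section Name}}` placeholders in the template and drawing on the `synthesis` and `seedAnalysis` objects defined earlier (placeholder syntax and section-to-field mapping are assumptions):
```javascript
// Hypothetical assembly of filledContent; the real template markers may differ.
function fillTemplate(template, sections) {
  return Object.entries(sections).reduce(
    (doc, [name, text]) => doc.replace(`{{${name}}}`, text || '_TBD_'),
    template
  )
}

const filledContent = fillTemplate(template, {
  'Vision': synthesis.product_insights.join('\n'),
  'Problem Statement': seedAnalysis.problem_statement || '',
  'Target Users': (seedAnalysis.target_users || []).map(u => `- ${u}`).join('\n'),
  'Goals': synthesis.convergent_themes.map(t => `- ${t}`).join('\n'),
  'Scope': synthesis.conflicts.map(c => `- Open question: ${c}`).join('\n')
})
```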
#### DRAFT-002: Requirements/PRD
Expand requirements via the Gemini CLI and generate REQ-NNN + NFR-{type}-NNN files.
```javascript
if (docType === 'requirements') {
// === Requirements expansion CLI ===
Bash({
command: `ccw cli -p "PURPOSE: Generate detailed functional and non-functional requirements from product brief.
Success: Complete PRD with testable acceptance criteria for every requirement.
PRODUCT BRIEF CONTEXT:
${priorDocs.productBrief?.slice(0, 3000) || ''}
${sharedContext}
TASK:
- For each goal in the product brief, generate 3-7 functional requirements
- Each requirement must have:
- Unique ID: REQ-NNN (zero-padded)
- Clear title
- Detailed description
- User story: As a [persona], I want [action] so that [benefit]
- 2-4 specific, testable acceptance criteria
- Generate non-functional requirements:
- Performance (response times, throughput)
- Security (authentication, authorization, data protection)
- Scalability (user load, data volume)
- Usability (accessibility, learnability)
- Assign MoSCoW priority: Must/Should/Could/Won't
- Output structure per requirement: ID, title, description, user_story, acceptance_criteria[], priority, traces
MODE: analysis
EXPECTED: Structured requirements with: ID, title, description, user story, acceptance criteria, priority, traceability to goals
CONSTRAINTS: Every requirement must be specific enough to estimate and test. No vague requirements.
" --tool gemini --mode analysis`,
run_in_background: true
})
// Wait for CLI result
// === Integrate discussion feedback ===
if (discussionFeedback) {
// Extract requirement adjustment suggestions from discuss-002-brief.md
// Merge added / modified / removed requirements
}
// === Generate the requirements/ directory ===
Bash(`mkdir -p "${sessionFolder}/requirements"`)
const timestamp = new Date().toISOString()
// Parse CLI output → funcReqs[], nfReqs[]
const funcReqs = parseFunctionalRequirements(cliOutput)
const nfReqs = parseNonFunctionalRequirements(cliOutput)
// Write individual REQ-*.md files (one per functional requirement)
funcReqs.forEach(req => {
const reqFrontmatter = `---
id: REQ-${req.id}
title: "${req.title}"
priority: ${req.priority}
status: draft
traces:
- product-brief.md
---`
const reqContent = `${reqFrontmatter}
# REQ-${req.id}: ${req.title}
## Description
${req.description}
## User Story
${req.user_story}
## Acceptance Criteria
${req.acceptance_criteria.map((ac, i) => `${i+1}. ${ac}`).join('\n')}
`
Write(`${sessionFolder}/requirements/REQ-${req.id}-${req.slug}.md`, reqContent)
})
// Write individual NFR-*.md files
nfReqs.forEach(nfr => {
const nfrFrontmatter = `---
id: NFR-${nfr.type}-${nfr.id}
type: ${nfr.type}
title: "${nfr.title}"
status: draft
traces:
- product-brief.md
---`
const nfrContent = `${nfrFrontmatter}
# NFR-${nfr.type}-${nfr.id}: ${nfr.title}
## Requirement
${nfr.requirement}
## Metric & Target
${nfr.metric} — Target: ${nfr.target}
`
Write(`${sessionFolder}/requirements/NFR-${nfr.type}-${nfr.id}-${nfr.slug}.md`, nfrContent)
})
// Write _index.md (summary + links)
const indexFrontmatter = `---
session_id: ${specConfig?.session_id || 'unknown'}
phase: 3
document_type: requirements-index
status: draft
generated_at: ${timestamp}
version: 1
dependencies:
- product-brief.md
---`
const indexContent = `${indexFrontmatter}
# Requirements (PRD)
## Summary
Total: ${funcReqs.length} functional + ${nfReqs.length} non-functional requirements
## Functional Requirements
| ID | Title | Priority | Status |
|----|-------|----------|--------|
${funcReqs.map(r => `| [REQ-${r.id}](REQ-${r.id}-${r.slug}.md) | ${r.title} | ${r.priority} | draft |`).join('\n')}
## Non-Functional Requirements
| ID | Type | Title |
|----|------|-------|
${nfReqs.map(n => `| [NFR-${n.type}-${n.id}](NFR-${n.type}-${n.id}-${n.slug}.md) | ${n.type} | ${n.title} |`).join('\n')}
## MoSCoW Summary
- **Must**: ${funcReqs.filter(r => r.priority === 'Must').length}
- **Should**: ${funcReqs.filter(r => r.priority === 'Should').length}
- **Could**: ${funcReqs.filter(r => r.priority === 'Could').length}
- **Won't**: ${funcReqs.filter(r => r.priority === "Won't").length}
`
Write(`${sessionFolder}/requirements/_index.md`, indexContent)
outputPath = 'requirements/_index.md'
}
```
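`parseFunctionalRequirements` and `parseNonFunctionalRequirements` are referenced above but not defined in this diff. A minimal sketch, assuming the CLI is asked to return its requirements as a fenced JSON block of the shape `{ functional: [...], non_functional: [...] }` (that JSON contract is an assumption; the committed prompt only lists the per-requirement fields):
```javascript
// Hypothetical parsers for the requirements CLI output; adjust to the real output shape.
function extractJsonBlock(cliOutput) {
  const match = cliOutput.match(/```json\s*([\s\S]*?)```/)
  return match ? JSON.parse(match[1]) : { functional: [], non_functional: [] }
}

function slugify(title) {
  return (title || '').toLowerCase().replace(/[^a-z0-9]+/g, '-').replace(/^-|-$/g, '')
}

function parseFunctionalRequirements(cliOutput) {
  return extractJsonBlock(cliOutput).functional.map((r, i) => ({
    id: String(i + 1).padStart(3, '0'),        // REQ-001, REQ-002, ...
    title: r.title,
    slug: slugify(r.title),
    description: r.description || '',
    user_story: r.user_story || '',
    acceptance_criteria: r.acceptance_criteria || [],
    priority: r.priority || 'Should'
  }))
}

function parseNonFunctionalRequirements(cliOutput) {
  return extractJsonBlock(cliOutput).non_functional.map((n, i) => ({
    id: String(i + 1).padStart(3, '0'),
    type: n.type || 'performance',             // performance | security | scalability | usability
    title: n.title,
    slug: slugify(n.title),
    requirement: n.requirement || n.description || '',
    metric: n.metric || 'TBD',
    target: n.target || 'TBD'
  }))
}
```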
#### DRAFT-003: Architecture
Two-stage CLI: Gemini architecture design + Codex architecture challenge/review.
```javascript
if (docType === 'architecture') {
// === Stage 1: Architecture design (Gemini) ===
Bash({
command: `ccw cli -p "PURPOSE: Generate technical architecture for the specified requirements.
Success: Complete component architecture, tech stack, and ADRs with justified decisions.
PRODUCT BRIEF (summary):
${priorDocs.productBrief?.slice(0, 3000) || ''}
REQUIREMENTS:
${priorDocs.requirementsIndex?.slice(0, 5000) || ''}
${sharedContext}
TASK:
- Define system architecture style (monolith, microservices, serverless, etc.) with justification
- Identify core components and their responsibilities
- Create component interaction diagram (Mermaid graph TD format)
- Specify technology stack: languages, frameworks, databases, infrastructure
- Generate 2-4 Architecture Decision Records (ADRs):
- Each ADR: context, decision, 2-3 alternatives with pros/cons, consequences
- Focus on: data storage, API design, authentication, key technical choices
- Define data model: key entities and relationships (Mermaid erDiagram format)
- Identify security architecture: auth, authorization, data protection
- List API endpoints (high-level)
MODE: analysis
EXPECTED: Complete architecture with: style justification, component diagram, tech stack table, ADRs, data model, security controls, API overview
CONSTRAINTS: Architecture must support all Must-have requirements. Prefer proven technologies.
" --tool gemini --mode analysis`,
run_in_background: true
})
// Wait for Gemini result
// === Stage 2: Architecture review (Codex) ===
Bash({
command: `ccw cli -p "PURPOSE: Critical review of proposed architecture - identify weaknesses and risks.
Success: Actionable feedback with specific concerns and improvement suggestions.
PROPOSED ARCHITECTURE:
${geminiArchitectureOutput.slice(0, 5000)}
REQUIREMENTS CONTEXT:
${priorDocs.requirementsIndex?.slice(0, 2000) || ''}
TASK:
- Challenge each ADR: are the alternatives truly the best options?
- Identify scalability bottlenecks in the component design
- Assess security gaps: authentication, authorization, data protection
- Evaluate technology choices: maturity, community support, fit
- Check for over-engineering or under-engineering
- Verify architecture covers all Must-have requirements
- Rate overall architecture quality: 1-5 with justification
MODE: analysis
EXPECTED: Architecture review with: per-ADR feedback, scalability concerns, security gaps, technology risks, quality rating
CONSTRAINTS: Be genuinely critical, not just validating. Focus on actionable improvements.
" --tool codex --mode analysis`,
run_in_background: true
})
// Wait for Codex result
// === Integrate discussion feedback ===
if (discussionFeedback) {
// Extract architecture-related feedback from discuss-003-requirements.md
// Merge it into the architecture design
}
// === Codebase integration mapping (conditional) ===
let integrationMapping = null
if (priorDocs.discoveryContext) {
const dc = JSON.parse(priorDocs.discoveryContext)
if (dc.relevant_files) {
integrationMapping = dc.relevant_files.map(f => ({
new_component: '...',
existing_module: f.path,
integration_type: 'Extend|Replace|New',
notes: f.rationale
}))
}
}
// === Generate the architecture/ directory ===
Bash(`mkdir -p "${sessionFolder}/architecture"`)
const timestamp = new Date().toISOString()
const adrs = parseADRs(geminiArchitectureOutput, codexReviewOutput)
// Write individual ADR-*.md files
adrs.forEach(adr => {
const adrFrontmatter = `---
id: ADR-${adr.id}
title: "${adr.title}"
status: draft
traces:
- ../requirements/_index.md
---`
const adrContent = `${adrFrontmatter}
# ADR-${adr.id}: ${adr.title}
## Context
${adr.context}
## Decision
${adr.decision}
## Alternatives
${adr.alternatives.map((alt, i) => `### Option ${i+1}: ${alt.name}\n- **Pros**: ${alt.pros.join(', ')}\n- **Cons**: ${alt.cons.join(', ')}`).join('\n\n')}
## Consequences
${adr.consequences}
## Review Feedback
${adr.reviewFeedback || 'N/A'}
`
Write(`${sessionFolder}/architecture/ADR-${adr.id}-${adr.slug}.md`, adrContent)
})
// Write _index.md (with Mermaid component diagram + ER diagram + links)
const archIndexFrontmatter = `---
session_id: ${specConfig?.session_id || 'unknown'}
phase: 4
document_type: architecture-index
status: draft
generated_at: ${timestamp}
version: 1
dependencies:
- ../product-brief.md
- ../requirements/_index.md
---`
// Contains: system overview, component diagram (Mermaid), tech stack table,
// ADR links table, data model (Mermaid erDiagram), API design, security controls
Write(`${sessionFolder}/architecture/_index.md`, archIndexContent)
outputPath = 'architecture/_index.md'
}
```
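`parseADRs` is likewise referenced but not defined here. A minimal sketch, assuming the Gemini output formats each decision as a `### ADR-NNN: Title` section with bold `**Context**` / `**Decision**` / `**Consequences**` labels, and that the Codex review mentions each ADR by id (all of these are assumptions about the CLI output):
```javascript
// Hypothetical ADR parser combining the design output with the review output.
function parseADRs(designOutput, reviewOutput) {
  const blocks = designOutput.split(/(?=### ADR-\d+)/).filter(b => b.startsWith('### ADR-'))
  return blocks.map(block => {
    const header = block.match(/^### ADR-(\d+):\s*(.+)/)
    const num = header?.[1] || '0'
    const title = header?.[2]?.trim() || 'Untitled decision'
    const section = name =>
      block.match(new RegExp(`\\*\\*${name}\\*\\*:?\\s*([\\s\\S]*?)(?=\\n\\*\\*|$)`))?.[1]?.trim() || ''
    // Pull the reviewer paragraph that mentions this ADR, if any
    const reviewFeedback = reviewOutput.split('\n\n').find(p => p.includes(`ADR-${num}`)) || ''
    return {
      id: num.padStart(3, '0'),
      title,
      slug: title.toLowerCase().replace(/[^a-z0-9]+/g, '-').replace(/^-|-$/g, ''),
      context: section('Context'),
      decision: section('Decision'),
      alternatives: [],        // alternative parsing omitted; depends on CLI formatting
      consequences: section('Consequences'),
      reviewFeedback
    }
  })
}
```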
#### DRAFT-004: Epics & Stories
Decompose requirements into Epics via the Gemini CLI and generate EPIC-*.md files.
```javascript
if (docType === 'epics') {
// === Epic decomposition CLI ===
Bash({
command: `ccw cli -p "PURPOSE: Decompose requirements into executable Epics and Stories for implementation planning.
Success: 3-7 Epics with prioritized Stories, dependency map, and MVP subset clearly defined.
PRODUCT BRIEF (summary):
${priorDocs.productBrief?.slice(0, 2000) || ''}
REQUIREMENTS:
${priorDocs.requirementsIndex?.slice(0, 5000) || ''}
ARCHITECTURE (summary):
${priorDocs.architectureIndex?.slice(0, 3000) || ''}
TASK:
- Group requirements into 3-7 logical Epics:
- Each Epic: EPIC-NNN ID, title, description, priority (Must/Should/Could)
- Group by functional domain or user journey stage
- Tag MVP Epics (minimum set for initial release)
- For each Epic, generate 2-5 Stories:
- Each Story: STORY-{EPIC}-NNN ID, title
- User story format: As a [persona], I want [action] so that [benefit]
- 2-4 acceptance criteria per story (testable)
- Relative size estimate: S/M/L/XL
- Trace to source requirement(s): REQ-NNN
- Create dependency map:
- Cross-Epic dependencies (which Epics block others)
- Mermaid graph LR format
- Recommended execution order with rationale
- Define MVP:
- Which Epics are in MVP
- MVP definition of done (3-5 criteria)
- What is explicitly deferred post-MVP
MODE: analysis
EXPECTED: Structured output with: Epic list (ID, title, priority, MVP flag), Stories per Epic (ID, user story, AC, size, trace), dependency Mermaid diagram, execution order, MVP definition
CONSTRAINTS: Every Must-have requirement must appear in at least one Story. Stories must be small enough to implement independently. Dependencies should be minimized across Epics.
" --tool gemini --mode analysis`,
run_in_background: true
})
// Wait for CLI result
// === Integrate discussion feedback ===
if (discussionFeedback) {
// Extract execution-related feedback from discuss-004-architecture.md
// Adjust Epic granularity and MVP scope
}
// === Generate the epics/ directory ===
Bash(`mkdir -p "${sessionFolder}/epics"`)
const timestamp = new Date().toISOString()
const epicsList = parseEpics(cliOutput)
// Write individual EPIC-*.md files (stories included)
epicsList.forEach(epic => {
const epicFrontmatter = `---
id: EPIC-${epic.id}
title: "${epic.title}"
priority: ${epic.priority}
mvp: ${epic.mvp}
size: ${epic.size}
requirements:
${epic.reqs.map(r => ` - ${r}`).join('\n')}
architecture:
${epic.adrs.map(a => ` - ${a}`).join('\n')}
dependencies:
${epic.deps.map(d => ` - ${d}`).join('\n')}
status: draft
---`
const storiesContent = epic.stories.map(s => `### ${s.id}: ${s.title}
**User Story**: ${s.user_story}
**Size**: ${s.size}
**Traces**: ${s.traces.join(', ')}
**Acceptance Criteria**:
${s.acceptance_criteria.map((ac, i) => `${i+1}. ${ac}`).join('\n')}
`).join('\n')
const epicContent = `${epicFrontmatter}
# EPIC-${epic.id}: ${epic.title}
## Description
${epic.description}
## Stories
${storiesContent}
## Requirements
${epic.reqs.map(r => `- [${r}](../requirements/${r}.md)`).join('\n')}
## Architecture
${epic.adrs.map(a => `- [${a}](../architecture/${a}.md)`).join('\n')}
`
Write(`${sessionFolder}/epics/EPIC-${epic.id}-${epic.slug}.md`, epicContent)
})
// Write _index.md (with Mermaid dependency diagram + MVP scope + links)
const epicsIndexFrontmatter = `---
session_id: ${specConfig?.session_id || 'unknown'}
phase: 5
document_type: epics-index
status: draft
generated_at: ${timestamp}
version: 1
dependencies:
- ../requirements/_index.md
- ../architecture/_index.md
---`
// Contains: Epic overview table (with links), dependency Mermaid diagram,
// execution order, MVP scope, traceability matrix
Write(`${sessionFolder}/epics/_index.md`, epicsIndexContent)
outputPath = 'epics/_index.md'
}
```
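`parseEpics` follows the same pattern. A minimal sketch under the same fenced-JSON assumption, with field names mirroring the EPIC frontmatter written above (the payload shape itself is an assumption):
```javascript
// Hypothetical Epic parser; assumes the CLI returns a fenced JSON block { epics: [...] }.
function parseEpics(cliOutput) {
  const match = cliOutput.match(/```json\s*([\s\S]*?)```/)
  const payload = match ? JSON.parse(match[1]) : { epics: [] }
  return (payload.epics || []).map((e, i) => {
    const id = String(i + 1).padStart(3, '0')
    return {
      id,
      title: e.title,
      slug: (e.title || '').toLowerCase().replace(/[^a-z0-9]+/g, '-').replace(/^-|-$/g, ''),
      description: e.description || '',
      priority: e.priority || 'Should',
      mvp: !!e.mvp,
      size: e.size || 'M',
      reqs: e.requirements || [],       // e.g. ["REQ-001", "REQ-002"]
      adrs: e.architecture || [],       // e.g. ["ADR-001"]
      deps: e.dependencies || [],       // e.g. ["EPIC-001"]
      stories: (e.stories || []).map((s, j) => ({
        id: `STORY-${id}-${String(j + 1).padStart(3, '0')}`,
        title: s.title,
        user_story: s.user_story || '',
        size: s.size || 'M',
        traces: s.traces || [],
        acceptance_criteria: s.acceptance_criteria || []
      }))
    }
  })
}
```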
### Phase 4: Self-Validation