mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-02-28 09:23:08 +08:00
feat: Implement recursive core-memory database discovery and project listing
- Added `findAllCoreMemoryDatabases` function to recursively locate core-memory databases in nested project structures. - Updated `listAllProjects` to utilize the new recursive function for improved project listing. - Enhanced `getMemoriesFromProject` and `findMemoryAcrossProjects` to support nested project structures. feat: Introduce spec context injection in hooks configuration - Added a new hook configuration for "Spec Context Injection" to load project specs based on prompt keywords. chore: Add gray-matter dependency for YAML frontmatter parsing - Included `gray-matter` package in `package.json` for parsing YAML frontmatter in markdown files. feat: Create Spec Index Builder tool for managing project specs - Implemented `spec-index-builder.ts` to scan markdown files, extract YAML frontmatter, and generate index cache files for different spec dimensions. feat: Develop Spec Init tool for initializing spec directories and seed documents - Created `spec-init.ts` to set up the directory structure and seed documents for the spec system. feat: Build Spec Keyword Extractor for keyword extraction from prompts - Added `spec-keyword-extractor.ts` to extract keywords from user prompts, supporting both English and Chinese text. feat: Implement Spec Loader for loading and filtering specs based on keywords - Developed `spec-loader.ts` to handle loading of specs, filtering by read mode and keyword matches, and formatting output for CLI or hooks.
This commit is contained in:
@@ -255,6 +255,7 @@ if (isFullPipeline && ideaAngles.length > 1) {
|
||||
const agentName = `ideator-${i + 1}`
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn ${agentName} worker`,
|
||||
team_name: teamName,
|
||||
name: agentName,
|
||||
prompt: `你是 team "${teamName}" 的 IDEATOR (${agentName})。
|
||||
@@ -284,6 +285,7 @@ if (isFullPipeline && ideaAngles.length > 1) {
|
||||
// Quick/Deep pipeline: single ideator
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn ideator worker`,
|
||||
team_name: teamName,
|
||||
name: "ideator",
|
||||
prompt: `你是 team "${teamName}" 的 IDEATOR。
|
||||
@@ -311,6 +313,7 @@ if (isFullPipeline && ideaAngles.length > 1) {
|
||||
// Challenger
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn challenger worker`,
|
||||
team_name: teamName,
|
||||
name: "challenger",
|
||||
prompt: `你是 team "${teamName}" 的 CHALLENGER。
|
||||
@@ -335,6 +338,7 @@ Task({
|
||||
// Synthesizer
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn synthesizer worker`,
|
||||
team_name: teamName,
|
||||
name: "synthesizer",
|
||||
prompt: `你是 team "${teamName}" 的 SYNTHESIZER。
|
||||
@@ -359,6 +363,7 @@ Task({
|
||||
// Evaluator
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn evaluator worker`,
|
||||
team_name: teamName,
|
||||
name: "evaluator",
|
||||
prompt: `你是 team "${teamName}" 的 EVALUATOR。
|
||||
|
||||
@@ -321,6 +321,7 @@ TeamCreate({ team_name: teamName })
|
||||
// Analyst
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn analyst worker`,
|
||||
team_name: teamName,
|
||||
name: "analyst",
|
||||
prompt: `你是 team "${teamName}" 的 ANALYST。
|
||||
@@ -347,6 +348,7 @@ Session: ${sessionFolder}
|
||||
// Architect
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn architect worker`,
|
||||
team_name: teamName,
|
||||
name: "architect",
|
||||
prompt: `你是 team "${teamName}" 的 ARCHITECT。
|
||||
@@ -372,6 +374,7 @@ Session: ${sessionFolder}
|
||||
// Developer
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn developer worker`,
|
||||
team_name: teamName,
|
||||
name: "developer",
|
||||
prompt: `你是 team "${teamName}" 的 DEVELOPER。
|
||||
@@ -397,6 +400,7 @@ Session: ${sessionFolder}
|
||||
// QA
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn qa worker`,
|
||||
team_name: teamName,
|
||||
name: "qa",
|
||||
prompt: `你是 team "${teamName}" 的 QA (质量保证)。
|
||||
|
||||
@@ -251,6 +251,7 @@ if (isBatchMode && issueIds.length > 1) {
|
||||
const agentName = `explorer-${i + 1}`
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn issue worker`,
|
||||
team_name: "issue",
|
||||
name: agentName,
|
||||
prompt: `你是 team "issue" 的 EXPLORER (${agentName})。
|
||||
@@ -281,6 +282,7 @@ if (isBatchMode && issueIds.length > 1) {
|
||||
// Quick/Full mode: single explorer
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn issue worker`,
|
||||
team_name: "issue",
|
||||
name: "explorer",
|
||||
prompt: `你是 team "issue" 的 EXPLORER。
|
||||
@@ -310,6 +312,7 @@ if (isBatchMode && issueIds.length > 1) {
|
||||
// Planner
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn issue worker`,
|
||||
team_name: "issue",
|
||||
name: "planner",
|
||||
prompt: `你是 team "issue" 的 PLANNER。
|
||||
@@ -337,6 +340,7 @@ Task({
|
||||
// Reviewer
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn issue worker`,
|
||||
team_name: "issue",
|
||||
name: "reviewer",
|
||||
prompt: `你是 team "issue" 的 REVIEWER。
|
||||
@@ -365,6 +369,7 @@ Task({
|
||||
// Integrator
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn issue worker`,
|
||||
team_name: "issue",
|
||||
name: "integrator",
|
||||
prompt: `你是 team "issue" 的 INTEGRATOR。
|
||||
@@ -397,6 +402,7 @@ if (isBatchMode && issueIds.length > 2) {
|
||||
const agentName = `implementer-${i + 1}`
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn issue worker`,
|
||||
team_name: "issue",
|
||||
name: agentName,
|
||||
prompt: `你是 team "issue" 的 IMPLEMENTER (${agentName})。
|
||||
@@ -426,6 +432,7 @@ if (isBatchMode && issueIds.length > 2) {
|
||||
// Quick/Full mode: single implementer
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn issue worker`,
|
||||
team_name: "issue",
|
||||
name: "implementer",
|
||||
prompt: `你是 team "issue" 的 IMPLEMENTER。
|
||||
|
||||
@@ -396,6 +396,7 @@ TeamCreate({ team_name: teamName })
|
||||
// Architect
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn architect worker`,
|
||||
team_name: teamName,
|
||||
name: "architect",
|
||||
prompt: `你是 team "${teamName}" 的 ARCHITECT。
|
||||
@@ -421,6 +422,7 @@ Task({
|
||||
// Developer
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn developer worker`,
|
||||
team_name: teamName,
|
||||
name: "developer",
|
||||
prompt: `你是 team "${teamName}" 的 DEVELOPER。
|
||||
@@ -444,6 +446,7 @@ Task({
|
||||
// Tester
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn tester worker`,
|
||||
team_name: teamName,
|
||||
name: "tester",
|
||||
prompt: `你是 team "${teamName}" 的 TESTER。
|
||||
@@ -467,6 +470,7 @@ Task({
|
||||
// Reviewer
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn reviewer worker`,
|
||||
team_name: teamName,
|
||||
name: "reviewer",
|
||||
prompt: `你是 team "${teamName}" 的 REVIEWER。
|
||||
|
||||
@@ -160,30 +160,36 @@ Skill(skill="team-lifecycle-v2", args="--role=coordinator 任务描述")
|
||||
```javascript
|
||||
if (!roleMatch) {
|
||||
// Orchestration Mode: 自动路由到 coordinator
|
||||
// coordinator role.md 将执行:
|
||||
// Phase 1: 需求澄清
|
||||
// Phase 2: TeamCreate + spawn 所有 worker agents
|
||||
// 每个 agent prompt 中包含 Skill(args="--role=xxx") 回调
|
||||
// Phase 3: 创建任务链
|
||||
// Phase 4: 监控协调循环
|
||||
// Phase 5: 结果汇报
|
||||
// coordinator Entry Router 先检测命令类型:
|
||||
// - Worker 回调 → handleCallback → 自动推进
|
||||
// - "check" → handleCheck → 状态报告
|
||||
// - "resume" → handleResume → 手动推进
|
||||
// - 新任务 → Phase 1-3 → spawn first batch → STOP
|
||||
|
||||
const role = "coordinator"
|
||||
Read(VALID_ROLES[role].file)
|
||||
}
|
||||
```
|
||||
|
||||
**完整调用链**:
|
||||
**完整调用链(Spawn-and-Stop)**:
|
||||
|
||||
```
|
||||
用户: Skill(args="任务描述")
|
||||
│
|
||||
├─ SKILL.md: 无 --role → Orchestration Mode → 读取 coordinator role.md
|
||||
│
|
||||
├─ coordinator Phase 2: TeamCreate + spawn workers
|
||||
│ 每个 worker prompt 中包含 Skill(args="--role=xxx") 回调
|
||||
├─ coordinator Phase 1-3: 需求澄清 → TeamCreate → 创建任务链
|
||||
│
|
||||
├─ coordinator Phase 3: dispatch 任务链
|
||||
├─ coordinator Phase 4: spawn 首批 worker(后台) → STOP ← 立即停止
|
||||
│
|
||||
│ ┌─────────────────────────────────────────────────────┐
|
||||
│ │ Worker 在后台执行,完成后 SendMessage 回调 │
|
||||
│ │ │
|
||||
│ │ 三种唤醒源推进流水线: │
|
||||
│ │ 1. Worker 回调 → coordinator 自动推进下一步 │
|
||||
│ │ 2. 用户 "check" → 输出执行状态图 │
|
||||
│ │ 3. 用户 "resume" → 手动推进 │
|
||||
│ └─────────────────────────────────────────────────────┘
|
||||
│
|
||||
├─ worker 收到任务 → Skill(args="--role=xxx") → SKILL.md Role Router → role.md
|
||||
│ 每个 worker 自动获取:
|
||||
@@ -191,9 +197,25 @@ if (!roleMatch) {
|
||||
│ ├─ 可用命令 (commands/*.md)
|
||||
│ └─ 执行逻辑 (5-phase process)
|
||||
│
|
||||
└─ coordinator Phase 4-5: 监控 → 结果汇报
|
||||
├─ worker 完成 → SendMessage(to: coordinator) → 唤醒 coordinator → spawn 下一批 → STOP
|
||||
│ (循环直到 pipeline 完成)
|
||||
│
|
||||
└─ Pipeline 完成 → Phase 5: 结果汇报
|
||||
```
|
||||
|
||||
### User Commands(流水线推进指令)
|
||||
|
||||
当 coordinator 已 spawn worker 并 STOP 后,用户可用以下指令唤醒 coordinator:
|
||||
|
||||
| Command | Usage | Action |
|
||||
|---------|-------|--------|
|
||||
| `check` | `Skill(args="check")` | 输出执行状态图(pipeline graph),显示每个任务状态,不做推进 |
|
||||
| `resume` | `Skill(args="resume")` | 检查所有 worker 成员状态,完成的任务自动推进到下一步 |
|
||||
| `status` | `Skill(args="status")` | 同 `check` |
|
||||
|
||||
> Worker 完成后的 SendMessage 回调也会自动唤醒 coordinator 推进流水线,
|
||||
> 无需用户手动 `resume`。`resume` 用于手动推进(如 checkpoint 后、或回调未触发时)。
|
||||
|
||||
### Available Roles
|
||||
|
||||
| Role | Task Prefix | Responsibility | Role File |
|
||||
@@ -506,16 +528,18 @@ Coordinator supports `--resume` / `--continue` flags to resume interrupted sessi
|
||||
|
||||
## Coordinator Spawn Template
|
||||
|
||||
When coordinator creates teammates, use this pattern:
|
||||
When coordinator spawns workers, use **background mode** (Spawn-and-Stop pattern):
|
||||
|
||||
```javascript
|
||||
TeamCreate({ team_name: teamName })
|
||||
|
||||
// For each worker role:
|
||||
// For each ready worker — 后台 spawn,立即返回:
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn ${roleName} worker`, // ← 必填参数
|
||||
team_name: teamName,
|
||||
name: "<role_name>",
|
||||
run_in_background: true, // ← KEY: 后台执行,coordinator 立即返回
|
||||
prompt: `你是 team "${teamName}" 的 <ROLE_NAME_UPPER>.
|
||||
|
||||
## ⚠️ 首要指令(MUST)
|
||||
@@ -542,9 +566,13 @@ Session: ${sessionFolder}
|
||||
3. team_msg log + SendMessage 结果给 coordinator(带 [<role_name>] 标识)
|
||||
4. TaskUpdate completed → 检查下一个任务 → 回到步骤 1`
|
||||
})
|
||||
|
||||
// ⚠️ Spawn 后立即 STOP — 不阻塞等待
|
||||
// Worker 完成后通过 SendMessage 回调唤醒 coordinator
|
||||
// 用户也可通过 "check" / "resume" 命令手动推进
|
||||
```
|
||||
|
||||
See [roles/coordinator/role.md](roles/coordinator/role.md) for the full spawn implementation with per-role prompts.
|
||||
See [roles/coordinator/role.md](roles/coordinator/role.md) for the full spawn implementation with per-role prompts and Entry Router.
|
||||
|
||||
## Shared Spec Resources
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Dispatch Command - Task Chain Creation
|
||||
|
||||
**Purpose**: Create task chains based on execution mode (spec-only, impl-only, full-lifecycle)
|
||||
**Purpose**: Create task chains based on execution mode, aligned with SKILL.md Three-Mode Pipeline
|
||||
|
||||
**Invoked by**: Coordinator role.md Phase 3
|
||||
|
||||
@@ -10,151 +10,141 @@
|
||||
|
||||
## Task Chain Strategies
|
||||
|
||||
### Role-Task Mapping (Source of Truth: SKILL.md VALID_ROLES)
|
||||
|
||||
| Task Prefix | Role | VALID_ROLES Key |
|
||||
|-------------|------|-----------------|
|
||||
| RESEARCH-* | analyst | `analyst` |
|
||||
| DISCUSS-* | discussant | `discussant` |
|
||||
| DRAFT-* | writer | `writer` |
|
||||
| QUALITY-* | reviewer | `reviewer` |
|
||||
| PLAN-* | planner | `planner` |
|
||||
| IMPL-* | executor | `executor` |
|
||||
| TEST-* | tester | `tester` |
|
||||
| REVIEW-* | reviewer | `reviewer` |
|
||||
| DEV-FE-* | fe-developer | `fe-developer` |
|
||||
| QA-FE-* | fe-qa | `fe-qa` |
|
||||
|
||||
---
|
||||
|
||||
### Strategy 1: Spec-Only Mode (12 tasks)
|
||||
|
||||
Pipeline: `RESEARCH → DISCUSS → DRAFT → DISCUSS → DRAFT → DISCUSS → DRAFT → DISCUSS → DRAFT → DISCUSS → QUALITY → DISCUSS`
|
||||
|
||||
```javascript
|
||||
if (requirements.mode === "spec-only") {
|
||||
Output("[coordinator] Creating spec-only task chain (12 tasks)")
|
||||
|
||||
// Task 1: Requirements Analysis
|
||||
// Task 1: Seed Analysis
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "req-analysis",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Analyze requirements and extract key features",
|
||||
dependencies: [],
|
||||
input: {
|
||||
scope: requirements.scope,
|
||||
focus: requirements.focus,
|
||||
depth: requirements.depth
|
||||
},
|
||||
status: "active" // First task starts immediately
|
||||
subject: "RESEARCH-001",
|
||||
owner: "analyst",
|
||||
description: `Seed analysis: codebase exploration and context gathering\nSession: ${sessionFolder}\nScope: ${requirements.scope}\nFocus: ${requirements.focus.join(", ")}\nDepth: ${requirements.depth}`,
|
||||
blockedBy: [],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
// Task 2: Architecture Design
|
||||
// Task 2: Critique Research
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "arch-design",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Design system architecture",
|
||||
dependencies: ["req-analysis"],
|
||||
status: "blocked"
|
||||
subject: "DISCUSS-001",
|
||||
owner: "discussant",
|
||||
description: `Critique research findings from RESEARCH-001, identify gaps and clarify scope\nSession: ${sessionFolder}`,
|
||||
blockedBy: ["RESEARCH-001"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
// Task 3: API Design
|
||||
// Task 3: Product Brief
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "api-design",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Design API contracts and endpoints",
|
||||
dependencies: ["arch-design"],
|
||||
status: "blocked"
|
||||
subject: "DRAFT-001",
|
||||
owner: "writer",
|
||||
description: `Generate Product Brief based on RESEARCH-001 findings and DISCUSS-001 feedback\nSession: ${sessionFolder}`,
|
||||
blockedBy: ["DISCUSS-001"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
// Task 4: Data Model Design
|
||||
// Task 4: Critique Product Brief
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "data-model",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Design data models and schemas",
|
||||
dependencies: ["arch-design"],
|
||||
status: "blocked"
|
||||
subject: "DISCUSS-002",
|
||||
owner: "discussant",
|
||||
description: `Critique Product Brief (DRAFT-001), evaluate completeness and clarity\nSession: ${sessionFolder}`,
|
||||
blockedBy: ["DRAFT-001"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
// Task 5: UI Specification
|
||||
// Task 5: Requirements/PRD
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "ui-spec",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Design UI components and user flows",
|
||||
dependencies: ["arch-design"],
|
||||
status: "blocked"
|
||||
subject: "DRAFT-002",
|
||||
owner: "writer",
|
||||
description: `Generate Requirements/PRD incorporating DISCUSS-002 feedback\nSession: ${sessionFolder}`,
|
||||
blockedBy: ["DISCUSS-002"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
// Task 6: Test Strategy
|
||||
// Task 6: Critique Requirements
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "test-strategy",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Define testing strategy and test cases",
|
||||
dependencies: ["api-design", "data-model"],
|
||||
status: "blocked"
|
||||
subject: "DISCUSS-003",
|
||||
owner: "discussant",
|
||||
description: `Critique Requirements/PRD (DRAFT-002), validate coverage and feasibility\nSession: ${sessionFolder}`,
|
||||
blockedBy: ["DRAFT-002"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
// Task 7: Error Handling Design
|
||||
// Task 7: Architecture Document
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "error-handling",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Design error handling and recovery mechanisms",
|
||||
dependencies: ["api-design"],
|
||||
status: "blocked"
|
||||
subject: "DRAFT-003",
|
||||
owner: "writer",
|
||||
description: `Generate Architecture Document incorporating DISCUSS-003 feedback\nSession: ${sessionFolder}`,
|
||||
blockedBy: ["DISCUSS-003"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
// Task 8: Security Review
|
||||
// Task 8: Critique Architecture
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "security-review",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Review security considerations and vulnerabilities",
|
||||
dependencies: ["api-design", "data-model"],
|
||||
status: "blocked"
|
||||
subject: "DISCUSS-004",
|
||||
owner: "discussant",
|
||||
description: `Critique Architecture Document (DRAFT-003), evaluate design decisions\nSession: ${sessionFolder}`,
|
||||
blockedBy: ["DRAFT-003"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
// Task 9: Performance Requirements
|
||||
// Task 9: Epics
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "perf-requirements",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Define performance requirements and benchmarks",
|
||||
dependencies: ["arch-design"],
|
||||
status: "blocked"
|
||||
subject: "DRAFT-004",
|
||||
owner: "writer",
|
||||
description: `Generate Epics document incorporating DISCUSS-004 feedback\nSession: ${sessionFolder}`,
|
||||
blockedBy: ["DISCUSS-004"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
// Task 10: Documentation Outline
|
||||
// Task 10: Critique Epics
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "doc-outline",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Create documentation structure and outline",
|
||||
dependencies: ["api-design"],
|
||||
status: "blocked"
|
||||
subject: "DISCUSS-005",
|
||||
owner: "discussant",
|
||||
description: `Critique Epics (DRAFT-004), validate task decomposition and priorities\nSession: ${sessionFolder}`,
|
||||
blockedBy: ["DRAFT-004"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
// Task 11: Review Specifications
|
||||
// Task 11: Spec Quality Check
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "review-spec",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Review all specifications for consistency",
|
||||
dependencies: ["test-strategy", "error-handling", "security-review", "perf-requirements", "doc-outline"],
|
||||
status: "blocked"
|
||||
subject: "QUALITY-001",
|
||||
owner: "reviewer",
|
||||
description: `5-dimension spec quality validation across all spec artifacts\nSession: ${sessionFolder}`,
|
||||
blockedBy: ["DISCUSS-005"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
// Task 12: Finalize Specifications
|
||||
// Task 12: Final Review Discussion
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "finalize-spec",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Finalize and package all specifications",
|
||||
dependencies: ["review-spec"],
|
||||
status: "blocked"
|
||||
subject: "DISCUSS-006",
|
||||
owner: "discussant",
|
||||
description: `Final review discussion: address QUALITY-001 findings, sign-off\nSession: ${sessionFolder}`,
|
||||
blockedBy: ["QUALITY-001"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
Output("[coordinator] Spec-only task chain created (12 tasks)")
|
||||
Output("[coordinator] Starting with: req-analysis")
|
||||
Output("[coordinator] Starting with: RESEARCH-001 (analyst)")
|
||||
}
|
||||
```
|
||||
|
||||
@@ -162,6 +152,8 @@ if (requirements.mode === "spec-only") {
|
||||
|
||||
### Strategy 2: Impl-Only Mode (4 tasks)
|
||||
|
||||
Pipeline: `PLAN → IMPL → TEST + REVIEW`
|
||||
|
||||
```javascript
|
||||
if (requirements.mode === "impl-only") {
|
||||
Output("[coordinator] Creating impl-only task chain (4 tasks)")
|
||||
@@ -183,7 +175,6 @@ if (requirements.mode === "impl-only") {
|
||||
type: "text"
|
||||
})
|
||||
|
||||
// Validate spec file exists
|
||||
const specContent = Read(specFile)
|
||||
if (!specContent) {
|
||||
throw new Error(`Specification file not found: ${specFile}`)
|
||||
@@ -191,65 +182,44 @@ if (requirements.mode === "impl-only") {
|
||||
|
||||
Output(`[coordinator] Using specification: ${specFile}`)
|
||||
|
||||
// Task 1: Setup Scaffold
|
||||
// Task 1: Planning
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "setup-scaffold",
|
||||
assigned_to: "implementer",
|
||||
phase: "impl",
|
||||
description: "Setup project scaffold and dependencies",
|
||||
dependencies: [],
|
||||
input: {
|
||||
spec_file: specFile,
|
||||
scope: requirements.scope
|
||||
},
|
||||
status: "active" // First task starts immediately
|
||||
subject: "PLAN-001",
|
||||
owner: "planner",
|
||||
description: `Multi-angle codebase exploration and structured planning\nSession: ${sessionFolder}\nSpec: ${specFile}\nScope: ${requirements.scope}`,
|
||||
blockedBy: [],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
// Task 2: Core Implementation
|
||||
// Task 2: Implementation
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "core-impl",
|
||||
assigned_to: "implementer",
|
||||
phase: "impl",
|
||||
description: "Implement core functionality",
|
||||
dependencies: ["setup-scaffold"],
|
||||
input: {
|
||||
spec_file: specFile
|
||||
},
|
||||
status: "blocked"
|
||||
subject: "IMPL-001",
|
||||
owner: "executor",
|
||||
description: `Code implementation following PLAN-001\nSession: ${sessionFolder}\nSpec: ${specFile}`,
|
||||
blockedBy: ["PLAN-001"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
// Task 3: Integration
|
||||
// Task 3: Testing (parallel with REVIEW-001)
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "integration",
|
||||
assigned_to: "implementer",
|
||||
phase: "impl",
|
||||
description: "Integrate components and test",
|
||||
dependencies: ["core-impl"],
|
||||
input: {
|
||||
spec_file: specFile
|
||||
},
|
||||
status: "blocked"
|
||||
subject: "TEST-001",
|
||||
owner: "tester",
|
||||
description: `Adaptive test-fix cycles and quality gates\nSession: ${sessionFolder}`,
|
||||
blockedBy: ["IMPL-001"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
// Task 4: Finalize Implementation
|
||||
// Task 4: Code Review (parallel with TEST-001)
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "finalize-impl",
|
||||
assigned_to: "implementer",
|
||||
phase: "impl",
|
||||
description: "Finalize implementation and documentation",
|
||||
dependencies: ["integration"],
|
||||
input: {
|
||||
spec_file: specFile
|
||||
},
|
||||
status: "blocked"
|
||||
subject: "REVIEW-001",
|
||||
owner: "reviewer",
|
||||
description: `4-dimension code review of IMPL-001 output\nSession: ${sessionFolder}`,
|
||||
blockedBy: ["IMPL-001"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
Output("[coordinator] Impl-only task chain created (4 tasks)")
|
||||
Output("[coordinator] Starting with: setup-scaffold")
|
||||
Output("[coordinator] Starting with: PLAN-001 (planner)")
|
||||
}
|
||||
```
|
||||
|
||||
@@ -257,206 +227,267 @@ if (requirements.mode === "impl-only") {
|
||||
|
||||
### Strategy 3: Full-Lifecycle Mode (16 tasks)
|
||||
|
||||
Pipeline: `[Spec pipeline 12] → PLAN(blockedBy: DISCUSS-006) → IMPL → TEST + REVIEW`
|
||||
|
||||
```javascript
|
||||
if (requirements.mode === "full-lifecycle") {
|
||||
Output("[coordinator] Creating full-lifecycle task chain (16 tasks)")
|
||||
|
||||
// ========================================
|
||||
// SPEC PHASE (12 tasks)
|
||||
// SPEC PHASE (12 tasks) — same as spec-only
|
||||
// ========================================
|
||||
|
||||
// Task 1: Requirements Analysis
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "req-analysis",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Analyze requirements and extract key features",
|
||||
dependencies: [],
|
||||
input: {
|
||||
scope: requirements.scope,
|
||||
focus: requirements.focus,
|
||||
depth: requirements.depth
|
||||
},
|
||||
status: "active" // First task starts immediately
|
||||
})
|
||||
|
||||
// Task 2: Architecture Design
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "arch-design",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Design system architecture",
|
||||
dependencies: ["req-analysis"],
|
||||
status: "blocked"
|
||||
})
|
||||
|
||||
// Task 3: API Design
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "api-design",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Design API contracts and endpoints",
|
||||
dependencies: ["arch-design"],
|
||||
status: "blocked"
|
||||
})
|
||||
|
||||
// Task 4: Data Model Design
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "data-model",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Design data models and schemas",
|
||||
dependencies: ["arch-design"],
|
||||
status: "blocked"
|
||||
})
|
||||
|
||||
// Task 5: UI Specification
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "ui-spec",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Design UI components and user flows",
|
||||
dependencies: ["arch-design"],
|
||||
status: "blocked"
|
||||
})
|
||||
|
||||
// Task 6: Test Strategy
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "test-strategy",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Define testing strategy and test cases",
|
||||
dependencies: ["api-design", "data-model"],
|
||||
status: "blocked"
|
||||
})
|
||||
|
||||
// Task 7: Error Handling Design
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "error-handling",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Design error handling and recovery mechanisms",
|
||||
dependencies: ["api-design"],
|
||||
status: "blocked"
|
||||
})
|
||||
|
||||
// Task 8: Security Review
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "security-review",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Review security considerations and vulnerabilities",
|
||||
dependencies: ["api-design", "data-model"],
|
||||
status: "blocked"
|
||||
})
|
||||
|
||||
// Task 9: Performance Requirements
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "perf-requirements",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Define performance requirements and benchmarks",
|
||||
dependencies: ["arch-design"],
|
||||
status: "blocked"
|
||||
})
|
||||
|
||||
// Task 10: Documentation Outline
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "doc-outline",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Create documentation structure and outline",
|
||||
dependencies: ["api-design"],
|
||||
status: "blocked"
|
||||
})
|
||||
|
||||
// Task 11: Review Specifications
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "review-spec",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Review all specifications for consistency",
|
||||
dependencies: ["test-strategy", "error-handling", "security-review", "perf-requirements", "doc-outline"],
|
||||
status: "blocked"
|
||||
})
|
||||
|
||||
// Task 12: Finalize Specifications
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "finalize-spec",
|
||||
assigned_to: "spec-writer",
|
||||
phase: "spec",
|
||||
description: "Finalize and package all specifications",
|
||||
dependencies: ["review-spec"],
|
||||
status: "blocked"
|
||||
})
|
||||
TaskCreate({ subject: "RESEARCH-001", owner: "analyst", description: `Seed analysis: codebase exploration and context gathering\nSession: ${sessionFolder}\nScope: ${requirements.scope}\nFocus: ${requirements.focus.join(", ")}\nDepth: ${requirements.depth}`, blockedBy: [], status: "pending" })
|
||||
TaskCreate({ subject: "DISCUSS-001", owner: "discussant", description: `Critique research findings from RESEARCH-001\nSession: ${sessionFolder}`, blockedBy: ["RESEARCH-001"], status: "pending" })
|
||||
TaskCreate({ subject: "DRAFT-001", owner: "writer", description: `Generate Product Brief\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-001"], status: "pending" })
|
||||
TaskCreate({ subject: "DISCUSS-002", owner: "discussant", description: `Critique Product Brief (DRAFT-001)\nSession: ${sessionFolder}`, blockedBy: ["DRAFT-001"], status: "pending" })
|
||||
TaskCreate({ subject: "DRAFT-002", owner: "writer", description: `Generate Requirements/PRD\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-002"], status: "pending" })
|
||||
TaskCreate({ subject: "DISCUSS-003", owner: "discussant", description: `Critique Requirements/PRD (DRAFT-002)\nSession: ${sessionFolder}`, blockedBy: ["DRAFT-002"], status: "pending" })
|
||||
TaskCreate({ subject: "DRAFT-003", owner: "writer", description: `Generate Architecture Document\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-003"], status: "pending" })
|
||||
TaskCreate({ subject: "DISCUSS-004", owner: "discussant", description: `Critique Architecture Document (DRAFT-003)\nSession: ${sessionFolder}`, blockedBy: ["DRAFT-003"], status: "pending" })
|
||||
TaskCreate({ subject: "DRAFT-004", owner: "writer", description: `Generate Epics\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-004"], status: "pending" })
|
||||
TaskCreate({ subject: "DISCUSS-005", owner: "discussant", description: `Critique Epics (DRAFT-004)\nSession: ${sessionFolder}`, blockedBy: ["DRAFT-004"], status: "pending" })
|
||||
TaskCreate({ subject: "QUALITY-001", owner: "reviewer", description: `5-dimension spec quality validation\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-005"], status: "pending" })
|
||||
TaskCreate({ subject: "DISCUSS-006", owner: "discussant", description: `Final review discussion and sign-off\nSession: ${sessionFolder}`, blockedBy: ["QUALITY-001"], status: "pending" })
|
||||
|
||||
// ========================================
|
||||
// IMPL PHASE (4 tasks)
|
||||
// IMPL PHASE (4 tasks) — blocked by spec completion
|
||||
// ========================================
|
||||
|
||||
// Task 13: Setup Scaffold
|
||||
TaskCreate({
|
||||
team_id: teamId,
|
||||
task_id: "setup-scaffold",
|
||||
assigned_to: "implementer",
|
||||
phase: "impl",
|
||||
description: "Setup project scaffold and dependencies",
|
||||
dependencies: ["finalize-spec"], // Blocked until spec phase completes
|
||||
status: "blocked"
|
||||
subject: "PLAN-001",
|
||||
owner: "planner",
|
||||
description: `Multi-angle codebase exploration and structured planning\nSession: ${sessionFolder}\nScope: ${requirements.scope}`,
|
||||
blockedBy: ["DISCUSS-006"], // Blocked until spec phase completes
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
  // Task 14: Core Implementation (IMPL-001)
  TaskCreate({
    subject: "IMPL-001",
    owner: "executor",
    description: `Code implementation following PLAN-001\nSession: ${sessionFolder}`,
    blockedBy: ["PLAN-001"],
    status: "pending"
  })
|
||||
|
||||
  // Task 15: Testing (TEST-001)
  TaskCreate({
    subject: "TEST-001",
    owner: "tester",
    description: `Adaptive test-fix cycles and quality gates\nSession: ${sessionFolder}`,
    blockedBy: ["IMPL-001"],
    status: "pending"
  })
|
||||
|
||||
  // Task 16: Code Review (REVIEW-001) — runs in parallel with TEST-001
  TaskCreate({
    subject: "REVIEW-001",
    owner: "reviewer",
    description: `4-dimension code review of IMPL-001 output\nSession: ${sessionFolder}`,
    blockedBy: ["IMPL-001"],
    status: "pending"
  })
|
||||
|
||||
Output("[coordinator] Full-lifecycle task chain created (16 tasks)")
|
||||
  Output("[coordinator] Starting with: RESEARCH-001 (analyst)")
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Strategy 4: FE-Only Mode (3 tasks)
|
||||
|
||||
Pipeline: `PLAN → DEV-FE → QA-FE` (with GC loop: max 2 rounds)
|
||||
|
||||
```javascript
|
||||
if (requirements.mode === "fe-only") {
|
||||
Output("[coordinator] Creating fe-only task chain (3 tasks)")
|
||||
|
||||
TaskCreate({
|
||||
subject: "PLAN-001",
|
||||
owner: "planner",
|
||||
description: `Multi-angle codebase exploration and structured planning (frontend focus)\nSession: ${sessionFolder}\nScope: ${requirements.scope}`,
|
||||
blockedBy: [],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
TaskCreate({
|
||||
subject: "DEV-FE-001",
|
||||
owner: "fe-developer",
|
||||
description: `Frontend component/page implementation following PLAN-001\nSession: ${sessionFolder}`,
|
||||
blockedBy: ["PLAN-001"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
TaskCreate({
|
||||
subject: "QA-FE-001",
|
||||
owner: "fe-qa",
|
||||
description: `5-dimension frontend QA for DEV-FE-001 output\nSession: ${sessionFolder}`,
|
||||
blockedBy: ["DEV-FE-001"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
// Note: GC loop (DEV-FE-002 → QA-FE-002) created dynamically by coordinator
|
||||
// when QA-FE-001 verdict = NEEDS_FIX (max 2 rounds)
|
||||
|
||||
Output("[coordinator] FE-only task chain created (3 tasks)")
|
||||
Output("[coordinator] Starting with: PLAN-001 (planner)")
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Strategy 5: Fullstack Mode (6 tasks)
|
||||
|
||||
Pipeline: `PLAN → IMPL ∥ DEV-FE → TEST ∥ QA-FE → REVIEW`
|
||||
|
||||
```javascript
|
||||
if (requirements.mode === "fullstack") {
|
||||
Output("[coordinator] Creating fullstack task chain (6 tasks)")
|
||||
|
||||
TaskCreate({
|
||||
subject: "PLAN-001",
|
||||
owner: "planner",
|
||||
description: `Multi-angle codebase exploration and structured planning (fullstack)\nSession: ${sessionFolder}\nScope: ${requirements.scope}`,
|
||||
blockedBy: [],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
// Backend + Frontend in parallel
|
||||
TaskCreate({
|
||||
subject: "IMPL-001",
|
||||
owner: "executor",
|
||||
description: `Backend implementation following PLAN-001\nSession: ${sessionFolder}`,
|
||||
blockedBy: ["PLAN-001"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
TaskCreate({
|
||||
subject: "DEV-FE-001",
|
||||
owner: "fe-developer",
|
||||
description: `Frontend implementation following PLAN-001\nSession: ${sessionFolder}`,
|
||||
blockedBy: ["PLAN-001"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
// Testing + QA in parallel
|
||||
TaskCreate({
|
||||
subject: "TEST-001",
|
||||
owner: "tester",
|
||||
description: `Backend test-fix cycles\nSession: ${sessionFolder}`,
|
||||
blockedBy: ["IMPL-001"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
TaskCreate({
|
||||
subject: "QA-FE-001",
|
||||
owner: "fe-qa",
|
||||
description: `Frontend QA for DEV-FE-001\nSession: ${sessionFolder}`,
|
||||
blockedBy: ["DEV-FE-001"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
// Final review after all testing
|
||||
TaskCreate({
|
||||
subject: "REVIEW-001",
|
||||
owner: "reviewer",
|
||||
description: `Full code review (backend + frontend)\nSession: ${sessionFolder}`,
|
||||
blockedBy: ["TEST-001", "QA-FE-001"],
|
||||
status: "pending"
|
||||
})
|
||||
|
||||
Output("[coordinator] Fullstack task chain created (6 tasks)")
|
||||
Output("[coordinator] Starting with: PLAN-001 (planner)")
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Strategy 6: Full-Lifecycle-FE Mode (18 tasks)
|
||||
|
||||
Pipeline: `[Spec 12] → PLAN(blockedBy: DISCUSS-006) → IMPL ∥ DEV-FE → TEST ∥ QA-FE → REVIEW`
|
||||
|
||||
```javascript
|
||||
if (requirements.mode === "full-lifecycle-fe") {
|
||||
Output("[coordinator] Creating full-lifecycle-fe task chain (18 tasks)")
|
||||
|
||||
// SPEC PHASE (12 tasks) — same as spec-only
|
||||
TaskCreate({ subject: "RESEARCH-001", owner: "analyst", description: `Seed analysis\nSession: ${sessionFolder}\nScope: ${requirements.scope}`, blockedBy: [], status: "pending" })
|
||||
TaskCreate({ subject: "DISCUSS-001", owner: "discussant", description: `Critique research findings\nSession: ${sessionFolder}`, blockedBy: ["RESEARCH-001"], status: "pending" })
|
||||
TaskCreate({ subject: "DRAFT-001", owner: "writer", description: `Generate Product Brief\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-001"], status: "pending" })
|
||||
TaskCreate({ subject: "DISCUSS-002", owner: "discussant", description: `Critique Product Brief\nSession: ${sessionFolder}`, blockedBy: ["DRAFT-001"], status: "pending" })
|
||||
TaskCreate({ subject: "DRAFT-002", owner: "writer", description: `Generate Requirements/PRD\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-002"], status: "pending" })
|
||||
TaskCreate({ subject: "DISCUSS-003", owner: "discussant", description: `Critique Requirements\nSession: ${sessionFolder}`, blockedBy: ["DRAFT-002"], status: "pending" })
|
||||
TaskCreate({ subject: "DRAFT-003", owner: "writer", description: `Generate Architecture Document\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-003"], status: "pending" })
|
||||
TaskCreate({ subject: "DISCUSS-004", owner: "discussant", description: `Critique Architecture\nSession: ${sessionFolder}`, blockedBy: ["DRAFT-003"], status: "pending" })
|
||||
TaskCreate({ subject: "DRAFT-004", owner: "writer", description: `Generate Epics\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-004"], status: "pending" })
|
||||
TaskCreate({ subject: "DISCUSS-005", owner: "discussant", description: `Critique Epics\nSession: ${sessionFolder}`, blockedBy: ["DRAFT-004"], status: "pending" })
|
||||
TaskCreate({ subject: "QUALITY-001", owner: "reviewer", description: `Spec quality validation\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-005"], status: "pending" })
|
||||
TaskCreate({ subject: "DISCUSS-006", owner: "discussant", description: `Final review and sign-off\nSession: ${sessionFolder}`, blockedBy: ["QUALITY-001"], status: "pending" })
|
||||
|
||||
// IMPL PHASE (6 tasks) — fullstack, blocked by spec
|
||||
TaskCreate({ subject: "PLAN-001", owner: "planner", description: `Fullstack planning\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-006"], status: "pending" })
|
||||
TaskCreate({ subject: "IMPL-001", owner: "executor", description: `Backend implementation\nSession: ${sessionFolder}`, blockedBy: ["PLAN-001"], status: "pending" })
|
||||
TaskCreate({ subject: "DEV-FE-001", owner: "fe-developer", description: `Frontend implementation\nSession: ${sessionFolder}`, blockedBy: ["PLAN-001"], status: "pending" })
|
||||
TaskCreate({ subject: "TEST-001", owner: "tester", description: `Backend test-fix cycles\nSession: ${sessionFolder}`, blockedBy: ["IMPL-001"], status: "pending" })
|
||||
TaskCreate({ subject: "QA-FE-001", owner: "fe-qa", description: `Frontend QA\nSession: ${sessionFolder}`, blockedBy: ["DEV-FE-001"], status: "pending" })
|
||||
TaskCreate({ subject: "REVIEW-001", owner: "reviewer", description: `Full code review\nSession: ${sessionFolder}`, blockedBy: ["TEST-001", "QA-FE-001"], status: "pending" })
|
||||
|
||||
Output("[coordinator] Full-lifecycle-fe task chain created (18 tasks)")
|
||||
Output("[coordinator] Starting with: RESEARCH-001 (analyst)")
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Task Metadata Reference
|
||||
|
||||
```javascript
|
||||
// Unified metadata for all pipelines (used by Session Resume)
|
||||
const TASK_METADATA = {
|
||||
// Spec pipeline (12 tasks)
|
||||
"RESEARCH-001": { role: "analyst", deps: [], description: "Seed analysis: codebase exploration and context gathering" },
|
||||
"DISCUSS-001": { role: "discussant", deps: ["RESEARCH-001"], description: "Critique research findings, identify gaps" },
|
||||
"DRAFT-001": { role: "writer", deps: ["DISCUSS-001"], description: "Generate Product Brief" },
|
||||
"DISCUSS-002": { role: "discussant", deps: ["DRAFT-001"], description: "Critique Product Brief" },
|
||||
"DRAFT-002": { role: "writer", deps: ["DISCUSS-002"], description: "Generate Requirements/PRD" },
|
||||
"DISCUSS-003": { role: "discussant", deps: ["DRAFT-002"], description: "Critique Requirements/PRD" },
|
||||
"DRAFT-003": { role: "writer", deps: ["DISCUSS-003"], description: "Generate Architecture Document" },
|
||||
"DISCUSS-004": { role: "discussant", deps: ["DRAFT-003"], description: "Critique Architecture Document" },
|
||||
"DRAFT-004": { role: "writer", deps: ["DISCUSS-004"], description: "Generate Epics" },
|
||||
"DISCUSS-005": { role: "discussant", deps: ["DRAFT-004"], description: "Critique Epics" },
|
||||
"QUALITY-001": { role: "reviewer", deps: ["DISCUSS-005"], description: "5-dimension spec quality validation" },
|
||||
"DISCUSS-006": { role: "discussant", deps: ["QUALITY-001"], description: "Final review discussion and sign-off" },
|
||||
|
||||
// Impl pipeline (4 tasks) — deps shown for impl-only mode
|
||||
// In full-lifecycle, PLAN-001 deps = ["DISCUSS-006"]
|
||||
"PLAN-001": { role: "planner", deps: [], description: "Multi-angle codebase exploration and structured planning" },
|
||||
"IMPL-001": { role: "executor", deps: ["PLAN-001"], description: "Code implementation following plan" },
|
||||
"TEST-001": { role: "tester", deps: ["IMPL-001"], description: "Adaptive test-fix cycles and quality gates" },
|
||||
"REVIEW-001": { role: "reviewer", deps: ["IMPL-001"], description: "4-dimension code review" },
|
||||
|
||||
// Frontend pipeline tasks
|
||||
"DEV-FE-001": { role: "fe-developer", deps: ["PLAN-001"], description: "Frontend component/page implementation" },
|
||||
"QA-FE-001": { role: "fe-qa", deps: ["DEV-FE-001"], description: "5-dimension frontend QA" },
|
||||
// GC loop tasks (created dynamically)
|
||||
"DEV-FE-002": { role: "fe-developer", deps: ["QA-FE-001"], description: "Frontend fixes (GC round 2)" },
|
||||
"QA-FE-002": { role: "fe-qa", deps: ["DEV-FE-002"], description: "Frontend QA re-check (GC round 2)" }
|
||||
}
|
||||
|
||||
// Pipeline chain constants
|
||||
const SPEC_CHAIN = [
|
||||
"RESEARCH-001", "DISCUSS-001", "DRAFT-001", "DISCUSS-002",
|
||||
"DRAFT-002", "DISCUSS-003", "DRAFT-003", "DISCUSS-004",
|
||||
"DRAFT-004", "DISCUSS-005", "QUALITY-001", "DISCUSS-006"
|
||||
]
|
||||
|
||||
const IMPL_CHAIN = ["PLAN-001", "IMPL-001", "TEST-001", "REVIEW-001"]
|
||||
|
||||
const FE_CHAIN = ["DEV-FE-001", "QA-FE-001"]
|
||||
|
||||
const FULLSTACK_CHAIN = ["PLAN-001", "IMPL-001", "DEV-FE-001", "TEST-001", "QA-FE-001", "REVIEW-001"]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Execution Method Handling
|
||||
|
||||
### Sequential Execution
|
||||
@@ -464,7 +495,7 @@ if (requirements.mode === "full-lifecycle") {
|
||||
```javascript
|
||||
if (requirements.executionMethod === "sequential") {
|
||||
Output("[coordinator] Sequential execution: tasks will run one at a time")
|
||||
  // Only one task active at a time
|
||||
// Next task activated only after predecessor completes
|
||||
}
|
||||
```
|
||||
@@ -474,44 +505,9 @@ if (requirements.executionMethod === "sequential") {
|
||||
```javascript
|
||||
if (requirements.executionMethod === "parallel") {
|
||||
Output("[coordinator] Parallel execution: independent tasks will run concurrently")
|
||||
|
||||
  // Tasks with all deps met can run in parallel
  // e.g., TEST-001 and REVIEW-001 both depend on IMPL-001 → run together
  // e.g., IMPL-001 and DEV-FE-001 both depend on PLAN-001 → run together
|
||||
}
|
||||
```
|
||||
|
||||
@@ -523,8 +519,5 @@ All outputs from this command use the `[coordinator]` tag:
|
||||
|
||||
```
|
||||
[coordinator] Creating spec-only task chain (12 tasks)
|
||||
...
[coordinator] Starting with: RESEARCH-001 (analyst)
|
||||
```
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -30,7 +30,7 @@
|
||||
| `task_complete` | Worker | Task finished | Update session, check dependencies, kick next task |
|
||||
| `task_blocked` | Worker | Dependency missing | Log block reason, wait for predecessor |
|
||||
| `discussion_needed` | Worker | Ambiguity found | Route to user via AskUserQuestion |
|
||||
| `research_ready` | analyst | Research done | Checkpoint with user before impl |
|
||||
|
||||
## Toolbox
|
||||
|
||||
@@ -40,7 +40,7 @@
|
||||
|
||||
### Subagent Capabilities
|
||||
- `TeamCreate` - Initialize team with session metadata
|
||||
- `TeamSpawn` - Spawn worker subagents (analyst, writer, discussant, planner, executor, tester, reviewer, etc.)
|
||||
- `TaskCreate` - Create tasks with dependencies
|
||||
- `TaskUpdate` - Update task status/metadata
|
||||
- `TaskGet` - Retrieve task details
|
||||
@@ -55,6 +55,66 @@
|
||||
|
||||
## Execution Flow
|
||||
|
||||
### Entry Router: Command Detection
|
||||
|
||||
**Purpose**: Detect invocation type and route to appropriate handler. Coordinator 有三种唤醒源:worker 回调、用户命令、初始调用。
|
||||
|
||||
```javascript
|
||||
const args = $ARGUMENTS
|
||||
|
||||
// ─── 1. Worker callback detection ───
|
||||
// Worker 完成后 SendMessage 到 coordinator,消息含 [role] 标识
|
||||
const callbackMatch = args.match(/\[(\w[\w-]*)\]/)
|
||||
const WORKER_ROLES = ['analyst','writer','discussant','planner','executor','tester','reviewer','explorer','architect','fe-developer','fe-qa']
|
||||
const isCallback = callbackMatch && WORKER_ROLES.includes(callbackMatch[1])
|
||||
|
||||
// ─── 2. User command detection ───
|
||||
const isCheck = /\b(check|status|--check)\b/i.test(args)
|
||||
const isResume = /\b(resume|continue|next|--resume|--continue)\b/i.test(args)
|
||||
|
||||
// ─── 3. Route ───
|
||||
if (isCallback || isCheck || isResume) {
|
||||
// Need active session
|
||||
const sessionFile = findActiveSession()
|
||||
if (!sessionFile) {
|
||||
Output("[coordinator] No active session found. Start a new session by providing a task description.")
|
||||
return
|
||||
}
|
||||
|
||||
// Load monitor command and execute
|
||||
Read("commands/monitor.md")
|
||||
|
||||
if (isCallback) {
|
||||
// Worker 回调 → 自动推进
|
||||
handleCallback(callbackMatch[1], args)
|
||||
} else if (isCheck) {
|
||||
// 状态报告 → 输出执行状态图
|
||||
handleCheck()
|
||||
} else if (isResume) {
|
||||
// 手动推进 → 检查成员状态并推进
|
||||
handleResume()
|
||||
}
|
||||
|
||||
// STOP — 所有命令执行完后立即停止
|
||||
return
|
||||
}
|
||||
|
||||
// ─── 4. Normal invoke → check for session resume or new session ───
|
||||
goto Phase0
|
||||
|
||||
// Helper: find active session file
|
||||
function findActiveSession() {
|
||||
const sessionFiles = Glob("D:/Claude_dms3/.workflow/.sessions/team-lifecycle-*.json")
|
||||
for (const f of sessionFiles) {
|
||||
const s = Read(f)
|
||||
if (s.status === "active") return f
|
||||
}
|
||||
return null
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Phase 0: Session Resume Check
|
||||
|
||||
**Purpose**: Detect and resume interrupted sessions
|
||||
@@ -101,38 +161,44 @@ if (sessionFiles.length > 1) {
|
||||
SessionReconciliation: {
|
||||
Output("[coordinator] Reconciling session state...")
|
||||
|
||||
  // Pipeline constants (aligned with SKILL.md Three-Mode Pipeline)
|
||||
const SPEC_CHAIN = [
|
||||
    "RESEARCH-001", "DISCUSS-001", "DRAFT-001", "DISCUSS-002",
|
||||
"DRAFT-002", "DISCUSS-003", "DRAFT-003", "DISCUSS-004",
|
||||
"DRAFT-004", "DISCUSS-005", "QUALITY-001", "DISCUSS-006"
|
||||
]
|
||||
|
||||
  const IMPL_CHAIN = ["PLAN-001", "IMPL-001", "TEST-001", "REVIEW-001"]
|
||||
|
||||
  const FE_CHAIN = ["DEV-FE-001", "QA-FE-001"]
|
||||
|
||||
const FULLSTACK_CHAIN = ["PLAN-001", "IMPL-001", "DEV-FE-001", "TEST-001", "QA-FE-001", "REVIEW-001"]
|
||||
|
||||
// Task metadata — role must match VALID_ROLES in SKILL.md
|
||||
const TASK_METADATA = {
|
||||
    // Spec pipeline (12 tasks)
|
||||
"RESEARCH-001": { role: "analyst", phase: "spec", deps: [], description: "Seed analysis: codebase exploration and context gathering" },
|
||||
"DISCUSS-001": { role: "discussant", phase: "spec", deps: ["RESEARCH-001"], description: "Critique research findings" },
|
||||
"DRAFT-001": { role: "writer", phase: "spec", deps: ["DISCUSS-001"], description: "Generate Product Brief" },
|
||||
"DISCUSS-002": { role: "discussant", phase: "spec", deps: ["DRAFT-001"], description: "Critique Product Brief" },
|
||||
"DRAFT-002": { role: "writer", phase: "spec", deps: ["DISCUSS-002"], description: "Generate Requirements/PRD" },
|
||||
"DISCUSS-003": { role: "discussant", phase: "spec", deps: ["DRAFT-002"], description: "Critique Requirements/PRD" },
|
||||
"DRAFT-003": { role: "writer", phase: "spec", deps: ["DISCUSS-003"], description: "Generate Architecture Document" },
|
||||
"DISCUSS-004": { role: "discussant", phase: "spec", deps: ["DRAFT-003"], description: "Critique Architecture Document" },
|
||||
"DRAFT-004": { role: "writer", phase: "spec", deps: ["DISCUSS-004"], description: "Generate Epics" },
|
||||
"DISCUSS-005": { role: "discussant", phase: "spec", deps: ["DRAFT-004"], description: "Critique Epics" },
|
||||
"QUALITY-001": { role: "reviewer", phase: "spec", deps: ["DISCUSS-005"], description: "5-dimension spec quality validation" },
|
||||
"DISCUSS-006": { role: "discussant", phase: "spec", deps: ["QUALITY-001"], description: "Final review discussion and sign-off" },
|
||||
|
||||
    // Impl pipeline (deps shown for impl-only; full-lifecycle adds PLAN-001 → ["DISCUSS-006"])
|
||||
"PLAN-001": { role: "planner", phase: "impl", deps: [], description: "Multi-angle codebase exploration and structured planning" },
|
||||
"IMPL-001": { role: "executor", phase: "impl", deps: ["PLAN-001"], description: "Code implementation following plan" },
|
||||
"TEST-001": { role: "tester", phase: "impl", deps: ["IMPL-001"], description: "Adaptive test-fix cycles and quality gates" },
|
||||
"REVIEW-001": { role: "reviewer", phase: "impl", deps: ["IMPL-001"], description: "4-dimension code review" },
|
||||
|
||||
// Frontend pipeline tasks
|
||||
"DEV-FE-001": { role: "fe-developer", phase: "impl", deps: ["PLAN-001"], description: "Frontend component/page implementation" },
|
||||
"QA-FE-001": { role: "fe-qa", phase: "impl", deps: ["DEV-FE-001"], description: "5-dimension frontend QA" }
|
||||
}
|
||||
|
||||
// Helper: Get predecessor task
|
||||
@@ -164,12 +230,16 @@ SessionReconciliation: {
|
||||
Output(` Pending: ${pendingTasks.length}`)
|
||||
|
||||
// Step 3: Determine remaining work
|
||||
  const expectedChain =
|
||||
session.mode === "spec-only" ? SPEC_CHAIN :
|
||||
session.mode === "impl-only" ? IMPL_CHAIN :
|
||||
session.mode === "fe-only" ? ["PLAN-001", ...FE_CHAIN] :
|
||||
session.mode === "fullstack" ? FULLSTACK_CHAIN :
|
||||
session.mode === "full-lifecycle-fe" ? [...SPEC_CHAIN, ...FULLSTACK_CHAIN] :
|
||||
[...SPEC_CHAIN, ...IMPL_CHAIN] // full-lifecycle default
|
||||
|
||||
const remainingTaskIds = expectedChain.filter(id =>
|
||||
    !completedTasks.some(t => t.subject === id)
|
||||
)
|
||||
|
||||
Output(`[coordinator] Remaining tasks: ${remainingTaskIds.join(", ")}`)
|
||||
@@ -186,32 +256,31 @@ SessionReconciliation: {
|
||||
|
||||
// Step 5: Create missing tasks
|
||||
for (const taskId of remainingTaskIds) {
|
||||
    const existingTask = allTasks.find(t => t.subject === taskId)
|
||||
if (!existingTask) {
|
||||
const metadata = TASK_METADATA[taskId]
|
||||
TaskCreate({
|
||||
        subject: taskId,
|
||||
owner: metadata.role,
|
||||
description: `${metadata.description}\nSession: ${sessionFolder}`,
|
||||
blockedBy: metadata.deps,
|
||||
status: "pending"
|
||||
})
|
||||
      Output(`[coordinator] Created missing task: ${taskId} (${metadata.role})`)
|
||||
}
|
||||
}
|
||||
|
||||
// Step 6: Verify dependencies
|
||||
for (const taskId of remainingTaskIds) {
|
||||
    const task = allTasks.find(t => t.subject === taskId)
|
||||
if (!task) continue
|
||||
const metadata = TASK_METADATA[taskId]
|
||||
const allDepsMet = metadata.deps.every(depId =>
|
||||
      completedTasks.some(t => t.subject === depId)
|
||||
)
|
||||
|
||||
    if (allDepsMet && task.status !== "completed") {
      Output(`[coordinator] Unblocked task: ${taskId} (${metadata.role})`)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -231,7 +300,7 @@ SessionReconciliation: {
|
||||
const nextTask = TaskGet(nextTaskId)
|
||||
const metadata = TASK_METADATA[nextTaskId]
|
||||
|
||||
  if (metadata.deps.every(depId => completedTasks.some(t => t.subject === depId))) {
|
||||
TaskUpdate(nextTaskId, { status: "active" })
|
||||
Output(`[coordinator] Kicking task: ${nextTaskId}`)
|
||||
goto Phase4_CoordinationLoop
|
||||
@@ -422,7 +491,10 @@ const sessionData = {
|
||||
status: "active",
|
||||
created_at: new Date().toISOString(),
|
||||
tasks_total: requirements.mode === "spec-only" ? 12 :
|
||||
               requirements.mode === "impl-only" ? 4 :
|
||||
requirements.mode === "fe-only" ? 3 :
|
||||
requirements.mode === "fullstack" ? 6 :
|
||||
requirements.mode === "full-lifecycle-fe" ? 18 : 16,
|
||||
tasks_completed: 0,
|
||||
current_phase: requirements.mode === "impl-only" ? "impl" : "spec"
|
||||
}
|
||||
@@ -431,16 +503,18 @@ Write(sessionFile, sessionData)
|
||||
Output(`[coordinator] Session file created: ${sessionFile}`)
|
||||
|
||||
// ⚠️ Workers are NOT pre-spawned here.
|
||||
// Workers are spawned on-demand in Phase 4 via Task(run_in_background: true).
|
||||
// Coordinator spawns → STOPS → worker 回调或用户 check/resume 唤醒 coordinator.
|
||||
// See SKILL.md Coordinator Spawn Template for worker prompt templates.
|
||||
//
|
||||
// Worker roles by mode (spawned on-demand, must match VALID_ROLES in SKILL.md):
|
||||
// spec-only: analyst, discussant, writer, reviewer
|
||||
// impl-only: planner, executor, tester, reviewer
|
||||
// fe-only: planner, fe-developer, fe-qa
|
||||
// fullstack: planner, executor, fe-developer, tester, fe-qa, reviewer
|
||||
// full-lifecycle: analyst, discussant, writer, reviewer, planner, executor, tester
|
||||
// full-lifecycle-fe: all of the above + fe-developer, fe-qa
|
||||
// On-demand (ambiguity): analyst or explorer
|
||||
|
||||
goto Phase3
|
||||
```
|
||||
@@ -465,27 +539,43 @@ goto Phase4
|
||||
|
||||
---
|
||||
|
||||
### Phase 4: Spawn-and-Stop
|
||||
|
||||
**Purpose**: Spawn first batch of ready workers, then STOP. 后续推进由 worker 回调或用户命令驱动。
|
||||
|
||||
> **设计原则(Spawn-and-Stop + Callback)**:
|
||||
> - ❌ 禁止: 阻塞循环 `Task(run_in_background: false)` 串行等待所有 worker
|
||||
> - ❌ 禁止: `while` + `sleep` + 轮询
|
||||
> - ✅ 采用: `Task(run_in_background: true)` 后台 spawn,立即返回
|
||||
> - ✅ 采用: Worker SendMessage 回调自动唤醒 coordinator
|
||||
> - ✅ 采用: 用户 `check` / `resume` 命令辅助推进
|
||||
>
|
||||
> Worker prompt 使用 SKILL.md Coordinator Spawn Template。
|
||||
> Coordinator 每次只做一步操作,然后 STOP 交还控制权。
|
||||
> 流水线通过三种唤醒源推进:worker 回调(自动)、用户 resume(手动)、用户 check(状态)。
|
||||
|
||||
```javascript
|
||||
Output("[coordinator] Phase 4: Spawning first batch...")
|
||||
|
||||
// Load monitor command logic
Read("commands/monitor.md")
|
||||
|
||||
// Spawn first batch of ready tasks → STOP
|
||||
const result = handleSpawnNext()
|
||||
|
||||
if (result === "PIPELINE_COMPLETE") {
|
||||
goto Phase5
|
||||
}
|
||||
|
||||
// STOP — coordinator 完成输出,控制权交还给用户
|
||||
// 后续推进方式:
|
||||
// 1. Worker 完成 → SendMessage 回调 → Entry Router → handleCallback → 自动推进
|
||||
// 2. User 输入 "check" → Entry Router → handleCheck → 状态报告
|
||||
// 3. User 输入 "resume" → Entry Router → handleResume → 手动推进
|
||||
Output("")
|
||||
Output("[coordinator] Coordinator paused. Pipeline will advance via:")
|
||||
Output(" • Worker callbacks (automatic)")
|
||||
Output(" • 'check' — view execution status graph")
|
||||
Output(" • 'resume' — manually advance pipeline")
|
||||
```
|
||||
|
||||
---
|
||||
@@ -514,7 +604,7 @@ Output(`[coordinator] Duration: ${calculateDuration(session.created_at, new Date
|
||||
const completedTasks = teamState.tasks.filter(t => t.status === "completed")
|
||||
Output("[coordinator] Deliverables:")
|
||||
for (const task of completedTasks) {
|
||||
Output(` ✓ ${task.task_id}: ${task.description}`)
|
||||
Output(` ✓ ${task.subject}: ${task.description}`)
|
||||
if (task.output_file) {
|
||||
Output(` Output: ${task.output_file}`)
|
||||
}
|
||||
@@ -547,12 +637,12 @@ switch (nextAction) {
|
||||
case "review":
|
||||
const taskToReview = AskUserQuestion({
|
||||
question: "Which task output to review?",
|
||||
choices: completedTasks.map(t => t.task_id)
|
||||
choices: completedTasks.map(t => t.subject)
|
||||
})
|
||||
const reviewTask = completedTasks.find(t => t.task_id === taskToReview)
|
||||
const reviewTask = completedTasks.find(t => t.subject === taskToReview)
|
||||
if (reviewTask.output_file) {
|
||||
const content = Read(reviewTask.output_file)
|
||||
Output(`[coordinator] Task: ${reviewTask.task_id}`)
|
||||
Output(`[coordinator] Task: ${reviewTask.subject}`)
|
||||
Output(content)
|
||||
}
|
||||
goto Phase5 // Loop back for more actions
|
||||
@@ -569,8 +659,8 @@ switch (nextAction) {
|
||||
|
||||
case "handoff-lite-plan":
|
||||
Output("[coordinator] Generating lite-plan from specifications...")
|
||||
// Read finalize-spec output
|
||||
const specOutput = Read(getTaskOutput("finalize-spec"))
|
||||
// Read spec completion output (DISCUSS-006 = final sign-off)
|
||||
const specOutput = Read(getTaskOutput("DISCUSS-006"))
|
||||
// Create lite-plan format
|
||||
const litePlan = generateLitePlan(specOutput)
|
||||
const litePlanFile = `D:/Claude_dms3/.workflow/.sessions/${session.session_id}-lite-plan.md`
|
||||
@@ -580,7 +670,7 @@ switch (nextAction) {
|
||||
|
||||
case "handoff-full-plan":
|
||||
Output("[coordinator] Generating full-plan from specifications...")
|
||||
const fullSpecOutput = Read(getTaskOutput("finalize-spec"))
|
||||
const fullSpecOutput = Read(getTaskOutput("DISCUSS-006"))
|
||||
const fullPlan = generateFullPlan(fullSpecOutput)
|
||||
const fullPlanFile = `D:/Claude_dms3/.workflow/.sessions/${session.session_id}-full-plan.md`
|
||||
Write(fullPlanFile, fullPlan)
|
||||
@@ -589,7 +679,7 @@ switch (nextAction) {
|
||||
|
||||
case "handoff-req-plan":
|
||||
Output("[coordinator] Generating req-plan from requirements...")
|
||||
const reqAnalysis = Read(getTaskOutput("req-analysis"))
|
||||
const reqAnalysis = Read(getTaskOutput("RESEARCH-001"))
|
||||
const reqPlan = generateReqPlan(reqAnalysis)
|
||||
const reqPlanFile = `D:/Claude_dms3/.workflow/.sessions/${session.session_id}-req-plan.md`
|
||||
Write(reqPlanFile, reqPlan)
|
||||
@@ -598,7 +688,7 @@ switch (nextAction) {
|
||||
|
||||
case "handoff-create-issues":
|
||||
Output("[coordinator] Generating GitHub issues...")
|
||||
const issuesSpec = Read(getTaskOutput("finalize-spec"))
|
||||
const issuesSpec = Read(getTaskOutput("DISCUSS-006"))
|
||||
const issues = generateGitHubIssues(issuesSpec)
|
||||
const issuesFile = `D:/Claude_dms3/.workflow/.sessions/${session.session_id}-issues.json`
|
||||
Write(issuesFile, issues)
|
||||
@@ -665,7 +755,14 @@ function generateGitHubIssues(specOutput) {
|
||||
"resumed_at": null,
|
||||
"tasks_total": 16,
|
||||
"tasks_completed": 5,
|
||||
"current_phase": "spec"
|
||||
"current_phase": "spec",
|
||||
"active_workers": [
|
||||
{
|
||||
"task_subject": "DISCUSS-003",
|
||||
"role": "discussant",
|
||||
"spawned_at": "2026-02-18T10:15:00Z"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
@@ -235,6 +235,7 @@ TeamCreate({ team_name: teamName })
|
||||
// Analyst (spec-only / full)
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn analyst worker`,
|
||||
team_name: teamName,
|
||||
name: "analyst",
|
||||
prompt: `你是 team "${teamName}" 的 ANALYST。
|
||||
@@ -256,6 +257,7 @@ Session: ${sessionFolder}
|
||||
// Writer (spec-only / full)
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn writer worker`,
|
||||
team_name: teamName,
|
||||
name: "writer",
|
||||
prompt: `你是 team "${teamName}" 的 WRITER。
|
||||
@@ -276,6 +278,7 @@ Session: ${sessionFolder}
|
||||
// Discussant (spec-only / full)
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn discussant worker`,
|
||||
team_name: teamName,
|
||||
name: "discussant",
|
||||
prompt: `你是 team "${teamName}" 的 DISCUSSANT。
|
||||
@@ -297,6 +300,7 @@ Session: ${sessionFolder}
|
||||
// Planner (impl-only / full)
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn planner worker`,
|
||||
team_name: teamName,
|
||||
name: "planner",
|
||||
prompt: `你是 team "${teamName}" 的 PLANNER。
|
||||
@@ -318,6 +322,7 @@ Session: ${sessionFolder}
|
||||
// Executor (impl-only / full)
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn executor worker`,
|
||||
team_name: teamName,
|
||||
name: "executor",
|
||||
prompt: `你是 team "${teamName}" 的 EXECUTOR。
|
||||
@@ -338,6 +343,7 @@ Task({
|
||||
// Tester (impl-only / full)
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn tester worker`,
|
||||
team_name: teamName,
|
||||
name: "tester",
|
||||
prompt: `你是 team "${teamName}" 的 TESTER。
|
||||
@@ -358,6 +364,7 @@ Task({
|
||||
// Reviewer (all modes)
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn reviewer worker`,
|
||||
team_name: teamName,
|
||||
name: "reviewer",
|
||||
prompt: `你是 team "${teamName}" 的 REVIEWER。
|
||||
|
||||
@@ -294,6 +294,7 @@ TaskCreate({
|
||||
// 5. Spawn planner agent
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn planner worker`,
|
||||
team_name: teamName,
|
||||
name: "planner",
|
||||
prompt: `你是 team "${teamName}" 的 PLANNER。
|
||||
@@ -331,6 +332,7 @@ EXEC-* 任务 description 必须包含 solution_file 字段指向该文件
|
||||
// 6. Spawn executor agent
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn executor worker`,
|
||||
team_name: teamName,
|
||||
name: "executor",
|
||||
prompt: `你是 team "${teamName}" 的 EXECUTOR。
|
||||
|
||||
@@ -285,6 +285,7 @@ TeamCreate({ team_name: teamName })
|
||||
// Scout
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn scout worker`,
|
||||
team_name: teamName,
|
||||
name: "scout",
|
||||
prompt: `你是 team "${teamName}" 的 SCOUT。
|
||||
@@ -313,6 +314,7 @@ Task({
|
||||
// Strategist
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn strategist worker`,
|
||||
team_name: teamName,
|
||||
name: "strategist",
|
||||
prompt: `你是 team "${teamName}" 的 STRATEGIST。
|
||||
@@ -345,6 +347,7 @@ if (isFullMode) {
|
||||
const agentName = `generator-${i}`
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn ${agentName} worker`,
|
||||
team_name: teamName,
|
||||
name: agentName,
|
||||
prompt: `你是 team "${teamName}" 的 GENERATOR (${agentName})。
|
||||
@@ -371,6 +374,7 @@ if (isFullMode) {
|
||||
} else {
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn generator worker`,
|
||||
team_name: teamName,
|
||||
name: "generator",
|
||||
prompt: `你是 team "${teamName}" 的 GENERATOR。
|
||||
@@ -400,6 +404,7 @@ if (isFullMode) {
|
||||
const agentName = `executor-${i}`
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn ${agentName} worker`,
|
||||
team_name: teamName,
|
||||
name: agentName,
|
||||
prompt: `你是 team "${teamName}" 的 EXECUTOR (${agentName})。
|
||||
@@ -426,6 +431,7 @@ if (isFullMode) {
|
||||
} else {
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn executor worker`,
|
||||
team_name: teamName,
|
||||
name: "executor",
|
||||
prompt: `你是 team "${teamName}" 的 EXECUTOR。
|
||||
@@ -452,6 +458,7 @@ if (isFullMode) {
|
||||
// Analyst
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn analyst worker`,
|
||||
team_name: teamName,
|
||||
name: "analyst",
|
||||
prompt: `你是 team "${teamName}" 的 ANALYST。
|
||||
|
||||
@@ -125,6 +125,9 @@ for (const stageTask of pipelineTasks) {
|
||||
// 3. 同步 spawn worker — 阻塞直到 worker 返回(Stop-Wait 核心)
|
||||
const workerResult = Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn ${workerConfig.role} worker for ${stageTask.subject}`,
|
||||
team_name: teamName,
|
||||
name: workerConfig.role,
|
||||
prompt: `你是 team "${teamName}" 的 ${workerConfig.role.toUpperCase()}。
|
||||
|
||||
## ⚠️ 首要指令(MUST)
|
||||
|
||||
@@ -206,6 +206,7 @@ TeamCreate({ team_name: "roadmap-dev" })
|
||||
// Planner
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn roadmap-dev worker`,
|
||||
team_name: "roadmap-dev",
|
||||
name: "planner",
|
||||
prompt: `你是 team "roadmap-dev" 的 PLANNER。
|
||||
@@ -230,6 +231,7 @@ Session: ${sessionFolder}
|
||||
// Executor
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn roadmap-dev worker`,
|
||||
team_name: "roadmap-dev",
|
||||
name: "executor",
|
||||
prompt: `你是 team "roadmap-dev" 的 EXECUTOR。
|
||||
@@ -254,6 +256,7 @@ Session: ${sessionFolder}
|
||||
// Verifier
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn roadmap-dev worker`,
|
||||
team_name: "roadmap-dev",
|
||||
name: "verifier",
|
||||
prompt: `你是 team "roadmap-dev" 的 VERIFIER。
|
||||
|
||||
@@ -172,6 +172,7 @@ function spawnPlanner(phase, gapIteration, sessionFolder) {
|
||||
// Synchronous call - blocks until planner returns
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn planner worker for phase ${phase}`,
|
||||
team_name: "roadmap-dev",
|
||||
name: "planner",
|
||||
prompt: `You are the PLANNER for team "roadmap-dev".
|
||||
@@ -203,6 +204,7 @@ function spawnExecutor(phase, gapIteration, sessionFolder) {
|
||||
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn executor worker for phase ${phase}`,
|
||||
team_name: "roadmap-dev",
|
||||
name: "executor",
|
||||
prompt: `You are the EXECUTOR for team "roadmap-dev".
|
||||
@@ -234,6 +236,7 @@ function spawnVerifier(phase, gapIteration, sessionFolder) {
|
||||
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn verifier worker for phase ${phase}`,
|
||||
team_name: "roadmap-dev",
|
||||
name: "verifier",
|
||||
prompt: `You are the VERIFIER for team "roadmap-dev".
|
||||
|
||||
@@ -303,6 +303,7 @@ TeamCreate({ team_name: teamName })
|
||||
```javascript
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn scanner worker`,
|
||||
prompt: `你是 team "${teamName}" 的 SCANNER。
|
||||
|
||||
## ⚠️ 首要指令(MUST)
|
||||
@@ -335,6 +336,7 @@ Skill(skill="team-tech-debt", args="--role=scanner")
|
||||
```javascript
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn assessor worker`,
|
||||
prompt: `你是 team "${teamName}" 的 ASSESSOR。
|
||||
|
||||
## ⚠️ 首要指令(MUST)
|
||||
@@ -364,6 +366,7 @@ Skill(skill="team-tech-debt", args="--role=assessor")
|
||||
```javascript
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn planner worker`,
|
||||
prompt: `你是 team "${teamName}" 的 PLANNER。
|
||||
|
||||
## ⚠️ 首要指令(MUST)
|
||||
@@ -393,6 +396,7 @@ Skill(skill="team-tech-debt", args="--role=planner")
|
||||
```javascript
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn executor worker`,
|
||||
prompt: `你是 team "${teamName}" 的 EXECUTOR。
|
||||
|
||||
## ⚠️ 首要指令(MUST)
|
||||
@@ -422,6 +426,7 @@ Skill(skill="team-tech-debt", args="--role=executor")
|
||||
```javascript
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn validator worker`,
|
||||
prompt: `你是 team "${teamName}" 的 VALIDATOR。
|
||||
|
||||
## ⚠️ 首要指令(MUST)
|
||||
|
||||
@@ -96,6 +96,9 @@ for (const stageTask of pipelineTasks) {
|
||||
// Task() 本身就是等待机制,无需 sleep/poll
|
||||
const workerResult = Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn ${workerConfig.role} worker for ${stageTask.subject}`,
|
||||
team_name: teamName,
|
||||
name: workerConfig.role,
|
||||
prompt: buildWorkerPrompt(stageTask, workerConfig, sessionFolder, taskDescription),
|
||||
run_in_background: false // ← 同步阻塞 = 天然回调
|
||||
})
|
||||
@@ -290,6 +293,9 @@ function handleStageFailure(stageTask, taskState, workerConfig, autoYes) {
|
||||
TaskUpdate({ taskId: stageTask.id, status: 'in_progress' })
|
||||
const retryResult = Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Retry ${workerConfig.role} worker for ${stageTask.subject}`,
|
||||
team_name: teamName,
|
||||
name: workerConfig.role,
|
||||
prompt: buildWorkerPrompt(stageTask, workerConfig, sessionFolder, taskDescription),
|
||||
run_in_background: false
|
||||
})
|
||||
|
||||
@@ -226,6 +226,7 @@ TeamCreate({ team_name: teamName })
|
||||
// Strategist
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn strategist worker`,
|
||||
team_name: teamName,
|
||||
name: "strategist",
|
||||
prompt: `你是 team "${teamName}" 的 STRATEGIST。
|
||||
@@ -251,6 +252,7 @@ Task({
|
||||
// Generator
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn generator worker`,
|
||||
team_name: teamName,
|
||||
name: "generator",
|
||||
prompt: `你是 team "${teamName}" 的 GENERATOR。
|
||||
@@ -274,6 +276,7 @@ Task({
|
||||
// Executor
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn executor worker`,
|
||||
team_name: teamName,
|
||||
name: "executor",
|
||||
prompt: `你是 team "${teamName}" 的 EXECUTOR。
|
||||
@@ -297,6 +300,7 @@ Task({
|
||||
// Analyst
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn analyst worker`,
|
||||
team_name: teamName,
|
||||
name: "analyst",
|
||||
prompt: `你是 team "${teamName}" 的 ANALYST。
|
||||
|
||||
@@ -294,6 +294,7 @@ TeamCreate({ team_name: teamName })
|
||||
// Researcher
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn researcher worker`,
|
||||
team_name: teamName,
|
||||
name: "researcher",
|
||||
prompt: `你是 team "${teamName}" 的 RESEARCHER。
|
||||
@@ -320,6 +321,7 @@ Session: ${sessionFolder}
|
||||
// Designer
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn designer worker`,
|
||||
team_name: teamName,
|
||||
name: "designer",
|
||||
prompt: `你是 team "${teamName}" 的 DESIGNER。
|
||||
@@ -345,6 +347,7 @@ Session: ${sessionFolder}
|
||||
// Reviewer (AUDIT)
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn reviewer worker`,
|
||||
team_name: teamName,
|
||||
name: "reviewer",
|
||||
prompt: `你是 team "${teamName}" 的 REVIEWER (审查员)。
|
||||
@@ -370,6 +373,7 @@ Session: ${sessionFolder}
|
||||
// Implementer (BUILD)
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn implementer worker`,
|
||||
team_name: teamName,
|
||||
name: "implementer",
|
||||
prompt: `你是 team "${teamName}" 的 IMPLEMENTER (实现者)。
|
||||
|
||||
@@ -286,6 +286,7 @@ if (isParallel) {
|
||||
const agentName = `explorer-${i + 1}`
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn ${agentName} worker`,
|
||||
team_name: teamName,
|
||||
name: agentName,
|
||||
prompt: `你是 team "${teamName}" 的 EXPLORER (${agentName})。
|
||||
@@ -316,6 +317,7 @@ if (isParallel) {
|
||||
// Single explorer for quick mode
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn explorer worker`,
|
||||
team_name: teamName,
|
||||
name: "explorer",
|
||||
prompt: `你是 team "${teamName}" 的 EXPLORER。
|
||||
@@ -349,6 +351,7 @@ if (isParallel) {
|
||||
const agentName = `analyst-${i + 1}`
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn ${agentName} worker`,
|
||||
team_name: teamName,
|
||||
name: agentName,
|
||||
prompt: `你是 team "${teamName}" 的 ANALYST (${agentName})。
|
||||
@@ -377,6 +380,7 @@ if (isParallel) {
|
||||
} else {
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn analyst worker`,
|
||||
team_name: teamName,
|
||||
name: "analyst",
|
||||
prompt: `你是 team "${teamName}" 的 ANALYST。
|
||||
@@ -405,6 +409,7 @@ if (isParallel) {
|
||||
// ── Discussant (always single) ──
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn discussant worker`,
|
||||
team_name: teamName,
|
||||
name: "discussant",
|
||||
prompt: `你是 team "${teamName}" 的 DISCUSSANT。
|
||||
@@ -430,6 +435,7 @@ Task({
|
||||
// ── Synthesizer (always single) ──
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn synthesizer worker`,
|
||||
team_name: teamName,
|
||||
name: "synthesizer",
|
||||
prompt: `你是 team "${teamName}" 的 SYNTHESIZER。
|
||||
|
||||
@@ -110,6 +110,9 @@ for (const stageTask of preDiscussionTasks) {
|
||||
// 3. 同步 spawn worker — 阻塞直到 worker 返回(Stop-Wait 核心)
|
||||
const workerResult = Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn ${workerConfig.role} worker for ${stageTask.subject}`,
|
||||
team_name: teamName,
|
||||
name: workerConfig.role,
|
||||
prompt: `你是 team "${teamName}" 的 ${workerConfig.role.toUpperCase()}。
|
||||
|
||||
## ⚠️ 首要指令(MUST)
|
||||
@@ -204,11 +207,25 @@ if (MAX_DISCUSSION_ROUNDS === 0) {
|
||||
TaskUpdate({ taskId: discussTask.id, status: 'in_progress' })
|
||||
const discussResult = Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn discussant worker for ${discussTask.subject}`,
|
||||
team_name: teamName,
|
||||
name: "discussant",
|
||||
prompt: `你是 team "${teamName}" 的 DISCUSSANT。
|
||||
|
||||
## Primary Directive
|
||||
Skill(skill="team-ultra-analyze", args="--role=discussant")
|
||||
当前任务: ${discussTask.subject}
|
||||
Session: ${sessionFolder}
|
||||
TaskUpdate({ taskId: "${discussTask.id}", status: "completed" })`,
|
||||
|
||||
## Assignment
|
||||
- Task ID: ${discussTask.id}
|
||||
- Task: ${discussTask.subject}
|
||||
- Session: ${sessionFolder}
|
||||
|
||||
## Workflow
|
||||
1. Skill(skill="team-ultra-analyze", args="--role=discussant") to load role definition
|
||||
2. Execute task per role.md
|
||||
3. TaskUpdate({ taskId: "${discussTask.id}", status: "completed" })
|
||||
|
||||
All outputs carry [discussant] tag.`,
|
||||
run_in_background: false
|
||||
})
|
||||
}
|
||||
@@ -382,11 +399,25 @@ if (synthTask) {
|
||||
TaskUpdate({ taskId: synthTask.id, status: 'in_progress' })
|
||||
const synthResult = Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: `Spawn synthesizer worker for ${synthTask.subject}`,
|
||||
team_name: teamName,
|
||||
name: "synthesizer",
|
||||
prompt: `你是 team "${teamName}" 的 SYNTHESIZER。
|
||||
|
||||
## Primary Directive
|
||||
Skill(skill="team-ultra-analyze", args="--role=synthesizer")
|
||||
当前任务: ${synthTask.subject}
|
||||
Session: ${sessionFolder}
|
||||
TaskUpdate({ taskId: "${synthTask.id}", status: "completed" })`,
|
||||
|
||||
## Assignment
|
||||
- Task ID: ${synthTask.id}
|
||||
- Task: ${synthTask.subject}
|
||||
- Session: ${sessionFolder}
|
||||
|
||||
## Workflow
|
||||
1. Skill(skill="team-ultra-analyze", args="--role=synthesizer") to load role definition
|
||||
2. Execute task per role.md
|
||||
3. TaskUpdate({ taskId: "${synthTask.id}", status: "completed" })
|
||||
|
||||
All outputs carry [synthesizer] tag.`,
|
||||
run_in_background: false
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1643,7 +1643,45 @@ function getCCWHome(): string {
|
||||
}
|
||||
|
||||
/**
|
||||
* List all projects with their memory counts
|
||||
* Recursively find all core-memory databases in nested project structure
|
||||
* Handles both flat structure (projects/my-project/) and nested structure (projects/d-/my-project/)
|
||||
*/
|
||||
function findAllCoreMemoryDatabases(
|
||||
projectsDir: string,
|
||||
baseRelPath: string = ''
|
||||
): Array<{ projectId: string; dbPath: string }> {
|
||||
const results: Array<{ projectId: string; dbPath: string }> = [];
|
||||
const entries = readdirSync(projectsDir, { withFileTypes: true });
|
||||
|
||||
for (const entry of entries) {
|
||||
if (!entry.isDirectory()) continue;
|
||||
|
||||
// Skip hidden directories
|
||||
if (entry.name.startsWith('.')) continue;
|
||||
|
||||
const currentPath = join(projectsDir, entry.name);
|
||||
const currentRelPath = baseRelPath ? join(baseRelPath, entry.name) : entry.name;
|
||||
|
||||
// Check if this directory has a core-memory database
|
||||
const coreMemoryDb = join(currentPath, 'core-memory', 'core_memory.db');
|
||||
if (existsSync(coreMemoryDb)) {
|
||||
// Found a project - use relative path as project ID
|
||||
results.push({
|
||||
projectId: currentRelPath,
|
||||
dbPath: coreMemoryDb
|
||||
});
|
||||
}
|
||||
|
||||
// Recurse into subdirectories to find nested projects
|
||||
const nestedResults = findAllCoreMemoryDatabases(currentPath, currentRelPath);
|
||||
results.push(...nestedResults);
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
|
||||
/**
|
||||
* List all projects with their memory counts (supports nested project structure)
|
||||
*/
|
||||
export function listAllProjects(): ProjectInfo[] {
|
||||
const projectsDir = join(getCCWHome(), 'projects');
|
||||
@@ -1652,43 +1690,38 @@ export function listAllProjects(): ProjectInfo[] {
|
||||
return [];
|
||||
}
|
||||
|
||||
// Find all core-memory databases recursively
|
||||
const allProjects = findAllCoreMemoryDatabases(projectsDir);
|
||||
|
||||
const projects: ProjectInfo[] = [];
|
||||
const entries = readdirSync(projectsDir, { withFileTypes: true });
|
||||
|
||||
for (const entry of entries) {
|
||||
if (!entry.isDirectory()) continue;
|
||||
|
||||
const projectId = entry.name;
|
||||
const coreMemoryDb = join(projectsDir, projectId, 'core-memory', 'core_memory.db');
|
||||
|
||||
for (const { projectId, dbPath } of allProjects) {
|
||||
let memoriesCount = 0;
|
||||
let clustersCount = 0;
|
||||
let lastUpdated: string | undefined;
|
||||
|
||||
if (existsSync(coreMemoryDb)) {
|
||||
try {
|
||||
const db = new Database(dbPath, { readonly: true });
|
||||
|
||||
// Count memories
|
||||
const memResult = db.prepare('SELECT COUNT(*) as count FROM memories').get() as { count: number };
|
||||
memoriesCount = memResult?.count || 0;
|
||||
|
||||
// Count clusters
|
||||
try {
|
||||
const db = new Database(coreMemoryDb, { readonly: true });
|
||||
|
||||
// Count memories
|
||||
const memResult = db.prepare('SELECT COUNT(*) as count FROM memories').get() as { count: number };
|
||||
memoriesCount = memResult?.count || 0;
|
||||
|
||||
// Count clusters
|
||||
try {
|
||||
const clusterResult = db.prepare('SELECT COUNT(*) as count FROM session_clusters').get() as { count: number };
|
||||
clustersCount = clusterResult?.count || 0;
|
||||
} catch {
|
||||
// Table might not exist
|
||||
}
|
||||
|
||||
// Get last update time
|
||||
const lastMemory = db.prepare('SELECT MAX(updated_at) as last FROM memories').get() as { last: string };
|
||||
lastUpdated = lastMemory?.last;
|
||||
|
||||
db.close();
|
||||
const clusterResult = db.prepare('SELECT COUNT(*) as count FROM session_clusters').get() as { count: number };
|
||||
clustersCount = clusterResult?.count || 0;
|
||||
} catch {
|
||||
// Database might be locked or corrupted
|
||||
// Table might not exist
|
||||
}
|
||||
|
||||
// Get last update time
|
||||
const lastMemory = db.prepare('SELECT MAX(updated_at) as last FROM memories').get() as { last: string };
|
||||
lastUpdated = lastMemory?.last;
|
||||
|
||||
db.close();
|
||||
} catch {
|
||||
// Database might be locked or corrupted
|
||||
}
|
||||
|
||||
// Convert project ID back to approximate path
|
||||
@@ -1715,7 +1748,8 @@ export function listAllProjects(): ProjectInfo[] {
|
||||
}
|
||||
|
||||
/**
|
||||
* Get memories from another project by ID
|
||||
* Get memories from another project by ID (supports nested project structure)
|
||||
* @param projectId - Project ID which can be a nested path like "d-/ccws"
|
||||
*/
|
||||
export function getMemoriesFromProject(projectId: string): CoreMemory[] {
|
||||
const projectsDir = join(getCCWHome(), 'projects');
|
||||
@@ -1746,8 +1780,8 @@ export function getMemoriesFromProject(projectId: string): CoreMemory[] {
|
||||
}
|
||||
|
||||
/**
|
||||
* Find a memory by ID across all projects
|
||||
* Searches through all project databases to locate a specific memory
|
||||
* Find a memory by ID across all projects (supports nested project structure)
|
||||
* Searches through all project databases recursively to locate a specific memory
|
||||
*/
|
||||
export function findMemoryAcrossProjects(memoryId: string): { memory: CoreMemory; projectId: string } | null {
|
||||
const projectsDir = join(getCCWHome(), 'projects');
|
||||
@@ -1756,18 +1790,12 @@ export function findMemoryAcrossProjects(memoryId: string): { memory: CoreMemory
|
||||
return null;
|
||||
}
|
||||
|
||||
const entries = readdirSync(projectsDir, { withFileTypes: true });
|
||||
|
||||
for (const entry of entries) {
|
||||
if (!entry.isDirectory()) continue;
|
||||
|
||||
const projectId = entry.name;
|
||||
const coreMemoryDb = join(projectsDir, projectId, 'core-memory', 'core_memory.db');
|
||||
|
||||
if (!existsSync(coreMemoryDb)) continue;
|
||||
// Find all core-memory databases recursively
|
||||
const allProjects = findAllCoreMemoryDatabases(projectsDir);
|
||||
|
||||
for (const { projectId, dbPath } of allProjects) {
|
||||
try {
|
||||
const db = new Database(coreMemoryDb, { readonly: true });
|
||||
const db = new Database(dbPath, { readonly: true });
|
||||
const row = db.prepare('SELECT * FROM memories WHERE id = ?').get(memoryId) as any;
|
||||
db.close();
|
||||
|
||||
|
||||
@@ -80,6 +80,14 @@
|
||||
"Maps keywords to execution modes using ModeRegistryService",
|
||||
"Injects systemMessage on mode activation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Spec Context Injection",
|
||||
"description": "Loads project specs matching prompt keywords and injects as system context",
|
||||
"enabled": true,
|
||||
"command": "ccw spec load --stdin",
|
||||
"timeout": 5000,
|
||||
"failMode": "silent"
|
||||
}
|
||||
],
|
||||
"file-modified": [
|
||||
|
||||
417
ccw/src/tools/spec-index-builder.ts
Normal file
417
ccw/src/tools/spec-index-builder.ts
Normal file
@@ -0,0 +1,417 @@
|
||||
/**
|
||||
* Spec Index Builder
|
||||
*
|
||||
* Scans .workflow/{dimension}/*.md files, parses YAML frontmatter via
|
||||
* gray-matter, and writes .spec-index/{dimension}.index.json cache files.
|
||||
*
|
||||
* Supports 4 dimensions: specs, roadmap, changelog, personal
|
||||
*
|
||||
* YAML Frontmatter Schema:
|
||||
* ---
|
||||
* title: "Document Title"
|
||||
* dimension: "specs"
|
||||
* keywords: ["auth", "security"]
|
||||
* readMode: "required" # required | optional
|
||||
* priority: "high" # critical | high | medium | low
|
||||
* ---
|
||||
*/
|
||||
|
||||
import matter from 'gray-matter';
|
||||
import { readFileSync, writeFileSync, existsSync, mkdirSync, readdirSync } from 'fs';
|
||||
import { join, basename, extname, relative } from 'path';
|
||||
|
||||
// ============================================================================
|
||||
// Types
|
||||
// ============================================================================
|
||||
|
||||
/**
 * YAML frontmatter schema for spec MD files.
 *
 * All five fields are expected in each indexed document's frontmatter
 * block; the index builder skips files with malformed or missing
 * frontmatter.
 */
export interface SpecFrontmatter {
  // Document title.
  title: string;
  // Dimension this spec belongs to.
  dimension: string;
  // Keywords for matching against user prompts.
  keywords: string[];
  // Whether this spec is required or optional.
  readMode: 'required' | 'optional';
  // Priority level for ordering.
  priority: 'critical' | 'high' | 'medium' | 'low';
}
|
||||
|
||||
/**
|
||||
* Single entry in the dimension index cache.
|
||||
*/
|
||||
export interface SpecIndexEntry {
|
||||
/** Document title from frontmatter */
|
||||
title: string;
|
||||
/** Relative file path from project root */
|
||||
file: string;
|
||||
/** Dimension this spec belongs to */
|
||||
dimension: string;
|
||||
/** Keywords for matching against user prompts */
|
||||
keywords: string[];
|
||||
/** Whether this spec is required or optional */
|
||||
readMode: 'required' | 'optional';
|
||||
/** Priority level for ordering */
|
||||
priority: 'critical' | 'high' | 'medium' | 'low';
|
||||
}
|
||||
|
||||
/**
|
||||
* Complete index for one dimension.
|
||||
*/
|
||||
export interface DimensionIndex {
|
||||
/** Dimension name */
|
||||
dimension: string;
|
||||
/** All spec entries in this dimension */
|
||||
entries: SpecIndexEntry[];
|
||||
/** ISO timestamp when this index was built */
|
||||
built_at: string;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Constants
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* The 4 supported spec dimensions.
|
||||
*/
|
||||
export const SPEC_DIMENSIONS = ['specs', 'roadmap', 'changelog', 'personal'] as const;
|
||||
|
||||
export type SpecDimension = typeof SPEC_DIMENSIONS[number];
|
||||
|
||||
/**
|
||||
* Valid readMode values.
|
||||
*/
|
||||
const VALID_READ_MODES = ['required', 'optional'] as const;
|
||||
|
||||
/**
|
||||
* Valid priority values.
|
||||
*/
|
||||
const VALID_PRIORITIES = ['critical', 'high', 'medium', 'low'] as const;
|
||||
|
||||
/**
|
||||
* Directory name for spec index cache files.
|
||||
*/
|
||||
const SPEC_INDEX_DIR = '.spec-index';
|
||||
|
||||
// ============================================================================
|
||||
// Public API
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Get the path to the index JSON file for a given dimension.
|
||||
*
|
||||
* @param projectPath - Project root directory
|
||||
* @param dimension - The dimension name
|
||||
* @returns Absolute path to .spec-index/{dimension}.index.json
|
||||
*/
|
||||
export function getIndexPath(projectPath: string, dimension: string): string {
|
||||
return join(projectPath, SPEC_INDEX_DIR, `${dimension}.index.json`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the path to the .workflow/{dimension} directory.
|
||||
*
|
||||
* @param projectPath - Project root directory
|
||||
* @param dimension - The dimension name
|
||||
* @returns Absolute path to .workflow/{dimension}/
|
||||
*/
|
||||
export function getDimensionDir(projectPath: string, dimension: string): string {
|
||||
return join(projectPath, '.workflow', dimension);
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the index for a single dimension.
|
||||
*
|
||||
* Scans .workflow/{dimension}/*.md files, parses YAML frontmatter,
|
||||
* extracts the 5 required fields, and returns a DimensionIndex.
|
||||
*
|
||||
* Files with malformed or missing frontmatter are skipped gracefully.
|
||||
*
|
||||
* @param projectPath - Project root directory
|
||||
* @param dimension - The dimension to index (e.g., 'specs')
|
||||
* @returns DimensionIndex with all valid entries
|
||||
*/
|
||||
export async function buildDimensionIndex(
|
||||
projectPath: string,
|
||||
dimension: string
|
||||
): Promise<DimensionIndex> {
|
||||
const dimensionDir = getDimensionDir(projectPath, dimension);
|
||||
const entries: SpecIndexEntry[] = [];
|
||||
|
||||
// If directory doesn't exist, return empty index
|
||||
if (!existsSync(dimensionDir)) {
|
||||
return {
|
||||
dimension,
|
||||
entries: [],
|
||||
built_at: new Date().toISOString(),
|
||||
};
|
||||
}
|
||||
|
||||
// Scan for .md files
|
||||
let files: string[];
|
||||
try {
|
||||
files = readdirSync(dimensionDir).filter(
|
||||
f => extname(f).toLowerCase() === '.md'
|
||||
);
|
||||
} catch {
|
||||
// Directory read error - return empty index
|
||||
return {
|
||||
dimension,
|
||||
entries: [],
|
||||
built_at: new Date().toISOString(),
|
||||
};
|
||||
}
|
||||
|
||||
for (const file of files) {
|
||||
const filePath = join(dimensionDir, file);
|
||||
const entry = parseSpecFile(filePath, dimension, projectPath);
|
||||
if (entry) {
|
||||
entries.push(entry);
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
dimension,
|
||||
entries,
|
||||
built_at: new Date().toISOString(),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Build indices for all 4 dimensions and write to .spec-index/.
|
||||
*
|
||||
* Creates .spec-index/ directory if it doesn't exist.
|
||||
* Writes {dimension}.index.json for each dimension.
|
||||
*
|
||||
* @param projectPath - Project root directory
|
||||
*/
|
||||
export async function buildAllIndices(projectPath: string): Promise<void> {
|
||||
const indexDir = join(projectPath, SPEC_INDEX_DIR);
|
||||
|
||||
// Ensure .spec-index directory exists
|
||||
if (!existsSync(indexDir)) {
|
||||
mkdirSync(indexDir, { recursive: true });
|
||||
}
|
||||
|
||||
for (const dimension of SPEC_DIMENSIONS) {
|
||||
const index = await buildDimensionIndex(projectPath, dimension);
|
||||
const indexPath = getIndexPath(projectPath, dimension);
|
||||
|
||||
try {
|
||||
writeFileSync(indexPath, JSON.stringify(index, null, 2), 'utf-8');
|
||||
} catch (err) {
|
||||
// Log but continue with other dimensions
|
||||
console.error(
|
||||
`[spec-index-builder] Failed to write index for ${dimension}: ${(err as Error).message}`
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Read a cached dimension index from disk.
|
||||
*
|
||||
* @param projectPath - Project root directory
|
||||
* @param dimension - The dimension to read
|
||||
* @returns DimensionIndex if cache exists and is valid, null otherwise
|
||||
*/
|
||||
export function readCachedIndex(
|
||||
projectPath: string,
|
||||
dimension: string
|
||||
): DimensionIndex | null {
|
||||
const indexPath = getIndexPath(projectPath, dimension);
|
||||
|
||||
if (!existsSync(indexPath)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
try {
|
||||
const content = readFileSync(indexPath, 'utf-8');
|
||||
const parsed = JSON.parse(content) as DimensionIndex;
|
||||
|
||||
// Basic validation
|
||||
if (
|
||||
parsed &&
|
||||
typeof parsed.dimension === 'string' &&
|
||||
Array.isArray(parsed.entries) &&
|
||||
typeof parsed.built_at === 'string'
|
||||
) {
|
||||
return parsed;
|
||||
}
|
||||
return null;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the dimension index, using cache if available, otherwise building fresh.
|
||||
*
|
||||
* @param projectPath - Project root directory
|
||||
* @param dimension - The dimension to get
|
||||
* @param forceRebuild - Skip cache and rebuild from source files
|
||||
* @returns DimensionIndex
|
||||
*/
|
||||
export async function getDimensionIndex(
|
||||
projectPath: string,
|
||||
dimension: string,
|
||||
forceRebuild = false
|
||||
): Promise<DimensionIndex> {
|
||||
if (!forceRebuild) {
|
||||
const cached = readCachedIndex(projectPath, dimension);
|
||||
if (cached) {
|
||||
return cached;
|
||||
}
|
||||
}
|
||||
|
||||
// Build fresh and cache
|
||||
const index = await buildDimensionIndex(projectPath, dimension);
|
||||
|
||||
const indexDir = join(projectPath, SPEC_INDEX_DIR);
|
||||
if (!existsSync(indexDir)) {
|
||||
mkdirSync(indexDir, { recursive: true });
|
||||
}
|
||||
|
||||
const indexPath = getIndexPath(projectPath, dimension);
|
||||
try {
|
||||
writeFileSync(indexPath, JSON.stringify(index, null, 2), 'utf-8');
|
||||
} catch {
|
||||
// Cache write failure is non-fatal
|
||||
}
|
||||
|
||||
return index;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Internal helpers
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Parse a single spec MD file and extract its frontmatter into a SpecIndexEntry.
|
||||
*
|
||||
* @param filePath - Absolute path to the MD file
|
||||
* @param dimension - The dimension this file belongs to
|
||||
* @param projectPath - Project root for computing relative paths
|
||||
* @returns SpecIndexEntry if frontmatter is valid, null if malformed/missing
|
||||
*/
|
||||
function parseSpecFile(
|
||||
filePath: string,
|
||||
dimension: string,
|
||||
projectPath: string
|
||||
): SpecIndexEntry | null {
|
||||
let content: string;
|
||||
try {
|
||||
content = readFileSync(filePath, 'utf-8');
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Parse frontmatter
|
||||
let parsed: matter.GrayMatterFile<string>;
|
||||
try {
|
||||
parsed = matter(content);
|
||||
} catch {
|
||||
// Malformed frontmatter - skip
|
||||
return null;
|
||||
}
|
||||
|
||||
const data = parsed.data as Record<string, unknown>;
|
||||
|
||||
// Extract and validate frontmatter fields
|
||||
const title = extractString(data, 'title');
|
||||
if (!title) {
|
||||
// Title is required - use filename as fallback
|
||||
const fallbackTitle = basename(filePath, extname(filePath));
|
||||
return buildEntry(fallbackTitle, filePath, dimension, projectPath, data);
|
||||
}
|
||||
|
||||
return buildEntry(title, filePath, dimension, projectPath, data);
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a SpecIndexEntry from parsed frontmatter data.
|
||||
*/
|
||||
function buildEntry(
|
||||
title: string,
|
||||
filePath: string,
|
||||
dimension: string,
|
||||
projectPath: string,
|
||||
data: Record<string, unknown>
|
||||
): SpecIndexEntry {
|
||||
// Compute relative file path from project root using path.relative
|
||||
// Normalize to forward slashes for cross-platform consistency
|
||||
const relativePath = relative(projectPath, filePath).replace(/\\/g, '/');
|
||||
|
||||
// Extract keywords - accept string[] or single string
|
||||
const keywords = extractStringArray(data, 'keywords');
|
||||
|
||||
// Extract readMode with validation
|
||||
const rawReadMode = extractString(data, 'readMode');
|
||||
const readMode = isValidReadMode(rawReadMode) ? rawReadMode : 'optional';
|
||||
|
||||
// Extract priority with validation
|
||||
const rawPriority = extractString(data, 'priority');
|
||||
const priority = isValidPriority(rawPriority) ? rawPriority : 'medium';
|
||||
|
||||
return {
|
||||
title,
|
||||
file: relativePath,
|
||||
dimension,
|
||||
keywords,
|
||||
readMode,
|
||||
priority,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract a string value from parsed YAML data.
|
||||
*/
|
||||
function extractString(
|
||||
data: Record<string, unknown>,
|
||||
key: string
|
||||
): string | null {
|
||||
const value = data[key];
|
||||
if (typeof value === 'string' && value.trim().length > 0) {
|
||||
return value.trim();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract a string array from parsed YAML data.
|
||||
* Handles both array format and comma-separated string format.
|
||||
*/
|
||||
function extractStringArray(
|
||||
data: Record<string, unknown>,
|
||||
key: string
|
||||
): string[] {
|
||||
const value = data[key];
|
||||
|
||||
if (Array.isArray(value)) {
|
||||
return value
|
||||
.filter((item): item is string => typeof item === 'string')
|
||||
.map(s => s.trim())
|
||||
.filter(s => s.length > 0);
|
||||
}
|
||||
|
||||
if (typeof value === 'string') {
|
||||
return value
|
||||
.split(',')
|
||||
.map(s => s.trim())
|
||||
.filter(s => s.length > 0);
|
||||
}
|
||||
|
||||
return [];
|
||||
}
|
||||
|
||||
/**
|
||||
* Type guard for valid readMode values.
|
||||
*/
|
||||
function isValidReadMode(value: string | null): value is 'required' | 'optional' {
|
||||
return value !== null && (VALID_READ_MODES as readonly string[]).includes(value);
|
||||
}
|
||||
|
||||
/**
|
||||
* Type guard for valid priority values.
|
||||
*/
|
||||
function isValidPriority(value: string | null): value is 'critical' | 'high' | 'medium' | 'low' {
|
||||
return value !== null && (VALID_PRIORITIES as readonly string[]).includes(value);
|
||||
}
|
||||
296
ccw/src/tools/spec-init.ts
Normal file
296
ccw/src/tools/spec-init.ts
Normal file
@@ -0,0 +1,296 @@
|
||||
/**
|
||||
* Spec Init - Initialize the 4-dimension spec system
|
||||
*
|
||||
* Creates .workflow/specs/, .workflow/roadmap/, .workflow/changelog/,
|
||||
* .workflow/personal/, and .workflow/.spec-index/ directories with
|
||||
* seed MD documents containing YAML frontmatter templates.
|
||||
*
|
||||
* Idempotent: skips existing files, only creates missing directories/files.
|
||||
*/
|
||||
|
||||
import { existsSync, mkdirSync, writeFileSync } from 'fs';
|
||||
import { join } from 'path';
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Types
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export interface SpecFrontmatter {
|
||||
title: string;
|
||||
dimension: string;
|
||||
keywords: string[];
|
||||
readMode: 'required' | 'optional';
|
||||
priority: 'high' | 'medium' | 'low';
|
||||
}
|
||||
|
||||
export interface SeedDoc {
|
||||
filename: string;
|
||||
frontmatter: SpecFrontmatter;
|
||||
body: string;
|
||||
}
|
||||
|
||||
export interface InitResult {
|
||||
created: string[];
|
||||
skipped: string[];
|
||||
directories: string[];
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Constants
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export const DIMENSIONS = ['specs', 'roadmap', 'changelog', 'personal'] as const;
|
||||
export const INDEX_DIR = '.spec-index';
|
||||
|
||||
// ---------------------------------------------------------------------------
// Seed Documents
// ---------------------------------------------------------------------------

/**
 * Seed documents written per dimension during init.
 *
 * Keyed by dimension name. 'changelog' is intentionally seeded empty:
 * initSpecSystem still creates its directory, but writes no files there.
 * Template-literal bodies below are written to disk verbatim (after the
 * serialized frontmatter), so their content is part of the output format.
 */
export const SEED_DOCS: Map<string, SeedDoc[]> = new Map([
  [
    'specs',
    [
      {
        filename: 'coding-conventions.md',
        frontmatter: {
          title: 'Coding Conventions',
          dimension: 'specs',
          keywords: ['typescript', 'naming', 'style', 'convention'],
          readMode: 'required',
          priority: 'high',
        },
        body: `# Coding Conventions

## Naming

- Use camelCase for variables and functions
- Use PascalCase for classes and interfaces
- Use UPPER_SNAKE_CASE for constants

## Formatting

- 2-space indentation
- Single quotes for strings
- Trailing commas in multi-line constructs

## Patterns

- Prefer composition over inheritance
- Use early returns to reduce nesting
- Keep functions under 30 lines when practical

## Error Handling

- Always handle errors explicitly
- Prefer typed errors over generic catch-all
- Log errors with sufficient context
`,
      },
      {
        filename: 'architecture-constraints.md',
        frontmatter: {
          title: 'Architecture Constraints',
          dimension: 'specs',
          keywords: ['architecture', 'module', 'layer', 'pattern'],
          readMode: 'required',
          priority: 'high',
        },
        body: `# Architecture Constraints

## Module Boundaries

- Each module owns its data and exposes a public API
- No circular dependencies between modules
- Shared utilities live in a dedicated shared layer

## Layer Separation

- Presentation layer must not import data layer directly
- Business logic must be independent of framework specifics
- Configuration must be externalized, not hardcoded

## Dependency Rules

- External dependencies require justification
- Prefer standard library when available
- Pin dependency versions for reproducibility
`,
      },
    ],
  ],
  [
    'personal',
    [
      {
        filename: 'coding-style.md',
        frontmatter: {
          title: 'Personal Coding Style',
          dimension: 'personal',
          keywords: ['style', 'preference'],
          readMode: 'optional',
          priority: 'medium',
        },
        body: `# Personal Coding Style

## Preferences

- Describe your preferred coding style here
- Example: verbose variable names vs terse, functional vs imperative

## Patterns I Prefer

- List patterns you reach for most often
- Example: builder pattern, factory functions, tagged unions

## Things I Avoid

- List anti-patterns or approaches you dislike
- Example: deep inheritance hierarchies, magic strings
`,
      },
      {
        filename: 'tool-preferences.md',
        frontmatter: {
          title: 'Tool Preferences',
          dimension: 'personal',
          keywords: ['tool', 'cli', 'editor'],
          readMode: 'optional',
          priority: 'low',
        },
        body: `# Tool Preferences

## Editor

- Preferred editor and key extensions/plugins

## CLI Tools

- Preferred shell, package manager, build tools

## Debugging

- Preferred debugging approach and tools
`,
      },
    ],
  ],
  [
    'roadmap',
    [
      {
        filename: 'current.md',
        frontmatter: {
          title: 'Current Roadmap',
          dimension: 'roadmap',
          keywords: ['roadmap', 'plan', 'milestone'],
          readMode: 'optional',
          priority: 'medium',
        },
        body: `# Current Roadmap

## Active Milestone

- Milestone name and target date
- Key deliverables

## Upcoming

- Next planned features or improvements

## Completed

- Recently completed milestones for reference
`,
      },
    ],
  ],
  // 'changelog' seeds nothing; its entries accrue over time.
  [
    'changelog',
    [],
  ],
]);
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Frontmatter Serializer
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Serialize a SpecFrontmatter object to YAML frontmatter string.
|
||||
* Uses template literal to avoid a js-yaml dependency.
|
||||
*/
|
||||
export function formatFrontmatter(fm: SpecFrontmatter): string {
|
||||
const keywordsYaml = fm.keywords.map((k) => ` - ${k}`).join('\n');
|
||||
return [
|
||||
'---',
|
||||
`title: "${fm.title}"`,
|
||||
`dimension: ${fm.dimension}`,
|
||||
`keywords:`,
|
||||
keywordsYaml,
|
||||
`readMode: ${fm.readMode}`,
|
||||
`priority: ${fm.priority}`,
|
||||
'---',
|
||||
].join('\n');
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Init Function
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Initialize the spec system directory structure and seed documents.
|
||||
*
|
||||
* Idempotent: creates directories if missing, writes seed files only when
|
||||
* they do not already exist.
|
||||
*
|
||||
* @param projectPath - Absolute path to the project root
|
||||
* @returns InitResult with lists of created/skipped paths
|
||||
*/
|
||||
export function initSpecSystem(projectPath: string): InitResult {
|
||||
const workflowDir = join(projectPath, '.workflow');
|
||||
const result: InitResult = {
|
||||
created: [],
|
||||
skipped: [],
|
||||
directories: [],
|
||||
};
|
||||
|
||||
// Ensure .workflow root exists
|
||||
if (!existsSync(workflowDir)) {
|
||||
mkdirSync(workflowDir, { recursive: true });
|
||||
}
|
||||
|
||||
// Create dimension directories
|
||||
for (const dim of DIMENSIONS) {
|
||||
const dirPath = join(workflowDir, dim);
|
||||
if (!existsSync(dirPath)) {
|
||||
mkdirSync(dirPath, { recursive: true });
|
||||
result.directories.push(dirPath);
|
||||
}
|
||||
}
|
||||
|
||||
// Create index directory
|
||||
const indexPath = join(workflowDir, INDEX_DIR);
|
||||
if (!existsSync(indexPath)) {
|
||||
mkdirSync(indexPath, { recursive: true });
|
||||
result.directories.push(indexPath);
|
||||
}
|
||||
|
||||
// Write seed documents per dimension
|
||||
for (const [dimension, docs] of SEED_DOCS) {
|
||||
const dimDir = join(workflowDir, dimension);
|
||||
|
||||
for (const doc of docs) {
|
||||
const filePath = join(dimDir, doc.filename);
|
||||
|
||||
if (existsSync(filePath)) {
|
||||
result.skipped.push(filePath);
|
||||
continue;
|
||||
}
|
||||
|
||||
const content = formatFrontmatter(doc.frontmatter) + '\n\n' + doc.body;
|
||||
writeFileSync(filePath, content, 'utf8');
|
||||
result.created.push(filePath);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
208
ccw/src/tools/spec-keyword-extractor.ts
Normal file
208
ccw/src/tools/spec-keyword-extractor.ts
Normal file
@@ -0,0 +1,208 @@
|
||||
/**
|
||||
* Spec Keyword Extractor
|
||||
*
|
||||
* Extracts keywords from user prompt text for matching against
|
||||
* spec document YAML frontmatter keywords.
|
||||
*
|
||||
* Supports:
|
||||
* - English word tokenization (split by spaces/punctuation, remove stop words)
|
||||
* - Chinese character segment extraction (CJK boundary splitting)
|
||||
*/
|
||||
|
||||
/**
 * Common English stop words to filter out during keyword extraction.
 * These words appear frequently but carry little semantic meaning
 * for spec matching.
 *
 * All entries are lowercase; extractEnglishKeywords lowercases each
 * token before performing the lookup, so matching is case-insensitive.
 */
export const STOP_WORDS = new Set([
  // Articles
  'a', 'an', 'the',
  // Pronouns
  'i', 'me', 'my', 'we', 'our', 'you', 'your', 'he', 'she', 'it', 'they', 'them',
  'this', 'that', 'these', 'those', 'what', 'which', 'who', 'whom',
  // Prepositions
  'in', 'on', 'at', 'to', 'for', 'of', 'with', 'by', 'from', 'as', 'into',
  'about', 'between', 'through', 'after', 'before', 'above', 'below',
  // Conjunctions
  'and', 'or', 'but', 'if', 'then', 'else', 'when', 'while', 'so', 'because',
  // Auxiliary verbs
  'is', 'am', 'are', 'was', 'were', 'be', 'been', 'being',
  'has', 'have', 'had', 'do', 'does', 'did',
  'will', 'would', 'shall', 'should', 'may', 'might', 'can', 'could', 'must',
  // Common verbs (too generic for matching)
  'get', 'got', 'make', 'made', 'let', 'go', 'going', 'come', 'take', 'give',
  // Adverbs
  'not', 'no', 'yes', 'also', 'just', 'only', 'very', 'too', 'now', 'here',
  'there', 'how', 'why', 'where', 'all', 'each', 'every', 'both', 'some',
  'any', 'most', 'more', 'less', 'much', 'many', 'few', 'other', 'such',
  // Misc
  'please', 'need', 'want', 'like', 'know', 'think', 'see', 'use', 'using',
  'way', 'thing', 'something', 'anything', 'nothing',
]);
|
||||
|
||||
/**
 * Regex to detect CJK (Chinese/Japanese/Korean) characters.
 * Covers CJK Unified Ideographs and common extensions:
 * U+4E00-U+9FFF (Unified Ideographs), U+3400-U+4DBF (Extension A),
 * U+F900-U+FAFF (Compatibility Ideographs).
 * Non-global on purpose: safe for repeated .test() calls (no lastIndex state).
 */
const CJK_REGEX = /[\u4e00-\u9fff\u3400-\u4dbf\uf900-\ufaff]/;

/**
 * Regex to match contiguous CJK character sequences.
 * Global flag: intended for String.prototype.match / .replace, which
 * reset lastIndex per call.
 */
const CJK_SEGMENT_REGEX = /[\u4e00-\u9fff\u3400-\u4dbf\uf900-\ufaff]+/g;

/**
 * Regex to split text into English word tokens.
 * Splits on whitespace and common punctuation, including fullwidth
 * CJK punctuation (U+3001 U+3002 U+FF0C U+FF1B U+FF1A U+FF01 U+FF1F).
 */
const WORD_SPLIT_REGEX = /[\s,;:!?.()\[\]{}<>"'`~@#$%^&*+=|\\/_\-\u3001\u3002\uff0c\uff1b\uff1a\uff01\uff1f]+/;

/**
 * Minimum word length to keep (filters out single-char English tokens).
 */
const MIN_WORD_LENGTH = 2;
|
||||
|
||||
/**
|
||||
* Extract keywords from prompt text.
|
||||
*
|
||||
* For English text:
|
||||
* Splits by whitespace/punctuation, lowercases, removes stop words,
|
||||
* filters short tokens, and deduplicates.
|
||||
*
|
||||
* For Chinese text:
|
||||
* Extracts contiguous CJK character sequences. For sequences longer
|
||||
* than 2 characters, also generates 2-character sliding window bigrams
|
||||
* to improve matching (since Chinese keywords in YAML are typically
|
||||
* 2-4 character compounds).
|
||||
*
|
||||
* @param text - The user prompt text to extract keywords from
|
||||
* @returns Array of unique keywords (lowercase for English, original for CJK)
|
||||
*/
|
||||
export function extractKeywords(text: string): string[] {
|
||||
if (!text || typeof text !== 'string') {
|
||||
return [];
|
||||
}
|
||||
|
||||
const keywords = new Set<string>();
|
||||
|
||||
// Extract English keywords
|
||||
const englishKeywords = extractEnglishKeywords(text);
|
||||
for (const kw of englishKeywords) {
|
||||
keywords.add(kw);
|
||||
}
|
||||
|
||||
// Extract CJK keywords
|
||||
const cjkKeywords = extractCjkKeywords(text);
|
||||
for (const kw of cjkKeywords) {
|
||||
keywords.add(kw);
|
||||
}
|
||||
|
||||
return Array.from(keywords);
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract English keywords from text.
|
||||
*
|
||||
* @param text - Input text
|
||||
* @returns Array of lowercase English keyword tokens
|
||||
*/
|
||||
function extractEnglishKeywords(text: string): string[] {
|
||||
// Remove CJK characters first so they don't pollute English tokens
|
||||
const cleanedText = text.replace(CJK_SEGMENT_REGEX, ' ');
|
||||
|
||||
const tokens = cleanedText
|
||||
.split(WORD_SPLIT_REGEX)
|
||||
.map(token => token.toLowerCase().trim())
|
||||
.filter(token =>
|
||||
token.length >= MIN_WORD_LENGTH &&
|
||||
!STOP_WORDS.has(token) &&
|
||||
// Filter out pure number tokens
|
||||
!/^\d+$/.test(token)
|
||||
);
|
||||
|
||||
// Deduplicate while preserving order
|
||||
return Array.from(new Set(tokens));
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract CJK keywords from text.
|
||||
*
|
||||
* Extracts contiguous CJK segments. For segments longer than 2 characters,
|
||||
* generates 2-character bigrams as well (common Chinese keyword length).
|
||||
*
|
||||
* @param text - Input text
|
||||
* @returns Array of CJK keyword segments
|
||||
*/
|
||||
function extractCjkKeywords(text: string): string[] {
|
||||
if (!CJK_REGEX.test(text)) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const keywords = new Set<string>();
|
||||
|
||||
// Find all contiguous CJK segments
|
||||
const segments = text.match(CJK_SEGMENT_REGEX);
|
||||
if (!segments) {
|
||||
return [];
|
||||
}
|
||||
|
||||
for (const segment of segments) {
|
||||
// Add the full segment
|
||||
keywords.add(segment);
|
||||
|
||||
// For longer segments, generate 2-char bigrams
|
||||
if (segment.length > 2) {
|
||||
for (let i = 0; i <= segment.length - 2; i++) {
|
||||
keywords.add(segment.substring(i, i + 2));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return Array.from(keywords);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a keyword matches any entry in a keyword list.
|
||||
* Supports case-insensitive matching for English and exact matching for CJK.
|
||||
*
|
||||
* @param keyword - The keyword to check
|
||||
* @param targetKeywords - The target keyword list from spec frontmatter
|
||||
* @returns true if keyword matches any target
|
||||
*/
|
||||
export function keywordMatches(keyword: string, targetKeywords: string[]): boolean {
|
||||
const lowerKeyword = keyword.toLowerCase();
|
||||
return targetKeywords.some(target => {
|
||||
const lowerTarget = target.toLowerCase();
|
||||
// Exact match (case insensitive)
|
||||
if (lowerKeyword === lowerTarget) return true;
|
||||
// Substring match: keyword appears within target or vice versa
|
||||
if (lowerTarget.includes(lowerKeyword) || lowerKeyword.includes(lowerTarget)) return true;
|
||||
return false;
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate match score between extracted keywords and spec keywords.
|
||||
* Higher score means better match.
|
||||
*
|
||||
* @param extractedKeywords - Keywords extracted from user prompt
|
||||
* @param specKeywords - Keywords from spec YAML frontmatter
|
||||
* @returns Number of matching keywords (0 = no match)
|
||||
*/
|
||||
export function calculateMatchScore(
|
||||
extractedKeywords: string[],
|
||||
specKeywords: string[]
|
||||
): number {
|
||||
if (!extractedKeywords.length || !specKeywords.length) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
let score = 0;
|
||||
for (const keyword of extractedKeywords) {
|
||||
if (keywordMatches(keyword, specKeywords)) {
|
||||
score++;
|
||||
}
|
||||
}
|
||||
|
||||
return score;
|
||||
}
|
||||
378
ccw/src/tools/spec-loader.ts
Normal file
378
ccw/src/tools/spec-loader.ts
Normal file
@@ -0,0 +1,378 @@
|
||||
/**
|
||||
* Spec Loader
|
||||
*
|
||||
* Core loading logic for the spec system. Reads index caches, filters specs
|
||||
* by readMode and keyword match, loads MD content, merges by dimension
|
||||
* priority, and formats output for CLI or Hook consumption.
|
||||
*
|
||||
* Single entry point: loadSpecs(options) -> SpecLoadResult
|
||||
*
|
||||
* Data flow:
|
||||
* Keywords -> IndexCache -> Filter(required + keyword-matched) ->
|
||||
* MDLoader -> PriorityMerger -> OutputFormatter
|
||||
*/
|
||||
|
||||
import matter from 'gray-matter';
|
||||
import { readFileSync, existsSync } from 'fs';
|
||||
import { join } from 'path';
|
||||
|
||||
import {
|
||||
getDimensionIndex,
|
||||
SpecIndexEntry,
|
||||
DimensionIndex,
|
||||
SPEC_DIMENSIONS,
|
||||
type SpecDimension,
|
||||
} from './spec-index-builder.js';
|
||||
|
||||
import {
|
||||
extractKeywords,
|
||||
calculateMatchScore,
|
||||
} from './spec-keyword-extractor.js';
|
||||
|
||||
// ============================================================================
|
||||
// Types
|
||||
// ============================================================================
|
||||
|
||||
/**
 * Input options for loadSpecs().
 */
export interface SpecLoadOptions {
  /** Absolute path to the project root */
  projectPath: string;
  /** Specific dimension to load (loads all if omitted) */
  dimension?: SpecDimension;
  /** Pre-extracted keywords (skips extraction if provided) */
  keywords?: string[];
  /** Output format: 'cli' for markdown, 'hook' for JSON */
  outputFormat: 'cli' | 'hook';
  /**
   * Raw stdin data from Claude Code hook (used to extract user_prompt).
   * Consulted only when `keywords` is absent; user_prompt appears to be
   * preferred over prompt — TODO confirm against resolveKeywords.
   */
  stdinData?: { user_prompt?: string; prompt?: string; [key: string]: unknown };
  /** Enable debug logging to stderr */
  debug?: boolean;
}

/**
 * Output from loadSpecs().
 */
export interface SpecLoadResult {
  /** Formatted content string (markdown or JSON) */
  content: string;
  /** Output format that was used ('markdown' for cli, 'json' for hook) */
  format: 'markdown' | 'json';
  /** List of spec titles that were matched and loaded */
  matchedSpecs: string[];
  /** Total number of spec files loaded */
  totalLoaded: number;
}

/**
 * Internal representation of a loaded spec's content
 * (frontmatter already stripped from `content`).
 */
interface LoadedSpec {
  title: string;
  dimension: string;
  priority: string;
  content: string;
}
|
||||
|
||||
// ============================================================================
// Constants
// ============================================================================

/**
 * Dimension priority for merge ordering.
 * Lower number = loaded first (lower priority, gets overridden).
 * Higher number = loaded last (higher priority, overrides).
 *
 * NOTE(review): mergeByPriority concatenates sections rather than
 * literally overriding keys, so "overrides" here means "appears later
 * in the merged output" — confirm intended reading with consumers.
 */
const DIMENSION_PRIORITY: Record<string, number> = {
  personal: 1,
  changelog: 2,
  roadmap: 3,
  specs: 4,
};

/**
 * Priority weight for ordering specs within a dimension.
 * Higher weight sorts earlier (critical > high > medium > low).
 */
const SPEC_PRIORITY_WEIGHT: Record<string, number> = {
  critical: 4,
  high: 3,
  medium: 2,
  low: 1,
};
|
||||
|
||||
// ============================================================================
|
||||
// Public API
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Load specs based on options.
|
||||
*
|
||||
* Pipeline:
|
||||
* 1. Extract keywords from options.keywords, stdinData, or empty
|
||||
* 2. For each dimension: read index cache (fallback to on-the-fly build)
|
||||
* 3. Filter: all required specs + optional specs with keyword match
|
||||
* 4. Load MD file content (strip frontmatter)
|
||||
* 5. Merge by dimension priority
|
||||
* 6. Format for CLI (markdown) or Hook (JSON)
|
||||
*
|
||||
* @param options - Loading configuration
|
||||
* @returns SpecLoadResult with formatted content
|
||||
*/
|
||||
export async function loadSpecs(options: SpecLoadOptions): Promise<SpecLoadResult> {
|
||||
const { projectPath, outputFormat, debug } = options;
|
||||
|
||||
// Step 1: Resolve keywords
|
||||
const keywords = resolveKeywords(options);
|
||||
|
||||
if (debug) {
|
||||
debugLog(`Extracted ${keywords.length} keywords: [${keywords.join(', ')}]`);
|
||||
}
|
||||
|
||||
// Step 2: Determine which dimensions to process
|
||||
const dimensions = options.dimension
|
||||
? [options.dimension]
|
||||
: [...SPEC_DIMENSIONS];
|
||||
|
||||
// Step 3: For each dimension, read index and filter specs
|
||||
const allLoadedSpecs: LoadedSpec[] = [];
|
||||
let totalScanned = 0;
|
||||
|
||||
for (const dim of dimensions) {
|
||||
const index = await getDimensionIndex(projectPath, dim);
|
||||
totalScanned += index.entries.length;
|
||||
|
||||
const { required, matched } = filterSpecs(index, keywords);
|
||||
|
||||
if (debug) {
|
||||
debugLog(
|
||||
`[${dim}] scanned=${index.entries.length} required=${required.length} matched=${matched.length}`
|
||||
);
|
||||
}
|
||||
|
||||
// Step 4: Load content for filtered entries
|
||||
const entriesToLoad = [...required, ...matched];
|
||||
const loaded = loadSpecContent(projectPath, entriesToLoad);
|
||||
allLoadedSpecs.push(...loaded);
|
||||
}
|
||||
|
||||
if (debug) {
|
||||
debugLog(
|
||||
`Total: scanned=${totalScanned} loaded=${allLoadedSpecs.length}`
|
||||
);
|
||||
}
|
||||
|
||||
// Step 5: Merge by dimension priority
|
||||
const mergedContent = mergeByPriority(allLoadedSpecs);
|
||||
|
||||
// Step 6: Format output
|
||||
const matchedTitles = allLoadedSpecs.map(s => s.title);
|
||||
const content = formatOutput(mergedContent, matchedTitles, outputFormat);
|
||||
const format = outputFormat === 'cli' ? 'markdown' : 'json';
|
||||
|
||||
return {
|
||||
content,
|
||||
format,
|
||||
matchedSpecs: matchedTitles,
|
||||
totalLoaded: allLoadedSpecs.length,
|
||||
};
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Core Functions
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Filter specs by readMode and keyword match.
|
||||
*
|
||||
* - required: all entries with readMode === 'required'
|
||||
* - matched: entries with readMode === 'optional' that have keyword intersection
|
||||
*
|
||||
* @param index - The dimension index to filter
|
||||
* @param keywords - Extracted prompt keywords
|
||||
* @returns Separated required and matched entries (deduplicated)
|
||||
*/
|
||||
export function filterSpecs(
|
||||
index: DimensionIndex,
|
||||
keywords: string[]
|
||||
): { required: SpecIndexEntry[]; matched: SpecIndexEntry[] } {
|
||||
const required: SpecIndexEntry[] = [];
|
||||
const matched: SpecIndexEntry[] = [];
|
||||
|
||||
for (const entry of index.entries) {
|
||||
if (entry.readMode === 'required') {
|
||||
required.push(entry);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Optional entries: check keyword intersection
|
||||
if (keywords.length > 0 && entry.keywords.length > 0) {
|
||||
const score = calculateMatchScore(keywords, entry.keywords);
|
||||
if (score > 0) {
|
||||
matched.push(entry);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return { required, matched };
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge loaded spec content by dimension priority.
|
||||
*
|
||||
* Dimension priority order: personal(1) < changelog(2) < roadmap(3) < specs(4).
|
||||
* Within a dimension, specs are ordered by priority weight (critical > high > medium > low).
|
||||
*
|
||||
* @param specs - All loaded specs
|
||||
* @returns Merged content string ordered by priority
|
||||
*/
|
||||
export function mergeByPriority(specs: LoadedSpec[]): string {
|
||||
if (specs.length === 0) {
|
||||
return '';
|
||||
}
|
||||
|
||||
// Sort by dimension priority (ascending), then by spec priority weight (descending)
|
||||
const sorted = [...specs].sort((a, b) => {
|
||||
const dimA = DIMENSION_PRIORITY[a.dimension] ?? 0;
|
||||
const dimB = DIMENSION_PRIORITY[b.dimension] ?? 0;
|
||||
if (dimA !== dimB) {
|
||||
return dimA - dimB;
|
||||
}
|
||||
const priA = SPEC_PRIORITY_WEIGHT[a.priority] ?? 0;
|
||||
const priB = SPEC_PRIORITY_WEIGHT[b.priority] ?? 0;
|
||||
return priB - priA;
|
||||
});
|
||||
|
||||
// Concatenate content with separators
|
||||
const sections: string[] = [];
|
||||
for (const spec of sorted) {
|
||||
sections.push(`## ${spec.title}\n\n${spec.content.trim()}`);
|
||||
}
|
||||
|
||||
return sections.join('\n\n---\n\n');
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Internal Helpers
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Resolve keywords from options.
|
||||
*
|
||||
* Priority:
|
||||
* 1. options.keywords (pre-extracted)
|
||||
* 2. options.stdinData.user_prompt or options.stdinData.prompt (extract from text)
|
||||
* 3. empty array (only required specs will load)
|
||||
*/
|
||||
function resolveKeywords(options: SpecLoadOptions): string[] {
|
||||
if (options.keywords && options.keywords.length > 0) {
|
||||
return options.keywords;
|
||||
}
|
||||
|
||||
const prompt = options.stdinData?.user_prompt || options.stdinData?.prompt;
|
||||
if (prompt && typeof prompt === 'string') {
|
||||
return extractKeywords(prompt);
|
||||
}
|
||||
|
||||
return [];
|
||||
}
|
||||
|
||||
/**
|
||||
* Load MD file content for a list of spec entries.
|
||||
*
|
||||
* Reads each file, strips YAML frontmatter via gray-matter, returns body content.
|
||||
* Silently skips files that cannot be read.
|
||||
*
|
||||
* @param projectPath - Project root directory
|
||||
* @param entries - Spec index entries to load
|
||||
* @returns Array of loaded specs with content
|
||||
*/
|
||||
function loadSpecContent(
|
||||
projectPath: string,
|
||||
entries: SpecIndexEntry[]
|
||||
): LoadedSpec[] {
|
||||
const loaded: LoadedSpec[] = [];
|
||||
|
||||
for (const entry of entries) {
|
||||
const filePath = join(projectPath, entry.file);
|
||||
|
||||
if (!existsSync(filePath)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
let raw: string;
|
||||
try {
|
||||
raw = readFileSync(filePath, 'utf-8');
|
||||
} catch {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Strip frontmatter using gray-matter
|
||||
let body: string;
|
||||
try {
|
||||
const parsed = matter(raw);
|
||||
body = parsed.content;
|
||||
} catch {
|
||||
// Fallback: use raw content if frontmatter parsing fails
|
||||
body = raw;
|
||||
}
|
||||
|
||||
// Skip empty content
|
||||
if (!body.trim()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
loaded.push({
|
||||
title: entry.title,
|
||||
dimension: entry.dimension,
|
||||
priority: entry.priority,
|
||||
content: body,
|
||||
});
|
||||
}
|
||||
|
||||
return loaded;
|
||||
}
|
||||
|
||||
/**
|
||||
* Format the merged content for output.
|
||||
*
|
||||
* CLI format: markdown with --- separators and section titles.
|
||||
* Hook format: JSON { continue: true, systemMessage: '<project-specs>...</project-specs>' }
|
||||
*
|
||||
* @param mergedContent - Priority-merged spec content
|
||||
* @param matchedTitles - List of matched spec titles
|
||||
* @param format - Output format ('cli' or 'hook')
|
||||
* @returns Formatted string
|
||||
*/
|
||||
function formatOutput(
|
||||
mergedContent: string,
|
||||
matchedTitles: string[],
|
||||
format: 'cli' | 'hook'
|
||||
): string {
|
||||
if (!mergedContent) {
|
||||
if (format === 'hook') {
|
||||
return JSON.stringify({ continue: true });
|
||||
}
|
||||
return '(No matching specs found)';
|
||||
}
|
||||
|
||||
if (format === 'cli') {
|
||||
// CLI: markdown with header
|
||||
const header = `# Project Specs (${matchedTitles.length} loaded)`;
|
||||
return `${header}\n\n${mergedContent}`;
|
||||
}
|
||||
|
||||
// Hook: JSON with systemMessage wrapped in <project-specs> tags
|
||||
const wrappedContent = `<project-specs>\n${mergedContent}\n</project-specs>`;
|
||||
return JSON.stringify({
|
||||
continue: true,
|
||||
systemMessage: wrappedContent,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Write a debug log message to stderr (avoids polluting stdout for hooks).
|
||||
*/
|
||||
function debugLog(message: string): void {
|
||||
process.stderr.write(`[spec-loader] ${message}\n`);
|
||||
}
|
||||
@@ -49,6 +49,7 @@
|
||||
"commander": "^11.0.0",
|
||||
"figlet": "^1.7.0",
|
||||
"glob": "^10.3.0",
|
||||
"gray-matter": "^4.0.3",
|
||||
"gradient-string": "^2.0.2",
|
||||
"inquirer": "^9.2.0",
|
||||
"jsonwebtoken": "^9.0.3",
|
||||
|
||||
Reference in New Issue
Block a user