Compare commits

...

6 Commits

Author SHA1 Message Date
catlog22
7a6f4c3f22 chore: bump version to 6.3.43 - fix parallel-dev-cycle documentation inconsistencies 2026-01-23 17:52:12 +08:00
catlog22
2f32d08d87 feat: Update documentation and file references for changes.log in parallel development cycle 2026-01-23 17:51:21 +08:00
catlog22
79d20add43 feat: Enhance Code Developer and Requirements Analyst agents with proactive debugging and self-enhancement strategies 2026-01-23 17:41:17 +08:00
catlog22
f363c635f5 feat: Enhance issue loading with intelligent grouping for batch processing 2026-01-23 17:03:27 +08:00
catlog22
61e3747768 feat: Add batch solutions endpoint (ccw issue solutions)
- Add solutionsAction() to query all bound solutions in one call
- Reduces O(N) queries to O(1) for queue formation
- Update /issue:queue command to use new endpoint
- Performance: 18 individual queries → 1 batch query

Version: 6.3.42
2026-01-23 16:56:08 +08:00
catlog22
54ec6a7c57 feat: Enhance issue management with batch processing and solution querying
- Updated issue loading process to create batches based on size (max 3 per batch).
- Removed semantic grouping in favor of simple size-based batching.
- Introduced a new command to batch query solutions for multiple issues.
- Improved solution loading to fetch all planned issues with bound solutions in a single call.
- Added detailed handling for multi-solution issues, including user selection for binding.
- Created a new workflow command for multi-agent development with documented progress and incremental iteration support.
- Added .gitignore for ace-tool directory to prevent unnecessary files from being tracked.
2026-01-23 16:55:10 +08:00
16 changed files with 1562 additions and 586 deletions
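The headline claim in commit 61e3747768 — 18 individual queries collapsed into one batch query — is easiest to see as a before/after sketch. This is illustrative only, written with the `Bash()` helper convention used in the workflow documents below; it is not code from the changed files.

```javascript
// Before (O(N)): one `ccw issue solution <id>` call per planned issue
function loadSolutionsPerIssue(plannedIssues) {
  return plannedIssues.map(issue =>
    JSON.parse(Bash(`ccw issue solution ${issue.id} --brief`).trim())
  );
}

// After (O(1)): one `ccw issue solutions` batch call for all planned issues
function loadSolutionsBatch() {
  const out = Bash(`ccw issue solutions --status planned --brief`).trim();
  return out ? JSON.parse(out) : [];
}
```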

View File

@@ -55,11 +55,11 @@ Unified planning command using **issue-plan-agent** that combines exploration an
## Execution Process
```
Phase 1: Issue Loading
Phase 1: Issue Loading & Intelligent Grouping
├─ Parse input (single, comma-separated, or --all-pending)
├─ Fetch issue metadata (ID, title, tags)
├─ Validate issues exist (create if needed)
└─ Group by similarity (shared tags or title keywords, max 3 per batch)
└─ Intelligent grouping via Gemini (semantic similarity, max 3 per batch)
Phase 2: Unified Explore + Plan (issue-plan-agent)
├─ Launch issue-plan-agent per batch
@@ -119,46 +119,11 @@ if (useAllPending) {
}
// Note: Agent fetches full issue content via `ccw issue status <id> --json`
// Semantic grouping via Gemini CLI (max 4 issues per group)
async function groupBySimilarityGemini(issues) {
const issueSummaries = issues.map(i => ({
id: i.id, title: i.title, tags: i.tags
}));
// Intelligent grouping: Analyze issues by title/tags, group semantically similar ones
// Strategy: Same module/component, related bugs, feature clusters
// Constraint: Max ${batchSize} issues per batch
const prompt = `
PURPOSE: Group similar issues by semantic similarity for batch processing; maximize within-group coherence; max 4 issues per group
TASK: • Analyze issue titles/tags semantically • Identify functional/architectural clusters • Assign each issue to one group
MODE: analysis
CONTEXT: Issue metadata only
EXPECTED: JSON with groups array, each containing max 4 issue_ids, theme, rationale
CONSTRAINTS: Each issue in exactly one group | Max 4 issues per group | Balance group sizes
INPUT:
${JSON.stringify(issueSummaries, null, 2)}
OUTPUT FORMAT:
{"groups":[{"group_id":1,"theme":"...","issue_ids":["..."],"rationale":"..."}],"ungrouped":[]}
`;
const taskId = Bash({
command: `ccw cli -p "${prompt}" --tool gemini --mode analysis`,
run_in_background: true, timeout: 600000
});
const output = TaskOutput({ task_id: taskId, block: true });
// Extract JSON from potential markdown code blocks
function extractJsonFromMarkdown(text) {
const jsonMatch = text.match(/```json\s*\n([\s\S]*?)\n```/) ||
text.match(/```\s*\n([\s\S]*?)\n```/);
return jsonMatch ? jsonMatch[1] : text;
}
const result = JSON.parse(extractJsonFromMarkdown(output));
return result.groups.map(g => g.issue_ids.map(id => issues.find(i => i.id === id)));
}
const batches = await groupBySimilarityGemini(issues);
console.log(`Processing ${issues.length} issues in ${batches.length} batch(es) (max 4 issues/agent)`);
console.log(`Processing ${issues.length} issues in ${batches.length} batch(es)`);
TodoWrite({
todos: batches.map((_, i) => ({
@@ -207,7 +172,9 @@ ${issueList}
- Add explicit verification steps to prevent same failure mode
6. **If github_url exists**: Add final task to comment on GitHub issue
7. Write solution to: .workflow/issues/solutions/{issue-id}.jsonl
8. Single solution → auto-bind; Multiple → return for selection
8. **CRITICAL - Binding Decision**:
- Single solution → **MUST execute**: ccw issue bind <issue-id> <solution-id>
- Multiple solutions → Return pending_selection only (no bind)
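For illustration, the step-8 binding rule can be sketched as a small helper; the shape of the `solutions` array is assumed, and the `ccw issue bind` call is the one named above.

```javascript
// Sketch of the step-8 binding decision (illustrative; data shapes assumed)
function decideBinding(issueId, solutions) {
  if (solutions.length === 1) {
    // Single solution → MUST bind immediately
    Bash(`ccw issue bind ${issueId} ${solutions[0].id}`);
    return { issue_id: issueId, bound_solution_id: solutions[0].id };
  }
  // Multiple solutions → return pending_selection only, no bind
  return { issue_id: issueId, pending_selection: solutions.map(s => s.id) };
}
```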
### Failure-Aware Planning Rules
- **Extract failure patterns**: Parse issue.feedback where type='failure' and stage='execute'
@@ -265,35 +232,55 @@ for (let i = 0; i < agentTasks.length; i += MAX_PARALLEL) {
}
agentResults.push(summary); // Store for Phase 3 conflict aggregation
// Verify binding for bound issues (agent should have executed bind)
for (const item of summary.bound || []) {
console.log(`${item.issue_id}: ${item.solution_id} (${item.task_count} tasks)`);
const status = JSON.parse(Bash(`ccw issue status ${item.issue_id} --json`).trim());
if (status.bound_solution_id === item.solution_id) {
console.log(`${item.issue_id}: ${item.solution_id} (${item.task_count} tasks)`);
} else {
// Fallback: agent failed to bind, execute here
Bash(`ccw issue bind ${item.issue_id} ${item.solution_id}`);
console.log(`${item.issue_id}: ${item.solution_id} (${item.task_count} tasks) [recovered]`);
}
}
// Collect and notify pending selections
// Collect pending selections for Phase 3
for (const pending of summary.pending_selection || []) {
console.log(`${pending.issue_id}: ${pending.solutions.length} solutions → awaiting selection`);
pendingSelections.push(pending);
}
if (summary.conflicts?.length > 0) {
console.log(`⚠ Conflicts: ${summary.conflicts.length} detected (will resolve in Phase 3)`);
}
updateTodo(`Plan batch ${batchIndex + 1}`, 'completed');
}
}
```
### Phase 3: Conflict Resolution & Solution Selection
### Phase 3: Solution Selection (if pending)
**Conflict Handling:**
- Collect `conflicts` from all agent results
- Low/Medium severity → auto-resolve with `recommended_resolution`
- High severity → use `AskUserQuestion` to let user choose resolution
```javascript
// Handle multi-solution issues
for (const pending of pendingSelections) {
if (pending.solutions.length === 0) continue;
**Multi-Solution Selection:**
- If `pending_selection` contains issues with multiple solutions:
- Use `AskUserQuestion` to present options (solution ID + task count + description)
- Extract selected solution ID from user response
- Verify solution file exists, recover from payload if missing
- Bind selected solution via `ccw issue bind <issue-id> <solution-id>`
const options = pending.solutions.slice(0, 4).map(sol => ({
label: `${sol.id} (${sol.task_count} tasks)`,
description: sol.description || sol.approach || 'No description'
}));
const answer = AskUserQuestion({
questions: [{
question: `Issue ${pending.issue_id}: which solution to bind?`,
header: pending.issue_id,
options: options,
multiSelect: false
}]
});
const selected = answer[Object.keys(answer)[0]];
if (!selected || selected === 'Other') continue;
const solId = selected.split(' ')[0];
Bash(`ccw issue bind ${pending.issue_id} ${solId}`);
console.log(`${pending.issue_id}: ${solId} bound`);
}
```
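The conflict-handling rules above (auto-resolve low/medium severity, ask the user on high) could look roughly like the sketch below; `applyResolution` and any conflict fields beyond `severity` and `recommended_resolution` are assumptions for illustration.

```javascript
// Illustrative sketch of severity-based conflict resolution
for (const conflict of agentResults.flatMap(r => r.conflicts || [])) {
  if (conflict.severity === 'high') {
    const answer = AskUserQuestion({
      questions: [{
        question: `Conflict: ${conflict.description}. How should it be resolved?`,
        header: 'Conflict',
        options: (conflict.resolution_options || []).slice(0, 4),
        multiSelect: false
      }]
    });
    applyResolution(conflict, answer); // hypothetical helper
  } else {
    // Low/Medium severity → auto-resolve
    applyResolution(conflict, conflict.recommended_resolution); // hypothetical helper
  }
}
```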
### Phase 4: Summary

View File

@@ -28,12 +28,13 @@ Queue formation command using **issue-queue-agent** that analyzes all bound solu
| Operation | Correct | Incorrect |
|-----------|---------|-----------|
| List issues (brief) | `ccw issue list --status planned --brief` | `Read('issues.jsonl')` |
| **Batch solutions (NEW)** | `ccw issue solutions --status planned --brief` | Loop `ccw issue solution <id>` |
| List queue (brief) | `ccw issue queue --brief` | `Read('queues/*.json')` |
| Read issue details | `ccw issue status <id> --json` | `Read('issues.jsonl')` |
| Get next item | `ccw issue next --json` | `Read('queues/*.json')` |
| Update status | `ccw issue update <id> --status ...` | Direct file edit |
| Sync from queue | `ccw issue update --from-queue` | Direct file edit |
| **Read solution (brief)** | `ccw issue solution <id> --brief` | `Read('solutions/*.jsonl')` |
| Read solution (single) | `ccw issue solution <id> --brief` | `Read('solutions/*.jsonl')` |
**Output Options**:
- `--brief`: JSON with minimal fields (id, status, counts)
@@ -131,24 +132,23 @@ Phase 7: Active Queue Check & Decision (REQUIRED)
### Phase 1: Solution Loading & Distribution
**Data Loading:**
- Use `ccw issue list --status planned --brief` to get planned issues with `bound_solution_id`
- If no planned issues found → display message, suggest `/issue:plan`
**Solution Brief Loading** (for each planned issue):
```bash
ccw issue solution <issue-id> --brief
# Returns: [{ solution_id, is_bound, task_count, files_touched[] }]
```
- Use `ccw issue solutions --status planned --brief` to get all planned issues with solutions in **one call**
- Returns: Array of `{ issue_id, solution_id, is_bound, task_count, files_touched[], priority }`
- If no bound solutions found → display message, suggest `/issue:plan`
**Build Solution Objects:**
```json
{
"issue_id": "ISS-xxx",
"solution_id": "SOL-ISS-xxx-1",
"task_count": 3,
"files_touched": ["src/auth.ts", "src/utils.ts"],
"priority": "medium"
```javascript
// Single CLI call replaces N individual queries
const result = Bash(`ccw issue solutions --status planned --brief`).trim();
const solutions = result ? JSON.parse(result) : [];
if (solutions.length === 0) {
console.log('No bound solutions found. Run /issue:plan first.');
return;
}
// solutions already in correct format:
// { issue_id, solution_id, is_bound, task_count, files_touched[], priority }
```
**Multi-Queue Distribution** (if `--queues > 1`):

File diff suppressed because it is too large.

View File

@@ -7,7 +7,7 @@ Multi-agent parallel development cycle using Codex subagent pattern with continu
This skill implements a **single-file-per-agent** development workflow:
- **RA**: `requirements.md` (all requirements + edge cases + history)
- **EP**: `plan.md` (architecture + tasks + integration points)
- **EP**: `exploration.md`, `architecture.md`, `plan.json` (codebase exploration + architecture + structured tasks)
- **CD**: `implementation.md` (progress + files + decisions + testing)
- **VAS**: `summary.md` (validation + test results + recommendations)
@@ -49,7 +49,9 @@ Creates:
│ ├── requirements.md (v1.0.0)
│ └── changes.log (NDJSON)
├── ep/
│ ├── plan.md (v1.0.0)
│ ├── exploration.md (v1.0.0)
│ ├── architecture.md (v1.0.0)
│ ├── plan.json (v1.0.0)
│ └── changes.log (NDJSON)
├── cd/
│ ├── implementation.md (v1.0.0)
@@ -113,20 +115,19 @@ When iteration completes, next extends to v1.1.0:
```
Current State (v1.0.0)
├── requirements.md (v1.0.0)
├── plan.md (v1.0.0)
├── plan.json (v1.0.0)
├── implementation.md (v1.0.0)
└── summary.md (v1.0.0)
User: "Add GitHub provider"
Archive Old Write New
├── history/requirements-v1.0.0.md
├── history/plan-v1.0.0.md → requirements.md (v1.1.0) - REWRITTEN
├── history/impl-v1.0.0.md → plan.md (v1.1.0) - REWRITTEN
└── history/summary-v1.0.0.md → implementation.md (v1.1.0) - REWRITTEN
→ summary.md (v1.1.0) - REWRITTEN
Append to changes.log (NDJSON)
├── history/requirements-v1.0.0.md → requirements.md (v1.1.0) - REWRITTEN
├── history/plan-v1.0.0.json → plan.json (v1.1.0) - REWRITTEN
├── history/impl-v1.0.0.md → implementation.md (v1.1.0) - REWRITTEN
└── history/summary-v1.0.0.md → summary.md (v1.1.0) - REWRITTEN
Append to changes.log (NDJSON)
```
## Session Files
@@ -142,11 +143,13 @@ ra/ - Requirements Analyst
└── requirements-v1.1.0.md
ep/ - Exploration & Planning
├── plan.md # v1.2.0 (current)
├── exploration.md # v1.2.0 (codebase exploration)
├── architecture.md # v1.2.0 (architecture design)
├── plan.json # v1.2.0 (structured task list, current)
├── changes.log # NDJSON audit trail
└── history/
├── plan-v1.0.0.md
└── plan-v1.1.0.md
├── plan-v1.0.0.json
└── plan-v1.1.0.json
cd/ - Code Developer
├── implementation.md # v1.2.0 (current)
@@ -203,7 +206,7 @@ vas/ - Validation & Archival
| Agent | File | Contains | Size |
|-------|------|----------|------|
| **RA** | requirements.md | All FR, NFR, edge cases, history summary | ~2-5KB |
| **EP** | plan.md | Architecture, all tasks, critical path, history | ~3-8KB |
| **EP** | exploration.md + architecture.md + plan.json | Codebase exploration, architecture design, structured task list | ~5-10KB total |
| **CD** | implementation.md | Completed tasks, files changed, decisions, tests | ~4-10KB |
| **VAS** | summary.md | Test results, coverage, issues, recommendations | ~5-12KB |
@@ -362,14 +365,14 @@ Orchestrator (main coordinator)
All write to:
- requirements.md (v1.x.0)
- plan.md (v1.x.0)
- exploration.md, architecture.md, plan.json (v1.x.0)
- implementation.md (v1.x.0)
- summary.md (v1.x.0)
- changes.log (NDJSON append)
[Automatic archival]
- history/requirements-v1.{x-1}.0.md
- history/plan-v1.{x-1}.0.md
- history/plan-v1.{x-1}.0.json
- etc...
Orchestrator: Next iteration?

View File

@@ -1,25 +1,26 @@
---
name: Parallel Dev Cycle
description: Multi-agent parallel development cycle with requirement analysis, exploration planning, code development, and validation. Supports continuous iteration with markdown progress documentation.
argument-hint: TASK="<task description>" [--cycle-id=<id>] [--auto] [--parallel=<count>]
argument-hint: TASK="<task description>" | --cycle-id=<id> [--extend="<extension>"] [--auto] [--parallel=<count>]
---
# Parallel Dev Cycle - Multi-Agent Development Workflow
Multi-agent parallel development cycle using Codex subagent pattern with four specialized workers:
1. **Requirements Analysis & Extension** (RA) - Requirements analysis and extension
2. **Exploration & Planning** (EP) - Exploration and planning
3. **Code Development** (CD) - Code development
4. **Validation & Archival Summary** (VAS) - Validation and archival summary
1. **Requirements Analysis & Extension** (RA) - Requirement analysis and self-enhancement
2. **Exploration & Planning** (EP) - Exploration and planning
3. **Code Development** (CD) - Code development with debug strategy support
4. **Validation & Archival Summary** (VAS) - Validation and archival summary
Each agent **maintains only one main document file**, with support for versioning, automatic archival, and full history traceability.
Each agent **maintains one main document** (e.g., requirements.md, plan.json, implementation.md) that is completely rewritten per iteration, plus auxiliary logs (changes.log, debug-log.ndjson) that are append-only. Supports versioning, automatic archival, and complete history tracking.
## Arguments
| Arg | Required | Description |
|-----|----------|-------------|
| TASK | No | Task description (for new cycle, mutually exclusive with --cycle-id) |
| --cycle-id | No | Existing cycle ID to continue (from API or previous session) |
| TASK | One of TASK or --cycle-id | Task description (for new cycle, mutually exclusive with --cycle-id) |
| --cycle-id | One of TASK or --cycle-id | Existing cycle ID to continue (from API or previous session) |
| --extend | No | Extension description (only valid with --cycle-id) |
| --auto | No | Auto-cycle mode (run all phases sequentially) |
| --parallel | No | Number of parallel agents (default: 4, max: 4) |
@@ -61,12 +62,13 @@ Multi-agent parallel development cycle using Codex subagent pattern with four sp
## Key Design Principles
1. **Single File Per Agent**: Each agent maintains only one main file (lean and clear)
2. **Version-Based Overwrite**: Each version completely rewrites the main file
3. **Automatic Archival**: Old versions are automatically archived to the `history/` directory
4. **Complete Audit Trail**: Changes.log (NDJSON) preserves all change history
5. **Parallel Execution**: Four agents work simultaneously, no waiting
6. **File References**: Use short file paths instead of content passing
1. **Main Document + Auxiliary Logs**: Each agent maintains one main document (rewritten per iteration) and auxiliary logs (append-only)
2. **Version-Based Overwrite**: Main documents completely rewritten per version; logs append-only
3. **Automatic Archival**: Old main document versions automatically archived to `history/` directory
4. **Complete Audit Trail**: Changes.log (NDJSON) preserves all change history
5. **Parallel Coordination**: Four agents launched simultaneously; coordination via shared state and orchestrator
6. **File References**: Use short file paths instead of content passing
7. **Self-Enhancement**: RA agent proactively extends requirements based on context
## Session Structure
@@ -75,439 +77,118 @@ Multi-agent parallel development cycle using Codex subagent pattern with four sp
+-- {cycleId}.json # Master state file
+-- {cycleId}.progress/
+-- ra/
| +-- requirements.md # v1.2.0 (current, complete rewrite)
| +-- changes.log # NDJSON complete history (append-only)
| +-- requirements.md # Current version (complete rewrite)
| +-- changes.log # NDJSON complete history (append-only)
| └-- history/
| +-- requirements-v1.0.0.md # Archived snapshot
| +-- requirements-v1.1.0.md # Archived snapshot
| +-- requirements-v1.0.0.md # Archived snapshot
| +-- requirements-v1.1.0.md # Archived snapshot
+-- ep/
| +-- plan.md # v1.2.0 (current)
| +-- changes.log # NDJSON complete history
| +-- exploration.md # Codebase exploration report
| +-- architecture.md # Architecture design
| +-- plan.json # Structured task list (current version)
| +-- changes.log # NDJSON complete history
| └-- history/
| +-- plan-v1.0.0.md
| +-- plan-v1.1.0.md
| +-- plan-v1.0.0.json
| +-- plan-v1.1.0.json
+-- cd/
| +-- implementation.md # v1.2.0 (current)
| +-- changes.log # NDJSON complete history
| +-- implementation.md # Current version
| +-- debug-log.ndjson # Debug hypothesis tracking
| +-- changes.log # NDJSON complete history
| └-- history/
| +-- implementation-v1.0.0.md
| +-- implementation-v1.1.0.md
+-- vas/
| +-- summary.md # v1.2.0 (current)
| +-- changes.log # NDJSON complete history
| +-- summary.md # Current version
| +-- changes.log # NDJSON complete history
| └-- history/
| +-- summary-v1.0.0.md
| +-- summary-v1.1.0.md
└-- coordination/
+-- timeline.md # Execution timeline
+-- decisions.log # Decision log
+-- timeline.md # Execution timeline
+-- decisions.log # Decision log
```
## State Management
### Unified Cycle State
State schema is defined in [phases/state-schema.md](phases/state-schema.md). The master state file (`{cycleId}.json`) tracks:
```json
{
"cycle_id": "cycle-v1-20260122-abc123",
"title": "Task title",
"status": "running",
"current_iteration": 2,
"current_phase": "cd",
"agents": {
"ra": {
"status": "completed",
"version": "1.2.0",
"output_file": ".workflow/.cycle/cycle-v1-xxx.progress/ra/requirements.md",
"summary": { "requirements": 10, "edge_cases": 5 }
},
"ep": {
"status": "completed",
"version": "1.2.0",
"output_file": ".workflow/.cycle/cycle-v1-xxx.progress/ep/plan.md",
"summary": { "tasks": 8, "critical_path": 4 }
},
"cd": {
"status": "running",
"version": "1.1.0",
"output_file": ".workflow/.cycle/cycle-v1-xxx.progress/cd/implementation.md",
"summary": { "completed_tasks": 3, "files_modified": 5 }
},
"vas": {
"status": "idle",
"version": "0.0.0",
"output_file": null
}
}
}
```
## Agent Output Format
### RA: requirements.md (single-file complete output)
```markdown
# Requirements Specification - v1.2.0
## Document Status
| Field | Value |
|-------|-------|
| **Version** | 1.2.0 |
| **Previous Version** | 1.1.0 (Added Google OAuth) |
| **This Version** | Added MFA support, GitHub provider |
| **Iteration** | 3 |
| **Updated** | 2026-01-23T10:00:00+08:00 |
---
## Functional Requirements
- FR-001: OAuth authentication via Google/GitHub (v1.0.0, enhanced v1.1.0-1.2.0)
- FR-002: Multi-provider support (v1.1.0)
- FR-003: MFA/TOTP support (NEW v1.2.0)
## Non-Functional Requirements
- NFR-001: Response time < 500ms
- NFR-002: Support 1000 concurrent users
## Edge Cases
- EC-001: OAuth timeout → Fallback retry
- EC-002: Invalid TOTP → Max 3 attempts (NEW v1.2.0)
## Success Criteria
- [ ] All FRs implemented
- [ ] NFRs validated
- [ ] Coverage > 80%
---
## History Summary
| Version | Date | Summary |
|---------|------|---------|
| 1.0.0 | 2026-01-22 | Initial OAuth |
| 1.1.0 | 2026-01-22 | + Google OAuth |
| 1.2.0 | 2026-01-23 | + GitHub, + MFA (current) |
For detailed history, see `history/` and `changes.log`
```
### EP: plan.md (single-file complete output)
```markdown
# Implementation Plan - v1.2.0
## Plan Status
| Field | Value |
|-------|-------|
| **Version** | 1.2.0 |
| **Previous** | 1.1.0 (Added GitHub integration) |
| **This Version** | Added MFA tasks (current) |
| **Total Tasks** | 10 |
| **Estimated Hours** | 20 |
---
## Architecture Highlights
- OAuth: passport-oauth2 library
- Providers: Google, GitHub
- Providers: Store in User.oauth_id, oauth_provider
- MFA: TOTP-based (NEW v1.2.0)
---
## Implementation Tasks
### Phase 1: Foundation (TASK-001-003)
- TASK-001: Setup OAuth config (1h, small)
- TASK-002: Update User model (2h, medium)
- TASK-003: Google OAuth strategy (4h, large)
### Phase 2: Multi-Provider (TASK-004-005)
- TASK-004: GitHub OAuth strategy (3h, medium) [NEW v1.2.0]
- TASK-005: Provider selection UI (2h, medium)
### Phase 3: MFA (TASK-006-008) [NEW v1.2.0]
- TASK-006: TOTP setup endpoint (3h, medium)
- TASK-007: TOTP verification (2h, medium)
- TASK-008: Recovery codes (1h, small)
### Phase 4: Testing & Docs (TASK-009-010)
- TASK-009: Integration tests (4h, large)
- TASK-010: Documentation (2h, medium)
---
## Critical Path
1. TASK-001 → TASK-002 → TASK-003 → TASK-005
2. TASK-006 → TASK-007 → TASK-008 → TASK-009
---
## Integration Points
- Location: src/middleware/auth.ts
- Database: User table oauth_* columns
- Frontend: login.tsx OAuth buttons
---
## History Summary
| Version | Date | Summary |
|---------|------|---------|
| 1.0.0 | 2026-01-22 | Basic OAuth plan |
| 1.1.0 | 2026-01-22 | + GitHub task |
| 1.2.0 | 2026-01-23 | + MFA tasks (current) |
```
### CD: implementation.md (single-file complete output)
```markdown
# Implementation Progress - v1.1.0
## Progress Status
| Field | Value |
|-------|-------|
| **Version** | 1.1.0 |
| **Previous** | 1.0.0 (Initial OAuth) |
| **This Version** | GitHub OAuth support (current) |
| **Iteration** | 2 |
| **Updated** | 2026-01-23T09:30:00+08:00 |
---
## Completed Tasks
- ✓ TASK-001: Setup OAuth config (1h)
- ✓ TASK-002: Update User model (2h)
- ✓ TASK-003: Google OAuth strategy (4h)
- ✓ TASK-004: GitHub OAuth strategy (3h) [NEW v1.1.0]
## In Progress
- 🔄 TASK-005: Provider selection UI (50% complete)
## Next Tasks
- ☐ TASK-006: TOTP setup (v1.2.0)
- ☐ Tests & documentation
---
## Files Modified
| File | Action | Description |
|------|--------|-------------|
| src/config/oauth.ts | create | OAuth config (45 lines) |
| src/strategies/oauth-google.ts | create | Google strategy (120 lines) |
| src/strategies/oauth-github.ts | create | GitHub strategy (100 lines) [NEW v1.1.0] |
| src/models/User.ts | modify | +oauth_id, oauth_provider (8 lines) |
| src/routes/auth.ts | modify | +/auth/google, /auth/github (+75 lines) |
---
## Key Decisions Made
1. **OAuth Library**: passport-oauth2 (mature, well-maintained)
2. **Token Storage**: Database (for refresh tokens)
3. **Provider Selection**: Buttons on login page
---
## Issues & Blockers
### Current
- None
### Resolved (v1.0.0 → v1.1.0)
- ✓ OAuth callback URL validation (fixed)
- ✓ CORS issues (headers updated)
---
## Testing Status
| Test Type | v1.0.0 | v1.1.0 |
|-----------|--------|--------|
| Unit | 20/20 ✓ | 25/25 ✓ |
| Integration | 8/10 ⚠ | 12/14 ⚠ |
| E2E | 3/5 ⚠ | 5/8 ⚠ |
---
## History Summary
| Version | Date | Summary |
|---------|------|---------|
| 1.0.0 | 2026-01-22 | Google OAuth implementation |
| 1.1.0 | 2026-01-23 | + GitHub OAuth (current) |
```
### VAS: summary.md (single-file complete output)
```markdown
# Validation & Summary Report - v1.0.0
## Validation Status
| Metric | Value | Target | Status |
|--------|-------|--------|--------|
| **Test Pass Rate** | 92% | 90% | ✓ |
| **Code Coverage** | 87% | 80% | ✓ |
| **Requirements Met** | 3/3 | 100% | ✓ |
| **Critical Issues** | 0 | 0 | ✓ |
| **Production Ready** | YES | - | ✓ |
---
## Test Execution Results
- **Total Tests**: 50
- **Passed**: 46 (92%)
- **Failed**: 4 (8%)
- **Duration**: 2m 34s
### Failures
1. **oauth-refresh**: Expected token refresh, got error
- Severity: Medium
- Recommendation: Handle expired refresh tokens (v1.1.0 task)
2. **concurrent-login**: Race condition in session writes
- Severity: High
- Recommendation: Add mutex for session writes (v1.1.0 task)
3. **github-provider**: Timeout on provider response
- Severity: Medium
- Recommendation: Add retry logic with backoff
4. **totp-edge-case**: Invalid TOTP timing window
- Severity: Low
- Recommendation: Expand timing window by ±30s
---
## Code Coverage Analysis
- **Overall**: 87% (target: 80%) ✓
- **OAuth Module**: 95%
- **Routes**: 82%
- **User Model**: 78%
### Gaps
- Error recovery paths (15% uncovered)
- Concurrent request handling (20% uncovered)
---
## Requirements Verification
- ✓ FR-001: OAuth authentication (100% implemented)
- ✓ FR-002: Multi-provider support (Google: 100%, GitHub: 95%)
- ⚠ FR-003: MFA support (0% - planned v1.2.0)
- ✓ NFR-001: Response time < 500ms (avg 245ms)
- ✓ NFR-002: Handle 100 concurrent (sustained 120)
---
## Known Issues Summary
1. **MEDIUM**: OAuth refresh token edge case
- Impact: Users may need re-auth
- Status: Will fix in v1.1.0
2. **MEDIUM**: GitHub provider timeout
- Impact: Occasional login failures
- Status: Will fix in v1.1.0
---
## Deliverables Checklist
- ✓ Code implementation complete
- ✓ Unit tests written (20/20)
- ✓ Integration tests written (12/14)
- ✓ Documentation updated
- ✓ Security review: PASSED
- ✓ Performance benchmarks: MET
---
## Recommendations
1. **For v1.1.0**: Fix refresh token and concurrent login issues
2. **For v1.2.0**: Implement MFA/TOTP support
3. **For v1.3.0**: Add provider error recovery
4. **General**: Increase timeout tolerances
---
## Sign-Off
- **Status**: ✓ APPROVED FOR PRODUCTION
- **Validating Agent**: VAS-v1.0.0
- **Timestamp**: 2026-01-22T12:00:00+08:00
- **By**: Validation & Archival Specialist
---
## History Summary
| Version | Date | Summary |
|---------|------|---------|
| 1.0.0 | 2026-01-22 | Initial validation report (current) |
```
- Cycle metadata (id, title, status, iterations)
- Agent states (status, output files, version)
- Shared context (requirements, plan, changes, test results)
- Coordination data (feedback log, decisions, blockers)
## Versioning Workflow
### Initial Version (v1.0.0)
### Initial Version (v1.0.0)
```bash
/parallel-dev-cycle TASK="Implement OAuth login"
```
Generates:
Generates:
```
requirements.md (v1.0.0)
plan.md (v1.0.0)
implementation.md (v1.0.0) - if applicable
summary.md (v1.0.0) - if applicable
exploration.md (v1.0.0)
architecture.md (v1.0.0)
plan.json (v1.0.0)
implementation.md (v1.0.0) - if applicable
summary.md (v1.0.0) - if applicable
```
### Iteration Versions (v1.1.0, v1.2.0)
### Iteration Versions (v1.1.0, v1.2.0)
```bash
/parallel-dev-cycle --cycle-id=cycle-v1-xxx --extend="Add GitHub support"
```
**Automatic handling**:
1. Read current `requirements.md (v1.0.0)`
2. Auto-archive to `history/requirements-v1.0.0.md`
3. Recreate `requirements.md (v1.1.0)` - complete overwrite
4. Append changes to `changes.log` (NDJSON)
**Automatic handling**:
1. Read current `requirements.md (v1.0.0)`
2. Auto-archive to `history/requirements-v1.0.0.md`
3. Recreate `requirements.md (v1.1.0)` - complete overwrite
4. Append changes to `changes.log` (NDJSON)
## Changes.log Format (NDJSON)
Permanent audit log (append-only, never deleted):
Permanent audit log (append-only, never deleted):
```jsonl
{"timestamp":"2026-01-22T10:00:00+08:00","version":"1.0.0","agent":"ra","action":"create","change":"Initial requirements","iteration":1}
{"timestamp":"2026-01-22T11:00:00+08:00","version":"1.1.0","agent":"ra","action":"update","change":"Added Google OAuth requirement","iteration":2}
{"timestamp":"2026-01-22T11:30:00+08:00","version":"1.0.0","agent":"ep","action":"create","change":"Initial implementation plan","iteration":1}
{"timestamp":"2026-01-22T12:00:00+08:00","version":"1.1.0","agent":"ep","action":"update","change":"Added GitHub OAuth tasks","iteration":2}
{"timestamp":"2026-01-22T13:00:00+08:00","version":"1.0.0","agent":"cd","action":"create","change":"Started OAuth implementation","iteration":1}
```
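The `appendNDJSON` helper referenced throughout these documents is never shown; a minimal Node.js sketch, assuming the one-JSON-object-per-line semantics described above, might be:

```javascript
import { appendFileSync } from 'node:fs';

// Append one entry as a single NDJSON line (append-only, never rewritten)
function appendNDJSON(path, entry) {
  appendFileSync(path, JSON.stringify(entry) + '\n', 'utf8');
}

// Example: record a requirements update for iteration 2
appendNDJSON('.workflow/.cycle/cycle-v1-xxx.progress/ra/changes.log', {
  timestamp: new Date().toISOString(),
  version: '1.1.0',
  agent: 'ra',
  action: 'update',
  change: 'Added Google OAuth requirement',
  iteration: 2
});
```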
## Usage
```bash
# Start new cycle
# Start new cycle
/parallel-dev-cycle TASK="Implement real-time notifications"
# Continue cycle
# Continue cycle
/parallel-dev-cycle --cycle-id=cycle-v1-20260122-abc123
# Iteration with extended requirements
# Iteration with extension
/parallel-dev-cycle --cycle-id=cycle-v1-20260122-abc123 --extend="Also add email notifications"
# Auto mode
# Auto mode
/parallel-dev-cycle --auto TASK="Add OAuth authentication"
```
## Key Benefits
✅ **Simple**: Each agent maintains only 1 file + changes.log
✅ **Efficient**: Version rewrite, no complex version markers needed
✅ **Traceable**: Complete history in `history/` and `changes.log`
✅ **Fast**: Agents read only the current version (no history parsing needed)
✅ **Auditable**: NDJSON changes.log fully traces every change
- **Simple**: Each agent maintains only 1 file + changes.log
- **Efficient**: Version rewrite without complex version marking
- **Traceable**: Complete history in `history/` and `changes.log`
- **Fast**: Agent reads current version quickly (no history parsing needed)
- **Auditable**: NDJSON changes.log fully traces every change
- **Self-Enhancing**: RA agent proactively extends requirements
- **Debug-Ready**: CD agent supports hypothesis-driven debugging
## Reference Documents
| Document | Purpose |
|----------|---------|
| [phases/orchestrator.md](phases/orchestrator.md) | Orchestrator logic |
| [phases/state-schema.md](phases/state-schema.md) | State structure |
| [phases/agents/](phases/agents/) | Four agent roles |
| [specs/coordination-protocol.md](specs/coordination-protocol.md) | Communication protocol |
| [specs/versioning-strategy.md](specs/versioning-strategy.md) | Version management |
| [phases/orchestrator.md](phases/orchestrator.md) | Orchestrator logic |
| [phases/state-schema.md](phases/state-schema.md) | State structure definition |
| [phases/agents/](phases/agents/) | Four agent role definitions |
| [specs/coordination-protocol.md](specs/coordination-protocol.md) | Communication protocol |
| [specs/versioning-strategy.md](specs/versioning-strategy.md) | Version management |

View File

@@ -28,7 +28,7 @@ The Code Developer is responsible for implementing features according to the pla
- Document all file modifications
- Log changes in NDJSON format
- Track which iteration introduced which changes
- Update code-changes.log
- Update changes.log
4. **Report Issues**
- Document development blockers
@@ -43,7 +43,7 @@ The Code Developer is responsible for implementing features according to the pla
- Test code before submitting
- Document code changes clearly
- Track blockers and issues
- Append to code-changes.log, never overwrite
- Append to changes.log, never overwrite
- Reference requirements in code comments
- Use meaningful commit messages in implementation notes
@@ -90,7 +90,7 @@ For each task in the plan:
- Reference requirements
3. **Track Changes**
- Log each file modification to code-changes.log
- Log each file modification to changes.log
- Format: `{timestamp, iteration, file, action, description}`
- Include reason for change
@@ -99,12 +99,96 @@ For each task in the plan:
- Verify integration
- Test error cases
- Check performance
- **If tests fail**: Initiate Debug Workflow (see Debug Workflow section)
5. **Report Progress**
- Update implementation.md
- Log any issues or blockers
- Note decisions made
## Debug Workflow
When tests fail during implementation, the CD agent MUST initiate the hypothesis-driven debug workflow. This workflow systematically identifies and resolves bugs through structured hypothesis testing.
### Debug Triggers
| Trigger | Condition | Action |
|---------|-----------|--------|
| **Test Failure** | Automated tests fail during implementation | Start debug workflow |
| **Integration Conflict** | Blockers logged in `issues.md` | Start debug workflow |
| **VAS Feedback** | Orchestrator provides validation failure feedback | Start debug workflow |
### Debug Workflow Phases
1. **Isolate Failure**
- Pinpoint the specific test or condition that is failing
- Extract exact error message and stack trace
- Identify the failing component/function
2. **Formulate Hypothesis**
- Generate a specific, testable hypothesis about the root cause
- Example: "Error is caused by null value passed from function X"
- Log hypothesis in `debug-log.ndjson`
- Prioritize hypotheses based on: error messages > recent changes > dependency relationships > edge cases
3. **Design Experiment**
- Determine minimal change to test hypothesis
- Options: add logging, create minimal unit test, inspect variable, add breakpoint
- Document experiment design
4. **Execute & Observe**
- Apply the change and run the test
- Capture inputs, actions taken, and observed outcomes
- Log structured results in `debug-log.ndjson`
5. **Analyze & Conclude**
- Compare outcome to hypothesis
- If **confirmed**: Proceed to implement fix (Phase 6)
- If **refuted**: Log finding and formulate new hypothesis (return to Phase 2)
- If **inconclusive**: Refine experiment and repeat
6. **Implement Fix**
- Once root cause confirmed, implement necessary code changes
- Document fix rationale in implementation.md
- Log fix in changes.log
7. **Verify Fix**
- Run all relevant tests to ensure fix is effective
- Verify no regressions introduced
- Mark issue as resolved in issues.md
### Debug Log Format (NDJSON)
File: `.workflow/.cycle/{cycleId}.progress/cd/debug-log.ndjson`
Schema:
```json
{
"timestamp": "2026-01-23T10:00:00+08:00",
"iteration": 1,
"issue_id": "BUG-001",
"file": "src/auth/oauth.ts",
"hypothesis": "OAuth token refresh fails due to expired refresh_token not handled",
"action": "Added logging to capture refresh_token expiry",
"observation": "Refresh token is expired but code doesn't check expiry before use",
"outcome": "confirmed"
}
```
Outcome values: `confirmed | refuted | inconclusive`
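As a minimal sketch of how a CD agent might append one hypothesis-test cycle to `debug-log.ndjson` using the schema above (file path and field names from this section; the helper itself is illustrative):

```javascript
import { appendFileSync } from 'node:fs';

// Log one hypothesis-test cycle (Phases 2-5) as a single NDJSON line
function logDebugCycle(progressDir, entry) {
  const record = { timestamp: new Date().toISOString(), ...entry };
  appendFileSync(`${progressDir}/cd/debug-log.ndjson`, JSON.stringify(record) + '\n', 'utf8');
}

// Example: a refuted hypothesis → log it and return to Phase 2
logDebugCycle('.workflow/.cycle/cycle-v1-xxx.progress', {
  iteration: 1,
  issue_id: 'BUG-001',
  file: 'src/auth/oauth.ts',
  hypothesis: 'Callback URL mismatch causes 401 on token exchange',
  action: 'Logged callback URL at handler entry',
  observation: 'URL matches the registered value',
  outcome: 'refuted'
});
```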
### Hypothesis Priority Order
1. **Direct Error Messages/Stack Traces**: Most reliable starting point
2. **Recent Changes**: Check `changes.log` for recent modifications
3. **Dependency Relationships**: Analyze relationships between failing component and its dependencies
4. **Edge Cases**: Review `edge-cases.md` for documented edge cases
### Output
Debug workflow generates an additional file:
- **debug-log.ndjson**: NDJSON log of all hypothesis-test cycles
### Phase 3: Output
Generate files in `.workflow/.cycle/{cycleId}.progress/cd/`:
@@ -150,7 +234,7 @@ Overview of what was implemented in this iteration.
- Code review and merge
```
**code-changes.log** (NDJSON):
**changes.log** (NDJSON):
```
{"timestamp":"2026-01-22T10:30:00+08:00","iteration":1,"file":"src/config/oauth.ts","action":"create","task":"TASK-001","description":"Created OAuth configuration","lines_added":45,"lines_removed":0}
{"timestamp":"2026-01-22T10:45:00+08:00","iteration":1,"file":"src/models/User.ts","action":"modify","task":"TASK-002","description":"Added oauth_id and oauth_provider fields","lines_added":8,"lines_removed":0}
@@ -190,11 +274,12 @@ Overview of what was implemented in this iteration.
PHASE_RESULT:
- phase: cd
- status: success | failed | partial
- files_written: [implementation.md, code-changes.log, issues.md]
- files_written: [implementation.md, changes.log, debug-log.ndjson (if debug executed), issues.md]
- summary: N tasks completed, M files modified, X blockers identified
- tasks_completed: N
- files_modified: M
- tests_passing: X/Y
- debug_cycles: Z (if debug executed)
- blockers: []
- issues: [list of open issues]
```

View File

@@ -59,13 +59,24 @@ The Requirements Analyst maintains **a single file** (`requirements.md`) contain
- Task description from state
- Project tech stack and guidelines
2. **Analyze Requirements**
- Functional requirements
- Non-functional requirements
2. **Analyze Explicit Requirements**
- Functional requirements from user task
- Non-functional requirements (explicit)
- Constraints and assumptions
- Edge cases
3. **Generate Single File**
3. **Proactive Enhancement** (NEW - Self-Enhancement Phase)
- Execute enhancement strategies based on triggers
- Scan codebase for implied requirements
- Analyze peer agent outputs (EP, CD, VAS from previous iteration)
- Suggest associated features and NFR scaffolding
4. **Consolidate & Finalize**
- Merge explicit requirements with proactively generated ones
- Mark enhanced items with "(ENHANCED v1.0.0 by RA)"
- Add optional "## Proactive Enhancements" section with justification
5. **Generate Single File**
- Write `requirements.md` v1.0.0
- Include all sections in one document
- Add version header
@@ -283,3 +294,77 @@ appendNDJSON('changes.log', {
5. **Audit Trail**: Changes.log tracks every modification
6. **Readability First**: File should be clear and concise
7. **Version Markers**: Mark new items with "(NEW v1.x.0)"
8. **Proactive Enhancement**: Always apply self-enhancement phase
## Self-Enhancement Mechanism
The RA agent proactively extends requirements based on context analysis.
### Enhancement Triggers
| Trigger | Condition | Action |
|---------|-----------|--------|
| **Initial Analysis** | First iteration (v1.0.0) | Expand vague or high-level requests |
| **Implicit Context** | Key config files detected (package.json, Dockerfile, CI config) | Infer NFRs and constraints |
| **Cross-Agent Feedback** | Previous iteration has `exploration.identified_risks`, `cd.blockers`, or `vas.test_results.failed_tests` | Cover uncovered requirements |
### Enhancement Strategies
1. **Codebase Analysis**
- Scan key project files (package.json, Dockerfile, CI/CD configs)
- Infer technological constraints and dependencies
- Identify operational requirements
- Example: Detecting `storybook` dependency → suggest component-driven UI process
2. **Peer Output Mining**
- Analyze EP agent's `exploration.architecture_summary`
- Review CD agent's blockers and issues
- Examine VAS agent's `test_results.failed_tests`
- Formalize insights as new requirements
3. **Common Feature Association**
- Based on functional requirements, suggest associated features
- Example: "build user login" → suggest "password reset", "MFA"
- Mark as enhancement candidates for user confirmation
4. **NFR Scaffolding**
- For each major functional requirement, add standard NFRs
- Categories: Performance, Security, Scalability, Accessibility
- Set initial values as "TBD" to ensure consideration
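Strategy 4 (NFR scaffolding) lends itself to a small sketch; the field names (`relates_to`, `target`, `trigger`) are illustrative assumptions, not a defined schema.

```javascript
// Sketch: emit placeholder NFRs for each functional requirement
function scaffoldNFRs(functionalRequirements) {
  const categories = ['Performance', 'Security', 'Scalability', 'Accessibility'];
  return functionalRequirements.flatMap((fr, i) =>
    categories.map((category, j) => ({
      id: `NFR-${String(i * categories.length + j + 1).padStart(3, '0')}`,
      category,
      relates_to: fr.id,
      target: 'TBD', // initial value ensures the category is at least considered
      trigger: 'NFR Scaffolding (ENHANCED by RA)'
    }))
  );
}

// Example: scaffoldNFRs([{ id: 'FR-001' }]) → NFR-001..NFR-004 stubs for FR-001
```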
### Output Format for Enhanced Requirements
Enhanced requirements are integrated directly into `requirements.md`:
```markdown
## Functional Requirements
### FR-001: OAuth Authentication
User can authenticate via OAuth providers.
**Status**: Defined (v1.0.0)
**Priority**: High
### FR-002: Password Reset (ENHANCED v1.0.0 by RA)
Users can reset their password via email link.
**Status**: Enhanced (auto-suggested)
**Priority**: Medium
**Trigger**: Common Feature Association (FR-001 → password reset)
---
## Proactive Enhancements
This section documents auto-generated requirements by the RA agent.
| ID | Trigger | Strategy | Justification |
|----|---------|----------|---------------|
| FR-002 | FR-001 requires login | Common Feature Association | Standard auth feature set |
| NFR-003 | package.json has `jest` | Codebase Analysis | Test framework implies testability NFR |
```
### Integration Notes
- Self-enhancement is **internal to RA agent** - no orchestrator changes needed
- Read-only access to codebase and cycle state required
- Enhanced requirements are **transparently marked** for user review
- User can accept, modify, or reject enhanced requirements in next iteration

View File

@@ -456,7 +456,7 @@ Code Developer - Implement features based on plan and requirements.
Write files to ${progressDir}/cd/:
- implementation.md: Implementation progress and decisions
- code-changes.log: NDJSON format, each line: {file, action, timestamp}
- changes.log: NDJSON format, each line: {file, action, timestamp}
- issues.md: Development issues and blockers
## OUTPUT FORMAT
@@ -484,7 +484,7 @@ function spawnVASAgent(cycleId, state, progressDir) {
### MANDATORY FIRST STEPS (Agent Execute)
1. **Read role definition**: ~/.codex/agents/validation-archivist.md
2. Read: ${progressDir}/cd/code-changes.log
2. Read: ${progressDir}/cd/changes.log
---
@@ -638,7 +638,7 @@ function generateFinalSummary(cycleId, state) {
## Generated Files
- .workflow/.cycle/${cycleId}.progress/ra/requirements.md
- .workflow/.cycle/${cycleId}.progress/ep/plan.json
- .workflow/.cycle/${cycleId}.progress/cd/code-changes.log
- .workflow/.cycle/${cycleId}.progress/cd/changes.log
- .workflow/.cycle/${cycleId}.progress/vas/summary.md
## Continuation Instructions

View File

@@ -353,7 +353,7 @@ State changes trigger file writes:
|--------------|-----------|
| `requirements` updated | `.progress/ra/requirements.md` + version bump |
| `plan` updated | `.progress/ep/plan.json` + version bump |
| `changes` appended | `.progress/cd/code-changes.log` + iteration marker |
| `changes` appended | `.progress/cd/changes.log` + iteration marker |
| `test_results` updated | `.progress/vas/test-results.json` + version bump |
| Full iteration done | `.progress/coordination/timeline.md` appended |
@@ -413,7 +413,7 @@ function rebuildState(cycleId) {
// Read markdown files
const raMarkdown = Read(`${progressDir}/ra/requirements.md`)
const epMarkdown = Read(`${progressDir}/ep/plan.json`)
const cdChanges = Read(`${progressDir}/cd/code-changes.log`)
const cdChanges = Read(`${progressDir}/cd/changes.log`)
const vasResults = Read(`${progressDir}/vas/test-results.json`)
// Reconstruct state from files

View File

@@ -244,7 +244,7 @@ if (cdChangesPath && exists(cdChangesPath)) {
"version": "1.0.0",
"output_files": {
"progress": ".workflow/.cycle/cycle-v1-20260122-abc123.progress/cd/implementation.md",
"changes": ".workflow/.cycle/cycle-v1-20260122-abc123.progress/cd/code-changes.log",
"changes": ".workflow/.cycle/cycle-v1-20260122-abc123.progress/cd/changes.log",
"issues": ".workflow/.cycle/cycle-v1-20260122-abc123.progress/cd/issues.md"
},
"summary": {
@@ -313,7 +313,7 @@ Full Test Report: .workflow/.cycle/${cycleId}.progress/vas/test-results.json (v1
## Implementation Reference
- Current Code: .workflow/.cycle/${cycleId}.progress/cd/implementation.md (v1.0.0)
- Code Changes: .workflow/.cycle/${cycleId}.progress/cd/code-changes.log (v1.0.0)
- Code Changes: .workflow/.cycle/${cycleId}.progress/cd/changes.log (v1.0.0)
## Action Required
1. Review failing tests in referenced test results file

View File

@@ -12,19 +12,26 @@ The coordination protocol enables four parallel agents (RA, EP, CD, VAS) to comm
**Location**: `.workflow/.cycle/{cycleId}.json`
All agents read from and write to the unified state file:
**Access Pattern**:
- **Agents**: READ ONLY - check dependencies and status
- **Orchestrator**: READ-WRITE - updates state after each phase
```javascript
// Every agent: Read fresh state at action start
// Every agent: Read state to check dependencies
const state = JSON.parse(Read(`.workflow/.cycle/${cycleId}.json`))
const canProceed = checkDependencies(state)
// Every agent: Write updated state at action end
Write(`.workflow/.cycle/${cycleId}.json`, JSON.stringify(state, null, 2))
// Agent outputs PHASE_RESULT (reports to orchestrator, NOT writes directly)
console.log("PHASE_RESULT: ...")
// Only Orchestrator writes to state file after receiving PHASE_RESULT
// Write(`.workflow/.cycle/${cycleId}.json`, JSON.stringify(updatedState, null, 2))
```
**Protocol**:
- Read-Update-Write pattern (no lock needed, orchestrator serializes)
- Timestamp all updates with ISO8601 format
- Only orchestrator writes to state file (no concurrent writes, no lock needed)
- Agents read state to understand dependencies
- Timestamp all orchestrator updates with ISO8601 format
- Never delete existing data, only append
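A rough sketch of the orchestrator side of this protocol — receiving a `PHASE_RESULT`, merging it into the agent's entry, and writing the state file — assuming the `Read`/`Write` helpers used above and a hypothetical `parsePhaseResult` parser:

```javascript
function applyPhaseResult(cycleId, agentId, phaseResultText) {
  const statePath = `.workflow/.cycle/${cycleId}.json`;
  const state = JSON.parse(Read(statePath));
  const result = parsePhaseResult(phaseResultText); // hypothetical parser of the PHASE_RESULT block
  state.agents[agentId] = {
    ...state.agents[agentId],
    status: result.status,
    version: result.version ?? state.agents[agentId].version,
    output_file: result.files_written?.[0] ?? state.agents[agentId].output_file
  };
  state.updated_at = new Date().toISOString(); // ISO8601 timestamp, per protocol
  Write(statePath, JSON.stringify(state, null, 2)); // only the orchestrator writes
}
```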
### 2. Progress Markdown Files (Async Log)
@@ -33,18 +40,18 @@ Write(`.workflow/.cycle/${cycleId}.json`, JSON.stringify(state, null, 2))
Each agent writes progress to dedicated markdown files:
| Agent | Files |
|-------|-------|
| RA | requirements.md, edge-cases.md, changes.log |
| EP | exploration.md, architecture.md, plan.json |
| CD | implementation.md, code-changes.log, issues.md |
| VAS | validation.md, test-results.json, coverage.md, summary.md |
| Agent | Main Documents (Rewrite) | Logs (Append-Only) |
|-------|--------------------------|-------------------|
| RA | requirements.md | changes.log |
| EP | exploration.md, architecture.md, plan.json | changes.log |
| CD | implementation.md, issues.md | changes.log, debug-log.ndjson |
| VAS | validation.md, summary.md, test-results.json | changes.log |
**Protocol**:
- Append-only pattern (no overwrites)
- Version each document independently
- **Main documents**: Complete rewrite per iteration, archived to `history/`
- **Log files**: Append-only (changes.log, debug-log.ndjson) - never delete
- **Version synchronization**: All main documents share same version (e.g., all v1.1.0 in iteration 2)
- Include timestamp on each update
- Maintain backward compatibility
### 3. Orchestrator send_input (Synchronous)
@@ -198,6 +205,8 @@ PHASE_DETAILS:
## Dependency Resolution
**Execution Model**: All four agents are spawned in parallel, but execution blocks based on dependencies. Orchestrator manages dependency resolution via shared state.
### Build Order (Default)
```
@@ -206,6 +215,12 @@ RA (Requirements) → EP (Planning) → CD (Development) → VAS (Validation)
Block EP Block CD Block VAS Block completion
```
**Explanation**:
- All agents spawned simultaneously
- Each agent checks dependencies in shared state before proceeding
- Blocked agents wait for dependency completion
- Orchestrator uses `send_input` to notify dependent agents when ready
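The `checkDependencies` call shown earlier is easy to sketch against this build order; the dependency map itself is an assumption derived from the RA → EP → CD → VAS chain above.

```javascript
// Assumed dependency map derived from the default build order
const DEPENDS_ON = { ra: [], ep: ['ra'], cd: ['ep'], vas: ['cd'] };

function checkDependencies(state, agentId) {
  return DEPENDS_ON[agentId].every(dep => state.agents[dep]?.status === 'completed');
}

// Example: CD stays blocked until EP reports 'completed' in {cycleId}.json,
// at which point the orchestrator notifies it via send_input.
```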
### Parallel Opportunities
Some phases can run in parallel:
@@ -384,7 +399,7 @@ console.log(state.coordination.feedback_log)
tail .workflow/.cycle/cycle-xxx.progress/ra/changes.log
# Check CD changes
grep "TASK-001" .workflow/.cycle/cycle-xxx.progress/cd/code-changes.log
grep "TASK-001" .workflow/.cycle/cycle-xxx.progress/cd/changes.log
# Check coordination timeline
tail -50 .workflow/.cycle/cycle-xxx.progress/coordination/feedback.md

View File

@@ -1,50 +1,51 @@
# Document Versioning Strategy
Document version management strategy: complete rewrite vs. incremental update
Document version management strategy: Complete Rewrite + Archive History
## Recommended Approach: Recreate + Archive History
## Recommended Approach: Complete Rewrite + Archive History
For each iteration, the main document is **completely rewritten** and the old version is automatically archived to the `history/` directory.
For each iteration, **completely rewrite** the main document, and automatically archive the old version to the `history/` directory.
### File Structure
### File Structure
```
.workflow/.cycle/cycle-v1-20260122-abc123.progress/
├── ra/
│ ├── requirements.md # v1.2.0 (current version, recreated)
│ ├── edge-cases.md # v1.2.0 (current version, recreated)
│ ├── changes.log # NDJSON complete change history (append-only)
│ ├── requirements.md # v1.2.0 (current version, complete rewrite)
│ ├── edge-cases.md # v1.2.0 (current version, complete rewrite)
│ ├── changes.log # NDJSON complete change history (append-only)
│ └── history/
│ ├── requirements-v1.0.0.md (archived)
│ ├── requirements-v1.1.0.md (archived)
│ ├── edge-cases-v1.0.0.md (archived)
│ └── edge-cases-v1.1.0.md (archived)
│ ├── requirements-v1.0.0.md (archived)
│ ├── requirements-v1.1.0.md (archived)
│ ├── edge-cases-v1.0.0.md (archived)
│ └── edge-cases-v1.1.0.md (archived)
├── ep/
│ ├── exploration.md # v1.2.0 (current)
│ ├── architecture.md # v1.2.0 (current)
│ ├── plan.json # v1.2.0 (current)
│ ├── exploration.md # v1.2.0 (current)
│ ├── architecture.md # v1.2.0 (current)
│ ├── plan.json # v1.2.0 (current)
│ └── history/
│ ├── plan-v1.0.0.json
│ └── plan-v1.1.0.json
├── cd/
│ ├── implementation.md # v1.2.0 (current)
│ ├── code-changes.log # NDJSON complete history
│ ├── issues.md # Current unresolved issues
│ ├── implementation.md # v1.2.0 (current)
│ ├── changes.log # NDJSON complete history
│ ├── debug-log.ndjson # Debug hypothesis tracking
│ ├── issues.md # Current unresolved issues
│ └── history/
│ ├── implementation-v1.0.0.md
│ └── implementation-v1.1.0.md
└── vas/
├── validation.md # v1.2.0 (current)
├── test-results.json # v1.2.0 (current)
├── summary.md # v1.2.0 (current)
├── validation.md # v1.2.0 (current)
├── test-results.json # v1.2.0 (current)
├── summary.md # v1.2.0 (current)
└── history/
├── validation-v1.0.0.md
└── test-results-v1.0.0.json
```
## Document Template Optimization
## Optimized Document Template
### Requirements.md (Recreated Version)
### Requirements.md (Complete Rewrite Version)
```markdown
# Requirements Specification - v1.2.0
@@ -162,7 +163,7 @@ Response time < 500ms for all OAuth flows.
**Detailed History**: See `history/` directory and `changes.log`
```
### Changes.log (NDJSON - Complete History)
### Changes.log (NDJSON - Complete History)
```jsonl
{"timestamp":"2026-01-22T10:00:00+08:00","iteration":1,"version":"1.0.0","action":"create","type":"requirement","id":"FR-001","description":"Initial OAuth requirement"}
@@ -173,37 +174,37 @@ Response time < 500ms for all OAuth flows.
{"timestamp":"2026-01-23T10:05:00+08:00","iteration":3,"version":"1.2.0","action":"update","type":"requirement","id":"FR-002","description":"Added GitHub provider"}
```
## Implementation Flow
## Implementation Flow
### Agent Workflow (RA as Example)
### Agent Workflow (RA Example)
```javascript
// ==================== RA Agent Iteration Flow ====================
// ==================== RA Agent Iteration Flow ====================
// Read current state
// Read current state
const state = JSON.parse(Read(`.workflow/.cycle/${cycleId}.json`))
const currentVersion = state.requirements?.version || "0.0.0"
const iteration = state.current_iteration
// If this is an iteration (an old version already exists)
// If iteration (old version exists)
if (currentVersion !== "0.0.0") {
// 1. Archive old version
// 1. Archive old version
const oldFile = `.workflow/.cycle/${cycleId}.progress/ra/requirements.md`
const archiveFile = `.workflow/.cycle/${cycleId}.progress/ra/history/requirements-v${currentVersion}.md`
Copy(oldFile, archiveFile) // Archive
Copy(oldFile, archiveFile) // Archive
// 2. Read old version (optional, for understanding context)
// 2. Read old version (optional, for context understanding)
const oldRequirements = Read(oldFile)
// 3. Read change history
// 3. Read change history
const changesLog = readNDJSON(`.workflow/.cycle/${cycleId}.progress/ra/changes.log`)
}
// 4. Generate new version number
// 4. Generate new version number
const newVersion = bumpVersion(currentVersion, 'minor') // 1.1.0 -> 1.2.0
// 5. Generate new document (complete rewrite)
// 5. Generate new document (complete rewrite)
const newRequirements = generateRequirements({
version: newVersion,
previousVersion: currentVersion,
@@ -211,13 +212,13 @@ const newRequirements = generateRequirements({
currentChanges: "Added MFA and GitHub provider",
iteration: iteration,
taskDescription: state.description,
changesLog: changesLog // For understanding history
changesLog: changesLog // For understanding history
})
// 6. Write new document (overwrite the old one)
// 6. Write new document (overwrite old)
Write(`.workflow/.cycle/${cycleId}.progress/ra/requirements.md`, newRequirements)
// 7. Append change to changes.log
// 7. Append change to changes.log
appendNDJSON(`.workflow/.cycle/${cycleId}.progress/ra/changes.log`, {
timestamp: getUtc8ISOString(),
iteration: iteration,
@@ -228,7 +229,7 @@ appendNDJSON(`.workflow/.cycle/${cycleId}.progress/ra/changes.log`, {
description: "Added MFA requirement"
})
// 8. Update state
// 8. Update state
state.requirements = {
version: newVersion,
output_file: `.workflow/.cycle/${cycleId}.progress/ra/requirements.md`,
@@ -242,25 +243,25 @@ state.requirements = {
Write(`.workflow/.cycle/${cycleId}.json`, JSON.stringify(state, null, 2))
```
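The `bumpVersion` helper used in step 4 above is not defined anywhere in the diff; a minimal sketch consistent with the `1.1.0 -> 1.2.0` example (the major/patch branches are assumptions):

```javascript
function bumpVersion(version, level = 'minor') {
  const [major, minor, patch] = version.split('.').map(Number);
  if (level === 'major') return `${major + 1}.0.0`;
  if (level === 'patch') return `${major}.${minor}.${patch + 1}`;
  return `${major}.${minor + 1}.0`; // 'minor' (default): 1.1.0 -> 1.2.0
}
```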
## Advantages Comparison
## Advantages Comparison
| Aspect | Incremental Update | Recreate + Archive |
|------|----------|----------------|
| **Document Conciseness** | ❌ Grows ever longer | ✅ Always concise |
| **Agent Parsing** | ❌ Must parse history | ✅ Only reads current version |
| **Maintenance Complexity** | ❌ High (version markers) | ✅ Low (direct rewrite) |
| **File Size** | ❌ Bloats | ✅ Fixed |
| **History Tracking** | ✅ In main document | ✅ history/ + changes.log |
| **Human Readability** | ❌ Must skip history | ✅ Read current directly |
| **Token Usage** | ❌ More (reads complete history) | ✅ Less (reads only current) |
| Aspect | Incremental Update | Complete Rewrite + Archive |
|--------|-------------------|---------------------------|
| **Document Conciseness** | ❌ Gets longer | ✅ Always concise |
| **Agent Parsing** | ❌ Must parse history | ✅ Only read current version |
| **Maintenance Complexity** | ❌ High (version marking) | ✅ Low (direct rewrite) |
| **File Size** | ❌ Bloats | ✅ Fixed |
| **History Tracking** | ✅ In main document | ✅ In history/ + changes.log |
| **Human Readability** | ❌ Must skip history | ✅ Direct current view |
| **Token Usage** | ❌ More (read complete history) | ✅ Less (only read current) |
## Archive Strategy
## Archive Strategy
### Auto-Archive Trigger Timing
### Auto-Archive Trigger
```javascript
function shouldArchive(currentVersion, state) {
// Archive on every version update
// Archive on each version update
return currentVersion !== state.requirements?.version
}
@@ -269,29 +270,29 @@ function archiveOldVersion(cycleId, agent, filename, currentVersion) {
const archiveDir = `.workflow/.cycle/${cycleId}.progress/${agent}/history`
const archiveFile = `${archiveDir}/${filename.replace('.', `-v${currentVersion}.`)}`
// Ensure archive directory exists
// Ensure archive directory exists
mkdir -p ${archiveDir}
// Copy (not move; keep the current file until the new version is written)
// Copy (not move, keep current file until new version written)
Copy(currentFile, archiveFile)
console.log(`Archived ${filename} v${currentVersion} to history/`)
}
```
### Cleanup Strategy (Optional)
### Cleanup Strategy (Optional)
Keep the most recent N versions and delete older archives:
Keep most recent N versions, delete older archives:
```javascript
function cleanupArchives(cycleId, agent, keepVersions = 3) {
const historyDir = `.workflow/.cycle/${cycleId}.progress/${agent}/history`
const archives = listFiles(historyDir)
// Sort by version number
// Sort by version number
archives.sort((a, b) => compareVersions(extractVersion(a), extractVersion(b)))
// Delete the oldest versions (keep the most recent N)
// Delete oldest versions (keep most recent N)
if (archives.length > keepVersions) {
const toDelete = archives.slice(0, archives.length - keepVersions)
toDelete.forEach(file => Delete(`${historyDir}/${file}`))
@@ -299,32 +300,32 @@ function cleanupArchives(cycleId, agent, keepVersions = 3) {
}
```
## Importance of Changes.log
## Importance of Changes.log
Although the main document is recreated, **changes.log (NDJSON) permanently preserves the complete history**:
Although main document is completely rewritten, **changes.log (NDJSON) permanently preserves complete history**:
```bash
# View all changes
# View all changes
cat .workflow/.cycle/cycle-xxx.progress/ra/changes.log | jq .
# View the history of a specific requirement
# View history of specific requirement
cat .workflow/.cycle/cycle-xxx.progress/ra/changes.log | jq 'select(.id=="FR-001")'
# View changes by iteration
# View changes by iteration
cat .workflow/.cycle/cycle-xxx.progress/ra/changes.log | jq 'select(.iteration==2)'
```
This way:
- **Main Document**: Clear and concise (current state)
- **Changes.log**: Complete traceability (all history)
- **History/**: Snapshot backups (view on demand)
This way:
- **Main Document**: Clear and concise (current state)
- **Changes.log**: Complete traceability (all history)
- **History/**: Snapshot backups (view on demand)
## Recommended Implementation
## Recommended Implementation
1. ✅ Adopt the "recreate" strategy
2. ✅ Main document keeps only a "previous version summary"
3. ✅ Auto-archive to the `history/` directory
4. ✅ Changes.log (NDJSON) preserves the complete history
5. ✅ Optional: keep the most recent 3-5 historical versions
1.Adopt "Complete Rewrite" strategy
2.Main document only keeps "previous version summary"
3.Auto-archive to `history/` directory
4. ✅ Changes.log (NDJSON) preserves complete history
5.Optional: Keep most recent 3-5 historical versions
This keeps documents concise (agent-friendly) while preserving the complete history (audit-friendly).
This approach keeps documents concise (agent-friendly) while preserving complete history (audit-friendly).

View File

@@ -1220,6 +1220,81 @@ async function solutionAction(issueId: string | undefined, options: IssueOptions
}
}
/**
* solutions - Batch query solutions for multiple issues
* Usage: ccw issue solutions --status planned --brief
*/
async function solutionsAction(options: IssueOptions): Promise<void> {
// Get issues filtered by status
const issues = readIssues();
let targetIssues = issues;
if (options.status) {
const statuses = options.status.split(',').map((s: string) => s.trim());
targetIssues = issues.filter((i: Issue) => statuses.includes(i.status));
}
// Filter to only issues with bound_solution_id
const boundIssues = targetIssues.filter((i: Issue) => i.bound_solution_id);
if (boundIssues.length === 0) {
if (options.json || options.brief) {
console.log('[]');
} else {
console.log(chalk.yellow('No bound solutions found'));
}
return;
}
// Collect solutions for all bound issues
const allSolutions: Array<{
issue_id: string;
solution_id: string;
is_bound: boolean;
task_count: number;
files_touched: string[];
priority?: number;
}> = [];
for (const issue of boundIssues) {
const solutions = readSolutions(issue.id);
const boundSolution = solutions.find(s => s.id === issue.bound_solution_id);
if (boundSolution) {
const filesTouched = new Set<string>();
for (const task of boundSolution.tasks) {
if (task.modification_points) {
for (const mp of task.modification_points) {
if (mp.file) filesTouched.add(mp.file);
}
}
}
allSolutions.push({
issue_id: issue.id,
solution_id: boundSolution.id,
is_bound: true,
task_count: boundSolution.tasks.length,
files_touched: Array.from(filesTouched),
priority: issue.priority
});
}
}
// Brief mode: already minimal
if (options.brief || options.json) {
console.log(JSON.stringify(allSolutions, null, 2));
return;
}
// Human-readable output
console.log(chalk.bold.cyan(`\nBound Solutions (${allSolutions.length}):\n`));
for (const sol of allSolutions) {
console.log(`${chalk.green('◉')} ${sol.issue_id}${sol.solution_id}`);
console.log(chalk.gray(` Tasks: ${sol.task_count}, Files: ${sol.files_touched.length}`));
}
}
/**
* init - Initialize a new issue (manual ID)
*/
@@ -2832,6 +2907,9 @@ export async function issueCommand(
case 'solution':
await solutionAction(argsArray[0], options);
break;
case 'solutions':
await solutionsAction(options);
break;
case 'init':
await initAction(argsArray[0], options);
break;

ccw/src/core/routes/.gitignore (vendored, new file, +1 line)
View File

@@ -0,0 +1 @@
.ace-tool/

package-lock.json (generated, 4 changed lines)
View File

@@ -1,12 +1,12 @@
{
"name": "claude-code-workflow",
"version": "6.3.41",
"version": "6.3.43",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "claude-code-workflow",
"version": "6.3.41",
"version": "6.3.43",
"license": "MIT",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.0.4",

View File

@@ -1,6 +1,6 @@
{
"name": "claude-code-workflow",
"version": "6.3.41",
"version": "6.3.43",
"description": "JSON-driven multi-agent development framework with intelligent CLI orchestration (Gemini/Qwen/Codex), context-first architecture, and automated workflow execution",
"type": "module",
"main": "ccw/src/index.js",