mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-03-21 19:08:17 +08:00
chore: update SKILL.md for analyze-with-file and roadmap-with-file to reflect changes in execution process and handoff to csv-wave-pipeline
This commit is contained in:
@@ -1,716 +0,0 @@
|
|||||||
# Analyze Task Generation & Execution Spec
|
|
||||||
|
|
||||||
> **Purpose**: Quality standards for task generation + execution specification for Phase 5 of `analyze-with-file`.
|
|
||||||
> **Consumer**: Phase 5 of `analyze-with-file` workflow.
|
|
||||||
> **Scope**: Task generation quality + direct inline execution.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Task Generation Flow
|
|
||||||
|
|
||||||
> **Entry point**: Routed here from SKILL.md Phase 5 when complexity is `complex` (≥3 recommendations or high-priority with dependencies).
|
|
||||||
|
|
||||||
```
|
|
||||||
Step 1: Load context → Step 2: Generate .task/*.json → Step 3: Pre-execution analysis
|
|
||||||
→ Step 4: User confirmation → Step 5: Serial execution → Step 6: Finalize
|
|
||||||
```
|
|
||||||
|
|
||||||
**Input artifacts** (all from session folder):
|
|
||||||
|
|
||||||
| Artifact | Required | Provides |
|
|
||||||
|----------|----------|----------|
|
|
||||||
| `conclusions.json` | Yes | `recommendations[]` with action, rationale, priority, evidence_refs |
|
|
||||||
| `exploration-codebase.json` | No | `relevant_files[]`, `patterns[]`, `constraints[]`, `integration_points[]` — primary source for file resolution |
|
|
||||||
| `explorations.json` | No | `sources[]`, `key_findings[]` — fallback for file resolution |
|
|
||||||
| `perspectives.json` | No | Multi-perspective findings — alternative to explorations.json |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## File Resolution Algorithm
|
|
||||||
|
|
||||||
Target files are resolved with a 3-priority fallback chain. Recommendations carry only `evidence_refs` — file resolution is EXECUTE.md's responsibility:
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
/**
 * Resolve target files for a recommendation via a 3-priority fallback chain.
 * Recommendations carry only `evidence_refs`; file resolution happens here.
 *
 * @param {object} rec - recommendation {action, rationale, priority, evidence_refs}
 * @param {object|null} codebaseContext - parsed exploration-codebase.json (may be null)
 * @param {object|null} explorations - parsed explorations.json / perspectives.json (may be null)
 * @returns {Array<{path: string, action: string, target: null, changes: string[]}>}
 *          resolved file entries; empty array when nothing could be resolved
 */
function resolveTargetFiles(rec, codebaseContext, explorations) {
  // Priority 1: pull explicit paths out of evidence_refs (e.g. "src/auth/token.ts:89").
  const refs = rec.evidence_refs ?? []
  if (refs.length > 0) {
    // A ref "looks like" a path if it has a separator or an extension dot;
    // strip the ":line" suffix and dedupe while preserving first-seen order.
    const seen = new Set()
    for (const ref of refs) {
      if (ref.includes('/') || ref.includes('.')) {
        seen.add(ref.split(':')[0])
      }
    }
    if (seen.size > 0) {
      return [...seen].map(path => ({
        path,
        action: 'modify',
        target: null,
        changes: []
      }))
    }
  }

  // Priority 2: keyword match against exploration-codebase.json relevant_files.
  const candidates = codebaseContext?.relevant_files
  if (candidates?.length) {
    const keywords = extractKeywords(rec.action + ' ' + rec.rationale)
    const matchesKeyword = f => keywords.some(kw =>
      f.path.toLowerCase().includes(kw) ||
      f.summary?.toLowerCase().includes(kw) ||
      f.relevance?.toLowerCase().includes(kw)
    )
    const matched = candidates.filter(matchesKeyword)
    if (matched.length > 0) {
      return matched.map(f => ({
        path: f.path,
        action: 'modify',
        target: null,
        changes: rec.changes || []
      }))
    }
  }

  // Priority 3: match explorations.json sources on the action's leading verb.
  const sources = explorations?.sources
  if (sources?.length) {
    const actionVerb = rec.action.split(' ')[0].toLowerCase()
    const matched = sources.filter(s =>
      s.summary?.toLowerCase().includes(actionVerb) ||
      s.file?.includes(actionVerb)
    )
    if (matched.length > 0) {
      return matched.map(s => ({
        path: s.file,
        action: 'modify',
        target: null,
        changes: []
      }))
    }
  }

  // Fallback: nothing resolved — the task relies on description + implementation.
  return []
}
|
|
||||||
|
|
||||||
/**
 * Extract lowercase keywords from free text for fuzzy file matching.
 * Keeps latin letters, digits, and CJK characters; everything else becomes
 * a separator. Words of length <= 2 and common English stop words are dropped.
 *
 * @param {string} text - e.g. rec.action + ' ' + rec.rationale
 * @returns {string[]} keyword list (may be empty)
 */
function extractKeywords(text) {
  // Hoisted Set: the original re-created the stop-word array literal and
  // linearly scanned it for every word; Set membership is O(1).
  const stopWords = new Set(['the', 'and', 'for', 'with', 'from', 'that', 'this'])
  return text
    .toLowerCase()
    .replace(/[^a-z0-9\u4e00-\u9fa5\s]/g, ' ')
    .split(/\s+/)
    // NOTE(review): length > 2 also drops two-character CJK words despite the
    // CJK range being preserved above — confirm this is intended.
    .filter(w => w.length > 2)
    .filter(w => !stopWords.has(w))
}
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Task Type Inference
|
|
||||||
|
|
||||||
| Recommendation Pattern | Inferred Type |
|
|
||||||
|------------------------|---------------|
|
|
||||||
| fix, resolve, repair, patch, correct | `fix` |
|
|
||||||
| refactor, restructure, extract, reorganize, decouple | `refactor` |
|
|
||||||
| add, implement, create, build, introduce | `feature` |
|
|
||||||
| improve, optimize, enhance, upgrade, streamline | `enhancement` |
|
|
||||||
| test, coverage, validate, verify, assert | `testing` |
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
/**
 * Classify a recommendation into a task type from keyword signals in its text.
 * Matches whole words only: the original used substring `includes`, so
 * "prefix" classified as `fix` and "additional" as `feature`.
 *
 * @param {object} rec - recommendation with `action` and `rationale` strings
 * @returns {'fix'|'refactor'|'feature'|'enhancement'|'testing'}
 */
function inferTaskType(rec) {
  const text = (rec.action + ' ' + rec.rationale).toLowerCase()
  // Order matters: earlier entries win when multiple patterns match.
  const patterns = [
    { type: 'fix', keywords: ['fix', 'resolve', 'repair', 'patch', 'correct', 'bug'] },
    { type: 'refactor', keywords: ['refactor', 'restructure', 'extract', 'reorganize', 'decouple'] },
    { type: 'feature', keywords: ['add', 'implement', 'create', 'build', 'introduce'] },
    { type: 'enhancement', keywords: ['improve', 'optimize', 'enhance', 'upgrade', 'streamline'] },
    { type: 'testing', keywords: ['test', 'coverage', 'validate', 'verify', 'assert'] }
  ]
  // All keywords are plain alphanumeric words, so no regex escaping is needed.
  const hasWord = kw => new RegExp(`\\b${kw}\\b`).test(text)
  for (const p of patterns) {
    if (p.keywords.some(hasWord)) return p.type
  }
  return 'enhancement' // safe default when no signal matches
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Effort Inference
|
|
||||||
|
|
||||||
| Signal | Effort |
|
|
||||||
|--------|--------|
|
|
||||||
| priority=high AND files >= 3 | `large` |
|
|
||||||
| priority=high OR files >= 2 | `medium` |
|
|
||||||
| priority=medium AND files <= 1 | `medium` |
|
|
||||||
| priority=low OR single file | `small` |
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
/**
 * Infer task effort from priority + resolved-file count, per the table above.
 *
 * @param {object} rec - recommendation with `priority` ('high'|'medium'|'low')
 * @param {Array|null|undefined} targetFiles - resolved target file entries
 * @returns {'small'|'medium'|'large'}
 */
function inferEffort(rec, targetFiles) {
  const fileCount = targetFiles?.length || 0
  if (rec.priority === 'high' && fileCount >= 3) return 'large'
  if (rec.priority === 'high' || fileCount >= 2) return 'medium'
  // Table row 3: priority=medium AND files <= 1 → medium. The previous
  // implementation fell through to the `fileCount <= 1` check below and
  // wrongly returned 'small' for medium-priority single-file tasks.
  if (rec.priority === 'medium') return 'medium'
  if (rec.priority === 'low' || fileCount <= 1) return 'small'
  return 'medium'
}
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Convergence Quality Validation
|
|
||||||
|
|
||||||
Every task's `convergence` MUST pass quality gates before writing to disk.
|
|
||||||
|
|
||||||
### Quality Rules
|
|
||||||
|
|
||||||
| Field | Requirement | Validation |
|
|
||||||
|-------|-------------|------------|
|
|
||||||
| `criteria[]` | **Testable** — assertions or concrete manual steps | Reject vague patterns; each criterion must reference observable behavior |
|
|
||||||
| `verification` | **Executable** — shell command or explicit step sequence | Must contain a runnable command or step-by-step verification procedure |
|
|
||||||
| `definition_of_done` | **Business language** — non-technical stakeholder can judge | Must NOT contain technical commands (jest, tsc, npm, build) |
|
|
||||||
|
|
||||||
### Vague Pattern Detection
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// Short generic phrases (English + Chinese) that cannot be verified — reject as criteria.
const VAGUE_PATTERNS = /正常|正确|好|可以|没问题|works|fine|good|correct|properly|as expected/i
// Tool/command vocabulary that must not leak into a business-language DoD.
const TECHNICAL_IN_DOD = /compile|build|lint|npm|npx|jest|tsc|eslint|cargo|pytest|go test/i

/**
 * Validate every task's convergence block against the quality gates.
 * Tasks are still generated when issues are found — issues are returned so the
 * caller can attempt evidence-based auto-fixes or log warnings for the rest.
 *
 * @param {Array<object>} tasks - enriched task objects (see Required Task Fields)
 * @returns {Array<{task, field, problem, value?, fix}>} all detected issues
 */
function validateConvergenceQuality(tasks) {
  const issues = []
  tasks.forEach(task => {
    // Rule 0: the convergence block itself must exist. The previous version
    // threw a TypeError on a malformed task; a validator should report instead.
    if (!task.convergence) {
      issues.push({
        task: task.id, field: 'convergence',
        problem: 'Missing convergence block',
        fix: 'Add convergence.criteria[], verification and definition_of_done'
      })
      return
    }

    // Rule 1: no vague criteria — short generic phrases are unverifiable.
    ;(task.convergence.criteria || []).forEach((c, i) => {
      if (VAGUE_PATTERNS.test(c) && c.length < 20) {
        issues.push({
          task: task.id, field: `criteria[${i}]`,
          problem: 'Vague criterion', value: c,
          fix: 'Replace with specific observable condition from evidence'
        })
      }
    })

    // Rule 2: verification should be executable (command or step sequence).
    if (task.convergence.verification && task.convergence.verification.length < 5) {
      issues.push({
        task: task.id, field: 'verification',
        problem: 'Too short to be executable', value: task.convergence.verification,
        fix: 'Provide shell command or numbered step sequence'
      })
    }

    // Rule 3: DoD should be business language a stakeholder can judge.
    if (TECHNICAL_IN_DOD.test(task.convergence.definition_of_done)) {
      issues.push({
        task: task.id, field: 'definition_of_done',
        problem: 'Contains technical commands', value: task.convergence.definition_of_done,
        fix: 'Rewrite in business language describing user/system outcome'
      })
    }

    // Rule 4: 'modify' file entries need a change description
    // (either changes[] or the singular `change` fallback).
    task.files?.forEach((f, i) => {
      if (f.action === 'modify' && (!f.changes || f.changes.length === 0) && !f.change) {
        issues.push({
          task: task.id, field: `files[${i}].changes`,
          problem: 'No change description for modify action', value: f.path,
          fix: 'Describe what specifically changes in this file'
        })
      }
    })

    // Rule 5: at least one implementation step must exist.
    if (!task.implementation || task.implementation.length === 0) {
      issues.push({
        task: task.id, field: 'implementation',
        problem: 'No implementation steps',
        fix: 'Add at least one step describing how to realize this task'
      })
    }
  })

  // Issues are returned to the caller, which may auto-fix from available
  // evidence; unfixable issues are logged — flagged tasks are still written.
  return issues
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Good vs Bad Examples
|
|
||||||
|
|
||||||
**Criteria**:
|
|
||||||
|
|
||||||
| Bad | Good |
|
|
||||||
|-----|------|
|
|
||||||
| `"Code works correctly"` | `"refreshToken() returns a new JWT with >0 expiry when called with expired token"` |
|
|
||||||
| `"No errors"` | `"Error handler at auth.ts:45 returns 401 status with { error: 'token_expired' } body"` |
|
|
||||||
| `"Performance is good"` | `"API response time < 200ms at p95 for /api/users endpoint under 100 concurrent requests"` |
|
|
||||||
|
|
||||||
**Verification**:
|
|
||||||
|
|
||||||
| Bad | Good |
|
|
||||||
|-----|------|
|
|
||||||
| `"Check it"` | `"jest --testPathPattern=auth.test.ts && npx tsc --noEmit"` |
|
|
||||||
| `"Run tests"` | `"1. Run npm test -- --grep 'token refresh' 2. Verify no TypeScript errors with npx tsc --noEmit"` |
|
|
||||||
|
|
||||||
**Definition of Done**:
|
|
||||||
|
|
||||||
| Bad | Good |
|
|
||||||
|-----|------|
|
|
||||||
| `"jest passes"` | `"Users remain logged in across token expiration without manual re-login"` |
|
|
||||||
| `"No TypeScript errors"` | `"Authentication flow handles all user-facing error scenarios with clear error messages"` |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Required Task Fields (analyze-with-file producer)
|
|
||||||
|
|
||||||
SKILL.md produces minimal recommendations `{action, rationale, priority, evidence_refs}`. EXECUTE.md enriches these into full task JSON. The final `.task/*.json` MUST populate:
|
|
||||||
|
|
||||||
| Block | Fields | Required |
|
|
||||||
|-------|--------|----------|
|
|
||||||
| IDENTITY | `id`, `title`, `description` | Yes |
|
|
||||||
| CLASSIFICATION | `type`, `priority`, `effort` | Yes |
|
|
||||||
| DEPENDENCIES | `depends_on` | Yes (empty array if none) |
|
|
||||||
| CONVERGENCE | `convergence.criteria[]`, `convergence.verification`, `convergence.definition_of_done` | Yes |
|
|
||||||
| FILES | `files[].path`, `files[].action`, `files[].changes`/`files[].change` | Yes (if files identified) |
|
|
||||||
| IMPLEMENTATION | `implementation[]` with step + description | Yes |
|
|
||||||
| CONTEXT | `evidence`, `source.tool`, `source.session_id`, `source.original_id` | Yes |
|
|
||||||
|
|
||||||
### Task JSON Example
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"id": "TASK-001",
|
|
||||||
"title": "Fix authentication token refresh",
|
|
||||||
"description": "Token refresh fails silently when JWT expires, causing users to be logged out unexpectedly",
|
|
||||||
"type": "fix",
|
|
||||||
"priority": "high",
|
|
||||||
"effort": "medium",
|
|
||||||
"files": [
|
|
||||||
{
|
|
||||||
"path": "src/auth/token.ts",
|
|
||||||
"action": "modify",
|
|
||||||
"target": "refreshToken",
|
|
||||||
"changes": [
|
|
||||||
"Add await to refreshToken() call at line 89",
|
|
||||||
"Add error propagation for refresh failure"
|
|
||||||
],
|
|
||||||
"change": "Add await to refreshToken() call and propagate errors"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"path": "src/middleware/auth.ts",
|
|
||||||
"action": "modify",
|
|
||||||
"target": "authMiddleware",
|
|
||||||
"changes": [
|
|
||||||
"Update error handler at line 45 to distinguish refresh failures from auth failures"
|
|
||||||
],
|
|
||||||
"change": "Update error handler to propagate refresh failures"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"depends_on": [],
|
|
||||||
"convergence": {
|
|
||||||
"criteria": [
|
|
||||||
"refreshToken() returns new valid JWT when called with expired token",
|
|
||||||
"Expired token triggers automatic refresh without user action",
|
|
||||||
"Failed refresh returns 401 with { error: 'token_expired' } body"
|
|
||||||
],
|
|
||||||
"verification": "jest --testPathPattern=token.test.ts && npx tsc --noEmit",
|
|
||||||
"definition_of_done": "Users remain logged in across token expiration without manual re-login"
|
|
||||||
},
|
|
||||||
"implementation": [
|
|
||||||
{
|
|
||||||
"step": "1",
|
|
||||||
"description": "Add await to refreshToken() call in token.ts",
|
|
||||||
"actions": ["Read token.ts", "Add await keyword at line 89", "Verify async chain"]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"step": "2",
|
|
||||||
"description": "Update error handler in auth middleware",
|
|
||||||
"actions": ["Read auth.ts", "Modify error handler at line 45", "Add refresh-specific error type"]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"evidence": ["src/auth/token.ts:89", "src/middleware/auth.ts:45"],
|
|
||||||
"source": {
|
|
||||||
"tool": "analyze-with-file",
|
|
||||||
"session_id": "ANL-auth-token-refresh-2025-01-21",
|
|
||||||
"original_id": "TASK-001"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Step 1: Load All Context Sources
|
|
||||||
|
|
||||||
Phase 2-4 already loaded and processed these artifacts. If data is still in conversation memory, skip disk reads.
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// Skip loading if already in memory from Phase 2-4
|
|
||||||
// Only read from disk when entering EXECUTE.md from a fresh/resumed session
|
|
||||||
|
|
||||||
// Load conclusions.json — the only required artifact (recommendations[]).
// Skipped when Phase 2-4 already left it in memory. `Read` is a
// workflow-provided tool, not standard JS — presumably returns file text.
if (!conclusions) {
  conclusions = JSON.parse(Read(`${sessionFolder}/conclusions.json`))
}

// Optional codebase exploration context; null when the artifact is absent.
// `file_exists` is a workflow-provided tool — TODO confirm its contract.
if (!codebaseContext) {
  codebaseContext = file_exists(`${sessionFolder}/exploration-codebase.json`)
    ? JSON.parse(Read(`${sessionFolder}/exploration-codebase.json`))
    : null
}

// Optional explorations; perspectives.json is the documented alternative
// fallback, and null when neither artifact exists.
if (!explorations) {
  explorations = file_exists(`${sessionFolder}/explorations.json`)
    ? JSON.parse(Read(`${sessionFolder}/explorations.json`))
    : file_exists(`${sessionFolder}/perspectives.json`)
      ? JSON.parse(Read(`${sessionFolder}/perspectives.json`))
      : null
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Step 2: Enrich Recommendations & Generate .task/*.json
|
|
||||||
|
|
||||||
SKILL.md Phase 4 produces minimal recommendations: `{action, rationale, priority, evidence_refs}`.
|
|
||||||
This step enriches each recommendation with execution-specific details using codebase context, then generates individual task JSON files.
|
|
||||||
|
|
||||||
**Enrichment pipeline**: `rec (minimal) + codebaseContext + explorations → task JSON (full)`
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// Enrich each minimal recommendation {action, rationale, priority, evidence_refs}
// into a full task object, then persist one JSON file per task.
const tasks = conclusions.recommendations.map((rec, index) => {
  // Zero-padded sequential IDs: TASK-001, TASK-002, ...
  const taskId = `TASK-${String(index + 1).padStart(3, '0')}`

  // 1. ENRICH: Resolve target files from codebase context (not from rec) —
  //    see the File Resolution Algorithm's 3-priority fallback chain.
  const targetFiles = resolveTargetFiles(rec, codebaseContext, explorations)

  // 2. ENRICH: Generate implementation steps from action + context.
  const implSteps = generateImplementationSteps(rec, targetFiles, codebaseContext)

  // 3. ENRICH: Derive per-file change descriptions; `change` keeps the raw
  //    action string as a singular fallback alongside the changes[] list.
  const enrichedFiles = targetFiles.map(f => ({
    path: f.path,
    action: f.action || 'modify',
    target: f.target || null,
    changes: deriveChanges(rec, f, codebaseContext) || [],
    change: rec.action
  }))

  return {
    // IDENTITY
    id: taskId,
    title: rec.action,
    description: rec.rationale,
    // CLASSIFICATION
    type: inferTaskType(rec),
    priority: rec.priority,
    effort: inferEffort(rec, targetFiles),

    files: enrichedFiles,
    depends_on: [],

    // CONVERGENCE (must pass quality validation). generateCriteria /
    // generateVerification / generateDoD are defined elsewhere — not visible
    // in this file; presumably they derive testable text from rec.
    convergence: {
      criteria: generateCriteria(rec),
      verification: generateVerification(rec),
      definition_of_done: generateDoD(rec)
    },

    // IMPLEMENTATION steps (generated here, not from SKILL.md)
    implementation: implSteps,

    // CONTEXT: provenance back to the analysis session
    evidence: rec.evidence_refs || [],
    source: {
      tool: 'analyze-with-file',
      session_id: sessionId,
      original_id: taskId
    }
  }
})

// Quality validation — flagged tasks are still generated (issues logged).
validateConvergenceQuality(tasks)

// Write each task as an individual JSON file under {sessionFolder}/.task/.
// Bash/Write are workflow-provided tools, not standard JS.
Bash(`mkdir -p ${sessionFolder}/.task`)
tasks.forEach(task => {
  Write(`${sessionFolder}/.task/${task.id}.json`, JSON.stringify(task, null, 2))
})
|
|
||||||
```
|
|
||||||
|
|
||||||
**Enrichment Functions**:
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
/**
 * Build a minimal implementation plan for a recommendation: a single step
 * whose actions list one "Modify <path>" entry per resolved target file.
 *
 * A richer version would split rec.action into atomic steps, map each step
 * to its files, and pull context from codebaseContext.patterns.
 *
 * @returns {Array<{step: string, description: string, actions: string[]}>}
 */
function generateImplementationSteps(rec, targetFiles, codebaseContext) {
  const actions = []
  for (const file of targetFiles) {
    actions.push(`Modify ${file.path}`)
  }
  const firstStep = {
    step: '1',
    description: rec.action,
    actions
  }
  return [firstStep]
}
|
|
||||||
|
|
||||||
/**
 * Derive change descriptions for one target file of a recommendation.
 *
 * Placeholder heuristic: echoes the recommendation's action as the single
 * change. A fuller version would match action keywords to file content,
 * consult codebaseContext.patterns, and pinpoint locations via
 * rec.evidence_refs.
 *
 * @returns {string[]} change descriptions, e.g. ['specific change 1', ...]
 */
function deriveChanges(rec, file, codebaseContext) {
  const fallbackChange = rec.action
  return [fallbackChange]
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Step 3-6: Execution Steps
|
|
||||||
|
|
||||||
After `.task/*.json` generation, validate and execute tasks directly inline.
|
|
||||||
|
|
||||||
### Step 3: Pre-Execution Analysis
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// Load every generated task file. Glob/Read are workflow-provided tools,
// not standard JS — Glob presumably returns matching paths.
const taskFiles = Glob(`${sessionFolder}/.task/*.json`)
const tasks = taskFiles.map(f => JSON.parse(Read(f)))

// 1. Dependency validation: every depends_on entry must name a known task.
const taskIds = new Set(tasks.map(t => t.id))
const errors = []
tasks.forEach(task => {
  task.depends_on.forEach(dep => {
    if (!taskIds.has(dep)) errors.push(`${task.id}: depends on unknown task ${dep}`)
  })
})
|
|
||||||
|
|
||||||
// 2. Circular dependency detection (DFS)
/**
 * Find circular dependency chains among tasks via depth-first search.
 *
 * @param {Array<{id: string, depends_on: string[]}>} tasks
 * @returns {string[]} human-readable cycles, e.g. "A → B → A"; empty if acyclic
 */
function detectCycles(tasks) {
  const edges = new Map()
  for (const t of tasks) edges.set(t.id, t.depends_on)
  const finished = new Set()   // fully explored nodes
  const active = new Set()     // nodes on the current DFS stack
  const found = []
  const walk = (id, trail) => {
    if (active.has(id)) {
      // Back edge: the current path re-enters an in-progress node.
      found.push(trail.concat(id).join(' → '))
      return
    }
    if (finished.has(id)) return
    finished.add(id)
    active.add(id)
    // Unknown dependency IDs simply have no outgoing edges.
    for (const next of edges.get(id) || []) {
      walk(next, trail.concat(id))
    }
    active.delete(id)
  }
  for (const t of tasks) {
    if (!finished.has(t.id)) walk(t.id, [])
  }
  return found
}
|
|
||||||
|
|
||||||
// 3. Topological sort for execution order
/**
 * Kahn's algorithm over the task dependency graph.
 *
 * Fixes two defects in the previous version: (a) duplicate entries in
 * depends_on inflated the in-degree but the `includes`-based decrement fired
 * only once per dependency, so such tasks never reached in-degree 0 and were
 * silently dropped from the order; (b) every pop rescanned all tasks (O(V·E))
 * — a reverse-adjacency map makes each edge processed exactly once.
 *
 * @param {Array<{id: string, depends_on: string[]}>} tasks
 * @returns {string[]} task IDs in dependency order; shorter than tasks.length
 *          when a cycle or unknown dependency blocks some tasks (cycles are
 *          reported separately by detectCycles)
 */
function topoSort(tasks) {
  // in-degree = number of depends_on entries (duplicates counted).
  const inDegree = new Map(tasks.map(t => [t.id, t.depends_on.length]))
  // Reverse adjacency: dep id → ids of tasks that depend on it.
  const dependents = new Map()
  tasks.forEach(t => t.depends_on.forEach(dep => {
    if (!dependents.has(dep)) dependents.set(dep, [])
    dependents.get(dep).push(t.id)
  }))
  const queue = tasks.filter(t => inDegree.get(t.id) === 0).map(t => t.id)
  const order = []
  while (queue.length) {
    const id = queue.shift()
    order.push(id)
    for (const next of dependents.get(id) || []) {
      inDegree.set(next, inDegree.get(next) - 1)
      if (inDegree.get(next) === 0) queue.push(next)
    }
  }
  return order
}
|
|
||||||
|
|
||||||
// 4. File conflict detection: map each file path to the tasks touching it;
// any path claimed by more than one task is reported as a conflict so the
// serial execution order (and the user) can account for it.
const fileTaskMap = new Map()
tasks.forEach(task => {
  (task.files || []).forEach(f => {
    if (!fileTaskMap.has(f.path)) fileTaskMap.set(f.path, [])
    fileTaskMap.get(f.path).push(task.id)
  })
})
const conflicts = []
fileTaskMap.forEach((taskIds, file) => {
  if (taskIds.length > 1) conflicts.push({ file, tasks: taskIds })
})
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 4: Initialize Execution Artifacts
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// execution.md — overview with task table
|
|
||||||
const executionMd = `# Execution Overview
|
|
||||||
|
|
||||||
## Session Info
|
|
||||||
- **Session ID**: ${sessionId}
|
|
||||||
- **Plan Source**: .task/*.json (from analysis conclusions)
|
|
||||||
- **Started**: ${getUtc8ISOString()}
|
|
||||||
- **Total Tasks**: ${tasks.length}
|
|
||||||
|
|
||||||
## Task Overview
|
|
||||||
|
|
||||||
| # | ID | Title | Type | Priority | Status |
|
|
||||||
|---|-----|-------|------|----------|--------|
|
|
||||||
${tasks.map((t, i) => `| ${i+1} | ${t.id} | ${t.title} | ${t.type} | ${t.priority} | pending |`).join('\n')}
|
|
||||||
|
|
||||||
## Pre-Execution Analysis
|
|
||||||
${conflicts.length
|
|
||||||
? `### File Conflicts\n${conflicts.map(c => `- **${c.file}**: ${c.tasks.join(', ')}`).join('\n')}`
|
|
||||||
: 'No file conflicts detected.'}
|
|
||||||
|
|
||||||
## Execution Timeline
|
|
||||||
> Updated as tasks complete
|
|
||||||
`
|
|
||||||
Write(`${sessionFolder}/execution.md`, executionMd)
|
|
||||||
|
|
||||||
// execution-events.md — chronological event log
|
|
||||||
Write(`${sessionFolder}/execution-events.md`,
|
|
||||||
`# Execution Events\n\n**Session**: ${sessionId}\n**Started**: ${getUtc8ISOString()}\n\n---\n\n`)
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 5: Task Execution Loop
|
|
||||||
|
|
||||||
**User Confirmation** before execution:
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// Pre-execution confirmation gate; bypassed entirely in --yes mode.
// AskUserQuestion is a workflow-provided tool, not standard JS.
if (!autoYes) {
  const action = AskUserQuestion({
    questions: [{
      // Summarize every pending task inline so the user confirms the real plan.
      question: `Execute ${tasks.length} tasks?\n${tasks.map(t => `  ${t.id}: ${t.title} (${t.priority})`).join('\n')}`,
      header: "Confirm",
      multiSelect: false,
      options: [
        { label: "Start", description: "Execute all tasks serially" },
        { label: "Adjust", description: "Modify .task/*.json before execution" },
        { label: "Skip", description: "Keep .task/*.json, skip execution" }
      ]
    }]
  })
  // "Adjust": user edits task files, then resumes
  // "Skip": end — user can execute later separately
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Execute tasks serially using `task.implementation` steps and `task.files[].changes` as guidance.
|
|
||||||
|
|
||||||
```
|
|
||||||
For each taskId in executionOrder:
|
|
||||||
├─ Load task from .task/{taskId}.json
|
|
||||||
├─ Check dependencies satisfied
|
|
||||||
├─ Record START event → execution-events.md
|
|
||||||
├─ Execute using task.implementation + task.files[].changes:
|
|
||||||
│ ├─ Read target files listed in task.files[]
|
|
||||||
│ ├─ Apply modifications described in files[].changes / files[].change
|
|
||||||
│ ├─ Follow implementation[].actions sequence
|
|
||||||
│ └─ Use Edit (preferred), Write (new files), Bash (build/test)
|
|
||||||
├─ Verify convergence:
|
|
||||||
│ ├─ Check each convergence.criteria[] item
|
|
||||||
│ ├─ Run convergence.verification (if executable command)
|
|
||||||
│ └─ Record verification results
|
|
||||||
├─ Record COMPLETE/FAIL event → execution-events.md
|
|
||||||
├─ Update execution.md task status
|
|
||||||
└─ Continue to next task
|
|
||||||
```
|
|
||||||
|
|
||||||
**Execution Guidance Priority** — what the AI follows when executing each task:
|
|
||||||
|
|
||||||
| Priority | Source | Example |
|
|
||||||
|----------|--------|---------|
|
|
||||||
| 1 | `files[].changes` / `files[].change` | "Add await to refreshToken() call at line 89" |
|
|
||||||
| 2 | `implementation[].actions` | ["Read token.ts", "Add await keyword at line 89"] |
|
|
||||||
| 3 | `implementation[].description` | "Add await to refreshToken() call in token.ts" |
|
|
||||||
| 4 | `task.description` | "Token refresh fails silently..." |
|
|
||||||
|
|
||||||
When `files[].changes` is populated, the AI has concrete instructions. When empty, it falls back to `implementation` steps, then to `description`.
|
|
||||||
|
|
||||||
### Step 5.1: Failure Handling
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// On task failure, ask user how to proceed (skipped in --yes mode — the
// auto-yes behavior on failure is not shown here; TODO confirm it).
// AskUserQuestion is a workflow-provided tool, not standard JS.
if (!autoYes) {
  AskUserQuestion({
    questions: [{
      question: `Task ${task.id} failed: ${errorMessage}\nHow to proceed?`,
      header: "Failure",
      multiSelect: false,
      options: [
        { label: "Skip & Continue", description: "Skip this task, continue with next" },
        { label: "Retry", description: "Retry this task" },
        { label: "Abort", description: "Stop execution, keep progress" }
      ]
    }]
  })
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 6: Finalize
|
|
||||||
|
|
||||||
After all tasks complete:
|
|
||||||
|
|
||||||
1. Append execution summary to `execution.md` (statistics, task results table)
|
|
||||||
2. Append session footer to `execution-events.md`
|
|
||||||
3. Write back `_execution` state to each `.task/*.json`:
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// Persist execution outcome back into each task file so .task/*.json is the
// single source of truth after the run. In-memory `_status` / `_executed_at` /
// `_result` fields (set during the execution loop) are folded into durable
// `status` / `executed_at` / `result` keys. `Write` is a workflow tool.
tasks.forEach(task => {
  const updated = {
    ...task,
    status: task._status, // "completed" | "failed" | "skipped"
    executed_at: task._executed_at,
    result: {
      success: task._status === 'completed',
      // Defaults keep the schema stable even when a task never ran.
      files_modified: task._result?.files_modified || [],
      summary: task._result?.summary || '',
      error: task._result?.error || null,
      convergence_verified: task._result?.convergence_verified || []
    }
  }
  Write(`${sessionFolder}/.task/${task.id}.json`, JSON.stringify(updated, null, 2))
})
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 6.1: Post-Execution Options
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// Post-execution menu (skipped in --yes mode). completedTasks/failedTasks are
// presumably Sets maintained by the execution loop — not shown here.
// AskUserQuestion is a workflow-provided tool, not standard JS.
if (!autoYes) {
  AskUserQuestion({
    questions: [{
      question: `Execution complete: ${completedTasks.size}/${tasks.length} succeeded. Next:`,
      header: "Post-Execute",
      multiSelect: false,
      options: [
        { label: "Retry Failed", description: `Re-execute ${failedTasks.size} failed tasks` },
        { label: "View Events", description: "Display execution-events.md" },
        { label: "Create Issue", description: "Create issue from failed tasks" },
        { label: "Done", description: "End workflow" }
      ]
    }]
  })
}
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Output Structure
|
|
||||||
|
|
||||||
```
|
|
||||||
{sessionFolder}/
|
|
||||||
├── .task/ # Individual task JSON files (with _execution state after completion)
|
|
||||||
│ ├── TASK-001.json
|
|
||||||
│ └── ...
|
|
||||||
├── execution.md # Execution overview + task table + summary
|
|
||||||
└── execution-events.md # Chronological event log
|
|
||||||
```
|
|
||||||
|
|
||||||
## execution-events.md Event Format
|
|
||||||
|
|
||||||
```markdown
|
|
||||||
## {timestamp} — {task.id}: {task.title}
|
|
||||||
|
|
||||||
**Type**: {task.type} | **Priority**: {task.priority}
|
|
||||||
**Status**: IN PROGRESS
|
|
||||||
**Files**: {task.files[].path}
|
|
||||||
|
|
||||||
### Execution Log
|
|
||||||
- Read {file} ({lines} lines)
|
|
||||||
- Applied: {change description}
|
|
||||||
- ...
|
|
||||||
|
|
||||||
**Status**: COMPLETED / FAILED
|
|
||||||
**Files Modified**: {list}
|
|
||||||
|
|
||||||
#### Convergence Verification
|
|
||||||
- [x/] {criterion 1}
|
|
||||||
- [x/] {criterion 2}
|
|
||||||
- **Verification**: {command} → PASS/FAIL
|
|
||||||
|
|
||||||
---
|
|
||||||
```
|
|
||||||
@@ -10,14 +10,14 @@ argument-hint: "TOPIC=\"<question or topic>\" [--depth=quick|standard|deep] [--c
|
|||||||
|
|
||||||
Interactive collaborative analysis workflow with **documented discussion process**. Records understanding evolution, facilitates multi-round Q&A, and uses inline search tools for deep exploration.
|
Interactive collaborative analysis workflow with **documented discussion process**. Records understanding evolution, facilitates multi-round Q&A, and uses inline search tools for deep exploration.
|
||||||
|
|
||||||
**Core workflow**: Topic → Explore → Discuss → Document → Refine → Conclude → (Optional) Quick Execute
|
**Core workflow**: Topic → Explore → Discuss → Document → Refine → Conclude → Plan Checklist
|
||||||
|
|
||||||
**Key features**:
|
**Key features**:
|
||||||
- **Documented discussion timeline**: Captures understanding evolution across all phases
|
- **Documented discussion timeline**: Captures understanding evolution across all phases
|
||||||
- **Decision recording at every critical point**: Mandatory recording of key findings, direction changes, and trade-offs
|
- **Decision recording at every critical point**: Mandatory recording of key findings, direction changes, and trade-offs
|
||||||
- **Multi-perspective analysis**: Supports up to 4 analysis perspectives (serial, inline)
|
- **Multi-perspective analysis**: Supports up to 4 analysis perspectives (serial, inline)
|
||||||
- **Interactive discussion**: Multi-round Q&A with user feedback and direction adjustments
|
- **Interactive discussion**: Multi-round Q&A with user feedback and direction adjustments
|
||||||
- **Quick execute**: Convert conclusions directly to executable tasks
|
- **Plan output**: Generate structured plan checklist for downstream execution (e.g., `$csv-wave-pipeline`)
|
||||||
|
|
||||||
### Decision Recording Protocol
|
### Decision Recording Protocol
|
||||||
|
|
||||||
@@ -128,17 +128,11 @@ Step 4: Synthesis & Conclusion
|
|||||||
├─ Consolidate all insights → conclusions.json (with steps[] per recommendation)
|
├─ Consolidate all insights → conclusions.json (with steps[] per recommendation)
|
||||||
├─ Update discussion.md with final synthesis
|
├─ Update discussion.md with final synthesis
|
||||||
├─ Interactive Recommendation Review (per-recommendation confirm/modify/reject)
|
├─ Interactive Recommendation Review (per-recommendation confirm/modify/reject)
|
||||||
└─ Offer options: quick execute / create issue / generate task / export / done
|
└─ Offer options: generate plan / create issue / export / done
|
||||||
|
|
||||||
Step 5: Execute (Optional - user selects, routes by complexity)
|
Step 5: Plan Generation (Optional - produces plan only, NO code modifications)
|
||||||
├─ Simple (≤2 recs): Direct inline execution → summary in discussion.md
|
├─ Generate inline plan checklist → appended to discussion.md
|
||||||
└─ Complex (≥3 recs): EXECUTE.md pipeline
|
└─ Remind user to execute via $csv-wave-pipeline
|
||||||
├─ Enrich recommendations → generate .task/TASK-*.json
|
|
||||||
├─ Pre-execution analysis (dependencies, file conflicts, execution order)
|
|
||||||
├─ User confirmation
|
|
||||||
├─ Direct inline execution (Read/Edit/Write/Grep/Glob/Bash)
|
|
||||||
├─ Record events → execution-events.md, update execution.md
|
|
||||||
└─ Report completion summary
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Configuration
|
## Configuration
|
||||||
@@ -817,7 +811,7 @@ for (const [index, rec] of sortedRecs.entries()) {
|
|||||||
|
|
||||||
##### Step 4.4: Post-Completion Options
|
##### Step 4.4: Post-Completion Options
|
||||||
|
|
||||||
**Complexity Assessment** — determine whether .task/*.json generation is warranted:
|
**Complexity Assessment** — determine available options:
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
// Assess recommendation complexity to decide available options
|
// Assess recommendation complexity to decide available options
|
||||||
@@ -833,9 +827,9 @@ function assessComplexity(recs) {
|
|||||||
|
|
||||||
// Complexity → available options mapping:
|
// Complexity → available options mapping:
|
||||||
// none: Done | Create Issue | Export Report
|
// none: Done | Create Issue | Export Report
|
||||||
// simple: Done | Create Issue | Export Report (no task generation — overkill)
|
// simple: Done | Create Issue | Export Report
|
||||||
// moderate: Done | Generate Task | Create Issue | Export Report
|
// moderate: Generate Plan | Create Issue | Export Report | Done
|
||||||
// complex: Quick Execute | Generate Task | Create Issue | Export Report | Done
|
// complex: Generate Plan | Create Issue | Export Report | Done
|
||||||
```
|
```
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
@@ -850,9 +844,9 @@ if (!autoYes) {
|
|||||||
}]
|
}]
|
||||||
})
|
})
|
||||||
} else {
|
} else {
|
||||||
// Auto mode: generate .task/*.json only for moderate/complex, skip for simple/none
|
// Auto mode: generate plan only for moderate/complex, skip for simple/none
|
||||||
if (complexity === 'complex' || complexity === 'moderate') {
|
if (complexity === 'complex' || complexity === 'moderate') {
|
||||||
// → Phase 5 Step 5.1-5.2 (task generation only, no execution)
|
// → Phase 5 (plan generation only, NO code modifications)
|
||||||
} else {
|
} else {
|
||||||
// → Done (conclusions.json is sufficient output)
|
// → Done (conclusions.json is sufficient output)
|
||||||
}
|
}
|
||||||
@@ -865,14 +859,13 @@ if (!autoYes) {
|
|||||||
|------------|-------------------|-----------|
|
|------------|-------------------|-----------|
|
||||||
| `none` | Done, Create Issue, Export Report | No actionable recommendations |
|
| `none` | Done, Create Issue, Export Report | No actionable recommendations |
|
||||||
| `simple` | Done, Create Issue, Export Report | 1-2 low-priority items don't warrant formal task JSON |
|
| `simple` | Done, Create Issue, Export Report | 1-2 low-priority items don't warrant formal task JSON |
|
||||||
| `moderate` | Generate Task, Create Issue, Export Report, Done | Task structure helpful but execution not urgent |
|
| `moderate` | Generate Plan, Create Issue, Export Report, Done | Task structure helpful for downstream execution |
|
||||||
| `complex` | Quick Execute, Generate Task, Create Issue, Export Report, Done | Full pipeline justified |
|
| `complex` | Generate Plan, Create Issue, Export Report, Done | Full plan generation justified |
|
||||||
|
|
||||||
| Selection | Action |
|
| Selection | Action |
|
||||||
|-----------|--------|
|
|-----------|--------|
|
||||||
| Quick Execute | Jump to Phase 5 (only reviewed recs with status accepted/modified) |
|
| Generate Plan | Jump to Phase 5 (plan generation only, NO code modifications) |
|
||||||
| Create Issue | `Skill(skill="issue:new", args="...")` (only reviewed recs) |
|
| Create Issue | `Skill(skill="issue:new", args="...")` (only reviewed recs) |
|
||||||
| Generate Task | Jump to Phase 5 Step 5.1-5.2 only (generate .task/*.json, no execution) |
|
|
||||||
| Export Report | Copy discussion.md + conclusions.json to user-specified location |
|
| Export Report | Copy discussion.md + conclusions.json to user-specified location |
|
||||||
| Done | Display artifact paths, end |
|
| Done | Display artifact paths, end |
|
||||||
|
|
||||||
@@ -883,96 +876,64 @@ if (!autoYes) {
|
|||||||
- User offered meaningful next step options
|
- User offered meaningful next step options
|
||||||
- **Complete decision trail** documented and traceable from initial scoping to final conclusions
|
- **Complete decision trail** documented and traceable from initial scoping to final conclusions
|
||||||
|
|
||||||
### Phase 5: Execute (Optional)
|
### Phase 5: Plan Generation (Optional — NO code modifications)
|
||||||
|
|
||||||
**Objective**: Execute analysis recommendations — route by complexity.
|
**Objective**: Generate structured plan checklist from analysis recommendations. **This phase produces plans only — it does NOT modify any source code.**
|
||||||
|
|
||||||
**Trigger**: User selects "Quick Execute" in Phase 4. In auto mode, triggered only for `moderate`/`complex` recommendations.
|
**Trigger**: User selects "Generate Plan" in Phase 4. In auto mode, triggered only for `moderate`/`complex` recommendations.
|
||||||
|
|
||||||
**Routing Logic**:
|
|
||||||
|
|
||||||
```
|
|
||||||
complexity assessment (from Phase 4.3)
|
|
||||||
├─ simple/moderate (≤2 recommendations, clear changes)
|
|
||||||
│ └─ Direct inline execution — no .task/*.json overhead
|
|
||||||
└─ complex (≥3 recommendations, or high-priority with dependencies)
|
|
||||||
└─ Route to EXECUTE.md — full pipeline (task generation → execution)
|
|
||||||
```
|
|
||||||
|
|
||||||
##### Step 5.1: Route by Complexity
|
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
const recs = conclusions.recommendations || []
|
const recs = conclusions.recommendations || []
|
||||||
|
|
||||||
if (recs.length >= 3 || recs.some(r => r.priority === 'high')) {
|
// Build plan checklist from all accepted/modified recommendations
|
||||||
// COMPLEX PATH → EXECUTE.md pipeline
|
const planChecklist = recs
|
||||||
// Full specification: EXECUTE.md
|
.filter(r => r.review_status !== 'rejected')
|
||||||
// Flow: load all context → generate .task/*.json → pre-execution analysis → serial execution → finalize
|
.map((rec, index) => {
|
||||||
} else {
|
const files = rec.evidence_refs
|
||||||
// SIMPLE PATH → direct inline execution (below)
|
?.filter(ref => ref.includes(':'))
|
||||||
}
|
.map(ref => ref.split(':')[0]) || []
|
||||||
```
|
|
||||||
|
|
||||||
##### Step 5.2: Simple Path — Direct Inline Execution
|
return `### ${index + 1}. ${rec.action}
|
||||||
|
- **Priority**: ${rec.priority}
|
||||||
For simple/moderate recommendations, execute directly without .task/*.json ceremony:
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// For each recommendation:
|
|
||||||
recs.forEach((rec, index) => {
|
|
||||||
// 1. Locate relevant files from evidence_refs or codebase search
|
|
||||||
const files = rec.evidence_refs
|
|
||||||
?.filter(ref => ref.includes(':'))
|
|
||||||
.map(ref => ref.split(':')[0]) || []
|
|
||||||
|
|
||||||
// 2. Read each target file
|
|
||||||
files.forEach(filePath => Read(filePath))
|
|
||||||
|
|
||||||
// 3. Apply changes based on rec.action + rec.rationale
|
|
||||||
// Use Edit (preferred) for modifications, Write for new files
|
|
||||||
|
|
||||||
// 4. Log to discussion.md — append execution summary
|
|
||||||
})
|
|
||||||
|
|
||||||
// Append execution summary to discussion.md
|
|
||||||
appendToDiscussion(`
|
|
||||||
## Quick Execution Summary
|
|
||||||
|
|
||||||
- **Recommendations executed**: ${recs.length}
|
|
||||||
- **Completed**: ${getUtc8ISOString()}
|
|
||||||
|
|
||||||
${recs.map((rec, i) => `### ${i+1}. ${rec.action}
|
|
||||||
- **Status**: completed/failed
|
|
||||||
- **Rationale**: ${rec.rationale}
|
- **Rationale**: ${rec.rationale}
|
||||||
|
- **Target files**: ${files.join(', ') || 'TBD'}
|
||||||
- **Evidence**: ${rec.evidence_refs?.join(', ') || 'N/A'}
|
- **Evidence**: ${rec.evidence_refs?.join(', ') || 'N/A'}
|
||||||
`).join('\n')}
|
- [ ] Ready for execution`
|
||||||
|
}).join('\n\n')
|
||||||
|
|
||||||
|
// Append plan checklist to discussion.md
|
||||||
|
appendToDiscussion(`
|
||||||
|
## Plan Checklist
|
||||||
|
|
||||||
|
> **This is a plan only — no code was modified.**
|
||||||
|
> To execute, use: \`$csv-wave-pipeline "<requirement summary>"\`
|
||||||
|
|
||||||
|
- **Recommendations**: ${recs.length}
|
||||||
|
- **Generated**: ${getUtc8ISOString()}
|
||||||
|
|
||||||
|
${planChecklist}
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Next Step: Execute
|
||||||
|
|
||||||
|
Run \`$csv-wave-pipeline\` to execute these recommendations as wave-based batch tasks:
|
||||||
|
|
||||||
|
\`\`\`bash
|
||||||
|
$csv-wave-pipeline "${topic}"
|
||||||
|
\`\`\`
|
||||||
`)
|
`)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Simple path characteristics**:
|
**Characteristics**:
|
||||||
- No `.task/*.json` generation
|
- Plan checklist appended directly to `discussion.md`
|
||||||
- No `execution.md` / `execution-events.md`
|
- **No code modifications** — plan output only
|
||||||
- Execution summary appended directly to `discussion.md`
|
- Reminds user to use `$csv-wave-pipeline` for execution
|
||||||
- Suitable for 1-2 clear, low-risk recommendations
|
|
||||||
|
|
||||||
##### Step 5.3: Complex Path — EXECUTE.md Pipeline
|
|
||||||
|
|
||||||
For complex recommendations, follow the full specification in `EXECUTE.md`:
|
|
||||||
|
|
||||||
1. **Load context sources**: Reuse in-memory artifacts or read from disk
|
|
||||||
2. **Enrich recommendations**: Resolve target files, generate implementation steps, build convergence criteria
|
|
||||||
3. **Generate `.task/*.json`**: Individual task files with full execution context
|
|
||||||
4. **Pre-execution analysis**: Dependency validation, file conflicts, topological sort
|
|
||||||
5. **User confirmation**: Present task list, allow adjustment
|
|
||||||
6. **Serial execution**: Execute each task following generated implementation steps
|
|
||||||
7. **Finalize**: Update task states, write execution artifacts
|
|
||||||
|
|
||||||
**Full specification**: `EXECUTE.md`
|
|
||||||
|
|
||||||
**Success Criteria**:
|
**Success Criteria**:
|
||||||
- Simple path: recommendations executed, summary in discussion.md
|
- Plan checklist in discussion.md with all accepted recommendations
|
||||||
- Complex path: `.task/*.json` generated with quality validation, execution tracked via execution.md + execution-events.md
|
- User reminded about `$csv-wave-pipeline` for execution
|
||||||
- Execution route chosen correctly based on complexity assessment
|
- **No source code modified** — strictly plan output
|
||||||
|
|
||||||
## Output Structure
|
## Output Structure
|
||||||
|
|
||||||
@@ -989,11 +950,11 @@ For complex recommendations, follow the full specification in `EXECUTE.md`:
|
|||||||
└── conclusions.json # Phase 4: Final synthesis with recommendations
|
└── conclusions.json # Phase 4: Final synthesis with recommendations
|
||||||
```
|
```
|
||||||
|
|
||||||
> **Phase 5 complex path** adds `.task/`, `execution.md`, `execution-events.md` — see `EXECUTE.md` for structure.
|
> **Phase 5** appends a plan checklist to `discussion.md`. No additional files are generated.
|
||||||
|
|
||||||
| File | Phase | Description |
|
| File | Phase | Description |
|
||||||
|------|-------|-------------|
|
|------|-------|-------------|
|
||||||
| `discussion.md` | 1-4 | Session metadata → discussion timeline → conclusions. Simple execution summary appended here. |
|
| `discussion.md` | 1-5 | Session metadata → discussion timeline → conclusions. Plan checklist appended here (simple path). |
|
||||||
| `exploration-codebase.json` | 2 | Codebase context: relevant files, patterns, constraints |
|
| `exploration-codebase.json` | 2 | Codebase context: relevant files, patterns, constraints |
|
||||||
| `explorations/*.json` | 2 | Per-perspective exploration results (multi only) |
|
| `explorations/*.json` | 2 | Per-perspective exploration results (multi only) |
|
||||||
| `explorations.json` | 2 | Single perspective aggregated findings |
|
| `explorations.json` | 2 | Single perspective aggregated findings |
|
||||||
@@ -1158,16 +1119,13 @@ Remaining questions or areas for investigation
|
|||||||
| User timeout in discussion | Save state, show resume command | Use `--continue` to resume |
|
| User timeout in discussion | Save state, show resume command | Use `--continue` to resume |
|
||||||
| Max rounds reached (5) | Force synthesis phase | Highlight remaining questions in conclusions |
|
| Max rounds reached (5) | Force synthesis phase | Highlight remaining questions in conclusions |
|
||||||
| Session folder conflict | Append timestamp suffix | Create unique folder and continue |
|
| Session folder conflict | Append timestamp suffix | Create unique folder and continue |
|
||||||
| Quick execute: task fails | Record failure, ask user | Retry, skip, or abort (see EXECUTE.md) |
|
| Plan generation: no recommendations | No plan to generate | Inform user, suggest lite-plan |
|
||||||
| Quick execute: verification fails | Mark as unverified | Note in events, manual check |
|
|
||||||
| Quick execute: no recommendations | Cannot generate .task/*.json | Inform user, suggest lite-plan |
|
|
||||||
| Quick execute: simple recommendations | Complexity too low for .task/*.json | Direct inline execution (no task generation) |
|
|
||||||
|
|
||||||
## Best Practices
|
## Best Practices
|
||||||
|
|
||||||
### Core Principles
|
### Core Principles
|
||||||
|
|
||||||
1. **Explicit user confirmation required before code modifications**: The analysis phase is strictly read-only. Any code changes (Phase 5 quick execute) require user approval.
|
1. **No code modifications**: This skill is strictly read-only and plan-only. Phase 5 generates plan checklists in `discussion.md` but does NOT modify source code. Use `$csv-wave-pipeline` for execution.
|
||||||
|
|
||||||
### Before Starting Analysis
|
### Before Starting Analysis
|
||||||
|
|
||||||
@@ -1204,10 +1162,11 @@ Remaining questions or areas for investigation
|
|||||||
- Building shared understanding before implementation
|
- Building shared understanding before implementation
|
||||||
- Want to document how understanding evolved
|
- Want to document how understanding evolved
|
||||||
|
|
||||||
**Use Quick Execute (Phase 5) when:**
|
**Use Plan Generation (Phase 5) when:**
|
||||||
- Analysis conclusions contain clear, actionable recommendations
|
- Analysis conclusions contain clear, actionable recommendations
|
||||||
- Simple: 1-2 clear changes → direct inline execution (no .task/ overhead)
|
- Simple: 1-2 items → inline plan checklist in discussion.md
|
||||||
- Complex: 3+ recommendations with dependencies → EXECUTE.md pipeline (.task/*.json → serial execution)
|
- Complex: 3+ recommendations → detailed plan checklist
|
||||||
|
- **Then execute via**: `$csv-wave-pipeline` for wave-based batch execution
|
||||||
|
|
||||||
**Consider alternatives when:**
|
**Consider alternatives when:**
|
||||||
- Specific bug diagnosis needed → use `debug-with-file`
|
- Specific bug diagnosis needed → use `debug-with-file`
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
---
|
---
|
||||||
name: roadmap-with-file
|
name: roadmap-with-file
|
||||||
description: Strategic requirement roadmap with iterative decomposition and issue creation. Outputs roadmap.md (human-readable, single source) + issues.jsonl (machine-executable). Handoff to team-planex.
|
description: Strategic requirement roadmap with iterative decomposition and issue creation. Outputs roadmap.md (human-readable, single source) + issues.jsonl (machine-executable). Handoff to csv-wave-pipeline.
|
||||||
argument-hint: "[-y|--yes] [-c|--continue] [-m progressive|direct|auto] \"requirement description\""
|
argument-hint: "[-y|--yes] [-c|--continue] [-m progressive|direct|auto] \"requirement description\""
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -101,8 +101,8 @@ close_agent({ id: agentId })
|
|||||||
|
|
||||||
| Artifact | Purpose | Consumer |
|
| Artifact | Purpose | Consumer |
|
||||||
|----------|---------|----------|
|
|----------|---------|----------|
|
||||||
| `roadmap.md` | ⭐ Human-readable strategic roadmap with all context | Human review, team-planex handoff |
|
| `roadmap.md` | ⭐ Human-readable strategic roadmap with all context | Human review, csv-wave-pipeline handoff |
|
||||||
| `.workflow/issues/issues.jsonl` | Global issue store (appended) | team-planex, issue commands |
|
| `.workflow/issues/issues.jsonl` | Global issue store (appended) | csv-wave-pipeline, issue commands |
|
||||||
|
|
||||||
### Why No Separate JSON Files?
|
### Why No Separate JSON Files?
|
||||||
|
|
||||||
@@ -145,7 +145,7 @@ Strategic requirement roadmap with **iterative decomposition**. Creates a single
|
|||||||
│ │
|
│ │
|
||||||
│ Phase 4: Handoff │
|
│ Phase 4: Handoff │
|
||||||
│ ├─ Final roadmap.md with Issue ID references │
|
│ ├─ Final roadmap.md with Issue ID references │
|
||||||
│ ├─ Options: team-planex | first wave | view issues | done │
|
│ ├─ Options: csv-wave-pipeline | view issues | done │
|
||||||
│ └─ Issues ready in .workflow/issues/issues.jsonl │
|
│ └─ Issues ready in .workflow/issues/issues.jsonl │
|
||||||
│ │
|
│ │
|
||||||
└─────────────────────────────────────────────────────────────────────────┘
|
└─────────────────────────────────────────────────────────────────────────┘
|
||||||
@@ -180,7 +180,7 @@ Strategic requirement roadmap with **iterative decomposition**. Creates a single
|
|||||||
|
|
||||||
.workflow/issues/issues.jsonl # Global issue store (appended)
|
.workflow/issues/issues.jsonl # Global issue store (appended)
|
||||||
# - One JSON object per line
|
# - One JSON object per line
|
||||||
# - Consumed by team-planex, issue commands
|
# - Consumed by csv-wave-pipeline, issue commands
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
@@ -799,8 +799,7 @@ ${selectedMode === 'progressive' ? `**Progressive Mode**:
|
|||||||
type: "choice",
|
type: "choice",
|
||||||
prompt: `${issueIds.length} issues ready. Next step:`,
|
prompt: `${issueIds.length} issues ready. Next step:`,
|
||||||
options: [
|
options: [
|
||||||
{ value: "planex", label: "Execute with team-planex (Recommended)", description: `Run all ${issueIds.length} issues via team-planex` },
|
{ value: "csv-wave", label: "Execute with csv-wave-pipeline (Recommended)", description: `Run all ${issueIds.length} issues via wave-based batch execution` },
|
||||||
{ value: "wave1", label: "Execute first wave", description: "Run wave-1 issues only" },
|
|
||||||
{ value: "view", label: "View issues", description: "Display issue details" },
|
{ value: "view", label: "View issues", description: "Display issue details" },
|
||||||
{ value: "done", label: "Done", description: "Save and exit" }
|
{ value: "done", label: "Done", description: "Save and exit" }
|
||||||
]
|
]
|
||||||
@@ -814,14 +813,9 @@ ${selectedMode === 'progressive' ? `**Progressive Mode**:
|
|||||||
3. **Execute Selection**
|
3. **Execute Selection**
|
||||||
```javascript
|
```javascript
|
||||||
switch (nextStep) {
|
switch (nextStep) {
|
||||||
case 'planex':
|
case 'csv-wave':
|
||||||
// Launch team-planex with all issue IDs
|
// Launch csv-wave-pipeline for wave-based batch execution
|
||||||
Bash(`ccw skill team-planex ${issueIds.join(' ')}`)
|
console.log(`\nTo execute, run:\n\n $csv-wave-pipeline "${requirement}"\n`)
|
||||||
break
|
|
||||||
|
|
||||||
case 'wave1':
|
|
||||||
// Filter issues by wave-1 tag
|
|
||||||
Bash(`ccw skill team-planex --tag wave-1 --session ${sessionId}`)
|
|
||||||
break
|
break
|
||||||
|
|
||||||
case 'view':
|
case 'view':
|
||||||
@@ -836,7 +830,7 @@ ${selectedMode === 'progressive' ? `**Progressive Mode**:
|
|||||||
`Issues created: ${issueIds.length}`,
|
`Issues created: ${issueIds.length}`,
|
||||||
'',
|
'',
|
||||||
'To execute later:',
|
'To execute later:',
|
||||||
` $team-planex ${issueIds.slice(0, 3).join(' ')}...`,
|
` $csv-wave-pipeline "${requirement}"`,
|
||||||
` ccw issue list --session ${sessionId}`
|
` ccw issue list --session ${sessionId}`
|
||||||
].join('\n'))
|
].join('\n'))
|
||||||
break
|
break
|
||||||
@@ -872,7 +866,7 @@ ${selectedMode === 'progressive' ? `**Progressive Mode**:
|
|||||||
4. **Testable Convergence**: criteria = assertions, DoD = business language
|
4. **Testable Convergence**: criteria = assertions, DoD = business language
|
||||||
5. **Explicit Lifecycle**: Always close_agent after wait completes to free resources
|
5. **Explicit Lifecycle**: Always close_agent after wait completes to free resources
|
||||||
6. **DO NOT STOP**: Continuous workflow until handoff complete
|
6. **DO NOT STOP**: Continuous workflow until handoff complete
|
||||||
7. **Plan-Only Modifications**: Interactive feedback (Phase 3) MUST only update `roadmap.md` and `issues.jsonl`. NEVER modify source code, configuration files, or any project files during interactive rounds. Code changes happen only after handoff (Phase 4) via team-planex or other execution skills
|
7. **Plan-Only Modifications**: Interactive feedback (Phase 3) MUST only update `roadmap.md` and `issues.jsonl`. NEVER modify source code, configuration files, or any project files during interactive rounds. Code changes happen only after handoff (Phase 4) via `$csv-wave-pipeline` or other execution skills
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user