Mirror of https://github.com/catlog22/Claude-Code-Workflow.git (synced 2026-02-14 02:42:04 +08:00)
refactor: migrate workflow system from 6-field nested to unified flat task schema
- Schema: add shared_context to plan-overview-base-schema; add pre_analysis/artifacts/inherited and polymorphic implementation (string | object with tdd_phase) to task-schema
- Producer: action-planning-agent outputs flat fields (description, depends_on, focus_paths, convergence.criteria, files, implementation, pre_analysis) + plan.json generation
- Orchestrator: plan.md/tdd-plan.md validate plan.json; task-generate-agent/tdd output dual-layer
- Consumer: code-developer/tdd-developer/test-fix-agent/universal-executor read flat fields
- Execute/review: read plan.json for execution strategy, use flat field paths
- Remove all migration notes referencing old field names
@@ -118,6 +118,28 @@
       },
 
+      "shared_context": {
+        "type": "object",
+        "description": "Project-level shared context (tech stack, conventions).",
+        "properties": {
+          "tech_stack": {
+            "type": "array",
+            "items": { "type": "string" },
+            "description": "List of technologies in the stack"
+          },
+          "conventions": {
+            "type": "array",
+            "items": { "type": "string" },
+            "description": "Coding conventions"
+          },
+          "auth_strategy": {
+            "type": "string",
+            "description": "Authentication strategy"
+          }
+        },
+        "additionalProperties": true
+      },
+
 
       "_metadata": {
         "type": "object",
         "required": ["timestamp", "source", "plan_type"],
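
For illustration, a plan-level `shared_context` value that the schema fragment added above would accept might look like the sketch below. The concrete values and the extra `deploy_target` key are invented for the example; the extra key is only legal because the schema sets `additionalProperties: true`.

```javascript
// Illustrative only, not part of the commit: a shared_context object that
// conforms to the plan-overview-base-schema fragment added above.
const sharedContext = {
  tech_stack: ["Node.js", "TypeScript", "Express"],                 // array of strings
  conventions: ["ESLint + Prettier", "feature folders under src/"], // array of strings
  auth_strategy: "JWT with refresh tokens",                         // single string
  deploy_target: "docker"  // extra key, permitted by additionalProperties: true
};
```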
@@ -141,8 +141,30 @@
     "_comment_IMPLEMENTATION": "IMPLEMENTATION block (optional) - implementation guide",
     "implementation": {
       "type": "array",
-      "items": { "type": "string" },
-      "description": "Step-by-step implementation guide (from Solution Schema)"
+      "items": {
+        "oneOf": [
+          { "type": "string" },
+          {
+            "type": "object",
+            "required": ["step", "description"],
+            "properties": {
+              "step": { "type": "string", "description": "Step number/name" },
+              "description": { "type": "string", "description": "Step description" },
+              "tdd_phase": { "type": "string", "enum": ["red", "green", "refactor"], "description": "TDD phase" },
+              "actions": { "type": "array", "items": { "type": "string" }, "description": "List of concrete actions" },
+              "test_fix_cycle": {
+                "type": "object",
+                "properties": {
+                  "max_iterations": { "type": "integer", "default": 3 }
+                },
+                "description": "Test-fix cycle configuration"
+              }
+            },
+            "additionalProperties": false
+          }
+        ]
+      },
+      "description": "Step-by-step implementation guide: supports strings (simple steps) or objects (with TDD phase and other details)"
     },
     "test": {
       "type": "object",
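
Since `implementation` items can now be either plain strings or structured objects, a consumer may want to normalize them before execution. A minimal sketch under that assumption (the normalized shape simply mirrors the object variant defined above):

```javascript
// Sketch only: fold polymorphic implementation items into one shape.
// String items become objects with a generated step number and no TDD metadata.
function normalizeImplementationStep(item, index) {
  if (typeof item === "string") {
    return { step: String(index + 1), description: item, tdd_phase: null, actions: [], test_fix_cycle: null };
  }
  return {
    step: item.step,
    description: item.description,
    tdd_phase: item.tdd_phase || null,
    actions: item.actions || [],
    test_fix_cycle: item.test_fix_cycle || null
  };
}

// Usage: const steps = (task.implementation || []).map(normalizeImplementationStep);
```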
@@ -177,6 +199,11 @@
           "type": "array",
           "items": { "type": "string" },
           "description": "Quantified success metrics (e.g. 'response time <200ms', 'coverage >80%'; merged from verification_detail)"
+        },
+        "reusable_tools": {
+          "type": "array",
+          "items": { "type": "string" },
+          "description": "Reusable test tools/scripts"
         }
       },
       "additionalProperties": false,
@@ -376,6 +403,50 @@
       "description": "CLI execution configuration"
     },
 
+    "_comment_EXTENDED_CONTEXT": "EXTENDED CONTEXT block (optional) - extended execution context",
+    "pre_analysis": {
+      "type": "array",
+      "description": "Pre-execution analysis steps. Agent executes these before implementation.",
+      "items": {
+        "type": "object",
+        "required": ["step", "action"],
+        "properties": {
+          "step": { "type": "string", "description": "Step name" },
+          "action": { "type": "string", "description": "Description of the action to execute" },
+          "commands": { "type": "array", "items": { "type": "string" }, "description": "List of commands to execute" },
+          "command": { "type": "string", "description": "Single command to execute" },
+          "output_to": { "type": "string", "description": "Where to store the output" },
+          "on_error": { "type": "string", "enum": ["fail", "skip_optional", "continue"], "description": "Error handling strategy" }
+        },
+        "additionalProperties": false
+      }
+    },
+    "artifacts": {
+      "type": "array",
+      "description": "Brainstorming artifact references for context.",
+      "items": {
+        "type": "object",
+        "properties": {
+          "type": { "type": "string", "description": "Artifact type" },
+          "source": { "type": "string", "description": "Artifact source" },
+          "path": { "type": "string", "description": "Artifact path" },
+          "feature_id": { "type": "string", "description": "Associated feature ID" },
+          "priority": { "type": "string", "description": "Priority" },
+          "usage": { "type": "string", "description": "Usage notes" }
+        },
+        "additionalProperties": true
+      }
+    },
+    "inherited": {
+      "type": "object",
+      "description": "Context inherited from parent task.",
+      "properties": {
+        "from": { "type": "string", "description": "Parent task ID" },
+        "context": { "type": "array", "items": { "type": "string" }, "description": "Inherited context entries" }
+      },
+      "additionalProperties": false
+    },
 
     "_comment_CONTEXT": "CONTEXT block (optional) - source and context",
     "source": {
       "type": "object",
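
One plausible way an executor could honor the `on_error` strategies enumerated above is sketched below. This is not the project's actual executor: `runStep` is a hypothetical stand-in for running `step.command` / `step.commands`, and the exact semantics of `continue` versus `skip_optional` are an assumption.

```javascript
// Sketch: run pre_analysis steps and route failures by on_error.
async function runPreAnalysis(preAnalysis, runStep) {
  const outputs = {};
  for (const step of preAnalysis || []) {
    try {
      const result = await runStep(step);                 // execute step.command / step.commands
      if (step.output_to) outputs[step.output_to] = result;
    } catch (err) {
      if (step.on_error === "fail") throw err;            // abort the whole task
      if (step.on_error === "skip_optional") continue;    // drop this step's output silently
      // "continue" (assumed semantics): record the failure and keep going
      if (step.output_to) outputs[step.output_to] = { error: String(err) };
    }
  }
  return outputs;
}
```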
@@ -489,7 +560,7 @@
   "additionalProperties": true,
 
   "_field_usage_by_producer": {
-    "workflow-plan": "IDENTITY + CLASSIFICATION + SCOPE + DEPENDENCIES + CONVERGENCE + FILES + EXECUTION(meta+cli_execution) + CONTEXT(context_package_path)",
+    "workflow-plan": "IDENTITY + CLASSIFICATION + SCOPE + DEPENDENCIES + CONVERGENCE + FILES + IMPLEMENTATION + EXECUTION(pre_analysis + artifacts + inherited + cli_execution + meta) + PLANNING(reference + rationale + risks + code_skeleton) + CONTEXT(context_package_path)",
     "lite-plan": "IDENTITY + CLASSIFICATION + DEPENDENCIES + CONVERGENCE + FILES",
     "lite-plan (v2)": "IDENTITY + CLASSIFICATION + SCOPE + DEPENDENCIES + CONVERGENCE + FILES(+change) + IMPLEMENTATION(+manual_checks +success_metrics) + PLANNING(reference + rationale + risks + code_skeleton)",
     "req-plan": "IDENTITY + CLASSIFICATION + SCOPE + DEPENDENCIES + CONVERGENCE + PLANNING(risks) + CONTEXT(inputs/outputs)",
@@ -22,7 +22,8 @@ color: yellow
 
 **Core Capabilities**:
 - Load and synthesize context from multiple sources (session metadata, context packages, brainstorming artifacts)
-- Generate task JSON files with 6-field schema and artifact integration
+- Generate task JSON files with unified flat schema (task-schema.json) and artifact integration
+- Generate plan.json (plan-overview-base-schema) as machine-readable plan overview
 - Create IMPL_PLAN.md and TODO_LIST.md with proper linking
 - Support both agent-mode and CLI-execute-mode workflows
 - Integrate MCP tools for enhanced context gathering
@@ -114,7 +115,7 @@ mcp__exa__get_code_context_exa(
   tokensNum="dynamic"
 )
 
-// Integration in flow_control.pre_analysis
+// Integration in pre_analysis
 {
   "step": "local_codebase_exploration",
   "action": "Explore codebase structure",
@@ -187,7 +188,7 @@ if (contextPackage.brainstorm_artifacts?.feature_index?.exists) {
   const featureIndex = JSON.parse(Read(contextPackage.brainstorm_artifacts.feature_index.path));
 
   // Step 2: Load only task-relevant feature specs (1-2 per task)
-  const taskFeatureIds = task.context.artifacts
+  const taskFeatureIds = task.artifacts
     .filter(a => a.type === 'feature_spec')
     .map(a => a.feature_id);
   featureIndex.features
@@ -201,7 +202,7 @@ if (contextPackage.brainstorm_artifacts?.feature_index?.exists) {
   // context-package uses full paths (".workflow/.../role/file.md")
   const crossCuttingFromPackage = contextPackage.brainstorm_artifacts.cross_cutting_specs || [];
   featureIndex.cross_cutting_specs
-    .filter(cs => task.context.artifacts.some(a => a.type === 'cross_cutting_spec'))
+    .filter(cs => task.artifacts.some(a => a.type === 'cross_cutting_spec'))
     .forEach(cs => {
       // Match by path suffix since feature-index uses relative paths
       const matched = crossCuttingFromPackage.find(pkg => pkg.path.endsWith(cs));
@@ -230,32 +231,36 @@ if (contextPackage.brainstorm_artifacts?.feature_index?.exists) {
    - Brainstorming artifacts (guidance, role analyses, synthesis)
    - Context package (project structure, dependencies, patterns)
 
-2. Generate task JSON files
-   - Apply 6-field schema (id, title, status, meta, context, flow_control)
-   - Integrate artifacts catalog into context.artifacts array
+2. Generate task JSON files (.task/IMPL-*.json)
+   - Apply unified flat schema (task-schema.json)
+   - Top-level fields: id, title, description, type, scope, depends_on, focus_paths, convergence, files, implementation, pre_analysis, artifacts, inherited, meta, cli_execution
    - Add quantified requirements and measurable acceptance criteria
 
-3. Create IMPL_PLAN.md
+3. Generate plan.json (plan-overview-base-schema)
+   - Machine-readable plan overview with task_ids[], shared_context, _metadata
+   - Extract shared_context from context package (tech_stack, conventions)
+
+4. Create IMPL_PLAN.md
    - Load template: Read(~/.ccw/workflows/cli-templates/prompts/workflow/impl-plan-template.txt)
    - Follow template structure and validation checklist
   - Populate all 8 sections with synthesized context
   - Document CCW workflow phase progression
   - Update quality gate status
 
-4. Generate TODO_LIST.md
+5. Generate TODO_LIST.md
    - Flat structure ([ ] for pending, [x] for completed)
    - Link to task JSONs and summaries
 
-5. Update session state for execution readiness
+6. Update session state for execution readiness
 ```
 
 ---
 
 ## 2. Output Specifications
 
-### 2.1 Task JSON Schema (6-Field)
+### 2.1 Task JSON Schema (Unified)
 
-Generate individual `.task/IMPL-*.json` files with the following structure:
+Generate individual `.task/IMPL-*.json` files following `task-schema.json` (`.ccw/workflows/cli-templates/schemas/task-schema.json`).
 
 #### Top-Level Fields
 
@@ -263,14 +268,42 @@ Generate individual `.task/IMPL-*.json` files with the following structure:
 {
   "id": "IMPL-N",
   "title": "Descriptive task name",
+  "description": "Goal and requirements narrative",
   "status": "pending|active|completed|blocked",
+  "type": "feature|bugfix|refactor|test-gen|test-fix|docs",
+  "scope": "src/auth",
+  "action": "Implement|Fix|Refactor",
+  "depends_on": ["IMPL-N"],
+  "focus_paths": ["src/auth", "tests/auth"],
+
+  "convergence": {
+    "criteria": ["3 features implemented: verify by npm test -- auth (exit code 0)"],
+    "verification": "npm test -- auth && ls src/auth/*.ts | wc -l",
+    "definition_of_done": "Authentication module fully functional"
+  },
+
+  "files": [
+    { "path": "src/auth/auth.service.ts", "action": "create", "change": "New auth service" },
+    { "path": "src/users/users.service.ts", "action": "modify", "change": "Update validateUser()" }
+  ],
+  "implementation": ["Step 1: ...", "Step 2: ..."],
+  "pre_analysis": [],
+  "artifacts": [],
+  "inherited": { "from": "IMPL-N", "context": ["..."] },
+
   "context_package_path": ".workflow/active/WFS-{session}/.process/context-package.json",
-  "cli_execution_id": "WFS-{session}-IMPL-N",
   "cli_execution": {
+    "id": "WFS-{session}-IMPL-N",
     "strategy": "new|resume|fork|merge_fork",
     "resume_from": "parent-cli-id",
     "merge_from": ["id1", "id2"]
-  }
+  },
+  "meta": { "..." },
+
+  "reference": {},
+  "rationale": {},
+  "risks": [],
+  "test": {}
 }
 ```
 
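
As a reading aid, a consumer of the flat layout above only needs top-level lookups; no nested `context` or `flow_control` traversal remains. A minimal Node.js sketch (the helper name and the selection of fields are illustrative, not part of the schema):

```javascript
// Sketch: load a flat task JSON and pull out the fields an executor typically needs.
const fs = require("fs");

function readFlatTask(path) {
  const task = JSON.parse(fs.readFileSync(path, "utf8"));
  return {
    goal: task.description,
    blockedBy: task.depends_on || [],
    criteria: task.convergence?.criteria || [],
    verify: task.convergence?.verification,
    targets: (task.files || []).map(f => f.path),
    steps: task.implementation || []
  };
}
```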
@@ -281,40 +314,31 @@ Generate individual `.task/IMPL-*.json` files with the following structure:
   - Prefix: A, B, C... (assigned by module detection order)
   - Sequence: 1, 2, 3... (per-module increment)
 - `title`: Descriptive task name summarizing the work
+- `description`: Goal and requirements narrative (prose format)
 - `status`: Task state - `pending` (not started), `active` (in progress), `completed` (done), `blocked` (waiting on dependencies)
-- `context_package_path`: Path to smart context package containing project structure, dependencies, and brainstorming artifacts catalog
-- `cli_execution_id`: Unique CLI conversation ID (format: `{session_id}-{task_id}`)
-- `cli_execution`: CLI execution strategy based on task dependencies
+- `type`: Task category from `meta.type` (promoted to top-level)
+- `scope`: Target directory or module scope
+- `action`: Primary action verb (Implement, Fix, Refactor)
+- `depends_on`: Prerequisite task IDs
+- `focus_paths`: Target directories/files
+- `convergence`: Structured completion criteria
+  - `criteria`: Measurable acceptance conditions
+  - `verification`: Executable verification command
+  - `definition_of_done`: Business-language completion definition
+- `files`: Target files with structured metadata
+  - `path`: File path
+  - `action`: create/modify/delete
+  - `change`: Description of change
+- `implementation`: Implementation steps. Supports polymorphic items: strings or objects with `{step, description, tdd_phase, actions, test_fix_cycle}`
+- `pre_analysis`: Pre-execution analysis steps
+- `artifacts`: Referenced brainstorming outputs
+- `inherited`: Context inherited from parent task
+- `context_package_path`: Path to smart context package
+- `cli_execution`: CLI execution strategy
+  - `id`: Unique CLI conversation ID (format: `{session_id}-{task_id}`)
   - `strategy`: Execution pattern (`new`, `resume`, `fork`, `merge_fork`)
-  - `resume_from`: Parent task's cli_execution_id (for resume/fork)
-  - `merge_from`: Array of parent cli_execution_ids (for merge_fork)
+  - `resume_from`: Parent task's cli_execution.id (for resume/fork)
+  - `merge_from`: Array of parent cli_execution.ids (for merge_fork)
 
-#### Schema Compatibility
-
-The 6-field task JSON is a **superset** of `task-schema.json` (the unified task schema at `.ccw/workflows/cli-templates/schemas/task-schema.json`). All generated `.task/IMPL-*.json` files are compatible with the unified schema via the following field mapping:
-
-| 6-Field Task JSON (this schema) | task-schema.json (unified) | Notes |
-|--------------------------------|---------------------------|-------|
-| `id` | `id` | Direct mapping |
-| `title` | `title` | Direct mapping |
-| `status` | `status` | Direct mapping |
-| `meta.type` | `type` | Flattened in unified schema |
-| `meta.agent` | `meta.agent` | Same path, preserved |
-| `meta.execution_config` | `meta.execution_config` | Same path, preserved |
-| `context.requirements` | `description` + `implementation` | Unified schema splits into goal description and step-by-step guide |
-| `context.acceptance` | `convergence.criteria` | **Key mapping**: acceptance criteria become convergence criteria |
-| `context.focus_paths` | `focus_paths` | Moved to top-level in unified schema |
-| `context.depends_on` | `depends_on` | Moved to top-level in unified schema |
-| `context.shared_context` | _(no direct equivalent)_ | 6-field extension for tech stack and conventions |
-| `context.artifacts` | `evidence` + `inputs` | Unified schema uses generic evidence/inputs arrays |
-| `flow_control.target_files` | `files[].path` | Unified schema uses structured file objects |
-| `flow_control.implementation_approach` | `implementation` | Unified schema uses flat string array |
-| `flow_control.pre_analysis` | _(no direct equivalent)_ | 6-field extension for pre-execution analysis |
-| `context_package_path` | `context_package_path` | Direct mapping |
-| `cli_execution_id` | `cli_execution.id` | Nested in unified schema |
-| `cli_execution` | `cli_execution` | Direct mapping |
-
-**Backward Compatibility**: The 6-field schema retains all existing fields. The unified schema fields (`convergence`, `depends_on` at top-level, `files`, `implementation`) are accepted as **optional aliases** when present. Consumers SHOULD check both locations (e.g., `convergence.criteria` OR `context.acceptance`).
-
 
 **CLI Execution Strategy Rules** (MANDATORY - apply to all tasks):
@@ -329,9 +353,9 @@ The 6-field task JSON is a **superset** of `task-schema.json` (the unified task
 **Strategy Selection Algorithm**:
 ```javascript
 function computeCliStrategy(task, allTasks) {
-  const deps = task.context?.depends_on || []
+  const deps = task.depends_on || []
   const childCount = allTasks.filter(t =>
-    t.context?.depends_on?.includes(task.id)
+    t.depends_on?.includes(task.id)
   ).length
 
   if (deps.length === 0) {
@@ -339,17 +363,17 @@ function computeCliStrategy(task, allTasks) {
   } else if (deps.length === 1) {
     const parentTask = allTasks.find(t => t.id === deps[0])
     const parentChildCount = allTasks.filter(t =>
-      t.context?.depends_on?.includes(deps[0])
+      t.depends_on?.includes(deps[0])
     ).length
 
     if (parentChildCount === 1) {
-      return { strategy: "resume", resume_from: parentTask.cli_execution_id }
+      return { strategy: "resume", resume_from: parentTask.cli_execution.id }
     } else {
-      return { strategy: "fork", resume_from: parentTask.cli_execution_id }
+      return { strategy: "fork", resume_from: parentTask.cli_execution.id }
     }
   } else {
     const mergeFrom = deps.map(depId =>
-      allTasks.find(t => t.id === depId).cli_execution_id
+      allTasks.find(t => t.id === depId).cli_execution.id
    )
    return { strategy: "merge_fork", merge_from: mergeFrom }
  }
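
A usage sketch for `computeCliStrategy` on a hypothetical four-task graph follows; the expected results are inferred from the strategy rules documented above (the `new` branch for dependency-free tasks is not visible in this hunk, so that output is an assumption):

```javascript
// Hypothetical task graph, illustrative IDs only.
const tasks = [
  { id: "IMPL-1", depends_on: [],                   cli_execution: { id: "WFS-001-IMPL-1" } },
  { id: "IMPL-2", depends_on: ["IMPL-1"],           cli_execution: { id: "WFS-001-IMPL-2" } },
  { id: "IMPL-3", depends_on: ["IMPL-1"],           cli_execution: { id: "WFS-001-IMPL-3" } },
  { id: "IMPL-4", depends_on: ["IMPL-2", "IMPL-3"], cli_execution: { id: "WFS-001-IMPL-4" } }
];

tasks.forEach(t => console.log(t.id, computeCliStrategy(t, tasks)));
// IMPL-1 -> { strategy: "new" }                                   (no dependencies)
// IMPL-2 -> { strategy: "fork", resume_from: "WFS-001-IMPL-1" }   (parent has 2 children)
// IMPL-3 -> { strategy: "fork", resume_from: "WFS-001-IMPL-1" }
// IMPL-4 -> { strategy: "merge_fork", merge_from: ["WFS-001-IMPL-2", "WFS-001-IMPL-3"] }
```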
@@ -392,7 +416,7 @@ userConfig.executionMethod → meta.execution_config
 
 "agent" →
   meta.execution_config = { method: "agent", cli_tool: null, enable_resume: false }
-  Execution: Agent executes pre_analysis, then directly implements implementation_approach
+  Execution: Agent executes pre_analysis, then directly implements implementation steps
 
 "cli" →
   meta.execution_config = { method: "cli", cli_tool: userConfig.preferredCliTool, enable_resume: true }
@@ -405,7 +429,7 @@ userConfig.executionMethod → meta.execution_config
 Final task JSON always has method = "agent" or "cli", never "hybrid"
 ```
 
-**IMPORTANT**: implementation_approach steps do NOT contain `command` fields. Execution routing is controlled by task-level `meta.execution_config.method` only.
+**IMPORTANT**: implementation steps do NOT contain `command` fields. Execution routing is controlled by task-level `meta.execution_config.method` only.
 
 **Test Task Extensions** (for type="test-gen" or type="test-fix"):
 
@@ -426,40 +450,19 @@ userConfig.executionMethod → meta.execution_config
 
 **Note**: CLI tool usage for test-fix tasks is now controlled via task-level `meta.execution_config.method`, not via `meta.use_codex`.
 
-#### Context Object
+#### Artifact Mapping
 
+All context fields (`description`, `depends_on`, `focus_paths`, `convergence`, `artifacts`, `inherited`) are now **top-level** in the task JSON. The `shared_context` (tech_stack, conventions) is stored in **plan.json** at the plan level, not per-task.
+
+**Quantification Rules** (apply to top-level fields):
+- `description`: **QUANTIFIED** requirements narrative (MUST include explicit counts and enumerated lists, e.g., "Implement 3 features: [auth, authz, session]")
+- `convergence.criteria`: **MEASURABLE** acceptance conditions (MUST include verification commands, e.g., "verify by ls ... | wc -l = N")
+- `focus_paths`: Target directories/files (concrete paths without wildcards)
+
+**Artifact Field** (`artifacts[]`):
+
 ```json
 {
-  "context": {
-    "requirements": [
-      "Implement 3 features: [authentication, authorization, session management]",
-      "Create 5 files: [auth.service.ts, auth.controller.ts, auth.middleware.ts, auth.types.ts, auth.test.ts]",
-      "Modify 2 existing functions: [validateUser() in users.service.ts lines 45-60, hashPassword() in utils.ts lines 120-135]"
-    ],
-    "focus_paths": ["src/auth", "tests/auth"],
-    "acceptance": [
-      "3 features implemented: verify by npm test -- auth (exit code 0)",
-      "5 files created: verify by ls src/auth/*.ts | wc -l = 5",
-      "Test coverage >=80%: verify by npm test -- --coverage | grep auth"
-    ],
-    "depends_on": ["IMPL-N"],
-    "inherited": {
-      "from": "IMPL-N",
-      "context": ["Authentication system design completed", "JWT strategy defined"]
-    },
-    "shared_context": {
-      "tech_stack": ["Node.js", "TypeScript", "Express"],
-      "auth_strategy": "JWT with refresh tokens",
-      "conventions": ["Follow existing auth patterns in src/auth/legacy/"]
-    },
-    "convergence": {
-      "criteria": [
-        "3 features implemented: verify by npm test -- auth (exit code 0)",
-        "5 files created: verify by ls src/auth/*.ts | wc -l = 5"
-      ],
-      "verification": "npm test -- auth && ls src/auth/*.ts | wc -l",
-      "definition_of_done": "Authentication module fully functional with all endpoints and tests passing"
-    },
   "artifacts": [
     {
       "type": "feature_spec|cross_cutting_spec|synthesis_specification|topic_framework|individual_role_analysis",
@@ -467,37 +470,22 @@ userConfig.executionMethod → meta.execution_config
       "path": "{from feature-index.json or artifacts_inventory}",
       "feature_id": "F-NNN (feature_spec only)",
       "priority": "highest|high|medium|low",
-      "usage": "Feature requirements and design specifications",
-      "contains": "feature_specific_requirements_and_design"
+      "usage": "Feature requirements and design specifications"
     }
   ]
-}
 }
 ```
 
-**Field Descriptions**:
-- `requirements`: **QUANTIFIED** implementation requirements (MUST include explicit counts and enumerated lists, e.g., "5 files: [list]")
-- `focus_paths`: Target directories/files (concrete paths without wildcards)
-- `acceptance`: **MEASURABLE** acceptance criteria (MUST include verification commands, e.g., "verify by ls ... | wc -l = N")
-- `convergence`: _(Optional, unified schema alias)_ Structured completion criteria object following `task-schema.json` format. When present, `convergence.criteria` maps to `acceptance`. Use **either** `acceptance` (6-field native) **or** `convergence` (unified schema native), not both. See [Schema Compatibility](#schema-compatibility) for full mapping.
-  - `criteria`: Array of testable completion conditions (equivalent to `acceptance`)
-  - `verification`: Executable verification command or steps
-  - `definition_of_done`: Business-language completion definition (non-technical)
-- `depends_on`: Prerequisite task IDs that must complete before this task starts
-- `inherited`: Context, patterns, and dependencies passed from parent task
-- `shared_context`: Tech stack, conventions, and architectural strategies for the task
-- `artifacts`: Referenced brainstorming outputs with detailed metadata
-
 **Artifact Mapping** (from context package):
 - **Feature-index mode** (when `feature_index` exists): Use feature-index.json as primary catalog
 - **Legacy mode** (fallback): Use `artifacts_inventory` from context package
 
 - **Artifact Types & Priority**:
   - **`feature_spec`** (Highest): Feature specification from feature-index.json
-    - `{type: "feature_spec", source: "brainstorm_feature_specs", path: "<spec_path>", feature_id: "<F-NNN>", priority: "highest", usage: "<task-specific usage>", contains: "<feature scope description>"}`
+    - `{type: "feature_spec", source: "brainstorm_feature_specs", path: "<spec_path>", feature_id: "<F-NNN>", priority: "highest", usage: "<task-specific usage>"}`
     - Each task references 1-2 feature specs based on task scope
   - **`cross_cutting_spec`** (High): Cross-cutting concern specification
-    - `{type: "cross_cutting_spec", source: "brainstorm_cross_cutting", path: "<spec_path>", priority: "high", usage: "<why this task needs it>", contains: "<cross-cutting scope>"}`
+    - `{type: "cross_cutting_spec", source: "brainstorm_cross_cutting", path: "<spec_path>", priority: "high", usage: "<why this task needs it>"}`
     - Load only when task touches shared concerns (auth, logging, error handling, etc.)
   - **`synthesis_specification`** (High): Integrated view with clarifications
   - **`topic_framework`** (High): guidance-specification.md
@@ -509,36 +497,25 @@ userConfig.executionMethod → meta.execution_config
 2. For each task, identify 1-2 primary features by matching task scope to feature `name`/`slug`
 3. Add matching feature specs as `feature_spec` artifacts with `feature_id` field
 4. Check `cross_cutting_refs` in matched features; add referenced cross-cutting specs as `cross_cutting_spec` artifacts
-5. Result: Each task's `context.artifacts[]` contains only the specs it needs (not all specs)
+5. Result: Each task's `artifacts[]` contains only the specs it needs (not all specs)
 
-#### Flow Control Object
+#### Pre-Analysis, Implementation & Files Fields
 
+These fields are **top-level** in the task JSON (not nested under any wrapper object).
+
 **IMPORTANT**: The `pre_analysis` examples below are **reference templates only**. Agent MUST dynamically select, adapt, and expand steps based on actual task requirements. Apply the principle of **"举一反三"** (draw inferences from examples) - use these patterns as inspiration to create task-specific analysis steps.
 
-```json
-{
-  "flow_control": {
-    "pre_analysis": [...],
-    "implementation_approach": [...],
-    "target_files": [...]
-  }
-}
-```
-
 **Test Task Extensions** (for type="test-gen" or type="test-fix"):
 
 ```json
 {
-  "flow_control": {
-    "pre_analysis": [...],
-    "implementation_approach": [...],
-    "target_files": [...],
-    "reusable_test_tools": [
+  "test": {
+    "reusable_tools": [
       "tests/helpers/testUtils.ts",
       "tests/fixtures/mockData.ts",
       "tests/setup/testSetup.ts"
     ],
-    "test_commands": {
+    "commands": {
       "run_tests": "npm test",
       "run_coverage": "npm test -- --coverage",
       "run_specific": "npm test -- {test_file}"
@@ -547,9 +524,9 @@ userConfig.executionMethod → meta.execution_config
   }
 ```
 
-**Test-Specific Fields**:
-- `reusable_test_tools`: List of existing test utility files to reuse (helpers, fixtures, mocks)
-- `test_commands`: Test execution commands from project config (package.json, pytest.ini)
+**Test-Specific Fields** (in `test` object):
+- `reusable_tools`: List of existing test utility files to reuse (helpers, fixtures, mocks)
+- `commands`: Test execution commands from project config (package.json, pytest.ini)
 
 ##### Pre-Analysis Patterns
 
@@ -572,7 +549,7 @@ userConfig.executionMethod → meta.execution_config
   },
   {
     "step": "load_brainstorm_artifacts",
-    "action": "Load brainstorm artifacts referenced by this task's context.artifacts[]",
+    "action": "Load brainstorm artifacts referenced by this task's artifacts[]",
     "commands": "<<PLAN-TIME EXPANSION: Replace with concrete Read() commands>>",
     "output_to": "brainstorm_context",
     "on_error": "skip_optional"
@@ -582,7 +559,7 @@ userConfig.executionMethod → meta.execution_config
 
 **Plan-Time Expansion Rule for `load_brainstorm_artifacts`**:
 
-When generating each task JSON, agent MUST expand this template step into concrete `Read()` commands based on the task's `context.artifacts[]` array. Since the agent writes both `context.artifacts[]` and `flow_control.pre_analysis[]` simultaneously, the artifact paths are known at plan time.
+When generating each task JSON, agent MUST expand this template step into concrete `Read()` commands based on the task's `artifacts[]` array. Since the agent writes both `artifacts[]` and `pre_analysis[]` simultaneously, the artifact paths are known at plan time.
 
 **Expansion Algorithm**:
 ```javascript
@@ -666,7 +643,7 @@ The examples above demonstrate **patterns**, not fixed requirements. Agent MUST:
 
 1. **Always Include** (Required):
    - `load_context_package` - Essential for all tasks
-   - `load_brainstorm_artifacts` - Load brainstorm artifacts referenced by task's `context.artifacts[]`; falls back to role analysis progressive loading when no feature_spec artifacts
+   - `load_brainstorm_artifacts` - Load brainstorm artifacts referenced by task's `artifacts[]`; falls back to role analysis progressive loading when no feature_spec artifacts
 
 2. **Progressive Addition of Analysis Steps**:
    Include additional analysis steps as needed for comprehensive planning:
@@ -693,11 +670,11 @@ The examples above demonstrate **patterns**, not fixed requirements. Agent MUST:
 
 **Key Principle**: Examples show **structure patterns**, not specific implementations. Agent must create task-appropriate steps dynamically.
 
-##### Implementation Approach
+##### Implementation Field
 
 **Execution Control**:
 
-The `implementation_approach` defines sequential implementation steps. Execution routing is controlled by **task-level `meta.execution_config.method`**, NOT by step-level `command` fields.
+The `implementation` field defines sequential implementation steps. Execution routing is controlled by **task-level `meta.execution_config.method`**, NOT by step-level `command` fields.
 
 **Two Execution Modes**:
 
@@ -728,7 +705,7 @@ The `implementation_approach` defines sequential implementation steps. Execution
 
 **Required fields**: `step`, `title`, `description`, `modification_points`, `logic_flow`, `depends_on`, `output`
 
-**IMPORTANT**: Do NOT add `command` field to implementation_approach steps. Execution routing is determined by task-level `meta.execution_config.method` only.
+**IMPORTANT**: Do NOT add `command` field to implementation steps. Execution routing is determined by task-level `meta.execution_config.method` only.
 
 **Example**:
 
@@ -774,27 +751,53 @@ The `implementation_approach` defines sequential implementation steps. Execution
 ]
 ```
 
-##### Target Files
+##### Files Field
 
+The `files[]` array specifies target files with structured metadata (see top-level `files` field in Task JSON Schema above).
+
+**Format**:
+- Each entry: `{ "path": "...", "action": "create|modify|delete", "change": "..." }`
+- New files: `action: "create"`
+- Existing files with modifications: `action: "modify"` with change description
+- Files to remove: `action: "delete"`
+
+### 2.2 plan.json Structure
+
+Generate at `.workflow/active/{session_id}/plan.json` following `plan-overview-base-schema.json`:
+
 ```json
 {
-  "target_files": [
-    "src/auth/auth.service.ts",
-    "src/auth/auth.controller.ts",
-    "src/auth/auth.middleware.ts",
-    "src/auth/auth.types.ts",
-    "tests/auth/auth.test.ts",
-    "src/users/users.service.ts:validateUser:45-60",
-    "src/utils/utils.ts:hashPassword:120-135"
-  ]
+  "summary": "Brief plan description",
+  "approach": "Implementation approach narrative",
+  "task_ids": ["IMPL-001", "IMPL-002"],
+  "task_count": 2,
+  "complexity": "Low|Medium|High",
+  "estimated_time": "Estimation string",
+  "recommended_execution": "Sequential|Parallel|Phased",
+  "shared_context": {
+    "tech_stack": ["TypeScript", "React", "Node.js"],
+    "conventions": ["ESLint", "Prettier", "Jest"]
+  },
+  "_metadata": {
+    "timestamp": "ISO-8601",
+    "source": "action-planning-agent",
+    "planning_mode": "agent-based",
+    "plan_type": "feature",
+    "schema_version": "2.0"
+  }
 }
 ```
 
-**Format**:
-- New files: `file_path`
-- Existing files with modifications: `file_path:function_name:line_range`
+**Data Sources**:
+- `task_ids`: Collected from generated `.task/IMPL-*.json` files
+- `shared_context.tech_stack`: From `contextPackage.project_context.tech_stack`
+- `shared_context.conventions`: From `contextPackage.project_context.coding_conventions`
+- `complexity`: From `analysis_results.complexity` or task count heuristic
+- `recommended_execution`: Based on task dependency graph analysis
 
-### 2.2 IMPL_PLAN.md Structure
+**Generation Timing**: After all `.task/IMPL-*.json` files are generated, aggregate into plan.json.
+
+### 2.3 IMPL_PLAN.md Structure
 
 **Template-Based Generation**:
 
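
The aggregation described under Data Sources and Generation Timing could be sketched roughly as follows. It assumes Node.js; the complexity and execution heuristics shown are placeholders, not values prescribed by the schema:

```javascript
// Rough sketch: aggregate generated .task/IMPL-*.json files into plan.json.
const fs = require("fs");
const path = require("path");

function buildPlanOverview(taskDir, contextPackage) {
  const tasks = fs.readdirSync(taskDir)
    .filter(f => /^IMPL-.*\.json$/.test(f))
    .map(f => JSON.parse(fs.readFileSync(path.join(taskDir, f), "utf8")));

  const hasDeps = tasks.some(t => (t.depends_on || []).length > 0);
  return {
    summary: "Brief plan description",
    approach: "Implementation approach narrative",
    task_ids: tasks.map(t => t.id),
    task_count: tasks.length,
    complexity: tasks.length <= 3 ? "Low" : tasks.length <= 6 ? "Medium" : "High", // heuristic placeholder
    recommended_execution: hasDeps ? "Sequential" : "Parallel",  // placeholder; real rule analyzes the dependency graph
    shared_context: {
      tech_stack: contextPackage.project_context?.tech_stack || [],
      conventions: contextPackage.project_context?.coding_conventions || []
    },
    _metadata: {
      timestamp: new Date().toISOString(),
      source: "action-planning-agent",
      planning_mode: "agent-based",
      plan_type: "feature",
      schema_version: "2.0"
    }
  };
}
```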
@@ -842,7 +845,7 @@ When multiple modules are detected (frontend/backend, etc.), organize IMPL_PLAN.
   - Example: `depends_on: ["CROSS::B::api-endpoint"]`
   - Integration phase resolves to actual task IDs: `CROSS::B::api → IMPL-B1`
 
-### 2.3 TODO_LIST.md Structure
+### 2.4 TODO_LIST.md Structure
 
 Generate at `.workflow/active/{session_id}/TODO_LIST.md`:
 
@@ -885,7 +888,7 @@ Generate at `.workflow/active/{session_id}/TODO_LIST.md`:
 - Completed tasks → summaries: `[✅](./.summaries/IMPL-XXX-summary.md)`
 - Consistent ID schemes: `IMPL-N` (single) or `IMPL-{prefix}{seq}` (multi-module)
 
-### 2.4 Complexity & Structure Selection
+### 2.5 Complexity & Structure Selection
 
 **Task Division Strategy**: Minimize task count while avoiding single-task overload. Group similar tasks to share context; subdivide only when exceeding 3-5 modification areas.
 
@@ -993,11 +996,11 @@ Use `analysis_results.complexity` or task count to determine structure:
 - Load IMPL_PLAN template: `Read(~/.ccw/workflows/cli-templates/prompts/workflow/impl-plan-template.txt)` before generating IMPL_PLAN.md
 - Use provided context package: Extract all information from structured context
 - Respect memory-first rule: Use provided content (already loaded from memory/file)
-- Follow 6-field schema: All task JSONs must have id, title, status, context_package_path, meta, context, flow_control
-- **Assign CLI execution IDs**: Every task MUST have `cli_execution_id` (format: `{session_id}-{task_id}`)
+- Follow unified flat schema: All task JSONs must have id, title, description, status, type, depends_on, convergence, files, implementation, meta, cli_execution
+- **Assign CLI execution IDs**: Every task MUST have `cli_execution.id` (format: `{session_id}-{task_id}`)
 - **Compute CLI execution strategy**: Based on `depends_on`, set `cli_execution.strategy` (new/resume/fork/merge_fork)
-- Map artifacts: Use artifacts_inventory to populate task.context.artifacts array
-- Add MCP integration: Include MCP tool steps in flow_control.pre_analysis when capabilities available
+- Map artifacts: Use artifacts_inventory to populate task.artifacts array
+- Add MCP integration: Include MCP tool steps in pre_analysis when capabilities available
 - Validate task count: Maximum 8 tasks (single module) or 6 tasks per module (multi-module), request re-scope if exceeded
 - Use session paths: Construct all paths using provided session_id
 - Link documents properly: Use correct linking format (📋 for JSON, ✅ for summaries)
@@ -49,26 +49,25 @@ Read(.workflow/active/${SESSION_ID}/.process/context-package.json)
 **Task JSON Parsing** (when task JSON path provided):
 Read task JSON and extract structured context:
 ```
-Task JSON Fields:
-├── context.requirements[]    → What to implement (list of requirements)
-├── context.acceptance[]      → How to verify (validation commands)
-├── context.focus_paths[]     → Where to focus (directories/files)
-├── context.shared_context    → Tech stack and conventions
+Task JSON Fields (unified flat structure):
+├── description               → What to implement (goal + requirements)
+├── convergence.criteria[]    → How to verify (validation commands)
+├── focus_paths[]             → Where to focus (directories/files)
+├── meta.shared_context       → Tech stack and conventions (if present)
 │   ├── tech_stack[]          → Technologies used (skip auto-detection if present)
 │   └── conventions[]         → Coding conventions to follow
-├── context.artifacts[]       → Additional context sources
-└── flow_control              → Execution instructions
-    ├── pre_analysis[]        → Context gathering steps (execute first)
-    ├── implementation_approach[] → Implementation steps (execute sequentially)
-    └── target_files[]        → Files to create/modify
+├── artifacts[]               → Additional context sources
+├── pre_analysis[]            → Context gathering steps (execute first)
+├── implementation[]          → Implementation steps (execute sequentially)
+└── files[]                   → Files to create/modify (files[].path)
 ```
 
 **Parsing Priority**:
 1. Read task JSON from provided path
-2. Extract `context.requirements` as implementation goals
-3. Extract `context.acceptance` as verification criteria
-4. If `context.shared_context.tech_stack` exists → skip auto-detection, use provided stack
-5. Process `flow_control` if present
+2. Extract `description` as implementation goals
+3. Extract `convergence.criteria` as verification criteria
+4. If `meta.shared_context.tech_stack` exists → skip auto-detection, use provided stack
+5. Process `pre_analysis` and `implementation` if present
 
 **Pre-Analysis: Smart Tech Stack Loading**:
 ```bash
@@ -104,20 +103,20 @@ fi
 STEP 1: Parse Task JSON (if path provided)
   → Read task JSON file from provided path
   → Extract and store in memory:
-    • [requirements] ← context.requirements[]
-    • [acceptance_criteria] ← context.acceptance[]
-    • [tech_stack] ← context.shared_context.tech_stack[] (skip auto-detection if present)
-    • [conventions] ← context.shared_context.conventions[]
-    • [focus_paths] ← context.focus_paths[]
+    • [requirements] ← description
+    • [acceptance_criteria] ← convergence.criteria[]
+    • [tech_stack] ← meta.shared_context.tech_stack[] (skip auto-detection if present)
+    • [conventions] ← meta.shared_context.conventions[]
+    • [focus_paths] ← focus_paths[]
 
-STEP 2: Execute Pre-Analysis (if flow_control.pre_analysis exists in Task JSON)
+STEP 2: Execute Pre-Analysis (if pre_analysis exists in Task JSON)
   → Execute each pre_analysis step sequentially
   → Store each step's output in memory using output_to variable name
   → These variables are available for STEP 3
 
 STEP 3: Execute Implementation (choose one path)
-  IF flow_control.implementation_approach exists:
-    → Follow implementation_approach steps sequentially
+  IF implementation[] exists:
+    → Follow implementation steps sequentially
     → Substitute [variable_name] placeholders with stored values BEFORE execution
   ELSE:
     → Use [requirements] as implementation goals
@@ -126,7 +125,7 @@ STEP 3: Execute Implementation (choose one path)
   → Verify against [acceptance_criteria] on completion
 ```
 
-**Pre-Analysis Execution** (flow_control.pre_analysis):
+**Pre-Analysis Execution** (pre_analysis):
 ```
 For each step in pre_analysis[]:
   step.step → Step identifier (string name)
@@ -175,7 +174,7 @@ Example Parsing:
 - Content search: `rg -i "authentication" src/ -C 3`
 
 **Implementation Approach Execution**:
-When task JSON contains `flow_control.implementation_approach` array:
+When task JSON contains `implementation` array:
 
 **Step Structure**:
 ```
@@ -197,7 +196,7 @@ const cliTool = task.meta?.execution_config?.cli_tool || getDefaultCliTool(); /
 
 // Phase 1: Execute pre_analysis (always by Agent)
 const preAnalysisResults = {};
-for (const step of task.flow_control.pre_analysis || []) {
+for (const step of task.pre_analysis || []) {
   const result = executePreAnalysisStep(step);
   preAnalysisResults[step.output_to] = result;
 }
@@ -213,7 +212,7 @@ IF executionMethod === 'cli':
 
 ELSE (executionMethod === 'agent'):
   // Execute implementation steps directly
-  FOR each step in implementation_approach[]:
+  FOR each step in implementation[]:
     1. Variable Substitution: Replace [variable_name] with preAnalysisResults
     2. Read modification_points[] as files to create/modify
     3. Read logic_flow[] as implementation sequence
@@ -242,7 +241,7 @@ function buildCliHandoffPrompt(preAnalysisResults, task, taskJsonPath) {
     .map(([key, value]) => `### ${key}\n${value}`)
     .join('\n\n');
 
-  const conventions = task.context.shared_context?.conventions?.join(' | ') || '';
+  const conventions = task.meta?.shared_context?.conventions?.join(' | ') || '';
   const constraints = `Follow existing patterns | No breaking changes${conventions ? ' | ' + conventions : ''}`;
 
   return `
@@ -253,22 +252,22 @@ Complete implementation based on pre-analyzed context and task JSON.
 Read full task definition: ${taskJsonPath}
 
 ## TECH STACK
-${task.context.shared_context?.tech_stack?.map(t => `- ${t}`).join('\n') || 'Auto-detect from project files'}
+${task.meta?.shared_context?.tech_stack?.map(t => `- ${t}`).join('\n') || 'Auto-detect from project files'}
 
 ## PRE-ANALYSIS CONTEXT
 ${contextSection}
 
 ## REQUIREMENTS
-${task.context.requirements?.map(r => `- ${r}`).join('\n') || task.context.requirements}
+${task.description || 'See task JSON'}
 
 ## ACCEPTANCE CRITERIA
-${task.context.acceptance?.map(a => `- ${a}`).join('\n') || task.context.acceptance}
+${task.convergence?.criteria?.map(a => `- ${a}`).join('\n') || 'See task JSON'}
 
 ## TARGET FILES
-${task.flow_control.target_files?.map(f => `- ${f}`).join('\n') || 'See task JSON modification_points'}
+${task.files?.map(f => `- ${f.path || f}`).join('\n') || 'See task JSON'}
 
 ## FOCUS PATHS
-${task.context.focus_paths?.map(p => `- ${p}`).join('\n') || 'See task JSON'}
+${task.focus_paths?.map(p => `- ${p}`).join('\n') || 'See task JSON'}
 
 MODE: write
 CONSTRAINTS: ${constraints}
@@ -283,13 +282,13 @@ function buildCliCommand(task, cliTool, cliPrompt) {
|
|||||||
|
|
||||||
switch (cli.strategy) {
|
switch (cli.strategy) {
|
||||||
case 'new':
|
case 'new':
|
||||||
return `${baseCmd} --tool ${cliTool} --mode write --id ${task.cli_execution_id}`;
|
return `${baseCmd} --tool ${cliTool} --mode write --id ${task.cli_execution.id}`;
|
||||||
case 'resume':
|
case 'resume':
|
||||||
return `${baseCmd} --resume ${cli.resume_from} --tool ${cliTool} --mode write`;
|
return `${baseCmd} --resume ${cli.resume_from} --tool ${cliTool} --mode write`;
|
||||||
case 'fork':
|
case 'fork':
|
||||||
return `${baseCmd} --resume ${cli.resume_from} --id ${task.cli_execution_id} --tool ${cliTool} --mode write`;
|
return `${baseCmd} --resume ${cli.resume_from} --id ${task.cli_execution.id} --tool ${cliTool} --mode write`;
|
||||||
case 'merge_fork':
|
case 'merge_fork':
|
||||||
return `${baseCmd} --resume ${cli.merge_from.join(',')} --id ${task.cli_execution_id} --tool ${cliTool} --mode write`;
|
return `${baseCmd} --resume ${cli.merge_from.join(',')} --id ${task.cli_execution.id} --tool ${cliTool} --mode write`;
|
||||||
default:
|
default:
|
||||||
// Fallback: no resume, no id
|
// Fallback: no resume, no id
|
||||||
return `${baseCmd} --tool ${cliTool} --mode write`;
|
return `${baseCmd} --tool ${cliTool} --mode write`;
|
||||||
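
For orientation, a worked example of the strategy dispatch above under the flat schema. The ids and tool name are invented for demonstration, and `baseCmd` is whatever prefix the function builds from the prompt:

```javascript
// Illustrative only: example inputs for the buildCliCommand dispatch shown above.
// Session/task ids and the chosen tool are made up for demonstration.
const task = {
  id: 'IMPL-3',
  cli_execution: {
    id: 'WFS-example-IMPL-3',          // {session}-{task}
    strategy: 'fork',                  // one parent whose other children also resume from it
    resume_from: 'WFS-example-IMPL-1'  // parent task's CLI execution id
  }
};

// With cliTool = 'codex', the 'fork' branch above yields:
//   `${baseCmd} --resume WFS-example-IMPL-1 --id WFS-example-IMPL-3 --tool codex --mode write`
```
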
@@ -36,14 +36,6 @@ You are a TDD-specialized code execution agent focused on implementing high-qual
   "meta": {
     "tdd_workflow": true, // REQUIRED: Enables TDD mode
     "max_iterations": 3, // Green phase test-fix cycle limit
-    "cli_execution_id": "{session}-{task}", // CLI session ID for resume
-    "cli_execution": { // CLI execution strategy
-      "strategy": "new|resume|fork|merge_fork",
-      "resume_from": "parent-cli-id" // For resume/fork strategies; array for merge_fork
-      // Note: For merge_fork, resume_from is array: ["id1", "id2", ...]
-    }
-  },
-  "context": {
     "tdd_cycles": [ // Test cases and coverage targets
       {
         "test_count": 5,
@@ -51,39 +43,41 @@ You are a TDD-specialized code execution agent focused on implementing high-qual
         "implementation_scope": "...",
         "expected_coverage": ">=85%"
       }
-    ],
+    ]
+  },
+  "cli_execution": { // CLI execution strategy
+    "id": "{session}-{task}", // CLI session ID for resume
+    "strategy": "new|resume|fork|merge_fork",
+    "resume_from": "parent-cli-id" // For resume/fork strategies; array for merge_fork
+  },
+  "description": "...", // Goal + requirements
   "focus_paths": [...], // Absolute or clear relative paths
-  "requirements": [...],
+  "convergence": {
-    "acceptance": [...] // Test commands for validation
+    "criteria": [...] // Test commands for validation
   },
-  "flow_control": {
   "pre_analysis": [...], // Context gathering steps
-  "implementation_approach": [ // Red-Green-Refactor steps
+  "implementation": [ // Red-Green-Refactor steps (polymorphic: string or object)
     {
-      "step": 1,
+      "step": "1",
-      "title": "Red Phase: Write failing tests",
+      "description": "Red Phase: Write failing tests - Write 5 test cases: [...]",
       "tdd_phase": "red", // REQUIRED: Phase identifier
-      "description": "Write 5 test cases: [...]",
+      "actions": ["Create test files", "Write test cases"],
-      "modification_points": [...],
+      "test_fix_cycle": null
-      "command": "..." // Optional CLI command
     },
     {
-      "step": 2,
+      "step": "2",
-      "title": "Green Phase: Implement to pass tests",
+      "description": "Green Phase: Implement to pass tests - Implement N functions...",
       "tdd_phase": "green", // Triggers test-fix cycle
-      "description": "Implement N functions...",
+      "actions": ["Implement functions", "Pass tests"],
-      "modification_points": [...],
+      "test_fix_cycle": { "max_iterations": 3 }
-      "command": "..."
     },
     {
-      "step": 3,
+      "step": "3",
-      "title": "Refactor Phase: Improve code quality",
+      "description": "Refactor Phase: Improve code quality - Apply N refactorings...",
       "tdd_phase": "refactor",
-      "description": "Apply N refactorings...",
+      "actions": ["Refactor code", "Verify no regressions"]
-      "modification_points": [...]
     }
   ]
-  }
 }
 ```

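
Because `implementation[]` entries may be plain strings or step objects, a consumer usually normalizes them before dispatch. A minimal sketch, assuming the object shape quoted above (the helper name and the defaults for string entries are not part of the schema):

```javascript
// Normalize a polymorphic implementation entry into a uniform step object.
// String entries become simple steps with no TDD phase; object entries pass through.
function normalizeStep(entry, index) {
  if (typeof entry === 'string') {
    return {
      step: String(index + 1),
      description: entry,
      tdd_phase: null,
      actions: [],
      test_fix_cycle: null
    };
  }
  return {
    step: entry.step,
    description: entry.description,
    tdd_phase: entry.tdd_phase || null,
    actions: entry.actions || [],
    test_fix_cycle: entry.test_fix_cycle || null
  };
}

// Usage: const steps = (taskJson.implementation || []).map(normalizeStep);
```
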
@@ -106,16 +100,16 @@ ELSE:
 // Extract TDD configuration
 const tddConfig = {
   maxIterations: taskJson.meta.max_iterations || 3,
-  cliExecutionId: taskJson.meta.cli_execution_id,
+  cliExecutionId: taskJson.cli_execution?.id,
-  cliStrategy: taskJson.meta.cli_execution?.strategy,
+  cliStrategy: taskJson.cli_execution?.strategy,
-  resumeFrom: taskJson.meta.cli_execution?.resume_from,
+  resumeFrom: taskJson.cli_execution?.resume_from,
-  testCycles: taskJson.context.tdd_cycles || [],
+  testCycles: taskJson.meta.tdd_cycles || [],
-  acceptanceTests: taskJson.context.acceptance || []
+  acceptanceTests: taskJson.convergence?.criteria || []
 }

-// Identify phases
+// Identify phases (implementation[] supports polymorphic: string or object)
-const phases = taskJson.flow_control.implementation_approach
+const phases = taskJson.implementation
-  .filter(step => step.tdd_phase)
+  .filter(step => typeof step === 'object' && step.tdd_phase)
   .map(step => ({
     step: step.step,
     phase: step.tdd_phase, // "red", "green", or "refactor"
@@ -127,10 +121,10 @@ const phases = taskJson.flow_control.implementation_approach
 ```
 REQUIRED CHECKS:
 - [ ] meta.tdd_workflow is true
-- [ ] flow_control.implementation_approach has exactly 3 steps
+- [ ] implementation[] has exactly 3 object entries with tdd_phase
-- [ ] Each step has tdd_phase field ("red", "green", "refactor")
+- [ ] Each entry has tdd_phase field ("red", "green", "refactor")
-- [ ] context.acceptance includes test command
+- [ ] convergence.criteria includes test command
-- [ ] Green phase has modification_points or command
+- [ ] Green phase has actions or description

 IF validation fails:
 → Report invalid TDD task structure
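
The REQUIRED CHECKS above can be mechanized roughly as follows. A sketch against the flat schema; the function name and error messages are illustrative:

```javascript
// Validate the TDD task shape described by the REQUIRED CHECKS list above.
function validateTddTask(taskJson) {
  const errors = [];
  if (taskJson.meta?.tdd_workflow !== true) errors.push('meta.tdd_workflow must be true');

  const phases = (taskJson.implementation || [])
    .filter(s => typeof s === 'object' && s.tdd_phase)
    .map(s => s.tdd_phase);
  if (phases.length !== 3 || ['red', 'green', 'refactor'].some(p => !phases.includes(p))) {
    errors.push('implementation[] must contain exactly red, green and refactor entries');
  }

  if (!Array.isArray(taskJson.convergence?.criteria) || taskJson.convergence.criteria.length === 0) {
    errors.push('convergence.criteria must include at least one test command');
  }

  const green = (taskJson.implementation || [])
    .find(s => typeof s === 'object' && s.tdd_phase === 'green');
  if (green && !(green.actions?.length || green.description)) {
    errors.push('green phase needs actions or a description');
  }

  return { valid: errors.length === 0, errors };
}
```
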
@@ -165,10 +159,10 @@ STEP 2: Execute Red Phase Implementation
 // Execute directly
 → Create test files in modification_points
 → Write test cases following test_cases enumeration
-→ Use context.shared_context.conventions for test style
+→ Use shared_context.conventions (from plan.json) for test style

 STEP 3: Validate Red Phase (Test Must Fail)
-→ Execute test command from context.acceptance
+→ Execute test command from convergence.criteria
 → Parse test output
 IF tests pass:
 ⚠️ WARNING: Tests passing in Red phase - may not test real behavior
@@ -217,7 +211,7 @@ STEP 3: Test-Fix Cycle (CRITICAL TDD FEATURE)
 FOR iteration in 1..meta.max_iterations:

 STEP 3.1: Run Test Suite
-→ Execute test command from context.acceptance
+→ Execute test command from convergence.criteria
 → Capture test output (stdout + stderr)
 → Parse test results (pass count, fail count, coverage)

@@ -320,7 +314,7 @@ STEP 2: Execute Refactor Implementation
 • Add documentation where needed

 STEP 3: Regression Testing (REQUIRED)
-→ Execute test command from context.acceptance
+→ Execute test command from convergence.criteria
 → Verify all tests still pass
 IF tests fail:
 ⚠️ REGRESSION DETECTED: Refactoring broke tests
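
A sketch of the green-phase test-fix cycle that the steps above describe, including the auto-revert fallback. `runTests`, `attemptFix`, and `revertChanges` are placeholders for whatever test runner and editing mechanism the agent actually uses:

```javascript
// Bounded test-fix cycle for the green phase: run the convergence criteria,
// attempt a fix on failure, and revert once max_iterations is exhausted.
async function greenPhaseTestFixCycle(taskJson, { runTests, attemptFix, revertChanges }) {
  const maxIterations = taskJson.meta?.max_iterations || 3;
  const testCommands = taskJson.convergence?.criteria || [];

  for (let iteration = 1; iteration <= maxIterations; iteration++) {
    const result = await runTests(testCommands);   // expected shape: { passed, failures, coverage }
    if (result.passed) return { status: 'passed', iteration, coverage: result.coverage };
    await attemptFix(result.failures);             // targeted fix based on the failing tests
  }

  await revertChanges();                           // auto-revert per the enforcement rules
  return { status: 'reverted', iteration: maxIterations };
}
```
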
@@ -357,13 +351,14 @@ Bash(
 ### 4. Context Loading (Inherited from code-developer)

 **Standard Context Sources**:
-- Task JSON: `context.requirements`, `context.acceptance`, `context.focus_paths`
+- Task JSON: `description`, `convergence.criteria`, `focus_paths`
 - Context Package: `context_package_path` → brainstorm artifacts, exploration results
-- Tech Stack: `context.shared_context.tech_stack` (skip auto-detection if present)
+- Tech Stack: `meta.shared_context.tech_stack` (skip auto-detection if present)

 **TDD-Enhanced Context**:
-- `context.tdd_cycles`: Test case enumeration and coverage targets
+- `meta.tdd_cycles`: Test case enumeration and coverage targets
 - `meta.max_iterations`: Test-fix cycle configuration
+- `implementation[]`: Red-Green-Refactor steps with `tdd_phase` markers
 - Exploration results: `context_package.exploration_results` for critical_files and integration_points

 ### 5. Quality Gates (TDD-Enhanced)
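
A compact sketch of how a consumer might assemble these sources into one context object. The plan.json fallback path and the session-directory layout follow the structure shown later in this diff; treat the exact lookup as an assumption:

```javascript
// Assemble execution context from the flat task fields plus the session-level plan.json.
const fs = require('fs');

function loadTaskContext(sessionDir, taskJson) {
  let sharedContext = taskJson.meta?.shared_context;
  if (!sharedContext) {
    // Fall back to the session-level plan overview when the task does not embed it.
    const plan = JSON.parse(fs.readFileSync(`${sessionDir}/plan.json`, 'utf8'));
    sharedContext = plan.shared_context || {};
  }

  return {
    description: taskJson.description,
    criteria: taskJson.convergence?.criteria || [],
    focusPaths: taskJson.focus_paths || [],
    techStack: sharedContext.tech_stack || [],   // skip auto-detection when present
    conventions: sharedContext.conventions || [],
    tddCycles: taskJson.meta?.tdd_cycles || [],
    maxIterations: taskJson.meta?.max_iterations || 3
  };
}
```
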
@@ -59,7 +59,7 @@ jq --arg ts "$(date -Iseconds)" '.status="in_progress" | .status_history += [{"f
 ### Flow Control Execution
 When task JSON contains `flow_control` field, execute preparation and implementation steps systematically.

-**Pre-Analysis Steps** (`flow_control.pre_analysis`):
+**Pre-Analysis Steps** (`pre_analysis`):
 1. **Sequential Processing**: Execute steps in order, accumulating context
 2. **Variable Substitution**: Use `[variable_name]` to reference previous outputs
 3. **Error Handling**: Follow step-specific strategies (`skip_optional`, `fail`, `retry_once`)
@@ -72,7 +72,7 @@ When task JSON contains `flow_control` field, execute preparation and implementa
 "Glob(pattern)" → Glob tool: Glob(pattern=pattern)
 ```

-**Implementation Approach** (`flow_control.implementation_approach`):
+**Implementation Approach** (`implementation`):
 When task JSON contains implementation_approach array:
 1. **Sequential Execution**: Process steps in order, respecting `depends_on` dependencies
 2. **Dependency Resolution**: Wait for all steps listed in `depends_on` before starting
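
A sketch of the sequential pre-analysis loop with `[variable_name]` substitution and the three error strategies named above. The step field names (`action`, `output_variable`, `on_error`) are assumptions for illustration; only the substitution syntax and the strategy names come from the document:

```javascript
// Execute pre_analysis steps in order, accumulating outputs and substituting
// [variable_name] references with previously captured results.
async function runPreAnalysis(steps, runTool) {
  const outputs = {};   // assumed: step.output_variable -> captured text

  for (const step of steps) {
    // Substitute [var] placeholders in the step's command/action string.
    const action = step.action.replace(/\[(\w+)\]/g, (_, name) => outputs[name] ?? `[${name}]`);

    try {
      const result = await runTool(action);   // e.g. dispatch "Read(...)", "Grep(...)", "Glob(...)"
      if (step.output_variable) outputs[step.output_variable] = result;
    } catch (err) {
      if (step.on_error === 'fail') throw err;
      if (step.on_error === 'retry_once' && step.output_variable) {
        outputs[step.output_variable] = await runTool(action);
      }
      // 'skip_optional': ignore the failure and continue
    }
  }
  return outputs;
}
```
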
@@ -162,7 +162,7 @@ run_test_layer "L1-unit" "$UNIT_CMD"

 ### 3. Failure Diagnosis & Fixing Loop

-**Execution Modes** (determined by `flow_control.implementation_approach`):
+**Execution Modes** (determined by `implementation`):

 **A. Agent Mode (Default, no `command` field in steps)**:
 ```
@@ -41,7 +41,7 @@ IF context sufficient for execution:
 → Proceed with task execution
 ELIF context insufficient OR task has flow control marker:
 → Check for [FLOW_CONTROL] marker:
-  - Execute flow_control.pre_analysis steps sequentially for context gathering
+  - Execute pre_analysis steps sequentially for context gathering
   - Use four flexible context acquisition methods:
     * Document references (cat commands)
     * Search commands (grep/rg/find)
@@ -220,8 +220,9 @@ bash(cat .workflow/active/${sessionId}/workflow-session.json)

 **Process**:
 1. **Check IMPL_PLAN.md**: Verify file exists (defer detailed parsing to Phase 4A)
-2. **Check TODO_LIST.md**: Verify file exists (defer reading to Phase 3)
+2. **Check plan.json**: Verify file exists (structured plan overview, used in Phase 4A)
-3. **Validate Task Directory**: Ensure `.task/` contains at least one IMPL-*.json file
+3. **Check TODO_LIST.md**: Verify file exists (defer reading to Phase 3)
+4. **Validate Task Directory**: Ensure `.task/` contains at least one IMPL-*.json file

 **Key Optimization**: Only existence checks here. Actual file reading happens in later phases.

@@ -257,15 +258,19 @@ This ensures the dashboard shows the session as "ACTIVE" during execution.
 ### Phase 4: Execution Strategy Selection & Task Execution
 **Applies to**: Both normal and resume modes

-**Step 4A: Parse Execution Strategy from IMPL_PLAN.md**
+**Step 4A: Parse Execution Strategy (plan.json preferred, IMPL_PLAN.md fallback)**

-Read IMPL_PLAN.md Section 4 to extract:
+Prefer `plan.json` (structured) over `IMPL_PLAN.md` (human-readable) for execution strategy:
+1. **If plan.json exists**: Read `recommended_execution`, `complexity`, `task_ids[]`, `shared_context`
+2. **Fallback to IMPL_PLAN.md**: Read Section 4 to extract execution model

+Extract:
 - **Execution Model**: Sequential | Parallel | Phased | TDD Cycles
 - **Parallelization Opportunities**: Which tasks can run in parallel
 - **Serialization Requirements**: Which tasks must run sequentially
 - **Critical Path**: Priority execution order

-If IMPL_PLAN.md lacks execution strategy, use intelligent fallback (analyze task structure).
+If neither has execution strategy, use intelligent fallback (analyze task structure).

 **Step 4B: Execute Tasks with Lazy Loading**

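
A sketch of the plan.json-first selection in Step 4A. The plan.json field names follow the list above; `parseImplPlanSection4` stands in for whatever markdown parsing the fallback actually does:

```javascript
// Prefer the structured plan.json; fall back to parsing IMPL_PLAN.md Section 4.
const fs = require('fs');

function resolveExecutionStrategy(sessionDir, parseImplPlanSection4) {
  const planPath = `${sessionDir}/plan.json`;
  if (fs.existsSync(planPath)) {
    const plan = JSON.parse(fs.readFileSync(planPath, 'utf8'));
    return {
      source: 'plan.json',
      executionModel: plan.recommended_execution,   // Sequential | Parallel | Phased | TDD Cycles
      complexity: plan.complexity,
      taskIds: plan.task_ids || [],
      sharedContext: plan.shared_context || {}
    };
  }

  const fromMarkdown = parseImplPlanSection4(`${sessionDir}/IMPL_PLAN.md`);
  // If neither source yields a strategy, the caller applies the intelligent
  // fallback (analyze task structure) mentioned above.
  return fromMarkdown || { source: 'fallback', executionModel: null };
}
```
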
@@ -285,7 +290,7 @@ while (TODO_LIST.md has pending tasks) {
 **Execution Process per Task**:
 1. **Identify Next Task**: From TodoWrite, get the next `in_progress` task ID
 2. **Load Task JSON on Demand**: Read `.task/{task-id}.json` for current task ONLY
-3. **Validate Task Structure**: Ensure all 5 required fields exist (id, title, status, meta, context, flow_control)
+3. **Validate Task Structure**: Ensure required fields exist (id, title, description, depends_on, convergence)
 4. **Launch Agent**: Invoke specialized agent with complete context including flow control steps
 5. **Monitor Progress**: Track agent execution and handle errors without user interruption
 6. **Collect Results**: Gather implementation results and outputs
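
A minimal sketch of the structure check in step 3, against the flat required fields (the helper name is illustrative):

```javascript
// Check the flat required fields listed in step 3 before dispatching a task.
function validateTaskStructure(taskJson) {
  const required = ['id', 'title', 'description', 'depends_on', 'convergence'];
  const missing = required.filter(field => taskJson[field] === undefined);
  return { valid: missing.length === 0, missing };
}
```
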
@@ -476,7 +481,7 @@ TodoWrite({
 ## Agent Execution Pattern

 ### Flow Control Execution
-**[FLOW_CONTROL]** marker indicates task JSON contains `flow_control.pre_analysis` steps for context preparation.
+**[FLOW_CONTROL]** marker indicates task JSON contains `pre_analysis` steps for context preparation.

 **Note**: Orchestrator does NOT execute flow control steps - Agent interprets and executes them autonomously.

@@ -505,10 +510,10 @@ Task(subagent_type="{meta.agent}",

 **Key Markers**:
 - `Implement` keyword: Triggers tech stack detection and guidelines loading
-- `[FLOW_CONTROL]`: Triggers flow_control.pre_analysis execution
+- `[FLOW_CONTROL]`: Triggers pre_analysis execution

 **Why Path-Based**: Agent (code-developer.md) autonomously:
-- Reads and parses task JSON (requirements, acceptance, flow_control, execution_config)
+- Reads and parses task JSON (description, convergence, implementation, execution_config)
 - Executes pre_analysis steps (Phase 1: context gathering)
 - Checks execution_config.method (Phase 2: determine mode)
 - CLI mode: Builds handoff prompt and executes via ccw cli with resume strategy
@@ -532,7 +537,8 @@ meta.agent missing → Infer from meta.type:
 ```
 .workflow/active/WFS-[topic-slug]/
 ├── workflow-session.json   # Session state and metadata
-├── IMPL_PLAN.md            # Planning document and requirements
+├── plan.json               # Structured plan overview (machine-readable)
+├── IMPL_PLAN.md            # Planning document and requirements (human-readable)
 ├── TODO_LIST.md            # Progress tracking (updated by agents)
 ├── .task/                  # Task definitions (JSON only)
 │   ├── IMPL-1.json         # Main task definitions
@@ -394,7 +394,7 @@ Return to user showing conflict resolution results (if executed) and selected st
 Skill(skill="workflow:tools:task-generate-agent", args="--session [sessionId]")
 ```

-**CLI Execution Note**: CLI tool usage is now determined semantically by action-planning-agent based on user's task description. If user specifies "use Codex/Gemini/Qwen for X", the agent embeds `command` fields in relevant `implementation_approach` steps.
+**CLI Execution Note**: CLI tool usage is now determined semantically by action-planning-agent based on user's task description. If user specifies "use Codex/Gemini/Qwen for X", CLI tool usage is controlled by `meta.execution_config.method` per task, not by `command` fields in implementation steps.

 **Input**:
 - `sessionId` from Phase 1
@@ -404,6 +404,7 @@ Skill(skill="workflow:tools:task-generate-agent", args="--session [sessionId]")
 - **Purpose**: Provides structured, minimal context summary to action-planning-agent

 **Validation**:
+- `.workflow/active/[sessionId]/plan.json` exists (structured plan overview)
 - `.workflow/active/[sessionId]/IMPL_PLAN.md` exists
 - `.workflow/active/[sessionId]/.task/IMPL-*.json` exists (at least one)
 - `.workflow/active/[sessionId]/TODO_LIST.md` exists
@@ -186,8 +186,8 @@ After bash validation, the model takes control to:
 for task_file in ${sessionPath}/.task/*.json; do
   cat "$task_file" | jq -r '
     "Task: " + .id + "\n" +
-    "Requirements: " + (.context.requirements | join(", ")) + "\n" +
+    "Requirements: " + .description + "\n" +
-    "Acceptance: " + (.context.acceptance | join(", "))
+    "Acceptance: " + (.convergence.criteria | join(", "))
   '
 done

@@ -46,7 +46,7 @@ NO PRODUCTION CODE WITHOUT A FAILING TEST FIRST
 ```

 **Enforcement Method**:
-- Phase 5: `implementation_approach` includes test-first steps (Red → Green → Refactor)
+- Phase 5: `implementation` includes test-first steps (Red → Green → Refactor)
 - Green phase: Includes test-fix-cycle configuration (max 3 iterations)
 - Auto-revert: Triggered when max iterations reached without passing tests

@@ -56,7 +56,7 @@ NO PRODUCTION CODE WITHOUT A FAILING TEST FIRST

 | Checkpoint | Validation Phase | Evidence Required |
 |------------|------------------|-------------------|
-| Test-first structure | Phase 5 | `implementation_approach` has 3 steps |
+| Test-first structure | Phase 5 | `implementation` has 3 steps |
 | Red phase exists | Phase 6 | Step 1: `tdd_phase: "red"` |
 | Green phase with test-fix | Phase 6 | Step 2: `tdd_phase: "green"` + test-fix-cycle |
 | Refactor phase exists | Phase 6 | Step 3: `tdd_phase: "refactor"` |
@@ -283,17 +283,18 @@ Skill(skill="workflow:tools:task-generate-tdd", args="--session [sessionId]")
 **Parse**: Extract feature count, task count (not chain count - tasks now contain internal TDD cycles), CLI execution IDs assigned

 **Validate**:
+- plan.json exists (structured plan overview with `_metadata.plan_type: "tdd"`)
 - IMPL_PLAN.md exists (unified plan with TDD Implementation Tasks section)
 - IMPL-*.json files exist (one per feature, or container + subtasks for complex features)
 - TODO_LIST.md exists with internal TDD phase indicators
 - Each IMPL task includes:
   - `meta.tdd_workflow: true`
-  - `meta.cli_execution_id: {session_id}-{task_id}`
+  - `cli_execution.id: {session_id}-{task_id}`
-  - `meta.cli_execution: { "strategy": "new|resume|fork|merge_fork", ... }`
+  - `cli_execution: { "strategy": "new|resume|fork|merge_fork", ... }`
-  - `flow_control.implementation_approach` with exactly 3 steps (red/green/refactor)
+  - `implementation` with exactly 3 steps (red/green/refactor)
   - Green phase includes test-fix-cycle configuration
-  - `context.focus_paths`: absolute or clear relative paths (enhanced with exploration critical_files)
+  - `focus_paths`: absolute or clear relative paths (enhanced with exploration critical_files)
-  - `flow_control.pre_analysis`: includes exploration integration_points analysis
+  - `pre_analysis`: includes exploration integration_points analysis
 - IMPL_PLAN.md contains workflow_type: "tdd" in frontmatter
 - User configuration applied:
   - If executionMethod == "cli" or "hybrid": command field added to steps
@@ -302,7 +303,7 @@ Skill(skill="workflow:tools:task-generate-tdd", args="--session [sessionId]")

 **Red Flag Detection** (Non-Blocking Warnings):
 - Task count >18: `⚠️ Task count exceeds hard limit - request re-scope`
-- Missing cli_execution_id: `⚠️ Task lacks CLI execution ID for resume support`
+- Missing cli_execution.id: `⚠️ Task lacks CLI execution ID for resume support`
 - Missing test-fix-cycle: `⚠️ Green phase lacks auto-revert configuration`
 - Generic task names: `⚠️ Vague task names suggest unclear TDD cycles`
 - Missing focus_paths: `⚠️ Task lacks clear file scope for implementation`
@@ -351,12 +352,12 @@ Skill(skill="workflow:tools:task-generate-tdd", args="--session [sessionId]")
 1. Each task contains complete TDD workflow (Red-Green-Refactor internally)
 2. Task structure validation:
    - `meta.tdd_workflow: true` in all IMPL tasks
-   - `meta.cli_execution_id` present (format: {session_id}-{task_id})
+   - `cli_execution.id` present (format: {session_id}-{task_id})
-   - `meta.cli_execution` strategy assigned (new/resume/fork/merge_fork)
+   - `cli_execution` strategy assigned (new/resume/fork/merge_fork)
-   - `flow_control.implementation_approach` has exactly 3 steps
+   - `implementation` has exactly 3 steps
    - Each step has correct `tdd_phase`: "red", "green", "refactor"
-   - `context.focus_paths` are absolute or clear relative paths
+   - `focus_paths` are absolute or clear relative paths
-   - `flow_control.pre_analysis` includes exploration integration analysis
+   - `pre_analysis` includes exploration integration analysis
 3. Dependency validation:
    - Sequential features: IMPL-N depends_on ["IMPL-(N-1)"] if needed
    - Complex features: IMPL-N.M depends_on ["IMPL-N.(M-1)"] for subtasks
@@ -392,7 +393,7 @@ ls -la .workflow/active/[sessionId]/.task/IMPL-*.json
 echo "IMPL tasks: $(ls .workflow/active/[sessionId]/.task/IMPL-*.json 2>/dev/null | wc -l)"

 # Sample task structure verification (first task)
-jq '{id, tdd: .meta.tdd_workflow, cli_id: .meta.cli_execution_id, phases: [.flow_control.implementation_approach[].tdd_phase]}' \
+jq '{id, tdd: .meta.tdd_workflow, cli_id: .cli_execution.id, phases: [.implementation[].tdd_phase]}' \
   "$(ls .workflow/active/[sessionId]/.task/IMPL-*.json | head -1)"
 ```

@@ -401,8 +402,8 @@ jq '{id, tdd: .meta.tdd_workflow, cli_id: .meta.cli_execution_id, phases: [.flow
 |---------------|---------------------|---------------|
 | File existence | `ls -la` artifacts | All files present |
 | Task count | Count IMPL-*.json | Count matches claims (≤18) |
-| TDD structure | jq sample extraction | Shows red/green/refactor + cli_execution_id |
+| TDD structure | jq sample extraction | Shows red/green/refactor + cli_execution.id |
-| CLI execution IDs | jq extraction | All tasks have cli_execution_id assigned |
+| CLI execution IDs | jq extraction | All tasks have cli_execution.id assigned |
 | Warning log | Check tdd-warnings.log | Logged (may be empty) |

 **Return Summary**:
@@ -431,7 +432,7 @@ Plans generated:
 - Task List: .workflow/active/[sessionId]/TODO_LIST.md
   (with internal TDD phase indicators and CLI execution strategies)
 - Task JSONs: .workflow/active/[sessionId]/.task/IMPL-*.json
-  (with cli_execution_id and execution strategies for resume support)
+  (with cli_execution.id and execution strategies for resume support)

 TDD Configuration:
 - Each task contains complete Red-Green-Refactor cycle
@@ -579,7 +580,7 @@ Convert user input to TDD-structured format:
 | Missing context-package | File read error | Re-run `/workflow:tools:context-gather` |
 | Invalid task JSON | jq parse error | Report malformed file path |
 | Task count exceeds 18 | Count validation ≥19 | Request re-scope, split into multiple sessions |
-| Missing cli_execution_id | All tasks lack ID | Regenerate tasks with phase 0 user config |
+| Missing cli_execution.id | All tasks lack ID | Regenerate tasks with phase 0 user config |
 | Test-context missing | File not found | Re-run `/workflow:tools:test-context-gather` |
 | Phase timeout | No response | Retry phase, check CLI connectivity |
 | CLI tool not available | Tool not in cli-tools.json | Fall back to alternative preferred tool |

@@ -179,6 +179,7 @@ const userConfig = {
 │   ├── IMPL-A2.json
 │   ├── IMPL-B1.json
 │   └── ...
+├── plan.json      # Output: Structured plan overview
 ├── IMPL_PLAN.md   # Output: Implementation plan (grouped by module)
 └── TODO_LIST.md   # Output: TODO list (hierarchical)
 ```
@@ -305,7 +306,7 @@ Based on userConfig.executionMethod, set task-level meta.execution_config:

 "agent" →
   meta.execution_config = { method: "agent", cli_tool: null, enable_resume: false }
-  Agent executes implementation_approach steps directly
+  Agent executes implementation steps directly

 "cli" →
   meta.execution_config = { method: "cli", cli_tool: userConfig.preferredCliTool, enable_resume: true }
@@ -317,7 +318,7 @@ Based on userConfig.executionMethod, set task-level meta.execution_config:
 - Complex tasks (>3 files, complex logic, refactoring) → method: "cli"
   CLI tool: userConfig.preferredCliTool, enable_resume: true

-IMPORTANT: Do NOT add command field to implementation_approach steps. Execution routing is controlled by task-level meta.execution_config.method only.
+IMPORTANT: Do NOT add command field to implementation steps. Execution routing is controlled by task-level meta.execution_config.method only.

 ## PRIORITIZED CONTEXT (from context-package.prioritized_context) - ALREADY SORTED
 Context sorting is ALREADY COMPLETED in context-gather Phase 2/3. DO NOT re-sort.
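
A sketch of the routing rule above. The hybrid branch reduces the ">3 files, complex logic, refactoring" guidance to a simple file-count check, so treat the heuristic as illustrative:

```javascript
// Derive task-level meta.execution_config from the user's chosen execution method.
function assignExecutionConfig(task, userConfig) {
  switch (userConfig.executionMethod) {
    case 'agent':
      return { method: 'agent', cli_tool: null, enable_resume: false };
    case 'cli':
      return { method: 'cli', cli_tool: userConfig.preferredCliTool, enable_resume: true };
    case 'hybrid': {
      // Simplified complexity check: complex tasks (e.g. touching >3 files) route to CLI.
      const complex = (task.files?.length || 0) > 3;
      return complex
        ? { method: 'cli', cli_tool: userConfig.preferredCliTool, enable_resume: true }
        : { method: 'agent', cli_tool: null, enable_resume: false };
    }
    default:
      return { method: 'agent', cli_tool: null, enable_resume: false };
  }
}
```
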
@@ -346,13 +347,13 @@ If prioritized_context is incomplete, fall back to exploration_results:

 ## EXPECTED DELIVERABLES
 1. Task JSON Files (.task/IMPL-*.json)
-   - 6-field schema (id, title, status, context_package_path, meta, context, flow_control)
+   - Unified flat schema (task-schema.json)
    - Quantified requirements with explicit counts
    - Artifacts integration from context package
    - **focus_paths generated directly from prioritized_context.priority_tiers (critical + high)**
     - NO re-sorting or re-prioritization - use pre-computed tiers as-is
     - Critical files are PRIMARY focus, High files are SECONDARY
-   - Flow control with pre_analysis steps (use prioritized_context.dependency_order for task sequencing)
+   - Pre-analysis steps (use prioritized_context.dependency_order for task sequencing)
    - **CLI Execution IDs and strategies (MANDATORY)**

 2. Implementation Plan (IMPL_PLAN.md)
@@ -360,14 +361,18 @@ If prioritized_context is incomplete, fall back to exploration_results:
    - Task breakdown and execution strategy
    - Complete structure per agent definition

-3. TODO List (TODO_LIST.md)
+3. Plan Overview (plan.json)
+   - Structured plan overview (plan-overview-base-schema)
+   - Machine-readable task IDs, shared context, metadata

+4. TODO List (TODO_LIST.md)
    - Hierarchical structure (containers, pending, completed markers)
    - Links to task JSONs and summaries
    - Matches task JSON hierarchy

 ## CLI EXECUTION ID REQUIREMENTS (MANDATORY)
 Each task JSON MUST include:
-- **cli_execution_id**: Unique ID for CLI execution (format: `{session_id}-{task_id}`)
+- **cli_execution.id**: Unique ID for CLI execution (format: `{session_id}-{task_id}`)
 - **cli_execution**: Strategy object based on depends_on:
   - No deps → `{ "strategy": "new" }`
   - 1 dep (single child) → `{ "strategy": "resume", "resume_from": "parent-cli-id" }`
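
The dependency-to-strategy mapping above can be derived mechanically. A sketch (the multi-dependency case is mapped to merge_fork with an array `resume_from`, following the array note earlier in this diff; the helper itself is illustrative):

```javascript
// Assign cli_execution per the depends_on rules above:
// no deps -> new; one dep -> resume (parent has a single child) or fork (parent has several);
// multiple deps -> merge_fork resuming from all parents.
function assignCliExecution(task, allTasks, sessionId) {
  const cliId = `${sessionId}-${task.id}`;
  const deps = task.depends_on || [];
  const childCount = parentId =>
    allTasks.filter(t => (t.depends_on || []).includes(parentId)).length;

  if (deps.length === 0) {
    return { id: cliId, strategy: 'new' };
  }
  if (deps.length === 1) {
    const strategy = childCount(deps[0]) > 1 ? 'fork' : 'resume';
    return { id: cliId, strategy, resume_from: `${sessionId}-${deps[0]}` };
  }
  return {
    id: cliId,
    strategy: 'merge_fork',
    resume_from: deps.map(d => `${sessionId}-${d}`)   // array form for merge_fork
  };
}
```
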
@@ -503,7 +508,7 @@ Based on userConfig.executionMethod, set task-level meta.execution_config:

 "agent" →
   meta.execution_config = { method: "agent", cli_tool: null, enable_resume: false }
-  Agent executes implementation_approach steps directly
+  Agent executes implementation steps directly

 "cli" →
   meta.execution_config = { method: "cli", cli_tool: userConfig.preferredCliTool, enable_resume: true }
@@ -515,7 +520,7 @@ Based on userConfig.executionMethod, set task-level meta.execution_config:
 - Complex tasks (>3 files, complex logic, refactoring) → method: "cli"
   CLI tool: userConfig.preferredCliTool, enable_resume: true

-IMPORTANT: Do NOT add command field to implementation_approach steps. Execution routing is controlled by task-level meta.execution_config.method only.
+IMPORTANT: Do NOT add command field to implementation steps. Execution routing is controlled by task-level meta.execution_config.method only.

 ## PRIORITIZED CONTEXT (from context-package.prioritized_context) - ALREADY SORTED
 Context sorting is ALREADY COMPLETED in context-gather Phase 2/3. DO NOT re-sort.
@@ -549,20 +554,20 @@ If prioritized_context is incomplete for this module, fall back to exploration_r

 ## EXPECTED DELIVERABLES
 Task JSON Files (.task/IMPL-${module.prefix}*.json):
-- 6-field schema (id, title, status, context_package_path, meta, context, flow_control)
+- Unified flat schema (task-schema.json)
 - Task ID format: IMPL-${module.prefix}1, IMPL-${module.prefix}2, ...
 - Quantified requirements with explicit counts
 - Artifacts integration from context package (filtered for ${module.name})
 - **focus_paths generated directly from prioritized_context.priority_tiers filtered by ${module.paths.join(', ')}**
   - NO re-sorting - use pre-computed tiers filtered for this module
   - Critical files are PRIMARY focus, High files are SECONDARY
-- Flow control with pre_analysis steps (use prioritized_context.dependency_order for module task sequencing)
+- Pre-analysis steps (use prioritized_context.dependency_order for module task sequencing)
 - **CLI Execution IDs and strategies (MANDATORY)**
 - Focus ONLY on ${module.name} module scope

 ## CLI EXECUTION ID REQUIREMENTS (MANDATORY)
 Each task JSON MUST include:
-- **cli_execution_id**: Unique ID for CLI execution (format: `{session_id}-IMPL-${module.prefix}{seq}`)
+- **cli_execution.id**: Unique ID for CLI execution (format: `{session_id}-IMPL-${module.prefix}{seq}`)
 - **cli_execution**: Strategy object based on depends_on:
   - No deps → `{ "strategy": "new" }`
   - 1 dep (single child) → `{ "strategy": "resume", "resume_from": "parent-cli-id" }`
@@ -595,7 +600,7 @@ Hard Constraints:

 ## SUCCESS CRITERIA
 - Task JSONs saved to .task/ with IMPL-${module.prefix}* naming
-- All task JSONs include cli_execution_id and cli_execution strategy
+- All task JSONs include cli_execution.id and cli_execution strategy
 - Cross-module dependencies use CROSS:: placeholder format consistently
 - Focus paths scoped to ${module.paths.join(', ')} only
 - Return: task count, task IDs, dependency summary (internal + cross-module)
@@ -717,7 +722,7 @@ function resolveCrossModuleDependency(placeholder, allTasks) {
   const candidates = allTasks.filter(t =>
     t.id.startsWith(`IMPL-${targetModule}`) &&
     (t.title.toLowerCase().includes(pattern.toLowerCase()) ||
-     t.context?.description?.toLowerCase().includes(pattern.toLowerCase()))
+     t.description?.toLowerCase().includes(pattern.toLowerCase()))
   );
   return candidates.length > 0
     ? candidates.sort((a, b) => a.id.localeCompare(b.id))[0].id

@@ -186,6 +186,7 @@ const userConfig = {
 │   ├── IMPL-1.json
 │   ├── IMPL-2.json
 │   └── ...
+├── plan.json      # Output: Structured plan overview (TDD variant)
 ├── IMPL_PLAN.md   # Output: TDD implementation plan
 └── TODO_LIST.md   # Output: TODO list with TDD phases
 ```
@@ -376,7 +377,7 @@ Based on userConfig.executionMethod, set task-level meta.execution_config:
 - Complex cycles (>5 test cases, >3 files, integration tests) → method: "cli"
   CLI tool: userConfig.preferredCliTool, enable_resume: true

-IMPORTANT: Do NOT add command field to implementation_approach steps. Execution routing is controlled by task-level meta.execution_config.method only.
+IMPORTANT: Do NOT add command field to implementation steps. Execution routing is controlled by task-level meta.execution_config.method only.

 ## EXPLORATION CONTEXT (from context-package.exploration_results)
 - Load exploration_results from context-package.json
@@ -421,19 +422,18 @@ IMPORTANT: Do NOT add command field to implementation_approach steps. Execution

 ##### 1. TDD Task JSON Files (.task/IMPL-*.json)
 - **Location**: `.workflow/active/{session-id}/.task/`
-- **Schema**: 6-field structure with TDD-specific metadata
+- **Schema**: Unified flat schema (task-schema.json) with TDD-specific metadata
-  - `id, title, status, context_package_path, meta, context, flow_control`
   - `meta.tdd_workflow`: true (REQUIRED)
   - `meta.max_iterations`: 3 (Green phase test-fix cycle limit)
-  - `meta.cli_execution_id`: Unique CLI execution ID (format: `{session_id}-{task_id}`)
+  - `cli_execution.id`: Unique CLI execution ID (format: `{session_id}-{task_id}`)
-  - `meta.cli_execution`: Strategy object (new|resume|fork|merge_fork)
+  - `cli_execution`: Strategy object (new|resume|fork|merge_fork)
-  - `context.tdd_cycles`: Array with quantified test cases and coverage
+  - `tdd_cycles`: Array with quantified test cases and coverage
-  - `context.focus_paths`: Absolute or clear relative paths (enhanced with exploration critical_files)
+  - `focus_paths`: Absolute or clear relative paths (enhanced with exploration critical_files)
-  - `flow_control.implementation_approach`: Exactly 3 steps with `tdd_phase` field
+  - `implementation`: Exactly 3 steps with `tdd_phase` field
     1. Red Phase (`tdd_phase: "red"`): Write failing tests
     2. Green Phase (`tdd_phase: "green"`): Implement to pass tests
     3. Refactor Phase (`tdd_phase: "refactor"`): Improve code quality
-  - `flow_control.pre_analysis`: Include exploration integration_points analysis
+  - `pre_analysis`: Include exploration integration_points analysis
   - **meta.execution_config**: Set per userConfig.executionMethod (agent/cli/hybrid)
 - **Details**: See action-planning-agent.md § TDD Task JSON Generation

@@ -455,8 +455,8 @@ IMPORTANT: Do NOT add command field to implementation_approach steps. Execution
 ### CLI EXECUTION ID REQUIREMENTS (MANDATORY)

 Each task JSON MUST include:
-- **meta.cli_execution_id**: Unique ID for CLI execution (format: `{session_id}-{task_id}`)
+- **cli_execution.id**: Unique ID for CLI execution (format: `{session_id}-{task_id}`)
-- **meta.cli_execution**: Strategy object based on depends_on:
+- **cli_execution**: Strategy object based on depends_on:
   - No deps → `{ "strategy": "new" }`
   - 1 dep (single child) → `{ "strategy": "resume", "resume_from": "parent-cli-id" }`
   - 1 dep (multiple children) → `{ "strategy": "fork", "resume_from": "parent-cli-id" }`
@@ -496,7 +496,7 @@ Each task JSON MUST include:
 - [ ] Every acceptance criterion includes measurable coverage percentage
 - [ ] tdd_cycles array contains test_count and test_cases for each cycle
 - [ ] No vague language ("comprehensive", "complete", "thorough")
-- [ ] cli_execution_id and cli_execution strategy assigned to each task
+- [ ] cli_execution.id and cli_execution strategy assigned to each task

 ### Agent Execution Summary

@@ -513,7 +513,7 @@ Each task JSON MUST include:
 - ✓ Task count ≤18 (hard limit)
 - ✓ Each task has meta.tdd_workflow: true
 - ✓ Each task has exactly 3 implementation steps with tdd_phase field ("red", "green", "refactor")
-- ✓ Each task has meta.cli_execution_id and meta.cli_execution strategy
+- ✓ Each task has cli_execution.id and cli_execution strategy
 - ✓ Green phase includes test-fix cycle logic with max_iterations
 - ✓ focus_paths are absolute or clear relative paths (from exploration critical_files)
 - ✓ Artifact references mapped correctly from context package
@@ -525,7 +525,7 @@ Each task JSON MUST include:

 ## SUCCESS CRITERIA
 - All planning documents generated successfully:
-  - Task JSONs valid and saved to .task/ directory with cli_execution_id
+  - Task JSONs valid and saved to .task/ directory with cli_execution.id
   - IMPL_PLAN.md created with complete TDD structure
   - TODO_LIST.md generated matching task JSONs
   - CLI execution strategies assigned based on task dependencies
@@ -533,7 +533,7 @@ Each task JSON MUST include:

 ## OUTPUT SUMMARY
 Generate all three documents and report:
-- TDD task JSON files created: N files (IMPL-*.json) with cli_execution_id assigned
+- TDD task JSON files created: N files (IMPL-*.json) with cli_execution.id assigned
 - TDD cycles configured: N cycles with quantified test cases
 - CLI execution strategies: new/resume/fork/merge_fork assigned per dependency graph
 - Artifacts integrated: synthesis-spec/guidance-specification, relevant role analyses
@@ -615,9 +615,9 @@ This section provides quick reference for TDD task JSON structure. For complete
 - Required metadata:
   - `meta.tdd_workflow: true`
   - `meta.max_iterations: 3`
-  - `meta.cli_execution_id: "{session_id}-{task_id}"`
+  - `cli_execution.id: "{session_id}-{task_id}"`
-  - `meta.cli_execution: { "strategy": "new|resume|fork|merge_fork", ... }`
+  - `cli_execution: { "strategy": "new|resume|fork|merge_fork", ... }`
-- Context: `tdd_cycles` array with quantified test cases and coverage:
+- `tdd_cycles` array with quantified test cases and coverage:
   ```javascript
   tdd_cycles: [
     {
@@ -628,15 +628,16 @@ This section provides quick reference for TDD task JSON structure. For complete
     }
   ]
   ```
-- Context: `focus_paths` use absolute or clear relative paths
+- `focus_paths` use absolute or clear relative paths
-- Flow control: Exactly 3 steps with `tdd_phase` field ("red", "green", "refactor")
+- `implementation`: Exactly 3 steps with `tdd_phase` field ("red", "green", "refactor")
-- Flow control: `pre_analysis` includes exploration integration_points analysis
+- `pre_analysis`: includes exploration integration_points analysis
 - **meta.execution_config**: Set per `userConfig.executionMethod` (agent/cli/hybrid)
 - See Phase 2 agent prompt for full schema and requirements

 ## Output Files Structure
 ```
 .workflow/active/{session-id}/
+├── plan.json      # Structured plan overview (TDD variant)
 ├── IMPL_PLAN.md   # Unified plan with TDD Implementation Tasks section
 ├── TODO_LIST.md   # Progress tracking with internal TDD phase indicators
 ├── .task/
@@ -662,7 +663,7 @@ This section provides quick reference for TDD task JSON structure. For complete
 ## Validation Rules

 ### Task Completeness
-- Every IMPL-N must contain complete TDD workflow in `flow_control.implementation_approach`
+- Every IMPL-N must contain complete TDD workflow in `implementation`
 - Each task must have 3 steps with `tdd_phase`: "red", "green", "refactor"
 - Every task must have `meta.tdd_workflow: true`

@@ -678,7 +679,7 @@ This section provides quick reference for TDD task JSON structure. For complete

 ### TDD Workflow Validation
 - `meta.tdd_workflow` must be true
-- `flow_control.implementation_approach` must have exactly 3 steps
+- `implementation` must have exactly 3 steps
 - Each step must have `tdd_phase` field ("red", "green", or "refactor")
 - Green phase step must include test-fix cycle logic
 - `meta.max_iterations` must be present (default: 3)