refactor: Update task structure and grouping principles in planning and execution schemas for clarity and consistency

catlog22
2025-11-28 10:56:45 +08:00
parent beb839d8e2
commit 196b805499
3 changed files with 85 additions and 65 deletions


@@ -150,12 +150,13 @@ Input Parsing:
 Execution:
 ├─ Step 1: Initialize result tracking (previousExecutionResults = [])
 ├─ Step 2: Task grouping & batch creation
-│ ├─ Infer dependencies (same file → sequential, keywords → sequential)
-│ ├─ Group into batches (parallel: independent, sequential: dependent)
+│ ├─ Extract explicit depends_on (no file/keyword inference)
+│ ├─ Group: independent tasks → single parallel batch (maximize utilization)
+│ ├─ Group: dependent tasks → sequential phases (respect dependencies)
 │ └─ Create TodoWrite list for batches
 ├─ Step 3: Launch execution
-│ ├─ Phase 1: All parallel batches (⚡ concurrent via multiple tool calls)
-│ └─ Phase 2: Sequential batches (→ one by one)
+│ ├─ Phase 1: All independent tasks (⚡ single batch, concurrent)
+│ └─ Phase 2+: Dependent tasks by dependency order
 ├─ Step 4: Track progress (TodoWrite updates per batch)
 └─ Step 5: Code review (if codeReviewTool ≠ "Skip")
@@ -180,66 +181,68 @@ previousExecutionResults = []
 **Dependency Analysis & Grouping Algorithm**:
 ```javascript
-// Infer dependencies: same file → sequential, keywords (use/integrate) → sequential
-function inferDependencies(tasks) {
-  return tasks.map((task, i) => {
-    const deps = []
-    const file = task.file || task.title.match(/in\s+([^\s:]+)/)?.[1]
-    const keywords = (task.description || task.title).toLowerCase()
-    for (let j = 0; j < i; j++) {
-      const prevFile = tasks[j].file || tasks[j].title.match(/in\s+([^\s:]+)/)?.[1]
-      if (file && prevFile === file) deps.push(j) // Same file
-      else if (/use|integrate|call|import/.test(keywords)) deps.push(j) // Keyword dependency
-    }
+// Use explicit depends_on from plan.json (no inference from file/keywords)
+function extractDependencies(tasks) {
+  const taskIdToIndex = {}
+  tasks.forEach((t, i) => { taskIdToIndex[t.id] = i })
+  return tasks.map((task, i) => {
+    // Only use explicit depends_on from plan.json
+    const deps = (task.depends_on || [])
+      .map(depId => taskIdToIndex[depId])
+      .filter(idx => idx !== undefined && idx < i)
     return { ...task, taskIndex: i, dependencies: deps }
   })
 }
 
-// Group into batches: independent → parallel [P1,P2...], dependent → sequential [S1,S2...]
+// Group into batches: maximize parallel execution
 function createExecutionCalls(tasks, executionMethod) {
-  const tasksWithDeps = inferDependencies(tasks)
-  const maxBatch = executionMethod === "Codex" ? 4 : 7
-  const calls = []
-  const processed = new Set()
-
-  // Parallel: independent tasks, different files, max batch size
-  const parallelGroups = []
-  tasksWithDeps.forEach(t => {
-    if (t.dependencies.length === 0 && !processed.has(t.taskIndex)) {
-      const group = [t]
-      processed.add(t.taskIndex)
-      tasksWithDeps.forEach(o => {
-        if (!o.dependencies.length && !processed.has(o.taskIndex) &&
-            group.length < maxBatch && t.file !== o.file) {
-          group.push(o)
-          processed.add(o.taskIndex)
-        }
-      })
-      parallelGroups.push(group)
-    }
-  })
-
-  // Sequential: dependent tasks, batch when deps satisfied
-  const remaining = tasksWithDeps.filter(t => !processed.has(t.taskIndex))
-  while (remaining.length > 0) {
-    const batch = remaining.filter((t, i) =>
-      i < maxBatch && t.dependencies.every(d => processed.has(d))
-    )
-    if (!batch.length) break
-    batch.forEach(t => processed.add(t.taskIndex))
-    calls.push({ executionType: "sequential", groupId: `S${calls.length + 1}`, tasks: batch })
-    remaining.splice(0, remaining.length, ...remaining.filter(t => !processed.has(t.taskIndex)))
-  }
-
-  // Combine results
-  return [
-    ...parallelGroups.map((g, i) => ({
-      method: executionMethod, executionType: "parallel", groupId: `P${i+1}`,
-      taskSummary: g.map(t => t.title).join(' | '), tasks: g
-    })),
-    ...calls.map(c => ({ ...c, method: executionMethod, taskSummary: c.tasks.map(t => t.title).join(' → ') }))
-  ]
+  const tasksWithDeps = extractDependencies(tasks)
+  const processed = new Set()
+  const calls = []
+
+  // Phase 1: All independent tasks → single parallel batch (maximize utilization)
+  const independentTasks = tasksWithDeps.filter(t => t.dependencies.length === 0)
+  if (independentTasks.length > 0) {
+    independentTasks.forEach(t => processed.add(t.taskIndex))
+    calls.push({
+      method: executionMethod,
+      executionType: "parallel",
+      groupId: "P1",
+      taskSummary: independentTasks.map(t => t.title).join(' | '),
+      tasks: independentTasks
+    })
+  }
+
+  // Phase 2: Dependent tasks → sequential batches (respect dependencies)
+  let sequentialIndex = 1
+  let remaining = tasksWithDeps.filter(t => !processed.has(t.taskIndex))
+  while (remaining.length > 0) {
+    // Find tasks whose dependencies are all satisfied
+    const ready = remaining.filter(t =>
+      t.dependencies.every(d => processed.has(d))
+    )
+    if (ready.length === 0) {
+      console.warn('Circular dependency detected, forcing remaining tasks')
+      ready.push(...remaining)
+    }
+    // Group ready tasks (can run in parallel within this phase)
+    ready.forEach(t => processed.add(t.taskIndex))
+    calls.push({
+      method: executionMethod,
+      executionType: ready.length > 1 ? "parallel" : "sequential",
+      groupId: ready.length > 1 ? `P${calls.length + 1}` : `S${sequentialIndex++}`,
+      taskSummary: ready.map(t => t.title).join(ready.length > 1 ? ' | ' : ' → '),
+      tasks: ready
+    })
+    remaining = remaining.filter(t => !processed.has(t.taskIndex))
+  }
+  return calls
 }
 executionCalls = createExecutionCalls(planObject.tasks, executionMethod).map(c => ({ ...c, id: `[${c.groupId}]` }))
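
As a minimal sketch of the new two-phase behavior (assuming the `extractDependencies` and `createExecutionCalls` definitions above; the `executionMethod` value `"Agent"` and the task titles are hypothetical), a four-task plan where only T3 declares `depends_on` would be grouped like this:

```javascript
// Hypothetical plan: T1, T2, T4 are independent; T3 depends on T1 and T2.
const sampleTasks = [
  { id: "T1", title: "Implement auth service" },
  { id: "T2", title: "Add login UI" },
  { id: "T3", title: "Wire login UI to auth service", depends_on: ["T1", "T2"] },
  { id: "T4", title: "Update README" }
]

const batches = createExecutionCalls(sampleTasks, "Agent")
batches.forEach(b => console.log(b.groupId, b.executionType, b.taskSummary))
// P1 parallel   Implement auth service | Add login UI | Update README
// S1 sequential Wire login UI to auth service
```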
@@ -536,8 +539,8 @@ codex --full-auto exec "[Verify plan acceptance criteria at ${plan.json}]" --ski
 ## Best Practices
 **Input Modes**: In-memory (lite-plan), prompt (standalone), file (JSON/text)
 **Batch Limits**: Agent 7 tasks, CLI 4 tasks
-**Execution**: Parallel batches use single Claude message with multiple tool calls (no concurrency limit)
+**Task Grouping**: Based on explicit depends_on only; independent tasks run in single parallel batch
+**Execution**: All independent tasks launch concurrently via single Claude message with multiple tool calls
 ## Error Handling


@@ -373,16 +373,28 @@ ${complexity}
 Generate plan.json with:
 - summary: 2-3 sentence overview
 - approach: High-level implementation strategy (incorporating insights from all exploration angles)
-- tasks: 3-10 structured tasks with:
-  - title, file, action, description
-  - implementation (3-7 steps)
+- tasks: 3-7 structured tasks (**IMPORTANT: group by feature/module, NOT by file**)
+  - **Task Granularity Principle**: Each task = one complete feature unit or module
+  - title: action verb + target module/feature (e.g., "Implement auth token refresh")
+  - scope: module path (src/auth/) or feature name, prefer module-level over single file
+  - action, description
+  - modification_points: ALL files to modify for this feature (group related changes)
+  - implementation (3-7 steps covering all modification_points)
   - reference (pattern, files, examples)
-  - acceptance (2-4 criteria)
+  - acceptance (2-4 criteria for the entire feature)
+  - depends_on: task IDs this task depends on (use sparingly, only for true dependencies)
   - estimated_time, recommended_execution, complexity
 - _metadata:
   - timestamp, source, planning_mode
   - exploration_angles: ${JSON.stringify(manifest.explorations.map(e => e.angle))}
+
+## Task Grouping Rules
+1. **Group by feature**: All changes for one feature = one task (even if 3-5 files)
+2. **Avoid file-per-task**: Do NOT create separate tasks for each file
+3. **Substantial tasks**: Each task should represent 15-60 minutes of work
+4. **True dependencies only**: Only use depends_on when Task B cannot start without Task A's output
+5. **Prefer parallel**: Most tasks should be independent (no depends_on)
+
 ## Execution
 1. Read ALL exploration files for comprehensive context
 2. Execute CLI planning using Gemini (Qwen fallback)
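
For illustration, a single task that follows the grouping rules above might look like the sketch below. The module paths, targets, and field contents are hypothetical, not taken from this commit:

```javascript
// Hypothetical feature-grouped task: one task covers all files the feature touches.
const exampleTask = {
  id: "T2",
  title: "Implement auth token refresh",
  scope: "src/auth/",
  action: "modify",
  description: "Add transparent token refresh to the auth module",
  modification_points: [
    { file: "src/auth/token.ts", target: "refreshToken()", change: "add refresh-before-expiry logic" },
    { file: "src/auth/client.ts", target: "request interceptor", change: "retry once after refresh" },
    { file: "src/auth/types.ts", target: "TokenPair", change: "add expiresAt field" }
  ],
  implementation: [
    "Add expiresAt to TokenPair",
    "Implement refreshToken() with expiry check",
    "Hook refresh into the request interceptor"
  ],
  acceptance: ["Expired tokens refresh transparently", "No change to public API"],
  depends_on: []  // independent → eligible for the single parallel batch
}
```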


@@ -27,7 +27,7 @@
"maxItems": 10,
"items": {
"type": "object",
"required": ["id", "title", "file", "action", "description", "implementation", "acceptance"],
"required": ["id", "title", "scope", "action", "description", "implementation", "acceptance"],
"properties": {
"id": {
"type": "string",
@@ -36,11 +36,15 @@
           },
           "title": {
             "type": "string",
-            "description": "Task title (action verb + target)"
+            "description": "Task title (action verb + target module/feature)"
           },
+          "scope": {
+            "type": "string",
+            "description": "Task scope: module path (src/auth/), feature name, or single file. Prefer module/feature level over single file."
+          },
           "file": {
             "type": "string",
-            "description": "Target file path for this task"
+            "description": "Primary file (deprecated, use scope + modification_points instead)"
           },
           "action": {
             "type": "string",
@@ -53,13 +57,14 @@
           },
           "modification_points": {
             "type": "array",
+            "minItems": 1,
             "items": {
               "type": "object",
               "required": ["file", "target", "change"],
               "properties": {
                 "file": {
                   "type": "string",
-                  "description": "File path"
+                  "description": "File path within scope"
                 },
                 "target": {
                   "type": "string",
@@ -71,7 +76,7 @@
                 }
               }
             },
-            "description": "Precise modification points with file:target:change format"
+            "description": "All modification points for this task. Group related changes (same feature/module) into one task with multiple modification_points."
           },
           "implementation": {
             "type": "array",