Mirror of https://github.com/catlog22/Claude-Code-Workflow.git (synced 2026-02-14 02:42:04 +08:00)
feat: enforce mandatory rationale and role in explore/diagnosis schema output
- Remove oneOf string option from relevant_files/affected_files, require structured objects
- Add required fields: rationale (minLength 10), role/change_type enum
- Add optional fields: discovery_source, key_symbols
- Update all caller commands with new format instructions and success criteria
- Fix consumer code: Map-based dedup, getPath() helper, path extraction
- Fix frontend: f.rationale || f.reason backward-compatible fallback
@@ -83,26 +83,45 @@
     },
     "affected_files": {
       "type": "array",
+      "minItems": 1,
       "items": {
-        "oneOf": [
-          {"type": "string"},
-          {
-            "type": "object",
-            "required": ["path", "relevance"],
-            "properties": {
-              "path": {"type": "string", "description": "File path relative to project root"},
-              "relevance": {"type": "number", "minimum": 0, "maximum": 1, "description": "Relevance score 0.0-1.0 (0.7+ high, 0.5-0.7 medium, <0.5 low)"},
-              "rationale": {"type": "string", "description": "Brief explanation of why this file is affected from this diagnosis angle"},
-              "change_type": {
-                "type": "string",
-                "enum": ["fix_target", "needs_update", "test_coverage", "reference_only"],
-                "description": "Type of change needed for this file"
-              }
-            }
-          }
-        ]
-      },
-      "description": "Files affected by the bug. Prefer object format with relevance scores for synthesis prioritization."
+        "type": "object",
+        "required": ["path", "relevance", "rationale", "change_type"],
+        "properties": {
+          "path": {
+            "type": "string",
+            "description": "File path relative to project root"
+          },
+          "relevance": {
+            "type": "number",
+            "minimum": 0,
+            "maximum": 1,
+            "description": "Relevance score 0.0-1.0 (0.7+ high, 0.5-0.7 medium, <0.5 low)"
+          },
+          "rationale": {
+            "type": "string",
+            "minLength": 10,
+            "description": "REQUIRED: Selection rationale explaining why this file is affected. Must be specific. Example: 'Contains handleLogin() at line 45 where null check is missing for token response' rather than 'Related to bug'"
+          },
+          "change_type": {
+            "type": "string",
+            "enum": ["fix_target", "needs_update", "test_coverage", "reference_only"],
+            "description": "Type of change needed: fix_target=contains the bug, needs_update=requires changes due to fix, test_coverage=tests to add/update, reference_only=understanding context"
+          },
+          "discovery_source": {
+            "type": "string",
+            "enum": ["bash-scan", "cli-analysis", "ace-search", "dependency-trace", "stack-trace", "manual"],
+            "description": "How the file was identified as affected"
+          },
+          "key_symbols": {
+            "type": "array",
+            "items": {"type": "string"},
+            "description": "Key symbols (functions, classes) in this file related to the bug"
+          }
+        },
+        "additionalProperties": false
+      },
+      "description": "Files affected by the bug. Every file MUST have structured rationale explaining why it is affected and what change is needed."
     },
     "reproduction_steps": {
       "type": "array",
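For reference, an `affected_files` entry that satisfies the tightened schema might look like this sketch (the path, line number, and symbols are illustrative, not taken from the commit):

```json
{
  "path": "src/auth/login-handler.ts",
  "relevance": 0.85,
  "rationale": "Contains handleLogin() at line 45 where null check is missing for token response",
  "change_type": "fix_target",
  "discovery_source": "stack-trace",
  "key_symbols": ["handleLogin", "TokenResponse"]
}
```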
@@ -1,7 +1,7 @@
 {
   "$schema": "http://json-schema.org/draft-07/schema#",
   "title": "Exploration Context Schema",
-  "description": "Code exploration results from cli-explore-agent for task context gathering",
+  "description": "Code exploration results from cli-explore-agent for task context gathering. Every file MUST include selection rationale.",
   "type": "object",
   "required": [
     "project_structure",
@@ -20,21 +20,54 @@
     },
     "relevant_files": {
       "type": "array",
+      "minItems": 1,
       "items": {
-        "oneOf": [
-          {"type": "string"},
-          {
-            "type": "object",
-            "required": ["path", "relevance"],
-            "properties": {
-              "path": {"type": "string", "description": "File path relative to project root"},
-              "relevance": {"type": "number", "minimum": 0, "maximum": 1, "description": "Relevance score 0.0-1.0 (0.7+ high, 0.5-0.7 medium, <0.5 low)"},
-              "rationale": {"type": "string", "description": "Brief explanation of why this file is relevant from this exploration angle"}
-            }
-          }
-        ]
-      },
-      "description": "File paths to be modified or referenced for the task. Prefer object format with relevance scores for synthesis prioritization."
+        "type": "object",
+        "required": ["path", "relevance", "rationale", "role"],
+        "properties": {
+          "path": {
+            "type": "string",
+            "description": "File path relative to project root"
+          },
+          "relevance": {
+            "type": "number",
+            "minimum": 0,
+            "maximum": 1,
+            "description": "Relevance score 0.0-1.0 (0.7+ high, 0.5-0.7 medium, <0.5 low)"
+          },
+          "rationale": {
+            "type": "string",
+            "minLength": 10,
+            "description": "REQUIRED: Selection rationale explaining why this file is relevant to the exploration topic. Must be specific (not generic). Example: 'Contains AuthService.login() which is the entry point for JWT token generation' rather than 'Related to auth'"
+          },
+          "role": {
+            "type": "string",
+            "enum": [
+              "modify_target",
+              "dependency",
+              "pattern_reference",
+              "test_target",
+              "type_definition",
+              "integration_point",
+              "config",
+              "context_only"
+            ],
+            "description": "File's role relative to the task: modify_target=needs changes, dependency=imported by targets, pattern_reference=demonstrates patterns to follow, test_target=tests needing update, type_definition=types/interfaces used, integration_point=where new code connects, config=configuration, context_only=understanding only"
+          },
+          "discovery_source": {
+            "type": "string",
+            "enum": ["bash-scan", "cli-analysis", "ace-search", "dependency-trace", "manual"],
+            "description": "How the file was discovered: bash-scan=structural scan (rg/find/tree), cli-analysis=Gemini/Qwen semantic analysis, ace-search=ACE context engine, dependency-trace=import/export graph traversal, manual=directly specified in task"
+          },
+          "key_symbols": {
+            "type": "array",
+            "items": {"type": "string"},
+            "description": "Key symbols (functions, classes, types) in this file relevant to the task. Example: ['AuthService', 'login', 'TokenPayload']"
+          }
+        },
+        "additionalProperties": false
+      },
+      "description": "Files relevant to the task. Every file MUST have structured rationale explaining its selection basis and role classification."
     },
     "patterns": {
       "type": "string",
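Under the new explore schema, a conforming `relevant_files` entry might look like the following sketch (file and symbols are illustrative, lifted from the schema's own description examples):

```json
{
  "path": "src/auth/AuthService.ts",
  "relevance": 0.9,
  "rationale": "Contains AuthService.login() which is the entry point for JWT token generation",
  "role": "modify_target",
  "discovery_source": "ace-search",
  "key_symbols": ["AuthService", "login", "TokenPayload"]
}
```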
@@ -92,9 +92,11 @@ RULES: {from prompt, if template specified} | analysis=READ-ONLY
 
 ### Dual-Source Synthesis
 
-1. Bash results: Precise file:line locations
-2. Gemini results: Semantic understanding, design intent
-3. Merge with source attribution (bash-discovered | gemini-discovered)
+1. Bash results: Precise file:line locations → `discovery_source: "bash-scan"`
+2. Gemini results: Semantic understanding, design intent → `discovery_source: "cli-analysis"`
+3. ACE search: Semantic code search → `discovery_source: "ace-search"`
+4. Dependency tracing: Import/export graph → `discovery_source: "dependency-trace"`
+5. Merge with source attribution and generate rationale for each file
 
 ---
 
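A minimal sketch of the merge step described above, assuming each source list already carries per-file entries (the helper name and object shapes are illustrative, not part of the commit):

```javascript
// Merge per-source file lists, tagging each entry with its discovery_source.
// On path collisions, keep the higher-relevance entry (mirrors the later dedup logic).
function mergeWithAttribution(sources) {
  const merged = new Map()
  for (const { source, files } of sources) {
    for (const f of files) {
      const entry = { ...f, discovery_source: f.discovery_source || source }
      const existing = merged.get(entry.path)
      if (!existing || (entry.relevance || 0) > (existing.relevance || 0)) {
        merged.set(entry.path, entry)
      }
    }
  }
  return [...merged.values()]
}

// Example: bash scan and Gemini both surfaced the same file.
const merged = mergeWithAttribution([
  { source: 'bash-scan', files: [{ path: 'src/auth/AuthService.ts', relevance: 0.7, rationale: 'login() defined at line 45' }] },
  { source: 'cli-analysis', files: [{ path: 'src/auth/AuthService.ts', relevance: 0.9, rationale: 'Core JWT entry point per semantic analysis' }] }
])
// → one entry with discovery_source: "cli-analysis" (higher relevance wins)
```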
@@ -118,7 +120,16 @@ Parse and memorize:
 4. **Enum values** - Copy exact strings (e.g., `"critical"` not `"Critical"`)
 5. **Nested structures** - Note flat vs nested requirements
 
-**Step 3: Pre-Output Validation Checklist**
+**Step 3: File Rationale Validation** (MANDATORY for relevant_files / affected_files)
+
+Every file entry MUST have:
+- `rationale` (required, minLength 10): Specific reason tied to the exploration topic, NOT generic
+  - GOOD: "Contains AuthService.login() which is the entry point for JWT token generation"
+  - BAD: "Related to auth" or "Relevant file"
+- `role` (required, enum): Structural classification of why it was selected
+- `discovery_source` (optional but recommended): How the file was found
+
+**Step 4: Pre-Output Validation Checklist**
 
 Before writing ANY JSON output, verify:
 
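A small validator matching the checklist above could look like this sketch (the function name, generic-phrase list, and error strings are illustrative):

```javascript
const ROLES = ['modify_target', 'dependency', 'pattern_reference', 'test_target',
               'type_definition', 'integration_point', 'config', 'context_only']
const GENERIC = /^(related to|relevant file|important|needed)/i

// Validate one relevant_files / affected_files entry against the mandatory-rationale rules.
function validateFileEntry(entry) {
  const errors = []
  if (!entry.path) errors.push('missing path')
  if (typeof entry.relevance !== 'number' || entry.relevance < 0 || entry.relevance > 1) errors.push('relevance must be 0..1')
  if (!entry.rationale || entry.rationale.length < 10) errors.push('rationale missing or shorter than 10 chars')
  else if (GENERIC.test(entry.rationale.trim())) errors.push('rationale too generic')
  if (!ROLES.includes(entry.role)) errors.push('invalid role')
  return errors
}
```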
@@ -128,6 +139,8 @@ Before writing ANY JSON output, verify:
 - [ ] Enum values EXACTLY match schema (case-sensitive)
 - [ ] Nested structures follow schema pattern (flat vs nested)
 - [ ] Data types correct (string, integer, array, object)
+- [ ] Every file in relevant_files has: path + relevance + rationale + role
+- [ ] Every rationale is specific (>10 chars, not generic)
 
 ---
 
@@ -167,13 +180,15 @@ Brief summary:
 **ALWAYS**:
 1. **Search Tool Priority**: ACE (`mcp__ace-tool__search_context`) → CCW (`mcp__ccw-tools__smart_search`) / Built-in (`Grep`, `Glob`, `Read`)
 2. Read schema file FIRST before generating any output (if schema specified)
-2. Copy field names EXACTLY from schema (case-sensitive)
-3. Verify root structure matches schema (array vs object)
-4. Match nested/flat structures as schema requires
-5. Use exact enum values from schema (case-sensitive)
-6. Include ALL required fields at every level
-7. Include file:line references in findings
-8. Attribute discovery source (bash/gemini)
+3. Copy field names EXACTLY from schema (case-sensitive)
+4. Verify root structure matches schema (array vs object)
+5. Match nested/flat structures as schema requires
+6. Use exact enum values from schema (case-sensitive)
+7. Include ALL required fields at every level
+8. Include file:line references in findings
+9. **Every file MUST have rationale**: Specific selection basis tied to the topic (not generic)
+10. **Every file MUST have role**: Classify as modify_target/dependency/pattern_reference/test_target/type_definition/integration_point/config/context_only
+11. **Track discovery source**: Record how each file was found (bash-scan/cli-analysis/ace-search/dependency-trace/manual)
 
 **Bash Tool**:
 - Use `run_in_background=false` for all Bash/CLI calls to ensure foreground execution
@@ -4,7 +4,7 @@ description: |
 Generic planning agent for lite-plan, collaborative-plan, and lite-fix workflows. Generates structured plan JSON based on provided schema reference.
 
 Core capabilities:
-- Schema-driven output (plan-json-schema or fix-plan-json-schema)
+- Schema-driven output (plan-overview-base-schema or plan-overview-fix-schema)
 - Task decomposition with dependency analysis
 - CLI execution ID assignment for fork/merge strategies
 - Multi-angle context integration (explorations or diagnoses)
@@ -14,7 +14,7 @@ color: cyan
 
 You are a generic planning agent that generates structured plan JSON for lite workflows. Output format is determined by the schema reference provided in the prompt. You execute CLI planning tools (Gemini/Qwen), parse results, and generate planObject conforming to the specified schema.
 
-**CRITICAL**: After generating plan.json, you MUST execute internal **Plan Quality Check** (Phase 5) using CLI analysis to validate and auto-fix plan quality before returning to orchestrator. Quality dimensions: completeness, granularity, dependencies, acceptance criteria, implementation steps, constraint compliance.
+**CRITICAL**: After generating plan.json and .task/*.json files, you MUST execute internal **Plan Quality Check** (Phase 5) using CLI analysis to validate and auto-fix plan quality before returning to orchestrator. Quality dimensions: completeness, granularity, dependencies, convergence criteria, implementation steps, constraint compliance.
 
 ## Output Artifacts
 
@@ -24,7 +24,8 @@ The agent produces different artifacts based on workflow context:
 
 | Artifact | Description |
 |----------|-------------|
-| `plan.json` | Structured plan following plan-json-schema.json |
+| `plan.json` | Plan overview following plan-overview-base-schema.json (with `task_ids[]` + `task_count`, NO `tasks[]`) |
+| `.task/TASK-*.json` | Independent task files following task-schema.json (one per task) |
 
 ### Extended Output (collaborative-plan sub-agents)
 
@@ -33,7 +34,7 @@ When invoked with `process_docs: true` in input context:
 | Artifact | Description |
 |----------|-------------|
 | `planning-context.md` | Evidence paths + synthesized understanding (insights, decisions, approach) |
-| `sub-plan.json` | Sub-plan following plan-json-schema.json with source_agent metadata |
+| `sub-plan.json` | Sub-plan following plan-overview-base-schema.json with source_agent metadata |
 
 **planning-context.md format**:
 ```markdown
@@ -57,7 +58,7 @@ When invoked with `process_docs: true` in input context:
 {
   // Required
   task_description: string,   // Task or bug description
-  schema_path: string,        // Schema reference path (plan-json-schema or fix-plan-json-schema)
+  schema_path: string,        // Schema reference path (plan-overview-base-schema or plan-overview-fix-schema)
   session: { id, folder, artifacts },
 
   // Context (one of these based on workflow)
@@ -105,8 +106,8 @@ When `process_docs: true`, generate planning-context.md before sub-plan.json:
 ## Schema-Driven Output
 
 **CRITICAL**: Read the schema reference first to determine output structure:
-- `plan-json-schema.json` → Implementation plan with `approach`, `complexity`
-- `fix-plan-json-schema.json` → Fix plan with `root_cause`, `severity`, `risk_level`
+- `plan-overview-base-schema.json` → Implementation plan with `approach`, `complexity`
+- `plan-overview-fix-schema.json` → Fix plan with `root_cause`, `severity`, `risk_level`
 
 ```javascript
 // Step 1: Always read schema first
@@ -120,7 +121,7 @@ const planObject = generatePlanFromSchema(schema, context)
 
 ```
 Phase 1: Schema & Context Loading
-├─ Read schema reference (plan-json-schema or fix-plan-json-schema)
+├─ Read schema reference (plan-overview-base-schema or plan-overview-fix-schema)
 ├─ Aggregate multi-angle context (explorations or diagnoses)
 └─ Determine output structure from schema
 
@@ -134,11 +135,11 @@ Phase 3: Parsing & Enhancement
 ├─ Validate and enhance task objects
 └─ Infer missing fields from context
 
-Phase 4: planObject Generation
-├─ Build planObject conforming to schema
+Phase 4: Two-Layer Output Generation
+├─ Build task objects conforming to task-schema.json
 ├─ Assign CLI execution IDs and strategies
-├─ Generate flow_control from depends_on
-└─ Write initial plan.json
+├─ Write .task/TASK-*.json files (one per task)
+└─ Write plan.json overview (with task_ids[], NO tasks[])
 
 Phase 5: Plan Quality Check (MANDATORY)
 ├─ Execute CLI quality check using Gemini (Qwen fallback)
@@ -180,14 +181,14 @@ EXPECTED:
 ## Complexity: {Low|Medium|High}
 
 ## Task Breakdown
-### T1: [Title] (or FIX1 for fix-plan)
+### TASK-001: [Title] (or FIX-001 for fix-plan)
 **Scope**: [module/feature path]
 **Action**: [type]
 **Description**: [what]
-**Modification Points**: - [file]: [target] - [change]
+**Files**: - **[path]**: [action] / [target] → [change description]
 **Implementation**: 1. [step]
 **Reference**: - Pattern: [pattern] - Files: [files] - Examples: [guidance]
-**Acceptance**: - [quantified criterion]
+**Convergence Criteria**: - [quantified criterion]
 **Depends On**: []
 
 [MEDIUM/HIGH COMPLEXITY ONLY]
@@ -221,21 +222,18 @@ EXPECTED:
 ## Design Decisions (MEDIUM/HIGH)
 - Decision: [what] | Rationale: [why] | Tradeoff: [what was traded]
 
-## Flow Control
-**Execution Order**: - Phase parallel-1: [T1, T2] (independent)
-**Exit Conditions**: - Success: [condition] - Failure: [condition]
-
 ## Time Estimate
 **Total**: [time]
 
 CONSTRAINTS:
 - Follow schema structure from {schema_path}
+- Task IDs use format TASK-001, TASK-002, etc. (FIX-001 for fix-plan)
 - Complexity determines required fields:
   * Low: base fields only
   * Medium: + rationale + verification + design_decisions
   * High: + risks + code_skeleton + data_flow
-- Acceptance/verification must be quantified
-- Dependencies use task IDs
+- Convergence criteria must be quantified and testable
+- Dependencies use task IDs (TASK-001 format)
 - analysis=READ-ONLY
 " --tool {cli_tool} --mode analysis --cd {project_root}
 ```
@@ -255,11 +253,13 @@ function extractSection(cliOutput, header) {
 // Parse structured tasks from CLI output
 function extractStructuredTasks(cliOutput, complexity) {
   const tasks = []
-  // Split by task headers
-  const taskBlocks = cliOutput.split(/### (T\d+):/).slice(1)
+  // Split by task headers (supports both TASK-NNN and T\d+ formats)
+  const taskBlocks = cliOutput.split(/### (TASK-\d+|T\d+):/).slice(1)
 
   for (let i = 0; i < taskBlocks.length; i += 2) {
-    const taskId = taskBlocks[i].trim()
+    const rawId = taskBlocks[i].trim()
+    // Normalize task ID to TASK-NNN format
+    const taskId = /^T(\d+)$/.test(rawId) ? `TASK-${rawId.slice(1).padStart(3, '0')}` : rawId
     const taskText = taskBlocks[i + 1]
 
     // Extract base fields
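The normalization expression above, pulled out on its own for clarity (same regex and padding as in the hunk):

```javascript
const normalizeTaskId = raw =>
  /^T(\d+)$/.test(raw) ? `TASK-${raw.slice(1).padStart(3, '0')}` : raw

normalizeTaskId('T3')        // → 'TASK-003'
normalizeTaskId('T12')       // → 'TASK-012'
normalizeTaskId('TASK-007')  // → 'TASK-007' (already normalized)
normalizeTaskId('FIX-001')   // → 'FIX-001' (left untouched)
```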
@@ -269,14 +269,20 @@ function extractStructuredTasks(cliOutput, complexity) {
     const descMatch = /\*\*Description\*\*: (.+?)(?=\n)/.exec(taskText)
     const depsMatch = /\*\*Depends On\*\*: (.+?)(?=\n|$)/.exec(taskText)
 
-    // Parse modification points
-    const modPointsSection = /\*\*Modification Points\*\*:\n((?:- .+?\n)*)/.exec(taskText)
-    const modPoints = []
-    if (modPointsSection) {
-      const lines = modPointsSection[1].split('\n').filter(s => s.trim().startsWith('-'))
+    // Parse files (replaces modification_points)
+    const filesSection = /\*\*Files\*\*:\n((?:- .+?\n)*)/.exec(taskText)
+    const files = []
+    if (filesSection) {
+      const lines = filesSection[1].split('\n').filter(s => s.trim().startsWith('-'))
       lines.forEach(line => {
-        const m = /- \[(.+?)\]: \[(.+?)\] - (.+)/.exec(line)
-        if (m) modPoints.push({ file: m[1].trim(), target: m[2].trim(), change: m[3].trim() })
+        // Format: - **path**: action / target -> change description
+        const m = /- \*\*(.+?)\*\*: (.+?) \/ (.+?) (?:→|->|-->) (.+)/.exec(line)
+        if (m) files.push({ path: m[1].trim(), action: m[2].trim(), target: m[3].trim(), change: m[4].trim() })
+        else {
+          // Fallback: - [file]: [target] - [change] (legacy format)
+          const legacy = /- \[(.+?)\]: \[(.+?)\] - (.+)/.exec(line)
+          if (legacy) files.push({ path: legacy[1].trim(), action: "modify", target: legacy[2].trim(), change: legacy[3].trim() })
+        }
       })
     }
 
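To illustrate the new `**Files**` line format that the regex above accepts (the sample line is hypothetical):

```javascript
const m = /- \*\*(.+?)\*\*: (.+?) \/ (.+?) (?:→|->|-->) (.+)/
  .exec('- **src/auth/AuthService.ts**: modify / login() → add null check on token response')
// m[1] = 'src/auth/AuthService.ts'   (path)
// m[2] = 'modify'                    (action)
// m[3] = 'login()'                   (target)
// m[4] = 'add null check on token response' (change)
```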
@@ -294,10 +300,10 @@ function extractStructuredTasks(cliOutput, complexity) {
       examples: (/- Examples: (.+)/m.exec(refSection[1]) || [])[1]?.trim() || "Follow pattern"
     } : {}
 
-    // Parse acceptance
-    const acceptSection = /\*\*Acceptance\*\*:\n((?:- .+?\n)+)/.exec(taskText)
-    const acceptance = acceptSection
-      ? acceptSection[1].split('\n').map(s => s.replace(/^- /, '').trim()).filter(Boolean)
+    // Parse convergence criteria (replaces acceptance)
+    const convergenceSection = /\*\*Convergence Criteria\*\*:\n((?:- .+?\n)+)/.exec(taskText)
+    const convergenceCriteria = convergenceSection
+      ? convergenceSection[1].split('\n').map(s => s.replace(/^- /, '').trim()).filter(Boolean)
       : []
 
     const task = {
@@ -306,17 +312,26 @@ function extractStructuredTasks(cliOutput, complexity) {
       scope: scopeMatch?.[1].trim() || "",
       action: actionMatch?.[1].trim() || "Implement",
       description: descMatch?.[1].trim() || "",
-      modification_points: modPoints,
+      files,
       implementation,
       reference,
-      acceptance,
-      depends_on: depsMatch?.[1] === '[]' ? [] : (depsMatch?.[1] || "").replace(/[\[\]]/g, '').split(',').map(s => s.trim()).filter(Boolean)
+      convergence: { criteria: convergenceCriteria },
+      depends_on: depsMatch?.[1] === '[]' ? [] : (depsMatch?.[1] || "").replace(/[\[\]]/g, '').split(',').map(s => s.trim()).filter(Boolean).map(id => /^T(\d+)$/.test(id) ? `TASK-${id.slice(1).padStart(3, '0')}` : id)
     }
 
     // Add complexity-specific fields
     if (complexity === "Medium" || complexity === "High") {
       task.rationale = extractRationale(taskText)
-      task.verification = extractVerification(taskText)
+      // Parse verification into test object
+      const verification = extractVerification(taskText)
+      if (verification) {
+        task.test = {
+          manual_checks: verification.manual_checks || [],
+          success_metrics: verification.success_metrics || [],
+          unit: verification.unit_tests || [],
+          integration: verification.integration_tests || []
+        }
+      }
     }
 
     if (complexity === "High") {
@@ -330,25 +345,6 @@ function extractStructuredTasks(cliOutput, complexity) {
   return tasks
 }
 
-// Parse flow control section
-function extractFlowControl(cliOutput) {
-  const flowMatch = /## Flow Control\n\*\*Execution Order\*\*:\n((?:- .+?\n)+)/m.exec(cliOutput)
-  const exitMatch = /\*\*Exit Conditions\*\*:\n- Success: (.+?)\n- Failure: (.+)/m.exec(cliOutput)
-
-  const execution_order = []
-  if (flowMatch) {
-    flowMatch[1].trim().split('\n').forEach(line => {
-      const m = /- Phase (.+?): \[(.+?)\] \((.+?)\)/.exec(line)
-      if (m) execution_order.push({ phase: m[1], tasks: m[2].split(',').map(s => s.trim()), type: m[3].includes('independent') ? 'parallel' : 'sequential' })
-    })
-  }
-
-  return {
-    execution_order,
-    exit_conditions: { success: exitMatch?.[1] || "All acceptance criteria met", failure: exitMatch?.[2] || "Critical task fails" }
-  }
-}
-
 // Parse rationale section for a task
 function extractRationale(taskText) {
   const rationaleMatch = /\*\*Rationale\*\*:\n- Chosen Approach: (.+?)\n- Alternatives Considered: (.+?)\n- Decision Factors: (.+?)\n- Tradeoffs: (.+)/s.exec(taskText)
@@ -492,7 +488,6 @@ function parseCLIOutput(cliOutput) {
     approach: extractSection(cliOutput, "Approach") || extractSection(cliOutput, "High-Level Approach"),
     complexity,
     raw_tasks: extractStructuredTasks(cliOutput, complexity),
-    flow_control: extractFlowControl(cliOutput),
     time_estimate: extractSection(cliOutput, "Time Estimate"),
     // High complexity only
     data_flow: complexity === "High" ? extractDataFlow(cliOutput) : null,
@@ -505,6 +500,8 @@ function parseCLIOutput(cliOutput) {
 ### Context Enrichment
 
 ```javascript
+// NOTE: relevant_files items are structured objects:
+// {path, relevance, rationale, role, discovery_source?, key_symbols?}
 function buildEnrichedContext(explorationsContext, explorationAngles) {
   const enriched = { relevant_files: [], patterns: [], dependencies: [], integration_points: [], constraints: [] }
 
@@ -519,7 +516,16 @@ function buildEnrichedContext(explorationsContext, explorationAngles) {
     }
   })
 
-  enriched.relevant_files = [...new Set(enriched.relevant_files)]
+  // Deduplicate by path, keep highest relevance entry for each path
+  const fileMap = new Map()
+  enriched.relevant_files.forEach(f => {
+    const path = typeof f === 'string' ? f : f.path
+    const existing = fileMap.get(path)
+    if (!existing || (f.relevance || 0) > (existing.relevance || 0)) {
+      fileMap.set(path, typeof f === 'string' ? { path: f, relevance: 0.5, rationale: 'discovered', role: 'context_only' } : f)
+    }
+  })
+  enriched.relevant_files = [...fileMap.values()]
   return enriched
 }
 ```
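Concretely, the Map-based dedup keeps one entry per path and upgrades bare-string entries to structured objects; the inputs below are illustrative:

```javascript
// Two angles mention the same path with different relevance; a third entry is a legacy string.
enriched.relevant_files = [
  { path: 'src/auth/AuthService.ts', relevance: 0.7, rationale: 'JWT entry point', role: 'modify_target' },
  { path: 'src/auth/AuthService.ts', relevance: 0.9, rationale: 'login() needs null check', role: 'modify_target' },
  'src/types/token.d.ts'
]
// After dedup: the 0.9 entry survives for AuthService.ts, and the bare string becomes
// { path: 'src/types/token.d.ts', relevance: 0.5, rationale: 'discovered', role: 'context_only' }
```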
@@ -529,21 +535,23 @@ function buildEnrichedContext(explorationsContext, explorationAngles) {
 ```javascript
 function validateAndEnhanceTasks(rawTasks, enrichedContext) {
   return rawTasks.map((task, idx) => ({
-    id: task.id || `T${idx + 1}`,
+    id: task.id || `TASK-${String(idx + 1).padStart(3, '0')}`,
     title: task.title || "Unnamed task",
-    file: task.file || inferFile(task, enrichedContext),
+    scope: task.scope || task.file || inferFile(task, enrichedContext),
     action: task.action || inferAction(task.title),
     description: task.description || task.title,
-    modification_points: task.modification_points?.length > 0
-      ? task.modification_points
-      : [{ file: task.file, target: "main", change: task.description }],
+    files: task.files?.length > 0
+      ? task.files
+      : [{ path: task.scope || task.file || inferFile(task, enrichedContext), action: "modify", target: "main", change: task.description }],
     implementation: task.implementation?.length >= 2
       ? task.implementation
-      : [`Analyze ${task.file}`, `Implement ${task.title}`, `Add error handling`],
-    reference: task.reference || { pattern: "existing patterns", files: enrichedContext.relevant_files.slice(0, 2), examples: "Follow existing structure" },
-    acceptance: task.acceptance?.length >= 1
-      ? task.acceptance
-      : [`${task.title} completed`, `Follows conventions`],
+      : [`Analyze ${task.scope || task.file}`, `Implement ${task.title}`, `Add error handling`],
+    reference: task.reference || { pattern: "existing patterns", files: enrichedContext.relevant_files.slice(0, 2).map(f => typeof f === 'string' ? f : f.path), examples: "Follow existing structure" },
+    convergence: {
+      criteria: task.convergence?.criteria?.length >= 1
+        ? task.convergence.criteria
+        : [`${task.title} completed`, `Follows conventions`]
+    },
    depends_on: task.depends_on || []
   }))
 }
@@ -554,9 +562,11 @@ function inferAction(title) {
   return match ? match[1] : "Implement"
 }
 
+// NOTE: relevant_files items are structured objects with .path property
 function inferFile(task, ctx) {
   const files = ctx?.relevant_files || []
-  return files.find(f => task.title.toLowerCase().includes(f.split('/').pop().split('.')[0].toLowerCase())) || "file-to-be-determined.ts"
+  const getPath = f => typeof f === 'string' ? f : f.path
+  return getPath(files.find(f => task.title.toLowerCase().includes(getPath(f).split('/').pop().split('.')[0].toLowerCase())) || {}) || "file-to-be-determined.ts"
 }
 ```
 
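Worked through an example, the `getPath` helper lets `inferFile` match against both entry shapes (the task titles and file list are illustrative):

```javascript
const ctx = {
  relevant_files: [
    { path: 'src/auth/AuthService.ts', relevance: 0.9 },  // structured entry
    'src/utils/logger.ts'                                  // legacy string entry
  ]
}
inferFile({ title: 'Refactor AuthService token handling' }, ctx)
// → 'src/auth/AuthService.ts' (filename stem 'authservice' appears in the lowercased title)
inferFile({ title: 'Add CI pipeline' }, ctx)
// → 'file-to-be-determined.ts' (no filename stem matches, find() returns undefined)
```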
@@ -601,54 +611,49 @@ function assignCliExecutionIds(tasks, sessionId) {
 | depends_on | Parent Children | Strategy | CLI Command |
 |------------|-----------------|----------|-------------|
 | [] | - | `new` | `--id {cli_execution_id}` |
-| [T1] | 1 | `resume` | `--resume {resume_from}` |
-| [T1] | >1 | `fork` | `--resume {resume_from} --id {cli_execution_id}` |
-| [T1,T2] | - | `merge_fork` | `--resume {ids.join(',')} --id {cli_execution_id}` |
+| [TASK-001] | 1 | `resume` | `--resume {resume_from}` |
+| [TASK-001] | >1 | `fork` | `--resume {resume_from} --id {cli_execution_id}` |
+| [TASK-001,TASK-002] | - | `merge_fork` | `--resume {ids.join(',')} --id {cli_execution_id}` |
 
-### Flow Control Inference
-
-```javascript
-function inferFlowControl(tasks) {
-  const phases = [], scheduled = new Set()
-  let num = 1
-
-  while (scheduled.size < tasks.length) {
-    const ready = tasks.filter(t => !scheduled.has(t.id) && t.depends_on.every(d => scheduled.has(d)))
-    if (!ready.length) break
-
-    const isParallel = ready.length > 1 && ready.every(t => !t.depends_on.length)
-    phases.push({ phase: `${isParallel ? 'parallel' : 'sequential'}-${num}`, tasks: ready.map(t => t.id), type: isParallel ? 'parallel' : 'sequential' })
-    ready.forEach(t => scheduled.add(t.id))
-    num++
-  }
-
-  return { execution_order: phases, exit_conditions: { success: "All acceptance criteria met", failure: "Critical task fails" } }
-}
-```
-
 ### planObject Generation
 
 ```javascript
+// Write individual task files to .task/ directory
+function writeTaskFiles(tasks, sessionFolder) {
+  const taskDir = `${sessionFolder}/.task`
+  Bash(`mkdir -p "${taskDir}"`)
+  tasks.forEach(task => {
+    Write(`${taskDir}/${task.id}.json`, JSON.stringify(task, null, 2))
+  })
+  return tasks.map(t => t.id)
+}
+
 function generatePlanObject(parsed, enrichedContext, input, schemaType) {
   const complexity = parsed.complexity || input.complexity || "Medium"
   const tasks = validateAndEnhanceTasks(parsed.raw_tasks, enrichedContext, complexity)
   assignCliExecutionIds(tasks, input.session.id)  // MANDATORY: Assign CLI execution IDs
-  const flow_control = parsed.flow_control?.execution_order?.length > 0 ? parsed.flow_control : inferFlowControl(tasks)
-  const focus_paths = [...new Set(tasks.flatMap(t => [t.file || t.scope, ...t.modification_points.map(m => m.file)]).filter(Boolean))]
 
-  // Base fields (common to both schemas)
+  // Write individual task files and collect IDs
+  const task_ids = writeTaskFiles(tasks, input.session.folder)
+
+  // Determine plan_type from schema
+  const plan_type = schemaType === 'fix-plan' ? 'fix' : 'feature'
+
+  // Base fields (plan overview - NO tasks[], NO flow_control, NO focus_paths)
   const base = {
     summary: parsed.summary || `Plan for: ${input.task_description.slice(0, 100)}`,
-    tasks,
-    flow_control,
-    focus_paths,
+    approach: parsed.approach || "Step-by-step implementation",
+    task_ids,
+    task_count: task_ids.length,
     estimated_time: parsed.time_estimate || `${tasks.length * 30} minutes`,
     recommended_execution: (complexity === "Low" || input.severity === "Low") ? "Agent" : "Codex",
     _metadata: {
       timestamp: new Date().toISOString(),
       source: "cli-lite-planning-agent",
+      plan_type,
+      schema_version: "2.0",
       planning_mode: "agent-based",
-      context_angles: input.contextAngles || [],
+      exploration_angles: input.contextAngles || [],
       duration_seconds: Math.round((Date.now() - startTime) / 1000)
     }
   }
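Put together, the overview half of the two-layer output might look like this sketch (all values are illustrative; the task bodies would live in `.task/TASK-001.json` and `.task/TASK-002.json`):

```json
{
  "summary": "Plan for: add JWT refresh flow",
  "approach": "Step-by-step implementation",
  "task_ids": ["TASK-001", "TASK-002"],
  "task_count": 2,
  "estimated_time": "60 minutes",
  "recommended_execution": "Codex",
  "_metadata": {
    "timestamp": "2026-02-13T18:42:04.000Z",
    "source": "cli-lite-planning-agent",
    "plan_type": "feature",
    "schema_version": "2.0",
    "planning_mode": "agent-based",
    "exploration_angles": ["architecture"],
    "duration_seconds": 42
  }
}
```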
@@ -669,12 +674,12 @@ function generatePlanObject(parsed, enrichedContext, input, schemaType) {
       root_cause: parsed.root_cause || "Root cause from diagnosis",
       strategy: parsed.strategy || "comprehensive_fix",
       severity: input.severity || "Medium",
-      risk_level: parsed.risk_level || "medium"
+      risk_level: parsed.risk_level || "medium",
+      complexity
     }
   } else {
     return {
       ...base,
-      approach: parsed.approach || "Step-by-step implementation",
       complexity
     }
   }
@@ -684,21 +689,23 @@ function generatePlanObject(parsed, enrichedContext, input, schemaType) {
 function validateAndEnhanceTasks(rawTasks, enrichedContext, complexity) {
   return rawTasks.map((task, idx) => {
     const enhanced = {
-      id: task.id || `T${idx + 1}`,
+      id: task.id || `TASK-${String(idx + 1).padStart(3, '0')}`,
       title: task.title || "Unnamed task",
       scope: task.scope || task.file || inferFile(task, enrichedContext),
       action: task.action || inferAction(task.title),
       description: task.description || task.title,
-      modification_points: task.modification_points?.length > 0
-        ? task.modification_points
-        : [{ file: task.scope || task.file, target: "main", change: task.description }],
+      files: task.files?.length > 0
+        ? task.files
+        : [{ path: task.scope || task.file || inferFile(task, enrichedContext), action: "modify", target: "main", change: task.description }],
       implementation: task.implementation?.length >= 2
        ? task.implementation
        : [`Analyze ${task.scope || task.file}`, `Implement ${task.title}`, `Add error handling`],
-      reference: task.reference || { pattern: "existing patterns", files: enrichedContext.relevant_files.slice(0, 2), examples: "Follow existing structure" },
-      acceptance: task.acceptance?.length >= 1
-        ? task.acceptance
-        : [`${task.title} completed`, `Follows conventions`],
+      reference: task.reference || { pattern: "existing patterns", files: enrichedContext.relevant_files.slice(0, 2).map(f => typeof f === 'string' ? f : f.path), examples: "Follow existing structure" },
+      convergence: {
+        criteria: task.convergence?.criteria?.length >= 1
+          ? task.convergence.criteria
+          : [`${task.title} completed`, `Follows conventions`]
+      },
       depends_on: task.depends_on || []
     }
 
@@ -710,11 +717,11 @@ function validateAndEnhanceTasks(rawTasks, enrichedContext, complexity) {
         decision_factors: ["Maintainability", "Performance"],
         tradeoffs: "None significant"
       }
-      enhanced.verification = task.verification || {
-        unit_tests: [`test_${task.id.toLowerCase()}_basic`],
-        integration_tests: [],
+      enhanced.test = task.test || {
         manual_checks: ["Verify expected behavior"],
-        success_metrics: ["All tests pass"]
+        success_metrics: ["All tests pass"],
+        unit: [`test_${task.id.toLowerCase().replace(/-/g, '_')}_basic`],
+        integration: []
       }
     }
 
@@ -747,20 +754,24 @@ try {
   } else throw error
 }
 
-function generateBasicPlan(taskDesc, ctx) {
-  const files = ctx?.relevant_files || []
+// NOTE: relevant_files items are structured objects with .path property
+function generateBasicPlan(taskDesc, ctx, sessionFolder) {
+  const relevantFiles = (ctx?.relevant_files || []).map(f => typeof f === 'string' ? f : f.path)
   const tasks = [taskDesc].map((t, i) => ({
-    id: `T${i + 1}`, title: t, file: files[i] || "tbd", action: "Implement", description: t,
-    modification_points: [{ file: files[i] || "tbd", target: "main", change: t }],
+    id: `TASK-${String(i + 1).padStart(3, '0')}`, title: t, scope: relevantFiles[i] || "tbd", action: "Implement", description: t,
+    files: [{ path: relevantFiles[i] || "tbd", action: "modify", target: "main", change: t }],
     implementation: ["Analyze structure", "Implement feature", "Add validation"],
-    acceptance: ["Task completed", "Follows conventions"], depends_on: []
+    convergence: { criteria: ["Task completed", "Follows conventions"] }, depends_on: []
   }))
 
+  // Write task files
+  const task_ids = writeTaskFiles(tasks, sessionFolder)
+
   return {
-    summary: `Direct implementation: ${taskDesc}`, approach: "Step-by-step", tasks,
-    flow_control: { execution_order: [{ phase: "sequential-1", tasks: tasks.map(t => t.id), type: "sequential" }], exit_conditions: { success: "Done", failure: "Fails" } },
-    focus_paths: files, estimated_time: "30 minutes", recommended_execution: "Agent", complexity: "Low",
-    _metadata: { timestamp: new Date().toISOString(), source: "cli-lite-planning-agent", planning_mode: "direct", exploration_angles: [], duration_seconds: 0 }
+    summary: `Direct implementation: ${taskDesc}`, approach: "Step-by-step",
+    task_ids, task_count: task_ids.length,
+    estimated_time: "30 minutes", recommended_execution: "Agent", complexity: "Low",
+    _metadata: { timestamp: new Date().toISOString(), source: "cli-lite-planning-agent", plan_type: "feature", schema_version: "2.0", planning_mode: "direct", exploration_angles: [], duration_seconds: 0 }
   }
 }
 ```
@@ -772,21 +783,21 @@ function generateBasicPlan(taskDesc, ctx) {
 ```javascript
 function validateTask(task) {
   const errors = []
-  if (!/^T\d+$/.test(task.id)) errors.push("Invalid task ID")
+  if (!/^TASK-\d{3}$/.test(task.id) && !/^FIX-\d{3}$/.test(task.id)) errors.push("Invalid task ID (expected TASK-NNN or FIX-NNN)")
   if (!task.title?.trim()) errors.push("Missing title")
-  if (!task.file?.trim()) errors.push("Missing file")
+  if (!task.description?.trim()) errors.push("Missing description")
   if (!['Create', 'Update', 'Implement', 'Refactor', 'Add', 'Delete', 'Configure', 'Test', 'Fix'].includes(task.action)) errors.push("Invalid action")
   if (!task.implementation?.length >= 2) errors.push("Need 2+ implementation steps")
-  if (!task.acceptance?.length >= 1) errors.push("Need 1+ acceptance criteria")
-  if (task.depends_on?.some(d => !/^T\d+$/.test(d))) errors.push("Invalid dependency format")
-  if (task.acceptance?.some(a => /works correctly|good performance/i.test(a))) errors.push("Vague acceptance criteria")
+  if (!task.convergence?.criteria?.length >= 1) errors.push("Need 1+ convergence criteria")
+  if (task.depends_on?.some(d => !/^(TASK|FIX)-\d{3}$/.test(d))) errors.push("Invalid dependency format")
+  if (task.convergence?.criteria?.some(c => /works correctly|good performance/i.test(c))) errors.push("Vague convergence criteria")
   return { valid: !errors.length, errors }
 }
 ```
 
-### Acceptance Criteria
+### Convergence Criteria Quality
 
-| ✓ Good | ✗ Bad |
+| Good | Bad |
 |--------|-------|
 | "3 methods: login(), logout(), validate()" | "Service works correctly" |
 | "Response time < 200ms p95" | "Good performance" |
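For example, feeding the validator a task that still uses the old ID convention surfaces the migration (the sample task is hypothetical):

```javascript
validateTask({
  id: 'T1',                                        // old ID format → rejected
  title: 'Add login null check',
  description: 'Guard token response in handleLogin()',
  action: 'Fix',
  implementation: ['Locate handleLogin()', 'Add null check', 'Run tests'],
  convergence: { criteria: ['Null token returns 401 instead of crashing'] },
  depends_on: []
})
// → { valid: false, errors: ['Invalid task ID (expected TASK-NNN or FIX-NNN)'] }
```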
@@ -797,12 +808,12 @@ function validateTask(task) {
 **ALWAYS**:
 - **Search Tool Priority**: ACE (`mcp__ace-tool__search_context`) → CCW (`mcp__ccw-tools__smart_search`) / Built-in (`Grep`, `Glob`, `Read`)
 - **Read schema first** to determine output structure
-- Generate task IDs (T1/T2 for plan, FIX1/FIX2 for fix-plan)
+- Generate task IDs (TASK-001/TASK-002 for plan, FIX-001/FIX-002 for fix-plan)
 - Include depends_on (even if empty [])
 - **Assign cli_execution_id** (`{sessionId}-{taskId}`)
 - **Compute cli_execution strategy** based on depends_on
-- Quantify acceptance/verification criteria
-- Generate flow_control from dependencies
+- Quantify convergence criteria and test metrics
+- **Write BOTH plan.json AND .task/*.json files** (two-layer output)
 - Handle CLI errors with fallback chain
 
 **Bash Tool**:
@@ -810,12 +821,13 @@ function validateTask(task) {
 
 **NEVER**:
 - Execute implementation (return plan only)
-- Use vague acceptance criteria
+- Use vague convergence criteria
 - Create circular dependencies
 - Skip task validation
 - **Skip CLI execution ID assignment**
 - **Ignore schema structure**
 - **Skip Phase 5 Plan Quality Check**
+- **Embed tasks[] in plan.json** (use task_ids[] referencing .task/ files)
 
 ---
 
@@ -832,7 +844,7 @@ After generating plan.json, **MUST** execute CLI quality check before returning
 | **Completeness** | All user requirements reflected in tasks | Yes |
 | **Task Granularity** | Each task 15-60 min scope | No |
 | **Dependencies** | No circular deps, correct ordering | Yes |
-| **Acceptance Criteria** | Quantified and testable (not vague) | No |
+| **Convergence Criteria** | Quantified and testable (not vague) | No |
 | **Implementation Steps** | 2+ actionable steps per task | No |
 | **Constraint Compliance** | Follows project-guidelines.json | Yes |
 
@@ -841,9 +853,9 @@ After generating plan.json, **MUST** execute CLI quality check before returning
 Use `ccw cli` with analysis mode to validate plan against quality dimensions:
 
 ```bash
-ccw cli -p "Validate plan quality: completeness, granularity, dependencies, acceptance criteria, implementation steps, constraint compliance" \
+ccw cli -p "Validate plan quality: completeness, granularity, dependencies, convergence criteria, implementation steps, constraint compliance" \
   --tool gemini --mode analysis \
-  --context "@{plan_json_path} @.workflow/project-guidelines.json"
+  --context "@{plan_json_path} @{task_dir}/*.json @.workflow/project-guidelines.json"
 ```
 
 **Expected Output Structure**:
@@ -855,7 +867,7 @@ ccw cli -p "Validate plan quality: completeness, granularity, dependencies, acce
 ### Result Parsing
 
 Parse CLI output sections using regex to extract:
-- **6 Dimension Results**: Each with `passed` boolean and issue lists (missing requirements, oversized/undersized tasks, vague criteria, etc.)
+- **6 Dimension Results**: Each with `passed` boolean and issue lists (missing requirements, oversized/undersized tasks, vague convergence criteria, etc.)
 - **Summary Counts**: Critical issues, minor issues
 - **Recommendation**: `PASS` | `AUTO_FIX` | `REGENERATE`
 - **Fixes**: Optional JSON patches for auto-fixable issues
@@ -866,7 +878,7 @@ Apply automatic fixes for minor issues:
 
 | Issue Type | Auto-Fix Action | Example |
 |-----------|----------------|---------|
-| **Vague Acceptance** | Replace with quantified criteria | "works correctly" → "All unit tests pass with 100% success rate" |
+| **Vague Convergence** | Replace with quantified criteria | "works correctly" → "All unit tests pass with 100% success rate" |
 | **Insufficient Steps** | Expand to 4-step template | Add: Analyze → Implement → Error handling → Verify |
 | **CLI-Provided Patches** | Apply JSON patches from CLI output | Update task fields per patch specification |
 
@@ -876,7 +888,7 @@ After fixes, update `_metadata.quality_check` with fix log.
 
 After Phase 4 planObject generation:
 
-1. **Write Initial Plan** → `${sessionFolder}/plan.json`
+1. **Write Task Files** → `${sessionFolder}/.task/TASK-*.json` + **Write Plan** → `${sessionFolder}/plan.json`
 2. **Execute CLI Check** → Gemini (Qwen fallback)
 3. **Parse Results** → Extract recommendation and issues
 4. **Handle Recommendation**:
@@ -165,12 +165,16 @@ if (file_exists(manifestPath)) {
 }
 
 // Synthesis helper functions (conceptual)
+// NOTE: relevant_files items are now structured objects:
+// {path, relevance, rationale, role, discovery_source?, key_symbols?}
 function synthesizeCriticalFiles(allRelevantFiles) {
-  // 1. Group by path
+  // 1. Group by path (files are objects with .path property)
   // 2. Count mentions across angles
   // 3. Average relevance scores
-  // 4. Rank by: (mention_count * 0.6) + (avg_relevance * 0.4)
-  // 5. Return top 10-15 with mentioned_by_angles attribution
+  // 4. Merge rationales from different angles (join with "; ")
+  // 5. Collect unique roles and key_symbols across angles
+  // 6. Rank by: (mention_count * 0.6) + (avg_relevance * 0.4)
+  // 7. Return top 10-15 with: path, relevance, rationale, role, mentioned_by_angles, key_symbols
 }
 
 function synthesizeConflictIndicators(explorationData) {
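A concrete sketch of the conceptual steps above, assuming each entry was tagged with the `angle` that produced it (the `angle` field and the implementation details are illustrative; the weights follow the comments):

```javascript
function synthesizeCriticalFiles(allRelevantFiles) {
  // 1. Group entries by path (entries are structured objects with .path)
  const groups = new Map()
  for (const f of allRelevantFiles) {
    const g = groups.get(f.path) || { path: f.path, entries: [] }
    g.entries.push(f)
    groups.set(f.path, g)
  }
  // 2-7. Count mentions, average relevance, merge rationales/roles/symbols, rank, truncate
  return [...groups.values()]
    .map(g => {
      const avgRelevance = g.entries.reduce((s, e) => s + (e.relevance || 0), 0) / g.entries.length
      return {
        path: g.path,
        relevance: avgRelevance,
        rationale: [...new Set(g.entries.map(e => e.rationale))].join('; '),
        role: g.entries[0].role,
        mentioned_by_angles: [...new Set(g.entries.map(e => e.angle))],
        key_symbols: [...new Set(g.entries.flatMap(e => e.key_symbols || []))],
        _score: g.entries.length * 0.6 + avgRelevance * 0.4
      }
    })
    .sort((a, b) => b._score - a._score)
    .slice(0, 15)
}
```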
@@ -544,7 +548,7 @@ Calculate risk level based on:
     }
   ],
   "aggregated_insights": {
-    "critical_files": [{"path": "src/auth/AuthService.ts", "relevance": 0.95, "mentioned_by_angles": ["architecture"]}],
+    "critical_files": [{"path": "src/auth/AuthService.ts", "relevance": 0.95, "rationale": "Contains login/register/verifyToken - core auth entry points", "role": "modify_target", "mentioned_by_angles": ["architecture"], "key_symbols": ["AuthService", "login", "verifyToken"]}],
     "conflict_indicators": [{"type": "pattern_mismatch", "description": "...", "source_angle": "architecture", "severity": "medium"}],
     "clarification_needs": [{"question": "...", "context": "...", "options": [], "source_angle": "architecture"}],
     "constraints": [{"constraint": "Must follow existing DI pattern", "source_angle": "architecture"}],
@@ -52,7 +52,7 @@ mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: "planner", to
 mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: "planner", to: "coordinator", type: "plan_revision", summary: "Split task-2 into two subtasks per feedback" })

 // Error reporting
-mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: "planner", to: "coordinator", type: "error", summary: "plan-json-schema.json not found, using default structure" })
+mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: "planner", to: "coordinator", type: "error", summary: "plan-overview-base-schema.json not found, using default structure" })
 ```

 ## Execution Process
@@ -73,10 +73,10 @@ Phase 2: Multi-Angle Exploration
 └─ Write exploration results to session folder

 Phase 3: Plan Generation
-├─ Read plan-json-schema.json for structure reference
+├─ Read plan-overview-base-schema.json + task-schema.json for structure reference
 ├─ Low complexity → Direct Claude planning
 ├─ Medium/High → cli-lite-planning-agent
-└─ Output: plan.json
+└─ Output: plan.json (overview with task_ids[]) + .task/TASK-*.json (independent task files)

 Phase 4: Submit for Approval
 ├─ SendMessage plan summary to coordinator
@@ -208,6 +208,12 @@ Execute **${angle}** exploration for task planning context.
 ## Expected Output
 Write JSON to: ${sessionFolder}/exploration-${angle}.json
 Follow explore-json-schema.json structure with ${angle}-focused findings.

+**MANDATORY**: Every file in relevant_files MUST have:
+- **rationale** (required): Specific selection basis tied to ${angle} topic (>10 chars, not generic)
+- **role** (required): modify_target|dependency|pattern_reference|test_target|type_definition|integration_point|config|context_only
+- **discovery_source** (recommended): bash-scan|cli-analysis|ace-search|dependency-trace|manual
+- **key_symbols** (recommended): Key functions/classes/types relevant to task
 `
 })
 })
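These requirements are easy to enforce mechanically. A minimal validator sketch follows; the role list mirrors the enum above, while the generic-phrase pattern is only an illustrative heuristic:

```javascript
const ROLES = ["modify_target", "dependency", "pattern_reference", "test_target",
  "type_definition", "integration_point", "config", "context_only"];

// Sketch: collect violations of the MANDATORY relevant_files rules above.
function validateRelevantFile(f) {
  const errors = [];
  if (!f.path) errors.push("missing path");
  if (typeof f.relevance !== "number" || f.relevance < 0 || f.relevance > 1)
    errors.push("relevance must be a number in [0, 1]");
  if (!f.rationale || f.rationale.length <= 10)
    errors.push("rationale is required and must be >10 chars");
  else if (/^(related to|part of|core .* logic)/i.test(f.rationale))
    errors.push("rationale looks generic"); // heuristic, not from the schema
  if (!ROLES.includes(f.role))
    errors.push(`role must be one of: ${ROLES.join("|")}`);
  return errors; // empty array means the entry passes
}
```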
@@ -232,7 +238,7 @@ Write(`${sessionFolder}/explorations-manifest.json`, JSON.stringify(explorationM

 ```javascript
 // Read schema reference
-const schema = Bash(`cat ~/.ccw/workflows/cli-templates/schemas/plan-json-schema.json`)
+const schema = Bash(`cat ~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json`)

 if (complexity === 'Low') {
   // Direct Claude planning
@@ -242,18 +248,31 @@ if (complexity === 'Low') {
   // Incorporate findings into plan
 })

-// Generate plan following schema
+// Generate task files in .task/ directory
+Bash(`mkdir -p ${sessionFolder}/.task`)
+
+const tasks = [/* structured tasks with dependencies, files[].change, convergence.criteria */]
+const taskIds = tasks.map(t => t.id)
+
+// Write individual task files following task-schema.json
+tasks.forEach(task => {
+  Write(`${sessionFolder}/.task/${task.id}.json`, JSON.stringify(task, null, 2))
+})
+
+// Generate plan overview following plan-overview-base-schema.json
 const plan = {
   summary: "...",
   approach: "...",
-  tasks: [/* structured tasks with dependencies, modification points, acceptance criteria */],
+  task_ids: taskIds,
+  task_count: taskIds.length,
   estimated_time: "...",
   recommended_execution: "Agent",
   complexity: "Low",
   _metadata: {
     timestamp: new Date().toISOString(),
     source: "team-planner",
-    planning_mode: "direct"
+    planning_mode: "direct",
+    plan_type: "feature"
   }
 }
 Write(`${sessionFolder}/plan.json`, JSON.stringify(plan, null, 2))
@@ -264,16 +283,26 @@ if (complexity === 'Low') {
   run_in_background: false,
   description: "Generate detailed implementation plan",
   prompt: `
-Generate implementation plan and write plan.json.
+Generate implementation plan with two-layer output.

 ## Output Location
 **Session Folder**: ${sessionFolder}
 **Output Files**:
 - ${sessionFolder}/planning-context.md
-- ${sessionFolder}/plan.json
+- ${sessionFolder}/plan.json (overview with task_ids[])
+- ${sessionFolder}/.task/TASK-*.json (independent task files)

 ## Output Schema Reference
-Execute: cat ~/.ccw/workflows/cli-templates/schemas/plan-json-schema.json
+Execute: cat ~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json
+Execute: cat ~/.ccw/workflows/cli-templates/schemas/task-schema.json
+
+## Output Format: Two-Layer Structure
+- plan.json: Overview with task_ids[] referencing .task/ files (NO tasks[] array)
+- .task/TASK-*.json: Independent task files following task-schema.json
+
+plan.json required: summary, approach, task_ids, task_count, _metadata (with plan_type)
+Task files required: id, title, description, depends_on, convergence (with criteria[])
+Task fields: files[].change (not modification_points), convergence.criteria (not acceptance), test (not verification)

 ## Task Description
 ${task.description}
@@ -286,9 +315,9 @@ Path: ${exp.path}`).join('\n\n')}
 ${complexity}

 ## Requirements
-Generate plan.json following schema. Key constraints:
-- tasks: 2-7 structured tasks (group by feature/module, NOT by file)
-- Each task: id, title, scope, modification_points, implementation, acceptance, depends_on
+Generate plan.json + .task/*.json following schemas. Key constraints:
+- 2-7 structured tasks (group by feature/module, NOT by file)
+- Each task file: id, title, description, files[].change, convergence.criteria, depends_on
 - Prefer parallel tasks (minimize depends_on)
 `
 })
@@ -301,6 +330,10 @@ Generate plan.json following schema. Key constraints:
 // Read generated plan
 const plan = JSON.parse(Read(`${sessionFolder}/plan.json`))

+// Load tasks from .task/ directory (two-layer format)
+const tasks = plan.task_ids.map(id => JSON.parse(Read(`${sessionFolder}/.task/${id}.json`)))
+const taskCount = plan.task_count || plan.task_ids.length
+
 // Send plan summary to coordinator
 SendMessage({
   type: "message",
@@ -309,19 +342,20 @@ SendMessage({

 **Task**: ${task.subject}
 **Complexity**: ${complexity}
-**Tasks**: ${plan.tasks.length}
+**Tasks**: ${taskCount}

 ### Task Summary
-${plan.tasks.map((t, i) => `${i+1}. ${t.title} (${t.scope || 'N/A'})`).join('\n')}
+${tasks.map((t, i) => `${i+1}. ${t.title} (${t.scope || 'N/A'})`).join('\n')}

 ### Approach
 ${plan.approach}

 ### Plan Location
 ${sessionFolder}/plan.json
+${plan.task_ids ? `Task Files: ${sessionFolder}/.task/` : ''}

 Please review and approve or request revisions.`,
-  summary: `Plan ready: ${plan.tasks.length} tasks`
+  summary: `Plan ready: ${taskCount} tasks`
 })

 // Wait for coordinator response
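Outside the workflow's `Read()` pseudo-helper, the same two-layer load is a few lines of plain Node. A sketch, assuming the session folder layout shown later in this document:

```javascript
const fs = require("fs");
const path = require("path");

// Sketch: load a two-layer plan (plan.json + .task/*.json) with plain Node fs.
// Mirrors the fallbacks used above: task_ids may be absent on legacy plans,
// and task_count falls back to the number of loaded files.
function loadTwoLayerPlan(sessionFolder) {
  const plan = JSON.parse(
    fs.readFileSync(path.join(sessionFolder, "plan.json"), "utf8"));
  const tasks = (plan.task_ids || []).map(id =>
    JSON.parse(fs.readFileSync(
      path.join(sessionFolder, ".task", `${id}.json`), "utf8")));
  return { plan, tasks, taskCount: plan.task_count || tasks.length };
}
```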
@@ -359,7 +393,11 @@ if (nextTasks.length > 0) {
 ├── exploration-{angle2}.json
 ├── explorations-manifest.json      # Exploration index
 ├── planning-context.md             # Evidence + understanding (Medium/High)
-└── plan.json                       # Implementation plan
+├── plan.json                       # Plan overview with task_ids[] (NO embedded tasks[])
+└── .task/                          # Independent task files
+    ├── TASK-001.json               # Task file following task-schema.json
+    ├── TASK-002.json
+    └── ...
 ```

 ## Error Handling
@@ -44,7 +44,8 @@ Intelligent lightweight bug fixing command with dynamic workflow adaptation base
 | `diagnosis-{angle}.json` | Per-angle diagnosis results (1-4 files based on severity) |
 | `diagnoses-manifest.json` | Index of all diagnosis files |
 | `planning-context.md` | Evidence paths + synthesized understanding |
-| `fix-plan.json` | Structured fix plan (fix-plan-json-schema.json) |
+| `fix-plan.json` | Fix plan overview with `task_ids[]` (plan-overview-fix-schema.json) |
+| `.task/FIX-*.json` | Independent fix task files (one per task) |

 **Output Directory**: `.workflow/.lite-fix/{bug-slug}-{YYYY-MM-DD}/`
@@ -52,7 +53,7 @@ Intelligent lightweight bug fixing command with dynamic workflow adaptation base
 - Low/Medium severity → Direct Claude planning (no agent)
 - High/Critical severity → `cli-lite-planning-agent` generates `fix-plan.json`

-**Schema Reference**: `~/.ccw/workflows/cli-templates/schemas/fix-plan-json-schema.json`
+**Schema Reference**: `~/.ccw/workflows/cli-templates/schemas/plan-overview-fix-schema.json`

 ## Auto Mode Defaults
@@ -91,7 +92,7 @@ Phase 2: Clarification (optional, multi-round)

 Phase 3: Fix Planning (NO CODE EXECUTION - planning only)
 +- Decision (based on Phase 1 severity):
-   |- Low/Medium -> Load schema: cat ~/.ccw/workflows/cli-templates/schemas/fix-plan-json-schema.json -> Direct Claude planning (following schema) -> fix-plan.json -> MUST proceed to Phase 4
+   |- Low/Medium -> Load schema: cat ~/.ccw/workflows/cli-templates/schemas/plan-overview-fix-schema.json -> Direct Claude planning (following schema) -> fix-plan.json -> MUST proceed to Phase 4
    +- High/Critical -> cli-lite-planning-agent -> fix-plan.json -> MUST proceed to Phase 4

 Phase 4: Confirmation & Selection
@@ -254,8 +255,12 @@ Execute **${angle}** diagnosis for bug root cause analysis. Analyze codebase fro
   **IMPORTANT**: Use structured format:
   \`{file: "src/module/file.ts", line_range: "45-60", issue: "Description", confidence: 0.85}\`
 - affected_files: Files involved from ${angle} perspective
-  **IMPORTANT**: Use object format with relevance scores:
-  \`[{path: "src/file.ts", relevance: 0.85, rationale: "Contains ${angle} logic"}]\`
+  **MANDATORY**: Every file MUST use structured object format with ALL required fields:
+  \`[{path: "src/file.ts", relevance: 0.85, rationale: "Contains handleLogin() at line 45 where null check is missing", change_type: "fix_target", discovery_source: "bash-scan", key_symbols: ["handleLogin"]}]\`
+  - **rationale** (required): Specific reason why this file is affected (>10 chars, not generic)
+  - **change_type** (required): fix_target|needs_update|test_coverage|reference_only
+  - **discovery_source** (recommended): bash-scan|cli-analysis|ace-search|dependency-trace|stack-trace|manual
+  - **key_symbols** (recommended): Key functions/classes related to the bug
 - reproduction_steps: Steps to reproduce the bug
 - fix_hints: Suggested fix approaches from ${angle} viewpoint
 - dependencies: Dependencies relevant to ${angle} diagnosis
@@ -268,7 +273,9 @@ Execute **${angle}** diagnosis for bug root cause analysis. Analyze codebase fro
 - [ ] Schema obtained via cat diagnosis-json-schema.json
 - [ ] get_modules_by_depth.sh executed
 - [ ] Root cause identified with confidence score
-- [ ] At least 3 affected files identified with ${angle} rationale
+- [ ] At least 3 affected files identified with specific rationale + change_type
+- [ ] Every file has rationale >10 chars (not generic like "Contains ${angle} logic")
+- [ ] Every file has change_type classification (fix_target/needs_update/etc.)
 - [ ] Fix hints are actionable (specific code changes, not generic advice)
 - [ ] Reproduction steps are verifiable
 - [ ] JSON output follows schema exactly
@@ -426,21 +433,74 @@ if (autoYes) {
 **Low/Medium Severity** - Direct planning by Claude:
 ```javascript
 // Step 1: Read schema
-const schema = Bash(`cat ~/.ccw/workflows/cli-templates/schemas/fix-plan-json-schema.json`)
+const schema = Bash(`cat ~/.ccw/workflows/cli-templates/schemas/plan-overview-fix-schema.json`)

-// Step 2: Generate fix-plan following schema (Claude directly, no agent)
-// For Medium complexity: include rationale + verification (optional, but recommended)
+// Step 2: Generate fix tasks with NEW field names (Claude directly, no agent)
+// Field mapping: modification_points -> files, acceptance -> convergence, verification -> test
+const fixTasks = [
+  {
+    id: "FIX-001",
+    title: "...",
+    description: "...",
+    scope: "...",
+    action: "Fix|Update|Refactor|Add|Delete",
+    depends_on: [],
+    convergence: {
+      criteria: ["..."] // Quantified acceptance criteria
+    },
+    files: [
+      { path: "src/module/file.ts", action: "modify", target: "functionName", change: "Description of change" }
+    ],
+    implementation: ["Step 1: ...", "Step 2: ..."],
+    test: {
+      manual_checks: ["Reproduce issue", "Verify fix"],
+      success_metrics: ["Issue resolved", "No regressions"]
+    },
+    complexity: "Low|Medium",
+
+    // Medium severity fields (optional for Low, recommended for Medium)
+    ...(severity === "Medium" ? {
+      rationale: {
+        chosen_approach: "Direct fix approach",
+        alternatives_considered: ["Workaround", "Refactor"],
+        decision_factors: ["Minimal impact", "Quick turnaround"],
+        tradeoffs: "Doesn't address underlying issue"
+      },
+      test: {
+        unit: ["test_bug_fix_basic"],
+        integration: [],
+        manual_checks: ["Reproduce issue", "Verify fix"],
+        success_metrics: ["Issue resolved", "No regressions"]
+      }
+    } : {})
+  }
+  // ... additional tasks as needed
+]
+
+// Step 3: Write individual task files to .task/ directory
+const taskDir = `${sessionFolder}/.task`
+Bash(`mkdir -p "${taskDir}"`)
+
+fixTasks.forEach(task => {
+  Write(`${taskDir}/${task.id}.json`, JSON.stringify(task, null, 2))
+})
+
+// Step 4: Generate fix-plan overview (NO embedded tasks[])
 const fixPlan = {
   summary: "...",
+  approach: "...",
+  task_ids: fixTasks.map(t => t.id),
+  task_count: fixTasks.length,
+  fix_context: {
     root_cause: "...",
     strategy: "immediate_patch|comprehensive_fix|refactor",
-    tasks: [...], // Each task: { id, title, scope, ..., depends_on, complexity }
+    severity: severity,
+    risk_level: "..."
+  },
   estimated_time: "...",
   recommended_execution: "Agent",
-  severity: severity,
-  risk_level: "...",

-  // Medium complexity fields (optional for direct planning, auto-filled for Low)
+  // Medium complexity fields (optional for Low)
   ...(severity === "Medium" ? {
     design_decisions: [
       {
@@ -448,58 +508,22 @@ const fixPlan = {
         rationale: "Keeps changes localized and quick to review",
         tradeoff: "Defers comprehensive refactoring"
       }
-    ],
-    tasks_with_rationale: {
-      // Each task gets rationale if Medium
-      task_rationale_example: {
-        rationale: {
-          chosen_approach: "Direct fix approach",
-          alternatives_considered: ["Workaround", "Refactor"],
-          decision_factors: ["Minimal impact", "Quick turnaround"],
-          tradeoffs: "Doesn't address underlying issue"
-        },
-        verification: {
-          unit_tests: ["test_bug_fix_basic"],
-          integration_tests: [],
-          manual_checks: ["Reproduce issue", "Verify fix"],
-          success_metrics: ["Issue resolved", "No regressions"]
-        }
-      }
-    }
+    ]
   } : {}),

   _metadata: {
     timestamp: getUtc8ISOString(),
     source: "direct-planning",
     planning_mode: "direct",
+    plan_type: "fix",
     complexity: severity === "Medium" ? "Medium" : "Low"
   }
 }

-// Step 3: Merge task rationale into tasks array
-if (severity === "Medium") {
-  fixPlan.tasks = fixPlan.tasks.map(task => ({
-    ...task,
-    rationale: fixPlan.tasks_with_rationale[task.id]?.rationale || {
-      chosen_approach: "Standard fix",
-      alternatives_considered: [],
-      decision_factors: ["Correctness", "Simplicity"],
-      tradeoffs: "None"
-    },
-    verification: fixPlan.tasks_with_rationale[task.id]?.verification || {
-      unit_tests: [`test_${task.id}_basic`],
-      integration_tests: [],
-      manual_checks: ["Verify fix works"],
-      success_metrics: ["Test pass"]
-    }
-  }))
-  delete fixPlan.tasks_with_rationale // Clean up temp field
-}
-
-// Step 4: Write fix-plan to session folder
+// Step 5: Write fix-plan overview to session folder
 Write(`${sessionFolder}/fix-plan.json`, JSON.stringify(fixPlan, null, 2))

-// Step 5: MUST continue to Phase 4 (Confirmation) - DO NOT execute code here
+// Step 6: MUST continue to Phase 4 (Confirmation) - DO NOT execute code here
 ```

 **High/Critical Severity** - Invoke cli-lite-planning-agent:
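The Low/Medium branch above and the lite-plan equivalent later in this diff repeat the same write sequence, so as a sketch it factors into one helper, with plain Node `fs` standing in for the workflow's `Write`/`Bash` helpers:

```javascript
const fs = require("fs");
const path = require("path");

// Sketch: write tasks to .task/ and an overview carrying task_ids[] only
// (never an embedded tasks[] array). The overviewFile name is either
// "plan.json" or "fix-plan.json" depending on the command.
function writeTwoLayerPlan(sessionFolder, overviewFile, overview, tasks) {
  const taskDir = path.join(sessionFolder, ".task");
  fs.mkdirSync(taskDir, { recursive: true });
  for (const task of tasks) {
    fs.writeFileSync(path.join(taskDir, `${task.id}.json`),
      JSON.stringify(task, null, 2));
  }
  const plan = {
    ...overview,
    task_ids: tasks.map(t => t.id),
    task_count: tasks.length
  };
  fs.writeFileSync(path.join(sessionFolder, overviewFile),
    JSON.stringify(plan, null, 2));
  return plan;
}

// e.g. writeTwoLayerPlan(sessionFolder, "fix-plan.json",
//        { summary, approach, fix_context, _metadata }, fixTasks)
```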
@@ -510,17 +534,18 @@ Task(
   run_in_background=false,
   description="Generate detailed fix plan",
   prompt=`
-Generate fix plan and write fix-plan.json.
+Generate fix plan using two-layer output format.

 ## Output Location

 **Session Folder**: ${sessionFolder}
 **Output Files**:
 - ${sessionFolder}/planning-context.md (evidence + understanding)
-- ${sessionFolder}/fix-plan.json (fix plan)
+- ${sessionFolder}/fix-plan.json (fix plan overview -- NO embedded tasks[])
+- ${sessionFolder}/.task/FIX-*.json (independent fix task files, one per task)

 ## Output Schema Reference
-Execute: cat ~/.ccw/workflows/cli-templates/schemas/fix-plan-json-schema.json (get schema reference before generating plan)
+Execute: cat ~/.ccw/workflows/cli-templates/schemas/plan-overview-fix-schema.json (get schema reference before generating plan)

 ## Project Context (MANDATORY - Read Both Files)
 1. Read: .workflow/project-tech.json (technology stack, architecture, key components)
@@ -550,20 +575,49 @@ ${JSON.stringify(clarificationContext) || "None"}
 ${severity}

 ## Requirements
-Generate fix-plan.json with:
+**Output Format**: Two-layer structure:
+- fix-plan.json: Overview with task_ids[] referencing .task/ files (NO tasks[] array)
+- .task/FIX-*.json: Independent task files following task-schema.json
+
+**fix-plan.json required fields**:
 - summary: 2-3 sentence overview of the fix
-- root_cause: Consolidated root cause from all diagnoses
-- strategy: "immediate_patch" | "comprehensive_fix" | "refactor"
-- tasks: 1-5 structured fix tasks (**IMPORTANT: group by fix area, NOT by file**)
-  - **Task Granularity Principle**: Each task = one complete fix unit
-  - title: action verb + target (e.g., "Fix token validation edge case")
-  - scope: module path (src/auth/) or feature name
-  - action: "Fix" | "Update" | "Refactor" | "Add" | "Delete"
-  - description
-  - modification_points: ALL files to modify for this fix (group related changes)
-  - implementation (2-5 steps covering all modification_points)
-  - acceptance: Quantified acceptance criteria
-  - depends_on: task IDs this task depends on (use sparingly)
+- approach: Overall fix approach description
+- task_ids: Array of task IDs (e.g., ["FIX-001", "FIX-002"])
+- task_count: Number of tasks
+- fix_context:
+  - root_cause: Consolidated root cause from all diagnoses
+  - strategy: "immediate_patch" | "comprehensive_fix" | "refactor"
+  - severity: ${severity}
+  - risk_level: "Low" | "Medium" | "High"
+- estimated_time, recommended_execution
+- data_flow (High/Critical REQUIRED): How data flows through affected code
+  - diagram: "A -> B -> C" style flow
+  - stages: [{stage, input, output, component}]
+- design_decisions (High/Critical REQUIRED): Global fix decisions
+  - [{decision, rationale, tradeoff}]
+- _metadata:
+  - timestamp, source, planning_mode
+  - plan_type: "fix"
+  - complexity: "High" | "Critical"
+  - diagnosis_angles: ${JSON.stringify(manifest.diagnoses.map(d => d.angle))}
+
+**Each .task/FIX-*.json required fields**:
+- id: "FIX-001" (prefix FIX-, NOT TASK-)
+- title: action verb + target (e.g., "Fix token validation edge case")
+- description
+- scope: module path (src/auth/) or feature name
+- action: "Fix" | "Update" | "Refactor" | "Add" | "Delete"
+- depends_on: task IDs this task depends on (use sparingly)
+- convergence: { criteria: ["Quantified acceptance criterion 1", "..."] }
+- files: ALL files to modify for this fix (group related changes)
+  - [{ path: "src/file.ts", action: "modify|create|delete", target: "component/function", change: "Description of what changes" }]
+- implementation: ["Step 1: ...", "Step 2: ..."] (2-5 steps covering all files)
+- test:
+  - unit: ["test names to add/verify"]
+  - integration: ["integration test names"]
+  - manual_checks: ["manual verification steps"]
+  - success_metrics: ["quantified success criteria"]

 **High/Critical complexity fields per task** (REQUIRED):
 - rationale:
@@ -571,11 +625,6 @@ Generate fix-plan.json with:
   - alternatives_considered: Other approaches evaluated
   - decision_factors: Key factors influencing choice
   - tradeoffs: Known tradeoffs of this approach
-- verification:
-  - unit_tests: Test names to add/verify
-  - integration_tests: Integration test names
-  - manual_checks: Manual verification steps
-  - success_metrics: Quantified success criteria
 - risks:
   - description: Risk description
   - probability: Low|Medium|High
@@ -586,18 +635,10 @@ Generate fix-plan.json with:
   - interfaces: [{name, definition, purpose}]
   - key_functions: [{signature, purpose, returns}]

-**Top-level High/Critical fields** (REQUIRED):
-- data_flow: How data flows through affected code
-  - diagram: "A → B → C" style flow
-  - stages: [{stage, input, output, component}]
-- design_decisions: Global fix decisions
-  - [{decision, rationale, tradeoff}]
-
-- estimated_time, recommended_execution, severity, risk_level
-- _metadata:
-  - timestamp, source, planning_mode
-  - complexity: "High" | "Critical"
-  - diagnosis_angles: ${JSON.stringify(manifest.diagnoses.map(d => d.angle))}
+**Field name rules** (do NOT use old names):
+- files[].change (NOT modification_points)
+- convergence.criteria (NOT acceptance)
+- test (NOT verification at task level)

 ## Task Grouping Rules
 1. **Group by fix area**: All changes for one fix = one task (even if 2-3 files)
@@ -605,25 +646,30 @@ Generate fix-plan.json with:
 3. **Substantial tasks**: Each task should represent 10-45 minutes of work
 4. **True dependencies only**: Only use depends_on when Task B cannot start without Task A's output
 5. **Prefer parallel**: Most tasks should be independent (no depends_on)
+6. **Task IDs**: Use FIX-001, FIX-002 prefix (NOT TASK-)

 ## Execution
 1. Read ALL diagnosis files for comprehensive context
 2. Execute CLI planning using Gemini (Qwen fallback) with --rule planning-fix-strategy template
 3. Synthesize findings from multiple diagnosis angles
-4. Generate fix-plan with:
-   - For High/Critical: REQUIRED new fields (rationale, verification, risks, code_skeleton, data_flow, design_decisions)
-   - Each task MUST have rationale (why this fix), verification (how to verify success), and risks (potential issues)
-5. Parse output and structure fix-plan
+4. Generate fix tasks (1-5 tasks):
+   - Each task file written to \`${sessionFolder}/.task/FIX-NNN.json\`
+   - For High/Critical: REQUIRED fields (rationale, test, risks, code_skeleton)
+5. Generate fix-plan overview:
+   - Written to \`${sessionFolder}/fix-plan.json\`
+   - Contains task_ids[] referencing .task/ files (NO embedded tasks[])
+   - For High/Critical: REQUIRED fields (data_flow, design_decisions)
 6. **Write**: \`${sessionFolder}/planning-context.md\` (evidence paths + understanding)
-7. **Write**: \`${sessionFolder}/fix-plan.json\`
-8. Return brief completion summary
+7. **Write**: \`${sessionFolder}/.task/FIX-*.json\` (individual task files)
+8. **Write**: \`${sessionFolder}/fix-plan.json\` (plan overview with task_ids[])
+9. Return brief completion summary

 ## Output Format for CLI
 Include these sections in your fix-plan output:
-- Summary, Root Cause, Strategy (existing)
+- Summary, Root Cause (in fix_context), Strategy (existing)
 - Data Flow: Diagram showing affected code paths
 - Design Decisions: Key architectural choices in the fix
-- Tasks: Each with rationale (Medium/High), verification (Medium/High), risks (High), code_skeleton (High)
+- Task files: Each with convergence, files, test, rationale (High), risks (High), code_skeleton (High)
 `
 )
 ```
@@ -638,18 +684,26 @@ Include these sections in your fix-plan output:
 ```javascript
 const fixPlan = JSON.parse(Read(`${sessionFolder}/fix-plan.json`))

+// Load tasks from .task/ directory (two-layer format)
+const tasks = (fixPlan.task_ids || []).map(id => {
+  return JSON.parse(Read(`${sessionFolder}/.task/${id}.json`))
+})
+const taskList = tasks
+
+const fixContext = fixPlan.fix_context || {}
+
 console.log(`
 ## Fix Plan

 **Summary**: ${fixPlan.summary}
-**Root Cause**: ${fixPlan.root_cause}
-**Strategy**: ${fixPlan.strategy}
+**Root Cause**: ${fixContext.root_cause || fixPlan.root_cause}
+**Strategy**: ${fixContext.strategy || fixPlan.strategy}

-**Tasks** (${fixPlan.tasks.length}):
-${fixPlan.tasks.map((t, i) => `${i+1}. ${t.title} (${t.scope})`).join('\n')}
+**Tasks** (${taskList.length}):
+${taskList.map((t, i) => `${i+1}. ${t.title} (${t.scope})`).join('\n')}

-**Severity**: ${fixPlan.severity}
-**Risk Level**: ${fixPlan.risk_level}
+**Severity**: ${fixContext.severity || fixPlan.severity}
+**Risk Level**: ${fixContext.risk_level || fixPlan.risk_level}
 **Estimated Time**: ${fixPlan.estimated_time}
 **Recommended**: ${fixPlan.recommended_execution}
 `)
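The `fixContext.x || fixPlan.x` fallback recurs for every field that moved into `fix_context`; a tiny accessor would keep the backward compatibility in one place (a sketch, not part of the diff):

```javascript
// Sketch: read fields that moved into fix_context, falling back to the
// legacy top-level location for plans generated before this change.
function fixMeta(fixPlan, field) {
  return (fixPlan.fix_context || {})[field] ?? fixPlan[field];
}

// Usage: fixMeta(fixPlan, "severity"), fixMeta(fixPlan, "root_cause"), ...
```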
@@ -679,7 +733,7 @@ if (autoYes) {
   userSelection = AskUserQuestion({
     questions: [
       {
-        question: `Confirm fix plan? (${fixPlan.tasks.length} tasks, ${fixPlan.severity} severity)`,
+        question: `Confirm fix plan? (${taskList.length} tasks, ${fixContext.severity || fixPlan.severity} severity)`,
         header: "Confirm",
         multiSelect: false,
         options: [
@@ -695,7 +749,7 @@ if (autoYes) {
         options: [
           { label: "Agent", description: "@code-developer agent" },
           { label: "Codex", description: "codex CLI tool" },
-          { label: "Auto", description: `Auto: ${fixPlan.severity === 'Low' ? 'Agent' : 'Codex'}` }
+          { label: "Auto", description: `Auto: ${(fixContext.severity || fixPlan.severity) === 'Low' ? 'Agent' : 'Codex'}` }
         ]
       },
       {
@@ -734,14 +788,21 @@ manifest.diagnoses.forEach(diag => {

 const fixPlan = JSON.parse(Read(`${sessionFolder}/fix-plan.json`))

+const fixSeverity = fixPlan.fix_context?.severity || fixPlan.severity
+
 executionContext = {
   mode: "bugfix",
-  severity: fixPlan.severity,
+  severity: fixSeverity,
   planObject: {
     ...fixPlan,
     // Ensure complexity is set based on severity for new field consumption
-    complexity: fixPlan.complexity || (fixPlan.severity === 'Critical' ? 'High' : (fixPlan.severity === 'High' ? 'High' : 'Medium'))
+    complexity: fixPlan.complexity || (fixSeverity === 'Critical' ? 'High' : (fixSeverity === 'High' ? 'High' : 'Medium'))
   },
+  // Task files from .task/ directory (two-layer format)
+  taskFiles: (fixPlan.task_ids || []).map(id => ({
+    id,
+    path: `${sessionFolder}/.task/${id}.json`
+  })),
   diagnosisContext: diagnoses,
   diagnosisAngles: manifest.diagnoses.map(d => d.angle),
   diagnosisManifest: manifest,
@@ -758,7 +819,8 @@ executionContext = {
     path: diag.path
   })),
   diagnoses_manifest: `${sessionFolder}/diagnoses-manifest.json`,
-  fix_plan: `${sessionFolder}/fix-plan.json`
+  fix_plan: `${sessionFolder}/fix-plan.json`,
+  task_dir: `${sessionFolder}/.task`
   }
 }
 }
@@ -780,7 +842,11 @@ Skill(skill="workflow:lite-execute", args="--in-memory --mode bugfix")
 ├── diagnosis-{angle4}.json         # Diagnosis angle 4 (if applicable)
 ├── diagnoses-manifest.json         # Diagnosis index
 ├── planning-context.md             # Evidence + understanding
-└── fix-plan.json                   # Fix plan
+├── fix-plan.json                   # Fix plan overview (task_ids[], NO embedded tasks[])
+└── .task/                          # Independent fix task files
+    ├── FIX-001.json                # Fix task 1
+    ├── FIX-002.json                # Fix task 2
+    └── ...                         # Additional fix tasks
 ```

 **Example**:
@@ -791,7 +857,10 @@ Skill(skill="workflow:lite-execute", args="--in-memory --mode bugfix")
 ├── diagnosis-validation.json
 ├── diagnoses-manifest.json
 ├── planning-context.md
-└── fix-plan.json
+├── fix-plan.json
+└── .task/
+    ├── FIX-001.json
+    └── FIX-002.json
 ```

 ## Error Handling
@@ -44,7 +44,8 @@ Intelligent lightweight planning command with dynamic workflow adaptation based
 | `exploration-{angle}.json` | Per-angle exploration results (1-4 files based on complexity) |
 | `explorations-manifest.json` | Index of all exploration files |
 | `planning-context.md` | Evidence paths + synthesized understanding |
-| `plan.json` | Structured implementation plan (plan-json-schema.json) |
+| `plan.json` | Plan overview with task_ids[] (plan-overview-base-schema.json) |
+| `.task/TASK-*.json` | Independent task files (one per task) |

 **Output Directory**: `.workflow/.lite-plan/{task-slug}-{YYYY-MM-DD}/`
@@ -52,7 +53,7 @@ Intelligent lightweight planning command with dynamic workflow adaptation based
 - Low complexity → Direct Claude planning (no agent)
 - Medium/High complexity → `cli-lite-planning-agent` generates `plan.json`

-**Schema Reference**: `~/.ccw/workflows/cli-templates/schemas/plan-json-schema.json`
+**Schema Reference**: `~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json`

 ## Auto Mode Defaults
@@ -89,7 +90,7 @@ Phase 2: Clarification (optional, multi-round)

 Phase 3: Planning (NO CODE EXECUTION - planning only)
 └─ Decision (based on Phase 1 complexity):
-   ├─ Low → Load schema: cat ~/.ccw/workflows/cli-templates/schemas/plan-json-schema.json → Direct Claude planning (following schema) → plan.json
+   ├─ Low → Load schema: cat ~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json → Direct Claude planning (following schema) → plan.json
    └─ Medium/High → cli-lite-planning-agent → plan.json (agent internally executes quality check)

 Phase 4: Confirmation & Selection
@@ -251,9 +252,13 @@ Execute **${angle}** exploration for task planning context. Analyze codebase fro
 **Required Fields** (all ${angle} focused):
 - project_structure: Modules/architecture relevant to ${angle}
 - relevant_files: Files affected from ${angle} perspective
-  **IMPORTANT**: Use object format with relevance scores for synthesis:
-  \`[{path: "src/file.ts", relevance: 0.85, rationale: "Core ${angle} logic"}]\`
-  Scores: 0.7+ high priority, 0.5-0.7 medium, <0.5 low
+  **MANDATORY**: Every file MUST use structured object format with ALL required fields:
+  \`[{path: "src/file.ts", relevance: 0.85, rationale: "Contains AuthService.login() - entry point for JWT token generation", role: "modify_target", discovery_source: "bash-scan", key_symbols: ["AuthService", "login"]}]\`
+  - **rationale** (required): Specific selection basis tied to ${angle} topic (>10 chars, not generic)
+  - **role** (required): modify_target|dependency|pattern_reference|test_target|type_definition|integration_point|config|context_only
+  - **discovery_source** (recommended): bash-scan|cli-analysis|ace-search|dependency-trace|manual
+  - **key_symbols** (recommended): Key functions/classes/types in the file relevant to the task
+  - Scores: 0.7+ high priority, 0.5-0.7 medium, <0.5 low
 - patterns: ${angle}-related patterns to follow
 - dependencies: Dependencies relevant to ${angle}
 - integration_points: Where to integrate from ${angle} viewpoint (include file:line locations)
@@ -264,7 +269,9 @@ Execute **${angle}** exploration for task planning context. Analyze codebase fro
 ## Success Criteria
 - [ ] Schema obtained via cat explore-json-schema.json
 - [ ] get_modules_by_depth.sh executed
-- [ ] At least 3 relevant files identified with ${angle} rationale
+- [ ] At least 3 relevant files identified with specific rationale + role
+- [ ] Every file has rationale >10 chars (not generic like "Related to ${angle}")
+- [ ] Every file has role classification (modify_target/dependency/etc.)
 - [ ] Patterns are actionable (code examples, not generic advice)
 - [ ] Integration points include file:line locations
 - [ ] Constraints are project-specific to ${angle}
@@ -416,7 +423,11 @@ if (autoYes) {
 // 2. Default → agent

 const executorAssignments = {} // { taskId: { executor: 'gemini'|'codex'|'agent', reason: string } }
-plan.tasks.forEach(task => {
+// Load tasks from .task/ directory for executor assignment
+const taskFiles = Glob(`${sessionFolder}/.task/TASK-*.json`)
+taskFiles.forEach(taskPath => {
+  const task = JSON.parse(Read(taskPath))
   // Claude semantically analyzes each task per the rules above and assigns an executor
   executorAssignments[task.id] = { executor: '...', reason: '...' }
 })
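The assignment itself is left to Claude's semantic judgment above; a deterministic fallback could look like this sketch, where the keyword rules are illustrative assumptions rather than the documented policy:

```javascript
// Sketch: heuristic executor assignment per task; the keyword rules
// here are assumptions, the real assignment is semantic.
function assignExecutor(task) {
  const text = `${task.title} ${task.description || ""}`.toLowerCase();
  if (/refactor|migration|rename/.test(text))
    return { executor: "codex", reason: "broad mechanical change" };
  if (/analy[sz]e|investigate|design/.test(text))
    return { executor: "gemini", reason: "analysis-heavy task" };
  return { executor: "agent", reason: "default" };
}
```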
@@ -425,7 +436,7 @@ plan.tasks.forEach(task => {
 **Low Complexity** - Direct planning by Claude:
 ```javascript
 // Step 1: Read schema
-const schema = Bash(`cat ~/.ccw/workflows/cli-templates/schemas/plan-json-schema.json`)
+const schema = Bash(`cat ~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json`)

 // Step 2: ⚠️ MANDATORY - Read and review ALL exploration files
 const manifest = JSON.parse(Read(`${sessionFolder}/explorations-manifest.json`))
@@ -434,22 +445,51 @@ manifest.explorations.forEach(exp => {
   console.log(`\n### Exploration: ${exp.angle}\n${explorationData}`)
 })

-// Step 3: Generate plan following schema (Claude directly, no agent)
-// ⚠️ Plan MUST incorporate insights from exploration files read in Step 2
+// Step 3: Generate task objects (Claude directly, no agent)
+// ⚠️ Tasks MUST incorporate insights from exploration files read in Step 2
+// Task fields use NEW names: convergence.criteria (not acceptance), files[].change (not modification_points), test (not verification)
+const tasks = [
+  {
+    id: "TASK-001",
+    title: "...",
+    description: "...",
+    depends_on: [],
+    convergence: { criteria: ["..."] },
+    files: [{ path: "...", change: "..." }],
+    implementation: ["..."],
+    test: "..."
+  },
+  // ... more tasks
+]
+
+// Step 4: Write task files to .task/ directory
+const taskDir = `${sessionFolder}/.task`
+Bash(`mkdir -p "${taskDir}"`)
+tasks.forEach(task => {
+  Write(`${taskDir}/${task.id}.json`, JSON.stringify(task, null, 2))
+})
+
+// Step 5: Generate plan overview (NO embedded tasks[])
 const plan = {
   summary: "...",
   approach: "...",
-  tasks: [...], // Each task: { id, title, scope, ..., depends_on, execution_group, complexity }
+  task_ids: tasks.map(t => t.id),
+  task_count: tasks.length,
+  complexity: "Low",
   estimated_time: "...",
   recommended_execution: "Agent",
-  complexity: "Low",
-  _metadata: { timestamp: getUtc8ISOString(), source: "direct-planning", planning_mode: "direct" }
+  _metadata: {
+    timestamp: getUtc8ISOString(),
+    source: "direct-planning",
+    planning_mode: "direct",
+    plan_type: "feature"
+  }
 }

-// Step 4: Write plan to session folder
+// Step 6: Write plan overview to session folder
 Write(`${sessionFolder}/plan.json`, JSON.stringify(plan, null, 2))

-// Step 5: MUST continue to Phase 4 (Confirmation) - DO NOT execute code here
+// Step 7: MUST continue to Phase 4 (Confirmation) - DO NOT execute code here
 ```

 **Medium/High Complexity** - Invoke cli-lite-planning-agent:
@@ -467,10 +507,11 @@ Generate implementation plan and write plan.json.
 **Session Folder**: ${sessionFolder}
 **Output Files**:
 - ${sessionFolder}/planning-context.md (evidence + understanding)
-- ${sessionFolder}/plan.json (implementation plan)
+- ${sessionFolder}/plan.json (plan overview -- NO embedded tasks[])
+- ${sessionFolder}/.task/TASK-*.json (independent task files, one per task)

 ## Output Schema Reference
-Execute: cat ~/.ccw/workflows/cli-templates/schemas/plan-json-schema.json (get schema reference before generating plan)
+Execute: cat ~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json (get schema reference before generating plan)

 ## Project Context (MANDATORY - Read Both Files)
 1. Read: .workflow/project-tech.json (technology stack, architecture, key components)
@@ -500,10 +541,17 @@ ${JSON.stringify(clarificationContext) || "None"}
 ${complexity}

 ## Requirements
-Generate plan.json following the schema obtained above. Key constraints:
-- tasks: 2-7 structured tasks (**group by feature/module, NOT by file**)
+Generate plan.json and .task/*.json following the schema obtained above. Key constraints:
 - _metadata.exploration_angles: ${JSON.stringify(manifest.explorations.map(e => e.angle))}

+**Output Format**: Two-layer structure:
+- plan.json: Overview with task_ids[] referencing .task/ files (NO tasks[] array)
+- .task/TASK-*.json: Independent task files following task-schema.json
+
+plan.json required fields: summary, approach, task_ids, task_count, _metadata (with plan_type: "feature")
+Each task file required fields: id, title, description, depends_on, convergence (with criteria[])
+Task fields use: files[].change (not modification_points), convergence.criteria (not acceptance), test (not verification)
+
 ## Task Grouping Rules
 1. **Group by feature**: All changes for one feature = one task (even if 3-5 files)
 2. **Group by context**: Tasks with similar context or related functional changes can be grouped together
@@ -517,10 +565,12 @@ Generate plan.json following the schema obtained above. Key constraints:
 1. Read schema file (cat command above)
 2. Execute CLI planning using Gemini (Qwen fallback)
 3. Read ALL exploration files for comprehensive context
-4. Synthesize findings and generate plan following schema
+4. Synthesize findings and generate tasks + plan overview
 5. **Write**: \`${sessionFolder}/planning-context.md\` (evidence paths + understanding)
-6. **Write**: \`${sessionFolder}/plan.json\`
-7. Return brief completion summary
+6. **Create**: \`${sessionFolder}/.task/\` directory (mkdir -p)
+7. **Write**: \`${sessionFolder}/.task/TASK-001.json\`, \`TASK-002.json\`, etc. (one per task)
+8. **Write**: \`${sessionFolder}/plan.json\` (overview with task_ids[], NO tasks[])
+9. Return brief completion summary
 `
 )
 ```
@@ -535,14 +585,21 @@ Generate plan.json following the schema obtained above. Key constraints:
 ```javascript
 const plan = JSON.parse(Read(`${sessionFolder}/plan.json`))

+// Load tasks from .task/ directory
+const tasks = (plan.task_ids || []).map(id => {
+  const taskPath = `${sessionFolder}/.task/${id}.json`
+  return JSON.parse(Read(taskPath))
+})
+const taskList = tasks
+
 console.log(`
 ## Implementation Plan

 **Summary**: ${plan.summary}
 **Approach**: ${plan.approach}

-**Tasks** (${plan.tasks.length}):
-${plan.tasks.map((t, i) => `${i+1}. ${t.title} (${t.file})`).join('\n')}
+**Tasks** (${taskList.length}):
+${taskList.map((t, i) => `${i+1}. ${t.title} (${t.scope || t.files?.[0]?.path || ''})`).join('\n')}

 **Complexity**: ${plan.complexity}
 **Estimated Time**: ${plan.estimated_time}
@@ -575,7 +632,7 @@ if (autoYes) {
   userSelection = AskUserQuestion({
     questions: [
       {
-        question: `Confirm plan? (${plan.tasks.length} tasks, ${plan.complexity})`,
+        question: `Confirm plan? (${taskList.length} tasks, ${plan.complexity})`,
         header: "Confirm",
         multiSelect: false,
         options: [
@@ -632,7 +689,11 @@ manifest.explorations.forEach(exp => {
 const plan = JSON.parse(Read(`${sessionFolder}/plan.json`))

 executionContext = {
-  planObject: plan,
+  planObject: plan, // plan overview (no tasks[])
+  taskFiles: (plan.task_ids || []).map(id => ({
+    id,
+    path: `${sessionFolder}/.task/${id}.json`
+  })),
   explorationsContext: explorations,
   explorationAngles: manifest.explorations.map(e => e.angle),
   explorationManifest: manifest,
@@ -653,7 +714,8 @@ executionContext = {
     path: exp.path
   })),
   explorations_manifest: `${sessionFolder}/explorations-manifest.json`,
-  plan: `${sessionFolder}/plan.json`
+  plan: `${sessionFolder}/plan.json`,
+  task_dir: `${sessionFolder}/.task`
   }
 }
 }
@@ -674,7 +736,12 @@ Skill(skill="workflow:lite-execute", args="--in-memory")
 ├── exploration-{angle3}.json       # Exploration angle 3 (if applicable)
 ├── exploration-{angle4}.json       # Exploration angle 4 (if applicable)
 ├── explorations-manifest.json      # Exploration index
-└── plan.json                       # Implementation plan
+├── planning-context.md             # Evidence paths + understanding
+├── plan.json                       # Plan overview (task_ids[])
+└── .task/                          # Task files directory
+    ├── TASK-001.json
+    ├── TASK-002.json
+    └── ...
 ```

 **Example**:
@@ -684,7 +751,12 @@ Skill(skill="workflow:lite-execute", args="--in-memory")
 ├── exploration-auth-patterns.json
 ├── exploration-security.json
 ├── explorations-manifest.json
-└── plan.json
+├── planning-context.md
+├── plan.json
+└── .task/
+    ├── TASK-001.json
+    ├── TASK-002.json
+    └── TASK-003.json
 ```
 
 ## Error Handling
@@ -164,9 +164,13 @@ Execute **${angle}** exploration for task planning context. Analyze codebase fro
 **Required Fields** (all ${angle} focused):
 - project_structure: Modules/architecture relevant to ${angle}
 - relevant_files: Files affected from ${angle} perspective
-**IMPORTANT**: Use object format with relevance scores for synthesis:
-\`[{path: "src/file.ts", relevance: 0.85, rationale: "Core ${angle} logic"}]\`
-Scores: 0.7+ high priority, 0.5-0.7 medium, <0.5 low
+**MANDATORY**: Every file MUST use structured object format with ALL required fields:
+\`[{path: "src/file.ts", relevance: 0.85, rationale: "Contains AuthService.login() - entry point for JWT token generation", role: "modify_target", discovery_source: "bash-scan", key_symbols: ["AuthService", "login"]}]\`
+- **rationale** (required): Specific selection basis tied to ${angle} topic (>10 chars, not generic)
+- **role** (required): modify_target|dependency|pattern_reference|test_target|type_definition|integration_point|config|context_only
+- **discovery_source** (recommended): bash-scan|cli-analysis|ace-search|dependency-trace|manual
+- **key_symbols** (recommended): Key functions/classes/types in the file relevant to the task
+- Scores: 0.7+ high priority, 0.5-0.7 medium, <0.5 low
 - patterns: ${angle}-related patterns to follow
 - dependencies: Dependencies relevant to ${angle}
 - integration_points: Where to integrate from ${angle} viewpoint (include file:line locations)
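
To make the contract above concrete, here is a minimal validation sketch; `validateFileEntry` and `ROLES` are hypothetical helpers written for illustration, not code from this commit:

```javascript
const ROLES = new Set([
  "modify_target", "dependency", "pattern_reference", "test_target",
  "type_definition", "integration_point", "config", "context_only"
])

// Returns a list of problems; an empty array means the entry conforms
function validateFileEntry(entry) {
  if (typeof entry !== "object" || entry === null) return ["entry must be an object, not a bare string"]
  const problems = []
  if (!entry.path) problems.push("missing path")
  if (typeof entry.relevance !== "number" || entry.relevance < 0 || entry.relevance > 1) problems.push("relevance must be 0.0-1.0")
  if (!entry.rationale || entry.rationale.length <= 10) problems.push("rationale must be specific (>10 chars)")
  if (!ROLES.has(entry.role)) problems.push("role must be one of the enum values")
  return problems
}

console.log(validateFileEntry({ path: "src/file.ts", relevance: 0.85, rationale: "Core logic", role: "modify_target" }))
// → ["rationale must be specific (>10 chars)"] ("Core logic" is only 10 chars and too generic)
```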
@@ -92,9 +92,11 @@ RULES: {from prompt, if template specified} | analysis=READ-ONLY
 
 ### Dual-Source Synthesis
 
-1. Bash results: Precise file:line locations
-2. Gemini results: Semantic understanding, design intent
-3. Merge with source attribution (bash-discovered | gemini-discovered)
+1. Bash results: Precise file:line locations → `discovery_source: "bash-scan"`
+2. Gemini results: Semantic understanding, design intent → `discovery_source: "cli-analysis"`
+3. ACE search: Semantic code search → `discovery_source: "ace-search"`
+4. Dependency tracing: Import/export graph → `discovery_source: "dependency-trace"`
+5. Merge with source attribution and generate rationale for each file
 
 ---
 
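A small sketch of what step 5 could look like in practice; the inputs are hypothetical stand-ins for real bash and CLI results, and `mergeWithAttribution` is an illustrative helper, not part of the commit:

```javascript
// Merge files found by different tools, tagging each with its discovery_source
function mergeWithAttribution(bashFiles, cliFiles) {
  const tag = (files, source) => files.map(f => ({ ...f, discovery_source: source }))
  return [...tag(bashFiles, "bash-scan"), ...tag(cliFiles, "cli-analysis")]
}

const merged = mergeWithAttribution(
  [{ path: "src/auth/AuthService.ts", relevance: 0.9, rationale: "login() found at line 45 by grep" }],
  [{ path: "src/middleware/auth.ts", relevance: 0.7, rationale: "CLI identified token verification here" }]
)
console.log(merged.map(f => `${f.path} ← ${f.discovery_source}`))
// → ["src/auth/AuthService.ts ← bash-scan", "src/middleware/auth.ts ← cli-analysis"]
```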
@@ -118,7 +120,16 @@ Parse and memorize:
 4. **Enum values** - Copy exact strings (e.g., `"critical"` not `"Critical"`)
 5. **Nested structures** - Note flat vs nested requirements
 
-**Step 3: Pre-Output Validation Checklist**
+**Step 3: File Rationale Validation** (MANDATORY for relevant_files / affected_files)
+
+Every file entry MUST have:
+- `rationale` (required, minLength 10): Specific reason tied to the exploration topic, NOT generic
+  - GOOD: "Contains AuthService.login() which is the entry point for JWT token generation"
+  - BAD: "Related to auth" or "Relevant file"
+- `role` (required, enum): Structural classification of why it was selected
+- `discovery_source` (optional but recommended): How the file was found
+
+**Step 4: Pre-Output Validation Checklist**
 
 Before writing ANY JSON output, verify:
 
@@ -128,6 +139,8 @@ Before writing ANY JSON output, verify:
 - [ ] Enum values EXACTLY match schema (case-sensitive)
 - [ ] Nested structures follow schema pattern (flat vs nested)
 - [ ] Data types correct (string, integer, array, object)
+- [ ] Every file in relevant_files has: path + relevance + rationale + role
+- [ ] Every rationale is specific (>10 chars, not generic)
 
 ---
 
@@ -167,13 +180,15 @@ Brief summary:
 **ALWAYS**:
 1. **Search Tool Priority**: ACE (`mcp__ace-tool__search_context`) → CCW (`mcp__ccw-tools__smart_search`) / Built-in (`Grep`, `Glob`, `Read`)
 2. Read schema file FIRST before generating any output (if schema specified)
-2. Copy field names EXACTLY from schema (case-sensitive)
-3. Verify root structure matches schema (array vs object)
-4. Match nested/flat structures as schema requires
-5. Use exact enum values from schema (case-sensitive)
-6. Include ALL required fields at every level
-7. Include file:line references in findings
-8. Attribute discovery source (bash/gemini)
+3. Copy field names EXACTLY from schema (case-sensitive)
+4. Verify root structure matches schema (array vs object)
+5. Match nested/flat structures as schema requires
+6. Use exact enum values from schema (case-sensitive)
+7. Include ALL required fields at every level
+8. Include file:line references in findings
+9. **Every file MUST have rationale**: Specific selection basis tied to the topic (not generic)
+10. **Every file MUST have role**: Classify as modify_target/dependency/pattern_reference/test_target/type_definition/integration_point/config/context_only
+11. **Track discovery source**: Record how each file was found (bash-scan/cli-analysis/ace-search/dependency-trace/manual)
 
 **Bash Tool**:
 - Use `run_in_background=false` for all Bash/CLI calls to ensure foreground execution
@@ -505,6 +505,8 @@ function parseCLIOutput(cliOutput) {
 ### Context Enrichment
 
 ```javascript
+// NOTE: relevant_files items are structured objects:
+// {path, relevance, rationale, role, discovery_source?, key_symbols?}
 function buildEnrichedContext(explorationsContext, explorationAngles) {
   const enriched = { relevant_files: [], patterns: [], dependencies: [], integration_points: [], constraints: [] }
 
@@ -519,7 +521,16 @@ function buildEnrichedContext(explorationsContext, explorationAngles) {
     }
   })
 
-  enriched.relevant_files = [...new Set(enriched.relevant_files)]
+  // Deduplicate by path, keep highest relevance entry for each path
+  const fileMap = new Map()
+  enriched.relevant_files.forEach(f => {
+    const path = typeof f === 'string' ? f : f.path
+    const existing = fileMap.get(path)
+    if (!existing || (f.relevance || 0) > (existing.relevance || 0)) {
+      fileMap.set(path, typeof f === 'string' ? { path: f, relevance: 0.5, rationale: 'discovered', role: 'context_only' } : f)
+    }
+  })
+  enriched.relevant_files = [...fileMap.values()]
   return enriched
 }
 ```
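
A standalone trace of the Map-based dedup above, with illustrative inputs: the higher-relevance duplicate wins, and a legacy bare-string entry is normalized to the fallback object shape.

```javascript
// Same logic as the hunk above, inlined so it runs on its own
const input = [
  { path: "src/auth.ts", relevance: 0.6, rationale: "Mentioned by security angle", role: "dependency" },
  { path: "src/auth.ts", relevance: 0.9, rationale: "Contains login() fix target", role: "modify_target" },
  "src/legacy.ts" // legacy string entry, still tolerated by the consumer
]
const fileMap = new Map()
input.forEach(f => {
  const path = typeof f === 'string' ? f : f.path
  const existing = fileMap.get(path)
  if (!existing || (f.relevance || 0) > (existing.relevance || 0)) {
    fileMap.set(path, typeof f === 'string' ? { path: f, relevance: 0.5, rationale: 'discovered', role: 'context_only' } : f)
  }
})
console.log([...fileMap.values()])
// → src/auth.ts keeps the relevance-0.9 entry;
//   src/legacy.ts becomes { path, relevance: 0.5, rationale: 'discovered', role: 'context_only' }
```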
@@ -540,7 +551,7 @@ function validateAndEnhanceTasks(rawTasks, enrichedContext) {
     implementation: task.implementation?.length >= 2
       ? task.implementation
       : [`Analyze ${task.file}`, `Implement ${task.title}`, `Add error handling`],
-    reference: task.reference || { pattern: "existing patterns", files: enrichedContext.relevant_files.slice(0, 2), examples: "Follow existing structure" },
+    reference: task.reference || { pattern: "existing patterns", files: enrichedContext.relevant_files.slice(0, 2).map(f => typeof f === 'string' ? f : f.path), examples: "Follow existing structure" },
     acceptance: task.acceptance?.length >= 1
       ? task.acceptance
       : [`${task.title} completed`, `Follows conventions`],
@@ -554,9 +565,11 @@ function inferAction(title) {
   return match ? match[1] : "Implement"
 }
 
+// NOTE: relevant_files items are structured objects with .path property
 function inferFile(task, ctx) {
   const files = ctx?.relevant_files || []
-  return files.find(f => task.title.toLowerCase().includes(f.split('/').pop().split('.')[0].toLowerCase())) || "file-to-be-determined.ts"
+  const getPath = f => typeof f === 'string' ? f : f.path
+  return getPath(files.find(f => task.title.toLowerCase().includes(getPath(f).split('/').pop().split('.')[0].toLowerCase())) || {}) || "file-to-be-determined.ts"
 }
 ```
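
A quick standalone check of the `getPath`-based lookup with mixed legacy strings and structured objects; the inputs are hypothetical:

```javascript
const getPath = f => typeof f === 'string' ? f : f.path
const files = ["src/old-style.ts", { path: "src/AuthService.ts", relevance: 0.9 }]
const title = "Update AuthService login flow"
// Match a file whose basename (without extension) appears in the task title
const hit = files.find(f => title.toLowerCase().includes(getPath(f).split('/').pop().split('.')[0].toLowerCase()))
console.log(getPath(hit || {}) || "file-to-be-determined.ts") // → "src/AuthService.ts"
```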
@@ -695,7 +708,7 @@ function validateAndEnhanceTasks(rawTasks, enrichedContext, complexity) {
     implementation: task.implementation?.length >= 2
       ? task.implementation
       : [`Analyze ${task.scope || task.file}`, `Implement ${task.title}`, `Add error handling`],
-    reference: task.reference || { pattern: "existing patterns", files: enrichedContext.relevant_files.slice(0, 2), examples: "Follow existing structure" },
+    reference: task.reference || { pattern: "existing patterns", files: enrichedContext.relevant_files.slice(0, 2).map(f => typeof f === 'string' ? f : f.path), examples: "Follow existing structure" },
     acceptance: task.acceptance?.length >= 1
       ? task.acceptance
       : [`${task.title} completed`, `Follows conventions`],
@@ -747,8 +760,9 @@ try {
   } else throw error
 }
 
+// NOTE: relevant_files items are structured objects with .path property
 function generateBasicPlan(taskDesc, ctx) {
-  const files = ctx?.relevant_files || []
+  const files = (ctx?.relevant_files || []).map(f => typeof f === 'string' ? f : f.path)
   const tasks = [taskDesc].map((t, i) => ({
     id: `T${i + 1}`, title: t, file: files[i] || "tbd", action: "Implement", description: t,
     modification_points: [{ file: files[i] || "tbd", target: "main", change: t }],
@@ -165,12 +165,16 @@ if (file_exists(manifestPath)) {
 }
 
 // Synthesis helper functions (conceptual)
+// NOTE: relevant_files items are now structured objects:
+// {path, relevance, rationale, role, discovery_source?, key_symbols?}
 function synthesizeCriticalFiles(allRelevantFiles) {
-  // 1. Group by path
+  // 1. Group by path (files are objects with .path property)
   // 2. Count mentions across angles
   // 3. Average relevance scores
-  // 4. Rank by: (mention_count * 0.6) + (avg_relevance * 0.4)
-  // 5. Return top 10-15 with mentioned_by_angles attribution
+  // 4. Merge rationales from different angles (join with "; ")
+  // 5. Collect unique roles and key_symbols across angles
+  // 6. Rank by: (mention_count * 0.6) + (avg_relevance * 0.4)
+  // 7. Return top 10-15 with: path, relevance, rationale, role, mentioned_by_angles, key_symbols
 }
 
 function synthesizeConflictIndicators(explorationData) {
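
Since `synthesizeCriticalFiles` is only sketched in comments, one possible concrete reading of the seven steps follows; the `{ file, angle }` input shape and the choice to keep the first role encountered are assumptions, not part of the commit:

```javascript
// Illustrative implementation of the conceptual steps above.
// Assumed input: [{ file: {path, relevance, rationale, role, key_symbols?}, angle: string }, ...]
function synthesizeCriticalFilesSketch(allRelevantFiles) {
  const byPath = new Map()
  for (const { file, angle } of allRelevantFiles) {       // step 1: group by path
    const e = byPath.get(file.path) || {
      path: file.path, relevances: [], rationales: [],
      roles: new Set(), key_symbols: new Set(), angles: new Set()
    }
    e.relevances.push(file.relevance || 0)                // step 3 input
    if (file.rationale) e.rationales.push(file.rationale) // step 4 input
    if (file.role) e.roles.add(file.role)                 // step 5
    for (const s of file.key_symbols || []) e.key_symbols.add(s)
    e.angles.add(angle)                                   // step 2
    byPath.set(file.path, e)
  }
  return [...byPath.values()]
    .map(e => {
      const avg = e.relevances.reduce((a, b) => a + b, 0) / e.relevances.length
      return {
        path: e.path,
        relevance: avg,
        rationale: e.rationales.join("; "),               // step 4
        role: [...e.roles][0],                            // assumption: first role wins
        key_symbols: [...e.key_symbols],
        mentioned_by_angles: [...e.angles],
        score: e.angles.size * 0.6 + avg * 0.4            // step 6
      }
    })
    .sort((a, b) => b.score - a.score)
    .slice(0, 15)                                         // step 7
}
```

Under this weighting, a file mentioned by two angles outranks a single-angle file even when its average relevance is somewhat lower.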
@@ -495,7 +499,7 @@ Calculate risk level based on:
     }
   ],
   "aggregated_insights": {
-    "critical_files": [{"path": "src/auth/AuthService.ts", "relevance": 0.95, "mentioned_by_angles": ["architecture"]}],
+    "critical_files": [{"path": "src/auth/AuthService.ts", "relevance": 0.95, "rationale": "Contains login/register/verifyToken - core auth entry points", "role": "modify_target", "mentioned_by_angles": ["architecture"], "key_symbols": ["AuthService", "login", "verifyToken"]}],
     "conflict_indicators": [{"type": "pattern_mismatch", "description": "...", "source_angle": "architecture", "severity": "medium"}],
     "clarification_needs": [{"question": "...", "context": "...", "options": [], "source_angle": "architecture"}],
     "constraints": [{"constraint": "Must follow existing DI pattern", "source_angle": "architecture"}],
@@ -195,9 +195,13 @@ ${sourceRefsDirective}
 **Required Fields** (all ${angle} focused):
 - project_structure: Modules/architecture relevant to ${angle}
 - relevant_files: Files affected from ${angle} perspective
-**IMPORTANT**: Use object format with relevance scores for synthesis:
-\`[{path: "src/file.ts", relevance: 0.85, rationale: "Core ${angle} logic"}]\`
-Scores: 0.7+ high priority, 0.5-0.7 medium, <0.5 low
+**MANDATORY**: Every file MUST use structured object format with ALL required fields:
+\`[{path: "src/file.ts", relevance: 0.85, rationale: "Contains AuthService.login() - entry point for JWT token generation", role: "modify_target", discovery_source: "bash-scan", key_symbols: ["AuthService", "login"]}]\`
+- **rationale** (required): Specific selection basis tied to ${angle} topic (>10 chars, not generic)
+- **role** (required): modify_target|dependency|pattern_reference|test_target|type_definition|integration_point|config|context_only
+- **discovery_source** (recommended): bash-scan|cli-analysis|ace-search|dependency-trace|manual
+- **key_symbols** (recommended): Key functions/classes/types in the file relevant to the task
+- Scores: 0.7+ high priority, 0.5-0.7 medium, <0.5 low
 - patterns: ${angle}-related patterns to follow
 - dependencies: Dependencies relevant to ${angle}
 - integration_points: Where to integrate from ${angle} viewpoint (include file:line locations)
@@ -210,7 +214,9 @@ ${sourceRefsDirective}
 ## Success Criteria
 - [ ] Schema obtained via cat explore-json-schema.json
 - [ ] get_modules_by_depth.sh executed
-- [ ] At least 3 relevant files identified with ${angle} rationale
+- [ ] At least 3 relevant files identified with specific rationale + role
+- [ ] Every file has rationale >10 chars (not generic like "Related to ${angle}")
+- [ ] Every file has role classification (modify_target/dependency/etc.)
 - [ ] Patterns are actionable (code examples, not generic advice)
 - [ ] Integration points include file:line locations
 - [ ] Constraints are project-specific to ${angle}
@@ -248,7 +248,7 @@ function ContextContent({
         <div className="space-y-0.5 pl-2 max-h-32 overflow-y-auto">
           {ctx.relevant_files.map((f, i) => {
             const filePath = typeof f === 'string' ? f : f.path;
-            const reason = typeof f === 'string' ? undefined : f.reason;
+            const reason = typeof f === 'string' ? undefined : (f.rationale || f.reason);
             return (
               <div key={i} className="group flex items-start gap-1 text-muted-foreground hover:bg-muted/30 rounded px-1 py-0.5">
                 <span className="text-primary/50 shrink-0">{i + 1}.</span>
@@ -105,10 +105,11 @@ function extractString(val: unknown): string {
   return String(val);
 }
 
-/** Extract a secondary description from an object (reason, description, etc.) */
+/** Extract a secondary description from an object (rationale, reason, description, etc.) */
 function extractReason(val: unknown): string | undefined {
   if (!val || typeof val !== 'object') return undefined;
   const obj = val as Record<string, unknown>;
+  if (typeof obj.rationale === 'string') return obj.rationale;
   if (typeof obj.reason === 'string') return obj.reason;
   if (typeof obj.description === 'string') return obj.description;
   return undefined;
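
A plain-JS mirror of the updated helper, to spell out the backward-compatible fallback order (`rationale` → `reason` → `description`); the TypeScript original above is the authoritative version:

```javascript
function extractReasonJs(val) {
  if (!val || typeof val !== 'object') return undefined
  if (typeof val.rationale === 'string') return val.rationale
  if (typeof val.reason === 'string') return val.reason
  if (typeof val.description === 'string') return val.description
  return undefined
}

console.log(extractReasonJs({ rationale: "Contains login()", reason: "legacy field" })) // → "Contains login()"
console.log(extractReasonJs({ reason: "legacy-only entry" }))                           // → "legacy-only entry"
console.log(extractReasonJs("bare string"))                                             // → undefined
```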
@@ -954,7 +954,7 @@ function renderMultiCliContextContent(context, session) {
           <li class="file-item">
             <span class="file-icon">📄</span>
             <code>${escapeHtml(typeof f === 'string' ? f : f.path || f.file || '')}</code>
-            ${f.reason ? `<span class="file-reason">${escapeHtml(f.reason)}</span>` : ''}
+            ${(f.rationale || f.reason) ? `<span class="file-reason">${escapeHtml(f.rationale || f.reason)}</span>` : ''}
           </li>
         `).join('')}
       </ul>