feat: update empty state messages and hints in English and Chinese locales

refactor: rename variables for clarity in ReviewSessionPage and SessionsPage

fix: update version check logic in SettingsPage

chore: remove unused imports in TeamPage and session-detail components

fix: enhance error handling in MCP server

fix: apply default mode in edit-file tool handler

chore: remove tsbuildinfo file

docs: add Quick Plan & Execute phase documentation for issue discovery

chore: clean up ping output file
Author: catlog22
Date: 2026-02-12 23:15:48 +08:00
Commit: e44a97e812 (parent: fd6262b78b)
32 changed files with 912 additions and 1046 deletions


@@ -7,14 +7,16 @@
## Execution Flow
```
-conclusions.json → tasks.jsonl → User Confirmation → Direct Inline Execution → execution.md + execution-events.md
+conclusions.json → .task/*.json → User Confirmation → Direct Inline Execution → execution.md + execution-events.md
```
---
-## Step 1: Generate tasks.jsonl
+## Step 1: Generate .task/*.json
-Convert `conclusions.json` recommendations directly into unified JSONL task format. Each line is a self-contained task with convergence criteria, compatible with `unified-execute-with-file`.
+Convert `conclusions.json` recommendations directly into individual task JSON files. Each file is a self-contained task with convergence criteria, compatible with `unified-execute-with-file`.
**Schema**: `cat ~/.ccw/workflows/cli-templates/schemas/task-schema.json`
**Conversion Logic**:
@@ -51,9 +53,11 @@ const tasks = conclusions.recommendations.map((rec, index) => ({
}
}))
-// Write one task per line
-const jsonlContent = tasks.map(t => JSON.stringify(t)).join('\n')
-Write(`${sessionFolder}/tasks.jsonl`, jsonlContent)
+// Write each task as individual JSON file
+Bash(`mkdir -p ${sessionFolder}/.task`)
+tasks.forEach(task => {
+  Write(`${sessionFolder}/.task/${task.id}.json`, JSON.stringify(task, null, 2))
+})
```
**Task Type Inference**:
@@ -106,13 +110,39 @@ tasks.forEach(task => {
})
```
-**Output**: `${sessionFolder}/tasks.jsonl`
+**Output**: `${sessionFolder}/.task/TASK-*.json`
-**JSONL Schema** (one task per line):
+**Task JSON Schema** (one file per task, e.g. `.task/TASK-001.json`):
-```jsonl
-{"id":"TASK-001","title":"Fix authentication token refresh","description":"Token refresh fails silently when...","type":"fix","priority":"high","effort":"large","files":[{"path":"src/auth/token.ts","action":"modify"},{"path":"src/middleware/auth.ts","action":"modify"}],"depends_on":[],"convergence":{"criteria":["Token refresh returns new valid token","Expired token triggers refresh automatically","Failed refresh redirects to login"],"verification":"jest --testPathPattern=token.test.ts","definition_of_done":"Users remain logged in across token expiration without manual re-login"},"evidence":[...],"source":{"tool":"analyze-with-file","session_id":"ANL-xxx","original_id":"TASK-001"}}
-{"id":"TASK-002","title":"Add input validation to user endpoints","description":"Missing validation allows...","type":"enhancement","priority":"medium","effort":"medium","files":[{"path":"src/routes/user.ts","action":"modify"},{"path":"src/validators/user.ts","action":"create"}],"depends_on":["TASK-001"],"convergence":{"criteria":["All user inputs validated against schema","Invalid inputs return 400 with specific error message","SQL injection patterns rejected"],"verification":"jest --testPathPattern=user.validation.test.ts","definition_of_done":"All user-facing inputs are validated with clear error feedback"},"evidence":[...],"source":{"tool":"analyze-with-file","session_id":"ANL-xxx","original_id":"TASK-002"}}
+```json
+{
+  "id": "TASK-001",
+  "title": "Fix authentication token refresh",
+  "description": "Token refresh fails silently when...",
+  "type": "fix",
+  "priority": "high",
+  "effort": "large",
+  "files": [
+    { "path": "src/auth/token.ts", "action": "modify" },
+    { "path": "src/middleware/auth.ts", "action": "modify" }
+  ],
+  "depends_on": [],
+  "convergence": {
+    "criteria": [
+      "Token refresh returns new valid token",
+      "Expired token triggers refresh automatically",
+      "Failed refresh redirects to login"
+    ],
+    "verification": "jest --testPathPattern=token.test.ts",
+    "definition_of_done": "Users remain logged in across token expiration without manual re-login"
+  },
+  "evidence": [],
+  "source": {
+    "tool": "analyze-with-file",
+    "session_id": "ANL-xxx",
+    "original_id": "TASK-001"
+  }
+}
```
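
Before execution consumes these files, each task can be loaded and sanity-checked against the required fields. A minimal sketch, assuming the schema above; the `validateTask` helper and its field list are illustrative, not part of the skill:

```javascript
// Hypothetical validation pass over the generated .task/*.json files.
function validateTask(task) {
  const problems = []
  for (const field of ['id', 'title', 'type', 'priority', 'convergence']) {
    if (!task[field]) problems.push(`missing ${field}`)
  }
  if (!task.convergence?.criteria?.length) problems.push('empty convergence.criteria')
  return problems
}

Glob(`${sessionFolder}/.task/*.json`).sort().forEach(f => {
  const problems = validateTask(JSON.parse(Read(f)))
  if (problems.length) console.warn(`${f}: ${problems.join(', ')}`)
})
```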
---
@@ -124,8 +154,8 @@ Validate feasibility before starting execution. Reference: unified-execute-with-
##### Step 2.1: Build Execution Order
```javascript
-const tasks = Read(`${sessionFolder}/tasks.jsonl`)
-  .split('\n').filter(l => l.trim()).map(l => JSON.parse(l))
+const taskFiles = Glob(`${sessionFolder}/.task/*.json`)
+const tasks = taskFiles.map(f => JSON.parse(Read(f)))
// 1. Dependency validation
const taskIds = new Set(tasks.map(t => t.id))
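// Illustrative continuation (an assumption, not from the original skill): one
// way to turn depends_on into an execution order is Kahn's algorithm, which
// also detects the circular dependencies mentioned under Error Handling.
const unknownDeps = tasks.flatMap(t =>
  (t.depends_on || []).filter(d => !taskIds.has(d)).map(d => `${t.id} → ${d}`))
if (unknownDeps.length) throw new Error(`Unknown dependencies: ${unknownDeps.join(', ')}`)

// 2. Topological sort with cycle detection
const indegree = new Map(tasks.map(t => [t.id, (t.depends_on || []).length]))
const executionOrder = []
const ready = tasks.filter(t => indegree.get(t.id) === 0).map(t => t.id)
while (ready.length) {
  const id = ready.shift()
  executionOrder.push(id)
  tasks.filter(t => (t.depends_on || []).includes(id)).forEach(t => {
    indegree.set(t.id, indegree.get(t.id) - 1)
    if (indegree.get(t.id) === 0) ready.push(t.id)
  })
}
if (executionOrder.length !== tasks.length) throw new Error('Circular dependencies detected')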
@@ -220,7 +250,7 @@ const executionMd = `# Execution Overview
## Session Info
- **Session ID**: ${sessionId}
-- **Plan Source**: tasks.jsonl (from analysis conclusions)
+- **Plan Source**: .task/*.json (from analysis conclusions)
- **Started**: ${getUtc8ISOString()}
- **Total Tasks**: ${tasks.length}
- **Execution Mode**: Direct inline (serial)
@@ -270,7 +300,7 @@ const eventsHeader = `# Execution Events
**Session**: ${sessionId}
**Started**: ${getUtc8ISOString()}
-**Source**: tasks.jsonl
+**Source**: .task/*.json
---
@@ -302,11 +332,11 @@ if (!autoYes) {
options: [
{ label: "Start Execution", description: "Execute all tasks serially" },
{ label: "Adjust Tasks", description: "Modify, reorder, or remove tasks" },
{ label: "Cancel", description: "Cancel execution, keep tasks.jsonl" }
{ label: "Cancel", description: "Cancel execution, keep .task/" }
]
}]
})
// "Adjust Tasks": display task list, user deselects/reorders, regenerate tasks.jsonl
// "Adjust Tasks": display task list, user deselects/reorders, regenerate .task/*.json
// "Cancel": end workflow, keep artifacts
}
```
@@ -321,7 +351,7 @@ Execute tasks one by one directly using tools (Read, Edit, Write, Grep, Glob, Ba
```
For each taskId in executionOrder:
-├─ Load task from tasks.jsonl
+├─ Load task from .task/{taskId}.json
├─ Check dependencies satisfied (all deps completed)
├─ Record START event to execution-events.md
├─ Execute task directly:
@@ -506,7 +536,7 @@ ${[...failedTasks].map(id => {
}).join('\n')}
` : ''}
### Artifacts
-- **Execution Plan**: ${sessionFolder}/tasks.jsonl
+- **Execution Plan**: ${sessionFolder}/.task/
- **Execution Overview**: ${sessionFolder}/execution.md
- **Execution Events**: ${sessionFolder}/execution-events.md
`
@@ -530,16 +560,16 @@ appendToEvents(`
`)
```
-##### Step 6.3: Update tasks.jsonl
+##### Step 6.3: Update .task/*.json
-Rewrite JSONL with `_execution` state per task:
+Write back `_execution` state to each task file:
```javascript
-const updatedJsonl = tasks.map(task => JSON.stringify({
-  ...task,
-  _execution: {
-    status: task._status,           // "completed" | "failed" | "skipped" | "pending"
-    executed_at: task._executed_at, // ISO timestamp
+tasks.forEach(task => {
+  const updatedTask = {
+    ...task,
+    _execution: {
+      status: task._status,           // "completed" | "failed" | "skipped" | "pending"
+      executed_at: task._executed_at, // ISO timestamp
      result: {
        success: task._status === 'completed',
        files_modified: task._result?.files_modified || [],
@@ -548,8 +578,8 @@ const updatedJsonl = tasks.map(task => JSON.stringify({
        convergence_verified: task._result?.convergence_verified || []
      }
    }
-})).join('\n')
-Write(`${sessionFolder}/tasks.jsonl`, updatedJsonl)
+  }
+  Write(`${sessionFolder}/.task/${task.id}.json`, JSON.stringify(updatedTask, null, 2))
+})
```
---
@@ -589,7 +619,7 @@ if (!autoYes) {
- Filter tasks with `_execution.status === 'failed'`
- Re-execute in original dependency order
- Append retry events to execution-events.md with `[RETRY]` prefix
-- Update execution.md and tasks.jsonl
+- Update execution.md and .task/*.json
---
@@ -600,14 +630,17 @@ When Quick Execute is activated, session folder expands with:
```
{projectRoot}/.workflow/.analysis/ANL-{slug}-{date}/
├── ... # Phase 1-4 artifacts
-├── tasks.jsonl              # ⭐ Unified JSONL (one task per line, with convergence + source)
+├── .task/                   # Individual task JSON files (one per task, with convergence + source)
+│   ├── TASK-001.json
+│   ├── TASK-002.json
+│   └── ...
├── execution.md # Plan overview + task table + execution summary
└── execution-events.md # ⭐ Unified event log (all task executions with details)
```
| File | Purpose |
|------|---------|
-| `tasks.jsonl` | Unified task list from conclusions, each line has convergence criteria and source provenance |
+| `.task/*.json` | Individual task files from conclusions, each with convergence criteria and source provenance |
| `execution.md` | Overview: plan source, task table, pre-execution analysis, execution timeline, final summary |
| `execution-events.md` | Chronological event stream: task start/complete/fail with details, changes, verification results |
@@ -620,7 +653,7 @@ When Quick Execute is activated, session folder expands with:
**Session**: ANL-xxx-2025-01-21
**Started**: 2025-01-21T10:00:00+08:00
-**Source**: tasks.jsonl
+**Source**: .task/*.json
---
@@ -681,9 +714,9 @@ When Quick Execute is activated, session folder expands with:
|-----------|--------|----------|
| Task execution fails | Record failure in execution-events.md, ask user | Retry, skip, or abort |
| Verification command fails | Mark criterion as unverified, continue | Note in events, manual check needed |
-| No recommendations in conclusions | Cannot generate tasks.jsonl | Inform user, suggest lite-plan |
+| No recommendations in conclusions | Cannot generate .task/*.json | Inform user, suggest lite-plan |
| File conflict during execution | Document in execution-events.md | Resolve in dependency order |
-| Circular dependencies detected | Stop, report error | Fix dependencies in tasks.jsonl |
+| Circular dependencies detected | Stop, report error | Fix dependencies in .task/*.json |
| All tasks fail | Record all failures, suggest analysis review | Re-run analysis or manual intervention |
| Missing target file | Attempt to create if task.type is "feature" | Log as warning for other types |
@@ -691,10 +724,10 @@ When Quick Execute is activated, session folder expands with:
## Success Criteria
-- `tasks.jsonl` generated with convergence criteria and source provenance per task
+- `.task/*.json` generated with convergence criteria and source provenance per task
- `execution.md` contains plan overview, task table, pre-execution analysis, final summary
- `execution-events.md` contains chronological event stream with convergence verification
- All tasks executed (or explicitly skipped) via direct inline execution
- Each task's convergence criteria checked and recorded
-- `_execution` state written back to tasks.jsonl after completion
+- `_execution` state written back to .task/*.json after completion
- User informed of results and next steps


@@ -14,10 +14,38 @@ Interactive collaborative analysis workflow with **documented discussion process
**Key features**:
- **Documented discussion timeline**: Captures understanding evolution across all phases
- **Decision recording at every critical point**: Mandatory recording of key findings, direction changes, and trade-offs
- **Multi-perspective analysis**: Supports up to 4 analysis perspectives (serial, inline)
- **Interactive discussion**: Multi-round Q&A with user feedback and direction adjustments
- **Quick execute**: Convert conclusions directly to executable tasks
### Decision Recording Protocol
**CRITICAL**: During analysis, the following situations **MUST** trigger immediate recording to discussion.md:
| Trigger | What to Record | Target Section |
|---------|---------------|----------------|
| **Direction choice** | What was chosen, why, what alternatives were discarded | `#### Decision Log` |
| **Key finding** | Finding content, impact scope, confidence level | `#### Key Findings` |
| **Assumption change** | Old assumption → new understanding, reason, impact | `#### Corrected Assumptions` |
| **User feedback** | User's original input, rationale for adoption/adjustment | `#### User Input` |
| **Disagreement & trade-off** | Conflicting viewpoints, trade-off basis, final choice | `#### Decision Log` |
| **Scope adjustment** | Before/after scope, trigger reason | `#### Decision Log` |
**Decision Record Format**:
```markdown
> **Decision**: [Description of the decision]
> - **Context**: [What triggered this decision]
> - **Options considered**: [Alternatives evaluated]
> - **Chosen**: [Selected approach] — **Reason**: [Rationale]
> - **Impact**: [Effect on analysis direction/conclusions]
```
**Recording Principles**:
- **Immediacy**: Record decisions as they happen, not at the end of a phase
- **Completeness**: Capture context, options, chosen approach, and reason
- **Traceability**: Later phases must be able to trace back why a decision was made
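
In practice, recording can be a single append to discussion.md at the moment the decision is made. A minimal sketch, assuming the pseudo-tools used elsewhere in this document; `appendDecisionRecord` is a hypothetical helper, not defined by the skill:

```javascript
// Hypothetical helper: append one Decision Record (format above) to discussion.md.
function appendDecisionRecord(sessionFolder, d) {
  const record = [
    `> **Decision**: ${d.decision}`,
    `> - **Context**: ${d.context}`,
    `> - **Options considered**: ${d.options.join('; ')}`,
    `> - **Chosen**: ${d.chosen} — **Reason**: ${d.reason}`,
    `> - **Impact**: ${d.impact}`
  ].join('\n')
  const current = Read(`${sessionFolder}/discussion.md`)
  Write(`${sessionFolder}/discussion.md`, `${current}\n${record}\n`)
}
```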
## Auto Mode
When `--yes` or `-y`: Auto-confirm exploration decisions, use recommended analysis angles, skip interactive scoping.
@@ -82,7 +110,7 @@ Step 4: Synthesis & Conclusion
└─ Offer options: quick execute / create issue / generate task / export / done
Step 5: Quick Execute (Optional - user selects)
-├─ Convert conclusions.recommendations → tasks.jsonl (unified JSONL with convergence)
+├─ Convert conclusions.recommendations → .task/TASK-*.json (individual task files with convergence)
├─ Pre-execution analysis (dependencies, file conflicts, execution order)
├─ User confirmation
├─ Direct inline execution (Read/Edit/Write/Grep/Glob/Bash)
@@ -215,11 +243,21 @@ const discussionMd = `# Analysis Discussion
## Initial Questions
${generateInitialQuestions(topic, dimensions).map(q => `- ${q}`).join('\n')}
## Initial Decisions
> Record why these dimensions and focus areas were selected.
---
## Discussion Timeline
> Rounds will be appended below as analysis progresses.
> Each round MUST include a Decision Log section for any decisions made.
---
## Decision Trail
> Consolidated critical decisions across all rounds (populated in Phase 4).
---
@@ -234,6 +272,7 @@ Write(`${sessionFolder}/discussion.md`, discussionMd)
- Session folder created with discussion.md initialized
- Analysis dimensions identified
- User preferences captured (focus, perspectives, depth)
- **Initial decisions recorded**: Dimension selection rationale, excluded dimensions with reasons, user preference intent
### Phase 2: Exploration
@@ -390,6 +429,8 @@ Append Round 1 with exploration results:
- explorations.json (single) or perspectives.json (multi) created with findings
- discussion.md updated with Round 1 results
- Ready for interactive discussion
- **Key findings recorded** with evidence references and confidence levels
- **Exploration decisions recorded** (why certain perspectives/search strategies were chosen)
### Phase 3: Interactive Discussion
@@ -424,6 +465,11 @@ if (!autoYes) {
##### Step 3.2: Process User Response
**Recording Checkpoint**: Regardless of which option the user selects, the following MUST be recorded to discussion.md:
- User's original choice and expression
- Impact of this choice on analysis direction
- If direction changed, record a full Decision Record
**Deepen** — continue analysis in current direction:
```javascript
// Deeper inline analysis using search tools
@@ -432,6 +478,7 @@ if (!autoYes) {
// Suggest improvement approaches
// Provide risk/impact assessments
// Update explorations.json with deepening findings
// Record: Which assumptions were confirmed, specific angles for deeper exploration
```
**Adjust Direction** — new focus area:
@@ -454,6 +501,7 @@ const adjustedFocus = AskUserQuestion({
// Compare new insights with prior analysis
// Identify what was missed and why
// Update explorations.json with adjusted findings
// Record Decision: Trigger reason for direction adjustment, old vs new direction, expected impact
```
**Specific Questions** — answer directly:
@@ -463,9 +511,13 @@ const adjustedFocus = AskUserQuestion({
// Provide evidence and file references
// Rate confidence for each answer (high/medium/low)
// Document Q&A in discussion.md
// Record: Knowledge gaps revealed by the question, new understanding from the answer
```
**Analysis Complete** — exit loop, proceed to Phase 4.
```javascript
// Record: Why concluding at this round (sufficient information / scope fully focused / user satisfied)
```
##### Step 3.3: Document Each Round
@@ -474,6 +526,7 @@ Update discussion.md with results from each discussion round:
| Section | Content |
|---------|---------|
| User Direction | Action taken (deepen/adjust/questions) and focus area |
| Decision Log | Decisions made this round using Decision Record format |
| Analysis Results | Key findings, insights, evidence with file references |
| Insights | New learnings or clarifications from this round |
| Corrected Assumptions | Important wrong→right transformations with explanation |
@@ -491,6 +544,8 @@ Update discussion.md with results from each discussion round:
- discussion.md updated with all discussion rounds
- Assumptions documented and corrected
- Exit condition reached (user selects complete or max rounds)
- **All decision points recorded** with Decision Record format
- **Direction changes documented** with before/after comparison and rationale
### Phase 4: Synthesis & Conclusion
@@ -514,6 +569,9 @@ const conclusions = {
open_questions: [...], // Unresolved questions
follow_up_suggestions: [ // Next steps
{ type: 'issue|task|research', summary: '...' }
],
decision_trail: [ // Consolidated decisions from all phases
{ round: 1, decision: '...', context: '...', options_considered: [...], chosen: '...', reason: '...', impact: '...' }
]
}
Write(`${sessionFolder}/conclusions.json`, JSON.stringify(conclusions, null, 2))
@@ -537,7 +595,15 @@ Append conclusions section and finalize:
| What Was Clarified | Important corrections (~~wrong→right~~) |
| Key Insights | Valuable learnings for future reference |
**Session Statistics**: Total discussion rounds, key findings count, dimensions covered, artifacts generated.
**Decision Trail Section**:
| Subsection | Content |
|------------|---------|
| Critical Decisions | Pivotal decisions that shaped the analysis outcome |
| Direction Changes | Timeline of scope/focus adjustments with rationale |
| Trade-offs Made | Key trade-offs and why certain paths were chosen |
**Session Statistics**: Total discussion rounds, key findings count, dimensions covered, artifacts generated, **decision count**.
##### Step 4.3: Post-Completion Options
@@ -570,24 +636,27 @@ if (!autoYes) {
**Success Criteria**:
- conclusions.json created with complete synthesis
-- discussion.md finalized with conclusions
+- discussion.md finalized with conclusions and decision trail
- User offered meaningful next step options
- **Complete decision trail** documented and traceable from initial scoping to final conclusions
### Phase 5: Quick Execute (Optional)
-**Objective**: Convert analysis conclusions into JSONL execution list with convergence criteria, then execute tasks directly inline.
+**Objective**: Convert analysis conclusions into individual task JSON files with convergence criteria, then execute tasks directly inline.
**Trigger**: User selects "Quick Execute" in Phase 4.
**Key Principle**: No additional exploration — analysis phase has already collected all necessary context. No CLI delegation — execute directly using tools.
-**Flow**: `conclusions.json → tasks.jsonl → User Confirmation → Direct Inline Execution → execution.md + execution-events.md`
+**Flow**: `conclusions.json → .task/*.json → User Confirmation → Direct Inline Execution → execution.md + execution-events.md`
**Full specification**: See `EXECUTE.md` for detailed step-by-step implementation.
-##### Step 5.1: Generate tasks.jsonl
-**Schema**: `cat ~/.ccw/workflows/cli-templates/schemas/task-schema.json`
-Convert `conclusions.recommendations` into unified JSONL task format. Each line is a self-contained task with convergence criteria:
+##### Step 5.1: Generate .task/*.json
+Convert `conclusions.recommendations` into individual task JSON files. Each file is a self-contained task with convergence criteria:
```javascript
const conclusions = JSON.parse(Read(`${sessionFolder}/conclusions.json`))
@@ -623,8 +692,11 @@ const tasks = conclusions.recommendations.map((rec, index) => ({
}))
// Validate convergence quality (same as req-plan-with-file)
-// Write one task per line
-Write(`${sessionFolder}/tasks.jsonl`, tasks.map(t => JSON.stringify(t)).join('\n'))
+// Write each task as individual JSON file
+Bash(`mkdir -p ${sessionFolder}/.task`)
+tasks.forEach(task => {
+  Write(`${sessionFolder}/.task/${task.id}.json`, JSON.stringify(task, null, 2))
+})
```
##### Step 5.2: Pre-Execution Analysis
@@ -647,7 +719,7 @@ if (!autoYes) {
options: [
{ label: "Start Execution", description: "Execute all tasks serially" },
{ label: "Adjust Tasks", description: "Modify, reorder, or remove tasks" },
{ label: "Cancel", description: "Cancel execution, keep tasks.jsonl" }
{ label: "Cancel", description: "Cancel execution, keep .task/" }
]
}]
})
@@ -670,7 +742,7 @@ For each task in execution order:
- Update `execution.md` with final summary (statistics, task results table)
- Finalize `execution-events.md` with session footer
-- Update `tasks.jsonl` with `_execution` state per task
+- Update `.task/*.json` with `_execution` state per task
```javascript
if (!autoYes) {
@@ -691,7 +763,7 @@ if (!autoYes) {
```
**Success Criteria**:
-- `tasks.jsonl` generated with convergence criteria and source provenance per task
+- `.task/*.json` generated with convergence criteria and source provenance per task
- `execution.md` contains plan overview, task table, pre-execution analysis, final summary
- `execution-events.md` contains chronological event stream with convergence verification
- All tasks executed (or explicitly skipped) via direct inline execution
@@ -710,7 +782,10 @@ if (!autoYes) {
├── explorations.json # Phase 2: Single perspective aggregated findings
├── perspectives.json # Phase 2: Multi-perspective findings with synthesis
├── conclusions.json # Phase 4: Final synthesis with recommendations
-├── tasks.jsonl            # Phase 5: Unified JSONL with convergence + source (if quick execute)
+├── .task/                 # Phase 5: Individual task JSON files (if quick execute)
+│   ├── TASK-001.json      # One file per task with convergence + source
+│   ├── TASK-002.json
+│   └── ...
├── execution.md # Phase 5: Execution overview + task table + summary (if quick execute)
└── execution-events.md # Phase 5: Chronological event log (if quick execute)
```
@@ -723,7 +798,7 @@ if (!autoYes) {
| `explorations.json` | 2 | Single perspective aggregated findings |
| `perspectives.json` | 2 | Multi-perspective findings with cross-perspective synthesis |
| `conclusions.json` | 4 | Final synthesis: conclusions, recommendations, open questions |
-| `tasks.jsonl` | 5 | Unified JSONL from recommendations, each line with convergence criteria and source provenance |
+| `.task/*.json` | 5 | Individual task files from recommendations, each with convergence criteria and source provenance |
| `execution.md` | 5 | Execution overview: plan source, task table, pre-execution analysis, final summary |
| `execution-events.md` | 5 | Chronological event stream with task details and convergence verification |
@@ -822,12 +897,14 @@ The discussion.md file evolves through the analysis:
- **Header**: Session ID, topic, start time, identified dimensions
- **Analysis Context**: Focus areas, perspectives, depth level
- **Initial Questions**: Key questions to guide the analysis
- **Initial Decisions**: Why these dimensions and focus areas were selected
- **Discussion Timeline**: Round-by-round findings
-- Round 1: Initial Understanding + Exploration Results
-- Round 2-N: User feedback + direction adjustments + new insights
+- Round 1: Initial Understanding + Exploration Results + **Initial Decision Log**
+- Round 2-N: User feedback + direction adjustments + new insights + **Decision Log per round**
- **Decision Trail**: Consolidated critical decisions across all rounds
- **Synthesis & Conclusions**: Summary, key conclusions, recommendations
- **Current Understanding (Final)**: Consolidated insights
-- **Session Statistics**: Rounds completed, findings count, artifacts generated
+- **Session Statistics**: Rounds completed, findings count, artifacts generated, decision count
### Round Documentation Pattern
@@ -839,6 +916,13 @@ Each discussion round follows a consistent structure:
#### User Input
What the user indicated they wanted to focus on
#### Decision Log
> **Decision**: [Description of direction/scope/approach decision made this round]
> - **Context**: [What triggered this decision]
> - **Options considered**: [Alternatives evaluated]
> - **Chosen**: [Selected approach] — **Reason**: [Rationale]
> - **Impact**: [Effect on analysis direction/conclusions]
#### Analysis Results
New findings from this round's analysis
- Finding 1 (evidence: file:line)
@@ -867,7 +951,7 @@ Remaining questions or areas for investigation
| Session folder conflict | Append timestamp suffix | Create unique folder and continue |
| Quick execute: task fails | Record failure in execution-events.md | User can retry, skip, or abort |
| Quick execute: verification fails | Mark criterion as unverified, continue | Note in events, manual check |
-| Quick execute: no recommendations | Cannot generate tasks.jsonl | Suggest using lite-plan instead |
+| Quick execute: no recommendations | Cannot generate .task/*.json | Suggest using lite-plan instead |
## Best Practices
@@ -889,6 +973,7 @@ Remaining questions or areas for investigation
3. **Use Continue Mode**: Resume sessions to build on previous findings rather than starting over
4. **Embrace Corrections**: Track wrong→right transformations as valuable learnings
5. **Iterate Thoughtfully**: Each discussion round should meaningfully refine understanding
6. **Record Decisions Immediately**: Never defer recording — capture decisions as they happen using the Decision Record format. A decision not recorded in the moment is a decision lost
### Documentation Practices
@@ -898,6 +983,7 @@ Remaining questions or areas for investigation
4. **Evolution Tracking**: Document how understanding changed across rounds
5. **Action Items**: Generate specific, actionable recommendations
6. **Multi-Perspective Synthesis**: When using multiple perspectives, document convergent/conflicting themes
7. **Link Decisions to Outcomes**: When writing conclusions, explicitly reference which decisions led to which outcomes — this creates an auditable trail from initial scoping to final recommendations
## When to Use
@@ -911,7 +997,7 @@ Remaining questions or areas for investigation
**Use Quick Execute (Phase 5) when:**
- Analysis conclusions contain clear, actionable recommendations
- Context is already sufficient — no additional exploration needed
-- Want a streamlined analyze → JSONL plan → direct execute pipeline
+- Want a streamlined analyze → .task/*.json plan → direct execute pipeline
- Tasks are relatively independent and can be executed serially
**Consider alternatives when:**


@@ -1,463 +0,0 @@
---
name: brainstorm-to-cycle
description: Convert brainstorm session output to parallel-dev-cycle input with idea selection and context enrichment. Unified parameter format.
argument-hint: "--session=<id> [--idea=<index>] [--auto] [--launch]"
---
# Brainstorm to Cycle Adapter
## Overview
Bridge workflow that converts **brainstorm-with-file** output to **parallel-dev-cycle** input. Reads synthesis.json, allows user to select an idea, and formats it as an enriched TASK description.
**Core workflow**: Load Session → Select Idea → Format Task → Launch Cycle
## Inputs
| Argument | Required | Description |
|----------|----------|-------------|
| --session | Yes | Brainstorm session ID (e.g., `BS-rate-limiting-2025-01-28`) |
| --idea | No | Pre-select idea by index (0-based, from top_ideas) |
| --auto | No | Auto-select top-scored idea without confirmation |
| --launch | No | Auto-launch parallel-dev-cycle without preview |
## Output
Launches `/parallel-dev-cycle` with enriched TASK containing:
- Primary recommendation or selected idea
- Key strengths and challenges
- Suggested implementation steps
- Alternative approaches for reference
## Execution Process
```
Phase 1: Session Loading
├─ Validate session folder exists
├─ Read synthesis.json
├─ Parse top_ideas and recommendations
└─ Validate data structure
Phase 2: Idea Selection
├─ --auto mode → Select highest scored idea
├─ --idea=N → Select specified index
└─ Interactive → Present options, await selection
Phase 3: Task Formatting
├─ Build enriched task description
├─ Include context from brainstorm
└─ Generate parallel-dev-cycle command
Phase 4: Cycle Launch
├─ Confirm with user (unless --auto)
└─ Execute parallel-dev-cycle
```
## Implementation
### Phase 1: Session Loading
##### Step 0: Determine Project Root
Detect the project root directory so that `.workflow/` artifacts are written to the correct location:
```bash
PROJECT_ROOT=$(git rev-parse --show-toplevel 2>/dev/null || pwd)
```
Prefer the git repository root; for non-git projects, fall back to `pwd` for the current absolute path.
Store it as `{projectRoot}`; all subsequent `.workflow/` paths must use it as a prefix.
```javascript
const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()
const projectRoot = bash('git rev-parse --show-toplevel 2>/dev/null || pwd').trim()
// Parse arguments
const args = "$ARGUMENTS"
const sessionId = "$SESSION"
const ideaIndexMatch = args.match(/--idea=(\d+)/)
const preSelectedIdea = ideaIndexMatch ? parseInt(ideaIndexMatch[1]) : null
const isAutoMode = args.includes('--auto')
// Validate session
const sessionFolder = `${projectRoot}/.workflow/.brainstorm/${sessionId}`
const synthesisPath = `${sessionFolder}/synthesis.json`
const brainstormPath = `${sessionFolder}/brainstorm.md`
function fileExists(p) {
try { return bash(`test -f "${p}" && echo "yes"`).includes('yes') } catch { return false }
}
if (!fileExists(synthesisPath)) {
console.error(`
## Error: Session Not Found
Session ID: ${sessionId}
Expected path: ${synthesisPath}
**Available sessions**:
`)
bash(`ls -1 ${projectRoot}/.workflow/.brainstorm/ 2>/dev/null | head -10`)
return { status: 'error', message: 'Session not found' }
}
// Load synthesis
const synthesis = JSON.parse(Read(synthesisPath))
// Validate structure
if (!synthesis.top_ideas || synthesis.top_ideas.length === 0) {
console.error(`
## Error: No Ideas Found
The brainstorm session has no top_ideas.
Please complete the brainstorm workflow first.
`)
return { status: 'error', message: 'No ideas in synthesis' }
}
console.log(`
## Brainstorm Session Loaded
**Session**: ${sessionId}
**Topic**: ${synthesis.topic}
**Completed**: ${synthesis.completed}
**Ideas Found**: ${synthesis.top_ideas.length}
`)
```
---
### Phase 2: Idea Selection
```javascript
let selectedIdea = null
let selectionSource = ''
// Auto mode: select highest scored
if (isAutoMode) {
selectedIdea = synthesis.top_ideas.reduce((best, idea) =>
idea.score > best.score ? idea : best
)
selectionSource = 'auto (highest score)'
console.log(`
**Auto-selected**: ${selectedIdea.title} (Score: ${selectedIdea.score}/10)
`)
}
// Pre-selected by index
else if (preSelectedIdea !== null) {
if (preSelectedIdea >= synthesis.top_ideas.length) {
console.error(`
## Error: Invalid Idea Index
Requested: --idea=${preSelectedIdea}
Available: 0 to ${synthesis.top_ideas.length - 1}
`)
return { status: 'error', message: 'Invalid idea index' }
}
selectedIdea = synthesis.top_ideas[preSelectedIdea]
selectionSource = `index ${preSelectedIdea}`
console.log(`
**Pre-selected**: ${selectedIdea.title} (Index: ${preSelectedIdea})
`)
}
// Interactive selection
else {
// Display options
console.log(`
## Select Idea for Development
| # | Title | Score | Feasibility |
|---|-------|-------|-------------|
${synthesis.top_ideas.map((idea, i) =>
`| ${i} | ${idea.title.substring(0, 40)} | ${idea.score}/10 | ${idea.feasibility || 'N/A'} |`
).join('\n')}
**Primary Recommendation**: ${synthesis.recommendations?.primary?.substring(0, 60) || 'N/A'}
`)
// Build options for AskUser
const ideaOptions = synthesis.top_ideas.slice(0, 4).map((idea, i) => ({
label: `#${i}: ${idea.title.substring(0, 30)}`,
description: `Score: ${idea.score}/10 - ${idea.description?.substring(0, 50) || ''}`
}))
// Add primary recommendation option if different
if (synthesis.recommendations?.primary) {
ideaOptions.unshift({
label: "Primary Recommendation",
description: synthesis.recommendations.primary.substring(0, 60)
})
}
const selection = ASK_USER([{
id: "idea", type: "select",
prompt: "Which idea should be developed?",
options: ideaOptions
}]) // BLOCKS (wait for user response)
// Parse selection
if (selection.idea === "Primary Recommendation") {
// Use primary recommendation as task
selectedIdea = {
title: "Primary Recommendation",
description: synthesis.recommendations.primary,
key_strengths: synthesis.key_insights || [],
main_challenges: [],
next_steps: synthesis.follow_up?.filter(f => f.type === 'implementation').map(f => f.summary) || []
}
selectionSource = 'primary recommendation'
} else {
const match = selection.idea.match(/^#(\d+):/)
const idx = match ? parseInt(match[1]) : 0
selectedIdea = synthesis.top_ideas[idx]
selectionSource = `user selected #${idx}`
}
}
console.log(`
### Selected Idea
**Title**: ${selectedIdea.title}
**Source**: ${selectionSource}
**Description**: ${selectedIdea.description?.substring(0, 200) || 'N/A'}
`)
```
---
### Phase 3: Task Formatting
```javascript
// Build enriched task description
function formatTask(idea, synthesis) {
const sections = []
// Main objective
sections.push(`# Main Objective\n\n${idea.title}`)
// Description
if (idea.description) {
sections.push(`# Description\n\n${idea.description}`)
}
// Key strengths
if (idea.key_strengths?.length > 0) {
sections.push(`# Key Strengths\n\n${idea.key_strengths.map(s => `- ${s}`).join('\n')}`)
}
// Main challenges (important for RA agent)
if (idea.main_challenges?.length > 0) {
sections.push(`# Main Challenges to Address\n\n${idea.main_challenges.map(c => `- ${c}`).join('\n')}`)
}
// Recommended steps
if (idea.next_steps?.length > 0) {
sections.push(`# Recommended Implementation Steps\n\n${idea.next_steps.map((s, i) => `${i + 1}. ${s}`).join('\n')}`)
}
// Alternative approaches (for RA consideration)
if (synthesis.recommendations?.alternatives?.length > 0) {
sections.push(`# Alternative Approaches (for reference)\n\n${synthesis.recommendations.alternatives.map(a => `- ${a}`).join('\n')}`)
}
// Key insights from brainstorm
if (synthesis.key_insights?.length > 0) {
const relevantInsights = synthesis.key_insights.slice(0, 3)
sections.push(`# Key Insights from Brainstorm\n\n${relevantInsights.map(i => `- ${i}`).join('\n')}`)
}
// Source reference
sections.push(`# Source\n\nBrainstorm Session: ${synthesis.session_id}\nTopic: ${synthesis.topic}`)
return sections.join('\n\n')
}
const enrichedTask = formatTask(selectedIdea, synthesis)
// Display formatted task
console.log(`
## Formatted Task for parallel-dev-cycle
\`\`\`markdown
${enrichedTask}
\`\`\`
`)
// Save task to session folder for reference
Write(`${sessionFolder}/cycle-task.md`, `# Generated Task\n\n**Generated**: ${getUtc8ISOString()}\n**Idea**: ${selectedIdea.title}\n**Selection**: ${selectionSource}\n\n---\n\n${enrichedTask}`)
```
---
### Phase 4: Cycle Launch
```javascript
// Confirm launch (unless auto mode)
let shouldLaunch = isAutoMode
if (!isAutoMode) {
const confirmation = ASK_USER([{
id: "launch", type: "select",
prompt: "Launch parallel-dev-cycle with this task?",
options: [
{ label: "Yes, launch cycle (Recommended)", description: "Start parallel-dev-cycle with enriched task" },
{ label: "No, just save task", description: "Save formatted task for manual use" }
]
}]) // BLOCKS (wait for user response)
shouldLaunch = confirmation.launch.includes("Yes")
}
if (shouldLaunch) {
console.log(`
## Launching parallel-dev-cycle
**Task**: ${selectedIdea.title}
**Source Session**: ${sessionId}
`)
// Escape task for command line
const escapedTask = enrichedTask
.replace(/\\/g, '\\\\')
.replace(/"/g, '\\"')
.replace(/\$/g, '\\$')
.replace(/`/g, '\\`')
// Launch parallel-dev-cycle
// Note: In actual execution, this would invoke the skill
console.log(`
### Cycle Command
\`\`\`bash
/parallel-dev-cycle TASK="${escapedTask.substring(0, 100)}..."
\`\`\`
**Full task saved to**: ${sessionFolder}/cycle-task.md
`)
// Return success with cycle trigger
return {
status: 'success',
action: 'launch_cycle',
session_id: sessionId,
idea: selectedIdea.title,
task_file: `${sessionFolder}/cycle-task.md`,
cycle_command: `/parallel-dev-cycle TASK="${enrichedTask}"`
}
} else {
console.log(`
## Task Saved (Not Launched)
**Task file**: ${sessionFolder}/cycle-task.md
To launch manually:
\`\`\`bash
/parallel-dev-cycle TASK="$(cat ${sessionFolder}/cycle-task.md)"
\`\`\`
`)
return {
status: 'success',
action: 'saved_only',
session_id: sessionId,
task_file: `${sessionFolder}/cycle-task.md`
}
}
```
---
## Session Files
After execution:
```
{projectRoot}/.workflow/.brainstorm/{session-id}/
├── brainstorm.md # Original brainstorm
├── synthesis.json # Synthesis data (input)
├── perspectives.json # Perspectives data
├── ideas/ # Idea deep-dives
└── cycle-task.md # ⭐ Generated task (output)
```
## Task Format
The generated task includes:
| Section | Purpose | Used By |
|---------|---------|---------|
| Main Objective | Clear goal statement | RA: Primary requirement |
| Description | Detailed explanation | RA: Requirement context |
| Key Strengths | Why this approach | RA: Design decisions |
| Main Challenges | Known issues to address | RA: Edge cases, risks |
| Implementation Steps | Suggested approach | EP: Planning guidance |
| Alternatives | Other valid approaches | RA: Fallback options |
| Key Insights | Learnings from brainstorm | RA: Domain context |
## Error Handling
| Situation | Action |
|-----------|--------|
| Session not found | List available sessions, abort |
| synthesis.json missing | Suggest completing brainstorm first |
| No top_ideas | Report error, abort |
| Invalid --idea index | Show valid range, abort |
| Task too long | Truncate with reference to file |
## Examples
### Auto Mode (Quick Launch)
```bash
/brainstorm-to-cycle SESSION="BS-rate-limiting-2025-01-28" --auto
# → Selects highest-scored idea
# → Launches parallel-dev-cycle immediately
```
### Pre-Selected Idea
```bash
/brainstorm-to-cycle SESSION="BS-auth-system-2025-01-28" --idea=2
# → Selects top_ideas[2]
# → Confirms before launch
```
### Interactive Selection
```bash
/brainstorm-to-cycle SESSION="BS-caching-2025-01-28"
# → Displays all ideas with scores
# → User selects from options
# → Confirms and launches
```
## Integration Flow
```
brainstorm-with-file
        ↓
  synthesis.json
        ↓
brainstorm-to-cycle   ◄─── This command
        ↓
  enriched TASK
        ↓
parallel-dev-cycle
        ↓
  RA → EP → CD → VAS
```
---
**Now execute brainstorm-to-cycle** with session: $SESSION


@@ -21,7 +21,7 @@ Stateless iterative development loop using Codex single-agent deep interaction p
| loop-v2-routes.ts (Control Plane) |
| |
| State: {projectRoot}/.workflow/.loop/{loopId}.json (MASTER) |
-| Tasks: {projectRoot}/.workflow/.loop/{loopId}.tasks.jsonl |
+| Tasks: {projectRoot}/.workflow/.loop/{loopId}/.task/*.json |
| |
| /start -> Trigger ccw-loop skill with --loop-id |
| /pause -> Set status='paused' (skill checks before action) |
@@ -60,13 +60,13 @@ Stateless iterative development loop using Codex single-agent deep interaction p
## Prep Package Integration
When `prep-package.json` exists at `{projectRoot}/.workflow/.loop/prep-package.json`, Phase 1 consumes it to:
-- Load pre-built task list from `prep-tasks.jsonl` instead of generating tasks from scratch
+- Load pre-built task list from `.task/*.json` files instead of generating tasks from scratch
- Apply auto-loop config (max_iterations, timeout)
- Preserve source provenance and convergence criteria from upstream planning/analysis skills
Prep packages are generated by the interactive prompt `/prompts:prep-loop`, which accepts JSONL from:
-- `collaborative-plan-with-file` (tasks.jsonl)
-- `analyze-with-file` (tasks.jsonl)
+- `collaborative-plan-with-file` (.task/*.json)
+- `analyze-with-file` (.task/*.json)
- `brainstorm-to-cycle` (cycle-task.md → converted to task format)
See [phases/00-prep-checklist.md](phases/00-prep-checklist.md) for schema and validation rules.
@@ -151,7 +151,8 @@ close_agent → return finalState
```
{projectRoot}/.workflow/.loop/
├── {loopId}.json # Master state file (API + Skill shared)
-├── {loopId}.tasks.jsonl   # Task list (API managed)
+├── {loopId}/.task/        # Task files directory (API managed)
+│   └── TASK-{id}.json     # Individual task files (task-schema.json)
└── {loopId}.progress/ # Skill progress files
├── develop.md # Development progress timeline
├── debug.md # Understanding evolution document


@@ -20,7 +20,7 @@ Schema reference for `prep-package.json` consumed by ccw-loop Phase 1. Generated
"source": {
"tool": "collaborative-plan-with-file | analyze-with-file | brainstorm-to-cycle | manual",
"session_id": "string",
"jsonl_path": "absolute path to original JSONL",
"task_dir": "absolute path to source .task/ directory",
"task_count": "number",
"tasks_with_convergence": "number"
},
@@ -40,9 +40,9 @@ Schema reference for `prep-package.json` consumed by ccw-loop Phase 1. Generated
}
```
-## prep-tasks.jsonl Schema
+## .task/*.json Schema (task-schema.json)
-One task per line, each in ccw-loop `develop.tasks[]` format with extended fields:
+One task per file in `.task/` directory, each following `task-schema.json` with ccw-loop extended fields:
```json
{
@@ -71,8 +71,8 @@ One task per line, each in ccw-loop `develop.tasks[]` format with extended field
| 2 | target_skill | `=== "ccw-loop"` | Skip prep, use default INIT |
| 3 | project_root | Matches current `projectRoot` | Skip prep, warn mismatch |
| 4 | freshness | `generated_at` within 24h | Skip prep, warn stale |
-| 5 | tasks file | `prep-tasks.jsonl` exists and readable | Skip prep, use default INIT |
-| 6 | tasks content | At least 1 valid task line in JSONL | Skip prep, use default INIT |
+| 5 | tasks dir | `.task/` directory exists with *.json files | Skip prep, use default INIT |
+| 6 | tasks content | At least 1 valid task JSON in `.task/` | Skip prep, use default INIT |
## Integration Points
@@ -89,9 +89,9 @@ if (fs.existsSync(prepPath)) {
if (checks.valid) {
prepPackage = raw
-// Load pre-built tasks from prep-tasks.jsonl
-const tasksPath = `${projectRoot}/.workflow/.loop/prep-tasks.jsonl`
-const prepTasks = loadPrepTasks(tasksPath)
+// Load pre-built tasks from .task/*.json
+const taskDir = `${projectRoot}/.workflow/.loop/.task`
+const prepTasks = loadPrepTasks(taskDir)
// → Inject into state.skill_state.develop.tasks
// → Set max_iterations from auto_loop config
} else {


@@ -46,14 +46,14 @@ if (fs.existsSync(prepPath)) {
prepPackage = raw
// Load pre-built tasks
-const tasksPath = `${projectRoot}/.workflow/.loop/prep-tasks.jsonl`
-prepTasks = loadPrepTasks(tasksPath)
+const taskDir = `${projectRoot}/.workflow/.loop/.task`
+prepTasks = loadPrepTasks(taskDir)
if (prepTasks && prepTasks.length > 0) {
console.log(`✓ Prep package loaded: ${prepTasks.length} tasks from ${prepPackage.source.tool}`)
console.log(` Checks passed: ${checks.passed.join(', ')}`)
} else {
-console.warn(` Prep tasks file empty or invalid, falling back to default INIT`)
+console.warn(`Warning: Prep tasks directory empty or invalid, falling back to default INIT`)
prepPackage = null
prepTasks = null
}
@@ -103,12 +103,13 @@ function validateLoopPrepPackage(prep, projectRoot) {
failures.push(`prep-package is ${Math.round(hoursSince)}h old (max 24h), may be stale`)
}
-// Check 5: prep-tasks.jsonl must exist
-const tasksPath = `${projectRoot}/.workflow/.loop/prep-tasks.jsonl`
-if (fs.existsSync(tasksPath)) {
-  passed.push('prep-tasks.jsonl exists')
+// Check 5: .task/ directory must exist with task files
+const taskDir = `${projectRoot}/.workflow/.loop/.task`
+const taskFiles = Glob(`${taskDir}/*.json`)
+if (fs.existsSync(taskDir) && taskFiles.length > 0) {
+  passed.push(`.task/ exists (${taskFiles.length} files)`)
} else {
-  failures.push('prep-tasks.jsonl not found')
+  failures.push('.task/ directory not found or empty')
}
// Check 6: task count > 0
@@ -126,24 +127,24 @@ function validateLoopPrepPackage(prep, projectRoot) {
}
/**
- * Load pre-built tasks from prep-tasks.jsonl.
+ * Load pre-built tasks from .task/*.json directory.
* Returns array of task objects or null on failure.
*/
-function loadPrepTasks(tasksPath) {
-  if (!fs.existsSync(tasksPath)) return null
+function loadPrepTasks(taskDir) {
+  if (!fs.existsSync(taskDir)) return null
-const content = Read(tasksPath)
-const lines = content.trim().split('\n').filter(l => l.trim())
+const taskFiles = Glob(`${taskDir}/*.json`).sort()
const tasks = []
-for (const line of lines) {
+for (const filePath of taskFiles) {
try {
-const task = JSON.parse(line)
+const content = Read(filePath)
+const task = JSON.parse(content)
if (task.id && task.description) {
tasks.push(task)
}
} catch (e) {
-console.warn(` Skipping invalid task line: ${e.message}`)
+console.warn(`Warning: Skipping invalid task file ${filePath}: ${e.message}`)
}
}


@@ -29,11 +29,23 @@ Unified issue discovery and creation skill covering three entry points: manual i
Issue            Discoveries      Discoveries
(registered)     (export)         (export)
    │                │                │
    └────────────────┴────────┬───────┘
                              │
              ┌───────────────┴───────────────┐
              ↓                               │
        ┌───────────┐                         │
        │ Phase 4   │                         │
        │ Quick Plan│                         │
        │ & Execute │                         │
        └─────┬─────┘                         │
              ↓                               │
        .task/*.json                          │
              ↓                               │
       Direct Execution                       │
              │                               │
              └───────────────┬───────────────┘
                              ↓ (fallback/remaining)
                issue-resolve (plan/queue)
                              ↓
                       /issue:execute
```
## Key Design Principles
@@ -107,6 +119,7 @@ Post-Phase:
| Phase 1 | [phases/01-issue-new.md](phases/01-issue-new.md) | Action = Create New | Create issue from GitHub URL or text description |
| Phase 2 | [phases/02-discover.md](phases/02-discover.md) | Action = Discover | Multi-perspective issue discovery (bug, security, test, etc.) |
| Phase 3 | [phases/03-discover-by-prompt.md](phases/03-discover-by-prompt.md) | Action = Discover by Prompt | Prompt-driven iterative exploration with Gemini planning |
| Phase 4 | [phases/04-quick-execute.md](phases/04-quick-execute.md) | Post-Phase = Quick Plan & Execute | Convert high-confidence findings to tasks and execute directly |
## Core Rules
@@ -321,13 +334,15 @@ ASK_USER([{
ASK_USER([{
id: "next_after_discover",
type: "select",
prompt: "Discovery complete. What next?",
prompt: `Discovery complete: ${findings.length} findings, ${executableFindings.length} executable. What next?`,
options: [
{ label: "Quick Plan & Execute (Recommended)", description: `Fix ${executableFindings.length} high-confidence findings directly` },
{ label: "Export to Issues", description: "Convert discoveries to issues" },
{ label: "Plan Solutions", description: "Plan solutions for exported issues via issue-resolve" },
{ label: "Done", description: "Exit workflow" }
]
}]); // BLOCKS (wait for user response)
// If "Quick Plan & Execute" → Read phases/04-quick-execute.md, execute
```
## Related Skills & Commands


@@ -0,0 +1,241 @@
# Phase 4: Quick Plan & Execute
> Source: analysis session `ANL-issue-discover规划执行能力-2026-02-11`
## Overview
Convert high-confidence discovery findings directly into `.task/*.json` and execute them inline.
Skips issue registration and the full planning pipeline; suited to clearly fixable problems.
**Core workflow**: Load Findings → Filter → Convert to Tasks → Pre-Execution → User Confirmation → Execute → Finalize
**Trigger**: After Phase 2/3 completes, the user selects "Quick Plan & Execute"
**Output Directory**: inherits the discovery session's `{outputDir}`
**Filter**: `confidence ≥ 0.7 AND priority ∈ {critical, high}`
## Prerequisites
- Phase 2 (Discover) or Phase 3 (Discover by Prompt) has completed
- Discovery output exists under `{outputDir}` (perspectives/*.json or discovery-issues.jsonl)
## Auto Mode
When `--yes` or `-y`: auto-filter → auto-generate tasks → auto-confirm execution → auto-skip failures → auto-select Done.
## Execution Steps
### Step 4.1: Load & Filter Findings
**Loading priority** (try in order):
```
1. perspectives/*.json — Phase 2 multi-perspective findings (each file contains findings[])
2. discovery-issues.jsonl — Phase 2/3 aggregated output (one JSON finding per line)
3. iterations/*.json — Phase 3 iteration output (each file contains findings[])
→ If all are empty: report "No discoveries found. Run discover first." and exit
```
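
A minimal sketch of this fallback chain, assuming the three artifact shapes listed above; `fileExists` and the `findings[]` field are assumptions carried over from the discovery phases:

```javascript
// Illustrative loader: try each discovery artifact in priority order.
function loadFindings(outputDir) {
  const perspectiveFiles = Glob(`${outputDir}/perspectives/*.json`)
  if (perspectiveFiles.length) {
    return perspectiveFiles.flatMap(f => JSON.parse(Read(f)).findings || [])
  }
  const jsonlPath = `${outputDir}/discovery-issues.jsonl`
  if (fileExists(jsonlPath)) {
    return Read(jsonlPath).split('\n').filter(l => l.trim()).map(l => JSON.parse(l))
  }
  return Glob(`${outputDir}/iterations/*.json`).flatMap(f => JSON.parse(Read(f)).findings || [])
}

const allFindings = loadFindings(outputDir)
if (allFindings.length === 0) throw new Error('No discoveries found. Run discover first.')
```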
**Filter rules**:
```
executableFindings = allFindings.filter(f =>
(f.confidence || 0) >= 0.7 &&
['critical', 'high'].includes(f.priority)
)
```
- If there are 0 executable findings → report "No executable findings (all below threshold)" and suggest the "Export to Issues" path
- If there are more than 10 findings → ASK_USER whether to execute all or select a subset (Auto mode: execute all)
**Same-file aggregation**:
```
Group by finding.file:
- 1 finding in a file → generate 1 standalone task
- 2+ findings in the same file → merge into 1 task (mergeFindingsToTask)
```
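
A sketch of this aggregation, assuming the two converters described in Step 4.2 below:

```javascript
// Illustrative grouping: findings in the same file become one task.
const byFile = new Map()
executableFindings.forEach(f => {
  byFile.set(f.file, [...(byFile.get(f.file) || []), f])
})
const tasks = [...byFile.values()].map((group, i) =>
  group.length === 1
    ? convertFindingToTask(group[0], i + 1) // single finding → standalone task
    : mergeFindingsToTask(group, i + 1))    // 2+ findings → merged task
```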
### Step 4.2: Generate .task/*.json
For each filtered finding (or file group), generate a task file in task-schema.json format.
#### Single-Finding Conversion (convertFindingToTask)
```
Finding field            → Task-Schema field → Conversion logic
─────────────────────────────────────────────────────────────
id (dsc-bug-001-...)     → id (TASK-001)     → renumber: TASK-{sequential:3}
title                    → title             → use as-is
description+impact+rec   → description       → concatenate: "{description}\n\nImpact: {impact}\nRecommendation: {recommendation}"
(none)                   → depends_on        → default []
(derived)                → convergence       → derive from perspective/category template (see table below)
suggested_issue.type     → type              → map: bug→fix, feature→feature, enhancement→enhancement, refactor→refactor, test→testing
priority                 → priority          → use as-is (already matches enum)
file + line              → files[]           → [{path: file, action: "modify", changes: [recommendation], target: "line:{line}"}]
snippet + file:line      → evidence[]        → ["{file}:{line}", snippet]
recommendation           → implementation[]  → [recommendation]
(fixed)                  → source            → {tool: "issue-discover", session_id: discoveryId, original_id: finding.id}
```
**Type mapping**:
```
suggested_issue.type → task type:
bug → fix, feature → feature, enhancement → enhancement,
refactor → refactor, test → testing, docs → enhancement
perspective fallback (when suggested_issue.type is absent):
bug/security → fix, test → testing, quality/maintainability/best-practices → refactor,
performance/ux → enhancement
```
**Effort derivation**:
```
critical priority → large
high priority → medium
otherwise → small
```
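
The two rules above reduce to small lookup tables; a hedged sketch (the table contents mirror the mappings listed, the function names are illustrative):

```javascript
// Illustrative type/effort inference per the mappings above.
const TYPE_MAP = {
  bug: 'fix', feature: 'feature', enhancement: 'enhancement',
  refactor: 'refactor', test: 'testing', docs: 'enhancement'
}
const PERSPECTIVE_FALLBACK = {
  bug: 'fix', security: 'fix', test: 'testing',
  quality: 'refactor', maintainability: 'refactor', 'best-practices': 'refactor',
  performance: 'enhancement', ux: 'enhancement'
}
const inferType = f =>
  TYPE_MAP[f.suggested_issue?.type] || PERSPECTIVE_FALLBACK[f.perspective] || 'fix'
const inferEffort = priority =>
  priority === 'critical' ? 'large' : priority === 'high' ? 'medium' : 'small'
```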
#### Merged-Finding Conversion (mergeFindingsToTask)
2+ findings in the same file are merged into one task:
```
1. Sort by priority: critical > high > medium > low
2. Use the highest-priority finding's priority as the task priority
3. Use the highest-priority finding's type as the task type
4. title: "Fix {findings.length} issues in {basename(file)}"
5. description: list each finding by number (### Finding N: title + description + impact + recommendation + line)
6. convergence.criteria: generate an independent criterion per finding
7. verification: pick the strictest verification command (jest > eslint > tsc > Manual)
8. definition_of_done: "Fix the {N} issues in {file}: {categories.join(', ')}"
9. effort: 1 finding = original, 2 = medium, 3+ = large
10. source.original_id: findings.map(f => f.id).join(',')
```
#### Convergence Templates (derived from perspective/category)
| Perspective | criteria template | verification | definition_of_done |
|-------------|--------------|-------------|-------------------|
| **bug** | "Fix the {category} issue at {file}:{line}", "Related module tests pass" | `npx tsc --noEmit` | "Eliminate the {impact} risk" |
| **security** | "Fix the {category} vulnerability in {file}", "Security checks pass" | `npx eslint {file} --rule 'security/*'` | "Eliminate the {impact} security risk" |
| **test** | "Add test coverage for the {file}:{line} scenario", "New tests pass" | `npx jest --testPathPattern={testFile}` | "Improve test coverage of the {file} module" |
| **quality** | "Refactor {file}:{line} to reduce {category}", "Lint checks pass" | `npx eslint {file}` | "Improve code {category}" |
| **performance** | "Optimize the {category} issue at {file}:{line}", "No performance regression" | `npx tsc --noEmit` | "Improve the performance of {impact}" |
| **maintainability** | "Refactor {file}:{line} to improve {category}", "Build passes" | `npx tsc --noEmit` | "Reduce inter-module {category}" |
| **ux** | "Improve the {category} at {file}:{line}", "UI test verification" | `Manual: check UI behavior` | "Improve user-perceived {category}" |
| **best-practices** | "Correct the {category} at {file}:{line}", "Lint passes" | `npx eslint {file}` | "Conform to {category} best practices" |
**Low-confidence handling**: for findings with confidence < 0.8, prefix the `verification` command with `Manual: `
**Output**: write to `{outputDir}/.task/TASK-{seq}.json`; validate that convergence is non-empty and not vague.
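
Putting the field mapping and the template table together, a single-finding conversion could look like the sketch below; `CONVERGENCE_TEMPLATES` is an assumed lookup built from the table above, `inferType`/`inferEffort` are the illustrative helpers sketched earlier, and `discoveryId` is assumed to be in scope:

```javascript
// Illustrative convertFindingToTask: field mapping per the Step 4.2 table.
function convertFindingToTask(f, seq) {
  const tpl = CONVERGENCE_TEMPLATES[f.perspective] || CONVERGENCE_TEMPLATES.bug
  const verification = (f.confidence || 0) < 0.8
    ? `Manual: ${tpl.verification(f)}` // low-confidence handling above
    : tpl.verification(f)
  return {
    id: `TASK-${String(seq).padStart(3, '0')}`,
    title: f.title,
    description: `${f.description}\n\nImpact: ${f.impact}\nRecommendation: ${f.recommendation}`,
    type: inferType(f),
    priority: f.priority,
    effort: inferEffort(f.priority),
    files: [{ path: f.file, action: 'modify', changes: [f.recommendation], target: `line:${f.line}` }],
    depends_on: [],
    convergence: { criteria: tpl.criteria(f), verification, definition_of_done: tpl.done(f) },
    evidence: [`${f.file}:${f.line}`, f.snippet],
    source: { tool: 'issue-discover', session_id: discoveryId, original_id: f.id }
  }
}
```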
### Step 4.3: Pre-Execution Analysis
> Reference: analyze-with-file/EXECUTE.md Step 2-3
Reuse the Pre-Execution logic from EXECUTE.md:
1. **Dependency check**: verify that every `depends_on` reference exists
2. **Cycle detection**: no cycles → topological sort determines the execution order
3. **File-conflict analysis**: check whether multiple tasks modify the same file (same-file findings are already aggregated; this detects cross-task conflicts; see the sketch after this list)
4. **Generate execution.md**: task list, execution order, conflict report
5. **Generate execution-events.md**: empty event log that later records the execution process
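
For item 3, a minimal cross-task conflict check (illustrative; `tasks` is the array generated in Step 4.2):

```javascript
// Flag files that two or more tasks intend to modify.
const touchedBy = new Map()
tasks.forEach(t => (t.files || []).forEach(f => {
  touchedBy.set(f.path, [...(touchedBy.get(f.path) || []), t.id])
}))
const conflicts = [...touchedBy.entries()].filter(([, ids]) => ids.length > 1)
// conflicts feed the execution.md conflict report; conflicting tasks must
// stay in dependency order during execution.
```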
### Step 4.4: User Confirmation
Display the task summary:
```
Quick Execute Summary:
- Total findings: {allFindings.length}
- Executable (filtered): {executableFindings.length}
- Tasks generated: {tasks.length}
- File conflicts: {conflicts.length}
```
ASK_USER:
```javascript
ASK_USER([{
id: "confirm_execute",
type: "select",
prompt: `${tasks.length} tasks ready. Start execution?`,
options: [
{ label: "Start Execution", description: "Execute all tasks" },
{ label: "Adjust Filter", description: "Change confidence/priority threshold" },
{ label: "Cancel", description: "Skip execution, return to post-phase options" }
]
}]);
// Auto mode: Start Execution
```
- "Adjust Filter" → 重新 ASK_USER 输入 confidence 和 priority 阈值,返回 Step 4.1
- "Cancel" → 退出 Phase 4
### Step 4.5: Direct Inline Execution
> Reference: analyze-with-file/EXECUTE.md Step 5
Execute tasks one by one (in topological order):
```
for each task in sortedTasks:
1. Read target file(s)
2. Analyze current state vs task.description
3. Apply changes (Edit/Write)
4. Verify convergence:
- Execute task.convergence.verification command
- Check criteria fulfillment
5. Record event to execution-events.md:
- TASK_START → TASK_COMPLETE / TASK_FAILED
6. Update .task/TASK-{id}.json _execution status
7. If failed:
- Auto mode: Skip & Continue
- Interactive: ASK_USER → Retry / Skip / Abort
```
**Optional auto-commit**: after each successful task, run `git add {files} && git commit -m "fix: {task.title}"`
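
A hedged sketch of the convergence check in step 4 above; whether `Bash` throws on a non-zero exit code is an assumption of this sketch:

```javascript
// Illustrative verification for one task; manual checks are never auto-passed.
function verifyConvergence(task) {
  if (task.convergence.verification.startsWith('Manual: ')) {
    return { verified: false, note: 'manual check required' }
  }
  try {
    Bash(task.convergence.verification)
    return { verified: true }
  } catch (e) {
    return { verified: false, note: e.message }
  }
}
```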
### Step 4.6: Finalize
> Reference: analyze-with-file/EXECUTE.md Step 6-7
1. **Update execution.md**: execution statistics (succeeded / failed / skipped)
2. **Update .task/*.json**: set `_execution.status` = completed/failed/skipped
3. **Post-Execute options**:
```javascript
// Compute the findings that were not executed
const remainingFindings = allFindings.filter(f => !executedFindingIds.has(f.id))
ASK_USER([{
id: "post_quick_execute",
type: "select",
prompt: `Quick Execute: ${completedCount}/${tasks.length} succeeded. ${remainingFindings.length} findings not executed.`,
options: [
{ label: "Retry Failed", description: `Re-execute ${failedCount} failed tasks` },
{ label: "Export Remaining", description: `Export ${remainingFindings.length} remaining findings to issues` },
{ label: "View Events", description: "Display execution-events.md" },
{ label: "Done", description: "End workflow" }
]
}]);
// Auto mode: Done
```
**"Export Remaining" 逻辑**: 将未执行的 findings 通过现有 Phase 2/3 的 "Export to Issues" 流程注册为 issues进入 issue-resolve 完整管道。
## Edge Cases
| Edge case | Handling strategy |
|---------|---------|
| 0 executable findings | Report "No executable findings" and suggest Export to Issues |
| Only 1 finding | Generate a single TASK-001.json normally; simplify the confirmation dialog |
| More than 10 findings | ASK_USER to confirm executing all or selecting a subset |
| Finding lacks recommendation | criteria degrade to "Review and fix {category} in {file}:{line}" |
| Finding lacks confidence | Default confidence=0.5, below the filter threshold → excluded |
| Discovery output missing | Report "No discoveries found. Run discover first." |
| .task/ directory already exists | ASK_USER: append (TASK-{max+1}) or overwrite |
| File modified externally during execution | Convergence verification detects the difference and marks FAIL |
| All tasks fail | Suggest the full "Export to Issues → issue-resolve" path |
| Findings from different perspectives but the same file | Still merged into one task; convergence.criteria keeps each finding's criteria |


@@ -2,7 +2,7 @@
## Overview
-Serial lightweight planning with CLI-powered exploration and search verification. Produces unified JSONL (`tasks.jsonl`) compatible with `collaborative-plan-with-file` output format, consumable by `unified-execute-with-file`.
+Serial lightweight planning with CLI-powered exploration and search verification. Produces `.task/TASK-*.json` (one file per task) compatible with `collaborative-plan-with-file` output format, consumable by `unified-execute-with-file`.
**Core capabilities:**
- Intelligent task analysis with automatic exploration detection
@@ -10,7 +10,7 @@ Serial lightweight planning with CLI-powered exploration and search verification
- Search verification after each CLI exploration (ACE search, Grep, Glob)
- Interactive clarification after exploration to gather missing information
- Direct planning by Claude (all complexity levels, no agent delegation)
-- Unified JSONL output (`tasks.jsonl`) with convergence criteria
+- Unified multi-file task output (`.task/TASK-*.json`) with convergence criteria
## Parameters
@@ -28,7 +28,7 @@ Serial lightweight planning with CLI-powered exploration and search verification
| `explorations-manifest.json` | Index of all exploration files |
| `exploration-notes.md` | Synthesized exploration notes (all angles combined) |
| `requirement-analysis.json` | Complexity assessment and session metadata |
-| `tasks.jsonl` | ⭐ Unified JSONL (collaborative-plan-with-file compatible) |
+| `.task/TASK-*.json` | Multi-file task output (one JSON file per task) |
| `plan.md` | Human-readable summary with execution command |
**Output Directory**: `{projectRoot}/.workflow/.lite-plan/{session-id}/`
@@ -62,10 +62,10 @@ Phase 2: Clarification (optional, multi-round)
├─ Deduplicate similar questions
└─ ASK_USER (max 4 questions per round, multiple rounds)
Phase 3: Planning → tasks.jsonl (NO CODE EXECUTION)
Phase 3: Planning → .task/*.json (NO CODE EXECUTION)
├─ Load exploration notes + clarifications + project context
├─ Direct Claude planning (following unified JSONL schema)
├─ Generate tasks.jsonl (one task per line)
├─ Direct Claude planning (following unified task JSON schema)
├─ Generate .task/TASK-*.json (one file per task)
└─ Generate plan.md (human-readable summary)
Phase 4: Confirmation
@@ -73,7 +73,7 @@ Phase 4: Confirmation
└─ ASK_USER: Allow / Modify / Cancel
Phase 5: Handoff
└─ → unified-execute-with-file with tasks.jsonl
└─ → unified-execute-with-file with .task/ directory
```
## Implementation
@@ -334,7 +334,7 @@ Aggregated from all exploration angles, deduplicated
---
### Phase 3: Planning → tasks.jsonl
### Phase 3: Planning → .task/*.json
**IMPORTANT**: Phase 3 is **planning only** — NO code execution. All implementation happens via unified-execute-with-file.
@@ -358,9 +358,9 @@ Write(`${sessionFolder}/requirement-analysis.json`, JSON.stringify({
}, null, 2))
```
#### Step 3.3: Generate tasks.jsonl
#### Step 3.3: Generate .task/*.json
Direct Claude planning — synthesize exploration findings and clarifications into unified JSONL tasks:
Direct Claude planning — synthesize exploration findings and clarifications into individual task JSON files:
**Task Grouping Rules**:
1. **Group by feature**: All changes for one feature = one task (even if 3-5 files)
@@ -370,7 +370,7 @@ Direct Claude planning — synthesize exploration findings and clarifications in
5. **True dependencies only**: Only use depends_on when Task B cannot start without Task A's output
6. **Prefer parallel**: Most tasks should be independent (no depends_on)
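As an illustration of rules 5 and 6, a hypothetical three-task plan where only one edge is a true dependency (feature names are invented):
```javascript
// Illustrative only — abbreviated to the fields relevant to dependencies.
const tasks = [
  { id: 'TASK-001', title: 'Add rate-limit middleware', depends_on: [] },
  { id: 'TASK-002', title: 'Wire rate-limit config into API routes',
    depends_on: ['TASK-001'] },          // cannot start without the middleware
  { id: 'TASK-003', title: 'Document rate-limit settings', depends_on: [] } // parallel
]
```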
**Unified JSONL Task Format** (one JSON object per line):
**Unified Task JSON Format** (one JSON file per task, stored in `.task/` directory):
```javascript
{
@@ -406,10 +406,15 @@ Direct Claude planning — synthesize exploration findings and clarifications in
}
```
**Write tasks.jsonl**:
**Write .task/*.json**:
```javascript
const jsonlContent = tasks.map(t => JSON.stringify(t)).join('\n')
Write(`${sessionFolder}/tasks.jsonl`, jsonlContent)
// Create .task/ directory
Bash(`mkdir -p ${sessionFolder}/.task`)
// Write each task as an individual JSON file
tasks.forEach(task => {
Write(`${sessionFolder}/.task/${task.id}.json`, JSON.stringify(task, null, 2))
})
```
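Note that the execute phase loads these files with a lexicographic `Glob(...).sort()` (Phase 1 below), so this naming relies on zero-padded ids: `TASK-002` sorts before `TASK-010`, whereas an unpadded `TASK-10` would sort before `TASK-2`.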
#### Step 3.4: Generate plan.md
@@ -449,7 +454,7 @@ ${t.convergence.criteria.map(c => \` - ${c}\`).join('\n')}
## Execution
\`\`\`bash
$unified-execute-with-file PLAN="${sessionFolder}/tasks.jsonl"
$unified-execute-with-file PLAN="${sessionFolder}/.task/"
\`\`\`
**Session artifacts**: \`${sessionFolder}/\`
@@ -463,7 +468,7 @@ Write(`${sessionFolder}/plan.md`, planMd)
#### Step 4.1: Display Plan
Read `{sessionFolder}/tasks.jsonl` and display summary:
Read `{sessionFolder}/.task/` directory and display summary:
- **Summary**: Overall approach (from requirement understanding)
- **Tasks**: Numbered list with ID, title, type, effort
@@ -488,7 +493,7 @@ Read `{sessionFolder}/tasks.jsonl` and display summary:
**Output**: `userSelection` — `{ confirmation: "Allow" | "Modify" | "Cancel" }`
**Modify Loop**: If "Modify" selected, display current tasks.jsonl content, accept user edits (max 3 rounds), regenerate plan.md, re-confirm.
**Modify Loop**: If "Modify" selected, display current `.task/*.json` content, accept user edits (max 3 rounds), regenerate plan.md, re-confirm.
---
@@ -509,7 +514,10 @@ Read `{sessionFolder}/tasks.jsonl` and display summary:
├── explorations-manifest.json # Exploration index
├── exploration-notes.md # Synthesized exploration notes
├── requirement-analysis.json # Complexity assessment
├── tasks.jsonl # ⭐ Unified JSONL output
├── .task/ # ⭐ Task JSON files (one per task)
│ ├── TASK-001.json
│ ├── TASK-002.json
│ └── ...
└── plan.md # Human-readable summary
```
@@ -522,7 +530,10 @@ Read `{sessionFolder}/tasks.jsonl` and display summary:
├── explorations-manifest.json
├── exploration-notes.md
├── requirement-analysis.json
├── tasks.jsonl
├── .task/
│ ├── TASK-001.json
│ ├── TASK-002.json
│ └── ...
└── plan.md
```
@@ -543,7 +554,7 @@ Read `{sessionFolder}/tasks.jsonl` and display summary:
## Post-Phase Update
After Phase 1 (Lite Plan) completes:
- **Output Created**: `tasks.jsonl` + `plan.md` + exploration artifacts in session folder
- **Output Created**: `.task/TASK-*.json` + `plan.md` + exploration artifacts in session folder
- **Session Artifacts**: All files in `{projectRoot}/.workflow/.lite-plan/{session-id}/`
- **Next Action**: Auto-continue to [Phase 2: Execution Handoff](02-lite-execute.md)
- **TodoWrite**: Mark "Lite Plan - Planning" as completed, start "Execution (unified-execute)"

View File

@@ -2,12 +2,12 @@
## Overview
Consumes the unified JSONL (`tasks.jsonl`) produced by Phase 1, executes tasks serially with convergence verification, and tracks progress via `execution.md` + `execution-events.md`.
Consumes the `.task/*.json` (multi-file task definitions) produced by Phase 1, executes tasks serially with convergence verification, and tracks progress via `execution.md` + `execution-events.md`.
**Core workflow**: Load JSONL → Validate → Pre-Execution Analysis → Execute → Verify Convergence → Track Progress
**Core workflow**: Load .task/*.json → Validate → Pre-Execution Analysis → Execute → Verify Convergence → Track Progress
**Key features**:
- **Single format**: consumes only the unified JSONL (`tasks.jsonl`)
- **Single format**: consumes only `.task/*.json` (one JSON file per task)
- **Convergence-driven**: verifies convergence criteria after each task
- **Serial execution**: runs tasks serially in topological order with dependency tracking
- **Dual progress tracking**: `execution.md` (overview) + `execution-events.md` (event stream)
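The topological order consumed later as `executionOrder` is not spelled out in this diff; a minimal sketch, assuming Kahn's algorithm over `depends_on`:
```javascript
// Sketch only — derive a serial execution order from depends_on;
// a cycle is reported instead of executed (see Error Handling below).
function buildExecutionOrder(tasks) {
  const indegree = new Map(tasks.map(t => [t.id, (t.depends_on || []).length]))
  const dependents = new Map(tasks.map(t => [t.id, []]))
  for (const t of tasks)
    for (const dep of t.depends_on || []) dependents.get(dep)?.push(t.id)

  const queue = tasks.map(t => t.id).filter(id => indegree.get(id) === 0)
  const order = []
  while (queue.length) {
    const id = queue.shift()
    order.push(id)
    for (const next of dependents.get(id)) {
      indegree.set(next, indegree.get(next) - 1)
      if (indegree.get(next) === 0) queue.push(next)
    }
  }
  if (order.length !== tasks.length)
    throw new Error('Circular dependency among tasks')
  return order
}
```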
@@ -17,11 +17,11 @@
## Invocation
```javascript
$unified-execute-with-file PLAN="${sessionFolder}/tasks.jsonl"
$unified-execute-with-file PLAN="${sessionFolder}/.task/"
// With options
$unified-execute-with-file PLAN="${sessionFolder}/tasks.jsonl" --auto-commit
$unified-execute-with-file PLAN="${sessionFolder}/tasks.jsonl" --dry-run
$unified-execute-with-file PLAN="${sessionFolder}/.task/" --auto-commit
$unified-execute-with-file PLAN="${sessionFolder}/.task/" --dry-run
```
## Output Structure
@@ -32,7 +32,7 @@ ${projectRoot}/.workflow/.execution/EXEC-{slug}-{date}-{random}/
└── execution-events.md # Unified event log (single source of truth)
```
Additionally, the source `tasks.jsonl` is updated in-place with `_execution` states.
Additionally, the source `.task/*.json` files are updated in-place with execution states (`status`, `executed_at`, `result`).
---
@@ -51,11 +51,11 @@ let planPath = planMatch ? planMatch[1] : null
// Auto-detect if no PLAN specified
if (!planPath) {
// Search in order (most recent first):
// .workflow/.lite-plan/*/tasks.jsonl
// .workflow/.req-plan/*/tasks.jsonl
// .workflow/.planning/*/tasks.jsonl
// .workflow/.analysis/*/tasks.jsonl
// .workflow/.brainstorm/*/tasks.jsonl
// .workflow/.lite-plan/*/.task/
// .workflow/.req-plan/*/.task/
// .workflow/.planning/*/.task/
// .workflow/.analysis/*/.task/
// .workflow/.brainstorm/*/.task/
}
// Resolve path
@@ -75,20 +75,19 @@ Bash(`mkdir -p ${sessionFolder}`)
## Phase 1: Load & Validate
**Objective**: Parse unified JSONL, validate schema and dependencies, build execution order.
**Objective**: Parse `.task/*.json` files, validate schema and dependencies, build execution order.
### Step 1.1: Parse Unified JSONL
### Step 1.1: Parse Task JSON Files
```javascript
const content = Read(planPath)
const tasks = content.split('\n')
.filter(line => line.trim())
.map((line, i) => {
try { return JSON.parse(line) }
catch (e) { throw new Error(`Line ${i + 1}: Invalid JSON — ${e.message}`) }
})
// Read all JSON files from .task/ directory
const taskFiles = Glob(`${planPath}/*.json`).sort()
const tasks = taskFiles.map((file, i) => {
try { return JSON.parse(Read(file)) }
catch (e) { throw new Error(`File ${file}: Invalid JSON — ${e.message}`) }
})
if (tasks.length === 0) throw new Error('No tasks found in JSONL file')
if (tasks.length === 0) throw new Error('No task files found in .task/ directory')
```
### Step 1.2: Validate Schema
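The validation body is unchanged by this commit and therefore elided from the diff; for context, a minimal sketch of the checks implied by the Error Handling table (required fields, non-empty convergence criteria, known dependency ids):
```javascript
// Sketch only — required-field list inferred from the task schema.
const ids = new Set(tasks.map(t => t.id))
for (const task of tasks) {
  for (const field of ['id', 'title', 'type', 'files', 'convergence'])
    if (!(field in task)) throw new Error(`${task.id || '?'}: missing "${field}"`)
  if (!task.convergence.criteria?.length)
    throw new Error(`${task.id}: convergence.criteria must be non-empty`)
  for (const dep of task.depends_on || [])
    if (!ids.has(dep)) throw new Error(`${task.id}: unknown dependency "${dep}"`)
}
```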
@@ -300,8 +299,9 @@ for (const taskId of executionOrder) {
if (unmetDeps.length) {
appendToEvents(task, 'BLOCKED', `Unmet dependencies: ${unmetDeps.join(', ')}`)
skippedTasks.add(task.id)
task._execution = { status: 'skipped', executed_at: startTime,
result: { success: false, error: `Blocked by: ${unmetDeps.join(', ')}` } }
task.status = 'skipped'
task.executed_at = startTime
task.result = { success: false, error: `Blocked by: ${unmetDeps.join(', ')}` }
continue
}
@@ -321,8 +321,9 @@ ${task.convergence.criteria.map(c => `- [ ] ${c}`).join('\n')}
if (dryRun) {
// Simulate: mark as completed without changes
appendToEvents(`\n**Status**: ⏭ DRY RUN (no changes)\n\n---\n`)
task._execution = { status: 'completed', executed_at: startTime,
result: { success: true, summary: 'Dry run — no changes made' } }
task.status = 'completed'
task.executed_at = startTime
task.result = { success: true, summary: 'Dry run — no changes made' }
completedTasks.add(task.id)
continue
}
@@ -358,15 +359,14 @@ ${task.convergence.criteria.map((c, i) => `- [${convergenceResults.verified[i] ?
---
`)
task._execution = {
status: 'completed', executed_at: endTime,
result: {
task.status = 'completed'
task.executed_at = endTime
task.result = {
success: true,
files_modified: filesModified,
summary: changeSummary,
convergence_verified: convergenceResults.verified
}
}
completedTasks.add(task.id)
} else {
// 5b. Record FAILURE
@@ -374,7 +374,7 @@ ${task.convergence.criteria.map((c, i) => `- [${convergenceResults.verified[i] ?
}
// 6. Auto-commit if enabled
if (autoCommit && task._execution.status === 'completed') {
if (autoCommit && task.status === 'completed') {
autoCommitTask(task, filesModified)
}
}
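`autoCommitTask` is referenced here but not defined in the diff; a plausible sketch, following the `git add {files} && git commit -m "fix: {task.title}"` pattern documented in the Quick Execute phase:
```javascript
// Sketch only — titles containing quotes would need escaping in practice.
function autoCommitTask(task, filesModified) {
  if (!filesModified?.length) return
  const files = filesModified.map(f => `"${f}"`).join(' ')
  Bash(`git add ${files} && git commit -m "fix: ${task.title}"`)
}
```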
@@ -440,14 +440,13 @@ ${task.convergence.criteria.map((c, i) => `- [${convergenceResults.verified[i] ?
---
`)
task._execution = {
status: 'failed', executed_at: endTime,
result: {
task.status = 'failed'
task.executed_at = endTime
task.result = {
success: false,
error: 'Convergence verification failed',
convergence_verified: convergenceResults.verified
}
}
failedTasks.add(task.id)
// Ask user
@@ -518,7 +517,7 @@ const summary = `
| ID | Title | Status | Convergence | Files Modified |
|----|-------|--------|-------------|----------------|
${tasks.map(t => {
const ex = t._execution || {}
const ex = t || {}
const convergenceStatus = ex.result?.convergence_verified
? `${ex.result.convergence_verified.filter(v => v).length}/${ex.result.convergence_verified.length}`
: '-'
@@ -529,7 +528,7 @@ ${failedTasks.size > 0 ? `### Failed Tasks
${[...failedTasks].map(id => {
const t = tasks.find(t => t.id === id)
return `- **${t.id}**: ${t.title}${t._execution?.result?.error || 'Unknown'}`
return `- **${t.id}**: ${t.title}${t.result?.error || 'Unknown'}`
}).join('\n')}
` : ''}
### Artifacts
@@ -555,31 +554,31 @@ appendToEvents(`
`)
```
### Step 4.3: Write Back tasks.jsonl with _execution
### Step 4.3: Write Back .task/*.json with Execution State
Update the source JSONL file with execution states:
Update each task JSON file in-place with execution state:
```javascript
const updatedJsonl = tasks.map(task => JSON.stringify(task)).join('\n')
Write(planPath, updatedJsonl)
// Each task now has _execution: { status, executed_at, result }
tasks.forEach(task => {
const taskFile = `${planPath}/${task.id}.json`
Write(taskFile, JSON.stringify(task, null, 2))
})
// Each task now has status, executed_at, result fields
```
**_execution State** (added to each task):
**Execution State** (added to each task JSON file):
```javascript
{
// ... original task fields ...
_execution: {
status: "completed" | "failed" | "skipped",
executed_at: "ISO timestamp",
result: {
success: boolean,
files_modified: string[], // list of modified file paths
summary: string, // change description
convergence_verified: boolean[], // per criterion
error: string // if failed
}
status: "completed" | "failed" | "skipped",
executed_at: "ISO timestamp",
result: {
success: boolean,
files_modified: string[], // list of modified file paths
summary: string, // change description
convergence_verified: boolean[], // per criterion
error: string // if failed
}
}
```
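For example, a `.task/TASK-001.json` after a successful run might look like this (all values illustrative):
```json
{
  "id": "TASK-001",
  "title": "Fix null check in session loader",
  "type": "fix",
  "depends_on": [],
  "convergence": {
    "criteria": ["Loader tolerates missing session file"],
    "verification": "jest --testPathPattern=loader"
  },
  "status": "completed",
  "executed_at": "2026-02-12T15:20:00Z",
  "result": {
    "success": true,
    "files_modified": ["src/session/loader.ts"],
    "summary": "Added guard before deserializing session state",
    "convergence_verified": [true]
  }
}
```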
@@ -604,7 +603,7 @@ AskUserQuestion({
| Selection | Action |
|-----------|--------|
| Retry Failed | Filter tasks with `_execution.status === 'failed'`, re-execute, append `[RETRY]` events |
| Retry Failed | Filter tasks with `status === 'failed'`, re-execute, append `[RETRY]` events |
| View Events | Display execution-events.md content |
| Create Issue | `$issue:new` from failed task details |
| Done | Display artifact paths, end workflow |
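The "Retry Failed" branch maps directly onto the per-file state written back in Step 4.3; a minimal sketch:
```javascript
// Sketch only — re-run failed tasks in order, keeping completed state intact.
const retryIds = new Set(tasks.filter(t => t.status === 'failed').map(t => t.id))
for (const taskId of executionOrder.filter(id => retryIds.has(id))) {
  appendToEvents(`[RETRY] ${taskId}`)
  // ... re-enter the Phase 2 execution loop for this task ...
}
```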
@@ -615,10 +614,10 @@ AskUserQuestion({
| Situation | Action | Recovery |
|-----------|--------|----------|
| JSONL file not found | Report error with path | Check path, verify planning phase output |
| Invalid JSON line | Report line number and error | Fix JSONL file manually |
| .task/ directory not found | Report error with path | Check path, verify planning phase output |
| Invalid JSON file | Report filename and error | Fix task JSON file manually |
| Missing convergence | Report validation error | Add convergence fields to tasks |
| Circular dependency | Stop, report cycle path | Fix dependencies in JSONL |
| Circular dependency | Stop, report cycle path | Fix dependencies in task files |
| Task execution fails | Record in events, ask user | Retry, skip, accept, or abort |
| Convergence verification fails | Mark task failed, ask user | Fix code and retry, or accept |
| Verification command timeout | Mark as unverified | Manual verification needed |