From 6b4b9b0775bfe025e4bbd6b6cffd128f8cf37902 Mon Sep 17 00:00:00 2001 From: catlog22 Date: Wed, 14 Jan 2026 12:15:42 +0800 Subject: [PATCH] feat: enhance multi-CLI planning with new schema for solutions and implementation plans; improve file handling with async methods --- .claude/agents/cli-discuss-agent.md | 62 ++++-- .claude/commands/workflow/multi-cli-plan.md | 155 ++++++++++++-- ccw/src/core/lite-scanner.ts | 187 +++++++++++++++- ccw/src/core/routes/session-routes.ts | 225 +++++++++++++++----- 4 files changed, 527 insertions(+), 102 deletions(-) diff --git a/.claude/agents/cli-discuss-agent.md b/.claude/agents/cli-discuss-agent.md index 170d2733..36f2daa9 100644 --- a/.claude/agents/cli-discuss-agent.md +++ b/.claude/agents/cli-discuss-agent.md @@ -60,24 +60,43 @@ Phase 5: Output Generation **Output Path**: `{session.folder}/rounds/{round_number}/synthesis.json` -### Primary Fields (orchestrator reads these) - ```json { "round": 1, "solutions": [ { "name": "Solution Name", - "description": "What this does", "source_cli": ["gemini", "codex"], - "pros": ["advantage 1"], - "cons": ["disadvantage 1"], + "feasibility": 0.85, "effort": "low|medium|high", "risk": "low|medium|high", - "maintainability": "low|medium|high", - "performance_impact": "positive|neutral|negative", - "affected_files": [{"file": "path", "line": 10, "reason": "why"}], - "score": 85 + "summary": "Brief analysis summary", + "implementation_plan": { + "approach": "High-level technical approach", + "tasks": [ + { + "id": "T1", + "name": "Task name", + "depends_on": [], + "files": [{"file": "path", "line": 10, "action": "modify|create|delete"}], + "key_point": "Critical consideration for this task" + }, + { + "id": "T2", + "name": "Second task", + "depends_on": ["T1"], + "files": [{"file": "path2", "line": 1, "action": "create"}], + "key_point": null + } + ], + "execution_flow": "T1 → T2 → T3 (T2,T3 can parallel after T1)", + "milestones": ["Interface defined", "Core logic complete", "Tests passing"] + }, + "dependencies": { + "internal": ["@/lib/module"], + "external": ["npm:package@version"] + }, + "technical_concerns": ["Potential blocker 1", "Risk area 2"] } ], "convergence": { @@ -94,14 +113,21 @@ Phase 5: Output Generation } ``` -### Extended Fields (for visualization) +**Schema Fields**: -- `metadata` - artifactId, timestamp, contributingAgents, durationSeconds -- `discussionTopic` - title, description, scope, status, tags -- `relatedFiles` - fileTree, impactSummary -- `planning` - functional/nonFunctional requirements -- `decision` - status, selectedSolution, rejectedAlternatives -- `decisionRecords` - timeline events +| Field | Purpose | +|-------|---------| +| `feasibility` | Quantitative viability score (0-1) | +| `summary` | Narrative analysis summary | +| `implementation_plan.approach` | High-level technical strategy | +| `implementation_plan.tasks[]` | Discrete implementation tasks | +| `implementation_plan.tasks[].depends_on` | Task dependencies (IDs) | +| `implementation_plan.tasks[].key_point` | Critical consideration for task | +| `implementation_plan.execution_flow` | Visual task sequence | +| `implementation_plan.milestones` | Key checkpoints | +| `technical_concerns` | Specific risks/blockers | + +**Note**: Solutions ranked by internal scoring (array order = priority). `pros/cons` merged into `summary` and `technical_concerns`. --- @@ -273,7 +299,7 @@ Second+ CLI receives prior analysis for verification: 3. Combine pros/cons/affected_files from multiple sources 4. 
Track source_cli attribution

-**Scoring formula**:
+**Internal scoring** (used for ranking, not exported):
```
score = (source_cli.length × 20) // Multi-CLI consensus
+ effort_score[effort] // low=30, medium=20, high=10
@@ -282,7 +308,7 @@ score = (source_cli.length × 20) // Multi-CLI consensus
+ effort_score[effort] // low=30, medium=20, high=10
+ risk_score[risk] // low=30, medium=20, high=10
+ min(affected_files.length × 3, 15) // Specificity
```

-**Output**: Top 3 solutions ranked by score
+**Output**: Top 3 solutions, ranked in array order (highest score first)

---

diff --git a/.claude/commands/workflow/multi-cli-plan.md b/.claude/commands/workflow/multi-cli-plan.md
index c2f490ca..a06b4098 100644
--- a/.claude/commands/workflow/multi-cli-plan.md
+++ b/.claude/commands/workflow/multi-cli-plan.md
@@ -130,7 +130,7 @@ Task({
- task_description: ${taskDescription}
- round_number: ${currentRound}
- session: { id: "${sessionId}", folder: "${sessionFolder}" }
- ace_context: ${JSON.stringify(contextPackage)}
- previous_rounds: ${JSON.stringify(analysisResults)}
- user_feedback: ${userFeedback || 'None'}
- cli_config: { tools: ["gemini", "codex"], mode: "parallel", fallback_chain: ["gemini", "codex", "claude"] }
@@ -225,7 +225,96 @@ AskUserQuestion({

### Phase 5: Plan Generation

-**Invoke Planning Agent**:
+**Step 1: Build Context-Package** (Orchestrator responsibility):
+```javascript
+// Extract key information from user decision and synthesis
+const contextPackage = {
+  // Core solution details
+  solution: {
+    name: selectedSolution.name,
+    source_cli: selectedSolution.source_cli,
+    feasibility: selectedSolution.feasibility,
+    effort: selectedSolution.effort,
+    risk: selectedSolution.risk,
+    summary: selectedSolution.summary
+  },
+  // Implementation plan (tasks, flow, milestones)
+  implementation_plan: selectedSolution.implementation_plan,
+  // Dependencies
+  dependencies: selectedSolution.dependencies || { internal: [], external: [] },
+  // Technical concerns
+  technical_concerns: selectedSolution.technical_concerns || [],
+  // Consensus from cross-verification
+  consensus: {
+    agreements: synthesis.cross_verification.agreements,
+    resolved_conflicts: synthesis.cross_verification.resolution
+  },
+  // User constraints (from Phase 4 feedback)
+  constraints: userConstraints || [],
+  // Task context
+  task_description: taskDescription,
+  session_id: sessionId
+}
+
+// Write context-package for traceability
+Write(`${sessionFolder}/context-package.json`, JSON.stringify(contextPackage, null, 2))
+```
+
+**Context-Package Schema**:
+
+| Field | Type | Description |
+|-------|------|-------------|
+| `solution` | object | User-selected solution from synthesis |
+| `solution.name` | string | Solution identifier |
+| `solution.feasibility` | number | Viability score (0-1) |
+| `solution.summary` | string | Brief analysis summary |
+| `implementation_plan` | object | Task breakdown with flow and dependencies |
+| `implementation_plan.approach` | string | High-level technical strategy |
+| `implementation_plan.tasks[]` | array | Discrete tasks with id, name, depends_on, files |
+| `implementation_plan.execution_flow` | string | Task sequence (e.g., "T1 → T2 → T3") |
+| `implementation_plan.milestones` | string[] | Key checkpoints |
+| `dependencies` | object | Module and package dependencies |
+| `technical_concerns` | string[] | Risks and blockers |
+| `consensus` | object | Cross-verified agreements from multi-CLI |
+| `constraints` | string[] | User-specified constraints from Phase 4 |
+
+```json
+{
+  "solution": {
+    "name": "Strategy Pattern 
Refactoring", + "source_cli": ["gemini", "codex"], + "feasibility": 0.88, + "effort": "medium", + "risk": "low", + "summary": "Extract payment gateway interface, implement strategy pattern for multi-gateway support" + }, + "implementation_plan": { + "approach": "Define interface → Create concrete strategies → Implement factory → Migrate existing code", + "tasks": [ + {"id": "T1", "name": "Define PaymentGateway interface", "depends_on": [], "files": [{"file": "src/types/payment.ts", "line": 1, "action": "create"}], "key_point": "Include all existing Stripe methods"}, + {"id": "T2", "name": "Implement StripeGateway", "depends_on": ["T1"], "files": [{"file": "src/payment/stripe.ts", "line": 1, "action": "create"}], "key_point": "Wrap existing logic"}, + {"id": "T3", "name": "Create GatewayFactory", "depends_on": ["T1"], "files": [{"file": "src/payment/factory.ts", "line": 1, "action": "create"}], "key_point": null}, + {"id": "T4", "name": "Migrate processor to use factory", "depends_on": ["T2", "T3"], "files": [{"file": "src/payment/processor.ts", "line": 45, "action": "modify"}], "key_point": "Backward compatible"} + ], + "execution_flow": "T1 → (T2 | T3) → T4", + "milestones": ["Interface defined", "Gateway implementations complete", "Migration done"] + }, + "dependencies": { + "internal": ["@/lib/payment-gateway", "@/types/payment"], + "external": ["stripe@^14.0.0"] + }, + "technical_concerns": ["Existing tests must pass", "No breaking API changes"], + "consensus": { + "agreements": ["Use strategy pattern", "Keep existing API"], + "resolved_conflicts": "Factory over DI for simpler integration" + }, + "constraints": ["backward compatible", "no breaking changes to PaymentResult type"], + "task_description": "Refactor payment processing for multi-gateway support", + "session_id": "MCP-payment-refactor-2026-01-14" +} +``` + +**Step 2: Invoke Planning Agent**: ```javascript Task({ subagent_type: "cli-lite-planning-agent", @@ -235,29 +324,35 @@ Task({ ## Schema Reference Execute: cat ~/.claude/workflows/cli-templates/schemas/plan-json-schema.json -## Selected Solution -${JSON.stringify(selectedSolution)} - -## Analysis Consensus -${synthesis.cross_verification.agreements.join('\n')} +## Context-Package (from orchestrator) +${JSON.stringify(contextPackage, null, 2)} ## Execution Process 1. Read plan-json-schema.json for output structure 2. Read project-tech.json and project-guidelines.json -3. Decompose solution into 2-7 tasks (group by feature, not file) -4. Assign dependencies and execution groups -5. Generate IMPL_PLAN.md with step-by-step documentation -6. Generate plan.json following schema exactly +3. Parse context-package fields: + - solution: name, feasibility, summary + - implementation_plan: tasks[], execution_flow, milestones + - dependencies: internal[], external[] + - technical_concerns: risks/blockers + - consensus: agreements, resolved_conflicts + - constraints: user requirements +4. Use implementation_plan.tasks[] as task foundation +5. Preserve task dependencies (depends_on) and execution_flow +6. Expand tasks with detailed acceptance criteria +7. Generate IMPL_PLAN.md documenting milestones and key_points +8. 
Generate plan.json following schema exactly ## Output - ${sessionFolder}/IMPL_PLAN.md - ${sessionFolder}/plan.json ## Completion Checklist -- [ ] IMPL_PLAN.md written with complete documentation -- [ ] plan.json follows schema exactly -- [ ] All affected files have line numbers -- [ ] Tasks grouped by feature (not one per file) +- [ ] IMPL_PLAN.md documents approach, milestones, technical_concerns +- [ ] plan.json preserves task dependencies from implementation_plan +- [ ] Task execution order follows execution_flow +- [ ] Key_points reflected in task descriptions +- [ ] User constraints applied to implementation - [ ] Acceptance criteria are testable ` }) @@ -279,6 +374,7 @@ if (userConfirms) { │ ├── 1/synthesis.json # Round 1 analysis (cli-discuss-agent) │ ├── 2/synthesis.json # Round 2 analysis (cli-discuss-agent) │ └── .../ +├── context-package.json # Extracted context for planning (orchestrator) ├── IMPL_PLAN.md # Documentation (cli-lite-planning-agent) └── plan.json # Structured plan (cli-lite-planning-agent) ``` @@ -289,23 +385,32 @@ if (userConfirms) { |------|----------|---------| | `session-state.json` | Orchestrator | Session metadata, rounds, decisions | | `rounds/*/synthesis.json` | cli-discuss-agent | Solutions, convergence, cross-verification | +| `context-package.json` | Orchestrator | Extracted solution, dependencies, consensus for planning | | `IMPL_PLAN.md` | cli-lite-planning-agent | Human-readable plan | | `plan.json` | cli-lite-planning-agent | Structured tasks for execution | ## synthesis.json Schema -**Primary Fields** (orchestrator reads these): ```json { "round": 1, "solutions": [{ "name": "Solution Name", - "description": "What this does", "source_cli": ["gemini", "codex"], - "pros": [], "cons": [], + "feasibility": 0.85, "effort": "low|medium|high", "risk": "low|medium|high", - "affected_files": [{"file": "path", "line": 10, "reason": "why"}] + "summary": "Brief analysis summary", + "implementation_plan": { + "approach": "High-level technical approach", + "tasks": [ + {"id": "T1", "name": "Task", "depends_on": [], "files": [], "key_point": "..."} + ], + "execution_flow": "T1 → T2 → T3", + "milestones": ["Checkpoint 1", "Checkpoint 2"] + }, + "dependencies": {"internal": [], "external": []}, + "technical_concerns": ["Risk 1", "Blocker 2"] }], "convergence": { "score": 0.85, @@ -321,7 +426,17 @@ if (userConfirms) { } ``` -**Extended Fields** (for visualization): `metadata`, `discussionTopic`, `relatedFiles`, `planning`, `decision`, `decisionRecords` +**Key Planning Fields**: + +| Field | Purpose | +|-------|---------| +| `feasibility` | Viability score (0-1) | +| `implementation_plan.tasks[]` | Discrete tasks with dependencies | +| `implementation_plan.execution_flow` | Task sequence visualization | +| `implementation_plan.milestones` | Key checkpoints | +| `technical_concerns` | Risks and blockers | + +**Note**: Solutions ranked by internal scoring (array order = priority) ## TodoWrite Structure diff --git a/ccw/src/core/lite-scanner.ts b/ccw/src/core/lite-scanner.ts index fdb74ad4..4c10379e 100644 --- a/ccw/src/core/lite-scanner.ts +++ b/ccw/src/core/lite-scanner.ts @@ -195,8 +195,65 @@ async function scanMultiCliDir(dir: string): Promise { } } +// NEW Schema types for multi-cli synthesis +interface SolutionFileAction { + file: string; + line: number; + action: 'modify' | 'create' | 'delete'; +} + +interface SolutionTask { + id: string; + name: string; + depends_on: string[]; + files: SolutionFileAction[]; + key_point: string | null; +} + +interface 
SolutionImplementationPlan {
+  approach: string;
+  tasks: SolutionTask[];
+  execution_flow: string;
+  milestones: string[];
+}
+
+interface SolutionDependencies {
+  internal: string[];
+  external: string[];
+}
+
+interface Solution {
+  name: string;
+  source_cli: string[];
+  feasibility: number; // 0-1
+  effort: 'low' | 'medium' | 'high';
+  risk: 'low' | 'medium' | 'high';
+  summary: string;
+  implementation_plan: SolutionImplementationPlan;
+  dependencies: SolutionDependencies;
+  technical_concerns: string[];
+}
+
+interface SynthesisConvergence {
+  score: number;
+  new_insights: boolean;
+  recommendation: 'converged' | 'continue' | 'user_input_needed';
+}
+
+interface SynthesisCrossVerification {
+  agreements: string[];
+  disagreements: string[];
+  resolution: string;
+}
+
interface RoundSynthesis {
  round: number;
+  // NEW schema fields
+  solutions?: Solution[];
+  convergence?: SynthesisConvergence;
+  cross_verification?: SynthesisCrossVerification;
+  clarification_questions?: string[];
+  // OLD schema fields (backward compatibility)
  converged?: boolean;
  tasks?: unknown[];
  synthesis?: unknown;
@@ -230,31 +287,72 @@ async function loadRoundSyntheses(sessionPath: string): Promise<RoundSynthesis[]> {
function calculateMultiCliProgress(syntheses: RoundSynthesis[]): Progress {
  const latestSynthesis = syntheses[syntheses.length - 1];

+  // NEW schema: derive progress from convergence data
+  if (latestSynthesis.convergence) {
+    const score = latestSynthesis.convergence.score ?? 0;
+    const recommendation = latestSynthesis.convergence.recommendation;
+    const isConverged = recommendation === 'converged';
+    const solutions = latestSynthesis.solutions || [];
+    const solutionsCount = solutions.length;
+    const avgFeasibility = solutionsCount > 0
+      ? solutions.reduce((sum, s) => sum + (s.feasibility || 0), 0) / solutionsCount
+      : 0;
+
+    // Total is based on rounds, percentage derived from convergence score
+    const total = syntheses.length;
+    const completed = isConverged ? total : Math.max(0, total - 1);
+    const percentage = isConverged ? 100 : Math.round(score * 100);
+
+    return {
+      total,
+      completed,
+      percentage,
+      convergenceScore: score,
+      recommendation,
+      solutionsCount,
+      avgFeasibility: Math.round(avgFeasibility * 100) / 100
+    };
+  }
+
+  // OLD schema: Fallback to converged boolean
+  const isConverged = latestSynthesis.converged === true;
  const total = syntheses.length;
  const completed = isConverged ? total : Math.max(0, total - 1);
  const percentage = isConverged ? 
100 : Math.round((completed / Math.max(total, 1)) * 100); @@ -264,6 +362,8 @@ function calculateMultiCliProgress(syntheses: RoundSynthesis[]): Progress { /** * Extract tasks from synthesis objects + * NEW schema: Extract from solutions[].implementation_plan.tasks + * OLD schema: Extract from tasks[] array directly * @param syntheses - Array of round syntheses * @returns Normalized tasks from latest synthesis */ @@ -271,8 +371,33 @@ function extractTasksFromSyntheses(syntheses: RoundSynthesis[]): NormalizedTask[ if (syntheses.length === 0) return []; const latestSynthesis = syntheses[syntheses.length - 1]; - const tasks = latestSynthesis.tasks; + // NEW schema: Extract tasks from solutions + if (latestSynthesis.solutions && Array.isArray(latestSynthesis.solutions)) { + const allTasks: NormalizedTask[] = []; + + for (const solution of latestSynthesis.solutions) { + const implPlan = solution.implementation_plan; + if (!implPlan?.tasks || !Array.isArray(implPlan.tasks)) continue; + + for (const task of implPlan.tasks) { + const normalizedTask = normalizeSolutionTask(task, solution); + if (normalizedTask) { + allTasks.push(normalizedTask); + } + } + } + + // Sort by task ID + return allTasks.sort((a, b) => { + const aNum = parseInt(a.id?.replace(/\D/g, '') || '0'); + const bNum = parseInt(b.id?.replace(/\D/g, '') || '0'); + return aNum - bNum; + }); + } + + // OLD schema: Extract from tasks array directly + const tasks = latestSynthesis.tasks; if (!Array.isArray(tasks)) return []; return tasks @@ -280,6 +405,50 @@ function extractTasksFromSyntheses(syntheses: RoundSynthesis[]): NormalizedTask[ .filter((task): task is NormalizedTask => task !== null); } +/** + * Normalize a solution task from NEW schema to NormalizedTask + * @param task - SolutionTask from new schema + * @param solution - Parent solution for context + * @returns Normalized task + */ +function normalizeSolutionTask(task: SolutionTask, solution: Solution): NormalizedTask | null { + if (!task || !task.id) return null; + + return { + id: task.id, + title: task.name || 'Untitled Task', + status: (task as unknown as { status?: string }).status || 'pending', + meta: { + type: 'implementation', + agent: null, + scope: solution.name || null, + module: null + }, + context: { + requirements: task.key_point ? [task.key_point] : [], + focus_paths: task.files?.map(f => f.file) || [], + acceptance: [], + depends_on: task.depends_on || [] + }, + flow_control: { + implementation_approach: task.files?.map((f, i) => ({ + step: `Step ${i + 1}`, + action: `${f.action} ${f.file}${f.line ? 
` at line ${f.line}` : ''}`
+      })) || []
+    },
+    _raw: {
+      task,
+      solution: {
+        name: solution.name,
+        source_cli: solution.source_cli,
+        feasibility: solution.feasibility,
+        effort: solution.effort,
+        risk: solution.risk
+      }
+    }
+  };
+}
+
/**
 * Load plan.json or fix-plan.json from session directory
 * @param sessionPath - Session directory path
diff --git a/ccw/src/core/routes/session-routes.ts b/ccw/src/core/routes/session-routes.ts
index b9a865f0..2991010d 100644
--- a/ccw/src/core/routes/session-routes.ts
+++ b/ccw/src/core/routes/session-routes.ts
@@ -2,10 +2,25 @@
 * Session Routes Module
 * Handles all Session/Task-related API endpoints
 */
-import { readFileSync, writeFileSync, existsSync, readdirSync } from 'fs';
+import { readFileSync, writeFileSync, existsSync } from 'fs';
+import { readFile, readdir, access } from 'fs/promises';
import { join } from 'path';
import type { RouteContext } from './types.js';

+/**
+ * Check if a file or directory exists (async version)
+ * @param filePath - Path to check
+ * @returns Promise<boolean>
+ */
+async function fileExists(filePath: string): Promise<boolean> {
+  try {
+    await access(filePath);
+    return true;
+  } catch {
+    return false;
+  }
+}
+
/**
 * Get session detail data (context, summaries, impl-plan, review, multi-cli)
 * @param {string} sessionPath - Path to session directory
@@ -23,14 +38,15 @@ async function getSessionDetailData(sessionPath: string, dataType: string): Prom
  if (dataType === 'context' || dataType === 'all') {
    // Try .process/context-package.json first (common location)
    let contextFile = join(normalizedPath, '.process', 'context-package.json');
-    if (!existsSync(contextFile)) {
+    if (!(await fileExists(contextFile))) {
      // Fallback to session root
      contextFile = join(normalizedPath, 'context-package.json');
    }
-    if (existsSync(contextFile)) {
+    if (await fileExists(contextFile)) {
      try {
-        result.context = JSON.parse(readFileSync(contextFile, 'utf8'));
+        result.context = JSON.parse(await readFile(contextFile, 'utf8'));
      } catch (e) {
+        console.warn('Failed to parse context file:', contextFile, (e as Error).message);
        result.context = null;
      }
    }
@@ -40,18 +56,18 @@ async function getSessionDetailData(sessionPath: string, dataType: string): Prom
  if (dataType === 'tasks' || dataType === 'all') {
    const taskDir = join(normalizedPath, '.task');
    result.tasks = [];
-    if (existsSync(taskDir)) {
-      const files = readdirSync(taskDir).filter(f => f.endsWith('.json') && f.startsWith('IMPL-'));
+    if (await fileExists(taskDir)) {
+      const files = (await readdir(taskDir)).filter(f => f.endsWith('.json') && f.startsWith('IMPL-'));
      for (const file of files) {
        try {
-          const content = JSON.parse(readFileSync(join(taskDir, file), 'utf8'));
+          const content = JSON.parse(await readFile(join(taskDir, file), 'utf8'));
          result.tasks.push({
            filename: file,
            task_id: file.replace('.json', ''),
            ...content
          });
        } catch (e) {
-          // Skip unreadable files
+          console.warn('Failed to parse task file:', join(taskDir, file), (e as Error).message);
        }
      }
      // Sort by task ID
@@ -63,14 +79,14 @@ async function getSessionDetailData(sessionPath: string, dataType: string): Prom
  if (dataType === 'summary' || dataType === 'all') {
    const summariesDir = join(normalizedPath, '.summaries');
    result.summaries = [];
-    if (existsSync(summariesDir)) {
-      const files = readdirSync(summariesDir).filter(f => f.endsWith('.md'));
+    if (await fileExists(summariesDir)) {
+      const files = (await readdir(summariesDir)).filter(f => f.endsWith('.md'));
      for (const file of files) {
        try {
-          const content = 
readFileSync(join(summariesDir, file), 'utf8'); + const content = await readFile(join(summariesDir, file), 'utf8'); result.summaries.push({ name: file.replace('.md', ''), content }); } catch (e) { - // Skip unreadable files + console.warn('Failed to read summary file:', join(summariesDir, file), (e as Error).message); } } } @@ -79,10 +95,11 @@ async function getSessionDetailData(sessionPath: string, dataType: string): Prom // Load plan.json (for lite tasks) if (dataType === 'plan' || dataType === 'all') { const planFile = join(normalizedPath, 'plan.json'); - if (existsSync(planFile)) { + if (await fileExists(planFile)) { try { - result.plan = JSON.parse(readFileSync(planFile, 'utf8')); + result.plan = JSON.parse(await readFile(planFile, 'utf8')); } catch (e) { + console.warn('Failed to parse plan file:', planFile, (e as Error).message); result.plan = null; } } @@ -100,52 +117,54 @@ async function getSessionDetailData(sessionPath: string, dataType: string): Prom ]; for (const searchDir of searchDirs) { - if (!existsSync(searchDir)) continue; + if (!(await fileExists(searchDir))) continue; // Look for explorations-manifest.json const manifestFile = join(searchDir, 'explorations-manifest.json'); - if (existsSync(manifestFile)) { + if (await fileExists(manifestFile)) { try { - result.explorations.manifest = JSON.parse(readFileSync(manifestFile, 'utf8')); + result.explorations.manifest = JSON.parse(await readFile(manifestFile, 'utf8')); // Load each exploration file based on manifest const explorations = result.explorations.manifest.explorations || []; for (const exp of explorations) { const expFile = join(searchDir, exp.file); - if (existsSync(expFile)) { + if (await fileExists(expFile)) { try { - result.explorations.data[exp.angle] = JSON.parse(readFileSync(expFile, 'utf8')); + result.explorations.data[exp.angle] = JSON.parse(await readFile(expFile, 'utf8')); } catch (e) { - // Skip unreadable exploration files + console.warn('Failed to parse exploration file:', expFile, (e as Error).message); } } } break; // Found manifest, stop searching } catch (e) { + console.warn('Failed to parse explorations manifest:', manifestFile, (e as Error).message); result.explorations.manifest = null; } } // Look for diagnoses-manifest.json const diagManifestFile = join(searchDir, 'diagnoses-manifest.json'); - if (existsSync(diagManifestFile)) { + if (await fileExists(diagManifestFile)) { try { - result.diagnoses.manifest = JSON.parse(readFileSync(diagManifestFile, 'utf8')); + result.diagnoses.manifest = JSON.parse(await readFile(diagManifestFile, 'utf8')); // Load each diagnosis file based on manifest const diagnoses = result.diagnoses.manifest.diagnoses || []; for (const diag of diagnoses) { const diagFile = join(searchDir, diag.file); - if (existsSync(diagFile)) { + if (await fileExists(diagFile)) { try { - result.diagnoses.data[diag.angle] = JSON.parse(readFileSync(diagFile, 'utf8')); + result.diagnoses.data[diag.angle] = JSON.parse(await readFile(diagFile, 'utf8')); } catch (e) { - // Skip unreadable diagnosis files + console.warn('Failed to parse diagnosis file:', diagFile, (e as Error).message); } } } break; // Found manifest, stop searching } catch (e) { + console.warn('Failed to parse diagnoses manifest:', diagManifestFile, (e as Error).message); result.diagnoses.manifest = null; } } @@ -153,7 +172,7 @@ async function getSessionDetailData(sessionPath: string, dataType: string): Prom // Fallback: scan for exploration-*.json and diagnosis-*.json files directly if (!result.explorations.manifest) { try { - 
const expFiles = readdirSync(searchDir).filter(f => f.startsWith('exploration-') && f.endsWith('.json') && f !== 'explorations-manifest.json'); + const expFiles = (await readdir(searchDir)).filter(f => f.startsWith('exploration-') && f.endsWith('.json') && f !== 'explorations-manifest.json'); if (expFiles.length > 0) { // Create synthetic manifest result.explorations.manifest = { @@ -169,21 +188,21 @@ async function getSessionDetailData(sessionPath: string, dataType: string): Prom for (const file of expFiles) { const angle = file.replace('exploration-', '').replace('.json', ''); try { - result.explorations.data[angle] = JSON.parse(readFileSync(join(searchDir, file), 'utf8')); + result.explorations.data[angle] = JSON.parse(await readFile(join(searchDir, file), 'utf8')); } catch (e) { - // Skip unreadable files + console.warn('Failed to parse exploration file:', join(searchDir, file), (e as Error).message); } } } } catch (e) { - // Directory read failed + console.warn('Failed to read explorations directory:', searchDir, (e as Error).message); } } // Fallback: scan for diagnosis-*.json files directly if (!result.diagnoses.manifest) { try { - const diagFiles = readdirSync(searchDir).filter(f => f.startsWith('diagnosis-') && f.endsWith('.json') && f !== 'diagnoses-manifest.json'); + const diagFiles = (await readdir(searchDir)).filter(f => f.startsWith('diagnosis-') && f.endsWith('.json') && f !== 'diagnoses-manifest.json'); if (diagFiles.length > 0) { // Create synthetic manifest result.diagnoses.manifest = { @@ -199,14 +218,14 @@ async function getSessionDetailData(sessionPath: string, dataType: string): Prom for (const file of diagFiles) { const angle = file.replace('diagnosis-', '').replace('.json', ''); try { - result.diagnoses.data[angle] = JSON.parse(readFileSync(join(searchDir, file), 'utf8')); + result.diagnoses.data[angle] = JSON.parse(await readFile(join(searchDir, file), 'utf8')); } catch (e) { - // Skip unreadable files + console.warn('Failed to parse diagnosis file:', join(searchDir, file), (e as Error).message); } } } } catch (e) { - // Directory read failed + console.warn('Failed to read diagnoses directory:', searchDir, (e as Error).message); } } @@ -228,12 +247,12 @@ async function getSessionDetailData(sessionPath: string, dataType: string): Prom ]; for (const conflictFile of conflictFiles) { - if (existsSync(conflictFile)) { + if (await fileExists(conflictFile)) { try { - result.conflictResolution = JSON.parse(readFileSync(conflictFile, 'utf8')); + result.conflictResolution = JSON.parse(await readFile(conflictFile, 'utf8')); break; // Found file, stop searching } catch (e) { - // Skip unreadable file + console.warn('Failed to parse conflict resolution file:', conflictFile, (e as Error).message); } } } @@ -242,27 +261,60 @@ async function getSessionDetailData(sessionPath: string, dataType: string): Prom // Load IMPL_PLAN.md if (dataType === 'impl-plan' || dataType === 'all') { const implPlanFile = join(normalizedPath, 'IMPL_PLAN.md'); - if (existsSync(implPlanFile)) { + if (await fileExists(implPlanFile)) { try { - result.implPlan = readFileSync(implPlanFile, 'utf8'); + result.implPlan = await readFile(implPlanFile, 'utf8'); } catch (e) { + console.warn('Failed to read IMPL_PLAN.md:', implPlanFile, (e as Error).message); result.implPlan = null; } } } // Load multi-cli discussion rounds (rounds/*/synthesis.json) + // Supports both NEW and OLD schema formats if (dataType === 'multi-cli' || dataType === 'discussions' || dataType === 'all') { result.multiCli = { sessionId: 
normalizedPath.split('/').pop() || '', type: 'multi-cli-plan', - rounds: [] as Array<{ roundNumber: number; synthesis: Record | null }> + rounds: [] as Array<{ + roundNumber: number; + synthesis: Record | null; + // NEW schema extracted fields + solutions?: Array<{ + name: string; + source_cli: string[]; + feasibility: number; + effort: string; + risk: string; + summary: string; + tasksCount: number; + dependencies: { internal: string[]; external: string[] }; + technical_concerns: string[]; + }>; + convergence?: { + score: number; + new_insights: boolean; + recommendation: string; + }; + cross_verification?: { + agreements: string[]; + disagreements: string[]; + resolution: string; + }; + clarification_questions?: string[]; + }>, + // Aggregated data from latest synthesis + latestSolutions: [] as Array>, + latestConvergence: null as Record | null, + latestCrossVerification: null as Record | null, + clarificationQuestions: [] as string[] }; const roundsDir = join(normalizedPath, 'rounds'); - if (existsSync(roundsDir)) { + if (await fileExists(roundsDir)) { try { - const roundDirs = readdirSync(roundsDir) + const roundDirs = (await readdir(roundsDir)) .filter(d => /^\d+$/.test(d)) // Only numeric directories .sort((a, b) => parseInt(a) - parseInt(b)); @@ -270,21 +322,84 @@ async function getSessionDetailData(sessionPath: string, dataType: string): Prom const synthesisFile = join(roundsDir, roundDir, 'synthesis.json'); let synthesis: Record | null = null; - if (existsSync(synthesisFile)) { + if (await fileExists(synthesisFile)) { try { - synthesis = JSON.parse(readFileSync(synthesisFile, 'utf8')); + synthesis = JSON.parse(await readFile(synthesisFile, 'utf8')); } catch (e) { - // Skip unreadable synthesis files + console.warn('Failed to parse synthesis file:', synthesisFile, (e as Error).message); } } - result.multiCli.rounds.push({ + // Build round data with NEW schema fields extracted + const roundData: any = { roundNumber: parseInt(roundDir), synthesis - }); + }; + + // Extract NEW schema fields if present + if (synthesis) { + // Extract solutions with summary info + if (Array.isArray(synthesis.solutions)) { + roundData.solutions = (synthesis.solutions as Array>).map(s => ({ + name: s.name || '', + source_cli: s.source_cli || [], + feasibility: s.feasibility ?? 0, + effort: s.effort || 'unknown', + risk: s.risk || 'unknown', + summary: s.summary || '', + tasksCount: s.implementation_plan?.tasks?.length || 0, + dependencies: s.dependencies || { internal: [], external: [] }, + technical_concerns: s.technical_concerns || [] + })); + } + + // Extract convergence + if (synthesis.convergence && typeof synthesis.convergence === 'object') { + const conv = synthesis.convergence as Record; + roundData.convergence = { + score: conv.score ?? 0, + new_insights: conv.new_insights ?? false, + recommendation: conv.recommendation || 'unknown' + }; + } + + // Extract cross_verification + if (synthesis.cross_verification && typeof synthesis.cross_verification === 'object') { + const cv = synthesis.cross_verification as Record; + roundData.cross_verification = { + agreements: Array.isArray(cv.agreements) ? cv.agreements : [], + disagreements: Array.isArray(cv.disagreements) ? 
cv.disagreements : [], + resolution: (cv.resolution as string) || '' + }; + } + + // Extract clarification_questions + if (Array.isArray(synthesis.clarification_questions)) { + roundData.clarification_questions = synthesis.clarification_questions; + } + } + + result.multiCli.rounds.push(roundData); + } + + // Populate aggregated data from latest round + if (result.multiCli.rounds.length > 0) { + const latestRound = result.multiCli.rounds[result.multiCli.rounds.length - 1]; + if (latestRound.solutions) { + result.multiCli.latestSolutions = latestRound.solutions; + } + if (latestRound.convergence) { + result.multiCli.latestConvergence = latestRound.convergence; + } + if (latestRound.cross_verification) { + result.multiCli.latestCrossVerification = latestRound.cross_verification; + } + if (latestRound.clarification_questions) { + result.multiCli.clarificationQuestions = latestRound.clarification_questions; + } } } catch (e) { - // Directory read failed + console.warn('Failed to read rounds directory:', roundsDir, (e as Error).message); } } } @@ -299,12 +414,12 @@ async function getSessionDetailData(sessionPath: string, dataType: string): Prom totalFindings: 0 }; - if (existsSync(reviewDir)) { + if (await fileExists(reviewDir)) { // Load review-state.json const stateFile = join(reviewDir, 'review-state.json'); - if (existsSync(stateFile)) { + if (await fileExists(stateFile)) { try { - const state = JSON.parse(readFileSync(stateFile, 'utf8')); + const state = JSON.parse(await readFile(stateFile, 'utf8')); result.review.state = state; result.review.severityDistribution = state.severity_distribution || {}; result.review.totalFindings = state.total_findings || 0; @@ -313,18 +428,18 @@ async function getSessionDetailData(sessionPath: string, dataType: string): Prom result.review.crossCuttingConcerns = state.cross_cutting_concerns || []; result.review.criticalFiles = state.critical_files || []; } catch (e) { - // Skip unreadable state + console.warn('Failed to parse review state file:', stateFile, (e as Error).message); } } // Load dimension findings const dimensionsDir = join(reviewDir, 'dimensions'); - if (existsSync(dimensionsDir)) { - const files = readdirSync(dimensionsDir).filter(f => f.endsWith('.json')); + if (await fileExists(dimensionsDir)) { + const files = (await readdir(dimensionsDir)).filter(f => f.endsWith('.json')); for (const file of files) { try { const dimName = file.replace('.json', ''); - const data = JSON.parse(readFileSync(join(dimensionsDir, file), 'utf8')); + const data = JSON.parse(await readFile(join(dimensionsDir, file), 'utf8')); // Handle array structure: [ { findings: [...] } ] let findings = []; @@ -346,7 +461,7 @@ async function getSessionDetailData(sessionPath: string, dataType: string): Prom count: findings.length }); } catch (e) { - // Skip unreadable files + console.warn('Failed to parse review dimension file:', join(dimensionsDir, file), (e as Error).message); } } }
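
For reference, the internal scoring formula documented in cli-discuss-agent.md maps directly to code. A minimal TypeScript sketch, assuming the agent's pre-merge solution shape with `effort`/`risk` levels and an `affected_files` list; `scoreSolution` and `rankSolutions` are illustrative names, not part of this patch:

```typescript
type Level = 'low' | 'medium' | 'high';

interface ScoredSolution {
  source_cli: string[];
  effort: Level;
  risk: Level;
  affected_files: { file: string; line: number; reason: string }[];
}

const EFFORT_SCORE: Record<Level, number> = { low: 30, medium: 20, high: 10 };
const RISK_SCORE: Record<Level, number> = { low: 30, medium: 20, high: 10 };

// score = consensus + effort + risk + specificity (specificity capped at 15)
function scoreSolution(s: ScoredSolution): number {
  return (
    s.source_cli.length * 20 +                 // Multi-CLI consensus
    EFFORT_SCORE[s.effort] +                   // low=30, medium=20, high=10
    RISK_SCORE[s.risk] +                       // low=30, medium=20, high=10
    Math.min(s.affected_files.length * 3, 15)  // Specificity
  );
}

// Exported array order is the priority: highest score first.
function rankSolutions(solutions: ScoredSolution[]): ScoredSolution[] {
  return [...solutions].sort((a, b) => scoreSolution(b) - scoreSolution(a));
}
```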
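The `implementation_plan.tasks[]` schema encodes ordering through `depends_on`, which `execution_flow` strings such as "T1 → (T2 | T3) → T4" summarize for humans. A sketch of how a consumer could linearize that ordering; `orderTasks` is a hypothetical helper, not part of the patch:

```typescript
interface PlanTask {
  id: string;
  name: string;
  depends_on: string[];
}

// Depth-first ordering: every task is emitted after its dependencies,
// so "T1 → (T2 | T3) → T4" linearizes to T1, T2, T3, T4.
function orderTasks(tasks: PlanTask[]): PlanTask[] {
  const byId = new Map(tasks.map(t => [t.id, t]));
  const done = new Set<string>();
  const ordered: PlanTask[] = [];

  function visit(task: PlanTask): void {
    if (done.has(task.id)) return;
    done.add(task.id); // marked before recursion, so cycles cannot loop forever
    for (const depId of task.depends_on) {
      const dep = byId.get(depId);
      if (dep) visit(dep); // unknown IDs are skipped rather than fatal
    }
    ordered.push(task);
  }

  tasks.forEach(visit);
  return ordered;
}
```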
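The tolerant async file-handling pattern in session-routes.ts (read, parse, fall back to `null` on failure) also applies when loading a single round's `synthesis.json`. A minimal sketch, assuming the `rounds/{n}/synthesis.json` layout documented above; `loadLatestSynthesis` is an illustrative helper, not part of the patch:

```typescript
import { readFile } from 'fs/promises';
import { join } from 'path';

async function loadLatestSynthesis(
  sessionFolder: string,
  round: number
): Promise<Record<string, unknown> | null> {
  const file = join(sessionFolder, 'rounds', String(round), 'synthesis.json');
  try {
    // Same pattern as session-routes.ts: a missing or malformed file
    // resolves to null instead of throwing.
    return JSON.parse(await readFile(file, 'utf8'));
  } catch {
    return null;
  }
}
```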