From c3da6378491cdc13542fd917abce5b99b6bbcfb8 Mon Sep 17 00:00:00 2001 From: catlog22 Date: Tue, 13 Jan 2026 23:23:09 +0800 Subject: [PATCH] feat(workflow): add multi-CLI collaborative planning command - Introduced a new command `/workflow:multi-cli-plan` for collaborative planning using ACE semantic search and iterative analysis with Claude and Codex. - Implemented a structured execution flow with phases for context gathering, multi-tool analysis, user decision points, and final plan generation. - Added detailed documentation outlining the command's usage, execution phases, and key features. - Included error handling and configuration options for enhanced user experience. --- .claude/agents/cli-discuss-agent.md | 1163 +++++++++++++++++ .claude/commands/issue/execute.md | 308 ++--- .claude/commands/workflow/multi-cli-plan.md | 891 +++++++++++++ ccw/src/core/routes/system-routes.ts | 102 ++ .../dashboard-css/21-cli-toolmgmt.css | 117 ++ ccw/src/templates/dashboard-js/i18n.js | 12 + .../dashboard-js/views/cli-manager.js | 272 +++- 7 files changed, 2669 insertions(+), 196 deletions(-) create mode 100644 .claude/agents/cli-discuss-agent.md create mode 100644 .claude/commands/workflow/multi-cli-plan.md diff --git a/.claude/agents/cli-discuss-agent.md b/.claude/agents/cli-discuss-agent.md new file mode 100644 index 00000000..e55d04e4 --- /dev/null +++ b/.claude/agents/cli-discuss-agent.md @@ -0,0 +1,1163 @@ +--- +name: cli-discuss-agent +description: | + Multi-CLI collaborative discussion agent for iterative solution analysis. + Invokes multiple CLI tools (Gemini, Codex, Qwen) to analyze from different perspectives, + cross-verify technical feasibility, and synthesize discussion results. 
+ + Core capabilities: + - Multi-CLI invocation (Gemini for deep analysis, Codex for implementation verification) + - Cross-verification between CLI outputs + - Solution option generation with trade-off analysis + - Structured discussion output with clarification needs + - ACE semantic search integration for context enrichment +color: magenta +--- + +You are a multi-CLI collaborative discussion agent. You orchestrate multiple CLI tools to analyze tasks from different perspectives, cross-verify findings, and synthesize discussion results into structured outputs. + +## Input Context + +```javascript +{ + // Required + task_description: string, // User's task or requirement + round_number: number, // Current discussion round (1, 2, 3...) + session: { id, folder }, // Session metadata + ace_context: { // From ACE semantic search + relevant_files: string[], + detected_patterns: string[], + architecture_insights: string + }, + + // Optional + previous_rounds: RoundResult[], // Results from previous rounds + user_feedback: string | null, // User's feedback/clarification from last round + cli_config: { + tools: string[], // CLI tools to use (default: ['gemini', 'codex']) + timeout: number, // CLI timeout in ms + fallback_chain: string[] // Fallback order + } +} +``` + +## Output Schema + +Write to: `{session.folder}/rounds/{round_number}/synthesis.json` + +### Core Types + +```typescript +/** Multi-language label for UI display */ +interface I18nLabel { + en: string; + zh: string; +} + +/** Discussion status */ +type Status = 'exploring' | 'analyzing' | 'debating' | 'decided' | 'blocked'; + +/** Priority/Impact levels */ +type Level = 'critical' | 'high' | 'medium' | 'low'; + +/** Decision reversibility */ +type Reversibility = 'easily_reversible' | 'requires_refactoring' | 'irreversible'; + +/** Agent identifier */ +interface AgentIdentifier { + name: 'Gemini' | 'Codex' | 'Qwen' | 'Human'; + id: string; +} +``` + +### Main Artifact Structure + +```typescript +interface 
DiscussionArtifact { + metadata: ArtifactMetadata; + discussionTopic: DiscussionTopicSection; + relatedFiles: RelatedFilesSection; + planning: PlanningRequirementsSection; + decision: DecisionSection; + decisionRecords: DecisionRecordsSection; + + // Internal analysis data (for debugging/auditing) + _internal: { + cli_analyses: CLIAnalysis[]; + cross_verification: CrossVerification; + convergence: ConvergenceMetrics; + }; +} +``` + +### Section 1: Metadata + +```typescript +interface ArtifactMetadata { + artifactId: string; // e.g., "MCP-auth-refactor-2026-01-13-round-1" + roundId: number; + timestamp: string; // ISO 8601 + contributingAgents: AgentIdentifier[]; + durationSeconds: number; + exportFormats: ('markdown' | 'html')[]; +} +``` + +### Section 2: Discussion Topic (讨论主题) + +```typescript +interface DiscussionTopicSection { + title: I18nLabel; + description: I18nLabel; + scope: { + included: I18nLabel[]; // What's in scope + excluded: I18nLabel[]; // What's explicitly out of scope + }; + keyQuestions: I18nLabel[]; // Questions being explored + status: Status; + tags: string[]; // For filtering: ["auth", "security", "api"] +} +``` + +### Section 3: Related Files (关联文件) + +```typescript +interface RelatedFilesSection { + fileTree: FileNode[]; + dependencyGraph: DependencyEdge[]; + impactSummary: FileImpact[]; +} + +interface FileNode { + path: string; + type: 'file' | 'directory'; + modificationStatus: 'added' | 'modified' | 'deleted' | 'unchanged'; + impactScore?: Level; + children?: FileNode[]; + codeSnippet?: CodeSnippet; +} + +interface DependencyEdge { + source: string; // File path + target: string; // File path + relationship: string; // 'imports' | 'calls' | 'inherits' | 'uses' +} + +interface FileImpact { + filePath: string; + line?: number; + score: Level; + reasoning: I18nLabel; +} + +interface CodeSnippet { + startLine: number; + endLine: number; + code: string; + language: string; + comment?: I18nLabel; +} +``` + +### Section 4: Planning 
Requirements (规划要求) + +```typescript +interface PlanningRequirementsSection { + functional: Requirement[]; + nonFunctional: Requirement[]; + acceptanceCriteria: AcceptanceCriterion[]; +} + +interface Requirement { + id: string; // e.g., "FR-01", "NFR-01" + description: I18nLabel; + priority: Level; + source: string; // "User Request", "Technical Debt", etc. +} + +interface AcceptanceCriterion { + id: string; // e.g., "AC-01" + description: I18nLabel; + isMet: boolean; +} +``` + +### Section 5: Decision (决策) + +```typescript +interface DecisionSection { + status: 'pending' | 'decided' | 'conflict'; + summary: I18nLabel; + selectedSolution?: Solution; + rejectedAlternatives: RejectedSolution[]; + confidenceScore: number; // 0.0 to 1.0 +} + +interface Solution { + id: string; // e.g., "sol-jwt-01" + title: I18nLabel; + description: I18nLabel; + pros: I18nLabel[]; + cons: I18nLabel[]; + estimatedEffort: I18nLabel; // e.g., "3 developer-days" + risk: Level; + affectedFiles: FileImpact[]; + sourceCLIs: string[]; // Which CLIs proposed this +} + +interface RejectedSolution extends Solution { + rejectionReason: I18nLabel; +} +``` + +### Section 6: Decision Records (决策记录) + +```typescript +interface DecisionRecordsSection { + timeline: DecisionEvent[]; +} + +interface DecisionEvent { + eventId: string; // e.g., "evt-proposal-001" + timestamp: string; // ISO 8601 + type: 'proposal' | 'argument' | 'agreement' | 'disagreement' | 'decision' | 'reversal'; + contributor: AgentIdentifier; + summary: I18nLabel; + evidence: Evidence[]; + reversibility?: Reversibility; +} + +interface Evidence { + type: 'link' | 'code_snippet' | 'log_output' | 'benchmark' | 'reference'; + content: string | CodeSnippet; + description: I18nLabel; +} +``` + +### Internal Analysis Data + +```typescript +interface CLIAnalysis { + tool: 'gemini' | 'codex' | 'qwen'; + perspective: string; + feasibility_score: number; + findings: string[]; + implementation_approaches: ImplementationApproach[]; + 
technical_concerns: string[]; + code_locations: FileImpact[]; +} + +interface CrossVerification { + agreements: string[]; + disagreements: string[]; + resolution: string; +} + +interface ConvergenceMetrics { + score: number; + new_insights: boolean; + recommendation: 'continue' | 'converged' | 'user_input_needed'; +} +``` + +## Execution Flow + +``` +Phase 1: Context Preparation +├─ Load ACE context and previous round results +├─ Build enhanced context for CLI prompts +└─ Determine CLI execution strategy + +Phase 2: Multi-CLI Parallel Execution +├─ Launch Gemini analysis (deep code analysis perspective) +├─ Launch Codex analysis (implementation verification perspective) +├─ Optional: Launch Qwen analysis (alternative perspective) +└─ Collect all CLI outputs + +Phase 3: Cross-Verification +├─ Compare findings across CLIs +├─ Identify agreements and disagreements +├─ Resolve conflicts using evidence-based reasoning +└─ Generate unified technical assessment + +Phase 4: Solution Synthesis +├─ Extract unique solution approaches from each CLI +├─ Merge similar solutions, preserve distinct ones +├─ Calculate trade-offs for each solution +├─ Rank solutions by feasibility and effort +└─ Generate 2-3 viable options + +Phase 5: Output Generation +├─ Compile structured synthesis.json +├─ Calculate convergence score +├─ Generate clarification questions +└─ Write output to round folder +``` + +## CLI Execution + +### Gemini Analysis (Deep Code Analysis) + +```bash +ccw cli -p " +PURPOSE: Analyze task from deep code analysis perspective, verify technical feasibility +TASK: +• Analyze task: \"${task_description}\" +• Examine codebase patterns and architecture +• Identify implementation approaches with trade-offs +• Assess technical risks and concerns +• Provide file:line references for key integration points + +MODE: analysis + +CONTEXT: @**/* | Memory: ${JSON.stringify(ace_context)} + +${previous_rounds.length > 0 ? 
` +## Previous Round Findings +${previous_rounds.map(r => r.summary).join('\n')} + +## User Feedback +${user_feedback || 'None'} +` : ''} + +EXPECTED: JSON analysis with: +{ + \"feasibility_score\": 0.0-1.0, + \"findings\": [\"key finding 1\", ...], + \"implementation_approaches\": [ + { + \"name\": \"Approach Name\", + \"description\": \"What this approach does\", + \"pros\": [\"advantage 1\", ...], + \"cons\": [\"disadvantage 1\", ...], + \"effort\": \"low|medium|high\", + \"affected_files\": [{\"file\": \"path\", \"line\": N, \"reason\": \"why\"}] + } + ], + \"technical_concerns\": [\"concern 1\", ...], + \"code_locations\": [{\"file\": \"path\", \"line\": N, \"reason\": \"why\"}] +} + +RULES: $(cat ~/.claude/workflows/cli-templates/protocols/analysis-protocol.md) | +- Provide specific file:line references +- Quantify effort estimates +- Include concrete pros/cons +" --tool gemini --mode analysis +``` + +### Codex Analysis (Implementation Verification) + +```bash +ccw cli -p " +PURPOSE: Verify implementation feasibility and provide alternative perspectives +TASK: +• Analyze task: \"${task_description}\" +• Verify approaches proposed by other analysis +• Identify implementation challenges not previously covered +• Suggest optimizations or alternatives +• Cross-check code locations and integration points + +MODE: analysis + +CONTEXT: @**/* | Memory: ${JSON.stringify(ace_context)} + +## Cross-Verification Context +Verify and expand on these findings: +${JSON.stringify(geminiAnalysis.implementation_approaches)} + +EXPECTED: JSON analysis with same structure as above, plus: +{ + ...standard fields..., + \"cross_verification\": { + \"agrees_with\": [\"point 1\", ...], + \"disagrees_with\": [\"point 1\", ...], + \"additions\": [\"new insight 1\", ...] 
+ } +} + +RULES: $(cat ~/.claude/workflows/cli-templates/protocols/analysis-protocol.md) | +- Focus on implementation feasibility +- Challenge assumptions from other analysis +- Provide alternative approaches if applicable +" --tool codex --mode analysis +``` + +## Core Functions + +### CLI Output Parsing + +```javascript +function parseCLIAnalysis(cliOutput, toolName) { + try { + // Extract JSON from CLI output + const jsonMatch = cliOutput.match(/\{[\s\S]*\}/) + if (!jsonMatch) { + return createFallbackAnalysis(toolName, cliOutput) + } + + const parsed = JSON.parse(jsonMatch[0]) + + return { + tool: toolName, + perspective: toolName === 'gemini' ? 'deep-code-analysis' : + toolName === 'codex' ? 'implementation-verification' : + 'alternative-analysis', + feasibility_score: parsed.feasibility_score || 0.5, + findings: parsed.findings || [], + implementation_approaches: parsed.implementation_approaches || [], + technical_concerns: parsed.technical_concerns || [], + code_locations: parsed.code_locations || [], + cross_verification: parsed.cross_verification || null + } + } catch (error) { + return createFallbackAnalysis(toolName, cliOutput) + } +} + +function createFallbackAnalysis(toolName, rawOutput) { + return { + tool: toolName, + perspective: 'fallback-extraction', + feasibility_score: 0.5, + findings: extractBulletPoints(rawOutput), + implementation_approaches: [], + technical_concerns: [], + code_locations: [], + _fallback: true + } +} +``` + +### Cross-Verification + +```javascript +function performCrossVerification(cliAnalyses) { + const agreements = [] + const disagreements = [] + + // Compare findings across all CLIs + const allFindings = cliAnalyses.flatMap(a => a.findings) + const findingGroups = groupSimilarFindings(allFindings) + + findingGroups.forEach(group => { + if (group.sources.length === cliAnalyses.length) { + agreements.push(group.finding) + } else if (group.hasConflict) { + disagreements.push({ + topic: group.finding, + positions: 
group.positions + }) + } + }) + + // Compare implementation approaches + const approachMap = new Map() + cliAnalyses.forEach(analysis => { + analysis.implementation_approaches.forEach(approach => { + const key = normalizeApproachName(approach.name) + if (!approachMap.has(key)) { + approachMap.set(key, { approach, sources: [analysis.tool] }) + } else { + approachMap.get(key).sources.push(analysis.tool) + } + }) + }) + + // Check for approach conflicts + approachMap.forEach((value, key) => { + if (value.sources.length === 1) { + // Unique approach from single CLI + } else { + // Shared approach - check for effort/risk disagreements + agreements.push(`Approach "${key}" proposed by: ${value.sources.join(', ')}`) + } + }) + + // Resolution strategy + const resolution = disagreements.length > 0 + ? `Resolved ${disagreements.length} disagreements using evidence weight and code verification` + : 'No significant disagreements found' + + return { agreements, disagreements: disagreements.map(d => d.topic), resolution } +} +``` + +### Solution Synthesis + +```javascript +function synthesizeSolutions(cliAnalyses, crossVerification) { + const solutions = [] + const seenApproaches = new Set() + + // Extract approaches from all CLIs + cliAnalyses.forEach(analysis => { + analysis.implementation_approaches.forEach(approach => { + const key = normalizeApproachName(approach.name) + + if (!seenApproaches.has(key)) { + seenApproaches.add(key) + + solutions.push({ + name: approach.name, + description: approach.description, + source_cli: [analysis.tool], + pros: approach.pros || [], + cons: approach.cons || [], + effort: approach.effort || 'medium', + risk: inferRisk(approach, analysis.technical_concerns), + maintainability: inferMaintainability(approach), + performance_impact: inferPerformanceImpact(approach), + affected_files: approach.affected_files || [] + }) + } else { + // Merge with existing solution + const existing = solutions.find(s => normalizeApproachName(s.name) === key) + if 
(existing) { + existing.source_cli.push(analysis.tool) + existing.pros = [...new Set([...existing.pros, ...(approach.pros || [])])] + existing.cons = [...new Set([...existing.cons, ...(approach.cons || [])])] + existing.affected_files = mergeAffectedFiles(existing.affected_files, approach.affected_files) + } + } + }) + }) + + // Rank and limit to 2-3 solutions + const rankedSolutions = solutions + .map(s => ({ ...s, score: calculateSolutionScore(s, crossVerification) })) + .sort((a, b) => b.score - a.score) + .slice(0, 3) + + return rankedSolutions +} + +function calculateSolutionScore(solution, crossVerification) { + let score = 0 + + // Multi-CLI consensus bonus + score += solution.source_cli.length * 20 + + // Effort scoring (lower effort = higher score) + score += { low: 30, medium: 20, high: 10 }[solution.effort] || 15 + + // Risk scoring (lower risk = higher score) + score += { low: 30, medium: 20, high: 5 }[solution.risk] || 15 + + // Pros/cons balance + score += (solution.pros.length - solution.cons.length) * 5 + + // File coverage (more specific = higher score) + score += Math.min(solution.affected_files.length * 3, 15) + + return score +} +``` + +### Convergence Calculation + +```javascript +function calculateConvergence(cliAnalyses, crossVerification, previousRounds) { + // Base score from agreement level + const agreementRatio = crossVerification.agreements.length / + (crossVerification.agreements.length + crossVerification.disagreements.length + 1) + + let score = agreementRatio * 0.5 + + // Boost for high feasibility scores + const avgFeasibility = cliAnalyses.reduce((sum, a) => sum + a.feasibility_score, 0) / cliAnalyses.length + score += avgFeasibility * 0.3 + + // Check for new insights vs previous rounds + const hasNewInsights = previousRounds.length === 0 || + cliAnalyses.some(a => a.findings.some(f => + !previousRounds.some(r => r.cli_analyses?.some(pa => pa.findings?.includes(f))) + )) + + if (!hasNewInsights) { + score += 0.2 // Convergence 
bonus when no new insights + } + + // Determine recommendation + let recommendation = 'continue' + if (score >= 0.8) { + recommendation = 'converged' + } else if (crossVerification.disagreements.length > 3) { + recommendation = 'user_input_needed' + } + + return { + score: Math.min(score, 1.0), + new_insights: hasNewInsights, + recommendation + } +} +``` + +### Clarification Question Generation + +```javascript +function generateClarificationQuestions(cliAnalyses, crossVerification, solutions) { + const questions = [] + + // From disagreements + crossVerification.disagreements.forEach(disagreement => { + questions.push(`Different analyses suggest different approaches for "${disagreement}". Which direction is preferred?`) + }) + + // From technical concerns + const allConcerns = cliAnalyses.flatMap(a => a.technical_concerns) + const uniqueConcerns = [...new Set(allConcerns)] + uniqueConcerns.slice(0, 2).forEach(concern => { + questions.push(`How should we handle: ${concern}?`) + }) + + // From solution trade-offs + if (solutions.length > 1) { + const effortDiff = solutions.some(s => s.effort === 'low') && solutions.some(s => s.effort === 'high') + if (effortDiff) { + questions.push('Is minimizing implementation effort or maximizing solution quality the priority?') + } + } + + // Limit to 4 questions max + return questions.slice(0, 4) +} +``` + +## Error Handling + +```javascript +// Fallback chain: gemini → codex → qwen → degraded mode +async function executeCLIWithFallback(prompt, config) { + const fallbackChain = config.fallback_chain || ['gemini', 'codex', 'qwen'] + const fallbacksTriggered = [] + + for (const tool of fallbackChain) { + try { + const result = await executeCLI(prompt, tool, config.timeout) + return { result, tool, fallbacksTriggered } + } catch (error) { + fallbacksTriggered.push(tool) + if (error.code === 429 || error.code === 503) { + continue // Try next tool + } + throw error // Unexpected error + } + } + + // All tools failed - return 
degraded result + return { + result: createDegradedAnalysis(), + tool: 'degraded', + fallbacksTriggered + } +} + +function createDegradedAnalysis() { + return { + feasibility_score: 0.5, + findings: ['Unable to perform deep analysis - all CLI tools unavailable'], + implementation_approaches: [{ + name: 'Manual Analysis Required', + description: 'CLI analysis unavailable, manual review recommended', + pros: ['Direct human oversight'], + cons: ['Time-consuming', 'Less comprehensive'], + effort: 'high', + affected_files: [] + }], + technical_concerns: ['CLI tools unavailable for automated analysis'], + code_locations: [] + } +} +``` + +## Main Execution + +```javascript +async function execute(input) { + const startTime = Date.now() + const { task_description, round_number, session, ace_context, previous_rounds, user_feedback, cli_config } = input + + const roundFolder = `${session.folder}/rounds/${round_number}` + Bash(`mkdir -p ${roundFolder}`) + + // Phase 1: Context Preparation + const enhancedContext = { + ...ace_context, + previous_findings: previous_rounds?.flatMap(r => r._internal?.cli_analyses?.flatMap(a => a.findings) || []) || [], + user_feedback + } + + // Phase 2: Multi-CLI Execution + const tools = cli_config?.tools || ['gemini', 'codex'] + const cliPromises = tools.map(tool => + executeCLIAnalysis(tool, task_description, enhancedContext, previous_rounds, user_feedback) + ) + + const cliResults = await Promise.all(cliPromises) + const cliAnalyses = cliResults.map((r, i) => parseCLIAnalysis(r.output, tools[i])) + + // Phase 3: Cross-Verification + const crossVerification = performCrossVerification(cliAnalyses) + + // Phase 4: Solution Synthesis + const rawSolutions = synthesizeSolutions(cliAnalyses, crossVerification) + + // Phase 5: Build DiscussionArtifact + const convergence = calculateConvergence(cliAnalyses, crossVerification, previous_rounds || []) + const clarificationQuestions = generateClarificationQuestions(cliAnalyses, crossVerification, 
rawSolutions) + const durationSeconds = Math.round((Date.now() - startTime) / 1000) + + // Build visualization-friendly artifact + const artifact = buildDiscussionArtifact({ + task_description, + round_number, + session, + ace_context, + cliAnalyses, + crossVerification, + rawSolutions, + convergence, + clarificationQuestions, + durationSeconds, + tools, + cliResults + }) + + // Write output + Write(`${roundFolder}/synthesis.json`, JSON.stringify(artifact, null, 2)) + + return artifact +} + +/** + * Build the visualization-friendly DiscussionArtifact + */ +function buildDiscussionArtifact(data) { + const { + task_description, + round_number, + session, + ace_context, + cliAnalyses, + crossVerification, + rawSolutions, + convergence, + clarificationQuestions, + durationSeconds, + tools, + cliResults + } = data + + // Determine status based on convergence + const status = convergence.recommendation === 'converged' ? 'decided' : + convergence.recommendation === 'user_input_needed' ? 'blocked' : + round_number === 1 ? 
'exploring' : 'analyzing' + + return { + // Section 1: Metadata + metadata: { + artifactId: `${session.id}-round-${round_number}`, + roundId: round_number, + timestamp: new Date().toISOString(), + contributingAgents: tools.map(t => ({ name: capitalize(t), id: `${t}-cli` })), + durationSeconds, + exportFormats: ['markdown', 'html'] + }, + + // Section 2: Discussion Topic (讨论主题) + discussionTopic: { + title: { + en: extractTitle(task_description), + zh: extractTitle(task_description) // CLI should provide Chinese translation + }, + description: { + en: task_description, + zh: task_description + }, + scope: { + included: extractScope(cliAnalyses, 'included'), + excluded: extractScope(cliAnalyses, 'excluded') + }, + keyQuestions: clarificationQuestions.map(q => ({ en: q, zh: q })), + status, + tags: extractTags(task_description, ace_context) + }, + + // Section 3: Related Files (关联文件) + relatedFiles: { + fileTree: buildFileTree(cliAnalyses, ace_context), + dependencyGraph: buildDependencyGraph(cliAnalyses), + impactSummary: buildImpactSummary(cliAnalyses) + }, + + // Section 4: Planning Requirements (规划要求) + planning: { + functional: extractFunctionalRequirements(cliAnalyses), + nonFunctional: extractNonFunctionalRequirements(cliAnalyses), + acceptanceCriteria: extractAcceptanceCriteria(cliAnalyses) + }, + + // Section 5: Decision (决策) + decision: { + status: rawSolutions.length > 0 && convergence.score >= 0.8 ? 'decided' : 'pending', + summary: { + en: generateDecisionSummary(rawSolutions, convergence), + zh: generateDecisionSummary(rawSolutions, convergence) + }, + selectedSolution: rawSolutions.length > 0 ? 
transformToSolution(rawSolutions[0]) : null, + rejectedAlternatives: rawSolutions.slice(1).map(s => ({ + ...transformToSolution(s), + rejectionReason: { + en: `Lower priority score (${s.score}) compared to selected solution`, + zh: `优先级分数(${s.score})低于选定方案` + } + })), + confidenceScore: convergence.score + }, + + // Section 6: Decision Records (决策记录) + decisionRecords: { + timeline: buildDecisionTimeline(cliAnalyses, crossVerification, rawSolutions, tools) + }, + + // Internal analysis data (for debugging) + _internal: { + cli_analyses: cliAnalyses, + cross_verification: crossVerification, + convergence + } + } +} + +/** + * Transform raw solution to visualization-friendly Solution format + */ +function transformToSolution(rawSolution) { + return { + id: `sol-${normalizeApproachName(rawSolution.name).replace(/\s+/g, '-')}`, + title: { en: rawSolution.name, zh: rawSolution.name }, + description: { en: rawSolution.description, zh: rawSolution.description }, + pros: rawSolution.pros.map(p => ({ en: p, zh: p })), + cons: rawSolution.cons.map(c => ({ en: c, zh: c })), + estimatedEffort: { + en: `${rawSolution.effort} effort`, + zh: rawSolution.effort === 'low' ? '低工作量' : + rawSolution.effort === 'medium' ? 
'中等工作量' : '高工作量' + }, + risk: rawSolution.risk || 'medium', + affectedFiles: rawSolution.affected_files.map(f => ({ + filePath: f.file, + line: f.line, + score: 'medium', + reasoning: { en: f.reason, zh: f.reason } + })), + sourceCLIs: rawSolution.source_cli + } +} + +/** + * Build decision timeline from analysis events + */ +function buildDecisionTimeline(cliAnalyses, crossVerification, solutions, tools) { + const events = [] + let eventCounter = 1 + + // Add proposal events from each CLI + cliAnalyses.forEach(analysis => { + events.push({ + eventId: `evt-proposal-${eventCounter++}`, + timestamp: new Date().toISOString(), + type: 'proposal', + contributor: { name: capitalize(analysis.tool), id: `${analysis.tool}-cli` }, + summary: { + en: `Proposed ${analysis.implementation_approaches.length} approach(es) with feasibility ${analysis.feasibility_score.toFixed(2)}`, + zh: `提出了${analysis.implementation_approaches.length}个方案,可行性评分${analysis.feasibility_score.toFixed(2)}` + }, + evidence: analysis.code_locations?.slice(0, 3).map(loc => ({ + type: 'code_snippet', + content: loc, + description: { en: loc.reason, zh: loc.reason } + })) || [] + }) + }) + + // Add agreement events + crossVerification.agreements.forEach(agreement => { + events.push({ + eventId: `evt-agreement-${eventCounter++}`, + timestamp: new Date().toISOString(), + type: 'agreement', + contributor: { name: 'System', id: 'cross-verification' }, + summary: { en: agreement, zh: agreement }, + evidence: [] + }) + }) + + // Add disagreement events + crossVerification.disagreements.forEach(disagreement => { + events.push({ + eventId: `evt-disagreement-${eventCounter++}`, + timestamp: new Date().toISOString(), + type: 'disagreement', + contributor: { name: 'System', id: 'cross-verification' }, + summary: { en: disagreement, zh: disagreement }, + evidence: [], + reversibility: 'requires_refactoring' + }) + }) + + return events +} + +/** + * Helper functions for building artifact sections + */ +function 
extractTitle(task_description) { + // Extract first sentence or first 50 chars + const firstSentence = task_description.split(/[.!?。!?]/)[0] + return firstSentence.length > 50 ? firstSentence.substring(0, 50) + '...' : firstSentence +} + +function extractTags(task_description, ace_context) { + const tags = [] + const keywords = ['auth', 'api', 'database', 'ui', 'security', 'performance', 'refactor', 'bug', 'feature'] + keywords.forEach(kw => { + if (task_description.toLowerCase().includes(kw)) tags.push(kw) + }) + if (ace_context?.detected_patterns) { + tags.push(...ace_context.detected_patterns.slice(0, 3)) + } + return [...new Set(tags)] +} + +function extractScope(cliAnalyses, type) { + // Extract scope from CLI findings + return [] // To be populated by CLI analysis +} + +function buildFileTree(cliAnalyses, ace_context) { + const files = new Map() + + // Collect files from CLI analyses + cliAnalyses.forEach(analysis => { + analysis.code_locations?.forEach(loc => { + if (!files.has(loc.file)) { + files.set(loc.file, { + path: loc.file, + type: 'file', + modificationStatus: 'modified', + impactScore: 'medium', + codeSnippet: { + startLine: loc.line, + endLine: loc.line + 5, + code: '', + language: detectLanguage(loc.file), + comment: { en: loc.reason, zh: loc.reason } + } + }) + } + }) + }) + + // Add files from ACE context + ace_context?.relevant_files?.forEach(file => { + if (!files.has(file)) { + files.set(file, { + path: file, + type: 'file', + modificationStatus: 'unchanged', + impactScore: 'low' + }) + } + }) + + return Array.from(files.values()) +} + +function buildDependencyGraph(cliAnalyses) { + // Build dependency edges from CLI analysis + return [] // To be populated by detailed CLI analysis +} + +function buildImpactSummary(cliAnalyses) { + const impacts = [] + cliAnalyses.forEach(analysis => { + analysis.code_locations?.forEach(loc => { + impacts.push({ + filePath: loc.file, + line: loc.line, + score: 'medium', + reasoning: { en: loc.reason, zh: 
loc.reason } + }) + }) + }) + return impacts.slice(0, 10) // Limit to top 10 +} + +function extractFunctionalRequirements(cliAnalyses) { + // Extract from CLI findings + const reqs = [] + let reqId = 1 + cliAnalyses.forEach(analysis => { + analysis.findings?.slice(0, 3).forEach(finding => { + if (finding.toLowerCase().includes('must') || finding.toLowerCase().includes('should')) { + reqs.push({ + id: `FR-${reqId++}`, + description: { en: finding, zh: finding }, + priority: 'high', + source: `${analysis.tool} analysis` + }) + } + }) + }) + return reqs +} + +function extractNonFunctionalRequirements(cliAnalyses) { + // Extract performance, security, etc. requirements + const reqs = [] + let reqId = 1 + cliAnalyses.forEach(analysis => { + analysis.technical_concerns?.forEach(concern => { + reqs.push({ + id: `NFR-${reqId++}`, + description: { en: concern, zh: concern }, + priority: 'medium', + source: `${analysis.tool} analysis` + }) + }) + }) + return reqs.slice(0, 5) +} + +function extractAcceptanceCriteria(cliAnalyses) { + return [] // To be defined by user or derived from requirements +} + +function generateDecisionSummary(solutions, convergence) { + if (solutions.length === 0) { + return 'No solutions identified yet. Continuing analysis...' + } + const topSolution = solutions[0] + const status = convergence.score >= 0.8 ? 'Recommended' : 'Under consideration' + return `${status}: ${topSolution.name} (${topSolution.effort} effort, ${topSolution.risk} risk). 
Confidence: ${(convergence.score * 100).toFixed(0)}%`
+}
+
+function capitalize(str) {
+  return str.charAt(0).toUpperCase() + str.slice(1)
+}
+
+function detectLanguage(filePath) {
+  const ext = filePath.split('.').pop()
+  const langMap = { ts: 'typescript', js: 'javascript', py: 'python', go: 'go', java: 'java', md: 'markdown' }
+  return langMap[ext] || 'text'
+}
+```
+
+## Quality Standards
+
+### Analysis Validation
+
+```javascript
+function validateAnalysis(analysis) {
+  const errors = []
+
+  if (typeof analysis.feasibility_score !== 'number' ||
+      analysis.feasibility_score < 0 || analysis.feasibility_score > 1) {
+    errors.push('Invalid feasibility_score')
+  }
+
+  if (!Array.isArray(analysis.findings) || analysis.findings.length === 0) {
+    errors.push('Missing or empty findings')
+  }
+
+  if (!Array.isArray(analysis.implementation_approaches)) {
+    errors.push('Missing implementation_approaches')
+  }
+
+  (analysis.implementation_approaches || []).forEach((approach, i) => {
+    if (!approach.name) errors.push(`Approach ${i}: missing name`)
+    if (!approach.description) errors.push(`Approach ${i}: missing description`)
+    if (!['low', 'medium', 'high'].includes(approach.effort)) {
+      errors.push(`Approach ${i}: invalid effort level`)
+    }
+  })
+
+  return { valid: errors.length === 0, errors }
+}
+```
+
+### Solution Quality Criteria
+
+| ✓ Good Solution | ✗ Bad Solution |
+|-----------------|----------------|
+| Specific file:line references | Vague "update relevant files" |
+| Quantified effort estimate | "Some time required" |
+| Concrete pros/cons | Generic advantages |
+| Multiple CLI consensus | Single source without verification |
+
+## Key Reminders
+
+**ALWAYS**:
+- Execute multiple CLIs for cross-verification
+- Parse CLI outputs robustly with fallback extraction
+- Calculate convergence score accurately
+- Generate actionable clarification questions
+- Include file:line references in affected_files
+- Write synthesis.json to correct round folder
+
+**Bash Tool**:
+- 
Use `run_in_background=false` for CLI executions to ensure sequential processing +- Handle timeouts gracefully with fallback chain + +**NEVER**: +- Execute implementation code (analysis only) +- Return without synthesis.json output +- Skip cross-verification between CLIs +- Generate more than 4 clarification questions +- Ignore previous round context + +## UI Component Mapping + +For dashboard visualization, map artifact sections to UI components: + +| Section | Component | Library Example | Notes | +|---------|-----------|-----------------|-------| +| **metadata** | | | | +| `roundId`, `timestamp` | `Tag`, `Badge` | Ant Design `Tag` | Header indicators | +| `contributingAgents` | `Avatar.Group` | Ant Design `Avatar.Group` | Agent icons with tooltips | +| `exportFormats` | `Dropdown` + `Button` | Material-UI `Menu` | Export actions | +| **discussionTopic** | `Card` | Bootstrap `Card` | Main section container | +| `title`, `description` | `Typography` | Any UI library | Standard text | +| `scope` | `List` with icons | Heroicons | Included/Excluded lists | +| `keyQuestions` | `Collapse` | Ant Design `Collapse` | Expandable Q&A | +| `status` | `Steps`, `Timeline` | Ant Design `Steps` | Progress indicator | +| **relatedFiles** | | | | +| `fileTree` | `Tree` | Ant Design `Tree` | Hierarchical file view | +| `dependencyGraph` | `Graph` | `vis-network`, `react-flow` | Interactive graph | +| `impactSummary` | `Table` | Ant Design `Table` | Sortable impact list | +| `codeSnippet` | `SyntaxHighlighter` | `react-syntax-highlighter` | Code with line numbers | +| **planning** | `Tabs` | Bootstrap `Navs` | FR/NFR/AC tabs | +| `functional/nonFunctional` | `Table` | Material-UI `Table` | Priority-sortable | +| `acceptanceCriteria` | `List` + `Checkbox` | Ant Design `List` | Checkable items | +| `priority` | `Tag` (color-coded) | Ant Design `Tag` | critical=red, high=orange | +| **decision** | | | | +| `summary` | `Alert`, `Callout` | Ant Design `Alert` | Prominent decision box | 
+| `selectedSolution` | `Card` (highlighted) | Bootstrap `Card` | Winner card | +| `rejectedAlternatives` | `Collapse` of `Card`s | Ant Design `Collapse` | Collapsed alternatives | +| `pros/cons` | `List` with icons | ThumbUp/ThumbDown | Visual indicators | +| `confidenceScore` | `Progress`, `Gauge` | Ant Design `Progress` | 0-100% visual | +| **decisionRecords** | | | | +| `timeline` | `Timeline` | Ant Design `Timeline`, `react-chrono` | Chronological events | +| `contributor` | `Avatar` + `Tooltip` | Ant Design `Avatar` | Who contributed | +| `evidence` | `Popover`, `Modal` | Ant Design `Popover` | Click to expand | +| `reversibility` | `Tag` with icon | SyncOutlined | Reversibility indicator | + +### Visualization Recommendations + +1. **Real-time Updates**: Use WebSocket or SSE for live synthesis.json updates +2. **Responsive Layout**: Card grid → stacked on mobile +3. **Dark/Light Theme**: CSS variables for theme switching +4. **Export**: Generate Markdown via template, HTML via React-to-static +5. **i18n Toggle**: Language switch button in header, read `en`/`zh` from I18nLabel diff --git a/.claude/commands/issue/execute.md b/.claude/commands/issue/execute.md index 5d8537e3..d3d1d409 100644 --- a/.claude/commands/issue/execute.md +++ b/.claude/commands/issue/execute.md @@ -17,21 +17,21 @@ Minimal orchestrator that dispatches **solution IDs** to executors. 
Each executo - `done ` → update solution completion status - No race conditions: status changes only via `done` - **Executor handles all tasks within a solution sequentially** -- **Worktree isolation**: Each executor can work in its own git worktree +- **Single worktree for entire queue**: One worktree isolates ALL queue execution from main workspace ## Usage ```bash /issue:execute # Execute active queue(s) /issue:execute --queue QUE-xxx # Execute specific queue -/issue:execute --worktree # Use git worktrees for parallel isolation +/issue:execute --worktree # Execute entire queue in isolated worktree /issue:execute --worktree --queue QUE-xxx /issue:execute --worktree /path/to/existing/worktree # Resume in existing worktree ``` **Parallelism**: Determined automatically by task dependency DAG (no manual control) **Executor & Dry-run**: Selected via interactive prompt (AskUserQuestion) -**Worktree**: Creates isolated git worktrees for each parallel executor +**Worktree**: Creates ONE worktree for the entire queue execution (not per-solution) **⭐ Recommended Executor**: **Codex** - Best for long-running autonomous work (2hr timeout), supports background execution and full write access @@ -44,8 +44,10 @@ Minimal orchestrator that dispatches **solution IDs** to executors. 
Each executo ## Execution Flow ``` -Phase 0 (if --worktree): Setup Worktree Base - └─ Ensure .worktrees directory exists +Phase 0 (if --worktree): Setup Queue Worktree + ├─ Create ONE worktree for entire queue: .ccw/worktrees/queue- + ├─ All subsequent execution happens in this worktree + └─ Main workspace remains clean and untouched Phase 1: Get DAG & User Selection ├─ ccw issue queue dag [--queue QUE-xxx] → { parallel_batches: [["S-1","S-2"], ["S-3"]] } @@ -53,19 +55,22 @@ Phase 1: Get DAG & User Selection Phase 2: Dispatch Parallel Batch (DAG-driven) ├─ Parallelism determined by DAG (no manual limit) + ├─ All executors work in the SAME worktree (or main if no worktree) ├─ For each solution ID in batch (parallel - all at once): - │ ├─ (if worktree) Create isolated worktree: git worktree add │ ├─ Executor calls: ccw issue detail (READ-ONLY) │ ├─ Executor gets FULL SOLUTION with all tasks │ ├─ Executor implements all tasks sequentially (T1 → T2 → T3) │ ├─ Executor tests + verifies each task │ ├─ Executor commits ONCE per solution (with formatted summary) - │ ├─ Executor calls: ccw issue done - │ └─ (if worktree) Cleanup: merge branch, remove worktree + │ └─ Executor calls: ccw issue done └─ Wait for batch completion -Phase 3: Next Batch +Phase 3: Next Batch (repeat Phase 2) └─ ccw issue queue dag → check for newly-ready solutions + +Phase 4 (if --worktree): Worktree Completion + ├─ All batches complete → prompt for merge strategy + └─ Options: Create PR / Merge to main / Keep branch ``` ## Implementation @@ -115,12 +120,12 @@ const answer = AskUserQuestion({ ] }, { - question: 'Use git worktrees for parallel isolation?', + question: 'Use git worktree for queue isolation?', header: 'Worktree', multiSelect: false, options: [ - { label: 'Yes (Recommended for parallel)', description: 'Each executor works in isolated worktree branch' }, - { label: 'No', description: 'Work directly in current directory (serial only)' } + { label: 'Yes (Recommended)', description: 'Create 
ONE worktree for entire queue - main stays clean' }, + { label: 'No', description: 'Work directly in current directory' } ] } ] @@ -140,7 +145,7 @@ if (isDryRun) { } ``` -### Phase 2: Dispatch Parallel Batch (DAG-driven) +### Phase 0 & 2: Setup Queue Worktree & Dispatch ```javascript // Parallelism determined by DAG - no manual limit @@ -158,24 +163,40 @@ TodoWrite({ console.log(`\n### Executing Solutions (DAG batch 1): ${batch.join(', ')}`); -// Setup worktree base directory if needed (using absolute paths) -if (useWorktree) { - // Use absolute paths to avoid issues when running from subdirectories - const repoRoot = Bash('git rev-parse --show-toplevel').trim(); - const worktreeBase = `${repoRoot}/.ccw/worktrees`; - Bash(`mkdir -p "${worktreeBase}"`); - // Prune stale worktrees from previous interrupted executions - Bash('git worktree prune'); -} - // Parse existing worktree path from args if provided // Example: --worktree /path/to/existing/worktree const existingWorktree = args.worktree && typeof args.worktree === 'string' ? 
args.worktree : null; +// Setup ONE worktree for entire queue (not per-solution) +let worktreePath = null; +let worktreeBranch = null; + +if (useWorktree) { + const repoRoot = Bash('git rev-parse --show-toplevel').trim(); + const worktreeBase = `${repoRoot}/.ccw/worktrees`; + Bash(`mkdir -p "${worktreeBase}"`); + Bash('git worktree prune'); // Cleanup stale worktrees + + if (existingWorktree) { + // Resume mode: Use existing worktree + worktreePath = existingWorktree; + worktreeBranch = Bash(`git -C "${worktreePath}" branch --show-current`).trim(); + console.log(`Resuming in existing worktree: ${worktreePath} (branch: ${worktreeBranch})`); + } else { + // Create mode: ONE worktree for the entire queue + const timestamp = new Date().toISOString().replace(/[-:T]/g, '').slice(0, 14); + worktreeBranch = `queue-exec-${dag.queue_id || timestamp}`; + worktreePath = `${worktreeBase}/${worktreeBranch}`; + Bash(`git worktree add "${worktreePath}" -b "${worktreeBranch}"`); + console.log(`Created queue worktree: ${worktreePath}`); + } +} + // Launch ALL solutions in batch in parallel (DAG guarantees no conflicts) +// All executors work in the SAME worktree (or main if no worktree) const executions = batch.map(solutionId => { updateTodo(solutionId, 'in_progress'); - return dispatchExecutor(solutionId, executor, useWorktree, existingWorktree); + return dispatchExecutor(solutionId, executor, worktreePath); }); await Promise.all(executions); @@ -185,126 +206,20 @@ batch.forEach(id => updateTodo(id, 'completed')); ### Executor Dispatch ```javascript -function dispatchExecutor(solutionId, executorType, useWorktree = false, existingWorktree = null) { - // Worktree setup commands (if enabled) - using absolute paths - // Supports both creating new worktrees and resuming in existing ones - const worktreeSetup = useWorktree ? 
` -### Step 0: Setup Isolated Worktree -\`\`\`bash -# Use absolute paths to avoid issues when running from subdirectories -REPO_ROOT=$(git rev-parse --show-toplevel) -WORKTREE_BASE="\${REPO_ROOT}/.ccw/worktrees" - -# Check if existing worktree path was provided -EXISTING_WORKTREE="${existingWorktree || ''}" - -if [[ -n "\${EXISTING_WORKTREE}" && -d "\${EXISTING_WORKTREE}" ]]; then - # Resume mode: Use existing worktree - WORKTREE_PATH="\${EXISTING_WORKTREE}" - WORKTREE_NAME=$(basename "\${WORKTREE_PATH}") - - # Verify it's a valid git worktree - if ! git -C "\${WORKTREE_PATH}" rev-parse --is-inside-work-tree &>/dev/null; then - echo "Error: \${EXISTING_WORKTREE} is not a valid git worktree" - exit 1 - fi - - echo "Resuming in existing worktree: \${WORKTREE_PATH}" -else - # Create mode: New worktree with timestamp - WORKTREE_NAME="exec-${solutionId}-$(date +%H%M%S)" - WORKTREE_PATH="\${WORKTREE_BASE}/\${WORKTREE_NAME}" - - # Ensure worktree base exists - mkdir -p "\${WORKTREE_BASE}" - - # Prune stale worktrees - git worktree prune - - # Create worktree - git worktree add "\${WORKTREE_PATH}" -b "\${WORKTREE_NAME}" - - echo "Created new worktree: \${WORKTREE_PATH}" -fi - -# Setup cleanup trap for graceful failure handling -cleanup_worktree() { - echo "Cleaning up worktree due to interruption..." - cd "\${REPO_ROOT}" 2>/dev/null || true - git worktree remove "\${WORKTREE_PATH}" --force 2>/dev/null || true - echo "Worktree removed. Branch '\${WORKTREE_NAME}' kept for inspection." -} -trap cleanup_worktree EXIT INT TERM - -cd "\${WORKTREE_PATH}" -\`\`\` -` : ''; - - const worktreeCleanup = useWorktree ? ` -### Step 5: Worktree Completion (User Choice) - -After all tasks complete, prompt for merge strategy: - -\`\`\`javascript -AskUserQuestion({ - questions: [{ - question: "Solution ${solutionId} completed. 
What to do with worktree branch?", - header: "Merge", - multiSelect: false, - options: [ - { label: "Create PR (Recommended)", description: "Push branch and create pull request - safest for parallel execution" }, - { label: "Merge to main", description: "Merge branch and cleanup worktree (requires clean main)" }, - { label: "Keep branch", description: "Cleanup worktree, keep branch for manual handling" } - ] - }] -}) -\`\`\` - -**Based on selection:** -\`\`\`bash -# Disable cleanup trap before intentional cleanup -trap - EXIT INT TERM - -# Return to repo root (use REPO_ROOT from setup) -cd "\${REPO_ROOT}" - -# Validate main repo state before merge -validate_main_clean() { - if [[ -n \$(git status --porcelain) ]]; then - echo "⚠️ Warning: Main repo has uncommitted changes." - echo "Cannot auto-merge. Falling back to 'Create PR' option." - return 1 - fi - return 0 -} - -# Create PR (Recommended for parallel execution): -git push -u origin "\${WORKTREE_NAME}" -gh pr create --title "Solution ${solutionId}" --body "Issue queue execution" -git worktree remove "\${WORKTREE_PATH}" - -# Merge to main (only if main is clean): -if validate_main_clean; then - git merge --no-ff "\${WORKTREE_NAME}" -m "Merge solution ${solutionId}" - git worktree remove "\${WORKTREE_PATH}" && git branch -d "\${WORKTREE_NAME}" -else - # Fallback to PR if main is dirty - git push -u origin "\${WORKTREE_NAME}" - gh pr create --title "Solution ${solutionId}" --body "Issue queue execution (main had uncommitted changes)" - git worktree remove "\${WORKTREE_PATH}" -fi - -# Keep branch: -git worktree remove "\${WORKTREE_PATH}" -echo "Branch \${WORKTREE_NAME} kept for manual handling" -\`\`\` - -**Parallel Execution Safety**: "Create PR" is the default and safest option for parallel executors, avoiding merge race conditions. 
-` : ''; +// worktreePath: path to shared worktree (null if not using worktree) +function dispatchExecutor(solutionId, executorType, worktreePath = null) { + // If worktree is provided, executor works in that directory + // No per-solution worktree creation - ONE worktree for entire queue + const cdCommand = worktreePath ? `cd "${worktreePath}"` : ''; const prompt = ` ## Execute Solution ${solutionId} -${worktreeSetup} +${worktreePath ? ` +### Step 0: Enter Queue Worktree +\`\`\`bash +cd "${worktreePath}" +\`\`\` +` : ''} ### Step 1: Get Solution (read-only) \`\`\`bash ccw issue detail ${solutionId} @@ -352,16 +267,21 @@ If any task failed: \`\`\`bash ccw issue done ${solutionId} --fail --reason '{"task_id": "TX", "error_type": "test_failure", "message": "..."}' \`\`\` -${worktreeCleanup}`; + +**Note**: Do NOT cleanup worktree after this solution. Worktree is shared by all solutions in the queue. +`; + + // For CLI tools, pass --cd to set working directory + const cdOption = worktreePath ? ` --cd "${worktreePath}"` : ''; if (executorType === 'codex') { return Bash( - `ccw cli -p "${escapePrompt(prompt)}" --tool codex --mode write --id exec-${solutionId}`, + `ccw cli -p "${escapePrompt(prompt)}" --tool codex --mode write --id exec-${solutionId}${cdOption}`, { timeout: 7200000, run_in_background: true } // 2hr for full solution ); } else if (executorType === 'gemini') { return Bash( - `ccw cli -p "${escapePrompt(prompt)}" --tool gemini --mode write --id exec-${solutionId}`, + `ccw cli -p "${escapePrompt(prompt)}" --tool gemini --mode write --id exec-${solutionId}${cdOption}`, { timeout: 3600000, run_in_background: true } ); } else { @@ -369,7 +289,7 @@ ${worktreeCleanup}`; subagent_type: 'code-developer', run_in_background: false, description: `Execute solution ${solutionId}`, - prompt: prompt + prompt: worktreePath ? 
`Working directory: ${worktreePath}\n\n${prompt}` : prompt }); } } @@ -390,40 +310,98 @@ console.log(` if (refreshedDag.ready_count > 0) { console.log('Run `/issue:execute` again for next batch.'); + // Note: If resuming, pass existing worktree path: + // /issue:execute --worktree +} +``` + +### Phase 4: Worktree Completion (after ALL batches) + +```javascript +// Only run when ALL solutions completed AND using worktree +if (useWorktree && refreshedDag.ready_count === 0 && refreshedDag.completed_count === refreshedDag.total) { + console.log('\n## All Solutions Completed - Worktree Cleanup'); + + const answer = AskUserQuestion({ + questions: [{ + question: `Queue complete. What to do with worktree branch "${worktreeBranch}"?`, + header: 'Merge', + multiSelect: false, + options: [ + { label: 'Create PR (Recommended)', description: 'Push branch and create pull request' }, + { label: 'Merge to main', description: 'Merge all commits and cleanup worktree' }, + { label: 'Keep branch', description: 'Cleanup worktree, keep branch for manual handling' } + ] + }] + }); + + const repoRoot = Bash('git rev-parse --show-toplevel').trim(); + + if (answer['Merge'].includes('Create PR')) { + Bash(`git -C "${worktreePath}" push -u origin "${worktreeBranch}"`); + Bash(`gh pr create --title "Queue ${dag.queue_id}" --body "Issue queue execution - all solutions completed" --head "${worktreeBranch}"`); + Bash(`git worktree remove "${worktreePath}"`); + console.log(`PR created for branch: ${worktreeBranch}`); + } else if (answer['Merge'].includes('Merge to main')) { + // Check main is clean + const mainDirty = Bash('git status --porcelain').trim(); + if (mainDirty) { + console.log('Warning: Main has uncommitted changes. 
Falling back to PR.'); + Bash(`git -C "${worktreePath}" push -u origin "${worktreeBranch}"`); + Bash(`gh pr create --title "Queue ${dag.queue_id}" --body "Issue queue execution (main had uncommitted changes)" --head "${worktreeBranch}"`); + } else { + Bash(`git merge --no-ff "${worktreeBranch}" -m "Merge queue ${dag.queue_id}"`); + Bash(`git branch -d "${worktreeBranch}"`); + } + Bash(`git worktree remove "${worktreePath}"`); + } else { + Bash(`git worktree remove "${worktreePath}"`); + console.log(`Branch ${worktreeBranch} kept for manual handling`); + } } ``` ## Parallel Execution Model ``` -┌─────────────────────────────────────────────────────────────┐ -│ Orchestrator │ -├─────────────────────────────────────────────────────────────┤ -│ 1. ccw issue queue dag │ -│ → { parallel_batches: [["S-1","S-2"], ["S-3"]] } │ -│ │ -│ 2. Dispatch batch 1 (parallel): │ -│ ┌──────────────────────┐ ┌──────────────────────┐ │ -│ │ Executor 1 │ │ Executor 2 │ │ -│ │ detail S-1 │ │ detail S-2 │ │ -│ │ → gets full solution │ │ → gets full solution │ │ -│ │ [T1→T2→T3 sequential]│ │ [T1→T2 sequential] │ │ -│ │ commit (1x solution) │ │ commit (1x solution) │ │ -│ │ done S-1 │ │ done S-2 │ │ -│ └──────────────────────┘ └──────────────────────┘ │ -│ │ -│ 3. ccw issue queue dag (refresh) │ -│ → S-3 now ready (S-1 completed, file conflict resolved) │ -└─────────────────────────────────────────────────────────────┘ +┌─────────────────────────────────────────────────────────────────┐ +│ Orchestrator │ +├─────────────────────────────────────────────────────────────────┤ +│ 0. (if --worktree) Create ONE worktree for entire queue │ +│ → .ccw/worktrees/queue-exec- │ +│ │ +│ 1. ccw issue queue dag │ +│ → { parallel_batches: [["S-1","S-2"], ["S-3"]] } │ +│ │ +│ 2. 
Dispatch batch 1 (parallel, SAME worktree): │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ Shared Queue Worktree (or main) │ │ +│ │ ┌──────────────────┐ ┌──────────────────┐ │ │ +│ │ │ Executor 1 │ │ Executor 2 │ │ │ +│ │ │ detail S-1 │ │ detail S-2 │ │ │ +│ │ │ [T1→T2→T3] │ │ [T1→T2] │ │ │ +│ │ │ commit S-1 │ │ commit S-2 │ │ │ +│ │ │ done S-1 │ │ done S-2 │ │ │ +│ │ └──────────────────┘ └──────────────────┘ │ │ +│ └──────────────────────────────────────────────────────┘ │ +│ │ +│ 3. ccw issue queue dag (refresh) │ +│ → S-3 now ready → dispatch batch 2 (same worktree) │ +│ │ +│ 4. (if --worktree) ALL batches complete → cleanup worktree │ +│ → Prompt: Create PR / Merge to main / Keep branch │ +└─────────────────────────────────────────────────────────────────┘ ``` **Why this works for parallel:** +- **ONE worktree for entire queue** → all solutions share same isolated workspace - `detail ` is READ-ONLY → no race conditions - Each executor handles **all tasks within a solution** sequentially - **One commit per solution** with formatted summary (not per-task) - `done ` updates only its own solution status - `queue dag` recalculates ready solutions after each batch -- Solutions in same batch have NO file conflicts +- Solutions in same batch have NO file conflicts (DAG guarantees) +- **Main workspace stays clean** until merge/PR decision ## CLI Endpoint Contract diff --git a/.claude/commands/workflow/multi-cli-plan.md b/.claude/commands/workflow/multi-cli-plan.md new file mode 100644 index 00000000..eedb24ca --- /dev/null +++ b/.claude/commands/workflow/multi-cli-plan.md @@ -0,0 +1,891 @@ +--- +name: workflow:multi-cli-plan +description: Multi-CLI collaborative planning workflow using ACE semantic search and iterative Claude+Codex analysis to determine execution plan. Features user-driven decision points and convergent refinement. 
+argument-hint: " [--max-rounds=3] [--tools=gemini,codex]" +allowed-tools: TodoWrite(*), Task(*), AskUserQuestion(*), Read(*), Bash(*), Glob(*), Grep(*), mcp__ace-tool__search_context(*) +--- + +# Multi-CLI Collaborative Planning Command (/workflow:multi-cli-plan) + +## Overview + +Multi-CLI collaborative planning workflow that uses ACE semantic search for context gathering, followed by iterative multi-tool analysis (Claude + Codex/Gemini) to converge on an optimal execution plan. + +**Core Philosophy**: +- **Multi-round Verification**: Claude and Codex alternate analysis to ensure solutions match codebase reality +- **User-driven**: Every analysis round ends with user decision point +- **Iterative Convergence**: Multiple cycles progressively refine requirements and solutions +- **Final Confirmation**: Executable plan only output after explicit user approval + +**Core Capabilities**: +- ACE semantic search for comprehensive codebase context +- Multi-tool collaborative analysis (Claude + Gemini/Codex) +- Interactive refinement with user feedback loops +- Solution comparison with trade-off analysis +- Final executable plan with file locations and acceptance criteria + +## Usage + +```bash +/workflow:multi-cli-plan + +# With options +/workflow:multi-cli-plan "Implement user authentication" --max-rounds=3 +/workflow:multi-cli-plan "Refactor payment module" --tools=gemini,codex + +# Examples +/workflow:multi-cli-plan "Add dark mode support to the application" +/workflow:multi-cli-plan "Fix the memory leak in WebSocket connections" +/workflow:multi-cli-plan "Implement rate limiting for API endpoints" +``` + +## Execution Flow + +``` +Phase 1: Input & Context Gathering + |-- Parse user task description + |-- ACE semantic search for codebase context + |-- Build initial context package + +-- Initialize discussion session + +Phase 2: Multi-CLI Collaborative Analysis (Iterative) + |-- Round N: + | |-- Claude Analysis: Architecture perspective + | |-- Codex/Gemini Analysis: 
Implementation perspective + | |-- Cross-verify technical feasibility + | +-- Synthesize multiple implementation approaches + | + +-- Loop until convergence or max rounds + +Phase 3: Stage Summary & Options + |-- Present 2-3 viable solution options with trade-offs + |-- Proactively ask clarifying questions for ambiguities + +-- Wait for user feedback + +Phase 4: User Decision Point + |-- Option A: User approves current approach -> Phase 5 + |-- Option B: User provides clarification/adjustments -> Return to Phase 2 + +-- Option C: User requests different direction -> Reset analysis + +Phase 5: Agent Planning & Output Generation + |-- Invoke cli-lite-planning-agent with discussion context + |-- Generate IMPL_PLAN.md (documentation) + |-- Generate plan.json (structured plan for execution) + |-- User confirms execution + +-- Hand off to /workflow:lite-execute +``` + +## Implementation + +### Phase 1: Input & Context Gathering + +**Session Initialization**: +```javascript +// Helper: Get UTC+8 (China Standard Time) ISO string +const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString() + +// Parse arguments +const { taskDescription, maxRounds, tools } = parseArgs(args) +const effectiveMaxRounds = maxRounds || 3 +const effectiveTools = tools || ['gemini', 'codex'] + +// Generate session ID +const taskSlug = taskDescription.toLowerCase().replace(/[^a-z0-9]+/g, '-').substring(0, 40) +const dateStr = getUtc8ISOString().substring(0, 10) +const sessionId = `MCP-${taskSlug}-${dateStr}` +const sessionFolder = `.workflow/.multi-cli-plan/${sessionId}` + +// Create session folder +Bash(`mkdir -p ${sessionFolder}/rounds && test -d ${sessionFolder} && echo "SUCCESS: ${sessionFolder}"`) + +// Initialize session state +const sessionState = { + session_id: sessionId, + task_description: taskDescription, + created_at: getUtc8ISOString(), + max_rounds: effectiveMaxRounds, + tools: effectiveTools, + current_round: 0, + phase: 'context-gathering', + rounds: [], + 
solutions: [], + user_decisions: [], + final_plan: null +} + +Write(`${sessionFolder}/session-state.json`, JSON.stringify(sessionState, null, 2)) +``` + +**ACE Context Gathering**: +```javascript +// Step 1: Extract keywords from task description +const keywords = extractKeywords(taskDescription) +// e.g., "Add dark mode support" -> ["dark", "mode", "theme", "style", "color"] + +// Step 2: Use ACE to understand codebase structure and relevant code +const aceQueries = [ + // Architecture query + `Project architecture and module structure related to ${keywords.slice(0, 3).join(', ')}`, + // Implementation query + `Existing implementations of ${keywords[0]} in this codebase`, + // Pattern query + `Code patterns and conventions for ${keywords.slice(0, 2).join(' ')} features`, + // Integration query + `Integration points and dependencies for ${keywords[0]} functionality` +] + +const aceResults = [] +for (const query of aceQueries) { + const result = await mcp__ace-tool__search_context({ + project_root_path: process.cwd(), + query: query + }) + aceResults.push({ query, result, timestamp: getUtc8ISOString() }) +} + +// Step 3: Build context package (kept in memory for CLI consumption) +const contextPackage = { + task_description: taskDescription, + keywords: keywords, + ace_results: aceResults, + relevant_files: extractRelevantFiles(aceResults), + detected_patterns: extractPatterns(aceResults), + architecture_insights: aceResults[0].result, + existing_implementations: aceResults[1].result +} + +// Update session state +sessionState.phase = 'context-gathered' +sessionState.context_summary = { + files_identified: contextPackage.relevant_files.length, + patterns_detected: contextPackage.detected_patterns.length, + ace_queries: aceQueries.length +} +``` + +--- + +### Phase 2: Agent-Driven Collaborative Analysis + +**Core Principle**: Orchestrator delegates all analysis to `cli-discuss-agent`, only reads output files for decision making. 
+ +**Analysis Round Loop**: +```javascript +let currentRound = 0 +let shouldContinue = true +let analysisResults = [] + +while (shouldContinue && currentRound < effectiveMaxRounds) { + currentRound++ + + console.log(` +## Analysis Round ${currentRound}/${effectiveMaxRounds} + +Delegating to cli-discuss-agent... +`) + + // ======================================== + // DELEGATE TO AGENT - No direct analysis + // ======================================== + Task({ + subagent_type: "cli-discuss-agent", + run_in_background: false, + description: `Discussion round ${currentRound}`, + prompt: ` +## Task Objective +Execute collaborative discussion round ${currentRound} for task analysis. + +## Input Context +- **Task Description**: ${taskDescription} +- **Round Number**: ${currentRound} +- **Session ID**: ${sessionId} +- **Session Folder**: ${sessionFolder} + +## ACE Context +${JSON.stringify(contextPackage, null, 2)} + +## Previous Rounds +${analysisResults.length > 0 + ? analysisResults.map(r => `Round ${r.round}: ${r.summary}`).join('\n') + : 'None (first round)'} + +## User Feedback +${userFeedback || 'None'} + +## CLI Configuration +- Tools: ${effectiveTools.join(', ')} +- Timeout: 600000ms +- Fallback Chain: gemini → codex → qwen + +## Output Requirements +Write: ${sessionFolder}/rounds/${currentRound}/synthesis.json + +Follow cli-discuss-agent output schema exactly. 
+ +## Success Criteria +- [ ] All configured CLI tools executed +- [ ] Cross-verification completed +- [ ] 2-3 solution options generated +- [ ] Convergence score calculated +- [ ] synthesis.json written to round folder +` + }) + + // ======================================== + // READ AGENT OUTPUT - Decision making only + // ======================================== + const synthesisPath = `${sessionFolder}/rounds/${currentRound}/synthesis.json` + const roundSynthesis = JSON.parse(Read(synthesisPath)) + analysisResults.push(roundSynthesis) + + // Update session state from agent output + sessionState.rounds.push({ + number: currentRound, + cli_tools_used: roundSynthesis._metadata.cli_tools_used, + solutions_identified: roundSynthesis.solutions.length, + convergence_score: roundSynthesis.convergence.score, + new_insights: roundSynthesis.convergence.new_insights, + recommendation: roundSynthesis.convergence.recommendation + }) + + // Display round summary + console.log(` +### Round ${currentRound} Complete + +**Convergence**: ${roundSynthesis.convergence.score.toFixed(2)} +**Solutions Found**: ${roundSynthesis.solutions.length} +**Recommendation**: ${roundSynthesis.convergence.recommendation} + +**Solutions**: +${roundSynthesis.solutions.map((s, i) => `${i+1}. ${s.name} (${s.effort} effort, ${s.risk} risk)`).join('\n')} +`) + + // Decide whether to continue based on agent's recommendation + if (roundSynthesis.convergence.recommendation === 'converged') { + shouldContinue = false + console.log('Analysis converged. Proceeding to decision phase.') + } else if (roundSynthesis.convergence.recommendation === 'user_input_needed') { + // Collect user feedback before next round + const feedbackResult = await AskUserQuestion({ + questions: [{ + question: 'Clarification needed. 
How would you like to proceed?', + header: 'Feedback', + multiSelect: false, + options: [ + { label: 'Provide Clarification', description: 'Answer questions and continue analysis' }, + { label: 'Proceed Anyway', description: 'Accept current solutions' }, + { label: 'Change Direction', description: 'Modify task requirements' } + ] + }] + }) + + if (feedbackResult === 'Provide Clarification') { + // Display clarification questions + console.log(` +### Clarification Questions +${roundSynthesis.clarification_questions.map((q, i) => `${i+1}. ${q}`).join('\n')} +`) + // User provides feedback via "Other" option or follow-up + userFeedback = feedbackResult.other || '' + } else if (feedbackResult === 'Proceed Anyway') { + shouldContinue = false + } else { + // Reset with new direction + userFeedback = feedbackResult.other || '' + } + } else { + // Continue to next round + shouldContinue = roundSynthesis.convergence.new_insights && currentRound < effectiveMaxRounds + } +} + +// Get final synthesis from last round +const finalSynthesis = analysisResults[analysisResults.length - 1] +``` + +--- + +### Phase 3: Review Agent Output & Present Options + +**Core Principle**: Orchestrator only reads agent output files and formats them for user decision. 
+ +**Read and Present Solutions**: +```javascript +// ======================================== +// READ FINAL AGENT OUTPUT - No processing +// ======================================== +// finalSynthesis already loaded from agent's synthesis.json in Phase 2 + +console.log(` +## Stage Summary + +### Analysis Complete (from cli-discuss-agent output) +- Rounds completed: ${currentRound} +- CLI tools used: ${finalSynthesis._metadata.cli_tools_used.join(', ')} +- Cross-verification: ${finalSynthesis.cross_verification.agreements.length} agreements, ${finalSynthesis.cross_verification.disagreements.length} disagreements +- Convergence score: ${finalSynthesis.convergence.score.toFixed(2)} + +### Solution Options (from agent synthesis) + +${finalSynthesis.solutions.map((solution, index) => ` +**Option ${index + 1}: ${solution.name}** +*Source: ${solution.source_cli.join(' + ')}* + +Description: ${solution.description} + +Trade-offs: +| Aspect | Assessment | +|--------|------------| +| Effort | ${solution.effort} | +| Risk | ${solution.risk} | +| Maintainability | ${solution.maintainability} | +| Performance | ${solution.performance_impact} | + +Pros: +${solution.pros.map(p => `- ${p}`).join('\n')} + +Cons: +${solution.cons.map(c => `- ${c}`).join('\n')} + +Key files affected: +${solution.affected_files.slice(0, 5).map(f => `- ${f.file}:${f.line} - ${f.reason}`).join('\n')} +`).join('\n---\n')} + +### Cross-Verification Summary + +**Agreements**: +${finalSynthesis.cross_verification.agreements.slice(0, 5).map(a => `- ${a}`).join('\n')} + +**Disagreements** (resolved): +${finalSynthesis.cross_verification.disagreements.slice(0, 3).map(d => `- ${d}`).join('\n') || '- None'} + +### Clarification Questions (from agent) + +${finalSynthesis.clarification_questions.length > 0 + ? finalSynthesis.clarification_questions.map((q, i) => `${i + 1}. 
${q}`).join('\n') + : 'No clarifications needed.'} +`) + +// Update session state with agent's findings +sessionState.solutions = finalSynthesis.solutions +sessionState.cross_verification = finalSynthesis.cross_verification +sessionState.phase = 'awaiting-decision' +Write(`${sessionFolder}/session-state.json`, JSON.stringify(sessionState, null, 2)) +``` + +--- + +### Phase 4: User Decision Point + +**Collect User Decision**: +```javascript +const decisionResult = await AskUserQuestion({ + questions: [ + { + question: `Which solution approach do you prefer?`, + header: "Solution", + multiSelect: false, + options: finalSynthesis.solutions.map((sol, i) => ({ + label: `Option ${i + 1}: ${sol.name}`, + description: `${sol.effort} effort, ${sol.risk} risk` + })).concat([ + { label: "Need More Analysis", description: "Return to analysis with additional context" } + ]) + }, + { + question: "Any clarifications or adjustments?", + header: "Feedback", + multiSelect: true, + options: [ + { label: "Proceed as-is", description: "Generate final plan with selected option" }, + { label: "Add constraints", description: "Specify additional requirements" }, + { label: "Change scope", description: "Adjust what's included/excluded" }, + { label: "Different direction", description: "Explore completely different approach" } + ] + } + ] +}) + +// Process decision +const userDecision = { + timestamp: getUtc8ISOString(), + selected_solution: decisionResult.solution, + feedback_type: decisionResult.feedback, + additional_input: decisionResult.other || null +} + +sessionState.user_decisions.push(userDecision) + +// Decision routing +if (userDecision.selected_solution === 'Need More Analysis' || + userDecision.feedback_type.includes('Different direction')) { + // Return to Phase 2 with updated context + sessionState.phase = 'additional-analysis' + // Continue analysis loop with user feedback incorporated +} else if (userDecision.feedback_type.includes('Add constraints') || + 
userDecision.feedback_type.includes('Change scope')) { + // Prompt for additional details + const additionalInput = await AskUserQuestion({ + questions: [{ + question: "Please provide the additional constraints or scope changes:", + header: "Details", + multiSelect: false, + options: [ + { label: "Performance priority", description: "Optimize for speed over simplicity" }, + { label: "Maintainability priority", description: "Prefer clear, maintainable code" }, + { label: "Minimal changes", description: "Change as few files as possible" }, + { label: "Full refactor OK", description: "Willing to do comprehensive changes" } + ] + }] + }) + // Incorporate and proceed to Phase 5 + userDecision.constraints = additionalInput + sessionState.phase = 'generating-plan' +} else { + // Proceed to Phase 5 + sessionState.phase = 'generating-plan' +} +``` + +--- + +### Phase 5: Agent Planning & Output Generation + +**Step 5.1: Prepare Planning Context** +```javascript +// Select the approved solution +const selectedSolution = finalSynthesis.solutions[userDecision.selected_solution_index] + +// Build comprehensive planning context from discussion +const planningContext = { + task_description: taskDescription, + selected_solution: selectedSolution, + analysis_rounds: analysisResults, + consensus_points: finalSynthesis.consensus_points, + user_constraints: userDecision.constraints || null, + ace_context: contextPackage, + clarifications: sessionState.user_decisions +} + +console.log(` +## Generating Implementation Plan + +Selected approach: **${selectedSolution.name}** +Invoking planning agent... +`) +``` + +**Step 5.2: Invoke cli-lite-planning-agent** +```javascript +// Call planning agent to generate detailed plan +Task({ + subagent_type: "cli-lite-planning-agent", + run_in_background: false, + description: "Generate detailed implementation plan", + prompt: ` +## Task Objective +Generate detailed implementation plan based on collaborative discussion results. 
+ +## Output Schema Reference +Execute: cat ~/.claude/workflows/cli-templates/schemas/plan-json-schema.json + +## Project Context (MANDATORY - Read Both Files) +1. Read: .workflow/project-tech.json (technology stack, architecture) +2. Read: .workflow/project-guidelines.json (user-defined constraints) + +## Discussion Results + +### Task Description +${taskDescription} + +### Selected Solution +**Name**: ${selectedSolution.name} +**Description**: ${selectedSolution.description} +**Effort**: ${selectedSolution.effort} +**Risk**: ${selectedSolution.risk} + +**Pros**: +${selectedSolution.pros.map(p => `- ${p}`).join('\n')} + +**Cons**: +${selectedSolution.cons.map(c => `- ${c}`).join('\n')} + +**Affected Files**: +${selectedSolution.affected_files.map(f => `- ${f.file}:${f.line} - ${f.reason}`).join('\n')} + +### Analysis Consensus +${finalSynthesis.consensus_points.map(p => `- ${p}`).join('\n')} + +### User Constraints +${userDecision.constraints ? JSON.stringify(userDecision.constraints) : 'None specified'} + +### ACE Context Summary +Relevant files: ${contextPackage.relevant_files.slice(0, 15).join(', ')} +Detected patterns: ${contextPackage.detected_patterns.join(', ')} + +## Output Requirements + +### 1. IMPL_PLAN.md (Documentation) +Write: ${sessionFolder}/IMPL_PLAN.md + +Structure: +\`\`\`markdown +# Implementation Plan: {Task Title} + +## Overview +- **Task**: {description} +- **Approach**: {selected solution name} +- **Complexity**: {Low/Medium/High} +- **Generated**: {timestamp} + +## Background & Decision Rationale +{Why this approach was chosen, key trade-offs considered} + +## Implementation Steps + +### Step 1: {Title} +**Objective**: {what this step achieves} +**Files**: +- \`path/to/file.ts:line\` - {change description} + +**Actions**: +1. {specific action} +2. {specific action} + +**Verification**: {how to verify this step is complete} + +### Step 2: ... 
+ +## File Manifest +| File | Lines | Change Type | Description | +|------|-------|-------------|-------------| +| ... | ... | ... | ... | + +## Acceptance Criteria +1. {criterion with verification method} +2. ... + +## Risk Mitigation +| Risk | Mitigation Strategy | +|------|---------------------| +| ... | ... | + +## Dependencies & Prerequisites +- {prerequisite 1} +- {prerequisite 2} +\`\`\` + +### 2. plan.json (Structured Plan) +Write: ${sessionFolder}/plan.json + +Follow schema from plan-json-schema.json. Key requirements: +- tasks: 2-7 structured tasks (group by feature/module, NOT by file) +- Each task includes: id, title, description, scope, files, depends_on, execution_group +- _metadata.source: "collaborative-discussion" +- _metadata.session_id: "${sessionId}" + +## Task Grouping Rules +1. **Group by feature**: All changes for one feature = one task +2. **Substantial tasks**: Each task = 15-60 minutes of work +3. **True dependencies only**: Use depends_on only when Task B needs Task A's output +4. 
**Prefer parallel**: Most tasks should be independent + +## Success Criteria +- [ ] IMPL_PLAN.md written with complete documentation +- [ ] plan.json follows schema exactly +- [ ] All affected files have line numbers +- [ ] Acceptance criteria are testable +- [ ] Tasks are properly grouped (not one per file) +` +}) +``` + +**Step 5.3: Display Generated Plan** +```javascript +// Read generated outputs +const implPlan = Read(`${sessionFolder}/IMPL_PLAN.md`) +const planJson = JSON.parse(Read(`${sessionFolder}/plan.json`)) + +console.log(` +## Plan Generated Successfully + +### Documentation +${implPlan} + +--- + +### Structured Plan Summary +**Tasks**: ${planJson.tasks.length} +**Complexity**: ${planJson.complexity} +**Estimated Time**: ${planJson.estimated_time} + +| # | Task | Scope | Dependencies | +|---|------|-------|--------------| +${planJson.tasks.map((t, i) => + `| ${i+1} | ${t.title} | ${t.scope} | ${t.depends_on?.join(', ') || 'None'} |` +).join('\n')} +`) + +// Update session state +sessionState.phase = 'plan-generated' +sessionState.artifacts = { + impl_plan: `${sessionFolder}/IMPL_PLAN.md`, + plan_json: `${sessionFolder}/plan.json` +} +Write(`${sessionFolder}/session-state.json`, JSON.stringify(sessionState, null, 2)) +``` + +**Step 5.4: Confirm & Hand off to Execution** +```javascript +const executeDecision = await AskUserQuestion({ + questions: [{ + question: `Plan generated (${planJson.tasks.length} tasks). 
Proceed to execution?`, + header: "Execute", + multiSelect: false, + options: [ + { label: "Execute Now (Recommended)", description: "Hand off to /workflow:lite-execute" }, + { label: "Review First", description: "Review plan files before execution" }, + { label: "Modify Plan", description: "Adjust plan before execution" }, + { label: "Save Only", description: "Save plan without execution" } + ] + }] +}) + +if (executeDecision === 'Execute Now') { + // Build execution context + const executionContext = { + planObject: planJson, + explorationsContext: contextPackage, + clarificationContext: sessionState.user_decisions, + originalUserInput: taskDescription, + executionMethod: 'Agent', // Default to Agent execution + session: { + id: sessionId, + folder: sessionFolder, + artifacts: { + impl_plan: `${sessionFolder}/IMPL_PLAN.md`, + plan_json: `${sessionFolder}/plan.json`, + session_state: `${sessionFolder}/session-state.json` + } + } + } + + // Update state and hand off + sessionState.phase = 'executing' + Write(`${sessionFolder}/session-state.json`, JSON.stringify(sessionState, null, 2)) + + console.log(` +## Handing off to lite-execute + +Session: ${sessionId} +Tasks: ${planJson.tasks.length} +`) + + // Hand off to lite-execute + SlashCommand(command="/workflow:lite-execute --in-memory") + +} else if (executeDecision === 'Review First') { + console.log(` +## Plan Files Ready for Review + +- Documentation: ${sessionFolder}/IMPL_PLAN.md +- Structured Plan: ${sessionFolder}/plan.json + +Run \`/workflow:lite-execute --session=${sessionId}\` when ready. 
+`) + +} else if (executeDecision === 'Modify Plan') { + // Return to Phase 4 with modification request + sessionState.phase = 'awaiting-decision' + console.log('Returning to decision phase for plan modification...') + +} else { + console.log(` +## Plan Saved + +Session: ${sessionId} +Location: ${sessionFolder}/ + +Files: +- IMPL_PLAN.md (documentation) +- plan.json (structured plan) +- session-state.json (full context) + +To execute later: /workflow:lite-execute --session=${sessionId} +`) + sessionState.phase = 'complete' +} + +Write(`${sessionFolder}/session-state.json`, JSON.stringify(sessionState, null, 2)) +``` + +--- + +## Session Folder Structure + +``` +.workflow/.multi-cli-plan/{MCP-task-slug-YYYY-MM-DD}/ +|-- session-state.json # Session state with all rounds and decisions +|-- rounds/ +| |-- 1/ +| | +-- synthesis.json # Round 1 analysis synthesis +| |-- 2/ +| | +-- synthesis.json # Round 2 analysis synthesis +| +-- .../ +|-- IMPL_PLAN.md # Implementation plan documentation ++-- plan.json # Structured plan for lite-execute +``` + +## Key Features + +### 1. Agent-Orchestrator Separation + +**Orchestrator (this command)** only handles: +- Task delegation to agents +- Reading agent output files +- User interaction and decisions +- Session state management + +**Agent (cli-discuss-agent)** handles: +- Multi-CLI execution (Gemini, Codex, Qwen) +- Cross-verification between CLI outputs +- Solution synthesis and ranking +- Writing structured output files + +``` +┌─────────────────────────────────────────────────────────────┐ +│ ORCHESTRATOR │ +│ (multi-cli-plan.md - decision layer) │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ 1. Delegate → Task(cli-discuss-agent) │ +│ 2. Wait for completion │ +│ 3. Read → synthesis.json │ +│ 4. Display → User │ +│ 5. Collect → Decision │ +│ 6. 
Loop or proceed │ +│ │ +└─────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ CLI-DISCUSS-AGENT │ +│ (analysis layer) │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ Gemini CLI ──┐ │ +│ ├──→ Cross-Verify ──→ Synthesize │ +│ Codex CLI ───┘ │ │ +│ ▼ │ +│ synthesis.json │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +### 2. Multi-CLI Cross-Verification + +Agent invokes multiple CLI tools and cross-verifies: +- **Gemini**: Deep code analysis, pattern recognition +- **Codex**: Implementation verification, code generation feasibility +- **Qwen** (fallback): Alternative perspective + +Cross-verification identifies: +- Agreements (high confidence points) +- Disagreements (requiring resolution) +- Unique insights from each tool + +### 3. User-Driven Decision Points + +Every analysis cycle ends with user decision: +- Approve and proceed to planning +- Request more analysis with feedback +- Adjust requirements or direction +- View detailed agent output files + +### 4. Iterative Convergence + +Each round builds on previous findings: +- Round 1: Initial exploration, identify major approaches +- Round 2: Deep dive into promising approaches, resolve conflicts +- Round 3: Final refinement, edge case analysis + +Agent calculates convergence score (0.0-1.0) and recommends: +- `converged`: Ready for planning +- `continue`: More analysis needed +- `user_input_needed`: Clarification required + +### 5. 
Trade-off Transparency + +Each solution option includes explicit trade-offs: +- Effort (low/medium/high) +- Risk assessment +- Maintainability impact +- Performance considerations +- Affected files with line numbers +- Source CLI(s) that proposed the solution + +## Error Handling + +| Error | Resolution | +|-------|------------| +| ACE search fails | Fall back to Glob/Grep for file discovery | +| Agent fails to produce synthesis.json | Retry agent with simpler context | +| CLI tool timeout (in agent) | Agent uses fallback chain: gemini → codex → qwen | +| No convergence after max rounds | Present best available options, flag uncertainty | +| synthesis.json parse error | Agent retries with degraded mode | +| User cancels | Save session state for later resumption | + +## Configuration + +| Flag | Default | Description | +|------|---------|-------------| +| `--max-rounds` | 3 | Maximum analysis rounds before forcing decision | +| `--tools` | gemini,codex | CLI tools to use for analysis | +| `--auto-execute` | false | Auto-execute after plan approval | +| `--save-context` | true | Persist ACE context for resumption | + +## Best Practices + +1. **Be Specific**: More detailed task descriptions lead to better initial context gathering +2. **Provide Feedback**: Use clarification rounds to narrow down requirements +3. **Trust the Process**: Allow multiple rounds for complex tasks +4. **Review Trade-offs**: Carefully consider pros/cons of each solution option +5. **Iterate**: Don't hesitate to request additional analysis if uncertain +6. 
**Review Plan**: Check IMPL_PLAN.md before execution for complete understanding + +## Output Artifacts + +| File | Purpose | Producer | +|------|---------|----------| +| `rounds/{n}/synthesis.json` | Round analysis results | cli-discuss-agent | +| `IMPL_PLAN.md` | Human-readable documentation | cli-lite-planning-agent | +| `plan.json` | Structured tasks for execution | cli-lite-planning-agent | +| `session-state.json` | Session tracking | Orchestrator | + +**synthesis.json schema** (produced by cli-discuss-agent): +```json +{ + "round": 1, + "cli_analyses": [...], + "cross_verification": { "agreements": [], "disagreements": [] }, + "solutions": [{ "name": "...", "pros": [], "cons": [], "effort": "..." }], + "convergence": { "score": 0.85, "recommendation": "converged" }, + "clarification_questions": [] +} +``` + +## Related Commands + +```bash +# Resume a saved multi-cli-plan session +/workflow:lite-execute --session=MCP-xxx + +# For simpler tasks without multi-round discussion +/workflow:lite-plan "task description" + +# For issue-driven discovery +/issue:discover-by-prompt "find issues" + +# View generated plan +cat .workflow/.multi-cli-plan/{session-id}/IMPL_PLAN.md +``` diff --git a/ccw/src/core/routes/system-routes.ts b/ccw/src/core/routes/system-routes.ts index 7494bed6..ab509276 100644 --- a/ccw/src/core/routes/system-routes.ts +++ b/ccw/src/core/routes/system-routes.ts @@ -416,5 +416,107 @@ export async function handleSystemRoutes(ctx: SystemRouteContext): Promise { + const { path: browsePath, showHidden } = body as { + path?: string; + showHidden?: boolean; + }; + + const os = await import('os'); + const path = await import('path'); + const fs = await import('fs'); + + // Default to home directory + let targetPath = browsePath || os.homedir(); + + // Expand ~ to home directory + if (targetPath.startsWith('~')) { + targetPath = path.join(os.homedir(), targetPath.slice(1)); + } + + // Resolve to absolute path + if (!path.isAbsolute(targetPath)) { + 
targetPath = path.resolve(targetPath); + } + + try { + const stat = await fs.promises.stat(targetPath); + if (!stat.isDirectory()) { + return { error: 'Path is not a directory', status: 400 }; + } + + const entries = await fs.promises.readdir(targetPath, { withFileTypes: true }); + const items = entries + .filter(entry => showHidden || !entry.name.startsWith('.')) + .map(entry => ({ + name: entry.name, + path: path.join(targetPath, entry.name), + isDirectory: entry.isDirectory(), + isFile: entry.isFile() + })) + .sort((a, b) => { + // Directories first, then files + if (a.isDirectory && !b.isDirectory) return -1; + if (!a.isDirectory && b.isDirectory) return 1; + return a.name.localeCompare(b.name); + }); + + return { + currentPath: targetPath, + parentPath: path.dirname(targetPath), + items, + homePath: os.homedir() + }; + } catch (err) { + return { error: 'Cannot access directory: ' + (err as Error).message, status: 400 }; + } + }); + return true; + } + + // API: File dialog - select file (validate path exists) + if (pathname === '/api/dialog/open-file' && req.method === 'POST') { + handlePostRequest(req, res, async (body) => { + const { path: filePath } = body as { path?: string }; + + if (!filePath) { + return { error: 'Path is required', status: 400 }; + } + + const os = await import('os'); + const path = await import('path'); + const fs = await import('fs'); + + let targetPath = filePath; + + // Expand ~ to home directory + if (targetPath.startsWith('~')) { + targetPath = path.join(os.homedir(), targetPath.slice(1)); + } + + // Resolve to absolute path + if (!path.isAbsolute(targetPath)) { + targetPath = path.resolve(targetPath); + } + + try { + await fs.promises.access(targetPath, fs.constants.R_OK); + const stat = await fs.promises.stat(targetPath); + + return { + success: true, + path: targetPath, + isFile: stat.isFile(), + isDirectory: stat.isDirectory() + }; + } catch { + return { error: 'File not accessible', status: 404 }; + } + }); + return true; + } 
+ return false; } diff --git a/ccw/src/templates/dashboard-css/21-cli-toolmgmt.css b/ccw/src/templates/dashboard-css/21-cli-toolmgmt.css index c1dbbe30..3380f1ca 100644 --- a/ccw/src/templates/dashboard-css/21-cli-toolmgmt.css +++ b/ccw/src/templates/dashboard-css/21-cli-toolmgmt.css @@ -661,3 +661,120 @@ color: hsl(var(--success)); } +/* ======================================== + * File Browser Modal + * ======================================== */ + +.file-browser-modal { + width: 600px; + max-width: 90vw; + max-height: 80vh; + display: flex; + flex-direction: column; +} + +.file-browser-toolbar { + display: flex; + align-items: center; + gap: 0.5rem; + padding: 0.5rem; + background: hsl(var(--muted) / 0.3); + border-radius: 0.375rem; + margin-bottom: 0.75rem; +} + +.file-browser-toolbar .btn-sm { + flex-shrink: 0; + padding: 0.375rem; +} + +.file-browser-path { + flex: 1; + padding: 0.375rem 0.5rem; + font-family: monospace; + font-size: 0.75rem; + background: hsl(var(--background)); + border: 1px solid hsl(var(--border)); + border-radius: 0.25rem; + color: hsl(var(--foreground)); +} + +.file-browser-hidden-toggle { + display: flex; + align-items: center; + gap: 0.375rem; + font-size: 0.75rem; + color: hsl(var(--muted-foreground)); + cursor: pointer; + white-space: nowrap; +} + +.file-browser-hidden-toggle input { + cursor: pointer; +} + +.file-browser-list { + flex: 1; + min-height: 300px; + max-height: 400px; + overflow-y: auto; + border: 1px solid hsl(var(--border)); + border-radius: 0.375rem; + background: hsl(var(--background)); +} + +.file-browser-loading, +.file-browser-empty, +.file-browser-error { + display: flex; + align-items: center; + justify-content: center; + height: 100%; + min-height: 200px; + color: hsl(var(--muted-foreground)); + font-size: 0.875rem; +} + +.file-browser-error { + color: hsl(var(--destructive)); +} + +.file-browser-item { + display: flex; + align-items: center; + gap: 0.5rem; + padding: 0.5rem 0.75rem; + cursor: pointer; + 
border-bottom: 1px solid hsl(var(--border) / 0.5); + transition: background-color 0.15s; +} + +.file-browser-item:last-child { + border-bottom: none; +} + +.file-browser-item:hover { + background: hsl(var(--muted) / 0.5); +} + +.file-browser-item.selected { + background: hsl(var(--primary) / 0.15); + border-color: hsl(var(--primary) / 0.3); +} + +.file-browser-item.is-directory { + color: hsl(var(--primary)); +} + +.file-browser-item.is-file { + color: hsl(var(--foreground)); +} + +.file-browser-item-name { + flex: 1; + font-size: 0.8125rem; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + diff --git a/ccw/src/templates/dashboard-js/i18n.js b/ccw/src/templates/dashboard-js/i18n.js index ba49e744..e5a1068a 100644 --- a/ccw/src/templates/dashboard-js/i18n.js +++ b/ccw/src/templates/dashboard-js/i18n.js @@ -268,6 +268,12 @@ const i18n = { 'cli.envFilePlaceholder': 'Path to .env file (e.g., ~/.gemini-env or C:/Users/xxx/.env)', 'cli.envFileHint': 'Load environment variables (e.g., API keys) before CLI execution. 
Supports ~ for home directory.', 'cli.envFileBrowse': 'Browse', + 'cli.fileBrowser': 'File Browser', + 'cli.fileBrowserSelect': 'Select', + 'cli.fileBrowserCancel': 'Cancel', + 'cli.fileBrowserUp': 'Parent Directory', + 'cli.fileBrowserHome': 'Home', + 'cli.fileBrowserShowHidden': 'Show hidden files', // CodexLens Configuration 'codexlens.config': 'CodexLens Configuration', @@ -2442,6 +2448,12 @@ const i18n = { 'cli.envFilePlaceholder': '.env 文件路径(如 ~/.gemini-env 或 C:/Users/xxx/.env)', 'cli.envFileHint': '在 CLI 执行前加载环境变量(如 API 密钥)。支持 ~ 表示用户目录。', 'cli.envFileBrowse': '浏览', + 'cli.fileBrowser': '文件浏览器', + 'cli.fileBrowserSelect': '选择', + 'cli.fileBrowserCancel': '取消', + 'cli.fileBrowserUp': '上级目录', + 'cli.fileBrowserHome': '主目录', + 'cli.fileBrowserShowHidden': '显示隐藏文件', // CodexLens 配置 'codexlens.config': 'CodexLens 配置', diff --git a/ccw/src/templates/dashboard-js/views/cli-manager.js b/ccw/src/templates/dashboard-js/views/cli-manager.js index 2b084237..830a7fbf 100644 --- a/ccw/src/templates/dashboard-js/views/cli-manager.js +++ b/ccw/src/templates/dashboard-js/views/cli-manager.js @@ -554,6 +554,241 @@ function buildToolConfigModalContent(tool, config, models, status) { ''; } +// ========== File Browser Modal ========== + +var fileBrowserState = { + currentPath: '', + showHidden: false, + onSelect: null +}; + +function showFileBrowserModal(onSelect) { + fileBrowserState.onSelect = onSelect; + fileBrowserState.showHidden = false; + + // Create modal overlay + var overlay = document.createElement('div'); + overlay.id = 'fileBrowserOverlay'; + overlay.className = 'modal-overlay'; + overlay.innerHTML = buildFileBrowserModalContent(); + document.body.appendChild(overlay); + + // Load initial directory (home) + loadFileBrowserDirectory(''); + + // Initialize events + initFileBrowserEvents(); + + // Initialize icons + if (window.lucide) lucide.createIcons(); +} + +function buildFileBrowserModalContent() { + return ''; +} + +async function loadFileBrowserDirectory(path) { 
+ var listContainer = document.getElementById('fileBrowserList'); + var pathInput = document.getElementById('fileBrowserPathInput'); + + if (listContainer) { + listContainer.innerHTML = '
'; + if (window.lucide) lucide.createIcons(); + } + + try { + var response = await fetch('/api/dialog/browse', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ path: path, showHidden: fileBrowserState.showHidden }) + }); + + if (!response.ok) { + throw new Error('Failed to load directory'); + } + + var data = await response.json(); + fileBrowserState.currentPath = data.currentPath; + + if (pathInput) { + pathInput.value = data.currentPath; + } + + renderFileBrowserItems(data.items); + } catch (err) { + console.error('Failed to load directory:', err); + if (listContainer) { + listContainer.innerHTML = '
Failed to load directory
'; + } + } +} + +function renderFileBrowserItems(items) { + var listContainer = document.getElementById('fileBrowserList'); + if (!listContainer) return; + + if (!items || items.length === 0) { + listContainer.innerHTML = '
Empty directory
'; + return; + } + + var html = items.map(function(item) { + var icon = item.isDirectory ? 'folder' : 'file'; + var itemClass = 'file-browser-item' + (item.isDirectory ? ' is-directory' : ' is-file'); + return '
' + + '' + + '' + escapeHtml(item.name) + '' + + '
'; + }).join(''); + + listContainer.innerHTML = html; + + // Initialize icons + if (window.lucide) lucide.createIcons(); + + // Add click handlers + listContainer.querySelectorAll('.file-browser-item').forEach(function(el) { + el.onclick = function() { + var isDir = el.getAttribute('data-is-dir') === 'true'; + var path = el.getAttribute('data-path'); + + if (isDir) { + // Navigate into directory + loadFileBrowserDirectory(path); + } else { + // Select file + listContainer.querySelectorAll('.file-browser-item').forEach(function(item) { + item.classList.remove('selected'); + }); + el.classList.add('selected'); + + // Enable select button + var selectBtn = document.getElementById('fileBrowserSelectBtn'); + if (selectBtn) { + selectBtn.disabled = false; + selectBtn.setAttribute('data-selected-path', path); + } + } + }; + + // Double-click to select file or enter directory + el.ondblclick = function() { + var isDir = el.getAttribute('data-is-dir') === 'true'; + var path = el.getAttribute('data-path'); + + if (isDir) { + loadFileBrowserDirectory(path); + } else { + // Select and close + closeFileBrowserModal(path); + } + }; + }); +} + +function initFileBrowserEvents() { + // Close button + var closeBtn = document.getElementById('fileBrowserCloseBtn'); + if (closeBtn) { + closeBtn.onclick = function() { closeFileBrowserModal(null); }; + } + + // Cancel button + var cancelBtn = document.getElementById('fileBrowserCancelBtn'); + if (cancelBtn) { + cancelBtn.onclick = function() { closeFileBrowserModal(null); }; + } + + // Select button + var selectBtn = document.getElementById('fileBrowserSelectBtn'); + if (selectBtn) { + selectBtn.onclick = function() { + var path = selectBtn.getAttribute('data-selected-path'); + closeFileBrowserModal(path); + }; + } + + // Up button + var upBtn = document.getElementById('fileBrowserUpBtn'); + if (upBtn) { + upBtn.onclick = function() { + // Get parent path + var currentPath = fileBrowserState.currentPath; + var parentPath = 
currentPath.replace(/[/\\][^/\\]+$/, '') || '/'; + loadFileBrowserDirectory(parentPath); + }; + } + + // Home button + var homeBtn = document.getElementById('fileBrowserHomeBtn'); + if (homeBtn) { + homeBtn.onclick = function() { + loadFileBrowserDirectory(''); + }; + } + + // Show hidden checkbox + var showHiddenCheckbox = document.getElementById('fileBrowserShowHidden'); + if (showHiddenCheckbox) { + showHiddenCheckbox.onchange = function() { + fileBrowserState.showHidden = showHiddenCheckbox.checked; + loadFileBrowserDirectory(fileBrowserState.currentPath); + }; + } + + // Click outside to close + var overlay = document.getElementById('fileBrowserOverlay'); + if (overlay) { + overlay.onclick = function(e) { + if (e.target === overlay) { + closeFileBrowserModal(null); + } + }; + } +} + +function closeFileBrowserModal(selectedPath) { + var overlay = document.getElementById('fileBrowserOverlay'); + if (overlay) { + overlay.remove(); + } + + if (fileBrowserState.onSelect && selectedPath) { + fileBrowserState.onSelect(selectedPath); + } + + fileBrowserState.onSelect = null; +} + function initToolConfigModalEvents(tool, currentConfig, models) { // Local tags state (copy from config) var currentTags = (currentConfig.tags || []).slice(); @@ -754,38 +989,13 @@ function initToolConfigModalEvents(tool, currentConfig, models) { // Environment file browse button (only for gemini/qwen) var envFileBrowseBtn = document.getElementById('envFileBrowseBtn'); if (envFileBrowseBtn) { - envFileBrowseBtn.onclick = async function() { - try { - // Use file dialog API if available - var response = await fetch('/api/dialog/open-file', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - title: t('cli.envFile'), - filters: [ - { name: 'Environment Files', extensions: ['env'] }, - { name: 'All Files', extensions: ['*'] } - ], - defaultPath: '' - }) - }); - - if (response.ok) { - var data = await response.json(); - if (data.filePath) { - var 
envFileInput = document.getElementById('envFileInput'); - if (envFileInput) { - envFileInput.value = data.filePath; - } - } - } else { - // Fallback: prompt user to enter path manually - showRefreshToast('File dialog not available. Please enter path manually.', 'info'); + envFileBrowseBtn.onclick = function() { + showFileBrowserModal(function(selectedPath) { + var envFileInput = document.getElementById('envFileInput'); + if (envFileInput && selectedPath) { + envFileInput.value = selectedPath; } - } catch (err) { - console.error('Failed to open file dialog:', err); - showRefreshToast('File dialog not available. Please enter path manually.', 'info'); - } + }); }; }