diff --git a/.claude/skills/team-planex/SKILL.md b/.claude/skills/team-planex/SKILL.md index 4c88ed9f..bff8b711 100644 --- a/.claude/skills/team-planex/SKILL.md +++ b/.claude/skills/team-planex/SKILL.md @@ -6,7 +6,7 @@ allowed-tools: TeamCreate(*), TeamDelete(*), SendMessage(*), TaskCreate(*), Task # Team PlanEx -2 成员边规划边执行团队。通过 Wave Pipeline(波次流水线)实现 planner 和 executor 并行工作:planner 完成一个 wave 的 queue 后立即创建 EXEC-* 任务,同时进入下一 wave 规划。所有成员通过 `--role=xxx` 路由。 +2 成员边规划边执行团队。通过逐 Issue 节拍流水线实现 planner 和 executor 并行工作:planner 每完成一个 issue 的 solution 后立即创建 EXEC-* 任务(含中间产物文件路径),executor 从文件加载 solution 开始实现。所有成员通过 `--role=xxx` 路由。 ## Architecture Overview @@ -63,7 +63,7 @@ Read(VALID_ROLES[role].file) | Role | Task Prefix | Responsibility | Reuses Agent | Role File | |------|-------------|----------------|--------------|-----------| -| `planner` | PLAN-* | 需求拆解 → issue 创建 → 方案设计 → 队列编排 → EXEC 任务派发 | issue-plan-agent, issue-queue-agent | [roles/planner.md](roles/planner.md) | +| `planner` | PLAN-* | 需求拆解 → issue 创建 → 方案设计 → 冲突检查 → EXEC 任务逐个派发 | issue-plan-agent | [roles/planner.md](roles/planner.md) | | `executor` | EXEC-* | 加载 solution → 代码实现 → 测试 → 提交 | code-developer | [roles/executor.md](roles/executor.md) | ## Input Types @@ -93,7 +93,7 @@ mcp__ccw-tools__team_msg({ summary: `[${role}] ...` }) |------|------| | 需求拆解 (issue 创建) | ❌ 直接编写/修改代码 | | 方案设计 (issue-plan-agent) | ❌ 调用 code-developer | -| 队列编排 (issue-queue-agent) | ❌ 运行测试 | +| 冲突检查 (inline files_touched) | ❌ 运行测试 | | 创建 EXEC-* 任务 | ❌ git commit | | 监控进度 (消息总线) | | @@ -113,6 +113,7 @@ mcp__ccw-tools__team_msg({ summary: `[${role}] ...` }) const TEAM_CONFIG = { name: "planex", sessionDir: ".workflow/.team/PEX-{slug}-{date}/", + artifactsDir: ".workflow/.team/PEX-{slug}-{date}/artifacts/", issueDataDir: ".workflow/issues/" } ``` @@ -135,7 +136,7 @@ mcp__ccw-tools__team_msg({ | Role | Types | |------|-------| -| planner | `wave_ready`, `queue_ready`, `all_planned`, `error` | +| planner | `wave_ready`, `issue_ready`, 
`all_planned`, `error` | | executor | `impl_complete`, `impl_failed`, `wave_done`, `error` | ### CLI Fallback @@ -161,22 +162,22 @@ TaskUpdate({ taskId: task.id, status: 'in_progress' }) // Phase 5: Report + Loop ``` -## Wave Pipeline +## Wave Pipeline (逐 Issue 节拍) ``` -Wave 1: planner 创建 issues + 规划 solutions + 形成 queue - ↓ (queue ready → 创建 EXEC-* 任务) -Wave 1 执行: executor 开始实现 ←→ planner 继续规划 Wave 2 - ↓ -Wave 2 执行: executor 实现 Wave 2 ←→ planner 规划 Wave 3 - ... -Final: planner 发送 all_planned → executor 完成剩余 EXEC-* → 结束 +Issue 1: planner 规划 solution → 写中间产物 → 冲突检查 → 创建 EXEC-* → issue_ready + ↓ (executor 立即开始) +Issue 2: planner 规划 solution → 写中间产物 → 冲突检查 → 创建 EXEC-* → issue_ready + ↓ (executor 并行消费) +Issue N: ... +Final: planner 发送 all_planned → executor 完成剩余 EXEC-* → 结束 ``` -**波次规则**: -- planner 每完成一个 wave 的 queue 后,立即创建 EXEC-* 任务供 executor 消费 -- planner 不等待 executor 完成当前 wave,直接进入下一 wave -- executor 持续轮询并消费可用的 EXEC-* 任务 +**节拍规则**: +- planner 每完成一个 issue 的 solution 后,**立即**创建 EXEC-* 任务并发送 `issue_ready` 信号 +- solution 写入中间产物文件(`artifacts/solutions/{issueId}.json`),EXEC-* 任务包含 `solution_file` 路径 +- executor 从文件加载 solution(无需再调 `ccw issue solution`),fallback 兼容旧模式 +- planner 不等待 executor,持续推进下一个 issue - 当 planner 发送 `all_planned` 消息后,executor 完成所有剩余任务即可结束 ## Execution Method Selection @@ -265,6 +266,13 @@ Skill(skill="team-planex", args="-y --text '添加日志'") // 1. 创建团队 TeamCreate({ team_name: teamName }) +// 1.5 初始化 sessionDir + artifacts 目录(slug 直接从 args 提取,issueIds 在步骤 2 才声明) +const slug = (args.match(/ISS-\d{8}-\d{6}/)?.[0] || 'batch').replace(/[^a-zA-Z0-9-]/g, '') +const dateStr = new Date().toISOString().slice(0,10).replace(/-/g,'') +const sessionId = `PEX-${slug}-${dateStr}` +const sessionDir = `.workflow/.team/${sessionId}` +Bash(`mkdir -p "${sessionDir}/artifacts/solutions"`) + // 2. 
解析输入参数 const issueIds = args.match(/ISS-\d{8}-\d{6}/g) || [] const textMatch = args.match(/--text\s+['"]([^'"]+)['"]/) @@ -298,11 +306,17 @@ executor 的执行方式已确定: ${executionConfig.executionMethod} execution_method: ${executionConfig.executionMethod} code_review: ${executionConfig.codeReviewTool} +## 中间产物(必须) +sessionDir: ${sessionDir} +每个 issue 的 solution 写入: ${sessionDir}/artifacts/solutions/{issueId}.json +EXEC-* 任务 description 必须包含 solution_file 字段指向该文件 +每完成一个 issue 立即发送 issue_ready 消息并创建 EXEC-* 任务 + ## 角色准则(强制) - 你只能处理 PLAN-* 前缀的任务 - 所有输出必须带 [planner] 标识前缀 -- 完成每个 wave 后立即创建 EXEC-* 任务供 executor 消费 -- EXEC-* 任务 description 中必须包含 execution_method 字段 +- 每完成一个 issue 的 solution 后立即创建 EXEC-* 任务(逐 issue 派发,不等 wave 完成) +- EXEC-* 任务 description 中必须包含 execution_method 和 solution_file 字段 ## 消息总线(必须) 每次 SendMessage 前,先调用 mcp__ccw-tools__team_msg 记录。 @@ -327,6 +341,10 @@ Task({ 代码审查: ${executionConfig.codeReviewTool} (每个 EXEC-* 任务 description 中可能包含 execution_method 覆盖) +## Solution 加载 +优先从 EXEC-* 任务 description 中的 solution_file 路径读取 solution JSON 文件 +无 solution_file 时 fallback 到 ccw issue solution 命令 + ## 角色准则(强制) - 你只能处理 EXEC-* 前缀的任务 - 所有输出必须带 [executor] 标识前缀 @@ -351,7 +369,7 @@ Task({ | Unknown --role value | Error with available role list | | Missing --role arg | Enter orchestration mode | | Role file not found | Error with expected path (roles/{name}.md) | -| Planner wave failure | Retry once, then report error and halt pipeline | +| Planner issue planning failure | Retry once, then report error and skip to next issue | | Executor impl failure | Report to planner, continue with next EXEC-* task | | No EXEC-* tasks yet | Executor idles, polls for new tasks | | Pipeline stall | Planner monitors — if executor blocked > 2 tasks, escalate to user | diff --git a/.claude/skills/team-planex/roles/executor.md b/.claude/skills/team-planex/roles/executor.md index 39368e39..4698acdd 100644 --- a/.claude/skills/team-planex/roles/executor.md +++ 
b/.claude/skills/team-planex/roles/executor.md @@ -72,7 +72,7 @@ ```javascript // 从任务描述中解析 execution_method function resolveExecutor(taskDesc, solutionTaskCount) { - const methodMatch = taskDesc.match(/execution_method:\s*(Agent|Codex|Gemini|Auto)/i) + const methodMatch = taskDesc.match(/execution_method[:\s]*\s*(Agent|Codex|Gemini|Auto)/i) const method = methodMatch ? methodMatch[1] : 'Auto' if (method.toLowerCase() === 'auto') { @@ -165,9 +165,20 @@ if (!issueId) { return } -// Load solution plan -const solJson = Bash(`ccw issue solution ${issueId} --json`) -const solution = JSON.parse(solJson) +// Load solution plan — dual mode: file-first, CLI fallback +const solutionFileMatch = task.description.match(/solution_file[:\s]*\s*(\S+\.json)/) + +let solution +if (solutionFileMatch) { + // 新模式:从中间产物文件加载 + const solutionData = JSON.parse(Read(solutionFileMatch[1])) + // 保持 solution.bound 结构兼容 + solution = solutionData.bound ? solutionData : { bound: solutionData } +} else { + // 兼容模式:从 ccw issue solution 加载 + const solJson = Bash(`ccw issue solution ${issueId} --json`) + solution = JSON.parse(solJson) +} if (!solution.bound) { mcp__ccw-tools__team_msg({ diff --git a/.claude/skills/team-planex/roles/planner.md b/.claude/skills/team-planex/roles/planner.md index 3f6a2e32..39a5ced3 100644 --- a/.claude/skills/team-planex/roles/planner.md +++ b/.claude/skills/team-planex/roles/planner.md @@ -1,6 +1,6 @@ # Role: planner -需求拆解 → issue 创建 → 方案设计 → 队列编排 → EXEC 任务派发。内部调用 issue-plan-agent 和 issue-queue-agent,并通过 Wave Pipeline 持续推进。planner 同时承担 lead 角色(无独立 coordinator)。 +需求拆解 → issue 创建 → 方案设计 → 冲突检查 → EXEC 任务逐个派发。内部调用 issue-plan-agent(单 issue),通过 inline files_touched 冲突检查替代 issue-queue-agent,每完成一个 issue 立即派发 EXEC-* 任务。planner 同时承担 lead 角色(无独立 coordinator)。 ## Role Identity @@ -16,8 +16,8 @@ - 仅处理 `PLAN-*` 前缀的任务 - 所有输出必须带 `[planner]` 标识 -- 完成每个 wave 的 queue 后**立即创建 EXEC-\* 任务** -- 不等待 executor 完成当前 wave,直接进入下一 wave 规划 +- 每完成一个 issue 的 solution 后**立即创建 EXEC-\* 任务**并发送 
`issue_ready` 信号 +- 不等待 executor,持续推进下一个 issue ### MUST NOT @@ -30,8 +30,8 @@ | Type | Direction | Trigger | Description | |------|-----------|---------|-------------| -| `wave_ready` | planner → executor | Wave queue 完成 + EXEC 任务已创建 | 新 wave 可执行 | -| `queue_ready` | planner → executor | 单个 issue 的 queue 就绪 | 增量通知 | +| `issue_ready` | planner → executor | 单个 issue solution + EXEC 任务已创建 | 逐 issue 节拍信号 | +| `wave_ready` | planner → executor | 一组 issues 全部派发完毕 | wave 汇总信号 | | `all_planned` | planner → executor | 所有 wave 规划完毕 | 最终信号 | | `error` | planner → executor | 阻塞性错误 | 规划失败 | @@ -41,8 +41,7 @@ | Agent Type | Purpose | |------------|---------| -| `issue-plan-agent` | Closed-loop planning: ACE exploration + solution generation + binding | -| `issue-queue-agent` | Solution ordering + conflict detection → execution queue | +| `issue-plan-agent` | Closed-loop planning: ACE exploration + solution generation + binding (单 issue 粒度) | ### CLI Capabilities @@ -167,188 +166,217 @@ if (inputType === 'plan_file') { Issue IDs 已就绪,直接进入 solution 规划。 -#### Path D: execution-plan.json → 波次感知处理 +#### Path D: execution-plan.json → 波次感知逐 issue 处理 ```javascript if (inputType === 'execution_plan') { const projectRoot = Bash('cd . 
&& pwd').trim() const waves = executionPlan.waves + const dispatchedSolutions = [] + // sessionDir 从 planner prompt 中的 sessionDir 变量获取 + const execution_method = args.match(/execution_method:\s*(\S+)/)?.[1] || 'Auto' + const code_review = args.match(/code_review:\s*(\S+)/)?.[1] || 'Skip' let waveNum = 0 for (const wave of waves) { waveNum++ - const waveIssues = wave.issue_ids - // Step 1: issue-plan-agent 生成 solutions - const planResult = Task({ - subagent_type: "issue-plan-agent", - run_in_background: false, - description: `Plan solutions for wave ${waveNum}: ${wave.label}`, - prompt: ` -issue_ids: ${JSON.stringify(waveIssues)} + for (const issueId of wave.issue_ids) { + // Step 1: 单 issue 规划 + const planResult = Task({ + subagent_type: "issue-plan-agent", + run_in_background: false, + description: `Plan solution for ${issueId}`, + prompt: `issue_ids: ["${issueId}"] project_root: "${projectRoot}" ## Requirements -- Generate solutions for each issue -- Auto-bind single solutions +- Generate solution for this issue +- Auto-bind single solution - Issues come from req-plan decomposition (tags: req-plan) -- Respect inter-issue dependencies: ${JSON.stringify(executionPlan.issue_dependencies)} -` +- Respect dependencies: ${JSON.stringify(executionPlan.issue_dependencies)}` + }) + + // Step 2: 获取 solution + 写中间产物 + const solJson = Bash(`ccw issue solution ${issueId} --json`) + const solution = JSON.parse(solJson) + const solutionFile = `${sessionDir}/artifacts/solutions/${issueId}.json` + Write({ + file_path: solutionFile, + content: JSON.stringify({ + session_id: sessionId, issue_id: issueId, ...solution, + execution_config: { execution_method, code_review }, + timestamp: new Date().toISOString() + }, null, 2) + }) + + // Step 3: inline 冲突检查 + const blockedBy = inlineConflictCheck(issueId, solution, dispatchedSolutions) + + // Step 4: 创建 EXEC-* 任务 + const execTask = TaskCreate({ + subject: `EXEC-W${waveNum}-${issueId}: 实现 ${solution.bound?.title || issueId}`, + 
description: `## 执行任务\n**Wave**: ${waveNum}\n**Issue**: ${issueId}\n**solution_file**: ${solutionFile}\n**execution_method**: ${execution_method}\n**code_review**: ${code_review}`, + activeForm: `实现 ${issueId}`, + owner: "executor" + }) + if (blockedBy.length > 0) { + TaskUpdate({ taskId: execTask.id, addBlockedBy: blockedBy }) + } + + // Step 5: 累积 + 节拍信号 + dispatchedSolutions.push({ issueId, solution, execTaskId: execTask.id }) + mcp__ccw-tools__team_msg({ + operation: "log", team: "planex", from: "planner", to: "executor", + type: "issue_ready", + summary: `[planner] issue_ready: ${issueId}`, + ref: solutionFile + }) + SendMessage({ + type: "message", recipient: "executor", + content: `## [planner] Issue Ready: ${issueId}\n**solution_file**: ${solutionFile}\n**EXEC task**: ${execTask.subject}`, + summary: `[planner] issue_ready: ${issueId}` + }) + } + + // wave 级汇总 + mcp__ccw-tools__team_msg({ + operation: "log", team: "planex", from: "planner", to: "executor", + type: "wave_ready", + summary: `[planner] Wave ${waveNum} fully dispatched: ${wave.issue_ids.length} issues` }) - - // Step 2: issue-queue-agent 形成 queue - const queueResult = Task({ - subagent_type: "issue-queue-agent", - run_in_background: false, - description: `Form queue for wave ${waveNum}: ${wave.label}`, - prompt: ` -issue_ids: ${JSON.stringify(waveIssues)} -project_root: "${projectRoot}" - -## Requirements -- Order solutions by dependency (DAG) -- Detect conflicts between solutions -- Respect wave dependencies: ${JSON.stringify(wave.depends_on_waves)} -- Output execution queue -` - }) - - // Step 3: → Phase 4 (Wave Dispatch) - create EXEC-* tasks - // Continue to next wave without waiting for executor } // After all waves → Phase 5 (Report + Finalize) } ``` -**关键差异**: 波次分组来自 `executionPlan.waves`,而非固定 batch=5。Progressive 模式下 L0(Wave 1) → L1(Wave 2),Direct 模式下 parallel_group 映射为 wave。 +**关键差异**: 波次分组来自 `executionPlan.waves`,但每个 issue 独立规划 + 即时派发。Progressive 模式下 L0(Wave 1) → L1(Wave 2),Direct 模式下 
parallel_group 映射为 wave。 -#### Wave 规划(Path A/B/C 汇聚) +#### Wave 规划(Path A/B/C 汇聚)— 逐 issue 派发 -将 issueIds 按波次分组规划(Path D 使用独立的波次逻辑,不走此路径): +将 issueIds 逐个规划并即时派发(Path D 使用独立的波次逻辑,不走此路径): ```javascript if (inputType !== 'execution_plan') { - // Path A/B/C: 固定 batch=5 分组 const projectRoot = Bash('cd . && pwd').trim() + const dispatchedSolutions = [] + const execution_method = args.match(/execution_method:\s*(\S+)/)?.[1] || 'Auto' + const code_review = args.match(/code_review:\s*(\S+)/)?.[1] || 'Skip' + let waveNum = 1 // 简化:不再按 WAVE_SIZE=5 分组,全部视为一个逻辑 wave -// 按批次分组(每 wave 最多 5 个 issues) -const WAVE_SIZE = 5 -const waves = [] -for (let i = 0; i < issueIds.length; i += WAVE_SIZE) { - waves.push(issueIds.slice(i, i + WAVE_SIZE)) -} - -let waveNum = 0 -for (const waveIssues of waves) { - waveNum++ - - // Step 1: 调用 issue-plan-agent 生成 solutions - const planResult = Task({ - subagent_type: "issue-plan-agent", - run_in_background: false, - description: `Plan solutions for wave ${waveNum}`, - prompt: ` -issue_ids: ${JSON.stringify(waveIssues)} + for (const issueId of issueIds) { + // Step 1: 单 issue 规划 + const planResult = Task({ + subagent_type: "issue-plan-agent", + run_in_background: false, + description: `Plan solution for ${issueId}`, + prompt: `issue_ids: ["${issueId}"] project_root: "${projectRoot}" ## Requirements -- Generate solutions for each issue -- Auto-bind single solutions -- For multiple solutions, select the most pragmatic one -` - }) +- Generate solution for this issue +- Auto-bind single solution +- For multiple solutions, select the most pragmatic one` + }) - // Step 2: 调用 issue-queue-agent 形成 queue - const queueResult = Task({ - subagent_type: "issue-queue-agent", - run_in_background: false, - description: `Form queue for wave ${waveNum}`, - prompt: ` -issue_ids: ${JSON.stringify(waveIssues)} -project_root: "${projectRoot}" + // Step 2: 获取 solution + 写中间产物 + const solJson = Bash(`ccw issue solution ${issueId} --json`) + const solution = 
JSON.parse(solJson) + const solutionFile = `${sessionDir}/artifacts/solutions/${issueId}.json` + Write({ + file_path: solutionFile, + content: JSON.stringify({ + session_id: sessionId, issue_id: issueId, ...solution, + execution_config: { execution_method, code_review }, + timestamp: new Date().toISOString() + }, null, 2) + }) -## Requirements -- Order solutions by dependency (DAG) -- Detect conflicts between solutions -- Output execution queue -` - }) + // Step 3: inline 冲突检查 + const blockedBy = inlineConflictCheck(issueId, solution, dispatchedSolutions) - // Step 3: → Phase 4 (Wave Dispatch) -} + // Step 4: 创建 EXEC-* 任务 + const execTask = TaskCreate({ + subject: `EXEC-W${waveNum}-${issueId}: 实现 ${solution.bound?.title || issueId}`, + description: `## 执行任务\n**Wave**: ${waveNum}\n**Issue**: ${issueId}\n**solution_file**: ${solutionFile}\n**execution_method**: ${execution_method}\n**code_review**: ${code_review}`, + activeForm: `实现 ${issueId}`, + owner: "executor" + }) + if (blockedBy.length > 0) { + TaskUpdate({ taskId: execTask.id, addBlockedBy: blockedBy }) + } + + // Step 5: 累积 + 节拍信号 + dispatchedSolutions.push({ issueId, solution, execTaskId: execTask.id }) + mcp__ccw-tools__team_msg({ + operation: "log", team: "planex", from: "planner", to: "executor", + type: "issue_ready", + summary: `[planner] issue_ready: ${issueId}`, + ref: solutionFile + }) + SendMessage({ + type: "message", recipient: "executor", + content: `## [planner] Issue Ready: ${issueId}\n**solution_file**: ${solutionFile}\n**EXEC task**: ${execTask.subject}`, + summary: `[planner] issue_ready: ${issueId}` + }) + } } // end if (inputType !== 'execution_plan') ``` -### Phase 4: Wave Dispatch +### Phase 4: Inline Conflict Check + Wave Summary -每个 wave 的 queue 完成后,**立即创建 EXEC-\* 任务**供 executor 消费。 +EXEC-* 任务创建已在 Phase 3 逐 issue 完成,Phase 4 仅负责 inline 冲突检查函数定义和 wave 汇总。 + +#### Inline Conflict Check 函数 ```javascript -// Read the generated queue -const queuePath = 
`.workflow/issues/queue/execution-queue.json` -const queue = JSON.parse(Read(queuePath)) +// Inline conflict check — 替代 issue-queue-agent +// 基于 files_touched 重叠检测 + 显式依赖 +function inlineConflictCheck(issueId, solution, dispatchedSolutions) { + const currentFiles = solution.bound?.files_touched + || solution.bound?.affected_files || [] + const blockedBy = [] -// Create EXEC-* tasks from queue entries -const execTasks = [] -for (const entry of queue.queue) { - const execTask = TaskCreate({ - subject: `EXEC-W${waveNum}-${entry.issue_id}: 实现 ${entry.title || entry.issue_id}`, - description: `## 执行任务 - -**Wave**: ${waveNum} -**Issue**: ${entry.issue_id} -**Solution**: ${entry.solution_id} -**Priority**: ${entry.priority || 'normal'} -**Dependencies**: ${entry.depends_on?.join(', ') || 'none'} - -加载 solution plan 并实现代码。完成后运行测试、提交。`, - activeForm: `实现 ${entry.issue_id}`, - owner: "executor" - }) - execTasks.push(execTask) -} - -// Set up dependency chains between EXEC tasks (based on queue DAG) -for (const entry of queue.queue) { - if (entry.depends_on?.length > 0) { - const thisTask = execTasks.find(t => t.subject.includes(entry.issue_id)) - const depTasks = entry.depends_on.map(depId => - execTasks.find(t => t.subject.includes(depId)) - ).filter(Boolean) - - if (thisTask && depTasks.length > 0) { - TaskUpdate({ - taskId: thisTask.id, - addBlockedBy: depTasks.map(t => t.id) - }) + // 1. 文件冲突检测 + for (const prev of dispatchedSolutions) { + const prevFiles = prev.solution.bound?.files_touched + || prev.solution.bound?.affected_files || [] + const overlap = currentFiles.filter(f => prevFiles.includes(f)) + if (overlap.length > 0) { + blockedBy.push(prev.execTaskId) } } + + // 2. 
显式依赖 + const explicitDeps = solution.bound?.dependencies?.on_issues || [] + for (const depId of explicitDeps) { + const depTask = dispatchedSolutions.find(d => d.issueId === depId) + if (depTask && !blockedBy.includes(depTask.execTaskId)) { + blockedBy.push(depTask.execTaskId) + } + } + + return blockedBy } +``` -// Notify executor: wave ready +#### Wave Summary Signal + +Phase 3 循环完成后发送汇总信号(Path A/B/C 在全部 issue 完成后,Path D 在每个 wave 完成后): + +```javascript +// Wave summary — 已在 Phase 3 循环中由每个 wave 末尾发送 +// Path A/B/C: 全部 issue 完成后发送一次 mcp__ccw-tools__team_msg({ - operation: "log", - team: "planex", - from: "planner", - to: "executor", + operation: "log", team: "planex", from: "planner", to: "executor", type: "wave_ready", - summary: `[planner] Wave ${waveNum} ready: ${execTasks.length} EXEC tasks created` + summary: `[planner] Wave ${waveNum} fully dispatched: ${issueIds.length} issues` }) - SendMessage({ - type: "message", - recipient: "executor", - content: `## [planner] Wave ${waveNum} Ready - -**Issues**: ${waveIssues.join(', ')} -**EXEC Tasks Created**: ${execTasks.length} -**Queue**: ${queuePath} - -Executor 可以开始实现。`, + type: "message", recipient: "executor", + content: `## [planner] Wave ${waveNum} Complete\n所有 issues 已逐个派发完毕,共 ${dispatchedSolutions.length} 个 EXEC 任务。`, summary: `[planner] wave_ready: wave ${waveNum}` }) - -// 不等待 executor 完成,继续下一 wave → back to Phase 3 loop ``` ### Phase 5: Report + Finalize @@ -443,9 +471,10 @@ function parsePlanPhases(planContent) { | No PLAN-* tasks available | Idle, wait for orchestrator | | Issue creation failure | Retry once with simplified text, then report error | | issue-plan-agent failure | Retry once, then report error and skip to next issue | -| issue-queue-agent failure | Retry once, then create EXEC tasks without DAG ordering | +| Inline conflict check failure | Skip conflict detection, create EXEC task without blockedBy | | Plan file not found | Report error with expected path | | execution-plan.json parse 
failure | Fallback to plan_file parsing (Path B) | | execution-plan.json missing waves | Report error, suggest re-running req-plan | | Empty input (no issues, no text, no plan) | AskUserQuestion for clarification | +| Solution artifact write failure | Log warning, create EXEC task without solution_file (executor fallback) | | Wave partially failed | Report partial success, continue with successful issues | diff --git a/.codex/skills/issue-devpipeline/SKILL.md b/.codex/skills/issue-devpipeline/SKILL.md index 7cbc507e..853e1cbd 100644 --- a/.codex/skills/issue-devpipeline/SKILL.md +++ b/.codex/skills/issue-devpipeline/SKILL.md @@ -1,16 +1,16 @@ --- name: issue-devpipeline description: | - Plan-and-Execute pipeline with Wave Pipeline pattern. + Plan-and-Execute pipeline with per-issue beat pattern. Orchestrator coordinates planner (Deep Interaction) and executors (Parallel Fan-out). - Planner produces wave queues, executors implement solutions concurrently. -agents: 4 + Planner outputs per-issue solutions, executors implement solutions concurrently. +agents: 3 phases: 4 --- # Issue DevPipeline -边规划边执行流水线。编排器通过 Wave Pipeline 协调 planner 和 executor(s):planner 完成一个 wave 的规划后输出执行队列,编排器立即为该 wave 派发 executor agents,同时 planner 继续规划下一 wave。 +边规划边执行流水线。编排器通过逐 Issue 节拍流水线协调 planner 和 executor(s):planner 每完成一个 issue 的规划后立即输出,编排器即时为该 issue 派发 executor agent,同时 planner 继续规划下一 issue。 ## Architecture Overview @@ -24,24 +24,23 @@ phases: 4 │ Planner │ │ Executors (N) │ │ (Deep │ │ (Parallel Fan-out) │ │ Interaction│ │ │ - │ multi-round│ │ exec-1 exec-2 ... │ + │ per-issue) │ │ exec-1 exec-2 ... 
│ └──────┬──────┘ └──────────┬──────────┘ │ │ ┌──────┴──────┐ ┌──────────┴──────────┐ │ issue-plan │ │ code-developer │ - │ issue-queue │ │ (role reference) │ - │ (existing) │ │ │ + │ (existing) │ │ (role reference) │ └─────────────┘ └─────────────────────┘ ``` -**Wave Pipeline Flow**: +**Per-Issue Beat Pipeline Flow**: ``` -Planner Round 1 → Wave 1 queue - ↓ (spawn executors for wave 1) - ↓ send_input → Planner Round 2 → Wave 2 queue - ↓ (spawn executors for wave 2) +Planner → Issue 1 solution → ISSUE_READY + ↓ (spawn executor for issue 1) + ↓ send_input → Planner → Issue 2 solution → ISSUE_READY + ↓ (spawn executor for issue 2) ... - ↓ Planner outputs "ALL_PLANNED" + ↓ Planner outputs "all_planned" ↓ wait for all executor agents ↓ Aggregate results → Done ``` @@ -50,10 +49,9 @@ Planner Round 1 → Wave 1 queue | Agent | Role File | Responsibility | New/Existing | |-------|-----------|----------------|--------------| -| `planex-planner` | `~/.codex/agents/planex-planner.md` | 需求拆解 → issue 创建 → 方案设计 → 队列编排 | New | +| `planex-planner` | `~/.codex/agents/planex-planner.md` | 需求拆解 → issue 创建 → 方案设计 → 冲突检查 → 逐 issue 输出 | New | | `planex-executor` | `~/.codex/agents/planex-executor.md` | 加载 solution → 代码实现 → 测试 → 提交 | New | | `issue-plan-agent` | `~/.codex/agents/issue-plan-agent.md` | Closed-loop: ACE 探索 + solution 生成 | Existing | -| `issue-queue-agent` | `~/.codex/agents/issue-queue-agent.md` | Solution 排序 + 冲突检测 → 执行队列 | Existing | ## Input Types @@ -88,9 +86,16 @@ const inputPayload = { text: textMatch ? textMatch[1] : args, planFile: planMatch ? 
planMatch[1] : null } + +// Initialize session directory for artifacts +const slug = (issueIds[0] || 'batch').replace(/[^a-zA-Z0-9-]/g, '') +const dateStr = new Date().toISOString().slice(0,10).replace(/-/g,'') +const sessionId = `PEX-${slug}-${dateStr}` +const sessionDir = `.workflow/.team/${sessionId}` +shell(`mkdir -p "${sessionDir}/artifacts/solutions"`) ``` -### Phase 2: Planning (Deep Interaction with Planner) +### Phase 2: Planning (Deep Interaction with Planner — Per-Issue Beat) ```javascript // Track all agents for cleanup @@ -108,45 +113,42 @@ const plannerId = spawn_agent({ --- -Goal: 分析需求并完成第一波 (Wave 1) 的规划。输出执行队列。 +Goal: 分析需求并逐 issue 输出规划结果。每完成一个 issue 立即输出。 Input: ${JSON.stringify(inputPayload, null, 2)} +Session Dir: ${sessionDir} + Scope: -- Include: 需求分析、issue 创建、方案设计、队列编排 +- Include: 需求分析、issue 创建、方案设计、inline 冲突检查、写中间产物 - Exclude: 代码实现、测试执行、git 操作 Deliverables: -输出严格遵循以下 JSON 格式: +每个 issue 输出严格遵循以下 JSON 格式: \`\`\`json { - "wave": 1, - "status": "wave_ready" | "all_planned", - "issues": ["ISS-xxx", ...], - "queue": [ - { - "issue_id": "ISS-xxx", - "solution_id": "SOL-xxx", - "title": "描述", - "priority": "normal", - "depends_on": [] - } - ], + "status": "issue_ready" | "all_planned", + "issue_id": "ISS-xxx", + "solution_id": "SOL-xxx", + "title": "描述", + "priority": "normal", + "depends_on": [], + "solution_file": "${sessionDir}/artifacts/solutions/ISS-xxx.json", "remaining_issues": ["ISS-yyy", ...], - "summary": "本波次规划摘要" + "summary": "本 issue 规划摘要" } \`\`\` Quality bar: - 每个 issue 必须有绑定的 solution -- 队列必须按依赖排序 -- 每波最多 5 个 issues +- Solution 写入中间产物文件 +- Inline 冲突检查标记 depends_on ` }) allAgentIds.push(plannerId) -// Wait for planner Wave 1 output +// Wait for planner first issue output let plannerResult = wait({ ids: [plannerId], timeout_ms: 900000 }) if (plannerResult.timed_out) { @@ -155,21 +157,21 @@ if (plannerResult.timed_out) { } // Parse planner output -let waveData = parseWaveOutput(plannerResult.status[plannerId].completed) +let issueData = 
parseIssueOutput(plannerResult.status[plannerId].completed) ``` -### Phase 3: Wave Execution Loop +### Phase 3: Per-Issue Execution Loop ```javascript const executorResults = [] -let waveNum = 0 +let issueCount = 0 while (true) { - waveNum++ + issueCount++ - // ─── Dispatch executors for current wave (Parallel Fan-out) ─── - const waveExecutors = waveData.queue.map(entry => - spawn_agent({ + // ─── Dispatch executor for current issue (if valid) ─── + if (issueData && issueData.issue_id) { + const executorId = spawn_agent({ message: ` ## TASK ASSIGNMENT @@ -180,23 +182,25 @@ while (true) { --- -Goal: 实现 ${entry.issue_id} 的 solution +Goal: 实现 ${issueData.issue_id} 的 solution -Issue: ${entry.issue_id} -Solution: ${entry.solution_id} -Title: ${entry.title} -Priority: ${entry.priority} -Dependencies: ${entry.depends_on?.join(', ') || 'none'} +Issue: ${issueData.issue_id} +Solution: ${issueData.solution_id} +Title: ${issueData.title} +Priority: ${issueData.priority} +Dependencies: ${issueData.depends_on?.join(', ') || 'none'} +Solution File: ${issueData.solution_file} +Session Dir: ${sessionDir} Scope: - Include: 加载 solution plan、代码实现、测试运行、git commit -- Exclude: issue 创建、方案修改、队列变更 +- Exclude: issue 创建、方案修改 Deliverables: 输出严格遵循以下格式: \`\`\`json { - "issue_id": "${entry.issue_id}", + "issue_id": "${issueData.issue_id}", "status": "success" | "failed", "files_changed": ["path/to/file", ...], "tests_passed": true | false, @@ -214,65 +218,57 @@ Quality bar: - 每个变更必须 commit ` }) - ) - allAgentIds.push(...waveExecutors) - - // ─── Check if more waves needed ─── - if (waveData.status === 'all_planned') { - // No more waves — wait for current executors and finish - const execResults = wait({ ids: waveExecutors, timeout_ms: 1200000 }) - waveExecutors.forEach((id, i) => { - executorResults.push({ - wave: waveNum, - issue: waveData.queue[i].issue_id, - result: execResults.status[id]?.completed || 'timeout' - }) + allAgentIds.push(executorId) + executorResults.push({ + id: executorId, 
+ issueId: issueData.issue_id, + index: issueCount }) + } + + // ─── Check if all planned ─── + if (issueData?.status === 'all_planned') { break } - // ─── Request next wave from planner (while executors run) ─── + // ─── Request next issue from planner ─── send_input({ id: plannerId, - message: ` -## WAVE ${waveNum} 已派发 - -已为 Wave ${waveNum} 创建 ${waveExecutors.length} 个 executor agents。 - -## NEXT -请继续规划下一波 (Wave ${waveNum + 1})。 -剩余 issues: ${JSON.stringify(waveData.remaining_issues)} - -输出格式同前。如果所有 issues 已规划完毕,status 设为 "all_planned"。 -` + message: `Issue ${issueData?.issue_id || 'unknown'} dispatched. Continue to next issue.` }) - // ─── Wait for both: executors (current wave) + planner (next wave) ─── - const allWaiting = [...waveExecutors, plannerId] - const batchResult = wait({ ids: allWaiting, timeout_ms: 1200000 }) + // ─── Wait for planner next issue ─── + const nextResult = wait({ ids: [plannerId], timeout_ms: 900000 }) - // Collect executor results - waveExecutors.forEach((id, i) => { - executorResults.push({ - wave: waveNum, - issue: waveData.queue[i].issue_id, - result: batchResult.status[id]?.completed || 'timeout' - }) - }) - - // Parse next wave from planner - if (batchResult.status[plannerId]?.completed) { - waveData = parseWaveOutput(batchResult.status[plannerId].completed) + if (nextResult.timed_out) { + send_input({ id: plannerId, message: "请尽快输出当前已完成的规划结果。" }) + const retryResult = wait({ ids: [plannerId], timeout_ms: 120000 }) + if (retryResult.timed_out) break + issueData = parseIssueOutput(retryResult.status[plannerId].completed) } else { - // Planner timed out — wait more - const plannerRetry = wait({ ids: [plannerId], timeout_ms: 300000 }) - if (plannerRetry.timed_out) { - // Abort pipeline - break - } - waveData = parseWaveOutput(plannerRetry.status[plannerId].completed) + issueData = parseIssueOutput(nextResult.status[plannerId].completed) } } + +// ─── Wait for all executor agents ─── +const executorIds = executorResults.map(e => 
e.id) +if (executorIds.length > 0) { + const execResults = wait({ ids: executorIds, timeout_ms: 1200000 }) + + // Handle timeouts + if (execResults.timed_out) { + const pending = executorIds.filter(id => !execResults.status[id]?.completed) + pending.forEach(id => { + send_input({ id, message: "Please finalize current task and output results." }) + }) + wait({ ids: pending, timeout_ms: 120000 }) + } + + // Collect results + executorResults.forEach(entry => { + entry.result = execResults.status[entry.id]?.completed || 'timeout' + }) +} ``` ### Phase 4: Aggregation & Cleanup @@ -297,19 +293,18 @@ const failed = executorResults.filter(r => { const report = ` ## PlanEx Pipeline Complete -**Waves**: ${waveNum} **Total Issues**: ${executorResults.length} **Succeeded**: ${succeeded.length} **Failed**: ${failed.length} -### Results by Wave -${executorResults.map(r => `- Wave ${r.wave} | ${r.issue} | ${(() => { +### Results +${executorResults.map(r => `- ${r.issueId} | ${(() => { try { return JSON.parse(r.result).status } catch { return 'error' } })()}`).join('\n')} ${failed.length > 0 ? 
`### Failed Issues -${failed.map(r => `- ${r.issue}: ${(() => { - try { return JSON.parse(r.result).error } catch { return r.result.slice(0, 200) } +${failed.map(r => `- ${r.issueId}: ${(() => { + try { return JSON.parse(r.result).error } catch { return r.result?.slice(0, 200) || 'unknown' } })()}`).join('\n')}` : ''} ` @@ -324,7 +319,7 @@ allAgentIds.forEach(id => { ## Helper Functions ```javascript -function parseWaveOutput(output) { +function parseIssueOutput(output) { // Extract JSON block from agent output const jsonMatch = output.match(/```json\s*([\s\S]*?)```/) if (jsonMatch) { @@ -332,8 +327,8 @@ function parseWaveOutput(output) { } // Fallback: try parsing entire output as JSON try { return JSON.parse(output) } catch {} - // Last resort: return empty wave with all_planned - return { wave: 0, status: 'all_planned', queue: [], remaining_issues: [], summary: 'Parse failed' } + // Last resort: return empty with all_planned + return { status: 'all_planned', issue_id: null, remaining_issues: [], summary: 'Parse failed' } } ``` @@ -342,11 +337,11 @@ function parseWaveOutput(output) { ```javascript const CONFIG = { sessionDir: ".workflow/.team/PEX-{slug}-{date}/", + artifactsDir: ".workflow/.team/PEX-{slug}-{date}/artifacts/", issueDataDir: ".workflow/issues/", - maxWaveSize: 5, plannerTimeout: 900000, // 15 min executorTimeout: 1200000, // 20 min - maxWaves: 10 + maxIssues: 50 } ``` @@ -356,10 +351,10 @@ const CONFIG = { | Scenario | Action | |----------|--------| -| Planner wave timeout | send_input 催促收敛,retry wait 120s | +| Planner issue timeout | send_input 催促收敛,retry wait 120s | | Executor timeout | 标记为 failed,继续其他 executor | | Batch wait partial timeout | 收集已完成结果,继续 pipeline | -| Pipeline stall (> 2 waves timeout) | 中止 pipeline,输出部分结果 | +| Pipeline stall (> 3 issues timeout) | 中止 pipeline,输出部分结果 | ### Cleanup Protocol @@ -379,5 +374,5 @@ allAgentIds.forEach(id => { | No issues created | Report error, abort pipeline | | Solution planning failure | Skip issue, 
report in final results | | Executor implementation failure | Mark as failed, continue with other executors | -| All executors in wave fail | Report wave failure, continue to next wave | -| Planner exits early | Treat as all_planned, finish current wave | +| Inline conflict check failure | Use empty depends_on, continue | +| Planner exits early | Treat as all_planned, finish current executors | diff --git a/.codex/skills/issue-devpipeline/agents/planex-executor.md b/.codex/skills/issue-devpipeline/agents/planex-executor.md index 126ef2e0..2f4b9099 100644 --- a/.codex/skills/issue-devpipeline/agents/planex-executor.md +++ b/.codex/skills/issue-devpipeline/agents/planex-executor.md @@ -1,7 +1,7 @@ --- name: planex-executor description: | - PlanEx 执行角色。加载 solution plan → 代码实现 → 测试验证 → git commit。 + PlanEx 执行角色。从中间产物文件加载 solution plan(兼容 CLI fallback)→ 代码实现 → 测试验证 → git commit。 每个 executor 实例处理一个 issue 的 solution。 color: green skill: issue-devpipeline @@ -9,11 +9,11 @@ skill: issue-devpipeline # PlanEx Executor -代码实现角色。接收编排器派发的 issue + solution 信息,加载 solution plan,实现代码变更,运行测试验证,提交变更。每个 executor 实例独立处理一个 issue。 +代码实现角色。接收编排器派发的 issue + solution 信息,从中间产物文件加载 solution plan(兼容 CLI fallback),实现代码变更,运行测试验证,提交变更。每个 executor 实例独立处理一个 issue。 ## Core Capabilities -1. **Solution 加载**: 通过 `ccw issue solutions --json` 加载绑定的 solution plan +1. **Solution 加载**: 从中间产物文件加载 solution plan(兼容 `ccw issue solutions --json` fallback) 2. **代码实现**: 按 solution plan 的任务列表顺序实现代码变更 3. **测试验证**: 运行相关测试确保变更正确且不破坏现有功能 4. 
**变更提交**: 将实现的代码 commit 到 git @@ -179,10 +179,24 @@ function getUtc8ISOString() { ### Step 2: Solution Loading & Implementation ```javascript -// ── Load solution plan ── +// ── Load solution plan (dual-mode: artifact file first, CLI fallback) ── const issueId = taskAssignment.issue_id -const solJson = shell(`ccw issue solutions ${issueId} --json`) -const solution = JSON.parse(solJson) +const solutionFile = taskAssignment.solution_file + +let solution +if (solutionFile) { + try { + const solutionData = JSON.parse(read_file(solutionFile)) + solution = solutionData.bound ? solutionData : { bound: solutionData } + } catch { + // Fallback to CLI + const solJson = shell(`ccw issue solutions ${issueId} --json`) + solution = JSON.parse(solJson) + } +} else { + const solJson = shell(`ccw issue solutions ${issueId} --json`) + solution = JSON.parse(solJson) +} if (!solution.bound) { outputError(`No bound solution for ${issueId}`) diff --git a/.codex/skills/issue-devpipeline/agents/planex-planner.md b/.codex/skills/issue-devpipeline/agents/planex-planner.md index e1fecf5d..490eed1e 100644 --- a/.codex/skills/issue-devpipeline/agents/planex-planner.md +++ b/.codex/skills/issue-devpipeline/agents/planex-planner.md @@ -1,23 +1,24 @@ --- name: planex-planner description: | - PlanEx 规划角色。需求拆解 → issue 创建 → 方案设计 → 队列编排。 - 按波次 (wave) 输出执行队列,支持 Deep Interaction 多轮交互。 + PlanEx 规划角色。需求拆解 → issue 创建 → 方案设计 → inline 冲突检查。 + 逐 issue 输出执行信息,支持 Deep Interaction 多轮交互。 color: blue skill: issue-devpipeline --- # PlanEx Planner -需求分析和规划角色。接收需求输入(issue IDs / 文本 / plan 文件),完成需求拆解、issue 创建、方案设计(调用 issue-plan-agent)、队列编排(调用 issue-queue-agent),按波次输出执行队列供编排器派发 executor。 +需求分析和规划角色。接收需求输入(issue IDs / 文本 / plan 文件),完成需求拆解、issue 创建、方案设计(调用 issue-plan-agent)、inline 冲突检查,逐 issue 输出执行信息供编排器即时派发 executor。 ## Core Capabilities 1. **需求分析**: 解析输入类型,提取需求要素 2. **Issue 创建**: 将文本/plan 拆解为结构化 issue(通过 `ccw issue new`) 3. **方案设计**: 调用 issue-plan-agent 为每个 issue 生成 solution -4. 
**队列编排**: 调用 issue-queue-agent 按依赖排序形成执行队列 -5. **波次输出**: 每波最多 5 个 issues,输出结构化 JSON 队列 +4. **Inline 冲突检查**: 基于 files_touched 重叠检测 + 显式依赖排序 +5. **中间产物**: 将 solution 写入文件供 executor 直接加载 +6. **逐 issue 输出**: 每完成一个 issue 立即输出 JSON,编排器即时派发 ## Execution Process @@ -32,6 +33,7 @@ skill: issue-devpipeline - **Goal**: What to achieve - **Scope**: What's allowed and forbidden - **Input**: Input payload with type, issueIds, text, planFile + - **Session Dir**: Path for writing solution artifacts - **Deliverables**: Expected JSON output format ### Step 2: Input Processing & Issue Creation @@ -40,6 +42,7 @@ skill: issue-devpipeline ```javascript const input = taskAssignment.input +const sessionDir = taskAssignment.session_dir if (input.type === 'issue_ids') { // Issue IDs 已提供,直接使用 @@ -68,28 +71,24 @@ if (input.type === 'plan_file') { } ``` -### Step 3: Solution Planning & Queue Formation +### Step 3: Per-Issue Solution Planning & Artifact Writing -分波次处理 issues。每波最多 5 个。 +逐 issue 处理:plan-agent → 写中间产物 → 冲突检查 → 输出 JSON。 ```javascript -const WAVE_SIZE = 5 -const allIssues = [...issueIds] -const waves = [] +const projectRoot = shell('pwd').trim() +const dispatchedSolutions = [] +const remainingIssues = [...issueIds] -for (let i = 0; i < allIssues.length; i += WAVE_SIZE) { - waves.push(allIssues.slice(i, i + WAVE_SIZE)) -} +shell(`mkdir -p "${sessionDir}/artifacts/solutions"`) -// 处理第一个 wave(后续 wave 通过 send_input 触发) -const currentWave = waves[0] -const remainingWaves = waves.slice(1) -const remainingIssues = remainingWaves.flat() +for (let i = 0; i < issueIds.length; i++) { + const issueId = issueIds[i] + remainingIssues.shift() -// ── Solution Planning ── -// 调用 issue-plan-agent 为当前 wave 的 issues 生成 solutions -const planAgent = spawn_agent({ - message: ` + // --- Step 3a: Spawn issue-plan-agent for single issue --- + const planAgent = spawn_agent({ + message: ` ## TASK ASSIGNMENT ### MANDATORY FIRST STEPS (Agent Execute) @@ -97,89 +96,112 @@ const planAgent = spawn_agent({ --- 
-issue_ids: ${JSON.stringify(currentWave)} -project_root: "${shell('pwd').trim()}" +issue_ids: ["${issueId}"] +project_root: "${projectRoot}" ## Requirements -- Generate solutions for each issue -- Auto-bind single solutions +- Generate solution for this issue +- Auto-bind single solution - For multiple solutions, select the most pragmatic one ` -}) -const planResult = wait({ ids: [planAgent], timeout_ms: 600000 }) -close_agent({ id: planAgent }) + }) + const planResult = wait({ ids: [planAgent], timeout_ms: 600000 }) -// ── Queue Formation ── -// 调用 issue-queue-agent 形成执行队列 -const queueAgent = spawn_agent({ - message: ` -## TASK ASSIGNMENT + if (planResult.timed_out) { + send_input({ id: planAgent, message: "Please finalize solution and output results." }) + wait({ ids: [planAgent], timeout_ms: 120000 }) + } -### MANDATORY FIRST STEPS (Agent Execute) -1. **Read role definition**: ~/.codex/agents/issue-queue-agent.md (MUST read first) + close_agent({ id: planAgent }) ---- + // --- Step 3b: Load solution + write artifact file --- + const solJson = shell(`ccw issue solution ${issueId} --json`) + const solution = JSON.parse(solJson) -issue_ids: ${JSON.stringify(currentWave)} -project_root: "${shell('pwd').trim()}" + const solutionFile = `${sessionDir}/artifacts/solutions/${issueId}.json` + write_file(solutionFile, JSON.stringify({ + issue_id: issueId, + ...solution, + timestamp: new Date().toISOString() + }, null, 2)) -## Requirements -- Order solutions by dependency (DAG) -- Detect conflicts between solutions -- Output execution queue -` -}) -const queueResult = wait({ ids: [queueAgent], timeout_ms: 300000 }) -close_agent({ id: queueAgent }) + // --- Step 3c: Inline conflict check --- + const dependsOn = inlineConflictCheck(issueId, solution, dispatchedSolutions) -// 读取生成的 queue 文件 -const queuePath = '.workflow/issues/queue/execution-queue.json' -const queue = JSON.parse(readFile(queuePath)) + // --- Step 3d: Track + output per-issue JSON --- + 
dispatchedSolutions.push({ issueId, solution, solutionFile }) + + const isLast = remainingIssues.length === 0 + + // Output per-issue JSON for orchestrator + console.log(JSON.stringify({ + status: isLast ? "all_planned" : "issue_ready", + issue_id: issueId, + solution_id: solution.bound?.id || 'N/A', + title: solution.bound?.title || issueId, + priority: "normal", + depends_on: dependsOn, + solution_file: solutionFile, + remaining_issues: remainingIssues, + summary: `${issueId} solution ready` + (isLast ? ` (all ${issueIds.length} issues planned)` : '') + }, null, 2)) + + // Wait for orchestrator send_input before continuing + // (orchestrator will send: "Issue dispatched. Continue.") +} ``` ### Step 4: Output Delivery -输出严格遵循编排器要求的 JSON 格式。 +输出格式(每个 issue 独立输出): ```json { - "wave": 1, - "status": "wave_ready", - "issues": ["ISS-xxx", "ISS-yyy"], - "queue": [ - { - "issue_id": "ISS-xxx", - "solution_id": "SOL-xxx", - "title": "实现功能A", - "priority": "normal", - "depends_on": [] - }, - { - "issue_id": "ISS-yyy", - "solution_id": "SOL-yyy", - "title": "实现功能B", - "priority": "normal", - "depends_on": ["ISS-xxx"] - } - ], - "remaining_issues": ["ISS-zzz"], - "summary": "Wave 1 规划完成: 2 个 issues, 按依赖排序" + "status": "issue_ready", + "issue_id": "ISS-xxx", + "solution_id": "SOL-xxx", + "title": "实现功能A", + "priority": "normal", + "depends_on": [], + "solution_file": ".workflow/.team/PEX-xxx/artifacts/solutions/ISS-xxx.json", + "remaining_issues": ["ISS-yyy", "ISS-zzz"], + "summary": "ISS-xxx solution ready" } ``` **status 取值**: -- `"wave_ready"` — 本波次完成,还有后续波次 -- `"all_planned"` — 所有 issues 已规划完毕(包含最后一个波次的 queue) +- `"issue_ready"` — 本 issue 完成,还有后续 issues +- `"all_planned"` — 所有 issues 已规划完毕(最后一个 issue 的输出) -### Multi-Round: 处理后续 Wave +## Inline Conflict Check -编排器会通过 `send_input` 触发后续波次规划。收到 send_input 后: +```javascript +function inlineConflictCheck(issueId, solution, dispatchedSolutions) { + const currentFiles = solution.bound?.files_touched + || 
solution.bound?.affected_files || [] + const blockedBy = [] -1. 解析 `remaining_issues` 列表 -2. 取下一批(最多 WAVE_SIZE 个) -3. 重复 Step 3 的 solution planning + queue formation -4. 输出下一个 wave 的 JSON -5. 如果没有剩余 issues,`status` 设为 `"all_planned"` + // 1. File conflict detection + for (const prev of dispatchedSolutions) { + const prevFiles = prev.solution.bound?.files_touched + || prev.solution.bound?.affected_files || [] + const overlap = currentFiles.filter(f => prevFiles.includes(f)) + if (overlap.length > 0) { + blockedBy.push(prev.issueId) + } + } + + // 2. Explicit dependencies + const explicitDeps = solution.bound?.dependencies?.on_issues || [] + for (const depId of explicitDeps) { + if (!blockedBy.includes(depId)) { + blockedBy.push(depId) + } + } + + return blockedBy +} +``` ## Plan File Parsing @@ -219,11 +241,11 @@ function parsePlanPhases(planContent) { ### MUST -- 仅执行规划相关工作(需求分析、issue 创建、方案设计、队列编排) +- 仅执行规划相关工作(需求分析、issue 创建、方案设计、冲突检查) - 输出严格遵循 JSON 格式 -- 每波最多 5 个 issues -- 按依赖关系排序队列 -- 复用已有 issue-plan-agent 和 issue-queue-agent +- 按依赖关系标记 depends_on +- 将 solution 写入中间产物文件 +- 每个 issue 完成后立即输出 JSON ### MUST NOT @@ -237,17 +259,19 @@ function parsePlanPhases(planContent) { **ALWAYS**: - Read role definition file as FIRST action -- Output strictly formatted JSON for each wave +- Output strictly formatted JSON for each issue - Include `remaining_issues` for orchestrator to track progress -- Set correct `status` (`wave_ready` vs `all_planned`) +- Set correct `status` (`issue_ready` vs `all_planned`) +- Write solution artifact file before outputting JSON +- Include `solution_file` path in output - Use `ccw issue new --json` for issue creation -- Clean up spawned sub-agents (issue-plan-agent, issue-queue-agent) +- Clean up spawned sub-agents (issue-plan-agent) **NEVER**: - Implement code (executor's job) - Output free-form text instead of structured JSON - Skip solution planning (every issue needs a bound solution) -- Hold more than 5 issues in a single wave +- Skip writing 
solution artifact file ## Error Handling @@ -255,7 +279,8 @@ function parsePlanPhases(planContent) { |----------|--------| | Issue creation fails | Retry once with simplified text, skip if still fails | | issue-plan-agent timeout | Retry once, output partial results | -| issue-queue-agent timeout | Output queue without dependency ordering | +| Inline conflict check failure | Use empty depends_on, continue | +| Solution artifact write failure | Report error in JSON output, continue | | Plan file not found | Report in output JSON: `"error": "plan file not found"` | -| Empty input | Output: `"status": "all_planned", "queue": [], "error": "no input"` | +| Empty input | Output: `"status": "all_planned", "error": "no input"` | | Sub-agent parse failure | Use raw output, include in summary | diff --git a/.codex/skills/team-planex/SKILL.md b/.codex/skills/team-planex/SKILL.md index 700f0cd4..c88c3746 100644 --- a/.codex/skills/team-planex/SKILL.md +++ b/.codex/skills/team-planex/SKILL.md @@ -1,13 +1,13 @@ --- name: team-planex -description: 2-member plan-and-execute pipeline with Wave Pipeline for concurrent planning and execution. Planner decomposes requirements into issues, generates solutions, forms execution queues. Executor implements solutions via configurable backends (agent/codex/gemini). Triggers on "team planex". +description: 2-member plan-and-execute pipeline with per-issue beat pipeline for concurrent planning and execution. Planner decomposes requirements into issues, generates solutions, writes artifacts. Executor implements solutions via configurable backends (agent/codex/gemini). Triggers on "team planex". 
allowed-tools: spawn_agent, wait, send_input, close_agent, AskUserQuestion, Read, Write, Edit, Bash, Glob, Grep argument-hint: " [--exec=agent|codex|gemini|auto] [-y]" --- # Team PlanEx -2 成员边规划边执行团队。通过 Wave Pipeline(波次流水线)实现 planner 和 executor 并行工作:planner 完成一个 wave 的 queue 后,orchestrator 立即 spawn executor agent 处理该 wave,同时 send_input 让 planner 继续下一 wave。 +2 成员边规划边执行团队。通过逐 Issue 节拍流水线实现 planner 和 executor 并行工作:planner 每完成一个 issue 的 solution 后输出 ISSUE_READY 信号,orchestrator 立即 spawn executor agent 处理该 issue,同时 send_input 让 planner 继续下一 issue。 ## Architecture Overview @@ -16,7 +16,7 @@ argument-hint: " [--exec=agent|codex │ Orchestrator (this file) │ │ → Parse input → Spawn planner → Spawn exec │ └────────────────┬─────────────────────────────┘ - │ Wave Pipeline + │ Per-Issue Beat Pipeline ┌───────┴───────┐ ↓ ↓ ┌─────────┐ ┌──────────┐ @@ -25,17 +25,16 @@ argument-hint: " [--exec=agent|codex └─────────┘ └──────────┘ │ │ issue-plan-agent code-developer - issue-queue-agent (or codex/gemini CLI) + (or codex/gemini CLI) ``` ## Agent Registry | Agent | Role File | Responsibility | New/Existing | |-------|-----------|----------------|--------------| -| `planex-planner` | `.codex/skills/team-planex/agents/planex-planner.md` | 需求拆解 → issue 创建 → 方案设计 → 队列编排 | New (skill-specific) | +| `planex-planner` | `.codex/skills/team-planex/agents/planex-planner.md` | 需求拆解 → issue 创建 → 方案设计 → 冲突检查 → 逐 issue 派发 | New (skill-specific) | | `planex-executor` | `.codex/skills/team-planex/agents/planex-executor.md` | 加载 solution → 代码实现 → 测试 → 提交 | New (skill-specific) | | `issue-plan-agent` | `~/.codex/agents/issue-plan-agent.md` | ACE exploration + solution generation + binding | Existing | -| `issue-queue-agent` | `~/.codex/agents/issue-queue-agent.md` | Solution ordering + conflict detection | Existing | | `code-developer` | `~/.codex/agents/code-developer.md` | Code implementation (agent backend) | Existing | ## Input Types @@ -86,11 +85,18 @@ if (explicitExec) { // Interactive: ask user 
for preferences // (orchestrator handles user interaction directly) } + +// Initialize session directory for artifacts +const slug = (issueIds[0] || 'batch').replace(/[^a-zA-Z0-9-]/g, '') +const dateStr = new Date().toISOString().slice(0,10).replace(/-/g,'') +const sessionId = `PEX-${slug}-${dateStr}` +const sessionDir = `.workflow/.team/${sessionId}` +shell(`mkdir -p "${sessionDir}/artifacts/solutions"`) ``` -### Phase 2: Planning (Planner Agent — Deep Interaction) +### Phase 2: Planning (Planner Agent — Per-Issue Beat) -Spawn planner agent for wave-based planning. Uses send_input for multi-wave progression. +Spawn planner agent for per-issue planning. Uses send_input for issue-by-issue progression. ```javascript // Build planner input context @@ -110,7 +116,7 @@ const planner = spawn_agent({ --- -Goal: Decompose requirements into waves of executable solutions +Goal: Decompose requirements into executable solutions (per-issue beat) ## Input ${plannerInput} @@ -119,61 +125,64 @@ ${plannerInput} execution_method: ${executionConfig.executionMethod} code_review: ${executionConfig.codeReviewTool} +## Session Dir +session_dir: ${sessionDir} + ## Deliverables -For EACH wave, output structured wave data: +For EACH issue, output structured data: \`\`\` -WAVE_READY: -wave_number: N -issue_ids: [ISS-xxx, ...] -queue_path: .workflow/issues/queue/execution-queue.json -exec_tasks: [ - { issue_id: "ISS-xxx", solution_id: "SOL-xxx", title: "...", priority: "normal", depends_on: [] }, - ... 
-] +ISSUE_READY: +{ + "issue_id": "ISS-xxx", + "solution_id": "SOL-xxx", + "title": "...", + "priority": "normal", + "depends_on": [], + "solution_file": "${sessionDir}/artifacts/solutions/ISS-xxx.json" +} \`\`\` -After ALL waves planned, output: +After ALL issues planned, output: \`\`\` ALL_PLANNED: -total_waves: N -total_issues: N +{ "total_issues": N } \`\`\` ## Quality bar - Every issue has a bound solution -- Queue respects dependency DAG -- Wave boundaries are logical groupings +- Solution artifact written to file before output +- Inline conflict check determines depends_on ` }) -// Wait for Wave 1 -const wave1 = wait({ ids: [planner], timeout_ms: 600000 }) +// Wait for first ISSUE_READY +const firstIssue = wait({ ids: [planner], timeout_ms: 600000 }) -if (wave1.timed_out) { - send_input({ id: planner, message: "Please finalize current wave and output WAVE_READY." }) +if (firstIssue.timed_out) { + send_input({ id: planner, message: "Please finalize current issue and output ISSUE_READY." }) const retry = wait({ ids: [planner], timeout_ms: 120000 }) } -// Parse wave data from planner output -const wave1Data = parseWaveReady(wave1.status[planner].completed) +// Parse first issue data +const firstIssueData = parseIssueReady(firstIssue.status[planner].completed) ``` -### Phase 3: Wave Pipeline (Planning + Execution Interleaved) +### Phase 3: Per-Issue Beat Pipeline (Planning + Execution Interleaved) -Pipeline: spawn executor for current wave while planner continues next wave. +Pipeline: spawn executor for current issue while planner continues next issue. 
```javascript const allAgentIds = [planner] const executorAgents = [] -let waveNum = 1 let allPlanned = false +let currentIssueOutput = firstIssue.status[planner].completed while (!allPlanned) { - // --- Spawn executor for current wave --- - const waveData = parseWaveReady(currentWaveOutput) + // --- Spawn executor for current issue --- + const issueData = parseIssueReady(currentIssueOutput) - if (waveData && waveData.exec_tasks.length > 0) { + if (issueData) { const executor = spawn_agent({ message: ` ## TASK ASSIGNMENT @@ -185,75 +194,82 @@ while (!allPlanned) { --- -Goal: Implement all solutions in Wave ${waveNum} +Goal: Implement solution for ${issueData.issue_id} -## Wave ${waveNum} Tasks -${JSON.stringify(waveData.exec_tasks, null, 2)} +## Task +${JSON.stringify([issueData], null, 2)} ## Execution Config execution_method: ${executionConfig.executionMethod} code_review: ${executionConfig.codeReviewTool} +## Solution File +solution_file: ${issueData.solution_file} + +## Session Dir +session_dir: ${sessionDir} + ## Deliverables -For each task, output: \`\`\` IMPL_COMPLETE: -issue_id: ISS-xxx +issue_id: ${issueData.issue_id} status: success|failed test_result: pass|fail commit: \`\`\` -After all wave tasks done: -\`\`\` -WAVE_DONE: -wave_number: ${waveNum} -completed: N -failed: N -\`\`\` - ## Quality bar -- All existing tests pass after each implementation +- All existing tests pass after implementation - Code follows project conventions - One commit per solution ` }) allAgentIds.push(executor) - executorAgents.push({ id: executor, wave: waveNum }) + executorAgents.push({ id: executor, issueId: issueData.issue_id }) } - // --- Tell planner to continue next wave --- - if (!allPlanned) { - send_input({ id: planner, message: `Wave ${waveNum} dispatched to executor. 
Continue to Wave ${waveNum + 1}.` }) + // --- Check if ALL_PLANNED was in this output --- + if (currentIssueOutput.includes("ALL_PLANNED")) { + allPlanned = true + break + } - // Wait for both: planner (next wave) + current executor - const activeIds = [planner] - if (executorAgents.length > 0) { - activeIds.push(executorAgents[executorAgents.length - 1].id) - } - - const results = wait({ ids: activeIds, timeout_ms: 600000 }) - - // Check planner output - const plannerOutput = results.status[planner]?.completed || "" - if (plannerOutput.includes("ALL_PLANNED")) { - allPlanned = true - } else if (plannerOutput.includes("WAVE_READY")) { - waveNum++ - currentWaveOutput = plannerOutput + // --- Tell planner to continue next issue --- + send_input({ id: planner, message: `Issue ${issueData?.issue_id || 'unknown'} dispatched. Continue to next issue.` }) + + // Wait for planner (next issue) + const plannerResult = wait({ ids: [planner], timeout_ms: 600000 }) + + if (plannerResult.timed_out) { + send_input({ id: planner, message: "Please finalize current issue and output results." }) + const retry = wait({ ids: [planner], timeout_ms: 120000 }) + currentIssueOutput = retry.status?.[planner]?.completed || "" + } else { + currentIssueOutput = plannerResult.status[planner]?.completed || "" + } + + // Check for ALL_PLANNED + if (currentIssueOutput.includes("ALL_PLANNED")) { + // May contain a final ISSUE_READY before ALL_PLANNED + const finalIssue = parseIssueReady(currentIssueOutput) + if (finalIssue) { + // Spawn one more executor for the last issue + const lastExec = spawn_agent({ + message: `... 
same executor spawn as above for ${finalIssue.issue_id} ...`
+      })
+      allAgentIds.push(lastExec)
+      executorAgents.push({ id: lastExec, issueId: finalIssue.issue_id })
     }
+    allPlanned = true
   }
 }
 
-// Wait for remaining executor agents
-const pendingExecutors = executorAgents
-  .map(e => e.id)
-  .filter(id => !completedIds.includes(id))
+// Wait for all remaining executor agents
+const pendingExecutors = executorAgents.map(e => e.id)
 
 if (pendingExecutors.length > 0) {
   const finalResults = wait({ ids: pendingExecutors, timeout_ms: 900000 })
 
-  // Handle timeout
   if (finalResults.timed_out) {
     const pending = pendingExecutors.filter(id => !finalResults.status[id]?.completed)
     pending.forEach(id => {
@@ -269,21 +285,21 @@ if (pendingExecutors.length > 0) {
 
 ```javascript
 // Collect results from all executors
 const pipelineResults = {
-  waves: [],
+  issues: [],
   totalCompleted: 0,
   totalFailed: 0
 }
 
-executorAgents.forEach(({ id, wave }) => {
+executorAgents.forEach(({ id, issueId }) => {
-  const output = results.status[id]?.completed || ""
+  const output = finalResults.status[id]?.completed || ""
-  const waveDone = parseWaveDone(output)
-  pipelineResults.waves.push({
-    wave,
-    completed: waveDone?.completed || 0,
-    failed: waveDone?.failed || 0
+  const implResult = parseImplComplete(output)
+  pipelineResults.issues.push({
+    issueId,
+    status: implResult?.status || 'unknown',
+    commit: implResult?.commit || 'N/A'
   })
-  pipelineResults.totalCompleted += waveDone?.completed || 0
-  pipelineResults.totalFailed += waveDone?.failed || 0
+  if (implResult?.status === 'success') pipelineResults.totalCompleted++
+  else pipelineResults.totalFailed++
 })
 
 // Output final summary
@@ -291,13 +307,13 @@ console.log(`
 ## PlanEx Pipeline Complete
 
 ### Summary
-- Total Waves: ${waveNum}
-- Total Completed: ${pipelineResults.totalCompleted}
-- Total Failed: ${pipelineResults.totalFailed}
+- Total Issues: ${executorAgents.length}
+- Completed: ${pipelineResults.totalCompleted}
+- Failed: ${pipelineResults.totalFailed}
 
-### Wave Details 
-${pipelineResults.waves.map(w => - `- Wave ${w.wave}: ${w.completed} completed, ${w.failed} failed` +### Issue Details +${pipelineResults.issues.map(i => + `- ${i.issueId}: ${i.status} (commit: ${i.commit})` ).join('\n')} `) @@ -315,27 +331,26 @@ Since Codex agents have isolated contexts, use file-based coordination: | File | Purpose | Writer | Reader | |------|---------|--------|--------| -| `.workflow/.team/PEX-{slug}-{date}/wave-{N}.json` | Wave plan data | planner | orchestrator | -| `.workflow/.team/PEX-{slug}-{date}/exec-{issueId}.json` | Execution result | executor | orchestrator | -| `.workflow/.team/PEX-{slug}-{date}/pipeline-log.ndjson` | Event log | both | orchestrator | -| `.workflow/issues/queue/execution-queue.json` | Execution queue | planner (via issue-queue-agent) | executor | +| `{sessionDir}/artifacts/solutions/{issueId}.json` | Solution artifact | planner | executor | +| `{sessionDir}/exec-{issueId}.json` | Execution result | executor | orchestrator | +| `{sessionDir}/pipeline-log.ndjson` | Event log | both | orchestrator | -### Wave Data Format +### Solution Artifact Format ```json { - "wave_number": 1, - "issue_ids": ["ISS-20260215-001", "ISS-20260215-002"], - "queue_path": ".workflow/issues/queue/execution-queue.json", - "exec_tasks": [ - { - "issue_id": "ISS-20260215-001", - "solution_id": "SOL-001", - "title": "Implement auth module", - "priority": "high", - "depends_on": [] - } - ] + "issue_id": "ISS-20260215-001", + "bound": { + "id": "SOL-001", + "title": "Implement auth module", + "tasks": [...], + "files_touched": ["src/auth/login.ts"] + }, + "execution_config": { + "execution_method": "Agent", + "code_review": "Skip" + }, + "timestamp": "2026-02-15T10:00:00Z" } ``` @@ -358,7 +373,7 @@ Since Codex agents have isolated contexts, use file-based coordination: | Timeout Scenario | Action | |-----------------|--------| -| Planner wave timeout | send_input to urge convergence, retry wait | +| Planner issue timeout | send_input to urge 
convergence, retry wait | | Executor impl timeout | send_input to finalize, record partial result | | All agents timeout | Log error, abort with partial state | @@ -380,28 +395,27 @@ allAgentIds.forEach(id => { | Scenario | Resolution | |----------|------------| -| Planner wave failure | Retry once via send_input, then abort pipeline | -| Executor impl failure | Record failure, continue with next wave tasks | +| Planner issue failure | Retry once via send_input, then skip issue | +| Executor impl failure | Record failure, continue with next issue | | No issues created from text | Report to user, abort | | Solution generation failure | Skip issue, continue with remaining | -| Queue formation failure | Create exec tasks without DAG ordering | +| Inline conflict check failure | Use empty depends_on, continue | | Pipeline stall (no progress) | Timeout handling → urge convergence → abort | | Missing role file | Log error, use inline fallback instructions | ## Helper Functions ```javascript -function parseWaveReady(output) { - const match = output.match(/WAVE_READY:\s*\n([\s\S]*?)(?=\n```|$)/) +function parseIssueReady(output) { + const match = output.match(/ISSUE_READY:\s*\n([\s\S]*?)(?=\n```|$)/) if (!match) return null - // Parse structured wave data - return JSON.parse(match[1]) + try { return JSON.parse(match[1]) } catch { return null } } -function parseWaveDone(output) { - const match = output.match(/WAVE_DONE:\s*\n([\s\S]*?)(?=\n```|$)/) +function parseImplComplete(output) { + const match = output.match(/IMPL_COMPLETE:\s*\n([\s\S]*?)(?=\n```|$)/) if (!match) return null - return JSON.parse(match[1]) + try { return JSON.parse(match[1]) } catch { return null } } function resolveExecutor(method, taskCount) { diff --git a/.codex/skills/team-planex/agents/planex-executor.md b/.codex/skills/team-planex/agents/planex-executor.md index 52331db4..66aeda2a 100644 --- a/.codex/skills/team-planex/agents/planex-executor.md +++ 
b/.codex/skills/team-planex/agents/planex-executor.md @@ -1,20 +1,20 @@ --- name: planex-executor description: | - Execution agent for PlanEx pipeline. Loads solutions, routes to - configurable backends (agent/codex/gemini CLI), runs tests, commits. - Processes all tasks within a single wave assignment. + Execution agent for PlanEx pipeline. Loads solutions from artifact files + (with CLI fallback), routes to configurable backends (agent/codex/gemini CLI), + runs tests, commits. Processes all tasks within a single assignment. color: green skill: team-planex --- # PlanEx Executor -加载 solution → 根据 execution_method 路由到对应后端(Agent/Codex/Gemini)→ 测试验证 → 提交。每次被 spawn 时处理一个 wave 的所有 exec tasks,按依赖顺序执行。 +从中间产物文件加载 solution(兼容 CLI fallback)→ 根据 execution_method 路由到对应后端(Agent/Codex/Gemini)→ 测试验证 → 提交。每次被 spawn 时处理分配的 exec tasks,按依赖顺序执行。 ## Core Capabilities -1. **Solution Loading**: 从 issue system 加载 bound solution plan +1. **Solution Loading**: 从中间产物文件加载 bound solution plan(兼容 CLI fallback) 2. **Multi-Backend Routing**: 根据 execution_method 选择 agent/codex/gemini 后端 3. **Test Verification**: 实现后运行测试验证 4. **Commit Management**: 每个 solution 完成后 git commit @@ -209,9 +209,22 @@ for (const task of sorted) { const issueId = task.issue_id const taskStartTime = Date.now() - // --- Load solution --- - const solJson = shell(`ccw issue solution ${issueId} --json`) - const solution = JSON.parse(solJson) + // --- Load solution (dual-mode: artifact file first, CLI fallback) --- + let solution + const solutionFile = task.solution_file + if (solutionFile) { + try { + const solutionData = JSON.parse(read_file(solutionFile)) + solution = solutionData.bound ? 
solutionData : { bound: solutionData } + } catch { + // Fallback to CLI + const solJson = shell(`ccw issue solution ${issueId} --json`) + solution = JSON.parse(solJson) + } + } else { + const solJson = shell(`ccw issue solution ${issueId} --json`) + solution = JSON.parse(solJson) + } if (!solution.bound) { recordTaskStart(issueId, task.title, 'N/A', '') diff --git a/.codex/skills/team-planex/agents/planex-planner.md b/.codex/skills/team-planex/agents/planex-planner.md index fdb41816..07033940 100644 --- a/.codex/skills/team-planex/agents/planex-planner.md +++ b/.codex/skills/team-planex/agents/planex-planner.md @@ -2,22 +2,23 @@ name: planex-planner description: | Planning lead for PlanEx pipeline. Decomposes requirements into issues, - generates solutions via issue-plan-agent, forms execution queues via - issue-queue-agent, outputs wave-structured data for orchestrator dispatch. + generates solutions via issue-plan-agent, performs inline conflict check, + writes solution artifacts. Per-issue output for orchestrator dispatch. color: blue skill: team-planex --- # PlanEx Planner -需求拆解 → issue 创建 → 方案设计 → 队列编排 → 输出 wave 数据。内部 spawn issue-plan-agent 和 issue-queue-agent 子代理,通过 Wave Pipeline 持续推进。每完成一个 wave 立即输出 WAVE_READY,等待 orchestrator send_input 继续下一 wave。 +需求拆解 → issue 创建 → 方案设计 → inline 冲突检查 → 写中间产物 → 逐 issue 输出。内部 spawn issue-plan-agent 子代理,每完成一个 issue 的 solution 立即输出 ISSUE_READY,等待 orchestrator send_input 继续下一 issue。 ## Core Capabilities 1. **Requirement Decomposition**: 将需求文本/plan 文件拆解为独立 issues 2. **Solution Planning**: 通过 issue-plan-agent 为每个 issue 生成 solution -3. **Queue Formation**: 通过 issue-queue-agent 排序 solutions 并检测冲突 -4. **Wave Output**: 每个 wave 完成后输出结构化 WAVE_READY 数据 +3. **Inline Conflict Check**: 基于 files_touched 重叠检测 + 显式依赖排序 +4. **Solution Artifacts**: 将 solution 写入中间产物文件供 executor 加载 +5. 
**Per-Issue Output**: 每个 issue 完成后立即输出 ISSUE_READY 数据 ## Execution Process @@ -32,7 +33,8 @@ skill: team-planex - **Goal**: What to achieve - **Input**: Issue IDs / text / plan file - **Execution Config**: execution_method + code_review settings - - **Deliverables**: WAVE_READY + ALL_PLANNED structured output + - **Session Dir**: Path for writing solution artifacts + - **Deliverables**: ISSUE_READY + ALL_PLANNED structured output ### Step 2: Input Parsing & Issue Creation @@ -40,6 +42,8 @@ Parse the input from TASK ASSIGNMENT and create issues as needed. ```javascript const input = taskAssignment.input +const sessionDir = taskAssignment.session_dir +const executionConfig = taskAssignment.execution_config // 1) 已有 Issue IDs const issueIds = input.match(/ISS-\d{8}-\d{6}/g) || [] @@ -47,7 +51,6 @@ const issueIds = input.match(/ISS-\d{8}-\d{6}/g) || [] // 2) 文本输入 → 创建 issue const textMatch = input.match(/text:\s*(.+)/) if (textMatch && issueIds.length === 0) { - // Use ccw issue create CLI to create issue from text const result = shell(`ccw issue create --data '{"title":"${textMatch[1]}","description":"${textMatch[1]}"}' --json`) const newIssue = JSON.parse(result) issueIds.push(newIssue.id) @@ -58,11 +61,10 @@ const planMatch = input.match(/plan_file:\s*(\S+)/) if (planMatch && issueIds.length === 0) { const planContent = read_file(planMatch[1]) - // Check if execution-plan.json from req-plan-with-file try { const content = JSON.parse(planContent) if (content.waves && content.issue_ids) { - // execution-plan format: use wave structure directly + // execution-plan format: use issue_ids directly executionPlan = content issueIds = content.issue_ids } @@ -77,30 +79,20 @@ if (planMatch && issueIds.length === 0) { } ``` -### Step 3: Wave-Based Solution Planning +### Step 3: Per-Issue Solution Planning & Artifact Writing -Group issues into waves, spawn sub-agents for each wave. +Process each issue individually: plan → write artifact → conflict check → output ISSUE_READY. 
```javascript const projectRoot = shell('cd . && pwd').trim() +const dispatchedSolutions = [] -// Group into waves (max 5 per wave, or use execution-plan wave structure) -const WAVE_SIZE = 5 -let waves -if (executionPlan) { - waves = executionPlan.waves.map(w => w.issue_ids) -} else { - waves = [] - for (let i = 0; i < issueIds.length; i += WAVE_SIZE) { - waves.push(issueIds.slice(i, i + WAVE_SIZE)) - } -} +shell(`mkdir -p "${sessionDir}/artifacts/solutions"`) -let waveNum = 0 -for (const waveIssues of waves) { - waveNum++ +for (let i = 0; i < issueIds.length; i++) { + const issueId = issueIds[i] - // --- Step 3a: Spawn issue-plan-agent for solutions --- + // --- Step 3a: Spawn issue-plan-agent for single issue --- const planAgent = spawn_agent({ message: ` ## TASK ASSIGNMENT @@ -112,116 +104,121 @@ for (const waveIssues of waves) { --- -Goal: Generate solutions for Wave ${waveNum} issues +Goal: Generate solution for issue ${issueId} -issue_ids: ${JSON.stringify(waveIssues)} +issue_ids: ["${issueId}"] project_root: "${projectRoot}" ## Requirements -- Generate solutions for each issue -- Auto-bind single solutions +- Generate solution for this issue +- Auto-bind single solution - For multiple solutions, select the most pragmatic one ## Deliverables -Structured output with solution bindings per issue. +Structured output with solution binding. ` }) const planResult = wait({ ids: [planAgent], timeout_ms: 600000 }) if (planResult.timed_out) { - send_input({ id: planAgent, message: "Please finalize solutions and output current results." }) + send_input({ id: planAgent, message: "Please finalize solution and output results." 
}) wait({ ids: [planAgent], timeout_ms: 120000 }) } close_agent({ id: planAgent }) - // --- Step 3b: Spawn issue-queue-agent for ordering --- - const queueAgent = spawn_agent({ - message: ` -## TASK ASSIGNMENT + // --- Step 3b: Load solution + write artifact file --- + const solJson = shell(`ccw issue solution ${issueId} --json`) + const solution = JSON.parse(solJson) -### MANDATORY FIRST STEPS (Agent Execute) -1. **Read role definition**: ~/.codex/agents/issue-queue-agent.md (MUST read first) -2. Read: .workflow/project-tech.json + const solutionFile = `${sessionDir}/artifacts/solutions/${issueId}.json` + write_file(solutionFile, JSON.stringify({ + issue_id: issueId, + ...solution, + execution_config: { + execution_method: executionConfig.executionMethod, + code_review: executionConfig.codeReviewTool + }, + timestamp: new Date().toISOString() + }, null, 2)) ---- + // --- Step 3c: Inline conflict check --- + const blockedBy = inlineConflictCheck(issueId, solution, dispatchedSolutions) -Goal: Form execution queue for Wave ${waveNum} + // --- Step 3d: Output ISSUE_READY for orchestrator --- + dispatchedSolutions.push({ issueId, solution, solutionFile }) -issue_ids: ${JSON.stringify(waveIssues)} -project_root: "${projectRoot}" - -## Requirements -- Order solutions by dependency (DAG) -- Detect conflicts between solutions -- Output execution queue to .workflow/issues/queue/execution-queue.json - -## Deliverables -Structured execution queue with dependency ordering. -` - }) - - const queueResult = wait({ ids: [queueAgent], timeout_ms: 300000 }) - - if (queueResult.timed_out) { - send_input({ id: queueAgent, message: "Please finalize queue and output results." 
}) - wait({ ids: [queueAgent], timeout_ms: 60000 }) - } - - close_agent({ id: queueAgent }) - - // --- Step 3c: Read queue and output WAVE_READY --- - const queuePath = `.workflow/issues/queue/execution-queue.json` - const queue = JSON.parse(read_file(queuePath)) - - const execTasks = queue.queue.map(entry => ({ - issue_id: entry.issue_id, - solution_id: entry.solution_id, - title: entry.title || entry.issue_id, - priority: entry.priority || "normal", - depends_on: entry.depends_on || [] - })) - - // Output structured wave data for orchestrator console.log(` -WAVE_READY: +ISSUE_READY: ${JSON.stringify({ - wave_number: waveNum, - issue_ids: waveIssues, - queue_path: queuePath, - exec_tasks: execTasks -}, null, 2)} + issue_id: issueId, + solution_id: solution.bound?.id || 'N/A', + title: solution.bound?.title || issueId, + priority: "normal", + depends_on: blockedBy, + solution_file: solutionFile + }, null, 2)} `) - // Wait for orchestrator send_input before continuing to next wave - // (orchestrator will send: "Wave N dispatched. Continue to Wave N+1.") + // Wait for orchestrator send_input before continuing to next issue + // (orchestrator will send: "Issue dispatched. Continue to next issue.") } ``` ### Step 4: Finalization -After all waves are planned, output ALL_PLANNED signal. +After all issues are planned, output ALL_PLANNED signal. ```javascript console.log(` ALL_PLANNED: ${JSON.stringify({ - total_waves: waveNum, total_issues: issueIds.length }, null, 2)} `) ``` +## Inline Conflict Check + +```javascript +function inlineConflictCheck(issueId, solution, dispatchedSolutions) { + const currentFiles = solution.bound?.files_touched + || solution.bound?.affected_files || [] + const blockedBy = [] + + // 1. 
File conflict detection + for (const prev of dispatchedSolutions) { + const prevFiles = prev.solution.bound?.files_touched + || prev.solution.bound?.affected_files || [] + const overlap = currentFiles.filter(f => prevFiles.includes(f)) + if (overlap.length > 0) { + blockedBy.push(prev.issueId) + } + } + + // 2. Explicit dependencies + const explicitDeps = solution.bound?.dependencies?.on_issues || [] + for (const depId of explicitDeps) { + if (!blockedBy.includes(depId)) { + blockedBy.push(depId) + } + } + + return blockedBy +} +``` + ## Role Boundaries ### MUST - 仅执行规划和拆解工作 -- 每个 wave 完成后输出 WAVE_READY 结构化数据 -- 所有 wave 完成后输出 ALL_PLANNED -- 通过 spawn_agent 调用 issue-plan-agent 和 issue-queue-agent -- 等待 orchestrator send_input 才继续下一 wave +- 每个 issue 完成后输出 ISSUE_READY 结构化数据 +- 所有 issues 完成后输出 ALL_PLANNED +- 通过 spawn_agent 调用 issue-plan-agent(逐个 issue) +- 等待 orchestrator send_input 才继续下一 issue +- 将 solution 写入中间产物文件 ### MUST NOT @@ -267,16 +264,17 @@ function parsePlanPhases(planContent) { **ALWAYS**: - Read role definition file as FIRST action (Step 1) -- Follow structured output template (WAVE_READY / ALL_PLANNED) +- Follow structured output template (ISSUE_READY / ALL_PLANNED) - Stay within planning boundaries (no code implementation) -- Spawn issue-plan-agent and issue-queue-agent for each wave -- Include all issue IDs and solution references in wave data +- Spawn issue-plan-agent for each issue individually +- Write solution artifact file before outputting ISSUE_READY +- Include solution_file path in ISSUE_READY data **NEVER**: - Modify source code files - Skip context loading (Step 1) - Produce unstructured or free-form output -- Continue to next wave without outputting WAVE_READY +- Continue to next issue without outputting ISSUE_READY - Close without outputting ALL_PLANNED ## Error Handling @@ -285,7 +283,8 @@ function parsePlanPhases(planContent) { |----------|--------| | Issue creation failure | Retry once with simplified text, report in output | | 
issue-plan-agent timeout | Urge convergence via send_input, close and report partial | -| issue-queue-agent failure | Create exec tasks without DAG ordering | +| Inline conflict check failure | Use empty depends_on, continue | +| Solution artifact write failure | Report error, continue with ISSUE_READY output | | Plan file not found | Report error in output with CLARIFICATION_NEEDED | | Empty input (no issues, no text) | Output CLARIFICATION_NEEDED asking for requirements | | Sub-agent produces invalid output | Report error, continue with available data | diff --git a/ccw/frontend/src/components/terminal-dashboard/CliConfigModal.tsx b/ccw/frontend/src/components/terminal-dashboard/CliConfigModal.tsx index db8de677..bbb89f29 100644 --- a/ccw/frontend/src/components/terminal-dashboard/CliConfigModal.tsx +++ b/ccw/frontend/src/components/terminal-dashboard/CliConfigModal.tsx @@ -129,7 +129,9 @@ export function CliConfigModal({ const toolConfig = cliTools[tool]; if (!toolConfig) return []; if (toolConfig.availableModels?.length) return toolConfig.availableModels; - const models = [toolConfig.primaryModel]; + // Build models from primaryModel/secondaryModel, filtering out undefined + const models: string[] = []; + if (toolConfig.primaryModel) models.push(toolConfig.primaryModel); if (toolConfig.secondaryModel && toolConfig.secondaryModel !== toolConfig.primaryModel) { models.push(toolConfig.secondaryModel); } diff --git a/ccw/frontend/src/hooks/useSystemSettings.ts b/ccw/frontend/src/hooks/useSystemSettings.ts index 2a182e78..ca82d3b1 100644 --- a/ccw/frontend/src/hooks/useSystemSettings.ts +++ b/ccw/frontend/src/hooks/useSystemSettings.ts @@ -16,11 +16,15 @@ import { fetchCliToolStatus, fetchCcwInstallations, upgradeCcwInstallation, + exportSettings, + importSettings, type ChineseResponseStatus, type WindowsPlatformStatus, type CodexCliEnhancementStatus, type CcwInstallStatus, type CcwInstallationManifest, + type ExportedSettings, + type ImportOptions, } from 
'../lib/api'; // Query key factory @@ -32,6 +36,7 @@ export const systemSettingsKeys = { aggregatedStatus: () => [...systemSettingsKeys.all, 'aggregatedStatus'] as const, cliToolStatus: () => [...systemSettingsKeys.all, 'cliToolStatus'] as const, ccwInstallations: () => [...systemSettingsKeys.all, 'ccwInstallations'] as const, + exportSettings: () => [...systemSettingsKeys.all, 'exportSettings'] as const, }; const STALE_TIME = 60 * 1000; // 1 minute @@ -285,3 +290,39 @@ export function useUpgradeCcwInstallation() { error: mutation.error, }; } + +// ======================================== +// Settings Export/Import Hooks +// ======================================== + +export function useExportSettings() { + const mutation = useMutation({ + mutationFn: exportSettings, + }); + + return { + exportSettings: mutation.mutateAsync, + isPending: mutation.isPending, + error: mutation.error, + }; +} + +export function useImportSettings() { + const queryClient = useQueryClient(); + + const mutation = useMutation({ + mutationFn: ({ data, options }: { data: ExportedSettings; options?: ImportOptions }) => + importSettings(data, options), + onSuccess: () => { + // Invalidate all system settings queries to refresh the UI + queryClient.invalidateQueries({ queryKey: systemSettingsKeys.all }); + }, + }); + + return { + importSettings: (data: ExportedSettings, options?: ImportOptions) => + mutation.mutateAsync({ data, options }), + isPending: mutation.isPending, + error: mutation.error, + }; +} diff --git a/ccw/frontend/src/lib/api.ts b/ccw/frontend/src/lib/api.ts index 70d3bb6c..ab687450 100644 --- a/ccw/frontend/src/lib/api.ts +++ b/ccw/frontend/src/lib/api.ts @@ -6482,6 +6482,68 @@ export async function upgradeCcwInstallation( }); } +// ========== CLI Settings Export/Import API ========== + +/** + * Exported settings structure from backend + */ +export interface ExportedSettings { + version: string; + exportedAt: string; + settings: { + cliTools?: Record; + chineseResponse?: { + 
claudeEnabled: boolean; + codexEnabled: boolean; + }; + windowsPlatform?: { + enabled: boolean; + }; + codexCliEnhancement?: { + enabled: boolean; + }; + }; +} + +/** + * Import options for settings import + */ +export interface ImportOptions { + overwrite?: boolean; + dryRun?: boolean; +} + +/** + * Import result from backend + */ +export interface ImportResult { + success: boolean; + imported: number; + skipped: number; + errors: string[]; + importedIds: string[]; +} + +/** + * Export CLI settings to JSON file + */ +export async function exportSettings(): Promise { + return fetchApi('/api/cli/settings/export'); +} + +/** + * Import CLI settings from JSON data + */ +export async function importSettings( + data: ExportedSettings, + options?: ImportOptions +): Promise { + return fetchApi('/api/cli/settings/import', { + method: 'POST', + body: JSON.stringify({ data, options }), + }); +} + // ========== CCW Tools API ========== /** diff --git a/ccw/frontend/src/locales/en/settings.json b/ccw/frontend/src/locales/en/settings.json index 8d1ad0e2..7c952ae6 100644 --- a/ccw/frontend/src/locales/en/settings.json +++ b/ccw/frontend/src/locales/en/settings.json @@ -77,7 +77,19 @@ "refreshConfig": "Refresh Config", "migrationWarning": "Old format detected, please disable and re-enable to migrate", "enabled": "Enabled", - "disabled": "Disabled" + "disabled": "Disabled", + "export": "Export", + "import": "Import", + "exporting": "Exporting...", + "importing": "Importing...", + "exportImportHint": "Export or import CLI settings configuration", + "exportSuccess": "Settings exported successfully", + "exportError": "Failed to export settings", + "importSuccess": "Settings imported successfully ({imported} imported, {skipped} skipped)", + "importError": "Failed to import settings", + "importInvalidFile": "Please select a valid JSON file", + "importInvalidJson": "Invalid JSON format in file", + "importInvalidStructure": "Invalid settings file structure" }, "systemStatus": { "title": 
"CCW Installation", diff --git a/ccw/frontend/src/locales/zh/settings.json b/ccw/frontend/src/locales/zh/settings.json index 95b8af98..bb630bd1 100644 --- a/ccw/frontend/src/locales/zh/settings.json +++ b/ccw/frontend/src/locales/zh/settings.json @@ -77,7 +77,19 @@ "refreshConfig": "刷新配置", "migrationWarning": "检测到旧格式,请关闭后重新启用以迁移", "enabled": "已启用", - "disabled": "已禁用" + "disabled": "已禁用", + "export": "导出", + "import": "导入", + "exporting": "导出中...", + "importing": "导入中...", + "exportImportHint": "导出或导入 CLI 设置配置", + "exportSuccess": "设置导出成功", + "exportError": "导出设置失败", + "importSuccess": "设置导入成功(已导入 {imported} 项,跳过 {skipped} 项)", + "importError": "导入设置失败", + "importInvalidFile": "请选择有效的 JSON 文件", + "importInvalidJson": "文件 JSON 格式无效", + "importInvalidStructure": "设置文件结构无效" }, "systemStatus": { "title": "CCW 安装", diff --git a/ccw/frontend/src/pages/SettingsPage.tsx b/ccw/frontend/src/pages/SettingsPage.tsx index d77b7ff4..4f604eff 100644 --- a/ccw/frontend/src/pages/SettingsPage.tsx +++ b/ccw/frontend/src/pages/SettingsPage.tsx @@ -3,7 +3,7 @@ // ======================================== // Application settings and configuration with CLI tools management -import { useState, useCallback, useEffect } from 'react'; +import { useState, useCallback, useEffect, useRef } from 'react'; import { useIntl } from 'react-intl'; import { Settings, @@ -30,6 +30,8 @@ import { File, ArrowUpCircle, Save, + Download, + Upload, } from 'lucide-react'; import { Card } from '@/components/ui/Card'; import { Button } from '@/components/ui/Button'; @@ -54,7 +56,10 @@ import { useCliToolStatus, useCcwInstallations, useUpgradeCcwInstallation, + useExportSettings, + useImportSettings, } from '@/hooks/useSystemSettings'; +import type { ExportedSettings } from '@/lib/api'; import { RemoteNotificationSection } from '@/components/settings/RemoteNotificationSection'; import { A2UIPreferencesSection } from '@/components/settings/A2UIPreferencesSection'; @@ -520,6 +525,72 @@ function 
ResponseLanguageSection() { const { data: cliEnhStatus, isLoading: cliEnhLoading } = useCodexCliEnhancementStatus(); const { toggle: toggleCliEnh, isPending: cliEnhToggling } = useToggleCodexCliEnhancement(); const { refresh: refreshCliEnh, isPending: refreshing } = useRefreshCodexCliEnhancement(); + const { exportSettings: doExport, isPending: exporting } = useExportSettings(); + const { importSettings: doImport, isPending: importing } = useImportSettings(); + const fileInputRef = useRef(null); + + const handleExport = useCallback(async () => { + try { + const data = await doExport(); + const blob = new Blob([JSON.stringify(data, null, 2)], { type: 'application/json' }); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + const timestamp = new Date().toISOString().replace(/[:.]/g, '-').slice(0, 19); + a.href = url; + a.download = `ccw-settings-${timestamp}.json`; + document.body.appendChild(a); + a.click(); + document.body.removeChild(a); + URL.revokeObjectURL(url); + toast.success(formatMessage({ id: 'settings.responseLanguage.exportSuccess' })); + } catch (error) { + toast.error(formatMessage({ id: 'settings.responseLanguage.exportError' })); + } + }, [doExport, formatMessage]); + + const handleFileImport = useCallback(async (e: React.ChangeEvent) => { + const file = e.target.files?.[0]; + if (!file) return; + + // Reset file input so the same file can be re-selected + e.target.value = ''; + + // Validate file type + if (!file.name.endsWith('.json') && file.type !== 'application/json') { + toast.error(formatMessage({ id: 'settings.responseLanguage.importInvalidFile' })); + return; + } + + try { + const text = await file.text(); + const data = JSON.parse(text) as ExportedSettings; + + // Validate basic structure + if (!data.version || !data.settings) { + toast.error(formatMessage({ id: 'settings.responseLanguage.importInvalidStructure' })); + return; + } + + const result = await doImport(data); + + if (result.success) { + 
toast.success( + formatMessage( + { id: 'settings.responseLanguage.importSuccess' }, + { imported: result.imported, skipped: result.skipped } + ) + ); + } else { + toast.error(formatMessage({ id: 'settings.responseLanguage.importError' })); + } + } catch (error) { + if (error instanceof SyntaxError) { + toast.error(formatMessage({ id: 'settings.responseLanguage.importInvalidJson' })); + } else { + toast.error(formatMessage({ id: 'settings.responseLanguage.importError' })); + } + } + }, [doImport, formatMessage]); return ( @@ -542,9 +613,17 @@ function ResponseLanguageSection() { disabled={chineseLoading || chineseToggling} onClick={() => toggleChinese(!chineseStatus?.claudeEnabled, 'claude')} > - {chineseStatus?.claudeEnabled - ? formatMessage({ id: 'settings.responseLanguage.enabled' }) - : formatMessage({ id: 'settings.responseLanguage.disabled' })} + {chineseStatus?.claudeEnabled ? ( + <> + + {formatMessage({ id: 'settings.responseLanguage.enabled' })} + + ) : ( + <> + + {formatMessage({ id: 'settings.responseLanguage.disabled' })} + + )}

@@ -566,9 +645,17 @@ function ResponseLanguageSection() { disabled={chineseLoading || chineseToggling} onClick={() => toggleChinese(!chineseStatus?.codexEnabled, 'codex')} > - {chineseStatus?.codexEnabled - ? formatMessage({ id: 'settings.responseLanguage.enabled' }) - : formatMessage({ id: 'settings.responseLanguage.disabled' })} + {chineseStatus?.codexEnabled ? ( + <> + + {formatMessage({ id: 'settings.responseLanguage.enabled' })} + + ) : ( + <> + + {formatMessage({ id: 'settings.responseLanguage.disabled' })} + + )}

@@ -596,9 +683,17 @@ function ResponseLanguageSection() { disabled={windowsLoading || windowsToggling} onClick={() => toggleWindows(!windowsStatus?.enabled)} > - {windowsStatus?.enabled - ? formatMessage({ id: 'settings.responseLanguage.enabled' }) - : formatMessage({ id: 'settings.responseLanguage.disabled' })} + {windowsStatus?.enabled ? ( + <> + + {formatMessage({ id: 'settings.responseLanguage.enabled' })} + + ) : ( + <> + + {formatMessage({ id: 'settings.responseLanguage.disabled' })} + + )}

@@ -634,9 +729,17 @@ function ResponseLanguageSection() { disabled={cliEnhLoading || cliEnhToggling} onClick={() => toggleCliEnh(!cliEnhStatus?.enabled)} > - {cliEnhStatus?.enabled - ? formatMessage({ id: 'settings.responseLanguage.enabled' }) - : formatMessage({ id: 'settings.responseLanguage.disabled' })} + {cliEnhStatus?.enabled ? ( + <> + + {formatMessage({ id: 'settings.responseLanguage.enabled' })} + + ) : ( + <> + + {formatMessage({ id: 'settings.responseLanguage.disabled' })} + + )} @@ -650,6 +753,62 @@ function ResponseLanguageSection() { )} + + {/* Export/Import Actions */} +

+
+

+ {formatMessage({ id: 'settings.responseLanguage.exportImportHint' })} +

+
+ + + +
+
+
); } diff --git a/ccw/frontend/src/stores/configStore.ts b/ccw/frontend/src/stores/configStore.ts index f79041b3..1f108c7e 100644 --- a/ccw/frontend/src/stores/configStore.ts +++ b/ccw/frontend/src/stores/configStore.ts @@ -14,40 +14,30 @@ import type { A2UIPreferences, } from '../types/store'; -// Default CLI tools configuration +// Default CLI tools configuration - no model defaults, models come from user's cli-tools.json const defaultCliTools: Record = { gemini: { enabled: true, - primaryModel: 'gemini-2.5-pro', - secondaryModel: 'gemini-2.5-flash', tags: ['analysis', 'debug'], type: 'builtin', }, qwen: { enabled: true, - primaryModel: 'coder-model', - secondaryModel: 'coder-model', tags: [], type: 'builtin', }, codex: { enabled: true, - primaryModel: 'gpt-5.2', - secondaryModel: 'gpt-5.2', tags: [], type: 'builtin', }, claude: { enabled: true, - primaryModel: 'sonnet', - secondaryModel: 'haiku', tags: [], type: 'builtin', }, opencode: { enabled: true, - primaryModel: 'opencode/glm-4.7-free', - secondaryModel: 'opencode/glm-4.7-free', tags: [], type: 'builtin', }, diff --git a/ccw/frontend/src/types/store.ts b/ccw/frontend/src/types/store.ts index b6e0f778..170e5522 100644 --- a/ccw/frontend/src/types/store.ts +++ b/ccw/frontend/src/types/store.ts @@ -387,8 +387,8 @@ export type WorkflowStore = WorkflowState & WorkflowActions; export interface CliToolConfig { enabled: boolean; - primaryModel: string; - secondaryModel: string; + primaryModel?: string; + secondaryModel?: string; tags: string[]; type: 'builtin' | 'cli-wrapper' | 'api-endpoint'; /** Path to .env file for environment variables (gemini/qwen/opencode) */ diff --git a/ccw/src/config/cli-settings-manager.ts b/ccw/src/config/cli-settings-manager.ts index 832dbab4..bf547877 100644 --- a/ccw/src/config/cli-settings-manager.ts +++ b/ccw/src/config/cli-settings-manager.ts @@ -16,6 +16,9 @@ import { SettingsListResponse, SettingsOperationResult, SaveEndpointRequest, + ExportedSettings, + ImportOptions, + 
ImportResult, validateSettings, createDefaultSettings } from '../types/cli-settings.js'; @@ -471,3 +474,152 @@ export function validateEndpointName(name: string): { valid: boolean; error?: st return { valid: true }; } + +/** + * Export all CLI endpoint settings + * Returns an ExportedSettings object with version, timestamp, and all endpoints + */ +export function exportAllSettings(): ExportedSettings { + const { endpoints } = listAllSettings(); + + return { + version: '1.0.0', + timestamp: new Date().toISOString(), + endpoints + }; +} + +/** + * Import settings from an ExportedSettings object + * Validates and applies imported settings with configurable conflict resolution + */ +export function importSettings( + exportedData: unknown, + options: ImportOptions = {} +): ImportResult { + const { + conflictStrategy = 'skip', + skipInvalid = true, + disableImported = false + } = options; + + const result: ImportResult = { + success: false, + imported: 0, + skipped: 0, + errors: [], + importedIds: [] + }; + + // Validate exported data structure + if (!exportedData || typeof exportedData !== 'object') { + result.errors.push('Invalid export data: must be an object'); + return result; + } + + const data = exportedData as Record; + + // Check for required fields + if (!('endpoints' in data) || !Array.isArray(data.endpoints)) { + result.errors.push('Invalid export data: missing or invalid endpoints array'); + return result; + } + + // Validate version (for future migration support) + if ('version' in data && typeof data.version !== 'string') { + result.errors.push('Invalid export data: version must be a string'); + return result; + } + + const endpoints = data.endpoints as unknown[]; + const existingIndex = loadIndex(); + + for (let i = 0; i < endpoints.length; i++) { + const ep = endpoints[i]; + + // Validate endpoint structure + if (!ep || typeof ep !== 'object') { + if (!skipInvalid) { + result.errors.push(`Endpoint at index ${i}: invalid structure`); + } + 
result.skipped++; + continue; + } + + const endpoint = ep as Record; + + // Check required fields + if (!endpoint.name || typeof endpoint.name !== 'string') { + if (!skipInvalid) { + result.errors.push(`Endpoint at index ${i}: missing or invalid name`); + } + result.skipped++; + continue; + } + + if (!endpoint.settings || typeof endpoint.settings !== 'object') { + if (!skipInvalid) { + result.errors.push(`Endpoint at index ${i}: missing or invalid settings`); + } + result.skipped++; + continue; + } + + // Validate settings using provider-aware validation + const provider: CliProvider = (endpoint.provider as CliProvider) || 'claude'; + if (!validateSettings(endpoint.settings, provider)) { + if (!skipInvalid) { + result.errors.push(`Endpoint "${endpoint.name}": invalid settings format`); + } + result.skipped++; + continue; + } + + // Determine endpoint ID + const endpointId = (endpoint.id as string) || generateEndpointId(); + const exists = existingIndex.has(endpointId); + + // Handle conflicts + if (exists && conflictStrategy === 'skip') { + result.skipped++; + continue; + } + + // Prepare import request + const importRequest: SaveEndpointRequest = { + id: endpointId, + name: endpoint.name as string, + description: endpoint.description as string | undefined, + provider, + settings: endpoint.settings as CliSettings, + enabled: disableImported ? false : (endpoint.enabled as boolean) ?? true + }; + + // For merge strategy, combine with existing if applicable + if (exists && conflictStrategy === 'merge') { + const existing = loadEndpointSettings(endpointId); + if (existing) { + importRequest.settings = { + ...existing.settings, + ...(endpoint.settings as CliSettings) + } as CliSettings; + importRequest.name = (endpoint.name as string) || existing.name; + importRequest.description = (endpoint.description as string) ?? 
existing.description; + } + } + + // Save the endpoint + const saveResult = saveEndpointSettings(importRequest); + + if (saveResult.success) { + result.imported++; + result.importedIds!.push(endpointId); + } else { + result.errors.push(`Endpoint "${endpoint.name}": ${saveResult.message || 'Failed to save'}`); + result.skipped++; + } + } + + result.success = result.imported > 0; + return result; +} diff --git a/ccw/src/core/routes/cli-settings-routes.ts b/ccw/src/core/routes/cli-settings-routes.ts index ce16597c..c0e93598 100644 --- a/ccw/src/core/routes/cli-settings-routes.ts +++ b/ccw/src/core/routes/cli-settings-routes.ts @@ -12,9 +12,11 @@ import { toggleEndpointEnabled, getSettingsFilePath, ensureSettingsDir, - sanitizeEndpointId + sanitizeEndpointId, + exportAllSettings, + importSettings } from '../../config/cli-settings-manager.js'; -import type { SaveEndpointRequest } from '../../types/cli-settings.js'; +import type { SaveEndpointRequest, ImportOptions } from '../../types/cli-settings.js'; import { validateSettings } from '../../types/cli-settings.js'; import { syncBuiltinToolsAvailability, getBuiltinToolsSyncReport } from '../../tools/claude-cli-tools.js'; @@ -275,5 +277,62 @@ export async function handleCliSettingsRoutes(ctx: RouteContext): Promise { + try { + // Extract import options and data from request + const request = body as { data?: unknown; options?: ImportOptions }; + + if (!request.data) { + return { error: 'Missing export data in request body', status: 400 }; + } + + const result = importSettings(request.data, request.options); + + if (result.success) { + // Broadcast import event + broadcastToClients({ + type: 'CLI_SETTINGS_IMPORTED', + payload: { + imported: result.imported, + skipped: result.skipped, + importedIds: result.importedIds, + timestamp: new Date().toISOString() + } + }); + } + + return result; + } catch (err) { + return { error: (err as Error).message, status: 500 }; + } + }); + return true; + } + return false; } diff --git 
a/ccw/src/core/services/cli-session-manager.ts b/ccw/src/core/services/cli-session-manager.ts index c4bf0c26..a4ebe35f 100644 --- a/ccw/src/core/services/cli-session-manager.ts +++ b/ccw/src/core/services/cli-session-manager.ts @@ -335,11 +335,15 @@ export class CliSessionManager { } // Merge endpoint env vars with process.env (endpoint overrides process.env) - const spawnEnv: Record = { + const spawnEnv: Record = { ...(process.env as Record), ...endpointEnv, }; + // Unset CLAUDECODE to allow nested Claude Code sessions (SDK/subagent use case) + // See: https://github.com/anthropics/claude-agent-sdk-python/issues/573 + delete spawnEnv.CLAUDECODE; + let pty: nodePty.IPty; try { pty = nodePty.spawn(file, args, { diff --git a/ccw/src/tools/claude-cli-tools.ts b/ccw/src/tools/claude-cli-tools.ts index 87e801ad..d7637cef 100644 --- a/ccw/src/tools/claude-cli-tools.ts +++ b/ccw/src/tools/claude-cli-tools.ts @@ -136,46 +136,32 @@ export interface ClaudeCliCombinedConfig extends ClaudeCliToolsConfig { // ========== Default Config ========== +// Default tools config - no model defaults, models come from user's cli-tools.json const DEFAULT_TOOLS_CONFIG: ClaudeCliToolsConfig = { version: '3.4.0', tools: { gemini: { enabled: true, - primaryModel: 'gemini-2.5-pro', - secondaryModel: 'gemini-2.5-flash', - availableModels: ['gemini-2.5-pro', 'gemini-2.5-flash', 'gemini-2.0-flash', 'gemini-2.0-flash-thinking', 'gemini-1.5-pro'], tags: [], type: 'builtin' }, qwen: { enabled: true, - primaryModel: 'coder-model', - secondaryModel: 'coder-model', - availableModels: ['coder-model', 'vision-model', 'qwen-2.5-coder', 'qwen-2.5-72b'], tags: [], type: 'builtin' }, codex: { enabled: true, - primaryModel: 'gpt-5.2', - secondaryModel: 'gpt-5.2', - availableModels: ['gpt-5.2', 'gpt-5', 'gpt5-codex', 'o3', 'o1'], tags: [], type: 'builtin' }, claude: { enabled: true, - primaryModel: 'sonnet', - secondaryModel: 'haiku', - availableModels: ['opus', 'sonnet', 'haiku'], tags: [], type: 'builtin' }, 
opencode: { enabled: true, - primaryModel: 'opencode/glm-4.7-free', - secondaryModel: 'opencode/glm-4.7-free', - availableModels: ['opencode/glm-4.7-free', 'opencode/deepseek-v3-free'], tags: [], type: 'builtin' } diff --git a/ccw/src/tools/cli-executor-core.ts b/ccw/src/tools/cli-executor-core.ts index 44a73eaf..070304b8 100644 --- a/ccw/src/tools/cli-executor-core.ts +++ b/ccw/src/tools/cli-executor-core.ts @@ -958,12 +958,16 @@ async function executeCliTool( // Merge custom env with process.env (custom env takes precedence) // Also include rulesEnv for $PROTO and $TMPL template variables - const spawnEnv = { + const spawnEnv: Record = { ...process.env, ...customEnv, ...(rulesEnv || {}) }; + // Unset CLAUDECODE to allow nested Claude Code sessions (SDK/subagent use case) + // See: https://github.com/anthropics/claude-agent-sdk-python/issues/573 + delete spawnEnv.CLAUDECODE; + debugLog('SPAWN', `Spawning process`, { command, args, diff --git a/ccw/src/tools/generate-module-docs.ts b/ccw/src/tools/generate-module-docs.ts index 6257cc48..f57b47f7 100644 --- a/ccw/src/tools/generate-module-docs.ts +++ b/ccw/src/tools/generate-module-docs.ts @@ -23,13 +23,6 @@ const CODE_EXTENSIONS = [ '.ts', '.tsx', '.js', '.jsx', '.py', '.sh', '.go', '.rs' ]; -// Default models for each tool -const DEFAULT_MODELS: Record = { - gemini: 'gemini-2.5-flash', - qwen: 'coder-model', - codex: 'gpt5-codex' -}; - // Template paths (relative to user home directory) const TEMPLATE_BASE = '~/.ccw/workflows/cli-templates/prompts/documentation'; @@ -141,20 +134,21 @@ function buildCliCommand(tool: string, promptFile: string, model: string): strin // Build the cat/read command based on platform const catCmd = isWindows ? `Get-Content -Raw "${normalizedPath}" | ` : `cat "${normalizedPath}" | `; + // Build model flag only if model is specified + const modelFlag = model ? ` -m "${model}"` : ''; + switch (tool) { case 'qwen': - return model === 'coder-model' - ? 
`${catCmd}qwen --yolo` - : `${catCmd}qwen -m "${model}" --yolo`; + return `${catCmd}qwen${modelFlag} --yolo`; case 'codex': // codex uses different syntax - prompt as exec argument if (isWindows) { - return `codex --full-auto exec (Get-Content -Raw "${normalizedPath}") -m "${model}" --skip-git-repo-check -s danger-full-access`; + return `codex --full-auto exec (Get-Content -Raw "${normalizedPath}")${modelFlag} --skip-git-repo-check -s danger-full-access`; } - return `codex --full-auto exec "$(cat "${normalizedPath}")" -m "${model}" --skip-git-repo-check -s danger-full-access`; + return `codex --full-auto exec "$(cat "${normalizedPath}")"${modelFlag} --skip-git-repo-check -s danger-full-access`; case 'gemini': default: - return `${catCmd}gemini -m "${model}" --yolo`; + return `${catCmd}gemini${modelFlag} --yolo`; } } @@ -273,7 +267,8 @@ export async function handler(params: Record): Promise