mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-02-12 02:37:45 +08:00
feat(workflow): add multi-CLI collaborative planning command
- Introduced a new command `/workflow:multi-cli-plan` for collaborative planning using ACE semantic search and iterative analysis with Claude and Codex. - Implemented a structured execution flow with phases for context gathering, multi-tool analysis, user decision points, and final plan generation. - Added detailed documentation outlining the command's usage, execution phases, and key features. - Included error handling and configuration options for enhanced user experience.
This commit is contained in:
1163
.claude/agents/cli-discuss-agent.md
Normal file
1163
.claude/agents/cli-discuss-agent.md
Normal file
File diff suppressed because it is too large
Load Diff
@@ -17,21 +17,21 @@ Minimal orchestrator that dispatches **solution IDs** to executors. Each executo
|
|||||||
- `done <id>` → update solution completion status
|
- `done <id>` → update solution completion status
|
||||||
- No race conditions: status changes only via `done`
|
- No race conditions: status changes only via `done`
|
||||||
- **Executor handles all tasks within a solution sequentially**
|
- **Executor handles all tasks within a solution sequentially**
|
||||||
- **Worktree isolation**: Each executor can work in its own git worktree
|
- **Single worktree for entire queue**: One worktree isolates ALL queue execution from main workspace
|
||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
/issue:execute # Execute active queue(s)
|
/issue:execute # Execute active queue(s)
|
||||||
/issue:execute --queue QUE-xxx # Execute specific queue
|
/issue:execute --queue QUE-xxx # Execute specific queue
|
||||||
/issue:execute --worktree # Use git worktrees for parallel isolation
|
/issue:execute --worktree # Execute entire queue in isolated worktree
|
||||||
/issue:execute --worktree --queue QUE-xxx
|
/issue:execute --worktree --queue QUE-xxx
|
||||||
/issue:execute --worktree /path/to/existing/worktree # Resume in existing worktree
|
/issue:execute --worktree /path/to/existing/worktree # Resume in existing worktree
|
||||||
```
|
```
|
||||||
|
|
||||||
**Parallelism**: Determined automatically by task dependency DAG (no manual control)
|
**Parallelism**: Determined automatically by task dependency DAG (no manual control)
|
||||||
**Executor & Dry-run**: Selected via interactive prompt (AskUserQuestion)
|
**Executor & Dry-run**: Selected via interactive prompt (AskUserQuestion)
|
||||||
**Worktree**: Creates isolated git worktrees for each parallel executor
|
**Worktree**: Creates ONE worktree for the entire queue execution (not per-solution)
|
||||||
|
|
||||||
**⭐ Recommended Executor**: **Codex** - Best for long-running autonomous work (2hr timeout), supports background execution and full write access
|
**⭐ Recommended Executor**: **Codex** - Best for long-running autonomous work (2hr timeout), supports background execution and full write access
|
||||||
|
|
||||||
@@ -44,8 +44,10 @@ Minimal orchestrator that dispatches **solution IDs** to executors. Each executo
|
|||||||
## Execution Flow
|
## Execution Flow
|
||||||
|
|
||||||
```
|
```
|
||||||
Phase 0 (if --worktree): Setup Worktree Base
|
Phase 0 (if --worktree): Setup Queue Worktree
|
||||||
└─ Ensure .worktrees directory exists
|
├─ Create ONE worktree for entire queue: .ccw/worktrees/queue-<timestamp>
|
||||||
|
├─ All subsequent execution happens in this worktree
|
||||||
|
└─ Main workspace remains clean and untouched
|
||||||
|
|
||||||
Phase 1: Get DAG & User Selection
|
Phase 1: Get DAG & User Selection
|
||||||
├─ ccw issue queue dag [--queue QUE-xxx] → { parallel_batches: [["S-1","S-2"], ["S-3"]] }
|
├─ ccw issue queue dag [--queue QUE-xxx] → { parallel_batches: [["S-1","S-2"], ["S-3"]] }
|
||||||
@@ -53,19 +55,22 @@ Phase 1: Get DAG & User Selection
|
|||||||
|
|
||||||
Phase 2: Dispatch Parallel Batch (DAG-driven)
|
Phase 2: Dispatch Parallel Batch (DAG-driven)
|
||||||
├─ Parallelism determined by DAG (no manual limit)
|
├─ Parallelism determined by DAG (no manual limit)
|
||||||
|
├─ All executors work in the SAME worktree (or main if no worktree)
|
||||||
├─ For each solution ID in batch (parallel - all at once):
|
├─ For each solution ID in batch (parallel - all at once):
|
||||||
│ ├─ (if worktree) Create isolated worktree: git worktree add
|
|
||||||
│ ├─ Executor calls: ccw issue detail <id> (READ-ONLY)
|
│ ├─ Executor calls: ccw issue detail <id> (READ-ONLY)
|
||||||
│ ├─ Executor gets FULL SOLUTION with all tasks
|
│ ├─ Executor gets FULL SOLUTION with all tasks
|
||||||
│ ├─ Executor implements all tasks sequentially (T1 → T2 → T3)
|
│ ├─ Executor implements all tasks sequentially (T1 → T2 → T3)
|
||||||
│ ├─ Executor tests + verifies each task
|
│ ├─ Executor tests + verifies each task
|
||||||
│ ├─ Executor commits ONCE per solution (with formatted summary)
|
│ ├─ Executor commits ONCE per solution (with formatted summary)
|
||||||
│ ├─ Executor calls: ccw issue done <id>
|
│ └─ Executor calls: ccw issue done <id>
|
||||||
│ └─ (if worktree) Cleanup: merge branch, remove worktree
|
|
||||||
└─ Wait for batch completion
|
└─ Wait for batch completion
|
||||||
|
|
||||||
Phase 3: Next Batch
|
Phase 3: Next Batch (repeat Phase 2)
|
||||||
└─ ccw issue queue dag → check for newly-ready solutions
|
└─ ccw issue queue dag → check for newly-ready solutions
|
||||||
|
|
||||||
|
Phase 4 (if --worktree): Worktree Completion
|
||||||
|
├─ All batches complete → prompt for merge strategy
|
||||||
|
└─ Options: Create PR / Merge to main / Keep branch
|
||||||
```
|
```
|
||||||
|
|
||||||
## Implementation
|
## Implementation
|
||||||
@@ -115,12 +120,12 @@ const answer = AskUserQuestion({
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
question: 'Use git worktrees for parallel isolation?',
|
question: 'Use git worktree for queue isolation?',
|
||||||
header: 'Worktree',
|
header: 'Worktree',
|
||||||
multiSelect: false,
|
multiSelect: false,
|
||||||
options: [
|
options: [
|
||||||
{ label: 'Yes (Recommended for parallel)', description: 'Each executor works in isolated worktree branch' },
|
{ label: 'Yes (Recommended)', description: 'Create ONE worktree for entire queue - main stays clean' },
|
||||||
{ label: 'No', description: 'Work directly in current directory (serial only)' }
|
{ label: 'No', description: 'Work directly in current directory' }
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
@@ -140,7 +145,7 @@ if (isDryRun) {
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
### Phase 2: Dispatch Parallel Batch (DAG-driven)
|
### Phase 0 & 2: Setup Queue Worktree & Dispatch
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
// Parallelism determined by DAG - no manual limit
|
// Parallelism determined by DAG - no manual limit
|
||||||
@@ -158,24 +163,40 @@ TodoWrite({
|
|||||||
|
|
||||||
console.log(`\n### Executing Solutions (DAG batch 1): ${batch.join(', ')}`);
|
console.log(`\n### Executing Solutions (DAG batch 1): ${batch.join(', ')}`);
|
||||||
|
|
||||||
// Setup worktree base directory if needed (using absolute paths)
|
|
||||||
if (useWorktree) {
|
|
||||||
// Use absolute paths to avoid issues when running from subdirectories
|
|
||||||
const repoRoot = Bash('git rev-parse --show-toplevel').trim();
|
|
||||||
const worktreeBase = `${repoRoot}/.ccw/worktrees`;
|
|
||||||
Bash(`mkdir -p "${worktreeBase}"`);
|
|
||||||
// Prune stale worktrees from previous interrupted executions
|
|
||||||
Bash('git worktree prune');
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse existing worktree path from args if provided
|
// Parse existing worktree path from args if provided
|
||||||
// Example: --worktree /path/to/existing/worktree
|
// Example: --worktree /path/to/existing/worktree
|
||||||
const existingWorktree = args.worktree && typeof args.worktree === 'string' ? args.worktree : null;
|
const existingWorktree = args.worktree && typeof args.worktree === 'string' ? args.worktree : null;
|
||||||
|
|
||||||
|
// Setup ONE worktree for entire queue (not per-solution)
|
||||||
|
let worktreePath = null;
|
||||||
|
let worktreeBranch = null;
|
||||||
|
|
||||||
|
if (useWorktree) {
|
||||||
|
const repoRoot = Bash('git rev-parse --show-toplevel').trim();
|
||||||
|
const worktreeBase = `${repoRoot}/.ccw/worktrees`;
|
||||||
|
Bash(`mkdir -p "${worktreeBase}"`);
|
||||||
|
Bash('git worktree prune'); // Cleanup stale worktrees
|
||||||
|
|
||||||
|
if (existingWorktree) {
|
||||||
|
// Resume mode: Use existing worktree
|
||||||
|
worktreePath = existingWorktree;
|
||||||
|
worktreeBranch = Bash(`git -C "${worktreePath}" branch --show-current`).trim();
|
||||||
|
console.log(`Resuming in existing worktree: ${worktreePath} (branch: ${worktreeBranch})`);
|
||||||
|
} else {
|
||||||
|
// Create mode: ONE worktree for the entire queue
|
||||||
|
const timestamp = new Date().toISOString().replace(/[-:T]/g, '').slice(0, 14);
|
||||||
|
worktreeBranch = `queue-exec-${dag.queue_id || timestamp}`;
|
||||||
|
worktreePath = `${worktreeBase}/${worktreeBranch}`;
|
||||||
|
Bash(`git worktree add "${worktreePath}" -b "${worktreeBranch}"`);
|
||||||
|
console.log(`Created queue worktree: ${worktreePath}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Launch ALL solutions in batch in parallel (DAG guarantees no conflicts)
|
// Launch ALL solutions in batch in parallel (DAG guarantees no conflicts)
|
||||||
|
// All executors work in the SAME worktree (or main if no worktree)
|
||||||
const executions = batch.map(solutionId => {
|
const executions = batch.map(solutionId => {
|
||||||
updateTodo(solutionId, 'in_progress');
|
updateTodo(solutionId, 'in_progress');
|
||||||
return dispatchExecutor(solutionId, executor, useWorktree, existingWorktree);
|
return dispatchExecutor(solutionId, executor, worktreePath);
|
||||||
});
|
});
|
||||||
|
|
||||||
await Promise.all(executions);
|
await Promise.all(executions);
|
||||||
@@ -185,126 +206,20 @@ batch.forEach(id => updateTodo(id, 'completed'));
|
|||||||
### Executor Dispatch
|
### Executor Dispatch
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
function dispatchExecutor(solutionId, executorType, useWorktree = false, existingWorktree = null) {
|
// worktreePath: path to shared worktree (null if not using worktree)
|
||||||
// Worktree setup commands (if enabled) - using absolute paths
|
function dispatchExecutor(solutionId, executorType, worktreePath = null) {
|
||||||
// Supports both creating new worktrees and resuming in existing ones
|
// If worktree is provided, executor works in that directory
|
||||||
const worktreeSetup = useWorktree ? `
|
// No per-solution worktree creation - ONE worktree for entire queue
|
||||||
### Step 0: Setup Isolated Worktree
|
const cdCommand = worktreePath ? `cd "${worktreePath}"` : '';
|
||||||
\`\`\`bash
|
|
||||||
# Use absolute paths to avoid issues when running from subdirectories
|
|
||||||
REPO_ROOT=$(git rev-parse --show-toplevel)
|
|
||||||
WORKTREE_BASE="\${REPO_ROOT}/.ccw/worktrees"
|
|
||||||
|
|
||||||
# Check if existing worktree path was provided
|
|
||||||
EXISTING_WORKTREE="${existingWorktree || ''}"
|
|
||||||
|
|
||||||
if [[ -n "\${EXISTING_WORKTREE}" && -d "\${EXISTING_WORKTREE}" ]]; then
|
|
||||||
# Resume mode: Use existing worktree
|
|
||||||
WORKTREE_PATH="\${EXISTING_WORKTREE}"
|
|
||||||
WORKTREE_NAME=$(basename "\${WORKTREE_PATH}")
|
|
||||||
|
|
||||||
# Verify it's a valid git worktree
|
|
||||||
if ! git -C "\${WORKTREE_PATH}" rev-parse --is-inside-work-tree &>/dev/null; then
|
|
||||||
echo "Error: \${EXISTING_WORKTREE} is not a valid git worktree"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "Resuming in existing worktree: \${WORKTREE_PATH}"
|
|
||||||
else
|
|
||||||
# Create mode: New worktree with timestamp
|
|
||||||
WORKTREE_NAME="exec-${solutionId}-$(date +%H%M%S)"
|
|
||||||
WORKTREE_PATH="\${WORKTREE_BASE}/\${WORKTREE_NAME}"
|
|
||||||
|
|
||||||
# Ensure worktree base exists
|
|
||||||
mkdir -p "\${WORKTREE_BASE}"
|
|
||||||
|
|
||||||
# Prune stale worktrees
|
|
||||||
git worktree prune
|
|
||||||
|
|
||||||
# Create worktree
|
|
||||||
git worktree add "\${WORKTREE_PATH}" -b "\${WORKTREE_NAME}"
|
|
||||||
|
|
||||||
echo "Created new worktree: \${WORKTREE_PATH}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Setup cleanup trap for graceful failure handling
|
|
||||||
cleanup_worktree() {
|
|
||||||
echo "Cleaning up worktree due to interruption..."
|
|
||||||
cd "\${REPO_ROOT}" 2>/dev/null || true
|
|
||||||
git worktree remove "\${WORKTREE_PATH}" --force 2>/dev/null || true
|
|
||||||
echo "Worktree removed. Branch '\${WORKTREE_NAME}' kept for inspection."
|
|
||||||
}
|
|
||||||
trap cleanup_worktree EXIT INT TERM
|
|
||||||
|
|
||||||
cd "\${WORKTREE_PATH}"
|
|
||||||
\`\`\`
|
|
||||||
` : '';
|
|
||||||
|
|
||||||
const worktreeCleanup = useWorktree ? `
|
|
||||||
### Step 5: Worktree Completion (User Choice)
|
|
||||||
|
|
||||||
After all tasks complete, prompt for merge strategy:
|
|
||||||
|
|
||||||
\`\`\`javascript
|
|
||||||
AskUserQuestion({
|
|
||||||
questions: [{
|
|
||||||
question: "Solution ${solutionId} completed. What to do with worktree branch?",
|
|
||||||
header: "Merge",
|
|
||||||
multiSelect: false,
|
|
||||||
options: [
|
|
||||||
{ label: "Create PR (Recommended)", description: "Push branch and create pull request - safest for parallel execution" },
|
|
||||||
{ label: "Merge to main", description: "Merge branch and cleanup worktree (requires clean main)" },
|
|
||||||
{ label: "Keep branch", description: "Cleanup worktree, keep branch for manual handling" }
|
|
||||||
]
|
|
||||||
}]
|
|
||||||
})
|
|
||||||
\`\`\`
|
|
||||||
|
|
||||||
**Based on selection:**
|
|
||||||
\`\`\`bash
|
|
||||||
# Disable cleanup trap before intentional cleanup
|
|
||||||
trap - EXIT INT TERM
|
|
||||||
|
|
||||||
# Return to repo root (use REPO_ROOT from setup)
|
|
||||||
cd "\${REPO_ROOT}"
|
|
||||||
|
|
||||||
# Validate main repo state before merge
|
|
||||||
validate_main_clean() {
|
|
||||||
if [[ -n \$(git status --porcelain) ]]; then
|
|
||||||
echo "⚠️ Warning: Main repo has uncommitted changes."
|
|
||||||
echo "Cannot auto-merge. Falling back to 'Create PR' option."
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
# Create PR (Recommended for parallel execution):
|
|
||||||
git push -u origin "\${WORKTREE_NAME}"
|
|
||||||
gh pr create --title "Solution ${solutionId}" --body "Issue queue execution"
|
|
||||||
git worktree remove "\${WORKTREE_PATH}"
|
|
||||||
|
|
||||||
# Merge to main (only if main is clean):
|
|
||||||
if validate_main_clean; then
|
|
||||||
git merge --no-ff "\${WORKTREE_NAME}" -m "Merge solution ${solutionId}"
|
|
||||||
git worktree remove "\${WORKTREE_PATH}" && git branch -d "\${WORKTREE_NAME}"
|
|
||||||
else
|
|
||||||
# Fallback to PR if main is dirty
|
|
||||||
git push -u origin "\${WORKTREE_NAME}"
|
|
||||||
gh pr create --title "Solution ${solutionId}" --body "Issue queue execution (main had uncommitted changes)"
|
|
||||||
git worktree remove "\${WORKTREE_PATH}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Keep branch:
|
|
||||||
git worktree remove "\${WORKTREE_PATH}"
|
|
||||||
echo "Branch \${WORKTREE_NAME} kept for manual handling"
|
|
||||||
\`\`\`
|
|
||||||
|
|
||||||
**Parallel Execution Safety**: "Create PR" is the default and safest option for parallel executors, avoiding merge race conditions.
|
|
||||||
` : '';
|
|
||||||
|
|
||||||
const prompt = `
|
const prompt = `
|
||||||
## Execute Solution ${solutionId}
|
## Execute Solution ${solutionId}
|
||||||
${worktreeSetup}
|
${worktreePath ? `
|
||||||
|
### Step 0: Enter Queue Worktree
|
||||||
|
\`\`\`bash
|
||||||
|
cd "${worktreePath}"
|
||||||
|
\`\`\`
|
||||||
|
` : ''}
|
||||||
### Step 1: Get Solution (read-only)
|
### Step 1: Get Solution (read-only)
|
||||||
\`\`\`bash
|
\`\`\`bash
|
||||||
ccw issue detail ${solutionId}
|
ccw issue detail ${solutionId}
|
||||||
@@ -352,16 +267,21 @@ If any task failed:
|
|||||||
\`\`\`bash
|
\`\`\`bash
|
||||||
ccw issue done ${solutionId} --fail --reason '{"task_id": "TX", "error_type": "test_failure", "message": "..."}'
|
ccw issue done ${solutionId} --fail --reason '{"task_id": "TX", "error_type": "test_failure", "message": "..."}'
|
||||||
\`\`\`
|
\`\`\`
|
||||||
${worktreeCleanup}`;
|
|
||||||
|
**Note**: Do NOT cleanup worktree after this solution. Worktree is shared by all solutions in the queue.
|
||||||
|
`;
|
||||||
|
|
||||||
|
// For CLI tools, pass --cd to set working directory
|
||||||
|
const cdOption = worktreePath ? ` --cd "${worktreePath}"` : '';
|
||||||
|
|
||||||
if (executorType === 'codex') {
|
if (executorType === 'codex') {
|
||||||
return Bash(
|
return Bash(
|
||||||
`ccw cli -p "${escapePrompt(prompt)}" --tool codex --mode write --id exec-${solutionId}`,
|
`ccw cli -p "${escapePrompt(prompt)}" --tool codex --mode write --id exec-${solutionId}${cdOption}`,
|
||||||
{ timeout: 7200000, run_in_background: true } // 2hr for full solution
|
{ timeout: 7200000, run_in_background: true } // 2hr for full solution
|
||||||
);
|
);
|
||||||
} else if (executorType === 'gemini') {
|
} else if (executorType === 'gemini') {
|
||||||
return Bash(
|
return Bash(
|
||||||
`ccw cli -p "${escapePrompt(prompt)}" --tool gemini --mode write --id exec-${solutionId}`,
|
`ccw cli -p "${escapePrompt(prompt)}" --tool gemini --mode write --id exec-${solutionId}${cdOption}`,
|
||||||
{ timeout: 3600000, run_in_background: true }
|
{ timeout: 3600000, run_in_background: true }
|
||||||
);
|
);
|
||||||
} else {
|
} else {
|
||||||
@@ -369,7 +289,7 @@ ${worktreeCleanup}`;
|
|||||||
subagent_type: 'code-developer',
|
subagent_type: 'code-developer',
|
||||||
run_in_background: false,
|
run_in_background: false,
|
||||||
description: `Execute solution ${solutionId}`,
|
description: `Execute solution ${solutionId}`,
|
||||||
prompt: prompt
|
prompt: worktreePath ? `Working directory: ${worktreePath}\n\n${prompt}` : prompt
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -390,40 +310,98 @@ console.log(`
|
|||||||
|
|
||||||
if (refreshedDag.ready_count > 0) {
|
if (refreshedDag.ready_count > 0) {
|
||||||
console.log('Run `/issue:execute` again for next batch.');
|
console.log('Run `/issue:execute` again for next batch.');
|
||||||
|
// Note: If resuming, pass existing worktree path:
|
||||||
|
// /issue:execute --worktree <worktreePath>
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 4: Worktree Completion (after ALL batches)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Only run when ALL solutions completed AND using worktree
|
||||||
|
if (useWorktree && refreshedDag.ready_count === 0 && refreshedDag.completed_count === refreshedDag.total) {
|
||||||
|
console.log('\n## All Solutions Completed - Worktree Cleanup');
|
||||||
|
|
||||||
|
const answer = AskUserQuestion({
|
||||||
|
questions: [{
|
||||||
|
question: `Queue complete. What to do with worktree branch "${worktreeBranch}"?`,
|
||||||
|
header: 'Merge',
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{ label: 'Create PR (Recommended)', description: 'Push branch and create pull request' },
|
||||||
|
{ label: 'Merge to main', description: 'Merge all commits and cleanup worktree' },
|
||||||
|
{ label: 'Keep branch', description: 'Cleanup worktree, keep branch for manual handling' }
|
||||||
|
]
|
||||||
|
}]
|
||||||
|
});
|
||||||
|
|
||||||
|
const repoRoot = Bash('git rev-parse --show-toplevel').trim();
|
||||||
|
|
||||||
|
if (answer['Merge'].includes('Create PR')) {
|
||||||
|
Bash(`git -C "${worktreePath}" push -u origin "${worktreeBranch}"`);
|
||||||
|
Bash(`gh pr create --title "Queue ${dag.queue_id}" --body "Issue queue execution - all solutions completed" --head "${worktreeBranch}"`);
|
||||||
|
Bash(`git worktree remove "${worktreePath}"`);
|
||||||
|
console.log(`PR created for branch: ${worktreeBranch}`);
|
||||||
|
} else if (answer['Merge'].includes('Merge to main')) {
|
||||||
|
// Check main is clean
|
||||||
|
const mainDirty = Bash('git status --porcelain').trim();
|
||||||
|
if (mainDirty) {
|
||||||
|
console.log('Warning: Main has uncommitted changes. Falling back to PR.');
|
||||||
|
Bash(`git -C "${worktreePath}" push -u origin "${worktreeBranch}"`);
|
||||||
|
Bash(`gh pr create --title "Queue ${dag.queue_id}" --body "Issue queue execution (main had uncommitted changes)" --head "${worktreeBranch}"`);
|
||||||
|
} else {
|
||||||
|
Bash(`git merge --no-ff "${worktreeBranch}" -m "Merge queue ${dag.queue_id}"`);
|
||||||
|
Bash(`git branch -d "${worktreeBranch}"`);
|
||||||
|
}
|
||||||
|
Bash(`git worktree remove "${worktreePath}"`);
|
||||||
|
} else {
|
||||||
|
Bash(`git worktree remove "${worktreePath}"`);
|
||||||
|
console.log(`Branch ${worktreeBranch} kept for manual handling`);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
## Parallel Execution Model
|
## Parallel Execution Model
|
||||||
|
|
||||||
```
|
```
|
||||||
┌─────────────────────────────────────────────────────────────┐
|
┌─────────────────────────────────────────────────────────────────┐
|
||||||
│ Orchestrator │
|
│ Orchestrator │
|
||||||
├─────────────────────────────────────────────────────────────┤
|
├─────────────────────────────────────────────────────────────────┤
|
||||||
│ 1. ccw issue queue dag │
|
│ 0. (if --worktree) Create ONE worktree for entire queue │
|
||||||
│ → { parallel_batches: [["S-1","S-2"], ["S-3"]] } │
|
│ → .ccw/worktrees/queue-exec-<queue-id> │
|
||||||
│ │
|
│ │
|
||||||
│ 2. Dispatch batch 1 (parallel): │
|
│ 1. ccw issue queue dag │
|
||||||
│ ┌──────────────────────┐ ┌──────────────────────┐ │
|
│ → { parallel_batches: [["S-1","S-2"], ["S-3"]] } │
|
||||||
│ │ Executor 1 │ │ Executor 2 │ │
|
│ │
|
||||||
│ │ detail S-1 │ │ detail S-2 │ │
|
│ 2. Dispatch batch 1 (parallel, SAME worktree): │
|
||||||
│ │ → gets full solution │ │ → gets full solution │ │
|
│ ┌──────────────────────────────────────────────────────┐ │
|
||||||
│ │ [T1→T2→T3 sequential]│ │ [T1→T2 sequential] │ │
|
│ │ Shared Queue Worktree (or main) │ │
|
||||||
│ │ commit (1x solution) │ │ commit (1x solution) │ │
|
│ │ ┌──────────────────┐ ┌──────────────────┐ │ │
|
||||||
│ │ done S-1 │ │ done S-2 │ │
|
│ │ │ Executor 1 │ │ Executor 2 │ │ │
|
||||||
│ └──────────────────────┘ └──────────────────────┘ │
|
│ │ │ detail S-1 │ │ detail S-2 │ │ │
|
||||||
│ │
|
│ │ │ [T1→T2→T3] │ │ [T1→T2] │ │ │
|
||||||
│ 3. ccw issue queue dag (refresh) │
|
│ │ │ commit S-1 │ │ commit S-2 │ │ │
|
||||||
│ → S-3 now ready (S-1 completed, file conflict resolved) │
|
│ │ │ done S-1 │ │ done S-2 │ │ │
|
||||||
└─────────────────────────────────────────────────────────────┘
|
│ │ └──────────────────┘ └──────────────────┘ │ │
|
||||||
|
│ └──────────────────────────────────────────────────────┘ │
|
||||||
|
│ │
|
||||||
|
│ 3. ccw issue queue dag (refresh) │
|
||||||
|
│ → S-3 now ready → dispatch batch 2 (same worktree) │
|
||||||
|
│ │
|
||||||
|
│ 4. (if --worktree) ALL batches complete → cleanup worktree │
|
||||||
|
│ → Prompt: Create PR / Merge to main / Keep branch │
|
||||||
|
└─────────────────────────────────────────────────────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
**Why this works for parallel:**
|
**Why this works for parallel:**
|
||||||
|
- **ONE worktree for entire queue** → all solutions share same isolated workspace
|
||||||
- `detail <id>` is READ-ONLY → no race conditions
|
- `detail <id>` is READ-ONLY → no race conditions
|
||||||
- Each executor handles **all tasks within a solution** sequentially
|
- Each executor handles **all tasks within a solution** sequentially
|
||||||
- **One commit per solution** with formatted summary (not per-task)
|
- **One commit per solution** with formatted summary (not per-task)
|
||||||
- `done <id>` updates only its own solution status
|
- `done <id>` updates only its own solution status
|
||||||
- `queue dag` recalculates ready solutions after each batch
|
- `queue dag` recalculates ready solutions after each batch
|
||||||
- Solutions in same batch have NO file conflicts
|
- Solutions in same batch have NO file conflicts (DAG guarantees)
|
||||||
|
- **Main workspace stays clean** until merge/PR decision
|
||||||
|
|
||||||
## CLI Endpoint Contract
|
## CLI Endpoint Contract
|
||||||
|
|
||||||
|
|||||||
891
.claude/commands/workflow/multi-cli-plan.md
Normal file
891
.claude/commands/workflow/multi-cli-plan.md
Normal file
@@ -0,0 +1,891 @@
|
|||||||
|
---
|
||||||
|
name: workflow:multi-cli-plan
|
||||||
|
description: Multi-CLI collaborative planning workflow using ACE semantic search and iterative Claude+Codex analysis to determine execution plan. Features user-driven decision points and convergent refinement.
|
||||||
|
argument-hint: "<task description> [--max-rounds=3] [--tools=gemini,codex]"
|
||||||
|
allowed-tools: TodoWrite(*), Task(*), AskUserQuestion(*), Read(*), Bash(*), Glob(*), Grep(*), mcp__ace-tool__search_context(*)
|
||||||
|
---
|
||||||
|
|
||||||
|
# Multi-CLI Collaborative Planning Command (/workflow:multi-cli-plan)
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
Multi-CLI collaborative planning workflow that uses ACE semantic search for context gathering, followed by iterative multi-tool analysis (Claude + Codex/Gemini) to converge on an optimal execution plan.
|
||||||
|
|
||||||
|
**Core Philosophy**:
|
||||||
|
- **Multi-round Verification**: Claude and Codex alternate analysis to ensure solutions match codebase reality
|
||||||
|
- **User-driven**: Every analysis round ends with user decision point
|
||||||
|
- **Iterative Convergence**: Multiple cycles progressively refine requirements and solutions
|
||||||
|
- **Final Confirmation**: Executable plan only output after explicit user approval
|
||||||
|
|
||||||
|
**Core Capabilities**:
|
||||||
|
- ACE semantic search for comprehensive codebase context
|
||||||
|
- Multi-tool collaborative analysis (Claude + Gemini/Codex)
|
||||||
|
- Interactive refinement with user feedback loops
|
||||||
|
- Solution comparison with trade-off analysis
|
||||||
|
- Final executable plan with file locations and acceptance criteria
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
```bash
|
||||||
|
/workflow:multi-cli-plan <task-description>
|
||||||
|
|
||||||
|
# With options
|
||||||
|
/workflow:multi-cli-plan "Implement user authentication" --max-rounds=3
|
||||||
|
/workflow:multi-cli-plan "Refactor payment module" --tools=gemini,codex
|
||||||
|
|
||||||
|
# Examples
|
||||||
|
/workflow:multi-cli-plan "Add dark mode support to the application"
|
||||||
|
/workflow:multi-cli-plan "Fix the memory leak in WebSocket connections"
|
||||||
|
/workflow:multi-cli-plan "Implement rate limiting for API endpoints"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Execution Flow
|
||||||
|
|
||||||
|
```
|
||||||
|
Phase 1: Input & Context Gathering
|
||||||
|
|-- Parse user task description
|
||||||
|
|-- ACE semantic search for codebase context
|
||||||
|
|-- Build initial context package
|
||||||
|
+-- Initialize discussion session
|
||||||
|
|
||||||
|
Phase 2: Multi-CLI Collaborative Analysis (Iterative)
|
||||||
|
|-- Round N:
|
||||||
|
| |-- Claude Analysis: Architecture perspective
|
||||||
|
| |-- Codex/Gemini Analysis: Implementation perspective
|
||||||
|
| |-- Cross-verify technical feasibility
|
||||||
|
| +-- Synthesize multiple implementation approaches
|
||||||
|
|
|
||||||
|
+-- Loop until convergence or max rounds
|
||||||
|
|
||||||
|
Phase 3: Stage Summary & Options
|
||||||
|
|-- Present 2-3 viable solution options with trade-offs
|
||||||
|
|-- Proactively ask clarifying questions for ambiguities
|
||||||
|
+-- Wait for user feedback
|
||||||
|
|
||||||
|
Phase 4: User Decision Point
|
||||||
|
|-- Option A: User approves current approach -> Phase 5
|
||||||
|
|-- Option B: User provides clarification/adjustments -> Return to Phase 2
|
||||||
|
+-- Option C: User requests different direction -> Reset analysis
|
||||||
|
|
||||||
|
Phase 5: Agent Planning & Output Generation
|
||||||
|
|-- Invoke cli-lite-planning-agent with discussion context
|
||||||
|
|-- Generate IMPL_PLAN.md (documentation)
|
||||||
|
|-- Generate plan.json (structured plan for execution)
|
||||||
|
|-- User confirms execution
|
||||||
|
+-- Hand off to /workflow:lite-execute
|
||||||
|
```
|
||||||
|
|
||||||
|
## Implementation
|
||||||
|
|
||||||
|
### Phase 1: Input & Context Gathering
|
||||||
|
|
||||||
|
**Session Initialization**:
|
||||||
|
```javascript
|
||||||
|
// Helper: Get UTC+8 (China Standard Time) ISO string
|
||||||
|
const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()
|
||||||
|
|
||||||
|
// Parse arguments
|
||||||
|
const { taskDescription, maxRounds, tools } = parseArgs(args)
|
||||||
|
const effectiveMaxRounds = maxRounds || 3
|
||||||
|
const effectiveTools = tools || ['gemini', 'codex']
|
||||||
|
|
||||||
|
// Generate session ID
|
||||||
|
const taskSlug = taskDescription.toLowerCase().replace(/[^a-z0-9]+/g, '-').substring(0, 40)
|
||||||
|
const dateStr = getUtc8ISOString().substring(0, 10)
|
||||||
|
const sessionId = `MCP-${taskSlug}-${dateStr}`
|
||||||
|
const sessionFolder = `.workflow/.multi-cli-plan/${sessionId}`
|
||||||
|
|
||||||
|
// Create session folder
|
||||||
|
Bash(`mkdir -p ${sessionFolder}/rounds && test -d ${sessionFolder} && echo "SUCCESS: ${sessionFolder}"`)
|
||||||
|
|
||||||
|
// Initialize session state
|
||||||
|
const sessionState = {
|
||||||
|
session_id: sessionId,
|
||||||
|
task_description: taskDescription,
|
||||||
|
created_at: getUtc8ISOString(),
|
||||||
|
max_rounds: effectiveMaxRounds,
|
||||||
|
tools: effectiveTools,
|
||||||
|
current_round: 0,
|
||||||
|
phase: 'context-gathering',
|
||||||
|
rounds: [],
|
||||||
|
solutions: [],
|
||||||
|
user_decisions: [],
|
||||||
|
final_plan: null
|
||||||
|
}
|
||||||
|
|
||||||
|
Write(`${sessionFolder}/session-state.json`, JSON.stringify(sessionState, null, 2))
|
||||||
|
```
|
||||||
|
|
||||||
|
**ACE Context Gathering**:
|
||||||
|
```javascript
|
||||||
|
// Step 1: Extract keywords from task description
|
||||||
|
const keywords = extractKeywords(taskDescription)
|
||||||
|
// e.g., "Add dark mode support" -> ["dark", "mode", "theme", "style", "color"]
|
||||||
|
|
||||||
|
// Step 2: Use ACE to understand codebase structure and relevant code
|
||||||
|
const aceQueries = [
|
||||||
|
// Architecture query
|
||||||
|
`Project architecture and module structure related to ${keywords.slice(0, 3).join(', ')}`,
|
||||||
|
// Implementation query
|
||||||
|
`Existing implementations of ${keywords[0]} in this codebase`,
|
||||||
|
// Pattern query
|
||||||
|
`Code patterns and conventions for ${keywords.slice(0, 2).join(' ')} features`,
|
||||||
|
// Integration query
|
||||||
|
`Integration points and dependencies for ${keywords[0]} functionality`
|
||||||
|
]
|
||||||
|
|
||||||
|
const aceResults = []
|
||||||
|
for (const query of aceQueries) {
|
||||||
|
const result = await mcp__ace-tool__search_context({
|
||||||
|
project_root_path: process.cwd(),
|
||||||
|
query: query
|
||||||
|
})
|
||||||
|
aceResults.push({ query, result, timestamp: getUtc8ISOString() })
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 3: Build context package (kept in memory for CLI consumption)
|
||||||
|
const contextPackage = {
|
||||||
|
task_description: taskDescription,
|
||||||
|
keywords: keywords,
|
||||||
|
ace_results: aceResults,
|
||||||
|
relevant_files: extractRelevantFiles(aceResults),
|
||||||
|
detected_patterns: extractPatterns(aceResults),
|
||||||
|
architecture_insights: aceResults[0].result,
|
||||||
|
existing_implementations: aceResults[1].result
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update session state
|
||||||
|
sessionState.phase = 'context-gathered'
|
||||||
|
sessionState.context_summary = {
|
||||||
|
files_identified: contextPackage.relevant_files.length,
|
||||||
|
patterns_detected: contextPackage.detected_patterns.length,
|
||||||
|
ace_queries: aceQueries.length
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Phase 2: Agent-Driven Collaborative Analysis
|
||||||
|
|
||||||
|
**Core Principle**: Orchestrator delegates all analysis to `cli-discuss-agent`, only reads output files for decision making.
|
||||||
|
|
||||||
|
**Analysis Round Loop**:
|
||||||
|
```javascript
|
||||||
|
let currentRound = 0
|
||||||
|
let shouldContinue = true
|
||||||
|
let analysisResults = []
|
||||||
|
|
||||||
|
while (shouldContinue && currentRound < effectiveMaxRounds) {
|
||||||
|
currentRound++
|
||||||
|
|
||||||
|
console.log(`
|
||||||
|
## Analysis Round ${currentRound}/${effectiveMaxRounds}
|
||||||
|
|
||||||
|
Delegating to cli-discuss-agent...
|
||||||
|
`)
|
||||||
|
|
||||||
|
// ========================================
|
||||||
|
// DELEGATE TO AGENT - No direct analysis
|
||||||
|
// ========================================
|
||||||
|
Task({
|
||||||
|
subagent_type: "cli-discuss-agent",
|
||||||
|
run_in_background: false,
|
||||||
|
description: `Discussion round ${currentRound}`,
|
||||||
|
prompt: `
|
||||||
|
## Task Objective
|
||||||
|
Execute collaborative discussion round ${currentRound} for task analysis.
|
||||||
|
|
||||||
|
## Input Context
|
||||||
|
- **Task Description**: ${taskDescription}
|
||||||
|
- **Round Number**: ${currentRound}
|
||||||
|
- **Session ID**: ${sessionId}
|
||||||
|
- **Session Folder**: ${sessionFolder}
|
||||||
|
|
||||||
|
## ACE Context
|
||||||
|
${JSON.stringify(contextPackage, null, 2)}
|
||||||
|
|
||||||
|
## Previous Rounds
|
||||||
|
${analysisResults.length > 0
|
||||||
|
? analysisResults.map(r => `Round ${r.round}: ${r.summary}`).join('\n')
|
||||||
|
: 'None (first round)'}
|
||||||
|
|
||||||
|
## User Feedback
|
||||||
|
${userFeedback || 'None'}
|
||||||
|
|
||||||
|
## CLI Configuration
|
||||||
|
- Tools: ${effectiveTools.join(', ')}
|
||||||
|
- Timeout: 600000ms
|
||||||
|
- Fallback Chain: gemini → codex → qwen
|
||||||
|
|
||||||
|
## Output Requirements
|
||||||
|
Write: ${sessionFolder}/rounds/${currentRound}/synthesis.json
|
||||||
|
|
||||||
|
Follow cli-discuss-agent output schema exactly.
|
||||||
|
|
||||||
|
## Success Criteria
|
||||||
|
- [ ] All configured CLI tools executed
|
||||||
|
- [ ] Cross-verification completed
|
||||||
|
- [ ] 2-3 solution options generated
|
||||||
|
- [ ] Convergence score calculated
|
||||||
|
- [ ] synthesis.json written to round folder
|
||||||
|
`
|
||||||
|
})
|
||||||
|
|
||||||
|
// ========================================
|
||||||
|
// READ AGENT OUTPUT - Decision making only
|
||||||
|
// ========================================
|
||||||
|
const synthesisPath = `${sessionFolder}/rounds/${currentRound}/synthesis.json`
|
||||||
|
const roundSynthesis = JSON.parse(Read(synthesisPath))
|
||||||
|
analysisResults.push(roundSynthesis)
|
||||||
|
|
||||||
|
// Update session state from agent output
|
||||||
|
sessionState.rounds.push({
|
||||||
|
number: currentRound,
|
||||||
|
cli_tools_used: roundSynthesis._metadata.cli_tools_used,
|
||||||
|
solutions_identified: roundSynthesis.solutions.length,
|
||||||
|
convergence_score: roundSynthesis.convergence.score,
|
||||||
|
new_insights: roundSynthesis.convergence.new_insights,
|
||||||
|
recommendation: roundSynthesis.convergence.recommendation
|
||||||
|
})
|
||||||
|
|
||||||
|
// Display round summary
|
||||||
|
console.log(`
|
||||||
|
### Round ${currentRound} Complete
|
||||||
|
|
||||||
|
**Convergence**: ${roundSynthesis.convergence.score.toFixed(2)}
|
||||||
|
**Solutions Found**: ${roundSynthesis.solutions.length}
|
||||||
|
**Recommendation**: ${roundSynthesis.convergence.recommendation}
|
||||||
|
|
||||||
|
**Solutions**:
|
||||||
|
${roundSynthesis.solutions.map((s, i) => `${i+1}. ${s.name} (${s.effort} effort, ${s.risk} risk)`).join('\n')}
|
||||||
|
`)
|
||||||
|
|
||||||
|
// Decide whether to continue based on agent's recommendation
|
||||||
|
if (roundSynthesis.convergence.recommendation === 'converged') {
|
||||||
|
shouldContinue = false
|
||||||
|
console.log('Analysis converged. Proceeding to decision phase.')
|
||||||
|
} else if (roundSynthesis.convergence.recommendation === 'user_input_needed') {
|
||||||
|
// Collect user feedback before next round
|
||||||
|
const feedbackResult = await AskUserQuestion({
|
||||||
|
questions: [{
|
||||||
|
question: 'Clarification needed. How would you like to proceed?',
|
||||||
|
header: 'Feedback',
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{ label: 'Provide Clarification', description: 'Answer questions and continue analysis' },
|
||||||
|
{ label: 'Proceed Anyway', description: 'Accept current solutions' },
|
||||||
|
{ label: 'Change Direction', description: 'Modify task requirements' }
|
||||||
|
]
|
||||||
|
}]
|
||||||
|
})
|
||||||
|
|
||||||
|
if (feedbackResult === 'Provide Clarification') {
|
||||||
|
// Display clarification questions
|
||||||
|
console.log(`
|
||||||
|
### Clarification Questions
|
||||||
|
${roundSynthesis.clarification_questions.map((q, i) => `${i+1}. ${q}`).join('\n')}
|
||||||
|
`)
|
||||||
|
// User provides feedback via "Other" option or follow-up
|
||||||
|
userFeedback = feedbackResult.other || ''
|
||||||
|
} else if (feedbackResult === 'Proceed Anyway') {
|
||||||
|
shouldContinue = false
|
||||||
|
} else {
|
||||||
|
// Reset with new direction
|
||||||
|
userFeedback = feedbackResult.other || ''
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Continue to next round
|
||||||
|
shouldContinue = roundSynthesis.convergence.new_insights && currentRound < effectiveMaxRounds
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get final synthesis from last round
|
||||||
|
const finalSynthesis = analysisResults[analysisResults.length - 1]
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Phase 3: Review Agent Output & Present Options
|
||||||
|
|
||||||
|
**Core Principle**: Orchestrator only reads agent output files and formats them for user decision.
|
||||||
|
|
||||||
|
**Read and Present Solutions**:
|
||||||
|
```javascript
|
||||||
|
// ========================================
|
||||||
|
// READ FINAL AGENT OUTPUT - No processing
|
||||||
|
// ========================================
|
||||||
|
// finalSynthesis already loaded from agent's synthesis.json in Phase 2
|
||||||
|
|
||||||
|
console.log(`
|
||||||
|
## Stage Summary
|
||||||
|
|
||||||
|
### Analysis Complete (from cli-discuss-agent output)
|
||||||
|
- Rounds completed: ${currentRound}
|
||||||
|
- CLI tools used: ${finalSynthesis._metadata.cli_tools_used.join(', ')}
|
||||||
|
- Cross-verification: ${finalSynthesis.cross_verification.agreements.length} agreements, ${finalSynthesis.cross_verification.disagreements.length} disagreements
|
||||||
|
- Convergence score: ${finalSynthesis.convergence.score.toFixed(2)}
|
||||||
|
|
||||||
|
### Solution Options (from agent synthesis)
|
||||||
|
|
||||||
|
${finalSynthesis.solutions.map((solution, index) => `
|
||||||
|
**Option ${index + 1}: ${solution.name}**
|
||||||
|
*Source: ${solution.source_cli.join(' + ')}*
|
||||||
|
|
||||||
|
Description: ${solution.description}
|
||||||
|
|
||||||
|
Trade-offs:
|
||||||
|
| Aspect | Assessment |
|
||||||
|
|--------|------------|
|
||||||
|
| Effort | ${solution.effort} |
|
||||||
|
| Risk | ${solution.risk} |
|
||||||
|
| Maintainability | ${solution.maintainability} |
|
||||||
|
| Performance | ${solution.performance_impact} |
|
||||||
|
|
||||||
|
Pros:
|
||||||
|
${solution.pros.map(p => `- ${p}`).join('\n')}
|
||||||
|
|
||||||
|
Cons:
|
||||||
|
${solution.cons.map(c => `- ${c}`).join('\n')}
|
||||||
|
|
||||||
|
Key files affected:
|
||||||
|
${solution.affected_files.slice(0, 5).map(f => `- ${f.file}:${f.line} - ${f.reason}`).join('\n')}
|
||||||
|
`).join('\n---\n')}
|
||||||
|
|
||||||
|
### Cross-Verification Summary
|
||||||
|
|
||||||
|
**Agreements**:
|
||||||
|
${finalSynthesis.cross_verification.agreements.slice(0, 5).map(a => `- ${a}`).join('\n')}
|
||||||
|
|
||||||
|
**Disagreements** (resolved):
|
||||||
|
${finalSynthesis.cross_verification.disagreements.slice(0, 3).map(d => `- ${d}`).join('\n') || '- None'}
|
||||||
|
|
||||||
|
### Clarification Questions (from agent)
|
||||||
|
|
||||||
|
${finalSynthesis.clarification_questions.length > 0
|
||||||
|
? finalSynthesis.clarification_questions.map((q, i) => `${i + 1}. ${q}`).join('\n')
|
||||||
|
: 'No clarifications needed.'}
|
||||||
|
`)
|
||||||
|
|
||||||
|
// Update session state with agent's findings
|
||||||
|
sessionState.solutions = finalSynthesis.solutions
|
||||||
|
sessionState.cross_verification = finalSynthesis.cross_verification
|
||||||
|
sessionState.phase = 'awaiting-decision'
|
||||||
|
Write(`${sessionFolder}/session-state.json`, JSON.stringify(sessionState, null, 2))
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Phase 4: User Decision Point
|
||||||
|
|
||||||
|
**Collect User Decision**:
|
||||||
|
```javascript
|
||||||
|
const decisionResult = await AskUserQuestion({
|
||||||
|
questions: [
|
||||||
|
{
|
||||||
|
question: `Which solution approach do you prefer?`,
|
||||||
|
header: "Solution",
|
||||||
|
multiSelect: false,
|
||||||
|
options: finalSynthesis.solutions.map((sol, i) => ({
|
||||||
|
label: `Option ${i + 1}: ${sol.name}`,
|
||||||
|
description: `${sol.effort} effort, ${sol.risk} risk`
|
||||||
|
})).concat([
|
||||||
|
{ label: "Need More Analysis", description: "Return to analysis with additional context" }
|
||||||
|
])
|
||||||
|
},
|
||||||
|
{
|
||||||
|
question: "Any clarifications or adjustments?",
|
||||||
|
header: "Feedback",
|
||||||
|
multiSelect: true,
|
||||||
|
options: [
|
||||||
|
{ label: "Proceed as-is", description: "Generate final plan with selected option" },
|
||||||
|
{ label: "Add constraints", description: "Specify additional requirements" },
|
||||||
|
{ label: "Change scope", description: "Adjust what's included/excluded" },
|
||||||
|
{ label: "Different direction", description: "Explore completely different approach" }
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
})
|
||||||
|
|
||||||
|
// Process decision
|
||||||
|
const userDecision = {
|
||||||
|
timestamp: getUtc8ISOString(),
|
||||||
|
selected_solution: decisionResult.solution,
|
||||||
|
feedback_type: decisionResult.feedback,
|
||||||
|
additional_input: decisionResult.other || null
|
||||||
|
}
|
||||||
|
|
||||||
|
sessionState.user_decisions.push(userDecision)
|
||||||
|
|
||||||
|
// Decision routing
|
||||||
|
if (userDecision.selected_solution === 'Need More Analysis' ||
|
||||||
|
userDecision.feedback_type.includes('Different direction')) {
|
||||||
|
// Return to Phase 2 with updated context
|
||||||
|
sessionState.phase = 'additional-analysis'
|
||||||
|
// Continue analysis loop with user feedback incorporated
|
||||||
|
} else if (userDecision.feedback_type.includes('Add constraints') ||
|
||||||
|
userDecision.feedback_type.includes('Change scope')) {
|
||||||
|
// Prompt for additional details
|
||||||
|
const additionalInput = await AskUserQuestion({
|
||||||
|
questions: [{
|
||||||
|
question: "Please provide the additional constraints or scope changes:",
|
||||||
|
header: "Details",
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{ label: "Performance priority", description: "Optimize for speed over simplicity" },
|
||||||
|
{ label: "Maintainability priority", description: "Prefer clear, maintainable code" },
|
||||||
|
{ label: "Minimal changes", description: "Change as few files as possible" },
|
||||||
|
{ label: "Full refactor OK", description: "Willing to do comprehensive changes" }
|
||||||
|
]
|
||||||
|
}]
|
||||||
|
})
|
||||||
|
// Incorporate and proceed to Phase 5
|
||||||
|
userDecision.constraints = additionalInput
|
||||||
|
sessionState.phase = 'generating-plan'
|
||||||
|
} else {
|
||||||
|
// Proceed to Phase 5
|
||||||
|
sessionState.phase = 'generating-plan'
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Phase 5: Agent Planning & Output Generation
|
||||||
|
|
||||||
|
**Step 5.1: Prepare Planning Context**
|
||||||
|
```javascript
|
||||||
|
// Select the approved solution
|
||||||
|
const selectedSolution = finalSynthesis.solutions[userDecision.selected_solution_index]
|
||||||
|
|
||||||
|
// Build comprehensive planning context from discussion
|
||||||
|
const planningContext = {
|
||||||
|
task_description: taskDescription,
|
||||||
|
selected_solution: selectedSolution,
|
||||||
|
analysis_rounds: analysisResults,
|
||||||
|
consensus_points: finalSynthesis.consensus_points,
|
||||||
|
user_constraints: userDecision.constraints || null,
|
||||||
|
ace_context: contextPackage,
|
||||||
|
clarifications: sessionState.user_decisions
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log(`
|
||||||
|
## Generating Implementation Plan
|
||||||
|
|
||||||
|
Selected approach: **${selectedSolution.name}**
|
||||||
|
Invoking planning agent...
|
||||||
|
`)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Step 5.2: Invoke cli-lite-planning-agent**
|
||||||
|
```javascript
|
||||||
|
// Call planning agent to generate detailed plan
|
||||||
|
Task({
|
||||||
|
subagent_type: "cli-lite-planning-agent",
|
||||||
|
run_in_background: false,
|
||||||
|
description: "Generate detailed implementation plan",
|
||||||
|
prompt: `
|
||||||
|
## Task Objective
|
||||||
|
Generate detailed implementation plan based on collaborative discussion results.
|
||||||
|
|
||||||
|
## Output Schema Reference
|
||||||
|
Execute: cat ~/.claude/workflows/cli-templates/schemas/plan-json-schema.json
|
||||||
|
|
||||||
|
## Project Context (MANDATORY - Read Both Files)
|
||||||
|
1. Read: .workflow/project-tech.json (technology stack, architecture)
|
||||||
|
2. Read: .workflow/project-guidelines.json (user-defined constraints)
|
||||||
|
|
||||||
|
## Discussion Results
|
||||||
|
|
||||||
|
### Task Description
|
||||||
|
${taskDescription}
|
||||||
|
|
||||||
|
### Selected Solution
|
||||||
|
**Name**: ${selectedSolution.name}
|
||||||
|
**Description**: ${selectedSolution.description}
|
||||||
|
**Effort**: ${selectedSolution.effort}
|
||||||
|
**Risk**: ${selectedSolution.risk}
|
||||||
|
|
||||||
|
**Pros**:
|
||||||
|
${selectedSolution.pros.map(p => `- ${p}`).join('\n')}
|
||||||
|
|
||||||
|
**Cons**:
|
||||||
|
${selectedSolution.cons.map(c => `- ${c}`).join('\n')}
|
||||||
|
|
||||||
|
**Affected Files**:
|
||||||
|
${selectedSolution.affected_files.map(f => `- ${f.file}:${f.line} - ${f.reason}`).join('\n')}
|
||||||
|
|
||||||
|
### Analysis Consensus
|
||||||
|
${finalSynthesis.consensus_points.map(p => `- ${p}`).join('\n')}
|
||||||
|
|
||||||
|
### User Constraints
|
||||||
|
${userDecision.constraints ? JSON.stringify(userDecision.constraints) : 'None specified'}
|
||||||
|
|
||||||
|
### ACE Context Summary
|
||||||
|
Relevant files: ${contextPackage.relevant_files.slice(0, 15).join(', ')}
|
||||||
|
Detected patterns: ${contextPackage.detected_patterns.join(', ')}
|
||||||
|
|
||||||
|
## Output Requirements
|
||||||
|
|
||||||
|
### 1. IMPL_PLAN.md (Documentation)
|
||||||
|
Write: ${sessionFolder}/IMPL_PLAN.md
|
||||||
|
|
||||||
|
Structure:
|
||||||
|
\`\`\`markdown
|
||||||
|
# Implementation Plan: {Task Title}
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
- **Task**: {description}
|
||||||
|
- **Approach**: {selected solution name}
|
||||||
|
- **Complexity**: {Low/Medium/High}
|
||||||
|
- **Generated**: {timestamp}
|
||||||
|
|
||||||
|
## Background & Decision Rationale
|
||||||
|
{Why this approach was chosen, key trade-offs considered}
|
||||||
|
|
||||||
|
## Implementation Steps
|
||||||
|
|
||||||
|
### Step 1: {Title}
|
||||||
|
**Objective**: {what this step achieves}
|
||||||
|
**Files**:
|
||||||
|
- \`path/to/file.ts:line\` - {change description}
|
||||||
|
|
||||||
|
**Actions**:
|
||||||
|
1. {specific action}
|
||||||
|
2. {specific action}
|
||||||
|
|
||||||
|
**Verification**: {how to verify this step is complete}
|
||||||
|
|
||||||
|
### Step 2: ...
|
||||||
|
|
||||||
|
## File Manifest
|
||||||
|
| File | Lines | Change Type | Description |
|
||||||
|
|------|-------|-------------|-------------|
|
||||||
|
| ... | ... | ... | ... |
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
1. {criterion with verification method}
|
||||||
|
2. ...
|
||||||
|
|
||||||
|
## Risk Mitigation
|
||||||
|
| Risk | Mitigation Strategy |
|
||||||
|
|------|---------------------|
|
||||||
|
| ... | ... |
|
||||||
|
|
||||||
|
## Dependencies & Prerequisites
|
||||||
|
- {prerequisite 1}
|
||||||
|
- {prerequisite 2}
|
||||||
|
\`\`\`
|
||||||
|
|
||||||
|
### 2. plan.json (Structured Plan)
|
||||||
|
Write: ${sessionFolder}/plan.json
|
||||||
|
|
||||||
|
Follow schema from plan-json-schema.json. Key requirements:
|
||||||
|
- tasks: 2-7 structured tasks (group by feature/module, NOT by file)
|
||||||
|
- Each task includes: id, title, description, scope, files, depends_on, execution_group
|
||||||
|
- _metadata.source: "collaborative-discussion"
|
||||||
|
- _metadata.session_id: "${sessionId}"
|
||||||
|
|
||||||
|
## Task Grouping Rules
|
||||||
|
1. **Group by feature**: All changes for one feature = one task
|
||||||
|
2. **Substantial tasks**: Each task = 15-60 minutes of work
|
||||||
|
3. **True dependencies only**: Use depends_on only when Task B needs Task A's output
|
||||||
|
4. **Prefer parallel**: Most tasks should be independent
|
||||||
|
|
||||||
|
## Success Criteria
|
||||||
|
- [ ] IMPL_PLAN.md written with complete documentation
|
||||||
|
- [ ] plan.json follows schema exactly
|
||||||
|
- [ ] All affected files have line numbers
|
||||||
|
- [ ] Acceptance criteria are testable
|
||||||
|
- [ ] Tasks are properly grouped (not one per file)
|
||||||
|
`
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
**Step 5.3: Display Generated Plan**
|
||||||
|
```javascript
|
||||||
|
// Read generated outputs
|
||||||
|
const implPlan = Read(`${sessionFolder}/IMPL_PLAN.md`)
|
||||||
|
const planJson = JSON.parse(Read(`${sessionFolder}/plan.json`))
|
||||||
|
|
||||||
|
console.log(`
|
||||||
|
## Plan Generated Successfully
|
||||||
|
|
||||||
|
### Documentation
|
||||||
|
${implPlan}
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Structured Plan Summary
|
||||||
|
**Tasks**: ${planJson.tasks.length}
|
||||||
|
**Complexity**: ${planJson.complexity}
|
||||||
|
**Estimated Time**: ${planJson.estimated_time}
|
||||||
|
|
||||||
|
| # | Task | Scope | Dependencies |
|
||||||
|
|---|------|-------|--------------|
|
||||||
|
${planJson.tasks.map((t, i) =>
|
||||||
|
`| ${i+1} | ${t.title} | ${t.scope} | ${t.depends_on?.join(', ') || 'None'} |`
|
||||||
|
).join('\n')}
|
||||||
|
`)
|
||||||
|
|
||||||
|
// Update session state
|
||||||
|
sessionState.phase = 'plan-generated'
|
||||||
|
sessionState.artifacts = {
|
||||||
|
impl_plan: `${sessionFolder}/IMPL_PLAN.md`,
|
||||||
|
plan_json: `${sessionFolder}/plan.json`
|
||||||
|
}
|
||||||
|
Write(`${sessionFolder}/session-state.json`, JSON.stringify(sessionState, null, 2))
|
||||||
|
```
|
||||||
|
|
||||||
|
**Step 5.4: Confirm & Hand off to Execution**
|
||||||
|
```javascript
|
||||||
|
const executeDecision = await AskUserQuestion({
|
||||||
|
questions: [{
|
||||||
|
question: `Plan generated (${planJson.tasks.length} tasks). Proceed to execution?`,
|
||||||
|
header: "Execute",
|
||||||
|
multiSelect: false,
|
||||||
|
options: [
|
||||||
|
{ label: "Execute Now (Recommended)", description: "Hand off to /workflow:lite-execute" },
|
||||||
|
{ label: "Review First", description: "Review plan files before execution" },
|
||||||
|
{ label: "Modify Plan", description: "Adjust plan before execution" },
|
||||||
|
{ label: "Save Only", description: "Save plan without execution" }
|
||||||
|
]
|
||||||
|
}]
|
||||||
|
})
|
||||||
|
|
||||||
|
if (executeDecision === 'Execute Now') {
|
||||||
|
// Build execution context
|
||||||
|
const executionContext = {
|
||||||
|
planObject: planJson,
|
||||||
|
explorationsContext: contextPackage,
|
||||||
|
clarificationContext: sessionState.user_decisions,
|
||||||
|
originalUserInput: taskDescription,
|
||||||
|
executionMethod: 'Agent', // Default to Agent execution
|
||||||
|
session: {
|
||||||
|
id: sessionId,
|
||||||
|
folder: sessionFolder,
|
||||||
|
artifacts: {
|
||||||
|
impl_plan: `${sessionFolder}/IMPL_PLAN.md`,
|
||||||
|
plan_json: `${sessionFolder}/plan.json`,
|
||||||
|
session_state: `${sessionFolder}/session-state.json`
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update state and hand off
|
||||||
|
sessionState.phase = 'executing'
|
||||||
|
Write(`${sessionFolder}/session-state.json`, JSON.stringify(sessionState, null, 2))
|
||||||
|
|
||||||
|
console.log(`
|
||||||
|
## Handing off to lite-execute
|
||||||
|
|
||||||
|
Session: ${sessionId}
|
||||||
|
Tasks: ${planJson.tasks.length}
|
||||||
|
`)
|
||||||
|
|
||||||
|
// Hand off to lite-execute
|
||||||
|
SlashCommand(command="/workflow:lite-execute --in-memory")
|
||||||
|
|
||||||
|
} else if (executeDecision === 'Review First') {
|
||||||
|
console.log(`
|
||||||
|
## Plan Files Ready for Review
|
||||||
|
|
||||||
|
- Documentation: ${sessionFolder}/IMPL_PLAN.md
|
||||||
|
- Structured Plan: ${sessionFolder}/plan.json
|
||||||
|
|
||||||
|
Run \`/workflow:lite-execute --session=${sessionId}\` when ready.
|
||||||
|
`)
|
||||||
|
|
||||||
|
} else if (executeDecision === 'Modify Plan') {
|
||||||
|
// Return to Phase 4 with modification request
|
||||||
|
sessionState.phase = 'awaiting-decision'
|
||||||
|
console.log('Returning to decision phase for plan modification...')
|
||||||
|
|
||||||
|
} else {
|
||||||
|
console.log(`
|
||||||
|
## Plan Saved
|
||||||
|
|
||||||
|
Session: ${sessionId}
|
||||||
|
Location: ${sessionFolder}/
|
||||||
|
|
||||||
|
Files:
|
||||||
|
- IMPL_PLAN.md (documentation)
|
||||||
|
- plan.json (structured plan)
|
||||||
|
- session-state.json (full context)
|
||||||
|
|
||||||
|
To execute later: /workflow:lite-execute --session=${sessionId}
|
||||||
|
`)
|
||||||
|
sessionState.phase = 'complete'
|
||||||
|
}
|
||||||
|
|
||||||
|
Write(`${sessionFolder}/session-state.json`, JSON.stringify(sessionState, null, 2))
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Session Folder Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
.workflow/.multi-cli-plan/{MCP-task-slug-YYYY-MM-DD}/
|
||||||
|
|-- session-state.json # Session state with all rounds and decisions
|
||||||
|
|-- rounds/
|
||||||
|
| |-- 1/
|
||||||
|
| | +-- synthesis.json # Round 1 analysis synthesis
|
||||||
|
| |-- 2/
|
||||||
|
| | +-- synthesis.json # Round 2 analysis synthesis
|
||||||
|
| +-- .../
|
||||||
|
|-- IMPL_PLAN.md # Implementation plan documentation
|
||||||
|
+-- plan.json # Structured plan for lite-execute
|
||||||
|
```
|
||||||
|
|
||||||
|
## Key Features
|
||||||
|
|
||||||
|
### 1. Agent-Orchestrator Separation
|
||||||
|
|
||||||
|
**Orchestrator (this command)** only handles:
|
||||||
|
- Task delegation to agents
|
||||||
|
- Reading agent output files
|
||||||
|
- User interaction and decisions
|
||||||
|
- Session state management
|
||||||
|
|
||||||
|
**Agent (cli-discuss-agent)** handles:
|
||||||
|
- Multi-CLI execution (Gemini, Codex, Qwen)
|
||||||
|
- Cross-verification between CLI outputs
|
||||||
|
- Solution synthesis and ranking
|
||||||
|
- Writing structured output files
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─────────────────────────────────────────────────────────────┐
|
||||||
|
│ ORCHESTRATOR │
|
||||||
|
│ (multi-cli-plan.md - decision layer) │
|
||||||
|
├─────────────────────────────────────────────────────────────┤
|
||||||
|
│ │
|
||||||
|
│ 1. Delegate → Task(cli-discuss-agent) │
|
||||||
|
│ 2. Wait for completion │
|
||||||
|
│ 3. Read → synthesis.json │
|
||||||
|
│ 4. Display → User │
|
||||||
|
│ 5. Collect → Decision │
|
||||||
|
│ 6. Loop or proceed │
|
||||||
|
│ │
|
||||||
|
└─────────────────────────────────────────────────────────────┘
|
||||||
|
│
|
||||||
|
▼
|
||||||
|
┌─────────────────────────────────────────────────────────────┐
|
||||||
|
│ CLI-DISCUSS-AGENT │
|
||||||
|
│ (analysis layer) │
|
||||||
|
├─────────────────────────────────────────────────────────────┤
|
||||||
|
│ │
|
||||||
|
│ Gemini CLI ──┐ │
|
||||||
|
│ ├──→ Cross-Verify ──→ Synthesize │
|
||||||
|
│ Codex CLI ───┘ │ │
|
||||||
|
│ ▼ │
|
||||||
|
│ synthesis.json │
|
||||||
|
│ │
|
||||||
|
└─────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Multi-CLI Cross-Verification
|
||||||
|
|
||||||
|
Agent invokes multiple CLI tools and cross-verifies:
|
||||||
|
- **Gemini**: Deep code analysis, pattern recognition
|
||||||
|
- **Codex**: Implementation verification, code generation feasibility
|
||||||
|
- **Qwen** (fallback): Alternative perspective
|
||||||
|
|
||||||
|
Cross-verification identifies:
|
||||||
|
- Agreements (high confidence points)
|
||||||
|
- Disagreements (requiring resolution)
|
||||||
|
- Unique insights from each tool
|
||||||
|
|
||||||
|
### 3. User-Driven Decision Points
|
||||||
|
|
||||||
|
Every analysis cycle ends with user decision:
|
||||||
|
- Approve and proceed to planning
|
||||||
|
- Request more analysis with feedback
|
||||||
|
- Adjust requirements or direction
|
||||||
|
- View detailed agent output files
|
||||||
|
|
||||||
|
### 4. Iterative Convergence
|
||||||
|
|
||||||
|
Each round builds on previous findings:
|
||||||
|
- Round 1: Initial exploration, identify major approaches
|
||||||
|
- Round 2: Deep dive into promising approaches, resolve conflicts
|
||||||
|
- Round 3: Final refinement, edge case analysis
|
||||||
|
|
||||||
|
Agent calculates convergence score (0.0-1.0) and recommends:
|
||||||
|
- `converged`: Ready for planning
|
||||||
|
- `continue`: More analysis needed
|
||||||
|
- `user_input_needed`: Clarification required
|
||||||
|
|
||||||
|
### 5. Trade-off Transparency
|
||||||
|
|
||||||
|
Each solution option includes explicit trade-offs:
|
||||||
|
- Effort (low/medium/high)
|
||||||
|
- Risk assessment
|
||||||
|
- Maintainability impact
|
||||||
|
- Performance considerations
|
||||||
|
- Affected files with line numbers
|
||||||
|
- Source CLI(s) that proposed the solution
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
| Error | Resolution |
|
||||||
|
|-------|------------|
|
||||||
|
| ACE search fails | Fall back to Glob/Grep for file discovery |
|
||||||
|
| Agent fails to produce synthesis.json | Retry agent with simpler context |
|
||||||
|
| CLI tool timeout (in agent) | Agent uses fallback chain: gemini → codex → qwen |
|
||||||
|
| No convergence after max rounds | Present best available options, flag uncertainty |
|
||||||
|
| synthesis.json parse error | Agent retries with degraded mode |
|
||||||
|
| User cancels | Save session state for later resumption |
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
| Flag | Default | Description |
|
||||||
|
|------|---------|-------------|
|
||||||
|
| `--max-rounds` | 3 | Maximum analysis rounds before forcing decision |
|
||||||
|
| `--tools` | gemini,codex | CLI tools to use for analysis |
|
||||||
|
| `--auto-execute` | false | Auto-execute after plan approval |
|
||||||
|
| `--save-context` | true | Persist ACE context for resumption |
|
||||||
|
|
||||||
|
## Best Practices
|
||||||
|
|
||||||
|
1. **Be Specific**: More detailed task descriptions lead to better initial context gathering
|
||||||
|
2. **Provide Feedback**: Use clarification rounds to narrow down requirements
|
||||||
|
3. **Trust the Process**: Allow multiple rounds for complex tasks
|
||||||
|
4. **Review Trade-offs**: Carefully consider pros/cons of each solution option
|
||||||
|
5. **Iterate**: Don't hesitate to request additional analysis if uncertain
|
||||||
|
6. **Review Plan**: Check IMPL_PLAN.md before execution for complete understanding
|
||||||
|
|
||||||
|
## Output Artifacts
|
||||||
|
|
||||||
|
| File | Purpose | Producer |
|
||||||
|
|------|---------|----------|
|
||||||
|
| `rounds/{n}/synthesis.json` | Round analysis results | cli-discuss-agent |
|
||||||
|
| `IMPL_PLAN.md` | Human-readable documentation | cli-lite-planning-agent |
|
||||||
|
| `plan.json` | Structured tasks for execution | cli-lite-planning-agent |
|
||||||
|
| `session-state.json` | Session tracking | Orchestrator |
|
||||||
|
|
||||||
|
**synthesis.json schema** (produced by cli-discuss-agent):
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"round": 1,
|
||||||
|
"cli_analyses": [...],
|
||||||
|
"cross_verification": { "agreements": [], "disagreements": [] },
|
||||||
|
"solutions": [{ "name": "...", "pros": [], "cons": [], "effort": "..." }],
|
||||||
|
"convergence": { "score": 0.85, "recommendation": "converged" },
|
||||||
|
"clarification_questions": []
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Related Commands
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Resume a saved multi-cli-plan session
|
||||||
|
/workflow:lite-execute --session=MCP-xxx
|
||||||
|
|
||||||
|
# For simpler tasks without multi-round discussion
|
||||||
|
/workflow:lite-plan "task description"
|
||||||
|
|
||||||
|
# For issue-driven discovery
|
||||||
|
/issue:discover-by-prompt "find issues"
|
||||||
|
|
||||||
|
# View generated plan
|
||||||
|
cat .workflow/.multi-cli-plan/{session-id}/IMPL_PLAN.md
|
||||||
|
```
|
||||||
@@ -416,5 +416,107 @@ export async function handleSystemRoutes(ctx: SystemRouteContext): Promise<boole
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// API: File dialog - list directory contents for file browser
|
||||||
|
if (pathname === '/api/dialog/browse' && req.method === 'POST') {
|
||||||
|
handlePostRequest(req, res, async (body) => {
|
||||||
|
const { path: browsePath, showHidden } = body as {
|
||||||
|
path?: string;
|
||||||
|
showHidden?: boolean;
|
||||||
|
};
|
||||||
|
|
||||||
|
const os = await import('os');
|
||||||
|
const path = await import('path');
|
||||||
|
const fs = await import('fs');
|
||||||
|
|
||||||
|
// Default to home directory
|
||||||
|
let targetPath = browsePath || os.homedir();
|
||||||
|
|
||||||
|
// Expand ~ to home directory
|
||||||
|
if (targetPath.startsWith('~')) {
|
||||||
|
targetPath = path.join(os.homedir(), targetPath.slice(1));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resolve to absolute path
|
||||||
|
if (!path.isAbsolute(targetPath)) {
|
||||||
|
targetPath = path.resolve(targetPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const stat = await fs.promises.stat(targetPath);
|
||||||
|
if (!stat.isDirectory()) {
|
||||||
|
return { error: 'Path is not a directory', status: 400 };
|
||||||
|
}
|
||||||
|
|
||||||
|
const entries = await fs.promises.readdir(targetPath, { withFileTypes: true });
|
||||||
|
const items = entries
|
||||||
|
.filter(entry => showHidden || !entry.name.startsWith('.'))
|
||||||
|
.map(entry => ({
|
||||||
|
name: entry.name,
|
||||||
|
path: path.join(targetPath, entry.name),
|
||||||
|
isDirectory: entry.isDirectory(),
|
||||||
|
isFile: entry.isFile()
|
||||||
|
}))
|
||||||
|
.sort((a, b) => {
|
||||||
|
// Directories first, then files
|
||||||
|
if (a.isDirectory && !b.isDirectory) return -1;
|
||||||
|
if (!a.isDirectory && b.isDirectory) return 1;
|
||||||
|
return a.name.localeCompare(b.name);
|
||||||
|
});
|
||||||
|
|
||||||
|
return {
|
||||||
|
currentPath: targetPath,
|
||||||
|
parentPath: path.dirname(targetPath),
|
||||||
|
items,
|
||||||
|
homePath: os.homedir()
|
||||||
|
};
|
||||||
|
} catch (err) {
|
||||||
|
return { error: 'Cannot access directory: ' + (err as Error).message, status: 400 };
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// API: File dialog - select file (validate path exists)
|
||||||
|
if (pathname === '/api/dialog/open-file' && req.method === 'POST') {
|
||||||
|
handlePostRequest(req, res, async (body) => {
|
||||||
|
const { path: filePath } = body as { path?: string };
|
||||||
|
|
||||||
|
if (!filePath) {
|
||||||
|
return { error: 'Path is required', status: 400 };
|
||||||
|
}
|
||||||
|
|
||||||
|
const os = await import('os');
|
||||||
|
const path = await import('path');
|
||||||
|
const fs = await import('fs');
|
||||||
|
|
||||||
|
let targetPath = filePath;
|
||||||
|
|
||||||
|
// Expand ~ to home directory
|
||||||
|
if (targetPath.startsWith('~')) {
|
||||||
|
targetPath = path.join(os.homedir(), targetPath.slice(1));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resolve to absolute path
|
||||||
|
if (!path.isAbsolute(targetPath)) {
|
||||||
|
targetPath = path.resolve(targetPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
await fs.promises.access(targetPath, fs.constants.R_OK);
|
||||||
|
const stat = await fs.promises.stat(targetPath);
|
||||||
|
|
||||||
|
return {
|
||||||
|
success: true,
|
||||||
|
path: targetPath,
|
||||||
|
isFile: stat.isFile(),
|
||||||
|
isDirectory: stat.isDirectory()
|
||||||
|
};
|
||||||
|
} catch {
|
||||||
|
return { error: 'File not accessible', status: 404 };
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -661,3 +661,120 @@
|
|||||||
color: hsl(var(--success));
|
color: hsl(var(--success));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* ========================================
 * File Browser Modal
 * ======================================== */

/* Modal shell: fixed width (capped to the viewport), laid out as a flex
   column so the listing area can grow while toolbar/footer keep height. */
.file-browser-modal {
  width: 600px;
  max-width: 90vw;
  max-height: 80vh;
  display: flex;
  flex-direction: column;
}

/* Navigation bar above the listing: up/home buttons, path box, toggle. */
.file-browser-toolbar {
  display: flex;
  align-items: center;
  gap: 0.5rem;
  padding: 0.5rem;
  background: hsl(var(--muted) / 0.3);
  border-radius: 0.375rem;
  margin-bottom: 0.75rem;
}

/* Keep the small toolbar buttons at their natural size. */
.file-browser-toolbar .btn-sm {
  flex-shrink: 0;
  padding: 0.375rem;
}

/* Read-only current-path display; monospace for path legibility. */
.file-browser-path {
  flex: 1;
  padding: 0.375rem 0.5rem;
  font-family: monospace;
  font-size: 0.75rem;
  background: hsl(var(--background));
  border: 1px solid hsl(var(--border));
  border-radius: 0.25rem;
  color: hsl(var(--foreground));
}

/* "Show hidden files" checkbox + label. */
.file-browser-hidden-toggle {
  display: flex;
  align-items: center;
  gap: 0.375rem;
  font-size: 0.75rem;
  color: hsl(var(--muted-foreground));
  cursor: pointer;
  white-space: nowrap;
}

.file-browser-hidden-toggle input {
  cursor: pointer;
}

/* Scrollable directory listing. */
.file-browser-list {
  flex: 1;
  min-height: 300px;
  max-height: 400px;
  overflow-y: auto;
  border: 1px solid hsl(var(--border));
  border-radius: 0.375rem;
  background: hsl(var(--background));
}

/* Centered placeholder states shown instead of the item list. */
.file-browser-loading,
.file-browser-empty,
.file-browser-error {
  display: flex;
  align-items: center;
  justify-content: center;
  height: 100%;
  min-height: 200px;
  color: hsl(var(--muted-foreground));
  font-size: 0.875rem;
}

.file-browser-error {
  color: hsl(var(--destructive));
}

/* One row per directory entry (icon + name). */
.file-browser-item {
  display: flex;
  align-items: center;
  gap: 0.5rem;
  padding: 0.5rem 0.75rem;
  cursor: pointer;
  border-bottom: 1px solid hsl(var(--border) / 0.5);
  transition: background-color 0.15s;
}

.file-browser-item:last-child {
  border-bottom: none;
}

.file-browser-item:hover {
  background: hsl(var(--muted) / 0.5);
}

/* Currently selected file (set by the click handler in JS). */
.file-browser-item.selected {
  background: hsl(var(--primary) / 0.15);
  border-color: hsl(var(--primary) / 0.3);
}

/* Directories are tinted with the primary color, files use foreground. */
.file-browser-item.is-directory {
  color: hsl(var(--primary));
}

.file-browser-item.is-file {
  color: hsl(var(--foreground));
}

/* Entry name, truncated with an ellipsis when too long. */
.file-browser-item-name {
  flex: 1;
  font-size: 0.8125rem;
  overflow: hidden;
  text-overflow: ellipsis;
  white-space: nowrap;
}
|
||||||
|
|
||||||
|
|||||||
@@ -268,6 +268,12 @@ const i18n = {
|
|||||||
'cli.envFilePlaceholder': 'Path to .env file (e.g., ~/.gemini-env or C:/Users/xxx/.env)',
|
'cli.envFilePlaceholder': 'Path to .env file (e.g., ~/.gemini-env or C:/Users/xxx/.env)',
|
||||||
'cli.envFileHint': 'Load environment variables (e.g., API keys) before CLI execution. Supports ~ for home directory.',
|
'cli.envFileHint': 'Load environment variables (e.g., API keys) before CLI execution. Supports ~ for home directory.',
|
||||||
'cli.envFileBrowse': 'Browse',
|
'cli.envFileBrowse': 'Browse',
|
||||||
|
'cli.fileBrowser': 'File Browser',
|
||||||
|
'cli.fileBrowserSelect': 'Select',
|
||||||
|
'cli.fileBrowserCancel': 'Cancel',
|
||||||
|
'cli.fileBrowserUp': 'Parent Directory',
|
||||||
|
'cli.fileBrowserHome': 'Home',
|
||||||
|
'cli.fileBrowserShowHidden': 'Show hidden files',
|
||||||
|
|
||||||
// CodexLens Configuration
|
// CodexLens Configuration
|
||||||
'codexlens.config': 'CodexLens Configuration',
|
'codexlens.config': 'CodexLens Configuration',
|
||||||
@@ -2442,6 +2448,12 @@ const i18n = {
|
|||||||
'cli.envFilePlaceholder': '.env 文件路径(如 ~/.gemini-env 或 C:/Users/xxx/.env)',
|
'cli.envFilePlaceholder': '.env 文件路径(如 ~/.gemini-env 或 C:/Users/xxx/.env)',
|
||||||
'cli.envFileHint': '在 CLI 执行前加载环境变量(如 API 密钥)。支持 ~ 表示用户目录。',
|
'cli.envFileHint': '在 CLI 执行前加载环境变量(如 API 密钥)。支持 ~ 表示用户目录。',
|
||||||
'cli.envFileBrowse': '浏览',
|
'cli.envFileBrowse': '浏览',
|
||||||
|
'cli.fileBrowser': '文件浏览器',
|
||||||
|
'cli.fileBrowserSelect': '选择',
|
||||||
|
'cli.fileBrowserCancel': '取消',
|
||||||
|
'cli.fileBrowserUp': '上级目录',
|
||||||
|
'cli.fileBrowserHome': '主目录',
|
||||||
|
'cli.fileBrowserShowHidden': '显示隐藏文件',
|
||||||
|
|
||||||
// CodexLens 配置
|
// CodexLens 配置
|
||||||
'codexlens.config': 'CodexLens 配置',
|
'codexlens.config': 'CodexLens 配置',
|
||||||
|
|||||||
@@ -554,6 +554,241 @@ function buildToolConfigModalContent(tool, config, models, status) {
|
|||||||
'</div>';
|
'</div>';
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ========== File Browser Modal ==========

// Shared state for the file-browser modal:
//   currentPath - absolute path of the directory currently listed
//   showHidden  - whether dot-files are included in the listing
//   onSelect    - callback invoked with the chosen path when confirmed
var fileBrowserState = {
  currentPath: '',
  showHidden: false,
  onSelect: null
};
|
||||||
|
|
||||||
|
/**
 * Open the file-browser modal and invoke `onSelect(path)` when the user
 * confirms a file selection.
 * @param {function(string): void} onSelect - called with the chosen absolute path
 */
function showFileBrowserModal(onSelect) {
  // Fix: if a browser modal is already open, remove it first so we never
  // end up with duplicate element IDs — every lookup below (and in the
  // event wiring) relies on getElementById finding a single element.
  var existing = document.getElementById('fileBrowserOverlay');
  if (existing) {
    existing.remove();
  }

  fileBrowserState.onSelect = onSelect;
  fileBrowserState.showHidden = false;

  // Create modal overlay
  var overlay = document.createElement('div');
  overlay.id = 'fileBrowserOverlay';
  overlay.className = 'modal-overlay';
  overlay.innerHTML = buildFileBrowserModalContent();
  document.body.appendChild(overlay);

  // Load initial directory ('' asks the server for the home directory)
  loadFileBrowserDirectory('');

  // Wire up toolbar/footer events
  initFileBrowserEvents();

  // Render lucide icons if the library is loaded
  if (window.lucide) lucide.createIcons();
}
|
||||||
|
|
||||||
|
/**
 * Build the inner HTML for the file-browser modal: header, toolbar,
 * listing area and footer. All labels come from the i18n table via t().
 * @returns {string} HTML markup for the modal content
 */
function buildFileBrowserModalContent() {
  var header = '<div class="modal-header">' +
    '<h3><i data-lucide="folder-open" class="w-4 h-4"></i> ' + t('cli.fileBrowser') + '</h3>' +
    '<button class="modal-close" id="fileBrowserCloseBtn">×</button>' +
    '</div>';

  var toolbar = '<div class="file-browser-toolbar">' +
    '<button class="btn-sm btn-outline" id="fileBrowserUpBtn" title="' + t('cli.fileBrowserUp') + '">' +
    '<i data-lucide="arrow-up" class="w-3.5 h-3.5"></i>' +
    '</button>' +
    '<button class="btn-sm btn-outline" id="fileBrowserHomeBtn" title="' + t('cli.fileBrowserHome') + '">' +
    '<i data-lucide="home" class="w-3.5 h-3.5"></i>' +
    '</button>' +
    '<input type="text" id="fileBrowserPathInput" class="file-browser-path" placeholder="/" readonly />' +
    '<label class="file-browser-hidden-toggle">' +
    '<input type="checkbox" id="fileBrowserShowHidden" />' +
    '<span>' + t('cli.fileBrowserShowHidden') + '</span>' +
    '</label>' +
    '</div>';

  var list = '<div class="file-browser-list" id="fileBrowserList">' +
    '<div class="file-browser-loading"><i data-lucide="loader-2" class="w-5 h-5 animate-spin"></i></div>' +
    '</div>';

  var footer = '<div class="modal-footer">' +
    '<button class="btn btn-outline" id="fileBrowserCancelBtn">' + t('cli.fileBrowserCancel') + '</button>' +
    '<button class="btn btn-primary" id="fileBrowserSelectBtn" disabled>' +
    '<i data-lucide="check" class="w-3.5 h-3.5"></i> ' + t('cli.fileBrowserSelect') +
    '</button>' +
    '</div>';

  return '<div class="modal-content file-browser-modal">' +
    header +
    '<div class="modal-body">' + toolbar + list + '</div>' +
    footer +
    '</div>';
}
|
||||||
|
|
||||||
|
/**
 * Fetch and render the contents of `path` in the file-browser modal.
 * An empty string asks the server for the user's home directory.
 * @param {string} path - directory to list ('' = server-side default/home)
 */
async function loadFileBrowserDirectory(path) {
  var listContainer = document.getElementById('fileBrowserList');
  var pathInput = document.getElementById('fileBrowserPathInput');

  // Fix: tag each request so responses that arrive out of order (rapid
  // navigation) cannot overwrite the listing of a newer request.
  var requestId = (loadFileBrowserDirectory._seq = (loadFileBrowserDirectory._seq || 0) + 1);

  // Fix: a previously selected file is no longer visible once we navigate,
  // so drop the stale selection from the Select button.
  var selectBtn = document.getElementById('fileBrowserSelectBtn');
  if (selectBtn) {
    selectBtn.disabled = true;
    selectBtn.removeAttribute('data-selected-path');
  }

  if (listContainer) {
    listContainer.innerHTML = '<div class="file-browser-loading"><i data-lucide="loader-2" class="w-5 h-5 animate-spin"></i></div>';
    if (window.lucide) lucide.createIcons();
  }

  try {
    var response = await fetch('/api/dialog/browse', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ path: path, showHidden: fileBrowserState.showHidden })
    });

    if (!response.ok) {
      throw new Error('Failed to load directory');
    }

    var data = await response.json();

    // A newer request was issued while this one was in flight; discard it.
    if (requestId !== loadFileBrowserDirectory._seq) return;

    fileBrowserState.currentPath = data.currentPath;

    if (pathInput) {
      pathInput.value = data.currentPath;
    }

    renderFileBrowserItems(data.items);
  } catch (err) {
    console.error('Failed to load directory:', err);
    if (requestId === loadFileBrowserDirectory._seq && listContainer) {
      listContainer.innerHTML = '<div class="file-browser-error">Failed to load directory</div>';
    }
  }
}
|
||||||
|
|
||||||
|
/**
 * Render directory entries into the file-browser list and attach
 * click (select/navigate) and double-click (confirm) handlers.
 * @param {Array<{name: string, path: string, isDirectory: boolean}>} items
 */
function renderFileBrowserItems(items) {
  var listContainer = document.getElementById('fileBrowserList');
  if (!listContainer) return;

  if (!items || items.length === 0) {
    listContainer.innerHTML = '<div class="file-browser-empty">Empty directory</div>';
    return;
  }

  // Fix: build rows with DOM APIs (textContent / setAttribute) instead of
  // string-built innerHTML, so file names containing quotes, angle brackets
  // or other markup can never break out of the attribute/element context.
  listContainer.innerHTML = '';

  items.forEach(function(item) {
    var row = document.createElement('div');
    row.className = 'file-browser-item' + (item.isDirectory ? ' is-directory' : ' is-file');
    row.setAttribute('data-path', item.path);
    row.setAttribute('data-is-dir', String(item.isDirectory));

    var icon = document.createElement('i');
    icon.setAttribute('data-lucide', item.isDirectory ? 'folder' : 'file');
    icon.className = 'w-4 h-4';
    row.appendChild(icon);

    var label = document.createElement('span');
    label.className = 'file-browser-item-name';
    label.textContent = item.name;
    row.appendChild(label);

    listContainer.appendChild(row);
  });

  // Initialize icons
  if (window.lucide) lucide.createIcons();

  // Add click handlers
  listContainer.querySelectorAll('.file-browser-item').forEach(function(el) {
    el.onclick = function() {
      var isDir = el.getAttribute('data-is-dir') === 'true';
      var path = el.getAttribute('data-path');

      if (isDir) {
        // Navigate into directory
        loadFileBrowserDirectory(path);
      } else {
        // Mark the clicked file as the current selection
        listContainer.querySelectorAll('.file-browser-item').forEach(function(item) {
          item.classList.remove('selected');
        });
        el.classList.add('selected');

        // Enable the Select button and remember the chosen path on it
        var selectBtn = document.getElementById('fileBrowserSelectBtn');
        if (selectBtn) {
          selectBtn.disabled = false;
          selectBtn.setAttribute('data-selected-path', path);
        }
      }
    };

    // Double-click: enter a directory, or select a file and close
    el.ondblclick = function() {
      var isDir = el.getAttribute('data-is-dir') === 'true';
      var path = el.getAttribute('data-path');

      if (isDir) {
        loadFileBrowserDirectory(path);
      } else {
        closeFileBrowserModal(path);
      }
    };
  });
}
|
||||||
|
|
||||||
|
/**
 * Wire up all interactive controls of the file-browser modal:
 * close/cancel/select buttons, parent/home navigation, the hidden-file
 * toggle, and click-outside-to-dismiss.
 */
function initFileBrowserEvents() {
  // Close button
  var closeBtn = document.getElementById('fileBrowserCloseBtn');
  if (closeBtn) {
    closeBtn.onclick = function() { closeFileBrowserModal(null); };
  }

  // Cancel button
  var cancelBtn = document.getElementById('fileBrowserCancelBtn');
  if (cancelBtn) {
    cancelBtn.onclick = function() { closeFileBrowserModal(null); };
  }

  // Select button - confirms the path remembered by renderFileBrowserItems
  var selectBtn = document.getElementById('fileBrowserSelectBtn');
  if (selectBtn) {
    selectBtn.onclick = function() {
      var path = selectBtn.getAttribute('data-selected-path');
      closeFileBrowserModal(path);
    };
  }

  // Up button - navigate to the parent directory
  var upBtn = document.getElementById('fileBrowserUpBtn');
  if (upBtn) {
    upBtn.onclick = function() {
      var currentPath = fileBrowserState.currentPath;
      // Drop the last path segment (handles both / and \ separators).
      var parentPath = currentPath.replace(/[/\\][^/\\]+$/, '') || '/';
      // Fix: "C:\Users" reduces to "C:", which is a *drive-relative* path
      // on Windows; restore the separator so it stays an absolute root.
      if (/^[A-Za-z]:$/.test(parentPath)) {
        parentPath += '\\';
      }
      loadFileBrowserDirectory(parentPath);
    };
  }

  // Home button - jump back to the user's home directory
  var homeBtn = document.getElementById('fileBrowserHomeBtn');
  if (homeBtn) {
    homeBtn.onclick = function() {
      loadFileBrowserDirectory('');
    };
  }

  // Show-hidden checkbox - re-list the current directory with the new flag
  var showHiddenCheckbox = document.getElementById('fileBrowserShowHidden');
  if (showHiddenCheckbox) {
    showHiddenCheckbox.onchange = function() {
      fileBrowserState.showHidden = showHiddenCheckbox.checked;
      loadFileBrowserDirectory(fileBrowserState.currentPath);
    };
  }

  // Click outside the dialog dismisses it without selecting
  var overlay = document.getElementById('fileBrowserOverlay');
  if (overlay) {
    overlay.onclick = function(e) {
      if (e.target === overlay) {
        closeFileBrowserModal(null);
      }
    };
  }
}
|
||||||
|
|
||||||
|
/**
 * Tear down the file-browser modal. When `selectedPath` is a non-empty
 * string the pending onSelect callback is invoked with it; passing null
 * means the dialog was cancelled.
 * @param {?string} selectedPath - confirmed path, or null on cancel
 */
function closeFileBrowserModal(selectedPath) {
  var overlay = document.getElementById('fileBrowserOverlay');
  if (overlay) {
    overlay.remove();
  }

  // Fix: capture and clear the callback *before* invoking it. The original
  // cleared it afterwards, which clobbered any onSelect installed by a
  // callback that re-opened the browser (showFileBrowserModal sets it).
  var callback = fileBrowserState.onSelect;
  fileBrowserState.onSelect = null;

  if (callback && selectedPath) {
    callback(selectedPath);
  }
}
|
||||||
|
|
||||||
function initToolConfigModalEvents(tool, currentConfig, models) {
|
function initToolConfigModalEvents(tool, currentConfig, models) {
|
||||||
// Local tags state (copy from config)
|
// Local tags state (copy from config)
|
||||||
var currentTags = (currentConfig.tags || []).slice();
|
var currentTags = (currentConfig.tags || []).slice();
|
||||||
@@ -754,38 +989,13 @@ function initToolConfigModalEvents(tool, currentConfig, models) {
|
|||||||
// Environment file browse button (only for gemini/qwen)
|
// Environment file browse button (only for gemini/qwen)
|
||||||
var envFileBrowseBtn = document.getElementById('envFileBrowseBtn');
|
var envFileBrowseBtn = document.getElementById('envFileBrowseBtn');
|
||||||
if (envFileBrowseBtn) {
|
if (envFileBrowseBtn) {
|
||||||
envFileBrowseBtn.onclick = async function() {
|
envFileBrowseBtn.onclick = function() {
|
||||||
try {
|
showFileBrowserModal(function(selectedPath) {
|
||||||
// Use file dialog API if available
|
var envFileInput = document.getElementById('envFileInput');
|
||||||
var response = await fetch('/api/dialog/open-file', {
|
if (envFileInput && selectedPath) {
|
||||||
method: 'POST',
|
envFileInput.value = selectedPath;
|
||||||
headers: { 'Content-Type': 'application/json' },
|
|
||||||
body: JSON.stringify({
|
|
||||||
title: t('cli.envFile'),
|
|
||||||
filters: [
|
|
||||||
{ name: 'Environment Files', extensions: ['env'] },
|
|
||||||
{ name: 'All Files', extensions: ['*'] }
|
|
||||||
],
|
|
||||||
defaultPath: ''
|
|
||||||
})
|
|
||||||
});
|
|
||||||
|
|
||||||
if (response.ok) {
|
|
||||||
var data = await response.json();
|
|
||||||
if (data.filePath) {
|
|
||||||
var envFileInput = document.getElementById('envFileInput');
|
|
||||||
if (envFileInput) {
|
|
||||||
envFileInput.value = data.filePath;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Fallback: prompt user to enter path manually
|
|
||||||
showRefreshToast('File dialog not available. Please enter path manually.', 'info');
|
|
||||||
}
|
}
|
||||||
} catch (err) {
|
});
|
||||||
console.error('Failed to open file dialog:', err);
|
|
||||||
showRefreshToast('File dialog not available. Please enter path manually.', 'info');
|
|
||||||
}
|
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user