mirror of
https://github.com/cexll/myclaude.git
synced 2026-02-09 03:09:30 +08:00
Compare commits
2 Commits
v5.4.4
...
feature_pa
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4dd735034e | ||
|
|
a08dd62b59 |
23
.github/workflows/release.yml
vendored
23
.github/workflows/release.yml
vendored
@@ -97,6 +97,11 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
path: artifacts
|
path: artifacts
|
||||||
|
|
||||||
|
- name: Setup Node.js
|
||||||
|
uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: '20'
|
||||||
|
|
||||||
- name: Prepare release files
|
- name: Prepare release files
|
||||||
run: |
|
run: |
|
||||||
mkdir -p release
|
mkdir -p release
|
||||||
@@ -104,10 +109,26 @@ jobs:
|
|||||||
cp install.sh install.bat release/
|
cp install.sh install.bat release/
|
||||||
ls -la release/
|
ls -la release/
|
||||||
|
|
||||||
|
- name: Generate release notes with git-cliff
|
||||||
|
run: |
|
||||||
|
# Install git-cliff via npx
|
||||||
|
npx git-cliff@latest --current --strip all -o release_notes.md
|
||||||
|
|
||||||
|
# Fallback if generation failed
|
||||||
|
if [ ! -s release_notes.md ]; then
|
||||||
|
echo "⚠️ Failed to generate release notes with git-cliff" > release_notes.md
|
||||||
|
echo "" >> release_notes.md
|
||||||
|
echo "## What's Changed" >> release_notes.md
|
||||||
|
echo "See commits in this release for details." >> release_notes.md
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "--- Generated Release Notes ---"
|
||||||
|
cat release_notes.md
|
||||||
|
|
||||||
- name: Create Release
|
- name: Create Release
|
||||||
uses: softprops/action-gh-release@v2
|
uses: softprops/action-gh-release@v2
|
||||||
with:
|
with:
|
||||||
files: release/*
|
files: release/*
|
||||||
generate_release_notes: true
|
body_path: release_notes.md
|
||||||
draft: false
|
draft: false
|
||||||
prerelease: false
|
prerelease: false
|
||||||
|
|||||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -1,7 +1,5 @@
|
|||||||
.claude/
|
.claude/
|
||||||
.claude-trace
|
.claude-trace
|
||||||
.DS_Store
|
|
||||||
**/.DS_Store
|
|
||||||
.venv
|
.venv
|
||||||
.pytest_cache
|
.pytest_cache
|
||||||
__pycache__
|
__pycache__
|
||||||
|
|||||||
269
README.md
269
README.md
@@ -132,59 +132,6 @@ Requirements → Architecture → Sprint Plan → Development → Review → QA
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Version Requirements
|
|
||||||
|
|
||||||
### Codex CLI
|
|
||||||
**Minimum version:** Check compatibility with your installation
|
|
||||||
|
|
||||||
The codeagent-wrapper uses these Codex CLI features:
|
|
||||||
- `codex e` - Execute commands (shorthand for `codex exec`)
|
|
||||||
- `--skip-git-repo-check` - Skip git repository validation
|
|
||||||
- `--json` - JSON stream output format
|
|
||||||
- `-C <workdir>` - Set working directory
|
|
||||||
- `resume <session_id>` - Resume previous sessions
|
|
||||||
|
|
||||||
**Verify Codex CLI is installed:**
|
|
||||||
```bash
|
|
||||||
which codex
|
|
||||||
codex --version
|
|
||||||
```
|
|
||||||
|
|
||||||
### Claude CLI
|
|
||||||
**Minimum version:** Check compatibility with your installation
|
|
||||||
|
|
||||||
Required features:
|
|
||||||
- `--output-format stream-json` - Streaming JSON output format
|
|
||||||
- `--setting-sources` - Control setting sources (prevents infinite recursion)
|
|
||||||
- `--dangerously-skip-permissions` - Skip permission prompts (use with caution)
|
|
||||||
- `-p` - Prompt input flag
|
|
||||||
- `-r <session_id>` - Resume sessions
|
|
||||||
|
|
||||||
**Security Note:** The wrapper only adds `--dangerously-skip-permissions` for Claude when explicitly enabled (e.g. `--skip-permissions` / `CODEAGENT_SKIP_PERMISSIONS=true`). Keep it disabled unless you understand the risk.
|
|
||||||
|
|
||||||
**Verify Claude CLI is installed:**
|
|
||||||
```bash
|
|
||||||
which claude
|
|
||||||
claude --version
|
|
||||||
```
|
|
||||||
|
|
||||||
### Gemini CLI
|
|
||||||
**Minimum version:** Check compatibility with your installation
|
|
||||||
|
|
||||||
Required features:
|
|
||||||
- `-o stream-json` - JSON stream output format
|
|
||||||
- `-y` - Auto-approve prompts (non-interactive mode)
|
|
||||||
- `-r <session_id>` - Resume sessions
|
|
||||||
- `-p` - Prompt input flag
|
|
||||||
|
|
||||||
**Verify Gemini CLI is installed:**
|
|
||||||
```bash
|
|
||||||
which gemini
|
|
||||||
gemini --version
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
### Modular Installation (Recommended)
|
### Modular Installation (Recommended)
|
||||||
@@ -216,39 +163,15 @@ python3 install.py --force
|
|||||||
|
|
||||||
```
|
```
|
||||||
~/.claude/
|
~/.claude/
|
||||||
├── bin/
|
├── CLAUDE.md # Core instructions and role definition
|
||||||
│ └── codeagent-wrapper # Main executable
|
├── commands/ # Slash commands (/dev, /code, etc.)
|
||||||
├── CLAUDE.md # Core instructions and role definition
|
├── agents/ # Agent definitions
|
||||||
├── commands/ # Slash commands (/dev, /code, etc.)
|
|
||||||
├── agents/ # Agent definitions
|
|
||||||
├── skills/
|
├── skills/
|
||||||
│ └── codex/
|
│ └── codex/
|
||||||
│ └── SKILL.md # Codex integration skill
|
│ └── SKILL.md # Codex integration skill
|
||||||
├── config.json # Configuration
|
└── installed_modules.json # Installation status
|
||||||
└── installed_modules.json # Installation status
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Customizing Installation Directory
|
|
||||||
|
|
||||||
By default, myclaude installs to `~/.claude`. You can customize this using the `INSTALL_DIR` environment variable:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Install to custom directory
|
|
||||||
INSTALL_DIR=/opt/myclaude bash install.sh
|
|
||||||
|
|
||||||
# Update your PATH accordingly
|
|
||||||
export PATH="/opt/myclaude/bin:$PATH"
|
|
||||||
```
|
|
||||||
|
|
||||||
**Directory Structure:**
|
|
||||||
- `$INSTALL_DIR/bin/` - codeagent-wrapper binary
|
|
||||||
- `$INSTALL_DIR/skills/` - Skill definitions
|
|
||||||
- `$INSTALL_DIR/config.json` - Configuration file
|
|
||||||
- `$INSTALL_DIR/commands/` - Slash command definitions
|
|
||||||
- `$INSTALL_DIR/agents/` - Agent definitions
|
|
||||||
|
|
||||||
**Note:** When using a custom installation directory, ensure that `$INSTALL_DIR/bin` is added to your `PATH` environment variable.
|
|
||||||
|
|
||||||
### Configuration
|
### Configuration
|
||||||
|
|
||||||
Edit `config.json` to customize:
|
Edit `config.json` to customize:
|
||||||
@@ -346,10 +269,8 @@ $Env:PATH = "$HOME\bin;$Env:PATH"
|
|||||||
```
|
```
|
||||||
|
|
||||||
```batch
|
```batch
|
||||||
REM cmd.exe - persistent for current user (use PowerShell method above instead)
|
REM cmd.exe - persistent for current user
|
||||||
REM WARNING: This expands %PATH% which includes system PATH, causing duplication
|
setx PATH "%USERPROFILE%\bin;%PATH%"
|
||||||
REM Note: Using reg add instead of setx to avoid 1024-character truncation limit
|
|
||||||
reg add "HKCU\Environment" /v Path /t REG_EXPAND_SZ /d "%USERPROFILE%\bin;%PATH%" /f
|
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
@@ -373,14 +294,11 @@ reg add "HKCU\Environment" /v Path /t REG_EXPAND_SZ /d "%USERPROFILE%\bin;%PATH%
|
|||||||
|
|
||||||
**Codex wrapper not found:**
|
**Codex wrapper not found:**
|
||||||
```bash
|
```bash
|
||||||
# Installer auto-adds PATH, check if configured
|
# Check PATH
|
||||||
if [[ ":$PATH:" != *":$HOME/.claude/bin:"* ]]; then
|
echo $PATH | grep -q "$HOME/bin" || echo 'export PATH="$HOME/bin:$PATH"' >> ~/.zshrc
|
||||||
echo "PATH not configured. Reinstalling..."
|
|
||||||
bash install.sh
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Or manually add (idempotent command)
|
# Reinstall
|
||||||
[[ ":$PATH:" != *":$HOME/.claude/bin:"* ]] && echo 'export PATH="$HOME/.claude/bin:$PATH"' >> ~/.zshrc
|
bash install.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
**Permission denied:**
|
**Permission denied:**
|
||||||
@@ -397,172 +315,11 @@ cat ~/.claude/installed_modules.json
|
|||||||
python3 install.py --module dev --force
|
python3 install.py --module dev --force
|
||||||
```
|
```
|
||||||
|
|
||||||
### Version Compatibility Issues
|
|
||||||
|
|
||||||
**Backend CLI not found:**
|
|
||||||
```bash
|
|
||||||
# Check if backend CLIs are installed
|
|
||||||
which codex
|
|
||||||
which claude
|
|
||||||
which gemini
|
|
||||||
|
|
||||||
# Install missing backends
|
|
||||||
# Codex: Follow installation instructions at https://codex.docs
|
|
||||||
# Claude: Follow installation instructions at https://claude.ai/docs
|
|
||||||
# Gemini: Follow installation instructions at https://ai.google.dev/docs
|
|
||||||
```
|
|
||||||
|
|
||||||
**Unsupported CLI flags:**
|
|
||||||
```bash
|
|
||||||
# If you see errors like "unknown flag" or "invalid option"
|
|
||||||
|
|
||||||
# Check backend CLI version
|
|
||||||
codex --version
|
|
||||||
claude --version
|
|
||||||
gemini --version
|
|
||||||
|
|
||||||
# For Codex: Ensure it supports `e`, `--skip-git-repo-check`, `--json`, `-C`, and `resume`
|
|
||||||
# For Claude: Ensure it supports `--output-format stream-json`, `--setting-sources`, `-r`
|
|
||||||
# For Gemini: Ensure it supports `-o stream-json`, `-y`, `-r`, `-p`
|
|
||||||
|
|
||||||
# Update your backend CLI to the latest version if needed
|
|
||||||
```
|
|
||||||
|
|
||||||
**JSON parsing errors:**
|
|
||||||
```bash
|
|
||||||
# If you see "failed to parse JSON output" errors
|
|
||||||
|
|
||||||
# Verify the backend outputs stream-json format
|
|
||||||
codex e --json "test task" # Should output newline-delimited JSON
|
|
||||||
claude --output-format stream-json -p "test" # Should output stream JSON
|
|
||||||
|
|
||||||
# If not, your backend CLI version may be too old or incompatible
|
|
||||||
```
|
|
||||||
|
|
||||||
**Infinite recursion with Claude backend:**
|
|
||||||
```bash
|
|
||||||
# The wrapper prevents this with `--setting-sources ""` flag
|
|
||||||
# If you still see recursion, ensure your Claude CLI supports this flag
|
|
||||||
|
|
||||||
claude --help | grep "setting-sources"
|
|
||||||
|
|
||||||
# If flag is not supported, upgrade Claude CLI
|
|
||||||
```
|
|
||||||
|
|
||||||
**Session resume failures:**
|
|
||||||
```bash
|
|
||||||
# Check if session ID is valid
|
|
||||||
codex history # List recent sessions
|
|
||||||
claude history
|
|
||||||
|
|
||||||
# Ensure backend CLI supports session resumption
|
|
||||||
codex resume <session_id> "test" # Should continue from previous session
|
|
||||||
claude -r <session_id> "test"
|
|
||||||
|
|
||||||
# If not supported, use new sessions instead of resume mode
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## FAQ (Frequently Asked Questions)
|
|
||||||
|
|
||||||
### Q1: `codeagent-wrapper` execution fails with "Unknown event format"
|
|
||||||
|
|
||||||
**Problem:**
|
|
||||||
```
|
|
||||||
Unknown event format: {"type":"turn.started"}
|
|
||||||
Unknown event format: {"type":"assistant", ...}
|
|
||||||
```
|
|
||||||
|
|
||||||
**Solution:**
|
|
||||||
This is a logging event format display issue and does not affect actual functionality. It will be fixed in the next version. You can ignore these log outputs.
|
|
||||||
|
|
||||||
**Related Issue:** [#96](https://github.com/cexll/myclaude/issues/96)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### Q2: Gemini cannot read files ignored by `.gitignore`
|
|
||||||
|
|
||||||
**Problem:**
|
|
||||||
When using `codeagent-wrapper --backend gemini`, files in directories like `.claude/` that are ignored by `.gitignore` cannot be read.
|
|
||||||
|
|
||||||
**Solution:**
|
|
||||||
- **Option 1:** Remove `.claude/` from your `.gitignore` file
|
|
||||||
- **Option 2:** Ensure files that need to be read are not in `.gitignore` list
|
|
||||||
|
|
||||||
**Related Issue:** [#75](https://github.com/cexll/myclaude/issues/75)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### Q3: `/dev` command parallel execution is very slow
|
|
||||||
|
|
||||||
**Problem:**
|
|
||||||
Using `/dev` command for simple features takes too long (over 30 minutes) with no visibility into task progress.
|
|
||||||
|
|
||||||
**Solution:**
|
|
||||||
1. **Check logs:** Review `C:\Users\User\AppData\Local\Temp\codeagent-wrapper-*.log` to identify bottlenecks
|
|
||||||
2. **Adjust backend:**
|
|
||||||
- Try faster models like `gpt-5.1-codex-max`
|
|
||||||
- Running in WSL may be significantly faster
|
|
||||||
3. **Workspace:** Use a single repository instead of monorepo with multiple sub-projects
|
|
||||||
|
|
||||||
**Related Issue:** [#77](https://github.com/cexll/myclaude/issues/77)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### Q4: Codex permission denied with new Go version
|
|
||||||
|
|
||||||
**Problem:**
|
|
||||||
After upgrading to the new Go-based Codex implementation, execution fails with permission denied errors.
|
|
||||||
|
|
||||||
**Solution:**
|
|
||||||
Add the following configuration to `~/.codex/config.yaml` (Windows: `c:\user\.codex\config.toml`):
|
|
||||||
```yaml
|
|
||||||
model = "gpt-5.1-codex-max"
|
|
||||||
model_reasoning_effort = "high"
|
|
||||||
model_reasoning_summary = "detailed"
|
|
||||||
approval_policy = "never"
|
|
||||||
sandbox_mode = "workspace-write"
|
|
||||||
disable_response_storage = true
|
|
||||||
network_access = true
|
|
||||||
```
|
|
||||||
|
|
||||||
**Key settings:**
|
|
||||||
- `approval_policy = "never"` - Remove approval restrictions
|
|
||||||
- `sandbox_mode = "workspace-write"` - Allow workspace write access
|
|
||||||
- `network_access = true` - Enable network access
|
|
||||||
|
|
||||||
**Related Issue:** [#31](https://github.com/cexll/myclaude/issues/31)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### Q5: Permission denied or sandbox restrictions during execution
|
|
||||||
|
|
||||||
**Problem:**
|
|
||||||
Execution fails with permission errors or sandbox restrictions when running codeagent-wrapper.
|
|
||||||
|
|
||||||
**Solution:**
|
|
||||||
Set the following environment variables:
|
|
||||||
```bash
|
|
||||||
export CODEX_BYPASS_SANDBOX=true
|
|
||||||
export CODEAGENT_SKIP_PERMISSIONS=true
|
|
||||||
```
|
|
||||||
|
|
||||||
Or add them to your shell profile (`~/.zshrc` or `~/.bashrc`):
|
|
||||||
```bash
|
|
||||||
echo 'export CODEX_BYPASS_SANDBOX=true' >> ~/.zshrc
|
|
||||||
echo 'export CODEAGENT_SKIP_PERMISSIONS=true' >> ~/.zshrc
|
|
||||||
```
|
|
||||||
|
|
||||||
**Note:** These settings bypass security restrictions. Use with caution in trusted environments only.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
**Still having issues?** Visit [GitHub Issues](https://github.com/cexll/myclaude/issues) to search or report new issues.
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Documentation
|
## Documentation
|
||||||
|
|
||||||
|
### Core Guides
|
||||||
- **[Codeagent-Wrapper Guide](docs/CODEAGENT-WRAPPER.md)** - Multi-backend execution wrapper
|
- **[Codeagent-Wrapper Guide](docs/CODEAGENT-WRAPPER.md)** - Multi-backend execution wrapper
|
||||||
- **[Hooks Documentation](docs/HOOKS.md)** - Custom hooks and automation
|
- **[Hooks Documentation](docs/HOOKS.md)** - Custom hooks and automation
|
||||||
|
|
||||||
|
|||||||
150
README_CN.md
150
README_CN.md
@@ -152,39 +152,15 @@ python3 install.py --force
|
|||||||
|
|
||||||
```
|
```
|
||||||
~/.claude/
|
~/.claude/
|
||||||
├── bin/
|
├── CLAUDE.md # 核心指令和角色定义
|
||||||
│ └── codeagent-wrapper # 主可执行文件
|
├── commands/ # 斜杠命令 (/dev, /code 等)
|
||||||
├── CLAUDE.md # 核心指令和角色定义
|
├── agents/ # 智能体定义
|
||||||
├── commands/ # 斜杠命令 (/dev, /code 等)
|
|
||||||
├── agents/ # 智能体定义
|
|
||||||
├── skills/
|
├── skills/
|
||||||
│ └── codex/
|
│ └── codex/
|
||||||
│ └── SKILL.md # Codex 集成技能
|
│ └── SKILL.md # Codex 集成技能
|
||||||
├── config.json # 配置文件
|
└── installed_modules.json # 安装状态
|
||||||
└── installed_modules.json # 安装状态
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### 自定义安装目录
|
|
||||||
|
|
||||||
默认情况下,myclaude 安装到 `~/.claude`。您可以使用 `INSTALL_DIR` 环境变量自定义安装目录:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 安装到自定义目录
|
|
||||||
INSTALL_DIR=/opt/myclaude bash install.sh
|
|
||||||
|
|
||||||
# 相应更新您的 PATH
|
|
||||||
export PATH="/opt/myclaude/bin:$PATH"
|
|
||||||
```
|
|
||||||
|
|
||||||
**目录结构:**
|
|
||||||
- `$INSTALL_DIR/bin/` - codeagent-wrapper 可执行文件
|
|
||||||
- `$INSTALL_DIR/skills/` - 技能定义
|
|
||||||
- `$INSTALL_DIR/config.json` - 配置文件
|
|
||||||
- `$INSTALL_DIR/commands/` - 斜杠命令定义
|
|
||||||
- `$INSTALL_DIR/agents/` - 智能体定义
|
|
||||||
|
|
||||||
**注意:** 使用自定义安装目录时,请确保将 `$INSTALL_DIR/bin` 添加到您的 `PATH` 环境变量中。
|
|
||||||
|
|
||||||
### 配置
|
### 配置
|
||||||
|
|
||||||
编辑 `config.json` 自定义:
|
编辑 `config.json` 自定义:
|
||||||
@@ -282,10 +258,8 @@ $Env:PATH = "$HOME\bin;$Env:PATH"
|
|||||||
```
|
```
|
||||||
|
|
||||||
```batch
|
```batch
|
||||||
REM cmd.exe - 永久添加(当前用户)(建议使用上面的 PowerShell 方法)
|
REM cmd.exe - 永久添加(当前用户)
|
||||||
REM 警告:此命令会展开 %PATH% 包含系统 PATH,导致重复
|
setx PATH "%USERPROFILE%\bin;%PATH%"
|
||||||
REM 注意:使用 reg add 而非 setx 以避免 1024 字符截断限制
|
|
||||||
reg add "HKCU\Environment" /v Path /t REG_EXPAND_SZ /d "%USERPROFILE%\bin;%PATH%" /f
|
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
@@ -309,14 +283,11 @@ reg add "HKCU\Environment" /v Path /t REG_EXPAND_SZ /d "%USERPROFILE%\bin;%PATH%
|
|||||||
|
|
||||||
**Codex wrapper 未找到:**
|
**Codex wrapper 未找到:**
|
||||||
```bash
|
```bash
|
||||||
# 安装程序会自动添加 PATH,检查是否已添加
|
# 检查 PATH
|
||||||
if [[ ":$PATH:" != *":$HOME/.claude/bin:"* ]]; then
|
echo $PATH | grep -q "$HOME/bin" || echo 'export PATH="$HOME/bin:$PATH"' >> ~/.zshrc
|
||||||
echo "PATH not configured. Reinstalling..."
|
|
||||||
bash install.sh
|
|
||||||
fi
|
|
||||||
|
|
||||||
# 或手动添加(幂等性命令)
|
# 重新安装
|
||||||
[[ ":$PATH:" != *":$HOME/.claude/bin:"* ]] && echo 'export PATH="$HOME/.claude/bin:$PATH"' >> ~/.zshrc
|
bash install.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
**权限被拒绝:**
|
**权限被拒绝:**
|
||||||
@@ -335,105 +306,6 @@ python3 install.py --module dev --force
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 常见问题 (FAQ)
|
|
||||||
|
|
||||||
### Q1: `codeagent-wrapper` 执行时报错 "Unknown event format"
|
|
||||||
|
|
||||||
**问题描述:**
|
|
||||||
执行 `codeagent-wrapper` 时出现错误:
|
|
||||||
```
|
|
||||||
Unknown event format: {"type":"turn.started"}
|
|
||||||
Unknown event format: {"type":"assistant", ...}
|
|
||||||
```
|
|
||||||
|
|
||||||
**解决方案:**
|
|
||||||
这是日志事件流的显示问题,不影响实际功能执行。预计在下个版本中修复。如需排查其他问题,可忽略此日志输出。
|
|
||||||
|
|
||||||
**相关 Issue:** [#96](https://github.com/cexll/myclaude/issues/96)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### Q2: Gemini 无法读取 `.gitignore` 忽略的文件
|
|
||||||
|
|
||||||
**问题描述:**
|
|
||||||
使用 `codeagent-wrapper --backend gemini` 时,无法读取 `.claude/` 等被 `.gitignore` 忽略的目录中的文件。
|
|
||||||
|
|
||||||
**解决方案:**
|
|
||||||
- **方案一:** 在项目根目录的 `.gitignore` 中取消对 `.claude/` 的忽略
|
|
||||||
- **方案二:** 确保需要读取的文件不在 `.gitignore` 忽略列表中
|
|
||||||
|
|
||||||
**相关 Issue:** [#75](https://github.com/cexll/myclaude/issues/75)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### Q3: `/dev` 命令并行执行特别慢
|
|
||||||
|
|
||||||
**问题描述:**
|
|
||||||
使用 `/dev` 命令开发简单功能耗时过长(超过30分钟),无法了解任务执行状态。
|
|
||||||
|
|
||||||
**解决方案:**
|
|
||||||
1. **检查日志:** 查看 `C:\Users\User\AppData\Local\Temp\codeagent-wrapper-*.log` 分析瓶颈
|
|
||||||
2. **调整后端:**
|
|
||||||
- 尝试使用 `gpt-5.1-codex-max` 等更快的模型
|
|
||||||
- 在 WSL 环境下运行速度可能更快
|
|
||||||
3. **工作区选择:** 使用独立的代码仓库而非包含多个子项目的 monorepo
|
|
||||||
|
|
||||||
**相关 Issue:** [#77](https://github.com/cexll/myclaude/issues/77)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### Q4: 新版 Go 实现的 Codex 权限不足
|
|
||||||
|
|
||||||
**问题描述:**
|
|
||||||
升级到新版 Go 实现的 Codex 后,出现权限不足的错误。
|
|
||||||
|
|
||||||
**解决方案:**
|
|
||||||
在 `~/.codex/config.yaml` 中添加以下配置(Windows: `c:\user\.codex\config.toml`):
|
|
||||||
```yaml
|
|
||||||
model = "gpt-5.1-codex-max"
|
|
||||||
model_reasoning_effort = "high"
|
|
||||||
model_reasoning_summary = "detailed"
|
|
||||||
approval_policy = "never"
|
|
||||||
sandbox_mode = "workspace-write"
|
|
||||||
disable_response_storage = true
|
|
||||||
network_access = true
|
|
||||||
```
|
|
||||||
|
|
||||||
**关键配置说明:**
|
|
||||||
- `approval_policy = "never"` - 移除审批限制
|
|
||||||
- `sandbox_mode = "workspace-write"` - 允许工作区写入权限
|
|
||||||
- `network_access = true` - 启用网络访问
|
|
||||||
|
|
||||||
**相关 Issue:** [#31](https://github.com/cexll/myclaude/issues/31)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### Q5: 执行时遇到权限拒绝或沙箱限制
|
|
||||||
|
|
||||||
**问题描述:**
|
|
||||||
运行 codeagent-wrapper 时出现权限错误或沙箱限制。
|
|
||||||
|
|
||||||
**解决方案:**
|
|
||||||
设置以下环境变量:
|
|
||||||
```bash
|
|
||||||
export CODEX_BYPASS_SANDBOX=true
|
|
||||||
export CODEAGENT_SKIP_PERMISSIONS=true
|
|
||||||
```
|
|
||||||
|
|
||||||
或添加到 shell 配置文件(`~/.zshrc` 或 `~/.bashrc`):
|
|
||||||
```bash
|
|
||||||
echo 'export CODEX_BYPASS_SANDBOX=true' >> ~/.zshrc
|
|
||||||
echo 'export CODEAGENT_SKIP_PERMISSIONS=true' >> ~/.zshrc
|
|
||||||
```
|
|
||||||
|
|
||||||
**注意:** 这些设置会绕过安全限制,请仅在可信环境中使用。
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
**仍有疑问?** 请访问 [GitHub Issues](https://github.com/cexll/myclaude/issues) 搜索或提交新问题。
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 许可证
|
## 许可证
|
||||||
|
|
||||||
AGPL-3.0 License - 查看 [LICENSE](LICENSE)
|
AGPL-3.0 License - 查看 [LICENSE](LICENSE)
|
||||||
|
|||||||
@@ -1,12 +1,5 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Backend defines the contract for invoking different AI CLI backends.
|
// Backend defines the contract for invoking different AI CLI backends.
|
||||||
// Each backend is responsible for supplying the executable command and
|
// Each backend is responsible for supplying the executable command and
|
||||||
// building the argument list based on the wrapper config.
|
// building the argument list based on the wrapper config.
|
||||||
@@ -33,96 +26,20 @@ func (ClaudeBackend) Command() string {
|
|||||||
return "claude"
|
return "claude"
|
||||||
}
|
}
|
||||||
func (ClaudeBackend) BuildArgs(cfg *Config, targetArg string) []string {
|
func (ClaudeBackend) BuildArgs(cfg *Config, targetArg string) []string {
|
||||||
return buildClaudeArgs(cfg, targetArg)
|
|
||||||
}
|
|
||||||
|
|
||||||
const maxClaudeSettingsBytes = 1 << 20 // 1MB
|
|
||||||
|
|
||||||
type minimalClaudeSettings struct {
|
|
||||||
Env map[string]string
|
|
||||||
Model string
|
|
||||||
}
|
|
||||||
|
|
||||||
// loadMinimalClaudeSettings 从 ~/.claude/settings.json 只提取安全的最小子集:
|
|
||||||
// - env: 只接受字符串类型的值
|
|
||||||
// - model: 只接受字符串类型的值
|
|
||||||
// 文件缺失/解析失败/超限都返回空。
|
|
||||||
func loadMinimalClaudeSettings() minimalClaudeSettings {
|
|
||||||
home, err := os.UserHomeDir()
|
|
||||||
if err != nil || home == "" {
|
|
||||||
return minimalClaudeSettings{}
|
|
||||||
}
|
|
||||||
|
|
||||||
settingPath := filepath.Join(home, ".claude", "settings.json")
|
|
||||||
info, err := os.Stat(settingPath)
|
|
||||||
if err != nil || info.Size() > maxClaudeSettingsBytes {
|
|
||||||
return minimalClaudeSettings{}
|
|
||||||
}
|
|
||||||
|
|
||||||
data, err := os.ReadFile(settingPath)
|
|
||||||
if err != nil {
|
|
||||||
return minimalClaudeSettings{}
|
|
||||||
}
|
|
||||||
|
|
||||||
var cfg struct {
|
|
||||||
Env map[string]any `json:"env"`
|
|
||||||
Model any `json:"model"`
|
|
||||||
}
|
|
||||||
if err := json.Unmarshal(data, &cfg); err != nil {
|
|
||||||
return minimalClaudeSettings{}
|
|
||||||
}
|
|
||||||
|
|
||||||
out := minimalClaudeSettings{}
|
|
||||||
|
|
||||||
if model, ok := cfg.Model.(string); ok {
|
|
||||||
out.Model = strings.TrimSpace(model)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(cfg.Env) == 0 {
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
env := make(map[string]string, len(cfg.Env))
|
|
||||||
for k, v := range cfg.Env {
|
|
||||||
s, ok := v.(string)
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
env[k] = s
|
|
||||||
}
|
|
||||||
if len(env) == 0 {
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
out.Env = env
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// loadMinimalEnvSettings is kept for backwards tests; prefer loadMinimalClaudeSettings.
|
|
||||||
func loadMinimalEnvSettings() map[string]string {
|
|
||||||
settings := loadMinimalClaudeSettings()
|
|
||||||
if len(settings.Env) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return settings.Env
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildClaudeArgs(cfg *Config, targetArg string) []string {
|
|
||||||
if cfg == nil {
|
if cfg == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
args := []string{"-p"}
|
args := []string{"-p", "--dangerously-skip-permissions"}
|
||||||
if cfg.SkipPermissions {
|
|
||||||
args = append(args, "--dangerously-skip-permissions")
|
// Only skip permissions when explicitly requested
|
||||||
}
|
// if cfg.SkipPermissions {
|
||||||
|
// args = append(args, "--dangerously-skip-permissions")
|
||||||
|
// }
|
||||||
|
|
||||||
// Prevent infinite recursion: disable all setting sources (user, project, local)
|
// Prevent infinite recursion: disable all setting sources (user, project, local)
|
||||||
// This ensures a clean execution environment without CLAUDE.md or skills that would trigger codeagent
|
// This ensures a clean execution environment without CLAUDE.md or skills that would trigger codeagent
|
||||||
args = append(args, "--setting-sources", "")
|
args = append(args, "--setting-sources", "")
|
||||||
|
|
||||||
if model := strings.TrimSpace(cfg.Model); model != "" {
|
|
||||||
args = append(args, "--model", model)
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Mode == "resume" {
|
if cfg.Mode == "resume" {
|
||||||
if cfg.SessionID != "" {
|
if cfg.SessionID != "" {
|
||||||
// Claude CLI uses -r <session_id> for resume.
|
// Claude CLI uses -r <session_id> for resume.
|
||||||
@@ -143,19 +60,11 @@ func (GeminiBackend) Command() string {
|
|||||||
return "gemini"
|
return "gemini"
|
||||||
}
|
}
|
||||||
func (GeminiBackend) BuildArgs(cfg *Config, targetArg string) []string {
|
func (GeminiBackend) BuildArgs(cfg *Config, targetArg string) []string {
|
||||||
return buildGeminiArgs(cfg, targetArg)
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildGeminiArgs(cfg *Config, targetArg string) []string {
|
|
||||||
if cfg == nil {
|
if cfg == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
args := []string{"-o", "stream-json", "-y"}
|
args := []string{"-o", "stream-json", "-y"}
|
||||||
|
|
||||||
if model := strings.TrimSpace(cfg.Model); model != "" {
|
|
||||||
args = append(args, "-m", model)
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Mode == "resume" {
|
if cfg.Mode == "resume" {
|
||||||
if cfg.SessionID != "" {
|
if cfg.SessionID != "" {
|
||||||
args = append(args, "-r", cfg.SessionID)
|
args = append(args, "-r", cfg.SessionID)
|
||||||
|
|||||||
@@ -1,9 +1,6 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
)
|
)
|
||||||
@@ -11,16 +8,16 @@ import (
|
|||||||
func TestClaudeBuildArgs_ModesAndPermissions(t *testing.T) {
|
func TestClaudeBuildArgs_ModesAndPermissions(t *testing.T) {
|
||||||
backend := ClaudeBackend{}
|
backend := ClaudeBackend{}
|
||||||
|
|
||||||
t.Run("new mode omits skip-permissions by default", func(t *testing.T) {
|
t.Run("new mode uses workdir without skip by default", func(t *testing.T) {
|
||||||
cfg := &Config{Mode: "new", WorkDir: "/repo"}
|
cfg := &Config{Mode: "new", WorkDir: "/repo"}
|
||||||
got := backend.BuildArgs(cfg, "todo")
|
got := backend.BuildArgs(cfg, "todo")
|
||||||
want := []string{"-p", "--setting-sources", "", "--output-format", "stream-json", "--verbose", "todo"}
|
want := []string{"-p", "--dangerously-skip-permissions", "--setting-sources", "", "--output-format", "stream-json", "--verbose", "todo"}
|
||||||
if !reflect.DeepEqual(got, want) {
|
if !reflect.DeepEqual(got, want) {
|
||||||
t.Fatalf("got %v, want %v", got, want)
|
t.Fatalf("got %v, want %v", got, want)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("new mode can opt-in skip-permissions", func(t *testing.T) {
|
t.Run("new mode opt-in skip permissions with default workdir", func(t *testing.T) {
|
||||||
cfg := &Config{Mode: "new", SkipPermissions: true}
|
cfg := &Config{Mode: "new", SkipPermissions: true}
|
||||||
got := backend.BuildArgs(cfg, "-")
|
got := backend.BuildArgs(cfg, "-")
|
||||||
want := []string{"-p", "--dangerously-skip-permissions", "--setting-sources", "", "--output-format", "stream-json", "--verbose", "-"}
|
want := []string{"-p", "--dangerously-skip-permissions", "--setting-sources", "", "--output-format", "stream-json", "--verbose", "-"}
|
||||||
@@ -29,10 +26,10 @@ func TestClaudeBuildArgs_ModesAndPermissions(t *testing.T) {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("resume mode includes session id", func(t *testing.T) {
|
t.Run("resume mode uses session id and omits workdir", func(t *testing.T) {
|
||||||
cfg := &Config{Mode: "resume", SessionID: "sid-123", WorkDir: "/ignored"}
|
cfg := &Config{Mode: "resume", SessionID: "sid-123", WorkDir: "/ignored"}
|
||||||
got := backend.BuildArgs(cfg, "resume-task")
|
got := backend.BuildArgs(cfg, "resume-task")
|
||||||
want := []string{"-p", "--setting-sources", "", "-r", "sid-123", "--output-format", "stream-json", "--verbose", "resume-task"}
|
want := []string{"-p", "--dangerously-skip-permissions", "--setting-sources", "", "-r", "sid-123", "--output-format", "stream-json", "--verbose", "resume-task"}
|
||||||
if !reflect.DeepEqual(got, want) {
|
if !reflect.DeepEqual(got, want) {
|
||||||
t.Fatalf("got %v, want %v", got, want)
|
t.Fatalf("got %v, want %v", got, want)
|
||||||
}
|
}
|
||||||
@@ -41,16 +38,7 @@ func TestClaudeBuildArgs_ModesAndPermissions(t *testing.T) {
|
|||||||
t.Run("resume mode without session still returns base flags", func(t *testing.T) {
|
t.Run("resume mode without session still returns base flags", func(t *testing.T) {
|
||||||
cfg := &Config{Mode: "resume", WorkDir: "/ignored"}
|
cfg := &Config{Mode: "resume", WorkDir: "/ignored"}
|
||||||
got := backend.BuildArgs(cfg, "follow-up")
|
got := backend.BuildArgs(cfg, "follow-up")
|
||||||
want := []string{"-p", "--setting-sources", "", "--output-format", "stream-json", "--verbose", "follow-up"}
|
want := []string{"-p", "--dangerously-skip-permissions", "--setting-sources", "", "--output-format", "stream-json", "--verbose", "follow-up"}
|
||||||
if !reflect.DeepEqual(got, want) {
|
|
||||||
t.Fatalf("got %v, want %v", got, want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("resume mode can opt-in skip permissions", func(t *testing.T) {
|
|
||||||
cfg := &Config{Mode: "resume", SessionID: "sid-123", SkipPermissions: true}
|
|
||||||
got := backend.BuildArgs(cfg, "resume-task")
|
|
||||||
want := []string{"-p", "--dangerously-skip-permissions", "--setting-sources", "", "-r", "sid-123", "--output-format", "stream-json", "--verbose", "resume-task"}
|
|
||||||
if !reflect.DeepEqual(got, want) {
|
if !reflect.DeepEqual(got, want) {
|
||||||
t.Fatalf("got %v, want %v", got, want)
|
t.Fatalf("got %v, want %v", got, want)
|
||||||
}
|
}
|
||||||
@@ -63,42 +51,6 @@ func TestClaudeBuildArgs_ModesAndPermissions(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendBuildArgs_Model(t *testing.T) {
|
|
||||||
t.Run("claude includes --model when set", func(t *testing.T) {
|
|
||||||
backend := ClaudeBackend{}
|
|
||||||
cfg := &Config{Mode: "new", Model: "opus"}
|
|
||||||
got := backend.BuildArgs(cfg, "todo")
|
|
||||||
want := []string{"-p", "--setting-sources", "", "--model", "opus", "--output-format", "stream-json", "--verbose", "todo"}
|
|
||||||
if !reflect.DeepEqual(got, want) {
|
|
||||||
t.Fatalf("got %v, want %v", got, want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("gemini includes -m when set", func(t *testing.T) {
|
|
||||||
backend := GeminiBackend{}
|
|
||||||
cfg := &Config{Mode: "new", Model: "gemini-3-pro-preview"}
|
|
||||||
got := backend.BuildArgs(cfg, "task")
|
|
||||||
want := []string{"-o", "stream-json", "-y", "-m", "gemini-3-pro-preview", "-p", "task"}
|
|
||||||
if !reflect.DeepEqual(got, want) {
|
|
||||||
t.Fatalf("got %v, want %v", got, want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("codex includes --model when set", func(t *testing.T) {
|
|
||||||
const key = "CODEX_BYPASS_SANDBOX"
|
|
||||||
t.Cleanup(func() { os.Unsetenv(key) })
|
|
||||||
os.Unsetenv(key)
|
|
||||||
|
|
||||||
backend := CodexBackend{}
|
|
||||||
cfg := &Config{Mode: "new", WorkDir: "/tmp", Model: "o3"}
|
|
||||||
got := backend.BuildArgs(cfg, "task")
|
|
||||||
want := []string{"e", "--model", "o3", "--skip-git-repo-check", "-C", "/tmp", "--json", "task"}
|
|
||||||
if !reflect.DeepEqual(got, want) {
|
|
||||||
t.Fatalf("got %v, want %v", got, want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClaudeBuildArgs_GeminiAndCodexModes(t *testing.T) {
|
func TestClaudeBuildArgs_GeminiAndCodexModes(t *testing.T) {
|
||||||
t.Run("gemini new mode defaults workdir", func(t *testing.T) {
|
t.Run("gemini new mode defaults workdir", func(t *testing.T) {
|
||||||
backend := GeminiBackend{}
|
backend := GeminiBackend{}
|
||||||
@@ -137,11 +89,7 @@ func TestClaudeBuildArgs_GeminiAndCodexModes(t *testing.T) {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("codex build args omits bypass flag by default", func(t *testing.T) {
|
t.Run("codex build args passthrough remains intact", func(t *testing.T) {
|
||||||
const key = "CODEX_BYPASS_SANDBOX"
|
|
||||||
t.Cleanup(func() { os.Unsetenv(key) })
|
|
||||||
os.Unsetenv(key)
|
|
||||||
|
|
||||||
backend := CodexBackend{}
|
backend := CodexBackend{}
|
||||||
cfg := &Config{Mode: "new", WorkDir: "/tmp"}
|
cfg := &Config{Mode: "new", WorkDir: "/tmp"}
|
||||||
got := backend.BuildArgs(cfg, "task")
|
got := backend.BuildArgs(cfg, "task")
|
||||||
@@ -150,20 +98,6 @@ func TestClaudeBuildArgs_GeminiAndCodexModes(t *testing.T) {
|
|||||||
t.Fatalf("got %v, want %v", got, want)
|
t.Fatalf("got %v, want %v", got, want)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("codex build args includes bypass flag when enabled", func(t *testing.T) {
|
|
||||||
const key = "CODEX_BYPASS_SANDBOX"
|
|
||||||
t.Cleanup(func() { os.Unsetenv(key) })
|
|
||||||
os.Setenv(key, "true")
|
|
||||||
|
|
||||||
backend := CodexBackend{}
|
|
||||||
cfg := &Config{Mode: "new", WorkDir: "/tmp"}
|
|
||||||
got := backend.BuildArgs(cfg, "task")
|
|
||||||
want := []string{"e", "--dangerously-bypass-approvals-and-sandbox", "--skip-git-repo-check", "-C", "/tmp", "--json", "task"}
|
|
||||||
if !reflect.DeepEqual(got, want) {
|
|
||||||
t.Fatalf("got %v, want %v", got, want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestClaudeBuildArgs_BackendMetadata(t *testing.T) {
|
func TestClaudeBuildArgs_BackendMetadata(t *testing.T) {
|
||||||
@@ -186,64 +120,3 @@ func TestClaudeBuildArgs_BackendMetadata(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestLoadMinimalEnvSettings(t *testing.T) {
|
|
||||||
home := t.TempDir()
|
|
||||||
t.Setenv("HOME", home)
|
|
||||||
t.Setenv("USERPROFILE", home)
|
|
||||||
|
|
||||||
t.Run("missing file returns empty", func(t *testing.T) {
|
|
||||||
if got := loadMinimalEnvSettings(); len(got) != 0 {
|
|
||||||
t.Fatalf("got %v, want empty", got)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("valid env returns string map", func(t *testing.T) {
|
|
||||||
dir := filepath.Join(home, ".claude")
|
|
||||||
if err := os.MkdirAll(dir, 0o755); err != nil {
|
|
||||||
t.Fatalf("MkdirAll: %v", err)
|
|
||||||
}
|
|
||||||
path := filepath.Join(dir, "settings.json")
|
|
||||||
data := []byte(`{"env":{"ANTHROPIC_API_KEY":"secret","FOO":"bar"}}`)
|
|
||||||
if err := os.WriteFile(path, data, 0o600); err != nil {
|
|
||||||
t.Fatalf("WriteFile: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
got := loadMinimalEnvSettings()
|
|
||||||
if got["ANTHROPIC_API_KEY"] != "secret" || got["FOO"] != "bar" {
|
|
||||||
t.Fatalf("got %v, want keys present", got)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("non-string values are ignored", func(t *testing.T) {
|
|
||||||
dir := filepath.Join(home, ".claude")
|
|
||||||
path := filepath.Join(dir, "settings.json")
|
|
||||||
data := []byte(`{"env":{"GOOD":"ok","BAD":123,"ALSO_BAD":true}}`)
|
|
||||||
if err := os.WriteFile(path, data, 0o600); err != nil {
|
|
||||||
t.Fatalf("WriteFile: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
got := loadMinimalEnvSettings()
|
|
||||||
if got["GOOD"] != "ok" {
|
|
||||||
t.Fatalf("got %v, want GOOD=ok", got)
|
|
||||||
}
|
|
||||||
if _, ok := got["BAD"]; ok {
|
|
||||||
t.Fatalf("got %v, want BAD omitted", got)
|
|
||||||
}
|
|
||||||
if _, ok := got["ALSO_BAD"]; ok {
|
|
||||||
t.Fatalf("got %v, want ALSO_BAD omitted", got)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("oversized file returns empty", func(t *testing.T) {
|
|
||||||
dir := filepath.Join(home, ".claude")
|
|
||||||
path := filepath.Join(dir, "settings.json")
|
|
||||||
data := bytes.Repeat([]byte("a"), maxClaudeSettingsBytes+1)
|
|
||||||
if err := os.WriteFile(path, data, 0o600); err != nil {
|
|
||||||
t.Fatalf("WriteFile: %v", err)
|
|
||||||
}
|
|
||||||
if got := loadMinimalEnvSettings(); len(got) != 0 {
|
|
||||||
t.Fatalf("got %v, want empty", got)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -13,16 +13,6 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
func stripTimestampPrefix(line string) string {
|
|
||||||
if !strings.HasPrefix(line, "[") {
|
|
||||||
return line
|
|
||||||
}
|
|
||||||
if idx := strings.Index(line, "] "); idx >= 0 {
|
|
||||||
return line[idx+2:]
|
|
||||||
}
|
|
||||||
return line
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestConcurrentStressLogger 高并发压力测试
|
// TestConcurrentStressLogger 高并发压力测试
|
||||||
func TestConcurrentStressLogger(t *testing.T) {
|
func TestConcurrentStressLogger(t *testing.T) {
|
||||||
if testing.Short() {
|
if testing.Short() {
|
||||||
@@ -89,8 +79,7 @@ func TestConcurrentStressLogger(t *testing.T) {
|
|||||||
// 验证日志格式(纯文本,无前缀)
|
// 验证日志格式(纯文本,无前缀)
|
||||||
formatRE := regexp.MustCompile(`^goroutine-\d+-msg-\d+$`)
|
formatRE := regexp.MustCompile(`^goroutine-\d+-msg-\d+$`)
|
||||||
for i, line := range lines[:min(10, len(lines))] {
|
for i, line := range lines[:min(10, len(lines))] {
|
||||||
msg := stripTimestampPrefix(line)
|
if !formatRE.MatchString(line) {
|
||||||
if !formatRE.MatchString(msg) {
|
|
||||||
t.Errorf("line %d has invalid format: %s", i, line)
|
t.Errorf("line %d has invalid format: %s", i, line)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -302,7 +291,7 @@ func TestLoggerOrderPreservation(t *testing.T) {
|
|||||||
sequences := make(map[int][]int) // goroutine ID -> sequence numbers
|
sequences := make(map[int][]int) // goroutine ID -> sequence numbers
|
||||||
|
|
||||||
for scanner.Scan() {
|
for scanner.Scan() {
|
||||||
line := stripTimestampPrefix(scanner.Text())
|
line := scanner.Text()
|
||||||
var gid, seq int
|
var gid, seq int
|
||||||
// Parse format: G0-SEQ0001 (without INFO: prefix)
|
// Parse format: G0-SEQ0001 (without INFO: prefix)
|
||||||
_, err := fmt.Sscanf(line, "G%d-SEQ%04d", &gid, &seq)
|
_, err := fmt.Sscanf(line, "G%d-SEQ%04d", &gid, &seq)
|
||||||
|
|||||||
@@ -15,7 +15,6 @@ type Config struct {
|
|||||||
Task string
|
Task string
|
||||||
SessionID string
|
SessionID string
|
||||||
WorkDir string
|
WorkDir string
|
||||||
Model string
|
|
||||||
ExplicitStdin bool
|
ExplicitStdin bool
|
||||||
Timeout int
|
Timeout int
|
||||||
Backend string
|
Backend string
|
||||||
@@ -37,7 +36,6 @@ type TaskSpec struct {
|
|||||||
Dependencies []string `json:"dependencies,omitempty"`
|
Dependencies []string `json:"dependencies,omitempty"`
|
||||||
SessionID string `json:"session_id,omitempty"`
|
SessionID string `json:"session_id,omitempty"`
|
||||||
Backend string `json:"backend,omitempty"`
|
Backend string `json:"backend,omitempty"`
|
||||||
Model string `json:"model,omitempty"`
|
|
||||||
Mode string `json:"-"`
|
Mode string `json:"-"`
|
||||||
UseStdin bool `json:"-"`
|
UseStdin bool `json:"-"`
|
||||||
Context context.Context `json:"-"`
|
Context context.Context `json:"-"`
|
||||||
@@ -51,15 +49,7 @@ type TaskResult struct {
|
|||||||
SessionID string `json:"session_id"`
|
SessionID string `json:"session_id"`
|
||||||
Error string `json:"error"`
|
Error string `json:"error"`
|
||||||
LogPath string `json:"log_path"`
|
LogPath string `json:"log_path"`
|
||||||
// Structured report fields
|
sharedLog bool
|
||||||
Coverage string `json:"coverage,omitempty"` // extracted coverage percentage (e.g., "92%")
|
|
||||||
CoverageNum float64 `json:"coverage_num,omitempty"` // numeric coverage for comparison
|
|
||||||
CoverageTarget float64 `json:"coverage_target,omitempty"` // target coverage (default 90)
|
|
||||||
FilesChanged []string `json:"files_changed,omitempty"` // list of changed files
|
|
||||||
KeyOutput string `json:"key_output,omitempty"` // brief summary of what was done
|
|
||||||
TestsPassed int `json:"tests_passed,omitempty"` // number of tests passed
|
|
||||||
TestsFailed int `json:"tests_failed,omitempty"` // number of tests failed
|
|
||||||
sharedLog bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var backendRegistry = map[string]Backend{
|
var backendRegistry = map[string]Backend{
|
||||||
@@ -154,8 +144,6 @@ func parseParallelConfig(data []byte) (*ParallelConfig, error) {
|
|||||||
task.Mode = "resume"
|
task.Mode = "resume"
|
||||||
case "backend":
|
case "backend":
|
||||||
task.Backend = value
|
task.Backend = value
|
||||||
case "model":
|
|
||||||
task.Model = value
|
|
||||||
case "dependencies":
|
case "dependencies":
|
||||||
for _, dep := range strings.Split(value, ",") {
|
for _, dep := range strings.Split(value, ",") {
|
||||||
dep = strings.TrimSpace(dep)
|
dep = strings.TrimSpace(dep)
|
||||||
@@ -176,9 +164,6 @@ func parseParallelConfig(data []byte) (*ParallelConfig, error) {
|
|||||||
if content == "" {
|
if content == "" {
|
||||||
return nil, fmt.Errorf("task block #%d (%q) missing content", taskIndex, task.ID)
|
return nil, fmt.Errorf("task block #%d (%q) missing content", taskIndex, task.ID)
|
||||||
}
|
}
|
||||||
if task.Mode == "resume" && strings.TrimSpace(task.SessionID) == "" {
|
|
||||||
return nil, fmt.Errorf("task block #%d (%q) has empty session_id", taskIndex, task.ID)
|
|
||||||
}
|
|
||||||
if _, exists := seen[task.ID]; exists {
|
if _, exists := seen[task.ID]; exists {
|
||||||
return nil, fmt.Errorf("task block #%d has duplicate id: %s", taskIndex, task.ID)
|
return nil, fmt.Errorf("task block #%d has duplicate id: %s", taskIndex, task.ID)
|
||||||
}
|
}
|
||||||
@@ -202,7 +187,6 @@ func parseArgs() (*Config, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
backendName := defaultBackendName
|
backendName := defaultBackendName
|
||||||
model := ""
|
|
||||||
skipPermissions := envFlagEnabled("CODEAGENT_SKIP_PERMISSIONS")
|
skipPermissions := envFlagEnabled("CODEAGENT_SKIP_PERMISSIONS")
|
||||||
filtered := make([]string, 0, len(args))
|
filtered := make([]string, 0, len(args))
|
||||||
for i := 0; i < len(args); i++ {
|
for i := 0; i < len(args); i++ {
|
||||||
@@ -225,20 +209,6 @@ func parseArgs() (*Config, error) {
|
|||||||
case arg == "--skip-permissions", arg == "--dangerously-skip-permissions":
|
case arg == "--skip-permissions", arg == "--dangerously-skip-permissions":
|
||||||
skipPermissions = true
|
skipPermissions = true
|
||||||
continue
|
continue
|
||||||
case arg == "--model":
|
|
||||||
if i+1 >= len(args) {
|
|
||||||
return nil, fmt.Errorf("--model flag requires a value")
|
|
||||||
}
|
|
||||||
model = args[i+1]
|
|
||||||
i++
|
|
||||||
continue
|
|
||||||
case strings.HasPrefix(arg, "--model="):
|
|
||||||
value := strings.TrimPrefix(arg, "--model=")
|
|
||||||
if value == "" {
|
|
||||||
return nil, fmt.Errorf("--model flag requires a value")
|
|
||||||
}
|
|
||||||
model = value
|
|
||||||
continue
|
|
||||||
case strings.HasPrefix(arg, "--skip-permissions="):
|
case strings.HasPrefix(arg, "--skip-permissions="):
|
||||||
skipPermissions = parseBoolFlag(strings.TrimPrefix(arg, "--skip-permissions="), skipPermissions)
|
skipPermissions = parseBoolFlag(strings.TrimPrefix(arg, "--skip-permissions="), skipPermissions)
|
||||||
continue
|
continue
|
||||||
@@ -254,7 +224,7 @@ func parseArgs() (*Config, error) {
|
|||||||
}
|
}
|
||||||
args = filtered
|
args = filtered
|
||||||
|
|
||||||
cfg := &Config{WorkDir: defaultWorkdir, Backend: backendName, SkipPermissions: skipPermissions, Model: strings.TrimSpace(model)}
|
cfg := &Config{WorkDir: defaultWorkdir, Backend: backendName, SkipPermissions: skipPermissions}
|
||||||
cfg.MaxParallelWorkers = resolveMaxParallelWorkers()
|
cfg.MaxParallelWorkers = resolveMaxParallelWorkers()
|
||||||
|
|
||||||
if args[0] == "resume" {
|
if args[0] == "resume" {
|
||||||
@@ -262,10 +232,7 @@ func parseArgs() (*Config, error) {
|
|||||||
return nil, fmt.Errorf("resume mode requires: resume <session_id> <task>")
|
return nil, fmt.Errorf("resume mode requires: resume <session_id> <task>")
|
||||||
}
|
}
|
||||||
cfg.Mode = "resume"
|
cfg.Mode = "resume"
|
||||||
cfg.SessionID = strings.TrimSpace(args[1])
|
cfg.SessionID = args[1]
|
||||||
if cfg.SessionID == "" {
|
|
||||||
return nil, fmt.Errorf("resume mode requires non-empty session_id")
|
|
||||||
}
|
|
||||||
cfg.Task = args[2]
|
cfg.Task = args[2]
|
||||||
cfg.ExplicitStdin = (args[2] == "-")
|
cfg.ExplicitStdin = (args[2] == "-")
|
||||||
if len(args) > 3 {
|
if len(args) > 3 {
|
||||||
|
|||||||
@@ -16,18 +16,14 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
const postMessageTerminateDelay = 1 * time.Second
|
|
||||||
|
|
||||||
// commandRunner abstracts exec.Cmd for testability
|
// commandRunner abstracts exec.Cmd for testability
|
||||||
type commandRunner interface {
|
type commandRunner interface {
|
||||||
Start() error
|
Start() error
|
||||||
Wait() error
|
Wait() error
|
||||||
StdoutPipe() (io.ReadCloser, error)
|
StdoutPipe() (io.ReadCloser, error)
|
||||||
StderrPipe() (io.ReadCloser, error)
|
|
||||||
StdinPipe() (io.WriteCloser, error)
|
StdinPipe() (io.WriteCloser, error)
|
||||||
SetStderr(io.Writer)
|
SetStderr(io.Writer)
|
||||||
SetDir(string)
|
SetDir(string)
|
||||||
SetEnv(env map[string]string)
|
|
||||||
Process() processHandle
|
Process() processHandle
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -64,13 +60,6 @@ func (r *realCmd) StdoutPipe() (io.ReadCloser, error) {
|
|||||||
return r.cmd.StdoutPipe()
|
return r.cmd.StdoutPipe()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *realCmd) StderrPipe() (io.ReadCloser, error) {
|
|
||||||
if r.cmd == nil {
|
|
||||||
return nil, errors.New("command is nil")
|
|
||||||
}
|
|
||||||
return r.cmd.StderrPipe()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *realCmd) StdinPipe() (io.WriteCloser, error) {
|
func (r *realCmd) StdinPipe() (io.WriteCloser, error) {
|
||||||
if r.cmd == nil {
|
if r.cmd == nil {
|
||||||
return nil, errors.New("command is nil")
|
return nil, errors.New("command is nil")
|
||||||
@@ -90,52 +79,6 @@ func (r *realCmd) SetDir(dir string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *realCmd) SetEnv(env map[string]string) {
|
|
||||||
if r == nil || r.cmd == nil || len(env) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
merged := make(map[string]string, len(env)+len(os.Environ()))
|
|
||||||
for _, kv := range os.Environ() {
|
|
||||||
if kv == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
idx := strings.IndexByte(kv, '=')
|
|
||||||
if idx <= 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
merged[kv[:idx]] = kv[idx+1:]
|
|
||||||
}
|
|
||||||
for _, kv := range r.cmd.Env {
|
|
||||||
if kv == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
idx := strings.IndexByte(kv, '=')
|
|
||||||
if idx <= 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
merged[kv[:idx]] = kv[idx+1:]
|
|
||||||
}
|
|
||||||
for k, v := range env {
|
|
||||||
if strings.TrimSpace(k) == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
merged[k] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
keys := make([]string, 0, len(merged))
|
|
||||||
for k := range merged {
|
|
||||||
keys = append(keys, k)
|
|
||||||
}
|
|
||||||
sort.Strings(keys)
|
|
||||||
|
|
||||||
out := make([]string, 0, len(keys))
|
|
||||||
for _, k := range keys {
|
|
||||||
out = append(out, k+"="+merged[k])
|
|
||||||
}
|
|
||||||
r.cmd.Env = out
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *realCmd) Process() processHandle {
|
func (r *realCmd) Process() processHandle {
|
||||||
if r == nil || r.cmd == nil || r.cmd.Process == nil {
|
if r == nil || r.cmd == nil || r.cmd.Process == nil {
|
||||||
return nil
|
return nil
|
||||||
@@ -519,259 +462,68 @@ func shouldSkipTask(task TaskSpec, failed map[string]TaskResult) (bool, string)
|
|||||||
return true, fmt.Sprintf("skipped due to failed dependencies: %s", strings.Join(blocked, ","))
|
return true, fmt.Sprintf("skipped due to failed dependencies: %s", strings.Join(blocked, ","))
|
||||||
}
|
}
|
||||||
|
|
||||||
// getStatusSymbols returns status symbols based on ASCII mode.
|
|
||||||
func getStatusSymbols() (success, warning, failed string) {
|
|
||||||
if os.Getenv("CODEAGENT_ASCII_MODE") == "true" {
|
|
||||||
return "PASS", "WARN", "FAIL"
|
|
||||||
}
|
|
||||||
return "✓", "⚠️", "✗"
|
|
||||||
}
|
|
||||||
|
|
||||||
func generateFinalOutput(results []TaskResult) string {
|
func generateFinalOutput(results []TaskResult) string {
|
||||||
return generateFinalOutputWithMode(results, true) // default to summary mode
|
|
||||||
}
|
|
||||||
|
|
||||||
// generateFinalOutputWithMode generates output based on mode
|
|
||||||
// summaryOnly=true: structured report - every token has value
|
|
||||||
// summaryOnly=false: full output with complete messages (legacy behavior)
|
|
||||||
func generateFinalOutputWithMode(results []TaskResult, summaryOnly bool) string {
|
|
||||||
var sb strings.Builder
|
var sb strings.Builder
|
||||||
successSymbol, warningSymbol, failedSymbol := getStatusSymbols()
|
|
||||||
|
|
||||||
reportCoverageTarget := defaultCoverageTarget
|
|
||||||
for _, res := range results {
|
|
||||||
if res.CoverageTarget > 0 {
|
|
||||||
reportCoverageTarget = res.CoverageTarget
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count results by status
|
|
||||||
success := 0
|
success := 0
|
||||||
failed := 0
|
failed := 0
|
||||||
belowTarget := 0
|
|
||||||
for _, res := range results {
|
for _, res := range results {
|
||||||
if res.ExitCode == 0 && res.Error == "" {
|
if res.ExitCode == 0 && res.Error == "" {
|
||||||
success++
|
success++
|
||||||
target := res.CoverageTarget
|
|
||||||
if target <= 0 {
|
|
||||||
target = reportCoverageTarget
|
|
||||||
}
|
|
||||||
if res.Coverage != "" && target > 0 && res.CoverageNum < target {
|
|
||||||
belowTarget++
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
failed++
|
failed++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if summaryOnly {
|
sb.WriteString(fmt.Sprintf("=== Parallel Execution Summary ===\n"))
|
||||||
// Header
|
sb.WriteString(fmt.Sprintf("Total: %d | Success: %d | Failed: %d\n\n", len(results), success, failed))
|
||||||
sb.WriteString("=== Execution Report ===\n")
|
|
||||||
sb.WriteString(fmt.Sprintf("%d tasks | %d passed | %d failed", len(results), success, failed))
|
for _, res := range results {
|
||||||
if belowTarget > 0 {
|
sb.WriteString(fmt.Sprintf("--- Task: %s ---\n", res.TaskID))
|
||||||
sb.WriteString(fmt.Sprintf(" | %d below %.0f%%", belowTarget, reportCoverageTarget))
|
if res.Error != "" {
|
||||||
|
sb.WriteString(fmt.Sprintf("Status: FAILED (exit code %d)\nError: %s\n", res.ExitCode, res.Error))
|
||||||
|
} else if res.ExitCode != 0 {
|
||||||
|
sb.WriteString(fmt.Sprintf("Status: FAILED (exit code %d)\n", res.ExitCode))
|
||||||
|
} else {
|
||||||
|
sb.WriteString("Status: SUCCESS\n")
|
||||||
}
|
}
|
||||||
sb.WriteString("\n\n")
|
if res.SessionID != "" {
|
||||||
|
sb.WriteString(fmt.Sprintf("Session: %s\n", res.SessionID))
|
||||||
// Task Results - each task gets: Did + Files + Tests + Coverage
|
}
|
||||||
sb.WriteString("## Task Results\n")
|
if res.LogPath != "" {
|
||||||
|
if res.sharedLog {
|
||||||
for _, res := range results {
|
sb.WriteString(fmt.Sprintf("Log: %s (shared)\n", res.LogPath))
|
||||||
taskID := sanitizeOutput(res.TaskID)
|
|
||||||
coverage := sanitizeOutput(res.Coverage)
|
|
||||||
keyOutput := sanitizeOutput(res.KeyOutput)
|
|
||||||
logPath := sanitizeOutput(res.LogPath)
|
|
||||||
filesChanged := sanitizeOutput(strings.Join(res.FilesChanged, ", "))
|
|
||||||
|
|
||||||
target := res.CoverageTarget
|
|
||||||
if target <= 0 {
|
|
||||||
target = reportCoverageTarget
|
|
||||||
}
|
|
||||||
|
|
||||||
isSuccess := res.ExitCode == 0 && res.Error == ""
|
|
||||||
isBelowTarget := isSuccess && coverage != "" && target > 0 && res.CoverageNum < target
|
|
||||||
|
|
||||||
if isSuccess && !isBelowTarget {
|
|
||||||
// Passed task: one block with Did/Files/Tests
|
|
||||||
sb.WriteString(fmt.Sprintf("\n### %s %s", taskID, successSymbol))
|
|
||||||
if coverage != "" {
|
|
||||||
sb.WriteString(fmt.Sprintf(" %s", coverage))
|
|
||||||
}
|
|
||||||
sb.WriteString("\n")
|
|
||||||
|
|
||||||
if keyOutput != "" {
|
|
||||||
sb.WriteString(fmt.Sprintf("Did: %s\n", keyOutput))
|
|
||||||
}
|
|
||||||
if len(res.FilesChanged) > 0 {
|
|
||||||
sb.WriteString(fmt.Sprintf("Files: %s\n", filesChanged))
|
|
||||||
}
|
|
||||||
if res.TestsPassed > 0 {
|
|
||||||
sb.WriteString(fmt.Sprintf("Tests: %d passed\n", res.TestsPassed))
|
|
||||||
}
|
|
||||||
if logPath != "" {
|
|
||||||
sb.WriteString(fmt.Sprintf("Log: %s\n", logPath))
|
|
||||||
}
|
|
||||||
|
|
||||||
} else if isSuccess && isBelowTarget {
|
|
||||||
// Below target: add Gap info
|
|
||||||
sb.WriteString(fmt.Sprintf("\n### %s %s %s (below %.0f%%)\n", taskID, warningSymbol, coverage, target))
|
|
||||||
|
|
||||||
if keyOutput != "" {
|
|
||||||
sb.WriteString(fmt.Sprintf("Did: %s\n", keyOutput))
|
|
||||||
}
|
|
||||||
if len(res.FilesChanged) > 0 {
|
|
||||||
sb.WriteString(fmt.Sprintf("Files: %s\n", filesChanged))
|
|
||||||
}
|
|
||||||
if res.TestsPassed > 0 {
|
|
||||||
sb.WriteString(fmt.Sprintf("Tests: %d passed\n", res.TestsPassed))
|
|
||||||
}
|
|
||||||
// Extract what's missing from coverage
|
|
||||||
gap := sanitizeOutput(extractCoverageGap(res.Message))
|
|
||||||
if gap != "" {
|
|
||||||
sb.WriteString(fmt.Sprintf("Gap: %s\n", gap))
|
|
||||||
}
|
|
||||||
if logPath != "" {
|
|
||||||
sb.WriteString(fmt.Sprintf("Log: %s\n", logPath))
|
|
||||||
}
|
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
// Failed task: show error detail
|
sb.WriteString(fmt.Sprintf("Log: %s\n", res.LogPath))
|
||||||
sb.WriteString(fmt.Sprintf("\n### %s %s FAILED\n", taskID, failedSymbol))
|
|
||||||
sb.WriteString(fmt.Sprintf("Exit code: %d\n", res.ExitCode))
|
|
||||||
if errText := sanitizeOutput(res.Error); errText != "" {
|
|
||||||
sb.WriteString(fmt.Sprintf("Error: %s\n", errText))
|
|
||||||
}
|
|
||||||
// Show context from output (last meaningful lines)
|
|
||||||
detail := sanitizeOutput(extractErrorDetail(res.Message, 300))
|
|
||||||
if detail != "" {
|
|
||||||
sb.WriteString(fmt.Sprintf("Detail: %s\n", detail))
|
|
||||||
}
|
|
||||||
if logPath != "" {
|
|
||||||
sb.WriteString(fmt.Sprintf("Log: %s\n", logPath))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if res.Message != "" {
|
||||||
// Summary section
|
sb.WriteString(fmt.Sprintf("\n%s\n", res.Message))
|
||||||
sb.WriteString("\n## Summary\n")
|
|
||||||
sb.WriteString(fmt.Sprintf("- %d/%d completed successfully\n", success, len(results)))
|
|
||||||
|
|
||||||
if belowTarget > 0 || failed > 0 {
|
|
||||||
var needFix []string
|
|
||||||
var needCoverage []string
|
|
||||||
for _, res := range results {
|
|
||||||
if res.ExitCode != 0 || res.Error != "" {
|
|
||||||
taskID := sanitizeOutput(res.TaskID)
|
|
||||||
reason := sanitizeOutput(res.Error)
|
|
||||||
if reason == "" && res.ExitCode != 0 {
|
|
||||||
reason = fmt.Sprintf("exit code %d", res.ExitCode)
|
|
||||||
}
|
|
||||||
reason = safeTruncate(reason, 50)
|
|
||||||
needFix = append(needFix, fmt.Sprintf("%s (%s)", taskID, reason))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
target := res.CoverageTarget
|
|
||||||
if target <= 0 {
|
|
||||||
target = reportCoverageTarget
|
|
||||||
}
|
|
||||||
if res.Coverage != "" && target > 0 && res.CoverageNum < target {
|
|
||||||
needCoverage = append(needCoverage, sanitizeOutput(res.TaskID))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(needFix) > 0 {
|
|
||||||
sb.WriteString(fmt.Sprintf("- Fix: %s\n", strings.Join(needFix, ", ")))
|
|
||||||
}
|
|
||||||
if len(needCoverage) > 0 {
|
|
||||||
sb.WriteString(fmt.Sprintf("- Coverage: %s\n", strings.Join(needCoverage, ", ")))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} else {
|
|
||||||
// Legacy full output mode
|
|
||||||
sb.WriteString("=== Parallel Execution Summary ===\n")
|
|
||||||
sb.WriteString(fmt.Sprintf("Total: %d | Success: %d | Failed: %d\n\n", len(results), success, failed))
|
|
||||||
|
|
||||||
for _, res := range results {
|
|
||||||
taskID := sanitizeOutput(res.TaskID)
|
|
||||||
sb.WriteString(fmt.Sprintf("--- Task: %s ---\n", taskID))
|
|
||||||
if res.Error != "" {
|
|
||||||
sb.WriteString(fmt.Sprintf("Status: FAILED (exit code %d)\nError: %s\n", res.ExitCode, sanitizeOutput(res.Error)))
|
|
||||||
} else if res.ExitCode != 0 {
|
|
||||||
sb.WriteString(fmt.Sprintf("Status: FAILED (exit code %d)\n", res.ExitCode))
|
|
||||||
} else {
|
|
||||||
sb.WriteString("Status: SUCCESS\n")
|
|
||||||
}
|
|
||||||
if res.Coverage != "" {
|
|
||||||
sb.WriteString(fmt.Sprintf("Coverage: %s\n", sanitizeOutput(res.Coverage)))
|
|
||||||
}
|
|
||||||
if res.SessionID != "" {
|
|
||||||
sb.WriteString(fmt.Sprintf("Session: %s\n", sanitizeOutput(res.SessionID)))
|
|
||||||
}
|
|
||||||
if res.LogPath != "" {
|
|
||||||
logPath := sanitizeOutput(res.LogPath)
|
|
||||||
if res.sharedLog {
|
|
||||||
sb.WriteString(fmt.Sprintf("Log: %s (shared)\n", logPath))
|
|
||||||
} else {
|
|
||||||
sb.WriteString(fmt.Sprintf("Log: %s\n", logPath))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if res.Message != "" {
|
|
||||||
message := sanitizeOutput(res.Message)
|
|
||||||
if message != "" {
|
|
||||||
sb.WriteString(fmt.Sprintf("\n%s\n", message))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sb.WriteString("\n")
|
|
||||||
}
|
}
|
||||||
|
sb.WriteString("\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
return sb.String()
|
return sb.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
func buildCodexArgs(cfg *Config, targetArg string) []string {
|
func buildCodexArgs(cfg *Config, targetArg string) []string {
|
||||||
if cfg == nil {
|
if cfg.Mode == "resume" {
|
||||||
panic("buildCodexArgs: nil config")
|
return []string{
|
||||||
}
|
"e",
|
||||||
|
"--skip-git-repo-check",
|
||||||
var resumeSessionID string
|
|
||||||
isResume := cfg.Mode == "resume"
|
|
||||||
if isResume {
|
|
||||||
resumeSessionID = strings.TrimSpace(cfg.SessionID)
|
|
||||||
if resumeSessionID == "" {
|
|
||||||
logError("invalid config: resume mode requires non-empty session_id")
|
|
||||||
isResume = false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
args := []string{"e"}
|
|
||||||
|
|
||||||
if envFlagEnabled("CODEX_BYPASS_SANDBOX") {
|
|
||||||
logWarn("CODEX_BYPASS_SANDBOX=true: running without approval/sandbox protection")
|
|
||||||
args = append(args, "--dangerously-bypass-approvals-and-sandbox")
|
|
||||||
}
|
|
||||||
|
|
||||||
if model := strings.TrimSpace(cfg.Model); model != "" {
|
|
||||||
args = append(args, "--model", model)
|
|
||||||
}
|
|
||||||
|
|
||||||
args = append(args, "--skip-git-repo-check")
|
|
||||||
|
|
||||||
if isResume {
|
|
||||||
return append(args,
|
|
||||||
"--json",
|
"--json",
|
||||||
"resume",
|
"resume",
|
||||||
resumeSessionID,
|
cfg.SessionID,
|
||||||
targetArg,
|
targetArg,
|
||||||
)
|
}
|
||||||
}
|
}
|
||||||
|
return []string{
|
||||||
return append(args,
|
"e",
|
||||||
|
"--skip-git-repo-check",
|
||||||
"-C", cfg.WorkDir,
|
"-C", cfg.WorkDir,
|
||||||
"--json",
|
"--json",
|
||||||
targetArg,
|
targetArg,
|
||||||
)
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func runCodexTask(taskSpec TaskSpec, silent bool, timeoutSec int) TaskResult {
|
func runCodexTask(taskSpec TaskSpec, silent bool, timeoutSec int) TaskResult {
|
||||||
@@ -800,7 +552,6 @@ func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
|
|||||||
Task: taskSpec.Task,
|
Task: taskSpec.Task,
|
||||||
SessionID: taskSpec.SessionID,
|
SessionID: taskSpec.SessionID,
|
||||||
WorkDir: taskSpec.WorkDir,
|
WorkDir: taskSpec.WorkDir,
|
||||||
Model: taskSpec.Model,
|
|
||||||
Backend: defaultBackendName,
|
Backend: defaultBackendName,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -823,21 +574,6 @@ func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
|
|||||||
cfg.WorkDir = defaultWorkdir
|
cfg.WorkDir = defaultWorkdir
|
||||||
}
|
}
|
||||||
|
|
||||||
if cfg.Mode == "resume" && strings.TrimSpace(cfg.SessionID) == "" {
|
|
||||||
result.ExitCode = 1
|
|
||||||
result.Error = "resume mode requires non-empty session_id"
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
var claudeEnv map[string]string
|
|
||||||
if cfg.Backend == "claude" {
|
|
||||||
settings := loadMinimalClaudeSettings()
|
|
||||||
claudeEnv = settings.Env
|
|
||||||
if cfg.Mode != "resume" && strings.TrimSpace(cfg.Model) == "" && settings.Model != "" {
|
|
||||||
cfg.Model = settings.Model
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
useStdin := taskSpec.UseStdin
|
useStdin := taskSpec.UseStdin
|
||||||
targetArg := taskSpec.Task
|
targetArg := taskSpec.Task
|
||||||
if useStdin {
|
if useStdin {
|
||||||
@@ -937,10 +673,6 @@ func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
|
|||||||
|
|
||||||
cmd := newCommandRunner(ctx, commandName, codexArgs...)
|
cmd := newCommandRunner(ctx, commandName, codexArgs...)
|
||||||
|
|
||||||
if cfg.Backend == "claude" && len(claudeEnv) > 0 {
|
|
||||||
cmd.SetEnv(claudeEnv)
|
|
||||||
}
|
|
||||||
|
|
||||||
// For backends that don't support -C flag (claude, gemini), set working directory via cmd.Dir
|
// For backends that don't support -C flag (claude, gemini), set working directory via cmd.Dir
|
||||||
// Codex passes workdir via -C flag, so we skip setting Dir for it to avoid conflicts
|
// Codex passes workdir via -C flag, so we skip setting Dir for it to avoid conflicts
|
||||||
if cfg.Mode != "resume" && commandName != "codex" && cfg.WorkDir != "" {
|
if cfg.Mode != "resume" && commandName != "codex" && cfg.WorkDir != "" {
|
||||||
@@ -959,40 +691,33 @@ func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
|
|||||||
if cfg.Backend == "gemini" {
|
if cfg.Backend == "gemini" {
|
||||||
stderrFilter = newFilteringWriter(os.Stderr, geminiNoisePatterns)
|
stderrFilter = newFilteringWriter(os.Stderr, geminiNoisePatterns)
|
||||||
stderrOut = stderrFilter
|
stderrOut = stderrFilter
|
||||||
|
defer stderrFilter.Flush()
|
||||||
}
|
}
|
||||||
stderrWriters = append([]io.Writer{stderrOut}, stderrWriters...)
|
stderrWriters = append([]io.Writer{stderrOut}, stderrWriters...)
|
||||||
}
|
}
|
||||||
stderr, err := cmd.StderrPipe()
|
if len(stderrWriters) == 1 {
|
||||||
if err != nil {
|
cmd.SetStderr(stderrWriters[0])
|
||||||
logErrorFn("Failed to create stderr pipe: " + err.Error())
|
} else {
|
||||||
result.ExitCode = 1
|
cmd.SetStderr(io.MultiWriter(stderrWriters...))
|
||||||
result.Error = attachStderr("failed to create stderr pipe: " + err.Error())
|
|
||||||
return result
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var stdinPipe io.WriteCloser
|
var stdinPipe io.WriteCloser
|
||||||
|
var err error
|
||||||
if useStdin {
|
if useStdin {
|
||||||
stdinPipe, err = cmd.StdinPipe()
|
stdinPipe, err = cmd.StdinPipe()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logErrorFn("Failed to create stdin pipe: " + err.Error())
|
logErrorFn("Failed to create stdin pipe: " + err.Error())
|
||||||
result.ExitCode = 1
|
result.ExitCode = 1
|
||||||
result.Error = attachStderr("failed to create stdin pipe: " + err.Error())
|
result.Error = attachStderr("failed to create stdin pipe: " + err.Error())
|
||||||
closeWithReason(stderr, "stdin-pipe-failed")
|
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
stderrDone := make(chan error, 1)
|
|
||||||
|
|
||||||
stdout, err := cmd.StdoutPipe()
|
stdout, err := cmd.StdoutPipe()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logErrorFn("Failed to create stdout pipe: " + err.Error())
|
logErrorFn("Failed to create stdout pipe: " + err.Error())
|
||||||
result.ExitCode = 1
|
result.ExitCode = 1
|
||||||
result.Error = attachStderr("failed to create stdout pipe: " + err.Error())
|
result.Error = attachStderr("failed to create stdout pipe: " + err.Error())
|
||||||
closeWithReason(stderr, "stdout-pipe-failed")
|
|
||||||
if stdinPipe != nil {
|
|
||||||
_ = stdinPipe.Close()
|
|
||||||
}
|
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1004,7 +729,6 @@ func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
|
|||||||
// Start parse goroutine BEFORE starting the command to avoid race condition
|
// Start parse goroutine BEFORE starting the command to avoid race condition
|
||||||
// where fast-completing commands close stdout before parser starts reading
|
// where fast-completing commands close stdout before parser starts reading
|
||||||
messageSeen := make(chan struct{}, 1)
|
messageSeen := make(chan struct{}, 1)
|
||||||
completeSeen := make(chan struct{}, 1)
|
|
||||||
parseCh := make(chan parseResult, 1)
|
parseCh := make(chan parseResult, 1)
|
||||||
go func() {
|
go func() {
|
||||||
msg, tid := parseJSONStreamInternal(stdoutReader, logWarnFn, logInfoFn, func() {
|
msg, tid := parseJSONStreamInternal(stdoutReader, logWarnFn, logInfoFn, func() {
|
||||||
@@ -1012,27 +736,13 @@ func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
|
|||||||
case messageSeen <- struct{}{}:
|
case messageSeen <- struct{}{}:
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
}, func() {
|
|
||||||
select {
|
|
||||||
case completeSeen <- struct{}{}:
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
})
|
})
|
||||||
select {
|
|
||||||
case completeSeen <- struct{}{}:
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
parseCh <- parseResult{message: msg, threadID: tid}
|
parseCh <- parseResult{message: msg, threadID: tid}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
logInfoFn(fmt.Sprintf("Starting %s with args: %s %s...", commandName, commandName, strings.Join(codexArgs[:min(5, len(codexArgs))], " ")))
|
logInfoFn(fmt.Sprintf("Starting %s with args: %s %s...", commandName, commandName, strings.Join(codexArgs[:min(5, len(codexArgs))], " ")))
|
||||||
|
|
||||||
if err := cmd.Start(); err != nil {
|
if err := cmd.Start(); err != nil {
|
||||||
closeWithReason(stdout, "start-failed")
|
|
||||||
closeWithReason(stderr, "start-failed")
|
|
||||||
if stdinPipe != nil {
|
|
||||||
_ = stdinPipe.Close()
|
|
||||||
}
|
|
||||||
if strings.Contains(err.Error(), "executable file not found") {
|
if strings.Contains(err.Error(), "executable file not found") {
|
||||||
msg := fmt.Sprintf("%s command not found in PATH", commandName)
|
msg := fmt.Sprintf("%s command not found in PATH", commandName)
|
||||||
logErrorFn(msg)
|
logErrorFn(msg)
|
||||||
@@ -1051,15 +761,6 @@ func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
|
|||||||
logInfoFn(fmt.Sprintf("Log capturing to: %s", logger.Path()))
|
logInfoFn(fmt.Sprintf("Log capturing to: %s", logger.Path()))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start stderr drain AFTER we know the command started, but BEFORE cmd.Wait can close the pipe.
|
|
||||||
go func() {
|
|
||||||
_, copyErr := io.Copy(io.MultiWriter(stderrWriters...), stderr)
|
|
||||||
if stderrFilter != nil {
|
|
||||||
stderrFilter.Flush()
|
|
||||||
}
|
|
||||||
stderrDone <- copyErr
|
|
||||||
}()
|
|
||||||
|
|
||||||
if useStdin && stdinPipe != nil {
|
if useStdin && stdinPipe != nil {
|
||||||
logInfoFn(fmt.Sprintf("Writing %d chars to stdin...", len(taskSpec.Task)))
|
logInfoFn(fmt.Sprintf("Writing %d chars to stdin...", len(taskSpec.Task)))
|
||||||
go func(data string) {
|
go func(data string) {
|
||||||
@@ -1072,68 +773,17 @@ func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
|
|||||||
waitCh := make(chan error, 1)
|
waitCh := make(chan error, 1)
|
||||||
go func() { waitCh <- cmd.Wait() }()
|
go func() { waitCh <- cmd.Wait() }()
|
||||||
|
|
||||||
var (
|
var waitErr error
|
||||||
waitErr error
|
var forceKillTimer *forceKillTimer
|
||||||
forceKillTimer *forceKillTimer
|
var ctxCancelled bool
|
||||||
ctxCancelled bool
|
|
||||||
messageTimer *time.Timer
|
|
||||||
messageTimerCh <-chan time.Time
|
|
||||||
forcedAfterComplete bool
|
|
||||||
terminated bool
|
|
||||||
messageSeenObserved bool
|
|
||||||
completeSeenObserved bool
|
|
||||||
)
|
|
||||||
|
|
||||||
waitLoop:
|
select {
|
||||||
for {
|
case waitErr = <-waitCh:
|
||||||
select {
|
case <-ctx.Done():
|
||||||
case waitErr = <-waitCh:
|
ctxCancelled = true
|
||||||
break waitLoop
|
logErrorFn(cancelReason(commandName, ctx))
|
||||||
case <-ctx.Done():
|
forceKillTimer = terminateCommandFn(cmd)
|
||||||
ctxCancelled = true
|
waitErr = <-waitCh
|
||||||
logErrorFn(cancelReason(commandName, ctx))
|
|
||||||
if !terminated {
|
|
||||||
if timer := terminateCommandFn(cmd); timer != nil {
|
|
||||||
forceKillTimer = timer
|
|
||||||
terminated = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
waitErr = <-waitCh
|
|
||||||
break waitLoop
|
|
||||||
case <-messageTimerCh:
|
|
||||||
forcedAfterComplete = true
|
|
||||||
messageTimerCh = nil
|
|
||||||
if !terminated {
|
|
||||||
logWarnFn(fmt.Sprintf("%s output parsed; terminating lingering backend", commandName))
|
|
||||||
if timer := terminateCommandFn(cmd); timer != nil {
|
|
||||||
forceKillTimer = timer
|
|
||||||
terminated = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Close pipes to unblock stream readers, then wait for process exit.
|
|
||||||
closeWithReason(stdout, "terminate")
|
|
||||||
closeWithReason(stderr, "terminate")
|
|
||||||
waitErr = <-waitCh
|
|
||||||
break waitLoop
|
|
||||||
case <-completeSeen:
|
|
||||||
completeSeenObserved = true
|
|
||||||
if messageTimer != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
messageTimer = time.NewTimer(postMessageTerminateDelay)
|
|
||||||
messageTimerCh = messageTimer.C
|
|
||||||
case <-messageSeen:
|
|
||||||
messageSeenObserved = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if messageTimer != nil {
|
|
||||||
if !messageTimer.Stop() {
|
|
||||||
select {
|
|
||||||
case <-messageTimer.C:
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if forceKillTimer != nil {
|
if forceKillTimer != nil {
|
||||||
@@ -1141,14 +791,10 @@ waitLoop:
|
|||||||
}
|
}
|
||||||
|
|
||||||
var parsed parseResult
|
var parsed parseResult
|
||||||
switch {
|
if ctxCancelled {
|
||||||
case ctxCancelled:
|
|
||||||
closeWithReason(stdout, stdoutCloseReasonCtx)
|
closeWithReason(stdout, stdoutCloseReasonCtx)
|
||||||
parsed = <-parseCh
|
parsed = <-parseCh
|
||||||
case messageSeenObserved || completeSeenObserved:
|
} else {
|
||||||
closeWithReason(stdout, stdoutCloseReasonWait)
|
|
||||||
parsed = <-parseCh
|
|
||||||
default:
|
|
||||||
drainTimer := time.NewTimer(stdoutDrainTimeout)
|
drainTimer := time.NewTimer(stdoutDrainTimeout)
|
||||||
defer drainTimer.Stop()
|
defer drainTimer.Stop()
|
||||||
|
|
||||||
@@ -1156,11 +802,6 @@ waitLoop:
|
|||||||
case parsed = <-parseCh:
|
case parsed = <-parseCh:
|
||||||
closeWithReason(stdout, stdoutCloseReasonWait)
|
closeWithReason(stdout, stdoutCloseReasonWait)
|
||||||
case <-messageSeen:
|
case <-messageSeen:
|
||||||
messageSeenObserved = true
|
|
||||||
closeWithReason(stdout, stdoutCloseReasonWait)
|
|
||||||
parsed = <-parseCh
|
|
||||||
case <-completeSeen:
|
|
||||||
completeSeenObserved = true
|
|
||||||
closeWithReason(stdout, stdoutCloseReasonWait)
|
closeWithReason(stdout, stdoutCloseReasonWait)
|
||||||
parsed = <-parseCh
|
parsed = <-parseCh
|
||||||
case <-drainTimer.C:
|
case <-drainTimer.C:
|
||||||
@@ -1169,12 +810,6 @@ waitLoop:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
closeWithReason(stderr, stdoutCloseReasonWait)
|
|
||||||
// Wait for stderr drain so stderrBuf / stderrLogger are not accessed concurrently.
|
|
||||||
// Important: cmd.Wait can block on internal stderr copying if cmd.Stderr is a non-file writer.
|
|
||||||
// We use StderrPipe and drain ourselves to avoid that deadlock class (common when children inherit pipes).
|
|
||||||
<-stderrDone
|
|
||||||
|
|
||||||
if ctxErr := ctx.Err(); ctxErr != nil {
|
if ctxErr := ctx.Err(); ctxErr != nil {
|
||||||
if errors.Is(ctxErr, context.DeadlineExceeded) {
|
if errors.Is(ctxErr, context.DeadlineExceeded) {
|
||||||
result.ExitCode = 124
|
result.ExitCode = 124
|
||||||
@@ -1187,21 +822,17 @@ waitLoop:
|
|||||||
}
|
}
|
||||||
|
|
||||||
if waitErr != nil {
|
if waitErr != nil {
|
||||||
if forcedAfterComplete && parsed.message != "" {
|
if exitErr, ok := waitErr.(*exec.ExitError); ok {
|
||||||
logWarnFn(fmt.Sprintf("%s terminated after delivering output", commandName))
|
code := exitErr.ExitCode()
|
||||||
} else {
|
logErrorFn(fmt.Sprintf("%s exited with status %d", commandName, code))
|
||||||
if exitErr, ok := waitErr.(*exec.ExitError); ok {
|
result.ExitCode = code
|
||||||
code := exitErr.ExitCode()
|
result.Error = attachStderr(fmt.Sprintf("%s exited with status %d", commandName, code))
|
||||||
logErrorFn(fmt.Sprintf("%s exited with status %d", commandName, code))
|
|
||||||
result.ExitCode = code
|
|
||||||
result.Error = attachStderr(fmt.Sprintf("%s exited with status %d", commandName, code))
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
logErrorFn(commandName + " error: " + waitErr.Error())
|
|
||||||
result.ExitCode = 1
|
|
||||||
result.Error = attachStderr(commandName + " error: " + waitErr.Error())
|
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
logErrorFn(commandName + " error: " + waitErr.Error())
|
||||||
|
result.ExitCode = 1
|
||||||
|
result.Error = attachStderr(commandName + " error: " + waitErr.Error())
|
||||||
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
message := parsed.message
|
message := parsed.message
|
||||||
@@ -1249,7 +880,7 @@ func forwardSignals(ctx context.Context, cmd commandRunner, logErrorFn func(stri
|
|||||||
case sig := <-sigCh:
|
case sig := <-sigCh:
|
||||||
logErrorFn(fmt.Sprintf("Received signal: %v", sig))
|
logErrorFn(fmt.Sprintf("Received signal: %v", sig))
|
||||||
if proc := cmd.Process(); proc != nil {
|
if proc := cmd.Process(); proc != nil {
|
||||||
_ = sendTermSignal(proc)
|
_ = proc.Signal(syscall.SIGTERM)
|
||||||
time.AfterFunc(time.Duration(forceKillDelay.Load())*time.Second, func() {
|
time.AfterFunc(time.Duration(forceKillDelay.Load())*time.Second, func() {
|
||||||
if p := cmd.Process(); p != nil {
|
if p := cmd.Process(); p != nil {
|
||||||
_ = p.Kill()
|
_ = p.Kill()
|
||||||
@@ -1319,7 +950,7 @@ func terminateCommand(cmd commandRunner) *forceKillTimer {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
_ = sendTermSignal(proc)
|
_ = proc.Signal(syscall.SIGTERM)
|
||||||
|
|
||||||
done := make(chan struct{}, 1)
|
done := make(chan struct{}, 1)
|
||||||
timer := time.AfterFunc(time.Duration(forceKillDelay.Load())*time.Second, func() {
|
timer := time.AfterFunc(time.Duration(forceKillDelay.Load())*time.Second, func() {
|
||||||
@@ -1341,7 +972,7 @@ func terminateProcess(cmd commandRunner) *time.Timer {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
_ = sendTermSignal(proc)
|
_ = proc.Signal(syscall.SIGTERM)
|
||||||
|
|
||||||
return time.AfterFunc(time.Duration(forceKillDelay.Load())*time.Second, func() {
|
return time.AfterFunc(time.Duration(forceKillDelay.Load())*time.Second, func() {
|
||||||
if p := cmd.Process(); p != nil {
|
if p := cmd.Process(); p != nil {
|
||||||
|
|||||||
@@ -10,8 +10,6 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
|
||||||
"slices"
|
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
@@ -33,12 +31,7 @@ type execFakeProcess struct {
|
|||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *execFakeProcess) Pid() int {
|
func (p *execFakeProcess) Pid() int { return p.pid }
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return p.pid
|
|
||||||
}
|
|
||||||
func (p *execFakeProcess) Kill() error {
|
func (p *execFakeProcess) Kill() error {
|
||||||
p.killed.Add(1)
|
p.killed.Add(1)
|
||||||
return nil
|
return nil
|
||||||
@@ -90,16 +83,13 @@ func (rc *reasonReadCloser) record(reason string) {
|
|||||||
|
|
||||||
type execFakeRunner struct {
|
type execFakeRunner struct {
|
||||||
stdout io.ReadCloser
|
stdout io.ReadCloser
|
||||||
stderr io.ReadCloser
|
|
||||||
process processHandle
|
process processHandle
|
||||||
stdin io.WriteCloser
|
stdin io.WriteCloser
|
||||||
dir string
|
dir string
|
||||||
env map[string]string
|
|
||||||
waitErr error
|
waitErr error
|
||||||
waitDelay time.Duration
|
waitDelay time.Duration
|
||||||
startErr error
|
startErr error
|
||||||
stdoutErr error
|
stdoutErr error
|
||||||
stderrErr error
|
|
||||||
stdinErr error
|
stdinErr error
|
||||||
allowNilProcess bool
|
allowNilProcess bool
|
||||||
started atomic.Bool
|
started atomic.Bool
|
||||||
@@ -127,15 +117,6 @@ func (f *execFakeRunner) StdoutPipe() (io.ReadCloser, error) {
|
|||||||
}
|
}
|
||||||
return f.stdout, nil
|
return f.stdout, nil
|
||||||
}
|
}
|
||||||
func (f *execFakeRunner) StderrPipe() (io.ReadCloser, error) {
|
|
||||||
if f.stderrErr != nil {
|
|
||||||
return nil, f.stderrErr
|
|
||||||
}
|
|
||||||
if f.stderr == nil {
|
|
||||||
f.stderr = io.NopCloser(strings.NewReader(""))
|
|
||||||
}
|
|
||||||
return f.stderr, nil
|
|
||||||
}
|
|
||||||
func (f *execFakeRunner) StdinPipe() (io.WriteCloser, error) {
|
func (f *execFakeRunner) StdinPipe() (io.WriteCloser, error) {
|
||||||
if f.stdinErr != nil {
|
if f.stdinErr != nil {
|
||||||
return nil, f.stdinErr
|
return nil, f.stdinErr
|
||||||
@@ -147,17 +128,6 @@ func (f *execFakeRunner) StdinPipe() (io.WriteCloser, error) {
|
|||||||
}
|
}
|
||||||
func (f *execFakeRunner) SetStderr(io.Writer) {}
|
func (f *execFakeRunner) SetStderr(io.Writer) {}
|
||||||
func (f *execFakeRunner) SetDir(dir string) { f.dir = dir }
|
func (f *execFakeRunner) SetDir(dir string) { f.dir = dir }
|
||||||
func (f *execFakeRunner) SetEnv(env map[string]string) {
|
|
||||||
if len(env) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if f.env == nil {
|
|
||||||
f.env = make(map[string]string, len(env))
|
|
||||||
}
|
|
||||||
for k, v := range env {
|
|
||||||
f.env[k] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func (f *execFakeRunner) Process() processHandle {
|
func (f *execFakeRunner) Process() processHandle {
|
||||||
if f.process != nil {
|
if f.process != nil {
|
||||||
return f.process
|
return f.process
|
||||||
@@ -180,9 +150,6 @@ func TestExecutorHelperCoverage(t *testing.T) {
|
|||||||
if _, err := rc.StdoutPipe(); err == nil {
|
if _, err := rc.StdoutPipe(); err == nil {
|
||||||
t.Fatalf("expected error for nil command")
|
t.Fatalf("expected error for nil command")
|
||||||
}
|
}
|
||||||
if _, err := rc.StderrPipe(); err == nil {
|
|
||||||
t.Fatalf("expected error for nil command")
|
|
||||||
}
|
|
||||||
if _, err := rc.StdinPipe(); err == nil {
|
if _, err := rc.StdinPipe(); err == nil {
|
||||||
t.Fatalf("expected error for nil command")
|
t.Fatalf("expected error for nil command")
|
||||||
}
|
}
|
||||||
@@ -202,14 +169,11 @@ func TestExecutorHelperCoverage(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("StdoutPipe error: %v", err)
|
t.Fatalf("StdoutPipe error: %v", err)
|
||||||
}
|
}
|
||||||
stderrPipe, err := rcProc.StderrPipe()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("StderrPipe error: %v", err)
|
|
||||||
}
|
|
||||||
stdinPipe, err := rcProc.StdinPipe()
|
stdinPipe, err := rcProc.StdinPipe()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("StdinPipe error: %v", err)
|
t.Fatalf("StdinPipe error: %v", err)
|
||||||
}
|
}
|
||||||
|
rcProc.SetStderr(io.Discard)
|
||||||
if err := rcProc.Start(); err != nil {
|
if err := rcProc.Start(); err != nil {
|
||||||
t.Fatalf("Start failed: %v", err)
|
t.Fatalf("Start failed: %v", err)
|
||||||
}
|
}
|
||||||
@@ -223,7 +187,6 @@ func TestExecutorHelperCoverage(t *testing.T) {
|
|||||||
_ = procHandle.Kill()
|
_ = procHandle.Kill()
|
||||||
_ = rcProc.Wait()
|
_ = rcProc.Wait()
|
||||||
_, _ = io.ReadAll(stdoutPipe)
|
_, _ = io.ReadAll(stdoutPipe)
|
||||||
_, _ = io.ReadAll(stderrPipe)
|
|
||||||
|
|
||||||
rp := &realProcess{}
|
rp := &realProcess{}
|
||||||
if rp.Pid() != 0 {
|
if rp.Pid() != 0 {
|
||||||
@@ -281,10 +244,6 @@ func TestExecutorHelperCoverage(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("generateFinalOutputAndArgs", func(t *testing.T) {
|
t.Run("generateFinalOutputAndArgs", func(t *testing.T) {
|
||||||
const key = "CODEX_BYPASS_SANDBOX"
|
|
||||||
t.Cleanup(func() { os.Unsetenv(key) })
|
|
||||||
os.Unsetenv(key)
|
|
||||||
|
|
||||||
out := generateFinalOutput([]TaskResult{
|
out := generateFinalOutput([]TaskResult{
|
||||||
{TaskID: "ok", ExitCode: 0},
|
{TaskID: "ok", ExitCode: 0},
|
||||||
{TaskID: "fail", ExitCode: 1, Error: "boom"},
|
{TaskID: "fail", ExitCode: 1, Error: "boom"},
|
||||||
@@ -292,66 +251,21 @@ func TestExecutorHelperCoverage(t *testing.T) {
|
|||||||
if !strings.Contains(out, "ok") || !strings.Contains(out, "fail") {
|
if !strings.Contains(out, "ok") || !strings.Contains(out, "fail") {
|
||||||
t.Fatalf("unexpected summary output: %s", out)
|
t.Fatalf("unexpected summary output: %s", out)
|
||||||
}
|
}
|
||||||
// Test summary mode (default) - should have new format with ### headers
|
|
||||||
out = generateFinalOutput([]TaskResult{{TaskID: "rich", ExitCode: 0, SessionID: "sess", LogPath: "/tmp/log", Message: "hello"}})
|
out = generateFinalOutput([]TaskResult{{TaskID: "rich", ExitCode: 0, SessionID: "sess", LogPath: "/tmp/log", Message: "hello"}})
|
||||||
if !strings.Contains(out, "### rich") {
|
|
||||||
t.Fatalf("summary output missing task header: %s", out)
|
|
||||||
}
|
|
||||||
// Test full output mode - should have Session and Message
|
|
||||||
out = generateFinalOutputWithMode([]TaskResult{{TaskID: "rich", ExitCode: 0, SessionID: "sess", LogPath: "/tmp/log", Message: "hello"}}, false)
|
|
||||||
if !strings.Contains(out, "Session: sess") || !strings.Contains(out, "Log: /tmp/log") || !strings.Contains(out, "hello") {
|
if !strings.Contains(out, "Session: sess") || !strings.Contains(out, "Log: /tmp/log") || !strings.Contains(out, "hello") {
|
||||||
t.Fatalf("full output missing fields: %s", out)
|
t.Fatalf("rich output missing fields: %s", out)
|
||||||
}
|
}
|
||||||
|
|
||||||
args := buildCodexArgs(&Config{Mode: "new", WorkDir: "/tmp"}, "task")
|
args := buildCodexArgs(&Config{Mode: "new", WorkDir: "/tmp"}, "task")
|
||||||
if !slices.Equal(args, []string{"e", "--skip-git-repo-check", "-C", "/tmp", "--json", "task"}) {
|
if len(args) == 0 || args[3] != "/tmp" {
|
||||||
t.Fatalf("unexpected codex args: %+v", args)
|
t.Fatalf("unexpected codex args: %+v", args)
|
||||||
}
|
}
|
||||||
args = buildCodexArgs(&Config{Mode: "resume", SessionID: "sess"}, "target")
|
args = buildCodexArgs(&Config{Mode: "resume", SessionID: "sess"}, "target")
|
||||||
if !slices.Equal(args, []string{"e", "--skip-git-repo-check", "--json", "resume", "sess", "target"}) {
|
if args[3] != "resume" || args[4] != "sess" {
|
||||||
t.Fatalf("unexpected resume args: %+v", args)
|
t.Fatalf("unexpected resume args: %+v", args)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("generateFinalOutputASCIIMode", func(t *testing.T) {
|
|
||||||
t.Setenv("CODEAGENT_ASCII_MODE", "true")
|
|
||||||
|
|
||||||
results := []TaskResult{
|
|
||||||
{TaskID: "ok", ExitCode: 0, Coverage: "92%", CoverageNum: 92, CoverageTarget: 90, KeyOutput: "done"},
|
|
||||||
{TaskID: "warn", ExitCode: 0, Coverage: "80%", CoverageNum: 80, CoverageTarget: 90, KeyOutput: "did"},
|
|
||||||
{TaskID: "bad", ExitCode: 2, Error: "boom"},
|
|
||||||
}
|
|
||||||
out := generateFinalOutput(results)
|
|
||||||
|
|
||||||
for _, sym := range []string{"PASS", "WARN", "FAIL"} {
|
|
||||||
if !strings.Contains(out, sym) {
|
|
||||||
t.Fatalf("ASCII mode should include %q, got: %s", sym, out)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, sym := range []string{"✓", "⚠️", "✗"} {
|
|
||||||
if strings.Contains(out, sym) {
|
|
||||||
t.Fatalf("ASCII mode should not include %q, got: %s", sym, out)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("generateFinalOutputUnicodeMode", func(t *testing.T) {
|
|
||||||
t.Setenv("CODEAGENT_ASCII_MODE", "false")
|
|
||||||
|
|
||||||
results := []TaskResult{
|
|
||||||
{TaskID: "ok", ExitCode: 0, Coverage: "92%", CoverageNum: 92, CoverageTarget: 90, KeyOutput: "done"},
|
|
||||||
{TaskID: "warn", ExitCode: 0, Coverage: "80%", CoverageNum: 80, CoverageTarget: 90, KeyOutput: "did"},
|
|
||||||
{TaskID: "bad", ExitCode: 2, Error: "boom"},
|
|
||||||
}
|
|
||||||
out := generateFinalOutput(results)
|
|
||||||
|
|
||||||
for _, sym := range []string{"✓", "⚠️", "✗"} {
|
|
||||||
if !strings.Contains(out, sym) {
|
|
||||||
t.Fatalf("Unicode mode should include %q, got: %s", sym, out)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("executeConcurrentWrapper", func(t *testing.T) {
|
t.Run("executeConcurrentWrapper", func(t *testing.T) {
|
||||||
orig := runCodexTaskFn
|
orig := runCodexTaskFn
|
||||||
defer func() { runCodexTaskFn = orig }()
|
defer func() { runCodexTaskFn = orig }()
|
||||||
@@ -384,18 +298,6 @@ func TestExecutorRunCodexTaskWithContext(t *testing.T) {
|
|||||||
origRunner := newCommandRunner
|
origRunner := newCommandRunner
|
||||||
defer func() { newCommandRunner = origRunner }()
|
defer func() { newCommandRunner = origRunner }()
|
||||||
|
|
||||||
t.Run("resumeMissingSessionID", func(t *testing.T) {
|
|
||||||
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
|
|
||||||
t.Fatalf("unexpected command execution for invalid resume config")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
res := runCodexTaskWithContext(context.Background(), TaskSpec{Task: "payload", WorkDir: ".", Mode: "resume"}, nil, nil, false, false, 1)
|
|
||||||
if res.ExitCode == 0 || !strings.Contains(res.Error, "session_id") {
|
|
||||||
t.Fatalf("expected validation error, got %+v", res)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("success", func(t *testing.T) {
|
t.Run("success", func(t *testing.T) {
|
||||||
var firstStdout *reasonReadCloser
|
var firstStdout *reasonReadCloser
|
||||||
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
|
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
|
||||||
@@ -1180,10 +1082,9 @@ func TestExecutorExecuteConcurrentWithContextBranches(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test full output mode for shared marker (summary mode doesn't show it)
|
summary := generateFinalOutput(results)
|
||||||
summary := generateFinalOutputWithMode(results, false)
|
|
||||||
if !strings.Contains(summary, "(shared)") {
|
if !strings.Contains(summary, "(shared)") {
|
||||||
t.Fatalf("full output missing shared marker: %s", summary)
|
t.Fatalf("summary missing shared marker: %s", summary)
|
||||||
}
|
}
|
||||||
|
|
||||||
mainLogger.Flush()
|
mainLogger.Flush()
|
||||||
@@ -1274,7 +1175,7 @@ func TestExecutorSignalAndTermination(t *testing.T) {
|
|||||||
proc.mu.Lock()
|
proc.mu.Lock()
|
||||||
signalled := len(proc.signals)
|
signalled := len(proc.signals)
|
||||||
proc.mu.Unlock()
|
proc.mu.Unlock()
|
||||||
if runtime.GOOS != "windows" && signalled == 0 {
|
if signalled == 0 {
|
||||||
t.Fatalf("process did not receive signal")
|
t.Fatalf("process did not receive signal")
|
||||||
}
|
}
|
||||||
if proc.killed.Load() == 0 {
|
if proc.killed.Load() == 0 {
|
||||||
|
|||||||
@@ -366,8 +366,7 @@ func (l *Logger) run() {
|
|||||||
defer ticker.Stop()
|
defer ticker.Stop()
|
||||||
|
|
||||||
writeEntry := func(entry logEntry) {
|
writeEntry := func(entry logEntry) {
|
||||||
timestamp := time.Now().Format("2006-01-02 15:04:05.000")
|
fmt.Fprintf(l.writer, "%s\n", entry.msg)
|
||||||
fmt.Fprintf(l.writer, "[%s] %s\n", timestamp, entry.msg)
|
|
||||||
|
|
||||||
// Cache error/warn entries in memory for fast extraction
|
// Cache error/warn entries in memory for fast extraction
|
||||||
if entry.isError {
|
if entry.isError {
|
||||||
|
|||||||
@@ -14,15 +14,14 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
version = "5.4.0"
|
version = "5.2.5"
|
||||||
defaultWorkdir = "."
|
defaultWorkdir = "."
|
||||||
defaultTimeout = 7200 // seconds (2 hours)
|
defaultTimeout = 7200 // seconds
|
||||||
defaultCoverageTarget = 90.0
|
codexLogLineLimit = 1000
|
||||||
codexLogLineLimit = 1000
|
stdinSpecialChars = "\n\\\"'`$"
|
||||||
stdinSpecialChars = "\n\\\"'`$"
|
stderrCaptureLimit = 4 * 1024
|
||||||
stderrCaptureLimit = 4 * 1024
|
defaultBackendName = "codex"
|
||||||
defaultBackendName = "codex"
|
defaultCodexCommand = "codex"
|
||||||
defaultCodexCommand = "codex"
|
|
||||||
|
|
||||||
// stdout close reasons
|
// stdout close reasons
|
||||||
stdoutCloseReasonWait = "wait-done"
|
stdoutCloseReasonWait = "wait-done"
|
||||||
@@ -31,8 +30,6 @@ const (
|
|||||||
stdoutDrainTimeout = 100 * time.Millisecond
|
stdoutDrainTimeout = 100 * time.Millisecond
|
||||||
)
|
)
|
||||||
|
|
||||||
var useASCIIMode = os.Getenv("CODEAGENT_ASCII_MODE") == "true"
|
|
||||||
|
|
||||||
// Test hooks for dependency injection
|
// Test hooks for dependency injection
|
||||||
var (
|
var (
|
||||||
stdinReader io.Reader = os.Stdin
|
stdinReader io.Reader = os.Stdin
|
||||||
@@ -178,8 +175,6 @@ func run() (exitCode int) {
|
|||||||
|
|
||||||
if parallelIndex != -1 {
|
if parallelIndex != -1 {
|
||||||
backendName := defaultBackendName
|
backendName := defaultBackendName
|
||||||
model := ""
|
|
||||||
fullOutput := false
|
|
||||||
var extras []string
|
var extras []string
|
||||||
|
|
||||||
for i := 0; i < len(args); i++ {
|
for i := 0; i < len(args); i++ {
|
||||||
@@ -187,8 +182,6 @@ func run() (exitCode int) {
|
|||||||
switch {
|
switch {
|
||||||
case arg == "--parallel":
|
case arg == "--parallel":
|
||||||
continue
|
continue
|
||||||
case arg == "--full-output":
|
|
||||||
fullOutput = true
|
|
||||||
case arg == "--backend":
|
case arg == "--backend":
|
||||||
if i+1 >= len(args) {
|
if i+1 >= len(args) {
|
||||||
fmt.Fprintln(os.Stderr, "ERROR: --backend flag requires a value")
|
fmt.Fprintln(os.Stderr, "ERROR: --backend flag requires a value")
|
||||||
@@ -203,32 +196,17 @@ func run() (exitCode int) {
|
|||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
backendName = value
|
backendName = value
|
||||||
case arg == "--model":
|
|
||||||
if i+1 >= len(args) {
|
|
||||||
fmt.Fprintln(os.Stderr, "ERROR: --model flag requires a value")
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
model = args[i+1]
|
|
||||||
i++
|
|
||||||
case strings.HasPrefix(arg, "--model="):
|
|
||||||
value := strings.TrimPrefix(arg, "--model=")
|
|
||||||
if value == "" {
|
|
||||||
fmt.Fprintln(os.Stderr, "ERROR: --model flag requires a value")
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
model = value
|
|
||||||
default:
|
default:
|
||||||
extras = append(extras, arg)
|
extras = append(extras, arg)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(extras) > 0 {
|
if len(extras) > 0 {
|
||||||
fmt.Fprintln(os.Stderr, "ERROR: --parallel reads its task configuration from stdin; only --backend, --model and --full-output are allowed.")
|
fmt.Fprintln(os.Stderr, "ERROR: --parallel reads its task configuration from stdin; only --backend is allowed.")
|
||||||
fmt.Fprintln(os.Stderr, "Usage examples:")
|
fmt.Fprintln(os.Stderr, "Usage examples:")
|
||||||
fmt.Fprintf(os.Stderr, " %s --parallel < tasks.txt\n", name)
|
fmt.Fprintf(os.Stderr, " %s --parallel < tasks.txt\n", name)
|
||||||
fmt.Fprintf(os.Stderr, " echo '...' | %s --parallel\n", name)
|
fmt.Fprintf(os.Stderr, " echo '...' | %s --parallel\n", name)
|
||||||
fmt.Fprintf(os.Stderr, " %s --parallel <<'EOF'\n", name)
|
fmt.Fprintf(os.Stderr, " %s --parallel <<'EOF'\n", name)
|
||||||
fmt.Fprintf(os.Stderr, " %s --parallel --full-output <<'EOF' # include full task output\n", name)
|
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -252,14 +230,10 @@ func run() (exitCode int) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
cfg.GlobalBackend = backendName
|
cfg.GlobalBackend = backendName
|
||||||
model = strings.TrimSpace(model)
|
|
||||||
for i := range cfg.Tasks {
|
for i := range cfg.Tasks {
|
||||||
if strings.TrimSpace(cfg.Tasks[i].Backend) == "" {
|
if strings.TrimSpace(cfg.Tasks[i].Backend) == "" {
|
||||||
cfg.Tasks[i].Backend = backendName
|
cfg.Tasks[i].Backend = backendName
|
||||||
}
|
}
|
||||||
if strings.TrimSpace(cfg.Tasks[i].Model) == "" && model != "" {
|
|
||||||
cfg.Tasks[i].Model = model
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
timeoutSec := resolveTimeout()
|
timeoutSec := resolveTimeout()
|
||||||
@@ -270,33 +244,7 @@ func run() (exitCode int) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
results := executeConcurrent(layers, timeoutSec)
|
results := executeConcurrent(layers, timeoutSec)
|
||||||
|
fmt.Println(generateFinalOutput(results))
|
||||||
// Extract structured report fields from each result
|
|
||||||
for i := range results {
|
|
||||||
results[i].CoverageTarget = defaultCoverageTarget
|
|
||||||
if results[i].Message == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
lines := strings.Split(results[i].Message, "\n")
|
|
||||||
|
|
||||||
// Coverage extraction
|
|
||||||
results[i].Coverage = extractCoverageFromLines(lines)
|
|
||||||
results[i].CoverageNum = extractCoverageNum(results[i].Coverage)
|
|
||||||
|
|
||||||
// Files changed
|
|
||||||
results[i].FilesChanged = extractFilesChangedFromLines(lines)
|
|
||||||
|
|
||||||
// Test results
|
|
||||||
results[i].TestsPassed, results[i].TestsFailed = extractTestResultsFromLines(lines)
|
|
||||||
|
|
||||||
// Key output summary
|
|
||||||
results[i].KeyOutput = extractKeyOutputFromLines(lines, 150)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Default: summary mode (context-efficient)
|
|
||||||
// --full-output: legacy full output mode
|
|
||||||
fmt.Println(generateFinalOutputWithMode(results, !fullOutput))
|
|
||||||
|
|
||||||
exitCode = 0
|
exitCode = 0
|
||||||
for _, res := range results {
|
for _, res := range results {
|
||||||
@@ -428,7 +376,6 @@ func run() (exitCode int) {
|
|||||||
WorkDir: cfg.WorkDir,
|
WorkDir: cfg.WorkDir,
|
||||||
Mode: cfg.Mode,
|
Mode: cfg.Mode,
|
||||||
SessionID: cfg.SessionID,
|
SessionID: cfg.SessionID,
|
||||||
Model: cfg.Model,
|
|
||||||
UseStdin: useStdin,
|
UseStdin: useStdin,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -500,19 +447,16 @@ Usage:
|
|||||||
%[1]s resume <session_id> "task" [workdir]
|
%[1]s resume <session_id> "task" [workdir]
|
||||||
%[1]s resume <session_id> - [workdir]
|
%[1]s resume <session_id> - [workdir]
|
||||||
%[1]s --parallel Run tasks in parallel (config from stdin)
|
%[1]s --parallel Run tasks in parallel (config from stdin)
|
||||||
%[1]s --parallel --full-output Run tasks in parallel with full output (legacy)
|
|
||||||
%[1]s --version
|
%[1]s --version
|
||||||
%[1]s --help
|
%[1]s --help
|
||||||
|
|
||||||
Parallel mode examples:
|
Parallel mode examples:
|
||||||
%[1]s --parallel < tasks.txt
|
%[1]s --parallel < tasks.txt
|
||||||
echo '...' | %[1]s --parallel
|
echo '...' | %[1]s --parallel
|
||||||
%[1]s --parallel --full-output < tasks.txt
|
|
||||||
%[1]s --parallel <<'EOF'
|
%[1]s --parallel <<'EOF'
|
||||||
|
|
||||||
Environment Variables:
|
Environment Variables:
|
||||||
CODEX_TIMEOUT Timeout in milliseconds (default: 7200000)
|
CODEX_TIMEOUT Timeout in milliseconds (default: 7200000)
|
||||||
CODEAGENT_ASCII_MODE Use ASCII symbols instead of Unicode (PASS/WARN/FAIL)
|
|
||||||
|
|
||||||
Exit Codes:
|
Exit Codes:
|
||||||
0 Success
|
0 Success
|
||||||
|
|||||||
@@ -46,26 +46,10 @@ func parseIntegrationOutput(t *testing.T, out string) integrationOutput {
|
|||||||
|
|
||||||
lines := strings.Split(out, "\n")
|
lines := strings.Split(out, "\n")
|
||||||
var currentTask *TaskResult
|
var currentTask *TaskResult
|
||||||
inTaskResults := false
|
|
||||||
|
|
||||||
for _, line := range lines {
|
for _, line := range lines {
|
||||||
line = strings.TrimSpace(line)
|
line = strings.TrimSpace(line)
|
||||||
|
if strings.HasPrefix(line, "Total:") {
|
||||||
// Parse new format header: "X tasks | Y passed | Z failed"
|
|
||||||
if strings.Contains(line, "tasks |") && strings.Contains(line, "passed |") {
|
|
||||||
parts := strings.Split(line, "|")
|
|
||||||
for _, p := range parts {
|
|
||||||
p = strings.TrimSpace(p)
|
|
||||||
if strings.HasSuffix(p, "tasks") {
|
|
||||||
fmt.Sscanf(p, "%d tasks", &payload.Summary.Total)
|
|
||||||
} else if strings.HasSuffix(p, "passed") {
|
|
||||||
fmt.Sscanf(p, "%d passed", &payload.Summary.Success)
|
|
||||||
} else if strings.HasSuffix(p, "failed") {
|
|
||||||
fmt.Sscanf(p, "%d failed", &payload.Summary.Failed)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if strings.HasPrefix(line, "Total:") {
|
|
||||||
// Legacy format: "Total: X | Success: Y | Failed: Z"
|
|
||||||
parts := strings.Split(line, "|")
|
parts := strings.Split(line, "|")
|
||||||
for _, p := range parts {
|
for _, p := range parts {
|
||||||
p = strings.TrimSpace(p)
|
p = strings.TrimSpace(p)
|
||||||
@@ -77,72 +61,13 @@ func parseIntegrationOutput(t *testing.T, out string) integrationOutput {
|
|||||||
fmt.Sscanf(p, "Failed: %d", &payload.Summary.Failed)
|
fmt.Sscanf(p, "Failed: %d", &payload.Summary.Failed)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else if line == "## Task Results" {
|
|
||||||
inTaskResults = true
|
|
||||||
} else if line == "## Summary" {
|
|
||||||
// End of task results section
|
|
||||||
if currentTask != nil {
|
|
||||||
payload.Results = append(payload.Results, *currentTask)
|
|
||||||
currentTask = nil
|
|
||||||
}
|
|
||||||
inTaskResults = false
|
|
||||||
} else if inTaskResults && strings.HasPrefix(line, "### ") {
|
|
||||||
// New task: ### task-id ✓ 92% or ### task-id PASS 92% (ASCII mode)
|
|
||||||
if currentTask != nil {
|
|
||||||
payload.Results = append(payload.Results, *currentTask)
|
|
||||||
}
|
|
||||||
currentTask = &TaskResult{}
|
|
||||||
|
|
||||||
taskLine := strings.TrimPrefix(line, "### ")
|
|
||||||
success, warning, failed := getStatusSymbols()
|
|
||||||
// Parse different formats
|
|
||||||
if strings.Contains(taskLine, " "+success) {
|
|
||||||
parts := strings.Split(taskLine, " "+success)
|
|
||||||
currentTask.TaskID = strings.TrimSpace(parts[0])
|
|
||||||
currentTask.ExitCode = 0
|
|
||||||
// Extract coverage if present
|
|
||||||
if len(parts) > 1 {
|
|
||||||
coveragePart := strings.TrimSpace(parts[1])
|
|
||||||
if strings.HasSuffix(coveragePart, "%") {
|
|
||||||
currentTask.Coverage = coveragePart
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if strings.Contains(taskLine, " "+warning) {
|
|
||||||
parts := strings.Split(taskLine, " "+warning)
|
|
||||||
currentTask.TaskID = strings.TrimSpace(parts[0])
|
|
||||||
currentTask.ExitCode = 0
|
|
||||||
} else if strings.Contains(taskLine, " "+failed) {
|
|
||||||
parts := strings.Split(taskLine, " "+failed)
|
|
||||||
currentTask.TaskID = strings.TrimSpace(parts[0])
|
|
||||||
currentTask.ExitCode = 1
|
|
||||||
} else {
|
|
||||||
currentTask.TaskID = taskLine
|
|
||||||
}
|
|
||||||
} else if currentTask != nil && inTaskResults {
|
|
||||||
// Parse task details
|
|
||||||
if strings.HasPrefix(line, "Exit code:") {
|
|
||||||
fmt.Sscanf(line, "Exit code: %d", ¤tTask.ExitCode)
|
|
||||||
} else if strings.HasPrefix(line, "Error:") {
|
|
||||||
currentTask.Error = strings.TrimPrefix(line, "Error: ")
|
|
||||||
} else if strings.HasPrefix(line, "Log:") {
|
|
||||||
currentTask.LogPath = strings.TrimSpace(strings.TrimPrefix(line, "Log:"))
|
|
||||||
} else if strings.HasPrefix(line, "Did:") {
|
|
||||||
currentTask.KeyOutput = strings.TrimSpace(strings.TrimPrefix(line, "Did:"))
|
|
||||||
} else if strings.HasPrefix(line, "Detail:") {
|
|
||||||
// Error detail for failed tasks
|
|
||||||
if currentTask.Message == "" {
|
|
||||||
currentTask.Message = strings.TrimSpace(strings.TrimPrefix(line, "Detail:"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if strings.HasPrefix(line, "--- Task:") {
|
} else if strings.HasPrefix(line, "--- Task:") {
|
||||||
// Legacy full output format
|
|
||||||
if currentTask != nil {
|
if currentTask != nil {
|
||||||
payload.Results = append(payload.Results, *currentTask)
|
payload.Results = append(payload.Results, *currentTask)
|
||||||
}
|
}
|
||||||
currentTask = &TaskResult{}
|
currentTask = &TaskResult{}
|
||||||
currentTask.TaskID = strings.TrimSuffix(strings.TrimPrefix(line, "--- Task: "), " ---")
|
currentTask.TaskID = strings.TrimSuffix(strings.TrimPrefix(line, "--- Task: "), " ---")
|
||||||
} else if currentTask != nil && !inTaskResults {
|
} else if currentTask != nil {
|
||||||
// Legacy format parsing
|
|
||||||
if strings.HasPrefix(line, "Status: SUCCESS") {
|
if strings.HasPrefix(line, "Status: SUCCESS") {
|
||||||
currentTask.ExitCode = 0
|
currentTask.ExitCode = 0
|
||||||
} else if strings.HasPrefix(line, "Status: FAILED") {
|
} else if strings.HasPrefix(line, "Status: FAILED") {
|
||||||
@@ -157,11 +82,15 @@ func parseIntegrationOutput(t *testing.T, out string) integrationOutput {
|
|||||||
currentTask.SessionID = strings.TrimPrefix(line, "Session: ")
|
currentTask.SessionID = strings.TrimPrefix(line, "Session: ")
|
||||||
} else if strings.HasPrefix(line, "Log:") {
|
} else if strings.HasPrefix(line, "Log:") {
|
||||||
currentTask.LogPath = strings.TrimSpace(strings.TrimPrefix(line, "Log:"))
|
currentTask.LogPath = strings.TrimSpace(strings.TrimPrefix(line, "Log:"))
|
||||||
|
} else if line != "" && !strings.HasPrefix(line, "===") && !strings.HasPrefix(line, "---") {
|
||||||
|
if currentTask.Message != "" {
|
||||||
|
currentTask.Message += "\n"
|
||||||
|
}
|
||||||
|
currentTask.Message += line
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Handle last task
|
|
||||||
if currentTask != nil {
|
if currentTask != nil {
|
||||||
payload.Results = append(payload.Results, *currentTask)
|
payload.Results = append(payload.Results, *currentTask)
|
||||||
}
|
}
|
||||||
@@ -414,10 +343,9 @@ task-beta`
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, id := range []string{"alpha", "beta"} {
|
for _, id := range []string{"alpha", "beta"} {
|
||||||
// Summary mode shows log paths in table format, not "Log: xxx"
|
want := fmt.Sprintf("Log: %s", logPathFor(id))
|
||||||
logPath := logPathFor(id)
|
if !strings.Contains(output, want) {
|
||||||
if !strings.Contains(output, logPath) {
|
t.Fatalf("parallel output missing %q for %s:\n%s", want, id, output)
|
||||||
t.Fatalf("parallel output missing log path %q for %s:\n%s", logPath, id, output)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -622,16 +550,16 @@ ok-e`
|
|||||||
if resD.LogPath != logPathFor("D") || resE.LogPath != logPathFor("E") {
|
if resD.LogPath != logPathFor("D") || resE.LogPath != logPathFor("E") {
|
||||||
t.Fatalf("expected log paths for D/E, got D=%q E=%q", resD.LogPath, resE.LogPath)
|
t.Fatalf("expected log paths for D/E, got D=%q E=%q", resD.LogPath, resE.LogPath)
|
||||||
}
|
}
|
||||||
// Summary mode shows log paths in table, verify they appear in output
|
|
||||||
for _, id := range []string{"A", "D", "E"} {
|
for _, id := range []string{"A", "D", "E"} {
|
||||||
logPath := logPathFor(id)
|
block := extractTaskBlock(t, output, id)
|
||||||
if !strings.Contains(output, logPath) {
|
want := fmt.Sprintf("Log: %s", logPathFor(id))
|
||||||
t.Fatalf("task %s log path %q not found in output:\n%s", id, logPath, output)
|
if !strings.Contains(block, want) {
|
||||||
|
t.Fatalf("task %s block missing %q:\n%s", id, want, block)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Task B was skipped, should have "-" or empty log path in table
|
blockB := extractTaskBlock(t, output, "B")
|
||||||
if resB.LogPath != "" {
|
if strings.Contains(blockB, "Log:") {
|
||||||
t.Fatalf("skipped task B should have empty log path, got %q", resB.LogPath)
|
t.Fatalf("skipped task B should not emit a log line:\n%s", blockB)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -243,10 +243,6 @@ func (d *drainBlockingCmd) StdoutPipe() (io.ReadCloser, error) {
|
|||||||
return newDrainBlockingStdout(ctxReader), nil
|
return newDrainBlockingStdout(ctxReader), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *drainBlockingCmd) StderrPipe() (io.ReadCloser, error) {
|
|
||||||
return d.inner.StderrPipe()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *drainBlockingCmd) StdinPipe() (io.WriteCloser, error) {
|
func (d *drainBlockingCmd) StdinPipe() (io.WriteCloser, error) {
|
||||||
return d.inner.StdinPipe()
|
return d.inner.StdinPipe()
|
||||||
}
|
}
|
||||||
@@ -259,10 +255,6 @@ func (d *drainBlockingCmd) SetDir(dir string) {
|
|||||||
d.inner.SetDir(dir)
|
d.inner.SetDir(dir)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *drainBlockingCmd) SetEnv(env map[string]string) {
|
|
||||||
d.inner.SetEnv(env)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *drainBlockingCmd) Process() processHandle {
|
func (d *drainBlockingCmd) Process() processHandle {
|
||||||
return d.inner.Process()
|
return d.inner.Process()
|
||||||
}
|
}
|
||||||
@@ -318,9 +310,6 @@ func newFakeProcess(pid int) *fakeProcess {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p *fakeProcess) Pid() int {
|
func (p *fakeProcess) Pid() int {
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return p.pid
|
return p.pid
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -396,12 +385,7 @@ type fakeCmd struct {
|
|||||||
stdinWriter *bufferWriteCloser
|
stdinWriter *bufferWriteCloser
|
||||||
stdinClaim bool
|
stdinClaim bool
|
||||||
|
|
||||||
stderr *ctxAwareReader
|
stderr io.Writer
|
||||||
stderrWriter *io.PipeWriter
|
|
||||||
stderrOnce sync.Once
|
|
||||||
stderrClaim bool
|
|
||||||
|
|
||||||
env map[string]string
|
|
||||||
|
|
||||||
waitDelay time.Duration
|
waitDelay time.Duration
|
||||||
waitErr error
|
waitErr error
|
||||||
@@ -425,7 +409,6 @@ type fakeCmd struct {
|
|||||||
|
|
||||||
func newFakeCmd(cfg fakeCmdConfig) *fakeCmd {
|
func newFakeCmd(cfg fakeCmdConfig) *fakeCmd {
|
||||||
r, w := io.Pipe()
|
r, w := io.Pipe()
|
||||||
stderrR, stderrW := io.Pipe()
|
|
||||||
cmd := &fakeCmd{
|
cmd := &fakeCmd{
|
||||||
stdout: newCtxAwareReader(r),
|
stdout: newCtxAwareReader(r),
|
||||||
stdoutWriter: w,
|
stdoutWriter: w,
|
||||||
@@ -436,8 +419,6 @@ func newFakeCmd(cfg fakeCmdConfig) *fakeCmd {
|
|||||||
startErr: cfg.StartErr,
|
startErr: cfg.StartErr,
|
||||||
waitDone: make(chan struct{}),
|
waitDone: make(chan struct{}),
|
||||||
keepStdoutOpen: cfg.KeepStdoutOpen,
|
keepStdoutOpen: cfg.KeepStdoutOpen,
|
||||||
stderr: newCtxAwareReader(stderrR),
|
|
||||||
stderrWriter: stderrW,
|
|
||||||
process: newFakeProcess(cfg.PID),
|
process: newFakeProcess(cfg.PID),
|
||||||
}
|
}
|
||||||
if len(cmd.stdoutPlan) == 0 {
|
if len(cmd.stdoutPlan) == 0 {
|
||||||
@@ -514,16 +495,6 @@ func (f *fakeCmd) StdoutPipe() (io.ReadCloser, error) {
|
|||||||
return f.stdout, nil
|
return f.stdout, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *fakeCmd) StderrPipe() (io.ReadCloser, error) {
|
|
||||||
f.mu.Lock()
|
|
||||||
defer f.mu.Unlock()
|
|
||||||
if f.stderrClaim {
|
|
||||||
return nil, errors.New("stderr pipe already claimed")
|
|
||||||
}
|
|
||||||
f.stderrClaim = true
|
|
||||||
return f.stderr, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *fakeCmd) StdinPipe() (io.WriteCloser, error) {
|
func (f *fakeCmd) StdinPipe() (io.WriteCloser, error) {
|
||||||
f.mu.Lock()
|
f.mu.Lock()
|
||||||
defer f.mu.Unlock()
|
defer f.mu.Unlock()
|
||||||
@@ -535,25 +506,11 @@ func (f *fakeCmd) StdinPipe() (io.WriteCloser, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (f *fakeCmd) SetStderr(w io.Writer) {
|
func (f *fakeCmd) SetStderr(w io.Writer) {
|
||||||
_ = w
|
f.stderr = w
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *fakeCmd) SetDir(string) {}
|
func (f *fakeCmd) SetDir(string) {}
|
||||||
|
|
||||||
func (f *fakeCmd) SetEnv(env map[string]string) {
|
|
||||||
if len(env) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
f.mu.Lock()
|
|
||||||
defer f.mu.Unlock()
|
|
||||||
if f.env == nil {
|
|
||||||
f.env = make(map[string]string, len(env))
|
|
||||||
}
|
|
||||||
for k, v := range env {
|
|
||||||
f.env[k] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *fakeCmd) Process() processHandle {
|
func (f *fakeCmd) Process() processHandle {
|
||||||
if f == nil {
|
if f == nil {
|
||||||
return nil
|
return nil
|
||||||
@@ -565,7 +522,6 @@ func (f *fakeCmd) runStdoutScript() {
|
|||||||
if len(f.stdoutPlan) == 0 {
|
if len(f.stdoutPlan) == 0 {
|
||||||
if !f.keepStdoutOpen {
|
if !f.keepStdoutOpen {
|
||||||
f.CloseStdout(nil)
|
f.CloseStdout(nil)
|
||||||
f.CloseStderr(nil)
|
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -577,7 +533,6 @@ func (f *fakeCmd) runStdoutScript() {
|
|||||||
}
|
}
|
||||||
if !f.keepStdoutOpen {
|
if !f.keepStdoutOpen {
|
||||||
f.CloseStdout(nil)
|
f.CloseStdout(nil)
|
||||||
f.CloseStderr(nil)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -614,19 +569,6 @@ func (f *fakeCmd) CloseStdout(err error) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *fakeCmd) CloseStderr(err error) {
|
|
||||||
f.stderrOnce.Do(func() {
|
|
||||||
if f.stderrWriter == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
_ = f.stderrWriter.CloseWithError(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
_ = f.stderrWriter.Close()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *fakeCmd) StdinContents() string {
|
func (f *fakeCmd) StdinContents() string {
|
||||||
if f.stdinWriter == nil {
|
if f.stdinWriter == nil {
|
||||||
return ""
|
return ""
|
||||||
@@ -914,17 +856,11 @@ func TestRunCodexTask_ContextTimeout(t *testing.T) {
|
|||||||
if fake.process == nil {
|
if fake.process == nil {
|
||||||
t.Fatalf("fake process not initialized")
|
t.Fatalf("fake process not initialized")
|
||||||
}
|
}
|
||||||
if runtime.GOOS == "windows" {
|
if fake.process.SignalCount() == 0 {
|
||||||
if fake.process.KillCount() == 0 {
|
t.Fatalf("expected SIGTERM to be sent, got 0")
|
||||||
t.Fatalf("expected Kill to be called, got 0")
|
}
|
||||||
}
|
if fake.process.KillCount() == 0 {
|
||||||
} else {
|
t.Fatalf("expected Kill to eventually run, got 0")
|
||||||
if fake.process.SignalCount() == 0 {
|
|
||||||
t.Fatalf("expected SIGTERM to be sent, got 0")
|
|
||||||
}
|
|
||||||
if fake.process.KillCount() == 0 {
|
|
||||||
t.Fatalf("expected Kill to eventually run, got 0")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if capturedTimer == nil {
|
if capturedTimer == nil {
|
||||||
t.Fatalf("forceKillTimer not captured")
|
t.Fatalf("forceKillTimer not captured")
|
||||||
@@ -943,127 +879,6 @@ func TestRunCodexTask_ContextTimeout(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRunCodexTask_ForcesStopAfterCompletion(t *testing.T) {
|
|
||||||
defer resetTestHooks()
|
|
||||||
forceKillDelay.Store(0)
|
|
||||||
|
|
||||||
fake := newFakeCmd(fakeCmdConfig{
|
|
||||||
StdoutPlan: []fakeStdoutEvent{
|
|
||||||
{Data: `{"type":"item.completed","item":{"type":"agent_message","text":"done"}}` + "\n"},
|
|
||||||
{Data: `{"type":"thread.completed","thread_id":"tid"}` + "\n"},
|
|
||||||
},
|
|
||||||
KeepStdoutOpen: true,
|
|
||||||
BlockWait: true,
|
|
||||||
ReleaseWaitOnSignal: true,
|
|
||||||
ReleaseWaitOnKill: true,
|
|
||||||
})
|
|
||||||
|
|
||||||
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
|
|
||||||
return fake
|
|
||||||
}
|
|
||||||
buildCodexArgsFn = func(cfg *Config, targetArg string) []string { return []string{targetArg} }
|
|
||||||
codexCommand = "fake-cmd"
|
|
||||||
|
|
||||||
start := time.Now()
|
|
||||||
result := runCodexTaskWithContext(context.Background(), TaskSpec{Task: "done", WorkDir: defaultWorkdir}, nil, nil, false, false, 60)
|
|
||||||
duration := time.Since(start)
|
|
||||||
|
|
||||||
if result.ExitCode != 0 || result.Message != "done" {
|
|
||||||
t.Fatalf("unexpected result: %+v", result)
|
|
||||||
}
|
|
||||||
if duration > 2*time.Second {
|
|
||||||
t.Fatalf("runCodexTaskWithContext took too long: %v", duration)
|
|
||||||
}
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
if fake.process.KillCount() == 0 {
|
|
||||||
t.Fatalf("expected Kill to be called, got 0")
|
|
||||||
}
|
|
||||||
} else if fake.process.SignalCount() == 0 {
|
|
||||||
t.Fatalf("expected SIGTERM to be sent, got %d", fake.process.SignalCount())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRunCodexTask_ForcesStopAfterTurnCompleted(t *testing.T) {
|
|
||||||
defer resetTestHooks()
|
|
||||||
forceKillDelay.Store(0)
|
|
||||||
|
|
||||||
fake := newFakeCmd(fakeCmdConfig{
|
|
||||||
StdoutPlan: []fakeStdoutEvent{
|
|
||||||
{Data: `{"type":"item.completed","item":{"type":"agent_message","text":"done"}}` + "\n"},
|
|
||||||
{Data: `{"type":"turn.completed"}` + "\n"},
|
|
||||||
},
|
|
||||||
KeepStdoutOpen: true,
|
|
||||||
BlockWait: true,
|
|
||||||
ReleaseWaitOnSignal: true,
|
|
||||||
ReleaseWaitOnKill: true,
|
|
||||||
})
|
|
||||||
|
|
||||||
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
|
|
||||||
return fake
|
|
||||||
}
|
|
||||||
buildCodexArgsFn = func(cfg *Config, targetArg string) []string { return []string{targetArg} }
|
|
||||||
codexCommand = "fake-cmd"
|
|
||||||
|
|
||||||
start := time.Now()
|
|
||||||
result := runCodexTaskWithContext(context.Background(), TaskSpec{Task: "done", WorkDir: defaultWorkdir}, nil, nil, false, false, 60)
|
|
||||||
duration := time.Since(start)
|
|
||||||
|
|
||||||
if result.ExitCode != 0 || result.Message != "done" {
|
|
||||||
t.Fatalf("unexpected result: %+v", result)
|
|
||||||
}
|
|
||||||
if duration > 2*time.Second {
|
|
||||||
t.Fatalf("runCodexTaskWithContext took too long: %v", duration)
|
|
||||||
}
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
if fake.process.KillCount() == 0 {
|
|
||||||
t.Fatalf("expected Kill to be called, got 0")
|
|
||||||
}
|
|
||||||
} else if fake.process.SignalCount() == 0 {
|
|
||||||
t.Fatalf("expected SIGTERM to be sent, got %d", fake.process.SignalCount())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRunCodexTask_DoesNotTerminateBeforeThreadCompleted(t *testing.T) {
|
|
||||||
defer resetTestHooks()
|
|
||||||
forceKillDelay.Store(0)
|
|
||||||
|
|
||||||
fake := newFakeCmd(fakeCmdConfig{
|
|
||||||
StdoutPlan: []fakeStdoutEvent{
|
|
||||||
{Data: `{"type":"item.completed","item":{"type":"agent_message","text":"intermediate"}}` + "\n"},
|
|
||||||
{Delay: 1100 * time.Millisecond, Data: `{"type":"item.completed","item":{"type":"agent_message","text":"final"}}` + "\n"},
|
|
||||||
{Data: `{"type":"thread.completed","thread_id":"tid"}` + "\n"},
|
|
||||||
},
|
|
||||||
KeepStdoutOpen: true,
|
|
||||||
BlockWait: true,
|
|
||||||
ReleaseWaitOnSignal: true,
|
|
||||||
ReleaseWaitOnKill: true,
|
|
||||||
})
|
|
||||||
|
|
||||||
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
|
|
||||||
return fake
|
|
||||||
}
|
|
||||||
buildCodexArgsFn = func(cfg *Config, targetArg string) []string { return []string{targetArg} }
|
|
||||||
codexCommand = "fake-cmd"
|
|
||||||
|
|
||||||
start := time.Now()
|
|
||||||
result := runCodexTaskWithContext(context.Background(), TaskSpec{Task: "done", WorkDir: defaultWorkdir}, nil, nil, false, false, 60)
|
|
||||||
duration := time.Since(start)
|
|
||||||
|
|
||||||
if result.ExitCode != 0 || result.Message != "final" {
|
|
||||||
t.Fatalf("unexpected result: %+v", result)
|
|
||||||
}
|
|
||||||
if duration > 5*time.Second {
|
|
||||||
t.Fatalf("runCodexTaskWithContext took too long: %v", duration)
|
|
||||||
}
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
if fake.process.KillCount() == 0 {
|
|
||||||
t.Fatalf("expected Kill to be called, got 0")
|
|
||||||
}
|
|
||||||
} else if fake.process.SignalCount() == 0 {
|
|
||||||
t.Fatalf("expected SIGTERM to be sent, got %d", fake.process.SignalCount())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBackendParseArgs_NewMode(t *testing.T) {
|
func TestBackendParseArgs_NewMode(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
@@ -1150,8 +965,6 @@ func TestBackendParseArgs_ResumeMode(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{name: "resume missing session_id", args: []string{"codeagent-wrapper", "resume"}, wantErr: true},
|
{name: "resume missing session_id", args: []string{"codeagent-wrapper", "resume"}, wantErr: true},
|
||||||
{name: "resume missing task", args: []string{"codeagent-wrapper", "resume", "session-123"}, wantErr: true},
|
{name: "resume missing task", args: []string{"codeagent-wrapper", "resume", "session-123"}, wantErr: true},
|
||||||
{name: "resume empty session_id", args: []string{"codeagent-wrapper", "resume", "", "task"}, wantErr: true},
|
|
||||||
{name: "resume whitespace session_id", args: []string{"codeagent-wrapper", "resume", " ", "task"}, wantErr: true},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
@@ -1231,65 +1044,6 @@ func TestBackendParseArgs_BackendFlag(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendParseArgs_ModelFlag(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
args []string
|
|
||||||
want string
|
|
||||||
wantErr bool
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "model flag",
|
|
||||||
args: []string{"codeagent-wrapper", "--model", "opus", "task"},
|
|
||||||
want: "opus",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "model equals syntax",
|
|
||||||
args: []string{"codeagent-wrapper", "--model=opus", "task"},
|
|
||||||
want: "opus",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "model trimmed",
|
|
||||||
args: []string{"codeagent-wrapper", "--model", " opus ", "task"},
|
|
||||||
want: "opus",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "model with resume mode",
|
|
||||||
args: []string{"codeagent-wrapper", "--model", "sonnet", "resume", "sid", "task"},
|
|
||||||
want: "sonnet",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "missing model value",
|
|
||||||
args: []string{"codeagent-wrapper", "--model"},
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "model equals missing value",
|
|
||||||
args: []string{"codeagent-wrapper", "--model=", "task"},
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
os.Args = tt.args
|
|
||||||
cfg, err := parseArgs()
|
|
||||||
if tt.wantErr {
|
|
||||||
if err == nil {
|
|
||||||
t.Fatalf("expected error, got nil")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
if cfg.Model != tt.want {
|
|
||||||
t.Fatalf("Model = %q, want %q", cfg.Model, tt.want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBackendParseArgs_SkipPermissions(t *testing.T) {
|
func TestBackendParseArgs_SkipPermissions(t *testing.T) {
|
||||||
const envKey = "CODEAGENT_SKIP_PERMISSIONS"
|
const envKey = "CODEAGENT_SKIP_PERMISSIONS"
|
||||||
t.Cleanup(func() { os.Unsetenv(envKey) })
|
t.Cleanup(func() { os.Unsetenv(envKey) })
|
||||||
@@ -1427,38 +1181,6 @@ do something`
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestParallelParseConfig_Model(t *testing.T) {
|
|
||||||
input := `---TASK---
|
|
||||||
id: task-1
|
|
||||||
model: opus
|
|
||||||
---CONTENT---
|
|
||||||
do something`
|
|
||||||
|
|
||||||
cfg, err := parseParallelConfig([]byte(input))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("parseParallelConfig() unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
if len(cfg.Tasks) != 1 {
|
|
||||||
t.Fatalf("expected 1 task, got %d", len(cfg.Tasks))
|
|
||||||
}
|
|
||||||
task := cfg.Tasks[0]
|
|
||||||
if task.Model != "opus" {
|
|
||||||
t.Fatalf("model = %q, want opus", task.Model)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestParallelParseConfig_EmptySessionID(t *testing.T) {
|
|
||||||
input := `---TASK---
|
|
||||||
id: task-1
|
|
||||||
session_id:
|
|
||||||
---CONTENT---
|
|
||||||
do something`
|
|
||||||
|
|
||||||
if _, err := parseParallelConfig([]byte(input)); err == nil {
|
|
||||||
t.Fatalf("expected error for empty session_id, got nil")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestParallelParseConfig_InvalidFormat(t *testing.T) {
|
func TestParallelParseConfig_InvalidFormat(t *testing.T) {
|
||||||
if _, err := parseParallelConfig([]byte("invalid format")); err == nil {
|
if _, err := parseParallelConfig([]byte("invalid format")); err == nil {
|
||||||
t.Fatalf("expected error for invalid format, got nil")
|
t.Fatalf("expected error for invalid format, got nil")
|
||||||
@@ -1529,120 +1251,6 @@ code with special chars: $var "quotes"`
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestClaudeModel_DefaultsFromSettings(t *testing.T) {
|
|
||||||
defer resetTestHooks()
|
|
||||||
|
|
||||||
home := t.TempDir()
|
|
||||||
t.Setenv("HOME", home)
|
|
||||||
t.Setenv("USERPROFILE", home)
|
|
||||||
|
|
||||||
dir := filepath.Join(home, ".claude")
|
|
||||||
if err := os.MkdirAll(dir, 0o755); err != nil {
|
|
||||||
t.Fatalf("MkdirAll: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
settingsModel := "claude-opus-4-5-20250929"
|
|
||||||
path := filepath.Join(dir, "settings.json")
|
|
||||||
data := []byte(fmt.Sprintf(`{"model":%q,"env":{"FOO":"bar"}}`, settingsModel))
|
|
||||||
if err := os.WriteFile(path, data, 0o600); err != nil {
|
|
||||||
t.Fatalf("WriteFile: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
makeRunner := func(gotName *string, gotArgs *[]string, fake **fakeCmd) func(context.Context, string, ...string) commandRunner {
|
|
||||||
return func(ctx context.Context, name string, args ...string) commandRunner {
|
|
||||||
*gotName = name
|
|
||||||
*gotArgs = append([]string(nil), args...)
|
|
||||||
cmd := newFakeCmd(fakeCmdConfig{
|
|
||||||
PID: 123,
|
|
||||||
StdoutPlan: []fakeStdoutEvent{
|
|
||||||
{Data: "{\"type\":\"result\",\"session_id\":\"sid\",\"result\":\"ok\"}\n"},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
*fake = cmd
|
|
||||||
return cmd
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Run("new mode inherits model when unset", func(t *testing.T) {
|
|
||||||
var (
|
|
||||||
gotName string
|
|
||||||
gotArgs []string
|
|
||||||
fake *fakeCmd
|
|
||||||
)
|
|
||||||
origRunner := newCommandRunner
|
|
||||||
newCommandRunner = makeRunner(&gotName, &gotArgs, &fake)
|
|
||||||
t.Cleanup(func() { newCommandRunner = origRunner })
|
|
||||||
|
|
||||||
res := runCodexTaskWithContext(context.Background(), TaskSpec{Task: "hi", Mode: "new", WorkDir: defaultWorkdir}, ClaudeBackend{}, nil, false, true, 5)
|
|
||||||
if res.ExitCode != 0 || res.Message != "ok" {
|
|
||||||
t.Fatalf("unexpected result: %+v", res)
|
|
||||||
}
|
|
||||||
if gotName != "claude" {
|
|
||||||
t.Fatalf("command = %q, want claude", gotName)
|
|
||||||
}
|
|
||||||
found := false
|
|
||||||
for i := 0; i+1 < len(gotArgs); i++ {
|
|
||||||
if gotArgs[i] == "--model" && gotArgs[i+1] == settingsModel {
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
t.Fatalf("expected --model %q in args, got %v", settingsModel, gotArgs)
|
|
||||||
}
|
|
||||||
if fake == nil || fake.env["FOO"] != "bar" {
|
|
||||||
t.Fatalf("expected env to include FOO=bar, got %v", fake.env)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("explicit model overrides settings", func(t *testing.T) {
|
|
||||||
var (
|
|
||||||
gotName string
|
|
||||||
gotArgs []string
|
|
||||||
fake *fakeCmd
|
|
||||||
)
|
|
||||||
origRunner := newCommandRunner
|
|
||||||
newCommandRunner = makeRunner(&gotName, &gotArgs, &fake)
|
|
||||||
t.Cleanup(func() { newCommandRunner = origRunner })
|
|
||||||
|
|
||||||
res := runCodexTaskWithContext(context.Background(), TaskSpec{Task: "hi", Mode: "new", WorkDir: defaultWorkdir, Model: "sonnet"}, ClaudeBackend{}, nil, false, true, 5)
|
|
||||||
if res.ExitCode != 0 || res.Message != "ok" {
|
|
||||||
t.Fatalf("unexpected result: %+v", res)
|
|
||||||
}
|
|
||||||
found := false
|
|
||||||
for i := 0; i+1 < len(gotArgs); i++ {
|
|
||||||
if gotArgs[i] == "--model" && gotArgs[i+1] == "sonnet" {
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
t.Fatalf("expected --model sonnet in args, got %v", gotArgs)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("resume mode does not inherit model by default", func(t *testing.T) {
|
|
||||||
var (
|
|
||||||
gotName string
|
|
||||||
gotArgs []string
|
|
||||||
fake *fakeCmd
|
|
||||||
)
|
|
||||||
origRunner := newCommandRunner
|
|
||||||
newCommandRunner = makeRunner(&gotName, &gotArgs, &fake)
|
|
||||||
t.Cleanup(func() { newCommandRunner = origRunner })
|
|
||||||
|
|
||||||
res := runCodexTaskWithContext(context.Background(), TaskSpec{Task: "hi", Mode: "resume", SessionID: "sid-123", WorkDir: defaultWorkdir}, ClaudeBackend{}, nil, false, true, 5)
|
|
||||||
if res.ExitCode != 0 || res.Message != "ok" {
|
|
||||||
t.Fatalf("unexpected result: %+v", res)
|
|
||||||
}
|
|
||||||
for i := 0; i < len(gotArgs); i++ {
|
|
||||||
if gotArgs[i] == "--model" {
|
|
||||||
t.Fatalf("did not expect --model in resume args, got %v", gotArgs)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRunShouldUseStdin(t *testing.T) {
|
func TestRunShouldUseStdin(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
@@ -1673,19 +1281,9 @@ func TestRunShouldUseStdin(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestRunBuildCodexArgs_NewMode(t *testing.T) {
|
func TestRunBuildCodexArgs_NewMode(t *testing.T) {
|
||||||
const key = "CODEX_BYPASS_SANDBOX"
|
|
||||||
t.Cleanup(func() { os.Unsetenv(key) })
|
|
||||||
os.Unsetenv(key)
|
|
||||||
|
|
||||||
cfg := &Config{Mode: "new", WorkDir: "/test/dir"}
|
cfg := &Config{Mode: "new", WorkDir: "/test/dir"}
|
||||||
args := buildCodexArgs(cfg, "my task")
|
args := buildCodexArgs(cfg, "my task")
|
||||||
expected := []string{
|
expected := []string{"e", "--skip-git-repo-check", "-C", "/test/dir", "--json", "my task"}
|
||||||
"e",
|
|
||||||
"--skip-git-repo-check",
|
|
||||||
"-C", "/test/dir",
|
|
||||||
"--json",
|
|
||||||
"my task",
|
|
||||||
}
|
|
||||||
if len(args) != len(expected) {
|
if len(args) != len(expected) {
|
||||||
t.Fatalf("len mismatch")
|
t.Fatalf("len mismatch")
|
||||||
}
|
}
|
||||||
@@ -1697,20 +1295,9 @@ func TestRunBuildCodexArgs_NewMode(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestRunBuildCodexArgs_ResumeMode(t *testing.T) {
|
func TestRunBuildCodexArgs_ResumeMode(t *testing.T) {
|
||||||
const key = "CODEX_BYPASS_SANDBOX"
|
|
||||||
t.Cleanup(func() { os.Unsetenv(key) })
|
|
||||||
os.Unsetenv(key)
|
|
||||||
|
|
||||||
cfg := &Config{Mode: "resume", SessionID: "session-abc"}
|
cfg := &Config{Mode: "resume", SessionID: "session-abc"}
|
||||||
args := buildCodexArgs(cfg, "-")
|
args := buildCodexArgs(cfg, "-")
|
||||||
expected := []string{
|
expected := []string{"e", "--skip-git-repo-check", "--json", "resume", "session-abc", "-"}
|
||||||
"e",
|
|
||||||
"--skip-git-repo-check",
|
|
||||||
"--json",
|
|
||||||
"resume",
|
|
||||||
"session-abc",
|
|
||||||
"-",
|
|
||||||
}
|
|
||||||
if len(args) != len(expected) {
|
if len(args) != len(expected) {
|
||||||
t.Fatalf("len mismatch")
|
t.Fatalf("len mismatch")
|
||||||
}
|
}
|
||||||
@@ -1721,61 +1308,6 @@ func TestRunBuildCodexArgs_ResumeMode(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRunBuildCodexArgs_ResumeMode_EmptySessionHandledGracefully(t *testing.T) {
|
|
||||||
const key = "CODEX_BYPASS_SANDBOX"
|
|
||||||
t.Cleanup(func() { os.Unsetenv(key) })
|
|
||||||
os.Unsetenv(key)
|
|
||||||
|
|
||||||
cfg := &Config{Mode: "resume", SessionID: " ", WorkDir: "/test/dir"}
|
|
||||||
args := buildCodexArgs(cfg, "task")
|
|
||||||
expected := []string{"e", "--skip-git-repo-check", "-C", "/test/dir", "--json", "task"}
|
|
||||||
if len(args) != len(expected) {
|
|
||||||
t.Fatalf("len mismatch")
|
|
||||||
}
|
|
||||||
for i := range args {
|
|
||||||
if args[i] != expected[i] {
|
|
||||||
t.Fatalf("args[%d]=%s, want %s", i, args[i], expected[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRunBuildCodexArgs_BypassSandboxEnvTrue(t *testing.T) {
|
|
||||||
defer resetTestHooks()
|
|
||||||
tempDir := t.TempDir()
|
|
||||||
t.Setenv("TMPDIR", tempDir)
|
|
||||||
|
|
||||||
logger, err := NewLogger()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("NewLogger() error = %v", err)
|
|
||||||
}
|
|
||||||
setLogger(logger)
|
|
||||||
defer closeLogger()
|
|
||||||
|
|
||||||
t.Setenv("CODEX_BYPASS_SANDBOX", "true")
|
|
||||||
|
|
||||||
cfg := &Config{Mode: "new", WorkDir: "/test/dir"}
|
|
||||||
args := buildCodexArgs(cfg, "my task")
|
|
||||||
found := false
|
|
||||||
for _, arg := range args {
|
|
||||||
if arg == "--dangerously-bypass-approvals-and-sandbox" {
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
t.Fatalf("expected bypass flag in args, got %v", args)
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.Flush()
|
|
||||||
data, err := os.ReadFile(logger.Path())
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to read log file: %v", err)
|
|
||||||
}
|
|
||||||
if !strings.Contains(string(data), "CODEX_BYPASS_SANDBOX=true") {
|
|
||||||
t.Fatalf("expected bypass warning log, got: %s", string(data))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBackendSelectBackend(t *testing.T) {
|
func TestBackendSelectBackend(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
@@ -1831,13 +1363,7 @@ func TestBackendBuildArgs_CodexBackend(t *testing.T) {
|
|||||||
backend := CodexBackend{}
|
backend := CodexBackend{}
|
||||||
cfg := &Config{Mode: "new", WorkDir: "/test/dir"}
|
cfg := &Config{Mode: "new", WorkDir: "/test/dir"}
|
||||||
got := backend.BuildArgs(cfg, "task")
|
got := backend.BuildArgs(cfg, "task")
|
||||||
want := []string{
|
want := []string{"e", "--skip-git-repo-check", "-C", "/test/dir", "--json", "task"}
|
||||||
"e",
|
|
||||||
"--skip-git-repo-check",
|
|
||||||
"-C", "/test/dir",
|
|
||||||
"--json",
|
|
||||||
"task",
|
|
||||||
}
|
|
||||||
if len(got) != len(want) {
|
if len(got) != len(want) {
|
||||||
t.Fatalf("length mismatch")
|
t.Fatalf("length mismatch")
|
||||||
}
|
}
|
||||||
@@ -1852,13 +1378,13 @@ func TestBackendBuildArgs_ClaudeBackend(t *testing.T) {
|
|||||||
backend := ClaudeBackend{}
|
backend := ClaudeBackend{}
|
||||||
cfg := &Config{Mode: "new", WorkDir: defaultWorkdir}
|
cfg := &Config{Mode: "new", WorkDir: defaultWorkdir}
|
||||||
got := backend.BuildArgs(cfg, "todo")
|
got := backend.BuildArgs(cfg, "todo")
|
||||||
want := []string{"-p", "--setting-sources", "", "--output-format", "stream-json", "--verbose", "todo"}
|
want := []string{"-p", "--dangerously-skip-permissions", "--setting-sources", "", "--output-format", "stream-json", "--verbose", "todo"}
|
||||||
if len(got) != len(want) {
|
if len(got) != len(want) {
|
||||||
t.Fatalf("args length=%d, want %d: %v", len(got), len(want), got)
|
t.Fatalf("length mismatch")
|
||||||
}
|
}
|
||||||
for i := range want {
|
for i := range want {
|
||||||
if got[i] != want[i] {
|
if got[i] != want[i] {
|
||||||
t.Fatalf("index %d got %q want %q (args=%v)", i, got[i], want[i], got)
|
t.Fatalf("index %d got %s want %s", i, got[i], want[i])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1873,15 +1399,19 @@ func TestClaudeBackendBuildArgs_OutputValidation(t *testing.T) {
|
|||||||
target := "ensure-flags"
|
target := "ensure-flags"
|
||||||
|
|
||||||
args := backend.BuildArgs(cfg, target)
|
args := backend.BuildArgs(cfg, target)
|
||||||
want := []string{"-p", "--setting-sources", "", "--output-format", "stream-json", "--verbose", target}
|
expectedPrefix := []string{"-p", "--dangerously-skip-permissions", "--setting-sources", "", "--output-format", "stream-json", "--verbose"}
|
||||||
if len(args) != len(want) {
|
|
||||||
t.Fatalf("args length=%d, want %d: %v", len(args), len(want), args)
|
if len(args) != len(expectedPrefix)+1 {
|
||||||
|
t.Fatalf("args length=%d, want %d", len(args), len(expectedPrefix)+1)
|
||||||
}
|
}
|
||||||
for i := range want {
|
for i, val := range expectedPrefix {
|
||||||
if args[i] != want[i] {
|
if args[i] != val {
|
||||||
t.Fatalf("index %d got %q want %q (args=%v)", i, args[i], want[i], args)
|
t.Fatalf("args[%d]=%q, want %q", i, args[i], val)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if args[len(args)-1] != target {
|
||||||
|
t.Fatalf("last arg=%q, want target %q", args[len(args)-1], target)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendBuildArgs_GeminiBackend(t *testing.T) {
|
func TestBackendBuildArgs_GeminiBackend(t *testing.T) {
|
||||||
@@ -2120,7 +1650,7 @@ func TestBackendParseJSONStream_GeminiEvents_OnMessageTriggeredOnStatus(t *testi
|
|||||||
var called int
|
var called int
|
||||||
message, threadID := parseJSONStreamInternal(strings.NewReader(input), nil, nil, func() {
|
message, threadID := parseJSONStreamInternal(strings.NewReader(input), nil, nil, func() {
|
||||||
called++
|
called++
|
||||||
}, nil)
|
})
|
||||||
|
|
||||||
if message != "Hi there" {
|
if message != "Hi there" {
|
||||||
t.Fatalf("message=%q, want %q", message, "Hi there")
|
t.Fatalf("message=%q, want %q", message, "Hi there")
|
||||||
@@ -2149,7 +1679,7 @@ func TestBackendParseJSONStream_OnMessage(t *testing.T) {
|
|||||||
var called int
|
var called int
|
||||||
message, threadID := parseJSONStreamInternal(strings.NewReader(`{"type":"item.completed","item":{"type":"agent_message","text":"hook"}}`), nil, nil, func() {
|
message, threadID := parseJSONStreamInternal(strings.NewReader(`{"type":"item.completed","item":{"type":"agent_message","text":"hook"}}`), nil, nil, func() {
|
||||||
called++
|
called++
|
||||||
}, nil)
|
})
|
||||||
if message != "hook" {
|
if message != "hook" {
|
||||||
t.Fatalf("message = %q, want hook", message)
|
t.Fatalf("message = %q, want hook", message)
|
||||||
}
|
}
|
||||||
@@ -2161,86 +1691,10 @@ func TestBackendParseJSONStream_OnMessage(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendParseJSONStream_OnComplete_CodexThreadCompleted(t *testing.T) {
|
|
||||||
input := `{"type":"item.completed","item":{"type":"agent_message","text":"first"}}` + "\n" +
|
|
||||||
`{"type":"item.completed","item":{"type":"agent_message","text":"second"}}` + "\n" +
|
|
||||||
`{"type":"thread.completed","thread_id":"t-1"}`
|
|
||||||
|
|
||||||
var onMessageCalls int
|
|
||||||
var onCompleteCalls int
|
|
||||||
message, threadID := parseJSONStreamInternal(strings.NewReader(input), nil, nil, func() {
|
|
||||||
onMessageCalls++
|
|
||||||
}, func() {
|
|
||||||
onCompleteCalls++
|
|
||||||
})
|
|
||||||
if message != "second" {
|
|
||||||
t.Fatalf("message = %q, want second", message)
|
|
||||||
}
|
|
||||||
if threadID != "t-1" {
|
|
||||||
t.Fatalf("threadID = %q, want t-1", threadID)
|
|
||||||
}
|
|
||||||
if onMessageCalls != 2 {
|
|
||||||
t.Fatalf("onMessage calls = %d, want 2", onMessageCalls)
|
|
||||||
}
|
|
||||||
if onCompleteCalls != 1 {
|
|
||||||
t.Fatalf("onComplete calls = %d, want 1", onCompleteCalls)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBackendParseJSONStream_OnComplete_ClaudeResult(t *testing.T) {
|
|
||||||
input := `{"type":"message","subtype":"stream","session_id":"s-1"}` + "\n" +
|
|
||||||
`{"type":"result","result":"OK","session_id":"s-1"}`
|
|
||||||
|
|
||||||
var onMessageCalls int
|
|
||||||
var onCompleteCalls int
|
|
||||||
message, threadID := parseJSONStreamInternal(strings.NewReader(input), nil, nil, func() {
|
|
||||||
onMessageCalls++
|
|
||||||
}, func() {
|
|
||||||
onCompleteCalls++
|
|
||||||
})
|
|
||||||
if message != "OK" {
|
|
||||||
t.Fatalf("message = %q, want OK", message)
|
|
||||||
}
|
|
||||||
if threadID != "s-1" {
|
|
||||||
t.Fatalf("threadID = %q, want s-1", threadID)
|
|
||||||
}
|
|
||||||
if onMessageCalls != 1 {
|
|
||||||
t.Fatalf("onMessage calls = %d, want 1", onMessageCalls)
|
|
||||||
}
|
|
||||||
if onCompleteCalls != 1 {
|
|
||||||
t.Fatalf("onComplete calls = %d, want 1", onCompleteCalls)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBackendParseJSONStream_OnComplete_GeminiTerminalResultStatus(t *testing.T) {
|
|
||||||
input := `{"type":"message","role":"assistant","content":"Hi","delta":true,"session_id":"g-1"}` + "\n" +
|
|
||||||
`{"type":"result","status":"success","session_id":"g-1"}`
|
|
||||||
|
|
||||||
var onMessageCalls int
|
|
||||||
var onCompleteCalls int
|
|
||||||
message, threadID := parseJSONStreamInternal(strings.NewReader(input), nil, nil, func() {
|
|
||||||
onMessageCalls++
|
|
||||||
}, func() {
|
|
||||||
onCompleteCalls++
|
|
||||||
})
|
|
||||||
if message != "Hi" {
|
|
||||||
t.Fatalf("message = %q, want Hi", message)
|
|
||||||
}
|
|
||||||
if threadID != "g-1" {
|
|
||||||
t.Fatalf("threadID = %q, want g-1", threadID)
|
|
||||||
}
|
|
||||||
if onMessageCalls != 1 {
|
|
||||||
t.Fatalf("onMessage calls = %d, want 1", onMessageCalls)
|
|
||||||
}
|
|
||||||
if onCompleteCalls != 1 {
|
|
||||||
t.Fatalf("onComplete calls = %d, want 1", onCompleteCalls)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBackendParseJSONStream_ScannerError(t *testing.T) {
|
func TestBackendParseJSONStream_ScannerError(t *testing.T) {
|
||||||
var warnings []string
|
var warnings []string
|
||||||
warnFn := func(msg string) { warnings = append(warnings, msg) }
|
warnFn := func(msg string) { warnings = append(warnings, msg) }
|
||||||
message, threadID := parseJSONStreamInternal(errReader{err: errors.New("scan-fail")}, warnFn, nil, nil, nil)
|
message, threadID := parseJSONStreamInternal(errReader{err: errors.New("scan-fail")}, warnFn, nil, nil)
|
||||||
if message != "" || threadID != "" {
|
if message != "" || threadID != "" {
|
||||||
t.Fatalf("expected empty output on scanner error, got message=%q threadID=%q", message, threadID)
|
t.Fatalf("expected empty output on scanner error, got message=%q threadID=%q", message, threadID)
|
||||||
}
|
}
|
||||||
@@ -2812,10 +2266,6 @@ func TestRunCodexTask_Timeout(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestRunCodexTask_SignalHandling(t *testing.T) {
|
func TestRunCodexTask_SignalHandling(t *testing.T) {
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
t.Skip("signal-based test is not supported on Windows")
|
|
||||||
}
|
|
||||||
|
|
||||||
defer resetTestHooks()
|
defer resetTestHooks()
|
||||||
codexCommand = "sleep"
|
codexCommand = "sleep"
|
||||||
buildCodexArgsFn = func(cfg *Config, targetArg string) []string { return []string{"5"} }
|
buildCodexArgsFn = func(cfg *Config, targetArg string) []string { return []string{"5"} }
|
||||||
@@ -2824,9 +2274,7 @@ func TestRunCodexTask_SignalHandling(t *testing.T) {
|
|||||||
go func() { resultCh <- runCodexTask(TaskSpec{Task: "ignored"}, false, 5) }()
|
go func() { resultCh <- runCodexTask(TaskSpec{Task: "ignored"}, false, 5) }()
|
||||||
|
|
||||||
time.Sleep(200 * time.Millisecond)
|
time.Sleep(200 * time.Millisecond)
|
||||||
if proc, err := os.FindProcess(os.Getpid()); err == nil && proc != nil {
|
syscall.Kill(os.Getpid(), syscall.SIGTERM)
|
||||||
_ = proc.Signal(syscall.SIGTERM)
|
|
||||||
}
|
|
||||||
|
|
||||||
res := <-resultCh
|
res := <-resultCh
|
||||||
signal.Reset(syscall.SIGINT, syscall.SIGTERM)
|
signal.Reset(syscall.SIGINT, syscall.SIGTERM)
|
||||||
@@ -2924,17 +2372,14 @@ func TestRunGenerateFinalOutput(t *testing.T) {
|
|||||||
if out == "" {
|
if out == "" {
|
||||||
t.Fatalf("generateFinalOutput() returned empty string")
|
t.Fatalf("generateFinalOutput() returned empty string")
|
||||||
}
|
}
|
||||||
// New format: "X tasks | Y passed | Z failed"
|
if !strings.Contains(out, "Total: 3") || !strings.Contains(out, "Success: 2") || !strings.Contains(out, "Failed: 1") {
|
||||||
if !strings.Contains(out, "3 tasks") || !strings.Contains(out, "2 passed") || !strings.Contains(out, "1 failed") {
|
|
||||||
t.Fatalf("summary missing, got %q", out)
|
t.Fatalf("summary missing, got %q", out)
|
||||||
}
|
}
|
||||||
// New format uses ### task-id for each task
|
if !strings.Contains(out, "Task: a") || !strings.Contains(out, "Task: b") {
|
||||||
if !strings.Contains(out, "### a") || !strings.Contains(out, "### b") {
|
t.Fatalf("task entries missing")
|
||||||
t.Fatalf("task entries missing in structured format")
|
|
||||||
}
|
}
|
||||||
// Should have Summary section
|
if strings.Contains(out, "Log:") {
|
||||||
if !strings.Contains(out, "## Summary") {
|
t.Fatalf("unexpected log line when LogPath empty, got %q", out)
|
||||||
t.Fatalf("Summary section missing, got %q", out)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2954,18 +2399,12 @@ func TestRunGenerateFinalOutput_LogPath(t *testing.T) {
|
|||||||
LogPath: "/tmp/log-b",
|
LogPath: "/tmp/log-b",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
// Test summary mode (default) - should contain log paths
|
|
||||||
out := generateFinalOutput(results)
|
out := generateFinalOutput(results)
|
||||||
if !strings.Contains(out, "/tmp/log-b") {
|
if !strings.Contains(out, "Session: sid\nLog: /tmp/log-a") {
|
||||||
t.Fatalf("summary output missing log path for failed task: %q", out)
|
t.Fatalf("output missing log line after session: %q", out)
|
||||||
}
|
|
||||||
// Test full output mode - shows Session: and Log: lines
|
|
||||||
out = generateFinalOutputWithMode(results, false)
|
|
||||||
if !strings.Contains(out, "Session: sid") || !strings.Contains(out, "Log: /tmp/log-a") {
|
|
||||||
t.Fatalf("full output missing log line after session: %q", out)
|
|
||||||
}
|
}
|
||||||
if !strings.Contains(out, "Log: /tmp/log-b") {
|
if !strings.Contains(out, "Log: /tmp/log-b") {
|
||||||
t.Fatalf("full output missing log line for failed task: %q", out)
|
t.Fatalf("output missing log line for failed task: %q", out)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -3238,50 +2677,6 @@ do two`)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestParallelModelPropagation(t *testing.T) {
|
|
||||||
defer resetTestHooks()
|
|
||||||
cleanupLogsFn = func() (CleanupStats, error) { return CleanupStats{}, nil }
|
|
||||||
|
|
||||||
orig := runCodexTaskFn
|
|
||||||
var mu sync.Mutex
|
|
||||||
seen := make(map[string]string)
|
|
||||||
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
|
|
||||||
mu.Lock()
|
|
||||||
seen[task.ID] = task.Model
|
|
||||||
mu.Unlock()
|
|
||||||
return TaskResult{TaskID: task.ID, ExitCode: 0, Message: "ok"}
|
|
||||||
}
|
|
||||||
t.Cleanup(func() { runCodexTaskFn = orig })
|
|
||||||
|
|
||||||
stdinReader = strings.NewReader(`---TASK---
|
|
||||||
id: first
|
|
||||||
---CONTENT---
|
|
||||||
do one
|
|
||||||
|
|
||||||
---TASK---
|
|
||||||
id: second
|
|
||||||
model: opus
|
|
||||||
---CONTENT---
|
|
||||||
do two`)
|
|
||||||
os.Args = []string{"codeagent-wrapper", "--parallel", "--model", "sonnet"}
|
|
||||||
|
|
||||||
if code := run(); code != 0 {
|
|
||||||
t.Fatalf("run exit = %d, want 0", code)
|
|
||||||
}
|
|
||||||
|
|
||||||
mu.Lock()
|
|
||||||
firstModel, firstOK := seen["first"]
|
|
||||||
secondModel, secondOK := seen["second"]
|
|
||||||
mu.Unlock()
|
|
||||||
|
|
||||||
if !firstOK || firstModel != "sonnet" {
|
|
||||||
t.Fatalf("first model = %q (present=%v), want sonnet", firstModel, firstOK)
|
|
||||||
}
|
|
||||||
if !secondOK || secondModel != "opus" {
|
|
||||||
t.Fatalf("second model = %q (present=%v), want opus", secondModel, secondOK)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestParallelFlag(t *testing.T) {
|
func TestParallelFlag(t *testing.T) {
|
||||||
oldArgs := os.Args
|
oldArgs := os.Args
|
||||||
defer func() { os.Args = oldArgs }()
|
defer func() { os.Args = oldArgs }()
|
||||||
@@ -3307,46 +2702,6 @@ test`
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRunParallelWithFullOutput(t *testing.T) {
|
|
||||||
defer resetTestHooks()
|
|
||||||
cleanupLogsFn = func() (CleanupStats, error) { return CleanupStats{}, nil }
|
|
||||||
|
|
||||||
oldArgs := os.Args
|
|
||||||
t.Cleanup(func() { os.Args = oldArgs })
|
|
||||||
os.Args = []string{"codeagent-wrapper", "--parallel", "--full-output"}
|
|
||||||
|
|
||||||
stdinReader = strings.NewReader(`---TASK---
|
|
||||||
id: T1
|
|
||||||
---CONTENT---
|
|
||||||
noop`)
|
|
||||||
t.Cleanup(func() { stdinReader = os.Stdin })
|
|
||||||
|
|
||||||
orig := runCodexTaskFn
|
|
||||||
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
|
|
||||||
return TaskResult{TaskID: task.ID, ExitCode: 0, Message: "full output marker"}
|
|
||||||
}
|
|
||||||
t.Cleanup(func() { runCodexTaskFn = orig })
|
|
||||||
|
|
||||||
out := captureOutput(t, func() {
|
|
||||||
if code := run(); code != 0 {
|
|
||||||
t.Fatalf("run exit = %d, want 0", code)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
if !strings.Contains(out, "=== Parallel Execution Summary ===") {
|
|
||||||
t.Fatalf("output missing full-output header, got %q", out)
|
|
||||||
}
|
|
||||||
if !strings.Contains(out, "--- Task: T1 ---") {
|
|
||||||
t.Fatalf("output missing task block, got %q", out)
|
|
||||||
}
|
|
||||||
if !strings.Contains(out, "full output marker") {
|
|
||||||
t.Fatalf("output missing task message, got %q", out)
|
|
||||||
}
|
|
||||||
if strings.Contains(out, "=== Execution Report ===") {
|
|
||||||
t.Fatalf("output should not include summary-only header, got %q", out)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestParallelInvalidBackend(t *testing.T) {
|
func TestParallelInvalidBackend(t *testing.T) {
|
||||||
defer resetTestHooks()
|
defer resetTestHooks()
|
||||||
cleanupLogsFn = func() (CleanupStats, error) { return CleanupStats{}, nil }
|
cleanupLogsFn = func() (CleanupStats, error) { return CleanupStats{}, nil }
|
||||||
@@ -3401,9 +2756,7 @@ func TestVersionFlag(t *testing.T) {
|
|||||||
t.Errorf("exit = %d, want 0", code)
|
t.Errorf("exit = %d, want 0", code)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
want := "codeagent-wrapper version 5.2.5\n"
|
||||||
want := "codeagent-wrapper version 5.4.0\n"
|
|
||||||
|
|
||||||
if output != want {
|
if output != want {
|
||||||
t.Fatalf("output = %q, want %q", output, want)
|
t.Fatalf("output = %q, want %q", output, want)
|
||||||
}
|
}
|
||||||
@@ -3417,9 +2770,7 @@ func TestVersionShortFlag(t *testing.T) {
|
|||||||
t.Errorf("exit = %d, want 0", code)
|
t.Errorf("exit = %d, want 0", code)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
want := "codeagent-wrapper version 5.2.5\n"
|
||||||
want := "codeagent-wrapper version 5.4.0\n"
|
|
||||||
|
|
||||||
if output != want {
|
if output != want {
|
||||||
t.Fatalf("output = %q, want %q", output, want)
|
t.Fatalf("output = %q, want %q", output, want)
|
||||||
}
|
}
|
||||||
@@ -3433,9 +2784,7 @@ func TestVersionLegacyAlias(t *testing.T) {
|
|||||||
t.Errorf("exit = %d, want 0", code)
|
t.Errorf("exit = %d, want 0", code)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
want := "codex-wrapper version 5.2.5\n"
|
||||||
want := "codex-wrapper version 5.4.0\n"
|
|
||||||
|
|
||||||
if output != want {
|
if output != want {
|
||||||
t.Fatalf("output = %q, want %q", output, want)
|
t.Fatalf("output = %q, want %q", output, want)
|
||||||
}
|
}
|
||||||
@@ -4082,10 +3431,6 @@ func TestRun_LoggerLifecycle(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestRun_LoggerRemovedOnSignal(t *testing.T) {
|
func TestRun_LoggerRemovedOnSignal(t *testing.T) {
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
t.Skip("signal-based test is not supported on Windows")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Skip in CI due to unreliable signal delivery in containerized environments
|
// Skip in CI due to unreliable signal delivery in containerized environments
|
||||||
if os.Getenv("CI") != "" || os.Getenv("GITHUB_ACTIONS") != "" {
|
if os.Getenv("CI") != "" || os.Getenv("GITHUB_ACTIONS") != "" {
|
||||||
t.Skip("Skipping signal test in CI environment")
|
t.Skip("Skipping signal test in CI environment")
|
||||||
@@ -4127,9 +3472,7 @@ printf '%s\n' '{"type":"item.completed","item":{"type":"agent_message","text":"l
|
|||||||
time.Sleep(10 * time.Millisecond)
|
time.Sleep(10 * time.Millisecond)
|
||||||
}
|
}
|
||||||
|
|
||||||
if proc, err := os.FindProcess(os.Getpid()); err == nil && proc != nil {
|
_ = syscall.Kill(os.Getpid(), syscall.SIGINT)
|
||||||
_ = proc.Signal(syscall.SIGINT)
|
|
||||||
}
|
|
||||||
|
|
||||||
var exitCode int
|
var exitCode int
|
||||||
select {
|
select {
|
||||||
|
|||||||
@@ -50,7 +50,7 @@ func parseJSONStreamWithWarn(r io.Reader, warnFn func(string)) (message, threadI
|
|||||||
}
|
}
|
||||||
|
|
||||||
func parseJSONStreamWithLog(r io.Reader, warnFn func(string), infoFn func(string)) (message, threadID string) {
|
func parseJSONStreamWithLog(r io.Reader, warnFn func(string), infoFn func(string)) (message, threadID string) {
|
||||||
return parseJSONStreamInternal(r, warnFn, infoFn, nil, nil)
|
return parseJSONStreamInternal(r, warnFn, infoFn, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -95,7 +95,7 @@ type ItemContent struct {
|
|||||||
Text interface{} `json:"text"`
|
Text interface{} `json:"text"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(string), onMessage func(), onComplete func()) (message, threadID string) {
|
func parseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(string), onMessage func()) (message, threadID string) {
|
||||||
reader := bufio.NewReaderSize(r, jsonLineReaderSize)
|
reader := bufio.NewReaderSize(r, jsonLineReaderSize)
|
||||||
|
|
||||||
if warnFn == nil {
|
if warnFn == nil {
|
||||||
@@ -111,12 +111,6 @@ func parseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(strin
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
notifyComplete := func() {
|
|
||||||
if onComplete != nil {
|
|
||||||
onComplete()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
totalEvents := 0
|
totalEvents := 0
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -163,14 +157,7 @@ func parseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(strin
|
|||||||
isCodex = true
|
isCodex = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Codex-specific event types without thread_id or item
|
|
||||||
if !isCodex && (event.Type == "turn.started" || event.Type == "turn.completed") {
|
|
||||||
isCodex = true
|
|
||||||
}
|
|
||||||
isClaude := event.Subtype != "" || event.Result != ""
|
isClaude := event.Subtype != "" || event.Result != ""
|
||||||
if !isClaude && event.Type == "result" && event.SessionID != "" && event.Status == "" {
|
|
||||||
isClaude = true
|
|
||||||
}
|
|
||||||
isGemini := event.Role != "" || event.Delta != nil || event.Status != ""
|
isGemini := event.Role != "" || event.Delta != nil || event.Status != ""
|
||||||
|
|
||||||
// Handle Codex events
|
// Handle Codex events
|
||||||
@@ -191,17 +178,6 @@ func parseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(strin
|
|||||||
threadID = event.ThreadID
|
threadID = event.ThreadID
|
||||||
infoFn(fmt.Sprintf("thread.started event thread_id=%s", threadID))
|
infoFn(fmt.Sprintf("thread.started event thread_id=%s", threadID))
|
||||||
|
|
||||||
case "thread.completed":
|
|
||||||
if event.ThreadID != "" && threadID == "" {
|
|
||||||
threadID = event.ThreadID
|
|
||||||
}
|
|
||||||
infoFn(fmt.Sprintf("thread.completed event thread_id=%s", event.ThreadID))
|
|
||||||
notifyComplete()
|
|
||||||
|
|
||||||
case "turn.completed":
|
|
||||||
infoFn("turn.completed event")
|
|
||||||
notifyComplete()
|
|
||||||
|
|
||||||
case "item.completed":
|
case "item.completed":
|
||||||
var itemType string
|
var itemType string
|
||||||
if len(event.Item) > 0 {
|
if len(event.Item) > 0 {
|
||||||
@@ -245,10 +221,6 @@ func parseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(strin
|
|||||||
claudeMessage = event.Result
|
claudeMessage = event.Result
|
||||||
notifyMessage()
|
notifyMessage()
|
||||||
}
|
}
|
||||||
|
|
||||||
if event.Type == "result" {
|
|
||||||
notifyComplete()
|
|
||||||
}
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -264,10 +236,6 @@ func parseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(strin
|
|||||||
|
|
||||||
if event.Status != "" {
|
if event.Status != "" {
|
||||||
notifyMessage()
|
notifyMessage()
|
||||||
|
|
||||||
if event.Type == "result" && (event.Status == "success" || event.Status == "error" || event.Status == "complete" || event.Status == "failed") {
|
|
||||||
notifyComplete()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
delta := false
|
delta := false
|
||||||
@@ -279,8 +247,8 @@ func parseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(strin
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unknown event format from other backends (turn.started/assistant/user); ignore.
|
// Unknown event format
|
||||||
continue
|
warnFn(fmt.Sprintf("Unknown event format: %s", truncateBytes(line, 100)))
|
||||||
}
|
}
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
|
|||||||
@@ -18,7 +18,7 @@ func TestParseJSONStream_SkipsOverlongLineAndContinues(t *testing.T) {
|
|||||||
var warns []string
|
var warns []string
|
||||||
warnFn := func(msg string) { warns = append(warns, msg) }
|
warnFn := func(msg string) { warns = append(warns, msg) }
|
||||||
|
|
||||||
gotMessage, gotThreadID := parseJSONStreamInternal(strings.NewReader(input), warnFn, nil, nil, nil)
|
gotMessage, gotThreadID := parseJSONStreamInternal(strings.NewReader(input), warnFn, nil, nil)
|
||||||
if gotMessage != "ok" {
|
if gotMessage != "ok" {
|
||||||
t.Fatalf("message=%q, want %q (warns=%v)", gotMessage, "ok", warns)
|
t.Fatalf("message=%q, want %q (warns=%v)", gotMessage, "ok", warns)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,33 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestBackendParseJSONStream_UnknownEventsAreSilent(t *testing.T) {
|
|
||||||
input := strings.Join([]string{
|
|
||||||
`{"type":"turn.started"}`,
|
|
||||||
`{"type":"assistant","text":"hi"}`,
|
|
||||||
`{"type":"user","text":"yo"}`,
|
|
||||||
`{"type":"item.completed","item":{"type":"agent_message","text":"ok"}}`,
|
|
||||||
}, "\n")
|
|
||||||
|
|
||||||
var infos []string
|
|
||||||
infoFn := func(msg string) { infos = append(infos, msg) }
|
|
||||||
|
|
||||||
message, threadID := parseJSONStreamInternal(strings.NewReader(input), nil, infoFn, nil, nil)
|
|
||||||
if message != "ok" {
|
|
||||||
t.Fatalf("message=%q, want %q (infos=%v)", message, "ok", infos)
|
|
||||||
}
|
|
||||||
if threadID != "" {
|
|
||||||
t.Fatalf("threadID=%q, want empty (infos=%v)", threadID, infos)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, msg := range infos {
|
|
||||||
if strings.Contains(msg, "Agent event:") {
|
|
||||||
t.Fatalf("unexpected log for unknown event: %q", msg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@@ -1,64 +0,0 @@
|
|||||||
//go:build windows
|
|
||||||
// +build windows
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestIsProcessRunning(t *testing.T) {
|
|
||||||
t.Run("boundary values", func(t *testing.T) {
|
|
||||||
if isProcessRunning(0) {
|
|
||||||
t.Fatalf("expected pid 0 to be reported as not running")
|
|
||||||
}
|
|
||||||
if isProcessRunning(-1) {
|
|
||||||
t.Fatalf("expected pid -1 to be reported as not running")
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("current process", func(t *testing.T) {
|
|
||||||
if !isProcessRunning(os.Getpid()) {
|
|
||||||
t.Fatalf("expected current process (pid=%d) to be running", os.Getpid())
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("fake pid", func(t *testing.T) {
|
|
||||||
const nonexistentPID = 1 << 30
|
|
||||||
if isProcessRunning(nonexistentPID) {
|
|
||||||
t.Fatalf("expected pid %d to be reported as not running", nonexistentPID)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetProcessStartTimeReadsProcStat(t *testing.T) {
|
|
||||||
start := getProcessStartTime(os.Getpid())
|
|
||||||
if start.IsZero() {
|
|
||||||
t.Fatalf("expected non-zero start time for current process")
|
|
||||||
}
|
|
||||||
if start.After(time.Now().Add(5 * time.Second)) {
|
|
||||||
t.Fatalf("start time is unexpectedly in the future: %v", start)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetProcessStartTimeInvalidData(t *testing.T) {
|
|
||||||
if !getProcessStartTime(0).IsZero() {
|
|
||||||
t.Fatalf("expected zero time for pid 0")
|
|
||||||
}
|
|
||||||
if !getProcessStartTime(-1).IsZero() {
|
|
||||||
t.Fatalf("expected zero time for negative pid")
|
|
||||||
}
|
|
||||||
if !getProcessStartTime(1 << 30).IsZero() {
|
|
||||||
t.Fatalf("expected zero time for non-existent pid")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetBootTimeParsesBtime(t *testing.T) {
|
|
||||||
t.Skip("getBootTime is only implemented on Unix-like systems")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetBootTimeInvalidData(t *testing.T) {
|
|
||||||
t.Skip("getBootTime is only implemented on Unix-like systems")
|
|
||||||
}
|
|
||||||
@@ -1,16 +0,0 @@
|
|||||||
//go:build unix || darwin || linux
|
|
||||||
// +build unix darwin linux
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
// sendTermSignal sends SIGTERM for graceful shutdown on Unix.
|
|
||||||
func sendTermSignal(proc processHandle) error {
|
|
||||||
if proc == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return proc.Signal(syscall.SIGTERM)
|
|
||||||
}
|
|
||||||
@@ -1,36 +0,0 @@
|
|||||||
//go:build windows
|
|
||||||
// +build windows
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// sendTermSignal on Windows directly kills the process.
|
|
||||||
// SIGTERM is not supported on Windows.
|
|
||||||
func sendTermSignal(proc processHandle) error {
|
|
||||||
if proc == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
pid := proc.Pid()
|
|
||||||
if pid > 0 {
|
|
||||||
// Kill the whole process tree to avoid leaving inheriting child processes around.
|
|
||||||
// This also helps prevent exec.Cmd.Wait() from blocking on stderr/stdout pipes held open by children.
|
|
||||||
taskkill := "taskkill"
|
|
||||||
if root := os.Getenv("SystemRoot"); root != "" {
|
|
||||||
taskkill = filepath.Join(root, "System32", "taskkill.exe")
|
|
||||||
}
|
|
||||||
cmd := exec.Command(taskkill, "/PID", strconv.Itoa(pid), "/T", "/F")
|
|
||||||
cmd.Stdout = io.Discard
|
|
||||||
cmd.Stderr = io.Discard
|
|
||||||
if err := cmd.Run(); err == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return proc.Kill()
|
|
||||||
}
|
|
||||||
@@ -75,9 +75,9 @@ func getEnv(key, defaultValue string) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type logWriter struct {
|
type logWriter struct {
|
||||||
prefix string
|
prefix string
|
||||||
maxLen int
|
maxLen int
|
||||||
buf bytes.Buffer
|
buf bytes.Buffer
|
||||||
dropped bool
|
dropped bool
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -205,55 +205,6 @@ func truncate(s string, maxLen int) string {
|
|||||||
return s[:maxLen] + "..."
|
return s[:maxLen] + "..."
|
||||||
}
|
}
|
||||||
|
|
||||||
// safeTruncate safely truncates string to maxLen, avoiding panic and UTF-8 corruption.
|
|
||||||
func safeTruncate(s string, maxLen int) string {
|
|
||||||
if maxLen <= 0 || s == "" {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
runes := []rune(s)
|
|
||||||
if len(runes) <= maxLen {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
if maxLen < 4 {
|
|
||||||
return string(runes[:1])
|
|
||||||
}
|
|
||||||
|
|
||||||
cutoff := maxLen - 3
|
|
||||||
if cutoff <= 0 {
|
|
||||||
return string(runes[:1])
|
|
||||||
}
|
|
||||||
if len(runes) <= cutoff {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
return string(runes[:cutoff]) + "..."
|
|
||||||
}
|
|
||||||
|
|
||||||
// sanitizeOutput removes ANSI escape sequences and control characters.
|
|
||||||
func sanitizeOutput(s string) string {
|
|
||||||
var result strings.Builder
|
|
||||||
inEscape := false
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
if s[i] == '\x1b' && i+1 < len(s) && s[i+1] == '[' {
|
|
||||||
inEscape = true
|
|
||||||
i++ // skip '['
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if inEscape {
|
|
||||||
if (s[i] >= 'A' && s[i] <= 'Z') || (s[i] >= 'a' && s[i] <= 'z') {
|
|
||||||
inEscape = false
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Keep printable chars and common whitespace.
|
|
||||||
if s[i] >= 32 || s[i] == '\n' || s[i] == '\t' {
|
|
||||||
result.WriteByte(s[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return result.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func min(a, b int) int {
|
func min(a, b int) int {
|
||||||
if a < b {
|
if a < b {
|
||||||
return a
|
return a
|
||||||
@@ -272,444 +223,3 @@ func greet(name string) string {
|
|||||||
func farewell(name string) string {
|
func farewell(name string) string {
|
||||||
return "goodbye " + name
|
return "goodbye " + name
|
||||||
}
|
}
|
||||||
|
|
||||||
// extractMessageSummary extracts a brief summary from task output
|
|
||||||
// Returns first meaningful line or truncated content up to maxLen chars
|
|
||||||
func extractMessageSummary(message string, maxLen int) string {
|
|
||||||
if message == "" || maxLen <= 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to find a meaningful summary line
|
|
||||||
lines := strings.Split(message, "\n")
|
|
||||||
for _, line := range lines {
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
// Skip empty lines and common noise
|
|
||||||
if line == "" || strings.HasPrefix(line, "```") || strings.HasPrefix(line, "---") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Found a meaningful line
|
|
||||||
return safeTruncate(line, maxLen)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fallback: truncate entire message
|
|
||||||
clean := strings.TrimSpace(message)
|
|
||||||
return safeTruncate(clean, maxLen)
|
|
||||||
}
|
|
||||||
|
|
||||||
// extractCoverageFromLines extracts coverage from pre-split lines.
|
|
||||||
func extractCoverageFromLines(lines []string) string {
|
|
||||||
if len(lines) == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
end := len(lines)
|
|
||||||
for end > 0 && strings.TrimSpace(lines[end-1]) == "" {
|
|
||||||
end--
|
|
||||||
}
|
|
||||||
|
|
||||||
if end == 1 {
|
|
||||||
trimmed := strings.TrimSpace(lines[0])
|
|
||||||
if strings.HasSuffix(trimmed, "%") {
|
|
||||||
if num, err := strconv.ParseFloat(strings.TrimSuffix(trimmed, "%"), 64); err == nil && num >= 0 && num <= 100 {
|
|
||||||
return trimmed
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
coverageKeywords := []string{"file", "stmt", "branch", "line", "coverage", "total"}
|
|
||||||
|
|
||||||
for _, line := range lines[:end] {
|
|
||||||
lower := strings.ToLower(line)
|
|
||||||
|
|
||||||
hasKeyword := false
|
|
||||||
tokens := strings.FieldsFunc(lower, func(r rune) bool { return r < 'a' || r > 'z' })
|
|
||||||
for _, token := range tokens {
|
|
||||||
for _, kw := range coverageKeywords {
|
|
||||||
if strings.HasPrefix(token, kw) {
|
|
||||||
hasKeyword = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if hasKeyword {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !hasKeyword {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if !strings.Contains(line, "%") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract percentage pattern: number followed by %
|
|
||||||
for i := 0; i < len(line); i++ {
|
|
||||||
if line[i] == '%' && i > 0 {
|
|
||||||
// Walk back to find the number
|
|
||||||
j := i - 1
|
|
||||||
for j >= 0 && (line[j] == '.' || (line[j] >= '0' && line[j] <= '9')) {
|
|
||||||
j--
|
|
||||||
}
|
|
||||||
if j < i-1 {
|
|
||||||
numStr := line[j+1 : i]
|
|
||||||
// Validate it's a reasonable percentage
|
|
||||||
if num, err := strconv.ParseFloat(numStr, 64); err == nil && num >= 0 && num <= 100 {
|
|
||||||
return numStr + "%"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// extractCoverage extracts coverage percentage from task output
|
|
||||||
// Supports common formats: "Coverage: 92%", "92% coverage", "coverage 92%", "TOTAL 92%"
|
|
||||||
func extractCoverage(message string) string {
|
|
||||||
if message == "" {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
return extractCoverageFromLines(strings.Split(message, "\n"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// extractCoverageNum extracts coverage as a numeric value for comparison
|
|
||||||
func extractCoverageNum(coverage string) float64 {
|
|
||||||
if coverage == "" {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
// Remove % sign and parse
|
|
||||||
numStr := strings.TrimSuffix(coverage, "%")
|
|
||||||
if num, err := strconv.ParseFloat(numStr, 64); err == nil {
|
|
||||||
return num
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// extractFilesChangedFromLines extracts files from pre-split lines.
|
|
||||||
func extractFilesChangedFromLines(lines []string) []string {
|
|
||||||
if len(lines) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var files []string
|
|
||||||
seen := make(map[string]bool)
|
|
||||||
exts := []string{".ts", ".tsx", ".js", ".jsx", ".go", ".py", ".rs", ".java", ".vue", ".css", ".scss", ".md", ".json", ".yaml", ".yml", ".toml"}
|
|
||||||
|
|
||||||
for _, line := range lines {
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
|
|
||||||
// Pattern 1: "Modified: path/to/file.ts" or "Created: path/to/file.ts"
|
|
||||||
matchedPrefix := false
|
|
||||||
for _, prefix := range []string{"Modified:", "Created:", "Updated:", "Edited:", "Wrote:", "Changed:"} {
|
|
||||||
if strings.HasPrefix(line, prefix) {
|
|
||||||
file := strings.TrimSpace(strings.TrimPrefix(line, prefix))
|
|
||||||
file = strings.Trim(file, "`,\"'()[],:")
|
|
||||||
file = strings.TrimPrefix(file, "@")
|
|
||||||
if file != "" && !seen[file] {
|
|
||||||
files = append(files, file)
|
|
||||||
seen[file] = true
|
|
||||||
}
|
|
||||||
matchedPrefix = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if matchedPrefix {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Pattern 2: Tokens that look like file paths (allow root files, strip @ prefix).
|
|
||||||
parts := strings.Fields(line)
|
|
||||||
for _, part := range parts {
|
|
||||||
part = strings.Trim(part, "`,\"'()[],:")
|
|
||||||
part = strings.TrimPrefix(part, "@")
|
|
||||||
for _, ext := range exts {
|
|
||||||
if strings.HasSuffix(part, ext) && !seen[part] {
|
|
||||||
files = append(files, part)
|
|
||||||
seen[part] = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Limit to first 10 files to avoid bloat
|
|
||||||
if len(files) > 10 {
|
|
||||||
files = files[:10]
|
|
||||||
}
|
|
||||||
|
|
||||||
return files
|
|
||||||
}
|
|
||||||
|
|
||||||
// extractFilesChanged extracts list of changed files from task output
|
|
||||||
// Looks for common patterns like "Modified: file.ts", "Created: file.ts", file paths in output
|
|
||||||
func extractFilesChanged(message string) []string {
|
|
||||||
if message == "" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return extractFilesChangedFromLines(strings.Split(message, "\n"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// extractTestResultsFromLines extracts test results from pre-split lines.
|
|
||||||
func extractTestResultsFromLines(lines []string) (passed, failed int) {
|
|
||||||
if len(lines) == 0 {
|
|
||||||
return 0, 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Common patterns:
|
|
||||||
// pytest: "12 passed, 2 failed"
|
|
||||||
// jest: "Tests: 2 failed, 12 passed"
|
|
||||||
// go: "ok ... 12 tests"
|
|
||||||
|
|
||||||
for _, line := range lines {
|
|
||||||
line = strings.ToLower(line)
|
|
||||||
|
|
||||||
// Look for test result lines
|
|
||||||
if !strings.Contains(line, "pass") && !strings.Contains(line, "fail") && !strings.Contains(line, "test") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract numbers near "passed" or "pass"
|
|
||||||
if idx := strings.Index(line, "pass"); idx != -1 {
|
|
||||||
// Look for number before "pass"
|
|
||||||
num := extractNumberBefore(line, idx)
|
|
||||||
if num > 0 {
|
|
||||||
passed = num
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract numbers near "failed" or "fail"
|
|
||||||
if idx := strings.Index(line, "fail"); idx != -1 {
|
|
||||||
num := extractNumberBefore(line, idx)
|
|
||||||
if num > 0 {
|
|
||||||
failed = num
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// go test style: "ok ... 12 tests"
|
|
||||||
if passed == 0 {
|
|
||||||
if idx := strings.Index(line, "test"); idx != -1 {
|
|
||||||
num := extractNumberBefore(line, idx)
|
|
||||||
if num > 0 {
|
|
||||||
passed = num
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we found both, stop
|
|
||||||
if passed > 0 && failed > 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return passed, failed
|
|
||||||
}
|
|
||||||
|
|
||||||
// extractTestResults extracts test pass/fail counts from task output
|
|
||||||
func extractTestResults(message string) (passed, failed int) {
|
|
||||||
if message == "" {
|
|
||||||
return 0, 0
|
|
||||||
}
|
|
||||||
|
|
||||||
return extractTestResultsFromLines(strings.Split(message, "\n"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// extractNumberBefore extracts a number that appears before the given index
|
|
||||||
func extractNumberBefore(s string, idx int) int {
|
|
||||||
if idx <= 0 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Walk backwards to find digits
|
|
||||||
end := idx - 1
|
|
||||||
for end >= 0 && (s[end] == ' ' || s[end] == ':' || s[end] == ',') {
|
|
||||||
end--
|
|
||||||
}
|
|
||||||
if end < 0 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
start := end
|
|
||||||
for start >= 0 && s[start] >= '0' && s[start] <= '9' {
|
|
||||||
start--
|
|
||||||
}
|
|
||||||
start++
|
|
||||||
|
|
||||||
if start > end {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
numStr := s[start : end+1]
|
|
||||||
if num, err := strconv.Atoi(numStr); err == nil {
|
|
||||||
return num
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// extractKeyOutputFromLines extracts key output from pre-split lines.
|
|
||||||
func extractKeyOutputFromLines(lines []string, maxLen int) string {
|
|
||||||
if len(lines) == 0 || maxLen <= 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Priority 1: Look for explicit summary lines
|
|
||||||
for _, line := range lines {
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
lower := strings.ToLower(line)
|
|
||||||
if strings.HasPrefix(lower, "summary:") || strings.HasPrefix(lower, "completed:") ||
|
|
||||||
strings.HasPrefix(lower, "implemented:") || strings.HasPrefix(lower, "added:") ||
|
|
||||||
strings.HasPrefix(lower, "created:") || strings.HasPrefix(lower, "fixed:") {
|
|
||||||
content := line
|
|
||||||
for _, prefix := range []string{"Summary:", "Completed:", "Implemented:", "Added:", "Created:", "Fixed:",
|
|
||||||
"summary:", "completed:", "implemented:", "added:", "created:", "fixed:"} {
|
|
||||||
content = strings.TrimPrefix(content, prefix)
|
|
||||||
}
|
|
||||||
content = strings.TrimSpace(content)
|
|
||||||
if len(content) > 0 {
|
|
||||||
return safeTruncate(content, maxLen)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Priority 2: First meaningful line (skip noise)
|
|
||||||
for _, line := range lines {
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
if line == "" || strings.HasPrefix(line, "```") || strings.HasPrefix(line, "---") ||
|
|
||||||
strings.HasPrefix(line, "#") || strings.HasPrefix(line, "//") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Skip very short lines (likely headers or markers)
|
|
||||||
if len(line) < 20 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return safeTruncate(line, maxLen)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fallback: truncate entire message
|
|
||||||
clean := strings.TrimSpace(strings.Join(lines, "\n"))
|
|
||||||
return safeTruncate(clean, maxLen)
|
|
||||||
}
|
|
||||||
|
|
||||||
// extractKeyOutput extracts a brief summary of what the task accomplished
|
|
||||||
// Looks for summary lines, first meaningful sentence, or truncates message
|
|
||||||
func extractKeyOutput(message string, maxLen int) string {
|
|
||||||
if message == "" || maxLen <= 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return extractKeyOutputFromLines(strings.Split(message, "\n"), maxLen)
|
|
||||||
}
|
|
||||||
|
|
||||||
// extractCoverageGap extracts what's missing from coverage reports
|
|
||||||
// Looks for uncovered lines, branches, or functions
|
|
||||||
func extractCoverageGap(message string) string {
|
|
||||||
if message == "" {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
lower := strings.ToLower(message)
|
|
||||||
lines := strings.Split(message, "\n")
|
|
||||||
|
|
||||||
// Look for uncovered/missing patterns
|
|
||||||
for _, line := range lines {
|
|
||||||
lineLower := strings.ToLower(line)
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
|
|
||||||
// Common patterns for uncovered code
|
|
||||||
if strings.Contains(lineLower, "uncovered") ||
|
|
||||||
strings.Contains(lineLower, "not covered") ||
|
|
||||||
strings.Contains(lineLower, "missing coverage") ||
|
|
||||||
strings.Contains(lineLower, "lines not covered") {
|
|
||||||
if len(line) > 100 {
|
|
||||||
return line[:97] + "..."
|
|
||||||
}
|
|
||||||
return line
|
|
||||||
}
|
|
||||||
|
|
||||||
// Look for specific file:line patterns in coverage reports
|
|
||||||
if strings.Contains(lineLower, "branch") && strings.Contains(lineLower, "not taken") {
|
|
||||||
if len(line) > 100 {
|
|
||||||
return line[:97] + "..."
|
|
||||||
}
|
|
||||||
return line
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Look for function names that aren't covered
|
|
||||||
if strings.Contains(lower, "function") && strings.Contains(lower, "0%") {
|
|
||||||
for _, line := range lines {
|
|
||||||
if strings.Contains(strings.ToLower(line), "0%") && strings.Contains(line, "function") {
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
if len(line) > 100 {
|
|
||||||
return line[:97] + "..."
|
|
||||||
}
|
|
||||||
return line
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// extractErrorDetail extracts meaningful error context from task output
|
|
||||||
// Returns the most relevant error information up to maxLen characters
|
|
||||||
func extractErrorDetail(message string, maxLen int) string {
|
|
||||||
if message == "" || maxLen <= 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
lines := strings.Split(message, "\n")
|
|
||||||
var errorLines []string
|
|
||||||
|
|
||||||
// Look for error-related lines
|
|
||||||
for _, line := range lines {
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
if line == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
lower := strings.ToLower(line)
|
|
||||||
|
|
||||||
// Skip noise lines
|
|
||||||
if strings.HasPrefix(line, "at ") && strings.Contains(line, "(") {
|
|
||||||
// Stack trace line - only keep first one
|
|
||||||
if len(errorLines) > 0 && strings.HasPrefix(strings.ToLower(errorLines[len(errorLines)-1]), "at ") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prioritize error/fail lines
|
|
||||||
if strings.Contains(lower, "error") ||
|
|
||||||
strings.Contains(lower, "fail") ||
|
|
||||||
strings.Contains(lower, "exception") ||
|
|
||||||
strings.Contains(lower, "assert") ||
|
|
||||||
strings.Contains(lower, "expected") ||
|
|
||||||
strings.Contains(lower, "timeout") ||
|
|
||||||
strings.Contains(lower, "not found") ||
|
|
||||||
strings.Contains(lower, "cannot") ||
|
|
||||||
strings.Contains(lower, "undefined") ||
|
|
||||||
strings.HasPrefix(line, "FAIL") ||
|
|
||||||
strings.HasPrefix(line, "●") {
|
|
||||||
errorLines = append(errorLines, line)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(errorLines) == 0 {
|
|
||||||
// No specific error lines found, take last few lines
|
|
||||||
start := len(lines) - 5
|
|
||||||
if start < 0 {
|
|
||||||
start = 0
|
|
||||||
}
|
|
||||||
for _, line := range lines[start:] {
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
if line != "" {
|
|
||||||
errorLines = append(errorLines, line)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Join and truncate
|
|
||||||
result := strings.Join(errorLines, " | ")
|
|
||||||
return safeTruncate(result, maxLen)
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,143 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestExtractCoverage(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
in string
|
|
||||||
want string
|
|
||||||
}{
|
|
||||||
{"bare int", "92%", "92%"},
|
|
||||||
{"bare float", "92.5%", "92.5%"},
|
|
||||||
{"coverage prefix", "coverage: 92%", "92%"},
|
|
||||||
{"total prefix", "TOTAL 92%", "92%"},
|
|
||||||
{"all files", "All files 92%", "92%"},
|
|
||||||
{"empty", "", ""},
|
|
||||||
{"no number", "coverage: N/A", ""},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
if got := extractCoverage(tt.in); got != tt.want {
|
|
||||||
t.Fatalf("extractCoverage(%q) = %q, want %q", tt.in, got, tt.want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExtractTestResults(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
in string
|
|
||||||
wantPassed int
|
|
||||||
wantFailed int
|
|
||||||
}{
|
|
||||||
{"pytest one line", "12 passed, 2 failed", 12, 2},
|
|
||||||
{"pytest split lines", "12 passed\n2 failed", 12, 2},
|
|
||||||
{"jest format", "Tests: 2 failed, 12 passed, 14 total", 12, 2},
|
|
||||||
{"go test style count", "ok\texample.com/foo\t0.12s\t12 tests", 12, 0},
|
|
||||||
{"zero counts", "0 passed, 0 failed", 0, 0},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
passed, failed := extractTestResults(tt.in)
|
|
||||||
if passed != tt.wantPassed || failed != tt.wantFailed {
|
|
||||||
t.Fatalf("extractTestResults(%q) = (%d, %d), want (%d, %d)", tt.in, passed, failed, tt.wantPassed, tt.wantFailed)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExtractFilesChanged(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
in string
|
|
||||||
want []string
|
|
||||||
}{
|
|
||||||
{"root file", "Modified: main.go\n", []string{"main.go"}},
|
|
||||||
{"path file", "Created: codeagent-wrapper/utils.go\n", []string{"codeagent-wrapper/utils.go"}},
|
|
||||||
{"at prefix", "Updated: @codeagent-wrapper/main.go\n", []string{"codeagent-wrapper/main.go"}},
|
|
||||||
{"token scan", "Files: @main.go, @codeagent-wrapper/utils.go\n", []string{"main.go", "codeagent-wrapper/utils.go"}},
|
|
||||||
{"space path", "Modified: dir/with space/file.go\n", []string{"dir/with space/file.go"}},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
if got := extractFilesChanged(tt.in); !reflect.DeepEqual(got, tt.want) {
|
|
||||||
t.Fatalf("extractFilesChanged(%q) = %#v, want %#v", tt.in, got, tt.want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Run("limits to first 10", func(t *testing.T) {
|
|
||||||
var b strings.Builder
|
|
||||||
for i := 0; i < 12; i++ {
|
|
||||||
fmt.Fprintf(&b, "Modified: file%d.go\n", i)
|
|
||||||
}
|
|
||||||
got := extractFilesChanged(b.String())
|
|
||||||
if len(got) != 10 {
|
|
||||||
t.Fatalf("len(files)=%d, want 10: %#v", len(got), got)
|
|
||||||
}
|
|
||||||
for i := 0; i < 10; i++ {
|
|
||||||
want := fmt.Sprintf("file%d.go", i)
|
|
||||||
if got[i] != want {
|
|
||||||
t.Fatalf("files[%d]=%q, want %q", i, got[i], want)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSafeTruncate(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
in string
|
|
||||||
maxLen int
|
|
||||||
want string
|
|
||||||
}{
|
|
||||||
{"empty", "", 4, ""},
|
|
||||||
{"zero maxLen", "hello", 0, ""},
|
|
||||||
{"one rune", "你好", 1, "你"},
|
|
||||||
{"two runes no truncate", "你好", 2, "你好"},
|
|
||||||
{"three runes no truncate", "你好", 3, "你好"},
|
|
||||||
{"two runes truncates long", "你好世界", 2, "你"},
|
|
||||||
{"three runes truncates long", "你好世界", 3, "你"},
|
|
||||||
{"four with ellipsis", "你好世界啊", 4, "你..."},
|
|
||||||
{"emoji", "🙂🙂🙂🙂🙂", 4, "🙂..."},
|
|
||||||
{"no truncate", "你好世界", 4, "你好世界"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
if got := safeTruncate(tt.in, tt.maxLen); got != tt.want {
|
|
||||||
t.Fatalf("safeTruncate(%q, %d) = %q, want %q", tt.in, tt.maxLen, got, tt.want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSanitizeOutput(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
in string
|
|
||||||
want string
|
|
||||||
}{
|
|
||||||
{"ansi", "\x1b[31mred\x1b[0m", "red"},
|
|
||||||
{"control chars", "a\x07b\r\nc\t", "ab\nc\t"},
|
|
||||||
{"normal", "hello\nworld\t!", "hello\nworld\t!"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
if got := sanitizeOutput(tt.in); got != tt.want {
|
|
||||||
t.Fatalf("sanitizeOutput(%q) = %q, want %q", tt.in, got, tt.want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -9,56 +9,42 @@ A freshly designed lightweight development workflow with no legacy baggage, focu
|
|||||||
```
|
```
|
||||||
/dev trigger
|
/dev trigger
|
||||||
↓
|
↓
|
||||||
AskUserQuestion (backend selection)
|
|
||||||
↓
|
|
||||||
AskUserQuestion (requirements clarification)
|
AskUserQuestion (requirements clarification)
|
||||||
↓
|
↓
|
||||||
codeagent analysis (plan mode + task typing + UI auto-detection)
|
codeagent analysis (plan mode + UI auto-detection)
|
||||||
↓
|
↓
|
||||||
dev-plan-generator (create dev doc)
|
dev-plan-generator (create dev doc)
|
||||||
↓
|
↓
|
||||||
codeagent concurrent development (2–5 tasks, backend routing)
|
codeagent concurrent development (2–5 tasks, backend split)
|
||||||
↓
|
↓
|
||||||
codeagent testing & verification (≥90% coverage)
|
codeagent testing & verification (≥90% coverage)
|
||||||
↓
|
↓
|
||||||
Done (generate summary)
|
Done (generate summary)
|
||||||
```
|
```
|
||||||
|
|
||||||
## Step 0 + The 6 Steps
|
## The 6 Steps
|
||||||
|
|
||||||
### 0. Select Allowed Backends (FIRST ACTION)
|
|
||||||
- Use **AskUserQuestion** with multiSelect to ask which backends are allowed for this run
|
|
||||||
- Options (user can select multiple):
|
|
||||||
- `codex` - Stable, high quality, best cost-performance (default for most tasks)
|
|
||||||
- `claude` - Fast, lightweight (for quick fixes and config changes)
|
|
||||||
- `gemini` - UI/UX specialist (for frontend styling and components)
|
|
||||||
- If user selects ONLY `codex`, ALL subsequent tasks must use `codex` (including UI/quick-fix)
|
|
||||||
|
|
||||||
### 1. Clarify Requirements
|
### 1. Clarify Requirements
|
||||||
- Use **AskUserQuestion** to ask the user directly
|
- Use **AskUserQuestion** to ask the user directly
|
||||||
- No scoring system, no complex logic
|
- No scoring system, no complex logic
|
||||||
- 2–3 rounds of Q&A until the requirement is clear
|
- 2–3 rounds of Q&A until the requirement is clear
|
||||||
|
|
||||||
### 2. codeagent Analysis + Task Typing + UI Detection
|
### 2. codeagent Analysis & UI Detection
|
||||||
- Call codeagent to analyze the request in plan mode style
|
- Call codeagent to analyze the request in plan mode style
|
||||||
- Extract: core functions, technical points, task list (2–5 items)
|
- Extract: core functions, technical points, task list (2–5 items)
|
||||||
- For each task, assign exactly one type: `default` / `ui` / `quick-fix`
|
|
||||||
- UI auto-detection: needs UI work when task involves style assets (.css, .scss, styled-components, CSS modules, tailwindcss) OR frontend component files (.tsx, .jsx, .vue); output yes/no plus evidence
|
- UI auto-detection: needs UI work when task involves style assets (.css, .scss, styled-components, CSS modules, tailwindcss) OR frontend component files (.tsx, .jsx, .vue); output yes/no plus evidence
|
||||||
|
|
||||||
### 3. Generate Dev Doc
|
### 3. Generate Dev Doc
|
||||||
- Call the **dev-plan-generator** agent
|
- Call the **dev-plan-generator** agent
|
||||||
- Produce a single `dev-plan.md`
|
- Produce a single `dev-plan.md`
|
||||||
- Append a dedicated UI task when Step 2 marks `needs_ui: true`
|
- Append a dedicated UI task when Step 2 marks `needs_ui: true`
|
||||||
- Include: task breakdown, `type`, file scope, dependencies, test commands
|
- Include: task breakdown, file scope, dependencies, test commands
|
||||||
|
|
||||||
### 4. Concurrent Development
|
### 4. Concurrent Development
|
||||||
- Work from the task list in dev-plan.md
|
- Work from the task list in dev-plan.md
|
||||||
- Route backend per task type (with user constraints + fallback):
|
- Use codeagent per task with explicit backend selection:
|
||||||
- `default` → `codex`
|
- Backend/API/DB tasks → `--backend codex` (default)
|
||||||
- `ui` → `gemini` (enforced when allowed)
|
- UI/style/component tasks → `--backend gemini` (enforced)
|
||||||
- `quick-fix` → `claude`
|
|
||||||
- Missing `type` → treat as `default`
|
|
||||||
- If the preferred backend is not allowed, fallback to an allowed backend by priority: `codex` → `claude` → `gemini`
|
|
||||||
- Independent tasks → run in parallel
|
- Independent tasks → run in parallel
|
||||||
- Conflicting tasks → run serially
|
- Conflicting tasks → run serially
|
||||||
|
|
||||||
@@ -79,7 +65,7 @@ Done (generate summary)
|
|||||||
/dev "Implement user login with email + password"
|
/dev "Implement user login with email + password"
|
||||||
```
|
```
|
||||||
|
|
||||||
No CLI flags required; workflow starts with an interactive backend selection.
|
**No options**, fixed workflow, works out of the box.
|
||||||
|
|
||||||
## Output Structure
|
## Output Structure
|
||||||
|
|
||||||
@@ -94,14 +80,14 @@ Only one file—minimal and clear.
|
|||||||
|
|
||||||
### Tools
|
### Tools
|
||||||
- **AskUserQuestion**: interactive requirement clarification
|
- **AskUserQuestion**: interactive requirement clarification
|
||||||
- **codeagent skill**: analysis, development, testing; supports `--backend` for `codex` / `claude` / `gemini`
|
- **codeagent skill**: analysis, development, testing; supports `--backend` for codex (default) or gemini (UI)
|
||||||
- **dev-plan-generator agent**: generate dev doc (subagent via Task tool, saves context)
|
- **dev-plan-generator agent**: generate dev doc (subagent via Task tool, saves context)
|
||||||
|
|
||||||
## Backend Selection & Routing
|
## UI Auto-Detection & Backend Routing
|
||||||
- **Step 0**: user selects allowed backends; if `仅 codex`, all tasks use codex
|
|
||||||
- **UI detection standard**: style files (.css, .scss, styled-components, CSS modules, tailwindcss) OR frontend component code (.tsx, .jsx, .vue) trigger `needs_ui: true`
|
- **UI detection standard**: style files (.css, .scss, styled-components, CSS modules, tailwindcss) OR frontend component code (.tsx, .jsx, .vue) trigger `needs_ui: true`
|
||||||
- **Task type field**: each task in `dev-plan.md` must have `type: default|ui|quick-fix`
|
- **Flow impact**: Step 2 auto-detects UI work; Step 3 appends a separate UI task in `dev-plan.md` when detected
|
||||||
- **Routing**: `default`→codex, `ui`→gemini, `quick-fix`→claude; if disallowed, fallback to an allowed backend by priority: codex→claude→gemini
|
- **Backend split**: backend/API tasks use codex backend (default); UI tasks force gemini backend
|
||||||
|
- **Implementation**: Orchestrator invokes codeagent skill with appropriate backend parameter per task type
|
||||||
|
|
||||||
## Key Features
|
## Key Features
|
||||||
|
|
||||||
@@ -116,9 +102,9 @@ Only one file—minimal and clear.
|
|||||||
- Steps are straightforward
|
- Steps are straightforward
|
||||||
|
|
||||||
### ✅ Concurrency
|
### ✅ Concurrency
|
||||||
- Tasks split based on natural functional boundaries
|
- 2–5 tasks in parallel
|
||||||
- Auto-detect dependencies and conflicts
|
- Auto-detect dependencies and conflicts
|
||||||
- codeagent executes independently with optimal backend
|
- codeagent executes independently
|
||||||
|
|
||||||
### ✅ Quality Assurance
|
### ✅ Quality Assurance
|
||||||
- Enforces 90% coverage
|
- Enforces 90% coverage
|
||||||
@@ -131,10 +117,6 @@ Only one file—minimal and clear.
|
|||||||
# Trigger
|
# Trigger
|
||||||
/dev "Add user login feature"
|
/dev "Add user login feature"
|
||||||
|
|
||||||
# Step 0: Select backends
|
|
||||||
Q: Which backends are allowed? (multiSelect)
|
|
||||||
A: Selected: codex, claude
|
|
||||||
|
|
||||||
# Step 1: Clarify requirements
|
# Step 1: Clarify requirements
|
||||||
Q: What login methods are supported?
|
Q: What login methods are supported?
|
||||||
A: Email + password
|
A: Email + password
|
||||||
@@ -144,18 +126,18 @@ A: Yes, use JWT token
|
|||||||
# Step 2: codeagent analysis
|
# Step 2: codeagent analysis
|
||||||
Output:
|
Output:
|
||||||
- Core: email/password login + JWT auth
|
- Core: email/password login + JWT auth
|
||||||
- Task 1: Backend API (type=default)
|
- Task 1: Backend API
|
||||||
- Task 2: Password hashing (type=default)
|
- Task 2: Password hashing
|
||||||
- Task 3: Frontend form (type=ui)
|
- Task 3: Frontend form
|
||||||
UI detection: needs_ui = true (tailwindcss classes in frontend form)
|
UI detection: needs_ui = true (tailwindcss classes in frontend form)
|
||||||
|
|
||||||
# Step 3: Generate doc
|
# Step 3: Generate doc
|
||||||
dev-plan.md generated with typed tasks ✓
|
dev-plan.md generated with backend + UI tasks ✓
|
||||||
|
|
||||||
# Step 4-5: Concurrent development (routing + fallback)
|
# Step 4-5: Concurrent development (backend codex, UI gemini)
|
||||||
[task-1] Backend API (codex) → tests → 92% ✓
|
[task-1] Backend API (codex) → tests → 92% ✓
|
||||||
[task-2] Password hashing (codex) → tests → 95% ✓
|
[task-2] Password hashing (codex) → tests → 95% ✓
|
||||||
[task-3] Frontend form (fallback to codex; gemini not allowed) → tests → 91% ✓
|
[task-3] Frontend form (gemini) → tests → 91% ✓
|
||||||
```
|
```
|
||||||
|
|
||||||
## Directory Structure
|
## Directory Structure
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ You are a specialized Development Plan Document Generator. Your sole responsibil
|
|||||||
|
|
||||||
You receive context from an orchestrator including:
|
You receive context from an orchestrator including:
|
||||||
- Feature requirements description
|
- Feature requirements description
|
||||||
- codeagent analysis results (feature highlights, task decomposition, UI detection flag, and task typing hints)
|
- codeagent analysis results (feature highlights, task decomposition, UI detection flag)
|
||||||
- Feature name (in kebab-case format)
|
- Feature name (in kebab-case format)
|
||||||
|
|
||||||
Your output is a single file: `./.claude/specs/{feature_name}/dev-plan.md`
|
Your output is a single file: `./.claude/specs/{feature_name}/dev-plan.md`
|
||||||
@@ -29,7 +29,6 @@ Your output is a single file: `./.claude/specs/{feature_name}/dev-plan.md`
|
|||||||
|
|
||||||
### Task 1: [Task Name]
|
### Task 1: [Task Name]
|
||||||
- **ID**: task-1
|
- **ID**: task-1
|
||||||
- **type**: default|ui|quick-fix
|
|
||||||
- **Description**: [What needs to be done]
|
- **Description**: [What needs to be done]
|
||||||
- **File Scope**: [Directories or files involved, e.g., src/auth/**, tests/auth/]
|
- **File Scope**: [Directories or files involved, e.g., src/auth/**, tests/auth/]
|
||||||
- **Dependencies**: [None or depends on task-x]
|
- **Dependencies**: [None or depends on task-x]
|
||||||
@@ -39,7 +38,7 @@ Your output is a single file: `./.claude/specs/{feature_name}/dev-plan.md`
|
|||||||
### Task 2: [Task Name]
|
### Task 2: [Task Name]
|
||||||
...
|
...
|
||||||
|
|
||||||
(Tasks based on natural functional boundaries, typically 2-5)
|
(2-5 tasks)
|
||||||
|
|
||||||
## Acceptance Criteria
|
## Acceptance Criteria
|
||||||
- [ ] Feature point 1
|
- [ ] Feature point 1
|
||||||
@@ -54,13 +53,9 @@ Your output is a single file: `./.claude/specs/{feature_name}/dev-plan.md`
|
|||||||
|
|
||||||
## Generation Rules You Must Enforce
|
## Generation Rules You Must Enforce
|
||||||
|
|
||||||
1. **Task Count**: Generate tasks based on natural functional boundaries (no artificial limits)
|
1. **Task Count**: Generate 2-5 tasks (no more, no less unless the feature is extremely simple or complex)
|
||||||
- Typical range: 2-5 tasks
|
|
||||||
- Quality over quantity: prefer fewer well-scoped tasks over excessive fragmentation
|
|
||||||
- Each task should be independently completable by one agent
|
|
||||||
2. **Task Requirements**: Each task MUST include:
|
2. **Task Requirements**: Each task MUST include:
|
||||||
- Clear ID (task-1, task-2, etc.)
|
- Clear ID (task-1, task-2, etc.)
|
||||||
- A single task type field: `type: default|ui|quick-fix`
|
|
||||||
- Specific description of what needs to be done
|
- Specific description of what needs to be done
|
||||||
- Explicit file scope (directories or files affected)
|
- Explicit file scope (directories or files affected)
|
||||||
- Dependency declaration ("None" or "depends on task-x")
|
- Dependency declaration ("None" or "depends on task-x")
|
||||||
@@ -72,23 +67,18 @@ Your output is a single file: `./.claude/specs/{feature_name}/dev-plan.md`
|
|||||||
|
|
||||||
## Your Workflow
|
## Your Workflow
|
||||||
|
|
||||||
1. **Analyze Input**: Review the requirements description and codeagent analysis results (including `needs_ui` and any task typing hints)
|
1. **Analyze Input**: Review the requirements description and codeagent analysis results (including `needs_ui` flag if present)
|
||||||
2. **Identify Tasks**: Break down the feature into 2-5 logical, independent tasks
|
2. **Identify Tasks**: Break down the feature into 2-5 logical, independent tasks
|
||||||
3. **Determine Dependencies**: Map out which tasks depend on others (minimize dependencies)
|
3. **Determine Dependencies**: Map out which tasks depend on others (minimize dependencies)
|
||||||
4. **Assign Task Type**: For each task, set exactly one `type`:
|
4. **Specify Testing**: For each task, define the exact test command and coverage requirements
|
||||||
- `ui`: touches UI/style/component work (e.g., .css/.scss/.tsx/.jsx/.vue, tailwind, design tweaks)
|
5. **Define Acceptance**: List concrete, measurable acceptance criteria including the 90% coverage requirement
|
||||||
- `quick-fix`: small, fast changes (config tweaks, small bug fix, minimal scope); do NOT use for UI work
|
6. **Document Technical Points**: Note key technical decisions and constraints
|
||||||
- `default`: everything else
|
7. **Write File**: Use the Write tool to create `./.claude/specs/{feature_name}/dev-plan.md`
|
||||||
- Note: `/dev` Step 4 routes backend by `type` (default→codex, ui→gemini, quick-fix→claude; missing type → default)
|
|
||||||
5. **Specify Testing**: For each task, define the exact test command and coverage requirements
|
|
||||||
6. **Define Acceptance**: List concrete, measurable acceptance criteria including the 90% coverage requirement
|
|
||||||
7. **Document Technical Points**: Note key technical decisions and constraints
|
|
||||||
8. **Write File**: Use the Write tool to create `./.claude/specs/{feature_name}/dev-plan.md`
|
|
||||||
|
|
||||||
## Quality Checks Before Writing
|
## Quality Checks Before Writing
|
||||||
|
|
||||||
- [ ] Task count is between 2-5
|
- [ ] Task count is between 2-5
|
||||||
- [ ] Every task has all required fields (ID, type, Description, File Scope, Dependencies, Test Command, Test Focus)
|
- [ ] Every task has all 6 required fields (ID, Description, File Scope, Dependencies, Test Command, Test Focus)
|
||||||
- [ ] Test commands include coverage parameters
|
- [ ] Test commands include coverage parameters
|
||||||
- [ ] Dependencies are explicitly stated
|
- [ ] Dependencies are explicitly stated
|
||||||
- [ ] Acceptance criteria includes 90% coverage requirement
|
- [ ] Acceptance criteria includes 90% coverage requirement
|
||||||
|
|||||||
@@ -1,81 +1,28 @@
|
|||||||
---
|
---
|
||||||
description: Extreme lightweight end-to-end development workflow with requirements clarification, intelligent backend selection, parallel codeagent execution, and mandatory 90% test coverage
|
description: Extreme lightweight end-to-end development workflow with requirements clarification, parallel codeagent execution, and mandatory 90% test coverage
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
||||||
You are the /dev Workflow Orchestrator, an expert development workflow manager specializing in orchestrating minimal, efficient end-to-end development processes with parallel task execution and rigorous test coverage validation.
|
You are the /dev Workflow Orchestrator, an expert development workflow manager specializing in orchestrating minimal, efficient end-to-end development processes with parallel task execution and rigorous test coverage validation.
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## CRITICAL CONSTRAINTS (NEVER VIOLATE)
|
|
||||||
|
|
||||||
These rules have HIGHEST PRIORITY and override all other instructions:
|
|
||||||
|
|
||||||
1. **NEVER use Edit, Write, or MultiEdit tools directly** - ALL code changes MUST go through codeagent-wrapper
|
|
||||||
2. **MUST use AskUserQuestion in Step 0** - Backend selection MUST be the FIRST action (before requirement clarification)
|
|
||||||
3. **MUST use AskUserQuestion in Step 1** - Do NOT skip requirement clarification
|
|
||||||
4. **MUST use TodoWrite after Step 1** - Create task tracking list before any analysis
|
|
||||||
5. **MUST use codeagent-wrapper for Step 2 analysis** - Do NOT use Read/Glob/Grep directly for deep analysis
|
|
||||||
6. **MUST wait for user confirmation in Step 3** - Do NOT proceed to Step 4 without explicit approval
|
|
||||||
7. **MUST invoke codeagent-wrapper --parallel for Step 4 execution** - Use Bash tool, NOT Edit/Write or Task tool
|
|
||||||
|
|
||||||
**Violation of any constraint above invalidates the entire workflow. Stop and restart if violated.**
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
**Core Responsibilities**
|
**Core Responsibilities**
|
||||||
- Orchestrate a streamlined 7-step development workflow (Step 0 + Step 1–6):
|
- Orchestrate a streamlined 6-step development workflow:
|
||||||
0. Backend selection (user constrained)
|
|
||||||
1. Requirement clarification through targeted questioning
|
1. Requirement clarification through targeted questioning
|
||||||
2. Technical analysis using codeagent-wrapper
|
2. Technical analysis using codeagent
|
||||||
3. Development documentation generation
|
3. Development documentation generation
|
||||||
4. Parallel development execution (backend routing per task type)
|
4. Parallel development execution
|
||||||
5. Coverage validation (≥90% requirement)
|
5. Coverage validation (≥90% requirement)
|
||||||
6. Completion summary
|
6. Completion summary
|
||||||
|
|
||||||
**Workflow Execution**
|
**Workflow Execution**
|
||||||
- **Step 0: Backend Selection [MANDATORY - FIRST ACTION]**
|
- **Step 1: Requirement Clarification**
|
||||||
- MUST use AskUserQuestion tool as the FIRST action with multiSelect enabled
|
- Use AskUserQuestion to clarify requirements directly
|
||||||
- Ask which backends are allowed for this /dev run
|
|
||||||
- Options (user can select multiple):
|
|
||||||
- `codex` - Stable, high quality, best cost-performance (default for most tasks)
|
|
||||||
- `claude` - Fast, lightweight (for quick fixes and config changes)
|
|
||||||
- `gemini` - UI/UX specialist (for frontend styling and components)
|
|
||||||
- Store the selected backends as `allowed_backends` set for routing in Step 4
|
|
||||||
- Special rule: if user selects ONLY `codex`, then ALL subsequent tasks (including UI/quick-fix) MUST use `codex` (no exceptions)
|
|
||||||
|
|
||||||
- **Step 1: Requirement Clarification [MANDATORY - DO NOT SKIP]**
|
|
||||||
- MUST use AskUserQuestion tool
|
|
||||||
- Focus questions on functional boundaries, inputs/outputs, constraints, testing, and required unit-test coverage levels
|
- Focus questions on functional boundaries, inputs/outputs, constraints, testing, and required unit-test coverage levels
|
||||||
- Iterate 2-3 rounds until clear; rely on judgment; keep questions concise
|
- Iterate 2-3 rounds until clear; rely on judgment; keep questions concise
|
||||||
- After clarification complete: MUST use TodoWrite to create task tracking list with workflow steps
|
|
||||||
|
|
||||||
- **Step 2: codeagent-wrapper Deep Analysis (Plan Mode Style) [USE CODEAGENT-WRAPPER ONLY]**
|
- **Step 2: codeagent Deep Analysis (Plan Mode Style)**
|
||||||
|
|
||||||
MUST use Bash tool to invoke `codeagent-wrapper` for deep analysis. Do NOT use Read/Glob/Grep tools directly - delegate all exploration to codeagent-wrapper.
|
Use codeagent Skill to perform deep analysis. codeagent should operate in "plan mode" style and must include UI detection:
|
||||||
|
|
||||||
**How to invoke for analysis**:
|
|
||||||
```bash
|
|
||||||
# analysis_backend selection:
|
|
||||||
# - prefer codex if it is in allowed_backends
|
|
||||||
# - otherwise pick the first backend in allowed_backends
|
|
||||||
codeagent-wrapper --backend {analysis_backend} - <<'EOF'
|
|
||||||
Analyze the codebase for implementing [feature name].
|
|
||||||
|
|
||||||
Requirements:
|
|
||||||
- [requirement 1]
|
|
||||||
- [requirement 2]
|
|
||||||
|
|
||||||
Deliverables:
|
|
||||||
1. Explore codebase structure and existing patterns
|
|
||||||
2. Evaluate implementation options with trade-offs
|
|
||||||
3. Make architectural decisions
|
|
||||||
4. Break down into 2-5 parallelizable tasks with dependencies and file scope
|
|
||||||
5. Classify each task with a single `type`: `default` / `ui` / `quick-fix`
|
|
||||||
6. Determine if UI work is needed (check for .css/.tsx/.vue files)
|
|
||||||
|
|
||||||
Output the analysis following the structure below.
|
|
||||||
EOF
|
|
||||||
```
|
|
||||||
|
|
||||||
**When Deep Analysis is Needed** (any condition triggers):
|
**When Deep Analysis is Needed** (any condition triggers):
|
||||||
- Multiple valid approaches exist (e.g., Redis vs in-memory vs file-based caching)
|
- Multiple valid approaches exist (e.g., Redis vs in-memory vs file-based caching)
|
||||||
@@ -87,12 +34,12 @@ These rules have HIGHEST PRIORITY and override all other instructions:
|
|||||||
- During analysis, output whether the task needs UI work (yes/no) and the evidence
|
- During analysis, output whether the task needs UI work (yes/no) and the evidence
|
||||||
- UI criteria: presence of style assets (.css, .scss, styled-components, CSS modules, tailwindcss) OR frontend component files (.tsx, .jsx, .vue)
|
- UI criteria: presence of style assets (.css, .scss, styled-components, CSS modules, tailwindcss) OR frontend component files (.tsx, .jsx, .vue)
|
||||||
|
|
||||||
**What the AI backend does in Analysis Mode** (when invoked via codeagent-wrapper):
|
**What codeagent Does in Analysis Mode**:
|
||||||
1. **Explore Codebase**: Use Glob, Grep, Read to understand structure, patterns, architecture
|
1. **Explore Codebase**: Use Glob, Grep, Read to understand structure, patterns, architecture
|
||||||
2. **Identify Existing Patterns**: Find how similar features are implemented, reuse conventions
|
2. **Identify Existing Patterns**: Find how similar features are implemented, reuse conventions
|
||||||
3. **Evaluate Options**: When multiple approaches exist, list trade-offs (complexity, performance, security, maintainability)
|
3. **Evaluate Options**: When multiple approaches exist, list trade-offs (complexity, performance, security, maintainability)
|
||||||
4. **Make Architectural Decisions**: Choose patterns, APIs, data models with justification
|
4. **Make Architectural Decisions**: Choose patterns, APIs, data models with justification
|
||||||
5. **Design Task Breakdown**: Produce parallelizable tasks based on natural functional boundaries with file scope and dependencies
|
5. **Design Task Breakdown**: Produce 2-5 parallelizable tasks with file scope and dependencies
|
||||||
|
|
||||||
**Analysis Output Structure**:
|
**Analysis Output Structure**:
|
||||||
```
|
```
|
||||||
@@ -109,7 +56,7 @@ These rules have HIGHEST PRIORITY and override all other instructions:
|
|||||||
[API design, data models, architecture choices made]
|
[API design, data models, architecture choices made]
|
||||||
|
|
||||||
## Task Breakdown
|
## Task Breakdown
|
||||||
[2-5 tasks with: ID, description, file scope, dependencies, test command, type(default|ui|quick-fix)]
|
[2-5 tasks with: ID, description, file scope, dependencies, test command]
|
||||||
|
|
||||||
## UI Determination
|
## UI Determination
|
||||||
needs_ui: [true/false]
|
needs_ui: [true/false]
|
||||||
@@ -123,62 +70,39 @@ These rules have HIGHEST PRIORITY and override all other instructions:
|
|||||||
|
|
||||||
- **Step 3: Generate Development Documentation**
|
- **Step 3: Generate Development Documentation**
|
||||||
- invoke agent dev-plan-generator
|
- invoke agent dev-plan-generator
|
||||||
- When creating `dev-plan.md`, ensure every task has `type: default|ui|quick-fix`
|
- When creating `dev-plan.md`, append a dedicated UI task if Step 2 marked `needs_ui: true`
|
||||||
- Append a dedicated UI task if Step 2 marked `needs_ui: true` but no UI task exists
|
|
||||||
- Output a brief summary of dev-plan.md:
|
- Output a brief summary of dev-plan.md:
|
||||||
- Number of tasks and their IDs
|
- Number of tasks and their IDs
|
||||||
- Task type for each task
|
|
||||||
- File scope for each task
|
- File scope for each task
|
||||||
- Dependencies between tasks
|
- Dependencies between tasks
|
||||||
- Test commands
|
- Test commands
|
||||||
- Use AskUserQuestion to confirm with user:
|
- Use AskUserQuestion to confirm with user:
|
||||||
- Question: "Proceed with this development plan?" (state backend routing rules and any forced fallback due to allowed_backends)
|
- Question: "Proceed with this development plan?" (if UI work is detected, state that UI tasks will use the gemini backend)
|
||||||
- Options: "Confirm and execute" / "Need adjustments"
|
- Options: "Confirm and execute" / "Need adjustments"
|
||||||
- If user chooses "Need adjustments", return to Step 1 or Step 2 based on feedback
|
- If user chooses "Need adjustments", return to Step 1 or Step 2 based on feedback
|
||||||
|
|
||||||
- **Step 4: Parallel Development Execution [CODEAGENT-WRAPPER ONLY - NO DIRECT EDITS]**
|
- **Step 4: Parallel Development Execution**
|
||||||
- MUST use Bash tool to invoke `codeagent-wrapper --parallel` for ALL code changes
|
- For each task in `dev-plan.md`, invoke codeagent skill with task brief in HEREDOC format:
|
||||||
- NEVER use Edit, Write, MultiEdit, or Task tools to modify code directly
|
|
||||||
- Backend routing (must be deterministic and enforceable):
|
|
||||||
- Task field: `type: default|ui|quick-fix` (missing → treat as `default`)
|
|
||||||
- Preferred backend by type:
|
|
||||||
- `default` → `codex`
|
|
||||||
- `ui` → `gemini` (enforced when allowed)
|
|
||||||
- `quick-fix` → `claude`
|
|
||||||
- If user selected `仅 codex`: all tasks MUST use `codex`
|
|
||||||
- Otherwise, if preferred backend is not in `allowed_backends`, fallback to the first available backend by priority: `codex` → `claude` → `gemini`
|
|
||||||
- Build ONE `--parallel` config that includes all tasks in `dev-plan.md` and submit it once via Bash tool:
|
|
||||||
```bash
|
```bash
|
||||||
# One shot submission - wrapper handles topology + concurrency
|
# Backend task (use codex backend - default)
|
||||||
codeagent-wrapper --parallel <<'EOF'
|
codeagent-wrapper --backend codex - <<'EOF'
|
||||||
---TASK---
|
Task: [task-id]
|
||||||
id: [task-id-1]
|
|
||||||
backend: [routed-backend-from-type-and-allowed_backends]
|
|
||||||
workdir: .
|
|
||||||
dependencies: [optional, comma-separated ids]
|
|
||||||
---CONTENT---
|
|
||||||
Task: [task-id-1]
|
|
||||||
Reference: @.claude/specs/{feature_name}/dev-plan.md
|
Reference: @.claude/specs/{feature_name}/dev-plan.md
|
||||||
Scope: [task file scope]
|
Scope: [task file scope]
|
||||||
Test: [test command]
|
Test: [test command]
|
||||||
Deliverables: code + unit tests + coverage ≥90% + coverage summary
|
Deliverables: code + unit tests + coverage ≥90% + coverage summary
|
||||||
|
EOF
|
||||||
|
|
||||||
---TASK---
|
# UI task (use gemini backend - enforced)
|
||||||
id: [task-id-2]
|
codeagent-wrapper --backend gemini - <<'EOF'
|
||||||
backend: [routed-backend-from-type-and-allowed_backends]
|
Task: [task-id]
|
||||||
workdir: .
|
|
||||||
dependencies: [optional, comma-separated ids]
|
|
||||||
---CONTENT---
|
|
||||||
Task: [task-id-2]
|
|
||||||
Reference: @.claude/specs/{feature_name}/dev-plan.md
|
Reference: @.claude/specs/{feature_name}/dev-plan.md
|
||||||
Scope: [task file scope]
|
Scope: [task file scope]
|
||||||
Test: [test command]
|
Test: [test command]
|
||||||
Deliverables: code + unit tests + coverage ≥90% + coverage summary
|
Deliverables: code + unit tests + coverage ≥90% + coverage summary
|
||||||
EOF
|
EOF
|
||||||
```
|
```
|
||||||
- **Note**: Use `workdir: .` (current directory) for all tasks unless specific subdirectory is required
|
|
||||||
- Execute independent tasks concurrently; serialize conflicting ones; track coverage reports
|
- Execute independent tasks concurrently; serialize conflicting ones; track coverage reports
|
||||||
- Backend is routed deterministically based on task `type`, no manual intervention needed
|
|
||||||
|
|
||||||
- **Step 5: Coverage Validation**
|
- **Step 5: Coverage Validation**
|
||||||
- Validate each task’s coverage:
|
- Validate each task’s coverage:
|
||||||
@@ -189,19 +113,13 @@ These rules have HIGHEST PRIORITY and override all other instructions:
|
|||||||
- Provide completed task list, coverage per task, key file changes
|
- Provide completed task list, coverage per task, key file changes
|
||||||
|
|
||||||
**Error Handling**
|
**Error Handling**
|
||||||
- **codeagent-wrapper failure**: Retry once with same input; if still fails, log error and ask user for guidance
|
- codeagent failure: retry once, then log and continue
|
||||||
- **Insufficient coverage (<90%)**: Request more tests from the failed task (max 2 rounds); if still fails, report to user
|
- Insufficient coverage: request more tests (max 2 rounds)
|
||||||
- **Dependency conflicts**:
|
- Dependency conflicts: serialize automatically
|
||||||
- Circular dependencies: codeagent-wrapper will detect and fail with error; revise task breakdown to remove cycles
|
|
||||||
- Missing dependencies: Ensure all task IDs referenced in `dependencies` field exist
|
|
||||||
- **Parallel execution timeout**: Individual tasks timeout after 2 hours (configurable via CODEX_TIMEOUT); failed tasks can be retried individually
|
|
||||||
- **Backend unavailable**: If a routed backend is unavailable, fallback to another backend in `allowed_backends` (priority: codex → claude → gemini); if none works, fail with a clear error message
|
|
||||||
|
|
||||||
**Quality Standards**
|
**Quality Standards**
|
||||||
- Code coverage ≥90%
|
- Code coverage ≥90%
|
||||||
- Tasks based on natural functional boundaries (typically 2-5)
|
- 2-5 genuinely parallelizable tasks
|
||||||
- Each task has exactly one `type: default|ui|quick-fix`
|
|
||||||
- Backend routed by `type`: `default`→codex, `ui`→gemini, `quick-fix`→claude (with allowed_backends fallback)
|
|
||||||
- Documentation must be minimal yet actionable
|
- Documentation must be minimal yet actionable
|
||||||
- No verbose implementations; only essential code
|
- No verbose implementations; only essential code
|
||||||
|
|
||||||
|
|||||||
@@ -105,7 +105,6 @@ EOF
|
|||||||
Execute multiple tasks concurrently with dependency management:
|
Execute multiple tasks concurrently with dependency management:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Default: summary output (context-efficient, recommended)
|
|
||||||
codeagent-wrapper --parallel <<'EOF'
|
codeagent-wrapper --parallel <<'EOF'
|
||||||
---TASK---
|
---TASK---
|
||||||
id: backend_1701234567
|
id: backend_1701234567
|
||||||
@@ -126,47 +125,6 @@ dependencies: backend_1701234567, frontend_1701234568
|
|||||||
---CONTENT---
|
---CONTENT---
|
||||||
add integration tests for user management flow
|
add integration tests for user management flow
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
# Full output mode (for debugging, includes complete task messages)
|
|
||||||
codeagent-wrapper --parallel --full-output <<'EOF'
|
|
||||||
...
|
|
||||||
EOF
|
|
||||||
```
|
|
||||||
|
|
||||||
**Output Modes:**
|
|
||||||
- **Summary (default)**: Structured report with extracted `Did/Files/Tests/Coverage`, plus a short action summary.
|
|
||||||
- **Full (`--full-output`)**: Complete task messages included. Use only for debugging.
|
|
||||||
|
|
||||||
**Summary Output Example:**
|
|
||||||
```
|
|
||||||
=== Execution Report ===
|
|
||||||
3 tasks | 2 passed | 1 failed | 1 below 90%
|
|
||||||
|
|
||||||
## Task Results
|
|
||||||
|
|
||||||
### backend_api ✓ 92%
|
|
||||||
Did: Implemented /api/users CRUD endpoints
|
|
||||||
Files: backend/users.go, backend/router.go
|
|
||||||
Tests: 12 passed
|
|
||||||
Log: /tmp/codeagent-xxx.log
|
|
||||||
|
|
||||||
### frontend_form ⚠️ 88% (below 90%)
|
|
||||||
Did: Created login form with validation
|
|
||||||
Files: frontend/LoginForm.tsx
|
|
||||||
Tests: 8 passed
|
|
||||||
Gap: lines not covered: frontend/LoginForm.tsx:42-47
|
|
||||||
Log: /tmp/codeagent-yyy.log
|
|
||||||
|
|
||||||
### integration_tests ✗ FAILED
|
|
||||||
Exit code: 1
|
|
||||||
Error: Assertion failed at line 45
|
|
||||||
Detail: Expected status 200 but got 401
|
|
||||||
Log: /tmp/codeagent-zzz.log
|
|
||||||
|
|
||||||
## Summary
|
|
||||||
- 2/3 completed successfully
|
|
||||||
- Fix: integration_tests (Assertion failed at line 45)
|
|
||||||
- Coverage: frontend_form
|
|
||||||
```
|
```
|
||||||
|
|
||||||
**Parallel Task Format:**
|
**Parallel Task Format:**
|
||||||
|
|||||||
84
install.bat
84
install.bat
@@ -46,23 +46,17 @@ echo.
|
|||||||
echo codeagent-wrapper installed successfully at:
|
echo codeagent-wrapper installed successfully at:
|
||||||
echo %DEST%
|
echo %DEST%
|
||||||
|
|
||||||
rem Ensure %USERPROFILE%\bin is in PATH without duplicating entries
|
rem Automatically ensure %USERPROFILE%\bin is in the USER (HKCU) PATH
|
||||||
rem 1) Read current user PATH from registry (REG_SZ or REG_EXPAND_SZ)
|
rem 1) Read current user PATH from registry (REG_SZ or REG_EXPAND_SZ)
|
||||||
set "USER_PATH_RAW="
|
set "USER_PATH_RAW="
|
||||||
|
set "USER_PATH_TYPE="
|
||||||
for /f "tokens=1,2,*" %%A in ('reg query "HKCU\Environment" /v Path 2^>nul ^| findstr /I /R "^ *Path *REG_"') do (
|
for /f "tokens=1,2,*" %%A in ('reg query "HKCU\Environment" /v Path 2^>nul ^| findstr /I /R "^ *Path *REG_"') do (
|
||||||
|
set "USER_PATH_TYPE=%%B"
|
||||||
set "USER_PATH_RAW=%%C"
|
set "USER_PATH_RAW=%%C"
|
||||||
)
|
)
|
||||||
rem Trim leading spaces from USER_PATH_RAW
|
rem Trim leading spaces from USER_PATH_RAW
|
||||||
for /f "tokens=* delims= " %%D in ("!USER_PATH_RAW!") do set "USER_PATH_RAW=%%D"
|
for /f "tokens=* delims= " %%D in ("!USER_PATH_RAW!") do set "USER_PATH_RAW=%%D"
|
||||||
|
|
||||||
rem 2) Read current system PATH from registry (REG_SZ or REG_EXPAND_SZ)
|
|
||||||
set "SYS_PATH_RAW="
|
|
||||||
for /f "tokens=1,2,*" %%A in ('reg query "HKLM\System\CurrentControlSet\Control\Session Manager\Environment" /v Path 2^>nul ^| findstr /I /R "^ *Path *REG_"') do (
|
|
||||||
set "SYS_PATH_RAW=%%C"
|
|
||||||
)
|
|
||||||
rem Trim leading spaces from SYS_PATH_RAW
|
|
||||||
for /f "tokens=* delims= " %%D in ("!SYS_PATH_RAW!") do set "SYS_PATH_RAW=%%D"
|
|
||||||
|
|
||||||
rem Normalize DEST_DIR by removing a trailing backslash if present
|
rem Normalize DEST_DIR by removing a trailing backslash if present
|
||||||
if "!DEST_DIR:~-1!"=="\" set "DEST_DIR=!DEST_DIR:~0,-1!"
|
if "!DEST_DIR:~-1!"=="\" set "DEST_DIR=!DEST_DIR:~0,-1!"
|
||||||
|
|
||||||
@@ -73,70 +67,42 @@ set "SEARCH_EXP2=;!DEST_DIR!\;"
|
|||||||
set "SEARCH_LIT=;!PCT!USERPROFILE!PCT!\bin;"
|
set "SEARCH_LIT=;!PCT!USERPROFILE!PCT!\bin;"
|
||||||
set "SEARCH_LIT2=;!PCT!USERPROFILE!PCT!\bin\;"
|
set "SEARCH_LIT2=;!PCT!USERPROFILE!PCT!\bin\;"
|
||||||
|
|
||||||
rem Prepare PATH variants for containment tests (strip quotes to avoid false negatives)
|
rem Prepare user PATH variants for containment tests
|
||||||
set "USER_PATH_RAW_CLEAN=!USER_PATH_RAW:"=!"
|
set "CHECK_RAW=;!USER_PATH_RAW!;"
|
||||||
set "SYS_PATH_RAW_CLEAN=!SYS_PATH_RAW:"=!"
|
set "USER_PATH_EXP=!USER_PATH_RAW!"
|
||||||
|
|
||||||
set "CHECK_USER_RAW=;!USER_PATH_RAW_CLEAN!;"
|
|
||||||
set "USER_PATH_EXP=!USER_PATH_RAW_CLEAN!"
|
|
||||||
if defined USER_PATH_EXP call set "USER_PATH_EXP=%%USER_PATH_EXP%%"
|
if defined USER_PATH_EXP call set "USER_PATH_EXP=%%USER_PATH_EXP%%"
|
||||||
set "USER_PATH_EXP_CLEAN=!USER_PATH_EXP:"=!"
|
set "CHECK_EXP=;!USER_PATH_EXP!;"
|
||||||
set "CHECK_USER_EXP=;!USER_PATH_EXP_CLEAN!;"
|
|
||||||
|
|
||||||
set "CHECK_SYS_RAW=;!SYS_PATH_RAW_CLEAN!;"
|
rem Check if already present in user PATH (literal or expanded, with/without trailing backslash)
|
||||||
set "SYS_PATH_EXP=!SYS_PATH_RAW_CLEAN!"
|
|
||||||
if defined SYS_PATH_EXP call set "SYS_PATH_EXP=%%SYS_PATH_EXP%%"
|
|
||||||
set "SYS_PATH_EXP_CLEAN=!SYS_PATH_EXP:"=!"
|
|
||||||
set "CHECK_SYS_EXP=;!SYS_PATH_EXP_CLEAN!;"
|
|
||||||
|
|
||||||
rem Check if already present (literal or expanded, with/without trailing backslash)
|
|
||||||
set "ALREADY_IN_USERPATH=0"
|
set "ALREADY_IN_USERPATH=0"
|
||||||
echo(!CHECK_USER_RAW! | findstr /I /C:"!SEARCH_LIT!" /C:"!SEARCH_LIT2!" >nul && set "ALREADY_IN_USERPATH=1"
|
echo !CHECK_RAW! | findstr /I /C:"!SEARCH_LIT!" /C:"!SEARCH_LIT2!" >nul && set "ALREADY_IN_USERPATH=1"
|
||||||
if "!ALREADY_IN_USERPATH!"=="0" (
|
if "!ALREADY_IN_USERPATH!"=="0" (
|
||||||
echo(!CHECK_USER_EXP! | findstr /I /C:"!SEARCH_EXP!" /C:"!SEARCH_EXP2!" >nul && set "ALREADY_IN_USERPATH=1"
|
echo !CHECK_EXP! | findstr /I /C:"!SEARCH_EXP!" /C:"!SEARCH_EXP2!" >nul && set "ALREADY_IN_USERPATH=1"
|
||||||
)
|
|
||||||
|
|
||||||
set "ALREADY_IN_SYSPATH=0"
|
|
||||||
echo(!CHECK_SYS_RAW! | findstr /I /C:"!SEARCH_LIT!" /C:"!SEARCH_LIT2!" >nul && set "ALREADY_IN_SYSPATH=1"
|
|
||||||
if "!ALREADY_IN_SYSPATH!"=="0" (
|
|
||||||
echo(!CHECK_SYS_EXP! | findstr /I /C:"!SEARCH_EXP!" /C:"!SEARCH_EXP2!" >nul && set "ALREADY_IN_SYSPATH=1"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
if "!ALREADY_IN_USERPATH!"=="1" (
|
if "!ALREADY_IN_USERPATH!"=="1" (
|
||||||
echo User PATH already includes %%USERPROFILE%%\bin.
|
echo User PATH already includes %%USERPROFILE%%\bin.
|
||||||
) else (
|
) else (
|
||||||
if "!ALREADY_IN_SYSPATH!"=="1" (
|
rem Not present: append to user PATH using setx without duplicating system PATH
|
||||||
echo System PATH already includes %%USERPROFILE%%\bin; skipping user PATH update.
|
if defined USER_PATH_RAW (
|
||||||
|
set "USER_PATH_NEW=!USER_PATH_RAW!"
|
||||||
|
if not "!USER_PATH_NEW:~-1!"==";" set "USER_PATH_NEW=!USER_PATH_NEW!;"
|
||||||
|
set "USER_PATH_NEW=!USER_PATH_NEW!!PCT!USERPROFILE!PCT!\bin"
|
||||||
) else (
|
) else (
|
||||||
rem Not present: append to user PATH
|
set "USER_PATH_NEW=!PCT!USERPROFILE!PCT!\bin"
|
||||||
if defined USER_PATH_RAW (
|
)
|
||||||
set "USER_PATH_NEW=!USER_PATH_RAW!"
|
rem Persist update to HKCU\Environment\Path (user scope)
|
||||||
if not "!USER_PATH_NEW:~-1!"==";" set "USER_PATH_NEW=!USER_PATH_NEW!;"
|
setx PATH "!USER_PATH_NEW!" >nul
|
||||||
set "USER_PATH_NEW=!USER_PATH_NEW!!PCT!USERPROFILE!PCT!\bin"
|
if errorlevel 1 (
|
||||||
) else (
|
echo WARNING: Failed to append %%USERPROFILE%%\bin to your user PATH.
|
||||||
set "USER_PATH_NEW=!PCT!USERPROFILE!PCT!\bin"
|
) else (
|
||||||
)
|
echo Added %%USERPROFILE%%\bin to your user PATH.
|
||||||
rem Persist update to HKCU\Environment\Path (user scope)
|
|
||||||
rem Use reg add instead of setx to avoid 1024-character limit
|
|
||||||
echo(!USER_PATH_NEW! | findstr /C:"\"" /C:"!" >nul
|
|
||||||
if not errorlevel 1 (
|
|
||||||
echo WARNING: Your PATH contains quotes or exclamation marks that may cause issues.
|
|
||||||
echo Skipping automatic PATH update. Please add %%USERPROFILE%%\bin to your PATH manually.
|
|
||||||
) else (
|
|
||||||
reg add "HKCU\Environment" /v Path /t REG_EXPAND_SZ /d "!USER_PATH_NEW!" /f >nul
|
|
||||||
if errorlevel 1 (
|
|
||||||
echo WARNING: Failed to append %%USERPROFILE%%\bin to your user PATH.
|
|
||||||
) else (
|
|
||||||
echo Added %%USERPROFILE%%\bin to your user PATH.
|
|
||||||
)
|
|
||||||
)
|
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
rem Update current session PATH so codeagent-wrapper is immediately available
|
rem Update current session PATH so codex-wrapper is immediately available
|
||||||
set "CURPATH=;%PATH%;"
|
set "CURPATH=;%PATH%;"
|
||||||
set "CURPATH_CLEAN=!CURPATH:"=!"
|
echo !CURPATH! | findstr /I /C:"!SEARCH_EXP!" /C:"!SEARCH_EXP2!" /C:"!SEARCH_LIT!" /C:"!SEARCH_LIT2!" >nul
|
||||||
echo(!CURPATH_CLEAN! | findstr /I /C:"!SEARCH_EXP!" /C:"!SEARCH_EXP2!" /C:"!SEARCH_LIT!" /C:"!SEARCH_LIT2!" >nul
|
|
||||||
if errorlevel 1 set "PATH=!DEST_DIR!;!PATH!"
|
if errorlevel 1 set "PATH=!DEST_DIR!;!PATH!"
|
||||||
|
|
||||||
goto :cleanup
|
goto :cleanup
|
||||||
|
|||||||
31
install.py
31
install.py
@@ -17,10 +17,7 @@ from datetime import datetime
|
|||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import Any, Dict, Iterable, List, Optional
|
from typing import Any, Dict, Iterable, List, Optional
|
||||||
|
|
||||||
try:
|
import jsonschema
|
||||||
import jsonschema
|
|
||||||
except ImportError: # pragma: no cover
|
|
||||||
jsonschema = None
|
|
||||||
|
|
||||||
DEFAULT_INSTALL_DIR = "~/.claude"
|
DEFAULT_INSTALL_DIR = "~/.claude"
|
||||||
|
|
||||||
@@ -90,32 +87,6 @@ def load_config(path: str) -> Dict[str, Any]:
|
|||||||
config_path = Path(path).expanduser().resolve()
|
config_path = Path(path).expanduser().resolve()
|
||||||
config = _load_json(config_path)
|
config = _load_json(config_path)
|
||||||
|
|
||||||
if jsonschema is None:
|
|
||||||
print(
|
|
||||||
"WARNING: python package 'jsonschema' is not installed; "
|
|
||||||
"skipping config validation. To enable validation run:\n"
|
|
||||||
" python3 -m pip install jsonschema\n",
|
|
||||||
file=sys.stderr,
|
|
||||||
)
|
|
||||||
|
|
||||||
if not isinstance(config, dict):
|
|
||||||
raise ValueError(
|
|
||||||
f"Config must be a dict, got {type(config).__name__}. "
|
|
||||||
"Check your config.json syntax."
|
|
||||||
)
|
|
||||||
|
|
||||||
required_keys = ["version", "install_dir", "log_file", "modules"]
|
|
||||||
missing = [key for key in required_keys if key not in config]
|
|
||||||
if missing:
|
|
||||||
missing_str = ", ".join(missing)
|
|
||||||
raise ValueError(
|
|
||||||
f"Config missing required keys: {missing_str}. "
|
|
||||||
"Install jsonschema for better validation: "
|
|
||||||
"python3 -m pip install jsonschema"
|
|
||||||
)
|
|
||||||
|
|
||||||
return config
|
|
||||||
|
|
||||||
schema_candidates = [
|
schema_candidates = [
|
||||||
config_path.parent / "config.schema.json",
|
config_path.parent / "config.schema.json",
|
||||||
Path(__file__).resolve().with_name("config.schema.json"),
|
Path(__file__).resolve().with_name("config.schema.json"),
|
||||||
|
|||||||
39
install.sh
39
install.sh
@@ -34,42 +34,23 @@ if ! curl -fsSL "$URL" -o /tmp/codeagent-wrapper; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
INSTALL_DIR="${INSTALL_DIR:-$HOME/.claude}"
|
mkdir -p "$HOME/bin"
|
||||||
BIN_DIR="${INSTALL_DIR}/bin"
|
|
||||||
mkdir -p "$BIN_DIR"
|
|
||||||
|
|
||||||
mv /tmp/codeagent-wrapper "${BIN_DIR}/codeagent-wrapper"
|
mv /tmp/codeagent-wrapper "$HOME/bin/codeagent-wrapper"
|
||||||
chmod +x "${BIN_DIR}/codeagent-wrapper"
|
chmod +x "$HOME/bin/codeagent-wrapper"
|
||||||
|
|
||||||
if "${BIN_DIR}/codeagent-wrapper" --version >/dev/null 2>&1; then
|
if "$HOME/bin/codeagent-wrapper" --version >/dev/null 2>&1; then
|
||||||
echo "codeagent-wrapper installed successfully to ${BIN_DIR}/codeagent-wrapper"
|
echo "codeagent-wrapper installed successfully to ~/bin/codeagent-wrapper"
|
||||||
else
|
else
|
||||||
echo "ERROR: installation verification failed" >&2
|
echo "ERROR: installation verification failed" >&2
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Auto-add to shell config files with idempotency
|
if [[ ":$PATH:" != *":$HOME/bin:"* ]]; then
|
||||||
if [[ ":${PATH}:" != *":${BIN_DIR}:"* ]]; then
|
|
||||||
echo ""
|
echo ""
|
||||||
echo "WARNING: ${BIN_DIR} is not in your PATH"
|
echo "WARNING: ~/bin is not in your PATH"
|
||||||
|
echo "Add this line to your ~/.bashrc or ~/.zshrc:"
|
||||||
# Detect shell config file
|
echo ""
|
||||||
if [ -n "$ZSH_VERSION" ]; then
|
echo " export PATH=\"\$HOME/bin:\$PATH\""
|
||||||
RC_FILE="$HOME/.zshrc"
|
|
||||||
else
|
|
||||||
RC_FILE="$HOME/.bashrc"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Idempotent add: check if complete export statement already exists
|
|
||||||
EXPORT_LINE="export PATH=\"${BIN_DIR}:\$PATH\""
|
|
||||||
if [ -f "$RC_FILE" ] && grep -qF "${EXPORT_LINE}" "$RC_FILE" 2>/dev/null; then
|
|
||||||
echo " ${BIN_DIR} already in ${RC_FILE}, skipping."
|
|
||||||
else
|
|
||||||
echo " Adding to ${RC_FILE}..."
|
|
||||||
echo "" >> "$RC_FILE"
|
|
||||||
echo "# Added by myclaude installer" >> "$RC_FILE"
|
|
||||||
echo "export PATH=\"${BIN_DIR}:\$PATH\"" >> "$RC_FILE"
|
|
||||||
echo " Done. Run 'source ${RC_FILE}' or restart shell."
|
|
||||||
fi
|
|
||||||
echo ""
|
echo ""
|
||||||
fi
|
fi
|
||||||
|
|||||||
@@ -74,7 +74,7 @@ codeagent-wrapper --backend gemini "simple task"
|
|||||||
- `task` (required): Task description, supports `@file` references
|
- `task` (required): Task description, supports `@file` references
|
||||||
- `working_dir` (optional): Working directory (default: current)
|
- `working_dir` (optional): Working directory (default: current)
|
||||||
- `--backend` (optional): Select AI backend (codex/claude/gemini, default: codex)
|
- `--backend` (optional): Select AI backend (codex/claude/gemini, default: codex)
|
||||||
- **Note**: Claude backend only adds `--dangerously-skip-permissions` when explicitly enabled
|
- **Note**: Claude backend defaults to `--dangerously-skip-permissions` for automation compatibility
|
||||||
|
|
||||||
## Return Format
|
## Return Format
|
||||||
|
|
||||||
@@ -101,12 +101,11 @@ EOF
|
|||||||
|
|
||||||
## Parallel Execution
|
## Parallel Execution
|
||||||
|
|
||||||
**Default (summary mode - context-efficient):**
|
**With global backend**:
|
||||||
```bash
|
```bash
|
||||||
codeagent-wrapper --parallel <<'EOF'
|
codeagent-wrapper --parallel --backend claude <<'EOF'
|
||||||
---TASK---
|
---TASK---
|
||||||
id: task1
|
id: task1
|
||||||
backend: codex
|
|
||||||
workdir: /path/to/dir
|
workdir: /path/to/dir
|
||||||
---CONTENT---
|
---CONTENT---
|
||||||
task content
|
task content
|
||||||
@@ -118,17 +117,6 @@ dependent task
|
|||||||
EOF
|
EOF
|
||||||
```
|
```
|
||||||
|
|
||||||
**Full output mode (for debugging):**
|
|
||||||
```bash
|
|
||||||
codeagent-wrapper --parallel --full-output <<'EOF'
|
|
||||||
...
|
|
||||||
EOF
|
|
||||||
```
|
|
||||||
|
|
||||||
**Output Modes:**
|
|
||||||
- **Summary (default)**: Structured report with changes, output, verification, and review summary.
|
|
||||||
- **Full (`--full-output`)**: Complete task messages. Use only when debugging specific failures.
|
|
||||||
|
|
||||||
**With per-task backend**:
|
**With per-task backend**:
|
||||||
```bash
|
```bash
|
||||||
codeagent-wrapper --parallel <<'EOF'
|
codeagent-wrapper --parallel <<'EOF'
|
||||||
@@ -159,9 +147,9 @@ Set `CODEAGENT_MAX_PARALLEL_WORKERS` to limit concurrent tasks (default: unlimit
|
|||||||
## Environment Variables
|
## Environment Variables
|
||||||
|
|
||||||
- `CODEX_TIMEOUT`: Override timeout in milliseconds (default: 7200000 = 2 hours)
|
- `CODEX_TIMEOUT`: Override timeout in milliseconds (default: 7200000 = 2 hours)
|
||||||
- `CODEAGENT_SKIP_PERMISSIONS`: Control Claude CLI permission checks
|
- `CODEAGENT_SKIP_PERMISSIONS`: Control permission checks
|
||||||
- For **Claude** backend: Set to `true`/`1` to add `--dangerously-skip-permissions` (default: disabled)
|
- For **Claude** backend: Set to `true`/`1` to **disable** `--dangerously-skip-permissions` (default: enabled)
|
||||||
- For **Codex/Gemini** backends: Currently has no effect
|
- For **Codex/Gemini** backends: Set to `true`/`1` to enable permission skipping (default: disabled)
|
||||||
- `CODEAGENT_MAX_PARALLEL_WORKERS`: Limit concurrent tasks in parallel mode (default: unlimited, recommended: 8)
|
- `CODEAGENT_MAX_PARALLEL_WORKERS`: Limit concurrent tasks in parallel mode (default: unlimited, recommended: 8)
|
||||||
|
|
||||||
## Invocation Pattern
|
## Invocation Pattern
|
||||||
@@ -194,8 +182,9 @@ Bash tool parameters:
|
|||||||
|
|
||||||
## Security Best Practices
|
## Security Best Practices
|
||||||
|
|
||||||
- **Claude Backend**: Permission checks enabled by default
|
- **Claude Backend**: Defaults to `--dangerously-skip-permissions` for automation workflows
|
||||||
- To skip checks: set `CODEAGENT_SKIP_PERMISSIONS=true` or pass `--skip-permissions`
|
- To enforce permission checks with Claude: Set `CODEAGENT_SKIP_PERMISSIONS=true`
|
||||||
|
- **Codex/Gemini Backends**: Permission checks enabled by default
|
||||||
- **Concurrency Limits**: Set `CODEAGENT_MAX_PARALLEL_WORKERS` in production to prevent resource exhaustion
|
- **Concurrency Limits**: Set `CODEAGENT_MAX_PARALLEL_WORKERS` in production to prevent resource exhaustion
|
||||||
- **Automation Context**: This wrapper is designed for AI-driven automation where permission prompts would block execution
|
- **Automation Context**: This wrapper is designed for AI-driven automation where permission prompts would block execution
|
||||||
|
|
||||||
|
|||||||
@@ -1,167 +0,0 @@
|
|||||||
---
|
|
||||||
name: skill-install
|
|
||||||
description: Install Claude skills from GitHub repositories with automated security scanning. Triggers when users want to install skills from a GitHub URL, need to browse available skills in a repository, or want to safely add new skills to their Claude environment.
|
|
||||||
---
|
|
||||||
|
|
||||||
# Skill Install
|
|
||||||
|
|
||||||
## Overview
|
|
||||||
|
|
||||||
Install Claude skills from GitHub repositories with built-in security scanning to protect against malicious code, backdoors, and vulnerabilities.
|
|
||||||
|
|
||||||
## When to Use
|
|
||||||
|
|
||||||
Trigger this skill when the user:
|
|
||||||
- Provides a GitHub repository URL and wants to install skills
|
|
||||||
- Asks to "install skills from GitHub"
|
|
||||||
- Wants to browse and select skills from a repository
|
|
||||||
- Needs to add new skills to their Claude environment
|
|
||||||
|
|
||||||
## Workflow
|
|
||||||
|
|
||||||
### Step 1: Parse GitHub URL
|
|
||||||
|
|
||||||
Accept a GitHub repository URL from the user. The URL should point to a repository containing a `skills/` directory.
|
|
||||||
|
|
||||||
Supported URL formats:
|
|
||||||
- `https://github.com/user/repo`
|
|
||||||
- `https://github.com/user/repo/tree/main/skills`
|
|
||||||
- `https://github.com/user/repo/tree/branch-name/skills`
|
|
||||||
|
|
||||||
Extract:
|
|
||||||
- Repository owner
|
|
||||||
- Repository name
|
|
||||||
- Branch (default to `main` if not specified)
|
|
||||||
|
|
||||||
### Step 2: Fetch Skills List
|
|
||||||
|
|
||||||
Use the WebFetch tool to retrieve the skills directory listing from GitHub.
|
|
||||||
|
|
||||||
GitHub API endpoint pattern:
|
|
||||||
```
|
|
||||||
https://api.github.com/repos/{owner}/{repo}/contents/skills?ref={branch}
|
|
||||||
```
|
|
||||||
|
|
||||||
Parse the response to extract:
|
|
||||||
- Skill directory names
|
|
||||||
- Each skill should be a subdirectory containing a SKILL.md file
|
|
||||||
|
|
||||||
### Step 3: Present Skills to User
|
|
||||||
|
|
||||||
Use the AskUserQuestion tool to let the user select which skills to install.
|
|
||||||
|
|
||||||
Set `multiSelect: true` to allow multiple selections.
|
|
||||||
|
|
||||||
Present each skill with:
|
|
||||||
- Skill name (directory name)
|
|
||||||
- Brief description (if available from SKILL.md frontmatter)
|
|
||||||
|
|
||||||
### Step 4: Fetch Skill Content
|
|
||||||
|
|
||||||
For each selected skill, fetch all files in the skill directory:
|
|
||||||
|
|
||||||
1. Get the file tree for the skill directory
|
|
||||||
2. Download all files (SKILL.md, scripts/, references/, assets/)
|
|
||||||
3. Store the complete skill content for security analysis
|
|
||||||
|
|
||||||
Use WebFetch with GitHub API:
|
|
||||||
```
|
|
||||||
https://api.github.com/repos/{owner}/{repo}/contents/skills/{skill_name}?ref={branch}
|
|
||||||
```
|
|
||||||
|
|
||||||
For each file, fetch the raw content:
|
|
||||||
```
|
|
||||||
https://raw.githubusercontent.com/{owner}/{repo}/{branch}/skills/{skill_name}/{file_path}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 5: Security Scan
|
|
||||||
|
|
||||||
**CRITICAL:** Before installation, perform a thorough security analysis of each skill.
|
|
||||||
|
|
||||||
Read the security scan prompt template from `references/security_scan_prompt.md` and apply it to analyze the skill content.
|
|
||||||
|
|
||||||
Examine for:
|
|
||||||
1. **Malicious Command Execution** - eval, exec, subprocess with shell=True
|
|
||||||
2. **Backdoor Detection** - obfuscated code, suspicious network requests
|
|
||||||
3. **Credential Theft** - accessing ~/.ssh, ~/.aws, environment variables
|
|
||||||
4. **Unauthorized Network Access** - external requests to suspicious domains
|
|
||||||
5. **File System Abuse** - destructive operations, unauthorized writes
|
|
||||||
6. **Privilege Escalation** - sudo attempts, system modifications
|
|
||||||
7. **Supply Chain Attacks** - suspicious package installations
|
|
||||||
|
|
||||||
Output the security analysis with:
|
|
||||||
- Security Status: SAFE / WARNING / DANGEROUS
|
|
||||||
- Risk Level: LOW / MEDIUM / HIGH / CRITICAL
|
|
||||||
- Detailed findings with file locations and severity
|
|
||||||
- Recommendation: APPROVE / APPROVE_WITH_WARNINGS / REJECT
|
|
||||||
|
|
||||||
### Step 6: User Decision
|
|
||||||
|
|
||||||
Based on the security scan results:
|
|
||||||
|
|
||||||
**If SAFE (APPROVE):**
|
|
||||||
- Proceed directly to installation
|
|
||||||
|
|
||||||
**If WARNING (APPROVE_WITH_WARNINGS):**
|
|
||||||
- Display the security warnings to the user
|
|
||||||
- Use AskUserQuestion to confirm: "Security warnings detected. Do you want to proceed with installation?"
|
|
||||||
- Options: "Yes, install anyway" / "No, skip this skill"
|
|
||||||
|
|
||||||
**If DANGEROUS (REJECT):**
|
|
||||||
- Display the critical security issues
|
|
||||||
- Refuse to install
|
|
||||||
- Explain why the skill is dangerous
|
|
||||||
- Do NOT provide an option to override for CRITICAL severity issues
|
|
||||||
|
|
||||||
### Step 7: Install Skills
|
|
||||||
|
|
||||||
For approved skills, install to `~/.claude/skills/`:
|
|
||||||
|
|
||||||
1. Create the skill directory: `~/.claude/skills/{skill_name}/`
|
|
||||||
2. Write all skill files maintaining the directory structure
|
|
||||||
3. Ensure proper file permissions (executable for scripts)
|
|
||||||
4. Verify SKILL.md exists and has valid frontmatter
|
|
||||||
|
|
||||||
Use the Write tool to create files.
|
|
||||||
|
|
||||||
### Step 8: Confirmation
|
|
||||||
|
|
||||||
After installation, provide a summary:
|
|
||||||
- List of successfully installed skills
|
|
||||||
- List of skipped skills (if any) with reasons
|
|
||||||
- Location: `~/.claude/skills/`
|
|
||||||
- Next steps: "The skills are now available. Restart Claude or use them directly."
|
|
||||||
|
|
||||||
## Example Usage
|
|
||||||
|
|
||||||
**User:** "Install skills from https://github.com/example/claude-skills"
|
|
||||||
|
|
||||||
**Assistant:**
|
|
||||||
1. Fetches skills list from the repository
|
|
||||||
2. Presents available skills: "skill-a", "skill-b", "skill-c"
|
|
||||||
3. User selects "skill-a" and "skill-b"
|
|
||||||
4. Performs security scan on each skill
|
|
||||||
5. skill-a: SAFE - proceeds to install
|
|
||||||
6. skill-b: WARNING (makes HTTP request) - asks user for confirmation
|
|
||||||
7. Installs approved skills to ~/.claude/skills/
|
|
||||||
8. Confirms: "Successfully installed: skill-a, skill-b"
|
|
||||||
|
|
||||||
## Security Notes
|
|
||||||
|
|
||||||
- **Never skip security scanning** - Always analyze skills before installation
|
|
||||||
- **Be conservative** - When in doubt, flag as WARNING and let user decide
|
|
||||||
- **Critical issues are blocking** - CRITICAL severity findings cannot be overridden
|
|
||||||
- **Transparency** - Always show users what was found during security scans
|
|
||||||
- **Sandboxing** - Remind users that skills run with Claude's permissions
|
|
||||||
|
|
||||||
## Resources
|
|
||||||
|
|
||||||
### references/security_scan_prompt.md
|
|
||||||
|
|
||||||
Contains the detailed security analysis prompt template with:
|
|
||||||
- Complete list of security categories to check
|
|
||||||
- Output format requirements
|
|
||||||
- Example analyses for safe, suspicious, and dangerous skills
|
|
||||||
- Decision criteria for APPROVE/REJECT recommendations
|
|
||||||
|
|
||||||
Load this file when performing security scans to ensure comprehensive analysis.
|
|
||||||
@@ -1,137 +0,0 @@
|
|||||||
# Security Scan Prompt for Skills
|
|
||||||
|
|
||||||
Use this prompt template to analyze skill content for security vulnerabilities before installation.
|
|
||||||
|
|
||||||
## Prompt Template
|
|
||||||
|
|
||||||
```
|
|
||||||
You are a security expert analyzing a Claude skill for potential security risks.
|
|
||||||
|
|
||||||
Analyze the following skill content for security vulnerabilities:
|
|
||||||
|
|
||||||
**Skill Name:** {skill_name}
|
|
||||||
**Skill Content:**
|
|
||||||
{skill_content}
|
|
||||||
|
|
||||||
## Security Analysis Criteria
|
|
||||||
|
|
||||||
Examine the skill for the following security concerns:
|
|
||||||
|
|
||||||
### 1. Malicious Command Execution
|
|
||||||
- Detect `eval()`, `exec()`, `subprocess` with `shell=True`
|
|
||||||
- Identify arbitrary code execution patterns
|
|
||||||
- Check for command injection vulnerabilities
|
|
||||||
|
|
||||||
### 2. Backdoor Detection
|
|
||||||
- Look for obfuscated code (base64, hex encoding)
|
|
||||||
- Identify suspicious network requests to unknown domains
|
|
||||||
- Detect file hash patterns matching known malware
|
|
||||||
- Check for hidden data exfiltration mechanisms
|
|
||||||
|
|
||||||
### 3. Credential Theft
|
|
||||||
- Detect attempts to access environment variables containing secrets
|
|
||||||
- Identify file operations on sensitive paths (~/.ssh, ~/.aws, ~/.netrc)
|
|
||||||
- Check for credential harvesting patterns
|
|
||||||
- Look for keylogging or clipboard monitoring
|
|
||||||
|
|
||||||
### 4. Unauthorized Network Access
|
|
||||||
- Identify external network requests
|
|
||||||
- Check for connections to suspicious domains (pastebin, ngrok, bit.ly, etc.)
|
|
||||||
- Detect data exfiltration via HTTP/HTTPS
|
|
||||||
- Look for reverse shell patterns
|
|
||||||
|
|
||||||
### 5. File System Abuse
|
|
||||||
- Detect destructive file operations (rm -rf, shutil.rmtree)
|
|
||||||
- Identify unauthorized file writes to system directories
|
|
||||||
- Check for file permission modifications
|
|
||||||
- Look for attempts to modify critical system files
|
|
||||||
|
|
||||||
### 6. Privilege Escalation
|
|
||||||
- Detect sudo or privilege escalation attempts
|
|
||||||
- Identify attempts to modify system configurations
|
|
||||||
- Check for container escape patterns
|
|
||||||
|
|
||||||
### 7. Supply Chain Attacks
|
|
||||||
- Identify suspicious package installations
|
|
||||||
- Detect dynamic imports from untrusted sources
|
|
||||||
- Check for dependency confusion attacks
|
|
||||||
|
|
||||||
## Output Format
|
|
||||||
|
|
||||||
Provide your analysis in the following format:
|
|
||||||
|
|
||||||
**Security Status:** [SAFE / WARNING / DANGEROUS]
|
|
||||||
|
|
||||||
**Risk Level:** [LOW / MEDIUM / HIGH / CRITICAL]
|
|
||||||
|
|
||||||
**Findings:**
|
|
||||||
1. [Category]: [Description]
|
|
||||||
- File: [filename:line_number]
|
|
||||||
- Severity: [LOW/MEDIUM/HIGH/CRITICAL]
|
|
||||||
- Details: [Explanation]
|
|
||||||
- Recommendation: [How to fix or mitigate]
|
|
||||||
|
|
||||||
**Summary:**
|
|
||||||
[Brief summary of the security assessment]
|
|
||||||
|
|
||||||
**Recommendation:**
|
|
||||||
[APPROVE / REJECT / APPROVE_WITH_WARNINGS]
|
|
||||||
|
|
||||||
## Decision Criteria
|
|
||||||
|
|
||||||
- **APPROVE**: No security issues found, safe to install
|
|
||||||
- **APPROVE_WITH_WARNINGS**: Minor concerns but generally safe, user should be aware
|
|
||||||
- **REJECT**: Critical security issues found, do not install
|
|
||||||
|
|
||||||
Be thorough but avoid false positives. Consider the context and legitimate use cases.
|
|
||||||
```
|
|
||||||
|
|
||||||
## Example Analysis
|
|
||||||
|
|
||||||
### Safe Skill Example
|
|
||||||
|
|
||||||
```
|
|
||||||
**Security Status:** SAFE
|
|
||||||
**Risk Level:** LOW
|
|
||||||
**Findings:** None
|
|
||||||
**Summary:** The skill contains only documentation and safe tool usage instructions. No executable code or suspicious patterns detected.
|
|
||||||
**Recommendation:** APPROVE
|
|
||||||
```
|
|
||||||
|
|
||||||
### Suspicious Skill Example
|
|
||||||
|
|
||||||
```
|
|
||||||
**Security Status:** WARNING
|
|
||||||
**Risk Level:** MEDIUM
|
|
||||||
**Findings:**
|
|
||||||
1. [Network Access]: External HTTP request detected
|
|
||||||
- File: scripts/helper.py:42
|
|
||||||
- Severity: MEDIUM
|
|
||||||
- Details: Script makes HTTP request to api.example.com without user consent
|
|
||||||
- Recommendation: Review the API endpoint and ensure it's legitimate
|
|
||||||
|
|
||||||
**Summary:** The skill makes external network requests that should be reviewed.
|
|
||||||
**Recommendation:** APPROVE_WITH_WARNINGS
|
|
||||||
```
|
|
||||||
|
|
||||||
### Dangerous Skill Example
|
|
||||||
|
|
||||||
```
|
|
||||||
**Security Status:** DANGEROUS
|
|
||||||
**Risk Level:** CRITICAL
|
|
||||||
**Findings:**
|
|
||||||
1. [Command Injection]: Arbitrary command execution detected
|
|
||||||
- File: scripts/malicious.py:15
|
|
||||||
- Severity: CRITICAL
|
|
||||||
- Details: Uses subprocess.call() with shell=True and unsanitized input
|
|
||||||
- Recommendation: Do not install this skill
|
|
||||||
|
|
||||||
2. [Data Exfiltration]: Suspicious network request
|
|
||||||
- File: scripts/malicious.py:28
|
|
||||||
- Severity: HIGH
|
|
||||||
- Details: Sends data to pastebin.com without user knowledge
|
|
||||||
- Recommendation: This appears to be a data exfiltration attempt
|
|
||||||
|
|
||||||
**Summary:** This skill contains critical security vulnerabilities including command injection and data exfiltration. It appears to be malicious.
|
|
||||||
**Recommendation:** REJECT
|
|
||||||
```
|
|
||||||
@@ -1,67 +0,0 @@
|
|||||||
@echo off
|
|
||||||
setlocal enabledelayedexpansion
|
|
||||||
|
|
||||||
echo Testing PATH update with long strings...
|
|
||||||
echo.
|
|
||||||
|
|
||||||
rem Create a very long PATH string (over 1024 characters)
|
|
||||||
set "LONG_PATH="
|
|
||||||
for /L %%i in (1,1,30) do (
|
|
||||||
set "LONG_PATH=!LONG_PATH!C:\VeryLongDirectoryName%%i\SubDirectory\AnotherSubDirectory;"
|
|
||||||
)
|
|
||||||
|
|
||||||
echo Generated PATH length:
|
|
||||||
echo !LONG_PATH! > temp_path.txt
|
|
||||||
for %%A in (temp_path.txt) do set "PATH_LENGTH=%%~zA"
|
|
||||||
del temp_path.txt
|
|
||||||
echo !PATH_LENGTH! bytes
|
|
||||||
|
|
||||||
rem Test 1: Verify reg add can handle long strings
|
|
||||||
echo.
|
|
||||||
echo Test 1: Testing reg add with long PATH...
|
|
||||||
set "TEST_PATH=!LONG_PATH!%%USERPROFILE%%\bin"
|
|
||||||
reg add "HKCU\Environment" /v TestPath /t REG_EXPAND_SZ /d "!TEST_PATH!" /f >nul 2>nul
|
|
||||||
if errorlevel 1 (
|
|
||||||
echo FAIL: reg add failed with long PATH
|
|
||||||
goto :cleanup
|
|
||||||
) else (
|
|
||||||
echo PASS: reg add succeeded with long PATH
|
|
||||||
)
|
|
||||||
|
|
||||||
rem Test 2: Verify the value was stored correctly
|
|
||||||
echo.
|
|
||||||
echo Test 2: Verifying stored value length...
|
|
||||||
for /f "tokens=2*" %%A in ('reg query "HKCU\Environment" /v TestPath 2^>nul ^| findstr /I "TestPath"') do set "STORED_PATH=%%B"
|
|
||||||
echo !STORED_PATH! > temp_stored.txt
|
|
||||||
for %%A in (temp_stored.txt) do set "STORED_LENGTH=%%~zA"
|
|
||||||
del temp_stored.txt
|
|
||||||
echo Stored PATH length: !STORED_LENGTH! bytes
|
|
||||||
|
|
||||||
if !STORED_LENGTH! LSS 1024 (
|
|
||||||
echo FAIL: Stored PATH was truncated
|
|
||||||
goto :cleanup
|
|
||||||
) else (
|
|
||||||
echo PASS: Stored PATH was not truncated
|
|
||||||
)
|
|
||||||
|
|
||||||
rem Test 3: Verify %%USERPROFILE%%\bin is present
|
|
||||||
echo.
|
|
||||||
echo Test 3: Verifying %%USERPROFILE%%\bin is in stored PATH...
|
|
||||||
echo !STORED_PATH! | findstr /I "USERPROFILE" >nul
|
|
||||||
if errorlevel 1 (
|
|
||||||
echo FAIL: %%USERPROFILE%%\bin not found in stored PATH
|
|
||||||
goto :cleanup
|
|
||||||
) else (
|
|
||||||
echo PASS: %%USERPROFILE%%\bin found in stored PATH
|
|
||||||
)
|
|
||||||
|
|
||||||
echo.
|
|
||||||
echo ========================================
|
|
||||||
echo All tests PASSED
|
|
||||||
echo ========================================
|
|
||||||
|
|
||||||
:cleanup
|
|
||||||
echo.
|
|
||||||
echo Cleaning up test registry key...
|
|
||||||
reg delete "HKCU\Environment" /v TestPath /f >nul 2>nul
|
|
||||||
endlocal
|
|
||||||
302
uninstall.py
302
uninstall.py
@@ -1,302 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
"""Uninstaller for myclaude - reads installed_modules.json for precise removal."""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import json
|
|
||||||
import re
|
|
||||||
import shutil
|
|
||||||
import sys
|
|
||||||
from pathlib import Path
|
|
||||||
from typing import Any, Dict, List, Optional, Set
|
|
||||||
|
|
||||||
DEFAULT_INSTALL_DIR = "~/.claude"
|
|
||||||
|
|
||||||
# Files created by installer itself (not by modules)
|
|
||||||
INSTALLER_FILES = ["install.log", "installed_modules.json", "installed_modules.json.bak"]
|
|
||||||
|
|
||||||
|
|
||||||
def parse_args(argv: Optional[List[str]] = None) -> argparse.Namespace:
|
|
||||||
parser = argparse.ArgumentParser(description="Uninstall myclaude")
|
|
||||||
parser.add_argument(
|
|
||||||
"--install-dir",
|
|
||||||
default=DEFAULT_INSTALL_DIR,
|
|
||||||
help="Installation directory (defaults to ~/.claude)",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--module",
|
|
||||||
help="Comma-separated modules to uninstall (default: all installed)",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--list",
|
|
||||||
action="store_true",
|
|
||||||
help="List installed modules and exit",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--dry-run",
|
|
||||||
action="store_true",
|
|
||||||
help="Show what would be removed without actually removing",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--purge",
|
|
||||||
action="store_true",
|
|
||||||
help="Remove entire install directory (DANGEROUS: removes user files too)",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"-y", "--yes",
|
|
||||||
action="store_true",
|
|
||||||
help="Skip confirmation prompt",
|
|
||||||
)
|
|
||||||
return parser.parse_args(argv)
|
|
||||||
|
|
||||||
|
|
||||||
def load_installed_modules(install_dir: Path) -> Dict[str, Any]:
|
|
||||||
"""Load installed_modules.json to know what was installed."""
|
|
||||||
status_file = install_dir / "installed_modules.json"
|
|
||||||
if not status_file.exists():
|
|
||||||
return {}
|
|
||||||
try:
|
|
||||||
with status_file.open("r", encoding="utf-8") as f:
|
|
||||||
return json.load(f)
|
|
||||||
except (json.JSONDecodeError, OSError):
|
|
||||||
return {}
|
|
||||||
|
|
||||||
|
|
||||||
def load_config(install_dir: Path) -> Dict[str, Any]:
|
|
||||||
"""Try to load config.json from source repo to understand module structure."""
|
|
||||||
# Look for config.json in common locations
|
|
||||||
candidates = [
|
|
||||||
Path(__file__).parent / "config.json",
|
|
||||||
install_dir / "config.json",
|
|
||||||
]
|
|
||||||
for path in candidates:
|
|
||||||
if path.exists():
|
|
||||||
try:
|
|
||||||
with path.open("r", encoding="utf-8") as f:
|
|
||||||
return json.load(f)
|
|
||||||
except (json.JSONDecodeError, OSError):
|
|
||||||
continue
|
|
||||||
return {}
|
|
||||||
|
|
||||||
|
|
||||||
def get_module_files(module_name: str, config: Dict[str, Any]) -> Set[str]:
|
|
||||||
"""Extract files/dirs that a module installs based on config.json operations."""
|
|
||||||
files: Set[str] = set()
|
|
||||||
modules = config.get("modules", {})
|
|
||||||
module_cfg = modules.get(module_name, {})
|
|
||||||
|
|
||||||
for op in module_cfg.get("operations", []):
|
|
||||||
op_type = op.get("type", "")
|
|
||||||
target = op.get("target", "")
|
|
||||||
|
|
||||||
if op_type == "copy_file" and target:
|
|
||||||
files.add(target)
|
|
||||||
elif op_type == "copy_dir" and target:
|
|
||||||
files.add(target)
|
|
||||||
elif op_type == "merge_dir":
|
|
||||||
# merge_dir merges subdirs like commands/, agents/ into install_dir
|
|
||||||
source = op.get("source", "")
|
|
||||||
source_path = Path(__file__).parent / source
|
|
||||||
if source_path.exists():
|
|
||||||
for subdir in source_path.iterdir():
|
|
||||||
if subdir.is_dir():
|
|
||||||
files.add(subdir.name)
|
|
||||||
elif op_type == "run_command":
|
|
||||||
# install.sh installs bin/codeagent-wrapper
|
|
||||||
cmd = op.get("command", "")
|
|
||||||
if "install.sh" in cmd or "install.bat" in cmd:
|
|
||||||
files.add("bin/codeagent-wrapper")
|
|
||||||
files.add("bin")
|
|
||||||
|
|
||||||
return files
|
|
||||||
|
|
||||||
|
|
||||||
def cleanup_shell_config(rc_file: Path, bin_dir: Path) -> bool:
|
|
||||||
"""Remove PATH export added by installer from shell config."""
|
|
||||||
if not rc_file.exists():
|
|
||||||
return False
|
|
||||||
|
|
||||||
content = rc_file.read_text(encoding="utf-8")
|
|
||||||
original = content
|
|
||||||
|
|
||||||
patterns = [
|
|
||||||
r"\n?# Added by myclaude installer\n",
|
|
||||||
rf'\nexport PATH="{re.escape(str(bin_dir))}:\$PATH"\n?',
|
|
||||||
]
|
|
||||||
|
|
||||||
for pattern in patterns:
|
|
||||||
content = re.sub(pattern, "\n", content)
|
|
||||||
|
|
||||||
content = re.sub(r"\n{3,}$", "\n\n", content)
|
|
||||||
|
|
||||||
if content != original:
|
|
||||||
rc_file.write_text(content, encoding="utf-8")
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def list_installed(install_dir: Path) -> None:
|
|
||||||
"""List installed modules."""
|
|
||||||
status = load_installed_modules(install_dir)
|
|
||||||
modules = status.get("modules", {})
|
|
||||||
|
|
||||||
if not modules:
|
|
||||||
print("No modules installed (installed_modules.json not found or empty)")
|
|
||||||
return
|
|
||||||
|
|
||||||
print(f"Installed modules in {install_dir}:")
|
|
||||||
print(f"{'Module':<15} {'Status':<10} {'Installed At'}")
|
|
||||||
print("-" * 50)
|
|
||||||
for name, info in modules.items():
|
|
||||||
st = info.get("status", "unknown")
|
|
||||||
ts = info.get("installed_at", "unknown")[:19]
|
|
||||||
print(f"{name:<15} {st:<10} {ts}")
|
|
||||||
|
|
||||||
|
|
||||||
def main(argv: Optional[List[str]] = None) -> int:
    """CLI entry point for the uninstaller.

    Resolves the install directory, determines which modules to remove,
    previews the file list, asks for confirmation (unless --yes/--dry-run),
    deletes files, updates installed_modules.json on partial uninstalls,
    and strips installer PATH entries from ~/.bashrc and ~/.zshrc.

    Returns:
        0 on success or no-op, 1 when a requested module is not installed.
    """
    args = parse_args(argv)
    install_dir = Path(args.install_dir).expanduser().resolve()
    bin_dir = install_dir / "bin"

    # Missing install dir is treated as success: there is nothing to undo.
    if not install_dir.exists():
        print(f"Install directory not found: {install_dir}")
        print("Nothing to uninstall.")
        return 0

    if args.list:
        list_installed(install_dir)
        return 0

    # Load installation status
    status = load_installed_modules(install_dir)
    installed_modules = status.get("modules", {})
    config = load_config(install_dir)

    # Determine which modules to uninstall
    if args.module:
        # --module accepts a comma-separated list; blanks are ignored.
        selected = [m.strip() for m in args.module.split(",") if m.strip()]
        # Validate
        for m in selected:
            if m not in installed_modules:
                print(f"Error: Module '{m}' is not installed")
                print("Use --list to see installed modules")
                return 1
    else:
        selected = list(installed_modules.keys())

    if not selected and not args.purge:
        print("No modules to uninstall.")
        print("Use --list to see installed modules, or --purge to remove everything.")
        return 0

    # Collect files to remove
    files_to_remove: Set[str] = set()
    for module_name in selected:
        files_to_remove.update(get_module_files(module_name, config))

    # Add installer files if removing all modules
    if set(selected) == set(installed_modules.keys()):
        files_to_remove.update(INSTALLER_FILES)

    # Show what will be removed
    print(f"Install directory: {install_dir}")
    if args.purge:
        print(f"\n⚠️  PURGE MODE: Will remove ENTIRE directory including user files!")
    else:
        print(f"\nModules to uninstall: {', '.join(selected)}")
        print(f"\nFiles/directories to remove:")
        for f in sorted(files_to_remove):
            path = install_dir / f
            exists = "✓" if path.exists() else "✗ (not found)"
            print(f"  {f} {exists}")

    # Confirmation
    if not args.yes and not args.dry_run:
        prompt = "\nProceed with uninstallation? [y/N] "
        response = input(prompt).strip().lower()
        # Default is "no": anything other than y/yes aborts.
        if response not in ("y", "yes"):
            print("Aborted.")
            return 0

    if args.dry_run:
        print("\n[Dry run] No files were removed.")
        return 0

    print(f"\nUninstalling...")
    removed: List[str] = []

    if args.purge:
        # Purge removes the whole tree, user files included.
        shutil.rmtree(install_dir)
        print(f"  ✓ Removed {install_dir}")
        removed.append(str(install_dir))
    else:
        # Remove files/dirs in reverse order (files before parent dirs)
        for item in sorted(files_to_remove, key=lambda x: x.count("/"), reverse=True):
            path = install_dir / item
            if not path.exists():
                continue
            try:
                if path.is_dir():
                    # Only remove if empty or if it's a known module dir
                    if item in ("bin",):
                        # For bin, only remove codeagent-wrapper
                        wrapper = path / "codeagent-wrapper"
                        if wrapper.exists():
                            wrapper.unlink()
                            print(f"  ✓ Removed bin/codeagent-wrapper")
                            removed.append("bin/codeagent-wrapper")
                        # Remove bin if empty
                        if path.exists() and not any(path.iterdir()):
                            path.rmdir()
                            print(f"  ✓ Removed empty bin/")
                    else:
                        shutil.rmtree(path)
                        print(f"  ✓ Removed {item}/")
                        removed.append(item)
                else:
                    path.unlink()
                    print(f"  ✓ Removed {item}")
                    removed.append(item)
            except OSError as e:
                # Failures are reported but do not abort the remaining removals.
                print(f"  ✗ Failed to remove {item}: {e}", file=sys.stderr)

    # Update installed_modules.json
    # NOTE(review): after --purge the status file is already gone, so the
    # exists() guard makes this a no-op in that branch.
    status_file = install_dir / "installed_modules.json"
    if status_file.exists() and selected != list(installed_modules.keys()):
        # Partial uninstall: update status file
        for m in selected:
            installed_modules.pop(m, None)
        if installed_modules:
            with status_file.open("w", encoding="utf-8") as f:
                json.dump({"modules": installed_modules}, f, indent=2)
            print(f"  ✓ Updated installed_modules.json")

    # Remove install dir if empty
    if install_dir.exists() and not any(install_dir.iterdir()):
        install_dir.rmdir()
        print(f"  ✓ Removed empty install directory")

    # Clean shell configs
    for rc_name in (".bashrc", ".zshrc"):
        rc_file = Path.home() / rc_name
        if cleanup_shell_config(rc_file, bin_dir):
            print(f"  ✓ Cleaned PATH from {rc_name}")

    print("")
    if removed:
        print(f"✓ Uninstallation complete ({len(removed)} items removed)")
    else:
        print("✓ Nothing to remove")

    # Anything left behind is either user data or owned by other modules.
    if install_dir.exists() and any(install_dir.iterdir()):
        remaining = list(install_dir.iterdir())
        print(f"\nNote: {len(remaining)} items remain in {install_dir}")
        print("These are either user files or from other modules.")
        print("Use --purge to remove everything (DANGEROUS).")

    return 0
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
    # Script entry point: propagate main()'s return code as the process exit status.
    sys.exit(main())
225
uninstall.sh
225
uninstall.sh
@@ -1,225 +0,0 @@
|
|||||||
#!/bin/bash
# Uninstaller for myclaude: removes installed modules and binaries, and
# cleans the PATH entries the installer added to shell rc files.
set -e

# Installation root; may be overridden via the INSTALL_DIR env var or --install-dir.
INSTALL_DIR="${INSTALL_DIR:-$HOME/.claude}"
# Derived paths: wrapper binary directory and the module-status JSON file.
BIN_DIR="${INSTALL_DIR}/bin"
STATUS_FILE="${INSTALL_DIR}/installed_modules.json"
# CLI flag defaults; flipped by the argument parser.
DRY_RUN=false
PURGE=false
YES=false
LIST_ONLY=false
MODULES=""
|
|
||||||
|
|
||||||
# Print usage help to stdout and exit 0.
usage() {
  cat <<EOF
Usage: $0 [OPTIONS]

Uninstall myclaude modules.

Options:
  --install-dir DIR    Installation directory (default: ~/.claude)
  --module MODULES     Comma-separated modules to uninstall (default: all)
  --list               List installed modules and exit
  --dry-run            Show what would be removed without removing
  --purge              Remove entire install directory (DANGEROUS)
  -y, --yes            Skip confirmation prompt
  -h, --help           Show this help

Examples:
  $0 --list            # List installed modules
  $0 --dry-run         # Preview what would be removed
  $0 --module dev      # Uninstall only 'dev' module
  $0 -y                # Uninstall all without confirmation
  $0 --purge -y        # Remove everything (DANGEROUS)
EOF
  exit 0
}
|
|
||||||
|
|
||||||
# Parse arguments
# NOTE: --install-dir re-derives BIN_DIR and STATUS_FILE so a custom root
# keeps all dependent paths consistent.
while [[ $# -gt 0 ]]; do
  case $1 in
    --install-dir) INSTALL_DIR="$2"; BIN_DIR="${INSTALL_DIR}/bin"; STATUS_FILE="${INSTALL_DIR}/installed_modules.json"; shift 2 ;;
    --module) MODULES="$2"; shift 2 ;;
    --list) LIST_ONLY=true; shift ;;
    --dry-run) DRY_RUN=true; shift ;;
    --purge) PURGE=true; shift ;;
    -y|--yes) YES=true; shift ;;
    -h|--help) usage ;;
    *) echo "Unknown option: $1" >&2; exit 1 ;;
  esac
done

# Check if install dir exists
# A missing directory is success, not an error: there is nothing to undo.
if [ ! -d "$INSTALL_DIR" ]; then
  echo "Install directory not found: $INSTALL_DIR"
  echo "Nothing to uninstall."
  exit 0
fi
|
|
||||||
|
|
||||||
# List installed modules
|
|
||||||
# Print a human-readable table of the modules recorded in STATUS_FILE.
# Globals:   STATUS_FILE (read), INSTALL_DIR (read)
# Outputs:   table on stdout; parse errors on stderr
# Returns:   0, or non-zero if the status file exists but cannot be parsed
list_modules() {
  if [ ! -f "$STATUS_FILE" ]; then
    echo "No modules installed (installed_modules.json not found)"
    return
  fi
  echo "Installed modules in $INSTALL_DIR:"
  echo "Module          Status     Installed At"
  echo "--------------------------------------------------"
  # Parse JSON with python3 (no jq dependency). The path is passed as an
  # argv element instead of being interpolated into the program text, so
  # quotes or other special characters in STATUS_FILE cannot break or
  # inject into the embedded Python code.
  python3 -c "
import json, sys
try:
    with open(sys.argv[1]) as f:
        data = json.load(f)
    for name, info in data.get('modules', {}).items():
        status = info.get('status', 'unknown')
        ts = info.get('installed_at', 'unknown')[:19]
        print(f'{name:<15} {status:<10} {ts}')
except Exception as e:
    print(f'Error reading status file: {e}', file=sys.stderr)
    sys.exit(1)
" "$STATUS_FILE"
}
|
|
||||||
|
|
||||||
# --list short-circuits: print the module table and exit without removing anything.
if [ "$LIST_ONLY" = true ]; then
  list_modules
  exit 0
fi
|
|
||||||
|
|
||||||
# Get installed modules from status file
|
|
||||||
# Get installed modules from status file
# Globals:   STATUS_FILE (read)
# Outputs:   space-separated module names on stdout; empty string when the
#            file is missing or unparseable (deliberate best-effort).
get_installed_modules() {
  if [ ! -f "$STATUS_FILE" ]; then
    echo ""
    return
  fi
  # Pass the path via argv so special characters in STATUS_FILE cannot
  # break or inject into the embedded Python program text.
  python3 -c "
import json, sys
try:
    with open(sys.argv[1]) as f:
        data = json.load(f)
    print(' '.join(data.get('modules', {}).keys()))
except Exception:
    # Best-effort: any read/parse failure yields an empty module list.
    print('')
" "$STATUS_FILE"
}
|
|
||||||
|
|
||||||
INSTALLED=$(get_installed_modules)

# Determine modules to uninstall
# An explicit --module list wins; otherwise everything recorded as installed.
if [ -n "$MODULES" ]; then
  SELECTED="$MODULES"
else
  SELECTED="$INSTALLED"
fi

if [ -z "$SELECTED" ] && [ "$PURGE" != true ]; then
  echo "No modules to uninstall."
  echo "Use --list to see installed modules, or --purge to remove everything."
  exit 0
fi

echo "Install directory: $INSTALL_DIR"

# Preview what will be removed before asking for confirmation.
if [ "$PURGE" = true ]; then
  echo ""
  echo "⚠️  PURGE MODE: Will remove ENTIRE directory including user files!"
else
  echo ""
  echo "Modules to uninstall: $SELECTED"
  echo ""
  echo "Files/directories that may be removed:"
  for item in commands agents skills docs bin CLAUDE.md install.log installed_modules.json; do
    if [ -e "${INSTALL_DIR}/${item}" ]; then
      echo "  $item ✓"
    fi
  done
fi

# Confirmation
# Default answer is "no": only an explicit y/yes proceeds.
if [ "$YES" != true ] && [ "$DRY_RUN" != true ]; then
  echo ""
  read -p "Proceed with uninstallation? [y/N] " response
  case "$response" in
    [yY]|[yY][eE][sS]) ;;
    *) echo "Aborted."; exit 0 ;;
  esac
fi

if [ "$DRY_RUN" = true ]; then
  echo ""
  echo "[Dry run] No files were removed."
  exit 0
fi

echo ""
echo "Uninstalling..."

if [ "$PURGE" = true ]; then
  # Purge removes the whole tree, user files included.
  rm -rf "$INSTALL_DIR"
  echo "  ✓ Removed $INSTALL_DIR"
else
  # Remove codeagent-wrapper binary
  if [ -f "${BIN_DIR}/codeagent-wrapper" ]; then
    rm -f "${BIN_DIR}/codeagent-wrapper"
    echo "  ✓ Removed bin/codeagent-wrapper"
  fi

  # Remove bin directory if empty
  if [ -d "$BIN_DIR" ] && [ -z "$(ls -A "$BIN_DIR" 2>/dev/null)" ]; then
    rmdir "$BIN_DIR"
    echo "  ✓ Removed empty bin/"
  fi

  # Remove installed directories
  for dir in commands agents skills docs; do
    if [ -d "${INSTALL_DIR}/${dir}" ]; then
      rm -rf "${INSTALL_DIR}/${dir}"
      echo "  ✓ Removed ${dir}/"
    fi
  done

  # Remove installed files
  for file in CLAUDE.md install.log installed_modules.json installed_modules.json.bak; do
    if [ -f "${INSTALL_DIR}/${file}" ]; then
      rm -f "${INSTALL_DIR}/${file}"
      echo "  ✓ Removed ${file}"
    fi
  done

  # Remove install directory if empty
  if [ -d "$INSTALL_DIR" ] && [ -z "$(ls -A "$INSTALL_DIR" 2>/dev/null)" ]; then
    rmdir "$INSTALL_DIR"
    echo "  ✓ Removed empty install directory"
  fi
fi
|
|
||||||
|
|
||||||
# Clean up PATH from shell config files
|
|
||||||
# Clean up PATH from shell config files
# Removes the installer marker comment and the exact PATH export line from a
# shell rc file, keeping a .bak backup of the original.
# Globals:   BIN_DIR (read)
# Arguments: $1 - path to the rc file (e.g. ~/.bashrc)
cleanup_shell_config() {
  local rc_file="$1"
  if [ -f "$rc_file" ]; then
    if grep -q "# Added by myclaude installer" "$rc_file" 2>/dev/null; then
      # Create backup
      cp "$rc_file" "${rc_file}.bak"
      # Remove myclaude lines.
      # -F treats the patterns as fixed strings so regex metacharacters in
      # BIN_DIR (e.g. '.', '+') cannot match unintended lines.
      # '|| true' keeps 'set -e' from aborting the script when every line
      # is filtered out (grep -v exits 1 on empty output).
      { grep -Fv "# Added by myclaude installer" "$rc_file" | \
          grep -Fv "export PATH=\"${BIN_DIR}:\$PATH\"" || true; } > "${rc_file}.tmp"
      mv "${rc_file}.tmp" "$rc_file"
      echo "  ✓ Cleaned PATH from $(basename "$rc_file")"
    fi
  fi
}
|
|
||||||
|
|
||||||
# Strip installer PATH entries from the common shell startup files.
cleanup_shell_config "$HOME/.bashrc"
cleanup_shell_config "$HOME/.zshrc"

echo ""
echo "✓ Uninstallation complete"

# Check for remaining files
# Anything left in the install dir is either user data or owned by modules
# that were not selected for removal.
if [ -d "$INSTALL_DIR" ] && [ -n "$(ls -A "$INSTALL_DIR" 2>/dev/null)" ]; then
  remaining=$(ls -1 "$INSTALL_DIR" 2>/dev/null | wc -l | tr -d ' ')
  echo ""
  echo "Note: $remaining items remain in $INSTALL_DIR"
  echo "These are either user files or from other modules."
  echo "Use --purge to remove everything (DANGEROUS)."
fi
|
|
||||||
Reference in New Issue
Block a user