Mirror of https://github.com/cexll/myclaude.git (synced 2026-02-05 02:30:26 +08:00)

Compare commits: v6.0.0 ... feat/intel (3 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 61536d04e2 |  |
|  | 2856bf0c29 |  |
|  | 19facf3385 |  |
@@ -1,54 +1,209 @@
{
  "$schema": "https://anthropic.com/claude-code/marketplace.schema.json",
  "name": "myclaude",
  "version": "5.6.1",
  "description": "Professional multi-agent development workflows with OmO orchestration, Requirements-Driven and BMAD methodologies",
  "name": "claude-code-dev-workflows",
  "owner": {
    "name": "cexll",
    "email": "evanxian9@gmail.com"
    "name": "Claude Code Dev Workflows",
    "email": "contact@example.com",
    "url": "https://github.com/cexll/myclaude"
  },
  "metadata": {
    "description": "Professional multi-agent development workflows with Requirements-Driven and BMAD methodologies, featuring 16+ specialized agents and 12+ commands",
    "version": "1.0.0"
  },
  "plugins": [
    {
      "name": "omo",
      "description": "Multi-agent orchestration for code analysis, bug investigation, fix planning, and implementation with intelligent routing to specialized agents",
      "version": "5.6.1",
      "source": "./skills/omo",
      "category": "development"
      "name": "requirements-driven-development",
      "source": "./requirements-driven-workflow/",
      "description": "Streamlined requirements-driven development workflow with 90% quality gates for practical feature implementation",
      "version": "1.0.0",
      "author": {
        "name": "Claude Code Dev Workflows",
        "url": "https://github.com/cexll/myclaude"
      },
      "homepage": "https://github.com/cexll/myclaude",
      "repository": "https://github.com/cexll/myclaude",
      "license": "MIT",
      "keywords": [
        "requirements",
        "workflow",
        "automation",
        "quality-gates",
        "feature-development",
        "agile",
        "specifications"
      ],
      "category": "workflows",
      "strict": false,
      "commands": [
        "./commands/requirements-pilot.md"
      ],
      "agents": [
        "./agents/requirements-generate.md",
        "./agents/requirements-code.md",
        "./agents/requirements-testing.md",
        "./agents/requirements-review.md"
      ]
    },
    {
      "name": "dev",
      "description": "Lightweight development workflow with requirements clarification, parallel codex execution, and mandatory 90% test coverage",
      "version": "5.6.1",
      "source": "./dev-workflow",
      "category": "development"
    },
    {
      "name": "requirements",
      "description": "Requirements-driven development workflow with quality gates for practical feature implementation",
      "version": "5.6.1",
      "source": "./requirements-driven-workflow",
      "category": "development"
    },
    {
      "name": "bmad",
      "name": "bmad-agile-workflow",
      "source": "./bmad-agile-workflow/",
      "description": "Full BMAD agile workflow with role-based agents (PO, Architect, SM, Dev, QA) and interactive approval gates",
      "version": "5.6.1",
      "source": "./bmad-agile-workflow",
      "category": "development"
      "version": "1.0.0",
      "author": {
        "name": "Claude Code Dev Workflows",
        "url": "https://github.com/cexll/myclaude"
      },
      "homepage": "https://github.com/cexll/myclaude",
      "repository": "https://github.com/cexll/myclaude",
      "license": "MIT",
      "keywords": [
        "bmad",
        "agile",
        "scrum",
        "product-owner",
        "architect",
        "developer",
        "qa",
        "workflow-orchestration"
      ],
      "category": "workflows",
      "strict": false,
      "commands": [
        "./commands/bmad-pilot.md"
      ],
      "agents": [
        "./agents/bmad-po.md",
        "./agents/bmad-architect.md",
        "./agents/bmad-sm.md",
        "./agents/bmad-dev.md",
        "./agents/bmad-qa.md",
        "./agents/bmad-orchestrator.md",
        "./agents/bmad-review.md"
      ]
    },
    {
      "name": "dev-kit",
      "name": "development-essentials",
      "source": "./development-essentials/",
      "description": "Essential development commands for coding, debugging, testing, optimization, and documentation",
      "version": "5.6.1",
      "source": "./development-essentials",
      "category": "productivity"
      "version": "1.0.0",
      "author": {
        "name": "Claude Code Dev Workflows",
        "url": "https://github.com/cexll/myclaude"
      },
      "homepage": "https://github.com/cexll/myclaude",
      "repository": "https://github.com/cexll/myclaude",
      "license": "MIT",
      "keywords": [
        "code",
        "debug",
        "test",
        "optimize",
        "review",
        "bugfix",
        "refactor",
        "documentation"
      ],
      "category": "essentials",
      "strict": false,
      "commands": [
        "./commands/code.md",
        "./commands/debug.md",
        "./commands/test.md",
        "./commands/optimize.md",
        "./commands/review.md",
        "./commands/bugfix.md",
        "./commands/refactor.md",
        "./commands/docs.md",
        "./commands/ask.md",
        "./commands/think.md"
      ],
      "agents": [
        "./agents/code.md",
        "./agents/bugfix.md",
        "./agents/bugfix-verify.md",
        "./agents/optimize.md",
        "./agents/debug.md"
      ]
    },
    {
      "name": "sparv",
      "description": "Minimal SPARV workflow (Specify→Plan→Act→Review→Vault) with 10-point spec gate, unified journal, 2-action saves, 3-failure protocol, and EHRB risk detection",
      "version": "1.1.0",
      "source": "./skills/sparv",
      "category": "development"
      "name": "codex-cli",
      "source": "./skills/codex/",
      "description": "Execute Codex CLI for code analysis, refactoring, and automated code changes with file references (@syntax) and structured output",
      "version": "1.0.0",
      "author": {
        "name": "Claude Code Dev Workflows",
        "url": "https://github.com/cexll/myclaude"
      },
      "homepage": "https://github.com/cexll/myclaude",
      "repository": "https://github.com/cexll/myclaude",
      "license": "MIT",
      "keywords": [
        "codex",
        "code-analysis",
        "refactoring",
        "automation",
        "gpt-5",
        "ai-coding"
      ],
      "category": "essentials",
      "strict": false,
      "skills": [
        "./SKILL.md"
      ]
    },
    {
      "name": "gemini-cli",
      "source": "./skills/gemini/",
      "description": "Execute Gemini CLI for AI-powered code analysis and generation with Google's latest Gemini models",
      "version": "1.0.0",
      "author": {
        "name": "Claude Code Dev Workflows",
        "url": "https://github.com/cexll/myclaude"
      },
      "homepage": "https://github.com/cexll/myclaude",
      "repository": "https://github.com/cexll/myclaude",
      "license": "MIT",
      "keywords": [
        "gemini",
        "google-ai",
        "code-analysis",
        "code-generation",
        "ai-reasoning"
      ],
      "category": "essentials",
      "strict": false,
      "skills": [
        "./SKILL.md"
      ]
    },
    {
      "name": "dev-workflow",
      "source": "./dev-workflow/",
      "description": "Minimal lightweight development workflow with requirements clarification, parallel codex execution, and mandatory 90% test coverage",
      "version": "1.0.0",
      "author": {
        "name": "Claude Code Dev Workflows",
        "url": "https://github.com/cexll/myclaude"
      },
      "homepage": "https://github.com/cexll/myclaude",
      "repository": "https://github.com/cexll/myclaude",
      "license": "MIT",
      "keywords": [
        "dev",
        "workflow",
        "codex",
        "testing",
        "coverage",
        "concurrent",
        "lightweight"
      ],
      "category": "workflows",
      "strict": false,
      "commands": [
        "./commands/dev.md"
      ],
      "agents": [
        "./agents/dev-plan-generator.md"
      ]
    }
  ]
}
2  .github/workflows/release.yml (vendored)

@@ -74,7 +74,7 @@ jobs:
          if [ "${{ matrix.goos }}" = "windows" ]; then
            OUTPUT_NAME="${OUTPUT_NAME}.exe"
          fi
          go build -ldflags="-s -w -X main.version=${VERSION}" -o ${OUTPUT_NAME} ./cmd/codeagent
          go build -ldflags="-s -w -X main.version=${VERSION}" -o ${OUTPUT_NAME} .
          chmod +x ${OUTPUT_NAME}
          echo "artifact_path=codeagent-wrapper/${OUTPUT_NAME}" >> $GITHUB_OUTPUT
60  CHANGELOG.md

@@ -2,66 +2,6 @@

All notable changes to this project will be documented in this file.

## [5.6.4] - 2026-01-15

### 🚀 Features

- add reasoning effort config for codex backend
- default to skip-permissions and bypass-sandbox
- add multi-agent support with yolo mode
- add omo module for multi-agent orchestration
- add intelligent backend selection based on task complexity (#61)
- v5.4.0 structured execution report (#94)
- add millisecond-precision timestamps to all log entries (#91)
- skill-install install script and security scan
- add uninstall scripts with selective module removal

### 🐛 Bug Fixes

- filter codex stderr noise logs
- use config override for codex reasoning effort
- propagate SkipPermissions to parallel tasks (#113)
- add timeout for Windows process termination
- reject dash as workdir parameter (#118)
- add sleep in fake script to prevent CI race condition
- fix gemini env load
- fix omo
- fix codeagent skill TaskOutput
- fix Gemini init event not extracting session_id (#111)
- Windows backend exit: terminate the process tree with taskkill + turn.completed support (#108)
- support model parameter for all backends, auto-inject from settings (#105)
- replace setx with reg add to avoid 1024-char PATH truncation (#101)
- remove log noise for unknown event formats (#96)
- prevent duplicate PATH entries on reinstall (#95)
- Minor issues #12 and #13 - ASCII mode and performance optimization
- correct settings.json filename and bump version to v5.2.8
- allow claude backend to read env from setting.json while preventing recursion (#92)
- comprehensive security and quality improvements for PR #85 & #87 (#90)
- Improve backend termination after message and extend timeout (#86)
- parser duplicate-parsing optimization + critical bug fixes + PR #86 compatibility (#88)
- filter noisy stderr output from gemini backend (#83)
- fix wsl install.sh formatting issue (#78)
- fix PID confusion in multi-backend parallel logs and remove the wrapper format (#74) (#76)

### 🚜 Refactor

- remove sisyphus agent and unused code
- streamline agent documentation and remove sisyphus

### 📚 Documentation

- add OmO workflow to README and fix plugin marketplace structure
- update FAQ for default bypass/skip-permissions behavior
- add FAQ section
- update troubleshooting with idempotent PATH commands (#95)

### 💼 Other

- add test-cases skill
- add browser skill
- BMAD and Requirements-Driven support generating the corresponding documents based on semantics (#82)
- update all readme

## [5.2.4] - 2025-12-16
146  README.md

@@ -7,7 +7,7 @@

[](https://www.gnu.org/licenses/agpl-3.0)
[](https://claude.ai/code)
[](https://github.com/cexll/myclaude)
[](https://github.com/cexll/myclaude)

> AI-powered development automation with multi-backend execution (Codex/Claude/Gemini)

@@ -35,41 +35,6 @@ python3 install.py --install-dir ~/.claude

## Workflows Overview

### 0. OmO Multi-Agent Orchestrator (Recommended for Complex Tasks)

**Intelligent multi-agent orchestration that routes tasks to specialized agents based on risk signals.**

```bash
/omo "analyze and fix this authentication bug"
```

**Agent Hierarchy:**

| Agent | Role | Backend | Model |
|-------|------|---------|-------|
| `oracle` | Technical advisor | Claude | claude-opus-4-5 |
| `librarian` | External research | Claude | claude-sonnet-4-5 |
| `explore` | Codebase search | OpenCode | grok-code |
| `develop` | Code implementation | Codex | gpt-5.2 |
| `frontend-ui-ux-engineer` | UI/UX specialist | Gemini | gemini-3-pro |
| `document-writer` | Documentation | Gemini | gemini-3-flash |

**Routing Signals (Not a Fixed Pipeline):**
- Code location unclear → `explore`
- External library/API → `librarian`
- Risky/multi-file change → `oracle`
- Implementation needed → `develop` / `frontend-ui-ux-engineer`

**Common Recipes:**
- Explain code: `explore`
- Small fix with known location: `develop` directly
- Bug fix, location unknown: `explore → develop`
- Cross-cutting refactor: `explore → oracle → develop`
- External API integration: `explore + librarian → oracle → develop`

**Best For:** Complex bug investigation, multi-file refactoring, architecture decisions

---

### 1. Dev Workflow (Recommended)

**The primary workflow for most development tasks.**

@@ -195,7 +160,7 @@
- `-p` - Prompt input flag
- `-r <session_id>` - Resume sessions

**Security Note:** The wrapper adds `--dangerously-skip-permissions` for Claude by default. Set `CODEAGENT_SKIP_PERMISSIONS=false` to disable it if you need permission prompts.
**Security Note:** The wrapper only adds `--dangerously-skip-permissions` for Claude when explicitly enabled (e.g. `--skip-permissions` / `CODEAGENT_SKIP_PERMISSIONS=true`). Keep it disabled unless you understand the risk.

**Verify Claude CLI is installed:**
```bash
@@ -381,10 +346,8 @@ $Env:PATH = "$HOME\bin;$Env:PATH"
```

```batch
REM cmd.exe - persistent for current user (use the PowerShell method above instead)
REM WARNING: This expands %PATH%, which includes the system PATH, causing duplication
REM Note: Using reg add instead of setx to avoid the 1024-character truncation limit
reg add "HKCU\Environment" /v Path /t REG_EXPAND_SZ /d "%USERPROFILE%\bin;%PATH%" /f
REM cmd.exe - persistent for current user
setx PATH "%USERPROFILE%\bin;%PATH%"
```

---

@@ -499,106 +462,9 @@ claude -r <session_id> "test"

---

## FAQ (Frequently Asked Questions)

### Q1: `codeagent-wrapper` execution fails with "Unknown event format"

**Problem:**
```
Unknown event format: {"type":"turn.started"}
Unknown event format: {"type":"assistant", ...}
```

**Solution:**
This is a logging event format display issue and does not affect actual functionality. It will be fixed in the next version. You can ignore these log outputs.

**Related Issue:** [#96](https://github.com/cexll/myclaude/issues/96)

---

### Q2: Gemini cannot read files ignored by `.gitignore`

**Problem:**
When using `codeagent-wrapper --backend gemini`, files in directories like `.claude/` that are ignored by `.gitignore` cannot be read.

**Solution:**
- **Option 1:** Remove `.claude/` from your `.gitignore` file
- **Option 2:** Ensure files that need to be read are not in the `.gitignore` list

**Related Issue:** [#75](https://github.com/cexll/myclaude/issues/75)

---

### Q3: `/dev` command parallel execution is very slow

**Problem:**
Using the `/dev` command for simple features takes too long (over 30 minutes) with no visibility into task progress.

**Solution:**
1. **Check logs:** Review `C:\Users\User\AppData\Local\Temp\codeagent-wrapper-*.log` to identify bottlenecks
2. **Adjust backend:**
   - Try faster models like `gpt-5.1-codex-max`
   - Running in WSL may be significantly faster
3. **Workspace:** Use a single repository instead of a monorepo with multiple sub-projects

**Related Issue:** [#77](https://github.com/cexll/myclaude/issues/77)

---

### Q4: Codex permission denied with new Go version

**Problem:**
After upgrading to the new Go-based Codex implementation, execution fails with permission-denied errors.

**Solution:**
Add the following configuration to `~/.codex/config.toml` (Windows: `c:\user\.codex\config.toml`):
```toml
model = "gpt-5.1-codex-max"
model_reasoning_effort = "high"
model_reasoning_summary = "detailed"
approval_policy = "never"
sandbox_mode = "workspace-write"
disable_response_storage = true
network_access = true
```

**Key settings:**
- `approval_policy = "never"` - Remove approval restrictions
- `sandbox_mode = "workspace-write"` - Allow workspace write access
- `network_access = true` - Enable network access

**Related Issue:** [#31](https://github.com/cexll/myclaude/issues/31)

---

### Q5: How to disable the default bypass/skip-permissions mode

**Background:**
By default, codeagent-wrapper enables bypass mode for both the Codex and Claude backends:
- `CODEX_BYPASS_SANDBOX=true` - Bypasses Codex sandbox restrictions
- `CODEAGENT_SKIP_PERMISSIONS=true` - Skips Claude permission prompts

**To disable (if you need sandbox/permission protection):**
```bash
export CODEX_BYPASS_SANDBOX=false
export CODEAGENT_SKIP_PERMISSIONS=false
```

Or add to your shell profile (`~/.zshrc` or `~/.bashrc`):
```bash
echo 'export CODEX_BYPASS_SANDBOX=false' >> ~/.zshrc
echo 'export CODEAGENT_SKIP_PERMISSIONS=false' >> ~/.zshrc
```

**Note:** Disabling bypass mode will require manual approval for certain operations.

---

**Still having issues?** Visit [GitHub Issues](https://github.com/cexll/myclaude/issues) to search or report new issues.

---

## Documentation

### Core Guides
- **[Codeagent-Wrapper Guide](docs/CODEAGENT-WRAPPER.md)** - Multi-backend execution wrapper
- **[Hooks Documentation](docs/HOOKS.md)** - Custom hooks and automation
142  README_CN.md

@@ -2,7 +2,7 @@

[](https://www.gnu.org/licenses/agpl-3.0)
[](https://claude.ai/code)
[](https://github.com/cexll/myclaude)
[](https://github.com/cexll/myclaude)

> AI-powered development automation - multi-backend execution architecture (Codex/Claude/Gemini)

@@ -30,41 +30,6 @@ python3 install.py --install-dir ~/.claude

## Workflows Overview

### 0. OmO Multi-Agent Orchestrator (Recommended for Complex Tasks)

**A multi-agent orchestration system that intelligently routes tasks to specialized agents based on risk signals.**

```bash
/omo "analyze and fix this authentication bug"
```

**Agent Hierarchy:**

| Agent | Role | Backend | Model |
|-------|------|------|------|
| `oracle` | Technical advisor | Claude | claude-opus-4-5 |
| `librarian` | External research | Claude | claude-sonnet-4-5 |
| `explore` | Codebase search | OpenCode | grok-code |
| `develop` | Code implementation | Codex | gpt-5.2 |
| `frontend-ui-ux-engineer` | UI/UX specialist | Gemini | gemini-3-pro |
| `document-writer` | Documentation | Gemini | gemini-3-flash |

**Routing Signals (Not a Fixed Pipeline):**
- Code location unclear → `explore`
- External library/API → `librarian`
- Risky/multi-file change → `oracle`
- Implementation needed → `develop` / `frontend-ui-ux-engineer`

**Common Recipes:**
- Explain code: `explore`
- Small fix with a known location: `develop` directly
- Bug fix, location unknown: `explore → develop`
- Cross-module refactor: `explore → oracle → develop`
- External API integration: `explore + librarian → oracle → develop`

**Best For:** complex bug investigation, multi-file refactoring, architecture decisions

---

### 1. Dev Workflow (Recommended)

**The primary workflow for most development tasks.**

@@ -317,10 +282,8 @@ $Env:PATH = "$HOME\bin;$Env:PATH"
```

```batch
REM cmd.exe - persistent for the current user (prefer the PowerShell method above)
REM WARNING: this expands %PATH%, which includes the system PATH, causing duplication
REM Note: use reg add instead of setx to avoid the 1024-character truncation limit
reg add "HKCU\Environment" /v Path /t REG_EXPAND_SZ /d "%USERPROFILE%\bin;%PATH%" /f
REM cmd.exe - persistent for the current user
setx PATH "%USERPROFILE%\bin;%PATH%"
```

---

@@ -370,105 +333,6 @@ python3 install.py --module dev --force

---

## FAQ

### Q1: `codeagent-wrapper` fails with "Unknown event format"

**Problem:**
Running `codeagent-wrapper` prints the error:
```
Unknown event format: {"type":"turn.started"}
Unknown event format: {"type":"assistant", ...}
```

**Solution:**
This is a display issue in the log event stream and does not affect actual execution. A fix is expected in the next version. You can ignore this log output when troubleshooting other problems.

**Related Issue:** [#96](https://github.com/cexll/myclaude/issues/96)

---

### Q2: Gemini cannot read files ignored by `.gitignore`

**Problem:**
When using `codeagent-wrapper --backend gemini`, files in directories ignored by `.gitignore`, such as `.claude/`, cannot be read.

**Solution:**
- **Option 1:** Stop ignoring `.claude/` in the project root `.gitignore`
- **Option 2:** Make sure the files that need to be read are not on the `.gitignore` ignore list

**Related Issue:** [#75](https://github.com/cexll/myclaude/issues/75)

---

### Q3: `/dev` command parallel execution is very slow

**Problem:**
Developing a simple feature with the `/dev` command takes too long (over 30 minutes), with no visibility into task status.

**Solution:**
1. **Check logs:** Review `C:\Users\User\AppData\Local\Temp\codeagent-wrapper-*.log` to identify bottlenecks
2. **Adjust the backend:**
   - Try faster models such as `gpt-5.1-codex-max`
   - Running under WSL may be significantly faster
3. **Workspace:** Use a standalone repository rather than a monorepo containing multiple sub-projects

**Related Issue:** [#77](https://github.com/cexll/myclaude/issues/77)

---

### Q4: Permission denied with the new Go-based Codex

**Problem:**
After upgrading to the new Go-based Codex implementation, permission-denied errors occur.

**Solution:**
Add the following configuration to `~/.codex/config.toml` (Windows: `c:\user\.codex\config.toml`):
```toml
model = "gpt-5.1-codex-max"
model_reasoning_effort = "high"
model_reasoning_summary = "detailed"
approval_policy = "never"
sandbox_mode = "workspace-write"
disable_response_storage = true
network_access = true
```

**Key settings:**
- `approval_policy = "never"` - removes approval restrictions
- `sandbox_mode = "workspace-write"` - allows workspace write access
- `network_access = true` - enables network access

**Related Issue:** [#31](https://github.com/cexll/myclaude/issues/31)

---

### Q5: Permission denied or sandbox restrictions during execution

**Problem:**
Running codeagent-wrapper hits permission errors or sandbox restrictions.

**Solution:**
Set the following environment variables:
```bash
export CODEX_BYPASS_SANDBOX=true
export CODEAGENT_SKIP_PERMISSIONS=true
```

Or add them to your shell profile (`~/.zshrc` or `~/.bashrc`):
```bash
echo 'export CODEX_BYPASS_SANDBOX=true' >> ~/.zshrc
echo 'export CODEAGENT_SKIP_PERMISSIONS=true' >> ~/.zshrc
```

**Note:** These settings bypass security restrictions; use them only in trusted environments.

---

**Still have questions?** Visit [GitHub Issues](https://github.com/cexll/myclaude/issues) to search or file a new issue.

---

## License

AGPL-3.0 License - see [LICENSE](LICENSE)
37  bmad-agile-workflow/.claude-plugin/marketplace.json (new file)

@@ -0,0 +1,37 @@
{
  "name": "bmad-agile-workflow",
  "source": "./",
  "description": "Full BMAD agile workflow with role-based agents (PO, Architect, SM, Dev, QA) and interactive approval gates",
  "version": "1.0.0",
  "author": {
    "name": "Claude Code Dev Workflows",
    "url": "https://github.com/cexll/myclaude"
  },
  "homepage": "https://github.com/cexll/myclaude",
  "repository": "https://github.com/cexll/myclaude",
  "license": "MIT",
  "keywords": [
    "bmad",
    "agile",
    "scrum",
    "product-owner",
    "architect",
    "developer",
    "qa",
    "workflow-orchestration"
  ],
  "category": "workflows",
  "strict": false,
  "commands": [
    "./commands/bmad-pilot.md"
  ],
  "agents": [
    "./agents/bmad-po.md",
    "./agents/bmad-architect.md",
    "./agents/bmad-sm.md",
    "./agents/bmad-dev.md",
    "./agents/bmad-qa.md",
    "./agents/bmad-orchestrator.md",
    "./agents/bmad-review.md"
  ]
}

@@ -1,9 +0,0 @@
{
  "name": "bmad",
  "description": "Full BMAD agile workflow with role-based agents (PO, Architect, SM, Dev, QA) and interactive approval gates",
  "version": "5.6.1",
  "author": {
    "name": "cexll",
    "email": "cexll@cexll.com"
  }
}
39  codeagent-wrapper/.github/workflows/ci.yml (vendored)

@@ -1,39 +0,0 @@
name: CI

on:
  push:
    branches: [main, master]
  pull_request:

permissions:
  contents: read

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        go-version: ["1.21", "1.22"]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}
          cache: true
      - name: Test
        run: make test
      - name: Build
        run: make build

  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: "1.22"
          cache: true
      - name: Lint
        run: make lint
12  codeagent-wrapper/.gitignore (vendored)

@@ -1,7 +1,4 @@
# Build artifacts
bin/
codeagent
codeagent.exe
codeagent-wrapper
codeagent-wrapper.exe
*.test
@@ -12,12 +9,3 @@ coverage*.out
cover.out
cover_*.out
coverage.html

# Logs
*.log

# Temp files
*.tmp
*.swp
*~
.DS_Store
@@ -1,38 +0,0 @@
GO ?= go

BINARY ?= codeagent
CMD_PKG := ./cmd/codeagent

TOOLS_BIN := $(CURDIR)/bin
TOOLCHAIN ?= go1.22.0
GOLANGCI_LINT_VERSION := v1.56.2
STATICCHECK_VERSION := v0.4.7

GOLANGCI_LINT := $(TOOLS_BIN)/golangci-lint
STATICCHECK := $(TOOLS_BIN)/staticcheck

.PHONY: build test lint clean install

build:
	$(GO) build -o $(BINARY) $(CMD_PKG)

test:
	$(GO) test ./...

$(GOLANGCI_LINT):
	@mkdir -p $(TOOLS_BIN)
	GOTOOLCHAIN=$(TOOLCHAIN) GOBIN=$(TOOLS_BIN) $(GO) install github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)

$(STATICCHECK):
	@mkdir -p $(TOOLS_BIN)
	GOTOOLCHAIN=$(TOOLCHAIN) GOBIN=$(TOOLS_BIN) $(GO) install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION)

lint: $(GOLANGCI_LINT) $(STATICCHECK)
	GOTOOLCHAIN=$(TOOLCHAIN) $(GOLANGCI_LINT) run ./...
	GOTOOLCHAIN=$(TOOLCHAIN) $(STATICCHECK) ./...

clean:
	@python3 -c 'import glob, os; paths=["codeagent","codeagent.exe","codeagent-wrapper","codeagent-wrapper.exe","coverage.out","cover.out","coverage.html"]; paths += glob.glob("coverage*.out") + glob.glob("cover_*.out") + glob.glob("*.test"); [os.remove(p) for p in paths if os.path.exists(p)]'

install:
	$(GO) install $(CMD_PKG)
@@ -1,151 +0,0 @@
# codeagent-wrapper

`codeagent-wrapper` is a "multi-backend AI code agent" command-line wrapper written in Go: it wraps different AI tool backends (Codex / Claude / Gemini / Opencode) behind a single CLI entry point and provides a consistent experience for arguments, configuration, and session resumption.

Entry point: `cmd/codeagent/main.go` (built binary name: `codeagent`).

## Features

- Multi-backend support: `codex` / `claude` / `gemini` / `opencode`
- Unified command line: `codeagent [flags] <task>` / `codeagent resume <session_id> <task> [workdir]`
- Automatic stdin: tasks containing newlines, special characters, or very long text are passed via stdin automatically, avoiding shell-quoting pain; `-` can also be used explicitly
- Config merging: supports a config file plus `CODEAGENT_*` environment variables (viper)
- Agent presets: reads backend/model/prompt presets from `~/.codeagent/models.json`
- Parallel execution: `--parallel` reads a multi-task config from stdin and runs tasks concurrently according to their dependency topology
- Log cleanup: `codeagent cleanup` removes old logs (logs are written to the system temp directory)

## Installation

Requirements: Go 1.21+.

From the repository root, run:

```bash
go install ./cmd/codeagent
```

Verify the installation:

```bash
codeagent version
```

## Usage Examples

Simplest usage (default backend: `codex`):

```bash
codeagent "analyze the entry logic of internal/app/cli.go and suggest improvements"
```

Choosing a backend:

```bash
codeagent --backend claude "explain the parallel config format in internal/executor/parallel_config.go"
```

Specifying the working directory (second positional argument):

```bash
codeagent "search the current repo for potential data races" .
```

Reading the task explicitly from stdin (using `-`):

```bash
cat task.txt | codeagent -
```

Resuming a session:

```bash
codeagent resume <session_id> "continue the previous task"
```

Parallel mode (task config read from stdin; positional arguments are not allowed):

```bash
codeagent --parallel <<'EOF'
---TASK---
id: t1
workdir: .
backend: codex
---CONTENT---
List the project's main modules and their responsibilities.
---TASK---
id: t2
dependencies: t1
backend: claude
---CONTENT---
Based on t1's conclusions, identify refactoring risks and recommendations.
EOF
```

## Configuration

### Config file

Default lookup path (when `--config` is empty):

- `$HOME/.codeagent/config.(yaml|yml|json|toml|...)`

Example (YAML):

```yaml
backend: codex
model: gpt-4.1
skip-permissions: false
```

You can also pass `--config /path/to/config.yaml` explicitly.

### Environment variables (`CODEAGENT_*`)

Read via viper, with `-` automatically mapped to `_` (a minimal Go sketch of this mapping follows this README excerpt); common entries:

- `CODEAGENT_BACKEND` (`codex|claude|gemini|opencode`)
- `CODEAGENT_MODEL`
- `CODEAGENT_AGENT`
- `CODEAGENT_PROMPT_FILE`
- `CODEAGENT_REASONING_EFFORT`
- `CODEAGENT_SKIP_PERMISSIONS`
- `CODEAGENT_FULL_OUTPUT` (legacy output in parallel mode)
- `CODEAGENT_MAX_PARALLEL_WORKERS` (0 means unlimited, capped at 100)

### Agent presets (`~/.codeagent/models.json`)

You can define agent → backend/model/prompt mappings in `~/.codeagent/models.json` and select one with `--agent <name>`:

```json
{
  "default_backend": "opencode",
  "default_model": "opencode/grok-code",
  "agents": {
    "develop": {
      "backend": "codex",
      "model": "gpt-4.1",
      "prompt_file": "~/.codeagent/prompts/develop.md",
      "description": "Code development"
    }
  }
}
```

## Supported backends

The project has no built-in model capability itself; it relies on the corresponding CLIs being installed locally and available on `PATH`:

- `codex`: runs `codex e ...` (adds `--dangerously-bypass-approvals-and-sandbox` by default; set `CODEX_BYPASS_SANDBOX=false` to turn it off)
- `claude`: runs `claude -p ... --output-format stream-json` (skips permission prompts by default; set `CODEAGENT_SKIP_PERMISSIONS=false` to re-enable them)
- `gemini`: runs `gemini ... -o stream-json` (can load environment variables from `~/.gemini/.env`)
- `opencode`: runs `opencode run --format json`

## Development

```bash
make build
make test
make lint
make clean
```
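The following is a minimal, hypothetical sketch of how such a `CODEAGENT_*` environment mapping can be wired with spf13/viper. It is not taken from the wrapper's actual source; it only illustrates the prefix and the `-` → `_` replacement described above.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

// loadEnvSketch shows the assumed viper wiring: keys such as "skip-permissions"
// are resolved from CODEAGENT_SKIP_PERMISSIONS in the environment.
func loadEnvSketch() {
	v := viper.New()
	v.SetEnvPrefix("codeagent")                        // CODEAGENT_*
	v.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) // "-" -> "_"
	v.AutomaticEnv()                                   // read matching env vars on access

	v.SetDefault("backend", "codex")
	fmt.Println("backend:", v.GetString("backend"))
	fmt.Println("skip-permissions:", v.GetBool("skip-permissions"))
}
```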
135  codeagent-wrapper/backend.go (new file)

@@ -0,0 +1,135 @@
package main

import (
	"encoding/json"
	"os"
	"path/filepath"
)

// Backend defines the contract for invoking different AI CLI backends.
// Each backend is responsible for supplying the executable command and
// building the argument list based on the wrapper config.
type Backend interface {
	Name() string
	BuildArgs(cfg *Config, targetArg string) []string
	Command() string
}

type CodexBackend struct{}

func (CodexBackend) Name() string { return "codex" }
func (CodexBackend) Command() string {
	return "codex"
}
func (CodexBackend) BuildArgs(cfg *Config, targetArg string) []string {
	return buildCodexArgs(cfg, targetArg)
}

type ClaudeBackend struct{}

func (ClaudeBackend) Name() string { return "claude" }
func (ClaudeBackend) Command() string {
	return "claude"
}
func (ClaudeBackend) BuildArgs(cfg *Config, targetArg string) []string {
	return buildClaudeArgs(cfg, targetArg)
}

const maxClaudeSettingsBytes = 1 << 20 // 1MB

// loadMinimalEnvSettings extracts only the env section from ~/.claude/settings.json.
// Only string values are accepted; a missing file, a parse failure, or an oversized file returns nil.
func loadMinimalEnvSettings() map[string]string {
	home, err := os.UserHomeDir()
	if err != nil || home == "" {
		return nil
	}

	settingPath := filepath.Join(home, ".claude", "settings.json")
	info, err := os.Stat(settingPath)
	if err != nil || info.Size() > maxClaudeSettingsBytes {
		return nil
	}

	data, err := os.ReadFile(settingPath)
	if err != nil {
		return nil
	}

	var cfg struct {
		Env map[string]any `json:"env"`
	}
	if err := json.Unmarshal(data, &cfg); err != nil {
		return nil
	}
	if len(cfg.Env) == 0 {
		return nil
	}

	env := make(map[string]string, len(cfg.Env))
	for k, v := range cfg.Env {
		s, ok := v.(string)
		if !ok {
			continue
		}
		env[k] = s
	}
	if len(env) == 0 {
		return nil
	}
	return env
}

func buildClaudeArgs(cfg *Config, targetArg string) []string {
	if cfg == nil {
		return nil
	}
	args := []string{"-p"}
	if cfg.SkipPermissions {
		args = append(args, "--dangerously-skip-permissions")
	}

	// Prevent infinite recursion: disable all setting sources (user, project, local)
	// This ensures a clean execution environment without CLAUDE.md or skills that would trigger codeagent
	args = append(args, "--setting-sources", "")

	if cfg.Mode == "resume" {
		if cfg.SessionID != "" {
			// Claude CLI uses -r <session_id> for resume.
			args = append(args, "-r", cfg.SessionID)
		}
	}
	// Note: claude CLI doesn't support -C flag; workdir set via cmd.Dir

	args = append(args, "--output-format", "stream-json", "--verbose", targetArg)

	return args
}

type GeminiBackend struct{}

func (GeminiBackend) Name() string { return "gemini" }
func (GeminiBackend) Command() string {
	return "gemini"
}
func (GeminiBackend) BuildArgs(cfg *Config, targetArg string) []string {
	return buildGeminiArgs(cfg, targetArg)
}

func buildGeminiArgs(cfg *Config, targetArg string) []string {
	if cfg == nil {
		return nil
	}
	args := []string{"-o", "stream-json", "-y"}

	if cfg.Mode == "resume" {
		if cfg.SessionID != "" {
			args = append(args, "-r", cfg.SessionID)
		}
	}
	// Note: gemini CLI doesn't support -C flag; workdir set via cmd.Dir

	args = append(args, "-p", targetArg)

	return args
}
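To illustrate how the `Backend` interface above is consumed, here is a minimal, hypothetical driver sketch. It is not the wrapper's real executor (which streams output, logs, and handles timeouts elsewhere in the codebase); it only shows the shape of the call: pick a backend, build the argument list from a `Config`, and run the CLI with the working directory applied through `cmd.Dir`, as the comments above note.

```go
package main

import (
	"os"
	"os/exec"
)

// runOnce is a simplified, hypothetical driver for a Backend implementation.
// It builds the CLI arguments from the wrapper Config and runs the backend
// binary with the working directory set via cmd.Dir.
func runOnce(b Backend, cfg *Config, task string) error {
	args := b.BuildArgs(cfg, task) // e.g. ["-p", "--setting-sources", "", "--output-format", "stream-json", ...]
	cmd := exec.Command(b.Command(), args...)
	cmd.Dir = cfg.WorkDir // claude/gemini have no -C flag, so the workdir is applied here
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

// Example: runOnce(ClaudeBackend{}, &Config{Mode: "new", WorkDir: "."}, "summarize this repo")
```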
@@ -1,4 +1,4 @@
package backend
package main

import (
	"bytes"
@@ -6,16 +6,13 @@ import (
	"path/filepath"
	"reflect"
	"testing"

	config "codeagent-wrapper/internal/config"
)

func TestClaudeBuildArgs_ModesAndPermissions(t *testing.T) {
	backend := ClaudeBackend{}

	t.Run("new mode omits skip-permissions when env disabled", func(t *testing.T) {
		t.Setenv("CODEAGENT_SKIP_PERMISSIONS", "false")
		cfg := &config.Config{Mode: "new", WorkDir: "/repo"}
	t.Run("new mode omits skip-permissions by default", func(t *testing.T) {
		cfg := &Config{Mode: "new", WorkDir: "/repo"}
		got := backend.BuildArgs(cfg, "todo")
		want := []string{"-p", "--setting-sources", "", "--output-format", "stream-json", "--verbose", "todo"}
		if !reflect.DeepEqual(got, want) {
@@ -23,8 +20,8 @@ func TestClaudeBuildArgs_ModesAndPermissions(t *testing.T) {
		}
	})

	t.Run("new mode includes skip-permissions by default", func(t *testing.T) {
		cfg := &config.Config{Mode: "new", SkipPermissions: false}
	t.Run("new mode can opt-in skip-permissions", func(t *testing.T) {
		cfg := &Config{Mode: "new", SkipPermissions: true}
		got := backend.BuildArgs(cfg, "-")
		want := []string{"-p", "--dangerously-skip-permissions", "--setting-sources", "", "--output-format", "stream-json", "--verbose", "-"}
		if !reflect.DeepEqual(got, want) {
@@ -33,8 +30,7 @@ func TestClaudeBuildArgs_ModesAndPermissions(t *testing.T) {
	})

	t.Run("resume mode includes session id", func(t *testing.T) {
		t.Setenv("CODEAGENT_SKIP_PERMISSIONS", "false")
		cfg := &config.Config{Mode: "resume", SessionID: "sid-123", WorkDir: "/ignored"}
		cfg := &Config{Mode: "resume", SessionID: "sid-123", WorkDir: "/ignored"}
		got := backend.BuildArgs(cfg, "resume-task")
		want := []string{"-p", "--setting-sources", "", "-r", "sid-123", "--output-format", "stream-json", "--verbose", "resume-task"}
		if !reflect.DeepEqual(got, want) {
@@ -43,8 +39,7 @@ func TestClaudeBuildArgs_ModesAndPermissions(t *testing.T) {
	})

	t.Run("resume mode without session still returns base flags", func(t *testing.T) {
		t.Setenv("CODEAGENT_SKIP_PERMISSIONS", "false")
		cfg := &config.Config{Mode: "resume", WorkDir: "/ignored"}
		cfg := &Config{Mode: "resume", WorkDir: "/ignored"}
		got := backend.BuildArgs(cfg, "follow-up")
		want := []string{"-p", "--setting-sources", "", "--output-format", "stream-json", "--verbose", "follow-up"}
		if !reflect.DeepEqual(got, want) {
@@ -53,7 +48,7 @@ func TestClaudeBuildArgs_ModesAndPermissions(t *testing.T) {
	})

	t.Run("resume mode can opt-in skip permissions", func(t *testing.T) {
		cfg := &config.Config{Mode: "resume", SessionID: "sid-123", SkipPermissions: true}
		cfg := &Config{Mode: "resume", SessionID: "sid-123", SkipPermissions: true}
		got := backend.BuildArgs(cfg, "resume-task")
		want := []string{"-p", "--dangerously-skip-permissions", "--setting-sources", "", "-r", "sid-123", "--output-format", "stream-json", "--verbose", "resume-task"}
		if !reflect.DeepEqual(got, want) {
@@ -68,48 +63,12 @@ func TestClaudeBuildArgs_ModesAndPermissions(t *testing.T) {
	})
}

func TestBackendBuildArgs_Model(t *testing.T) {
	t.Run("claude includes --model when set", func(t *testing.T) {
		t.Setenv("CODEAGENT_SKIP_PERMISSIONS", "false")
		backend := ClaudeBackend{}
		cfg := &config.Config{Mode: "new", Model: "opus"}
		got := backend.BuildArgs(cfg, "todo")
		want := []string{"-p", "--setting-sources", "", "--model", "opus", "--output-format", "stream-json", "--verbose", "todo"}
		if !reflect.DeepEqual(got, want) {
			t.Fatalf("got %v, want %v", got, want)
		}
	})

	t.Run("gemini includes -m when set", func(t *testing.T) {
		backend := GeminiBackend{}
		cfg := &config.Config{Mode: "new", Model: "gemini-3-pro-preview"}
		got := backend.BuildArgs(cfg, "task")
		want := []string{"-o", "stream-json", "-y", "-m", "gemini-3-pro-preview", "task"}
		if !reflect.DeepEqual(got, want) {
			t.Fatalf("got %v, want %v", got, want)
		}
	})

	t.Run("codex includes --model when set", func(t *testing.T) {
		const key = "CODEX_BYPASS_SANDBOX"
		t.Setenv(key, "false")

		backend := CodexBackend{}
		cfg := &config.Config{Mode: "new", WorkDir: "/tmp", Model: "o3"}
		got := backend.BuildArgs(cfg, "task")
		want := []string{"e", "--model", "o3", "--skip-git-repo-check", "-C", "/tmp", "--json", "task"}
		if !reflect.DeepEqual(got, want) {
			t.Fatalf("got %v, want %v", got, want)
		}
	})
}

func TestClaudeBuildArgs_GeminiAndCodexModes(t *testing.T) {
	t.Run("gemini new mode defaults workdir", func(t *testing.T) {
		backend := GeminiBackend{}
		cfg := &config.Config{Mode: "new", WorkDir: "/workspace"}
		cfg := &Config{Mode: "new", WorkDir: "/workspace"}
		got := backend.BuildArgs(cfg, "task")
		want := []string{"-o", "stream-json", "-y", "task"}
		want := []string{"-o", "stream-json", "-y", "-p", "task"}
		if !reflect.DeepEqual(got, want) {
			t.Fatalf("got %v, want %v", got, want)
		}
@@ -117,9 +76,9 @@ func TestClaudeBuildArgs_GeminiAndCodexModes(t *testing.T) {

	t.Run("gemini resume mode uses session id", func(t *testing.T) {
		backend := GeminiBackend{}
		cfg := &config.Config{Mode: "resume", SessionID: "sid-999"}
		cfg := &Config{Mode: "resume", SessionID: "sid-999"}
		got := backend.BuildArgs(cfg, "resume")
		want := []string{"-o", "stream-json", "-y", "-r", "sid-999", "resume"}
		want := []string{"-o", "stream-json", "-y", "-r", "sid-999", "-p", "resume"}
		if !reflect.DeepEqual(got, want) {
			t.Fatalf("got %v, want %v", got, want)
		}
@@ -127,9 +86,9 @@ func TestClaudeBuildArgs_GeminiAndCodexModes(t *testing.T) {

	t.Run("gemini resume mode without session omits identifier", func(t *testing.T) {
		backend := GeminiBackend{}
		cfg := &config.Config{Mode: "resume"}
		cfg := &Config{Mode: "resume"}
		got := backend.BuildArgs(cfg, "resume")
		want := []string{"-o", "stream-json", "-y", "resume"}
		want := []string{"-o", "stream-json", "-y", "-p", "resume"}
		if !reflect.DeepEqual(got, want) {
			t.Fatalf("got %v, want %v", got, want)
		}
@@ -142,22 +101,13 @@ func TestClaudeBuildArgs_GeminiAndCodexModes(t *testing.T) {
		}
	})

	t.Run("gemini stdin mode uses -p flag", func(t *testing.T) {
		backend := GeminiBackend{}
		cfg := &config.Config{Mode: "new"}
		got := backend.BuildArgs(cfg, "-")
		want := []string{"-o", "stream-json", "-y", "-p", "-"}
		if !reflect.DeepEqual(got, want) {
			t.Fatalf("got %v, want %v", got, want)
		}
	})

	t.Run("codex build args omits bypass flag by default", func(t *testing.T) {
		const key = "CODEX_BYPASS_SANDBOX"
		t.Setenv(key, "false")
		t.Cleanup(func() { os.Unsetenv(key) })
		os.Unsetenv(key)

		backend := CodexBackend{}
		cfg := &config.Config{Mode: "new", WorkDir: "/tmp"}
		cfg := &Config{Mode: "new", WorkDir: "/tmp"}
		got := backend.BuildArgs(cfg, "task")
		want := []string{"e", "--skip-git-repo-check", "-C", "/tmp", "--json", "task"}
		if !reflect.DeepEqual(got, want) {
@@ -167,10 +117,11 @@ func TestClaudeBuildArgs_GeminiAndCodexModes(t *testing.T) {

	t.Run("codex build args includes bypass flag when enabled", func(t *testing.T) {
		const key = "CODEX_BYPASS_SANDBOX"
		t.Setenv(key, "true")
		t.Cleanup(func() { os.Unsetenv(key) })
		os.Setenv(key, "true")

		backend := CodexBackend{}
		cfg := &config.Config{Mode: "new", WorkDir: "/tmp"}
		cfg := &Config{Mode: "new", WorkDir: "/tmp"}
		got := backend.BuildArgs(cfg, "task")
		want := []string{"e", "--dangerously-bypass-approvals-and-sandbox", "--skip-git-repo-check", "-C", "/tmp", "--json", "task"}
		if !reflect.DeepEqual(got, want) {
@@ -206,7 +157,7 @@ func TestLoadMinimalEnvSettings(t *testing.T) {
	t.Setenv("USERPROFILE", home)

	t.Run("missing file returns empty", func(t *testing.T) {
		if got := LoadMinimalEnvSettings(); len(got) != 0 {
		if got := loadMinimalEnvSettings(); len(got) != 0 {
			t.Fatalf("got %v, want empty", got)
		}
	})
@@ -222,7 +173,7 @@ func TestLoadMinimalEnvSettings(t *testing.T) {
			t.Fatalf("WriteFile: %v", err)
		}

		got := LoadMinimalEnvSettings()
		got := loadMinimalEnvSettings()
		if got["ANTHROPIC_API_KEY"] != "secret" || got["FOO"] != "bar" {
			t.Fatalf("got %v, want keys present", got)
		}
@@ -236,7 +187,7 @@ func TestLoadMinimalEnvSettings(t *testing.T) {
			t.Fatalf("WriteFile: %v", err)
		}

		got := LoadMinimalEnvSettings()
		got := loadMinimalEnvSettings()
		if got["GOOD"] != "ok" {
			t.Fatalf("got %v, want GOOD=ok", got)
		}
@@ -251,72 +202,12 @@ func TestLoadMinimalEnvSettings(t *testing.T) {
	t.Run("oversized file returns empty", func(t *testing.T) {
		dir := filepath.Join(home, ".claude")
		path := filepath.Join(dir, "settings.json")
		data := bytes.Repeat([]byte("a"), MaxClaudeSettingsBytes+1)
		data := bytes.Repeat([]byte("a"), maxClaudeSettingsBytes+1)
		if err := os.WriteFile(path, data, 0o600); err != nil {
			t.Fatalf("WriteFile: %v", err)
		}
		if got := LoadMinimalEnvSettings(); len(got) != 0 {
		if got := loadMinimalEnvSettings(); len(got) != 0 {
			t.Fatalf("got %v, want empty", got)
		}
	})
}

func TestOpencodeBackend_BuildArgs(t *testing.T) {
	backend := OpencodeBackend{}

	t.Run("basic", func(t *testing.T) {
		cfg := &config.Config{Mode: "new"}
		got := backend.BuildArgs(cfg, "hello")
		want := []string{"run", "--format", "json", "hello"}
		if !reflect.DeepEqual(got, want) {
			t.Errorf("got %v, want %v", got, want)
		}
	})

	t.Run("with model", func(t *testing.T) {
		cfg := &config.Config{Mode: "new", Model: "opencode/grok-code"}
		got := backend.BuildArgs(cfg, "task")
		want := []string{"run", "-m", "opencode/grok-code", "--format", "json", "task"}
		if !reflect.DeepEqual(got, want) {
			t.Errorf("got %v, want %v", got, want)
		}
	})

	t.Run("resume mode", func(t *testing.T) {
		cfg := &config.Config{Mode: "resume", SessionID: "ses_123", Model: "opencode/grok-code"}
		got := backend.BuildArgs(cfg, "follow-up")
		want := []string{"run", "-m", "opencode/grok-code", "-s", "ses_123", "--format", "json", "follow-up"}
		if !reflect.DeepEqual(got, want) {
			t.Errorf("got %v, want %v", got, want)
		}
	})

	t.Run("resume without session", func(t *testing.T) {
		cfg := &config.Config{Mode: "resume"}
		got := backend.BuildArgs(cfg, "task")
		want := []string{"run", "--format", "json", "task"}
		if !reflect.DeepEqual(got, want) {
			t.Errorf("got %v, want %v", got, want)
		}
	})

	t.Run("stdin mode omits dash", func(t *testing.T) {
		cfg := &config.Config{Mode: "new"}
		got := backend.BuildArgs(cfg, "-")
		want := []string{"run", "--format", "json"}
		if !reflect.DeepEqual(got, want) {
			t.Errorf("got %v, want %v", got, want)
		}
	})
}

func TestOpencodeBackend_Interface(t *testing.T) {
	backend := OpencodeBackend{}

	if backend.Name() != "opencode" {
		t.Errorf("Name() = %q, want %q", backend.Name(), "opencode")
	}
	if backend.Command() != "opencode" {
		t.Errorf("Command() = %q, want %q", backend.Command(), "opencode")
	}
}
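The `OpencodeBackend` exercised by the removed tests above is not included in this diff. Purely as an illustration, here is a hypothetical, self-contained sketch of argument building that would satisfy the expectations in `TestOpencodeBackend_BuildArgs`; the project's actual implementation may differ.

```go
package main

// sketchConfig is a minimal stand-in for the old config.Config used by the
// removed OpenCode tests above (hypothetical, for illustration only).
type sketchConfig struct {
	Mode      string
	Model     string
	SessionID string
}

// opencodeArgsSketch mirrors the tested order: run [-m model] [-s session] --format json [task].
func opencodeArgsSketch(cfg sketchConfig, targetArg string) []string {
	args := []string{"run"}
	if cfg.Model != "" {
		args = append(args, "-m", cfg.Model)
	}
	if cfg.Mode == "resume" && cfg.SessionID != "" {
		args = append(args, "-s", cfg.SessionID)
	}
	args = append(args, "--format", "json")
	if targetArg != "-" { // stdin mode omits the dash; the task arrives on stdin
		args = append(args, targetArg)
	}
	return args
}
```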
39  codeagent-wrapper/bench_test.go (new file)

@@ -0,0 +1,39 @@
package main

import (
	"testing"
)

// BenchmarkLoggerWrite measures log write performance.
func BenchmarkLoggerWrite(b *testing.B) {
	logger, err := NewLogger()
	if err != nil {
		b.Fatal(err)
	}
	defer logger.Close()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		logger.Info("benchmark log message")
	}
	b.StopTimer()
	logger.Flush()
}

// BenchmarkLoggerConcurrentWrite measures concurrent log write performance.
func BenchmarkLoggerConcurrentWrite(b *testing.B) {
	logger, err := NewLogger()
	if err != nil {
		b.Fatal(err)
	}
	defer logger.Close()

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			logger.Info("concurrent benchmark log message")
		}
	})
	b.StopTimer()
	logger.Flush()
}
@@ -1,7 +0,0 @@
package main

import app "codeagent-wrapper/internal/app"

func main() {
	app.Run()
}

@@ -1,4 +1,4 @@
package wrapper
package main

import (
	"bufio"
@@ -11,20 +11,9 @@ import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/goccy/go-json"
)

func stripTimestampPrefix(line string) string {
	line = strings.TrimSpace(line)
	if strings.HasPrefix(line, "{") {
		var evt struct {
			Message string `json:"message"`
		}
		if err := json.Unmarshal([]byte(line), &evt); err == nil && evt.Message != "" {
			return evt.Message
		}
	}
	if !strings.HasPrefix(line, "[") {
		return line
	}
287
codeagent-wrapper/config.go
Normal file
287
codeagent-wrapper/config.go
Normal file
@@ -0,0 +1,287 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Config holds CLI configuration
|
||||
type Config struct {
|
||||
Mode string // "new" or "resume"
|
||||
Task string
|
||||
SessionID string
|
||||
WorkDir string
|
||||
ExplicitStdin bool
|
||||
Timeout int
|
||||
Backend string
|
||||
SkipPermissions bool
|
||||
MaxParallelWorkers int
|
||||
}
|
||||
|
||||
// ParallelConfig defines the JSON schema for parallel execution
|
||||
type ParallelConfig struct {
|
||||
Tasks []TaskSpec `json:"tasks"`
|
||||
GlobalBackend string `json:"backend,omitempty"`
|
||||
}
|
||||
|
||||
// TaskSpec describes an individual task entry in the parallel config
|
||||
type TaskSpec struct {
|
||||
ID string `json:"id"`
|
||||
Task string `json:"task"`
|
||||
WorkDir string `json:"workdir,omitempty"`
|
||||
Dependencies []string `json:"dependencies,omitempty"`
|
||||
SessionID string `json:"session_id,omitempty"`
|
||||
Backend string `json:"backend,omitempty"`
|
||||
Mode string `json:"-"`
|
||||
UseStdin bool `json:"-"`
|
||||
Context context.Context `json:"-"`
|
||||
}
|
||||
|
||||
// TaskResult captures the execution outcome of a task
|
||||
type TaskResult struct {
|
||||
TaskID string `json:"task_id"`
|
||||
ExitCode int `json:"exit_code"`
|
||||
Message string `json:"message"`
|
||||
SessionID string `json:"session_id"`
|
||||
Error string `json:"error"`
|
||||
LogPath string `json:"log_path"`
|
||||
// Structured report fields
|
||||
Coverage string `json:"coverage,omitempty"` // extracted coverage percentage (e.g., "92%")
|
||||
CoverageNum float64 `json:"coverage_num,omitempty"` // numeric coverage for comparison
|
||||
CoverageTarget float64 `json:"coverage_target,omitempty"` // target coverage (default 90)
|
||||
FilesChanged []string `json:"files_changed,omitempty"` // list of changed files
|
||||
KeyOutput string `json:"key_output,omitempty"` // brief summary of what was done
|
||||
TestsPassed int `json:"tests_passed,omitempty"` // number of tests passed
|
||||
TestsFailed int `json:"tests_failed,omitempty"` // number of tests failed
|
||||
sharedLog bool
|
||||
}
|
||||
|
||||
var backendRegistry = map[string]Backend{
|
||||
"codex": CodexBackend{},
|
||||
"claude": ClaudeBackend{},
|
||||
"gemini": GeminiBackend{},
|
||||
}
|
||||
|
||||
func selectBackend(name string) (Backend, error) {
|
||||
key := strings.ToLower(strings.TrimSpace(name))
|
||||
if key == "" {
|
||||
key = defaultBackendName
|
||||
}
|
||||
if backend, ok := backendRegistry[key]; ok {
|
||||
return backend, nil
|
||||
}
|
||||
return nil, fmt.Errorf("unsupported backend %q", name)
|
||||
}
|
||||
|
||||
func envFlagEnabled(key string) bool {
|
||||
val, ok := os.LookupEnv(key)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
val = strings.TrimSpace(strings.ToLower(val))
|
||||
switch val {
|
||||
case "", "0", "false", "no", "off":
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func parseBoolFlag(val string, defaultValue bool) bool {
|
||||
val = strings.TrimSpace(strings.ToLower(val))
|
||||
switch val {
|
||||
case "1", "true", "yes", "on":
|
||||
return true
|
||||
case "0", "false", "no", "off":
|
||||
return false
|
||||
default:
|
||||
return defaultValue
|
||||
}
|
||||
}
|
||||
|
||||
func parseParallelConfig(data []byte) (*ParallelConfig, error) {
|
||||
trimmed := bytes.TrimSpace(data)
|
||||
if len(trimmed) == 0 {
|
||||
return nil, fmt.Errorf("parallel config is empty")
|
||||
}
|
||||
|
||||
tasks := strings.Split(string(trimmed), "---TASK---")
|
||||
var cfg ParallelConfig
|
||||
seen := make(map[string]struct{})
|
||||
|
||||
taskIndex := 0
|
||||
for _, taskBlock := range tasks {
|
||||
taskBlock = strings.TrimSpace(taskBlock)
|
||||
if taskBlock == "" {
|
||||
continue
|
||||
}
|
||||
taskIndex++
|
||||
|
||||
parts := strings.SplitN(taskBlock, "---CONTENT---", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, fmt.Errorf("task block #%d missing ---CONTENT--- separator", taskIndex)
|
||||
}
|
||||
|
||||
meta := strings.TrimSpace(parts[0])
|
||||
content := strings.TrimSpace(parts[1])
|
||||
|
||||
task := TaskSpec{WorkDir: defaultWorkdir}
|
||||
for _, line := range strings.Split(meta, "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
kv := strings.SplitN(line, ":", 2)
|
||||
if len(kv) != 2 {
|
||||
continue
|
||||
}
|
||||
key := strings.TrimSpace(kv[0])
|
||||
value := strings.TrimSpace(kv[1])
|
||||
|
||||
switch key {
|
||||
case "id":
|
||||
task.ID = value
|
||||
case "workdir":
|
||||
task.WorkDir = value
|
||||
case "session_id":
|
||||
task.SessionID = value
|
||||
task.Mode = "resume"
|
||||
case "backend":
|
||||
task.Backend = value
|
||||
case "dependencies":
|
||||
for _, dep := range strings.Split(value, ",") {
|
||||
dep = strings.TrimSpace(dep)
|
||||
if dep != "" {
|
||||
task.Dependencies = append(task.Dependencies, dep)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if task.Mode == "" {
|
||||
task.Mode = "new"
|
||||
}
|
||||
|
||||
if task.ID == "" {
|
||||
return nil, fmt.Errorf("task block #%d missing id field", taskIndex)
|
||||
}
|
||||
if content == "" {
|
||||
return nil, fmt.Errorf("task block #%d (%q) missing content", taskIndex, task.ID)
|
||||
}
|
||||
if task.Mode == "resume" && strings.TrimSpace(task.SessionID) == "" {
|
||||
return nil, fmt.Errorf("task block #%d (%q) has empty session_id", taskIndex, task.ID)
|
||||
}
|
||||
if _, exists := seen[task.ID]; exists {
|
||||
return nil, fmt.Errorf("task block #%d has duplicate id: %s", taskIndex, task.ID)
|
||||
}
|
||||
|
||||
task.Task = content
|
||||
cfg.Tasks = append(cfg.Tasks, task)
|
||||
seen[task.ID] = struct{}{}
|
||||
}
|
||||
|
||||
if len(cfg.Tasks) == 0 {
|
||||
return nil, fmt.Errorf("no tasks found")
|
||||
}
|
||||
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
func parseArgs() (*Config, error) {
	args := os.Args[1:]
	if len(args) == 0 {
		return nil, fmt.Errorf("task required")
	}

	backendName := defaultBackendName
	skipPermissions := envFlagEnabled("CODEAGENT_SKIP_PERMISSIONS")
	filtered := make([]string, 0, len(args))
	for i := 0; i < len(args); i++ {
		arg := args[i]
		switch {
		case arg == "--backend":
			if i+1 >= len(args) {
				return nil, fmt.Errorf("--backend flag requires a value")
			}
			backendName = args[i+1]
			i++
			continue
		case strings.HasPrefix(arg, "--backend="):
			value := strings.TrimPrefix(arg, "--backend=")
			if value == "" {
				return nil, fmt.Errorf("--backend flag requires a value")
			}
			backendName = value
			continue
		case arg == "--skip-permissions", arg == "--dangerously-skip-permissions":
			skipPermissions = true
			continue
		case strings.HasPrefix(arg, "--skip-permissions="):
			skipPermissions = parseBoolFlag(strings.TrimPrefix(arg, "--skip-permissions="), skipPermissions)
			continue
		case strings.HasPrefix(arg, "--dangerously-skip-permissions="):
			skipPermissions = parseBoolFlag(strings.TrimPrefix(arg, "--dangerously-skip-permissions="), skipPermissions)
			continue
		}
		filtered = append(filtered, arg)
	}

	if len(filtered) == 0 {
		return nil, fmt.Errorf("task required")
	}
	args = filtered

	cfg := &Config{WorkDir: defaultWorkdir, Backend: backendName, SkipPermissions: skipPermissions}
	cfg.MaxParallelWorkers = resolveMaxParallelWorkers()

	if args[0] == "resume" {
		if len(args) < 3 {
			return nil, fmt.Errorf("resume mode requires: resume <session_id> <task>")
		}
		cfg.Mode = "resume"
		cfg.SessionID = strings.TrimSpace(args[1])
		if cfg.SessionID == "" {
			return nil, fmt.Errorf("resume mode requires non-empty session_id")
		}
		cfg.Task = args[2]
		cfg.ExplicitStdin = (args[2] == "-")
		if len(args) > 3 {
			cfg.WorkDir = args[3]
		}
	} else {
		cfg.Mode = "new"
		cfg.Task = args[0]
		cfg.ExplicitStdin = (args[0] == "-")
		if len(args) > 1 {
			cfg.WorkDir = args[1]
		}
	}

	return cfg, nil
}

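// Illustrative invocations (not part of the change set), following the parsing
// above; the binary name is assumed from the module path codeagent-wrapper.
// The first positional argument is the task (or "resume"), an optional trailing
// positional is the workdir, and "-" requests the task text on stdin.
//
//	codeagent-wrapper "fix the failing login test" ./repo
//	codeagent-wrapper --backend=claude --skip-permissions "refactor the parser"
//	codeagent-wrapper resume <session_id> "continue the previous task" ./repo
//	echo "long task description" | codeagent-wrapper -
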
const maxParallelWorkersLimit = 100

func resolveMaxParallelWorkers() int {
	raw := strings.TrimSpace(os.Getenv("CODEAGENT_MAX_PARALLEL_WORKERS"))
	if raw == "" {
		return 0
	}

	value, err := strconv.Atoi(raw)
	if err != nil || value < 0 {
		logWarn(fmt.Sprintf("Invalid CODEAGENT_MAX_PARALLEL_WORKERS=%q, falling back to unlimited", raw))
		return 0
	}

	if value > maxParallelWorkersLimit {
		logWarn(fmt.Sprintf("CODEAGENT_MAX_PARALLEL_WORKERS=%d exceeds limit, capping at %d", value, maxParallelWorkersLimit))
		return maxParallelWorkersLimit
	}

	return value
}

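// Illustrative behaviour (not part of the change set): the parallelism knob is
// clamped to [0, maxParallelWorkersLimit], where 0 means unlimited workers.
//
//	CODEAGENT_MAX_PARALLEL_WORKERS unset or ""  -> 0 (unlimited)
//	CODEAGENT_MAX_PARALLEL_WORKERS=8            -> 8
//	CODEAGENT_MAX_PARALLEL_WORKERS=-1           -> 0 (warning logged, unlimited)
//	CODEAGENT_MAX_PARALLEL_WORKERS=500          -> 100 (warning logged, capped)
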
@@ -1,4 +1,4 @@
|
||||
package executor
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -14,98 +14,15 @@ import (
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
backend "codeagent-wrapper/internal/backend"
|
||||
config "codeagent-wrapper/internal/config"
|
||||
ilogger "codeagent-wrapper/internal/logger"
|
||||
parser "codeagent-wrapper/internal/parser"
|
||||
utils "codeagent-wrapper/internal/utils"
|
||||
)
|
||||
|
||||
const postMessageTerminateDelay = 1 * time.Second
|
||||
const forceKillWaitTimeout = 5 * time.Second
|
||||
|
||||
// Defaults duplicated from wrapper for module decoupling.
|
||||
const (
|
||||
defaultWorkdir = "."
|
||||
defaultCoverageTarget = 90.0
|
||||
defaultBackendName = "codex"
|
||||
|
||||
codexLogLineLimit = 1000
|
||||
stderrCaptureLimit = 4 * 1024
|
||||
)
|
||||
|
||||
const (
|
||||
// stdout close reasons
|
||||
stdoutCloseReasonWait = "wait-done"
|
||||
stdoutCloseReasonDrain = "drain-timeout"
|
||||
stdoutCloseReasonCtx = "context-cancel"
|
||||
stdoutDrainTimeout = 100 * time.Millisecond
|
||||
)
|
||||
|
||||
// Hook points (tests can override inside this package).
|
||||
var (
|
||||
selectBackendFn = backend.Select
|
||||
commandContext = exec.CommandContext
|
||||
terminateCommandFn = terminateCommand
|
||||
)
|
||||
|
||||
var forceKillDelay atomic.Int32
|
||||
|
||||
func init() {
|
||||
forceKillDelay.Store(5) // seconds - default value
|
||||
}
|
||||
|
||||
type (
|
||||
Backend = backend.Backend
|
||||
Config = config.Config
|
||||
Logger = ilogger.Logger
|
||||
)
|
||||
|
||||
type minimalClaudeSettings = backend.MinimalClaudeSettings
|
||||
|
||||
func loadMinimalClaudeSettings() minimalClaudeSettings { return backend.LoadMinimalClaudeSettings() }
|
||||
|
||||
func loadGeminiEnv() map[string]string { return backend.LoadGeminiEnv() }
|
||||
|
||||
func NewLogger() (*Logger, error) { return ilogger.NewLogger() }
|
||||
|
||||
func NewLoggerWithSuffix(suffix string) (*Logger, error) { return ilogger.NewLoggerWithSuffix(suffix) }
|
||||
|
||||
func setLogger(l *Logger) { ilogger.SetLogger(l) }
|
||||
|
||||
func closeLogger() error { return ilogger.CloseLogger() }
|
||||
|
||||
func activeLogger() *Logger { return ilogger.ActiveLogger() }
|
||||
|
||||
func logInfo(msg string) { ilogger.LogInfo(msg) }
|
||||
|
||||
func logWarn(msg string) { ilogger.LogWarn(msg) }
|
||||
|
||||
func logError(msg string) { ilogger.LogError(msg) }
|
||||
|
||||
func logConcurrencyPlanning(limit, total int) { ilogger.LogConcurrencyPlanning(limit, total) }
|
||||
|
||||
func logConcurrencyState(event, taskID string, active, limit int) {
|
||||
ilogger.LogConcurrencyState(event, taskID, active, limit)
|
||||
}
|
||||
|
||||
func parseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(string), onMessage func(), onComplete func()) (message, threadID string) {
|
||||
return parser.ParseJSONStreamInternal(r, warnFn, infoFn, onMessage, onComplete)
|
||||
}
|
||||
|
||||
func sanitizeOutput(s string) string { return utils.SanitizeOutput(s) }
|
||||
|
||||
func safeTruncate(s string, maxLen int) string { return utils.SafeTruncate(s, maxLen) }
|
||||
|
||||
func min(a, b int) int { return utils.Min(a, b) }
|
||||
|
||||
// commandRunner abstracts exec.Cmd for testability
|
||||
type commandRunner interface {
|
||||
Start() error
|
||||
Wait() error
|
||||
StdoutPipe() (io.ReadCloser, error)
|
||||
StderrPipe() (io.ReadCloser, error)
|
||||
StdinPipe() (io.WriteCloser, error)
|
||||
SetStderr(io.Writer)
|
||||
SetDir(string)
|
||||
@@ -146,13 +63,6 @@ func (r *realCmd) StdoutPipe() (io.ReadCloser, error) {
|
||||
return r.cmd.StdoutPipe()
|
||||
}
|
||||
|
||||
func (r *realCmd) StderrPipe() (io.ReadCloser, error) {
|
||||
if r.cmd == nil {
|
||||
return nil, errors.New("command is nil")
|
||||
}
|
||||
return r.cmd.StderrPipe()
|
||||
}
|
||||
|
||||
func (r *realCmd) StdinPipe() (io.WriteCloser, error) {
|
||||
if r.cmd == nil {
|
||||
return nil, errors.New("command is nil")
|
||||
@@ -311,21 +221,14 @@ func newTaskLoggerHandle(taskID string) taskLoggerHandle {
|
||||
}
|
||||
|
||||
// defaultRunCodexTaskFn is the default implementation of runCodexTaskFn (exposed for test reset)
|
||||
func DefaultRunCodexTaskFn(task TaskSpec, timeout int) TaskResult {
|
||||
func defaultRunCodexTaskFn(task TaskSpec, timeout int) TaskResult {
|
||||
if task.WorkDir == "" {
|
||||
task.WorkDir = defaultWorkdir
|
||||
}
|
||||
if task.Mode == "" {
|
||||
task.Mode = "new"
|
||||
}
|
||||
if strings.TrimSpace(task.PromptFile) != "" {
|
||||
prompt, err := ReadAgentPromptFile(task.PromptFile, false)
|
||||
if err != nil {
|
||||
return TaskResult{TaskID: task.ID, ExitCode: 1, Error: "failed to read prompt file: " + err.Error()}
|
||||
}
|
||||
task.Task = WrapTaskWithAgentPrompt(prompt, task.Task)
|
||||
}
|
||||
if task.UseStdin || ShouldUseStdin(task.Task, false) {
|
||||
if task.UseStdin || shouldUseStdin(task.Task, false) {
|
||||
task.UseStdin = true
|
||||
}
|
||||
|
||||
@@ -344,10 +247,12 @@ func DefaultRunCodexTaskFn(task TaskSpec, timeout int) TaskResult {
|
||||
if parentCtx == nil {
|
||||
parentCtx = context.Background()
|
||||
}
|
||||
return RunCodexTaskWithContext(parentCtx, task, backend, "", nil, nil, false, true, timeout)
|
||||
return runCodexTaskWithContext(parentCtx, task, backend, nil, false, true, timeout)
|
||||
}
|
||||
|
||||
func TopologicalSort(tasks []TaskSpec) ([][]TaskSpec, error) {
|
||||
var runCodexTaskFn = defaultRunCodexTaskFn
|
||||
|
||||
func topologicalSort(tasks []TaskSpec) ([][]TaskSpec, error) {
|
||||
idToTask := make(map[string]TaskSpec, len(tasks))
|
||||
indegree := make(map[string]int, len(tasks))
|
||||
adj := make(map[string][]string, len(tasks))
|
||||
@@ -413,16 +318,12 @@ func TopologicalSort(tasks []TaskSpec) ([][]TaskSpec, error) {
|
||||
return layers, nil
|
||||
}
|
||||
|
||||
func ExecuteConcurrent(layers [][]TaskSpec, timeout int, runTask func(TaskSpec, int) TaskResult) []TaskResult {
|
||||
maxWorkers := config.ResolveMaxParallelWorkers()
|
||||
return ExecuteConcurrentWithContext(context.Background(), layers, timeout, maxWorkers, runTask)
|
||||
func executeConcurrent(layers [][]TaskSpec, timeout int) []TaskResult {
|
||||
maxWorkers := resolveMaxParallelWorkers()
|
||||
return executeConcurrentWithContext(context.Background(), layers, timeout, maxWorkers)
|
||||
}
|
||||
|
||||
func ExecuteConcurrentWithContext(parentCtx context.Context, layers [][]TaskSpec, timeout int, maxWorkers int, runTask func(TaskSpec, int) TaskResult) []TaskResult {
|
||||
if runTask == nil {
|
||||
runTask = DefaultRunCodexTaskFn
|
||||
}
|
||||
|
||||
func executeConcurrentWithContext(parentCtx context.Context, layers [][]TaskSpec, timeout int, maxWorkers int) []TaskResult {
|
||||
totalTasks := 0
|
||||
for _, layer := range layers {
|
||||
totalTasks += len(layer)
|
||||
@@ -553,7 +454,7 @@ func ExecuteConcurrentWithContext(parentCtx context.Context, layers [][]TaskSpec
|
||||
|
||||
printTaskStart(ts.ID, taskLogPath, handle.shared)
|
||||
|
||||
res := runTask(ts, timeout)
|
||||
res := runCodexTaskFn(ts, timeout)
|
||||
if taskLogPath != "" {
|
||||
if res.LogPath == "" || (handle.shared && handle.logger != nil && res.LogPath == handle.logger.Path()) {
|
||||
res.LogPath = taskLogPath
|
||||
@@ -618,14 +519,14 @@ func getStatusSymbols() (success, warning, failed string) {
|
||||
return "✓", "⚠️", "✗"
|
||||
}
|
||||
|
||||
func GenerateFinalOutput(results []TaskResult) string {
|
||||
return GenerateFinalOutputWithMode(results, true) // default to summary mode
|
||||
func generateFinalOutput(results []TaskResult) string {
|
||||
return generateFinalOutputWithMode(results, true) // default to summary mode
|
||||
}
|
||||
|
||||
// generateFinalOutputWithMode generates output based on mode
|
||||
// summaryOnly=true: structured report - every token has value
|
||||
// summaryOnly=false: full output with complete messages (legacy behavior)
|
||||
func GenerateFinalOutputWithMode(results []TaskResult, summaryOnly bool) string {
|
||||
func generateFinalOutputWithMode(results []TaskResult, summaryOnly bool) string {
|
||||
var sb strings.Builder
|
||||
successSymbol, warningSymbol, failedSymbol := getStatusSymbols()
|
||||
|
||||
@@ -838,20 +739,11 @@ func buildCodexArgs(cfg *Config, targetArg string) []string {
|
||||
|
||||
args := []string{"e"}
|
||||
|
||||
// Default to bypass sandbox unless CODEX_BYPASS_SANDBOX=false
|
||||
if cfg.Yolo || config.EnvFlagDefaultTrue("CODEX_BYPASS_SANDBOX") {
|
||||
logWarn("YOLO mode or CODEX_BYPASS_SANDBOX enabled: running without approval/sandbox protection")
|
||||
if envFlagEnabled("CODEX_BYPASS_SANDBOX") {
|
||||
logWarn("CODEX_BYPASS_SANDBOX=true: running without approval/sandbox protection")
|
||||
args = append(args, "--dangerously-bypass-approvals-and-sandbox")
|
||||
}
|
||||
|
||||
if model := strings.TrimSpace(cfg.Model); model != "" {
|
||||
args = append(args, "--model", model)
|
||||
}
|
||||
|
||||
if reasoningEffort := strings.TrimSpace(cfg.ReasoningEffort); reasoningEffort != "" {
|
||||
args = append(args, "-c", "model_reasoning_effort="+reasoningEffort)
|
||||
}
|
||||
|
||||
args = append(args, "--skip-git-repo-check")
|
||||
|
||||
if isResume {
|
||||
@@ -870,41 +762,37 @@ func buildCodexArgs(cfg *Config, targetArg string) []string {
|
||||
)
|
||||
}
|
||||
|
||||
func RunCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backend Backend, defaultCommandName string, defaultArgsBuilder func(*Config, string) []string, customArgs []string, useCustomArgs bool, silent bool, timeoutSec int) TaskResult {
|
||||
taskCtx := taskSpec.Context
|
||||
func runCodexTask(taskSpec TaskSpec, silent bool, timeoutSec int) TaskResult {
|
||||
return runCodexTaskWithContext(context.Background(), taskSpec, nil, nil, false, silent, timeoutSec)
|
||||
}
|
||||
|
||||
func runCodexProcess(parentCtx context.Context, codexArgs []string, taskText string, useStdin bool, timeoutSec int) (message, threadID string, exitCode int) {
|
||||
res := runCodexTaskWithContext(parentCtx, TaskSpec{Task: taskText, WorkDir: defaultWorkdir, Mode: "new", UseStdin: useStdin}, nil, codexArgs, true, false, timeoutSec)
|
||||
return res.Message, res.SessionID, res.ExitCode
|
||||
}
|
||||
|
||||
func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backend Backend, customArgs []string, useCustomArgs bool, silent bool, timeoutSec int) TaskResult {
|
||||
if parentCtx == nil {
|
||||
parentCtx = taskCtx
|
||||
parentCtx = taskSpec.Context
|
||||
}
|
||||
if parentCtx == nil {
|
||||
parentCtx = context.Background()
|
||||
}
|
||||
|
||||
result := TaskResult{TaskID: taskSpec.ID}
|
||||
injectedLogger := taskLoggerFromContext(taskCtx)
|
||||
if injectedLogger == nil {
|
||||
injectedLogger = taskLoggerFromContext(parentCtx)
|
||||
}
|
||||
injectedLogger := taskLoggerFromContext(parentCtx)
|
||||
logger := injectedLogger
|
||||
|
||||
cfg := &Config{
|
||||
Mode: taskSpec.Mode,
|
||||
Task: taskSpec.Task,
|
||||
SessionID: taskSpec.SessionID,
|
||||
WorkDir: taskSpec.WorkDir,
|
||||
Model: taskSpec.Model,
|
||||
ReasoningEffort: taskSpec.ReasoningEffort,
|
||||
SkipPermissions: taskSpec.SkipPermissions,
|
||||
Backend: defaultBackendName,
|
||||
Mode: taskSpec.Mode,
|
||||
Task: taskSpec.Task,
|
||||
SessionID: taskSpec.SessionID,
|
||||
WorkDir: taskSpec.WorkDir,
|
||||
Backend: defaultBackendName,
|
||||
}
|
||||
|
||||
commandName := strings.TrimSpace(defaultCommandName)
|
||||
if commandName == "" {
|
||||
commandName = defaultBackendName
|
||||
}
|
||||
argsBuilder := defaultArgsBuilder
|
||||
if argsBuilder == nil {
|
||||
argsBuilder = buildCodexArgs
|
||||
}
|
||||
commandName := codexCommand
|
||||
argsBuilder := buildCodexArgsFn
|
||||
if backend != nil {
|
||||
commandName = backend.Command()
|
||||
argsBuilder = backend.BuildArgs
|
||||
@@ -928,20 +816,6 @@ func RunCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
|
||||
return result
|
||||
}
|
||||
|
||||
var fileEnv map[string]string
|
||||
if cfg.Backend == "claude" {
|
||||
settings := loadMinimalClaudeSettings()
|
||||
fileEnv = settings.Env
|
||||
if cfg.Mode != "resume" && strings.TrimSpace(cfg.Model) == "" && settings.Model != "" {
|
||||
cfg.Model = settings.Model
|
||||
}
|
||||
}
|
||||
|
||||
// Load gemini env from ~/.gemini/.env if exists
|
||||
if cfg.Backend == "gemini" {
|
||||
fileEnv = loadGeminiEnv()
|
||||
}
|
||||
|
||||
useStdin := taskSpec.UseStdin
|
||||
targetArg := taskSpec.Task
|
||||
if useStdin {
|
||||
@@ -1041,27 +915,9 @@ func RunCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
|
||||
|
||||
cmd := newCommandRunner(ctx, commandName, codexArgs...)
|
||||
|
||||
if len(fileEnv) > 0 {
|
||||
cmd.SetEnv(fileEnv)
|
||||
}
|
||||
|
||||
envBackend := backend
|
||||
if envBackend == nil && cfg.Backend != "" {
|
||||
if b, err := selectBackendFn(cfg.Backend); err == nil {
|
||||
envBackend = b
|
||||
}
|
||||
}
|
||||
|
||||
if envBackend != nil {
|
||||
baseURL, apiKey := config.ResolveBackendConfig(cfg.Backend)
|
||||
if agentName := strings.TrimSpace(taskSpec.Agent); agentName != "" {
|
||||
agentBackend, _, _, _, agentBaseURL, agentAPIKey, _ := config.ResolveAgentConfig(agentName)
|
||||
if strings.EqualFold(strings.TrimSpace(agentBackend), strings.TrimSpace(cfg.Backend)) {
|
||||
baseURL, apiKey = agentBaseURL, agentAPIKey
|
||||
}
|
||||
}
|
||||
if injected := envBackend.Env(baseURL, apiKey); len(injected) > 0 {
|
||||
cmd.SetEnv(injected)
|
||||
if cfg.Backend == "claude" {
|
||||
if env := loadMinimalEnvSettings(); len(env) > 0 {
|
||||
cmd.SetEnv(env)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1083,43 +939,33 @@ func RunCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
|
||||
if cfg.Backend == "gemini" {
|
||||
stderrFilter = newFilteringWriter(os.Stderr, geminiNoisePatterns)
|
||||
stderrOut = stderrFilter
|
||||
} else if cfg.Backend == "codex" {
|
||||
stderrFilter = newFilteringWriter(os.Stderr, codexNoisePatterns)
|
||||
stderrOut = stderrFilter
|
||||
defer stderrFilter.Flush()
|
||||
}
|
||||
stderrWriters = append([]io.Writer{stderrOut}, stderrWriters...)
|
||||
}
|
||||
stderr, err := cmd.StderrPipe()
|
||||
if err != nil {
|
||||
logErrorFn("Failed to create stderr pipe: " + err.Error())
|
||||
result.ExitCode = 1
|
||||
result.Error = attachStderr("failed to create stderr pipe: " + err.Error())
|
||||
return result
|
||||
if len(stderrWriters) == 1 {
|
||||
cmd.SetStderr(stderrWriters[0])
|
||||
} else {
|
||||
cmd.SetStderr(io.MultiWriter(stderrWriters...))
|
||||
}
|
||||
|
||||
var stdinPipe io.WriteCloser
|
||||
var err error
|
||||
if useStdin {
|
||||
stdinPipe, err = cmd.StdinPipe()
|
||||
if err != nil {
|
||||
logErrorFn("Failed to create stdin pipe: " + err.Error())
|
||||
result.ExitCode = 1
|
||||
result.Error = attachStderr("failed to create stdin pipe: " + err.Error())
|
||||
closeWithReason(stderr, "stdin-pipe-failed")
|
||||
return result
|
||||
}
|
||||
}
|
||||
|
||||
stderrDone := make(chan error, 1)
|
||||
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
logErrorFn("Failed to create stdout pipe: " + err.Error())
|
||||
result.ExitCode = 1
|
||||
result.Error = attachStderr("failed to create stdout pipe: " + err.Error())
|
||||
closeWithReason(stderr, "stdout-pipe-failed")
|
||||
if stdinPipe != nil {
|
||||
_ = stdinPipe.Close()
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
@@ -1155,11 +1001,6 @@ func RunCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
|
||||
logInfoFn(fmt.Sprintf("Starting %s with args: %s %s...", commandName, commandName, strings.Join(codexArgs[:min(5, len(codexArgs))], " ")))
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
closeWithReason(stdout, "start-failed")
|
||||
closeWithReason(stderr, "start-failed")
|
||||
if stdinPipe != nil {
|
||||
_ = stdinPipe.Close()
|
||||
}
|
||||
if strings.Contains(err.Error(), "executable file not found") {
|
||||
msg := fmt.Sprintf("%s command not found in PATH", commandName)
|
||||
logErrorFn(msg)
|
||||
@@ -1178,15 +1019,6 @@ func RunCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
|
||||
logInfoFn(fmt.Sprintf("Log capturing to: %s", logger.Path()))
|
||||
}
|
||||
|
||||
// Start stderr drain AFTER we know the command started, but BEFORE cmd.Wait can close the pipe.
|
||||
go func() {
|
||||
_, copyErr := io.Copy(io.MultiWriter(stderrWriters...), stderr)
|
||||
if stderrFilter != nil {
|
||||
stderrFilter.Flush()
|
||||
}
|
||||
stderrDone <- copyErr
|
||||
}()
|
||||
|
||||
if useStdin && stdinPipe != nil {
|
||||
logInfoFn(fmt.Sprintf("Writing %d chars to stdin...", len(taskSpec.Task)))
|
||||
go func(data string) {
|
||||
@@ -1214,8 +1046,7 @@ func RunCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
|
||||
waitLoop:
|
||||
for {
|
||||
select {
|
||||
case err := <-waitCh:
|
||||
waitErr = err
|
||||
case waitErr = <-waitCh:
|
||||
break waitLoop
|
||||
case <-ctx.Done():
|
||||
ctxCancelled = true
|
||||
@@ -1226,17 +1057,8 @@ waitLoop:
|
||||
terminated = true
|
||||
}
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case err := <-waitCh:
|
||||
waitErr = err
|
||||
break waitLoop
|
||||
case <-time.After(forceKillWaitTimeout):
|
||||
if proc := cmd.Process(); proc != nil {
|
||||
_ = proc.Kill()
|
||||
}
|
||||
}
|
||||
}
|
||||
waitErr = <-waitCh
|
||||
break waitLoop
|
||||
case <-messageTimerCh:
|
||||
forcedAfterComplete = true
|
||||
messageTimerCh = nil
|
||||
@@ -1247,20 +1069,6 @@ waitLoop:
|
||||
terminated = true
|
||||
}
|
||||
}
|
||||
// Close pipes to unblock stream readers, then wait for process exit.
|
||||
closeWithReason(stdout, "terminate")
|
||||
closeWithReason(stderr, "terminate")
|
||||
for {
|
||||
select {
|
||||
case err := <-waitCh:
|
||||
waitErr = err
|
||||
break waitLoop
|
||||
case <-time.After(forceKillWaitTimeout):
|
||||
if proc := cmd.Process(); proc != nil {
|
||||
_ = proc.Kill()
|
||||
}
|
||||
}
|
||||
}
|
||||
case <-completeSeen:
|
||||
completeSeenObserved = true
|
||||
if messageTimer != nil {
|
||||
@@ -1302,9 +1110,11 @@ waitLoop:
|
||||
case parsed = <-parseCh:
|
||||
closeWithReason(stdout, stdoutCloseReasonWait)
|
||||
case <-messageSeen:
|
||||
messageSeenObserved = true
|
||||
closeWithReason(stdout, stdoutCloseReasonWait)
|
||||
parsed = <-parseCh
|
||||
case <-completeSeen:
|
||||
completeSeenObserved = true
|
||||
closeWithReason(stdout, stdoutCloseReasonWait)
|
||||
parsed = <-parseCh
|
||||
case <-drainTimer.C:
|
||||
@@ -1313,12 +1123,6 @@ waitLoop:
|
||||
}
|
||||
}
|
||||
|
||||
closeWithReason(stderr, stdoutCloseReasonWait)
|
||||
// Wait for stderr drain so stderrBuf / stderrLogger are not accessed concurrently.
|
||||
// Important: cmd.Wait can block on internal stderr copying if cmd.Stderr is a non-file writer.
|
||||
// We use StderrPipe and drain ourselves to avoid that deadlock class (common when children inherit pipes).
|
||||
<-stderrDone
|
||||
|
||||
if ctxErr := ctx.Err(); ctxErr != nil {
|
||||
if errors.Is(ctxErr, context.DeadlineExceeded) {
|
||||
result.ExitCode = 124
|
||||
@@ -1374,13 +1178,44 @@ waitLoop:
|
||||
return result
|
||||
}
|
||||
|
||||
func forwardSignals(ctx context.Context, cmd commandRunner, logErrorFn func(string)) {
|
||||
notify := signalNotifyFn
|
||||
stop := signalStopFn
|
||||
if notify == nil {
|
||||
notify = signal.Notify
|
||||
}
|
||||
if stop == nil {
|
||||
stop = signal.Stop
|
||||
}
|
||||
|
||||
sigCh := make(chan os.Signal, 1)
|
||||
notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
go func() {
|
||||
defer stop(sigCh)
|
||||
select {
|
||||
case sig := <-sigCh:
|
||||
logErrorFn(fmt.Sprintf("Received signal: %v", sig))
|
||||
if proc := cmd.Process(); proc != nil {
|
||||
_ = proc.Signal(syscall.SIGTERM)
|
||||
time.AfterFunc(time.Duration(forceKillDelay.Load())*time.Second, func() {
|
||||
if p := cmd.Process(); p != nil {
|
||||
_ = p.Kill()
|
||||
}
|
||||
})
|
||||
}
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func cancelReason(commandName string, ctx context.Context) string {
|
||||
if ctx == nil {
|
||||
return "Context cancelled"
|
||||
}
|
||||
|
||||
if commandName == "" {
|
||||
commandName = defaultBackendName
|
||||
commandName = codexCommand
|
||||
}
|
||||
|
||||
if errors.Is(ctx.Err(), context.DeadlineExceeded) {
|
||||
@@ -1432,7 +1267,7 @@ func terminateCommand(cmd commandRunner) *forceKillTimer {
|
||||
return nil
|
||||
}
|
||||
|
||||
_ = sendTermSignal(proc)
|
||||
_ = proc.Signal(syscall.SIGTERM)
|
||||
|
||||
done := make(chan struct{}, 1)
|
||||
timer := time.AfterFunc(time.Duration(forceKillDelay.Load())*time.Second, func() {
|
||||
@@ -1444,3 +1279,21 @@ func terminateCommand(cmd commandRunner) *forceKillTimer {
|
||||
|
||||
return &forceKillTimer{timer: timer, done: done}
|
||||
}
|
||||
|
||||
func terminateProcess(cmd commandRunner) *time.Timer {
|
||||
if cmd == nil {
|
||||
return nil
|
||||
}
|
||||
proc := cmd.Process()
|
||||
if proc == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
_ = proc.Signal(syscall.SIGTERM)
|
||||
|
||||
return time.AfterFunc(time.Duration(forceKillDelay.Load())*time.Second, func() {
|
||||
if p := cmd.Process(); p != nil {
|
||||
_ = p.Kill()
|
||||
}
|
||||
})
|
||||
}
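// Illustrative pairing (not part of the change set): terminateProcess is meant to
// be followed by a wait on the command, with the returned timer stopped once the
// process has exited so the delayed Kill does not fire.
//
//	timer := terminateProcess(cmd) // SIGTERM now, Kill after forceKillDelay seconds
//	err := cmd.Wait()
//	if timer != nil {
//		timer.Stop()
//	}
//	_ = err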
|
||||
@@ -1,4 +1,4 @@
|
||||
package wrapper
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
@@ -10,15 +10,13 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
executor "codeagent-wrapper/internal/executor"
|
||||
)
|
||||
|
||||
var executorTestTaskCounter atomic.Int64
|
||||
@@ -34,12 +32,7 @@ type execFakeProcess struct {
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func (p *execFakeProcess) Pid() int {
|
||||
if runtime.GOOS == "windows" {
|
||||
return 0
|
||||
}
|
||||
return p.pid
|
||||
}
|
||||
func (p *execFakeProcess) Pid() int { return p.pid }
|
||||
func (p *execFakeProcess) Kill() error {
|
||||
p.killed.Add(1)
|
||||
return nil
|
||||
@@ -91,8 +84,7 @@ func (rc *reasonReadCloser) record(reason string) {
|
||||
|
||||
type execFakeRunner struct {
|
||||
stdout io.ReadCloser
|
||||
stderr io.ReadCloser
|
||||
process executor.ProcessHandle
|
||||
process processHandle
|
||||
stdin io.WriteCloser
|
||||
dir string
|
||||
env map[string]string
|
||||
@@ -100,7 +92,6 @@ type execFakeRunner struct {
|
||||
waitDelay time.Duration
|
||||
startErr error
|
||||
stdoutErr error
|
||||
stderrErr error
|
||||
stdinErr error
|
||||
allowNilProcess bool
|
||||
started atomic.Bool
|
||||
@@ -128,15 +119,6 @@ func (f *execFakeRunner) StdoutPipe() (io.ReadCloser, error) {
|
||||
}
|
||||
return f.stdout, nil
|
||||
}
|
||||
func (f *execFakeRunner) StderrPipe() (io.ReadCloser, error) {
|
||||
if f.stderrErr != nil {
|
||||
return nil, f.stderrErr
|
||||
}
|
||||
if f.stderr == nil {
|
||||
f.stderr = io.NopCloser(strings.NewReader(""))
|
||||
}
|
||||
return f.stderr, nil
|
||||
}
|
||||
func (f *execFakeRunner) StdinPipe() (io.WriteCloser, error) {
|
||||
if f.stdinErr != nil {
|
||||
return nil, f.stdinErr
|
||||
@@ -159,7 +141,7 @@ func (f *execFakeRunner) SetEnv(env map[string]string) {
|
||||
f.env[k] = v
|
||||
}
|
||||
}
|
||||
func (f *execFakeRunner) Process() executor.ProcessHandle {
|
||||
func (f *execFakeRunner) Process() processHandle {
|
||||
if f.process != nil {
|
||||
return f.process
|
||||
}
|
||||
@@ -169,15 +151,220 @@ func (f *execFakeRunner) Process() executor.ProcessHandle {
|
||||
return &execFakeProcess{pid: 1}
|
||||
}
|
||||
|
||||
func TestExecutorHelperCoverage(t *testing.T) {
|
||||
t.Run("realCmdAndProcess", func(t *testing.T) {
|
||||
rc := &realCmd{}
|
||||
if err := rc.Start(); err == nil {
|
||||
t.Fatalf("expected error for nil command")
|
||||
}
|
||||
if err := rc.Wait(); err == nil {
|
||||
t.Fatalf("expected error for nil command")
|
||||
}
|
||||
if _, err := rc.StdoutPipe(); err == nil {
|
||||
t.Fatalf("expected error for nil command")
|
||||
}
|
||||
if _, err := rc.StdinPipe(); err == nil {
|
||||
t.Fatalf("expected error for nil command")
|
||||
}
|
||||
rc.SetStderr(io.Discard)
|
||||
if rc.Process() != nil {
|
||||
t.Fatalf("expected nil process")
|
||||
}
|
||||
rcWithCmd := &realCmd{cmd: &exec.Cmd{}}
|
||||
rcWithCmd.SetStderr(io.Discard)
|
||||
rcWithCmd.SetDir("/tmp")
|
||||
if rcWithCmd.cmd.Dir != "/tmp" {
|
||||
t.Fatalf("expected SetDir to set cmd.Dir, got %q", rcWithCmd.cmd.Dir)
|
||||
}
|
||||
echoCmd := exec.Command("echo", "ok")
|
||||
rcProc := &realCmd{cmd: echoCmd}
|
||||
stdoutPipe, err := rcProc.StdoutPipe()
|
||||
if err != nil {
|
||||
t.Fatalf("StdoutPipe error: %v", err)
|
||||
}
|
||||
stdinPipe, err := rcProc.StdinPipe()
|
||||
if err != nil {
|
||||
t.Fatalf("StdinPipe error: %v", err)
|
||||
}
|
||||
rcProc.SetStderr(io.Discard)
|
||||
if err := rcProc.Start(); err != nil {
|
||||
t.Fatalf("Start failed: %v", err)
|
||||
}
|
||||
_, _ = stdinPipe.Write([]byte{})
|
||||
_ = stdinPipe.Close()
|
||||
procHandle := rcProc.Process()
|
||||
if procHandle == nil {
|
||||
t.Fatalf("expected process handle")
|
||||
}
|
||||
_ = procHandle.Signal(syscall.SIGTERM)
|
||||
_ = procHandle.Kill()
|
||||
_ = rcProc.Wait()
|
||||
_, _ = io.ReadAll(stdoutPipe)
|
||||
|
||||
rp := &realProcess{}
|
||||
if rp.Pid() != 0 {
|
||||
t.Fatalf("nil process should have pid 0")
|
||||
}
|
||||
if rp.Kill() != nil {
|
||||
t.Fatalf("nil process Kill should be nil")
|
||||
}
|
||||
if rp.Signal(syscall.SIGTERM) != nil {
|
||||
t.Fatalf("nil process Signal should be nil")
|
||||
}
|
||||
rpLive := &realProcess{proc: &os.Process{Pid: 99}}
|
||||
if rpLive.Pid() != 99 {
|
||||
t.Fatalf("expected pid 99, got %d", rpLive.Pid())
|
||||
}
|
||||
_ = rpLive.Kill()
|
||||
_ = rpLive.Signal(syscall.SIGTERM)
|
||||
})
|
||||
|
||||
t.Run("topologicalSortAndSkip", func(t *testing.T) {
|
||||
layers, err := topologicalSort([]TaskSpec{{ID: "root"}, {ID: "child", Dependencies: []string{"root"}}})
|
||||
if err != nil || len(layers) != 2 {
|
||||
t.Fatalf("unexpected topological sort result: layers=%d err=%v", len(layers), err)
|
||||
}
|
||||
if _, err := topologicalSort([]TaskSpec{{ID: "cycle", Dependencies: []string{"cycle"}}}); err == nil {
|
||||
t.Fatalf("expected cycle detection error")
|
||||
}
|
||||
|
||||
failed := map[string]TaskResult{"root": {ExitCode: 1}}
|
||||
if skip, _ := shouldSkipTask(TaskSpec{ID: "child", Dependencies: []string{"root"}}, failed); !skip {
|
||||
t.Fatalf("should skip when dependency failed")
|
||||
}
|
||||
if skip, _ := shouldSkipTask(TaskSpec{ID: "leaf"}, failed); skip {
|
||||
t.Fatalf("should not skip task without dependencies")
|
||||
}
|
||||
if skip, _ := shouldSkipTask(TaskSpec{ID: "child-ok", Dependencies: []string{"root"}}, map[string]TaskResult{}); skip {
|
||||
t.Fatalf("should not skip when dependencies succeeded")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("cancelledTaskResult", func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
res := cancelledTaskResult("t1", ctx)
|
||||
if res.ExitCode != 130 {
|
||||
t.Fatalf("expected cancel exit code, got %d", res.ExitCode)
|
||||
}
|
||||
|
||||
timeoutCtx, timeoutCancel := context.WithTimeout(context.Background(), 0)
|
||||
defer timeoutCancel()
|
||||
res = cancelledTaskResult("t2", timeoutCtx)
|
||||
if res.ExitCode != 124 {
|
||||
t.Fatalf("expected timeout exit code, got %d", res.ExitCode)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("generateFinalOutputAndArgs", func(t *testing.T) {
|
||||
const key = "CODEX_BYPASS_SANDBOX"
|
||||
t.Cleanup(func() { os.Unsetenv(key) })
|
||||
os.Unsetenv(key)
|
||||
|
||||
out := generateFinalOutput([]TaskResult{
|
||||
{TaskID: "ok", ExitCode: 0},
|
||||
{TaskID: "fail", ExitCode: 1, Error: "boom"},
|
||||
})
|
||||
if !strings.Contains(out, "ok") || !strings.Contains(out, "fail") {
|
||||
t.Fatalf("unexpected summary output: %s", out)
|
||||
}
|
||||
// Test summary mode (default) - should have new format with ### headers
|
||||
out = generateFinalOutput([]TaskResult{{TaskID: "rich", ExitCode: 0, SessionID: "sess", LogPath: "/tmp/log", Message: "hello"}})
|
||||
if !strings.Contains(out, "### rich") {
|
||||
t.Fatalf("summary output missing task header: %s", out)
|
||||
}
|
||||
// Test full output mode - should have Session and Message
|
||||
out = generateFinalOutputWithMode([]TaskResult{{TaskID: "rich", ExitCode: 0, SessionID: "sess", LogPath: "/tmp/log", Message: "hello"}}, false)
|
||||
if !strings.Contains(out, "Session: sess") || !strings.Contains(out, "Log: /tmp/log") || !strings.Contains(out, "hello") {
|
||||
t.Fatalf("full output missing fields: %s", out)
|
||||
}
|
||||
|
||||
args := buildCodexArgs(&Config{Mode: "new", WorkDir: "/tmp"}, "task")
|
||||
if !slices.Equal(args, []string{"e", "--skip-git-repo-check", "-C", "/tmp", "--json", "task"}) {
|
||||
t.Fatalf("unexpected codex args: %+v", args)
|
||||
}
|
||||
args = buildCodexArgs(&Config{Mode: "resume", SessionID: "sess"}, "target")
|
||||
if !slices.Equal(args, []string{"e", "--skip-git-repo-check", "--json", "resume", "sess", "target"}) {
|
||||
t.Fatalf("unexpected resume args: %+v", args)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("generateFinalOutputASCIIMode", func(t *testing.T) {
|
||||
t.Setenv("CODEAGENT_ASCII_MODE", "true")
|
||||
|
||||
results := []TaskResult{
|
||||
{TaskID: "ok", ExitCode: 0, Coverage: "92%", CoverageNum: 92, CoverageTarget: 90, KeyOutput: "done"},
|
||||
{TaskID: "warn", ExitCode: 0, Coverage: "80%", CoverageNum: 80, CoverageTarget: 90, KeyOutput: "did"},
|
||||
{TaskID: "bad", ExitCode: 2, Error: "boom"},
|
||||
}
|
||||
out := generateFinalOutput(results)
|
||||
|
||||
for _, sym := range []string{"PASS", "WARN", "FAIL"} {
|
||||
if !strings.Contains(out, sym) {
|
||||
t.Fatalf("ASCII mode should include %q, got: %s", sym, out)
|
||||
}
|
||||
}
|
||||
for _, sym := range []string{"✓", "⚠️", "✗"} {
|
||||
if strings.Contains(out, sym) {
|
||||
t.Fatalf("ASCII mode should not include %q, got: %s", sym, out)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("generateFinalOutputUnicodeMode", func(t *testing.T) {
|
||||
t.Setenv("CODEAGENT_ASCII_MODE", "false")
|
||||
|
||||
results := []TaskResult{
|
||||
{TaskID: "ok", ExitCode: 0, Coverage: "92%", CoverageNum: 92, CoverageTarget: 90, KeyOutput: "done"},
|
||||
{TaskID: "warn", ExitCode: 0, Coverage: "80%", CoverageNum: 80, CoverageTarget: 90, KeyOutput: "did"},
|
||||
{TaskID: "bad", ExitCode: 2, Error: "boom"},
|
||||
}
|
||||
out := generateFinalOutput(results)
|
||||
|
||||
for _, sym := range []string{"✓", "⚠️", "✗"} {
|
||||
if !strings.Contains(out, sym) {
|
||||
t.Fatalf("Unicode mode should include %q, got: %s", sym, out)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("executeConcurrentWrapper", func(t *testing.T) {
|
||||
orig := runCodexTaskFn
|
||||
defer func() { runCodexTaskFn = orig }()
|
||||
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
|
||||
return TaskResult{TaskID: task.ID, ExitCode: 0, Message: "done"}
|
||||
}
|
||||
os.Setenv("CODEAGENT_MAX_PARALLEL_WORKERS", "1")
|
||||
defer os.Unsetenv("CODEAGENT_MAX_PARALLEL_WORKERS")
|
||||
|
||||
results := executeConcurrent([][]TaskSpec{{{ID: "wrap"}}}, 1)
|
||||
if len(results) != 1 || results[0].TaskID != "wrap" {
|
||||
t.Fatalf("unexpected wrapper results: %+v", results)
|
||||
}
|
||||
|
||||
unbounded := executeConcurrentWithContext(context.Background(), [][]TaskSpec{{{ID: "unbounded"}}}, 1, 0)
|
||||
if len(unbounded) != 1 || unbounded[0].ExitCode != 0 {
|
||||
t.Fatalf("unexpected unbounded result: %+v", unbounded)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
cancelled := executeConcurrentWithContext(ctx, [][]TaskSpec{{{ID: "cancel"}}}, 1, 1)
|
||||
if cancelled[0].ExitCode == 0 {
|
||||
t.Fatalf("expected cancelled result, got %+v", cancelled[0])
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestExecutorRunCodexTaskWithContext(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
origRunner := newCommandRunner
|
||||
defer func() { newCommandRunner = origRunner }()
|
||||
|
||||
t.Run("resumeMissingSessionID", func(t *testing.T) {
|
||||
executor.SetNewCommandRunner(func(ctx context.Context, name string, args ...string) executor.CommandRunner {
|
||||
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
|
||||
t.Fatalf("unexpected command execution for invalid resume config")
|
||||
return nil
|
||||
})
|
||||
t.Cleanup(func() { executor.SetNewCommandRunner(nil) })
|
||||
}
|
||||
|
||||
res := runCodexTaskWithContext(context.Background(), TaskSpec{Task: "payload", WorkDir: ".", Mode: "resume"}, nil, nil, false, false, 1)
|
||||
if res.ExitCode == 0 || !strings.Contains(res.Error, "session_id") {
|
||||
@@ -187,14 +374,13 @@ func TestExecutorRunCodexTaskWithContext(t *testing.T) {
|
||||
|
||||
t.Run("success", func(t *testing.T) {
|
||||
var firstStdout *reasonReadCloser
|
||||
executor.SetNewCommandRunner(func(ctx context.Context, name string, args ...string) executor.CommandRunner {
|
||||
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
|
||||
rc := newReasonReadCloser(`{"type":"item.completed","item":{"type":"agent_message","text":"hello"}}`)
|
||||
if firstStdout == nil {
|
||||
firstStdout = rc
|
||||
}
|
||||
return &execFakeRunner{stdout: rc, process: &execFakeProcess{pid: 1234}}
|
||||
})
|
||||
t.Cleanup(func() { executor.SetNewCommandRunner(nil) })
|
||||
}
|
||||
|
||||
res := runCodexTaskWithContext(context.Background(), TaskSpec{ID: "task-1", Task: "payload", WorkDir: "."}, nil, nil, false, false, 1)
|
||||
if res.Error != "" || res.Message != "hello" || res.ExitCode != 0 {
|
||||
@@ -224,18 +410,17 @@ func TestExecutorRunCodexTaskWithContext(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("startErrors", func(t *testing.T) {
|
||||
executor.SetNewCommandRunner(func(ctx context.Context, name string, args ...string) executor.CommandRunner {
|
||||
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
|
||||
return &execFakeRunner{startErr: errors.New("executable file not found"), process: &execFakeProcess{pid: 1}}
|
||||
})
|
||||
t.Cleanup(func() { executor.SetNewCommandRunner(nil) })
|
||||
}
|
||||
res := runCodexTaskWithContext(context.Background(), TaskSpec{Task: "payload", WorkDir: "."}, nil, nil, false, false, 1)
|
||||
if res.ExitCode != 127 {
|
||||
t.Fatalf("expected missing executable exit code, got %d", res.ExitCode)
|
||||
}
|
||||
|
||||
executor.SetNewCommandRunner(func(ctx context.Context, name string, args ...string) executor.CommandRunner {
|
||||
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
|
||||
return &execFakeRunner{startErr: errors.New("start failed"), process: &execFakeProcess{pid: 2}}
|
||||
})
|
||||
}
|
||||
res = runCodexTaskWithContext(context.Background(), TaskSpec{Task: "payload", WorkDir: "."}, nil, nil, false, false, 1)
|
||||
if res.ExitCode == 0 {
|
||||
t.Fatalf("expected non-zero exit on start failure")
|
||||
@@ -243,14 +428,13 @@ func TestExecutorRunCodexTaskWithContext(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("timeoutAndPipes", func(t *testing.T) {
|
||||
executor.SetNewCommandRunner(func(ctx context.Context, name string, args ...string) executor.CommandRunner {
|
||||
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
|
||||
return &execFakeRunner{
|
||||
stdout: newReasonReadCloser(`{"type":"item.completed","item":{"type":"agent_message","text":"slow"}}`),
|
||||
process: &execFakeProcess{pid: 5},
|
||||
waitDelay: 20 * time.Millisecond,
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() { executor.SetNewCommandRunner(nil) })
|
||||
}
|
||||
res := runCodexTaskWithContext(context.Background(), TaskSpec{Task: "payload", WorkDir: ".", UseStdin: true}, nil, nil, false, false, 0)
|
||||
if res.ExitCode == 0 {
|
||||
t.Fatalf("expected timeout result, got %+v", res)
|
||||
@@ -258,18 +442,17 @@ func TestExecutorRunCodexTaskWithContext(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("pipeErrors", func(t *testing.T) {
|
||||
executor.SetNewCommandRunner(func(ctx context.Context, name string, args ...string) executor.CommandRunner {
|
||||
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
|
||||
return &execFakeRunner{stdoutErr: errors.New("stdout fail"), process: &execFakeProcess{pid: 6}}
|
||||
})
|
||||
t.Cleanup(func() { executor.SetNewCommandRunner(nil) })
|
||||
}
|
||||
res := runCodexTaskWithContext(context.Background(), TaskSpec{Task: "payload", WorkDir: "."}, nil, nil, false, false, 1)
|
||||
if res.ExitCode == 0 {
|
||||
t.Fatalf("expected failure on stdout pipe error")
|
||||
}
|
||||
|
||||
executor.SetNewCommandRunner(func(ctx context.Context, name string, args ...string) executor.CommandRunner {
|
||||
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
|
||||
return &execFakeRunner{stdinErr: errors.New("stdin fail"), process: &execFakeProcess{pid: 7}}
|
||||
})
|
||||
}
|
||||
res = runCodexTaskWithContext(context.Background(), TaskSpec{Task: "payload", WorkDir: ".", UseStdin: true}, nil, nil, false, false, 1)
|
||||
if res.ExitCode == 0 {
|
||||
t.Fatalf("expected failure on stdin pipe error")
|
||||
@@ -282,14 +465,13 @@ func TestExecutorRunCodexTaskWithContext(t *testing.T) {
|
||||
if exitErr == nil {
|
||||
t.Fatalf("expected exec.ExitError")
|
||||
}
|
||||
executor.SetNewCommandRunner(func(ctx context.Context, name string, args ...string) executor.CommandRunner {
|
||||
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
|
||||
return &execFakeRunner{
|
||||
stdout: newReasonReadCloser(`{"type":"item.completed","item":{"type":"agent_message","text":"ignored"}}`),
|
||||
process: &execFakeProcess{pid: 8},
|
||||
waitErr: exitErr,
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() { executor.SetNewCommandRunner(nil) })
|
||||
}
|
||||
res := runCodexTaskWithContext(context.Background(), TaskSpec{Task: "payload", WorkDir: "."}, nil, nil, false, false, 1)
|
||||
if res.ExitCode == 0 {
|
||||
t.Fatalf("expected non-zero exit on wait error")
|
||||
@@ -297,14 +479,13 @@ func TestExecutorRunCodexTaskWithContext(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("contextCancelled", func(t *testing.T) {
|
||||
executor.SetNewCommandRunner(func(ctx context.Context, name string, args ...string) executor.CommandRunner {
|
||||
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
|
||||
return &execFakeRunner{
|
||||
stdout: newReasonReadCloser(`{"type":"item.completed","item":{"type":"agent_message","text":"cancel"}}`),
|
||||
process: &execFakeProcess{pid: 9},
|
||||
waitDelay: 10 * time.Millisecond,
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() { executor.SetNewCommandRunner(nil) })
|
||||
}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
res := runCodexTaskWithContext(ctx, TaskSpec{Task: "payload", WorkDir: "."}, nil, nil, false, false, 1)
|
||||
@@ -314,13 +495,12 @@ func TestExecutorRunCodexTaskWithContext(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("silentLogger", func(t *testing.T) {
|
||||
executor.SetNewCommandRunner(func(ctx context.Context, name string, args ...string) executor.CommandRunner {
|
||||
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
|
||||
return &execFakeRunner{
|
||||
stdout: newReasonReadCloser(`{"type":"item.completed","item":{"type":"agent_message","text":"quiet"}}`),
|
||||
process: &execFakeProcess{pid: 10},
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() { executor.SetNewCommandRunner(nil) })
|
||||
}
|
||||
_ = closeLogger()
|
||||
res := runCodexTaskWithContext(context.Background(), TaskSpec{Task: "payload", WorkDir: "."}, nil, nil, false, true, 1)
|
||||
if res.ExitCode != 0 || res.LogPath == "" {
|
||||
@@ -330,13 +510,12 @@ func TestExecutorRunCodexTaskWithContext(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("injectedLogger", func(t *testing.T) {
|
||||
executor.SetNewCommandRunner(func(ctx context.Context, name string, args ...string) executor.CommandRunner {
|
||||
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
|
||||
return &execFakeRunner{
|
||||
stdout: newReasonReadCloser(`{"type":"item.completed","item":{"type":"agent_message","text":"injected"}}`),
|
||||
process: &execFakeProcess{pid: 12},
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() { executor.SetNewCommandRunner(nil) })
|
||||
}
|
||||
_ = closeLogger()
|
||||
|
||||
injected, err := NewLoggerWithSuffix("executor-injected")
|
||||
@@ -348,7 +527,7 @@ func TestExecutorRunCodexTaskWithContext(t *testing.T) {
|
||||
_ = os.Remove(injected.Path())
|
||||
}()
|
||||
|
||||
ctx := executor.WithTaskLogger(context.Background(), injected)
|
||||
ctx := withTaskLogger(context.Background(), injected)
|
||||
res := runCodexTaskWithContext(ctx, TaskSpec{ID: "task-injected", Task: "payload", WorkDir: "."}, nil, nil, false, true, 1)
|
||||
if res.ExitCode != 0 || res.LogPath != injected.Path() {
|
||||
t.Fatalf("expected injected logger path, got %+v", res)
|
||||
@@ -368,13 +547,12 @@ func TestExecutorRunCodexTaskWithContext(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("contextLoggerWithoutParent", func(t *testing.T) {
|
||||
executor.SetNewCommandRunner(func(ctx context.Context, name string, args ...string) executor.CommandRunner {
|
||||
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
|
||||
return &execFakeRunner{
|
||||
stdout: newReasonReadCloser(`{"type":"item.completed","item":{"type":"agent_message","text":"ctx"}}`),
|
||||
process: &execFakeProcess{pid: 14},
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() { executor.SetNewCommandRunner(nil) })
|
||||
}
|
||||
_ = closeLogger()
|
||||
|
||||
taskLogger, err := NewLoggerWithSuffix("executor-taskctx")
|
||||
@@ -386,8 +564,8 @@ func TestExecutorRunCodexTaskWithContext(t *testing.T) {
|
||||
_ = os.Remove(taskLogger.Path())
|
||||
})
|
||||
|
||||
ctx := executor.WithTaskLogger(context.Background(), taskLogger)
|
||||
res := runCodexTaskWithContext(context.TODO(), TaskSpec{ID: "task-context", Task: "payload", WorkDir: ".", Context: ctx}, nil, nil, false, true, 1)
|
||||
ctx := withTaskLogger(context.Background(), taskLogger)
|
||||
res := runCodexTaskWithContext(nil, TaskSpec{ID: "task-context", Task: "payload", WorkDir: ".", Context: ctx}, nil, nil, false, true, 1)
|
||||
if res.ExitCode != 0 || res.LogPath != taskLogger.Path() {
|
||||
t.Fatalf("expected task logger to be reused from spec context, got %+v", res)
|
||||
}
|
||||
@@ -407,17 +585,16 @@ func TestExecutorRunCodexTaskWithContext(t *testing.T) {
|
||||
|
||||
t.Run("backendSetsDirAndNilContext", func(t *testing.T) {
|
||||
var rc *execFakeRunner
|
||||
executor.SetNewCommandRunner(func(ctx context.Context, name string, args ...string) executor.CommandRunner {
|
||||
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
|
||||
rc = &execFakeRunner{
|
||||
stdout: newReasonReadCloser(`{"type":"item.completed","item":{"type":"agent_message","text":"backend"}}`),
|
||||
process: &execFakeProcess{pid: 13},
|
||||
}
|
||||
return rc
|
||||
})
|
||||
t.Cleanup(func() { executor.SetNewCommandRunner(nil) })
|
||||
}
|
||||
|
||||
_ = closeLogger()
|
||||
res := runCodexTaskWithContext(context.TODO(), TaskSpec{ID: "task-backend", Task: "payload", WorkDir: "/tmp"}, ClaudeBackend{}, nil, false, false, 1)
|
||||
res := runCodexTaskWithContext(nil, TaskSpec{ID: "task-backend", Task: "payload", WorkDir: "/tmp"}, ClaudeBackend{}, nil, false, false, 1)
|
||||
if res.ExitCode != 0 || res.Message != "backend" {
|
||||
t.Fatalf("unexpected result: %+v", res)
|
||||
}
|
||||
@@ -426,36 +603,13 @@ func TestExecutorRunCodexTaskWithContext(t *testing.T) {
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("claudeSkipPermissionsPropagatesFromTaskSpec", func(t *testing.T) {
|
||||
t.Setenv("CODEAGENT_SKIP_PERMISSIONS", "false")
|
||||
var gotArgs []string
|
||||
executor.SetNewCommandRunner(func(ctx context.Context, name string, args ...string) executor.CommandRunner {
|
||||
gotArgs = append([]string(nil), args...)
|
||||
return &execFakeRunner{
|
||||
stdout: newReasonReadCloser(`{"type":"item.completed","item":{"type":"agent_message","text":"ok"}}`),
|
||||
process: &execFakeProcess{pid: 15},
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() { executor.SetNewCommandRunner(nil) })
|
||||
|
||||
_ = closeLogger()
|
||||
res := runCodexTaskWithContext(context.Background(), TaskSpec{ID: "task-skip", Task: "payload", WorkDir: ".", SkipPermissions: true}, ClaudeBackend{}, nil, false, false, 1)
|
||||
if res.ExitCode != 0 || res.Error != "" {
|
||||
t.Fatalf("unexpected result: %+v", res)
|
||||
}
|
||||
if !slices.Contains(gotArgs, "--dangerously-skip-permissions") {
|
||||
t.Fatalf("expected --dangerously-skip-permissions in args, got %v", gotArgs)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("missingMessage", func(t *testing.T) {
|
||||
executor.SetNewCommandRunner(func(ctx context.Context, name string, args ...string) executor.CommandRunner {
|
||||
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
|
||||
return &execFakeRunner{
|
||||
stdout: newReasonReadCloser(`{"type":"item.completed","item":{"type":"task","text":"noop"}}`),
|
||||
process: &execFakeProcess{pid: 11},
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() { executor.SetNewCommandRunner(nil) })
|
||||
}
|
||||
res := runCodexTaskWithContext(context.Background(), TaskSpec{Task: "payload", WorkDir: "."}, nil, nil, false, false, 1)
|
||||
if res.ExitCode == 0 {
|
||||
t.Fatalf("expected failure when no agent_message returned")
|
||||
@@ -481,7 +635,7 @@ func TestExecutorParallelLogIsolation(t *testing.T) {
|
||||
|
||||
origRun := runCodexTaskFn
|
||||
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
|
||||
logger := executor.TaskLoggerFromContext(task.Context)
|
||||
logger := taskLoggerFromContext(task.Context)
|
||||
if logger == nil {
|
||||
return TaskResult{TaskID: task.ID, ExitCode: 1, Error: "missing task logger"}
|
||||
}
|
||||
@@ -505,7 +659,7 @@ func TestExecutorParallelLogIsolation(t *testing.T) {
|
||||
os.Stderr = stderrW
|
||||
defer func() { os.Stderr = oldStderr }()
|
||||
|
||||
results := executeConcurrentWithContext(context.TODO(), [][]TaskSpec{{{ID: taskA}, {ID: taskB}}}, 1, -1)
|
||||
results := executeConcurrentWithContext(nil, [][]TaskSpec{{{ID: taskA}, {ID: taskB}}}, 1, -1)
|
||||
|
||||
_ = stderrW.Close()
|
||||
os.Stderr = oldStderr
|
||||
@@ -571,7 +725,7 @@ func TestConcurrentExecutorParallelLogIsolationAndClosure(t *testing.T) {
|
||||
t.Setenv("TMPDIR", tempDir)
|
||||
|
||||
oldArgs := os.Args
|
||||
os.Args = []string{wrapperName}
|
||||
os.Args = []string{defaultWrapperName}
|
||||
t.Cleanup(func() { os.Args = oldArgs })
|
||||
|
||||
mainLogger, err := NewLoggerWithSuffix("concurrent-main")
|
||||
@@ -617,7 +771,7 @@ func TestConcurrentExecutorParallelLogIsolationAndClosure(t *testing.T) {
|
||||
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
|
||||
readyCh <- struct{}{}
|
||||
|
||||
logger := executor.TaskLoggerFromContext(task.Context)
|
||||
logger := taskLoggerFromContext(task.Context)
|
||||
loggerCh <- taskLoggerInfo{taskID: task.ID, logger: logger}
|
||||
if logger == nil {
|
||||
return TaskResult{TaskID: task.ID, ExitCode: 1, Error: "missing task logger"}
|
||||
@@ -704,9 +858,15 @@ func TestConcurrentExecutorParallelLogIsolationAndClosure(t *testing.T) {
|
||||
}
|
||||
|
||||
for taskID, logger := range loggers {
|
||||
if !logger.IsClosed() {
|
||||
if !logger.closed.Load() {
|
||||
t.Fatalf("expected task logger to be closed for %q", taskID)
|
||||
}
|
||||
if logger.file == nil {
|
||||
t.Fatalf("expected task logger file to be non-nil for %q", taskID)
|
||||
}
|
||||
if _, err := logger.file.Write([]byte("x")); err == nil {
|
||||
t.Fatalf("expected task logger file to be closed for %q", taskID)
|
||||
}
|
||||
}
|
||||
|
||||
mainLogger.Flush()
|
||||
@@ -776,10 +936,10 @@ func parseTaskIDFromLogLine(line string) (string, bool) {
|
||||
}
|
||||
|
||||
func TestExecutorTaskLoggerContext(t *testing.T) {
|
||||
if executor.TaskLoggerFromContext(context.TODO()) != nil {
|
||||
t.Fatalf("expected nil logger from TODO context")
|
||||
if taskLoggerFromContext(nil) != nil {
|
||||
t.Fatalf("expected nil logger from nil context")
|
||||
}
|
||||
if executor.TaskLoggerFromContext(context.Background()) != nil {
|
||||
if taskLoggerFromContext(context.Background()) != nil {
|
||||
t.Fatalf("expected nil logger when context has no logger")
|
||||
}
|
||||
|
||||
@@ -792,12 +952,12 @@ func TestExecutorTaskLoggerContext(t *testing.T) {
|
||||
_ = os.Remove(logger.Path())
|
||||
}()
|
||||
|
||||
ctx := executor.WithTaskLogger(context.Background(), logger)
|
||||
if got := executor.TaskLoggerFromContext(ctx); got != logger {
|
||||
ctx := withTaskLogger(context.Background(), logger)
|
||||
if got := taskLoggerFromContext(ctx); got != logger {
|
||||
t.Fatalf("expected logger roundtrip, got %v", got)
|
||||
}
|
||||
|
||||
if executor.TaskLoggerFromContext(executor.WithTaskLogger(context.Background(), nil)) != nil {
|
||||
if taskLoggerFromContext(withTaskLogger(context.Background(), nil)) != nil {
|
||||
t.Fatalf("expected nil logger when injected logger is nil")
|
||||
}
|
||||
}
|
||||
@@ -954,7 +1114,7 @@ func TestExecutorExecuteConcurrentWithContextBranches(t *testing.T) {
|
||||
|
||||
orig := runCodexTaskFn
|
||||
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
|
||||
logger := executor.TaskLoggerFromContext(task.Context)
|
||||
logger := taskLoggerFromContext(task.Context)
|
||||
if logger != mainLogger {
|
||||
return TaskResult{TaskID: task.ID, ExitCode: 1, Error: "unexpected logger"}
|
||||
}
|
||||
@@ -988,6 +1148,9 @@ func TestExecutorExecuteConcurrentWithContextBranches(t *testing.T) {
|
||||
if res.LogPath != mainLogger.Path() {
|
||||
t.Fatalf("shared log path mismatch: got %q want %q", res.LogPath, mainLogger.Path())
|
||||
}
|
||||
if !res.sharedLog {
|
||||
t.Fatalf("expected sharedLog flag for %+v", res)
|
||||
}
|
||||
if !strings.Contains(stderrOut, "Log (shared)") {
|
||||
t.Fatalf("stderr missing shared marker: %s", stderrOut)
|
||||
}
|
||||
@@ -1016,7 +1179,7 @@ func TestExecutorExecuteConcurrentWithContextBranches(t *testing.T) {
|
||||
|
||||
orig := runCodexTaskFn
|
||||
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
|
||||
logger := executor.TaskLoggerFromContext(task.Context)
|
||||
logger := taskLoggerFromContext(task.Context)
|
||||
if logger == nil {
|
||||
return TaskResult{TaskID: task.ID, ExitCode: 1, Error: "missing logger"}
|
||||
}
|
||||
@@ -1054,14 +1217,7 @@ func TestExecutorExecuteConcurrentWithContextBranches(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read log %q: %v", res.LogPath, err)
|
||||
}
|
||||
found := false
|
||||
for _, line := range strings.Split(string(data), "\n") {
|
||||
if strings.Contains(stripTimestampPrefix(line), "TASK="+res.TaskID) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
if !strings.Contains(string(data), "TASK="+res.TaskID) {
|
||||
t.Fatalf("log for %q missing task marker, content: %s", res.TaskID, string(data))
|
||||
}
|
||||
_ = os.Remove(res.LogPath)
|
||||
@@ -1069,6 +1225,147 @@ func TestExecutorExecuteConcurrentWithContextBranches(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestExecutorSignalAndTermination(t *testing.T) {
|
||||
forceKillDelay.Store(0)
|
||||
defer forceKillDelay.Store(5)
|
||||
|
||||
proc := &execFakeProcess{pid: 42}
|
||||
cmd := &execFakeRunner{process: proc}
|
||||
|
||||
origNotify := signalNotifyFn
|
||||
origStop := signalStopFn
|
||||
defer func() {
|
||||
signalNotifyFn = origNotify
|
||||
signalStopFn = origStop
|
||||
}()
|
||||
|
||||
signalNotifyFn = func(c chan<- os.Signal, sigs ...os.Signal) {
|
||||
go func() { c <- syscall.SIGINT }()
|
||||
}
|
||||
signalStopFn = func(c chan<- os.Signal) {}
|
||||
|
||||
forwardSignals(context.Background(), cmd, func(string) {})
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
|
||||
proc.mu.Lock()
|
||||
signalled := len(proc.signals)
|
||||
proc.mu.Unlock()
|
||||
if signalled == 0 {
|
||||
t.Fatalf("process did not receive signal")
|
||||
}
|
||||
if proc.killed.Load() == 0 {
|
||||
t.Fatalf("process was not killed after signal")
|
||||
}
|
||||
|
||||
timer := terminateProcess(cmd)
|
||||
if timer == nil {
|
||||
t.Fatalf("terminateProcess returned nil timer")
|
||||
}
|
||||
timer.Stop()
|
||||
|
||||
ft := terminateCommand(cmd)
|
||||
if ft == nil {
|
||||
t.Fatalf("terminateCommand returned nil")
|
||||
}
|
||||
ft.Stop()
|
||||
|
||||
cmdKill := &execFakeRunner{process: &execFakeProcess{pid: 50}}
|
||||
ftKill := terminateCommand(cmdKill)
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
if p, ok := cmdKill.process.(*execFakeProcess); ok && p.killed.Load() == 0 {
|
||||
t.Fatalf("terminateCommand did not kill process")
|
||||
}
|
||||
ftKill.Stop()
|
||||
|
||||
cmdKill2 := &execFakeRunner{process: &execFakeProcess{pid: 51}}
|
||||
timer2 := terminateProcess(cmdKill2)
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
if p, ok := cmdKill2.process.(*execFakeProcess); ok && p.killed.Load() == 0 {
|
||||
t.Fatalf("terminateProcess did not kill process")
|
||||
}
|
||||
timer2.Stop()
|
||||
|
||||
if terminateCommand(nil) != nil {
|
||||
t.Fatalf("terminateCommand should return nil for nil cmd")
|
||||
}
|
||||
if terminateCommand(&execFakeRunner{allowNilProcess: true}) != nil {
|
||||
t.Fatalf("terminateCommand should return nil when process is nil")
|
||||
}
|
||||
if terminateProcess(nil) != nil {
|
||||
t.Fatalf("terminateProcess should return nil for nil cmd")
|
||||
}
|
||||
if terminateProcess(&execFakeRunner{allowNilProcess: true}) != nil {
|
||||
t.Fatalf("terminateProcess should return nil when process is nil")
|
||||
}
|
||||
|
||||
signalNotifyFn = func(c chan<- os.Signal, sigs ...os.Signal) {}
|
||||
ctxDone, cancelDone := context.WithCancel(context.Background())
|
||||
cancelDone()
|
||||
forwardSignals(ctxDone, &execFakeRunner{process: &execFakeProcess{pid: 70}}, func(string) {})
|
||||
}
|
||||
|
||||
func TestExecutorCancelReasonAndCloseWithReason(t *testing.T) {
|
||||
if reason := cancelReason("", nil); !strings.Contains(reason, "Context") {
|
||||
t.Fatalf("unexpected cancelReason for nil ctx: %s", reason)
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 0)
|
||||
defer cancel()
|
||||
if !strings.Contains(cancelReason("cmd", ctx), "timeout") {
|
||||
t.Fatalf("expected timeout reason")
|
||||
}
|
||||
cancelCtx, cancelFn := context.WithCancel(context.Background())
|
||||
cancelFn()
|
||||
if !strings.Contains(cancelReason("cmd", cancelCtx), "Execution cancelled") {
|
||||
t.Fatalf("expected cancellation reason")
|
||||
}
|
||||
if !strings.Contains(cancelReason("", cancelCtx), "codex") {
|
||||
t.Fatalf("expected default command name in cancel reason")
|
||||
}
|
||||
|
||||
rc := &reasonReadCloser{r: strings.NewReader("data"), closedC: make(chan struct{}, 1)}
|
||||
closeWithReason(rc, "why")
|
||||
select {
|
||||
case <-rc.closedC:
|
||||
default:
|
||||
t.Fatalf("CloseWithReason was not called")
|
||||
}
|
||||
|
||||
plain := io.NopCloser(strings.NewReader("x"))
|
||||
closeWithReason(plain, "noop")
|
||||
closeWithReason(nil, "noop")
|
||||
}
|
||||
|
||||
func TestExecutorForceKillTimerStop(t *testing.T) {
|
||||
done := make(chan struct{}, 1)
|
||||
ft := &forceKillTimer{timer: time.AfterFunc(50*time.Millisecond, func() { done <- struct{}{} }), done: done}
|
||||
ft.Stop()
|
||||
|
||||
done2 := make(chan struct{}, 1)
|
||||
ft2 := &forceKillTimer{timer: time.AfterFunc(0, func() { done2 <- struct{}{} }), done: done2}
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
ft2.Stop()
|
||||
|
||||
var nilTimer *forceKillTimer
|
||||
nilTimer.Stop()
|
||||
(&forceKillTimer{}).Stop()
|
||||
}
|
||||
|
||||
func TestExecutorForwardSignalsDefaults(t *testing.T) {
|
||||
origNotify := signalNotifyFn
|
||||
origStop := signalStopFn
|
||||
signalNotifyFn = nil
|
||||
signalStopFn = nil
|
||||
defer func() {
|
||||
signalNotifyFn = origNotify
|
||||
signalStopFn = origStop
|
||||
}()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
forwardSignals(ctx, &execFakeRunner{process: &execFakeProcess{pid: 80}}, func(string) {})
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
|
||||
func TestExecutorSharedLogFalseWhenCustomLogPath(t *testing.T) {
devNull, err := os.OpenFile(os.DevNull, os.O_WRONLY, 0)
if err != nil {
@@ -1124,9 +1421,10 @@ func TestExecutorSharedLogFalseWhenCustomLogPath(t *testing.T) {
}

res := results[0]
out := generateFinalOutputWithMode(results, false)
if strings.Contains(out, "(shared)") {
t.Fatalf("did not expect shared marker when LogPath differs from shared logger, got: %s", out)
// Key assertion: even though handle.shared=true (because task logger creation failed),
// sharedLog should be false since LogPath differs from the main logger's path
if res.sharedLog {
t.Fatalf("expected sharedLog=false when LogPath differs from shared logger, got true")
}

// Verify that LogPath is indeed the custom one
|
||||
@@ -1,4 +1,4 @@
package executor
package main

import (
"bytes"
@@ -18,12 +18,6 @@ var geminiNoisePatterns = []string{
|
||||
"YOLO mode is enabled",
|
||||
}
|
||||
|
||||
// codexNoisePatterns contains stderr patterns to filter for codex backend
|
||||
var codexNoisePatterns = []string{
|
||||
"ERROR codex_core::codex: needs_follow_up:",
|
||||
"ERROR codex_core::skills::loader:",
|
||||
}
|
||||
|
||||
// filteringWriter wraps an io.Writer and filters out lines matching patterns
|
||||
type filteringWriter struct {
|
||||
w io.Writer
|
||||
@@ -45,7 +39,7 @@ func (f *filteringWriter) Write(p []byte) (n int, err error) {
|
||||
break
|
||||
}
|
||||
if !f.shouldFilter(line) {
|
||||
_, _ = f.w.Write([]byte(line))
|
||||
f.w.Write([]byte(line))
|
||||
}
|
||||
}
|
||||
return len(p), nil
|
||||
@@ -65,7 +59,7 @@ func (f *filteringWriter) Flush() {
|
||||
if f.buf.Len() > 0 {
|
||||
remaining := f.buf.String()
|
||||
if !f.shouldFilter(remaining) {
|
||||
_, _ = f.w.Write([]byte(remaining))
|
||||
f.w.Write([]byte(remaining))
|
||||
}
|
||||
f.buf.Reset()
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package executor
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@@ -48,7 +48,7 @@ func TestFilteringWriter(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
fw := newFilteringWriter(&buf, tt.patterns)
|
||||
_, _ = fw.Write([]byte(tt.input))
|
||||
fw.Write([]byte(tt.input))
|
||||
fw.Flush()
|
||||
|
||||
if got := buf.String(); got != tt.want {
|
||||
@@ -63,8 +63,8 @@ func TestFilteringWriterPartialLines(t *testing.T) {
|
||||
fw := newFilteringWriter(&buf, geminiNoisePatterns)
|
||||
|
||||
// Write partial line
|
||||
_, _ = fw.Write([]byte("Hello "))
|
||||
_, _ = fw.Write([]byte("World\n"))
|
||||
fw.Write([]byte("Hello "))
|
||||
fw.Write([]byte("World\n"))
|
||||
fw.Flush()
|
||||
|
||||
if got := buf.String(); got != "Hello World\n" {
|
||||
@@ -1,43 +1,3 @@
|
||||
module codeagent-wrapper
|
||||
|
||||
go 1.21
|
||||
|
||||
require (
|
||||
github.com/goccy/go-json v0.10.5
|
||||
github.com/rs/zerolog v1.34.0
|
||||
github.com/shirou/gopsutil/v3 v3.24.5
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/spf13/viper v1.19.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.19 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||
github.com/sagikazarmark/locafero v0.4.0 // indirect
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
|
||||
github.com/shoenig/go-m1cpu v0.1.6 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/afero v1.11.0 // indirect
|
||||
github.com/spf13/cast v1.6.0 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
go.uber.org/atomic v1.9.0 // indirect
|
||||
go.uber.org/multierr v1.9.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
|
||||
golang.org/x/sys v0.20.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
@@ -1,117 +0,0 @@
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
|
||||
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
|
||||
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
||||
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
|
||||
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
|
||||
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
|
||||
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
|
||||
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
|
||||
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
|
||||
github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI=
|
||||
github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk=
|
||||
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
|
||||
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
|
||||
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
|
||||
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
|
||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
||||
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
|
||||
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
|
||||
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
|
||||
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
|
||||
github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
|
||||
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
||||
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
|
||||
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
|
||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
|
||||
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
|
||||
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
@@ -1,150 +0,0 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
config "codeagent-wrapper/internal/config"
|
||||
executor "codeagent-wrapper/internal/executor"
|
||||
)
|
||||
|
||||
func TestValidateAgentName(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
wantErr bool
|
||||
}{
|
||||
{name: "simple", input: "develop", wantErr: false},
|
||||
{name: "upper", input: "ABC", wantErr: false},
|
||||
{name: "digits", input: "a1", wantErr: false},
|
||||
{name: "dash underscore", input: "a-b_c", wantErr: false},
|
||||
{name: "empty", input: "", wantErr: true},
|
||||
{name: "space", input: "a b", wantErr: true},
|
||||
{name: "slash", input: "a/b", wantErr: true},
|
||||
{name: "dotdot", input: "../evil", wantErr: true},
|
||||
{name: "unicode", input: "中文", wantErr: true},
|
||||
{name: "symbol", input: "a$b", wantErr: true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := config.ValidateAgentName(tt.input)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Fatalf("validateAgentName(%q) err=%v, wantErr=%v", tt.input, err, tt.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseArgs_InvalidAgentNameRejected(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
os.Args = []string{"codeagent-wrapper", "--agent", "../evil", "task"}
|
||||
if _, err := parseArgs(); err == nil {
|
||||
t.Fatalf("expected parseArgs to reject invalid agent name")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseParallelConfig_InvalidAgentNameRejected(t *testing.T) {
|
||||
input := `---TASK---
|
||||
id: task-1
|
||||
agent: ../evil
|
||||
---CONTENT---
|
||||
do something`
|
||||
if _, err := parseParallelConfig([]byte(input)); err == nil {
|
||||
t.Fatalf("expected parseParallelConfig to reject invalid agent name")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseParallelConfig_ResolvesAgentPromptFile(t *testing.T) {
|
||||
home := t.TempDir()
|
||||
t.Setenv("HOME", home)
|
||||
t.Setenv("USERPROFILE", home)
|
||||
t.Cleanup(config.ResetModelsConfigCacheForTest)
|
||||
config.ResetModelsConfigCacheForTest()
|
||||
|
||||
configDir := filepath.Join(home, ".codeagent")
|
||||
if err := os.MkdirAll(configDir, 0o755); err != nil {
|
||||
t.Fatalf("MkdirAll: %v", err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(configDir, "models.json"), []byte(`{
|
||||
"default_backend": "codex",
|
||||
"default_model": "gpt-test",
|
||||
"agents": {
|
||||
"custom-agent": {
|
||||
"backend": "codex",
|
||||
"model": "gpt-test",
|
||||
"prompt_file": "~/.claude/prompt.md"
|
||||
}
|
||||
}
|
||||
}`), 0o644); err != nil {
|
||||
t.Fatalf("WriteFile: %v", err)
|
||||
}
|
||||
|
||||
input := `---TASK---
|
||||
id: task-1
|
||||
agent: custom-agent
|
||||
---CONTENT---
|
||||
do something`
|
||||
cfg, err := parseParallelConfig([]byte(input))
|
||||
if err != nil {
|
||||
t.Fatalf("parseParallelConfig() unexpected error: %v", err)
|
||||
}
|
||||
if len(cfg.Tasks) != 1 {
|
||||
t.Fatalf("expected 1 task, got %d", len(cfg.Tasks))
|
||||
}
|
||||
if got := cfg.Tasks[0].PromptFile; got != "~/.claude/prompt.md" {
|
||||
t.Fatalf("PromptFile = %q, want %q", got, "~/.claude/prompt.md")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefaultRunCodexTaskFn_AppliesAgentPromptFile(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
|
||||
home := t.TempDir()
|
||||
t.Setenv("HOME", home)
|
||||
t.Setenv("USERPROFILE", home)
|
||||
|
||||
claudeDir := filepath.Join(home, ".claude")
|
||||
if err := os.MkdirAll(claudeDir, 0o755); err != nil {
|
||||
t.Fatalf("MkdirAll: %v", err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(claudeDir, "prompt.md"), []byte("P\n"), 0o644); err != nil {
|
||||
t.Fatalf("WriteFile: %v", err)
|
||||
}
|
||||
|
||||
fake := newFakeCmd(fakeCmdConfig{
|
||||
StdoutPlan: []fakeStdoutEvent{
|
||||
{Data: `{"type":"item.completed","item":{"type":"agent_message","text":"ok"}}` + "\n"},
|
||||
},
|
||||
WaitDelay: 2 * time.Millisecond,
|
||||
})
|
||||
|
||||
_ = executor.SetNewCommandRunner(func(ctx context.Context, name string, args ...string) executor.CommandRunner { return fake })
|
||||
_ = executor.SetSelectBackendFn(func(name string) (Backend, error) {
|
||||
return testBackend{
|
||||
name: name,
|
||||
command: "fake-cmd",
|
||||
argsFn: func(cfg *Config, targetArg string) []string {
|
||||
return []string{targetArg}
|
||||
},
|
||||
}, nil
|
||||
})
|
||||
|
||||
res := defaultRunCodexTaskFn(TaskSpec{
|
||||
ID: "t",
|
||||
Task: "do",
|
||||
Backend: "codex",
|
||||
PromptFile: "~/.claude/prompt.md",
|
||||
}, 5)
|
||||
if res.ExitCode != 0 {
|
||||
t.Fatalf("unexpected result: %+v", res)
|
||||
}
|
||||
|
||||
want := "<agent-prompt>\nP\n</agent-prompt>\n\ndo"
|
||||
if got := fake.StdinContents(); got != want {
|
||||
t.Fatalf("stdin mismatch:\n got=%q\nwant=%q", got, want)
|
||||
}
|
||||
}
|
||||
@@ -1,278 +0,0 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
version = "6.0.0-alpha1"
|
||||
defaultWorkdir = "."
|
||||
defaultTimeout = 7200 // seconds (2 hours)
|
||||
defaultCoverageTarget = 90.0
|
||||
codexLogLineLimit = 1000
|
||||
stdinSpecialChars = "\n\\\"'`$"
|
||||
stderrCaptureLimit = 4 * 1024
|
||||
defaultBackendName = "codex"
|
||||
defaultCodexCommand = "codex"
|
||||
|
||||
// stdout close reasons
|
||||
stdoutCloseReasonWait = "wait-done"
|
||||
stdoutCloseReasonDrain = "drain-timeout"
|
||||
stdoutCloseReasonCtx = "context-cancel"
|
||||
stdoutDrainTimeout = 100 * time.Millisecond
|
||||
)
|
||||
|
||||
// Test hooks for dependency injection
|
||||
var (
|
||||
stdinReader io.Reader = os.Stdin
|
||||
isTerminalFn = defaultIsTerminal
|
||||
codexCommand = defaultCodexCommand
|
||||
cleanupHook func()
|
||||
startupCleanupAsync = true
|
||||
|
||||
buildCodexArgsFn = buildCodexArgs
|
||||
selectBackendFn = selectBackend
|
||||
cleanupLogsFn = cleanupOldLogs
|
||||
defaultBuildArgsFn = buildCodexArgs
|
||||
runTaskFn = runCodexTask
|
||||
exitFn = os.Exit
|
||||
)
|
||||
|
||||
func runStartupCleanup() {
|
||||
if cleanupLogsFn == nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
logWarn(fmt.Sprintf("cleanupOldLogs panic: %v", r))
|
||||
}
|
||||
}()
|
||||
if _, err := cleanupLogsFn(); err != nil {
|
||||
logWarn(fmt.Sprintf("cleanupOldLogs error: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
func scheduleStartupCleanup() {
|
||||
if !startupCleanupAsync {
|
||||
runStartupCleanup()
|
||||
return
|
||||
}
|
||||
if cleanupLogsFn == nil {
|
||||
return
|
||||
}
|
||||
fn := cleanupLogsFn
|
||||
go func() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
logWarn(fmt.Sprintf("cleanupOldLogs panic: %v", r))
|
||||
}
|
||||
}()
|
||||
if _, err := fn(); err != nil {
|
||||
logWarn(fmt.Sprintf("cleanupOldLogs error: %v", err))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func runCleanupMode() int {
|
||||
if cleanupLogsFn == nil {
|
||||
fmt.Fprintln(os.Stderr, "Cleanup failed: log cleanup function not configured")
|
||||
return 1
|
||||
}
|
||||
|
||||
stats, err := cleanupLogsFn()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Cleanup failed: %v\n", err)
|
||||
return 1
|
||||
}
|
||||
|
||||
fmt.Println("Cleanup completed")
|
||||
fmt.Printf("Files scanned: %d\n", stats.Scanned)
|
||||
fmt.Printf("Files deleted: %d\n", stats.Deleted)
|
||||
if len(stats.DeletedFiles) > 0 {
|
||||
for _, f := range stats.DeletedFiles {
|
||||
fmt.Printf(" - %s\n", f)
|
||||
}
|
||||
}
|
||||
fmt.Printf("Files kept: %d\n", stats.Kept)
|
||||
if len(stats.KeptFiles) > 0 {
|
||||
for _, f := range stats.KeptFiles {
|
||||
fmt.Printf(" - %s\n", f)
|
||||
}
|
||||
}
|
||||
if stats.Errors > 0 {
|
||||
fmt.Printf("Deletion errors: %d\n", stats.Errors)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func readAgentPromptFile(path string, allowOutsideClaudeDir bool) (string, error) {
|
||||
raw := strings.TrimSpace(path)
|
||||
if raw == "" {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
expanded := raw
|
||||
if raw == "~" || strings.HasPrefix(raw, "~/") || strings.HasPrefix(raw, "~\\") {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if raw == "~" {
|
||||
expanded = home
|
||||
} else {
|
||||
expanded = home + raw[1:]
|
||||
}
|
||||
}
|
||||
|
||||
absPath, err := filepath.Abs(expanded)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
absPath = filepath.Clean(absPath)
|
||||
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
if !allowOutsideClaudeDir {
|
||||
return "", err
|
||||
}
|
||||
logWarn(fmt.Sprintf("Failed to resolve home directory for prompt file validation: %v; proceeding without restriction", err))
|
||||
} else {
|
||||
allowedDirs := []string{
|
||||
filepath.Clean(filepath.Join(home, ".claude")),
|
||||
filepath.Clean(filepath.Join(home, ".codeagent", "agents")),
|
||||
}
|
||||
for i := range allowedDirs {
|
||||
allowedAbs, err := filepath.Abs(allowedDirs[i])
|
||||
if err == nil {
|
||||
allowedDirs[i] = filepath.Clean(allowedAbs)
|
||||
}
|
||||
}
|
||||
|
||||
isWithinDir := func(path, dir string) bool {
|
||||
rel, err := filepath.Rel(dir, path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
rel = filepath.Clean(rel)
|
||||
if rel == "." {
|
||||
return true
|
||||
}
|
||||
if rel == ".." {
|
||||
return false
|
||||
}
|
||||
prefix := ".." + string(os.PathSeparator)
|
||||
return !strings.HasPrefix(rel, prefix)
|
||||
}
|
||||
|
||||
if !allowOutsideClaudeDir {
|
||||
withinAllowed := false
|
||||
for _, dir := range allowedDirs {
|
||||
if isWithinDir(absPath, dir) {
|
||||
withinAllowed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !withinAllowed {
|
||||
logWarn(fmt.Sprintf("Refusing to read prompt file outside allowed dirs (%s): %s", strings.Join(allowedDirs, ", "), absPath))
|
||||
return "", fmt.Errorf("prompt file must be under ~/.claude or ~/.codeagent/agents")
|
||||
}
|
||||
|
||||
resolvedPath, errPath := filepath.EvalSymlinks(absPath)
|
||||
if errPath == nil {
|
||||
resolvedPath = filepath.Clean(resolvedPath)
|
||||
resolvedAllowed := make([]string, 0, len(allowedDirs))
|
||||
for _, dir := range allowedDirs {
|
||||
resolvedBase, errBase := filepath.EvalSymlinks(dir)
|
||||
if errBase != nil {
|
||||
continue
|
||||
}
|
||||
resolvedAllowed = append(resolvedAllowed, filepath.Clean(resolvedBase))
|
||||
}
|
||||
if len(resolvedAllowed) > 0 {
|
||||
withinResolved := false
|
||||
for _, dir := range resolvedAllowed {
|
||||
if isWithinDir(resolvedPath, dir) {
|
||||
withinResolved = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !withinResolved {
|
||||
logWarn(fmt.Sprintf("Refusing to read prompt file outside allowed dirs (%s) (resolved): %s", strings.Join(resolvedAllowed, ", "), resolvedPath))
|
||||
return "", fmt.Errorf("prompt file must be under ~/.claude or ~/.codeagent/agents")
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
withinAllowed := false
|
||||
for _, dir := range allowedDirs {
|
||||
if isWithinDir(absPath, dir) {
|
||||
withinAllowed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !withinAllowed {
|
||||
logWarn(fmt.Sprintf("Reading prompt file outside allowed dirs (%s): %s", strings.Join(allowedDirs, ", "), absPath))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(absPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimRight(string(data), "\r\n"), nil
|
||||
}
|
||||
|
||||
func wrapTaskWithAgentPrompt(prompt string, task string) string {
|
||||
return "<agent-prompt>\n" + prompt + "\n</agent-prompt>\n\n" + task
|
||||
}
|
||||
|
||||
func runCleanupHook() {
|
||||
if logger := activeLogger(); logger != nil {
|
||||
logger.Flush()
|
||||
}
|
||||
if cleanupHook != nil {
|
||||
cleanupHook()
|
||||
}
|
||||
}
|
||||
|
||||
func printHelp() {
|
||||
name := currentWrapperName()
|
||||
help := fmt.Sprintf(`%[1]s - Go wrapper for AI CLI backends
|
||||
|
||||
Usage:
|
||||
%[1]s "task" [workdir]
|
||||
%[1]s --backend claude "task" [workdir]
|
||||
%[1]s --prompt-file /path/to/prompt.md "task" [workdir]
|
||||
%[1]s - [workdir] Read task from stdin
|
||||
%[1]s resume <session_id> "task" [workdir]
|
||||
%[1]s resume <session_id> - [workdir]
|
||||
%[1]s --parallel Run tasks in parallel (config from stdin)
|
||||
%[1]s --parallel --full-output Run tasks in parallel with full output (legacy)
|
||||
%[1]s --version
|
||||
%[1]s --help
|
||||
|
||||
Parallel mode examples:
|
||||
%[1]s --parallel < tasks.txt
|
||||
echo '...' | %[1]s --parallel
|
||||
%[1]s --parallel --full-output < tasks.txt
|
||||
%[1]s --parallel <<'EOF'
|
||||
|
||||
Environment Variables:
|
||||
CODEX_TIMEOUT Timeout in milliseconds (default: 7200000)
|
||||
CODEAGENT_ASCII_MODE Use ASCII symbols instead of Unicode (PASS/WARN/FAIL)
|
||||
|
||||
Exit Codes:
|
||||
0 Success
|
||||
1 General error (missing args, no output)
|
||||
124 Timeout
|
||||
127 backend command not found
|
||||
130 Interrupted (Ctrl+C)
|
||||
* Passthrough from backend process`, name)
|
||||
fmt.Println(help)
|
||||
}
|
||||
@@ -1,9 +0,0 @@
package wrapper

import backend "codeagent-wrapper/internal/backend"

type Backend = backend.Backend
type CodexBackend = backend.CodexBackend
type ClaudeBackend = backend.ClaudeBackend
type GeminiBackend = backend.GeminiBackend
type OpencodeBackend = backend.OpencodeBackend
@@ -1,7 +0,0 @@
package wrapper

import backend "codeagent-wrapper/internal/backend"

func init() {
backend.SetLogFuncs(logWarn, logError)
}
@@ -1,5 +0,0 @@
package wrapper

import backend "codeagent-wrapper/internal/backend"

func selectBackend(name string) (Backend, error) { return backend.Select(name) }
@@ -1,103 +0,0 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
config "codeagent-wrapper/internal/config"
|
||||
)
|
||||
|
||||
var (
|
||||
benchCmdSink any
|
||||
benchConfigSink *Config
|
||||
benchMessageSink string
|
||||
benchThreadIDSink string
|
||||
)
|
||||
|
||||
// BenchmarkStartup_NewRootCommand measures CLI startup overhead (command+flags construction).
|
||||
func BenchmarkStartup_NewRootCommand(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
benchCmdSink = newRootCommand()
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkConfigParse_ParseArgs measures config parsing from argv/env (steady-state).
|
||||
func BenchmarkConfigParse_ParseArgs(b *testing.B) {
|
||||
home := b.TempDir()
|
||||
b.Setenv("HOME", home)
|
||||
b.Setenv("USERPROFILE", home)
|
||||
|
||||
config.ResetModelsConfigCacheForTest()
|
||||
b.Cleanup(config.ResetModelsConfigCacheForTest)
|
||||
|
||||
origArgs := os.Args
|
||||
os.Args = []string{"codeagent-wrapper", "--agent", "develop", "task"}
|
||||
b.Cleanup(func() { os.Args = origArgs })
|
||||
|
||||
if _, err := parseArgs(); err != nil {
|
||||
b.Fatalf("warmup parseArgs() error: %v", err)
|
||||
}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
cfg, err := parseArgs()
|
||||
if err != nil {
|
||||
b.Fatalf("parseArgs() error: %v", err)
|
||||
}
|
||||
benchConfigSink = cfg
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkJSONParse_ParseJSONStreamInternal measures line-delimited JSON stream parsing.
|
||||
func BenchmarkJSONParse_ParseJSONStreamInternal(b *testing.B) {
|
||||
stream := []byte(
|
||||
`{"type":"thread.started","thread_id":"t"}` + "\n" +
|
||||
`{"type":"item.completed","item":{"type":"agent_message","text":"hello"}}` + "\n" +
|
||||
`{"type":"thread.completed","thread_id":"t"}` + "\n",
|
||||
)
|
||||
b.SetBytes(int64(len(stream)))
|
||||
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
message, threadID := parseJSONStreamInternal(bytes.NewReader(stream), nil, nil, nil, nil)
|
||||
benchMessageSink = message
|
||||
benchThreadIDSink = threadID
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkLoggerWrite measures log write performance
|
||||
func BenchmarkLoggerWrite(b *testing.B) {
|
||||
logger, err := NewLogger()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer logger.Close()
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
logger.Info("benchmark log message")
|
||||
}
|
||||
b.StopTimer()
|
||||
logger.Flush()
|
||||
}
|
||||
|
||||
// BenchmarkLoggerConcurrentWrite measures concurrent log write performance
|
||||
func BenchmarkLoggerConcurrentWrite(b *testing.B) {
|
||||
logger, err := NewLogger()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer logger.Close()
|
||||
|
||||
b.ResetTimer()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
logger.Info("concurrent benchmark log message")
|
||||
}
|
||||
})
|
||||
b.StopTimer()
|
||||
logger.Flush()
|
||||
}
|
||||
@@ -1,657 +0,0 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
config "codeagent-wrapper/internal/config"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
type exitError struct {
|
||||
code int
|
||||
}
|
||||
|
||||
func (e exitError) Error() string {
|
||||
return fmt.Sprintf("exit %d", e.code)
|
||||
}
|
||||
|
||||
type cliOptions struct {
|
||||
Backend string
|
||||
Model string
|
||||
ReasoningEffort string
|
||||
Agent string
|
||||
PromptFile string
|
||||
SkipPermissions bool
|
||||
|
||||
Parallel bool
|
||||
FullOutput bool
|
||||
|
||||
Cleanup bool
|
||||
Version bool
|
||||
ConfigFile string
|
||||
}
|
||||
|
||||
func Main() {
|
||||
Run()
|
||||
}
|
||||
|
||||
// Run is the program entrypoint for cmd/codeagent/main.go.
|
||||
func Run() {
|
||||
exitFn(run())
|
||||
}
|
||||
|
||||
func run() int {
|
||||
cmd := newRootCommand()
|
||||
cmd.SetArgs(os.Args[1:])
|
||||
if err := cmd.Execute(); err != nil {
|
||||
var ee exitError
|
||||
if errors.As(err, &ee) {
|
||||
return ee.code
|
||||
}
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func newRootCommand() *cobra.Command {
|
||||
name := currentWrapperName()
|
||||
opts := &cliOptions{}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: fmt.Sprintf("%s [flags] <task>|resume <session_id> <task> [workdir]", name),
|
||||
Short: "Go wrapper for AI CLI backends",
|
||||
SilenceErrors: true,
|
||||
SilenceUsage: true,
|
||||
Args: cobra.ArbitraryArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if opts.Version {
|
||||
fmt.Printf("%s version %s\n", name, version)
|
||||
return nil
|
||||
}
|
||||
if opts.Cleanup {
|
||||
code := runCleanupMode()
|
||||
if code == 0 {
|
||||
return nil
|
||||
}
|
||||
return exitError{code: code}
|
||||
}
|
||||
|
||||
exitCode := runWithLoggerAndCleanup(func() int {
|
||||
v, err := config.NewViper(opts.ConfigFile)
|
||||
if err != nil {
|
||||
logError(err.Error())
|
||||
return 1
|
||||
}
|
||||
|
||||
if opts.Parallel {
|
||||
return runParallelMode(cmd, args, opts, v, name)
|
||||
}
|
||||
|
||||
logInfo("Script started")
|
||||
|
||||
cfg, err := buildSingleConfig(cmd, args, os.Args[1:], opts, v)
|
||||
if err != nil {
|
||||
logError(err.Error())
|
||||
return 1
|
||||
}
|
||||
logInfo(fmt.Sprintf("Parsed args: mode=%s, task_len=%d, backend=%s", cfg.Mode, len(cfg.Task), cfg.Backend))
|
||||
return runSingleMode(cfg, name)
|
||||
})
|
||||
|
||||
if exitCode == 0 {
|
||||
return nil
|
||||
}
|
||||
return exitError{code: exitCode}
|
||||
},
|
||||
}
|
||||
cmd.CompletionOptions.DisableDefaultCmd = true
|
||||
|
||||
addRootFlags(cmd.Flags(), opts)
|
||||
cmd.AddCommand(newVersionCommand(name), newCleanupCommand())
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addRootFlags(fs *pflag.FlagSet, opts *cliOptions) {
|
||||
fs.StringVar(&opts.ConfigFile, "config", "", "Config file path (default: $HOME/.codeagent/config.*)")
|
||||
fs.BoolVarP(&opts.Version, "version", "v", false, "Print version and exit")
|
||||
fs.BoolVar(&opts.Cleanup, "cleanup", false, "Clean up old logs and exit")
|
||||
|
||||
fs.BoolVar(&opts.Parallel, "parallel", false, "Run tasks in parallel (config from stdin)")
|
||||
fs.BoolVar(&opts.FullOutput, "full-output", false, "Parallel mode: include full task output (legacy)")
|
||||
|
||||
fs.StringVar(&opts.Backend, "backend", defaultBackendName, "Backend to use (codex, claude, gemini, opencode)")
|
||||
fs.StringVar(&opts.Model, "model", "", "Model override")
|
||||
fs.StringVar(&opts.ReasoningEffort, "reasoning-effort", "", "Reasoning effort (backend-specific)")
|
||||
fs.StringVar(&opts.Agent, "agent", "", "Agent preset name (from ~/.codeagent/models.json)")
|
||||
fs.StringVar(&opts.PromptFile, "prompt-file", "", "Prompt file path")
|
||||
|
||||
fs.BoolVar(&opts.SkipPermissions, "skip-permissions", false, "Skip permissions prompts (also via CODEAGENT_SKIP_PERMISSIONS)")
|
||||
fs.BoolVar(&opts.SkipPermissions, "dangerously-skip-permissions", false, "Alias for --skip-permissions")
|
||||
}
|
||||
|
||||
func newVersionCommand(name string) *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Print version and exit",
|
||||
SilenceErrors: true,
|
||||
SilenceUsage: true,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf("%s version %s\n", name, version)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newCleanupCommand() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "cleanup",
|
||||
Short: "Clean up old logs and exit",
|
||||
SilenceErrors: true,
|
||||
SilenceUsage: true,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
code := runCleanupMode()
|
||||
if code == 0 {
|
||||
return nil
|
||||
}
|
||||
return exitError{code: code}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func runWithLoggerAndCleanup(fn func() int) (exitCode int) {
|
||||
logger, err := NewLogger()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "ERROR: failed to initialize logger: %v\n", err)
|
||||
return 1
|
||||
}
|
||||
setLogger(logger)
|
||||
|
||||
defer func() {
|
||||
logger := activeLogger()
|
||||
if logger != nil {
|
||||
logger.Flush()
|
||||
}
|
||||
if err := closeLogger(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "ERROR: failed to close logger: %v\n", err)
|
||||
}
|
||||
if logger == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if exitCode != 0 {
|
||||
if entries := logger.ExtractRecentErrors(10); len(entries) > 0 {
|
||||
fmt.Fprintln(os.Stderr, "\n=== Recent Errors ===")
|
||||
for _, entry := range entries {
|
||||
fmt.Fprintln(os.Stderr, entry)
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "Log file: %s (deleted)\n", logger.Path())
|
||||
}
|
||||
}
|
||||
_ = logger.RemoveLogFile()
|
||||
}()
|
||||
defer runCleanupHook()
|
||||
|
||||
// Clean up stale logs from previous runs.
|
||||
scheduleStartupCleanup()
|
||||
|
||||
return fn()
|
||||
}
|
||||
|
||||
func parseArgs() (*Config, error) {
|
||||
opts := &cliOptions{}
|
||||
cmd := &cobra.Command{SilenceErrors: true, SilenceUsage: true, Args: cobra.ArbitraryArgs}
|
||||
addRootFlags(cmd.Flags(), opts)
|
||||
|
||||
rawArgv := os.Args[1:]
|
||||
if err := cmd.ParseFlags(rawArgv); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
args := cmd.Flags().Args()
|
||||
|
||||
v, err := config.NewViper(opts.ConfigFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buildSingleConfig(cmd, args, rawArgv, opts, v)
|
||||
}
|
||||
|
||||
func buildSingleConfig(cmd *cobra.Command, args []string, rawArgv []string, opts *cliOptions, v *viper.Viper) (*Config, error) {
|
||||
backendName := defaultBackendName
|
||||
model := ""
|
||||
reasoningEffort := ""
|
||||
agentName := ""
|
||||
promptFile := ""
|
||||
promptFileExplicit := false
|
||||
yolo := false
|
||||
|
||||
if cmd.Flags().Changed("agent") {
|
||||
agentName = strings.TrimSpace(opts.Agent)
|
||||
if agentName == "" {
|
||||
return nil, fmt.Errorf("--agent flag requires a value")
|
||||
}
|
||||
if err := config.ValidateAgentName(agentName); err != nil {
|
||||
return nil, fmt.Errorf("--agent flag invalid value: %w", err)
|
||||
}
|
||||
} else {
|
||||
agentName = strings.TrimSpace(v.GetString("agent"))
|
||||
if agentName != "" {
|
||||
if err := config.ValidateAgentName(agentName); err != nil {
|
||||
return nil, fmt.Errorf("--agent flag invalid value: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var resolvedBackend, resolvedModel, resolvedPromptFile, resolvedReasoning string
|
||||
if agentName != "" {
|
||||
var resolvedYolo bool
|
||||
resolvedBackend, resolvedModel, resolvedPromptFile, resolvedReasoning, _, _, resolvedYolo = config.ResolveAgentConfig(agentName)
|
||||
yolo = resolvedYolo
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed("prompt-file") {
|
||||
promptFile = strings.TrimSpace(opts.PromptFile)
|
||||
if promptFile == "" {
|
||||
return nil, fmt.Errorf("--prompt-file flag requires a value")
|
||||
}
|
||||
promptFileExplicit = true
|
||||
} else if val := strings.TrimSpace(v.GetString("prompt-file")); val != "" {
|
||||
promptFile = val
|
||||
promptFileExplicit = true
|
||||
} else {
|
||||
promptFile = resolvedPromptFile
|
||||
}
|
||||
|
||||
agentFlagChanged := cmd.Flags().Changed("agent")
|
||||
backendFlagChanged := cmd.Flags().Changed("backend")
|
||||
if backendFlagChanged {
|
||||
backendName = strings.TrimSpace(opts.Backend)
|
||||
if backendName == "" {
|
||||
return nil, fmt.Errorf("--backend flag requires a value")
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case agentFlagChanged && backendFlagChanged && lastFlagIndex(rawArgv, "agent") > lastFlagIndex(rawArgv, "backend"):
|
||||
backendName = resolvedBackend
|
||||
case !backendFlagChanged && agentName != "":
|
||||
backendName = resolvedBackend
|
||||
case !backendFlagChanged:
|
||||
if val := strings.TrimSpace(v.GetString("backend")); val != "" {
|
||||
backendName = val
|
||||
}
|
||||
}
|
||||
|
||||
modelFlagChanged := cmd.Flags().Changed("model")
|
||||
if modelFlagChanged {
|
||||
model = strings.TrimSpace(opts.Model)
|
||||
if model == "" {
|
||||
return nil, fmt.Errorf("--model flag requires a value")
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case agentFlagChanged && modelFlagChanged && lastFlagIndex(rawArgv, "agent") > lastFlagIndex(rawArgv, "model"):
|
||||
model = strings.TrimSpace(resolvedModel)
|
||||
case !modelFlagChanged && agentName != "":
|
||||
model = strings.TrimSpace(resolvedModel)
|
||||
case !modelFlagChanged:
|
||||
model = strings.TrimSpace(v.GetString("model"))
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed("reasoning-effort") {
|
||||
reasoningEffort = strings.TrimSpace(opts.ReasoningEffort)
|
||||
if reasoningEffort == "" {
|
||||
return nil, fmt.Errorf("--reasoning-effort flag requires a value")
|
||||
}
|
||||
} else if val := strings.TrimSpace(v.GetString("reasoning-effort")); val != "" {
|
||||
reasoningEffort = val
|
||||
} else if agentName != "" {
|
||||
reasoningEffort = strings.TrimSpace(resolvedReasoning)
|
||||
}
|
||||
|
||||
skipChanged := cmd.Flags().Changed("skip-permissions") || cmd.Flags().Changed("dangerously-skip-permissions")
|
||||
skipPermissions := false
|
||||
if skipChanged {
|
||||
skipPermissions = opts.SkipPermissions
|
||||
} else {
|
||||
skipPermissions = v.GetBool("skip-permissions")
|
||||
}
|
||||
|
||||
if len(args) == 0 {
|
||||
return nil, fmt.Errorf("task required")
|
||||
}
|
||||
|
||||
cfg := &Config{
|
||||
WorkDir: defaultWorkdir,
|
||||
Backend: backendName,
|
||||
Agent: agentName,
|
||||
PromptFile: promptFile,
|
||||
PromptFileExplicit: promptFileExplicit,
|
||||
SkipPermissions: skipPermissions,
|
||||
Yolo: yolo,
|
||||
Model: model,
|
||||
ReasoningEffort: reasoningEffort,
|
||||
MaxParallelWorkers: config.ResolveMaxParallelWorkers(),
|
||||
}
|
||||
|
||||
if args[0] == "resume" {
|
||||
if len(args) < 3 {
|
||||
return nil, fmt.Errorf("resume mode requires: resume <session_id> <task>")
|
||||
}
|
||||
cfg.Mode = "resume"
|
||||
cfg.SessionID = strings.TrimSpace(args[1])
|
||||
if cfg.SessionID == "" {
|
||||
return nil, fmt.Errorf("resume mode requires non-empty session_id")
|
||||
}
|
||||
cfg.Task = args[2]
|
||||
cfg.ExplicitStdin = (args[2] == "-")
|
||||
if len(args) > 3 {
|
||||
if args[3] == "-" {
|
||||
return nil, fmt.Errorf("invalid workdir: '-' is not a valid directory path")
|
||||
}
|
||||
cfg.WorkDir = args[3]
|
||||
}
|
||||
} else {
|
||||
cfg.Mode = "new"
|
||||
cfg.Task = args[0]
|
||||
cfg.ExplicitStdin = (args[0] == "-")
|
||||
if len(args) > 1 {
|
||||
if args[1] == "-" {
|
||||
return nil, fmt.Errorf("invalid workdir: '-' is not a valid directory path")
|
||||
}
|
||||
cfg.WorkDir = args[1]
|
||||
}
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func lastFlagIndex(argv []string, name string) int {
|
||||
if len(argv) == 0 {
|
||||
return -1
|
||||
}
|
||||
name = strings.TrimSpace(name)
|
||||
if name == "" {
|
||||
return -1
|
||||
}
|
||||
|
||||
needle := "--" + name
|
||||
prefix := needle + "="
|
||||
last := -1
|
||||
for i, arg := range argv {
|
||||
if arg == needle || strings.HasPrefix(arg, prefix) {
|
||||
last = i
|
||||
}
|
||||
}
|
||||
return last
|
||||
}
|
||||
|
||||
func runParallelMode(cmd *cobra.Command, args []string, opts *cliOptions, v *viper.Viper, name string) int {
|
||||
if len(args) > 0 {
|
||||
fmt.Fprintln(os.Stderr, "ERROR: --parallel reads its task configuration from stdin; no positional arguments are allowed.")
|
||||
fmt.Fprintln(os.Stderr, "Usage examples:")
|
||||
fmt.Fprintf(os.Stderr, " %s --parallel < tasks.txt\n", name)
|
||||
fmt.Fprintf(os.Stderr, " echo '...' | %s --parallel\n", name)
|
||||
fmt.Fprintf(os.Stderr, " %s --parallel <<'EOF'\n", name)
|
||||
fmt.Fprintf(os.Stderr, " %s --parallel --full-output <<'EOF' # include full task output\n", name)
|
||||
return 1
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed("agent") || cmd.Flags().Changed("prompt-file") || cmd.Flags().Changed("reasoning-effort") {
|
||||
fmt.Fprintln(os.Stderr, "ERROR: --parallel reads its task configuration from stdin; only --backend, --model, --full-output and --skip-permissions are allowed.")
|
||||
return 1
|
||||
}
|
||||
|
||||
backendName := defaultBackendName
|
||||
if cmd.Flags().Changed("backend") {
|
||||
backendName = strings.TrimSpace(opts.Backend)
|
||||
if backendName == "" {
|
||||
fmt.Fprintln(os.Stderr, "ERROR: --backend flag requires a value")
|
||||
return 1
|
||||
}
|
||||
} else if val := strings.TrimSpace(v.GetString("backend")); val != "" {
|
||||
backendName = val
|
||||
}
|
||||
|
||||
model := ""
|
||||
if cmd.Flags().Changed("model") {
|
||||
model = strings.TrimSpace(opts.Model)
|
||||
if model == "" {
|
||||
fmt.Fprintln(os.Stderr, "ERROR: --model flag requires a value")
|
||||
return 1
|
||||
}
|
||||
} else {
|
||||
model = strings.TrimSpace(v.GetString("model"))
|
||||
}
|
||||
|
||||
fullOutput := opts.FullOutput
|
||||
if !cmd.Flags().Changed("full-output") && v.IsSet("full-output") {
|
||||
fullOutput = v.GetBool("full-output")
|
||||
}
|
||||
|
||||
skipChanged := cmd.Flags().Changed("skip-permissions") || cmd.Flags().Changed("dangerously-skip-permissions")
|
||||
skipPermissions := false
|
||||
if skipChanged {
|
||||
skipPermissions = opts.SkipPermissions
|
||||
} else {
|
||||
skipPermissions = v.GetBool("skip-permissions")
|
||||
}
|
||||
|
||||
backend, err := selectBackendFn(backendName)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
|
||||
return 1
|
||||
}
|
||||
backendName = backend.Name()
|
||||
|
||||
data, err := io.ReadAll(stdinReader)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "ERROR: failed to read stdin: %v\n", err)
|
||||
return 1
|
||||
}
|
||||
|
||||
cfg, err := parseParallelConfig(data)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
|
||||
return 1
|
||||
}
|
||||
|
||||
cfg.GlobalBackend = backendName
|
||||
model = strings.TrimSpace(model)
|
||||
for i := range cfg.Tasks {
|
||||
if strings.TrimSpace(cfg.Tasks[i].Backend) == "" {
|
||||
cfg.Tasks[i].Backend = backendName
|
||||
}
|
||||
if strings.TrimSpace(cfg.Tasks[i].Model) == "" && model != "" {
|
||||
cfg.Tasks[i].Model = model
|
||||
}
|
||||
cfg.Tasks[i].SkipPermissions = cfg.Tasks[i].SkipPermissions || skipPermissions
|
||||
}
|
||||
|
||||
timeoutSec := resolveTimeout()
|
||||
layers, err := topologicalSort(cfg.Tasks)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
|
||||
return 1
|
||||
}
|
||||
|
||||
results := executeConcurrent(layers, timeoutSec)
|
||||
|
||||
for i := range results {
|
||||
results[i].CoverageTarget = defaultCoverageTarget
|
||||
if results[i].Message == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
lines := strings.Split(results[i].Message, "\n")
|
||||
results[i].Coverage = extractCoverageFromLines(lines)
|
||||
results[i].CoverageNum = extractCoverageNum(results[i].Coverage)
|
||||
results[i].FilesChanged = extractFilesChangedFromLines(lines)
|
||||
results[i].TestsPassed, results[i].TestsFailed = extractTestResultsFromLines(lines)
|
||||
results[i].KeyOutput = extractKeyOutputFromLines(lines, 150)
|
||||
}
|
||||
|
||||
fmt.Println(generateFinalOutputWithMode(results, !fullOutput))
|
||||
|
||||
exitCode := 0
|
||||
for _, res := range results {
|
||||
if res.ExitCode != 0 {
|
||||
exitCode = res.ExitCode
|
||||
}
|
||||
}
|
||||
return exitCode
|
||||
}
|
||||
|
||||
func runSingleMode(cfg *Config, name string) int {
|
||||
backend, err := selectBackendFn(cfg.Backend)
|
||||
if err != nil {
|
||||
logError(err.Error())
|
||||
return 1
|
||||
}
|
||||
cfg.Backend = backend.Name()
|
||||
|
||||
cmdInjected := codexCommand != defaultCodexCommand
|
||||
argsInjected := buildCodexArgsFn != nil && reflect.ValueOf(buildCodexArgsFn).Pointer() != reflect.ValueOf(defaultBuildArgsFn).Pointer()
|
||||
|
||||
if backend.Name() != defaultBackendName || !cmdInjected {
|
||||
codexCommand = backend.Command()
|
||||
}
|
||||
if backend.Name() != defaultBackendName || !argsInjected {
|
||||
buildCodexArgsFn = backend.BuildArgs
|
||||
}
|
||||
logInfo(fmt.Sprintf("Selected backend: %s", backend.Name()))
|
||||
|
||||
timeoutSec := resolveTimeout()
|
||||
logInfo(fmt.Sprintf("Timeout: %ds", timeoutSec))
|
||||
cfg.Timeout = timeoutSec
|
||||
|
||||
var taskText string
|
||||
var piped bool
|
||||
|
||||
if cfg.ExplicitStdin {
|
||||
logInfo("Explicit stdin mode: reading task from stdin")
|
||||
data, err := io.ReadAll(stdinReader)
|
||||
if err != nil {
|
||||
logError("Failed to read stdin: " + err.Error())
|
||||
return 1
|
||||
}
|
||||
taskText = string(data)
|
||||
if taskText == "" {
|
||||
logError("Explicit stdin mode requires task input from stdin")
|
||||
return 1
|
||||
}
|
||||
piped = !isTerminal()
|
||||
} else {
|
||||
pipedTask, err := readPipedTask()
|
||||
if err != nil {
|
||||
logError("Failed to read piped stdin: " + err.Error())
|
||||
return 1
|
||||
}
|
||||
piped = pipedTask != ""
|
||||
if piped {
|
||||
taskText = pipedTask
|
||||
} else {
|
||||
taskText = cfg.Task
|
||||
}
|
||||
}
|
||||
|
||||
if strings.TrimSpace(cfg.PromptFile) != "" {
|
||||
prompt, err := readAgentPromptFile(cfg.PromptFile, cfg.PromptFileExplicit)
|
||||
if err != nil {
|
||||
logError("Failed to read prompt file: " + err.Error())
|
||||
return 1
|
||||
}
|
||||
taskText = wrapTaskWithAgentPrompt(prompt, taskText)
|
||||
}
|
||||
|
||||
useStdin := cfg.ExplicitStdin || shouldUseStdin(taskText, piped)
|
||||
|
||||
targetArg := taskText
|
||||
if useStdin {
|
||||
targetArg = "-"
|
||||
}
|
||||
codexArgs := buildCodexArgsFn(cfg, targetArg)
|
||||
|
||||
logger := activeLogger()
|
||||
if logger == nil {
|
||||
fmt.Fprintln(os.Stderr, "ERROR: logger is not initialized")
|
||||
return 1
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "[%s]\n", name)
|
||||
fmt.Fprintf(os.Stderr, " Backend: %s\n", cfg.Backend)
|
||||
fmt.Fprintf(os.Stderr, " Command: %s %s\n", codexCommand, strings.Join(codexArgs, " "))
|
||||
fmt.Fprintf(os.Stderr, " PID: %d\n", os.Getpid())
|
||||
fmt.Fprintf(os.Stderr, " Log: %s\n", logger.Path())
|
||||
|
||||
if useStdin {
|
||||
var reasons []string
|
||||
if piped {
|
||||
reasons = append(reasons, "piped input")
|
||||
}
|
||||
if cfg.ExplicitStdin {
|
||||
reasons = append(reasons, "explicit \"-\"")
|
||||
}
|
||||
if strings.Contains(taskText, "\n") {
|
||||
reasons = append(reasons, "newline")
|
||||
}
|
||||
if strings.Contains(taskText, "\\") {
|
||||
reasons = append(reasons, "backslash")
|
||||
}
|
||||
if strings.Contains(taskText, "\"") {
|
||||
reasons = append(reasons, "double-quote")
|
||||
}
|
||||
if strings.Contains(taskText, "'") {
|
||||
reasons = append(reasons, "single-quote")
|
||||
}
|
||||
if strings.Contains(taskText, "`") {
|
||||
reasons = append(reasons, "backtick")
|
||||
}
|
||||
if strings.Contains(taskText, "$") {
|
||||
reasons = append(reasons, "dollar")
|
||||
}
|
||||
if len(taskText) > 800 {
|
||||
reasons = append(reasons, "length>800")
|
||||
}
|
||||
if len(reasons) > 0 {
|
||||
logWarn(fmt.Sprintf("Using stdin mode for task due to: %s", strings.Join(reasons, ", ")))
|
||||
}
|
||||
}
|
||||
|
||||
logInfo(fmt.Sprintf("%s running...", cfg.Backend))
|
||||
|
||||
taskSpec := TaskSpec{
|
||||
Task: taskText,
|
||||
WorkDir: cfg.WorkDir,
|
||||
Mode: cfg.Mode,
|
||||
SessionID: cfg.SessionID,
|
||||
Model: cfg.Model,
|
||||
ReasoningEffort: cfg.ReasoningEffort,
|
||||
Agent: cfg.Agent,
|
||||
SkipPermissions: cfg.SkipPermissions,
|
||||
UseStdin: useStdin,
|
||||
}
|
||||
|
||||
result := runTaskFn(taskSpec, false, cfg.Timeout)
|
||||
|
||||
if result.ExitCode != 0 {
|
||||
return result.ExitCode
|
||||
}
|
||||
|
||||
fmt.Println(result.Message)
|
||||
if result.SessionID != "" {
|
||||
fmt.Printf("\n---\nSESSION_ID: %s\n", result.SessionID)
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
@@ -1,7 +0,0 @@
package wrapper

import config "codeagent-wrapper/internal/config"

// Keep the existing Config name throughout the codebase, but source the
// implementation from internal/config.
type Config = config.Config
@@ -1,54 +0,0 @@
package wrapper

import (
	"context"

	backend "codeagent-wrapper/internal/backend"
	config "codeagent-wrapper/internal/config"
	executor "codeagent-wrapper/internal/executor"
)

// defaultRunCodexTaskFn is the default implementation of runCodexTaskFn (exposed for test reset).
func defaultRunCodexTaskFn(task TaskSpec, timeout int) TaskResult {
	return executor.DefaultRunCodexTaskFn(task, timeout)
}

var runCodexTaskFn = defaultRunCodexTaskFn

func topologicalSort(tasks []TaskSpec) ([][]TaskSpec, error) {
	return executor.TopologicalSort(tasks)
}

func executeConcurrent(layers [][]TaskSpec, timeout int) []TaskResult {
	maxWorkers := config.ResolveMaxParallelWorkers()
	return executeConcurrentWithContext(context.Background(), layers, timeout, maxWorkers)
}

func executeConcurrentWithContext(parentCtx context.Context, layers [][]TaskSpec, timeout int, maxWorkers int) []TaskResult {
	return executor.ExecuteConcurrentWithContext(parentCtx, layers, timeout, maxWorkers, runCodexTaskFn)
}

func generateFinalOutput(results []TaskResult) string {
	return executor.GenerateFinalOutput(results)
}

func generateFinalOutputWithMode(results []TaskResult, summaryOnly bool) string {
	return executor.GenerateFinalOutputWithMode(results, summaryOnly)
}

func buildCodexArgs(cfg *Config, targetArg string) []string {
	return backend.BuildCodexArgs(cfg, targetArg)
}

func runCodexTask(taskSpec TaskSpec, silent bool, timeoutSec int) TaskResult {
	return runCodexTaskWithContext(context.Background(), taskSpec, nil, nil, false, silent, timeoutSec)
}

func runCodexProcess(parentCtx context.Context, codexArgs []string, taskText string, useStdin bool, timeoutSec int) (message, threadID string, exitCode int) {
	res := runCodexTaskWithContext(parentCtx, TaskSpec{Task: taskText, WorkDir: defaultWorkdir, Mode: "new", UseStdin: useStdin}, nil, codexArgs, true, false, timeoutSec)
	return res.Message, res.SessionID, res.ExitCode
}

func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backend Backend, customArgs []string, useCustomArgs bool, silent bool, timeoutSec int) TaskResult {
	return executor.RunCodexTaskWithContext(parentCtx, taskSpec, backend, codexCommand, buildCodexArgsFn, customArgs, useCustomArgs, silent, timeoutSec)
}
@@ -1,26 +0,0 @@
package wrapper

import ilogger "codeagent-wrapper/internal/logger"

type Logger = ilogger.Logger
type CleanupStats = ilogger.CleanupStats

func NewLogger() (*Logger, error) { return ilogger.NewLogger() }

func NewLoggerWithSuffix(suffix string) (*Logger, error) { return ilogger.NewLoggerWithSuffix(suffix) }

func setLogger(l *Logger) { ilogger.SetLogger(l) }

func closeLogger() error { return ilogger.CloseLogger() }

func activeLogger() *Logger { return ilogger.ActiveLogger() }

func logInfo(msg string) { ilogger.LogInfo(msg) }

func logWarn(msg string) { ilogger.LogWarn(msg) }

func logError(msg string) { ilogger.LogError(msg) }

func cleanupOldLogs() (CleanupStats, error) { return ilogger.CleanupOldLogs() }

func sanitizeLogSuffix(raw string) string { return ilogger.SanitizeLogSuffix(raw) }
@@ -1,9 +0,0 @@
package wrapper

import (
	executor "codeagent-wrapper/internal/executor"
)

func parseParallelConfig(data []byte) (*ParallelConfig, error) {
	return executor.ParseParallelConfig(data)
}
@@ -1,34 +0,0 @@
package wrapper

import (
	"bufio"
	"io"

	parser "codeagent-wrapper/internal/parser"

	"github.com/goccy/go-json"
)

func parseJSONStream(r io.Reader) (message, threadID string) {
	return parseJSONStreamWithLog(r, logWarn, logInfo)
}

func parseJSONStreamWithWarn(r io.Reader, warnFn func(string)) (message, threadID string) {
	return parseJSONStreamWithLog(r, warnFn, logInfo)
}

func parseJSONStreamWithLog(r io.Reader, warnFn func(string), infoFn func(string)) (message, threadID string) {
	return parseJSONStreamInternal(r, warnFn, infoFn, nil, nil)
}

func parseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(string), onMessage func(), onComplete func()) (message, threadID string) {
	return parser.ParseJSONStreamInternal(r, warnFn, infoFn, onMessage, onComplete)
}

func hasKey(m map[string]json.RawMessage, key string) bool { return parser.HasKey(m, key) }

func discardInvalidJSON(decoder *json.Decoder, reader *bufio.Reader) (*bufio.Reader, error) {
	return parser.DiscardInvalidJSON(decoder, reader)
}

func normalizeText(text interface{}) string { return parser.NormalizeText(text) }
@@ -1,8 +0,0 @@
package wrapper

import executor "codeagent-wrapper/internal/executor"

// Type aliases to keep existing names in the wrapper package.
type ParallelConfig = executor.ParallelConfig
type TaskSpec = executor.TaskSpec
type TaskResult = executor.TaskResult
@@ -1,30 +0,0 @@
package wrapper

import (
	"os"
	"testing"
)

func TestDefaultIsTerminalCoverage(t *testing.T) {
	oldStdin := os.Stdin
	t.Cleanup(func() { os.Stdin = oldStdin })

	f, err := os.CreateTemp(t.TempDir(), "stdin-*")
	if err != nil {
		t.Fatalf("os.CreateTemp() error = %v", err)
	}
	defer os.Remove(f.Name())

	os.Stdin = f
	if got := defaultIsTerminal(); got {
		t.Fatalf("defaultIsTerminal() = %v, want false for regular file", got)
	}

	if err := f.Close(); err != nil {
		t.Fatalf("Close() error = %v", err)
	}
	os.Stdin = f
	if got := defaultIsTerminal(); !got {
		t.Fatalf("defaultIsTerminal() = %v, want true when Stat fails", got)
	}
}
@@ -1,9 +0,0 @@
package wrapper

import ilogger "codeagent-wrapper/internal/logger"

const wrapperName = ilogger.WrapperName

func currentWrapperName() string { return ilogger.CurrentWrapperName() }

func primaryLogPrefix() string { return ilogger.PrimaryLogPrefix() }
@@ -1,33 +0,0 @@
package backend

import config "codeagent-wrapper/internal/config"

// Backend defines the contract for invoking different AI CLI backends.
// Each backend is responsible for supplying the executable command and
// building the argument list based on the wrapper config.
type Backend interface {
	Name() string
	BuildArgs(cfg *config.Config, targetArg string) []string
	Command() string
	Env(baseURL, apiKey string) map[string]string
}

var (
	logWarnFn = func(string) {}
	logErrorFn = func(string) {}
)

// SetLogFuncs configures optional logging hooks used by some backends.
// Callers can safely pass nil to disable the hook.
func SetLogFuncs(warnFn, errorFn func(string)) {
	if warnFn != nil {
		logWarnFn = warnFn
	} else {
		logWarnFn = func(string) {}
	}
	if errorFn != nil {
		logErrorFn = errorFn
	} else {
		logErrorFn = func(string) {}
	}
}
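
For illustration only: the Backend contract above is small, so a new CLI can be wired in with four methods. The sketch below is not part of this repository's code; the "mycli" backend name and its flags are hypothetical, and a real implementation would also need an entry in the registry map that appears later in this diff.

package backend

import config "codeagent-wrapper/internal/config"

// MyCLIBackend is a hypothetical backend used only to illustrate the interface.
type MyCLIBackend struct{}

func (MyCLIBackend) Name() string    { return "mycli" }
func (MyCLIBackend) Command() string { return "mycli" }

// Env returns extra environment variables for the child process; nil means none.
func (MyCLIBackend) Env(baseURL, apiKey string) map[string]string { return nil }

// BuildArgs maps the wrapper config onto CLI flags; targetArg is the task text,
// or "-" when the task is delivered over stdin.
func (MyCLIBackend) BuildArgs(cfg *config.Config, targetArg string) []string {
	args := []string{"run", "--json"}
	if cfg != nil && cfg.Model != "" {
		args = append(args, "--model", cfg.Model)
	}
	return append(args, targetArg)
}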
@@ -1,139 +0,0 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
config "codeagent-wrapper/internal/config"
|
||||
|
||||
"github.com/goccy/go-json"
|
||||
)
|
||||
|
||||
type ClaudeBackend struct{}
|
||||
|
||||
func (ClaudeBackend) Name() string { return "claude" }
|
||||
func (ClaudeBackend) Command() string { return "claude" }
|
||||
func (ClaudeBackend) Env(baseURL, apiKey string) map[string]string {
|
||||
baseURL = strings.TrimSpace(baseURL)
|
||||
apiKey = strings.TrimSpace(apiKey)
|
||||
if baseURL == "" && apiKey == "" {
|
||||
return nil
|
||||
}
|
||||
env := make(map[string]string, 2)
|
||||
if baseURL != "" {
|
||||
env["ANTHROPIC_BASE_URL"] = baseURL
|
||||
}
|
||||
if apiKey != "" {
|
||||
env["ANTHROPIC_API_KEY"] = apiKey
|
||||
}
|
||||
return env
|
||||
}
|
||||
func (ClaudeBackend) BuildArgs(cfg *config.Config, targetArg string) []string {
|
||||
return buildClaudeArgs(cfg, targetArg)
|
||||
}
|
||||
|
||||
const MaxClaudeSettingsBytes = 1 << 20 // 1MB
|
||||
|
||||
type MinimalClaudeSettings struct {
|
||||
Env map[string]string
|
||||
Model string
|
||||
}
|
||||
|
||||
// LoadMinimalClaudeSettings extracts only a safe, minimal subset from ~/.claude/settings.json:
// - env: only string-typed values are accepted
// - model: only a string-typed value is accepted
// A missing file, a parse failure, or an oversized file all return an empty result.
|
||||
func LoadMinimalClaudeSettings() MinimalClaudeSettings {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil || home == "" {
|
||||
return MinimalClaudeSettings{}
|
||||
}
|
||||
|
||||
claudeDir := filepath.Clean(filepath.Join(home, ".claude"))
|
||||
settingPath := filepath.Clean(filepath.Join(claudeDir, "settings.json"))
|
||||
rel, err := filepath.Rel(claudeDir, settingPath)
|
||||
if err != nil || rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
|
||||
return MinimalClaudeSettings{}
|
||||
}
|
||||
|
||||
info, err := os.Stat(settingPath)
|
||||
if err != nil || info.Size() > MaxClaudeSettingsBytes {
|
||||
return MinimalClaudeSettings{}
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(settingPath) // #nosec G304 -- path is fixed under user home and validated to stay within claudeDir
|
||||
if err != nil {
|
||||
return MinimalClaudeSettings{}
|
||||
}
|
||||
|
||||
var cfg struct {
|
||||
Env map[string]any `json:"env"`
|
||||
Model any `json:"model"`
|
||||
}
|
||||
if err := json.Unmarshal(data, &cfg); err != nil {
|
||||
return MinimalClaudeSettings{}
|
||||
}
|
||||
|
||||
out := MinimalClaudeSettings{}
|
||||
|
||||
if model, ok := cfg.Model.(string); ok {
|
||||
out.Model = strings.TrimSpace(model)
|
||||
}
|
||||
|
||||
if len(cfg.Env) == 0 {
|
||||
return out
|
||||
}
|
||||
|
||||
env := make(map[string]string, len(cfg.Env))
|
||||
for k, v := range cfg.Env {
|
||||
s, ok := v.(string)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
env[k] = s
|
||||
}
|
||||
if len(env) == 0 {
|
||||
return out
|
||||
}
|
||||
out.Env = env
|
||||
return out
|
||||
}
|
||||
|
||||
func LoadMinimalEnvSettings() map[string]string {
|
||||
settings := LoadMinimalClaudeSettings()
|
||||
if len(settings.Env) == 0 {
|
||||
return nil
|
||||
}
|
||||
return settings.Env
|
||||
}
|
||||
|
||||
func buildClaudeArgs(cfg *config.Config, targetArg string) []string {
|
||||
if cfg == nil {
|
||||
return nil
|
||||
}
|
||||
args := []string{"-p"}
|
||||
// Default to skip permissions unless CODEAGENT_SKIP_PERMISSIONS=false
|
||||
if cfg.SkipPermissions || cfg.Yolo || config.EnvFlagDefaultTrue("CODEAGENT_SKIP_PERMISSIONS") {
|
||||
args = append(args, "--dangerously-skip-permissions")
|
||||
}
|
||||
|
||||
// Prevent infinite recursion: disable all setting sources (user, project, local)
|
||||
// This ensures a clean execution environment without CLAUDE.md or skills that would trigger codeagent
|
||||
args = append(args, "--setting-sources", "")
|
||||
|
||||
if model := strings.TrimSpace(cfg.Model); model != "" {
|
||||
args = append(args, "--model", model)
|
||||
}
|
||||
|
||||
if cfg.Mode == "resume" {
|
||||
if cfg.SessionID != "" {
|
||||
// Claude CLI uses -r <session_id> for resume.
|
||||
args = append(args, "-r", cfg.SessionID)
|
||||
}
|
||||
}
|
||||
|
||||
args = append(args, "--output-format", "stream-json", "--verbose", targetArg)
|
||||
|
||||
return args
|
||||
}
|
||||
@@ -1,79 +0,0 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
config "codeagent-wrapper/internal/config"
|
||||
)
|
||||
|
||||
type CodexBackend struct{}
|
||||
|
||||
func (CodexBackend) Name() string { return "codex" }
|
||||
func (CodexBackend) Command() string { return "codex" }
|
||||
func (CodexBackend) Env(baseURL, apiKey string) map[string]string {
|
||||
baseURL = strings.TrimSpace(baseURL)
|
||||
apiKey = strings.TrimSpace(apiKey)
|
||||
if baseURL == "" && apiKey == "" {
|
||||
return nil
|
||||
}
|
||||
env := make(map[string]string, 2)
|
||||
if baseURL != "" {
|
||||
env["OPENAI_BASE_URL"] = baseURL
|
||||
}
|
||||
if apiKey != "" {
|
||||
env["OPENAI_API_KEY"] = apiKey
|
||||
}
|
||||
return env
|
||||
}
|
||||
func (CodexBackend) BuildArgs(cfg *config.Config, targetArg string) []string {
|
||||
return BuildCodexArgs(cfg, targetArg)
|
||||
}
|
||||
|
||||
func BuildCodexArgs(cfg *config.Config, targetArg string) []string {
|
||||
if cfg == nil {
|
||||
panic("buildCodexArgs: nil config")
|
||||
}
|
||||
|
||||
var resumeSessionID string
|
||||
isResume := cfg.Mode == "resume"
|
||||
if isResume {
|
||||
resumeSessionID = strings.TrimSpace(cfg.SessionID)
|
||||
if resumeSessionID == "" {
|
||||
logErrorFn("invalid config: resume mode requires non-empty session_id")
|
||||
isResume = false
|
||||
}
|
||||
}
|
||||
|
||||
args := []string{"e"}
|
||||
|
||||
// Default to bypass sandbox unless CODEX_BYPASS_SANDBOX=false
|
||||
if cfg.Yolo || config.EnvFlagDefaultTrue("CODEX_BYPASS_SANDBOX") {
|
||||
logWarnFn("YOLO mode or CODEX_BYPASS_SANDBOX enabled: running without approval/sandbox protection")
|
||||
args = append(args, "--dangerously-bypass-approvals-and-sandbox")
|
||||
}
|
||||
|
||||
if model := strings.TrimSpace(cfg.Model); model != "" {
|
||||
args = append(args, "--model", model)
|
||||
}
|
||||
|
||||
if reasoningEffort := strings.TrimSpace(cfg.ReasoningEffort); reasoningEffort != "" {
|
||||
args = append(args, "-c", "model_reasoning_effort="+reasoningEffort)
|
||||
}
|
||||
|
||||
args = append(args, "--skip-git-repo-check")
|
||||
|
||||
if isResume {
|
||||
return append(args,
|
||||
"--json",
|
||||
"resume",
|
||||
resumeSessionID,
|
||||
targetArg,
|
||||
)
|
||||
}
|
||||
|
||||
return append(args,
|
||||
"-C", cfg.WorkDir,
|
||||
"--json",
|
||||
targetArg,
|
||||
)
|
||||
}
|
||||
@@ -1,110 +0,0 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
config "codeagent-wrapper/internal/config"
|
||||
)
|
||||
|
||||
type GeminiBackend struct{}
|
||||
|
||||
func (GeminiBackend) Name() string { return "gemini" }
|
||||
func (GeminiBackend) Command() string { return "gemini" }
|
||||
func (GeminiBackend) Env(baseURL, apiKey string) map[string]string {
|
||||
baseURL = strings.TrimSpace(baseURL)
|
||||
apiKey = strings.TrimSpace(apiKey)
|
||||
if baseURL == "" && apiKey == "" {
|
||||
return nil
|
||||
}
|
||||
env := make(map[string]string, 2)
|
||||
if baseURL != "" {
|
||||
env["GOOGLE_GEMINI_BASE_URL"] = baseURL
|
||||
}
|
||||
if apiKey != "" {
|
||||
env["GEMINI_API_KEY"] = apiKey
|
||||
}
|
||||
return env
|
||||
}
|
||||
func (GeminiBackend) BuildArgs(cfg *config.Config, targetArg string) []string {
|
||||
return buildGeminiArgs(cfg, targetArg)
|
||||
}
|
||||
|
||||
// LoadGeminiEnv loads environment variables from ~/.gemini/.env
|
||||
// Supports GEMINI_API_KEY, GEMINI_MODEL, GOOGLE_GEMINI_BASE_URL
|
||||
// Also sets GEMINI_API_KEY_AUTH_MECHANISM=bearer for third-party API compatibility
|
||||
func LoadGeminiEnv() map[string]string {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil || home == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
envDir := filepath.Clean(filepath.Join(home, ".gemini"))
|
||||
envPath := filepath.Clean(filepath.Join(envDir, ".env"))
|
||||
rel, err := filepath.Rel(envDir, envPath)
|
||||
if err != nil || rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
|
||||
return nil
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(envPath) // #nosec G304 -- path is fixed under user home and validated to stay within envDir
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
env := make(map[string]string)
|
||||
for _, line := range strings.Split(string(data), "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" || strings.HasPrefix(line, "#") {
|
||||
continue
|
||||
}
|
||||
idx := strings.IndexByte(line, '=')
|
||||
if idx <= 0 {
|
||||
continue
|
||||
}
|
||||
key := strings.TrimSpace(line[:idx])
|
||||
value := strings.TrimSpace(line[idx+1:])
|
||||
if key != "" && value != "" {
|
||||
env[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
// Set bearer auth mechanism for third-party API compatibility
|
||||
if _, ok := env["GEMINI_API_KEY"]; ok {
|
||||
if _, hasAuth := env["GEMINI_API_KEY_AUTH_MECHANISM"]; !hasAuth {
|
||||
env["GEMINI_API_KEY_AUTH_MECHANISM"] = "bearer"
|
||||
}
|
||||
}
|
||||
|
||||
if len(env) == 0 {
|
||||
return nil
|
||||
}
|
||||
return env
|
||||
}
|
||||
|
||||
func buildGeminiArgs(cfg *config.Config, targetArg string) []string {
|
||||
if cfg == nil {
|
||||
return nil
|
||||
}
|
||||
args := []string{"-o", "stream-json", "-y"}
|
||||
|
||||
if model := strings.TrimSpace(cfg.Model); model != "" {
|
||||
args = append(args, "-m", model)
|
||||
}
|
||||
|
||||
if cfg.Mode == "resume" {
|
||||
if cfg.SessionID != "" {
|
||||
args = append(args, "-r", cfg.SessionID)
|
||||
}
|
||||
}
|
||||
|
||||
// Use positional argument instead of deprecated -p flag.
|
||||
// For stdin mode ("-"), use -p to read from stdin.
|
||||
if targetArg == "-" {
|
||||
args = append(args, "-p", targetArg)
|
||||
} else {
|
||||
args = append(args, targetArg)
|
||||
}
|
||||
|
||||
return args
|
||||
}
|
||||
@@ -1,29 +0,0 @@
package backend

import (
	"strings"

	config "codeagent-wrapper/internal/config"
)

type OpencodeBackend struct{}

func (OpencodeBackend) Name() string { return "opencode" }
func (OpencodeBackend) Command() string { return "opencode" }
func (OpencodeBackend) Env(baseURL, apiKey string) map[string]string { return nil }
func (OpencodeBackend) BuildArgs(cfg *config.Config, targetArg string) []string {
	args := []string{"run"}
	if cfg != nil {
		if model := strings.TrimSpace(cfg.Model); model != "" {
			args = append(args, "-m", model)
		}
		if cfg.Mode == "resume" && cfg.SessionID != "" {
			args = append(args, "-s", cfg.SessionID)
		}
	}
	args = append(args, "--format", "json")
	if targetArg != "-" {
		args = append(args, targetArg)
	}
	return args
}
@@ -1,29 +0,0 @@
package backend

import (
	"fmt"
	"strings"
)

var registry = map[string]Backend{
	"codex": CodexBackend{},
	"claude": ClaudeBackend{},
	"gemini": GeminiBackend{},
	"opencode": OpencodeBackend{},
}

// Registry exposes the available backends. Intended for internal inspection/tests.
func Registry() map[string]Backend {
	return registry
}

func Select(name string) (Backend, error) {
	key := strings.ToLower(strings.TrimSpace(name))
	if key == "" {
		key = "codex"
	}
	if backend, ok := registry[key]; ok {
		return backend, nil
	}
	return nil, fmt.Errorf("unsupported backend %q", name)
}
@@ -1,220 +0,0 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
ilogger "codeagent-wrapper/internal/logger"
|
||||
|
||||
"github.com/goccy/go-json"
|
||||
)
|
||||
|
||||
type BackendConfig struct {
|
||||
BaseURL string `json:"base_url,omitempty"`
|
||||
APIKey string `json:"api_key,omitempty"`
|
||||
}
|
||||
|
||||
type AgentModelConfig struct {
|
||||
Backend string `json:"backend"`
|
||||
Model string `json:"model"`
|
||||
PromptFile string `json:"prompt_file,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
Yolo bool `json:"yolo,omitempty"`
|
||||
Reasoning string `json:"reasoning,omitempty"`
|
||||
BaseURL string `json:"base_url,omitempty"`
|
||||
APIKey string `json:"api_key,omitempty"`
|
||||
}
|
||||
|
||||
type ModelsConfig struct {
|
||||
DefaultBackend string `json:"default_backend"`
|
||||
DefaultModel string `json:"default_model"`
|
||||
Agents map[string]AgentModelConfig `json:"agents"`
|
||||
Backends map[string]BackendConfig `json:"backends,omitempty"`
|
||||
}
|
||||
|
||||
var defaultModelsConfig = ModelsConfig{
|
||||
DefaultBackend: "opencode",
|
||||
DefaultModel: "opencode/grok-code",
|
||||
Agents: map[string]AgentModelConfig{
|
||||
"oracle": {Backend: "claude", Model: "claude-opus-4-5-20251101", PromptFile: "~/.claude/skills/omo/references/oracle.md", Description: "Technical advisor"},
|
||||
"librarian": {Backend: "claude", Model: "claude-sonnet-4-5-20250929", PromptFile: "~/.claude/skills/omo/references/librarian.md", Description: "Researcher"},
|
||||
"explore": {Backend: "opencode", Model: "opencode/grok-code", PromptFile: "~/.claude/skills/omo/references/explore.md", Description: "Code search"},
|
||||
"develop": {Backend: "codex", Model: "", PromptFile: "~/.claude/skills/omo/references/develop.md", Description: "Code development"},
|
||||
"frontend-ui-ux-engineer": {Backend: "gemini", Model: "", PromptFile: "~/.claude/skills/omo/references/frontend-ui-ux-engineer.md", Description: "Frontend engineer"},
|
||||
"document-writer": {Backend: "gemini", Model: "", PromptFile: "~/.claude/skills/omo/references/document-writer.md", Description: "Documentation"},
|
||||
},
|
||||
}
|
||||
|
||||
var (
|
||||
modelsConfigOnce sync.Once
|
||||
modelsConfigCached *ModelsConfig
|
||||
)
|
||||
|
||||
func modelsConfig() *ModelsConfig {
|
||||
modelsConfigOnce.Do(func() {
|
||||
modelsConfigCached = loadModelsConfig()
|
||||
})
|
||||
if modelsConfigCached == nil {
|
||||
return &defaultModelsConfig
|
||||
}
|
||||
return modelsConfigCached
|
||||
}
|
||||
|
||||
func loadModelsConfig() *ModelsConfig {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
ilogger.LogWarn(fmt.Sprintf("Failed to resolve home directory for models config: %v; using defaults", err))
|
||||
return &defaultModelsConfig
|
||||
}
|
||||
|
||||
configDir := filepath.Clean(filepath.Join(home, ".codeagent"))
|
||||
configPath := filepath.Clean(filepath.Join(configDir, "models.json"))
|
||||
rel, err := filepath.Rel(configDir, configPath)
|
||||
if err != nil || rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
|
||||
return &defaultModelsConfig
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(configPath) // #nosec G304 -- path is fixed under user home and validated to stay within configDir
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
ilogger.LogWarn(fmt.Sprintf("Failed to read models config %s: %v; using defaults", configPath, err))
|
||||
}
|
||||
return &defaultModelsConfig
|
||||
}
|
||||
|
||||
var cfg ModelsConfig
|
||||
if err := json.Unmarshal(data, &cfg); err != nil {
|
||||
ilogger.LogWarn(fmt.Sprintf("Failed to parse models config %s: %v; using defaults", configPath, err))
|
||||
return &defaultModelsConfig
|
||||
}
|
||||
|
||||
cfg.DefaultBackend = strings.TrimSpace(cfg.DefaultBackend)
|
||||
if cfg.DefaultBackend == "" {
|
||||
cfg.DefaultBackend = defaultModelsConfig.DefaultBackend
|
||||
}
|
||||
cfg.DefaultModel = strings.TrimSpace(cfg.DefaultModel)
|
||||
if cfg.DefaultModel == "" {
|
||||
cfg.DefaultModel = defaultModelsConfig.DefaultModel
|
||||
}
|
||||
|
||||
// Merge with defaults
|
||||
for name, agent := range defaultModelsConfig.Agents {
|
||||
if _, exists := cfg.Agents[name]; !exists {
|
||||
if cfg.Agents == nil {
|
||||
cfg.Agents = make(map[string]AgentModelConfig)
|
||||
}
|
||||
cfg.Agents[name] = agent
|
||||
}
|
||||
}
|
||||
|
||||
// Normalize backend keys so lookups can be case-insensitive.
|
||||
if len(cfg.Backends) > 0 {
|
||||
normalized := make(map[string]BackendConfig, len(cfg.Backends))
|
||||
for k, v := range cfg.Backends {
|
||||
key := strings.ToLower(strings.TrimSpace(k))
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
normalized[key] = v
|
||||
}
|
||||
if len(normalized) > 0 {
|
||||
cfg.Backends = normalized
|
||||
} else {
|
||||
cfg.Backends = nil
|
||||
}
|
||||
}
|
||||
|
||||
return &cfg
|
||||
}
|
||||
|
||||
func LoadDynamicAgent(name string) (AgentModelConfig, bool) {
|
||||
if err := ValidateAgentName(name); err != nil {
|
||||
return AgentModelConfig{}, false
|
||||
}
|
||||
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil || strings.TrimSpace(home) == "" {
|
||||
return AgentModelConfig{}, false
|
||||
}
|
||||
|
||||
absPath := filepath.Join(home, ".codeagent", "agents", name+".md")
|
||||
info, err := os.Stat(absPath)
|
||||
if err != nil || info.IsDir() {
|
||||
return AgentModelConfig{}, false
|
||||
}
|
||||
|
||||
return AgentModelConfig{PromptFile: "~/.codeagent/agents/" + name + ".md"}, true
|
||||
}
|
||||
|
||||
func ResolveBackendConfig(backendName string) (baseURL, apiKey string) {
|
||||
cfg := modelsConfig()
|
||||
resolved := resolveBackendConfig(cfg, backendName)
|
||||
return strings.TrimSpace(resolved.BaseURL), strings.TrimSpace(resolved.APIKey)
|
||||
}
|
||||
|
||||
func resolveBackendConfig(cfg *ModelsConfig, backendName string) BackendConfig {
|
||||
if cfg == nil || len(cfg.Backends) == 0 {
|
||||
return BackendConfig{}
|
||||
}
|
||||
key := strings.ToLower(strings.TrimSpace(backendName))
|
||||
if key == "" {
|
||||
key = strings.ToLower(strings.TrimSpace(cfg.DefaultBackend))
|
||||
}
|
||||
if key == "" {
|
||||
return BackendConfig{}
|
||||
}
|
||||
if backend, ok := cfg.Backends[key]; ok {
|
||||
return backend
|
||||
}
|
||||
return BackendConfig{}
|
||||
}
|
||||
|
||||
func resolveAgentConfig(agentName string) (backend, model, promptFile, reasoning, baseURL, apiKey string, yolo bool) {
|
||||
cfg := modelsConfig()
|
||||
if agent, ok := cfg.Agents[agentName]; ok {
|
||||
backend = strings.TrimSpace(agent.Backend)
|
||||
if backend == "" {
|
||||
backend = cfg.DefaultBackend
|
||||
}
|
||||
backendCfg := resolveBackendConfig(cfg, backend)
|
||||
|
||||
baseURL = strings.TrimSpace(agent.BaseURL)
|
||||
if baseURL == "" {
|
||||
baseURL = strings.TrimSpace(backendCfg.BaseURL)
|
||||
}
|
||||
apiKey = strings.TrimSpace(agent.APIKey)
|
||||
if apiKey == "" {
|
||||
apiKey = strings.TrimSpace(backendCfg.APIKey)
|
||||
}
|
||||
|
||||
return backend, strings.TrimSpace(agent.Model), agent.PromptFile, agent.Reasoning, baseURL, apiKey, agent.Yolo
|
||||
}
|
||||
|
||||
if dynamic, ok := LoadDynamicAgent(agentName); ok {
|
||||
backend = cfg.DefaultBackend
|
||||
model = cfg.DefaultModel
|
||||
backendCfg := resolveBackendConfig(cfg, backend)
|
||||
baseURL = strings.TrimSpace(backendCfg.BaseURL)
|
||||
apiKey = strings.TrimSpace(backendCfg.APIKey)
|
||||
return backend, model, dynamic.PromptFile, "", baseURL, apiKey, false
|
||||
}
|
||||
|
||||
backend = cfg.DefaultBackend
|
||||
model = cfg.DefaultModel
|
||||
backendCfg := resolveBackendConfig(cfg, backend)
|
||||
baseURL = strings.TrimSpace(backendCfg.BaseURL)
|
||||
apiKey = strings.TrimSpace(backendCfg.APIKey)
|
||||
return backend, model, "", "", baseURL, apiKey, false
|
||||
}
|
||||
|
||||
func ResolveAgentConfig(agentName string) (backend, model, promptFile, reasoning, baseURL, apiKey string, yolo bool) {
|
||||
return resolveAgentConfig(agentName)
|
||||
}
|
||||
|
||||
func ResetModelsConfigCacheForTest() {
|
||||
modelsConfigCached = nil
|
||||
modelsConfigOnce = sync.Once{}
|
||||
}
|
||||
@@ -1,221 +0,0 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestResolveAgentConfig_Defaults(t *testing.T) {
|
||||
home := t.TempDir()
|
||||
t.Setenv("HOME", home)
|
||||
t.Setenv("USERPROFILE", home)
|
||||
t.Cleanup(ResetModelsConfigCacheForTest)
|
||||
ResetModelsConfigCacheForTest()
|
||||
|
||||
// Test that default agents resolve correctly without config file
|
||||
tests := []struct {
|
||||
agent string
|
||||
wantBackend string
|
||||
wantModel string
|
||||
wantPromptFile string
|
||||
}{
|
||||
{"oracle", "claude", "claude-opus-4-5-20251101", "~/.claude/skills/omo/references/oracle.md"},
|
||||
{"librarian", "claude", "claude-sonnet-4-5-20250929", "~/.claude/skills/omo/references/librarian.md"},
|
||||
{"explore", "opencode", "opencode/grok-code", "~/.claude/skills/omo/references/explore.md"},
|
||||
{"frontend-ui-ux-engineer", "gemini", "", "~/.claude/skills/omo/references/frontend-ui-ux-engineer.md"},
|
||||
{"document-writer", "gemini", "", "~/.claude/skills/omo/references/document-writer.md"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.agent, func(t *testing.T) {
|
||||
backend, model, promptFile, _, _, _, _ := resolveAgentConfig(tt.agent)
|
||||
if backend != tt.wantBackend {
|
||||
t.Errorf("backend = %q, want %q", backend, tt.wantBackend)
|
||||
}
|
||||
if model != tt.wantModel {
|
||||
t.Errorf("model = %q, want %q", model, tt.wantModel)
|
||||
}
|
||||
if promptFile != tt.wantPromptFile {
|
||||
t.Errorf("promptFile = %q, want %q", promptFile, tt.wantPromptFile)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveAgentConfig_UnknownAgent(t *testing.T) {
|
||||
home := t.TempDir()
|
||||
t.Setenv("HOME", home)
|
||||
t.Setenv("USERPROFILE", home)
|
||||
t.Cleanup(ResetModelsConfigCacheForTest)
|
||||
ResetModelsConfigCacheForTest()
|
||||
|
||||
backend, model, promptFile, _, _, _, _ := resolveAgentConfig("unknown-agent")
|
||||
if backend != "opencode" {
|
||||
t.Errorf("unknown agent backend = %q, want %q", backend, "opencode")
|
||||
}
|
||||
if model != "opencode/grok-code" {
|
||||
t.Errorf("unknown agent model = %q, want %q", model, "opencode/grok-code")
|
||||
}
|
||||
if promptFile != "" {
|
||||
t.Errorf("unknown agent promptFile = %q, want empty", promptFile)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadModelsConfig_NoFile(t *testing.T) {
|
||||
home := "/nonexistent/path/that/does/not/exist"
|
||||
t.Setenv("HOME", home)
|
||||
t.Setenv("USERPROFILE", home)
|
||||
t.Cleanup(ResetModelsConfigCacheForTest)
|
||||
ResetModelsConfigCacheForTest()
|
||||
|
||||
cfg := loadModelsConfig()
|
||||
if cfg.DefaultBackend != "opencode" {
|
||||
t.Errorf("DefaultBackend = %q, want %q", cfg.DefaultBackend, "opencode")
|
||||
}
|
||||
if len(cfg.Agents) != 6 {
|
||||
t.Errorf("len(Agents) = %d, want 6", len(cfg.Agents))
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadModelsConfig_WithFile(t *testing.T) {
|
||||
// Create temp dir and config file
|
||||
tmpDir := t.TempDir()
|
||||
configDir := filepath.Join(tmpDir, ".codeagent")
|
||||
if err := os.MkdirAll(configDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
configContent := `{
|
||||
"default_backend": "claude",
|
||||
"default_model": "claude-opus-4",
|
||||
"backends": {
|
||||
"Claude": {
|
||||
"base_url": "https://backend.example",
|
||||
"api_key": "backend-key"
|
||||
},
|
||||
"codex": {
|
||||
"base_url": "https://openai.example",
|
||||
"api_key": "openai-key"
|
||||
}
|
||||
},
|
||||
"agents": {
|
||||
"custom-agent": {
|
||||
"backend": "codex",
|
||||
"model": "gpt-4o",
|
||||
"description": "Custom agent",
|
||||
"base_url": "https://agent.example",
|
||||
"api_key": "agent-key"
|
||||
}
|
||||
}
|
||||
}`
|
||||
configPath := filepath.Join(configDir, "models.json")
|
||||
if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Setenv("HOME", tmpDir)
|
||||
t.Setenv("USERPROFILE", tmpDir)
|
||||
t.Cleanup(ResetModelsConfigCacheForTest)
|
||||
ResetModelsConfigCacheForTest()
|
||||
|
||||
cfg := loadModelsConfig()
|
||||
|
||||
if cfg.DefaultBackend != "claude" {
|
||||
t.Errorf("DefaultBackend = %q, want %q", cfg.DefaultBackend, "claude")
|
||||
}
|
||||
if cfg.DefaultModel != "claude-opus-4" {
|
||||
t.Errorf("DefaultModel = %q, want %q", cfg.DefaultModel, "claude-opus-4")
|
||||
}
|
||||
|
||||
// Check custom agent
|
||||
if agent, ok := cfg.Agents["custom-agent"]; !ok {
|
||||
t.Error("custom-agent not found")
|
||||
} else {
|
||||
if agent.Backend != "codex" {
|
||||
t.Errorf("custom-agent.Backend = %q, want %q", agent.Backend, "codex")
|
||||
}
|
||||
if agent.Model != "gpt-4o" {
|
||||
t.Errorf("custom-agent.Model = %q, want %q", agent.Model, "gpt-4o")
|
||||
}
|
||||
}
|
||||
|
||||
// Check that defaults are merged
|
||||
if _, ok := cfg.Agents["oracle"]; !ok {
|
||||
t.Error("default agent oracle should be merged")
|
||||
}
|
||||
|
||||
baseURL, apiKey := ResolveBackendConfig("claude")
|
||||
if baseURL != "https://backend.example" {
|
||||
t.Errorf("ResolveBackendConfig(baseURL) = %q, want %q", baseURL, "https://backend.example")
|
||||
}
|
||||
if apiKey != "backend-key" {
|
||||
t.Errorf("ResolveBackendConfig(apiKey) = %q, want %q", apiKey, "backend-key")
|
||||
}
|
||||
|
||||
backend, model, _, _, agentBaseURL, agentAPIKey, _ := ResolveAgentConfig("custom-agent")
|
||||
if backend != "codex" {
|
||||
t.Errorf("ResolveAgentConfig(backend) = %q, want %q", backend, "codex")
|
||||
}
|
||||
if model != "gpt-4o" {
|
||||
t.Errorf("ResolveAgentConfig(model) = %q, want %q", model, "gpt-4o")
|
||||
}
|
||||
if agentBaseURL != "https://agent.example" {
|
||||
t.Errorf("ResolveAgentConfig(baseURL) = %q, want %q", agentBaseURL, "https://agent.example")
|
||||
}
|
||||
if agentAPIKey != "agent-key" {
|
||||
t.Errorf("ResolveAgentConfig(apiKey) = %q, want %q", agentAPIKey, "agent-key")
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveAgentConfig_DynamicAgent(t *testing.T) {
|
||||
home := t.TempDir()
|
||||
t.Setenv("HOME", home)
|
||||
t.Setenv("USERPROFILE", home)
|
||||
t.Cleanup(ResetModelsConfigCacheForTest)
|
||||
ResetModelsConfigCacheForTest()
|
||||
|
||||
agentDir := filepath.Join(home, ".codeagent", "agents")
|
||||
if err := os.MkdirAll(agentDir, 0o755); err != nil {
|
||||
t.Fatalf("MkdirAll: %v", err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(agentDir, "sarsh.md"), []byte("prompt\n"), 0o644); err != nil {
|
||||
t.Fatalf("WriteFile: %v", err)
|
||||
}
|
||||
|
||||
backend, model, promptFile, _, _, _, _ := resolveAgentConfig("sarsh")
|
||||
if backend != "opencode" {
|
||||
t.Errorf("backend = %q, want %q", backend, "opencode")
|
||||
}
|
||||
if model != "opencode/grok-code" {
|
||||
t.Errorf("model = %q, want %q", model, "opencode/grok-code")
|
||||
}
|
||||
if promptFile != "~/.codeagent/agents/sarsh.md" {
|
||||
t.Errorf("promptFile = %q, want %q", promptFile, "~/.codeagent/agents/sarsh.md")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadModelsConfig_InvalidJSON(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
configDir := filepath.Join(tmpDir, ".codeagent")
|
||||
if err := os.MkdirAll(configDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Write invalid JSON
|
||||
configPath := filepath.Join(configDir, "models.json")
|
||||
if err := os.WriteFile(configPath, []byte("invalid json {"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Setenv("HOME", tmpDir)
|
||||
t.Setenv("USERPROFILE", tmpDir)
|
||||
t.Cleanup(ResetModelsConfigCacheForTest)
|
||||
ResetModelsConfigCacheForTest()
|
||||
|
||||
cfg := loadModelsConfig()
|
||||
// Should fall back to defaults
|
||||
if cfg.DefaultBackend != "opencode" {
|
||||
t.Errorf("invalid JSON should fallback, got DefaultBackend = %q", cfg.DefaultBackend)
|
||||
}
|
||||
}
|
||||
@@ -1,102 +0,0 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Config holds CLI configuration.
|
||||
type Config struct {
|
||||
Mode string // "new" or "resume"
|
||||
Task string
|
||||
SessionID string
|
||||
WorkDir string
|
||||
Model string
|
||||
ReasoningEffort string
|
||||
ExplicitStdin bool
|
||||
Timeout int
|
||||
Backend string
|
||||
Agent string
|
||||
PromptFile string
|
||||
PromptFileExplicit bool
|
||||
SkipPermissions bool
|
||||
Yolo bool
|
||||
MaxParallelWorkers int
|
||||
}
|
||||
|
||||
// EnvFlagEnabled returns true when the environment variable exists and is not
|
||||
// explicitly set to a falsey value ("0/false/no/off").
|
||||
func EnvFlagEnabled(key string) bool {
|
||||
val, ok := os.LookupEnv(key)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
val = strings.TrimSpace(strings.ToLower(val))
|
||||
switch val {
|
||||
case "", "0", "false", "no", "off":
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func ParseBoolFlag(val string, defaultValue bool) bool {
|
||||
val = strings.TrimSpace(strings.ToLower(val))
|
||||
switch val {
|
||||
case "1", "true", "yes", "on":
|
||||
return true
|
||||
case "0", "false", "no", "off":
|
||||
return false
|
||||
default:
|
||||
return defaultValue
|
||||
}
|
||||
}
|
||||
|
||||
// EnvFlagDefaultTrue returns true unless the env var is explicitly set to
|
||||
// false/0/no/off.
|
||||
func EnvFlagDefaultTrue(key string) bool {
|
||||
val, ok := os.LookupEnv(key)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
return ParseBoolFlag(val, true)
|
||||
}
|
||||
|
||||
func ValidateAgentName(name string) error {
|
||||
if strings.TrimSpace(name) == "" {
|
||||
return fmt.Errorf("agent name is empty")
|
||||
}
|
||||
for _, r := range name {
|
||||
switch {
|
||||
case r >= 'a' && r <= 'z':
|
||||
case r >= 'A' && r <= 'Z':
|
||||
case r >= '0' && r <= '9':
|
||||
case r == '-', r == '_':
|
||||
default:
|
||||
return fmt.Errorf("agent name %q contains invalid character %q", name, r)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const maxParallelWorkersLimit = 100
|
||||
|
||||
// ResolveMaxParallelWorkers reads CODEAGENT_MAX_PARALLEL_WORKERS. It returns 0
|
||||
// for "unlimited".
|
||||
func ResolveMaxParallelWorkers() int {
|
||||
raw := strings.TrimSpace(os.Getenv("CODEAGENT_MAX_PARALLEL_WORKERS"))
|
||||
if raw == "" {
|
||||
return 0
|
||||
}
|
||||
|
||||
value, err := strconv.Atoi(raw)
|
||||
if err != nil || value < 0 {
|
||||
return 0
|
||||
}
|
||||
if value > maxParallelWorkersLimit {
|
||||
return maxParallelWorkersLimit
|
||||
}
|
||||
return value
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// NewViper returns a viper instance configured for CODEAGENT_* environment
|
||||
// variables and an optional config file.
|
||||
//
|
||||
// Search order when configFile is empty:
|
||||
// - $HOME/.codeagent/config.(yaml|yml|json|toml|...)
|
||||
func NewViper(configFile string) (*viper.Viper, error) {
|
||||
v := viper.New()
|
||||
v.SetEnvPrefix("CODEAGENT")
|
||||
v.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
|
||||
v.AutomaticEnv()
|
||||
|
||||
if strings.TrimSpace(configFile) != "" {
|
||||
v.SetConfigFile(configFile)
|
||||
if err := v.ReadInConfig(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil || strings.TrimSpace(home) == "" {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
v.SetConfigName("config")
|
||||
v.AddConfigPath(filepath.Join(home, ".codeagent"))
|
||||
if err := v.ReadInConfig(); err != nil {
|
||||
var notFound viper.ConfigFileNotFoundError
|
||||
if errors.As(err, ¬Found) {
|
||||
return v, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
@@ -1,124 +0,0 @@
|
||||
package executor
|
||||
|
||||
import "bytes"
|
||||
|
||||
type logWriter struct {
|
||||
prefix string
|
||||
maxLen int
|
||||
buf bytes.Buffer
|
||||
dropped bool
|
||||
}
|
||||
|
||||
func newLogWriter(prefix string, maxLen int) *logWriter {
|
||||
if maxLen <= 0 {
|
||||
maxLen = codexLogLineLimit
|
||||
}
|
||||
return &logWriter{prefix: prefix, maxLen: maxLen}
|
||||
}
|
||||
|
||||
func (lw *logWriter) Write(p []byte) (int, error) {
|
||||
if lw == nil {
|
||||
return len(p), nil
|
||||
}
|
||||
total := len(p)
|
||||
for len(p) > 0 {
|
||||
if idx := bytes.IndexByte(p, '\n'); idx >= 0 {
|
||||
lw.writeLimited(p[:idx])
|
||||
lw.logLine(true)
|
||||
p = p[idx+1:]
|
||||
continue
|
||||
}
|
||||
lw.writeLimited(p)
|
||||
break
|
||||
}
|
||||
return total, nil
|
||||
}
|
||||
|
||||
func (lw *logWriter) Flush() {
|
||||
if lw == nil || lw.buf.Len() == 0 {
|
||||
return
|
||||
}
|
||||
lw.logLine(false)
|
||||
}
|
||||
|
||||
func (lw *logWriter) logLine(force bool) {
|
||||
if lw == nil {
|
||||
return
|
||||
}
|
||||
line := lw.buf.String()
|
||||
dropped := lw.dropped
|
||||
lw.dropped = false
|
||||
lw.buf.Reset()
|
||||
if line == "" && !force {
|
||||
return
|
||||
}
|
||||
if lw.maxLen > 0 {
|
||||
if dropped {
|
||||
if lw.maxLen > 3 {
|
||||
line = line[:min(len(line), lw.maxLen-3)] + "..."
|
||||
} else {
|
||||
line = line[:min(len(line), lw.maxLen)]
|
||||
}
|
||||
} else if len(line) > lw.maxLen {
|
||||
cutoff := lw.maxLen
|
||||
if cutoff > 3 {
|
||||
line = line[:cutoff-3] + "..."
|
||||
} else {
|
||||
line = line[:cutoff]
|
||||
}
|
||||
}
|
||||
}
|
||||
logInfo(lw.prefix + line)
|
||||
}
|
||||
|
||||
func (lw *logWriter) writeLimited(p []byte) {
|
||||
if lw == nil || len(p) == 0 {
|
||||
return
|
||||
}
|
||||
if lw.maxLen <= 0 {
|
||||
lw.buf.Write(p)
|
||||
return
|
||||
}
|
||||
|
||||
remaining := lw.maxLen - lw.buf.Len()
|
||||
if remaining <= 0 {
|
||||
lw.dropped = true
|
||||
return
|
||||
}
|
||||
if len(p) <= remaining {
|
||||
lw.buf.Write(p)
|
||||
return
|
||||
}
|
||||
lw.buf.Write(p[:remaining])
|
||||
lw.dropped = true
|
||||
}
|
||||
|
||||
type tailBuffer struct {
|
||||
limit int
|
||||
data []byte
|
||||
}
|
||||
|
||||
func (b *tailBuffer) Write(p []byte) (int, error) {
|
||||
if b.limit <= 0 {
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
if len(p) >= b.limit {
|
||||
b.data = append(b.data[:0], p[len(p)-b.limit:]...)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
total := len(b.data) + len(p)
|
||||
if total <= b.limit {
|
||||
b.data = append(b.data, p...)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
overflow := total - b.limit
|
||||
b.data = append(b.data[overflow:], p...)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func (b *tailBuffer) String() string {
|
||||
return string(b.data)
|
||||
}
|
||||
@@ -1,135 +0,0 @@
|
||||
package executor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
config "codeagent-wrapper/internal/config"
|
||||
)
|
||||
|
||||
func ParseParallelConfig(data []byte) (*ParallelConfig, error) {
|
||||
trimmed := bytes.TrimSpace(data)
|
||||
if len(trimmed) == 0 {
|
||||
return nil, fmt.Errorf("parallel config is empty")
|
||||
}
|
||||
|
||||
tasks := strings.Split(string(trimmed), "---TASK---")
|
||||
var cfg ParallelConfig
|
||||
seen := make(map[string]struct{})
|
||||
|
||||
taskIndex := 0
|
||||
for _, taskBlock := range tasks {
|
||||
taskBlock = strings.TrimSpace(taskBlock)
|
||||
if taskBlock == "" {
|
||||
continue
|
||||
}
|
||||
taskIndex++
|
||||
|
||||
parts := strings.SplitN(taskBlock, "---CONTENT---", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, fmt.Errorf("task block #%d missing ---CONTENT--- separator", taskIndex)
|
||||
}
|
||||
|
||||
meta := strings.TrimSpace(parts[0])
|
||||
content := strings.TrimSpace(parts[1])
|
||||
|
||||
task := TaskSpec{WorkDir: defaultWorkdir}
|
||||
agentSpecified := false
|
||||
for _, line := range strings.Split(meta, "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
kv := strings.SplitN(line, ":", 2)
|
||||
if len(kv) != 2 {
|
||||
continue
|
||||
}
|
||||
key := strings.TrimSpace(kv[0])
|
||||
value := strings.TrimSpace(kv[1])
|
||||
|
||||
switch key {
|
||||
case "id":
|
||||
task.ID = value
|
||||
case "workdir":
|
||||
// Validate workdir: "-" is not a valid directory
|
||||
if value == "-" {
|
||||
return nil, fmt.Errorf("task block #%d has invalid workdir: '-' is not a valid directory path", taskIndex)
|
||||
}
|
||||
task.WorkDir = value
|
||||
case "session_id":
|
||||
task.SessionID = value
|
||||
task.Mode = "resume"
|
||||
case "backend":
|
||||
task.Backend = value
|
||||
case "model":
|
||||
task.Model = value
|
||||
case "reasoning_effort":
|
||||
task.ReasoningEffort = value
|
||||
case "agent":
|
||||
agentSpecified = true
|
||||
task.Agent = value
|
||||
case "skip_permissions", "skip-permissions":
|
||||
if value == "" {
|
||||
task.SkipPermissions = true
|
||||
continue
|
||||
}
|
||||
task.SkipPermissions = config.ParseBoolFlag(value, false)
|
||||
case "dependencies":
|
||||
for _, dep := range strings.Split(value, ",") {
|
||||
dep = strings.TrimSpace(dep)
|
||||
if dep != "" {
|
||||
task.Dependencies = append(task.Dependencies, dep)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if task.Mode == "" {
|
||||
task.Mode = "new"
|
||||
}
|
||||
|
||||
if agentSpecified {
|
||||
if strings.TrimSpace(task.Agent) == "" {
|
||||
return nil, fmt.Errorf("task block #%d has empty agent field", taskIndex)
|
||||
}
|
||||
if err := config.ValidateAgentName(task.Agent); err != nil {
|
||||
return nil, fmt.Errorf("task block #%d invalid agent name: %w", taskIndex, err)
|
||||
}
|
||||
backend, model, promptFile, reasoning, _, _, _ := config.ResolveAgentConfig(task.Agent)
|
||||
if task.Backend == "" {
|
||||
task.Backend = backend
|
||||
}
|
||||
if task.Model == "" {
|
||||
task.Model = model
|
||||
}
|
||||
if task.ReasoningEffort == "" {
|
||||
task.ReasoningEffort = reasoning
|
||||
}
|
||||
task.PromptFile = promptFile
|
||||
}
|
||||
|
||||
if task.ID == "" {
|
||||
return nil, fmt.Errorf("task block #%d missing id field", taskIndex)
|
||||
}
|
||||
if content == "" {
|
||||
return nil, fmt.Errorf("task block #%d (%q) missing content", taskIndex, task.ID)
|
||||
}
|
||||
if task.Mode == "resume" && strings.TrimSpace(task.SessionID) == "" {
|
||||
return nil, fmt.Errorf("task block #%d (%q) has empty session_id", taskIndex, task.ID)
|
||||
}
|
||||
if _, exists := seen[task.ID]; exists {
|
||||
return nil, fmt.Errorf("task block #%d has duplicate id: %s", taskIndex, task.ID)
|
||||
}
|
||||
|
||||
task.Task = content
|
||||
cfg.Tasks = append(cfg.Tasks, task)
|
||||
seen[task.ID] = struct{}{}
|
||||
}
|
||||
|
||||
if len(cfg.Tasks) == 0 {
|
||||
return nil, fmt.Errorf("no tasks found")
|
||||
}
|
||||
|
||||
return &cfg, nil
|
||||
}
|
||||
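
For illustration only: ParseParallelConfig above consumes a plain-text block format rather than JSON, so a concrete input makes the parser easier to follow. The task ids, workdir, and task text below are invented for the example; the sketch shows two blocks separated by ---TASK---, each with a metadata header, a ---CONTENT--- separator, and a body, with the second task declaring a dependency on the first.

package main

import (
	"fmt"

	executor "codeagent-wrapper/internal/executor"
)

// exampleConfig follows the format ParseParallelConfig expects:
// "key: value" metadata lines, then ---CONTENT---, then the task body.
const exampleConfig = `---TASK---
id: build
workdir: ./service
---CONTENT---
Implement the new endpoint.
---TASK---
id: test
dependencies: build
---CONTENT---
Write tests for the new endpoint.`

func main() {
	cfg, err := executor.ParseParallelConfig([]byte(exampleConfig))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(cfg.Tasks)) // 2: "test" runs after "build"
}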
@@ -1,130 +0,0 @@
|
||||
package executor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func ReadAgentPromptFile(path string, allowOutsideClaudeDir bool) (string, error) {
|
||||
raw := strings.TrimSpace(path)
|
||||
if raw == "" {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
expanded := raw
|
||||
if raw == "~" || strings.HasPrefix(raw, "~/") || strings.HasPrefix(raw, "~\\") {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if raw == "~" {
|
||||
expanded = home
|
||||
} else {
|
||||
expanded = home + raw[1:]
|
||||
}
|
||||
}
|
||||
|
||||
absPath, err := filepath.Abs(expanded)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
absPath = filepath.Clean(absPath)
|
||||
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
if !allowOutsideClaudeDir {
|
||||
return "", err
|
||||
}
|
||||
logWarn(fmt.Sprintf("Failed to resolve home directory for prompt file validation: %v; proceeding without restriction", err))
|
||||
} else {
|
||||
allowedDirs := []string{
|
||||
filepath.Clean(filepath.Join(home, ".claude")),
|
||||
filepath.Clean(filepath.Join(home, ".codeagent", "agents")),
|
||||
}
|
||||
for i := range allowedDirs {
|
||||
allowedAbs, err := filepath.Abs(allowedDirs[i])
|
||||
if err == nil {
|
||||
allowedDirs[i] = filepath.Clean(allowedAbs)
|
||||
}
|
||||
}
|
||||
|
||||
isWithinDir := func(path, dir string) bool {
|
||||
rel, err := filepath.Rel(dir, path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
rel = filepath.Clean(rel)
|
||||
if rel == "." {
|
||||
return true
|
||||
}
|
||||
if rel == ".." {
|
||||
return false
|
||||
}
|
||||
prefix := ".." + string(os.PathSeparator)
|
||||
return !strings.HasPrefix(rel, prefix)
|
||||
}
|
||||
|
||||
if !allowOutsideClaudeDir {
|
||||
withinAllowed := false
|
||||
for _, dir := range allowedDirs {
|
||||
if isWithinDir(absPath, dir) {
|
||||
withinAllowed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !withinAllowed {
|
||||
logWarn(fmt.Sprintf("Refusing to read prompt file outside allowed dirs (%s): %s", strings.Join(allowedDirs, ", "), absPath))
|
||||
return "", fmt.Errorf("prompt file must be under ~/.claude or ~/.codeagent/agents")
|
||||
}
|
||||
|
||||
resolvedPath, errPath := filepath.EvalSymlinks(absPath)
|
||||
if errPath == nil {
|
||||
resolvedPath = filepath.Clean(resolvedPath)
|
||||
resolvedAllowed := make([]string, 0, len(allowedDirs))
|
||||
for _, dir := range allowedDirs {
|
||||
resolvedBase, errBase := filepath.EvalSymlinks(dir)
|
||||
if errBase != nil {
|
||||
continue
|
||||
}
|
||||
resolvedAllowed = append(resolvedAllowed, filepath.Clean(resolvedBase))
|
||||
}
|
||||
if len(resolvedAllowed) > 0 {
|
||||
withinResolved := false
|
||||
for _, dir := range resolvedAllowed {
|
||||
if isWithinDir(resolvedPath, dir) {
|
||||
withinResolved = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !withinResolved {
|
||||
logWarn(fmt.Sprintf("Refusing to read prompt file outside allowed dirs (%s) (resolved): %s", strings.Join(resolvedAllowed, ", "), resolvedPath))
|
||||
return "", fmt.Errorf("prompt file must be under ~/.claude or ~/.codeagent/agents")
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
withinAllowed := false
|
||||
for _, dir := range allowedDirs {
|
||||
if isWithinDir(absPath, dir) {
|
||||
withinAllowed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !withinAllowed {
|
||||
logWarn(fmt.Sprintf("Reading prompt file outside allowed dirs (%s): %s", strings.Join(allowedDirs, ", "), absPath))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(absPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimRight(string(data), "\r\n"), nil
|
||||
}
|
||||
|
||||
func WrapTaskWithAgentPrompt(prompt string, task string) string {
|
||||
return "<agent-prompt>\n" + prompt + "\n</agent-prompt>\n\n" + task
|
||||
}
|
||||
@@ -1,186 +0,0 @@
|
||||
package executor
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestWrapTaskWithAgentPrompt(t *testing.T) {
|
||||
got := WrapTaskWithAgentPrompt("P", "do")
|
||||
want := "<agent-prompt>\nP\n</agent-prompt>\n\ndo"
|
||||
if got != want {
|
||||
t.Fatalf("wrapTaskWithAgentPrompt mismatch:\n got=%q\nwant=%q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadAgentPromptFile_EmptyPath(t *testing.T) {
|
||||
for _, allowOutside := range []bool{false, true} {
|
||||
got, err := ReadAgentPromptFile(" ", allowOutside)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error (allowOutside=%v): %v", allowOutside, err)
|
||||
}
|
||||
if got != "" {
|
||||
t.Fatalf("expected empty result (allowOutside=%v), got %q", allowOutside, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadAgentPromptFile_ExplicitAbsolutePath(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
path := filepath.Join(dir, "prompt.md")
|
||||
if err := os.WriteFile(path, []byte("LINE1\n"), 0o644); err != nil {
|
||||
t.Fatalf("WriteFile: %v", err)
|
||||
}
|
||||
|
||||
got, err := ReadAgentPromptFile(path, true)
|
||||
if err != nil {
|
||||
t.Fatalf("readAgentPromptFile error: %v", err)
|
||||
}
|
||||
if got != "LINE1" {
|
||||
t.Fatalf("got %q, want %q", got, "LINE1")
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadAgentPromptFile_ExplicitTildeExpansion(t *testing.T) {
|
||||
home := t.TempDir()
|
||||
t.Setenv("HOME", home)
|
||||
t.Setenv("USERPROFILE", home)
|
||||
|
||||
path := filepath.Join(home, "prompt.md")
|
||||
if err := os.WriteFile(path, []byte("P\n"), 0o644); err != nil {
|
||||
t.Fatalf("WriteFile: %v", err)
|
||||
}
|
||||
|
||||
got, err := ReadAgentPromptFile("~/prompt.md", true)
|
||||
if err != nil {
|
||||
t.Fatalf("readAgentPromptFile error: %v", err)
|
||||
}
|
||||
if got != "P" {
|
||||
t.Fatalf("got %q, want %q", got, "P")
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadAgentPromptFile_RestrictedAllowsClaudeDir(t *testing.T) {
|
||||
home := t.TempDir()
|
||||
t.Setenv("HOME", home)
|
||||
t.Setenv("USERPROFILE", home)
|
||||
|
||||
claudeDir := filepath.Join(home, ".claude")
|
||||
if err := os.MkdirAll(claudeDir, 0o755); err != nil {
|
||||
t.Fatalf("MkdirAll: %v", err)
|
||||
}
|
||||
path := filepath.Join(claudeDir, "prompt.md")
|
||||
if err := os.WriteFile(path, []byte("OK\n"), 0o644); err != nil {
|
||||
t.Fatalf("WriteFile: %v", err)
|
||||
}
|
||||
|
||||
got, err := ReadAgentPromptFile("~/.claude/prompt.md", false)
|
    if err != nil {
        t.Fatalf("readAgentPromptFile error: %v", err)
    }
    if got != "OK" {
        t.Fatalf("got %q, want %q", got, "OK")
    }
}

func TestReadAgentPromptFile_RestrictedAllowsCodeagentAgentsDir(t *testing.T) {
    home := t.TempDir()
    t.Setenv("HOME", home)
    t.Setenv("USERPROFILE", home)

    agentDir := filepath.Join(home, ".codeagent", "agents")
    if err := os.MkdirAll(agentDir, 0o755); err != nil {
        t.Fatalf("MkdirAll: %v", err)
    }
    path := filepath.Join(agentDir, "sarsh.md")
    if err := os.WriteFile(path, []byte("OK\n"), 0o644); err != nil {
        t.Fatalf("WriteFile: %v", err)
    }

    got, err := ReadAgentPromptFile("~/.codeagent/agents/sarsh.md", false)
    if err != nil {
        t.Fatalf("readAgentPromptFile error: %v", err)
    }
    if got != "OK" {
        t.Fatalf("got %q, want %q", got, "OK")
    }
}

func TestReadAgentPromptFile_RestrictedRejectsOutsideClaudeDir(t *testing.T) {
    home := t.TempDir()
    t.Setenv("HOME", home)
    t.Setenv("USERPROFILE", home)

    path := filepath.Join(home, "prompt.md")
    if err := os.WriteFile(path, []byte("NO\n"), 0o644); err != nil {
        t.Fatalf("WriteFile: %v", err)
    }

    if _, err := ReadAgentPromptFile("~/prompt.md", false); err == nil {
        t.Fatalf("expected error for prompt file outside ~/.claude, got nil")
    }
}

func TestReadAgentPromptFile_RestrictedRejectsTraversal(t *testing.T) {
    home := t.TempDir()
    t.Setenv("HOME", home)
    t.Setenv("USERPROFILE", home)

    path := filepath.Join(home, "secret.md")
    if err := os.WriteFile(path, []byte("SECRET\n"), 0o644); err != nil {
        t.Fatalf("WriteFile: %v", err)
    }

    if _, err := ReadAgentPromptFile("~/.claude/../secret.md", false); err == nil {
        t.Fatalf("expected traversal to be rejected, got nil")
    }
}

func TestReadAgentPromptFile_NotFound(t *testing.T) {
    home := t.TempDir()
    t.Setenv("HOME", home)
    t.Setenv("USERPROFILE", home)

    claudeDir := filepath.Join(home, ".claude")
    if err := os.MkdirAll(claudeDir, 0o755); err != nil {
        t.Fatalf("MkdirAll: %v", err)
    }

    _, err := ReadAgentPromptFile("~/.claude/missing.md", false)
    if err == nil || !os.IsNotExist(err) {
        t.Fatalf("expected not-exist error, got %v", err)
    }
}

func TestReadAgentPromptFile_PermissionDenied(t *testing.T) {
    if runtime.GOOS == "windows" {
        t.Skip("chmod-based permission test is not reliable on Windows")
    }

    home := t.TempDir()
    t.Setenv("HOME", home)
    t.Setenv("USERPROFILE", home)

    claudeDir := filepath.Join(home, ".claude")
    if err := os.MkdirAll(claudeDir, 0o755); err != nil {
        t.Fatalf("MkdirAll: %v", err)
    }
    path := filepath.Join(claudeDir, "private.md")
    if err := os.WriteFile(path, []byte("PRIVATE\n"), 0o600); err != nil {
        t.Fatalf("WriteFile: %v", err)
    }
    if err := os.Chmod(path, 0o000); err != nil {
        t.Fatalf("Chmod: %v", err)
    }

    _, err := ReadAgentPromptFile("~/.claude/private.md", false)
    if err == nil {
        t.Fatalf("expected permission error, got nil")
    }
    if !os.IsPermission(err) && !strings.Contains(strings.ToLower(err.Error()), "permission") {
        t.Fatalf("expected permission denied, got: %v", err)
    }
}
@@ -1,104 +0,0 @@
package executor

import "strings"

// extractCoverageGap extracts what's missing from coverage reports.
func extractCoverageGap(message string) string {
    if message == "" {
        return ""
    }

    lower := strings.ToLower(message)
    lines := strings.Split(message, "\n")

    for _, line := range lines {
        lineLower := strings.ToLower(line)
        line = strings.TrimSpace(line)

        if strings.Contains(lineLower, "uncovered") ||
            strings.Contains(lineLower, "not covered") ||
            strings.Contains(lineLower, "missing coverage") ||
            strings.Contains(lineLower, "lines not covered") {
            if len(line) > 100 {
                return line[:97] + "..."
            }
            return line
        }

        if strings.Contains(lineLower, "branch") && strings.Contains(lineLower, "not taken") {
            if len(line) > 100 {
                return line[:97] + "..."
            }
            return line
        }
    }

    if strings.Contains(lower, "function") && strings.Contains(lower, "0%") {
        for _, line := range lines {
            if strings.Contains(strings.ToLower(line), "0%") && strings.Contains(line, "function") {
                line = strings.TrimSpace(line)
                if len(line) > 100 {
                    return line[:97] + "..."
                }
                return line
            }
        }
    }

    return ""
}

// extractErrorDetail extracts meaningful error context from task output.
func extractErrorDetail(message string, maxLen int) string {
    if message == "" || maxLen <= 0 {
        return ""
    }

    lines := strings.Split(message, "\n")
    var errorLines []string

    for _, line := range lines {
        line = strings.TrimSpace(line)
        if line == "" {
            continue
        }

        lower := strings.ToLower(line)

        if strings.HasPrefix(line, "at ") && strings.Contains(line, "(") {
            if len(errorLines) > 0 && strings.HasPrefix(strings.ToLower(errorLines[len(errorLines)-1]), "at ") {
                continue
            }
        }

        if strings.Contains(lower, "error") ||
            strings.Contains(lower, "fail") ||
            strings.Contains(lower, "exception") ||
            strings.Contains(lower, "assert") ||
            strings.Contains(lower, "expected") ||
            strings.Contains(lower, "timeout") ||
            strings.Contains(lower, "not found") ||
            strings.Contains(lower, "cannot") ||
            strings.Contains(lower, "undefined") ||
            strings.HasPrefix(line, "FAIL") ||
            strings.HasPrefix(line, "●") {
            errorLines = append(errorLines, line)
        }
    }

    if len(errorLines) == 0 {
        start := len(lines) - 5
        if start < 0 {
            start = 0
        }
        for _, line := range lines[start:] {
            line = strings.TrimSpace(line)
            if line != "" {
                errorLines = append(errorLines, line)
            }
        }
    }

    result := strings.Join(errorLines, " | ")
    return safeTruncate(result, maxLen)
}
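Note: a minimal sketch of the kind of report text extractCoverageGap matches against; the sample report string is invented for illustration and is not part of the diff.

    // Hypothetical coverage report; extractCoverageGap returns the first line
    // that mentions missing coverage, truncated to at most 100 characters.
    report := "ok  pkg 0.3s\ncoverage: 84.2% of statements\nlines not covered: parser.go:120-135"
    gap := extractCoverageGap(report) // "lines not covered: parser.go:120-135"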
@@ -1,16 +0,0 @@
//go:build unix || darwin || linux
// +build unix darwin linux

package executor

import (
    "syscall"
)

// sendTermSignal sends SIGTERM for graceful shutdown on Unix.
func sendTermSignal(proc processHandle) error {
    if proc == nil {
        return nil
    }
    return proc.Signal(syscall.SIGTERM)
}
@@ -1,87 +0,0 @@
//go:build windows
// +build windows

package executor

import (
    "io"
    "os"
    "os/exec"
    "path/filepath"
    "strconv"
    "strings"
)

// sendTermSignal on Windows directly kills the process.
// SIGTERM is not supported on Windows.
func sendTermSignal(proc processHandle) error {
    if proc == nil {
        return nil
    }
    pid := proc.Pid()
    if pid > 0 {
        // Kill the whole process tree to avoid leaving inheriting child processes around.
        // This also helps prevent exec.Cmd.Wait() from blocking on stderr/stdout pipes held open by children.
        taskkill := "taskkill"
        if root := os.Getenv("SystemRoot"); root != "" {
            taskkill = filepath.Join(root, "System32", "taskkill.exe")
        }
        cmd := exec.Command(taskkill, "/PID", strconv.Itoa(pid), "/T", "/F")
        cmd.Stdout = io.Discard
        cmd.Stderr = io.Discard
        if err := cmd.Run(); err == nil {
            return nil
        }
        if err := killProcessTree(pid); err == nil {
            return nil
        }
    }
    return proc.Kill()
}

func killProcessTree(pid int) error {
    if pid <= 0 {
        return nil
    }

    wmic := "wmic"
    if root := os.Getenv("SystemRoot"); root != "" {
        wmic = filepath.Join(root, "System32", "wbem", "WMIC.exe")
    }

    queryChildren := "(ParentProcessId=" + strconv.Itoa(pid) + ")"
    listCmd := exec.Command(wmic, "process", "where", queryChildren, "get", "ProcessId", "/VALUE")
    listCmd.Stderr = io.Discard
    out, err := listCmd.Output()
    if err == nil {
        for _, childPID := range parseWMICPIDs(out) {
            _ = killProcessTree(childPID)
        }
    }

    querySelf := "(ProcessId=" + strconv.Itoa(pid) + ")"
    termCmd := exec.Command(wmic, "process", "where", querySelf, "call", "terminate")
    termCmd.Stdout = io.Discard
    termCmd.Stderr = io.Discard
    if termErr := termCmd.Run(); termErr != nil && err == nil {
        err = termErr
    }
    return err
}

func parseWMICPIDs(out []byte) []int {
    const prefix = "ProcessId="
    var pids []int
    for _, line := range strings.Split(string(out), "\n") {
        line = strings.TrimSpace(line)
        if !strings.HasPrefix(line, prefix) {
            continue
        }
        n, err := strconv.Atoi(strings.TrimSpace(strings.TrimPrefix(line, prefix)))
        if err != nil || n <= 0 {
            continue
        }
        pids = append(pids, n)
    }
    return pids
}
@@ -1,15 +0,0 @@
package executor

import "strings"

const stdinSpecialChars = "\n\\\"'`$"

func ShouldUseStdin(taskText string, piped bool) bool {
    if piped {
        return true
    }
    if len(taskText) > 800 {
        return true
    }
    return strings.ContainsAny(taskText, stdinSpecialChars)
}
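Note: a hedged sketch of how a caller might act on ShouldUseStdin; cmd and args are hypothetical names, not identifiers from this diff.

    // Pass the task over stdin when it is piped, longer than 800 bytes, or
    // contains newline/backslash/quote/backtick/$ characters; otherwise use argv.
    task := "fix the flaky test\nand add a regression case"
    if ShouldUseStdin(task, false) {
        cmd.Stdin = strings.NewReader(task)
    } else {
        args = append(args, task)
    }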
@@ -1,46 +0,0 @@
package executor

import "context"

// ParallelConfig defines the JSON schema for parallel execution.
type ParallelConfig struct {
    Tasks         []TaskSpec `json:"tasks"`
    GlobalBackend string     `json:"backend,omitempty"`
}

// TaskSpec describes an individual task entry in the parallel config.
type TaskSpec struct {
    ID              string          `json:"id"`
    Task            string          `json:"task"`
    WorkDir         string          `json:"workdir,omitempty"`
    Dependencies    []string        `json:"dependencies,omitempty"`
    SessionID       string          `json:"session_id,omitempty"`
    Backend         string          `json:"backend,omitempty"`
    Model           string          `json:"model,omitempty"`
    ReasoningEffort string          `json:"reasoning_effort,omitempty"`
    Agent           string          `json:"agent,omitempty"`
    PromptFile      string          `json:"prompt_file,omitempty"`
    SkipPermissions bool            `json:"skip_permissions,omitempty"`
    Mode            string          `json:"-"`
    UseStdin        bool            `json:"-"`
    Context         context.Context `json:"-"`
}

// TaskResult captures the execution outcome of a task.
type TaskResult struct {
    TaskID    string `json:"task_id"`
    ExitCode  int    `json:"exit_code"`
    Message   string `json:"message"`
    SessionID string `json:"session_id"`
    Error     string `json:"error"`
    LogPath   string `json:"log_path"`
    // Structured report fields
    Coverage       string   `json:"coverage,omitempty"`        // extracted coverage percentage (e.g., "92%")
    CoverageNum    float64  `json:"coverage_num,omitempty"`    // numeric coverage for comparison
    CoverageTarget float64  `json:"coverage_target,omitempty"` // target coverage (default 90)
    FilesChanged   []string `json:"files_changed,omitempty"`   // list of changed files
    KeyOutput      string   `json:"key_output,omitempty"`      // brief summary of what was done
    TestsPassed    int      `json:"tests_passed,omitempty"`    // number of tests passed
    TestsFailed    int      `json:"tests_failed,omitempty"`    // number of tests failed
    sharedLog bool
}
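Note: for orientation, a sketch of the JSON shape these structs decode, with field names taken from the tags above; the concrete IDs and task text are invented, and an encoding/json-compatible Unmarshal is assumed.

    raw := []byte(`{
      "backend": "codex",
      "tasks": [
        {"id": "impl", "task": "implement the parser", "workdir": "./internal/parser"},
        {"id": "test", "task": "add unit tests", "dependencies": ["impl"]}
      ]
    }`)
    var cfg ParallelConfig
    if err := json.Unmarshal(raw, &cfg); err != nil {
        // handle a malformed parallel config
    }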
@@ -1,57 +0,0 @@
package executor

import (
    "context"
    "os/exec"

    backend "codeagent-wrapper/internal/backend"
)

type CommandRunner = commandRunner
type ProcessHandle = processHandle

func SetForceKillDelay(seconds int32) (restore func()) {
    prev := forceKillDelay.Load()
    forceKillDelay.Store(seconds)
    return func() { forceKillDelay.Store(prev) }
}

func SetSelectBackendFn(fn func(string) (Backend, error)) (restore func()) {
    prev := selectBackendFn
    if fn != nil {
        selectBackendFn = fn
    } else {
        selectBackendFn = backend.Select
    }
    return func() { selectBackendFn = prev }
}

func SetCommandContextFn(fn func(context.Context, string, ...string) *exec.Cmd) (restore func()) {
    prev := commandContext
    if fn != nil {
        commandContext = fn
    } else {
        commandContext = exec.CommandContext
    }
    return func() { commandContext = prev }
}

func SetNewCommandRunner(fn func(context.Context, string, ...string) CommandRunner) (restore func()) {
    prev := newCommandRunner
    if fn != nil {
        newCommandRunner = fn
    } else {
        newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
            return &realCmd{cmd: commandContext(ctx, name, args...)}
        }
    }
    return func() { newCommandRunner = prev }
}

func WithTaskLogger(ctx context.Context, logger *Logger) context.Context {
    return withTaskLogger(ctx, logger)
}

func TaskLoggerFromContext(ctx context.Context) *Logger {
    return taskLoggerFromContext(ctx)
}
@@ -1,59 +0,0 @@
package logger

import "sync/atomic"

var loggerPtr atomic.Pointer[Logger]

func setLogger(l *Logger) {
    loggerPtr.Store(l)
}

func closeLogger() error {
    logger := loggerPtr.Swap(nil)
    if logger == nil {
        return nil
    }
    return logger.Close()
}

func activeLogger() *Logger {
    return loggerPtr.Load()
}

func logDebug(msg string) {
    if logger := activeLogger(); logger != nil {
        logger.Debug(msg)
    }
}

func logInfo(msg string) {
    if logger := activeLogger(); logger != nil {
        logger.Info(msg)
    }
}

func logWarn(msg string) {
    if logger := activeLogger(); logger != nil {
        logger.Warn(msg)
    }
}

func logError(msg string) {
    if logger := activeLogger(); logger != nil {
        logger.Error(msg)
    }
}

func SetLogger(l *Logger) { setLogger(l) }

func CloseLogger() error { return closeLogger() }

func ActiveLogger() *Logger { return activeLogger() }

func LogInfo(msg string) { logInfo(msg) }

func LogDebug(msg string) { logDebug(msg) }

func LogWarn(msg string) { logWarn(msg) }

func LogError(msg string) { logError(msg) }
@@ -1,63 +0,0 @@
package logger

import (
    "errors"
    "math"
    "time"

    "github.com/shirou/gopsutil/v3/process"
)

func pidToInt32(pid int) (int32, bool) {
    if pid <= 0 || pid > math.MaxInt32 {
        return 0, false
    }
    return int32(pid), true
}

// isProcessRunning reports whether a process with the given pid appears to be running.
// It is intentionally conservative on errors to avoid deleting logs for live processes.
func isProcessRunning(pid int) bool {
    pid32, ok := pidToInt32(pid)
    if !ok {
        return false
    }

    exists, err := process.PidExists(pid32)
    if err == nil {
        return exists
    }

    // If we can positively identify that the process doesn't exist, report false.
    if errors.Is(err, process.ErrorProcessNotRunning) {
        return false
    }

    // Permission/inspection failures: assume it's running to be safe.
    return true
}

// getProcessStartTime returns the start time of a process.
// Returns zero time if the start time cannot be determined.
func getProcessStartTime(pid int) time.Time {
    pid32, ok := pidToInt32(pid)
    if !ok {
        return time.Time{}
    }

    proc, err := process.NewProcess(pid32)
    if err != nil {
        return time.Time{}
    }

    ms, err := proc.CreateTime()
    if err != nil || ms <= 0 {
        return time.Time{}
    }

    return time.UnixMilli(ms)
}

func IsProcessRunning(pid int) bool { return isProcessRunning(pid) }

func GetProcessStartTime(pid int) time.Time { return getProcessStartTime(pid) }
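Note: a sketch of the conservative cleanup decision these helpers support; the path variable is hypothetical. Because inspection errors make isProcessRunning report true, a log file is only removed when its PID is known to be gone.

    if pid, ok := ParsePIDFromLog(path); ok && !IsProcessRunning(pid) {
        _ = os.Remove(path) // owning process has exited; safe to delete
    }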
@@ -1,112 +0,0 @@
package logger

import (
    "math"
    "os"
    "os/exec"
    "runtime"
    "strconv"
    "testing"
    "time"
)

func TestIsProcessRunning(t *testing.T) {
    t.Run("boundary values", func(t *testing.T) {
        if isProcessRunning(0) {
            t.Fatalf("pid 0 should never be treated as running")
        }
        if isProcessRunning(-1) {
            t.Fatalf("negative pid should never be treated as running")
        }
    })

    t.Run("pid out of int32 range", func(t *testing.T) {
        if strconv.IntSize <= 32 {
            t.Skip("int cannot represent values above int32 range")
        }

        pid := int(int64(math.MaxInt32) + 1)
        if isProcessRunning(pid) {
            t.Fatalf("expected pid %d (out of int32 range) to be treated as not running", pid)
        }
    })

    t.Run("current process", func(t *testing.T) {
        if !isProcessRunning(os.Getpid()) {
            t.Fatalf("expected current process (pid=%d) to be running", os.Getpid())
        }
    })

    t.Run("fake pid", func(t *testing.T) {
        const nonexistentPID = 1 << 30
        if isProcessRunning(nonexistentPID) {
            t.Fatalf("expected pid %d to be reported as not running", nonexistentPID)
        }
    })

    t.Run("terminated process", func(t *testing.T) {
        pid := exitedProcessPID(t)
        if isProcessRunning(pid) {
            t.Fatalf("expected exited child process (pid=%d) to be reported as not running", pid)
        }
    })
}

func exitedProcessPID(t *testing.T) int {
    t.Helper()

    var cmd *exec.Cmd
    if runtime.GOOS == "windows" {
        cmd = exec.Command("cmd", "/c", "exit 0")
    } else {
        cmd = exec.Command("sh", "-c", "exit 0")
    }

    if err := cmd.Start(); err != nil {
        t.Fatalf("failed to start helper process: %v", err)
    }
    pid := cmd.Process.Pid

    if err := cmd.Wait(); err != nil {
        t.Fatalf("helper process did not exit cleanly: %v", err)
    }

    time.Sleep(50 * time.Millisecond)
    return pid
}

func TestGetProcessStartTimeReadsProcStat(t *testing.T) {
    start := getProcessStartTime(os.Getpid())
    if start.IsZero() {
        t.Fatalf("expected non-zero start time for current process")
    }
    if start.After(time.Now().Add(5 * time.Second)) {
        t.Fatalf("start time is unexpectedly in the future: %v", start)
    }
}

func TestGetProcessStartTimeInvalidData(t *testing.T) {
    if !getProcessStartTime(0).IsZero() {
        t.Fatalf("expected zero time for pid 0")
    }
    if !getProcessStartTime(-1).IsZero() {
        t.Fatalf("expected zero time for negative pid")
    }
    if !getProcessStartTime(1 << 30).IsZero() {
        t.Fatalf("expected zero time for non-existent pid")
    }
    if strconv.IntSize > 32 {
        pid := int(int64(math.MaxInt32) + 1)
        if !getProcessStartTime(pid).IsZero() {
            t.Fatalf("expected zero time for pid %d (out of int32 range)", pid)
        }
    }
}

func TestGetBootTimeParsesBtime(t *testing.T) {
    t.Skip("legacy boot-time probing removed; start time now uses gopsutil")
}

func TestGetBootTimeInvalidData(t *testing.T) {
    t.Skip("legacy boot-time probing removed; start time now uses gopsutil")
}
@@ -1,67 +0,0 @@
package logger

import (
    "os"
    "path/filepath"
    "time"
)

func SetProcessRunningCheck(fn func(int) bool) (restore func()) {
    prev := processRunningCheck
    if fn != nil {
        processRunningCheck = fn
    } else {
        processRunningCheck = isProcessRunning
    }
    return func() { processRunningCheck = prev }
}

func SetProcessStartTimeFn(fn func(int) time.Time) (restore func()) {
    prev := processStartTimeFn
    if fn != nil {
        processStartTimeFn = fn
    } else {
        processStartTimeFn = getProcessStartTime
    }
    return func() { processStartTimeFn = prev }
}

func SetRemoveLogFileFn(fn func(string) error) (restore func()) {
    prev := removeLogFileFn
    if fn != nil {
        removeLogFileFn = fn
    } else {
        removeLogFileFn = os.Remove
    }
    return func() { removeLogFileFn = prev }
}

func SetGlobLogFilesFn(fn func(string) ([]string, error)) (restore func()) {
    prev := globLogFiles
    if fn != nil {
        globLogFiles = fn
    } else {
        globLogFiles = filepath.Glob
    }
    return func() { globLogFiles = prev }
}

func SetFileStatFn(fn func(string) (os.FileInfo, error)) (restore func()) {
    prev := fileStatFn
    if fn != nil {
        fileStatFn = fn
    } else {
        fileStatFn = os.Lstat
    }
    return func() { fileStatFn = prev }
}

func SetEvalSymlinksFn(fn func(string) (string, error)) (restore func()) {
    prev := evalSymlinksFn
    if fn != nil {
        evalSymlinksFn = fn
    } else {
        evalSymlinksFn = filepath.EvalSymlinks
    }
    return func() { evalSymlinksFn = prev }
}
@@ -1,13 +0,0 @@
package logger

// WrapperName is the fixed name for this tool.
const WrapperName = "codeagent-wrapper"

// CurrentWrapperName returns the wrapper name (always "codeagent-wrapper").
func CurrentWrapperName() string { return WrapperName }

// LogPrefixes returns the log file name prefixes to look for.
func LogPrefixes() []string { return []string{WrapperName} }

// PrimaryLogPrefix returns the preferred filename prefix for log files.
func PrimaryLogPrefix() string { return WrapperName }
@@ -1,74 +0,0 @@
package parser

import "github.com/goccy/go-json"

// JSONEvent represents a Codex JSON output event.
type JSONEvent struct {
    Type     string     `json:"type"`
    ThreadID string     `json:"thread_id,omitempty"`
    Item     *EventItem `json:"item,omitempty"`
}

// EventItem represents the item field in a JSON event.
type EventItem struct {
    Type string      `json:"type"`
    Text interface{} `json:"text"`
}

// ClaudeEvent for Claude stream-json format.
type ClaudeEvent struct {
    Type      string `json:"type"`
    Subtype   string `json:"subtype,omitempty"`
    SessionID string `json:"session_id,omitempty"`
    Result    string `json:"result,omitempty"`
}

// GeminiEvent for Gemini stream-json format.
type GeminiEvent struct {
    Type      string `json:"type"`
    SessionID string `json:"session_id,omitempty"`
    Role      string `json:"role,omitempty"`
    Content   string `json:"content,omitempty"`
    Delta     bool   `json:"delta,omitempty"`
    Status    string `json:"status,omitempty"`
}

// UnifiedEvent combines all backend event formats into a single structure
// to avoid multiple JSON unmarshal operations per event.
type UnifiedEvent struct {
    // Common fields
    Type string `json:"type"`

    // Codex-specific fields
    ThreadID string          `json:"thread_id,omitempty"`
    Item     json.RawMessage `json:"item,omitempty"` // Lazy parse

    // Claude-specific fields
    Subtype   string `json:"subtype,omitempty"`
    SessionID string `json:"session_id,omitempty"`
    Result    string `json:"result,omitempty"`

    // Gemini-specific fields
    Role    string `json:"role,omitempty"`
    Content string `json:"content,omitempty"`
    Delta   *bool  `json:"delta,omitempty"`
    Status  string `json:"status,omitempty"`

    // Opencode-specific fields (camelCase sessionID)
    OpencodeSessionID string          `json:"sessionID,omitempty"`
    Part              json.RawMessage `json:"part,omitempty"`
}

// OpencodePart represents the part field in opencode events.
type OpencodePart struct {
    Type      string `json:"type"`
    Text      string `json:"text,omitempty"`
    Reason    string `json:"reason,omitempty"`
    SessionID string `json:"sessionID,omitempty"`
}

// ItemContent represents the parsed item.text field for Codex events.
type ItemContent struct {
    Type string      `json:"type"`
    Text interface{} `json:"text"`
}
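Note: a hedged sketch of the single-pass decode UnifiedEvent is designed for, using an opencode-style line; json refers to the goccy/go-json import above, and the event line itself is invented for illustration.

    line := []byte(`{"type":"text","sessionID":"ses_123","part":{"type":"text","text":"hi"}}`)
    var ev UnifiedEvent
    if err := json.Unmarshal(line, &ev); err == nil && len(ev.Part) > 0 {
        var part OpencodePart
        _ = json.Unmarshal(ev.Part, &part) // Part is parsed lazily; part.Text == "hi"
    }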
@@ -1,50 +0,0 @@
package parser

import (
    "strings"
    "testing"
)

func TestParseJSONStream_Opencode(t *testing.T) {
    input := `{"type":"step_start","timestamp":1768187730683,"sessionID":"ses_44fced3c7ffe83sZpzY1rlQka3","part":{"id":"prt_bb0339afa001NTqoJ2NS8x91zP","sessionID":"ses_44fced3c7ffe83sZpzY1rlQka3","messageID":"msg_bb033866f0011oZxTqvfy0TKtS","type":"step-start","snapshot":"904f0fd58c125b79e60f0993e38f9d9f6200bf47"}}
{"type":"text","timestamp":1768187744432,"sessionID":"ses_44fced3c7ffe83sZpzY1rlQka3","part":{"id":"prt_bb0339cb5001QDd0Lh0PzFZpa3","sessionID":"ses_44fced3c7ffe83sZpzY1rlQka3","messageID":"msg_bb033866f0011oZxTqvfy0TKtS","type":"text","text":"Hello from opencode"}}
{"type":"step_finish","timestamp":1768187744471,"sessionID":"ses_44fced3c7ffe83sZpzY1rlQka3","part":{"id":"prt_bb033d0af0019VRZzpO2OVW1na","sessionID":"ses_44fced3c7ffe83sZpzY1rlQka3","messageID":"msg_bb033866f0011oZxTqvfy0TKtS","type":"step-finish","reason":"stop","snapshot":"904f0fd58c125b79e60f0993e38f9d9f6200bf47","cost":0}}`

    message, threadID := ParseJSONStreamInternal(strings.NewReader(input), nil, nil, nil, nil)

    if threadID != "ses_44fced3c7ffe83sZpzY1rlQka3" {
        t.Errorf("threadID = %q, want %q", threadID, "ses_44fced3c7ffe83sZpzY1rlQka3")
    }
    if message != "Hello from opencode" {
        t.Errorf("message = %q, want %q", message, "Hello from opencode")
    }
}

func TestParseJSONStream_Opencode_MultipleTextEvents(t *testing.T) {
    input := `{"type":"text","sessionID":"ses_123","part":{"type":"text","text":"Part 1"}}
{"type":"text","sessionID":"ses_123","part":{"type":"text","text":" Part 2"}}
{"type":"step_finish","sessionID":"ses_123","part":{"type":"step-finish","reason":"stop"}}`

    message, threadID := ParseJSONStreamInternal(strings.NewReader(input), nil, nil, nil, nil)

    if threadID != "ses_123" {
        t.Errorf("threadID = %q, want %q", threadID, "ses_123")
    }
    if message != "Part 1 Part 2" {
        t.Errorf("message = %q, want %q", message, "Part 1 Part 2")
    }
}

func TestParseJSONStream_Opencode_NoStopReason(t *testing.T) {
    input := `{"type":"text","sessionID":"ses_456","part":{"type":"text","text":"Content"}}
{"type":"step_finish","sessionID":"ses_456","part":{"type":"step-finish","reason":"tool-calls"}}`

    message, threadID := ParseJSONStreamInternal(strings.NewReader(input), nil, nil, nil, nil)

    if threadID != "ses_456" {
        t.Errorf("threadID = %q, want %q", threadID, "ses_456")
    }
    if message != "Content" {
        t.Errorf("message = %q, want %q", message, "Content")
    }
}
@@ -1,32 +0,0 @@
package parser

import (
    "strings"
    "testing"
)

func TestBackendParseJSONStream_UnknownEventsAreSilent(t *testing.T) {
    input := strings.Join([]string{
        `{"type":"turn.started"}`,
        `{"type":"assistant","text":"hi"}`,
        `{"type":"user","text":"yo"}`,
        `{"type":"item.completed","item":{"type":"agent_message","text":"ok"}}`,
    }, "\n")

    var infos []string
    infoFn := func(msg string) { infos = append(infos, msg) }

    message, threadID := ParseJSONStreamInternal(strings.NewReader(input), nil, infoFn, nil, nil)
    if message != "ok" {
        t.Fatalf("message=%q, want %q (infos=%v)", message, "ok", infos)
    }
    if threadID != "" {
        t.Fatalf("threadID=%q, want empty (infos=%v)", threadID, infos)
    }

    for _, msg := range infos {
        if strings.Contains(msg, "Agent event:") {
            t.Fatalf("unexpected log for unknown event: %q", msg)
        }
    }
}
@@ -1,15 +0,0 @@
package parser

import "testing"

func TestTruncateBytes(t *testing.T) {
    if got := TruncateBytes([]byte("abc"), 3); got != "abc" {
        t.Fatalf("TruncateBytes() = %q, want %q", got, "abc")
    }
    if got := TruncateBytes([]byte("abcd"), 3); got != "abc..." {
        t.Fatalf("TruncateBytes() = %q, want %q", got, "abc...")
    }
    if got := TruncateBytes([]byte("abcd"), -1); got != "" {
        t.Fatalf("TruncateBytes() = %q, want empty string", got)
    }
}
@@ -1,8 +0,0 @@
package utils

func Min(a, b int) int {
    if a < b {
        return a
    }
    return b
}
@@ -1,36 +0,0 @@
package utils

import "testing"

func TestMin(t *testing.T) {
    tests := []struct {
        name string
        a, b int
        want int
    }{
        {"a less than b", 1, 2, 1},
        {"b less than a", 5, 3, 3},
        {"equal values", 7, 7, 7},
        {"negative a", -5, 3, -5},
        {"negative b", 5, -3, -3},
        {"both negative", -5, -3, -5},
        {"zero and positive", 0, 5, 0},
        {"zero and negative", 0, -5, -5},
        {"large values", 1000000, 999999, 999999},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got := Min(tt.a, tt.b)
            if got != tt.want {
                t.Errorf("Min(%d, %d) = %d, want %d", tt.a, tt.b, got, tt.want)
            }
        })
    }
}

func BenchmarkMin(b *testing.B) {
    for i := 0; i < b.N; i++ {
        Min(i, i+1)
    }
}
@@ -1,62 +0,0 @@
package utils

import "strings"

func Truncate(s string, maxLen int) string {
    if len(s) <= maxLen {
        return s
    }
    if maxLen < 0 {
        return ""
    }
    return s[:maxLen] + "..."
}

// SafeTruncate safely truncates string to maxLen, avoiding panic and UTF-8 corruption.
func SafeTruncate(s string, maxLen int) string {
    if maxLen <= 0 || s == "" {
        return ""
    }

    runes := []rune(s)
    if len(runes) <= maxLen {
        return s
    }

    if maxLen < 4 {
        return string(runes[:1])
    }

    cutoff := maxLen - 3
    if cutoff <= 0 {
        return string(runes[:1])
    }
    if len(runes) <= cutoff {
        return s
    }
    return string(runes[:cutoff]) + "..."
}

// SanitizeOutput removes ANSI escape sequences and control characters.
func SanitizeOutput(s string) string {
    var result strings.Builder
    inEscape := false
    for i := 0; i < len(s); i++ {
        if s[i] == '\x1b' && i+1 < len(s) && s[i+1] == '[' {
            inEscape = true
            i++ // skip '['
            continue
        }
        if inEscape {
            if (s[i] >= 'A' && s[i] <= 'Z') || (s[i] >= 'a' && s[i] <= 'z') {
                inEscape = false
            }
            continue
        }
        // Keep printable chars and common whitespace.
        if s[i] >= 32 || s[i] == '\n' || s[i] == '\t' {
            result.WriteByte(s[i])
        }
    }
    return result.String()
}
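Note: a small illustration of the byte-versus-rune difference that the tests below pin down; the expected outputs are taken from those test tables.

    Truncate("你好世界", 10)              // byte-based: can end in a broken UTF-8 sequence ("你好世\xe7...")
    SafeTruncate("你好世界test", 6)       // rune-based: "你好世..."
    SanitizeOutput("\x1b[31mred\x1b[0m") // ANSI escapes stripped: "red"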
@@ -1,122 +0,0 @@
package utils

import (
    "strings"
    "testing"
)

func TestTruncate(t *testing.T) {
    tests := []struct {
        name   string
        s      string
        maxLen int
        want   string
    }{
        {"empty string", "", 10, ""},
        {"short string", "hello", 10, "hello"},
        {"exact length", "hello", 5, "hello"},
        {"needs truncation", "hello world", 5, "hello..."},
        {"zero maxLen", "hello", 0, "..."},
        {"negative maxLen", "hello", -1, ""},
        {"maxLen 1", "hello", 1, "h..."},
        {"unicode bytes truncate", "你好世界", 10, "你好世\xe7..."}, // Truncate works on bytes, not runes
        {"mixed truncate", "hello世界abc", 7, "hello\xe4\xb8..."}, // byte-based truncation
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got := Truncate(tt.s, tt.maxLen)
            if got != tt.want {
                t.Errorf("Truncate(%q, %d) = %q, want %q", tt.s, tt.maxLen, got, tt.want)
            }
        })
    }
}

func TestSafeTruncate(t *testing.T) {
    tests := []struct {
        name   string
        s      string
        maxLen int
        want   string
    }{
        {"empty string", "", 10, ""},
        {"zero maxLen", "hello", 0, ""},
        {"negative maxLen", "hello", -1, ""},
        {"short string", "hello", 10, "hello"},
        {"exact length", "hello", 5, "hello"},
        {"needs truncation", "hello world", 8, "hello..."},
        {"maxLen 1", "hello", 1, "h"},
        {"maxLen 2", "hello", 2, "h"},
        {"maxLen 3", "hello", 3, "h"},
        {"maxLen 4", "hello", 4, "h..."},
        {"unicode preserved", "你好世界", 10, "你好世界"},
        {"unicode exact", "你好世界", 4, "你好世界"},
        {"unicode truncate", "你好世界test", 6, "你好世..."},
        {"mixed unicode", "ab你好cd", 5, "ab..."},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got := SafeTruncate(tt.s, tt.maxLen)
            if got != tt.want {
                t.Errorf("SafeTruncate(%q, %d) = %q, want %q", tt.s, tt.maxLen, got, tt.want)
            }
        })
    }
}

func TestSanitizeOutput(t *testing.T) {
    tests := []struct {
        name string
        s    string
        want string
    }{
        {"empty string", "", ""},
        {"plain text", "hello world", "hello world"},
        {"with newline", "hello\nworld", "hello\nworld"},
        {"with tab", "hello\tworld", "hello\tworld"},
        {"ANSI color red", "\x1b[31mred\x1b[0m", "red"},
        {"ANSI bold", "\x1b[1mbold\x1b[0m", "bold"},
        {"ANSI complex", "\x1b[1;31;40mtext\x1b[0m", "text"},
        {"control chars", "hello\x00\x01\x02world", "helloworld"},
        {"mixed ANSI and control", "\x1b[32m\x00ok\x1b[0m", "ok"},
        {"multiple ANSI sequences", "\x1b[31mred\x1b[0m \x1b[32mgreen\x1b[0m", "red green"},
        {"incomplete escape", "\x1b[", ""},
        {"escape without bracket", "\x1bA", "A"},
        {"cursor movement", "\x1b[2Aup\x1b[2Bdown", "updown"},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got := SanitizeOutput(tt.s)
            if got != tt.want {
                t.Errorf("SanitizeOutput(%q) = %q, want %q", tt.s, got, tt.want)
            }
        })
    }
}

func BenchmarkTruncate(b *testing.B) {
    s := strings.Repeat("hello world ", 100)
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        Truncate(s, 50)
    }
}

func BenchmarkSafeTruncate(b *testing.B) {
    s := strings.Repeat("你好世界", 100)
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        SafeTruncate(s, 50)
    }
}

func BenchmarkSanitizeOutput(b *testing.B) {
    s := strings.Repeat("\x1b[31mred\x1b[0m text ", 50)
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        SanitizeOutput(s)
    }
}
@@ -1,4 +1,4 @@
package executor
package main

import (
    "os"
@@ -7,12 +7,14 @@ import (
)

func TestLogWriterWriteLimitsBuffer(t *testing.T) {
    defer resetTestHooks()

    logger, err := NewLogger()
    if err != nil {
        t.Fatalf("NewLogger error: %v", err)
    }
    setLogger(logger)
    t.Cleanup(func() { _ = closeLogger() })
    defer closeLogger()

    lw := newLogWriter("P:", 10)
    _, _ = lw.Write([]byte(strings.Repeat("a", 100)))
@@ -34,3 +36,4 @@ func TestLogWriterWriteLimitsBuffer(t *testing.T) {
        t.Fatalf("log output missing truncated entry, got %q", string(data))
    }
}

@@ -1,4 +1,4 @@
package logger
package main

import (
    "bufio"
@@ -13,8 +13,6 @@ import (
    "sync"
    "sync/atomic"
    "time"

    "github.com/rs/zerolog"
)

// Logger writes log messages asynchronously to a temp file.
@@ -24,7 +22,6 @@ type Logger struct {
    path     string
    file     *os.File
    writer   *bufio.Writer
    zlogger  zerolog.Logger
    ch       chan logEntry
    flushReq chan chan struct{}
    done     chan struct{}
@@ -40,7 +37,6 @@ type Logger struct {

type logEntry struct {
    msg     string
    level   zerolog.Level
    isError bool // true for ERROR or WARN levels
}

@@ -77,7 +73,7 @@ func NewLogger() (*Logger, error) {
// Useful for tests that need isolated log files within the same process.
func NewLoggerWithSuffix(suffix string) (*Logger, error) {
    pid := os.Getpid()
    filename := fmt.Sprintf("%s-%d", PrimaryLogPrefix(), pid)
    filename := fmt.Sprintf("%s-%d", primaryLogPrefix(), pid)
    var safeSuffix string
    if suffix != "" {
        safeSuffix = sanitizeLogSuffix(suffix)
@@ -107,8 +103,6 @@ func NewLoggerWithSuffix(suffix string) (*Logger, error) {
        done: make(chan struct{}),
    }

    l.zlogger = zerolog.New(l.writer).With().Timestamp().Logger()

    l.workerWG.Add(1)
    go l.run()

@@ -190,24 +184,17 @@ func (l *Logger) Path() string {
    return l.path
}

func (l *Logger) IsClosed() bool {
    if l == nil {
        return true
    }
    return l.closed.Load()
}

// Info logs at INFO level.
func (l *Logger) Info(msg string) { l.logWithLevel(zerolog.InfoLevel, msg) }
func (l *Logger) Info(msg string) { l.log("INFO", msg) }

// Warn logs at WARN level.
func (l *Logger) Warn(msg string) { l.logWithLevel(zerolog.WarnLevel, msg) }
func (l *Logger) Warn(msg string) { l.log("WARN", msg) }

// Debug logs at DEBUG level.
func (l *Logger) Debug(msg string) { l.logWithLevel(zerolog.DebugLevel, msg) }
func (l *Logger) Debug(msg string) { l.log("DEBUG", msg) }

// Error logs at ERROR level.
func (l *Logger) Error(msg string) { l.logWithLevel(zerolog.ErrorLevel, msg) }
func (l *Logger) Error(msg string) { l.log("ERROR", msg) }

// Close signals the worker to flush and close the log file.
// The log file is NOT removed, allowing inspection after program exit.
@@ -348,7 +335,7 @@ func (l *Logger) Flush() {
    }
}

func (l *Logger) logWithLevel(entryLevel zerolog.Level, msg string) {
func (l *Logger) log(level, msg string) {
    if l == nil {
        return
    }
@@ -356,8 +343,8 @@ func (l *Logger) logWithLevel(entryLevel zerolog.Level, msg string) {
        return
    }

    isError := entryLevel == zerolog.WarnLevel || entryLevel == zerolog.ErrorLevel
    entry := logEntry{msg: msg, level: entryLevel, isError: isError}
    isError := level == "WARN" || level == "ERROR"
    entry := logEntry{msg: msg, isError: isError}
    l.flushMu.Lock()
    l.pendingWG.Add(1)
    l.flushMu.Unlock()
@@ -379,7 +366,8 @@ func (l *Logger) run() {
    defer ticker.Stop()

    writeEntry := func(entry logEntry) {
        l.zlogger.WithLevel(entry.level).Msg(entry.msg)
        timestamp := time.Now().Format("2006-01-02 15:04:05.000")
        fmt.Fprintf(l.writer, "[%s] %s\n", timestamp, entry.msg)

        // Cache error/warn entries in memory for fast extraction
        if entry.isError {
@@ -451,7 +439,10 @@ func cleanupOldLogs() (CleanupStats, error) {
    var stats CleanupStats
    tempDir := os.TempDir()

    prefixes := LogPrefixes()
    prefixes := logPrefixes()
    if len(prefixes) == 0 {
        prefixes = []string{defaultWrapperName}
    }

    seen := make(map[string]struct{})
    var matches []string
@@ -482,8 +473,7 @@ func cleanupOldLogs() (CleanupStats, error) {
            stats.Kept++
            stats.KeptFiles = append(stats.KeptFiles, filename)
            if reason != "" {
                // Use Debug level to avoid polluting Recent Errors with cleanup noise
                logDebug(fmt.Sprintf("cleanupOldLogs: skipping %s: %s", filename, reason))
                logWarn(fmt.Sprintf("cleanupOldLogs: skipping %s: %s", filename, reason))
            }
            continue
        }
@@ -601,7 +591,10 @@ func isPIDReused(logPath string, pid int) bool {
    if procStartTime.IsZero() {
        // Can't determine process start time
        // Check if file is very old (>7 days), likely from a dead process
        return time.Since(fileModTime) > 7*24*time.Hour
        if time.Since(fileModTime) > 7*24*time.Hour {
            return true // File is old enough to be from a different process
        }
        return false // Be conservative for recent files
    }

    // If the log file was modified before the process started, PID was reused
@@ -611,7 +604,10 @@ func isPIDReused(logPath string, pid int) bool {

func parsePIDFromLog(path string) (int, bool) {
    name := filepath.Base(path)
    prefixes := LogPrefixes()
    prefixes := logPrefixes()
    if len(prefixes) == 0 {
        prefixes = []string{defaultWrapperName}
    }

    for _, prefix := range prefixes {
        prefixWithDash := fmt.Sprintf("%s-", prefix)
@@ -665,19 +661,3 @@ func renderWorkerLimit(limit int) string {
    }
    return strconv.Itoa(limit)
}

func CleanupOldLogs() (CleanupStats, error) { return cleanupOldLogs() }

func IsUnsafeFile(path string, tempDir string) (bool, string) { return isUnsafeFile(path, tempDir) }

func IsPIDReused(logPath string, pid int) bool { return isPIDReused(logPath, pid) }

func ParsePIDFromLog(path string) (int, bool) { return parsePIDFromLog(path) }

func LogConcurrencyPlanning(limit, total int) { logConcurrencyPlanning(limit, total) }

func LogConcurrencyState(event, taskID string, active, limit int) {
    logConcurrencyState(event, taskID, active, limit)
}

func SanitizeLogSuffix(raw string) string { return sanitizeLogSuffix(raw) }
@@ -1,4 +1,4 @@
package logger
package main

import (
    "fmt"
@@ -28,7 +28,7 @@ func TestLoggerConcurrencyLogHelpers(t *testing.T) {
        t.Fatalf("NewLoggerWithSuffix error: %v", err)
    }
    setLogger(logger)
    defer func() { _ = closeLogger() }()
    defer closeLogger()

    logConcurrencyPlanning(0, 2)
    logConcurrencyPlanning(3, 2)
@@ -64,8 +64,8 @@ func TestLoggerConcurrencyLogHelpersNoopWithoutActiveLogger(t *testing.T) {
func TestLoggerCleanupOldLogsSkipsUnsafeAndHandlesAlreadyDeleted(t *testing.T) {
    tempDir := setTempDirEnv(t, t.TempDir())

    unsafePath := createTempLog(t, tempDir, fmt.Sprintf("%s-%d.log", PrimaryLogPrefix(), 222))
    orphanPath := createTempLog(t, tempDir, fmt.Sprintf("%s-%d.log", PrimaryLogPrefix(), 111))
    unsafePath := createTempLog(t, tempDir, fmt.Sprintf("%s-%d.log", primaryLogPrefix(), 222))
    orphanPath := createTempLog(t, tempDir, fmt.Sprintf("%s-%d.log", primaryLogPrefix(), 111))

    stubFileStat(t, func(path string) (os.FileInfo, error) {
        if path == unsafePath {
@@ -1,4 +1,4 @@
package logger
package main

import (
    "fmt"
@@ -26,12 +26,12 @@ func TestLoggerWithSuffixNamingAndIsolation(t *testing.T) {
    }
    defer loggerB.Close()

    wantA := filepath.Join(tempDir, fmt.Sprintf("%s-%d-%s.log", PrimaryLogPrefix(), os.Getpid(), taskA))
    wantA := filepath.Join(tempDir, fmt.Sprintf("%s-%d-%s.log", primaryLogPrefix(), os.Getpid(), taskA))
    if loggerA.Path() != wantA {
        t.Fatalf("loggerA path = %q, want %q", loggerA.Path(), wantA)
    }

    wantB := filepath.Join(tempDir, fmt.Sprintf("%s-%d-%s.log", PrimaryLogPrefix(), os.Getpid(), taskB))
    wantB := filepath.Join(tempDir, fmt.Sprintf("%s-%d-%s.log", primaryLogPrefix(), os.Getpid(), taskB))
    if loggerB.Path() != wantB {
        t.Fatalf("loggerB path = %q, want %q", loggerB.Path(), wantB)
    }
@@ -105,7 +105,7 @@ func TestLoggerWithSuffixSanitizesUnsafeSuffix(t *testing.T) {
        _ = os.Remove(logger.Path())
    })

    wantBase := fmt.Sprintf("%s-%d-%s.log", PrimaryLogPrefix(), os.Getpid(), safe)
    wantBase := fmt.Sprintf("%s-%d-%s.log", primaryLogPrefix(), os.Getpid(), safe)
    if gotBase := filepath.Base(logger.Path()); gotBase != wantBase {
        t.Fatalf("log filename = %q, want %q", gotBase, wantBase)
    }
@@ -1,4 +1,4 @@
package logger
package main

import (
    "bufio"
@@ -6,6 +6,7 @@ import (
    "fmt"
    "math"
    "os"
    "os/exec"
    "path/filepath"
    "strconv"
    "strings"
@@ -76,6 +77,30 @@ func TestLoggerWritesLevels(t *testing.T) {
    }
}

func TestLoggerDefaultIsTerminalCoverage(t *testing.T) {
    oldStdin := os.Stdin
    t.Cleanup(func() { os.Stdin = oldStdin })

    f, err := os.CreateTemp(t.TempDir(), "stdin-*")
    if err != nil {
        t.Fatalf("os.CreateTemp() error = %v", err)
    }
    defer os.Remove(f.Name())

    os.Stdin = f
    if got := defaultIsTerminal(); got {
        t.Fatalf("defaultIsTerminal() = %v, want false for regular file", got)
    }

    if err := f.Close(); err != nil {
        t.Fatalf("Close() error = %v", err)
    }
    os.Stdin = f
    if got := defaultIsTerminal(); !got {
        t.Fatalf("defaultIsTerminal() = %v, want true when Stat fails", got)
    }
}

func TestLoggerCloseStopsWorkerAndKeepsFile(t *testing.T) {
    tempDir := t.TempDir()
    t.Setenv("TMPDIR", tempDir)
@@ -93,6 +118,11 @@ func TestLoggerCloseStopsWorkerAndKeepsFile(t *testing.T) {
    if err := logger.Close(); err != nil {
        t.Fatalf("Close() returned error: %v", err)
    }
    if logger.file != nil {
        if _, err := logger.file.Write([]byte("x")); err == nil {
            t.Fatalf("expected file to be closed after Close()")
        }
    }

    // After recent changes, log file is kept for debugging - NOT removed
    if _, err := os.Stat(logPath); os.IsNotExist(err) {
@@ -101,6 +131,18 @@ func TestLoggerCloseStopsWorkerAndKeepsFile(t *testing.T) {

    // Clean up manually for test
    defer os.Remove(logPath)

    done := make(chan struct{})
    go func() {
        logger.workerWG.Wait()
        close(done)
    }()

    select {
    case <-done:
    case <-time.After(200 * time.Millisecond):
        t.Fatalf("worker goroutine did not exit after Close")
    }
}

func TestLoggerConcurrentWritesSafe(t *testing.T) {
@@ -152,13 +194,50 @@ func TestLoggerConcurrentWritesSafe(t *testing.T) {
    }
}

func TestLoggerTerminateProcessActive(t *testing.T) {
    cmd := exec.Command("sleep", "5")
    if err := cmd.Start(); err != nil {
        t.Skipf("cannot start sleep command: %v", err)
    }

    timer := terminateProcess(&realCmd{cmd: cmd})
    if timer == nil {
        t.Fatalf("terminateProcess returned nil timer for active process")
    }
    defer timer.Stop()

    done := make(chan error, 1)
    go func() {
        done <- cmd.Wait()
    }()

    select {
    case <-time.After(500 * time.Millisecond):
        t.Fatalf("process not terminated promptly")
    case <-done:
    }

    // Force the timer callback to run immediately to cover the kill branch.
    timer.Reset(0)
    time.Sleep(10 * time.Millisecond)
}

func TestLoggerTerminateProcessNil(t *testing.T) {
    if timer := terminateProcess(nil); timer != nil {
        t.Fatalf("terminateProcess(nil) should return nil timer")
    }
    if timer := terminateProcess(&realCmd{cmd: &exec.Cmd{}}); timer != nil {
        t.Fatalf("terminateProcess with nil process should return nil timer")
    }
}

func TestLoggerCleanupOldLogsRemovesOrphans(t *testing.T) {
    tempDir := setTempDirEnv(t, t.TempDir())

    orphan1 := createTempLog(t, tempDir, "codeagent-wrapper-111.log")
    orphan2 := createTempLog(t, tempDir, "codeagent-wrapper-222-suffix.log")
    running1 := createTempLog(t, tempDir, "codeagent-wrapper-333.log")
    running2 := createTempLog(t, tempDir, "codeagent-wrapper-444-extra-info.log")
    orphan1 := createTempLog(t, tempDir, "codex-wrapper-111.log")
    orphan2 := createTempLog(t, tempDir, "codex-wrapper-222-suffix.log")
    running1 := createTempLog(t, tempDir, "codex-wrapper-333.log")
    running2 := createTempLog(t, tempDir, "codex-wrapper-444-extra-info.log")
    untouched := createTempLog(t, tempDir, "unrelated.log")

    runningPIDs := map[int]bool{333: true, 444: true}
@@ -206,15 +285,15 @@ func TestLoggerCleanupOldLogsHandlesInvalidNamesAndErrors(t *testing.T) {
    tempDir := setTempDirEnv(t, t.TempDir())

    invalid := []string{
        "codeagent-wrapper-.log",
        "codeagent-wrapper.log",
        "codeagent-wrapper-foo-bar.txt",
        "codex-wrapper-.log",
        "codex-wrapper.log",
        "codex-wrapper-foo-bar.txt",
        "not-a-codex.log",
    }
    for _, name := range invalid {
        createTempLog(t, tempDir, name)
    }
    target := createTempLog(t, tempDir, "codeagent-wrapper-555-extra.log")
    target := createTempLog(t, tempDir, "codex-wrapper-555-extra.log")

    var checked []int
    stubProcessRunning(t, func(pid int) bool {
@@ -310,8 +389,8 @@ func TestLoggerCleanupOldLogsHandlesTempDirPermissionErrors(t *testing.T) {
    tempDir := setTempDirEnv(t, t.TempDir())

    paths := []string{
        createTempLog(t, tempDir, "codeagent-wrapper-6100.log"),
        createTempLog(t, tempDir, "codeagent-wrapper-6101.log"),
        createTempLog(t, tempDir, "codex-wrapper-6100.log"),
        createTempLog(t, tempDir, "codex-wrapper-6101.log"),
    }

    stubProcessRunning(t, func(int) bool { return false })
@@ -349,8 +428,8 @@ func TestLoggerCleanupOldLogsHandlesTempDirPermissionErrors(t *testing.T) {
func TestLoggerCleanupOldLogsHandlesPermissionDeniedFile(t *testing.T) {
    tempDir := setTempDirEnv(t, t.TempDir())

    protected := createTempLog(t, tempDir, "codeagent-wrapper-6200.log")
    deletable := createTempLog(t, tempDir, "codeagent-wrapper-6201.log")
    protected := createTempLog(t, tempDir, "codex-wrapper-6200.log")
    deletable := createTempLog(t, tempDir, "codex-wrapper-6201.log")

    stubProcessRunning(t, func(int) bool { return false })
    stubProcessStartTime(t, func(int) time.Time { return time.Time{} })
@@ -389,7 +468,7 @@ func TestLoggerCleanupOldLogsPerformanceBound(t *testing.T) {
    const fileCount = 400
    fakePaths := make([]string, fileCount)
    for i := 0; i < fileCount; i++ {
        name := fmt.Sprintf("codeagent-wrapper-%d.log", 10000+i)
        name := fmt.Sprintf("codex-wrapper-%d.log", 10000+i)
        fakePaths[i] = createTempLog(t, tempDir, name)
    }

@@ -426,11 +505,102 @@ func TestLoggerCleanupOldLogsPerformanceBound(t *testing.T) {
    }
}

func TestLoggerCleanupOldLogsCoverageSuite(t *testing.T) {
    TestBackendParseJSONStream_CoverageSuite(t)
}

// Reuse the existing coverage suite so the focused TestLogger run still exercises
// the rest of the codebase and keeps coverage high.
func TestLoggerCoverageSuite(t *testing.T) {
    suite := []struct {
        name string
        fn   func(*testing.T)
    }{
        {"TestBackendParseJSONStream_CoverageSuite", TestBackendParseJSONStream_CoverageSuite},
        {"TestVersionCoverageFullRun", TestVersionCoverageFullRun},
        {"TestVersionMainWrapper", TestVersionMainWrapper},

        {"TestExecutorHelperCoverage", TestExecutorHelperCoverage},
        {"TestExecutorRunCodexTaskWithContext", TestExecutorRunCodexTaskWithContext},
        {"TestExecutorParallelLogIsolation", TestExecutorParallelLogIsolation},
        {"TestExecutorTaskLoggerContext", TestExecutorTaskLoggerContext},
        {"TestExecutorExecuteConcurrentWithContextBranches", TestExecutorExecuteConcurrentWithContextBranches},
        {"TestExecutorSignalAndTermination", TestExecutorSignalAndTermination},
        {"TestExecutorCancelReasonAndCloseWithReason", TestExecutorCancelReasonAndCloseWithReason},
        {"TestExecutorForceKillTimerStop", TestExecutorForceKillTimerStop},
        {"TestExecutorForwardSignalsDefaults", TestExecutorForwardSignalsDefaults},

        {"TestBackendParseArgs_NewMode", TestBackendParseArgs_NewMode},
        {"TestBackendParseArgs_ResumeMode", TestBackendParseArgs_ResumeMode},
        {"TestBackendParseArgs_BackendFlag", TestBackendParseArgs_BackendFlag},
        {"TestBackendParseArgs_SkipPermissions", TestBackendParseArgs_SkipPermissions},
        {"TestBackendParseBoolFlag", TestBackendParseBoolFlag},
        {"TestBackendEnvFlagEnabled", TestBackendEnvFlagEnabled},
        {"TestRunResolveTimeout", TestRunResolveTimeout},
        {"TestRunIsTerminal", TestRunIsTerminal},
        {"TestRunReadPipedTask", TestRunReadPipedTask},
        {"TestTailBufferWrite", TestTailBufferWrite},
        {"TestLogWriterWriteLimitsBuffer", TestLogWriterWriteLimitsBuffer},
        {"TestLogWriterLogLine", TestLogWriterLogLine},
        {"TestNewLogWriterDefaultMaxLen", TestNewLogWriterDefaultMaxLen},
        {"TestNewLogWriterDefaultLimit", TestNewLogWriterDefaultLimit},
        {"TestRunHello", TestRunHello},
        {"TestRunGreet", TestRunGreet},
        {"TestRunFarewell", TestRunFarewell},
        {"TestRunFarewellEmpty", TestRunFarewellEmpty},

        {"TestParallelParseConfig_Success", TestParallelParseConfig_Success},
        {"TestParallelParseConfig_Backend", TestParallelParseConfig_Backend},
        {"TestParallelParseConfig_InvalidFormat", TestParallelParseConfig_InvalidFormat},
        {"TestParallelParseConfig_EmptyTasks", TestParallelParseConfig_EmptyTasks},
        {"TestParallelParseConfig_MissingID", TestParallelParseConfig_MissingID},
        {"TestParallelParseConfig_MissingTask", TestParallelParseConfig_MissingTask},
        {"TestParallelParseConfig_DuplicateID", TestParallelParseConfig_DuplicateID},
        {"TestParallelParseConfig_DelimiterFormat", TestParallelParseConfig_DelimiterFormat},

        {"TestBackendSelectBackend", TestBackendSelectBackend},
        {"TestBackendSelectBackend_Invalid", TestBackendSelectBackend_Invalid},
        {"TestBackendSelectBackend_DefaultOnEmpty", TestBackendSelectBackend_DefaultOnEmpty},
        {"TestBackendBuildArgs_CodexBackend", TestBackendBuildArgs_CodexBackend},
        {"TestBackendBuildArgs_ClaudeBackend", TestBackendBuildArgs_ClaudeBackend},
        {"TestClaudeBackendBuildArgs_OutputValidation", TestClaudeBackendBuildArgs_OutputValidation},
        {"TestBackendBuildArgs_GeminiBackend", TestBackendBuildArgs_GeminiBackend},
        {"TestGeminiBackendBuildArgs_OutputValidation", TestGeminiBackendBuildArgs_OutputValidation},
        {"TestBackendNamesAndCommands", TestBackendNamesAndCommands},

        {"TestBackendParseJSONStream", TestBackendParseJSONStream},
        {"TestBackendParseJSONStream_ClaudeEvents", TestBackendParseJSONStream_ClaudeEvents},
        {"TestBackendParseJSONStream_GeminiEvents", TestBackendParseJSONStream_GeminiEvents},
        {"TestBackendParseJSONStreamWithWarn_InvalidLine", TestBackendParseJSONStreamWithWarn_InvalidLine},
        {"TestBackendParseJSONStream_OnMessage", TestBackendParseJSONStream_OnMessage},
        {"TestBackendParseJSONStream_ScannerError", TestBackendParseJSONStream_ScannerError},
        {"TestBackendDiscardInvalidJSON", TestBackendDiscardInvalidJSON},
        {"TestBackendDiscardInvalidJSONBuffer", TestBackendDiscardInvalidJSONBuffer},

        {"TestCurrentWrapperNameFallsBackToExecutable", TestCurrentWrapperNameFallsBackToExecutable},
        {"TestCurrentWrapperNameDetectsLegacyAliasSymlink", TestCurrentWrapperNameDetectsLegacyAliasSymlink},

        {"TestIsProcessRunning", TestIsProcessRunning},
        {"TestGetProcessStartTimeReadsProcStat", TestGetProcessStartTimeReadsProcStat},
        {"TestGetProcessStartTimeInvalidData", TestGetProcessStartTimeInvalidData},
        {"TestGetBootTimeParsesBtime", TestGetBootTimeParsesBtime},
        {"TestGetBootTimeInvalidData", TestGetBootTimeInvalidData},

        {"TestClaudeBuildArgs_ModesAndPermissions", TestClaudeBuildArgs_ModesAndPermissions},
        {"TestClaudeBuildArgs_GeminiAndCodexModes", TestClaudeBuildArgs_GeminiAndCodexModes},
        {"TestClaudeBuildArgs_BackendMetadata", TestClaudeBuildArgs_BackendMetadata},
    }

    for _, tc := range suite {
        t.Run(tc.name, tc.fn)
    }
}

func TestLoggerCleanupOldLogsKeepsCurrentProcessLog(t *testing.T) {
    tempDir := setTempDirEnv(t, t.TempDir())

    currentPID := os.Getpid()
    currentLog := createTempLog(t, tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", currentPID))
    currentLog := createTempLog(t, tempDir, fmt.Sprintf("codex-wrapper-%d.log", currentPID))

    stubProcessRunning(t, func(pid int) bool {
        if pid != currentPID {
@@ -506,7 +676,7 @@ func TestLoggerIsUnsafeFileSecurityChecks(t *testing.T) {
    stubEvalSymlinks(t, func(path string) (string, error) {
        return filepath.Join(absTempDir, filepath.Base(path)), nil
    })
    unsafe, reason := isUnsafeFile(filepath.Join(absTempDir, "codeagent-wrapper-1.log"), tempDir)
    unsafe, reason := isUnsafeFile(filepath.Join(absTempDir, "codex-wrapper-1.log"), tempDir)
    if !unsafe || reason != "refusing to delete symlink" {
        t.Fatalf("expected symlink to be rejected, got unsafe=%v reason=%q", unsafe, reason)
    }
@@ -532,9 +702,9 @@ func TestLoggerIsUnsafeFileSecurityChecks(t *testing.T) {
    })
    otherDir := t.TempDir()
    stubEvalSymlinks(t, func(string) (string, error) {
        return filepath.Join(otherDir, "codeagent-wrapper-9.log"), nil
        return filepath.Join(otherDir, "codex-wrapper-9.log"), nil
    })
    unsafe, reason := isUnsafeFile(filepath.Join(otherDir, "codeagent-wrapper-9.log"), tempDir)
    unsafe, reason := isUnsafeFile(filepath.Join(otherDir, "codex-wrapper-9.log"), tempDir)
    if !unsafe || reason != "file is outside tempDir" {
        t.Fatalf("expected outside file to be rejected, got unsafe=%v reason=%q", unsafe, reason)
    }
@@ -543,21 +713,15 @@ func TestLoggerIsUnsafeFileSecurityChecks(t *testing.T) {

func TestLoggerPathAndRemove(t *testing.T) {
    tempDir := t.TempDir()
    t.Setenv("TMPDIR", tempDir)

    logger, err := NewLoggerWithSuffix("sample")
    if err != nil {
        t.Fatalf("NewLoggerWithSuffix() error = %v", err)
    }
    path := logger.Path()
    if path == "" {
        _ = logger.Close()
        t.Fatalf("logger.Path() returned empty path")
    }
    if err := logger.Close(); err != nil {
        t.Fatalf("Close() error = %v", err)
    path := filepath.Join(tempDir, "sample.log")
    if err := os.WriteFile(path, []byte("test"), 0o644); err != nil {
        t.Fatalf("failed to create temp file: %v", err)
    }

    logger := &Logger{path: path}
    if got := logger.Path(); got != path {
        t.Fatalf("Path() = %q, want %q", got, path)
    }
    if err := logger.RemoveLogFile(); err != nil {
        t.Fatalf("RemoveLogFile() error = %v", err)
    }
@@ -574,6 +738,43 @@ func TestLoggerPathAndRemove(t *testing.T) {
    }
}

func TestLoggerTruncateBytesCoverage(t *testing.T) {
    if got := truncateBytes([]byte("abc"), 3); got != "abc" {
        t.Fatalf("truncateBytes() = %q, want %q", got, "abc")
    }
    if got := truncateBytes([]byte("abcd"), 3); got != "abc..." {
        t.Fatalf("truncateBytes() = %q, want %q", got, "abc...")
    }
    if got := truncateBytes([]byte("abcd"), -1); got != "" {
        t.Fatalf("truncateBytes() = %q, want empty string", got)
    }
}

func TestLoggerInternalLog(t *testing.T) {
    logger := &Logger{
        ch:        make(chan logEntry, 1),
        done:      make(chan struct{}),
        pendingWG: sync.WaitGroup{},
    }

    done := make(chan logEntry, 1)
    go func() {
        entry := <-logger.ch
        logger.pendingWG.Done()
        done <- entry
    }()

    logger.log("INFO", "hello")
    entry := <-done
    if entry.msg != "hello" {
        t.Fatalf("unexpected entry %+v", entry)
    }

    logger.closed.Store(true)
    logger.log("INFO", "ignored")
    close(logger.done)
}

func TestLoggerParsePIDFromLog(t *testing.T) {
    hugePID := strconv.FormatInt(math.MaxInt64, 10) + "0"
    tests := []struct {
@@ -581,13 +782,13 @@ func TestLoggerParsePIDFromLog(t *testing.T) {
        pid int
        ok  bool
    }{
        {"codeagent-wrapper-123.log", 123, true},
        {"codeagent-wrapper-999-extra.log", 999, true},
        {"codeagent-wrapper-.log", 0, false},
        {"codex-wrapper-123.log", 123, true},
        {"codex-wrapper-999-extra.log", 999, true},
        {"codex-wrapper-.log", 0, false},
        {"invalid-name.log", 0, false},
        {"codeagent-wrapper--5.log", 0, false},
        {"codeagent-wrapper-0.log", 0, false},
        {fmt.Sprintf("codeagent-wrapper-%s.log", hugePID), 0, false},
        {"codex-wrapper--5.log", 0, false},
        {"codex-wrapper-0.log", 0, false},
        {fmt.Sprintf("codex-wrapper-%s.log", hugePID), 0, false},
    }

    for _, tt := range tests {
@@ -626,32 +827,56 @@ func setTempDirEnv(t *testing.T, dir string) string {

func stubProcessRunning(t *testing.T, fn func(int) bool) {
|
||||
t.Helper()
|
||||
t.Cleanup(SetProcessRunningCheck(fn))
|
||||
original := processRunningCheck
|
||||
processRunningCheck = fn
|
||||
t.Cleanup(func() {
|
||||
processRunningCheck = original
|
||||
})
|
||||
}
|
||||
|
||||
func stubProcessStartTime(t *testing.T, fn func(int) time.Time) {
|
||||
t.Helper()
|
||||
t.Cleanup(SetProcessStartTimeFn(fn))
|
||||
original := processStartTimeFn
|
||||
processStartTimeFn = fn
|
||||
t.Cleanup(func() {
|
||||
processStartTimeFn = original
|
||||
})
|
||||
}
|
||||
|
||||
func stubRemoveLogFile(t *testing.T, fn func(string) error) {
|
||||
t.Helper()
|
||||
t.Cleanup(SetRemoveLogFileFn(fn))
|
||||
original := removeLogFileFn
|
||||
removeLogFileFn = fn
|
||||
t.Cleanup(func() {
|
||||
removeLogFileFn = original
|
||||
})
|
||||
}
|
||||
|
||||
func stubGlobLogFiles(t *testing.T, fn func(string) ([]string, error)) {
|
||||
t.Helper()
|
||||
t.Cleanup(SetGlobLogFilesFn(fn))
|
||||
original := globLogFiles
|
||||
globLogFiles = fn
|
||||
t.Cleanup(func() {
|
||||
globLogFiles = original
|
||||
})
|
||||
}
|
||||
|
||||
func stubFileStat(t *testing.T, fn func(string) (os.FileInfo, error)) {
|
||||
t.Helper()
|
||||
t.Cleanup(SetFileStatFn(fn))
|
||||
original := fileStatFn
|
||||
fileStatFn = fn
|
||||
t.Cleanup(func() {
|
||||
fileStatFn = original
|
||||
})
|
||||
}
|
||||
|
||||
func stubEvalSymlinks(t *testing.T, fn func(string) (string, error)) {
|
||||
t.Helper()
|
||||
t.Cleanup(SetEvalSymlinksFn(fn))
|
||||
original := evalSymlinksFn
|
||||
evalSymlinksFn = fn
|
||||
t.Cleanup(func() {
|
||||
evalSymlinksFn = original
|
||||
})
|
||||
}
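The one-line variants of these stubs hand the replacement straight to t.Cleanup, which implies that the Set*Fn helpers swap the package-level hook and return a restore function. A rough sketch of that inferred contract (not copied from the source), using processRunningCheck as the example hook:

func SetProcessRunningCheck(fn func(int) bool) (restore func()) {
	original := processRunningCheck // package-level hook seen in the other variant of this hunk
	processRunningCheck = fn
	return func() { processRunningCheck = original } // handed to t.Cleanup by the caller
}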
|
||||
|
||||
type fakeFileInfo struct {
|
||||
@@ -735,7 +960,7 @@ func TestLoggerExtractRecentErrors(t *testing.T) {
|
||||
t.Fatalf("NewLoggerWithSuffix() error = %v", err)
|
||||
}
|
||||
defer logger.Close()
|
||||
defer func() { _ = logger.RemoveLogFile() }()
|
||||
defer logger.RemoveLogFile()
|
||||
|
||||
// Write logs using logger methods
|
||||
for _, entry := range tt.logs {
|
||||
@@ -775,14 +1000,14 @@ func TestLoggerExtractRecentErrorsNilLogger(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLoggerExtractRecentErrorsEmptyPath(t *testing.T) {
|
||||
logger := &Logger{}
|
||||
logger := &Logger{path: ""}
|
||||
if got := logger.ExtractRecentErrors(10); got != nil {
|
||||
t.Fatalf("empty path ExtractRecentErrors() should return nil, got %v", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoggerExtractRecentErrorsFileNotExist(t *testing.T) {
|
||||
logger := &Logger{}
|
||||
logger := &Logger{path: "/nonexistent/path/to/log.log"}
|
||||
if got := logger.ExtractRecentErrors(10); got != nil {
|
||||
t.Fatalf("nonexistent file ExtractRecentErrors() should return nil, got %v", got)
|
||||
}
|
||||
@@ -824,7 +1049,7 @@ func TestExtractRecentErrorsBoundaryCheck(t *testing.T) {
|
||||
t.Fatalf("NewLoggerWithSuffix() error = %v", err)
|
||||
}
|
||||
defer logger.Close()
|
||||
defer func() { _ = logger.RemoveLogFile() }()
|
||||
defer logger.RemoveLogFile()
|
||||
|
||||
// Write some errors
|
||||
logger.Error("error 1")
|
||||
@@ -857,7 +1082,7 @@ func TestErrorEntriesMaxLimit(t *testing.T) {
|
||||
t.Fatalf("NewLoggerWithSuffix() error = %v", err)
|
||||
}
|
||||
defer logger.Close()
|
||||
defer func() { _ = logger.RemoveLogFile() }()
|
||||
defer logger.RemoveLogFile()
|
||||
|
||||
// Write 150 error/warn entries
|
||||
for i := 1; i <= 150; i++ {
|
||||
codeagent-wrapper/main.go (Normal file, 505 lines)
@@ -0,0 +1,505 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
version = "5.4.0"
|
||||
defaultWorkdir = "."
|
||||
defaultTimeout = 7200 // seconds (2 hours)
|
||||
defaultCoverageTarget = 90.0
|
||||
codexLogLineLimit = 1000
|
||||
stdinSpecialChars = "\n\\\"'`$"
|
||||
stderrCaptureLimit = 4 * 1024
|
||||
defaultBackendName = "codex"
|
||||
defaultCodexCommand = "codex"
|
||||
|
||||
// stdout close reasons
|
||||
stdoutCloseReasonWait = "wait-done"
|
||||
stdoutCloseReasonDrain = "drain-timeout"
|
||||
stdoutCloseReasonCtx = "context-cancel"
|
||||
stdoutDrainTimeout = 100 * time.Millisecond
|
||||
)
|
||||
|
||||
var useASCIIMode = os.Getenv("CODEAGENT_ASCII_MODE") == "true"
|
||||
|
||||
// Test hooks for dependency injection
|
||||
var (
|
||||
stdinReader io.Reader = os.Stdin
|
||||
isTerminalFn = defaultIsTerminal
|
||||
codexCommand = defaultCodexCommand
|
||||
cleanupHook func()
|
||||
loggerPtr atomic.Pointer[Logger]
|
||||
|
||||
buildCodexArgsFn = buildCodexArgs
|
||||
selectBackendFn = selectBackend
|
||||
commandContext = exec.CommandContext
|
||||
jsonMarshal = json.Marshal
|
||||
cleanupLogsFn = cleanupOldLogs
|
||||
signalNotifyFn = signal.Notify
|
||||
signalStopFn = signal.Stop
|
||||
terminateCommandFn = terminateCommand
|
||||
defaultBuildArgsFn = buildCodexArgs
|
||||
runTaskFn = runCodexTask
|
||||
exitFn = os.Exit
|
||||
)
|
||||
|
||||
var forceKillDelay atomic.Int32
|
||||
|
||||
func init() {
|
||||
forceKillDelay.Store(5) // seconds - default value
|
||||
}
|
||||
|
||||
func runStartupCleanup() {
|
||||
if cleanupLogsFn == nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
logWarn(fmt.Sprintf("cleanupOldLogs panic: %v", r))
|
||||
}
|
||||
}()
|
||||
if _, err := cleanupLogsFn(); err != nil {
|
||||
logWarn(fmt.Sprintf("cleanupOldLogs error: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
func runCleanupMode() int {
|
||||
if cleanupLogsFn == nil {
|
||||
fmt.Fprintln(os.Stderr, "Cleanup failed: log cleanup function not configured")
|
||||
return 1
|
||||
}
|
||||
|
||||
stats, err := cleanupLogsFn()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Cleanup failed: %v\n", err)
|
||||
return 1
|
||||
}
|
||||
|
||||
fmt.Println("Cleanup completed")
|
||||
fmt.Printf("Files scanned: %d\n", stats.Scanned)
|
||||
fmt.Printf("Files deleted: %d\n", stats.Deleted)
|
||||
if len(stats.DeletedFiles) > 0 {
|
||||
for _, f := range stats.DeletedFiles {
|
||||
fmt.Printf(" - %s\n", f)
|
||||
}
|
||||
}
|
||||
fmt.Printf("Files kept: %d\n", stats.Kept)
|
||||
if len(stats.KeptFiles) > 0 {
|
||||
for _, f := range stats.KeptFiles {
|
||||
fmt.Printf(" - %s\n", f)
|
||||
}
|
||||
}
|
||||
if stats.Errors > 0 {
|
||||
fmt.Printf("Deletion errors: %d\n", stats.Errors)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func main() {
|
||||
exitCode := run()
|
||||
exitFn(exitCode)
|
||||
}
|
||||
|
||||
// run is the main logic, returns exit code for testability
|
||||
func run() (exitCode int) {
|
||||
name := currentWrapperName()
|
||||
// Handle --version and --help first (no logger needed)
|
||||
if len(os.Args) > 1 {
|
||||
switch os.Args[1] {
|
||||
case "--version", "-v":
|
||||
fmt.Printf("%s version %s\n", name, version)
|
||||
return 0
|
||||
case "--help", "-h":
|
||||
printHelp()
|
||||
return 0
|
||||
case "--cleanup":
|
||||
return runCleanupMode()
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize logger for all other commands
|
||||
logger, err := NewLogger()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "ERROR: failed to initialize logger: %v\n", err)
|
||||
return 1
|
||||
}
|
||||
setLogger(logger)
|
||||
|
||||
defer func() {
|
||||
logger := activeLogger()
|
||||
if logger != nil {
|
||||
logger.Flush()
|
||||
}
|
||||
if err := closeLogger(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "ERROR: failed to close logger: %v\n", err)
|
||||
}
|
||||
// On failure, extract and display recent errors before removing log
|
||||
if logger != nil {
|
||||
if exitCode != 0 {
|
||||
if errors := logger.ExtractRecentErrors(10); len(errors) > 0 {
|
||||
fmt.Fprintln(os.Stderr, "\n=== Recent Errors ===")
|
||||
for _, entry := range errors {
|
||||
fmt.Fprintln(os.Stderr, entry)
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "Log file: %s (deleted)\n", logger.Path())
|
||||
}
|
||||
}
|
||||
if err := logger.RemoveLogFile(); err != nil && !os.IsNotExist(err) {
|
||||
// Silently ignore removal errors
|
||||
}
|
||||
}
|
||||
}()
|
||||
defer runCleanupHook()
|
||||
|
||||
// Clean up stale logs from previous runs.
|
||||
runStartupCleanup()
|
||||
|
||||
// Handle remaining commands
|
||||
if len(os.Args) > 1 {
|
||||
args := os.Args[1:]
|
||||
parallelIndex := -1
|
||||
for i, arg := range args {
|
||||
if arg == "--parallel" {
|
||||
parallelIndex = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if parallelIndex != -1 {
|
||||
backendName := defaultBackendName
|
||||
fullOutput := false
|
||||
var extras []string
|
||||
|
||||
for i := 0; i < len(args); i++ {
|
||||
arg := args[i]
|
||||
switch {
|
||||
case arg == "--parallel":
|
||||
continue
|
||||
case arg == "--full-output":
|
||||
fullOutput = true
|
||||
case arg == "--backend":
|
||||
if i+1 >= len(args) {
|
||||
fmt.Fprintln(os.Stderr, "ERROR: --backend flag requires a value")
|
||||
return 1
|
||||
}
|
||||
backendName = args[i+1]
|
||||
i++
|
||||
case strings.HasPrefix(arg, "--backend="):
|
||||
value := strings.TrimPrefix(arg, "--backend=")
|
||||
if value == "" {
|
||||
fmt.Fprintln(os.Stderr, "ERROR: --backend flag requires a value")
|
||||
return 1
|
||||
}
|
||||
backendName = value
|
||||
default:
|
||||
extras = append(extras, arg)
|
||||
}
|
||||
}
|
||||
|
||||
if len(extras) > 0 {
|
||||
fmt.Fprintln(os.Stderr, "ERROR: --parallel reads its task configuration from stdin; only --backend and --full-output are allowed.")
|
||||
fmt.Fprintln(os.Stderr, "Usage examples:")
|
||||
fmt.Fprintf(os.Stderr, " %s --parallel < tasks.txt\n", name)
|
||||
fmt.Fprintf(os.Stderr, " echo '...' | %s --parallel\n", name)
|
||||
fmt.Fprintf(os.Stderr, " %s --parallel <<'EOF'\n", name)
|
||||
fmt.Fprintf(os.Stderr, " %s --parallel --full-output <<'EOF' # include full task output\n", name)
|
||||
return 1
|
||||
}
|
||||
|
||||
backend, err := selectBackendFn(backendName)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
|
||||
return 1
|
||||
}
|
||||
backendName = backend.Name()
|
||||
|
||||
data, err := io.ReadAll(stdinReader)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "ERROR: failed to read stdin: %v\n", err)
|
||||
return 1
|
||||
}
|
||||
|
||||
cfg, err := parseParallelConfig(data)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
|
||||
return 1
|
||||
}
|
||||
|
||||
cfg.GlobalBackend = backendName
|
||||
for i := range cfg.Tasks {
|
||||
if strings.TrimSpace(cfg.Tasks[i].Backend) == "" {
|
||||
cfg.Tasks[i].Backend = backendName
|
||||
}
|
||||
}
|
||||
|
||||
timeoutSec := resolveTimeout()
|
||||
layers, err := topologicalSort(cfg.Tasks)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
|
||||
return 1
|
||||
}
|
||||
|
||||
results := executeConcurrent(layers, timeoutSec)
|
||||
|
||||
// Extract structured report fields from each result
|
||||
for i := range results {
|
||||
results[i].CoverageTarget = defaultCoverageTarget
|
||||
if results[i].Message == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
lines := strings.Split(results[i].Message, "\n")
|
||||
|
||||
// Coverage extraction
|
||||
results[i].Coverage = extractCoverageFromLines(lines)
|
||||
results[i].CoverageNum = extractCoverageNum(results[i].Coverage)
|
||||
|
||||
// Files changed
|
||||
results[i].FilesChanged = extractFilesChangedFromLines(lines)
|
||||
|
||||
// Test results
|
||||
results[i].TestsPassed, results[i].TestsFailed = extractTestResultsFromLines(lines)
|
||||
|
||||
// Key output summary
|
||||
results[i].KeyOutput = extractKeyOutputFromLines(lines, 150)
|
||||
}
|
||||
|
||||
// Default: summary mode (context-efficient)
|
||||
// --full-output: legacy full output mode
|
||||
fmt.Println(generateFinalOutputWithMode(results, !fullOutput))
|
||||
|
||||
exitCode = 0
|
||||
for _, res := range results {
|
||||
if res.ExitCode != 0 {
|
||||
exitCode = res.ExitCode
|
||||
}
|
||||
}
|
||||
|
||||
return exitCode
|
||||
}
|
||||
}
|
||||
|
||||
logInfo("Script started")
|
||||
|
||||
cfg, err := parseArgs()
|
||||
if err != nil {
|
||||
logError(err.Error())
|
||||
return 1
|
||||
}
|
||||
logInfo(fmt.Sprintf("Parsed args: mode=%s, task_len=%d, backend=%s", cfg.Mode, len(cfg.Task), cfg.Backend))
|
||||
|
||||
backend, err := selectBackendFn(cfg.Backend)
|
||||
if err != nil {
|
||||
logError(err.Error())
|
||||
return 1
|
||||
}
|
||||
cfg.Backend = backend.Name()
|
||||
|
||||
cmdInjected := codexCommand != defaultCodexCommand
|
||||
argsInjected := buildCodexArgsFn != nil && reflect.ValueOf(buildCodexArgsFn).Pointer() != reflect.ValueOf(defaultBuildArgsFn).Pointer()
|
||||
|
||||
// Wire selected backend into runtime hooks for the rest of the execution,
|
||||
// but preserve any injected test hooks for the default backend.
|
||||
if backend.Name() != defaultBackendName || !cmdInjected {
|
||||
codexCommand = backend.Command()
|
||||
}
|
||||
if backend.Name() != defaultBackendName || !argsInjected {
|
||||
buildCodexArgsFn = backend.BuildArgs
|
||||
}
|
||||
logInfo(fmt.Sprintf("Selected backend: %s", backend.Name()))
|
||||
|
||||
timeoutSec := resolveTimeout()
|
||||
logInfo(fmt.Sprintf("Timeout: %ds", timeoutSec))
|
||||
cfg.Timeout = timeoutSec
|
||||
|
||||
var taskText string
|
||||
var piped bool
|
||||
|
||||
if cfg.ExplicitStdin {
|
||||
logInfo("Explicit stdin mode: reading task from stdin")
|
||||
data, err := io.ReadAll(stdinReader)
|
||||
if err != nil {
|
||||
logError("Failed to read stdin: " + err.Error())
|
||||
return 1
|
||||
}
|
||||
taskText = string(data)
|
||||
if taskText == "" {
|
||||
logError("Explicit stdin mode requires task input from stdin")
|
||||
return 1
|
||||
}
|
||||
piped = !isTerminal()
|
||||
} else {
|
||||
pipedTask, err := readPipedTask()
|
||||
if err != nil {
|
||||
logError("Failed to read piped stdin: " + err.Error())
|
||||
return 1
|
||||
}
|
||||
piped = pipedTask != ""
|
||||
if piped {
|
||||
taskText = pipedTask
|
||||
} else {
|
||||
taskText = cfg.Task
|
||||
}
|
||||
}
|
||||
|
||||
useStdin := cfg.ExplicitStdin || shouldUseStdin(taskText, piped)
|
||||
|
||||
targetArg := taskText
|
||||
if useStdin {
|
||||
targetArg = "-"
|
||||
}
|
||||
codexArgs := buildCodexArgsFn(cfg, targetArg)
|
||||
|
||||
// Print startup information to stderr
|
||||
fmt.Fprintf(os.Stderr, "[%s]\n", name)
|
||||
fmt.Fprintf(os.Stderr, " Backend: %s\n", cfg.Backend)
|
||||
fmt.Fprintf(os.Stderr, " Command: %s %s\n", codexCommand, strings.Join(codexArgs, " "))
|
||||
fmt.Fprintf(os.Stderr, " PID: %d\n", os.Getpid())
|
||||
fmt.Fprintf(os.Stderr, " Log: %s\n", logger.Path())
|
||||
|
||||
if useStdin {
|
||||
var reasons []string
|
||||
if piped {
|
||||
reasons = append(reasons, "piped input")
|
||||
}
|
||||
if cfg.ExplicitStdin {
|
||||
reasons = append(reasons, "explicit \"-\"")
|
||||
}
|
||||
if strings.Contains(taskText, "\n") {
|
||||
reasons = append(reasons, "newline")
|
||||
}
|
||||
if strings.Contains(taskText, "\\") {
|
||||
reasons = append(reasons, "backslash")
|
||||
}
|
||||
if strings.Contains(taskText, "\"") {
|
||||
reasons = append(reasons, "double-quote")
|
||||
}
|
||||
if strings.Contains(taskText, "'") {
|
||||
reasons = append(reasons, "single-quote")
|
||||
}
|
||||
if strings.Contains(taskText, "`") {
|
||||
reasons = append(reasons, "backtick")
|
||||
}
|
||||
if strings.Contains(taskText, "$") {
|
||||
reasons = append(reasons, "dollar")
|
||||
}
|
||||
if len(taskText) > 800 {
|
||||
reasons = append(reasons, "length>800")
|
||||
}
|
||||
if len(reasons) > 0 {
|
||||
logWarn(fmt.Sprintf("Using stdin mode for task due to: %s", strings.Join(reasons, ", ")))
|
||||
}
|
||||
}
|
||||
|
||||
logInfo(fmt.Sprintf("%s running...", cfg.Backend))
|
||||
|
||||
taskSpec := TaskSpec{
|
||||
Task: taskText,
|
||||
WorkDir: cfg.WorkDir,
|
||||
Mode: cfg.Mode,
|
||||
SessionID: cfg.SessionID,
|
||||
UseStdin: useStdin,
|
||||
}
|
||||
|
||||
result := runTaskFn(taskSpec, false, cfg.Timeout)
|
||||
|
||||
if result.ExitCode != 0 {
|
||||
return result.ExitCode
|
||||
}
|
||||
|
||||
fmt.Println(result.Message)
|
||||
if result.SessionID != "" {
|
||||
fmt.Printf("\n---\nSESSION_ID: %s\n", result.SessionID)
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func setLogger(l *Logger) {
|
||||
loggerPtr.Store(l)
|
||||
}
|
||||
|
||||
func closeLogger() error {
|
||||
logger := loggerPtr.Swap(nil)
|
||||
if logger == nil {
|
||||
return nil
|
||||
}
|
||||
return logger.Close()
|
||||
}
|
||||
|
||||
func activeLogger() *Logger {
|
||||
return loggerPtr.Load()
|
||||
}
|
||||
|
||||
func logInfo(msg string) {
|
||||
if logger := activeLogger(); logger != nil {
|
||||
logger.Info(msg)
|
||||
}
|
||||
}
|
||||
|
||||
func logWarn(msg string) {
|
||||
if logger := activeLogger(); logger != nil {
|
||||
logger.Warn(msg)
|
||||
}
|
||||
}
|
||||
|
||||
func logError(msg string) {
|
||||
if logger := activeLogger(); logger != nil {
|
||||
logger.Error(msg)
|
||||
}
|
||||
}
|
||||
|
||||
func runCleanupHook() {
|
||||
if logger := activeLogger(); logger != nil {
|
||||
logger.Flush()
|
||||
}
|
||||
if cleanupHook != nil {
|
||||
cleanupHook()
|
||||
}
|
||||
}
|
||||
|
||||
func printHelp() {
|
||||
name := currentWrapperName()
|
||||
help := fmt.Sprintf(`%[1]s - Go wrapper for AI CLI backends
|
||||
|
||||
Usage:
|
||||
%[1]s "task" [workdir]
|
||||
%[1]s --backend claude "task" [workdir]
|
||||
%[1]s - [workdir] Read task from stdin
|
||||
%[1]s resume <session_id> "task" [workdir]
|
||||
%[1]s resume <session_id> - [workdir]
|
||||
%[1]s --parallel Run tasks in parallel (config from stdin)
|
||||
%[1]s --parallel --full-output Run tasks in parallel with full output (legacy)
|
||||
%[1]s --version
|
||||
%[1]s --help
|
||||
|
||||
Parallel mode examples:
|
||||
%[1]s --parallel < tasks.txt
|
||||
echo '...' | %[1]s --parallel
|
||||
%[1]s --parallel --full-output < tasks.txt
|
||||
%[1]s --parallel <<'EOF'
|
||||
|
||||
Environment Variables:
|
||||
CODEX_TIMEOUT Timeout in milliseconds (default: 7200000)
|
||||
CODEAGENT_ASCII_MODE Use ASCII symbols instead of Unicode (PASS/WARN/FAIL)
|
||||
|
||||
Exit Codes:
|
||||
0 Success
|
||||
1 General error (missing args, no output)
|
||||
124 Timeout
|
||||
127 backend command not found
|
||||
130 Interrupted (Ctrl+C)
|
||||
* Passthrough from backend process`, name)
|
||||
fmt.Println(help)
|
||||
}
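For reference, the integration tests later in this diff feed --parallel a small delimited task list on stdin. A minimal example of such input, with illustrative ids and bodies, presumably repeating the ---TASK--- header once per task (per-task options such as a backend override exist in the parsed config, but their stdin syntax is not shown in this excerpt):

---TASK---
id: alpha
---CONTENT---
first task description
---TASK---
id: beta
---CONTENT---
task-beta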
|
||||
@@ -1,8 +1,7 @@
|
||||
package wrapper
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"codeagent-wrapper/internal/logger"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
@@ -37,9 +36,7 @@ func captureStdout(t *testing.T, fn func()) string {
|
||||
os.Stdout = old
|
||||
|
||||
var buf bytes.Buffer
|
||||
if _, err := io.Copy(&buf, r); err != nil {
|
||||
t.Fatalf("io.Copy() error = %v", err)
|
||||
}
|
||||
io.Copy(&buf, r)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
@@ -60,17 +57,11 @@ func parseIntegrationOutput(t *testing.T, out string) integrationOutput {
|
||||
for _, p := range parts {
|
||||
p = strings.TrimSpace(p)
|
||||
if strings.HasSuffix(p, "tasks") {
|
||||
if _, err := fmt.Sscanf(p, "%d tasks", &payload.Summary.Total); err != nil {
|
||||
t.Fatalf("failed to parse total tasks from %q: %v", p, err)
|
||||
}
|
||||
fmt.Sscanf(p, "%d tasks", &payload.Summary.Total)
|
||||
} else if strings.HasSuffix(p, "passed") {
|
||||
if _, err := fmt.Sscanf(p, "%d passed", &payload.Summary.Success); err != nil {
|
||||
t.Fatalf("failed to parse passed tasks from %q: %v", p, err)
|
||||
}
|
||||
fmt.Sscanf(p, "%d passed", &payload.Summary.Success)
|
||||
} else if strings.HasSuffix(p, "failed") {
|
||||
if _, err := fmt.Sscanf(p, "%d failed", &payload.Summary.Failed); err != nil {
|
||||
t.Fatalf("failed to parse failed tasks from %q: %v", p, err)
|
||||
}
|
||||
fmt.Sscanf(p, "%d failed", &payload.Summary.Failed)
|
||||
}
|
||||
}
|
||||
} else if strings.HasPrefix(line, "Total:") {
|
||||
@@ -79,17 +70,11 @@ func parseIntegrationOutput(t *testing.T, out string) integrationOutput {
|
||||
for _, p := range parts {
|
||||
p = strings.TrimSpace(p)
|
||||
if strings.HasPrefix(p, "Total:") {
|
||||
if _, err := fmt.Sscanf(p, "Total: %d", &payload.Summary.Total); err != nil {
|
||||
t.Fatalf("failed to parse total tasks from %q: %v", p, err)
|
||||
}
|
||||
fmt.Sscanf(p, "Total: %d", &payload.Summary.Total)
|
||||
} else if strings.HasPrefix(p, "Success:") {
|
||||
if _, err := fmt.Sscanf(p, "Success: %d", &payload.Summary.Success); err != nil {
|
||||
t.Fatalf("failed to parse passed tasks from %q: %v", p, err)
|
||||
}
|
||||
fmt.Sscanf(p, "Success: %d", &payload.Summary.Success)
|
||||
} else if strings.HasPrefix(p, "Failed:") {
|
||||
if _, err := fmt.Sscanf(p, "Failed: %d", &payload.Summary.Failed); err != nil {
|
||||
t.Fatalf("failed to parse failed tasks from %q: %v", p, err)
|
||||
}
|
||||
fmt.Sscanf(p, "Failed: %d", &payload.Summary.Failed)
|
||||
}
|
||||
}
|
||||
} else if line == "## Task Results" {
|
||||
@@ -109,39 +94,34 @@ func parseIntegrationOutput(t *testing.T, out string) integrationOutput {
|
||||
currentTask = &TaskResult{}
|
||||
|
||||
taskLine := strings.TrimPrefix(line, "### ")
|
||||
parseMarker := func(marker string, exitCode int) bool {
|
||||
needle := " " + marker
|
||||
if !strings.Contains(taskLine, needle) {
|
||||
return false
|
||||
}
|
||||
parts := strings.Split(taskLine, needle)
|
||||
success, warning, failed := getStatusSymbols()
|
||||
// Parse different formats
|
||||
if strings.Contains(taskLine, " "+success) {
|
||||
parts := strings.Split(taskLine, " "+success)
|
||||
currentTask.TaskID = strings.TrimSpace(parts[0])
|
||||
currentTask.ExitCode = exitCode
|
||||
if exitCode == 0 && len(parts) > 1 {
|
||||
currentTask.ExitCode = 0
|
||||
// Extract coverage if present
|
||||
if len(parts) > 1 {
|
||||
coveragePart := strings.TrimSpace(parts[1])
|
||||
if strings.HasSuffix(coveragePart, "%") {
|
||||
currentTask.Coverage = coveragePart
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
switch {
|
||||
case parseMarker("✓", 0), parseMarker("PASS", 0):
|
||||
// ok
|
||||
case parseMarker("⚠️", 0), parseMarker("WARN", 0):
|
||||
// warning
|
||||
case parseMarker("✗", 1), parseMarker("FAIL", 1):
|
||||
// fail
|
||||
default:
|
||||
} else if strings.Contains(taskLine, " "+warning) {
|
||||
parts := strings.Split(taskLine, " "+warning)
|
||||
currentTask.TaskID = strings.TrimSpace(parts[0])
|
||||
currentTask.ExitCode = 0
|
||||
} else if strings.Contains(taskLine, " "+failed) {
|
||||
parts := strings.Split(taskLine, " "+failed)
|
||||
currentTask.TaskID = strings.TrimSpace(parts[0])
|
||||
currentTask.ExitCode = 1
|
||||
} else {
|
||||
currentTask.TaskID = taskLine
|
||||
}
|
||||
} else if currentTask != nil && inTaskResults {
|
||||
// Parse task details
|
||||
if strings.HasPrefix(line, "Exit code:") {
|
||||
if _, err := fmt.Sscanf(line, "Exit code: %d", ¤tTask.ExitCode); err != nil {
|
||||
t.Fatalf("failed to parse exit code from %q: %v", line, err)
|
||||
}
|
||||
fmt.Sscanf(line, "Exit code: %d", ¤tTask.ExitCode)
|
||||
} else if strings.HasPrefix(line, "Error:") {
|
||||
currentTask.Error = strings.TrimPrefix(line, "Error: ")
|
||||
} else if strings.HasPrefix(line, "Log:") {
|
||||
@@ -167,9 +147,7 @@ func parseIntegrationOutput(t *testing.T, out string) integrationOutput {
|
||||
currentTask.ExitCode = 0
|
||||
} else if strings.HasPrefix(line, "Status: FAILED") {
|
||||
if strings.Contains(line, "exit code") {
|
||||
if _, err := fmt.Sscanf(line, "Status: FAILED (exit code %d)", ¤tTask.ExitCode); err != nil {
|
||||
t.Fatalf("failed to parse exit code from %q: %v", line, err)
|
||||
}
|
||||
fmt.Sscanf(line, "Status: FAILED (exit code %d)", ¤tTask.ExitCode)
|
||||
} else {
|
||||
currentTask.ExitCode = 1
|
||||
}
|
||||
@@ -191,6 +169,32 @@ func parseIntegrationOutput(t *testing.T, out string) integrationOutput {
|
||||
return payload
|
||||
}
|
||||
|
||||
func extractTaskBlock(t *testing.T, output, taskID string) string {
|
||||
t.Helper()
|
||||
header := fmt.Sprintf("--- Task: %s ---", taskID)
|
||||
lines := strings.Split(output, "\n")
|
||||
var block []string
|
||||
collecting := false
|
||||
for _, raw := range lines {
|
||||
trimmed := strings.TrimSpace(raw)
|
||||
if !collecting {
|
||||
if trimmed == header {
|
||||
collecting = true
|
||||
block = append(block, trimmed)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(trimmed, "--- Task: ") && trimmed != header {
|
||||
break
|
||||
}
|
||||
block = append(block, trimmed)
|
||||
}
|
||||
if len(block) == 0 {
|
||||
t.Fatalf("task block %s not found in output:\n%s", taskID, output)
|
||||
}
|
||||
return strings.Join(block, "\n")
|
||||
}
|
||||
|
||||
func findResultByID(t *testing.T, payload integrationOutput, id string) TaskResult {
|
||||
t.Helper()
|
||||
for _, res := range payload.Results {
|
||||
@@ -202,37 +206,6 @@ func findResultByID(t *testing.T, payload integrationOutput, id string) TaskResu
|
||||
return TaskResult{}
|
||||
}
|
||||
|
||||
func setTempDirEnv(t *testing.T, dir string) string {
|
||||
t.Helper()
|
||||
resolved := dir
|
||||
if eval, err := filepath.EvalSymlinks(dir); err == nil {
|
||||
resolved = eval
|
||||
}
|
||||
t.Setenv("TMPDIR", resolved)
|
||||
t.Setenv("TEMP", resolved)
|
||||
t.Setenv("TMP", resolved)
|
||||
return resolved
|
||||
}
|
||||
|
||||
func createTempLog(t *testing.T, dir, name string) string {
|
||||
t.Helper()
|
||||
path := filepath.Join(dir, name)
|
||||
if err := os.WriteFile(path, []byte("test"), 0o644); err != nil {
|
||||
t.Fatalf("failed to create temp log %s: %v", path, err)
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
func stubProcessRunning(t *testing.T, fn func(int) bool) {
|
||||
t.Helper()
|
||||
t.Cleanup(logger.SetProcessRunningCheck(fn))
|
||||
}
|
||||
|
||||
func stubProcessStartTime(t *testing.T, fn func(int) time.Time) {
|
||||
t.Helper()
|
||||
t.Cleanup(logger.SetProcessStartTimeFn(fn))
|
||||
}
|
||||
|
||||
func TestRunParallelEndToEnd_OrderAndConcurrency(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
origRun := runCodexTaskFn
|
||||
@@ -418,7 +391,7 @@ id: beta
|
||||
---CONTENT---
|
||||
task-beta`
|
||||
stdinReader = bytes.NewReader([]byte(input))
|
||||
os.Args = []string{"codeagent-wrapper", "--parallel"}
|
||||
os.Args = []string{"codex-wrapper", "--parallel"}
|
||||
|
||||
var exitCode int
|
||||
output := captureStdout(t, func() {
|
||||
@@ -471,9 +444,9 @@ id: d
|
||||
---CONTENT---
|
||||
ok-d`
|
||||
stdinReader = bytes.NewReader([]byte(input))
|
||||
os.Args = []string{"codeagent-wrapper", "--parallel"}
|
||||
os.Args = []string{"codex-wrapper", "--parallel"}
|
||||
|
||||
expectedLog := filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", os.Getpid()))
|
||||
expectedLog := filepath.Join(tempDir, fmt.Sprintf("codex-wrapper-%d.log", os.Getpid()))
|
||||
|
||||
origRun := runCodexTaskFn
|
||||
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
|
||||
@@ -527,9 +500,9 @@ ok-d`
|
||||
|
||||
// After parallel log isolation fix, each task has its own log file
|
||||
expectedLines := map[string]struct{}{
|
||||
fmt.Sprintf("Task a: Log: %s", filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d-a.log", os.Getpid()))): {},
|
||||
fmt.Sprintf("Task b: Log: %s", filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d-b.log", os.Getpid()))): {},
|
||||
fmt.Sprintf("Task d: Log: %s", filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d-d.log", os.Getpid()))): {},
|
||||
fmt.Sprintf("Task a: Log: %s", filepath.Join(tempDir, fmt.Sprintf("codex-wrapper-%d-a.log", os.Getpid()))): {},
|
||||
fmt.Sprintf("Task b: Log: %s", filepath.Join(tempDir, fmt.Sprintf("codex-wrapper-%d-b.log", os.Getpid()))): {},
|
||||
fmt.Sprintf("Task d: Log: %s", filepath.Join(tempDir, fmt.Sprintf("codex-wrapper-%d-d.log", os.Getpid()))): {},
|
||||
}
|
||||
|
||||
if len(taskLines) != len(expectedLines) {
|
||||
@@ -547,7 +520,7 @@ func TestRunNonParallelOutputsIncludeLogPathsIntegration(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
|
||||
tempDir := setTempDirEnv(t, t.TempDir())
|
||||
os.Args = []string{"codeagent-wrapper", "integration-log-check"}
|
||||
os.Args = []string{"codex-wrapper", "integration-log-check"}
|
||||
stdinReader = strings.NewReader("")
|
||||
isTerminalFn = func() bool { return true }
|
||||
codexCommand = "echo"
|
||||
@@ -565,7 +538,7 @@ func TestRunNonParallelOutputsIncludeLogPathsIntegration(t *testing.T) {
|
||||
if exitCode != 0 {
|
||||
t.Fatalf("run() exit=%d, want 0", exitCode)
|
||||
}
|
||||
expectedLog := filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", os.Getpid()))
|
||||
expectedLog := filepath.Join(tempDir, fmt.Sprintf("codex-wrapper-%d.log", os.Getpid()))
|
||||
wantLine := fmt.Sprintf("Log: %s", expectedLog)
|
||||
if !strings.Contains(stderr, wantLine) {
|
||||
t.Fatalf("stderr missing %q, got: %q", wantLine, stderr)
|
||||
@@ -668,6 +641,7 @@ func TestRunParallelTimeoutPropagation(t *testing.T) {
|
||||
t.Cleanup(func() {
|
||||
runCodexTaskFn = origRun
|
||||
resetTestHooks()
|
||||
os.Unsetenv("CODEX_TIMEOUT")
|
||||
})
|
||||
|
||||
var receivedTimeout int
|
||||
@@ -676,7 +650,7 @@ func TestRunParallelTimeoutPropagation(t *testing.T) {
|
||||
return TaskResult{TaskID: task.ID, ExitCode: 124, Error: "timeout"}
|
||||
}
|
||||
|
||||
t.Setenv("CODEX_TIMEOUT", "1")
|
||||
os.Setenv("CODEX_TIMEOUT", "1")
|
||||
input := `---TASK---
|
||||
id: T
|
||||
---CONTENT---
|
||||
@@ -746,11 +720,11 @@ func TestRunStartupCleanupRemovesOrphansEndToEnd(t *testing.T) {
|
||||
|
||||
tempDir := setTempDirEnv(t, t.TempDir())
|
||||
|
||||
orphanA := createTempLog(t, tempDir, "codeagent-wrapper-5001.log")
|
||||
orphanB := createTempLog(t, tempDir, "codeagent-wrapper-5002-extra.log")
|
||||
orphanC := createTempLog(t, tempDir, "codeagent-wrapper-5003-suffix.log")
|
||||
orphanA := createTempLog(t, tempDir, "codex-wrapper-5001.log")
|
||||
orphanB := createTempLog(t, tempDir, "codex-wrapper-5002-extra.log")
|
||||
orphanC := createTempLog(t, tempDir, "codex-wrapper-5003-suffix.log")
|
||||
runningPID := 81234
|
||||
runningLog := createTempLog(t, tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", runningPID))
|
||||
runningLog := createTempLog(t, tempDir, fmt.Sprintf("codex-wrapper-%d.log", runningPID))
|
||||
unrelated := createTempLog(t, tempDir, "wrapper.log")
|
||||
|
||||
stubProcessRunning(t, func(pid int) bool {
|
||||
@@ -766,7 +740,7 @@ func TestRunStartupCleanupRemovesOrphansEndToEnd(t *testing.T) {
|
||||
codexCommand = createFakeCodexScript(t, "tid-startup", "ok")
|
||||
stdinReader = strings.NewReader("")
|
||||
isTerminalFn = func() bool { return true }
|
||||
os.Args = []string{"codeagent-wrapper", "task"}
|
||||
os.Args = []string{"codex-wrapper", "task"}
|
||||
|
||||
if exit := run(); exit != 0 {
|
||||
t.Fatalf("run() exit=%d, want 0", exit)
|
||||
@@ -792,7 +766,7 @@ func TestRunStartupCleanupConcurrentWrappers(t *testing.T) {
|
||||
|
||||
const totalLogs = 40
|
||||
for i := 0; i < totalLogs; i++ {
|
||||
createTempLog(t, tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", 9000+i))
|
||||
createTempLog(t, tempDir, fmt.Sprintf("codex-wrapper-%d.log", 9000+i))
|
||||
}
|
||||
|
||||
stubProcessRunning(t, func(pid int) bool {
|
||||
@@ -816,7 +790,7 @@ func TestRunStartupCleanupConcurrentWrappers(t *testing.T) {
|
||||
close(start)
|
||||
wg.Wait()
|
||||
|
||||
matches, err := filepath.Glob(filepath.Join(tempDir, "codeagent-wrapper-*.log"))
|
||||
matches, err := filepath.Glob(filepath.Join(tempDir, "codex-wrapper-*.log"))
|
||||
if err != nil {
|
||||
t.Fatalf("glob error: %v", err)
|
||||
}
|
||||
@@ -830,9 +804,9 @@ func TestRunCleanupFlagEndToEnd_Success(t *testing.T) {
|
||||
|
||||
tempDir := setTempDirEnv(t, t.TempDir())
|
||||
|
||||
staleA := createTempLog(t, tempDir, "codeagent-wrapper-2100.log")
|
||||
staleB := createTempLog(t, tempDir, "codeagent-wrapper-2200-extra.log")
|
||||
keeper := createTempLog(t, tempDir, "codeagent-wrapper-2300.log")
|
||||
staleA := createTempLog(t, tempDir, "codex-wrapper-2100.log")
|
||||
staleB := createTempLog(t, tempDir, "codex-wrapper-2200-extra.log")
|
||||
keeper := createTempLog(t, tempDir, "codex-wrapper-2300.log")
|
||||
|
||||
stubProcessRunning(t, func(pid int) bool {
|
||||
return pid == 2300 || pid == os.Getpid()
|
||||
@@ -844,7 +818,7 @@ func TestRunCleanupFlagEndToEnd_Success(t *testing.T) {
|
||||
return time.Time{}
|
||||
})
|
||||
|
||||
os.Args = []string{"codeagent-wrapper", "--cleanup"}
|
||||
os.Args = []string{"codex-wrapper", "--cleanup"}
|
||||
|
||||
var exitCode int
|
||||
output := captureStdout(t, func() {
|
||||
@@ -868,10 +842,10 @@ func TestRunCleanupFlagEndToEnd_Success(t *testing.T) {
|
||||
if !strings.Contains(output, "Files kept: 1") {
|
||||
t.Fatalf("missing 'Files kept: 1' in output: %q", output)
|
||||
}
|
||||
if !strings.Contains(output, "codeagent-wrapper-2100.log") || !strings.Contains(output, "codeagent-wrapper-2200-extra.log") {
|
||||
if !strings.Contains(output, "codex-wrapper-2100.log") || !strings.Contains(output, "codex-wrapper-2200-extra.log") {
|
||||
t.Fatalf("missing deleted file names in output: %q", output)
|
||||
}
|
||||
if !strings.Contains(output, "codeagent-wrapper-2300.log") {
|
||||
if !strings.Contains(output, "codex-wrapper-2300.log") {
|
||||
t.Fatalf("missing kept file names in output: %q", output)
|
||||
}
|
||||
|
||||
@@ -884,7 +858,7 @@ func TestRunCleanupFlagEndToEnd_Success(t *testing.T) {
|
||||
t.Fatalf("expected kept log to remain, err=%v", err)
|
||||
}
|
||||
|
||||
currentLog := filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", os.Getpid()))
|
||||
currentLog := filepath.Join(tempDir, fmt.Sprintf("codex-wrapper-%d.log", os.Getpid()))
|
||||
if _, err := os.Stat(currentLog); err == nil {
|
||||
t.Fatalf("cleanup mode should not create new log file %s", currentLog)
|
||||
} else if !os.IsNotExist(err) {
|
||||
@@ -903,7 +877,7 @@ func TestRunCleanupFlagEndToEnd_FailureDoesNotAffectStartup(t *testing.T) {
|
||||
return CleanupStats{Scanned: 1}, fmt.Errorf("permission denied")
|
||||
}
|
||||
|
||||
os.Args = []string{"codeagent-wrapper", "--cleanup"}
|
||||
os.Args = []string{"codex-wrapper", "--cleanup"}
|
||||
|
||||
var exitCode int
|
||||
errOutput := captureStderr(t, func() {
|
||||
@@ -920,7 +894,7 @@ func TestRunCleanupFlagEndToEnd_FailureDoesNotAffectStartup(t *testing.T) {
|
||||
t.Fatalf("cleanup called %d times, want 1", calls)
|
||||
}
|
||||
|
||||
currentLog := filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", os.Getpid()))
|
||||
currentLog := filepath.Join(tempDir, fmt.Sprintf("codex-wrapper-%d.log", os.Getpid()))
|
||||
if _, err := os.Stat(currentLog); err == nil {
|
||||
t.Fatalf("cleanup failure should not create new log file %s", currentLog)
|
||||
} else if !os.IsNotExist(err) {
|
||||
@@ -933,7 +907,7 @@ func TestRunCleanupFlagEndToEnd_FailureDoesNotAffectStartup(t *testing.T) {
|
||||
codexCommand = createFakeCodexScript(t, "tid-cleanup-e2e", "ok")
|
||||
stdinReader = strings.NewReader("")
|
||||
isTerminalFn = func() bool { return true }
|
||||
os.Args = []string{"codeagent-wrapper", "post-cleanup task"}
|
||||
os.Args = []string{"codex-wrapper", "post-cleanup task"}
|
||||
|
||||
var normalExit int
|
||||
normalOutput := captureStdout(t, func() {
|
||||
File diff suppressed because it is too large
@@ -1,65 +1,102 @@
|
||||
package parser
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/goccy/go-json"
|
||||
)
|
||||
|
||||
// JSONEvent represents a Codex JSON output event
|
||||
type JSONEvent struct {
|
||||
Type string `json:"type"`
|
||||
ThreadID string `json:"thread_id,omitempty"`
|
||||
Item *EventItem `json:"item,omitempty"`
|
||||
}
|
||||
|
||||
// EventItem represents the item field in a JSON event
|
||||
type EventItem struct {
|
||||
Type string `json:"type"`
|
||||
Text interface{} `json:"text"`
|
||||
}
|
||||
|
||||
// ClaudeEvent for Claude stream-json format
|
||||
type ClaudeEvent struct {
|
||||
Type string `json:"type"`
|
||||
Subtype string `json:"subtype,omitempty"`
|
||||
SessionID string `json:"session_id,omitempty"`
|
||||
Result string `json:"result,omitempty"`
|
||||
}
|
||||
|
||||
// GeminiEvent for Gemini stream-json format
|
||||
type GeminiEvent struct {
|
||||
Type string `json:"type"`
|
||||
SessionID string `json:"session_id,omitempty"`
|
||||
Role string `json:"role,omitempty"`
|
||||
Content string `json:"content,omitempty"`
|
||||
Delta bool `json:"delta,omitempty"`
|
||||
Status string `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
func parseJSONStream(r io.Reader) (message, threadID string) {
|
||||
return parseJSONStreamWithLog(r, logWarn, logInfo)
|
||||
}
|
||||
|
||||
func parseJSONStreamWithWarn(r io.Reader, warnFn func(string)) (message, threadID string) {
|
||||
return parseJSONStreamWithLog(r, warnFn, logInfo)
|
||||
}
|
||||
|
||||
func parseJSONStreamWithLog(r io.Reader, warnFn func(string), infoFn func(string)) (message, threadID string) {
|
||||
return parseJSONStreamInternal(r, warnFn, infoFn, nil, nil)
|
||||
}
|
||||
|
||||
const (
|
||||
jsonLineReaderSize = 64 * 1024
|
||||
jsonLineMaxBytes = 10 * 1024 * 1024
|
||||
jsonLinePreviewBytes = 256
|
||||
)
|
||||
|
||||
type lineScratch struct {
|
||||
buf []byte
|
||||
preview []byte
|
||||
type codexHeader struct {
|
||||
Type string `json:"type"`
|
||||
ThreadID string `json:"thread_id,omitempty"`
|
||||
Item *struct {
|
||||
Type string `json:"type"`
|
||||
} `json:"item,omitempty"`
|
||||
}
|
||||
|
||||
const maxPooledLineScratchCap = 1 << 20 // 1 MiB
|
||||
// UnifiedEvent combines all backend event formats into a single structure
|
||||
// to avoid multiple JSON unmarshal operations per event
|
||||
type UnifiedEvent struct {
|
||||
// Common fields
|
||||
Type string `json:"type"`
|
||||
|
||||
var lineScratchPool = sync.Pool{
|
||||
New: func() any {
|
||||
return &lineScratch{
|
||||
buf: make([]byte, 0, jsonLineReaderSize),
|
||||
preview: make([]byte, 0, jsonLinePreviewBytes),
|
||||
}
|
||||
},
|
||||
// Codex-specific fields
|
||||
ThreadID string `json:"thread_id,omitempty"`
|
||||
Item json.RawMessage `json:"item,omitempty"` // Lazy parse
|
||||
|
||||
// Claude-specific fields
|
||||
Subtype string `json:"subtype,omitempty"`
|
||||
SessionID string `json:"session_id,omitempty"`
|
||||
Result string `json:"result,omitempty"`
|
||||
|
||||
// Gemini-specific fields
|
||||
Role string `json:"role,omitempty"`
|
||||
Content string `json:"content,omitempty"`
|
||||
Delta *bool `json:"delta,omitempty"`
|
||||
Status string `json:"status,omitempty"`
|
||||
}
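To make the backend detection below concrete, here is roughly what one decoded line per backend could look like; the type values come from the parser code, but the payloads themselves are invented for illustration:

// Illustrative stream-json lines, one per backend (values are made up).
var sampleEventLines = []string{
	`{"type":"item.completed","thread_id":"t-123","item":{"type":"message","text":"ok"}}`, // Codex: thread_id / item present
	`{"type":"result","subtype":"success","session_id":"s-1","result":"ok"}`,              // Claude: subtype / result present
	`{"type":"init","session_id":"g-1"}`,                                                  // Gemini: init event with session_id
}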
|
||||
|
||||
func ParseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(string), onMessage func(), onComplete func()) (message, threadID string) {
|
||||
// ItemContent represents the parsed item.text field for Codex events
|
||||
type ItemContent struct {
|
||||
Type string `json:"type"`
|
||||
Text interface{} `json:"text"`
|
||||
}
|
||||
|
||||
func parseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(string), onMessage func(), onComplete func()) (message, threadID string) {
|
||||
reader := bufio.NewReaderSize(r, jsonLineReaderSize)
|
||||
scratch := lineScratchPool.Get().(*lineScratch)
|
||||
if scratch.buf == nil {
|
||||
scratch.buf = make([]byte, 0, jsonLineReaderSize)
|
||||
} else {
|
||||
scratch.buf = scratch.buf[:0]
|
||||
}
|
||||
if scratch.preview == nil {
|
||||
scratch.preview = make([]byte, 0, jsonLinePreviewBytes)
|
||||
} else {
|
||||
scratch.preview = scratch.preview[:0]
|
||||
}
|
||||
defer func() {
|
||||
if cap(scratch.buf) > maxPooledLineScratchCap {
|
||||
scratch.buf = nil
|
||||
} else if scratch.buf != nil {
|
||||
scratch.buf = scratch.buf[:0]
|
||||
}
|
||||
if cap(scratch.preview) > jsonLinePreviewBytes*4 {
|
||||
scratch.preview = nil
|
||||
} else if scratch.preview != nil {
|
||||
scratch.preview = scratch.preview[:0]
|
||||
}
|
||||
lineScratchPool.Put(scratch)
|
||||
}()
|
||||
|
||||
if warnFn == nil {
|
||||
warnFn = func(string) {}
|
||||
@@ -83,14 +120,13 @@ func ParseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(strin
|
||||
totalEvents := 0
|
||||
|
||||
var (
|
||||
codexMessage string
|
||||
claudeMessage string
|
||||
geminiBuffer strings.Builder
|
||||
opencodeMessage strings.Builder
|
||||
codexMessage string
|
||||
claudeMessage string
|
||||
geminiBuffer strings.Builder
|
||||
)
|
||||
|
||||
for {
|
||||
line, tooLong, err := readLineWithLimit(reader, jsonLineMaxBytes, jsonLinePreviewBytes, scratch)
|
||||
line, tooLong, err := readLineWithLimit(reader, jsonLineMaxBytes, jsonLinePreviewBytes)
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
@@ -106,14 +142,14 @@ func ParseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(strin
|
||||
totalEvents++
|
||||
|
||||
if tooLong {
|
||||
warnFn(fmt.Sprintf("Skipped overlong JSON line (> %d bytes): %s", jsonLineMaxBytes, TruncateBytes(line, 100)))
|
||||
warnFn(fmt.Sprintf("Skipped overlong JSON line (> %d bytes): %s", jsonLineMaxBytes, truncateBytes(line, 100)))
|
||||
continue
|
||||
}
|
||||
|
||||
// Single unmarshal for all backend types
|
||||
var event UnifiedEvent
|
||||
if err := json.Unmarshal(line, &event); err != nil {
|
||||
warnFn(fmt.Sprintf("Failed to parse event: %s", TruncateBytes(line, 100)))
|
||||
warnFn(fmt.Sprintf("Failed to parse event: %s", truncateBytes(line, 100)))
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -127,46 +163,11 @@ func ParseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(strin
|
||||
isCodex = true
|
||||
}
|
||||
}
|
||||
// Codex-specific event types without thread_id or item
|
||||
if !isCodex && (event.Type == "turn.started" || event.Type == "turn.completed") {
|
||||
isCodex = true
|
||||
}
|
||||
isClaude := event.Subtype != "" || event.Result != ""
|
||||
if !isClaude && event.Type == "result" && event.SessionID != "" && event.Status == "" {
|
||||
isClaude = true
|
||||
}
|
||||
isGemini := (event.Type == "init" && event.SessionID != "") || event.Role != "" || event.Delta != nil || event.Status != ""
|
||||
isOpencode := event.OpencodeSessionID != "" && len(event.Part) > 0
|
||||
|
||||
// Handle Opencode events first (most specific detection)
|
||||
if isOpencode {
|
||||
if threadID == "" {
|
||||
threadID = event.OpencodeSessionID
|
||||
}
|
||||
|
||||
var part OpencodePart
|
||||
if err := json.Unmarshal(event.Part, &part); err != nil {
|
||||
warnFn(fmt.Sprintf("Failed to parse opencode part: %s", err.Error()))
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract sessionID from part if available
|
||||
if part.SessionID != "" && threadID == "" {
|
||||
threadID = part.SessionID
|
||||
}
|
||||
|
||||
infoFn(fmt.Sprintf("Parsed Opencode event #%d type=%s part_type=%s", totalEvents, event.Type, part.Type))
|
||||
|
||||
if event.Type == "text" && part.Text != "" {
|
||||
opencodeMessage.WriteString(part.Text)
|
||||
notifyMessage()
|
||||
}
|
||||
|
||||
if part.Type == "step-finish" && part.Reason == "stop" {
|
||||
notifyComplete()
|
||||
}
|
||||
continue
|
||||
}
|
||||
isGemini := event.Role != "" || event.Delta != nil || event.Status != ""
|
||||
|
||||
// Handle Codex events
|
||||
if isCodex {
|
||||
@@ -193,10 +194,6 @@ func ParseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(strin
|
||||
infoFn(fmt.Sprintf("thread.completed event thread_id=%s", event.ThreadID))
|
||||
notifyComplete()
|
||||
|
||||
case "turn.completed":
|
||||
infoFn("turn.completed event")
|
||||
notifyComplete()
|
||||
|
||||
case "item.completed":
|
||||
var itemType string
|
||||
if len(event.Item) > 0 {
|
||||
@@ -212,7 +209,7 @@ func ParseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(strin
|
||||
// Lazy parse: only parse item content when needed
|
||||
var item ItemContent
|
||||
if err := json.Unmarshal(event.Item, &item); err == nil {
|
||||
normalized := NormalizeText(item.Text)
|
||||
normalized := normalizeText(item.Text)
|
||||
infoFn(fmt.Sprintf("item.completed event item_type=%s message_len=%d", itemType, len(normalized)))
|
||||
if normalized != "" {
|
||||
codexMessage = normalized
|
||||
@@ -274,13 +271,11 @@ func ParseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(strin
|
||||
continue
|
||||
}
|
||||
|
||||
// Unknown event format from other backends (turn.started/assistant/user); ignore.
|
||||
continue
|
||||
// Unknown event format
|
||||
warnFn(fmt.Sprintf("Unknown event format: %s", truncateBytes(line, 100)))
|
||||
}
|
||||
|
||||
switch {
|
||||
case opencodeMessage.Len() > 0:
|
||||
message = opencodeMessage.String()
|
||||
case geminiBuffer.Len() > 0:
|
||||
message = geminiBuffer.String()
|
||||
case claudeMessage != "":
|
||||
@@ -293,12 +288,12 @@ func ParseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(strin
|
||||
return message, threadID
|
||||
}
|
||||
|
||||
func HasKey(m map[string]json.RawMessage, key string) bool {
|
||||
func hasKey(m map[string]json.RawMessage, key string) bool {
|
||||
_, ok := m[key]
|
||||
return ok
|
||||
}
|
||||
|
||||
func DiscardInvalidJSON(decoder *json.Decoder, reader *bufio.Reader) (*bufio.Reader, error) {
|
||||
func discardInvalidJSON(decoder *json.Decoder, reader *bufio.Reader) (*bufio.Reader, error) {
|
||||
var buffered bytes.Buffer
|
||||
|
||||
if decoder != nil {
|
||||
@@ -324,7 +319,7 @@ func DiscardInvalidJSON(decoder *json.Decoder, reader *bufio.Reader) (*bufio.Rea
|
||||
return bufio.NewReader(io.MultiReader(bytes.NewReader(remaining), reader)), err
|
||||
}
|
||||
|
||||
func readLineWithLimit(r *bufio.Reader, maxBytes int, previewBytes int, scratch *lineScratch) (line []byte, tooLong bool, err error) {
|
||||
func readLineWithLimit(r *bufio.Reader, maxBytes int, previewBytes int) (line []byte, tooLong bool, err error) {
|
||||
if r == nil {
|
||||
return nil, false, errors.New("reader is nil")
|
||||
}
|
||||
@@ -347,22 +342,12 @@ func readLineWithLimit(r *bufio.Reader, maxBytes int, previewBytes int, scratch
|
||||
return part, false, nil
|
||||
}
|
||||
|
||||
if scratch == nil {
|
||||
scratch = &lineScratch{}
|
||||
}
|
||||
if scratch.preview == nil {
|
||||
scratch.preview = make([]byte, 0, min(previewBytes, len(part)))
|
||||
}
|
||||
if scratch.buf == nil {
|
||||
scratch.buf = make([]byte, 0, min(maxBytes, len(part)*2))
|
||||
}
|
||||
|
||||
preview := scratch.preview[:0]
|
||||
preview := make([]byte, 0, min(previewBytes, len(part)))
|
||||
if previewBytes > 0 {
|
||||
preview = append(preview, part[:min(previewBytes, len(part))]...)
|
||||
}
|
||||
|
||||
buf := scratch.buf[:0]
|
||||
buf := make([]byte, 0, min(maxBytes, len(part)*2))
|
||||
total := 0
|
||||
if len(part) > maxBytes {
|
||||
tooLong = true
|
||||
@@ -392,16 +377,12 @@ func readLineWithLimit(r *bufio.Reader, maxBytes int, previewBytes int, scratch
|
||||
}
|
||||
|
||||
if tooLong {
|
||||
scratch.preview = preview
|
||||
scratch.buf = buf
|
||||
return preview, true, nil
|
||||
}
|
||||
scratch.preview = preview
|
||||
scratch.buf = buf
|
||||
return buf, false, nil
|
||||
}
|
||||
|
||||
func TruncateBytes(b []byte, maxLen int) string {
|
||||
func truncateBytes(b []byte, maxLen int) string {
|
||||
if len(b) <= maxLen {
|
||||
return string(b)
|
||||
}
|
||||
@@ -411,7 +392,7 @@ func TruncateBytes(b []byte, maxLen int) string {
|
||||
return string(b[:maxLen]) + "..."
|
||||
}
|
||||
|
||||
func NormalizeText(text interface{}) string {
|
||||
func normalizeText(text interface{}) string {
|
||||
switch v := text.(type) {
|
||||
case string:
|
||||
return v
|
||||
@@ -427,10 +408,3 @@ func NormalizeText(text interface{}) string {
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
func min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package parser
|
||||
package main
|
||||
|
||||
import (
|
||||
"strings"
|
||||
@@ -18,7 +18,7 @@ func TestParseJSONStream_SkipsOverlongLineAndContinues(t *testing.T) {
|
||||
var warns []string
|
||||
warnFn := func(msg string) { warns = append(warns, msg) }
|
||||
|
||||
gotMessage, gotThreadID := ParseJSONStreamInternal(strings.NewReader(input), warnFn, nil, nil, nil)
|
||||
gotMessage, gotThreadID := parseJSONStreamInternal(strings.NewReader(input), warnFn, nil, nil, nil)
|
||||
if gotMessage != "ok" {
|
||||
t.Fatalf("message=%q, want %q (warns=%v)", gotMessage, "ok", warns)
|
||||
}
|
||||
codeagent-wrapper/process_check_test.go (Normal file, 217 lines)
@@ -0,0 +1,217 @@
|
||||
//go:build unix || darwin || linux
|
||||
// +build unix darwin linux
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestIsProcessRunning(t *testing.T) {
|
||||
t.Run("current process", func(t *testing.T) {
|
||||
if !isProcessRunning(os.Getpid()) {
|
||||
t.Fatalf("expected current process (pid=%d) to be running", os.Getpid())
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("fake pid", func(t *testing.T) {
|
||||
const nonexistentPID = 1 << 30
|
||||
if isProcessRunning(nonexistentPID) {
|
||||
t.Fatalf("expected pid %d to be reported as not running", nonexistentPID)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("terminated process", func(t *testing.T) {
|
||||
pid := exitedProcessPID(t)
|
||||
if isProcessRunning(pid) {
|
||||
t.Fatalf("expected exited child process (pid=%d) to be reported as not running", pid)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("boundary values", func(t *testing.T) {
|
||||
if isProcessRunning(0) {
|
||||
t.Fatalf("pid 0 should never be treated as running")
|
||||
}
|
||||
if isProcessRunning(-42) {
|
||||
t.Fatalf("negative pid should never be treated as running")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("find process error", func(t *testing.T) {
|
||||
original := findProcess
|
||||
defer func() { findProcess = original }()
|
||||
|
||||
mockErr := errors.New("findProcess failure")
|
||||
findProcess = func(pid int) (*os.Process, error) {
|
||||
return nil, mockErr
|
||||
}
|
||||
|
||||
if isProcessRunning(1234) {
|
||||
t.Fatalf("expected false when os.FindProcess fails")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func exitedProcessPID(t *testing.T) int {
|
||||
t.Helper()
|
||||
|
||||
var cmd *exec.Cmd
|
||||
if runtime.GOOS == "windows" {
|
||||
cmd = exec.Command("cmd", "/c", "exit 0")
|
||||
} else {
|
||||
cmd = exec.Command("sh", "-c", "exit 0")
|
||||
}
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
t.Fatalf("failed to start helper process: %v", err)
|
||||
}
|
||||
pid := cmd.Process.Pid
|
||||
|
||||
if err := cmd.Wait(); err != nil {
|
||||
t.Fatalf("helper process did not exit cleanly: %v", err)
|
||||
}
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
return pid
|
||||
}
|
||||
|
||||
func TestRunProcessCheckSmoke(t *testing.T) {
|
||||
t.Run("current process", func(t *testing.T) {
|
||||
if !isProcessRunning(os.Getpid()) {
|
||||
t.Fatalf("expected current process (pid=%d) to be running", os.Getpid())
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("fake pid", func(t *testing.T) {
|
||||
const nonexistentPID = 1 << 30
|
||||
if isProcessRunning(nonexistentPID) {
|
||||
t.Fatalf("expected pid %d to be reported as not running", nonexistentPID)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("boundary values", func(t *testing.T) {
|
||||
if isProcessRunning(0) {
|
||||
t.Fatalf("pid 0 should never be treated as running")
|
||||
}
|
||||
if isProcessRunning(-42) {
|
||||
t.Fatalf("negative pid should never be treated as running")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("find process error", func(t *testing.T) {
|
||||
original := findProcess
|
||||
defer func() { findProcess = original }()
|
||||
|
||||
mockErr := errors.New("findProcess failure")
|
||||
findProcess = func(pid int) (*os.Process, error) {
|
||||
return nil, mockErr
|
||||
}
|
||||
|
||||
if isProcessRunning(1234) {
|
||||
t.Fatalf("expected false when os.FindProcess fails")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetProcessStartTimeReadsProcStat(t *testing.T) {
|
||||
pid := 4321
|
||||
boot := time.Unix(1_710_000_000, 0)
|
||||
startTicks := uint64(4500)
|
||||
|
||||
statFields := make([]string, 25)
|
||||
for i := range statFields {
|
||||
statFields[i] = strconv.Itoa(i + 1)
|
||||
}
|
||||
statFields[19] = strconv.FormatUint(startTicks, 10)
|
||||
statContent := fmt.Sprintf("%d (%s) %s", pid, "cmd with space", strings.Join(statFields, " "))
|
||||
|
||||
stubReadFile(t, func(path string) ([]byte, error) {
|
||||
switch path {
|
||||
case fmt.Sprintf("/proc/%d/stat", pid):
|
||||
return []byte(statContent), nil
|
||||
case "/proc/stat":
|
||||
return []byte(fmt.Sprintf("cpu 0 0 0 0\nbtime %d\n", boot.Unix())), nil
|
||||
default:
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
})
|
||||
|
||||
got := getProcessStartTime(pid)
|
||||
want := boot.Add(time.Duration(startTicks/100) * time.Second)
|
||||
if !got.Equal(want) {
|
||||
t.Fatalf("getProcessStartTime() = %v, want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetProcessStartTimeInvalidData(t *testing.T) {
|
||||
pid := 99
|
||||
stubReadFile(t, func(path string) ([]byte, error) {
|
||||
switch path {
|
||||
case fmt.Sprintf("/proc/%d/stat", pid):
|
||||
return []byte("garbage"), nil
|
||||
case "/proc/stat":
|
||||
return []byte("btime not-a-number\n"), nil
|
||||
default:
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
})
|
||||
|
||||
if got := getProcessStartTime(pid); !got.IsZero() {
|
||||
t.Fatalf("invalid /proc data should return zero time, got %v", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetBootTimeParsesBtime(t *testing.T) {
|
||||
const bootSec = 1_711_111_111
|
||||
stubReadFile(t, func(path string) ([]byte, error) {
|
||||
if path != "/proc/stat" {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
content := fmt.Sprintf("intr 0\nbtime %d\n", bootSec)
|
||||
return []byte(content), nil
|
||||
})
|
||||
|
||||
got := getBootTime()
|
||||
want := time.Unix(bootSec, 0)
|
||||
if !got.Equal(want) {
|
||||
t.Fatalf("getBootTime() = %v, want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetBootTimeInvalidData(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
content string
|
||||
}{
|
||||
{"missing", "cpu 0 0 0 0"},
|
||||
{"malformed", "btime abc"},
|
||||
}
|
||||
|
||||
for _, tt := range cases {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
stubReadFile(t, func(string) ([]byte, error) {
|
||||
return []byte(tt.content), nil
|
||||
})
|
||||
if got := getBootTime(); !got.IsZero() {
|
||||
t.Fatalf("getBootTime() unexpected value for %s: %v", tt.name, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func stubReadFile(t *testing.T, fn func(string) ([]byte, error)) {
|
||||
t.Helper()
|
||||
original := readFileFn
|
||||
readFileFn = fn
|
||||
t.Cleanup(func() {
|
||||
readFileFn = original
|
||||
})
|
||||
}
|
||||
codeagent-wrapper/process_check_unix.go (new file, 104 lines)
@@ -0,0 +1,104 @@
//go:build unix || darwin || linux
// +build unix darwin linux

package main

import (
	"errors"
	"fmt"
	"os"
	"strconv"
	"strings"
	"syscall"
	"time"
)

var findProcess = os.FindProcess
var readFileFn = os.ReadFile

// isProcessRunning returns true if a process with the given pid is running on Unix-like systems.
func isProcessRunning(pid int) bool {
	if pid <= 0 {
		return false
	}

	proc, err := findProcess(pid)
	if err != nil || proc == nil {
		return false
	}

	err = proc.Signal(syscall.Signal(0))
	if err != nil && (errors.Is(err, syscall.ESRCH) || errors.Is(err, os.ErrProcessDone)) {
		return false
	}
	return true
}

// getProcessStartTime returns the start time of a process on Unix-like systems.
// Returns zero time if the start time cannot be determined.
func getProcessStartTime(pid int) time.Time {
	if pid <= 0 {
		return time.Time{}
	}

	// Read /proc/<pid>/stat to get process start time
	statPath := fmt.Sprintf("/proc/%d/stat", pid)
	data, err := readFileFn(statPath)
	if err != nil {
		return time.Time{}
	}

	// Parse stat file: fields are space-separated, but comm (field 2) can contain spaces
	// Find the last ')' to skip comm field safely
	content := string(data)
	lastParen := strings.LastIndex(content, ")")
	if lastParen == -1 {
		return time.Time{}
	}

	fields := strings.Fields(content[lastParen+1:])
	if len(fields) < 20 {
		return time.Time{}
	}

	// Field 22 (index 19 after comm) is starttime in clock ticks since boot
	startTicks, err := strconv.ParseUint(fields[19], 10, 64)
	if err != nil {
		return time.Time{}
	}

	// Get system boot time
	bootTime := getBootTime()
	if bootTime.IsZero() {
		return time.Time{}
	}

	// Convert ticks to duration (typically 100 ticks/sec on most systems)
	ticksPerSec := uint64(100) // sysconf(_SC_CLK_TCK), typically 100
	startTime := bootTime.Add(time.Duration(startTicks/ticksPerSec) * time.Second)

	return startTime
}

// getBootTime returns the system boot time by reading /proc/stat.
func getBootTime() time.Time {
	data, err := readFileFn("/proc/stat")
	if err != nil {
		return time.Time{}
	}

	lines := strings.Split(string(data), "\n")
	for _, line := range lines {
		if strings.HasPrefix(line, "btime ") {
			fields := strings.Fields(line)
			if len(fields) >= 2 {
				bootSec, err := strconv.ParseInt(fields[1], 10, 64)
				if err == nil {
					return time.Unix(bootSec, 0)
				}
			}
		}
	}

	return time.Time{}
}
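A minimal usage sketch for the two helpers above, assuming the wrapper records a PID together with the start time it observed at launch; `isSameProcess` and `recordedStart` are hypothetical names, not part of the wrapper's API:

```go
// Sketch only: combining isProcessRunning and getProcessStartTime to guard
// against PID reuse. recordedStart is a hypothetical timestamp saved when
// the wrapper originally launched the process.
package main

import "time"

func isSameProcess(pid int, recordedStart time.Time) bool {
	if !isProcessRunning(pid) {
		return false
	}
	start := getProcessStartTime(pid)
	if start.IsZero() || recordedStart.IsZero() {
		// Start time unavailable; fall back to the liveness check alone.
		return true
	}
	// Allow a small tolerance because start times are derived from clock ticks.
	diff := start.Sub(recordedStart)
	if diff < 0 {
		diff = -diff
	}
	return diff <= 2*time.Second
}
```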
codeagent-wrapper/process_check_windows.go (new file, 87 lines)
@@ -0,0 +1,87 @@
//go:build windows
// +build windows

package main

import (
	"errors"
	"os"
	"syscall"
	"time"
	"unsafe"
)

const (
	processQueryLimitedInformation = 0x1000
	stillActive                    = 259 // STILL_ACTIVE exit code
)

var (
	findProcess      = os.FindProcess
	kernel32         = syscall.NewLazyDLL("kernel32.dll")
	getProcessTimes  = kernel32.NewProc("GetProcessTimes")
	fileTimeToUnixFn = fileTimeToUnix
)

// isProcessRunning returns true if a process with the given pid is running on Windows.
func isProcessRunning(pid int) bool {
	if pid <= 0 {
		return false
	}

	if _, err := findProcess(pid); err != nil {
		return false
	}

	handle, err := syscall.OpenProcess(processQueryLimitedInformation, false, uint32(pid))
	if err != nil {
		if errors.Is(err, syscall.ERROR_ACCESS_DENIED) {
			return true
		}
		return false
	}
	defer syscall.CloseHandle(handle)

	var exitCode uint32
	if err := syscall.GetExitCodeProcess(handle, &exitCode); err != nil {
		return true
	}

	return exitCode == stillActive
}

// getProcessStartTime returns the start time of a process on Windows.
// Returns zero time if the start time cannot be determined.
func getProcessStartTime(pid int) time.Time {
	if pid <= 0 {
		return time.Time{}
	}

	handle, err := syscall.OpenProcess(processQueryLimitedInformation, false, uint32(pid))
	if err != nil {
		return time.Time{}
	}
	defer syscall.CloseHandle(handle)

	var creationTime, exitTime, kernelTime, userTime syscall.Filetime
	ret, _, _ := getProcessTimes.Call(
		uintptr(handle),
		uintptr(unsafe.Pointer(&creationTime)),
		uintptr(unsafe.Pointer(&exitTime)),
		uintptr(unsafe.Pointer(&kernelTime)),
		uintptr(unsafe.Pointer(&userTime)),
	)

	if ret == 0 {
		return time.Time{}
	}

	return fileTimeToUnixFn(creationTime)
}

// fileTimeToUnix converts Windows FILETIME to Unix time.
func fileTimeToUnix(ft syscall.Filetime) time.Time {
	// FILETIME is 100-nanosecond intervals since January 1, 1601 UTC
	nsec := ft.Nanoseconds()
	return time.Unix(0, nsec)
}
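The `fileTimeToUnixFn` variable appears to exist so the FILETIME conversion can be swapped out in tests. A Windows-only sketch of how that might look; the test name and timestamp are hypothetical:

```go
// Sketch only: stubbing fileTimeToUnixFn in a test within the same package.
package main

import (
	"os"
	"syscall"
	"testing"
	"time"
)

func TestGetProcessStartTimeUsesStub(t *testing.T) {
	original := fileTimeToUnixFn
	defer func() { fileTimeToUnixFn = original }()

	want := time.Unix(1_700_000_000, 0)
	fileTimeToUnixFn = func(syscall.Filetime) time.Time { return want }

	if got := getProcessStartTime(os.Getpid()); !got.Equal(want) {
		t.Fatalf("getProcessStartTime() = %v, want %v", got, want)
	}
}
```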
@@ -1,4 +1,4 @@
|
||||
package wrapper
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@@ -7,8 +7,6 @@ import (
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
utils "codeagent-wrapper/internal/utils"
|
||||
)
|
||||
|
||||
func resolveTimeout() int {
|
||||
@@ -54,7 +52,7 @@ func shouldUseStdin(taskText string, piped bool) bool {
|
||||
if len(taskText) > 800 {
|
||||
return true
|
||||
}
|
||||
return strings.ContainsAny(taskText, stdinSpecialChars)
|
||||
return strings.IndexAny(taskText, stdinSpecialChars) >= 0
|
||||
}
|
||||
|
||||
func defaultIsTerminal() bool {
|
||||
@@ -198,21 +196,69 @@ func (b *tailBuffer) String() string {
|
||||
}
|
||||
|
||||
func truncate(s string, maxLen int) string {
|
||||
return utils.Truncate(s, maxLen)
|
||||
if len(s) <= maxLen {
|
||||
return s
|
||||
}
|
||||
if maxLen < 0 {
|
||||
return ""
|
||||
}
|
||||
return s[:maxLen] + "..."
|
||||
}
|
||||
|
||||
// safeTruncate safely truncates string to maxLen, avoiding panic and UTF-8 corruption.
|
||||
func safeTruncate(s string, maxLen int) string {
|
||||
return utils.SafeTruncate(s, maxLen)
|
||||
if maxLen <= 0 || s == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
runes := []rune(s)
|
||||
if len(runes) <= maxLen {
|
||||
return s
|
||||
}
|
||||
|
||||
if maxLen < 4 {
|
||||
return string(runes[:1])
|
||||
}
|
||||
|
||||
cutoff := maxLen - 3
|
||||
if cutoff <= 0 {
|
||||
return string(runes[:1])
|
||||
}
|
||||
if len(runes) <= cutoff {
|
||||
return s
|
||||
}
|
||||
return string(runes[:cutoff]) + "..."
|
||||
}
|
||||
|
||||
// sanitizeOutput removes ANSI escape sequences and control characters.
|
||||
func sanitizeOutput(s string) string {
|
||||
return utils.SanitizeOutput(s)
|
||||
var result strings.Builder
|
||||
inEscape := false
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] == '\x1b' && i+1 < len(s) && s[i+1] == '[' {
|
||||
inEscape = true
|
||||
i++ // skip '['
|
||||
continue
|
||||
}
|
||||
if inEscape {
|
||||
if (s[i] >= 'A' && s[i] <= 'Z') || (s[i] >= 'a' && s[i] <= 'z') {
|
||||
inEscape = false
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Keep printable chars and common whitespace.
|
||||
if s[i] >= 32 || s[i] == '\n' || s[i] == '\t' {
|
||||
result.WriteByte(s[i])
|
||||
}
|
||||
}
|
||||
return result.String()
|
||||
}
|
||||
|
||||
func min(a, b int) int {
|
||||
return utils.Min(a, b)
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func hello() string {
|
||||
@@ -227,6 +273,30 @@ func farewell(name string) string {
|
||||
return "goodbye " + name
|
||||
}
|
||||
|
||||
// extractMessageSummary extracts a brief summary from task output
|
||||
// Returns first meaningful line or truncated content up to maxLen chars
|
||||
func extractMessageSummary(message string, maxLen int) string {
|
||||
if message == "" || maxLen <= 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Try to find a meaningful summary line
|
||||
lines := strings.Split(message, "\n")
|
||||
for _, line := range lines {
|
||||
line = strings.TrimSpace(line)
|
||||
// Skip empty lines and common noise
|
||||
if line == "" || strings.HasPrefix(line, "```") || strings.HasPrefix(line, "---") {
|
||||
continue
|
||||
}
|
||||
// Found a meaningful line
|
||||
return safeTruncate(line, maxLen)
|
||||
}
|
||||
|
||||
// Fallback: truncate entire message
|
||||
clean := strings.TrimSpace(message)
|
||||
return safeTruncate(clean, maxLen)
|
||||
}
|
||||
|
||||
// extractCoverageFromLines extracts coverage from pre-split lines.
|
||||
func extractCoverageFromLines(lines []string) string {
|
||||
if len(lines) == 0 {
|
||||
@@ -335,7 +405,7 @@ func extractFilesChangedFromLines(lines []string) []string {
|
||||
for _, prefix := range []string{"Modified:", "Created:", "Updated:", "Edited:", "Wrote:", "Changed:"} {
|
||||
if strings.HasPrefix(line, prefix) {
|
||||
file := strings.TrimSpace(strings.TrimPrefix(line, prefix))
|
||||
file = strings.Trim(file, "`\"'()[],:")
|
||||
file = strings.Trim(file, "`,\"'()[],:")
|
||||
file = strings.TrimPrefix(file, "@")
|
||||
if file != "" && !seen[file] {
|
||||
files = append(files, file)
|
||||
@@ -352,7 +422,7 @@ func extractFilesChangedFromLines(lines []string) []string {
|
||||
// Pattern 2: Tokens that look like file paths (allow root files, strip @ prefix).
|
||||
parts := strings.Fields(line)
|
||||
for _, part := range parts {
|
||||
part = strings.Trim(part, "`\"'()[],:")
|
||||
part = strings.Trim(part, "`,\"'()[],:")
|
||||
part = strings.TrimPrefix(part, "@")
|
||||
for _, ext := range exts {
|
||||
if strings.HasSuffix(part, ext) && !seen[part] {
|
||||
@@ -521,3 +591,125 @@ func extractKeyOutputFromLines(lines []string, maxLen int) string {
|
||||
clean := strings.TrimSpace(strings.Join(lines, "\n"))
|
||||
return safeTruncate(clean, maxLen)
|
||||
}
|
||||
|
||||
// extractKeyOutput extracts a brief summary of what the task accomplished
|
||||
// Looks for summary lines, first meaningful sentence, or truncates message
|
||||
func extractKeyOutput(message string, maxLen int) string {
|
||||
if message == "" || maxLen <= 0 {
|
||||
return ""
|
||||
}
|
||||
return extractKeyOutputFromLines(strings.Split(message, "\n"), maxLen)
|
||||
}
|
||||
|
||||
// extractCoverageGap extracts what's missing from coverage reports
|
||||
// Looks for uncovered lines, branches, or functions
|
||||
func extractCoverageGap(message string) string {
|
||||
if message == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
lower := strings.ToLower(message)
|
||||
lines := strings.Split(message, "\n")
|
||||
|
||||
// Look for uncovered/missing patterns
|
||||
for _, line := range lines {
|
||||
lineLower := strings.ToLower(line)
|
||||
line = strings.TrimSpace(line)
|
||||
|
||||
// Common patterns for uncovered code
|
||||
if strings.Contains(lineLower, "uncovered") ||
|
||||
strings.Contains(lineLower, "not covered") ||
|
||||
strings.Contains(lineLower, "missing coverage") ||
|
||||
strings.Contains(lineLower, "lines not covered") {
|
||||
if len(line) > 100 {
|
||||
return line[:97] + "..."
|
||||
}
|
||||
return line
|
||||
}
|
||||
|
||||
// Look for specific file:line patterns in coverage reports
|
||||
if strings.Contains(lineLower, "branch") && strings.Contains(lineLower, "not taken") {
|
||||
if len(line) > 100 {
|
||||
return line[:97] + "..."
|
||||
}
|
||||
return line
|
||||
}
|
||||
}
|
||||
|
||||
// Look for function names that aren't covered
|
||||
if strings.Contains(lower, "function") && strings.Contains(lower, "0%") {
|
||||
for _, line := range lines {
|
||||
if strings.Contains(strings.ToLower(line), "0%") && strings.Contains(line, "function") {
|
||||
line = strings.TrimSpace(line)
|
||||
if len(line) > 100 {
|
||||
return line[:97] + "..."
|
||||
}
|
||||
return line
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// extractErrorDetail extracts meaningful error context from task output
|
||||
// Returns the most relevant error information up to maxLen characters
|
||||
func extractErrorDetail(message string, maxLen int) string {
|
||||
if message == "" || maxLen <= 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
lines := strings.Split(message, "\n")
|
||||
var errorLines []string
|
||||
|
||||
// Look for error-related lines
|
||||
for _, line := range lines {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
lower := strings.ToLower(line)
|
||||
|
||||
// Skip noise lines
|
||||
if strings.HasPrefix(line, "at ") && strings.Contains(line, "(") {
|
||||
// Stack trace line - only keep first one
|
||||
if len(errorLines) > 0 && strings.HasPrefix(strings.ToLower(errorLines[len(errorLines)-1]), "at ") {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Prioritize error/fail lines
|
||||
if strings.Contains(lower, "error") ||
|
||||
strings.Contains(lower, "fail") ||
|
||||
strings.Contains(lower, "exception") ||
|
||||
strings.Contains(lower, "assert") ||
|
||||
strings.Contains(lower, "expected") ||
|
||||
strings.Contains(lower, "timeout") ||
|
||||
strings.Contains(lower, "not found") ||
|
||||
strings.Contains(lower, "cannot") ||
|
||||
strings.Contains(lower, "undefined") ||
|
||||
strings.HasPrefix(line, "FAIL") ||
|
||||
strings.HasPrefix(line, "●") {
|
||||
errorLines = append(errorLines, line)
|
||||
}
|
||||
}
|
||||
|
||||
if len(errorLines) == 0 {
|
||||
// No specific error lines found, take last few lines
|
||||
start := len(lines) - 5
|
||||
if start < 0 {
|
||||
start = 0
|
||||
}
|
||||
for _, line := range lines[start:] {
|
||||
line = strings.TrimSpace(line)
|
||||
if line != "" {
|
||||
errorLines = append(errorLines, line)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Join and truncate
|
||||
result := strings.Join(errorLines, " | ")
|
||||
return safeTruncate(result, maxLen)
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package wrapper
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
codeagent-wrapper/wrapper_name.go (new file, 126 lines)
@@ -0,0 +1,126 @@
package main

import (
	"os"
	"path/filepath"
	"strings"
)

const (
	defaultWrapperName = "codeagent-wrapper"
	legacyWrapperName  = "codex-wrapper"
)

var executablePathFn = os.Executable

func normalizeWrapperName(path string) string {
	if path == "" {
		return ""
	}

	base := filepath.Base(path)
	base = strings.TrimSuffix(base, ".exe") // tolerate Windows executables

	switch base {
	case defaultWrapperName, legacyWrapperName:
		return base
	default:
		return ""
	}
}

// currentWrapperName resolves the wrapper name based on the invoked binary.
// Only known names are honored to avoid leaking build/test binary names into logs.
func currentWrapperName() string {
	if len(os.Args) == 0 {
		return defaultWrapperName
	}

	if name := normalizeWrapperName(os.Args[0]); name != "" {
		return name
	}

	execPath, err := executablePathFn()
	if err == nil {
		if name := normalizeWrapperName(execPath); name != "" {
			return name
		}

		if resolved, err := filepath.EvalSymlinks(execPath); err == nil {
			if name := normalizeWrapperName(resolved); name != "" {
				return name
			}
			if alias := resolveAlias(execPath, resolved); alias != "" {
				return alias
			}
		}

		if alias := resolveAlias(execPath, ""); alias != "" {
			return alias
		}
	}

	return defaultWrapperName
}

// logPrefixes returns the set of accepted log name prefixes, including the
// current wrapper name and legacy aliases.
func logPrefixes() []string {
	prefixes := []string{currentWrapperName(), defaultWrapperName, legacyWrapperName}
	seen := make(map[string]struct{}, len(prefixes))
	var unique []string
	for _, prefix := range prefixes {
		if prefix == "" {
			continue
		}
		if _, ok := seen[prefix]; ok {
			continue
		}
		seen[prefix] = struct{}{}
		unique = append(unique, prefix)
	}
	return unique
}

// primaryLogPrefix returns the preferred filename prefix for log files.
// Defaults to the current wrapper name when available, otherwise falls back
// to the canonical default name.
func primaryLogPrefix() string {
	prefixes := logPrefixes()
	if len(prefixes) == 0 {
		return defaultWrapperName
	}
	return prefixes[0]
}

func resolveAlias(execPath string, target string) string {
	if execPath == "" {
		return ""
	}

	dir := filepath.Dir(execPath)
	for _, candidate := range []string{defaultWrapperName, legacyWrapperName} {
		aliasPath := filepath.Join(dir, candidate)
		info, err := os.Lstat(aliasPath)
		if err != nil {
			continue
		}
		if info.Mode()&os.ModeSymlink == 0 {
			continue
		}

		resolved, err := filepath.EvalSymlinks(aliasPath)
		if err != nil {
			continue
		}
		if target != "" && resolved != target {
			continue
		}

		if name := normalizeWrapperName(aliasPath); name != "" {
			return name
		}
	}

	return ""
}
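A brief usage sketch for the naming helpers above, assuming a `<prefix>-<pid>.log` filename pattern; the directory layout, `newLogPath`, and `isWrapperLog` are hypothetical, not taken from the wrapper itself:

```go
// Sketch only: naming a new log file with primaryLogPrefix() and accepting
// existing logs that match any known prefix (current, default, or legacy).
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func newLogPath(dir string) string {
	return filepath.Join(dir, fmt.Sprintf("%s-%d.log", primaryLogPrefix(), os.Getpid()))
}

func isWrapperLog(name string) bool {
	for _, prefix := range logPrefixes() {
		if strings.HasPrefix(name, prefix+"-") {
			return true
		}
	}
	return false
}
```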
codeagent-wrapper/wrapper_name_test.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package main

import (
	"os"
	"path/filepath"
	"testing"
)

func TestCurrentWrapperNameFallsBackToExecutable(t *testing.T) {
	defer resetTestHooks()

	tempDir := t.TempDir()
	execPath := filepath.Join(tempDir, "codeagent-wrapper")
	if err := os.WriteFile(execPath, []byte("#!/bin/true\n"), 0o755); err != nil {
		t.Fatalf("failed to write fake binary: %v", err)
	}

	os.Args = []string{filepath.Join(tempDir, "custom-name")}
	executablePathFn = func() (string, error) {
		return execPath, nil
	}

	if got := currentWrapperName(); got != defaultWrapperName {
		t.Fatalf("currentWrapperName() = %q, want %q", got, defaultWrapperName)
	}
}

func TestCurrentWrapperNameDetectsLegacyAliasSymlink(t *testing.T) {
	defer resetTestHooks()

	tempDir := t.TempDir()
	execPath := filepath.Join(tempDir, "wrapper")
	aliasPath := filepath.Join(tempDir, legacyWrapperName)

	if err := os.WriteFile(execPath, []byte("#!/bin/true\n"), 0o755); err != nil {
		t.Fatalf("failed to write fake binary: %v", err)
	}
	if err := os.Symlink(execPath, aliasPath); err != nil {
		t.Fatalf("failed to create alias: %v", err)
	}

	os.Args = []string{filepath.Join(tempDir, "unknown-runner")}
	executablePathFn = func() (string, error) {
		return execPath, nil
	}

	if got := currentWrapperName(); got != legacyWrapperName {
		t.Fatalf("currentWrapperName() = %q, want %q", got, legacyWrapperName)
	}
}
100
config.json
100
config.json
@@ -93,7 +93,7 @@
|
||||
]
|
||||
},
|
||||
"essentials": {
|
||||
"enabled": false,
|
||||
"enabled": true,
|
||||
"description": "Core development commands and utilities",
|
||||
"operations": [
|
||||
{
|
||||
@@ -108,104 +108,6 @@
|
||||
"description": "Copy development commands documentation"
|
||||
}
|
||||
]
|
||||
},
|
||||
"omo": {
|
||||
"enabled": false,
|
||||
"description": "OmO multi-agent orchestration with Sisyphus coordinator",
|
||||
"operations": [
|
||||
{
|
||||
"type": "copy_file",
|
||||
"source": "skills/omo/SKILL.md",
|
||||
"target": "skills/omo/SKILL.md",
|
||||
"description": "Install omo skill"
|
||||
},
|
||||
{
|
||||
"type": "copy_file",
|
||||
"source": "skills/omo/references/oracle.md",
|
||||
"target": "skills/omo/references/oracle.md",
|
||||
"description": "Install oracle agent prompt"
|
||||
},
|
||||
{
|
||||
"type": "copy_file",
|
||||
"source": "skills/omo/references/librarian.md",
|
||||
"target": "skills/omo/references/librarian.md",
|
||||
"description": "Install librarian agent prompt"
|
||||
},
|
||||
{
|
||||
"type": "copy_file",
|
||||
"source": "skills/omo/references/explore.md",
|
||||
"target": "skills/omo/references/explore.md",
|
||||
"description": "Install explore agent prompt"
|
||||
},
|
||||
{
|
||||
"type": "copy_file",
|
||||
"source": "skills/omo/references/frontend-ui-ux-engineer.md",
|
||||
"target": "skills/omo/references/frontend-ui-ux-engineer.md",
|
||||
"description": "Install frontend-ui-ux-engineer agent prompt"
|
||||
},
|
||||
{
|
||||
"type": "copy_file",
|
||||
"source": "skills/omo/references/document-writer.md",
|
||||
"target": "skills/omo/references/document-writer.md",
|
||||
"description": "Install document-writer agent prompt"
|
||||
},
|
||||
{
|
||||
"type": "copy_file",
|
||||
"source": "skills/omo/references/develop.md",
|
||||
"target": "skills/omo/references/develop.md",
|
||||
"description": "Install develop agent prompt"
|
||||
}
|
||||
]
|
||||
},
|
||||
"sparv": {
|
||||
"enabled": false,
|
||||
"description": "SPARV workflow (Specify→Plan→Act→Review→Vault) with 10-point gate",
|
||||
"operations": [
|
||||
{
|
||||
"type": "copy_dir",
|
||||
"source": "skills/sparv",
|
||||
"target": "skills/sparv",
|
||||
"description": "Install sparv skill with all scripts and hooks"
|
||||
}
|
||||
]
|
||||
},
|
||||
"course": {
|
||||
"enabled": false,
|
||||
"description": "课程开发工作流,包含 dev、产品需求和测试用例技能",
|
||||
"operations": [
|
||||
{
|
||||
"type": "copy_dir",
|
||||
"source": "skills/dev",
|
||||
"target": "skills/dev",
|
||||
"description": "Install dev skill with agents"
|
||||
},
|
||||
{
|
||||
"type": "copy_file",
|
||||
"source": "skills/product-requirements/SKILL.md",
|
||||
"target": "skills/product-requirements/SKILL.md",
|
||||
"description": "Install product-requirements skill"
|
||||
},
|
||||
{
|
||||
"type": "copy_dir",
|
||||
"source": "skills/test-cases",
|
||||
"target": "skills/test-cases",
|
||||
"description": "Install test-cases skill with references"
|
||||
},
|
||||
{
|
||||
"type": "copy_file",
|
||||
"source": "skills/codeagent/SKILL.md",
|
||||
"target": "skills/codeagent/SKILL.md",
|
||||
"description": "Install codeagent skill"
|
||||
},
|
||||
{
|
||||
"type": "run_command",
|
||||
"command": "bash install.sh",
|
||||
"description": "Install codeagent-wrapper binary",
|
||||
"env": {
|
||||
"INSTALL_DIR": "${install_dir}"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,9 +0,0 @@
{
  "name": "dev",
  "description": "Lightweight development workflow with requirements clarification, parallel codex execution, and mandatory 90% test coverage",
  "version": "5.6.1",
  "author": {
    "name": "cexll",
    "email": "cexll@cexll.com"
  }
}
44
development-essentials/.claude-plugin/marketplace.json
Normal file
44
development-essentials/.claude-plugin/marketplace.json
Normal file
@@ -0,0 +1,44 @@
|
||||
{
|
||||
"name": "development-essentials",
|
||||
"source": "./",
|
||||
"description": "Essential development commands for coding, debugging, testing, optimization, and documentation",
|
||||
"version": "1.0.0",
|
||||
"author": {
|
||||
"name": "Claude Code Dev Workflows",
|
||||
"url": "https://github.com/cexll/myclaude"
|
||||
},
|
||||
"homepage": "https://github.com/cexll/myclaude",
|
||||
"repository": "https://github.com/cexll/myclaude",
|
||||
"license": "MIT",
|
||||
"keywords": [
|
||||
"code",
|
||||
"debug",
|
||||
"test",
|
||||
"optimize",
|
||||
"review",
|
||||
"bugfix",
|
||||
"refactor",
|
||||
"documentation"
|
||||
],
|
||||
"category": "essentials",
|
||||
"strict": false,
|
||||
"commands": [
|
||||
"./commands/code.md",
|
||||
"./commands/debug.md",
|
||||
"./commands/test.md",
|
||||
"./commands/optimize.md",
|
||||
"./commands/review.md",
|
||||
"./commands/bugfix.md",
|
||||
"./commands/refactor.md",
|
||||
"./commands/docs.md",
|
||||
"./commands/ask.md",
|
||||
"./commands/think.md"
|
||||
],
|
||||
"agents": [
|
||||
"./agents/code.md",
|
||||
"./agents/bugfix.md",
|
||||
"./agents/bugfix-verify.md",
|
||||
"./agents/optimize.md",
|
||||
"./agents/debug.md"
|
||||
]
|
||||
}
|
||||
@@ -1,9 +0,0 @@
{
  "name": "essentials",
  "description": "Essential development commands for coding, debugging, testing, optimization, and documentation",
  "version": "5.6.1",
  "author": {
    "name": "cexll",
    "email": "cexll@cexll.com"
  }
}
@@ -322,8 +322,6 @@ Error: dependency backend_1701234567 failed

| Variable | Default | Description |
|----------|---------|-------------|
| `CODEX_TIMEOUT` | 7200000 | Timeout in milliseconds |
| `CODEX_BYPASS_SANDBOX` | true | Bypass Codex sandbox/approval. Set `false` to disable |
| `CODEAGENT_SKIP_PERMISSIONS` | true | Skip Claude permission prompts. Set `false` to disable |

## Troubleshooting

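A hedged sketch of how a caller could honor the variables in the table above; the wrapper's actual parsing and defaults handling may differ, and the function names are illustrative only:

```go
// Sketch only: reading the documented environment variables with their defaults.
package main

import (
	"os"
	"strconv"
	"time"
)

func timeoutFromEnv() time.Duration {
	const defaultMs = 7200000 // documented default
	ms := defaultMs
	if v := os.Getenv("CODEX_TIMEOUT"); v != "" {
		if parsed, err := strconv.Atoi(v); err == nil && parsed > 0 {
			ms = parsed
		}
	}
	return time.Duration(ms) * time.Millisecond
}

func bypassSandbox() bool {
	// Defaults to true; only an explicit "false" disables it.
	return os.Getenv("CODEX_BYPASS_SANDBOX") != "false"
}
```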
15
install.bat
15
install.bat
@@ -117,18 +117,11 @@ if "!ALREADY_IN_USERPATH!"=="1" (
|
||||
set "USER_PATH_NEW=!PCT!USERPROFILE!PCT!\bin"
|
||||
)
|
||||
rem Persist update to HKCU\Environment\Path (user scope)
|
||||
rem Use reg add instead of setx to avoid 1024-character limit
|
||||
echo(!USER_PATH_NEW! | findstr /C:"\"" /C:"!" >nul
|
||||
if not errorlevel 1 (
|
||||
echo WARNING: Your PATH contains quotes or exclamation marks that may cause issues.
|
||||
echo Skipping automatic PATH update. Please add %%USERPROFILE%%\bin to your PATH manually.
|
||||
setx Path "!USER_PATH_NEW!" >nul
|
||||
if errorlevel 1 (
|
||||
echo WARNING: Failed to append %%USERPROFILE%%\bin to your user PATH.
|
||||
) else (
|
||||
reg add "HKCU\Environment" /v Path /t REG_EXPAND_SZ /d "!USER_PATH_NEW!" /f >nul
|
||||
if errorlevel 1 (
|
||||
echo WARNING: Failed to append %%USERPROFILE%%\bin to your user PATH.
|
||||
) else (
|
||||
echo Added %%USERPROFILE%%\bin to your user PATH.
|
||||
)
|
||||
echo Added %%USERPROFILE%%\bin to your user PATH.
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
543
install.py
543
install.py
@@ -23,7 +23,6 @@ except ImportError: # pragma: no cover
|
||||
jsonschema = None
|
||||
|
||||
DEFAULT_INSTALL_DIR = "~/.claude"
|
||||
SETTINGS_FILE = "settings.json"
|
||||
|
||||
|
||||
def _ensure_list(ctx: Dict[str, Any], key: str) -> List[Any]:
|
||||
@@ -47,7 +46,7 @@ def parse_args(argv: Optional[Iterable[str]] = None) -> argparse.Namespace:
|
||||
)
|
||||
parser.add_argument(
|
||||
"--module",
|
||||
help="Comma-separated modules to install/uninstall, or 'all'",
|
||||
help="Comma-separated modules to install, or 'all' for all enabled",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--config",
|
||||
@@ -59,16 +58,6 @@ def parse_args(argv: Optional[Iterable[str]] = None) -> argparse.Namespace:
|
||||
action="store_true",
|
||||
help="List available modules and exit",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--status",
|
||||
action="store_true",
|
||||
help="Show installation status of all modules",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--uninstall",
|
||||
action="store_true",
|
||||
help="Uninstall specified modules",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--force",
|
||||
action="store_true",
|
||||
@@ -92,132 +81,6 @@ def _load_json(path: Path) -> Any:
|
||||
raise ValueError(f"Invalid JSON in {path}: {exc}") from exc
|
||||
|
||||
|
||||
def _save_json(path: Path, data: Any) -> None:
|
||||
"""Save data to JSON file with proper formatting."""
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
with path.open("w", encoding="utf-8") as fh:
|
||||
json.dump(data, fh, indent=2, ensure_ascii=False)
|
||||
fh.write("\n")
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Hooks Management
|
||||
# =============================================================================
|
||||
|
||||
def load_settings(ctx: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Load settings.json from install directory."""
|
||||
settings_path = ctx["install_dir"] / SETTINGS_FILE
|
||||
if settings_path.exists():
|
||||
try:
|
||||
return _load_json(settings_path)
|
||||
except (ValueError, FileNotFoundError):
|
||||
return {}
|
||||
return {}
|
||||
|
||||
|
||||
def save_settings(ctx: Dict[str, Any], settings: Dict[str, Any]) -> None:
|
||||
"""Save settings.json to install directory."""
|
||||
settings_path = ctx["install_dir"] / SETTINGS_FILE
|
||||
_save_json(settings_path, settings)
|
||||
|
||||
|
||||
def find_module_hooks(module_name: str, cfg: Dict[str, Any], ctx: Dict[str, Any]) -> Optional[Dict[str, Any]]:
|
||||
"""Find hooks.json for a module if it exists."""
|
||||
# Check for hooks in operations (copy_dir targets)
|
||||
for op in cfg.get("operations", []):
|
||||
if op.get("type") == "copy_dir":
|
||||
target_dir = ctx["install_dir"] / op["target"]
|
||||
hooks_file = target_dir / "hooks" / "hooks.json"
|
||||
if hooks_file.exists():
|
||||
try:
|
||||
return _load_json(hooks_file)
|
||||
except (ValueError, FileNotFoundError):
|
||||
pass
|
||||
|
||||
# Also check source directory during install
|
||||
for op in cfg.get("operations", []):
|
||||
if op.get("type") == "copy_dir":
|
||||
source_dir = ctx["config_dir"] / op["source"]
|
||||
hooks_file = source_dir / "hooks" / "hooks.json"
|
||||
if hooks_file.exists():
|
||||
try:
|
||||
return _load_json(hooks_file)
|
||||
except (ValueError, FileNotFoundError):
|
||||
pass
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def _create_hook_marker(module_name: str) -> str:
|
||||
"""Create a marker to identify hooks from a specific module."""
|
||||
return f"__module:{module_name}__"
|
||||
|
||||
|
||||
def merge_hooks_to_settings(module_name: str, hooks_config: Dict[str, Any], ctx: Dict[str, Any]) -> None:
|
||||
"""Merge module hooks into settings.json."""
|
||||
settings = load_settings(ctx)
|
||||
settings.setdefault("hooks", {})
|
||||
|
||||
module_hooks = hooks_config.get("hooks", {})
|
||||
marker = _create_hook_marker(module_name)
|
||||
|
||||
for hook_type, hook_entries in module_hooks.items():
|
||||
settings["hooks"].setdefault(hook_type, [])
|
||||
|
||||
for entry in hook_entries:
|
||||
# Add marker to identify this hook's source module
|
||||
entry_copy = dict(entry)
|
||||
entry_copy["__module__"] = module_name
|
||||
|
||||
# Check if already exists (avoid duplicates)
|
||||
exists = False
|
||||
for existing in settings["hooks"][hook_type]:
|
||||
if existing.get("__module__") == module_name:
|
||||
# Same module, check if same hook
|
||||
if _hooks_equal(existing, entry_copy):
|
||||
exists = True
|
||||
break
|
||||
|
||||
if not exists:
|
||||
settings["hooks"][hook_type].append(entry_copy)
|
||||
|
||||
save_settings(ctx, settings)
|
||||
write_log({"level": "INFO", "message": f"Merged hooks for module: {module_name}"}, ctx)
|
||||
|
||||
|
||||
def unmerge_hooks_from_settings(module_name: str, ctx: Dict[str, Any]) -> None:
|
||||
"""Remove module hooks from settings.json."""
|
||||
settings = load_settings(ctx)
|
||||
|
||||
if "hooks" not in settings:
|
||||
return
|
||||
|
||||
modified = False
|
||||
for hook_type in list(settings["hooks"].keys()):
|
||||
original_len = len(settings["hooks"][hook_type])
|
||||
settings["hooks"][hook_type] = [
|
||||
entry for entry in settings["hooks"][hook_type]
|
||||
if entry.get("__module__") != module_name
|
||||
]
|
||||
if len(settings["hooks"][hook_type]) < original_len:
|
||||
modified = True
|
||||
|
||||
# Remove empty hook type arrays
|
||||
if not settings["hooks"][hook_type]:
|
||||
del settings["hooks"][hook_type]
|
||||
|
||||
if modified:
|
||||
save_settings(ctx, settings)
|
||||
write_log({"level": "INFO", "message": f"Removed hooks for module: {module_name}"}, ctx)
|
||||
|
||||
|
||||
def _hooks_equal(hook1: Dict[str, Any], hook2: Dict[str, Any]) -> bool:
|
||||
"""Compare two hooks ignoring the __module__ marker."""
|
||||
h1 = {k: v for k, v in hook1.items() if k != "__module__"}
|
||||
h2 = {k: v for k, v in hook2.items() if k != "__module__"}
|
||||
return h1 == h2
|
||||
|
||||
|
||||
def load_config(path: str) -> Dict[str, Any]:
|
||||
"""Load config and validate against JSON Schema.
|
||||
|
||||
@@ -303,93 +166,22 @@ def resolve_paths(config: Dict[str, Any], args: argparse.Namespace) -> Dict[str,
|
||||
|
||||
def list_modules(config: Dict[str, Any]) -> None:
|
||||
print("Available Modules:")
|
||||
print(f"{'#':<3} {'Name':<15} {'Default':<8} Description")
|
||||
print("-" * 65)
|
||||
for idx, (name, cfg) in enumerate(config.get("modules", {}).items(), 1):
|
||||
print(f"{'Name':<15} {'Default':<8} Description")
|
||||
print("-" * 60)
|
||||
for name, cfg in config.get("modules", {}).items():
|
||||
default = "✓" if cfg.get("enabled", False) else "✗"
|
||||
desc = cfg.get("description", "")
|
||||
print(f"{idx:<3} {name:<15} {default:<8} {desc}")
|
||||
print(f"{name:<15} {default:<8} {desc}")
|
||||
print("\n✓ = installed by default when no --module specified")
|
||||
|
||||
|
||||
def load_installed_status(ctx: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Load installed modules status from status file."""
|
||||
status_path = Path(ctx["status_file"])
|
||||
if status_path.exists():
|
||||
try:
|
||||
return _load_json(status_path)
|
||||
except (ValueError, FileNotFoundError):
|
||||
return {"modules": {}}
|
||||
return {"modules": {}}
|
||||
|
||||
|
||||
def check_module_installed(name: str, cfg: Dict[str, Any], ctx: Dict[str, Any]) -> bool:
|
||||
"""Check if a module is installed by verifying its files exist."""
|
||||
install_dir = ctx["install_dir"]
|
||||
|
||||
for op in cfg.get("operations", []):
|
||||
op_type = op.get("type")
|
||||
if op_type in ("copy_dir", "copy_file"):
|
||||
target = (install_dir / op["target"]).expanduser().resolve()
|
||||
if target.exists():
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def get_installed_modules(config: Dict[str, Any], ctx: Dict[str, Any]) -> Dict[str, bool]:
|
||||
"""Get installation status of all modules by checking files."""
|
||||
result = {}
|
||||
modules = config.get("modules", {})
|
||||
|
||||
# First check status file
|
||||
status = load_installed_status(ctx)
|
||||
status_modules = status.get("modules", {})
|
||||
|
||||
for name, cfg in modules.items():
|
||||
# Check both status file and filesystem
|
||||
in_status = name in status_modules
|
||||
files_exist = check_module_installed(name, cfg, ctx)
|
||||
result[name] = in_status or files_exist
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def list_modules_with_status(config: Dict[str, Any], ctx: Dict[str, Any]) -> None:
|
||||
"""List modules with installation status."""
|
||||
installed_status = get_installed_modules(config, ctx)
|
||||
status_data = load_installed_status(ctx)
|
||||
status_modules = status_data.get("modules", {})
|
||||
|
||||
print("\n" + "=" * 70)
|
||||
print("Module Status")
|
||||
print("=" * 70)
|
||||
print(f"{'#':<3} {'Name':<15} {'Status':<15} {'Installed At':<20} Description")
|
||||
print("-" * 70)
|
||||
|
||||
for idx, (name, cfg) in enumerate(config.get("modules", {}).items(), 1):
|
||||
desc = cfg.get("description", "")[:25]
|
||||
if installed_status.get(name, False):
|
||||
status = "✅ Installed"
|
||||
installed_at = status_modules.get(name, {}).get("installed_at", "")[:16]
|
||||
else:
|
||||
status = "⬚ Not installed"
|
||||
installed_at = ""
|
||||
print(f"{idx:<3} {name:<15} {status:<15} {installed_at:<20} {desc}")
|
||||
|
||||
total = len(config.get("modules", {}))
|
||||
installed_count = sum(1 for v in installed_status.values() if v)
|
||||
print(f"\nTotal: {installed_count}/{total} modules installed")
|
||||
print(f"Install dir: {ctx['install_dir']}")
|
||||
|
||||
|
||||
def select_modules(config: Dict[str, Any], module_arg: Optional[str]) -> Dict[str, Any]:
|
||||
modules = config.get("modules", {})
|
||||
if not module_arg:
|
||||
# No --module specified: show interactive selection
|
||||
return interactive_select_modules(config)
|
||||
return {k: v for k, v in modules.items() if v.get("enabled", False)}
|
||||
|
||||
if module_arg.strip().lower() == "all":
|
||||
return dict(modules.items())
|
||||
return {k: v for k, v in modules.items() if v.get("enabled", False)}
|
||||
|
||||
selected: Dict[str, Any] = {}
|
||||
for name in (part.strip() for part in module_arg.split(",")):
|
||||
@@ -401,263 +193,6 @@ def select_modules(config: Dict[str, Any], module_arg: Optional[str]) -> Dict[st
|
||||
return selected
|
||||
|
||||
|
||||
def interactive_select_modules(config: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Interactive module selection when no --module is specified."""
|
||||
modules = config.get("modules", {})
|
||||
module_names = list(modules.keys())
|
||||
|
||||
print("\n" + "=" * 65)
|
||||
print("Welcome to Claude Plugin Installer")
|
||||
print("=" * 65)
|
||||
print("\nNo modules specified. Please select modules to install:\n")
|
||||
|
||||
list_modules(config)
|
||||
|
||||
print("\nEnter module numbers or names (comma-separated), or:")
|
||||
print(" 'all' - Install all modules")
|
||||
print(" 'q' - Quit without installing")
|
||||
print()
|
||||
|
||||
while True:
|
||||
try:
|
||||
user_input = input("Select modules: ").strip()
|
||||
except (EOFError, KeyboardInterrupt):
|
||||
print("\nInstallation cancelled.")
|
||||
sys.exit(0)
|
||||
|
||||
if not user_input:
|
||||
print("No input. Please enter module numbers, names, 'all', or 'q'.")
|
||||
continue
|
||||
|
||||
if user_input.lower() == "q":
|
||||
print("Installation cancelled.")
|
||||
sys.exit(0)
|
||||
|
||||
if user_input.lower() == "all":
|
||||
print(f"\nSelected all {len(modules)} modules.")
|
||||
return dict(modules.items())
|
||||
|
||||
# Parse selection
|
||||
selected: Dict[str, Any] = {}
|
||||
parts = [p.strip() for p in user_input.replace(" ", ",").split(",") if p.strip()]
|
||||
|
||||
try:
|
||||
for part in parts:
|
||||
# Try as number first
|
||||
if part.isdigit():
|
||||
idx = int(part) - 1
|
||||
if 0 <= idx < len(module_names):
|
||||
name = module_names[idx]
|
||||
selected[name] = modules[name]
|
||||
else:
|
||||
print(f"Invalid number: {part}. Valid range: 1-{len(module_names)}")
|
||||
selected = {}
|
||||
break
|
||||
# Try as name
|
||||
elif part in modules:
|
||||
selected[part] = modules[part]
|
||||
else:
|
||||
print(f"Module not found: '{part}'")
|
||||
selected = {}
|
||||
break
|
||||
|
||||
if selected:
|
||||
names = ", ".join(selected.keys())
|
||||
print(f"\nSelected {len(selected)} module(s): {names}")
|
||||
return selected
|
||||
|
||||
except ValueError:
|
||||
print("Invalid input. Please try again.")
|
||||
continue
|
||||
|
||||
|
||||
def uninstall_module(name: str, cfg: Dict[str, Any], ctx: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Uninstall a module by removing its files and hooks."""
|
||||
result: Dict[str, Any] = {
|
||||
"module": name,
|
||||
"status": "success",
|
||||
"uninstalled_at": datetime.now().isoformat(),
|
||||
}
|
||||
|
||||
install_dir = ctx["install_dir"]
|
||||
removed_paths = []
|
||||
|
||||
for op in cfg.get("operations", []):
|
||||
op_type = op.get("type")
|
||||
try:
|
||||
if op_type in ("copy_dir", "copy_file"):
|
||||
target = (install_dir / op["target"]).expanduser().resolve()
|
||||
if target.exists():
|
||||
if target.is_dir():
|
||||
shutil.rmtree(target)
|
||||
else:
|
||||
target.unlink()
|
||||
removed_paths.append(str(target))
|
||||
write_log({"level": "INFO", "message": f"Removed: {target}"}, ctx)
|
||||
# merge_dir and merge_json are harder to uninstall cleanly, skip
|
||||
except Exception as exc:
|
||||
write_log({"level": "WARNING", "message": f"Failed to remove {op.get('target', 'unknown')}: {exc}"}, ctx)
|
||||
|
||||
# Remove module hooks from settings.json
|
||||
try:
|
||||
unmerge_hooks_from_settings(name, ctx)
|
||||
result["hooks_removed"] = True
|
||||
except Exception as exc:
|
||||
write_log({"level": "WARNING", "message": f"Failed to remove hooks for {name}: {exc}"}, ctx)
|
||||
|
||||
result["removed_paths"] = removed_paths
|
||||
return result
|
||||
|
||||
|
||||
def update_status_after_uninstall(uninstalled_modules: List[str], ctx: Dict[str, Any]) -> None:
|
||||
"""Remove uninstalled modules from status file."""
|
||||
status = load_installed_status(ctx)
|
||||
modules = status.get("modules", {})
|
||||
|
||||
for name in uninstalled_modules:
|
||||
if name in modules:
|
||||
del modules[name]
|
||||
|
||||
status["modules"] = modules
|
||||
status["updated_at"] = datetime.now().isoformat()
|
||||
|
||||
status_path = Path(ctx["status_file"])
|
||||
with status_path.open("w", encoding="utf-8") as fh:
|
||||
json.dump(status, fh, indent=2, ensure_ascii=False)
|
||||
|
||||
|
||||
def interactive_manage(config: Dict[str, Any], ctx: Dict[str, Any]) -> int:
|
||||
"""Interactive module management menu."""
|
||||
while True:
|
||||
installed_status = get_installed_modules(config, ctx)
|
||||
modules = config.get("modules", {})
|
||||
module_names = list(modules.keys())
|
||||
|
||||
print("\n" + "=" * 70)
|
||||
print("Claude Plugin Manager")
|
||||
print("=" * 70)
|
||||
print(f"{'#':<3} {'Name':<15} {'Status':<15} Description")
|
||||
print("-" * 70)
|
||||
|
||||
for idx, (name, cfg) in enumerate(modules.items(), 1):
|
||||
desc = cfg.get("description", "")[:30]
|
||||
if installed_status.get(name, False):
|
||||
status = "✅ Installed"
|
||||
else:
|
||||
status = "⬚ Not installed"
|
||||
print(f"{idx:<3} {name:<15} {status:<15} {desc}")
|
||||
|
||||
total = len(modules)
|
||||
installed_count = sum(1 for v in installed_status.values() if v)
|
||||
print(f"\nInstalled: {installed_count}/{total} | Dir: {ctx['install_dir']}")
|
||||
|
||||
print("\nCommands:")
|
||||
print(" i <num/name> - Install module(s)")
|
||||
print(" u <num/name> - Uninstall module(s)")
|
||||
print(" q - Quit")
|
||||
print()
|
||||
|
||||
try:
|
||||
user_input = input("Enter command: ").strip()
|
||||
except (EOFError, KeyboardInterrupt):
|
||||
print("\nExiting.")
|
||||
return 0
|
||||
|
||||
if not user_input:
|
||||
continue
|
||||
|
||||
if user_input.lower() == "q":
|
||||
print("Goodbye!")
|
||||
return 0
|
||||
|
||||
parts = user_input.split(maxsplit=1)
|
||||
cmd = parts[0].lower()
|
||||
args = parts[1] if len(parts) > 1 else ""
|
||||
|
||||
if cmd == "i":
|
||||
# Install
|
||||
selected = _parse_module_selection(args, modules, module_names)
|
||||
if selected:
|
||||
# Filter out already installed
|
||||
to_install = {k: v for k, v in selected.items() if not installed_status.get(k, False)}
|
||||
if not to_install:
|
||||
print("All selected modules are already installed.")
|
||||
continue
|
||||
print(f"\nInstalling: {', '.join(to_install.keys())}")
|
||||
results = []
|
||||
for name, cfg in to_install.items():
|
||||
try:
|
||||
results.append(execute_module(name, cfg, ctx))
|
||||
print(f" ✓ {name} installed")
|
||||
except Exception as exc:
|
||||
print(f" ✗ {name} failed: {exc}")
|
||||
# Update status
|
||||
current_status = load_installed_status(ctx)
|
||||
for r in results:
|
||||
if r.get("status") == "success":
|
||||
current_status.setdefault("modules", {})[r["module"]] = r
|
||||
current_status["updated_at"] = datetime.now().isoformat()
|
||||
with Path(ctx["status_file"]).open("w", encoding="utf-8") as fh:
|
||||
json.dump(current_status, fh, indent=2, ensure_ascii=False)
|
||||
|
||||
elif cmd == "u":
|
||||
# Uninstall
|
||||
selected = _parse_module_selection(args, modules, module_names)
|
||||
if selected:
|
||||
# Filter to only installed ones
|
||||
to_uninstall = {k: v for k, v in selected.items() if installed_status.get(k, False)}
|
||||
if not to_uninstall:
|
||||
print("None of the selected modules are installed.")
|
||||
continue
|
||||
print(f"\nUninstalling: {', '.join(to_uninstall.keys())}")
|
||||
confirm = input("Confirm? (y/N): ").strip().lower()
|
||||
if confirm != "y":
|
||||
print("Cancelled.")
|
||||
continue
|
||||
for name, cfg in to_uninstall.items():
|
||||
try:
|
||||
uninstall_module(name, cfg, ctx)
|
||||
print(f" ✓ {name} uninstalled")
|
||||
except Exception as exc:
|
||||
print(f" ✗ {name} failed: {exc}")
|
||||
update_status_after_uninstall(list(to_uninstall.keys()), ctx)
|
||||
|
||||
else:
|
||||
print(f"Unknown command: {cmd}. Use 'i', 'u', or 'q'.")
|
||||
|
||||
|
||||
def _parse_module_selection(
|
||||
args: str, modules: Dict[str, Any], module_names: List[str]
|
||||
) -> Dict[str, Any]:
|
||||
"""Parse module selection from user input."""
|
||||
if not args:
|
||||
print("Please specify module number(s) or name(s).")
|
||||
return {}
|
||||
|
||||
if args.lower() == "all":
|
||||
return dict(modules.items())
|
||||
|
||||
selected: Dict[str, Any] = {}
|
||||
parts = [p.strip() for p in args.replace(",", " ").split() if p.strip()]
|
||||
|
||||
for part in parts:
|
||||
if part.isdigit():
|
||||
idx = int(part) - 1
|
||||
if 0 <= idx < len(module_names):
|
||||
name = module_names[idx]
|
||||
selected[name] = modules[name]
|
||||
else:
|
||||
print(f"Invalid number: {part}")
|
||||
return {}
|
||||
elif part in modules:
|
||||
selected[part] = modules[part]
|
||||
else:
|
||||
print(f"Module not found: '{part}'")
|
||||
return {}
|
||||
|
||||
return selected
|
||||
|
||||
|
||||
def ensure_install_dir(path: Path) -> None:
|
||||
path = Path(path)
|
||||
if path.exists() and not path.is_dir():
|
||||
@@ -706,17 +241,6 @@ def execute_module(name: str, cfg: Dict[str, Any], ctx: Dict[str, Any]) -> Dict[
|
||||
)
|
||||
raise
|
||||
|
||||
# Handle hooks: find and merge module hooks into settings.json
|
||||
hooks_config = find_module_hooks(name, cfg, ctx)
|
||||
if hooks_config:
|
||||
try:
|
||||
merge_hooks_to_settings(name, hooks_config, ctx)
|
||||
result["operations"].append({"type": "merge_hooks", "status": "success"})
|
||||
result["has_hooks"] = True
|
||||
except Exception as exc:
|
||||
write_log({"level": "WARNING", "message": f"Failed to merge hooks for {name}: {exc}"}, ctx)
|
||||
result["operations"].append({"type": "merge_hooks", "status": "failed", "error": str(exc)})
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@@ -1005,54 +529,10 @@ def main(argv: Optional[Iterable[str]] = None) -> int:
|
||||
|
||||
ctx = resolve_paths(config, args)
|
||||
|
||||
# Handle --list-modules
|
||||
if getattr(args, "list_modules", False):
|
||||
list_modules(config)
|
||||
return 0
|
||||
|
||||
# Handle --status
|
||||
if getattr(args, "status", False):
|
||||
list_modules_with_status(config, ctx)
|
||||
return 0
|
||||
|
||||
# Handle --uninstall
|
||||
if getattr(args, "uninstall", False):
|
||||
if not args.module:
|
||||
print("Error: --uninstall requires --module to specify which modules to uninstall")
|
||||
return 1
|
||||
modules = config.get("modules", {})
|
||||
installed = load_installed_status(ctx)
|
||||
installed_modules = installed.get("modules", {})
|
||||
|
||||
selected = select_modules(config, args.module)
|
||||
to_uninstall = {k: v for k, v in selected.items() if k in installed_modules}
|
||||
|
||||
if not to_uninstall:
|
||||
print("None of the specified modules are installed.")
|
||||
return 0
|
||||
|
||||
print(f"Uninstalling {len(to_uninstall)} module(s): {', '.join(to_uninstall.keys())}")
|
||||
for name, cfg in to_uninstall.items():
|
||||
try:
|
||||
uninstall_module(name, cfg, ctx)
|
||||
print(f" ✓ {name} uninstalled")
|
||||
except Exception as exc:
|
||||
print(f" ✗ {name} failed: {exc}", file=sys.stderr)
|
||||
|
||||
update_status_after_uninstall(list(to_uninstall.keys()), ctx)
|
||||
print(f"\n✓ Uninstall complete")
|
||||
return 0
|
||||
|
||||
# No --module specified: enter interactive management mode
|
||||
if not args.module:
|
||||
try:
|
||||
ensure_install_dir(ctx["install_dir"])
|
||||
except Exception as exc:
|
||||
print(f"Failed to prepare install dir: {exc}", file=sys.stderr)
|
||||
return 1
|
||||
return interactive_manage(config, ctx)
|
||||
|
||||
# Install specified modules
|
||||
modules = select_modules(config, args.module)
|
||||
|
||||
try:
|
||||
@@ -1088,14 +568,7 @@ def main(argv: Optional[Iterable[str]] = None) -> int:
|
||||
)
|
||||
break
|
||||
|
||||
# Merge with existing status
|
||||
current_status = load_installed_status(ctx)
|
||||
for r in results:
|
||||
if r.get("status") == "success":
|
||||
current_status.setdefault("modules", {})[r["module"]] = r
|
||||
current_status["updated_at"] = datetime.now().isoformat()
|
||||
with Path(ctx["status_file"]).open("w", encoding="utf-8") as fh:
|
||||
json.dump(current_status, fh, indent=2, ensure_ascii=False)
|
||||
write_status(results, ctx)
|
||||
|
||||
# Summary
|
||||
success = sum(1 for r in results if r.get("status") == "success")
|
||||
|
||||
29
install.sh
29
install.sh
@@ -53,32 +53,23 @@ if [[ ":${PATH}:" != *":${BIN_DIR}:"* ]]; then
|
||||
echo ""
|
||||
echo "WARNING: ${BIN_DIR} is not in your PATH"
|
||||
|
||||
# Detect shell and set config files
|
||||
# Detect shell config file
|
||||
if [ -n "$ZSH_VERSION" ]; then
|
||||
RC_FILE="$HOME/.zshrc"
|
||||
PROFILE_FILE="$HOME/.zprofile"
|
||||
else
|
||||
RC_FILE="$HOME/.bashrc"
|
||||
PROFILE_FILE="$HOME/.profile"
|
||||
fi
|
||||
|
||||
# Idempotent add: check if complete export statement already exists
|
||||
EXPORT_LINE="export PATH=\"${BIN_DIR}:\$PATH\""
|
||||
FILES_TO_UPDATE=("$RC_FILE" "$PROFILE_FILE")
|
||||
|
||||
for FILE in "${FILES_TO_UPDATE[@]}"; do
|
||||
if [ -f "$FILE" ] && grep -qF "${EXPORT_LINE}" "$FILE" 2>/dev/null; then
|
||||
echo " ${BIN_DIR} already in ${FILE}, skipping."
|
||||
else
|
||||
echo " Adding to ${FILE}..."
|
||||
echo "" >> "$FILE"
|
||||
echo "# Added by myclaude installer" >> "$FILE"
|
||||
echo "${EXPORT_LINE}" >> "$FILE"
|
||||
fi
|
||||
done
|
||||
|
||||
echo " Done. Restart your shell or run:"
|
||||
echo " source ${PROFILE_FILE}"
|
||||
echo " source ${RC_FILE}"
|
||||
if [ -f "$RC_FILE" ] && grep -qF "${EXPORT_LINE}" "$RC_FILE" 2>/dev/null; then
|
||||
echo " ${BIN_DIR} already in ${RC_FILE}, skipping."
|
||||
else
|
||||
echo " Adding to ${RC_FILE}..."
|
||||
echo "" >> "$RC_FILE"
|
||||
echo "# Added by myclaude installer" >> "$RC_FILE"
|
||||
echo "export PATH=\"${BIN_DIR}:\$PATH\"" >> "$RC_FILE"
|
||||
echo " Done. Run 'source ${RC_FILE}' or restart shell."
|
||||
fi
|
||||
echo ""
|
||||
fi
|
||||
|
||||
33
requirements-driven-workflow/.claude-plugin/marketplace.json
Normal file
33
requirements-driven-workflow/.claude-plugin/marketplace.json
Normal file
@@ -0,0 +1,33 @@
|
||||
{
|
||||
"name": "requirements-driven-development",
|
||||
"source": "./",
|
||||
"description": "Streamlined requirements-driven development workflow with 90% quality gates for practical feature implementation",
|
||||
"version": "1.0.0",
|
||||
"author": {
|
||||
"name": "Claude Code Dev Workflows",
|
||||
"url": "https://github.com/cexll/myclaude"
|
||||
},
|
||||
"homepage": "https://github.com/cexll/myclaude",
|
||||
"repository": "https://github.com/cexll/myclaude",
|
||||
"license": "MIT",
|
||||
"keywords": [
|
||||
"requirements",
|
||||
"workflow",
|
||||
"automation",
|
||||
"quality-gates",
|
||||
"feature-development",
|
||||
"agile",
|
||||
"specifications"
|
||||
],
|
||||
"category": "workflows",
|
||||
"strict": false,
|
||||
"commands": [
|
||||
"./commands/requirements-pilot.md"
|
||||
],
|
||||
"agents": [
|
||||
"./agents/requirements-generate.md",
|
||||
"./agents/requirements-code.md",
|
||||
"./agents/requirements-testing.md",
|
||||
"./agents/requirements-review.md"
|
||||
]
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
{
|
||||
"name": "requirements",
|
||||
"description": "Requirements-driven development workflow with quality gates for practical feature implementation",
|
||||
"version": "5.6.1",
|
||||
"author": {
|
||||
"name": "cexll",
|
||||
"email": "cexll@cexll.com"
|
||||
}
|
||||
}
|
||||
@@ -1,73 +0,0 @@
|
||||
---
|
||||
name: browser
|
||||
description: This skill should be used for browser automation tasks using Chrome DevTools Protocol (CDP). Triggers when users need to launch Chrome with remote debugging, navigate pages, execute JavaScript in browser context, capture screenshots, or interactively select DOM elements. No MCP server required.
|
||||
---
|
||||
|
||||
# Browser Automation
|
||||
|
||||
Minimal Chrome DevTools Protocol (CDP) helpers for browser automation without MCP server setup.
|
||||
|
||||
## Setup
|
||||
|
||||
Install dependencies before first use:
|
||||
|
||||
```bash
|
||||
npm install --prefix ~/.claude/skills/browser/browser ws
|
||||
```
|
||||
|
||||
## Scripts
|
||||
|
||||
All scripts connect to Chrome on `localhost:9222`.
|
||||
|
||||
### start.js - Launch Chrome
|
||||
|
||||
```bash
|
||||
scripts/start.js # Fresh profile
|
||||
scripts/start.js --profile # Use persistent profile (keeps cookies/auth)
|
||||
```
|
||||
|
||||
### nav.js - Navigate
|
||||
|
||||
```bash
|
||||
scripts/nav.js https://example.com # Navigate current tab
|
||||
scripts/nav.js https://example.com --new # Open in new tab
|
||||
```
|
||||
|
||||
### eval.js - Execute JavaScript
|
||||
|
||||
```bash
|
||||
scripts/eval.js 'document.title'
|
||||
scripts/eval.js '(() => { const x = 1; return x + 1; })()'
|
||||
```
|
||||
|
||||
Use single expressions or IIFE for multiple statements.
|
||||
|
||||
### screenshot.js - Capture Screenshot
|
||||
|
||||
```bash
|
||||
scripts/screenshot.js
|
||||
```
|
||||
|
||||
Returns `{ path, filename }` of saved PNG in temp directory.
|
||||
|
||||
### pick.js - Visual Element Picker
|
||||
|
||||
```bash
|
||||
scripts/pick.js "Click the submit button"
|
||||
```
|
||||
|
||||
Returns element metadata: tag, id, classes, text, href, selector, rect.
|
||||
|
||||
## Workflow
|
||||
|
||||
1. Launch Chrome: `scripts/start.js --profile` for authenticated sessions
|
||||
2. Navigate: `scripts/nav.js <url>`
|
||||
3. Inspect: `scripts/eval.js 'document.querySelector(...)'`
|
||||
4. Capture: `scripts/screenshot.js` or `scripts/pick.js`
|
||||
5. Return gathered data
|
||||
|
||||
## Key Points
|
||||
|
||||
- All operations run locally - credentials never leave the machine
|
||||
- Use `--profile` flag to preserve cookies and auth tokens
|
||||
- Scripts return structured JSON for agent consumption
|
||||
Binary file not shown.
Some files were not shown because too many files have changed in this diff.