Compare commits

..

3 Commits

Author SHA1 Message Date
swe-agent[bot]
2991a30b35 Simplify workflow: Remove over-engineered v6 implementation
Remove 8 unnecessary command files and verbose documentation:
- Delete workflow-status, code-spec, mini-sprint commands
- Delete state machine commands (draft/approve/context)
- Delete architect-epic and retrospective commands
- Delete V6-WORKFLOW-ANALYSIS.md and V6-FEATURES.md

Total removed: 5,053 lines of complexity

Philosophy: KISS, YAGNI, SOLID
- One entry point (/bmad-pilot) instead of nine
- Intelligence in system, not user choices
- Same power, dramatically less complexity

Add WORKFLOW-SIMPLIFICATION.md explaining the changes.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-10-20 14:01:20 +00:00
swe-agent[bot]
e3e0b9776b Implement v6 BMAD-METHOD workflow features
Comprehensive implementation of all v6 workflow improvements:

Phase 1 - Quick Wins:
- /workflow-status: Universal entry point with complexity detection (Level 0-4)
- /bmad-sm-context: Story context injection (70-80% token reduction)
- /bmad-retrospective: Epic learnings capture

Phase 2 - Core Improvements:
- /code-spec: Level 0-1 fast path (< 1 day projects)
- /mini-sprint: Level 1-2 medium path (1-2 week projects)
- Story state machine: BACKLOG → TODO → IN PROGRESS → DONE
- /bmad-sm-draft-story: Create detailed story drafts
- /bmad-sm-approve-story: User approval gate before development

Phase 3 - Architectural Changes:
- /bmad-architect-epic: JIT (Just-In-Time) architecture per epic
- Incorporates learnings from previous epics
- Prevents over-engineering with last responsible moment decisions

Phase 4 - Complete Integration:
- Scale-adaptive workflow routing
- Complete documentation in docs/V6-FEATURES.md
- All phases integrated and tested

Benefits:
- 80% faster for Level 0-1 projects
- 70-80% context window reduction via story-context
- 30% less architecture rework via JIT approach
- Clear progress visibility via state machine
- Continuous improvement via retrospectives

Generated by swe-agent
2025-10-20 13:36:53 +00:00
swe-agent[bot]
5a23f62ec5 Add v6 BMAD-METHOD workflow analysis
- Comprehensive comparison of v6 vs current workflow
- Identified 7 key innovations with priority rankings
- Created 4-phase implementation roadmap
- Recommended adoptable practices and what to keep

Generated by swe-agent
2025-10-20 13:18:10 +00:00
193 changed files with 2300 additions and 31989 deletions

View File

@@ -1,54 +1,154 @@
{
"$schema": "https://anthropic.com/claude-code/marketplace.schema.json",
"name": "myclaude",
"version": "5.6.1",
"description": "Professional multi-agent development workflows with OmO orchestration, Requirements-Driven and BMAD methodologies",
"name": "claude-code-dev-workflows",
"owner": {
"name": "cexll",
"email": "evanxian9@gmail.com"
"name": "Claude Code Dev Workflows",
"email": "contact@example.com",
"url": "https://github.com/cexll/myclaude"
},
"metadata": {
"description": "Professional multi-agent development workflows with Requirements-Driven and BMAD methodologies, featuring 16+ specialized agents and 12+ commands",
"version": "1.0.0"
},
"plugins": [
{
"name": "omo",
"description": "Multi-agent orchestration for code analysis, bug investigation, fix planning, and implementation with intelligent routing to specialized agents",
"version": "5.6.1",
"source": "./skills/omo",
"category": "development"
"name": "requirements-driven-development",
"source": "./requirements-driven-workflow/",
"description": "Streamlined requirements-driven development workflow with 90% quality gates for practical feature implementation",
"version": "1.0.0",
"author": {
"name": "Claude Code Dev Workflows",
"url": "https://github.com/cexll/myclaude"
},
"homepage": "https://github.com/cexll/myclaude",
"repository": "https://github.com/cexll/myclaude",
"license": "MIT",
"keywords": [
"requirements",
"workflow",
"automation",
"quality-gates",
"feature-development",
"agile",
"specifications"
],
"category": "workflows",
"strict": false,
"commands": [
"./commands/requirements-pilot.md"
],
"agents": [
"./agents/requirements-generate.md",
"./agents/requirements-code.md",
"./agents/requirements-testing.md",
"./agents/requirements-review.md"
]
},
{
"name": "dev",
"description": "Lightweight development workflow with requirements clarification, parallel codex execution, and mandatory 90% test coverage",
"version": "5.6.1",
"source": "./dev-workflow",
"category": "development"
},
{
"name": "requirements",
"description": "Requirements-driven development workflow with quality gates for practical feature implementation",
"version": "5.6.1",
"source": "./requirements-driven-workflow",
"category": "development"
},
{
"name": "bmad",
"name": "bmad-agile-workflow",
"source": "./bmad-agile-workflow/",
"description": "Full BMAD agile workflow with role-based agents (PO, Architect, SM, Dev, QA) and interactive approval gates",
"version": "5.6.1",
"source": "./bmad-agile-workflow",
"category": "development"
"version": "1.0.0",
"author": {
"name": "Claude Code Dev Workflows",
"url": "https://github.com/cexll/myclaude"
},
"homepage": "https://github.com/cexll/myclaude",
"repository": "https://github.com/cexll/myclaude",
"license": "MIT",
"keywords": [
"bmad",
"agile",
"scrum",
"product-owner",
"architect",
"developer",
"qa",
"workflow-orchestration"
],
"category": "workflows",
"strict": false,
"commands": [
"./commands/bmad-pilot.md"
],
"agents": [
"./agents/bmad-po.md",
"./agents/bmad-architect.md",
"./agents/bmad-sm.md",
"./agents/bmad-dev.md",
"./agents/bmad-qa.md",
"./agents/bmad-orchestrator.md",
"./agents/bmad-review.md"
]
},
{
"name": "dev-kit",
"name": "development-essentials",
"source": "./development-essentials/",
"description": "Essential development commands for coding, debugging, testing, optimization, and documentation",
"version": "5.6.1",
"source": "./development-essentials",
"category": "productivity"
"version": "1.0.0",
"author": {
"name": "Claude Code Dev Workflows",
"url": "https://github.com/cexll/myclaude"
},
"homepage": "https://github.com/cexll/myclaude",
"repository": "https://github.com/cexll/myclaude",
"license": "MIT",
"keywords": [
"code",
"debug",
"test",
"optimize",
"review",
"bugfix",
"refactor",
"documentation"
],
"category": "essentials",
"strict": false,
"commands": [
"./commands/code.md",
"./commands/debug.md",
"./commands/test.md",
"./commands/optimize.md",
"./commands/review.md",
"./commands/bugfix.md",
"./commands/refactor.md",
"./commands/docs.md",
"./commands/ask.md",
"./commands/think.md"
],
"agents": [
"./agents/code.md",
"./agents/bugfix.md",
"./agents/bugfix-verify.md",
"./agents/optimize.md",
"./agents/debug.md"
]
},
{
"name": "sparv",
"description": "Minimal SPARV workflow (Specify→Plan→Act→Review→Vault) with 10-point spec gate, unified journal, 2-action saves, 3-failure protocol, and EHRB risk detection",
"version": "1.1.0",
"source": "./skills/sparv",
"category": "development"
"name": "advanced-ai-agents",
"source": "./advanced-ai-agents/",
"description": "Advanced AI agent for complex problem solving and deep analysis with GPT-5 integration",
"version": "1.0.0",
"author": {
"name": "Claude Code Dev Workflows",
"url": "https://github.com/cexll/myclaude"
},
"homepage": "https://github.com/cexll/myclaude",
"repository": "https://github.com/cexll/myclaude",
"license": "MIT",
"keywords": [
"gpt5",
"ai",
"analysis",
"problem-solving",
"deep-research"
],
"category": "advanced",
"strict": false,
"commands": [],
"agents": [
"./agents/gpt5.md"
]
}
]
}

22
.gitattributes vendored
View File

@@ -1,22 +0,0 @@
# Ensure shell scripts always use LF line endings on all platforms
*.sh text eol=lf
# Ensure Python files use LF line endings
*.py text eol=lf
# Auto-detect text files and normalize line endings to LF
* text=auto eol=lf
# Explicitly declare files that should always be treated as binary
*.exe binary
*.png binary
*.jpg binary
*.jpeg binary
*.gif binary
*.ico binary
*.mov binary
*.mp4 binary
*.mp3 binary
*.zip binary
*.gz binary
*.tar binary

View File

@@ -1,34 +0,0 @@
name: CI
on:
push:
branches: [master, rc/*]
pull_request:
branches: [master, rc/*]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '1.21'
- name: Run tests
run: |
cd codeagent-wrapper
go test -v -cover -coverprofile=coverage.out ./...
- name: Check coverage
run: |
cd codeagent-wrapper
go tool cover -func=coverage.out | grep total | awk '{print $3}'
- name: Upload coverage
uses: codecov/codecov-action@v4
with:
file: codeagent-wrapper/coverage.out
continue-on-error: true

View File

@@ -1,113 +0,0 @@
name: Release codeagent-wrapper
on:
push:
tags:
- 'v*'
permissions:
contents: write
jobs:
test:
name: Test
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: '1.21'
- name: Run tests
working-directory: codeagent-wrapper
run: go test -v -coverprofile=cover.out ./...
- name: Check coverage
working-directory: codeagent-wrapper
run: |
go tool cover -func=cover.out | grep total
COVERAGE=$(go tool cover -func=cover.out | grep total | awk '{print $3}' | sed 's/%//')
echo "Coverage: ${COVERAGE}%"
build:
name: Build
needs: test
runs-on: ubuntu-latest
strategy:
matrix:
include:
- goos: linux
goarch: amd64
- goos: linux
goarch: arm64
- goos: darwin
goarch: amd64
- goos: darwin
goarch: arm64
- goos: windows
goarch: amd64
- goos: windows
goarch: arm64
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: '1.21'
- name: Build binary
id: build
working-directory: codeagent-wrapper
env:
GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goarch }}
CGO_ENABLED: 0
run: |
VERSION=${GITHUB_REF#refs/tags/}
OUTPUT_NAME=codeagent-wrapper-${{ matrix.goos }}-${{ matrix.goarch }}
if [ "${{ matrix.goos }}" = "windows" ]; then
OUTPUT_NAME="${OUTPUT_NAME}.exe"
fi
go build -ldflags="-s -w -X main.version=${VERSION}" -o ${OUTPUT_NAME} ./cmd/codeagent-wrapper
chmod +x ${OUTPUT_NAME}
echo "artifact_path=codeagent-wrapper/${OUTPUT_NAME}" >> $GITHUB_OUTPUT
- name: Upload artifact
uses: actions/upload-artifact@v4
with:
name: codeagent-wrapper-${{ matrix.goos }}-${{ matrix.goarch }}
path: ${{ steps.build.outputs.artifact_path }}
release:
name: Create Release
needs: build
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Download all artifacts
uses: actions/download-artifact@v4
with:
path: artifacts
- name: Prepare release files
run: |
mkdir -p release
find artifacts -type f -name "codeagent-wrapper-*" -exec mv {} release/ \;
cp install.sh install.bat release/
ls -la release/
- name: Create Release
uses: softprops/action-gh-release@v2
with:
files: release/*
generate_release_notes: true
draft: false
prerelease: false

11
.gitignore vendored
View File

@@ -1,10 +1,3 @@
CLAUDE.md
.claude/
.claude-trace
.DS_Store
**/.DS_Store
.venv
.pytest_cache
__pycache__
.coverage
coverage.out
references

View File

@@ -1,772 +0,0 @@
# Changelog
All notable changes to this project will be documented in this file.
## [5.6.4] - 2026-01-15
### 🚀 Features
- add reasoning effort config for codex backend
- default to skip-permissions and bypass-sandbox
- add multi-agent support with yolo mode
- add omo module for multi-agent orchestration
- add intelligent backend selection based on task complexity (#61)
- v5.4.0 structured execution report (#94)
- add millisecond-precision timestamps to all log entries (#91)
- skill-install install script and security scan
- add uninstall scripts with selective module removal
### 🐛 Bug Fixes
- filter codex stderr noise logs
- use config override for codex reasoning effort
- propagate SkipPermissions to parallel tasks (#113)
- add timeout for Windows process termination
- reject dash as workdir parameter (#118)
- add sleep in fake script to prevent CI race condition
- fix gemini env load
- fix omo
- fix codeagent skill TaskOutput
- 修复 Gemini init 事件 session_id 未提取的问题 (#111)
- Windows 后端退出taskkill 结束进程树 + turn.completed 支持 (#108)
- support model parameter for all backends, auto-inject from settings (#105)
- replace setx with reg add to avoid 1024-char PATH truncation (#101)
- 移除未知事件格式的日志噪声 (#96)
- prevent duplicate PATH entries on reinstall (#95)
- Minor issues #12 and #13 - ASCII mode and performance optimization
- correct settings.json filename and bump version to v5.2.8
- allow claude backend to read env from setting.json while preventing recursion (#92)
- comprehensive security and quality improvements for PR #85 & #87 (#90)
- Improve backend termination after message and extend timeout (#86)
- Parser重复解析优化 + 严重bug修复 + PR #86兼容性 (#88)
- filter noisy stderr output from gemini backend (#83)
- 修復 wsl install.sh 格式問題 (#78)
- 修复多 backend 并行日志 PID 混乱并移除包装格式 (#74) (#76)
### 🚜 Refactor
- remove sisyphus agent and unused code
- streamline agent documentation and remove sisyphus
### 📚 Documentation
- add OmO workflow to README and fix plugin marketplace structure
- update FAQ for default bypass/skip-permissions behavior
- 添加 FAQ 常见问题章节
- update troubleshooting with idempotent PATH commands (#95)
### 💼 Other
- add test-cases skill
- add browser skill
- BMADh和Requirements-Driven支持根据语义生成对应的文档 (#82)
- update all readme
## [5.2.4] - 2025-12-16
### ⚙️ Miscellaneous Tasks
- integrate git-cliff for automated changelog generation
- bump version to 5.2.4
### 🐛 Bug Fixes
- 防止 Claude backend 无限递归调用
- isolate log files per task in parallel mode
### 💼 Other
- Merge pull request #70 from cexll/fix/prevent-codeagent-infinite-recursion
- Merge pull request #69 from cexll/myclaude-master-20251215-073053-338465000
- update CHANGELOG.md
- Merge pull request #65 from cexll/fix/issue-64-buffer-overflow
## [5.2.3] - 2025-12-15
### 🐛 Bug Fixes
- 修复 bufio.Scanner token too long 错误 ([#64](https://github.com/cexll/myclaude/issues/64))
### 💼 Other
- change version
### 🧪 Testing
- 同步测试中的版本号至 5.2.3
## [5.2.2] - 2025-12-13
### ⚙️ Miscellaneous Tasks
- Bump version and clean up documentation
### 🐛 Bug Fixes
- fix codeagent backend claude no auto
- fix install.py dev fail
### 🧪 Testing
- Fix tests for ClaudeBackend default --dangerously-skip-permissions
## [5.2.1] - 2025-12-13
### 🐛 Bug Fixes
- fix codeagent claude and gemini root dir
### 💼 Other
- update readme
## [5.2.0] - 2025-12-13
### ⚙️ Miscellaneous Tasks
- Update CHANGELOG and remove deprecated test files
### 🐛 Bug Fixes
- fix race condition in stdout parsing
- add worker limit cap and remove legacy alias
- use -r flag for gemini backend resume
- clarify module list shows default state not enabled
- use -r flag for claude backend resume
- remove binary artifacts and improve error messages
- 异常退出时显示最近错误信息
- op_run_command 实时流式输出
- 修复权限标志逻辑和版本号测试
- 重构信号处理逻辑避免重复 nil 检查
- 移除 .claude 配置文件验证步骤
- 修复并行执行启动横幅重复打印问题
- 修复master合并后的编译和测试问题
### 💼 Other
- Merge rc/5.2 into master: v5.2.0 release improvements
- Merge pull request #53 from cexll/rc/5.2
- remove docs
- remove docs
- add prototype prompt skill
- add prd skill
- update memory claude
- remove command gh flow
- update license
- Merge branch 'master' into rc/5.2
- Merge pull request #52 from cexll/fix/parallel-log-path-on-startup
### 📚 Documentation
- remove GitHub workflow related content
### 🚀 Features
- Complete skills system integration and config cleanup
- Improve release notes and installation scripts
- 添加终端日志输出和 verbose 模式
- 完整多后端支持与安全优化
- 替换 Codex 为 codeagent 并添加 UI 自动检测
### 🚜 Refactor
- 调整文件命名和技能定义
### 🧪 Testing
- 添加 ExtractRecentErrors 单元测试
## [5.1.4] - 2025-12-09
### 🐛 Bug Fixes
- 任务启动时立即返回日志文件路径以支持实时调试
## [5.1.3] - 2025-12-08
### 🐛 Bug Fixes
- resolve CI timing race in TestFakeCmdInfra
## [5.1.2] - 2025-12-08
### 🐛 Bug Fixes
- 修复channel同步竞态条件和死锁问题
### 💼 Other
- Merge pull request #51 from cexll/fix/channel-sync-race-conditions
- change codex-wrapper version
## [5.1.1] - 2025-12-08
### 🐛 Bug Fixes
- 增强日志清理的安全性和可靠性
- resolve data race on forceKillDelay with atomic operations
### 💼 Other
- Merge pull request #49 from cexll/freespace8/master
- resolve signal handling conflict preserving testability and Windows support
### 🧪 Testing
- 补充测试覆盖提升至 89.3%
## [5.1.0] - 2025-12-07
### 💼 Other
- Merge pull request #45 from Michaelxwb/master
- 修改windows安装说明
- 修改打包脚本
- 支持windows系统的安装
- Merge pull request #1 from Michaelxwb/feature-win
- 支持window
### 🚀 Features
- 添加启动时清理日志的功能和--cleanup标志支持
- implement enterprise workflow with multi-backend support
## [5.0.0] - 2025-12-05
### ⚙️ Miscellaneous Tasks
- clarify unit-test coverage levels in requirement questions
### 🐛 Bug Fixes
- defer startup log until args parsed
### 💼 Other
- Merge branch 'master' of github.com:cexll/myclaude
- Merge pull request #43 from gurdasnijor/smithery/add-badge
- Add Smithery badge
- Merge pull request #42 from freespace8/master
### 📚 Documentation
- rewrite documentation for v5.0 modular architecture
### 🚀 Features
- feat install.py
- implement modular installation system
### 🚜 Refactor
- remove deprecated plugin modules
## [4.8.2] - 2025-12-02
### 🐛 Bug Fixes
- skip signal test in CI environment
- make forceKillDelay testable to prevent signal test timeout
- correct Go version in go.mod from 1.25.3 to 1.21
- fix codex wrapper async log
- capture and include stderr in error messages
### 💼 Other
- Merge pull request #41 from cexll/fix-async-log
- remove test case 90
- optimize codex-wrapper
- Merge branch 'master' into fix-async-log
## [4.8.1] - 2025-12-01
### 🎨 Styling
- replace emoji with text labels
### 🐛 Bug Fixes
- improve --parallel parameter validation and docs
### 💼 Other
- remove codex-wrapper bin
## [4.8.0] - 2025-11-30
### 💼 Other
- update codex skill dependencies
## [4.7.3] - 2025-11-29
### 🐛 Bug Fixes
- 保留日志文件以便程序退出后调试并完善日志输出功能
### 💼 Other
- Merge pull request #34 from cexll/cce-worktree-master-20251129-111802-997076000
- update CLAUDE.md and codex skill
### 📚 Documentation
- improve codex skill parameter best practices
### 🚀 Features
- add session resume support and improve output format
- add parallel execution support to codex-wrapper
- add async logging to temp file with lifecycle management
## [4.7.2] - 2025-11-28
### 🐛 Bug Fixes
- improve buffer size and streamline message extraction
### 💼 Other
- Merge pull request #32 from freespace8/master
### 🧪 Testing
- 增加对超大单行文本和非字符串文本的处理测试
## [4.7.1] - 2025-11-27
### 💼 Other
- optimize dev pipline
- Merge feat/codex-wrapper: fix repository URLs
## [4.7] - 2025-11-27
### 🐛 Bug Fixes
- update repository URLs to cexll/myclaude
## [4.7-alpha1] - 2025-11-27
### 🐛 Bug Fixes
- fix marketplace schema validation error in dev-workflow plugin
### 💼 Other
- Merge pull request #29 from cexll/feat/codex-wrapper
- Add codex-wrapper Go implementation
- update readme
- update readme
## [4.6] - 2025-11-25
### 💼 Other
- update dev workflow
- update dev workflow
## [4.5] - 2025-11-25
### 🐛 Bug Fixes
- fix codex skill eof
### 💼 Other
- update dev workflow plugin
- update readme
## [4.4] - 2025-11-22
### 🐛 Bug Fixes
- fix codex skill timeout and add more log
- fix codex skill
### 💼 Other
- update gemini skills
- update dev workflow
- update codex skills model config
- Merge branch 'master' of github.com:cexll/myclaude
- Merge pull request #24 from cexll/swe-agent/23-1763544297
### 🚀 Features
- 支持通过环境变量配置 skills 模型
## [4.3] - 2025-11-19
### 🐛 Bug Fixes
- fix codex skills running
### 💼 Other
- update skills plugin
- update gemini
- update doc
- Add Gemini CLI integration skill
### 🚀 Features
- feat simple dev workflow
## [4.2.2] - 2025-11-15
### 💼 Other
- update codex skills
## [4.2.1] - 2025-11-14
### 💼 Other
- Merge pull request #21 from Tshoiasc/master
- Merge branch 'master' into master
- Change default model to gpt-5.1-codex
- Enhance codex.py to auto-detect long inputs and switch to stdin mode, improving handling of shell argument issues. Updated build_codex_args to support stdin and added relevant logging for task length warnings.
## [4.2] - 2025-11-13
### 🐛 Bug Fixes
- fix codex.py wsl run err
### 💼 Other
- optimize codex skills
- Merge branch 'master' of github.com:cexll/myclaude
- Rename SKILLS.md to SKILL.md
- optimize codex skills
### 🚀 Features
- feat codex skills
## [4.1] - 2025-11-04
### 💼 Other
- update enhance-prompt.md response
- update readme
### 📚 Documentation
- 新增 /enhance-prompt 命令并更新所有 README 文档
## [4.0] - 2025-10-22
### 🐛 Bug Fixes
- fix skills format
### 💼 Other
- Merge branch 'master' of github.com:cexll/myclaude
- Merge pull request #18 from cexll/swe-agent/17-1760969135
- update requirements clarity
- update .gitignore
- Fix #17: Update root marketplace.json to use skills array
- Fix #17: Convert requirements-clarity to correct plugin directory format
- Fix #17: Convert requirements-clarity to correct plugin directory format
- Convert requirements-clarity to plugin format with English prompts
- Translate requirements-clarity skill to English for plugin compatibility
- Add requirements-clarity Claude Skill
- Add requirements clarification command
- update
## [3.5] - 2025-10-20
### 💼 Other
- Merge pull request #15 from cexll/swe-agent/13-1760944712
- Fix #13: Clean up redundant README files
- Optimize README structure - Solution A (modular)
- Merge pull request #14 from cexll/swe-agent/12-1760944588
- Fix #12: Update Makefile install paths for new directory structure
## [3.4] - 2025-10-20
### 💼 Other
- Merge pull request #11 from cexll/swe-agent/10-1760752533
- Fix marketplace metadata references
- Fix plugin configuration: rename to marketplace.json and update repository URLs
- Fix #10: Restructure plugin directories to ensure proper command isolation
## [3.3] - 2025-10-15
### 💼 Other
- Update README-zh.md
- Update README.md
- Update marketplace.json
- Update Chinese README with v3.2 plugin system documentation
- Update README with v3.2 plugin system documentation
## [3.2] - 2025-10-10
### 💼 Other
- Add Claude Code plugin system support
- update readme
- Add Makefile for quick deployment and update READMEs
## [3.1] - 2025-09-17
### ◀️ Revert
- revert
### 🐛 Bug Fixes
- fixed bmad-orchestrator not fund
- fix bmad
### 💼 Other
- update bmad review with codex support
- 优化 BMAD 工作流和代理配置
- update gpt5
- support bmad output-style
- update bmad user guide
- update bmad readme
- optimize requirements pilot
- add use gpt5 codex
- add bmad pilot
- sync READMEs with actual commands/agents; remove nonexistent commands; enhance requirements-pilot with testing decision gate and options.
- Update Chinese README and requirements-pilot command to align with latest workflow
- update readme
- update agent
- update bugfix sub agents
- Update ask support KISS YAGNI SOLID
- Add comprehensive documentation and multi-agent workflow system
- update commands
<!-- generated by git-cliff -->

661
LICENSE
View File

@@ -1,661 +0,0 @@
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.

View File

@@ -1,18 +1,16 @@
# Claude Code Multi-Agent Workflow System Makefile
# Quick deployment for BMAD and Requirements workflows
.PHONY: help install deploy-bmad deploy-requirements deploy-essentials deploy-advanced deploy-all deploy-commands deploy-agents clean test changelog
.PHONY: help install deploy-bmad deploy-requirements deploy-essentials deploy-advanced deploy-all deploy-commands deploy-agents clean test
# Default target
help:
@echo "Claude Code Multi-Agent Workflow - Quick Deployment"
@echo ""
@echo "Recommended installation: python3 install.py --install-dir ~/.claude"
@echo ""
@echo "Usage: make [target]"
@echo ""
@echo "Targets:"
@echo " install - LEGACY: install all configurations (prefer install.py)"
@echo " install - Install all configurations to Claude Code"
@echo " deploy-bmad - Deploy BMAD workflow (bmad-pilot)"
@echo " deploy-requirements - Deploy Requirements workflow (requirements-pilot)"
@echo " deploy-essentials - Deploy Development Essentials workflow"
@@ -22,7 +20,6 @@ help:
@echo " deploy-all - Deploy everything (commands + agents)"
@echo " test-bmad - Test BMAD workflow with sample"
@echo " test-requirements - Test Requirements workflow with sample"
@echo " changelog - Update CHANGELOG.md using git-cliff"
@echo " clean - Clean generated artifacts"
@echo " help - Show this help message"
@@ -39,8 +36,6 @@ OUTPUT_STYLES_DIR = output-styles
# Install all configurations
install: deploy-all
@echo "⚠️ LEGACY PATH: make install will be removed in future versions."
@echo " Prefer: python3 install.py --install-dir ~/.claude"
@echo "✅ Installation complete!"
# Deploy BMAD workflow
@@ -145,18 +140,4 @@ all: deploy-all
# Version info
version:
@echo "Claude Code Multi-Agent Workflow System v3.1"
@echo "BMAD + Requirements-Driven Development"
# Update CHANGELOG.md using git-cliff
changelog:
@echo "📝 Updating CHANGELOG.md with git-cliff..."
@if ! command -v git-cliff > /dev/null 2>&1; then \
echo "❌ git-cliff not found. Installing via Homebrew..."; \
brew install git-cliff; \
fi
@git-cliff -o CHANGELOG.md
@echo "✅ CHANGELOG.md updated successfully!"
@echo ""
@echo "Preview the changes:"
@echo " git diff CHANGELOG.md"
@echo "BMAD + Requirements-Driven Development"

95
PLUGIN_README.md Normal file
View File

@@ -0,0 +1,95 @@
# Claude Code Plugin System
本项目已支持Claude Code插件系统可以将命令和代理打包成可安装的插件包。
## 插件配置
插件配置文件位于 `.claude-plugin/marketplace.json`,定义了所有可用的插件包。
## 可用插件
### 1. Requirements-Driven Development
- **描述**: 需求驱动的开发工作流包含90%质量门控
- **命令**: `/requirements-pilot`
- **代理**: requirements-generate, requirements-code, requirements-testing, requirements-review
### 2. BMAD Agile Workflow
- **描述**: 完整的BMAD敏捷工作流产品负责人→架构师→SM→开发→QA
- **命令**: `/bmad-pilot`
- **代理**: bmad-po, bmad-architect, bmad-sm, bmad-dev, bmad-qa, bmad-orchestrator
### 3. Development Essentials
- **描述**: 核心开发命令套件
- **命令**: `/code`, `/debug`, `/test`, `/optimize`, `/review`, `/bugfix`, `/refactor`, `/docs`, `/ask`, `/think`
- **代理**: code, bugfix, bugfix-verify, code-optimize, debug, develop
### 4. Advanced AI Agents
- **描述**: 高级AI代理集成GPT-5进行深度分析
- **代理**: gpt5
## 使用插件命令
### 列出所有可用插件
```bash
/plugin list
```
### 查看插件详情
```bash
/plugin info <plugin-name>
```
例如:`/plugin info requirements-driven-development`
### 安装插件
```bash
/plugin install <plugin-name>
```
例如:`/plugin install bmad-agile-workflow`
### 移除插件
```bash
/plugin remove <plugin-name>
```
## 创建自定义插件
要创建自己的插件:
1.`.claude-plugin/marketplace.json` 中添加新的插件定义
2. 指定插件包含的命令和代理文件路径
3. 设置适当的元数据(版本、作者、关键词等)
示例插件结构:
```json
{
"name": "my-custom-plugin",
"source": "./",
"description": "自定义插件描述",
"version": "1.0.0",
"commands": [
"./commands/my-command.md"
],
"agents": [
"./agents/my-agent.md"
]
}
```
## 分享插件
要分享插件给其他项目:
1. 复制整个 `.claude-plugin` 目录到目标项目
2. 确保相关的命令和代理文件存在
3. 在新项目中使用 `/plugin` 命令管理插件
## 注意事项
- 插件系统遵循Claude Code的插件规范
- 所有命令和代理文件必须是有效的Markdown格式
- 插件配置支持版本管理和依赖关系
- 插件可以包含多个命令、代理和输出样式
## 相关文档
- [Claude Code插件文档](https://docs.claude.com/en/docs/claude-code/plugins)
- [示例插件仓库](https://github.com/wshobson/agents)

184
README.md
View File

@@ -1,152 +1,114 @@
[中文](README_CN.md) [English](README.md)
# Claude Code Multi-Agent Workflow System
[![Run in Smithery](https://smithery.ai/badge/skills/cexll)](https://smithery.ai/skills?ns=cexll&utm_source=github&utm_medium=badge)
[![License: AGPL-3.0](https://img.shields.io/badge/License-AGPL_v3-blue.svg)](https://www.gnu.org/licenses/agpl-3.0)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![Claude Code](https://img.shields.io/badge/Claude-Code-blue)](https://claude.ai/code)
[![Version](https://img.shields.io/badge/Version-6.x-green)](https://github.com/cexll/myclaude)
[![Version](https://img.shields.io/badge/Version-3.2-green)](https://github.com/cexll/myclaude)
[![Plugin Ready](https://img.shields.io/badge/Plugin-Ready-purple)](https://docs.claude.com/en/docs/claude-code/plugins)
> AI-powered development automation with multi-backend execution (Codex/Claude/Gemini/OpenCode)
> Enterprise-grade agile development automation with AI-powered multi-agent orchestration
## Quick Start
[中文文档](README_CN.md) | [Documentation](docs/)
## 🚀 Quick Start
### Installation
**Plugin System (Recommended)**
```bash
/plugin github.com/cexll/myclaude
```
**Traditional Installation**
```bash
git clone https://github.com/cexll/myclaude.git
cd myclaude
python3 install.py --install-dir ~/.claude
make install
```
## Modules Overview
| Module | Description | Documentation |
|--------|-------------|---------------|
| [do](skills/do/README.md) | **Recommended** - 7-phase feature development with codeagent orchestration | `/do` command |
| [dev](dev-workflow/README.md) | Lightweight dev workflow with Codex integration | `/dev` command |
| [omo](skills/omo/README.md) | Multi-agent orchestration with intelligent routing | `/omo` command |
| [bmad](bmad-agile-workflow/README.md) | BMAD agile workflow with 6 specialized agents | `/bmad-pilot` command |
| [requirements](requirements-driven-workflow/README.md) | Lightweight requirements-to-code pipeline | `/requirements-pilot` command |
| [essentials](development-essentials/README.md) | Core development commands and utilities | `/code`, `/debug`, etc. |
| [sparv](skills/sparv/README.md) | SPARV workflow (Specify→Plan→Act→Review→Vault) | `/sparv` command |
| course | Course development (combines dev + product-requirements + test-cases) | Composite module |
## Installation
### Basic Usage
```bash
# Install all enabled modules
python3 install.py --install-dir ~/.claude
# Full agile workflow
/bmad-pilot "Build user authentication with OAuth2 and MFA"
# Install specific module
python3 install.py --module dev
# Lightweight development
/requirements-pilot "Implement JWT token refresh"
# List available modules
python3 install.py --list-modules
# Force overwrite
python3 install.py --force
# Direct development commands
/code "Add API rate limiting"
```
### Module Configuration
## 📦 Plugin Modules
Edit `config.json` to enable/disable modules:
| Plugin | Description | Key Commands |
|--------|-------------|--------------|
| **[bmad-agile-workflow](docs/BMAD-WORKFLOW.md)** | Complete BMAD methodology with 6 specialized agents | `/bmad-pilot` |
| **[requirements-driven-workflow](docs/REQUIREMENTS-WORKFLOW.md)** | Streamlined requirements-to-code workflow | `/requirements-pilot` |
| **[development-essentials](docs/DEVELOPMENT-COMMANDS.md)** | Core development slash commands | `/code` `/debug` `/test` `/optimize` |
| **[advanced-ai-agents](docs/ADVANCED-AGENTS.md)** | GPT-5 deep reasoning integration | Agent: `gpt5` |
```json
{
"modules": {
"dev": { "enabled": true },
"bmad": { "enabled": false },
"requirements": { "enabled": false },
"essentials": { "enabled": false },
"omo": { "enabled": false },
"sparv": { "enabled": false },
"do": { "enabled": false },
"course": { "enabled": false }
}
}
```
## 💡 Use Cases
## Workflow Selection Guide
**BMAD Workflow** - Full agile process automation
- Product requirements → Architecture design → Sprint planning → Development → Code review → QA testing
- Quality gates with 90% thresholds
- Automated document generation
| Scenario | Recommended |
|----------|-------------|
| Feature development (default) | `/do` |
| Lightweight feature | `/dev` |
| Bug investigation + fix | `/omo` |
| Large enterprise project | `/bmad-pilot` |
| Quick prototype | `/requirements-pilot` |
| Simple task | `/code`, `/debug` |
**Requirements Workflow** - Fast prototyping
- Requirements generation → Implementation → Review → Testing
- Lightweight and practical
## Core Architecture
**Development Commands** - Daily coding
- Direct implementation, debugging, testing, optimization
- No workflow overhead
| Role | Agent | Responsibility |
|------|-------|----------------|
| **Orchestrator** | Claude Code | Planning, context gathering, verification |
| **Executor** | codeagent-wrapper | Code editing, test execution (Codex/Claude/Gemini/OpenCode) |
## 🎯 Key Features
## Backend CLI Requirements
- **🤖 Role-Based Agents**: Specialized AI agents for each development phase
- **📊 Quality Gates**: Automatic quality scoring with iterative refinement
- **✅ Approval Points**: User confirmation at critical workflow stages
- **📁 Persistent Artifacts**: All specs saved to `.claude/specs/`
- **🔌 Plugin System**: Native Claude Code plugin support
- **🔄 Flexible Workflows**: Choose full agile or lightweight development
| Backend | Required Features |
|---------|-------------------|
| Codex | `codex e`, `--json`, `-C`, `resume` |
| Claude | `--output-format stream-json`, `-r` |
| Gemini | `-o stream-json`, `-y`, `-r` |
## 📚 Documentation
## Directory Structure After Installation
- **[BMAD Workflow Guide](docs/BMAD-WORKFLOW.md)** - Complete methodology and agent roles
- **[Requirements Workflow](docs/REQUIREMENTS-WORKFLOW.md)** - Lightweight development process
- **[Development Commands](docs/DEVELOPMENT-COMMANDS.md)** - Slash command reference
- **[Plugin System](docs/PLUGIN-SYSTEM.md)** - Installation and configuration
- **[Quick Start Guide](docs/QUICK-START.md)** - Get started in 5 minutes
```
~/.claude/
├── bin/codeagent-wrapper
├── CLAUDE.md
├── commands/
├── agents/
├── skills/
└── config.json
```
## 🛠️ Installation Methods
## Documentation
- [Codeagent-Wrapper Guide](docs/CODEAGENT-WRAPPER.md)
- [Hooks Documentation](docs/HOOKS.md)
- [codeagent-wrapper](codeagent-wrapper/README.md)
## Troubleshooting
### Common Issues
**Codex wrapper not found:**
**Method 1: Plugin Install** (One command)
```bash
bash install.sh
/plugin install bmad-agile-workflow
```
**Module not loading:**
**Method 2: Make Commands** (Selective installation)
```bash
cat ~/.claude/installed_modules.json
python3 install.py --module <name> --force
make deploy-bmad # BMAD workflow only
make deploy-requirements # Requirements workflow only
make deploy-all # Everything
```
**Backend CLI errors:**
```bash
which codex && codex --version
which claude && claude --version
which gemini && gemini --version
```
**Method 3: Manual Setup**
- Copy `/commands/*.md` to `~/.config/claude/commands/`
- Copy `/agents/*.md` to `~/.config/claude/agents/`
## FAQ
Run `make help` for all options.
| Issue | Solution |
|-------|----------|
| "Unknown event format" | Logging display issue, can be ignored |
| Gemini can't read .gitignore files | Remove from .gitignore or use different backend |
| `/dev` slow | Check logs, try faster model, use single repo |
| Codex permission denied | Set `approval_policy = "never"` in ~/.codex/config.yaml |
## 📄 License
See [GitHub Issues](https://github.com/cexll/myclaude/issues) for more.
MIT License - see [LICENSE](LICENSE)
## License
## 🙋 Support
AGPL-3.0 - see [LICENSE](LICENSE)
- **Issues**: [GitHub Issues](https://github.com/cexll/myclaude/issues)
- **Documentation**: [docs/](docs/)
- **Plugin Guide**: [PLUGIN_README.md](PLUGIN_README.md)
## Support
---
- [GitHub Issues](https://github.com/cexll/myclaude/issues)
- [Documentation](docs/)
**Transform your development with AI-powered automation** - One command, complete workflow, quality assured.

View File

@@ -1,274 +1,114 @@
# Claude Code 多智能体工作流系统
[![License: AGPL-3.0](https://img.shields.io/badge/License-AGPL_v3-blue.svg)](https://www.gnu.org/licenses/agpl-3.0)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![Claude Code](https://img.shields.io/badge/Claude-Code-blue)](https://claude.ai/code)
[![Version](https://img.shields.io/badge/Version-6.x-green)](https://github.com/cexll/myclaude)
[![Version](https://img.shields.io/badge/Version-3.2-green)](https://github.com/cexll/myclaude)
[![Plugin Ready](https://img.shields.io/badge/Plugin-Ready-purple)](https://docs.claude.com/en/docs/claude-code/plugins)
> AI 驱动的开发自动化 - 多后端执行架构 (Codex/Claude/Gemini/OpenCode)
> 企业级敏捷开发自动化与 AI 驱动的多智能体编排
## 快速开始
[English](README.md) | [文档](docs/)
## 🚀 快速开始
### 安装
**插件系统(推荐)**
```bash
/plugin github.com/cexll/myclaude
```
**传统安装**
```bash
git clone https://github.com/cexll/myclaude.git
cd myclaude
python3 install.py --install-dir ~/.claude
make install
```
## 模块概览
| 模块 | 描述 | 文档 |
|------|------|------|
| [do](skills/do/README.md) | **推荐** - 7 阶段功能开发 + codeagent 编排 | `/do` 命令 |
| [dev](dev-workflow/README.md) | 轻量级开发工作流 + Codex 集成 | `/dev` 命令 |
| [omo](skills/omo/README.md) | 多智能体编排 + 智能路由 | `/omo` 命令 |
| [bmad](bmad-agile-workflow/README.md) | BMAD 敏捷工作流 + 6 个专业智能体 | `/bmad-pilot` 命令 |
| [requirements](requirements-driven-workflow/README.md) | 轻量级需求到代码流水线 | `/requirements-pilot` 命令 |
| [essentials](development-essentials/README.md) | 核心开发命令和工具 | `/code`, `/debug` 等 |
| [sparv](skills/sparv/README.md) | SPARV 工作流 (Specify→Plan→Act→Review→Vault) | `/sparv` 命令 |
| course | 课程开发(组合 dev + product-requirements + test-cases | 组合模块 |
## 核心架构
| 角色 | 智能体 | 职责 |
|------|-------|------|
| **编排者** | Claude Code | 规划、上下文收集、验证 |
| **执行者** | codeagent-wrapper | 代码编辑、测试执行Codex/Claude/Gemini/OpenCode 后端)|
## 工作流详解
### do 工作流(推荐)
7 阶段功能开发,通过 codeagent-wrapper 编排多个智能体。**大多数功能开发任务的首选工作流。**
### 基本使用
```bash
/do "添加用户登录功能"
# 完整敏捷工作流
/bmad-pilot "构建用户认证系统,支持 OAuth2 和多因素认证"
# 轻量级开发
/requirements-pilot "实现 JWT 令牌刷新"
# 直接开发命令
/code "添加 API 限流功能"
```
**7 阶段:**
| 阶段 | 名称 | 目标 |
|------|------|------|
| 1 | Discovery | 理解需求 |
| 2 | Exploration | 映射代码库模式 |
| 3 | Clarification | 解决歧义(**强制**|
| 4 | Architecture | 设计实现方案 |
| 5 | Implementation | 构建功能(**需审批**|
| 6 | Review | 捕获缺陷 |
| 7 | Summary | 记录结果 |
## 📦 插件模块
**智能体:**
- `code-explorer` - 代码追踪、架构映射
- `code-architect` - 设计方案、文件规划
- `code-reviewer` - 代码审查、简化建议
- `develop` - 实现代码、运行测试
| 插件 | 描述 | 主要命令 |
|------|------|---------|
| **[bmad-agile-workflow](docs/BMAD-WORKFLOW.md)** | 完整 BMAD 方法论包含6个专业智能体 | `/bmad-pilot` |
| **[requirements-driven-workflow](docs/REQUIREMENTS-WORKFLOW.md)** | 精简的需求到代码工作流 | `/requirements-pilot` |
| **[development-essentials](docs/DEVELOPMENT-COMMANDS.md)** | 核心开发斜杠命令 | `/code` `/debug` `/test` `/optimize` |
| **[advanced-ai-agents](docs/ADVANCED-AGENTS.md)** | GPT-5 深度推理集成 | 智能体: `gpt5` |
## 💡 使用场景
**BMAD 工作流** - 完整敏捷流程自动化
- 产品需求 → 架构设计 → 冲刺规划 → 开发实现 → 代码审查 → 质量测试
- 90% 阈值质量门控
- 自动生成文档
**Requirements 工作流** - 快速原型开发
- 需求生成 → 实现 → 审查 → 测试
- 轻量级实用主义
**开发命令** - 日常编码
- 直接实现、调试、测试、优化
- 无工作流开销
## 🎯 核心特性
- **🤖 角色化智能体**: 每个开发阶段的专业 AI 智能体
- **📊 质量门控**: 自动质量评分,迭代优化
- **✅ 确认节点**: 关键工作流阶段的用户确认
- **📁 持久化产物**: 所有规格保存至 `.claude/specs/`
- **🔌 插件系统**: 原生 Claude Code 插件支持
- **🔄 灵活工作流**: 选择完整敏捷或轻量开发
## 📚 文档
- **[BMAD 工作流指南](docs/BMAD-WORKFLOW.md)** - 完整方法论和智能体角色
- **[Requirements 工作流](docs/REQUIREMENTS-WORKFLOW.md)** - 轻量级开发流程
- **[开发命令参考](docs/DEVELOPMENT-COMMANDS.md)** - 斜杠命令说明
- **[插件系统](docs/PLUGIN-SYSTEM.md)** - 安装与配置
- **[快速上手](docs/QUICK-START.md)** - 5分钟入门
## 🛠️ 安装方式
**方式1: 插件安装**(一条命令)
```bash
/plugin install bmad-agile-workflow
```
**方式2: Make 命令**(选择性安装)
```bash
make deploy-bmad # 仅 BMAD 工作流
make deploy-requirements # 仅 Requirements 工作流
make deploy-all # 全部安装
```
**方式3: 手动安装**
- 复制 `/commands/*.md``~/.config/claude/commands/`
- 复制 `/agents/*.md``~/.config/claude/agents/`
运行 `make help` 查看所有选项。
## 📄 许可证
MIT 许可证 - 查看 [LICENSE](LICENSE)
## 🙋 支持
- **问题反馈**: [GitHub Issues](https://github.com/cexll/myclaude/issues)
- **文档**: [docs/](docs/)
- **插件指南**: [PLUGIN_README.md](PLUGIN_README.md)
---
### Dev 工作流
轻量级开发工作流,适合简单功能开发。
```bash
/dev "实现 JWT 用户认证"
```
**6 步流程:**
1. 需求澄清 - 交互式问答
2. Codex 深度分析 - 代码库探索
3. 开发计划生成 - 结构化任务分解
4. 并行执行 - Codex 并发执行
5. 覆盖率验证 - 强制 ≥90%
6. 完成总结 - 报告生成
---
### OmO 多智能体编排器
基于风险信号智能路由任务到专业智能体。
```bash
/omo "分析并修复这个认证 bug"
```
**智能体层级:**
| 智能体 | 角色 | 后端 |
|-------|------|------|
| `oracle` | 技术顾问 | Claude |
| `librarian` | 外部研究 | Claude |
| `explore` | 代码库搜索 | OpenCode |
| `develop` | 代码实现 | Codex |
| `frontend-ui-ux-engineer` | UI/UX 专家 | Gemini |
| `document-writer` | 文档撰写 | Gemini |
**常用配方:**
- 解释代码:`explore`
- 位置已知的小修复:直接 `develop`
- Bug 修复(位置未知):`explore → develop`
- 跨模块重构:`explore → oracle → develop`
---
### SPARV 工作流
极简 5 阶段工作流Specify → Plan → Act → Review → Vault。
```bash
/sparv "实现订单导出功能"
```
**核心规则:**
- **10 分规格门**:得分 0-10必须 >=9 才能进入 Plan
- **2 动作保存**:每 2 次工具调用写入 journal.md
- **3 失败协议**:连续 3 次失败后停止并上报
- **EHRB**:高风险操作需明确确认
**评分维度(各 0-2 分):**
1. Value - 为什么做,可验证的收益
2. Scope - MVP + 不在范围内的内容
3. Acceptance - 可测试的验收标准
4. Boundaries - 错误/性能/兼容/安全边界
5. Risk - EHRB/依赖/未知 + 处理方式
---
### BMAD 敏捷工作流
完整企业敏捷方法论 + 6 个专业智能体。
```bash
/bmad-pilot "构建电商结账系统"
```
**智能体角色:**
| 智能体 | 职责 |
|-------|------|
| Product Owner | 需求与用户故事 |
| Architect | 系统设计与技术决策 |
| Scrum Master | Sprint 规划与任务分解 |
| Developer | 实现 |
| Code Reviewer | 质量保证 |
| QA Engineer | 测试与验证 |
**审批门:**
- PRD 完成后90+ 分)需用户审批
- 架构完成后90+ 分)需用户审批
---
### 需求驱动工作流
轻量级需求到代码流水线。
```bash
/requirements-pilot "实现 API 限流"
```
**100 分质量评分:**
- 功能清晰度30 分
- 技术具体性25 分
- 实现完整性25 分
- 业务上下文20 分
---
### 开发基础命令
日常编码任务的直接命令。
| 命令 | 用途 |
|------|------|
| `/code` | 实现功能 |
| `/debug` | 调试问题 |
| `/test` | 编写测试 |
| `/review` | 代码审查 |
| `/optimize` | 性能优化 |
| `/refactor` | 代码重构 |
| `/docs` | 编写文档 |
---
## 安装
```bash
# 安装所有启用的模块
python3 install.py --install-dir ~/.claude
# 安装特定模块
python3 install.py --module dev
# 列出可用模块
python3 install.py --list-modules
# 强制覆盖
python3 install.py --force
```
### 模块配置
编辑 `config.json` 启用/禁用模块:
```json
{
"modules": {
"dev": { "enabled": true },
"bmad": { "enabled": false },
"requirements": { "enabled": false },
"essentials": { "enabled": false },
"omo": { "enabled": false },
"sparv": { "enabled": false },
"do": { "enabled": false },
"course": { "enabled": false }
}
}
```
## 工作流选择指南
| 场景 | 推荐 |
|------|------|
| 功能开发(默认) | `/do` |
| 轻量级功能 | `/dev` |
| Bug 调查 + 修复 | `/omo` |
| 大型企业项目 | `/bmad-pilot` |
| 快速原型 | `/requirements-pilot` |
| 简单任务 | `/code`, `/debug` |
## 后端 CLI 要求
| 后端 | 必需功能 |
|------|----------|
| Codex | `codex e`, `--json`, `-C`, `resume` |
| Claude | `--output-format stream-json`, `-r` |
| Gemini | `-o stream-json`, `-y`, `-r` |
## 故障排查
**Codex wrapper 未找到:**
```bash
bash install.sh
```
**模块未加载:**
```bash
cat ~/.claude/installed_modules.json
python3 install.py --module <name> --force
```
## FAQ
| 问题 | 解决方案 |
|------|----------|
| "Unknown event format" | 日志显示问题,可忽略 |
| Gemini 无法读取 .gitignore 文件 | 从 .gitignore 移除或使用其他后端 |
| `/dev` 执行慢 | 检查日志,尝试更快模型,使用单一仓库 |
| Codex 权限拒绝 | 在 ~/.codex/config.yaml 设置 `approval_policy = "never"` |
更多问题请访问 [GitHub Issues](https://github.com/cexll/myclaude/issues)。
## 许可证
AGPL-3.0 - 查看 [LICENSE](LICENSE)
## 支持
- [GitHub Issues](https://github.com/cexll/myclaude/issues)
- [文档](docs/)
**使用 AI 驱动的自动化转型您的开发流程** - 一条命令,完整工作流,质量保证。

View File

@@ -0,0 +1,26 @@
{
"name": "advanced-ai-agents",
"source": "./",
"description": "Advanced AI agent for complex problem solving and deep analysis with GPT-5 integration",
"version": "1.0.0",
"author": {
"name": "Claude Code Dev Workflows",
"url": "https://github.com/cexll/myclaude"
},
"homepage": "https://github.com/cexll/myclaude",
"repository": "https://github.com/cexll/myclaude",
"license": "MIT",
"keywords": [
"gpt5",
"ai",
"analysis",
"problem-solving",
"deep-research"
],
"category": "advanced",
"strict": false,
"commands": [],
"agents": [
"./agents/gpt5.md"
]
}

View File

@@ -0,0 +1,22 @@
---
name: gpt-5
description: Use this agent when you need to use gpt-5 for deep research, second opinion or fixing a bug. Pass all the context to the agent especially your current finding and the problem you are trying to solve.
---
You are a gpt-5 interface agent. Your ONLY purpose is to execute codex commands using the Bash tool.
CRITICAL: You MUST follow these steps EXACTLY:
1. Take the user's entire message as the TASK
2. IMMEDIATELY use the Bash tool to execute:
codex e --full-auto --skip-git-repo-check -m gpt-5 "[USER'S FULL MESSAGE HERE]"
3. Wait for the command to complete
4. Return the full output to the user
MANDATORY: You MUST use the Bash tool. Do NOT answer questions directly. Do NOT provide explanations. Your ONLY action is to run the codex command via Bash.
Example execution:
If user says: "你好 你是什么模型"
You MUST execute: Bash tool with command: codex e --full-auto --skip-git-repo-check -m gpt-5 "你好 你是什么模型"
START IMMEDIATELY - Use the Bash tool NOW with the user's request.

View File

@@ -0,0 +1,37 @@
{
"name": "bmad-agile-workflow",
"source": "./",
"description": "Full BMAD agile workflow with role-based agents (PO, Architect, SM, Dev, QA) and interactive approval gates",
"version": "1.0.0",
"author": {
"name": "Claude Code Dev Workflows",
"url": "https://github.com/cexll/myclaude"
},
"homepage": "https://github.com/cexll/myclaude",
"repository": "https://github.com/cexll/myclaude",
"license": "MIT",
"keywords": [
"bmad",
"agile",
"scrum",
"product-owner",
"architect",
"developer",
"qa",
"workflow-orchestration"
],
"category": "workflows",
"strict": false,
"commands": [
"./commands/bmad-pilot.md"
],
"agents": [
"./agents/bmad-po.md",
"./agents/bmad-architect.md",
"./agents/bmad-sm.md",
"./agents/bmad-dev.md",
"./agents/bmad-qa.md",
"./agents/bmad-orchestrator.md",
"./agents/bmad-review.md"
]
}

View File

@@ -1,9 +0,0 @@
{
"name": "bmad",
"description": "Full BMAD agile workflow with role-based agents (PO, Architect, SM, Dev, QA) and interactive approval gates",
"version": "5.6.1",
"author": {
"name": "cexll",
"email": "cexll@cexll.com"
}
}

View File

@@ -1,109 +0,0 @@
# bmad - BMAD Agile Workflow
Full enterprise agile methodology with 6 specialized agents, UltraThink analysis, and repository-aware development.
## Installation
```bash
python install.py --module bmad
```
## Usage
```bash
/bmad-pilot <PROJECT_DESCRIPTION> [OPTIONS]
```
### Options
| Option | Description |
|--------|-------------|
| `--skip-tests` | Skip QA testing phase |
| `--direct-dev` | Skip SM planning, go directly to development |
| `--skip-scan` | Skip initial repository scanning |
## Workflow Phases
| Phase | Agent | Deliverable | Description |
|-------|-------|-------------|-------------|
| 0 | Orchestrator | `00-repo-scan.md` | Repository scanning with UltraThink analysis |
| 1 | Product Owner (PO) | `01-product-requirements.md` | PRD with 90+ quality score required |
| 2 | Architect | `02-system-architecture.md` | Technical design with 90+ score required |
| 3 | Scrum Master (SM) | `03-sprint-plan.md` | Sprint backlog with stories and estimates |
| 4 | Developer | Implementation code | Multi-sprint implementation |
| 4.5 | Reviewer | `04-dev-reviewed.md` | Code review (Pass/Pass with Risk/Fail) |
| 5 | QA Engineer | Test suite | Comprehensive testing and validation |
## Agents
| Agent | Role |
|-------|------|
| `bmad-orchestrator` | Repository scanning, workflow coordination |
| `bmad-po` | Requirements gathering, PRD creation |
| `bmad-architect` | System design, technology decisions |
| `bmad-sm` | Sprint planning, task breakdown |
| `bmad-dev` | Code implementation |
| `bmad-review` | Code review, quality assessment |
| `bmad-qa` | Testing, validation |
## Approval Gates
Two mandatory stop points require explicit user approval:
1. **After PRD** (Phase 1 → 2): User must approve requirements before architecture
2. **After Architecture** (Phase 2 → 3): User must approve design before implementation
## Output Structure
```
.claude/specs/{feature_name}/
├── 00-repo-scan.md
├── 01-product-requirements.md
├── 02-system-architecture.md
├── 03-sprint-plan.md
└── 04-dev-reviewed.md
```
## UltraThink Methodology
Applied throughout the workflow for deep analysis:
1. **Hypothesis Generation** - Form hypotheses about the problem
2. **Evidence Collection** - Gather evidence from codebase
3. **Pattern Recognition** - Identify recurring patterns
4. **Synthesis** - Create comprehensive understanding
5. **Validation** - Cross-check findings
## Interactive Confirmation Flow
PO and Architect phases use iterative refinement:
1. Agent produces initial draft + quality score
2. Orchestrator presents to user with clarification questions
3. User provides responses
4. Agent refines until quality >= 90
5. User confirms to save deliverable
## When to Use
- Large multi-sprint features
- Enterprise projects requiring documentation
- Team coordination scenarios
- Projects needing formal approval gates
## Directory Structure
```
bmad-agile-workflow/
├── README.md
├── commands/
│ └── bmad-pilot.md
└── agents/
├── bmad-orchestrator.md
├── bmad-po.md
├── bmad-architect.md
├── bmad-sm.md
├── bmad-dev.md
├── bmad-review.md
└── bmad-qa.md
```

View File

@@ -427,10 +427,6 @@ Generate architecture document at `./.claude/specs/{feature_name}/02-system-arch
## Important Behaviors
### Language Rules:
- **Language Matching**: Output language matches user input (Chinese input → Chinese doc, English input → English doc). When language is ambiguous, default to Chinese.
- **Technical Terms**: Keep technical terms (API, REST, GraphQL, JWT, RBAC, etc.) in English; translate explanatory text only.
### DO:
- Start by reviewing and referencing the PRD
- Present initial architecture based on requirements

View File

@@ -419,10 +419,6 @@ logger.info('User created', {
## Important Implementation Rules
### Language Rules:
- **Language Matching**: Output language matches user input (Chinese input → Chinese doc, English input → English doc). When language is ambiguous, default to Chinese.
- **Technical Terms**: Keep technical terms (API, CRUD, JWT, SQL, etc.) in English; translate explanatory text only.
### DO:
- Follow architecture specifications exactly
- Implement all acceptance criteria from PRD

View File

@@ -22,10 +22,6 @@ You are the BMAD Orchestrator. Your core focus is repository analysis, workflow
- Consistency: ensure conventions and patterns discovered in scan are preserved downstream
- Explicit handoffs: clearly document assumptions, risks, and integration points for other agents
### Language Rules:
- **Language Matching**: Output language matches user input (Chinese input → Chinese doc, English input → English doc). When language is ambiguous, default to Chinese.
- **Technical Terms**: Keep technical terms (API, PRD, Sprint, etc.) in English; translate explanatory text only.
## UltraThink Repository Scan
When asked to analyze the repository, follow this structure and return a clear, actionable summary.

View File

@@ -313,10 +313,6 @@ Generate PRD at `./.claude/specs/{feature_name}/01-product-requirements.md`:
## Important Behaviors
### Language Rules:
- **Language Matching**: Output language matches user input (Chinese input → Chinese doc, English input → English doc). When language is ambiguous, default to Chinese.
- **Technical Terms**: Keep technical terms (API, Sprint, PRD, KPI, MVP, etc.) in English; translate explanatory text only.
### DO:
- Start immediately with greeting and initial understanding
- Show quality scores transparently

View File

@@ -478,10 +478,6 @@ module.exports = {
## Important Testing Rules
### Language Rules:
- **Language Matching**: Output language matches user input (Chinese input → Chinese doc, English input → English doc). When language is ambiguous, default to Chinese.
- **Technical Terms**: Keep technical terms (API, E2E, CI/CD, Mock, etc.) in English; translate explanatory text only.
### DO:
- Test all acceptance criteria from PRD
- Cover happy path, edge cases, and error scenarios

View File

@@ -45,7 +45,3 @@ You are an independent code review agent responsible for conducting reviews betw
- Focus on actionable findings
- Provide specific QA guidance
- Use clear, parseable output format
### Language Rules:
- **Language Matching**: Output language matches user input (Chinese input → Chinese doc, English input → English doc). When language is ambiguous, default to Chinese.
- **Technical Terms**: Keep technical terms (API, PRD, Sprint, etc.) in English; translate explanatory text only.

View File

@@ -351,10 +351,6 @@ So that [benefit]
## Important Behaviors
### Language Rules:
- **Language Matching**: Output language matches user input (Chinese input → Chinese doc, English input → English doc). When language is ambiguous, default to Chinese.
- **Technical Terms**: Keep technical terms (Sprint, Epic, Story, Backlog, Velocity, etc.) in English; translate explanatory text only.
### DO:
- Read both PRD and Architecture documents thoroughly
- Create comprehensive task breakdown

View File

@@ -1,72 +0,0 @@
# git-cliff configuration file
# https://git-cliff.org/docs/configuration
[changelog]
# changelog header
header = """
# Changelog
All notable changes to this project will be documented in this file.
"""
# template for the changelog body
body = """
{% if version %}
## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
{% else %}
## Unreleased
{% endif %}
{% for group, commits in commits | group_by(attribute="group") %}
### {{ group }}
{% for commit in commits %}
- {{ commit.message | split(pat="\n") | first }}
{% endfor -%}
{% endfor -%}
"""
# remove the leading and trailing whitespace from the template
trim = true
# changelog footer
footer = """
<!-- generated by git-cliff -->
"""
[git]
# parse the commits based on https://www.conventionalcommits.org
conventional_commits = true
# filter out the commits that are not conventional
filter_unconventional = false
# process each line of a commit as an individual commit
split_commits = false
# regex for preprocessing the commit messages
commit_preprocessors = [
{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](https://github.com/cexll/myclaude/issues/${2}))" },
]
# regex for parsing and grouping commits
commit_parsers = [
{ message = "^feat", group = "🚀 Features" },
{ message = "^fix", group = "🐛 Bug Fixes" },
{ message = "^doc", group = "📚 Documentation" },
{ message = "^perf", group = "⚡ Performance" },
{ message = "^refactor", group = "🚜 Refactor" },
{ message = "^style", group = "🎨 Styling" },
{ message = "^test", group = "🧪 Testing" },
{ message = "^chore\\(release\\):", skip = true },
{ message = "^chore", group = "⚙️ Miscellaneous Tasks" },
{ body = ".*security", group = "🛡️ Security" },
{ message = "^revert", group = "◀️ Revert" },
{ message = ".*", group = "💼 Other" },
]
# protect breaking changes from being skipped due to matching a skipping commit_parser
protect_breaking_commits = false
# filter out the commits that are not matched by commit parsers
filter_commits = false
# glob pattern for matching git tags
tag_pattern = "v[0-9]*"
# regex for skipping tags
skip_tags = "v0.1.0-beta.1"
# regex for ignoring tags
ignore_tags = ""
# sort the tags topologically
topo_order = false
# sort the commits inside sections by oldest/newest order
sort_commits = "newest"

View File

@@ -1,39 +0,0 @@
name: CI
on:
push:
branches: [main, master]
pull_request:
permissions:
contents: read
jobs:
test:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
go-version: ["1.21", "1.22"]
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
cache: true
- name: Test
run: make test
- name: Build
run: make build
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: "1.22"
cache: true
- name: Lint
run: make lint

View File

@@ -1,23 +0,0 @@
# Build artifacts
bin/
codeagent
codeagent.exe
codeagent-wrapper
codeagent-wrapper.exe
*.test
# Coverage reports
coverage.out
coverage*.out
cover.out
cover_*.out
coverage.html
# Logs
*.log
# Temp files
*.tmp
*.swp
*~
.DS_Store

View File

@@ -1,37 +0,0 @@
GO ?= go
TOOLS_BIN := $(CURDIR)/bin
TOOLCHAIN ?= go1.22.0
GOLANGCI_LINT_VERSION := v1.56.2
STATICCHECK_VERSION := v0.4.7
GOLANGCI_LINT := $(TOOLS_BIN)/golangci-lint
STATICCHECK := $(TOOLS_BIN)/staticcheck
.PHONY: build test lint clean install
build:
$(GO) build -o codeagent ./cmd/codeagent
$(GO) build -o codeagent-wrapper ./cmd/codeagent-wrapper
test:
$(GO) test ./...
$(GOLANGCI_LINT):
@mkdir -p $(TOOLS_BIN)
GOTOOLCHAIN=$(TOOLCHAIN) GOBIN=$(TOOLS_BIN) $(GO) install github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)
$(STATICCHECK):
@mkdir -p $(TOOLS_BIN)
GOTOOLCHAIN=$(TOOLCHAIN) GOBIN=$(TOOLS_BIN) $(GO) install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION)
lint: $(GOLANGCI_LINT) $(STATICCHECK)
GOTOOLCHAIN=$(TOOLCHAIN) $(GOLANGCI_LINT) run ./...
GOTOOLCHAIN=$(TOOLCHAIN) $(STATICCHECK) ./...
clean:
@python3 -c 'import glob, os; paths=["codeagent","codeagent.exe","codeagent-wrapper","codeagent-wrapper.exe","coverage.out","cover.out","coverage.html"]; paths += glob.glob("coverage*.out") + glob.glob("cover_*.out") + glob.glob("*.test"); [os.remove(p) for p in paths if os.path.exists(p)]'
install:
$(GO) install ./cmd/codeagent
$(GO) install ./cmd/codeagent-wrapper

View File

@@ -1,152 +0,0 @@
# codeagent-wrapper
`codeagent-wrapper` 是一个用 Go 编写的“多后端 AI 代码代理”命令行包装器:用统一的 CLI 入口封装不同的 AI 工具后端Codex / Claude / Gemini / Opencode并提供一致的参数、配置与会话恢复体验。
入口:`cmd/codeagent/main.go`(生成二进制名:`codeagent`)和 `cmd/codeagent-wrapper/main.go`(生成二进制名:`codeagent-wrapper`)。两者行为一致。
## 功能特性
- 多后端支持:`codex` / `claude` / `gemini` / `opencode`
- 统一命令行:`codeagent [flags] <task>` / `codeagent resume <session_id> <task> [workdir]`
- 自动 stdin遇到换行/特殊字符/超长任务自动走 stdin避免 shell quoting 地狱;也可显式使用 `-`
- 配置合并:支持配置文件与 `CODEAGENT_*` 环境变量viper
- Agent 预设:从 `~/.codeagent/models.json` 读取 backend/model/prompt 等预设
- 并行执行:`--parallel` 从 stdin 读取多任务配置,支持依赖拓扑并发执行
- 日志清理:`codeagent cleanup` 清理旧日志(日志写入系统临时目录)
## 安装
要求Go 1.21+。
在仓库根目录执行:
```bash
go install ./cmd/codeagent
go install ./cmd/codeagent-wrapper
```
安装后确认:
```bash
codeagent version
codeagent-wrapper version
```
## 使用示例
最简单用法(默认后端:`codex`
```bash
codeagent "分析 internal/app/cli.go 的入口逻辑,给出改进建议"
```
指定后端:
```bash
codeagent --backend claude "解释 internal/executor/parallel_config.go 的并行配置格式"
```
指定工作目录(第 2 个位置参数):
```bash
codeagent "在当前 repo 下搜索潜在数据竞争" .
```
显式从 stdin 读取 task使用 `-`
```bash
cat task.txt | codeagent -
```
恢复会话:
```bash
codeagent resume <session_id> "继续上次任务"
```
并行模式(从 stdin 读取任务配置;禁止位置参数):
```bash
codeagent --parallel <<'EOF'
---TASK---
id: t1
workdir: .
backend: codex
---CONTENT---
列出本项目的主要模块以及它们的职责。
---TASK---
id: t2
dependencies: t1
backend: claude
---CONTENT---
基于 t1 的结论,提出重构风险点与建议。
EOF
```
## 配置说明
### 配置文件
默认查找路径(当 `--config` 为空时):
- `$HOME/.codeagent/config.(yaml|yml|json|toml|...)`
示例YAML
```yaml
backend: codex
model: gpt-4.1
skip-permissions: false
```
也可以通过 `--config /path/to/config.yaml` 显式指定。
### 环境变量(`CODEAGENT_*`
通过 viper 读取并自动映射 `-``_`,常用项:
- `CODEAGENT_BACKEND``codex|claude|gemini|opencode`
- `CODEAGENT_MODEL`
- `CODEAGENT_AGENT`
- `CODEAGENT_PROMPT_FILE`
- `CODEAGENT_REASONING_EFFORT`
- `CODEAGENT_SKIP_PERMISSIONS`
- `CODEAGENT_FULL_OUTPUT`(并行模式 legacy 输出)
- `CODEAGENT_MAX_PARALLEL_WORKERS`0 表示不限制,上限 100
### Agent 预设(`~/.codeagent/models.json`
可在 `~/.codeagent/models.json` 定义 agent → backend/model/prompt 等映射,用 `--agent <name>` 选择:
```json
{
"default_backend": "opencode",
"default_model": "opencode/grok-code",
"agents": {
"develop": {
"backend": "codex",
"model": "gpt-4.1",
"prompt_file": "~/.codeagent/prompts/develop.md",
"description": "Code development"
}
}
}
```
## 支持的后端
该项目本身不内置模型能力,依赖你本机安装并可在 `PATH` 中找到对应 CLI
- `codex`:执行 `codex e ...`(默认会添加 `--dangerously-bypass-approvals-and-sandbox`;如需关闭请设置 `CODEX_BYPASS_SANDBOX=false`
- `claude`:执行 `claude -p ... --output-format stream-json`(默认会跳过权限提示;如需开启请设置 `CODEAGENT_SKIP_PERMISSIONS=false`
- `gemini`:执行 `gemini ... -o stream-json`(可从 `~/.gemini/.env` 加载环境变量)
- `opencode`:执行 `opencode run --format json`
## 开发
```bash
make build
make test
make lint
make clean
```

View File

@@ -1,43 +0,0 @@
module codeagent-wrapper
go 1.21
require (
github.com/goccy/go-json v0.10.5
github.com/rs/zerolog v1.34.0
github.com/shirou/gopsutil/v3 v3.24.5
github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.19.0
)
require (
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.6.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
golang.org/x/sys v0.20.0 // indirect
golang.org/x/text v0.14.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@@ -1,117 +0,0 @@
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI=
github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk=
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -1,150 +0,0 @@
package wrapper
import (
"context"
"os"
"path/filepath"
"testing"
"time"
config "codeagent-wrapper/internal/config"
executor "codeagent-wrapper/internal/executor"
)
func TestValidateAgentName(t *testing.T) {
tests := []struct {
name string
input string
wantErr bool
}{
{name: "simple", input: "develop", wantErr: false},
{name: "upper", input: "ABC", wantErr: false},
{name: "digits", input: "a1", wantErr: false},
{name: "dash underscore", input: "a-b_c", wantErr: false},
{name: "empty", input: "", wantErr: true},
{name: "space", input: "a b", wantErr: true},
{name: "slash", input: "a/b", wantErr: true},
{name: "dotdot", input: "../evil", wantErr: true},
{name: "unicode", input: "中文", wantErr: true},
{name: "symbol", input: "a$b", wantErr: true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := config.ValidateAgentName(tt.input)
if (err != nil) != tt.wantErr {
t.Fatalf("validateAgentName(%q) err=%v, wantErr=%v", tt.input, err, tt.wantErr)
}
})
}
}
func TestParseArgs_InvalidAgentNameRejected(t *testing.T) {
defer resetTestHooks()
os.Args = []string{"codeagent-wrapper", "--agent", "../evil", "task"}
if _, err := parseArgs(); err == nil {
t.Fatalf("expected parseArgs to reject invalid agent name")
}
}
func TestParseParallelConfig_InvalidAgentNameRejected(t *testing.T) {
input := `---TASK---
id: task-1
agent: ../evil
---CONTENT---
do something`
if _, err := parseParallelConfig([]byte(input)); err == nil {
t.Fatalf("expected parseParallelConfig to reject invalid agent name")
}
}
func TestParseParallelConfig_ResolvesAgentPromptFile(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)
t.Setenv("USERPROFILE", home)
t.Cleanup(config.ResetModelsConfigCacheForTest)
config.ResetModelsConfigCacheForTest()
configDir := filepath.Join(home, ".codeagent")
if err := os.MkdirAll(configDir, 0o755); err != nil {
t.Fatalf("MkdirAll: %v", err)
}
if err := os.WriteFile(filepath.Join(configDir, "models.json"), []byte(`{
"default_backend": "codex",
"default_model": "gpt-test",
"agents": {
"custom-agent": {
"backend": "codex",
"model": "gpt-test",
"prompt_file": "~/.claude/prompt.md"
}
}
}`), 0o644); err != nil {
t.Fatalf("WriteFile: %v", err)
}
input := `---TASK---
id: task-1
agent: custom-agent
---CONTENT---
do something`
cfg, err := parseParallelConfig([]byte(input))
if err != nil {
t.Fatalf("parseParallelConfig() unexpected error: %v", err)
}
if len(cfg.Tasks) != 1 {
t.Fatalf("expected 1 task, got %d", len(cfg.Tasks))
}
if got := cfg.Tasks[0].PromptFile; got != "~/.claude/prompt.md" {
t.Fatalf("PromptFile = %q, want %q", got, "~/.claude/prompt.md")
}
}
func TestDefaultRunCodexTaskFn_AppliesAgentPromptFile(t *testing.T) {
defer resetTestHooks()
home := t.TempDir()
t.Setenv("HOME", home)
t.Setenv("USERPROFILE", home)
claudeDir := filepath.Join(home, ".claude")
if err := os.MkdirAll(claudeDir, 0o755); err != nil {
t.Fatalf("MkdirAll: %v", err)
}
if err := os.WriteFile(filepath.Join(claudeDir, "prompt.md"), []byte("P\n"), 0o644); err != nil {
t.Fatalf("WriteFile: %v", err)
}
fake := newFakeCmd(fakeCmdConfig{
StdoutPlan: []fakeStdoutEvent{
{Data: `{"type":"item.completed","item":{"type":"agent_message","text":"ok"}}` + "\n"},
},
WaitDelay: 2 * time.Millisecond,
})
_ = executor.SetNewCommandRunner(func(ctx context.Context, name string, args ...string) executor.CommandRunner { return fake })
_ = executor.SetSelectBackendFn(func(name string) (Backend, error) {
return testBackend{
name: name,
command: "fake-cmd",
argsFn: func(cfg *Config, targetArg string) []string {
return []string{targetArg}
},
}, nil
})
res := defaultRunCodexTaskFn(TaskSpec{
ID: "t",
Task: "do",
Backend: "codex",
PromptFile: "~/.claude/prompt.md",
}, 5)
if res.ExitCode != 0 {
t.Fatalf("unexpected result: %+v", res)
}
want := "<agent-prompt>\nP\n</agent-prompt>\n\ndo"
if got := fake.StdinContents(); got != want {
t.Fatalf("stdin mismatch:\n got=%q\nwant=%q", got, want)
}
}

View File

@@ -1,278 +0,0 @@
package wrapper
import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
)
const (
version = "6.1.2"
defaultWorkdir = "."
defaultTimeout = 7200 // seconds (2 hours)
defaultCoverageTarget = 90.0
codexLogLineLimit = 1000
stdinSpecialChars = "\n\\\"'`$"
stderrCaptureLimit = 4 * 1024
defaultBackendName = "codex"
defaultCodexCommand = "codex"
// stdout close reasons
stdoutCloseReasonWait = "wait-done"
stdoutCloseReasonDrain = "drain-timeout"
stdoutCloseReasonCtx = "context-cancel"
stdoutDrainTimeout = 500 * time.Millisecond
)
// Test hooks for dependency injection
var (
stdinReader io.Reader = os.Stdin
isTerminalFn = defaultIsTerminal
codexCommand = defaultCodexCommand
cleanupHook func()
startupCleanupAsync = true
buildCodexArgsFn = buildCodexArgs
selectBackendFn = selectBackend
cleanupLogsFn = cleanupOldLogs
defaultBuildArgsFn = buildCodexArgs
runTaskFn = runCodexTask
exitFn = os.Exit
)
func runStartupCleanup() {
if cleanupLogsFn == nil {
return
}
defer func() {
if r := recover(); r != nil {
logWarn(fmt.Sprintf("cleanupOldLogs panic: %v", r))
}
}()
if _, err := cleanupLogsFn(); err != nil {
logWarn(fmt.Sprintf("cleanupOldLogs error: %v", err))
}
}
func scheduleStartupCleanup() {
if !startupCleanupAsync {
runStartupCleanup()
return
}
if cleanupLogsFn == nil {
return
}
fn := cleanupLogsFn
go func() {
defer func() {
if r := recover(); r != nil {
logWarn(fmt.Sprintf("cleanupOldLogs panic: %v", r))
}
}()
if _, err := fn(); err != nil {
logWarn(fmt.Sprintf("cleanupOldLogs error: %v", err))
}
}()
}
func runCleanupMode() int {
if cleanupLogsFn == nil {
fmt.Fprintln(os.Stderr, "Cleanup failed: log cleanup function not configured")
return 1
}
stats, err := cleanupLogsFn()
if err != nil {
fmt.Fprintf(os.Stderr, "Cleanup failed: %v\n", err)
return 1
}
fmt.Println("Cleanup completed")
fmt.Printf("Files scanned: %d\n", stats.Scanned)
fmt.Printf("Files deleted: %d\n", stats.Deleted)
if len(stats.DeletedFiles) > 0 {
for _, f := range stats.DeletedFiles {
fmt.Printf(" - %s\n", f)
}
}
fmt.Printf("Files kept: %d\n", stats.Kept)
if len(stats.KeptFiles) > 0 {
for _, f := range stats.KeptFiles {
fmt.Printf(" - %s\n", f)
}
}
if stats.Errors > 0 {
fmt.Printf("Deletion errors: %d\n", stats.Errors)
}
return 0
}
func readAgentPromptFile(path string, allowOutsideClaudeDir bool) (string, error) {
raw := strings.TrimSpace(path)
if raw == "" {
return "", nil
}
expanded := raw
if raw == "~" || strings.HasPrefix(raw, "~/") || strings.HasPrefix(raw, "~\\") {
home, err := os.UserHomeDir()
if err != nil {
return "", err
}
if raw == "~" {
expanded = home
} else {
expanded = home + raw[1:]
}
}
absPath, err := filepath.Abs(expanded)
if err != nil {
return "", err
}
absPath = filepath.Clean(absPath)
home, err := os.UserHomeDir()
if err != nil {
if !allowOutsideClaudeDir {
return "", err
}
logWarn(fmt.Sprintf("Failed to resolve home directory for prompt file validation: %v; proceeding without restriction", err))
} else {
allowedDirs := []string{
filepath.Clean(filepath.Join(home, ".claude")),
filepath.Clean(filepath.Join(home, ".codeagent", "agents")),
}
for i := range allowedDirs {
allowedAbs, err := filepath.Abs(allowedDirs[i])
if err == nil {
allowedDirs[i] = filepath.Clean(allowedAbs)
}
}
isWithinDir := func(path, dir string) bool {
rel, err := filepath.Rel(dir, path)
if err != nil {
return false
}
rel = filepath.Clean(rel)
if rel == "." {
return true
}
if rel == ".." {
return false
}
prefix := ".." + string(os.PathSeparator)
return !strings.HasPrefix(rel, prefix)
}
if !allowOutsideClaudeDir {
withinAllowed := false
for _, dir := range allowedDirs {
if isWithinDir(absPath, dir) {
withinAllowed = true
break
}
}
if !withinAllowed {
logWarn(fmt.Sprintf("Refusing to read prompt file outside allowed dirs (%s): %s", strings.Join(allowedDirs, ", "), absPath))
return "", fmt.Errorf("prompt file must be under ~/.claude or ~/.codeagent/agents")
}
resolvedPath, errPath := filepath.EvalSymlinks(absPath)
if errPath == nil {
resolvedPath = filepath.Clean(resolvedPath)
resolvedAllowed := make([]string, 0, len(allowedDirs))
for _, dir := range allowedDirs {
resolvedBase, errBase := filepath.EvalSymlinks(dir)
if errBase != nil {
continue
}
resolvedAllowed = append(resolvedAllowed, filepath.Clean(resolvedBase))
}
if len(resolvedAllowed) > 0 {
withinResolved := false
for _, dir := range resolvedAllowed {
if isWithinDir(resolvedPath, dir) {
withinResolved = true
break
}
}
if !withinResolved {
logWarn(fmt.Sprintf("Refusing to read prompt file outside allowed dirs (%s) (resolved): %s", strings.Join(resolvedAllowed, ", "), resolvedPath))
return "", fmt.Errorf("prompt file must be under ~/.claude or ~/.codeagent/agents")
}
}
}
} else {
withinAllowed := false
for _, dir := range allowedDirs {
if isWithinDir(absPath, dir) {
withinAllowed = true
break
}
}
if !withinAllowed {
logWarn(fmt.Sprintf("Reading prompt file outside allowed dirs (%s): %s", strings.Join(allowedDirs, ", "), absPath))
}
}
}
data, err := os.ReadFile(absPath)
if err != nil {
return "", err
}
return strings.TrimRight(string(data), "\r\n"), nil
}
func wrapTaskWithAgentPrompt(prompt string, task string) string {
return "<agent-prompt>\n" + prompt + "\n</agent-prompt>\n\n" + task
}
func runCleanupHook() {
if logger := activeLogger(); logger != nil {
logger.Flush()
}
if cleanupHook != nil {
cleanupHook()
}
}
func printHelp() {
name := currentWrapperName()
help := fmt.Sprintf(`%[1]s - Go wrapper for AI CLI backends
Usage:
%[1]s "task" [workdir]
%[1]s --backend claude "task" [workdir]
%[1]s --prompt-file /path/to/prompt.md "task" [workdir]
%[1]s - [workdir] Read task from stdin
%[1]s resume <session_id> "task" [workdir]
%[1]s resume <session_id> - [workdir]
%[1]s --parallel Run tasks in parallel (config from stdin)
%[1]s --parallel --full-output Run tasks in parallel with full output (legacy)
%[1]s --version
%[1]s --help
Parallel mode examples:
%[1]s --parallel < tasks.txt
echo '...' | %[1]s --parallel
%[1]s --parallel --full-output < tasks.txt
%[1]s --parallel <<'EOF'
Environment Variables:
CODEX_TIMEOUT Timeout in milliseconds (default: 7200000)
CODEAGENT_ASCII_MODE Use ASCII symbols instead of Unicode (PASS/WARN/FAIL)
Exit Codes:
0 Success
1 General error (missing args, no output)
124 Timeout
127 backend command not found
130 Interrupted (Ctrl+C)
* Passthrough from backend process`, name)
fmt.Println(help)
}

View File

@@ -1,9 +0,0 @@
package wrapper
import backend "codeagent-wrapper/internal/backend"
type Backend = backend.Backend
type CodexBackend = backend.CodexBackend
type ClaudeBackend = backend.ClaudeBackend
type GeminiBackend = backend.GeminiBackend
type OpencodeBackend = backend.OpencodeBackend

View File

@@ -1,7 +0,0 @@
package wrapper
import backend "codeagent-wrapper/internal/backend"
func init() {
backend.SetLogFuncs(logWarn, logError)
}

View File

@@ -1,5 +0,0 @@
package wrapper
import backend "codeagent-wrapper/internal/backend"
func selectBackend(name string) (Backend, error) { return backend.Select(name) }

View File

@@ -1,103 +0,0 @@
package wrapper
import (
"bytes"
"os"
"testing"
config "codeagent-wrapper/internal/config"
)
var (
benchCmdSink any
benchConfigSink *Config
benchMessageSink string
benchThreadIDSink string
)
// BenchmarkStartup_NewRootCommand measures CLI startup overhead (command+flags construction).
func BenchmarkStartup_NewRootCommand(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
benchCmdSink = newRootCommand()
}
}
// BenchmarkConfigParse_ParseArgs measures config parsing from argv/env (steady-state).
func BenchmarkConfigParse_ParseArgs(b *testing.B) {
home := b.TempDir()
b.Setenv("HOME", home)
b.Setenv("USERPROFILE", home)
config.ResetModelsConfigCacheForTest()
b.Cleanup(config.ResetModelsConfigCacheForTest)
origArgs := os.Args
os.Args = []string{"codeagent-wrapper", "--agent", "develop", "task"}
b.Cleanup(func() { os.Args = origArgs })
if _, err := parseArgs(); err != nil {
b.Fatalf("warmup parseArgs() error: %v", err)
}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
cfg, err := parseArgs()
if err != nil {
b.Fatalf("parseArgs() error: %v", err)
}
benchConfigSink = cfg
}
}
// BenchmarkJSONParse_ParseJSONStreamInternal measures line-delimited JSON stream parsing.
func BenchmarkJSONParse_ParseJSONStreamInternal(b *testing.B) {
stream := []byte(
`{"type":"thread.started","thread_id":"t"}` + "\n" +
`{"type":"item.completed","item":{"type":"agent_message","text":"hello"}}` + "\n" +
`{"type":"thread.completed","thread_id":"t"}` + "\n",
)
b.SetBytes(int64(len(stream)))
b.ReportAllocs()
for i := 0; i < b.N; i++ {
message, threadID := parseJSONStreamInternal(bytes.NewReader(stream), nil, nil, nil, nil)
benchMessageSink = message
benchThreadIDSink = threadID
}
}
// BenchmarkLoggerWrite 测试日志写入性能
func BenchmarkLoggerWrite(b *testing.B) {
logger, err := NewLogger()
if err != nil {
b.Fatal(err)
}
defer logger.Close()
b.ResetTimer()
for i := 0; i < b.N; i++ {
logger.Info("benchmark log message")
}
b.StopTimer()
logger.Flush()
}
// BenchmarkLoggerConcurrentWrite 测试并发日志写入性能
func BenchmarkLoggerConcurrentWrite(b *testing.B) {
logger, err := NewLogger()
if err != nil {
b.Fatal(err)
}
defer logger.Close()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
logger.Info("concurrent benchmark log message")
}
})
b.StopTimer()
logger.Flush()
}

View File

@@ -1,664 +0,0 @@
package wrapper
import (
"errors"
"fmt"
"io"
"os"
"reflect"
"strings"
config "codeagent-wrapper/internal/config"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
)
type exitError struct {
code int
}
func (e exitError) Error() string {
return fmt.Sprintf("exit %d", e.code)
}
type cliOptions struct {
Backend string
Model string
ReasoningEffort string
Agent string
PromptFile string
SkipPermissions bool
Parallel bool
FullOutput bool
Cleanup bool
Version bool
ConfigFile string
}
func Main() {
Run()
}
// Run is the program entrypoint for cmd/codeagent/main.go.
func Run() {
exitFn(run())
}
func run() int {
cmd := newRootCommand()
cmd.SetArgs(os.Args[1:])
if err := cmd.Execute(); err != nil {
var ee exitError
if errors.As(err, &ee) {
return ee.code
}
return 1
}
return 0
}
func newRootCommand() *cobra.Command {
name := currentWrapperName()
opts := &cliOptions{}
cmd := &cobra.Command{
Use: fmt.Sprintf("%s [flags] <task>|resume <session_id> <task> [workdir]", name),
Short: "Go wrapper for AI CLI backends",
SilenceErrors: true,
SilenceUsage: true,
Args: cobra.ArbitraryArgs,
RunE: func(cmd *cobra.Command, args []string) error {
if opts.Version {
fmt.Printf("%s version %s\n", name, version)
return nil
}
if opts.Cleanup {
code := runCleanupMode()
if code == 0 {
return nil
}
return exitError{code: code}
}
exitCode := runWithLoggerAndCleanup(func() int {
v, err := config.NewViper(opts.ConfigFile)
if err != nil {
logError(err.Error())
return 1
}
if opts.Parallel {
return runParallelMode(cmd, args, opts, v, name)
}
logInfo("Script started")
cfg, err := buildSingleConfig(cmd, args, os.Args[1:], opts, v)
if err != nil {
logError(err.Error())
return 1
}
logInfo(fmt.Sprintf("Parsed args: mode=%s, task_len=%d, backend=%s", cfg.Mode, len(cfg.Task), cfg.Backend))
return runSingleMode(cfg, name)
})
if exitCode == 0 {
return nil
}
return exitError{code: exitCode}
},
}
cmd.CompletionOptions.DisableDefaultCmd = true
addRootFlags(cmd.Flags(), opts)
cmd.AddCommand(newVersionCommand(name), newCleanupCommand())
return cmd
}
func addRootFlags(fs *pflag.FlagSet, opts *cliOptions) {
fs.StringVar(&opts.ConfigFile, "config", "", "Config file path (default: $HOME/.codeagent/config.*)")
fs.BoolVarP(&opts.Version, "version", "v", false, "Print version and exit")
fs.BoolVar(&opts.Cleanup, "cleanup", false, "Clean up old logs and exit")
fs.BoolVar(&opts.Parallel, "parallel", false, "Run tasks in parallel (config from stdin)")
fs.BoolVar(&opts.FullOutput, "full-output", false, "Parallel mode: include full task output (legacy)")
fs.StringVar(&opts.Backend, "backend", defaultBackendName, "Backend to use (codex, claude, gemini, opencode)")
fs.StringVar(&opts.Model, "model", "", "Model override")
fs.StringVar(&opts.ReasoningEffort, "reasoning-effort", "", "Reasoning effort (backend-specific)")
fs.StringVar(&opts.Agent, "agent", "", "Agent preset name (from ~/.codeagent/models.json)")
fs.StringVar(&opts.PromptFile, "prompt-file", "", "Prompt file path")
fs.BoolVar(&opts.SkipPermissions, "skip-permissions", false, "Skip permissions prompts (also via CODEAGENT_SKIP_PERMISSIONS)")
fs.BoolVar(&opts.SkipPermissions, "dangerously-skip-permissions", false, "Alias for --skip-permissions")
}
func newVersionCommand(name string) *cobra.Command {
return &cobra.Command{
Use: "version",
Short: "Print version and exit",
SilenceErrors: true,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
fmt.Printf("%s version %s\n", name, version)
return nil
},
}
}
func newCleanupCommand() *cobra.Command {
return &cobra.Command{
Use: "cleanup",
Short: "Clean up old logs and exit",
SilenceErrors: true,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
code := runCleanupMode()
if code == 0 {
return nil
}
return exitError{code: code}
},
}
}
func runWithLoggerAndCleanup(fn func() int) (exitCode int) {
logger, err := NewLogger()
if err != nil {
fmt.Fprintf(os.Stderr, "ERROR: failed to initialize logger: %v\n", err)
return 1
}
setLogger(logger)
defer func() {
logger := activeLogger()
if logger != nil {
logger.Flush()
}
if err := closeLogger(); err != nil {
fmt.Fprintf(os.Stderr, "ERROR: failed to close logger: %v\n", err)
}
if logger == nil {
return
}
if exitCode != 0 {
if entries := logger.ExtractRecentErrors(10); len(entries) > 0 {
fmt.Fprintln(os.Stderr, "\n=== Recent Errors ===")
for _, entry := range entries {
fmt.Fprintln(os.Stderr, entry)
}
fmt.Fprintf(os.Stderr, "Log file: %s (deleted)\n", logger.Path())
}
}
_ = logger.RemoveLogFile()
}()
defer runCleanupHook()
// Clean up stale logs from previous runs.
scheduleStartupCleanup()
return fn()
}
func parseArgs() (*Config, error) {
opts := &cliOptions{}
cmd := &cobra.Command{SilenceErrors: true, SilenceUsage: true, Args: cobra.ArbitraryArgs}
addRootFlags(cmd.Flags(), opts)
rawArgv := os.Args[1:]
if err := cmd.ParseFlags(rawArgv); err != nil {
return nil, err
}
args := cmd.Flags().Args()
v, err := config.NewViper(opts.ConfigFile)
if err != nil {
return nil, err
}
return buildSingleConfig(cmd, args, rawArgv, opts, v)
}
func buildSingleConfig(cmd *cobra.Command, args []string, rawArgv []string, opts *cliOptions, v *viper.Viper) (*Config, error) {
backendName := defaultBackendName
model := ""
reasoningEffort := ""
agentName := ""
promptFile := ""
promptFileExplicit := false
yolo := false
if cmd.Flags().Changed("agent") {
agentName = strings.TrimSpace(opts.Agent)
if agentName == "" {
return nil, fmt.Errorf("--agent flag requires a value")
}
if err := config.ValidateAgentName(agentName); err != nil {
return nil, fmt.Errorf("--agent flag invalid value: %w", err)
}
} else {
agentName = strings.TrimSpace(v.GetString("agent"))
if agentName != "" {
if err := config.ValidateAgentName(agentName); err != nil {
return nil, fmt.Errorf("--agent flag invalid value: %w", err)
}
}
}
var resolvedBackend, resolvedModel, resolvedPromptFile, resolvedReasoning string
if agentName != "" {
var resolvedYolo bool
resolvedBackend, resolvedModel, resolvedPromptFile, resolvedReasoning, _, _, resolvedYolo = config.ResolveAgentConfig(agentName)
yolo = resolvedYolo
}
if cmd.Flags().Changed("prompt-file") {
promptFile = strings.TrimSpace(opts.PromptFile)
if promptFile == "" {
return nil, fmt.Errorf("--prompt-file flag requires a value")
}
promptFileExplicit = true
} else if val := strings.TrimSpace(v.GetString("prompt-file")); val != "" {
promptFile = val
promptFileExplicit = true
} else {
promptFile = resolvedPromptFile
}
agentFlagChanged := cmd.Flags().Changed("agent")
backendFlagChanged := cmd.Flags().Changed("backend")
if backendFlagChanged {
backendName = strings.TrimSpace(opts.Backend)
if backendName == "" {
return nil, fmt.Errorf("--backend flag requires a value")
}
}
switch {
case agentFlagChanged && backendFlagChanged && lastFlagIndex(rawArgv, "agent") > lastFlagIndex(rawArgv, "backend"):
backendName = resolvedBackend
case !backendFlagChanged && agentName != "":
backendName = resolvedBackend
case !backendFlagChanged:
if val := strings.TrimSpace(v.GetString("backend")); val != "" {
backendName = val
}
}
modelFlagChanged := cmd.Flags().Changed("model")
if modelFlagChanged {
model = strings.TrimSpace(opts.Model)
if model == "" {
return nil, fmt.Errorf("--model flag requires a value")
}
}
switch {
case agentFlagChanged && modelFlagChanged && lastFlagIndex(rawArgv, "agent") > lastFlagIndex(rawArgv, "model"):
model = strings.TrimSpace(resolvedModel)
case !modelFlagChanged && agentName != "":
model = strings.TrimSpace(resolvedModel)
case !modelFlagChanged:
model = strings.TrimSpace(v.GetString("model"))
}
if cmd.Flags().Changed("reasoning-effort") {
reasoningEffort = strings.TrimSpace(opts.ReasoningEffort)
if reasoningEffort == "" {
return nil, fmt.Errorf("--reasoning-effort flag requires a value")
}
} else if val := strings.TrimSpace(v.GetString("reasoning-effort")); val != "" {
reasoningEffort = val
} else if agentName != "" {
reasoningEffort = strings.TrimSpace(resolvedReasoning)
}
skipChanged := cmd.Flags().Changed("skip-permissions") || cmd.Flags().Changed("dangerously-skip-permissions")
skipPermissions := false
if skipChanged {
skipPermissions = opts.SkipPermissions
} else {
skipPermissions = v.GetBool("skip-permissions")
}
if len(args) == 0 {
return nil, fmt.Errorf("task required")
}
cfg := &Config{
WorkDir: defaultWorkdir,
Backend: backendName,
Agent: agentName,
PromptFile: promptFile,
PromptFileExplicit: promptFileExplicit,
SkipPermissions: skipPermissions,
Yolo: yolo,
Model: model,
ReasoningEffort: reasoningEffort,
MaxParallelWorkers: config.ResolveMaxParallelWorkers(),
}
if args[0] == "resume" {
if len(args) < 3 {
return nil, fmt.Errorf("resume mode requires: resume <session_id> <task>")
}
cfg.Mode = "resume"
cfg.SessionID = strings.TrimSpace(args[1])
if cfg.SessionID == "" {
return nil, fmt.Errorf("resume mode requires non-empty session_id")
}
cfg.Task = args[2]
cfg.ExplicitStdin = (args[2] == "-")
if len(args) > 3 {
if args[3] == "-" {
return nil, fmt.Errorf("invalid workdir: '-' is not a valid directory path")
}
cfg.WorkDir = args[3]
}
} else {
cfg.Mode = "new"
cfg.Task = args[0]
cfg.ExplicitStdin = (args[0] == "-")
if len(args) > 1 {
if args[1] == "-" {
return nil, fmt.Errorf("invalid workdir: '-' is not a valid directory path")
}
cfg.WorkDir = args[1]
}
}
return cfg, nil
}
func lastFlagIndex(argv []string, name string) int {
if len(argv) == 0 {
return -1
}
name = strings.TrimSpace(name)
if name == "" {
return -1
}
needle := "--" + name
prefix := needle + "="
last := -1
for i, arg := range argv {
if arg == needle || strings.HasPrefix(arg, prefix) {
last = i
}
}
return last
}
func runParallelMode(cmd *cobra.Command, args []string, opts *cliOptions, v *viper.Viper, name string) int {
if len(args) > 0 {
fmt.Fprintln(os.Stderr, "ERROR: --parallel reads its task configuration from stdin; no positional arguments are allowed.")
fmt.Fprintln(os.Stderr, "Usage examples:")
fmt.Fprintf(os.Stderr, " %s --parallel < tasks.txt\n", name)
fmt.Fprintf(os.Stderr, " echo '...' | %s --parallel\n", name)
fmt.Fprintf(os.Stderr, " %s --parallel <<'EOF'\n", name)
fmt.Fprintf(os.Stderr, " %s --parallel --full-output <<'EOF' # include full task output\n", name)
return 1
}
if cmd.Flags().Changed("agent") || cmd.Flags().Changed("prompt-file") || cmd.Flags().Changed("reasoning-effort") {
fmt.Fprintln(os.Stderr, "ERROR: --parallel reads its task configuration from stdin; only --backend, --model, --full-output and --skip-permissions are allowed.")
return 1
}
backendName := defaultBackendName
if cmd.Flags().Changed("backend") {
backendName = strings.TrimSpace(opts.Backend)
if backendName == "" {
fmt.Fprintln(os.Stderr, "ERROR: --backend flag requires a value")
return 1
}
} else if val := strings.TrimSpace(v.GetString("backend")); val != "" {
backendName = val
}
model := ""
if cmd.Flags().Changed("model") {
model = strings.TrimSpace(opts.Model)
if model == "" {
fmt.Fprintln(os.Stderr, "ERROR: --model flag requires a value")
return 1
}
} else {
model = strings.TrimSpace(v.GetString("model"))
}
fullOutput := opts.FullOutput
if !cmd.Flags().Changed("full-output") && v.IsSet("full-output") {
fullOutput = v.GetBool("full-output")
}
skipChanged := cmd.Flags().Changed("skip-permissions") || cmd.Flags().Changed("dangerously-skip-permissions")
skipPermissions := false
if skipChanged {
skipPermissions = opts.SkipPermissions
} else {
skipPermissions = v.GetBool("skip-permissions")
}
backend, err := selectBackendFn(backendName)
if err != nil {
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
return 1
}
backendName = backend.Name()
data, err := io.ReadAll(stdinReader)
if err != nil {
fmt.Fprintf(os.Stderr, "ERROR: failed to read stdin: %v\n", err)
return 1
}
cfg, err := parseParallelConfig(data)
if err != nil {
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
return 1
}
cfg.GlobalBackend = backendName
model = strings.TrimSpace(model)
for i := range cfg.Tasks {
if strings.TrimSpace(cfg.Tasks[i].Backend) == "" {
cfg.Tasks[i].Backend = backendName
}
if strings.TrimSpace(cfg.Tasks[i].Model) == "" && model != "" {
cfg.Tasks[i].Model = model
}
cfg.Tasks[i].SkipPermissions = cfg.Tasks[i].SkipPermissions || skipPermissions
}
timeoutSec := resolveTimeout()
layers, err := topologicalSort(cfg.Tasks)
if err != nil {
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
return 1
}
results := executeConcurrent(layers, timeoutSec)
for i := range results {
results[i].CoverageTarget = defaultCoverageTarget
if results[i].Message == "" {
continue
}
lines := strings.Split(results[i].Message, "\n")
results[i].Coverage = extractCoverageFromLines(lines)
results[i].CoverageNum = extractCoverageNum(results[i].Coverage)
results[i].FilesChanged = extractFilesChangedFromLines(lines)
results[i].TestsPassed, results[i].TestsFailed = extractTestResultsFromLines(lines)
results[i].KeyOutput = extractKeyOutputFromLines(lines, 150)
}
fmt.Println(generateFinalOutputWithMode(results, !fullOutput))
exitCode := 0
for _, res := range results {
if res.ExitCode != 0 {
exitCode = res.ExitCode
}
}
return exitCode
}
func runSingleMode(cfg *Config, name string) int {
backend, err := selectBackendFn(cfg.Backend)
if err != nil {
logError(err.Error())
return 1
}
cfg.Backend = backend.Name()
cmdInjected := codexCommand != defaultCodexCommand
argsInjected := buildCodexArgsFn != nil && reflect.ValueOf(buildCodexArgsFn).Pointer() != reflect.ValueOf(defaultBuildArgsFn).Pointer()
if backend.Name() != defaultBackendName || !cmdInjected {
codexCommand = backend.Command()
}
if backend.Name() != defaultBackendName || !argsInjected {
buildCodexArgsFn = backend.BuildArgs
}
logInfo(fmt.Sprintf("Selected backend: %s", backend.Name()))
timeoutSec := resolveTimeout()
logInfo(fmt.Sprintf("Timeout: %ds", timeoutSec))
cfg.Timeout = timeoutSec
var taskText string
var piped bool
if cfg.ExplicitStdin {
logInfo("Explicit stdin mode: reading task from stdin")
data, err := io.ReadAll(stdinReader)
if err != nil {
logError("Failed to read stdin: " + err.Error())
return 1
}
taskText = string(data)
if taskText == "" {
logError("Explicit stdin mode requires task input from stdin")
return 1
}
piped = !isTerminal()
} else {
pipedTask, err := readPipedTask()
if err != nil {
logError("Failed to read piped stdin: " + err.Error())
return 1
}
piped = pipedTask != ""
if piped {
taskText = pipedTask
} else {
taskText = cfg.Task
}
}
if strings.TrimSpace(cfg.PromptFile) != "" {
prompt, err := readAgentPromptFile(cfg.PromptFile, cfg.PromptFileExplicit)
if err != nil {
logError("Failed to read prompt file: " + err.Error())
return 1
}
taskText = wrapTaskWithAgentPrompt(prompt, taskText)
}
useStdin := cfg.ExplicitStdin || shouldUseStdin(taskText, piped)
targetArg := taskText
if useStdin {
targetArg = "-"
}
codexArgs := buildCodexArgsFn(cfg, targetArg)
logger := activeLogger()
if logger == nil {
fmt.Fprintln(os.Stderr, "ERROR: logger is not initialized")
return 1
}
fmt.Fprintf(os.Stderr, "[%s]\n", name)
fmt.Fprintf(os.Stderr, " Backend: %s\n", cfg.Backend)
fmt.Fprintf(os.Stderr, " Command: %s %s\n", codexCommand, strings.Join(codexArgs, " "))
fmt.Fprintf(os.Stderr, " PID: %d\n", os.Getpid())
fmt.Fprintf(os.Stderr, " Log: %s\n", logger.Path())
if useStdin {
var reasons []string
if piped {
reasons = append(reasons, "piped input")
}
if cfg.ExplicitStdin {
reasons = append(reasons, "explicit \"-\"")
}
if strings.Contains(taskText, "\n") {
reasons = append(reasons, "newline")
}
if strings.Contains(taskText, "\\") {
reasons = append(reasons, "backslash")
}
if strings.Contains(taskText, "\"") {
reasons = append(reasons, "double-quote")
}
if strings.Contains(taskText, "'") {
reasons = append(reasons, "single-quote")
}
if strings.Contains(taskText, "`") {
reasons = append(reasons, "backtick")
}
if strings.Contains(taskText, "$") {
reasons = append(reasons, "dollar")
}
if len(taskText) > 800 {
reasons = append(reasons, "length>800")
}
if len(reasons) > 0 {
logWarn(fmt.Sprintf("Using stdin mode for task due to: %s", strings.Join(reasons, ", ")))
}
}
logInfo(fmt.Sprintf("%s running...", cfg.Backend))
taskSpec := TaskSpec{
Task: taskText,
WorkDir: cfg.WorkDir,
Mode: cfg.Mode,
SessionID: cfg.SessionID,
Backend: cfg.Backend,
Model: cfg.Model,
ReasoningEffort: cfg.ReasoningEffort,
Agent: cfg.Agent,
SkipPermissions: cfg.SkipPermissions,
UseStdin: useStdin,
}
result := runTaskFn(taskSpec, false, cfg.Timeout)
if result.ExitCode != 0 {
return result.ExitCode
}
// Validate that we got a meaningful output message
if strings.TrimSpace(result.Message) == "" {
logError(fmt.Sprintf("no output message: backend=%s returned empty result.Message with exit_code=0", cfg.Backend))
return 1
}
fmt.Println(result.Message)
if result.SessionID != "" {
fmt.Printf("\n---\nSESSION_ID: %s\n", result.SessionID)
}
return 0
}

View File

@@ -1,445 +0,0 @@
package wrapper
import (
"bufio"
"context"
"fmt"
"os"
"regexp"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/goccy/go-json"
)
func stripTimestampPrefix(line string) string {
line = strings.TrimSpace(line)
if strings.HasPrefix(line, "{") {
var evt struct {
Message string `json:"message"`
}
if err := json.Unmarshal([]byte(line), &evt); err == nil && evt.Message != "" {
return evt.Message
}
}
if !strings.HasPrefix(line, "[") {
return line
}
if idx := strings.Index(line, "] "); idx >= 0 {
return line[idx+2:]
}
return line
}
// TestConcurrentStressLogger 高并发压力测试
func TestConcurrentStressLogger(t *testing.T) {
if testing.Short() {
t.Skip("skipping stress test in short mode")
}
logger, err := NewLoggerWithSuffix("stress")
if err != nil {
t.Fatal(err)
}
defer logger.Close()
t.Logf("Log file: %s", logger.Path())
const (
numGoroutines = 100 // 并发协程数
logsPerRoutine = 1000 // 每个协程写入日志数
totalExpected = numGoroutines * logsPerRoutine
)
var wg sync.WaitGroup
start := time.Now()
// 启动并发写入
for i := 0; i < numGoroutines; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
for j := 0; j < logsPerRoutine; j++ {
logger.Info(fmt.Sprintf("goroutine-%d-msg-%d", id, j))
}
}(i)
}
wg.Wait()
logger.Flush()
elapsed := time.Since(start)
// 读取日志文件验证
data, err := os.ReadFile(logger.Path())
if err != nil {
t.Fatalf("failed to read log file: %v", err)
}
lines := strings.Split(strings.TrimSpace(string(data)), "\n")
actualCount := len(lines)
t.Logf("Concurrent stress test results:")
t.Logf(" Goroutines: %d", numGoroutines)
t.Logf(" Logs per goroutine: %d", logsPerRoutine)
t.Logf(" Total expected: %d", totalExpected)
t.Logf(" Total actual: %d", actualCount)
t.Logf(" Duration: %v", elapsed)
t.Logf(" Throughput: %.2f logs/sec", float64(totalExpected)/elapsed.Seconds())
// 验证日志数量
if actualCount < totalExpected/10 {
t.Errorf("too many logs lost: got %d, want at least %d (10%% of %d)",
actualCount, totalExpected/10, totalExpected)
}
t.Logf("Successfully wrote %d/%d logs (%.1f%%)",
actualCount, totalExpected, float64(actualCount)/float64(totalExpected)*100)
// 验证日志格式(纯文本,无前缀)
formatRE := regexp.MustCompile(`^goroutine-\d+-msg-\d+$`)
for i, line := range lines[:min(10, len(lines))] {
msg := stripTimestampPrefix(line)
if !formatRE.MatchString(msg) {
t.Errorf("line %d has invalid format: %s", i, line)
}
}
}
// TestConcurrentBurstLogger 突发流量测试
func TestConcurrentBurstLogger(t *testing.T) {
if testing.Short() {
t.Skip("skipping burst test in short mode")
}
logger, err := NewLoggerWithSuffix("burst")
if err != nil {
t.Fatal(err)
}
defer logger.Close()
t.Logf("Log file: %s", logger.Path())
const (
numBursts = 10
goroutinesPerBurst = 50
logsPerGoroutine = 100
)
totalLogs := 0
start := time.Now()
// 模拟突发流量
for burst := 0; burst < numBursts; burst++ {
var wg sync.WaitGroup
for i := 0; i < goroutinesPerBurst; i++ {
wg.Add(1)
totalLogs += logsPerGoroutine
go func(b, g int) {
defer wg.Done()
for j := 0; j < logsPerGoroutine; j++ {
logger.Info(fmt.Sprintf("burst-%d-goroutine-%d-msg-%d", b, g, j))
}
}(burst, i)
}
wg.Wait()
time.Sleep(10 * time.Millisecond) // 突发间隔
}
logger.Flush()
elapsed := time.Since(start)
// 验证
data, err := os.ReadFile(logger.Path())
if err != nil {
t.Fatalf("failed to read log file: %v", err)
}
lines := strings.Split(strings.TrimSpace(string(data)), "\n")
actualCount := len(lines)
t.Logf("Burst test results:")
t.Logf(" Total bursts: %d", numBursts)
t.Logf(" Goroutines per burst: %d", goroutinesPerBurst)
t.Logf(" Expected logs: %d", totalLogs)
t.Logf(" Actual logs: %d", actualCount)
t.Logf(" Duration: %v", elapsed)
t.Logf(" Throughput: %.2f logs/sec", float64(totalLogs)/elapsed.Seconds())
if actualCount < totalLogs/10 {
t.Errorf("too many logs lost: got %d, want at least %d (10%% of %d)", actualCount, totalLogs/10, totalLogs)
}
t.Logf("Successfully wrote %d/%d logs (%.1f%%)",
actualCount, totalLogs, float64(actualCount)/float64(totalLogs)*100)
}
// TestLoggerChannelCapacity 测试 channel 容量极限
func TestLoggerChannelCapacity(t *testing.T) {
logger, err := NewLoggerWithSuffix("capacity")
if err != nil {
t.Fatal(err)
}
defer logger.Close()
const rapidLogs = 2000 // 超过 channel 容量 (1000)
start := time.Now()
for i := 0; i < rapidLogs; i++ {
logger.Info(fmt.Sprintf("rapid-log-%d", i))
}
sendDuration := time.Since(start)
logger.Flush()
flushDuration := time.Since(start) - sendDuration
t.Logf("Channel capacity test:")
t.Logf(" Logs sent: %d", rapidLogs)
t.Logf(" Send duration: %v", sendDuration)
t.Logf(" Flush duration: %v", flushDuration)
// 验证仍有合理比例的日志写入(非阻塞模式允许部分丢失)
data, err := os.ReadFile(logger.Path())
if err != nil {
t.Fatal(err)
}
lines := strings.Split(strings.TrimSpace(string(data)), "\n")
actualCount := len(lines)
if actualCount < rapidLogs/10 {
t.Errorf("too many logs lost: got %d, want at least %d (10%% of %d)", actualCount, rapidLogs/10, rapidLogs)
}
t.Logf("Logs persisted: %d/%d (%.1f%%)", actualCount, rapidLogs, float64(actualCount)/float64(rapidLogs)*100)
}
// TestLoggerMemoryUsage 内存使用测试
func TestLoggerMemoryUsage(t *testing.T) {
logger, err := NewLoggerWithSuffix("memory")
if err != nil {
t.Fatal(err)
}
defer logger.Close()
const numLogs = 20000
longMessage := strings.Repeat("x", 500) // 500 字节长消息
start := time.Now()
for i := 0; i < numLogs; i++ {
logger.Info(fmt.Sprintf("log-%d-%s", i, longMessage))
}
logger.Flush()
elapsed := time.Since(start)
// 检查文件大小
info, err := os.Stat(logger.Path())
if err != nil {
t.Fatal(err)
}
expectedTotalSize := int64(numLogs * 500) // 理论最小总字节数
expectedMinSize := expectedTotalSize / 10 // 接受最多 90% 丢失
actualSize := info.Size()
t.Logf("Memory/disk usage test:")
t.Logf(" Logs written: %d", numLogs)
t.Logf(" Message size: 500 bytes")
t.Logf(" File size: %.2f MB", float64(actualSize)/1024/1024)
t.Logf(" Duration: %v", elapsed)
t.Logf(" Write speed: %.2f MB/s", float64(actualSize)/1024/1024/elapsed.Seconds())
t.Logf(" Persistence ratio: %.1f%%", float64(actualSize)/float64(expectedTotalSize)*100)
if actualSize < expectedMinSize {
t.Errorf("file size too small: got %d bytes, expected at least %d", actualSize, expectedMinSize)
}
}
// TestLoggerFlushTimeout 测试 Flush 超时机制
func TestLoggerFlushTimeout(t *testing.T) {
logger, err := NewLoggerWithSuffix("flush")
if err != nil {
t.Fatal(err)
}
defer logger.Close()
// 写入一些日志
for i := 0; i < 100; i++ {
logger.Info(fmt.Sprintf("test-log-%d", i))
}
// 测试 Flush 应该在合理时间内完成
start := time.Now()
logger.Flush()
duration := time.Since(start)
t.Logf("Flush duration: %v", duration)
if duration > 6*time.Second {
t.Errorf("Flush took too long: %v (expected < 6s)", duration)
}
}
// TestLoggerOrderPreservation 测试日志顺序保持
func TestLoggerOrderPreservation(t *testing.T) {
logger, err := NewLoggerWithSuffix("order")
if err != nil {
t.Fatal(err)
}
defer logger.Close()
const numGoroutines = 10
const logsPerRoutine = 100
var wg sync.WaitGroup
for i := 0; i < numGoroutines; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
for j := 0; j < logsPerRoutine; j++ {
logger.Info(fmt.Sprintf("G%d-SEQ%04d", id, j))
}
}(i)
}
wg.Wait()
logger.Flush()
// 读取并验证每个 goroutine 的日志顺序
data, err := os.ReadFile(logger.Path())
if err != nil {
t.Fatal(err)
}
scanner := bufio.NewScanner(strings.NewReader(string(data)))
sequences := make(map[int][]int) // goroutine ID -> sequence numbers
for scanner.Scan() {
line := stripTimestampPrefix(scanner.Text())
var gid, seq int
// Parse format: G0-SEQ0001 (without INFO: prefix)
_, err := fmt.Sscanf(line, "G%d-SEQ%04d", &gid, &seq)
if err != nil {
t.Errorf("invalid log format: %s (error: %v)", line, err)
continue
}
sequences[gid] = append(sequences[gid], seq)
}
// 验证每个 goroutine 内部顺序
for gid, seqs := range sequences {
for i := 0; i < len(seqs)-1; i++ {
if seqs[i] >= seqs[i+1] {
t.Errorf("Goroutine %d: out of order at index %d: %d >= %d",
gid, i, seqs[i], seqs[i+1])
}
}
if len(seqs) != logsPerRoutine {
t.Errorf("Goroutine %d: missing logs, got %d, want %d",
gid, len(seqs), logsPerRoutine)
}
}
t.Logf("Order preservation test: all %d goroutines maintained sequence order", len(sequences))
}
func TestConcurrentWorkerPoolLimit(t *testing.T) {
orig := runCodexTaskFn
defer func() { runCodexTaskFn = orig }()
logger, err := NewLoggerWithSuffix("pool-limit")
if err != nil {
t.Fatal(err)
}
setLogger(logger)
t.Cleanup(func() {
_ = closeLogger()
_ = logger.RemoveLogFile()
})
var active int64
var maxSeen int64
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
if task.Context == nil {
t.Fatalf("context not propagated for task %s", task.ID)
}
cur := atomic.AddInt64(&active, 1)
for {
prev := atomic.LoadInt64(&maxSeen)
if cur <= prev || atomic.CompareAndSwapInt64(&maxSeen, prev, cur) {
break
}
}
select {
case <-task.Context.Done():
atomic.AddInt64(&active, -1)
return TaskResult{TaskID: task.ID, ExitCode: 130, Error: "context cancelled"}
case <-time.After(30 * time.Millisecond):
}
atomic.AddInt64(&active, -1)
return TaskResult{TaskID: task.ID}
}
layers := [][]TaskSpec{{{ID: "t1"}, {ID: "t2"}, {ID: "t3"}, {ID: "t4"}, {ID: "t5"}}}
results := executeConcurrentWithContext(context.Background(), layers, 5, 2)
if len(results) != 5 {
t.Fatalf("unexpected result count: got %d", len(results))
}
if maxSeen > 2 {
t.Fatalf("worker pool exceeded limit: saw %d active workers", maxSeen)
}
logger.Flush()
data, err := os.ReadFile(logger.Path())
if err != nil {
t.Fatalf("failed to read log file: %v", err)
}
content := string(data)
if !strings.Contains(content, "worker_limit=2") {
t.Fatalf("concurrency planning log missing, content: %s", content)
}
if !strings.Contains(content, "parallel: start") {
t.Fatalf("concurrency start logs missing, content: %s", content)
}
}
func TestConcurrentCancellationPropagation(t *testing.T) {
orig := runCodexTaskFn
defer func() { runCodexTaskFn = orig }()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
if task.Context == nil {
t.Fatalf("context not propagated for task %s", task.ID)
}
select {
case <-task.Context.Done():
return TaskResult{TaskID: task.ID, ExitCode: 130, Error: "context cancelled"}
case <-time.After(200 * time.Millisecond):
return TaskResult{TaskID: task.ID}
}
}
layers := [][]TaskSpec{{{ID: "a"}, {ID: "b"}, {ID: "c"}}}
go func() {
time.Sleep(50 * time.Millisecond)
cancel()
}()
results := executeConcurrentWithContext(ctx, layers, 1, 2)
if len(results) != 3 {
t.Fatalf("unexpected result count: got %d", len(results))
}
cancelled := 0
for _, res := range results {
if res.ExitCode != 0 {
cancelled++
}
}
if cancelled == 0 {
t.Fatalf("expected cancellation to propagate, got results: %+v", results)
}
}

View File

@@ -1,7 +0,0 @@
package wrapper
import config "codeagent-wrapper/internal/config"
// Keep the existing Config name throughout the codebase, but source the
// implementation from internal/config.
type Config = config.Config

View File

@@ -1,54 +0,0 @@
package wrapper
import (
"context"
backend "codeagent-wrapper/internal/backend"
config "codeagent-wrapper/internal/config"
executor "codeagent-wrapper/internal/executor"
)
// defaultRunCodexTaskFn is the default implementation of runCodexTaskFn (exposed for test reset).
func defaultRunCodexTaskFn(task TaskSpec, timeout int) TaskResult {
return executor.DefaultRunCodexTaskFn(task, timeout)
}
var runCodexTaskFn = defaultRunCodexTaskFn
func topologicalSort(tasks []TaskSpec) ([][]TaskSpec, error) {
return executor.TopologicalSort(tasks)
}
func executeConcurrent(layers [][]TaskSpec, timeout int) []TaskResult {
maxWorkers := config.ResolveMaxParallelWorkers()
return executeConcurrentWithContext(context.Background(), layers, timeout, maxWorkers)
}
func executeConcurrentWithContext(parentCtx context.Context, layers [][]TaskSpec, timeout int, maxWorkers int) []TaskResult {
return executor.ExecuteConcurrentWithContext(parentCtx, layers, timeout, maxWorkers, runCodexTaskFn)
}
func generateFinalOutput(results []TaskResult) string {
return executor.GenerateFinalOutput(results)
}
func generateFinalOutputWithMode(results []TaskResult, summaryOnly bool) string {
return executor.GenerateFinalOutputWithMode(results, summaryOnly)
}
func buildCodexArgs(cfg *Config, targetArg string) []string {
return backend.BuildCodexArgs(cfg, targetArg)
}
func runCodexTask(taskSpec TaskSpec, silent bool, timeoutSec int) TaskResult {
return runCodexTaskWithContext(context.Background(), taskSpec, nil, nil, false, silent, timeoutSec)
}
func runCodexProcess(parentCtx context.Context, codexArgs []string, taskText string, useStdin bool, timeoutSec int) (message, threadID string, exitCode int) {
res := runCodexTaskWithContext(parentCtx, TaskSpec{Task: taskText, WorkDir: defaultWorkdir, Mode: "new", UseStdin: useStdin}, nil, codexArgs, true, false, timeoutSec)
return res.Message, res.SessionID, res.ExitCode
}
func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backend Backend, customArgs []string, useCustomArgs bool, silent bool, timeoutSec int) TaskResult {
return executor.RunCodexTaskWithContext(parentCtx, taskSpec, backend, codexCommand, buildCodexArgsFn, customArgs, useCustomArgs, silent, timeoutSec)
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,26 +0,0 @@
package wrapper
import ilogger "codeagent-wrapper/internal/logger"
type Logger = ilogger.Logger
type CleanupStats = ilogger.CleanupStats
func NewLogger() (*Logger, error) { return ilogger.NewLogger() }
func NewLoggerWithSuffix(suffix string) (*Logger, error) { return ilogger.NewLoggerWithSuffix(suffix) }
func setLogger(l *Logger) { ilogger.SetLogger(l) }
func closeLogger() error { return ilogger.CloseLogger() }
func activeLogger() *Logger { return ilogger.ActiveLogger() }
func logInfo(msg string) { ilogger.LogInfo(msg) }
func logWarn(msg string) { ilogger.LogWarn(msg) }
func logError(msg string) { ilogger.LogError(msg) }
func cleanupOldLogs() (CleanupStats, error) { return ilogger.CleanupOldLogs() }
func sanitizeLogSuffix(raw string) string { return ilogger.SanitizeLogSuffix(raw) }

View File

@@ -1,949 +0,0 @@
package wrapper
import (
"bytes"
"codeagent-wrapper/internal/logger"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
)
type integrationSummary struct {
Total int `json:"total"`
Success int `json:"success"`
Failed int `json:"failed"`
}
type integrationOutput struct {
Results []TaskResult `json:"results"`
Summary integrationSummary `json:"summary"`
}
func captureStdout(t *testing.T, fn func()) string {
t.Helper()
old := os.Stdout
r, w, _ := os.Pipe()
os.Stdout = w
fn()
w.Close()
os.Stdout = old
var buf bytes.Buffer
if _, err := io.Copy(&buf, r); err != nil {
t.Fatalf("io.Copy() error = %v", err)
}
return buf.String()
}
func parseIntegrationOutput(t *testing.T, out string) integrationOutput {
t.Helper()
var payload integrationOutput
lines := strings.Split(out, "\n")
var currentTask *TaskResult
inTaskResults := false
for _, line := range lines {
line = strings.TrimSpace(line)
// Parse new format header: "X tasks | Y passed | Z failed"
if strings.Contains(line, "tasks |") && strings.Contains(line, "passed |") {
parts := strings.Split(line, "|")
for _, p := range parts {
p = strings.TrimSpace(p)
if strings.HasSuffix(p, "tasks") {
if _, err := fmt.Sscanf(p, "%d tasks", &payload.Summary.Total); err != nil {
t.Fatalf("failed to parse total tasks from %q: %v", p, err)
}
} else if strings.HasSuffix(p, "passed") {
if _, err := fmt.Sscanf(p, "%d passed", &payload.Summary.Success); err != nil {
t.Fatalf("failed to parse passed tasks from %q: %v", p, err)
}
} else if strings.HasSuffix(p, "failed") {
if _, err := fmt.Sscanf(p, "%d failed", &payload.Summary.Failed); err != nil {
t.Fatalf("failed to parse failed tasks from %q: %v", p, err)
}
}
}
} else if strings.HasPrefix(line, "Total:") {
// Legacy format: "Total: X | Success: Y | Failed: Z"
parts := strings.Split(line, "|")
for _, p := range parts {
p = strings.TrimSpace(p)
if strings.HasPrefix(p, "Total:") {
if _, err := fmt.Sscanf(p, "Total: %d", &payload.Summary.Total); err != nil {
t.Fatalf("failed to parse total tasks from %q: %v", p, err)
}
} else if strings.HasPrefix(p, "Success:") {
if _, err := fmt.Sscanf(p, "Success: %d", &payload.Summary.Success); err != nil {
t.Fatalf("failed to parse passed tasks from %q: %v", p, err)
}
} else if strings.HasPrefix(p, "Failed:") {
if _, err := fmt.Sscanf(p, "Failed: %d", &payload.Summary.Failed); err != nil {
t.Fatalf("failed to parse failed tasks from %q: %v", p, err)
}
}
}
} else if line == "## Task Results" {
inTaskResults = true
} else if line == "## Summary" {
// End of task results section
if currentTask != nil {
payload.Results = append(payload.Results, *currentTask)
currentTask = nil
}
inTaskResults = false
} else if inTaskResults && strings.HasPrefix(line, "### ") {
// New task: ### task-id ✓ 92% or ### task-id PASS 92% (ASCII mode)
if currentTask != nil {
payload.Results = append(payload.Results, *currentTask)
}
currentTask = &TaskResult{}
taskLine := strings.TrimPrefix(line, "### ")
parseMarker := func(marker string, exitCode int) bool {
needle := " " + marker
if !strings.Contains(taskLine, needle) {
return false
}
parts := strings.Split(taskLine, needle)
currentTask.TaskID = strings.TrimSpace(parts[0])
currentTask.ExitCode = exitCode
if exitCode == 0 && len(parts) > 1 {
coveragePart := strings.TrimSpace(parts[1])
if strings.HasSuffix(coveragePart, "%") {
currentTask.Coverage = coveragePart
}
}
return true
}
switch {
case parseMarker("✓", 0), parseMarker("PASS", 0):
// ok
case parseMarker("⚠️", 0), parseMarker("WARN", 0):
// warning
case parseMarker("✗", 1), parseMarker("FAIL", 1):
// fail
default:
currentTask.TaskID = taskLine
}
} else if currentTask != nil && inTaskResults {
// Parse task details
if strings.HasPrefix(line, "Exit code:") {
if _, err := fmt.Sscanf(line, "Exit code: %d", &currentTask.ExitCode); err != nil {
t.Fatalf("failed to parse exit code from %q: %v", line, err)
}
} else if strings.HasPrefix(line, "Error:") {
currentTask.Error = strings.TrimPrefix(line, "Error: ")
} else if strings.HasPrefix(line, "Log:") {
currentTask.LogPath = strings.TrimSpace(strings.TrimPrefix(line, "Log:"))
} else if strings.HasPrefix(line, "Did:") {
currentTask.KeyOutput = strings.TrimSpace(strings.TrimPrefix(line, "Did:"))
} else if strings.HasPrefix(line, "Detail:") {
// Error detail for failed tasks
if currentTask.Message == "" {
currentTask.Message = strings.TrimSpace(strings.TrimPrefix(line, "Detail:"))
}
}
} else if strings.HasPrefix(line, "--- Task:") {
// Legacy full output format
if currentTask != nil {
payload.Results = append(payload.Results, *currentTask)
}
currentTask = &TaskResult{}
currentTask.TaskID = strings.TrimSuffix(strings.TrimPrefix(line, "--- Task: "), " ---")
} else if currentTask != nil && !inTaskResults {
// Legacy format parsing
if strings.HasPrefix(line, "Status: SUCCESS") {
currentTask.ExitCode = 0
} else if strings.HasPrefix(line, "Status: FAILED") {
if strings.Contains(line, "exit code") {
if _, err := fmt.Sscanf(line, "Status: FAILED (exit code %d)", &currentTask.ExitCode); err != nil {
t.Fatalf("failed to parse exit code from %q: %v", line, err)
}
} else {
currentTask.ExitCode = 1
}
} else if strings.HasPrefix(line, "Error:") {
currentTask.Error = strings.TrimPrefix(line, "Error: ")
} else if strings.HasPrefix(line, "Session:") {
currentTask.SessionID = strings.TrimPrefix(line, "Session: ")
} else if strings.HasPrefix(line, "Log:") {
currentTask.LogPath = strings.TrimSpace(strings.TrimPrefix(line, "Log:"))
}
}
}
// Handle last task
if currentTask != nil {
payload.Results = append(payload.Results, *currentTask)
}
return payload
}
func findResultByID(t *testing.T, payload integrationOutput, id string) TaskResult {
t.Helper()
for _, res := range payload.Results {
if res.TaskID == id {
return res
}
}
t.Fatalf("result for task %s not found", id)
return TaskResult{}
}
func setTempDirEnv(t *testing.T, dir string) string {
t.Helper()
resolved := dir
if eval, err := filepath.EvalSymlinks(dir); err == nil {
resolved = eval
}
t.Setenv("TMPDIR", resolved)
t.Setenv("TEMP", resolved)
t.Setenv("TMP", resolved)
return resolved
}
func createTempLog(t *testing.T, dir, name string) string {
t.Helper()
path := filepath.Join(dir, name)
if err := os.WriteFile(path, []byte("test"), 0o644); err != nil {
t.Fatalf("failed to create temp log %s: %v", path, err)
}
return path
}
func stubProcessRunning(t *testing.T, fn func(int) bool) {
t.Helper()
t.Cleanup(logger.SetProcessRunningCheck(fn))
}
func stubProcessStartTime(t *testing.T, fn func(int) time.Time) {
t.Helper()
t.Cleanup(logger.SetProcessStartTimeFn(fn))
}
func TestRunParallelEndToEnd_OrderAndConcurrency(t *testing.T) {
defer resetTestHooks()
origRun := runCodexTaskFn
t.Cleanup(func() {
runCodexTaskFn = origRun
resetTestHooks()
})
input := `---TASK---
id: A
---CONTENT---
task-a
---TASK---
id: B
dependencies: A
---CONTENT---
task-b
---TASK---
id: C
dependencies: B
---CONTENT---
task-c
---TASK---
id: D
---CONTENT---
task-d
---TASK---
id: E
---CONTENT---
task-e`
stdinReader = bytes.NewReader([]byte(input))
os.Args = []string{"codeagent-wrapper", "--parallel"}
var mu sync.Mutex
starts := make(map[string]time.Time)
ends := make(map[string]time.Time)
var running int64
var maxParallel int64
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
start := time.Now()
mu.Lock()
starts[task.ID] = start
mu.Unlock()
cur := atomic.AddInt64(&running, 1)
for {
prev := atomic.LoadInt64(&maxParallel)
if cur <= prev {
break
}
if atomic.CompareAndSwapInt64(&maxParallel, prev, cur) {
break
}
}
time.Sleep(40 * time.Millisecond)
mu.Lock()
ends[task.ID] = time.Now()
mu.Unlock()
atomic.AddInt64(&running, -1)
return TaskResult{TaskID: task.ID, ExitCode: 0, Message: task.Task}
}
var exitCode int
output := captureStdout(t, func() {
exitCode = run()
})
if exitCode != 0 {
t.Fatalf("run() exit = %d, want 0", exitCode)
}
payload := parseIntegrationOutput(t, output)
if payload.Summary.Failed != 0 || payload.Summary.Total != 5 || payload.Summary.Success != 5 {
t.Fatalf("unexpected summary: %+v", payload.Summary)
}
aEnd := ends["A"]
bStart := starts["B"]
cStart := starts["C"]
bEnd := ends["B"]
if aEnd.IsZero() || bStart.IsZero() || bEnd.IsZero() || cStart.IsZero() {
t.Fatalf("missing timestamps, starts=%v ends=%v", starts, ends)
}
if !aEnd.Before(bStart) && !aEnd.Equal(bStart) {
t.Fatalf("B should start after A ends: A_end=%v B_start=%v", aEnd, bStart)
}
if !bEnd.Before(cStart) && !bEnd.Equal(cStart) {
t.Fatalf("C should start after B ends: B_end=%v C_start=%v", bEnd, cStart)
}
dStart := starts["D"]
eStart := starts["E"]
if dStart.IsZero() || eStart.IsZero() {
t.Fatalf("missing D/E start times: %v", starts)
}
delta := dStart.Sub(eStart)
if delta < 0 {
delta = -delta
}
if delta > 25*time.Millisecond {
t.Fatalf("D and E should run in parallel, delta=%v", delta)
}
if maxParallel < 2 {
t.Fatalf("expected at least 2 concurrent tasks, got %d", maxParallel)
}
}
func TestRunParallelCycleDetectionStopsExecution(t *testing.T) {
defer resetTestHooks()
origRun := runCodexTaskFn
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
t.Fatalf("task %s should not execute on cycle", task.ID)
return TaskResult{}
}
t.Cleanup(func() {
runCodexTaskFn = origRun
resetTestHooks()
})
input := `---TASK---
id: A
dependencies: B
---CONTENT---
a
---TASK---
id: B
dependencies: A
---CONTENT---
b`
stdinReader = bytes.NewReader([]byte(input))
os.Args = []string{"codeagent-wrapper", "--parallel"}
exitCode := 0
output := captureStdout(t, func() {
exitCode = run()
})
if exitCode == 0 {
t.Fatalf("cycle should cause non-zero exit, got %d", exitCode)
}
if strings.TrimSpace(output) != "" {
t.Fatalf("expected no JSON output on cycle, got %q", output)
}
}
func TestRunParallelOutputsIncludeLogPaths(t *testing.T) {
defer resetTestHooks()
origRun := runCodexTaskFn
t.Cleanup(func() {
runCodexTaskFn = origRun
resetTestHooks()
})
tempDir := t.TempDir()
logPathFor := func(id string) string {
return filepath.Join(tempDir, fmt.Sprintf("%s.log", id))
}
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
res := TaskResult{
TaskID: task.ID,
Message: fmt.Sprintf("result-%s", task.ID),
SessionID: fmt.Sprintf("session-%s", task.ID),
LogPath: logPathFor(task.ID),
}
if task.ID == "beta" {
res.ExitCode = 9
res.Error = "boom"
}
return res
}
input := `---TASK---
id: alpha
---CONTENT---
task-alpha
---TASK---
id: beta
---CONTENT---
task-beta`
stdinReader = bytes.NewReader([]byte(input))
os.Args = []string{"codeagent-wrapper", "--parallel"}
var exitCode int
output := captureStdout(t, func() {
exitCode = run()
})
if exitCode != 9 {
t.Fatalf("parallel run exit=%d, want 9", exitCode)
}
payload := parseIntegrationOutput(t, output)
alpha := findResultByID(t, payload, "alpha")
beta := findResultByID(t, payload, "beta")
if alpha.LogPath != logPathFor("alpha") {
t.Fatalf("alpha log path = %q, want %q", alpha.LogPath, logPathFor("alpha"))
}
if beta.LogPath != logPathFor("beta") {
t.Fatalf("beta log path = %q, want %q", beta.LogPath, logPathFor("beta"))
}
for _, id := range []string{"alpha", "beta"} {
// Summary mode shows log paths in table format, not "Log: xxx"
logPath := logPathFor(id)
if !strings.Contains(output, logPath) {
t.Fatalf("parallel output missing log path %q for %s:\n%s", logPath, id, output)
}
}
}
func TestRunParallelStartupLogsPrinted(t *testing.T) {
defer resetTestHooks()
tempDir := setTempDirEnv(t, t.TempDir())
input := `---TASK---
id: a
---CONTENT---
fail
---TASK---
id: b
---CONTENT---
ok-b
---TASK---
id: c
dependencies: a
---CONTENT---
should-skip
---TASK---
id: d
---CONTENT---
ok-d`
stdinReader = bytes.NewReader([]byte(input))
os.Args = []string{"codeagent-wrapper", "--parallel"}
expectedLog := filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", os.Getpid()))
origRun := runCodexTaskFn
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
path := expectedLog
if logger := activeLogger(); logger != nil && logger.Path() != "" {
path = logger.Path()
}
if task.ID == "a" {
return TaskResult{TaskID: task.ID, ExitCode: 3, Error: "boom", LogPath: path}
}
return TaskResult{TaskID: task.ID, ExitCode: 0, Message: task.Task, LogPath: path}
}
t.Cleanup(func() { runCodexTaskFn = origRun })
var exitCode int
var stdoutOut string
stderrOut := captureStderr(t, func() {
stdoutOut = captureStdout(t, func() {
exitCode = run()
})
})
if exitCode == 0 {
t.Fatalf("expected non-zero exit due to task failure, got %d", exitCode)
}
if stdoutOut == "" {
t.Fatalf("expected parallel summary on stdout")
}
lines := strings.Split(strings.TrimSpace(stderrOut), "\n")
var bannerSeen bool
var taskLines []string
for _, raw := range lines {
line := strings.TrimSpace(raw)
if line == "" {
continue
}
if line == "=== Starting Parallel Execution ===" {
if bannerSeen {
t.Fatalf("banner printed multiple times:\n%s", stderrOut)
}
bannerSeen = true
continue
}
taskLines = append(taskLines, line)
}
if !bannerSeen {
t.Fatalf("expected startup banner in stderr, got:\n%s", stderrOut)
}
// After parallel log isolation fix, each task has its own log file
expectedLines := map[string]struct{}{
fmt.Sprintf("Task a: Log: %s", filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d-a.log", os.Getpid()))): {},
fmt.Sprintf("Task b: Log: %s", filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d-b.log", os.Getpid()))): {},
fmt.Sprintf("Task d: Log: %s", filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d-d.log", os.Getpid()))): {},
}
if len(taskLines) != len(expectedLines) {
t.Fatalf("startup log lines mismatch, got %d lines:\n%s", len(taskLines), stderrOut)
}
for _, line := range taskLines {
if _, ok := expectedLines[line]; !ok {
t.Fatalf("unexpected startup line %q\nstderr:\n%s", line, stderrOut)
}
}
}
func TestRunNonParallelOutputsIncludeLogPathsIntegration(t *testing.T) {
defer resetTestHooks()
tempDir := setTempDirEnv(t, t.TempDir())
os.Args = []string{"codeagent-wrapper", "integration-log-check"}
stdinReader = strings.NewReader("")
isTerminalFn = func() bool { return true }
codexCommand = "echo"
buildCodexArgsFn = func(cfg *Config, targetArg string) []string {
return []string{`{"type":"thread.started","thread_id":"integration-session"}` + "\n" + `{"type":"item.completed","item":{"type":"agent_message","text":"done"}}`}
}
var exitCode int
stderr := captureStderr(t, func() {
_ = captureStdout(t, func() {
exitCode = run()
})
})
if exitCode != 0 {
t.Fatalf("run() exit=%d, want 0", exitCode)
}
expectedLog := filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", os.Getpid()))
wantLine := fmt.Sprintf("Log: %s", expectedLog)
if !strings.Contains(stderr, wantLine) {
t.Fatalf("stderr missing %q, got: %q", wantLine, stderr)
}
}
func TestRunParallelPartialFailureBlocksDependents(t *testing.T) {
defer resetTestHooks()
origRun := runCodexTaskFn
t.Cleanup(func() {
runCodexTaskFn = origRun
resetTestHooks()
})
tempDir := t.TempDir()
logPathFor := func(id string) string {
return filepath.Join(tempDir, fmt.Sprintf("%s.log", id))
}
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
path := logPathFor(task.ID)
if task.ID == "A" {
return TaskResult{TaskID: "A", ExitCode: 2, Error: "boom", LogPath: path}
}
return TaskResult{TaskID: task.ID, ExitCode: 0, Message: task.Task, LogPath: path}
}
input := `---TASK---
id: A
---CONTENT---
fail
---TASK---
id: B
dependencies: A
---CONTENT---
blocked
---TASK---
id: D
---CONTENT---
ok-d
---TASK---
id: E
---CONTENT---
ok-e`
stdinReader = bytes.NewReader([]byte(input))
os.Args = []string{"codeagent-wrapper", "--parallel"}
var exitCode int
output := captureStdout(t, func() {
exitCode = run()
})
payload := parseIntegrationOutput(t, output)
if exitCode == 0 {
t.Fatalf("expected non-zero exit when a task fails, got %d", exitCode)
}
resA := findResultByID(t, payload, "A")
resB := findResultByID(t, payload, "B")
resD := findResultByID(t, payload, "D")
resE := findResultByID(t, payload, "E")
if resA.ExitCode == 0 {
t.Fatalf("task A should fail, got %+v", resA)
}
if resB.ExitCode == 0 || !strings.Contains(resB.Error, "dependencies") {
t.Fatalf("task B should be skipped due to dependency failure, got %+v", resB)
}
if resD.ExitCode != 0 || resE.ExitCode != 0 {
t.Fatalf("independent tasks should run successfully, D=%+v E=%+v", resD, resE)
}
if payload.Summary.Failed != 2 || payload.Summary.Total != 4 {
t.Fatalf("unexpected summary after partial failure: %+v", payload.Summary)
}
if resA.LogPath != logPathFor("A") {
t.Fatalf("task A log path = %q, want %q", resA.LogPath, logPathFor("A"))
}
if resB.LogPath != "" {
t.Fatalf("task B should not report a log path when skipped, got %q", resB.LogPath)
}
if resD.LogPath != logPathFor("D") || resE.LogPath != logPathFor("E") {
t.Fatalf("expected log paths for D/E, got D=%q E=%q", resD.LogPath, resE.LogPath)
}
// Summary mode shows log paths in table, verify they appear in output
for _, id := range []string{"A", "D", "E"} {
logPath := logPathFor(id)
if !strings.Contains(output, logPath) {
t.Fatalf("task %s log path %q not found in output:\n%s", id, logPath, output)
}
}
// Task B was skipped, should have "-" or empty log path in table
if resB.LogPath != "" {
t.Fatalf("skipped task B should have empty log path, got %q", resB.LogPath)
}
}
func TestRunParallelTimeoutPropagation(t *testing.T) {
defer resetTestHooks()
origRun := runCodexTaskFn
t.Cleanup(func() {
runCodexTaskFn = origRun
resetTestHooks()
})
var receivedTimeout int
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
receivedTimeout = timeout
return TaskResult{TaskID: task.ID, ExitCode: 124, Error: "timeout"}
}
t.Setenv("CODEX_TIMEOUT", "1")
input := `---TASK---
id: T
---CONTENT---
slow`
stdinReader = bytes.NewReader([]byte(input))
os.Args = []string{"codeagent-wrapper", "--parallel"}
exitCode := 0
output := captureStdout(t, func() {
exitCode = run()
})
payload := parseIntegrationOutput(t, output)
if receivedTimeout != 1 {
t.Fatalf("expected timeout 1s to propagate, got %d", receivedTimeout)
}
if exitCode != 124 {
t.Fatalf("expected timeout exit code 124, got %d", exitCode)
}
if payload.Summary.Failed != 1 || payload.Summary.Total != 1 {
t.Fatalf("unexpected summary for timeout case: %+v", payload.Summary)
}
res := findResultByID(t, payload, "T")
if res.Error == "" || res.ExitCode != 124 {
t.Fatalf("timeout result not propagated, got %+v", res)
}
}
func TestRunConcurrentSpeedupBenchmark(t *testing.T) {
defer resetTestHooks()
origRun := runCodexTaskFn
t.Cleanup(func() {
runCodexTaskFn = origRun
resetTestHooks()
})
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
time.Sleep(50 * time.Millisecond)
return TaskResult{TaskID: task.ID}
}
tasks := make([]TaskSpec, 10)
for i := range tasks {
tasks[i] = TaskSpec{ID: fmt.Sprintf("task-%d", i)}
}
layers := [][]TaskSpec{tasks}
serialStart := time.Now()
for _, task := range tasks {
_ = runCodexTaskFn(task, 5)
}
serialElapsed := time.Since(serialStart)
concurrentStart := time.Now()
_ = executeConcurrent(layers, 5)
concurrentElapsed := time.Since(concurrentStart)
if concurrentElapsed >= serialElapsed/5 {
t.Fatalf("expected concurrent time <20%% of serial, serial=%v concurrent=%v", serialElapsed, concurrentElapsed)
}
ratio := float64(concurrentElapsed) / float64(serialElapsed)
t.Logf("speedup ratio (concurrent/serial)=%.3f", ratio)
}
func TestRunStartupCleanupRemovesOrphansEndToEnd(t *testing.T) {
defer resetTestHooks()
tempDir := setTempDirEnv(t, t.TempDir())
orphanA := createTempLog(t, tempDir, "codeagent-wrapper-5001.log")
orphanB := createTempLog(t, tempDir, "codeagent-wrapper-5002-extra.log")
orphanC := createTempLog(t, tempDir, "codeagent-wrapper-5003-suffix.log")
runningPID := 81234
runningLog := createTempLog(t, tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", runningPID))
unrelated := createTempLog(t, tempDir, "wrapper.log")
stubProcessRunning(t, func(pid int) bool {
return pid == runningPID || pid == os.Getpid()
})
stubProcessStartTime(t, func(pid int) time.Time {
if pid == runningPID || pid == os.Getpid() {
return time.Now().Add(-1 * time.Hour)
}
return time.Time{}
})
codexCommand = createFakeCodexScript(t, "tid-startup", "ok")
stdinReader = strings.NewReader("")
isTerminalFn = func() bool { return true }
os.Args = []string{"codeagent-wrapper", "task"}
if exit := run(); exit != 0 {
t.Fatalf("run() exit=%d, want 0", exit)
}
for _, orphan := range []string{orphanA, orphanB, orphanC} {
if _, err := os.Stat(orphan); !os.IsNotExist(err) {
t.Fatalf("expected orphan %s to be removed, err=%v", orphan, err)
}
}
if _, err := os.Stat(runningLog); err != nil {
t.Fatalf("expected running log to remain, err=%v", err)
}
if _, err := os.Stat(unrelated); err != nil {
t.Fatalf("expected unrelated file to remain, err=%v", err)
}
}
func TestRunStartupCleanupConcurrentWrappers(t *testing.T) {
defer resetTestHooks()
tempDir := setTempDirEnv(t, t.TempDir())
const totalLogs = 40
for i := 0; i < totalLogs; i++ {
createTempLog(t, tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", 9000+i))
}
stubProcessRunning(t, func(pid int) bool {
return false
})
stubProcessStartTime(t, func(int) time.Time { return time.Time{} })
var wg sync.WaitGroup
const instances = 5
start := make(chan struct{})
for i := 0; i < instances; i++ {
wg.Add(1)
go func() {
defer wg.Done()
<-start
runStartupCleanup()
}()
}
close(start)
wg.Wait()
matches, err := filepath.Glob(filepath.Join(tempDir, "codeagent-wrapper-*.log"))
if err != nil {
t.Fatalf("glob error: %v", err)
}
if len(matches) != 0 {
t.Fatalf("expected all orphan logs to be removed, remaining=%v", matches)
}
}
func TestRunCleanupFlagEndToEnd_Success(t *testing.T) {
defer resetTestHooks()
tempDir := setTempDirEnv(t, t.TempDir())
staleA := createTempLog(t, tempDir, "codeagent-wrapper-2100.log")
staleB := createTempLog(t, tempDir, "codeagent-wrapper-2200-extra.log")
keeper := createTempLog(t, tempDir, "codeagent-wrapper-2300.log")
stubProcessRunning(t, func(pid int) bool {
return pid == 2300 || pid == os.Getpid()
})
stubProcessStartTime(t, func(pid int) time.Time {
if pid == 2300 || pid == os.Getpid() {
return time.Now().Add(-1 * time.Hour)
}
return time.Time{}
})
os.Args = []string{"codeagent-wrapper", "--cleanup"}
var exitCode int
output := captureStdout(t, func() {
exitCode = run()
})
if exitCode != 0 {
t.Fatalf("cleanup exit = %d, want 0", exitCode)
}
// Check that output contains expected counts and file names
if !strings.Contains(output, "Cleanup completed") {
t.Fatalf("missing 'Cleanup completed' in output: %q", output)
}
if !strings.Contains(output, "Files scanned: 3") {
t.Fatalf("missing 'Files scanned: 3' in output: %q", output)
}
if !strings.Contains(output, "Files deleted: 2") {
t.Fatalf("missing 'Files deleted: 2' in output: %q", output)
}
if !strings.Contains(output, "Files kept: 1") {
t.Fatalf("missing 'Files kept: 1' in output: %q", output)
}
if !strings.Contains(output, "codeagent-wrapper-2100.log") || !strings.Contains(output, "codeagent-wrapper-2200-extra.log") {
t.Fatalf("missing deleted file names in output: %q", output)
}
if !strings.Contains(output, "codeagent-wrapper-2300.log") {
t.Fatalf("missing kept file names in output: %q", output)
}
for _, path := range []string{staleA, staleB} {
if _, err := os.Stat(path); !os.IsNotExist(err) {
t.Fatalf("expected %s to be removed, err=%v", path, err)
}
}
if _, err := os.Stat(keeper); err != nil {
t.Fatalf("expected kept log to remain, err=%v", err)
}
currentLog := filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", os.Getpid()))
if _, err := os.Stat(currentLog); err == nil {
t.Fatalf("cleanup mode should not create new log file %s", currentLog)
} else if !os.IsNotExist(err) {
t.Fatalf("stat(%s) unexpected error: %v", currentLog, err)
}
}
func TestRunCleanupFlagEndToEnd_FailureDoesNotAffectStartup(t *testing.T) {
defer resetTestHooks()
tempDir := setTempDirEnv(t, t.TempDir())
calls := 0
cleanupLogsFn = func() (CleanupStats, error) {
calls++
return CleanupStats{Scanned: 1}, fmt.Errorf("permission denied")
}
os.Args = []string{"codeagent-wrapper", "--cleanup"}
var exitCode int
errOutput := captureStderr(t, func() {
exitCode = run()
})
if exitCode != 1 {
t.Fatalf("cleanup failure exit = %d, want 1", exitCode)
}
if !strings.Contains(errOutput, "Cleanup failed") || !strings.Contains(errOutput, "permission denied") {
t.Fatalf("cleanup stderr = %q, want failure message", errOutput)
}
if calls != 1 {
t.Fatalf("cleanup called %d times, want 1", calls)
}
currentLog := filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", os.Getpid()))
if _, err := os.Stat(currentLog); err == nil {
t.Fatalf("cleanup failure should not create new log file %s", currentLog)
} else if !os.IsNotExist(err) {
t.Fatalf("stat(%s) unexpected error: %v", currentLog, err)
}
cleanupLogsFn = func() (CleanupStats, error) {
return CleanupStats{}, nil
}
codexCommand = createFakeCodexScript(t, "tid-cleanup-e2e", "ok")
stdinReader = strings.NewReader("")
isTerminalFn = func() bool { return true }
os.Args = []string{"codeagent-wrapper", "post-cleanup task"}
var normalExit int
normalOutput := captureStdout(t, func() {
normalExit = run()
})
if normalExit != 0 {
t.Fatalf("normal run exit = %d, want 0", normalExit)
}
if !strings.Contains(normalOutput, "ok") {
t.Fatalf("normal run output = %q, want codex output", normalOutput)
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,9 +0,0 @@
package wrapper
import (
executor "codeagent-wrapper/internal/executor"
)
func parseParallelConfig(data []byte) (*ParallelConfig, error) {
return executor.ParseParallelConfig(data)
}

View File

@@ -1,34 +0,0 @@
package wrapper
import (
"bufio"
"io"
parser "codeagent-wrapper/internal/parser"
"github.com/goccy/go-json"
)
func parseJSONStream(r io.Reader) (message, threadID string) {
return parseJSONStreamWithLog(r, logWarn, logInfo)
}
func parseJSONStreamWithWarn(r io.Reader, warnFn func(string)) (message, threadID string) {
return parseJSONStreamWithLog(r, warnFn, logInfo)
}
func parseJSONStreamWithLog(r io.Reader, warnFn func(string), infoFn func(string)) (message, threadID string) {
return parseJSONStreamInternal(r, warnFn, infoFn, nil, nil)
}
func parseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(string), onMessage func(), onComplete func()) (message, threadID string) {
return parser.ParseJSONStreamInternal(r, warnFn, infoFn, onMessage, onComplete)
}
func hasKey(m map[string]json.RawMessage, key string) bool { return parser.HasKey(m, key) }
func discardInvalidJSON(decoder *json.Decoder, reader *bufio.Reader) (*bufio.Reader, error) {
return parser.DiscardInvalidJSON(decoder, reader)
}
func normalizeText(text interface{}) string { return parser.NormalizeText(text) }

View File

@@ -1,8 +0,0 @@
package wrapper
import executor "codeagent-wrapper/internal/executor"
// Type aliases to keep existing names in the wrapper package.
type ParallelConfig = executor.ParallelConfig
type TaskSpec = executor.TaskSpec
type TaskResult = executor.TaskResult

View File

@@ -1,30 +0,0 @@
package wrapper
import (
"os"
"testing"
)
func TestDefaultIsTerminalCoverage(t *testing.T) {
oldStdin := os.Stdin
t.Cleanup(func() { os.Stdin = oldStdin })
f, err := os.CreateTemp(t.TempDir(), "stdin-*")
if err != nil {
t.Fatalf("os.CreateTemp() error = %v", err)
}
defer os.Remove(f.Name())
os.Stdin = f
if got := defaultIsTerminal(); got {
t.Fatalf("defaultIsTerminal() = %v, want false for regular file", got)
}
if err := f.Close(); err != nil {
t.Fatalf("Close() error = %v", err)
}
os.Stdin = f
if got := defaultIsTerminal(); !got {
t.Fatalf("defaultIsTerminal() = %v, want true when Stat fails", got)
}
}

View File

@@ -1,523 +0,0 @@
package wrapper
import (
"bytes"
"fmt"
"io"
"os"
"strconv"
"strings"
utils "codeagent-wrapper/internal/utils"
)
func resolveTimeout() int {
raw := os.Getenv("CODEX_TIMEOUT")
if raw == "" {
return defaultTimeout
}
parsed, err := strconv.Atoi(raw)
if err != nil || parsed <= 0 {
logWarn(fmt.Sprintf("Invalid CODEX_TIMEOUT '%s', falling back to %ds", raw, defaultTimeout))
return defaultTimeout
}
if parsed > 10000 {
return parsed / 1000
}
return parsed
}
func readPipedTask() (string, error) {
if isTerminal() {
logInfo("Stdin is tty, skipping pipe read")
return "", nil
}
logInfo("Reading from stdin pipe...")
data, err := io.ReadAll(stdinReader)
if err != nil {
return "", fmt.Errorf("read stdin: %w", err)
}
if len(data) == 0 {
logInfo("Stdin pipe returned empty data")
return "", nil
}
logInfo(fmt.Sprintf("Read %d bytes from stdin pipe", len(data)))
return string(data), nil
}
func shouldUseStdin(taskText string, piped bool) bool {
if piped {
return true
}
if len(taskText) > 800 {
return true
}
return strings.ContainsAny(taskText, stdinSpecialChars)
}
func defaultIsTerminal() bool {
fi, err := os.Stdin.Stat()
if err != nil {
return true
}
return (fi.Mode() & os.ModeCharDevice) != 0
}
func isTerminal() bool {
return isTerminalFn()
}
func getEnv(key, defaultValue string) string {
if val := os.Getenv(key); val != "" {
return val
}
return defaultValue
}
type logWriter struct {
prefix string
maxLen int
buf bytes.Buffer
dropped bool
}
func newLogWriter(prefix string, maxLen int) *logWriter {
if maxLen <= 0 {
maxLen = codexLogLineLimit
}
return &logWriter{prefix: prefix, maxLen: maxLen}
}
func (lw *logWriter) Write(p []byte) (int, error) {
if lw == nil {
return len(p), nil
}
total := len(p)
for len(p) > 0 {
if idx := bytes.IndexByte(p, '\n'); idx >= 0 {
lw.writeLimited(p[:idx])
lw.logLine(true)
p = p[idx+1:]
continue
}
lw.writeLimited(p)
break
}
return total, nil
}
func (lw *logWriter) Flush() {
if lw == nil || lw.buf.Len() == 0 {
return
}
lw.logLine(false)
}
func (lw *logWriter) logLine(force bool) {
if lw == nil {
return
}
line := lw.buf.String()
dropped := lw.dropped
lw.dropped = false
lw.buf.Reset()
if line == "" && !force {
return
}
if lw.maxLen > 0 {
if dropped {
if lw.maxLen > 3 {
line = line[:min(len(line), lw.maxLen-3)] + "..."
} else {
line = line[:min(len(line), lw.maxLen)]
}
} else if len(line) > lw.maxLen {
cutoff := lw.maxLen
if cutoff > 3 {
line = line[:cutoff-3] + "..."
} else {
line = line[:cutoff]
}
}
}
logInfo(lw.prefix + line)
}
func (lw *logWriter) writeLimited(p []byte) {
if lw == nil || len(p) == 0 {
return
}
if lw.maxLen <= 0 {
lw.buf.Write(p)
return
}
remaining := lw.maxLen - lw.buf.Len()
if remaining <= 0 {
lw.dropped = true
return
}
if len(p) <= remaining {
lw.buf.Write(p)
return
}
lw.buf.Write(p[:remaining])
lw.dropped = true
}
type tailBuffer struct {
limit int
data []byte
}
func (b *tailBuffer) Write(p []byte) (int, error) {
if b.limit <= 0 {
return len(p), nil
}
if len(p) >= b.limit {
b.data = append(b.data[:0], p[len(p)-b.limit:]...)
return len(p), nil
}
total := len(b.data) + len(p)
if total <= b.limit {
b.data = append(b.data, p...)
return len(p), nil
}
overflow := total - b.limit
b.data = append(b.data[overflow:], p...)
return len(p), nil
}
func (b *tailBuffer) String() string {
return string(b.data)
}
func truncate(s string, maxLen int) string {
return utils.Truncate(s, maxLen)
}
// safeTruncate safely truncates string to maxLen, avoiding panic and UTF-8 corruption.
func safeTruncate(s string, maxLen int) string {
return utils.SafeTruncate(s, maxLen)
}
// sanitizeOutput removes ANSI escape sequences and control characters.
func sanitizeOutput(s string) string {
return utils.SanitizeOutput(s)
}
func min(a, b int) int {
return utils.Min(a, b)
}
func hello() string {
return "hello world"
}
func greet(name string) string {
return "hello " + name
}
func farewell(name string) string {
return "goodbye " + name
}
// extractCoverageFromLines extracts coverage from pre-split lines.
func extractCoverageFromLines(lines []string) string {
if len(lines) == 0 {
return ""
}
end := len(lines)
for end > 0 && strings.TrimSpace(lines[end-1]) == "" {
end--
}
if end == 1 {
trimmed := strings.TrimSpace(lines[0])
if strings.HasSuffix(trimmed, "%") {
if num, err := strconv.ParseFloat(strings.TrimSuffix(trimmed, "%"), 64); err == nil && num >= 0 && num <= 100 {
return trimmed
}
}
}
coverageKeywords := []string{"file", "stmt", "branch", "line", "coverage", "total"}
for _, line := range lines[:end] {
lower := strings.ToLower(line)
hasKeyword := false
tokens := strings.FieldsFunc(lower, func(r rune) bool { return r < 'a' || r > 'z' })
for _, token := range tokens {
for _, kw := range coverageKeywords {
if strings.HasPrefix(token, kw) {
hasKeyword = true
break
}
}
if hasKeyword {
break
}
}
if !hasKeyword {
continue
}
if !strings.Contains(line, "%") {
continue
}
// Extract percentage pattern: number followed by %
for i := 0; i < len(line); i++ {
if line[i] == '%' && i > 0 {
// Walk back to find the number
j := i - 1
for j >= 0 && (line[j] == '.' || (line[j] >= '0' && line[j] <= '9')) {
j--
}
if j < i-1 {
numStr := line[j+1 : i]
// Validate it's a reasonable percentage
if num, err := strconv.ParseFloat(numStr, 64); err == nil && num >= 0 && num <= 100 {
return numStr + "%"
}
}
}
}
}
return ""
}
// extractCoverage extracts coverage percentage from task output
// Supports common formats: "Coverage: 92%", "92% coverage", "coverage 92%", "TOTAL 92%"
func extractCoverage(message string) string {
if message == "" {
return ""
}
return extractCoverageFromLines(strings.Split(message, "\n"))
}
// extractCoverageNum extracts coverage as a numeric value for comparison
func extractCoverageNum(coverage string) float64 {
if coverage == "" {
return 0
}
// Remove % sign and parse
numStr := strings.TrimSuffix(coverage, "%")
if num, err := strconv.ParseFloat(numStr, 64); err == nil {
return num
}
return 0
}
// extractFilesChangedFromLines extracts files from pre-split lines.
func extractFilesChangedFromLines(lines []string) []string {
if len(lines) == 0 {
return nil
}
var files []string
seen := make(map[string]bool)
exts := []string{".ts", ".tsx", ".js", ".jsx", ".go", ".py", ".rs", ".java", ".vue", ".css", ".scss", ".md", ".json", ".yaml", ".yml", ".toml"}
for _, line := range lines {
line = strings.TrimSpace(line)
// Pattern 1: "Modified: path/to/file.ts" or "Created: path/to/file.ts"
matchedPrefix := false
for _, prefix := range []string{"Modified:", "Created:", "Updated:", "Edited:", "Wrote:", "Changed:"} {
if strings.HasPrefix(line, prefix) {
file := strings.TrimSpace(strings.TrimPrefix(line, prefix))
file = strings.Trim(file, "`\"'()[],:")
file = strings.TrimPrefix(file, "@")
if file != "" && !seen[file] {
files = append(files, file)
seen[file] = true
}
matchedPrefix = true
break
}
}
if matchedPrefix {
continue
}
// Pattern 2: Tokens that look like file paths (allow root files, strip @ prefix).
parts := strings.Fields(line)
for _, part := range parts {
part = strings.Trim(part, "`\"'()[],:")
part = strings.TrimPrefix(part, "@")
for _, ext := range exts {
if strings.HasSuffix(part, ext) && !seen[part] {
files = append(files, part)
seen[part] = true
break
}
}
}
}
// Limit to first 10 files to avoid bloat
if len(files) > 10 {
files = files[:10]
}
return files
}
// extractFilesChanged extracts list of changed files from task output
// Looks for common patterns like "Modified: file.ts", "Created: file.ts", file paths in output
func extractFilesChanged(message string) []string {
if message == "" {
return nil
}
return extractFilesChangedFromLines(strings.Split(message, "\n"))
}
// extractTestResultsFromLines extracts test results from pre-split lines.
func extractTestResultsFromLines(lines []string) (passed, failed int) {
if len(lines) == 0 {
return 0, 0
}
// Common patterns:
// pytest: "12 passed, 2 failed"
// jest: "Tests: 2 failed, 12 passed"
// go: "ok ... 12 tests"
for _, line := range lines {
line = strings.ToLower(line)
// Look for test result lines
if !strings.Contains(line, "pass") && !strings.Contains(line, "fail") && !strings.Contains(line, "test") {
continue
}
// Extract numbers near "passed" or "pass"
if idx := strings.Index(line, "pass"); idx != -1 {
// Look for number before "pass"
num := extractNumberBefore(line, idx)
if num > 0 {
passed = num
}
}
// Extract numbers near "failed" or "fail"
if idx := strings.Index(line, "fail"); idx != -1 {
num := extractNumberBefore(line, idx)
if num > 0 {
failed = num
}
}
// go test style: "ok ... 12 tests"
if passed == 0 {
if idx := strings.Index(line, "test"); idx != -1 {
num := extractNumberBefore(line, idx)
if num > 0 {
passed = num
}
}
}
// If we found both, stop
if passed > 0 && failed > 0 {
break
}
}
return passed, failed
}
// extractTestResults extracts test pass/fail counts from task output
func extractTestResults(message string) (passed, failed int) {
if message == "" {
return 0, 0
}
return extractTestResultsFromLines(strings.Split(message, "\n"))
}
// extractNumberBefore extracts a number that appears before the given index
func extractNumberBefore(s string, idx int) int {
if idx <= 0 {
return 0
}
// Walk backwards to find digits
end := idx - 1
for end >= 0 && (s[end] == ' ' || s[end] == ':' || s[end] == ',') {
end--
}
if end < 0 {
return 0
}
start := end
for start >= 0 && s[start] >= '0' && s[start] <= '9' {
start--
}
start++
if start > end {
return 0
}
numStr := s[start : end+1]
if num, err := strconv.Atoi(numStr); err == nil {
return num
}
return 0
}
// extractKeyOutputFromLines extracts key output from pre-split lines.
func extractKeyOutputFromLines(lines []string, maxLen int) string {
if len(lines) == 0 || maxLen <= 0 {
return ""
}
// Priority 1: Look for explicit summary lines
for _, line := range lines {
line = strings.TrimSpace(line)
lower := strings.ToLower(line)
if strings.HasPrefix(lower, "summary:") || strings.HasPrefix(lower, "completed:") ||
strings.HasPrefix(lower, "implemented:") || strings.HasPrefix(lower, "added:") ||
strings.HasPrefix(lower, "created:") || strings.HasPrefix(lower, "fixed:") {
content := line
for _, prefix := range []string{"Summary:", "Completed:", "Implemented:", "Added:", "Created:", "Fixed:",
"summary:", "completed:", "implemented:", "added:", "created:", "fixed:"} {
content = strings.TrimPrefix(content, prefix)
}
content = strings.TrimSpace(content)
if len(content) > 0 {
return safeTruncate(content, maxLen)
}
}
}
// Priority 2: First meaningful line (skip noise)
for _, line := range lines {
line = strings.TrimSpace(line)
if line == "" || strings.HasPrefix(line, "```") || strings.HasPrefix(line, "---") ||
strings.HasPrefix(line, "#") || strings.HasPrefix(line, "//") {
continue
}
// Skip very short lines (likely headers or markers)
if len(line) < 20 {
continue
}
return safeTruncate(line, maxLen)
}
// Fallback: truncate entire message
clean := strings.TrimSpace(strings.Join(lines, "\n"))
return safeTruncate(clean, maxLen)
}

View File

@@ -1,143 +0,0 @@
package wrapper
import (
"fmt"
"reflect"
"strings"
"testing"
)
func TestExtractCoverage(t *testing.T) {
tests := []struct {
name string
in string
want string
}{
{"bare int", "92%", "92%"},
{"bare float", "92.5%", "92.5%"},
{"coverage prefix", "coverage: 92%", "92%"},
{"total prefix", "TOTAL 92%", "92%"},
{"all files", "All files 92%", "92%"},
{"empty", "", ""},
{"no number", "coverage: N/A", ""},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := extractCoverage(tt.in); got != tt.want {
t.Fatalf("extractCoverage(%q) = %q, want %q", tt.in, got, tt.want)
}
})
}
}
func TestExtractTestResults(t *testing.T) {
tests := []struct {
name string
in string
wantPassed int
wantFailed int
}{
{"pytest one line", "12 passed, 2 failed", 12, 2},
{"pytest split lines", "12 passed\n2 failed", 12, 2},
{"jest format", "Tests: 2 failed, 12 passed, 14 total", 12, 2},
{"go test style count", "ok\texample.com/foo\t0.12s\t12 tests", 12, 0},
{"zero counts", "0 passed, 0 failed", 0, 0},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
passed, failed := extractTestResults(tt.in)
if passed != tt.wantPassed || failed != tt.wantFailed {
t.Fatalf("extractTestResults(%q) = (%d, %d), want (%d, %d)", tt.in, passed, failed, tt.wantPassed, tt.wantFailed)
}
})
}
}
func TestExtractFilesChanged(t *testing.T) {
tests := []struct {
name string
in string
want []string
}{
{"root file", "Modified: main.go\n", []string{"main.go"}},
{"path file", "Created: codeagent-wrapper/utils.go\n", []string{"codeagent-wrapper/utils.go"}},
{"at prefix", "Updated: @codeagent-wrapper/main.go\n", []string{"codeagent-wrapper/main.go"}},
{"token scan", "Files: @main.go, @codeagent-wrapper/utils.go\n", []string{"main.go", "codeagent-wrapper/utils.go"}},
{"space path", "Modified: dir/with space/file.go\n", []string{"dir/with space/file.go"}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := extractFilesChanged(tt.in); !reflect.DeepEqual(got, tt.want) {
t.Fatalf("extractFilesChanged(%q) = %#v, want %#v", tt.in, got, tt.want)
}
})
}
t.Run("limits to first 10", func(t *testing.T) {
var b strings.Builder
for i := 0; i < 12; i++ {
fmt.Fprintf(&b, "Modified: file%d.go\n", i)
}
got := extractFilesChanged(b.String())
if len(got) != 10 {
t.Fatalf("len(files)=%d, want 10: %#v", len(got), got)
}
for i := 0; i < 10; i++ {
want := fmt.Sprintf("file%d.go", i)
if got[i] != want {
t.Fatalf("files[%d]=%q, want %q", i, got[i], want)
}
}
})
}
func TestSafeTruncate(t *testing.T) {
tests := []struct {
name string
in string
maxLen int
want string
}{
{"empty", "", 4, ""},
{"zero maxLen", "hello", 0, ""},
{"one rune", "你好", 1, "你"},
{"two runes no truncate", "你好", 2, "你好"},
{"three runes no truncate", "你好", 3, "你好"},
{"two runes truncates long", "你好世界", 2, "你"},
{"three runes truncates long", "你好世界", 3, "你"},
{"four with ellipsis", "你好世界啊", 4, "你..."},
{"emoji", "🙂🙂🙂🙂🙂", 4, "🙂..."},
{"no truncate", "你好世界", 4, "你好世界"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := safeTruncate(tt.in, tt.maxLen); got != tt.want {
t.Fatalf("safeTruncate(%q, %d) = %q, want %q", tt.in, tt.maxLen, got, tt.want)
}
})
}
}
func TestSanitizeOutput(t *testing.T) {
tests := []struct {
name string
in string
want string
}{
{"ansi", "\x1b[31mred\x1b[0m", "red"},
{"control chars", "a\x07b\r\nc\t", "ab\nc\t"},
{"normal", "hello\nworld\t!", "hello\nworld\t!"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := sanitizeOutput(tt.in); got != tt.want {
t.Fatalf("sanitizeOutput(%q) = %q, want %q", tt.in, got, tt.want)
}
})
}
}

View File

@@ -1,9 +0,0 @@
package wrapper
import ilogger "codeagent-wrapper/internal/logger"
const wrapperName = ilogger.WrapperName
func currentWrapperName() string { return ilogger.CurrentWrapperName() }
func primaryLogPrefix() string { return ilogger.PrimaryLogPrefix() }

View File

@@ -1,33 +0,0 @@
package backend
import config "codeagent-wrapper/internal/config"
// Backend defines the contract for invoking different AI CLI backends.
// Each backend is responsible for supplying the executable command and
// building the argument list based on the wrapper config.
type Backend interface {
Name() string
BuildArgs(cfg *config.Config, targetArg string) []string
Command() string
Env(baseURL, apiKey string) map[string]string
}
var (
logWarnFn = func(string) {}
logErrorFn = func(string) {}
)
// SetLogFuncs configures optional logging hooks used by some backends.
// Callers can safely pass nil to disable the hook.
func SetLogFuncs(warnFn, errorFn func(string)) {
if warnFn != nil {
logWarnFn = warnFn
} else {
logWarnFn = func(string) {}
}
if errorFn != nil {
logErrorFn = errorFn
} else {
logErrorFn = func(string) {}
}
}

View File

@@ -1,322 +0,0 @@
package backend
import (
"bytes"
"os"
"path/filepath"
"reflect"
"testing"
config "codeagent-wrapper/internal/config"
)
func TestClaudeBuildArgs_ModesAndPermissions(t *testing.T) {
backend := ClaudeBackend{}
t.Run("new mode omits skip-permissions when env disabled", func(t *testing.T) {
t.Setenv("CODEAGENT_SKIP_PERMISSIONS", "false")
cfg := &config.Config{Mode: "new", WorkDir: "/repo"}
got := backend.BuildArgs(cfg, "todo")
want := []string{"-p", "--setting-sources", "", "--output-format", "stream-json", "--verbose", "todo"}
if !reflect.DeepEqual(got, want) {
t.Fatalf("got %v, want %v", got, want)
}
})
t.Run("new mode includes skip-permissions by default", func(t *testing.T) {
cfg := &config.Config{Mode: "new", SkipPermissions: false}
got := backend.BuildArgs(cfg, "-")
want := []string{"-p", "--dangerously-skip-permissions", "--setting-sources", "", "--output-format", "stream-json", "--verbose", "-"}
if !reflect.DeepEqual(got, want) {
t.Fatalf("got %v, want %v", got, want)
}
})
t.Run("resume mode includes session id", func(t *testing.T) {
t.Setenv("CODEAGENT_SKIP_PERMISSIONS", "false")
cfg := &config.Config{Mode: "resume", SessionID: "sid-123", WorkDir: "/ignored"}
got := backend.BuildArgs(cfg, "resume-task")
want := []string{"-p", "--setting-sources", "", "-r", "sid-123", "--output-format", "stream-json", "--verbose", "resume-task"}
if !reflect.DeepEqual(got, want) {
t.Fatalf("got %v, want %v", got, want)
}
})
t.Run("resume mode without session still returns base flags", func(t *testing.T) {
t.Setenv("CODEAGENT_SKIP_PERMISSIONS", "false")
cfg := &config.Config{Mode: "resume", WorkDir: "/ignored"}
got := backend.BuildArgs(cfg, "follow-up")
want := []string{"-p", "--setting-sources", "", "--output-format", "stream-json", "--verbose", "follow-up"}
if !reflect.DeepEqual(got, want) {
t.Fatalf("got %v, want %v", got, want)
}
})
t.Run("resume mode can opt-in skip permissions", func(t *testing.T) {
cfg := &config.Config{Mode: "resume", SessionID: "sid-123", SkipPermissions: true}
got := backend.BuildArgs(cfg, "resume-task")
want := []string{"-p", "--dangerously-skip-permissions", "--setting-sources", "", "-r", "sid-123", "--output-format", "stream-json", "--verbose", "resume-task"}
if !reflect.DeepEqual(got, want) {
t.Fatalf("got %v, want %v", got, want)
}
})
t.Run("nil config returns nil", func(t *testing.T) {
if backend.BuildArgs(nil, "ignored") != nil {
t.Fatalf("nil config should return nil args")
}
})
}
func TestBackendBuildArgs_Model(t *testing.T) {
t.Run("claude includes --model when set", func(t *testing.T) {
t.Setenv("CODEAGENT_SKIP_PERMISSIONS", "false")
backend := ClaudeBackend{}
cfg := &config.Config{Mode: "new", Model: "opus"}
got := backend.BuildArgs(cfg, "todo")
want := []string{"-p", "--setting-sources", "", "--model", "opus", "--output-format", "stream-json", "--verbose", "todo"}
if !reflect.DeepEqual(got, want) {
t.Fatalf("got %v, want %v", got, want)
}
})
t.Run("gemini includes -m when set", func(t *testing.T) {
backend := GeminiBackend{}
cfg := &config.Config{Mode: "new", Model: "gemini-3-pro-preview"}
got := backend.BuildArgs(cfg, "task")
want := []string{"-o", "stream-json", "-y", "-m", "gemini-3-pro-preview", "task"}
if !reflect.DeepEqual(got, want) {
t.Fatalf("got %v, want %v", got, want)
}
})
t.Run("codex includes --model when set", func(t *testing.T) {
const key = "CODEX_BYPASS_SANDBOX"
t.Setenv(key, "false")
backend := CodexBackend{}
cfg := &config.Config{Mode: "new", WorkDir: "/tmp", Model: "o3"}
got := backend.BuildArgs(cfg, "task")
want := []string{"e", "--model", "o3", "--skip-git-repo-check", "-C", "/tmp", "--json", "task"}
if !reflect.DeepEqual(got, want) {
t.Fatalf("got %v, want %v", got, want)
}
})
}
func TestClaudeBuildArgs_GeminiAndCodexModes(t *testing.T) {
t.Run("gemini new mode defaults workdir", func(t *testing.T) {
backend := GeminiBackend{}
cfg := &config.Config{Mode: "new", WorkDir: "/workspace"}
got := backend.BuildArgs(cfg, "task")
want := []string{"-o", "stream-json", "-y", "task"}
if !reflect.DeepEqual(got, want) {
t.Fatalf("got %v, want %v", got, want)
}
})
t.Run("gemini resume mode uses session id", func(t *testing.T) {
backend := GeminiBackend{}
cfg := &config.Config{Mode: "resume", SessionID: "sid-999"}
got := backend.BuildArgs(cfg, "resume")
want := []string{"-o", "stream-json", "-y", "-r", "sid-999", "resume"}
if !reflect.DeepEqual(got, want) {
t.Fatalf("got %v, want %v", got, want)
}
})
t.Run("gemini resume mode without session omits identifier", func(t *testing.T) {
backend := GeminiBackend{}
cfg := &config.Config{Mode: "resume"}
got := backend.BuildArgs(cfg, "resume")
want := []string{"-o", "stream-json", "-y", "resume"}
if !reflect.DeepEqual(got, want) {
t.Fatalf("got %v, want %v", got, want)
}
})
t.Run("gemini nil config returns nil", func(t *testing.T) {
backend := GeminiBackend{}
if backend.BuildArgs(nil, "ignored") != nil {
t.Fatalf("nil config should return nil args")
}
})
t.Run("gemini stdin mode uses -p flag", func(t *testing.T) {
backend := GeminiBackend{}
cfg := &config.Config{Mode: "new"}
got := backend.BuildArgs(cfg, "-")
want := []string{"-o", "stream-json", "-y", "-p", "-"}
if !reflect.DeepEqual(got, want) {
t.Fatalf("got %v, want %v", got, want)
}
})
t.Run("codex build args omits bypass flag by default", func(t *testing.T) {
const key = "CODEX_BYPASS_SANDBOX"
t.Setenv(key, "false")
backend := CodexBackend{}
cfg := &config.Config{Mode: "new", WorkDir: "/tmp"}
got := backend.BuildArgs(cfg, "task")
want := []string{"e", "--skip-git-repo-check", "-C", "/tmp", "--json", "task"}
if !reflect.DeepEqual(got, want) {
t.Fatalf("got %v, want %v", got, want)
}
})
t.Run("codex build args includes bypass flag when enabled", func(t *testing.T) {
const key = "CODEX_BYPASS_SANDBOX"
t.Setenv(key, "true")
backend := CodexBackend{}
cfg := &config.Config{Mode: "new", WorkDir: "/tmp"}
got := backend.BuildArgs(cfg, "task")
want := []string{"e", "--dangerously-bypass-approvals-and-sandbox", "--skip-git-repo-check", "-C", "/tmp", "--json", "task"}
if !reflect.DeepEqual(got, want) {
t.Fatalf("got %v, want %v", got, want)
}
})
}
func TestClaudeBuildArgs_BackendMetadata(t *testing.T) {
tests := []struct {
backend Backend
name string
command string
}{
{backend: CodexBackend{}, name: "codex", command: "codex"},
{backend: ClaudeBackend{}, name: "claude", command: "claude"},
{backend: GeminiBackend{}, name: "gemini", command: "gemini"},
}
for _, tt := range tests {
if got := tt.backend.Name(); got != tt.name {
t.Fatalf("Name() = %s, want %s", got, tt.name)
}
if got := tt.backend.Command(); got != tt.command {
t.Fatalf("Command() = %s, want %s", got, tt.command)
}
}
}
func TestLoadMinimalEnvSettings(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)
t.Setenv("USERPROFILE", home)
t.Run("missing file returns empty", func(t *testing.T) {
if got := LoadMinimalEnvSettings(); len(got) != 0 {
t.Fatalf("got %v, want empty", got)
}
})
t.Run("valid env returns string map", func(t *testing.T) {
dir := filepath.Join(home, ".claude")
if err := os.MkdirAll(dir, 0o755); err != nil {
t.Fatalf("MkdirAll: %v", err)
}
path := filepath.Join(dir, "settings.json")
data := []byte(`{"env":{"ANTHROPIC_API_KEY":"secret","FOO":"bar"}}`)
if err := os.WriteFile(path, data, 0o600); err != nil {
t.Fatalf("WriteFile: %v", err)
}
got := LoadMinimalEnvSettings()
if got["ANTHROPIC_API_KEY"] != "secret" || got["FOO"] != "bar" {
t.Fatalf("got %v, want keys present", got)
}
})
t.Run("non-string values are ignored", func(t *testing.T) {
dir := filepath.Join(home, ".claude")
path := filepath.Join(dir, "settings.json")
data := []byte(`{"env":{"GOOD":"ok","BAD":123,"ALSO_BAD":true}}`)
if err := os.WriteFile(path, data, 0o600); err != nil {
t.Fatalf("WriteFile: %v", err)
}
got := LoadMinimalEnvSettings()
if got["GOOD"] != "ok" {
t.Fatalf("got %v, want GOOD=ok", got)
}
if _, ok := got["BAD"]; ok {
t.Fatalf("got %v, want BAD omitted", got)
}
if _, ok := got["ALSO_BAD"]; ok {
t.Fatalf("got %v, want ALSO_BAD omitted", got)
}
})
t.Run("oversized file returns empty", func(t *testing.T) {
dir := filepath.Join(home, ".claude")
path := filepath.Join(dir, "settings.json")
data := bytes.Repeat([]byte("a"), MaxClaudeSettingsBytes+1)
if err := os.WriteFile(path, data, 0o600); err != nil {
t.Fatalf("WriteFile: %v", err)
}
if got := LoadMinimalEnvSettings(); len(got) != 0 {
t.Fatalf("got %v, want empty", got)
}
})
}
func TestOpencodeBackend_BuildArgs(t *testing.T) {
backend := OpencodeBackend{}
t.Run("basic", func(t *testing.T) {
cfg := &config.Config{Mode: "new"}
got := backend.BuildArgs(cfg, "hello")
want := []string{"run", "--format", "json", "hello"}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %v, want %v", got, want)
}
})
t.Run("with model", func(t *testing.T) {
cfg := &config.Config{Mode: "new", Model: "opencode/grok-code"}
got := backend.BuildArgs(cfg, "task")
want := []string{"run", "-m", "opencode/grok-code", "--format", "json", "task"}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %v, want %v", got, want)
}
})
t.Run("resume mode", func(t *testing.T) {
cfg := &config.Config{Mode: "resume", SessionID: "ses_123", Model: "opencode/grok-code"}
got := backend.BuildArgs(cfg, "follow-up")
want := []string{"run", "-m", "opencode/grok-code", "-s", "ses_123", "--format", "json", "follow-up"}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %v, want %v", got, want)
}
})
t.Run("resume without session", func(t *testing.T) {
cfg := &config.Config{Mode: "resume"}
got := backend.BuildArgs(cfg, "task")
want := []string{"run", "--format", "json", "task"}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %v, want %v", got, want)
}
})
t.Run("stdin mode omits dash", func(t *testing.T) {
cfg := &config.Config{Mode: "new"}
got := backend.BuildArgs(cfg, "-")
want := []string{"run", "--format", "json"}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %v, want %v", got, want)
}
})
}
func TestOpencodeBackend_Interface(t *testing.T) {
backend := OpencodeBackend{}
if backend.Name() != "opencode" {
t.Errorf("Name() = %q, want %q", backend.Name(), "opencode")
}
if backend.Command() != "opencode" {
t.Errorf("Command() = %q, want %q", backend.Command(), "opencode")
}
}

View File

@@ -1,139 +0,0 @@
package backend
import (
"os"
"path/filepath"
"strings"
config "codeagent-wrapper/internal/config"
"github.com/goccy/go-json"
)
type ClaudeBackend struct{}
func (ClaudeBackend) Name() string { return "claude" }
func (ClaudeBackend) Command() string { return "claude" }
func (ClaudeBackend) Env(baseURL, apiKey string) map[string]string {
baseURL = strings.TrimSpace(baseURL)
apiKey = strings.TrimSpace(apiKey)
if baseURL == "" && apiKey == "" {
return nil
}
env := make(map[string]string, 2)
if baseURL != "" {
env["ANTHROPIC_BASE_URL"] = baseURL
}
if apiKey != "" {
env["ANTHROPIC_AUTH_TOKEN"] = apiKey
}
return env
}
func (ClaudeBackend) BuildArgs(cfg *config.Config, targetArg string) []string {
return buildClaudeArgs(cfg, targetArg)
}
const MaxClaudeSettingsBytes = 1 << 20 // 1MB
type MinimalClaudeSettings struct {
Env map[string]string
Model string
}
// LoadMinimalClaudeSettings 从 ~/.claude/settings.json 只提取安全的最小子集:
// - env: 只接受字符串类型的值
// - model: 只接受字符串类型的值
// 文件缺失/解析失败/超限都返回空。
func LoadMinimalClaudeSettings() MinimalClaudeSettings {
home, err := os.UserHomeDir()
if err != nil || home == "" {
return MinimalClaudeSettings{}
}
claudeDir := filepath.Clean(filepath.Join(home, ".claude"))
settingPath := filepath.Clean(filepath.Join(claudeDir, "settings.json"))
rel, err := filepath.Rel(claudeDir, settingPath)
if err != nil || rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return MinimalClaudeSettings{}
}
info, err := os.Stat(settingPath)
if err != nil || info.Size() > MaxClaudeSettingsBytes {
return MinimalClaudeSettings{}
}
data, err := os.ReadFile(settingPath) // #nosec G304 -- path is fixed under user home and validated to stay within claudeDir
if err != nil {
return MinimalClaudeSettings{}
}
var cfg struct {
Env map[string]any `json:"env"`
Model any `json:"model"`
}
if err := json.Unmarshal(data, &cfg); err != nil {
return MinimalClaudeSettings{}
}
out := MinimalClaudeSettings{}
if model, ok := cfg.Model.(string); ok {
out.Model = strings.TrimSpace(model)
}
if len(cfg.Env) == 0 {
return out
}
env := make(map[string]string, len(cfg.Env))
for k, v := range cfg.Env {
s, ok := v.(string)
if !ok {
continue
}
env[k] = s
}
if len(env) == 0 {
return out
}
out.Env = env
return out
}
func LoadMinimalEnvSettings() map[string]string {
settings := LoadMinimalClaudeSettings()
if len(settings.Env) == 0 {
return nil
}
return settings.Env
}
func buildClaudeArgs(cfg *config.Config, targetArg string) []string {
if cfg == nil {
return nil
}
args := []string{"-p"}
// Default to skip permissions unless CODEAGENT_SKIP_PERMISSIONS=false
if cfg.SkipPermissions || cfg.Yolo || config.EnvFlagDefaultTrue("CODEAGENT_SKIP_PERMISSIONS") {
args = append(args, "--dangerously-skip-permissions")
}
// Prevent infinite recursion: disable all setting sources (user, project, local)
// This ensures a clean execution environment without CLAUDE.md or skills that would trigger codeagent
args = append(args, "--setting-sources", "")
if model := strings.TrimSpace(cfg.Model); model != "" {
args = append(args, "--model", model)
}
if cfg.Mode == "resume" {
if cfg.SessionID != "" {
// Claude CLI uses -r <session_id> for resume.
args = append(args, "-r", cfg.SessionID)
}
}
args = append(args, "--output-format", "stream-json", "--verbose", targetArg)
return args
}

View File

@@ -1,79 +0,0 @@
package backend
import (
"strings"
config "codeagent-wrapper/internal/config"
)
type CodexBackend struct{}
func (CodexBackend) Name() string { return "codex" }
func (CodexBackend) Command() string { return "codex" }
func (CodexBackend) Env(baseURL, apiKey string) map[string]string {
baseURL = strings.TrimSpace(baseURL)
apiKey = strings.TrimSpace(apiKey)
if baseURL == "" && apiKey == "" {
return nil
}
env := make(map[string]string, 2)
if baseURL != "" {
env["OPENAI_BASE_URL"] = baseURL
}
if apiKey != "" {
env["OPENAI_API_KEY"] = apiKey
}
return env
}
func (CodexBackend) BuildArgs(cfg *config.Config, targetArg string) []string {
return BuildCodexArgs(cfg, targetArg)
}
func BuildCodexArgs(cfg *config.Config, targetArg string) []string {
if cfg == nil {
panic("buildCodexArgs: nil config")
}
var resumeSessionID string
isResume := cfg.Mode == "resume"
if isResume {
resumeSessionID = strings.TrimSpace(cfg.SessionID)
if resumeSessionID == "" {
logErrorFn("invalid config: resume mode requires non-empty session_id")
isResume = false
}
}
args := []string{"e"}
// Default to bypass sandbox unless CODEX_BYPASS_SANDBOX=false
if cfg.Yolo || config.EnvFlagDefaultTrue("CODEX_BYPASS_SANDBOX") {
logWarnFn("YOLO mode or CODEX_BYPASS_SANDBOX enabled: running without approval/sandbox protection")
args = append(args, "--dangerously-bypass-approvals-and-sandbox")
}
if model := strings.TrimSpace(cfg.Model); model != "" {
args = append(args, "--model", model)
}
if reasoningEffort := strings.TrimSpace(cfg.ReasoningEffort); reasoningEffort != "" {
args = append(args, "-c", "model_reasoning_effort="+reasoningEffort)
}
args = append(args, "--skip-git-repo-check")
if isResume {
return append(args,
"--json",
"resume",
resumeSessionID,
targetArg,
)
}
return append(args,
"-C", cfg.WorkDir,
"--json",
targetArg,
)
}

View File

@@ -1,110 +0,0 @@
package backend
import (
"os"
"path/filepath"
"strings"
config "codeagent-wrapper/internal/config"
)
type GeminiBackend struct{}
func (GeminiBackend) Name() string { return "gemini" }
func (GeminiBackend) Command() string { return "gemini" }
func (GeminiBackend) Env(baseURL, apiKey string) map[string]string {
baseURL = strings.TrimSpace(baseURL)
apiKey = strings.TrimSpace(apiKey)
if baseURL == "" && apiKey == "" {
return nil
}
env := make(map[string]string, 2)
if baseURL != "" {
env["GOOGLE_GEMINI_BASE_URL"] = baseURL
}
if apiKey != "" {
env["GEMINI_API_KEY"] = apiKey
}
return env
}
func (GeminiBackend) BuildArgs(cfg *config.Config, targetArg string) []string {
return buildGeminiArgs(cfg, targetArg)
}
// LoadGeminiEnv loads environment variables from ~/.gemini/.env
// Supports GEMINI_API_KEY, GEMINI_MODEL, GOOGLE_GEMINI_BASE_URL
// Also sets GEMINI_API_KEY_AUTH_MECHANISM=bearer for third-party API compatibility
func LoadGeminiEnv() map[string]string {
home, err := os.UserHomeDir()
if err != nil || home == "" {
return nil
}
envDir := filepath.Clean(filepath.Join(home, ".gemini"))
envPath := filepath.Clean(filepath.Join(envDir, ".env"))
rel, err := filepath.Rel(envDir, envPath)
if err != nil || rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return nil
}
data, err := os.ReadFile(envPath) // #nosec G304 -- path is fixed under user home and validated to stay within envDir
if err != nil {
return nil
}
env := make(map[string]string)
for _, line := range strings.Split(string(data), "\n") {
line = strings.TrimSpace(line)
if line == "" || strings.HasPrefix(line, "#") {
continue
}
idx := strings.IndexByte(line, '=')
if idx <= 0 {
continue
}
key := strings.TrimSpace(line[:idx])
value := strings.TrimSpace(line[idx+1:])
if key != "" && value != "" {
env[key] = value
}
}
// Set bearer auth mechanism for third-party API compatibility
if _, ok := env["GEMINI_API_KEY"]; ok {
if _, hasAuth := env["GEMINI_API_KEY_AUTH_MECHANISM"]; !hasAuth {
env["GEMINI_API_KEY_AUTH_MECHANISM"] = "bearer"
}
}
if len(env) == 0 {
return nil
}
return env
}
func buildGeminiArgs(cfg *config.Config, targetArg string) []string {
if cfg == nil {
return nil
}
args := []string{"-o", "stream-json", "-y"}
if model := strings.TrimSpace(cfg.Model); model != "" {
args = append(args, "-m", model)
}
if cfg.Mode == "resume" {
if cfg.SessionID != "" {
args = append(args, "-r", cfg.SessionID)
}
}
// Use positional argument instead of deprecated -p flag.
// For stdin mode ("-"), use -p to read from stdin.
if targetArg == "-" {
args = append(args, "-p", targetArg)
} else {
args = append(args, targetArg)
}
return args
}

View File

@@ -1,29 +0,0 @@
package backend
import (
"strings"
config "codeagent-wrapper/internal/config"
)
type OpencodeBackend struct{}
func (OpencodeBackend) Name() string { return "opencode" }
func (OpencodeBackend) Command() string { return "opencode" }
func (OpencodeBackend) Env(baseURL, apiKey string) map[string]string { return nil }
func (OpencodeBackend) BuildArgs(cfg *config.Config, targetArg string) []string {
args := []string{"run"}
if cfg != nil {
if model := strings.TrimSpace(cfg.Model); model != "" {
args = append(args, "-m", model)
}
if cfg.Mode == "resume" && cfg.SessionID != "" {
args = append(args, "-s", cfg.SessionID)
}
}
args = append(args, "--format", "json")
if targetArg != "-" {
args = append(args, targetArg)
}
return args
}

View File

@@ -1,29 +0,0 @@
package backend
import (
"fmt"
"strings"
)
var registry = map[string]Backend{
"codex": CodexBackend{},
"claude": ClaudeBackend{},
"gemini": GeminiBackend{},
"opencode": OpencodeBackend{},
}
// Registry exposes the available backends. Intended for internal inspection/tests.
func Registry() map[string]Backend {
return registry
}
func Select(name string) (Backend, error) {
key := strings.ToLower(strings.TrimSpace(name))
if key == "" {
key = "codex"
}
if backend, ok := registry[key]; ok {
return backend, nil
}
return nil, fmt.Errorf("unsupported backend %q", name)
}

View File

@@ -1,220 +0,0 @@
package config
import (
"fmt"
"os"
"path/filepath"
"strings"
"sync"
ilogger "codeagent-wrapper/internal/logger"
"github.com/goccy/go-json"
)
type BackendConfig struct {
BaseURL string `json:"base_url,omitempty"`
APIKey string `json:"api_key,omitempty"`
}
type AgentModelConfig struct {
Backend string `json:"backend"`
Model string `json:"model"`
PromptFile string `json:"prompt_file,omitempty"`
Description string `json:"description,omitempty"`
Yolo bool `json:"yolo,omitempty"`
Reasoning string `json:"reasoning,omitempty"`
BaseURL string `json:"base_url,omitempty"`
APIKey string `json:"api_key,omitempty"`
}
type ModelsConfig struct {
DefaultBackend string `json:"default_backend"`
DefaultModel string `json:"default_model"`
Agents map[string]AgentModelConfig `json:"agents"`
Backends map[string]BackendConfig `json:"backends,omitempty"`
}
var defaultModelsConfig = ModelsConfig{
DefaultBackend: "opencode",
DefaultModel: "opencode/grok-code",
Agents: map[string]AgentModelConfig{
"oracle": {Backend: "claude", Model: "claude-opus-4-5-20251101", PromptFile: "~/.claude/skills/omo/references/oracle.md", Description: "Technical advisor"},
"librarian": {Backend: "claude", Model: "claude-sonnet-4-5-20250929", PromptFile: "~/.claude/skills/omo/references/librarian.md", Description: "Researcher"},
"explore": {Backend: "opencode", Model: "opencode/grok-code", PromptFile: "~/.claude/skills/omo/references/explore.md", Description: "Code search"},
"develop": {Backend: "codex", Model: "", PromptFile: "~/.claude/skills/omo/references/develop.md", Description: "Code development"},
"frontend-ui-ux-engineer": {Backend: "gemini", Model: "", PromptFile: "~/.claude/skills/omo/references/frontend-ui-ux-engineer.md", Description: "Frontend engineer"},
"document-writer": {Backend: "gemini", Model: "", PromptFile: "~/.claude/skills/omo/references/document-writer.md", Description: "Documentation"},
},
}
var (
modelsConfigOnce sync.Once
modelsConfigCached *ModelsConfig
)
func modelsConfig() *ModelsConfig {
modelsConfigOnce.Do(func() {
modelsConfigCached = loadModelsConfig()
})
if modelsConfigCached == nil {
return &defaultModelsConfig
}
return modelsConfigCached
}
func loadModelsConfig() *ModelsConfig {
home, err := os.UserHomeDir()
if err != nil {
ilogger.LogWarn(fmt.Sprintf("Failed to resolve home directory for models config: %v; using defaults", err))
return &defaultModelsConfig
}
configDir := filepath.Clean(filepath.Join(home, ".codeagent"))
configPath := filepath.Clean(filepath.Join(configDir, "models.json"))
rel, err := filepath.Rel(configDir, configPath)
if err != nil || rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return &defaultModelsConfig
}
data, err := os.ReadFile(configPath) // #nosec G304 -- path is fixed under user home and validated to stay within configDir
if err != nil {
if !os.IsNotExist(err) {
ilogger.LogWarn(fmt.Sprintf("Failed to read models config %s: %v; using defaults", configPath, err))
}
return &defaultModelsConfig
}
var cfg ModelsConfig
if err := json.Unmarshal(data, &cfg); err != nil {
ilogger.LogWarn(fmt.Sprintf("Failed to parse models config %s: %v; using defaults", configPath, err))
return &defaultModelsConfig
}
cfg.DefaultBackend = strings.TrimSpace(cfg.DefaultBackend)
if cfg.DefaultBackend == "" {
cfg.DefaultBackend = defaultModelsConfig.DefaultBackend
}
cfg.DefaultModel = strings.TrimSpace(cfg.DefaultModel)
if cfg.DefaultModel == "" {
cfg.DefaultModel = defaultModelsConfig.DefaultModel
}
// Merge with defaults
for name, agent := range defaultModelsConfig.Agents {
if _, exists := cfg.Agents[name]; !exists {
if cfg.Agents == nil {
cfg.Agents = make(map[string]AgentModelConfig)
}
cfg.Agents[name] = agent
}
}
// Normalize backend keys so lookups can be case-insensitive.
if len(cfg.Backends) > 0 {
normalized := make(map[string]BackendConfig, len(cfg.Backends))
for k, v := range cfg.Backends {
key := strings.ToLower(strings.TrimSpace(k))
if key == "" {
continue
}
normalized[key] = v
}
if len(normalized) > 0 {
cfg.Backends = normalized
} else {
cfg.Backends = nil
}
}
return &cfg
}
func LoadDynamicAgent(name string) (AgentModelConfig, bool) {
if err := ValidateAgentName(name); err != nil {
return AgentModelConfig{}, false
}
home, err := os.UserHomeDir()
if err != nil || strings.TrimSpace(home) == "" {
return AgentModelConfig{}, false
}
absPath := filepath.Join(home, ".codeagent", "agents", name+".md")
info, err := os.Stat(absPath)
if err != nil || info.IsDir() {
return AgentModelConfig{}, false
}
return AgentModelConfig{PromptFile: "~/.codeagent/agents/" + name + ".md"}, true
}
func ResolveBackendConfig(backendName string) (baseURL, apiKey string) {
cfg := modelsConfig()
resolved := resolveBackendConfig(cfg, backendName)
return strings.TrimSpace(resolved.BaseURL), strings.TrimSpace(resolved.APIKey)
}
func resolveBackendConfig(cfg *ModelsConfig, backendName string) BackendConfig {
if cfg == nil || len(cfg.Backends) == 0 {
return BackendConfig{}
}
key := strings.ToLower(strings.TrimSpace(backendName))
if key == "" {
key = strings.ToLower(strings.TrimSpace(cfg.DefaultBackend))
}
if key == "" {
return BackendConfig{}
}
if backend, ok := cfg.Backends[key]; ok {
return backend
}
return BackendConfig{}
}
func resolveAgentConfig(agentName string) (backend, model, promptFile, reasoning, baseURL, apiKey string, yolo bool) {
cfg := modelsConfig()
if agent, ok := cfg.Agents[agentName]; ok {
backend = strings.TrimSpace(agent.Backend)
if backend == "" {
backend = cfg.DefaultBackend
}
backendCfg := resolveBackendConfig(cfg, backend)
baseURL = strings.TrimSpace(agent.BaseURL)
if baseURL == "" {
baseURL = strings.TrimSpace(backendCfg.BaseURL)
}
apiKey = strings.TrimSpace(agent.APIKey)
if apiKey == "" {
apiKey = strings.TrimSpace(backendCfg.APIKey)
}
return backend, strings.TrimSpace(agent.Model), agent.PromptFile, agent.Reasoning, baseURL, apiKey, agent.Yolo
}
if dynamic, ok := LoadDynamicAgent(agentName); ok {
backend = cfg.DefaultBackend
model = cfg.DefaultModel
backendCfg := resolveBackendConfig(cfg, backend)
baseURL = strings.TrimSpace(backendCfg.BaseURL)
apiKey = strings.TrimSpace(backendCfg.APIKey)
return backend, model, dynamic.PromptFile, "", baseURL, apiKey, false
}
backend = cfg.DefaultBackend
model = cfg.DefaultModel
backendCfg := resolveBackendConfig(cfg, backend)
baseURL = strings.TrimSpace(backendCfg.BaseURL)
apiKey = strings.TrimSpace(backendCfg.APIKey)
return backend, model, "", "", baseURL, apiKey, false
}
func ResolveAgentConfig(agentName string) (backend, model, promptFile, reasoning, baseURL, apiKey string, yolo bool) {
return resolveAgentConfig(agentName)
}
func ResetModelsConfigCacheForTest() {
modelsConfigCached = nil
modelsConfigOnce = sync.Once{}
}

View File

@@ -1,221 +0,0 @@
package config
import (
"os"
"path/filepath"
"testing"
)
func TestResolveAgentConfig_Defaults(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)
t.Setenv("USERPROFILE", home)
t.Cleanup(ResetModelsConfigCacheForTest)
ResetModelsConfigCacheForTest()
// Test that default agents resolve correctly without config file
tests := []struct {
agent string
wantBackend string
wantModel string
wantPromptFile string
}{
{"oracle", "claude", "claude-opus-4-5-20251101", "~/.claude/skills/omo/references/oracle.md"},
{"librarian", "claude", "claude-sonnet-4-5-20250929", "~/.claude/skills/omo/references/librarian.md"},
{"explore", "opencode", "opencode/grok-code", "~/.claude/skills/omo/references/explore.md"},
{"frontend-ui-ux-engineer", "gemini", "", "~/.claude/skills/omo/references/frontend-ui-ux-engineer.md"},
{"document-writer", "gemini", "", "~/.claude/skills/omo/references/document-writer.md"},
}
for _, tt := range tests {
t.Run(tt.agent, func(t *testing.T) {
backend, model, promptFile, _, _, _, _ := resolveAgentConfig(tt.agent)
if backend != tt.wantBackend {
t.Errorf("backend = %q, want %q", backend, tt.wantBackend)
}
if model != tt.wantModel {
t.Errorf("model = %q, want %q", model, tt.wantModel)
}
if promptFile != tt.wantPromptFile {
t.Errorf("promptFile = %q, want %q", promptFile, tt.wantPromptFile)
}
})
}
}
func TestResolveAgentConfig_UnknownAgent(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)
t.Setenv("USERPROFILE", home)
t.Cleanup(ResetModelsConfigCacheForTest)
ResetModelsConfigCacheForTest()
backend, model, promptFile, _, _, _, _ := resolveAgentConfig("unknown-agent")
if backend != "opencode" {
t.Errorf("unknown agent backend = %q, want %q", backend, "opencode")
}
if model != "opencode/grok-code" {
t.Errorf("unknown agent model = %q, want %q", model, "opencode/grok-code")
}
if promptFile != "" {
t.Errorf("unknown agent promptFile = %q, want empty", promptFile)
}
}
func TestLoadModelsConfig_NoFile(t *testing.T) {
home := "/nonexistent/path/that/does/not/exist"
t.Setenv("HOME", home)
t.Setenv("USERPROFILE", home)
t.Cleanup(ResetModelsConfigCacheForTest)
ResetModelsConfigCacheForTest()
cfg := loadModelsConfig()
if cfg.DefaultBackend != "opencode" {
t.Errorf("DefaultBackend = %q, want %q", cfg.DefaultBackend, "opencode")
}
if len(cfg.Agents) != 6 {
t.Errorf("len(Agents) = %d, want 6", len(cfg.Agents))
}
}
func TestLoadModelsConfig_WithFile(t *testing.T) {
// Create temp dir and config file
tmpDir := t.TempDir()
configDir := filepath.Join(tmpDir, ".codeagent")
if err := os.MkdirAll(configDir, 0755); err != nil {
t.Fatal(err)
}
configContent := `{
"default_backend": "claude",
"default_model": "claude-opus-4",
"backends": {
"Claude": {
"base_url": "https://backend.example",
"api_key": "backend-key"
},
"codex": {
"base_url": "https://openai.example",
"api_key": "openai-key"
}
},
"agents": {
"custom-agent": {
"backend": "codex",
"model": "gpt-4o",
"description": "Custom agent",
"base_url": "https://agent.example",
"api_key": "agent-key"
}
}
}`
configPath := filepath.Join(configDir, "models.json")
if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
t.Fatal(err)
}
t.Setenv("HOME", tmpDir)
t.Setenv("USERPROFILE", tmpDir)
t.Cleanup(ResetModelsConfigCacheForTest)
ResetModelsConfigCacheForTest()
cfg := loadModelsConfig()
if cfg.DefaultBackend != "claude" {
t.Errorf("DefaultBackend = %q, want %q", cfg.DefaultBackend, "claude")
}
if cfg.DefaultModel != "claude-opus-4" {
t.Errorf("DefaultModel = %q, want %q", cfg.DefaultModel, "claude-opus-4")
}
// Check custom agent
if agent, ok := cfg.Agents["custom-agent"]; !ok {
t.Error("custom-agent not found")
} else {
if agent.Backend != "codex" {
t.Errorf("custom-agent.Backend = %q, want %q", agent.Backend, "codex")
}
if agent.Model != "gpt-4o" {
t.Errorf("custom-agent.Model = %q, want %q", agent.Model, "gpt-4o")
}
}
// Check that defaults are merged
if _, ok := cfg.Agents["oracle"]; !ok {
t.Error("default agent oracle should be merged")
}
baseURL, apiKey := ResolveBackendConfig("claude")
if baseURL != "https://backend.example" {
t.Errorf("ResolveBackendConfig(baseURL) = %q, want %q", baseURL, "https://backend.example")
}
if apiKey != "backend-key" {
t.Errorf("ResolveBackendConfig(apiKey) = %q, want %q", apiKey, "backend-key")
}
backend, model, _, _, agentBaseURL, agentAPIKey, _ := ResolveAgentConfig("custom-agent")
if backend != "codex" {
t.Errorf("ResolveAgentConfig(backend) = %q, want %q", backend, "codex")
}
if model != "gpt-4o" {
t.Errorf("ResolveAgentConfig(model) = %q, want %q", model, "gpt-4o")
}
if agentBaseURL != "https://agent.example" {
t.Errorf("ResolveAgentConfig(baseURL) = %q, want %q", agentBaseURL, "https://agent.example")
}
if agentAPIKey != "agent-key" {
t.Errorf("ResolveAgentConfig(apiKey) = %q, want %q", agentAPIKey, "agent-key")
}
}
func TestResolveAgentConfig_DynamicAgent(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)
t.Setenv("USERPROFILE", home)
t.Cleanup(ResetModelsConfigCacheForTest)
ResetModelsConfigCacheForTest()
agentDir := filepath.Join(home, ".codeagent", "agents")
if err := os.MkdirAll(agentDir, 0o755); err != nil {
t.Fatalf("MkdirAll: %v", err)
}
if err := os.WriteFile(filepath.Join(agentDir, "sarsh.md"), []byte("prompt\n"), 0o644); err != nil {
t.Fatalf("WriteFile: %v", err)
}
backend, model, promptFile, _, _, _, _ := resolveAgentConfig("sarsh")
if backend != "opencode" {
t.Errorf("backend = %q, want %q", backend, "opencode")
}
if model != "opencode/grok-code" {
t.Errorf("model = %q, want %q", model, "opencode/grok-code")
}
if promptFile != "~/.codeagent/agents/sarsh.md" {
t.Errorf("promptFile = %q, want %q", promptFile, "~/.codeagent/agents/sarsh.md")
}
}
func TestLoadModelsConfig_InvalidJSON(t *testing.T) {
tmpDir := t.TempDir()
configDir := filepath.Join(tmpDir, ".codeagent")
if err := os.MkdirAll(configDir, 0755); err != nil {
t.Fatal(err)
}
// Write invalid JSON
configPath := filepath.Join(configDir, "models.json")
if err := os.WriteFile(configPath, []byte("invalid json {"), 0644); err != nil {
t.Fatal(err)
}
t.Setenv("HOME", tmpDir)
t.Setenv("USERPROFILE", tmpDir)
t.Cleanup(ResetModelsConfigCacheForTest)
ResetModelsConfigCacheForTest()
cfg := loadModelsConfig()
// Should fall back to defaults
if cfg.DefaultBackend != "opencode" {
t.Errorf("invalid JSON should fallback, got DefaultBackend = %q", cfg.DefaultBackend)
}
}

View File

@@ -1,102 +0,0 @@
package config
import (
"fmt"
"os"
"strconv"
"strings"
)
// Config holds CLI configuration.
type Config struct {
Mode string // "new" or "resume"
Task string
SessionID string
WorkDir string
Model string
ReasoningEffort string
ExplicitStdin bool
Timeout int
Backend string
Agent string
PromptFile string
PromptFileExplicit bool
SkipPermissions bool
Yolo bool
MaxParallelWorkers int
}
// EnvFlagEnabled returns true when the environment variable exists and is not
// explicitly set to a falsey value ("0/false/no/off").
func EnvFlagEnabled(key string) bool {
val, ok := os.LookupEnv(key)
if !ok {
return false
}
val = strings.TrimSpace(strings.ToLower(val))
switch val {
case "", "0", "false", "no", "off":
return false
default:
return true
}
}
func ParseBoolFlag(val string, defaultValue bool) bool {
val = strings.TrimSpace(strings.ToLower(val))
switch val {
case "1", "true", "yes", "on":
return true
case "0", "false", "no", "off":
return false
default:
return defaultValue
}
}
// EnvFlagDefaultTrue returns true unless the env var is explicitly set to
// false/0/no/off.
func EnvFlagDefaultTrue(key string) bool {
val, ok := os.LookupEnv(key)
if !ok {
return true
}
return ParseBoolFlag(val, true)
}
func ValidateAgentName(name string) error {
if strings.TrimSpace(name) == "" {
return fmt.Errorf("agent name is empty")
}
for _, r := range name {
switch {
case r >= 'a' && r <= 'z':
case r >= 'A' && r <= 'Z':
case r >= '0' && r <= '9':
case r == '-', r == '_':
default:
return fmt.Errorf("agent name %q contains invalid character %q", name, r)
}
}
return nil
}
const maxParallelWorkersLimit = 100
// ResolveMaxParallelWorkers reads CODEAGENT_MAX_PARALLEL_WORKERS. It returns 0
// for "unlimited".
func ResolveMaxParallelWorkers() int {
raw := strings.TrimSpace(os.Getenv("CODEAGENT_MAX_PARALLEL_WORKERS"))
if raw == "" {
return 0
}
value, err := strconv.Atoi(raw)
if err != nil || value < 0 {
return 0
}
if value > maxParallelWorkersLimit {
return maxParallelWorkersLimit
}
return value
}

View File

@@ -1,47 +0,0 @@
package config
import (
"errors"
"os"
"path/filepath"
"strings"
"github.com/spf13/viper"
)
// NewViper returns a viper instance configured for CODEAGENT_* environment
// variables and an optional config file.
//
// Search order when configFile is empty:
// - $HOME/.codeagent/config.(yaml|yml|json|toml|...)
func NewViper(configFile string) (*viper.Viper, error) {
v := viper.New()
v.SetEnvPrefix("CODEAGENT")
v.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
v.AutomaticEnv()
if strings.TrimSpace(configFile) != "" {
v.SetConfigFile(configFile)
if err := v.ReadInConfig(); err != nil {
return nil, err
}
return v, nil
}
home, err := os.UserHomeDir()
if err != nil || strings.TrimSpace(home) == "" {
return v, nil
}
v.SetConfigName("config")
v.AddConfigPath(filepath.Join(home, ".codeagent"))
if err := v.ReadInConfig(); err != nil {
var notFound viper.ConfigFileNotFoundError
if errors.As(err, &notFound) {
return v, nil
}
return nil, err
}
return v, nil
}

View File

@@ -1,193 +0,0 @@
package executor
import (
"os"
"path/filepath"
"strings"
"testing"
backend "codeagent-wrapper/internal/backend"
config "codeagent-wrapper/internal/config"
)
// TestEnvInjectionWithAgent tests the full flow of env injection with agent config
func TestEnvInjectionWithAgent(t *testing.T) {
// Setup temp config
tmpDir := t.TempDir()
configDir := filepath.Join(tmpDir, ".codeagent")
if err := os.MkdirAll(configDir, 0755); err != nil {
t.Fatal(err)
}
// Write test config with agent that has base_url and api_key
configContent := `{
"default_backend": "codex",
"agents": {
"test-agent": {
"backend": "claude",
"model": "test-model",
"base_url": "https://test.api.com",
"api_key": "test-api-key-12345678"
}
}
}`
configPath := filepath.Join(configDir, "models.json")
if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
t.Fatal(err)
}
// Override HOME to use temp dir
oldHome := os.Getenv("HOME")
os.Setenv("HOME", tmpDir)
defer os.Setenv("HOME", oldHome)
// Reset config cache
config.ResetModelsConfigCacheForTest()
defer config.ResetModelsConfigCacheForTest()
// Test ResolveAgentConfig
agentBackend, model, _, _, baseURL, apiKey, _ := config.ResolveAgentConfig("test-agent")
t.Logf("ResolveAgentConfig: backend=%q, model=%q, baseURL=%q, apiKey=%q",
agentBackend, model, baseURL, apiKey)
if agentBackend != "claude" {
t.Errorf("expected backend 'claude', got %q", agentBackend)
}
if baseURL != "https://test.api.com" {
t.Errorf("expected baseURL 'https://test.api.com', got %q", baseURL)
}
if apiKey != "test-api-key-12345678" {
t.Errorf("expected apiKey 'test-api-key-12345678', got %q", apiKey)
}
// Test Backend.Env
b := backend.ClaudeBackend{}
env := b.Env(baseURL, apiKey)
t.Logf("Backend.Env: %v", env)
if env == nil {
t.Fatal("expected non-nil env from Backend.Env")
}
if env["ANTHROPIC_BASE_URL"] != baseURL {
t.Errorf("expected ANTHROPIC_BASE_URL=%q, got %q", baseURL, env["ANTHROPIC_BASE_URL"])
}
if env["ANTHROPIC_AUTH_TOKEN"] != apiKey {
t.Errorf("expected ANTHROPIC_AUTH_TOKEN=%q, got %q", apiKey, env["ANTHROPIC_AUTH_TOKEN"])
}
}
// TestEnvInjectionLogic tests the exact logic used in executor
func TestEnvInjectionLogic(t *testing.T) {
// Setup temp config
tmpDir := t.TempDir()
configDir := filepath.Join(tmpDir, ".codeagent")
if err := os.MkdirAll(configDir, 0755); err != nil {
t.Fatal(err)
}
configContent := `{
"default_backend": "codex",
"agents": {
"explore": {
"backend": "claude",
"model": "MiniMax-M2.1",
"base_url": "https://api.minimaxi.com/anthropic",
"api_key": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.test"
}
}
}`
configPath := filepath.Join(configDir, "models.json")
if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
t.Fatal(err)
}
oldHome := os.Getenv("HOME")
os.Setenv("HOME", tmpDir)
defer os.Setenv("HOME", oldHome)
config.ResetModelsConfigCacheForTest()
defer config.ResetModelsConfigCacheForTest()
// Simulate the executor logic
cfgBackend := "claude" // This should come from taskSpec.Backend
agentName := "explore"
// Step 1: Get backend config (usually empty for claude without global config)
baseURL, apiKey := config.ResolveBackendConfig(cfgBackend)
t.Logf("Step 1 - ResolveBackendConfig(%q): baseURL=%q, apiKey=%q", cfgBackend, baseURL, apiKey)
// Step 2: If agent specified, get agent config
if agentName != "" {
agentBackend, _, _, _, agentBaseURL, agentAPIKey, _ := config.ResolveAgentConfig(agentName)
t.Logf("Step 2 - ResolveAgentConfig(%q): backend=%q, baseURL=%q, apiKey=%q",
agentName, agentBackend, agentBaseURL, agentAPIKey)
// Step 3: Check if agent backend matches cfg backend
if strings.EqualFold(strings.TrimSpace(agentBackend), strings.TrimSpace(cfgBackend)) {
baseURL, apiKey = agentBaseURL, agentAPIKey
t.Logf("Step 3 - Backend match! Using agent config: baseURL=%q, apiKey=%q", baseURL, apiKey)
} else {
t.Logf("Step 3 - Backend mismatch: agent=%q, cfg=%q", agentBackend, cfgBackend)
}
}
// Step 4: Get env vars from backend
b := backend.ClaudeBackend{}
injected := b.Env(baseURL, apiKey)
t.Logf("Step 4 - Backend.Env: %v", injected)
// Verify
if len(injected) == 0 {
t.Fatal("Expected env vars to be injected, got none")
}
expectedURL := "https://api.minimaxi.com/anthropic"
if injected["ANTHROPIC_BASE_URL"] != expectedURL {
t.Errorf("ANTHROPIC_BASE_URL: expected %q, got %q", expectedURL, injected["ANTHROPIC_BASE_URL"])
}
if _, ok := injected["ANTHROPIC_AUTH_TOKEN"]; !ok {
t.Error("ANTHROPIC_AUTH_TOKEN not set")
}
// Step 5: Test masking
for k, v := range injected {
masked := maskSensitiveValue(k, v)
t.Logf("Step 5 - Env log: %s=%s", k, masked)
}
}
// TestTaskSpecBackendPropagation tests that taskSpec.Backend is properly used
func TestTaskSpecBackendPropagation(t *testing.T) {
// Simulate what happens in RunCodexTaskWithContext
taskSpec := TaskSpec{
ID: "test",
Task: "hello",
Backend: "claude",
Agent: "explore",
}
// This is the logic from executor.go lines 889-916
cfg := &config.Config{
Mode: "new",
Task: taskSpec.Task,
Backend: "codex", // default
}
var backend Backend = nil // nil in single mode
commandName := "codex" // default
if backend != nil {
cfg.Backend = backend.Name()
} else if taskSpec.Backend != "" {
cfg.Backend = taskSpec.Backend
} else if commandName != "" {
cfg.Backend = commandName
}
t.Logf("taskSpec.Backend=%q, cfg.Backend=%q", taskSpec.Backend, cfg.Backend)
if cfg.Backend != "claude" {
t.Errorf("expected cfg.Backend='claude', got %q", cfg.Backend)
}
}

View File

@@ -1,333 +0,0 @@
package executor
import (
"strings"
"testing"
backend "codeagent-wrapper/internal/backend"
)
func TestMaskSensitiveValue(t *testing.T) {
tests := []struct {
name string
key string
value string
expected string
}{
{
name: "API_KEY with long value",
key: "ANTHROPIC_AUTH_TOKEN",
value: "sk-ant-api03-xxxxxxxxxxxxxxxxxxxxxxxxxxxx",
expected: "sk-a****xxxx",
},
{
name: "api_key lowercase",
key: "api_key",
value: "abcdefghijklmnop",
expected: "abcd****mnop",
},
{
name: "AUTH_TOKEN",
key: "AUTH_TOKEN",
value: "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9",
expected: "eyJh****VCJ9",
},
{
name: "SECRET",
key: "MY_SECRET",
value: "super-secret-value-12345",
expected: "supe****2345",
},
{
name: "short key value (8 chars)",
key: "API_KEY",
value: "12345678",
expected: "****",
},
{
name: "very short key value",
key: "API_KEY",
value: "abc",
expected: "****",
},
{
name: "empty key value",
key: "API_KEY",
value: "",
expected: "",
},
{
name: "non-sensitive BASE_URL",
key: "ANTHROPIC_BASE_URL",
value: "https://api.anthropic.com",
expected: "https://api.anthropic.com",
},
{
name: "non-sensitive MODEL",
key: "MODEL",
value: "claude-3-opus",
expected: "claude-3-opus",
},
{
name: "case insensitive - Key",
key: "My_Key",
value: "1234567890abcdef",
expected: "1234****cdef",
},
{
name: "case insensitive - TOKEN",
key: "ACCESS_TOKEN",
value: "access123456789",
expected: "acce****6789",
},
{
name: "partial match - apikey",
key: "MYAPIKEY",
value: "1234567890",
expected: "1234****7890",
},
{
name: "partial match - secretvalue",
key: "SECRETVALUE",
value: "abcdefghij",
expected: "abcd****ghij",
},
{
name: "9 char value (just above threshold)",
key: "API_KEY",
value: "123456789",
expected: "1234****6789",
},
{
name: "exactly 8 char value (at threshold)",
key: "API_KEY",
value: "12345678",
expected: "****",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := maskSensitiveValue(tt.key, tt.value)
if result != tt.expected {
t.Errorf("maskSensitiveValue(%q, %q) = %q, want %q", tt.key, tt.value, result, tt.expected)
}
})
}
}
func TestMaskSensitiveValue_NoLeakage(t *testing.T) {
// Ensure sensitive values are never fully exposed
sensitiveKeys := []string{"API_KEY", "api_key", "AUTH_TOKEN", "SECRET", "access_token", "MYAPIKEY"}
longValue := "this-is-a-very-long-secret-value-that-should-be-masked"
for _, key := range sensitiveKeys {
t.Run(key, func(t *testing.T) {
masked := maskSensitiveValue(key, longValue)
// Should not contain the full value
if masked == longValue {
t.Errorf("key %q: value was not masked", key)
}
// Should contain mask marker
if !strings.Contains(masked, "****") {
t.Errorf("key %q: masked value %q does not contain ****", key, masked)
}
// First 4 chars should be visible
if !strings.HasPrefix(masked, longValue[:4]) {
t.Errorf("key %q: masked value should start with first 4 chars", key)
}
// Last 4 chars should be visible
if !strings.HasSuffix(masked, longValue[len(longValue)-4:]) {
t.Errorf("key %q: masked value should end with last 4 chars", key)
}
})
}
}
func TestMaskSensitiveValue_NonSensitivePassthrough(t *testing.T) {
// Non-sensitive keys should pass through unchanged
nonSensitiveKeys := []string{
"ANTHROPIC_BASE_URL",
"BASE_URL",
"MODEL",
"BACKEND",
"WORKDIR",
"HOME",
"PATH",
}
value := "any-value-here-12345"
for _, key := range nonSensitiveKeys {
t.Run(key, func(t *testing.T) {
result := maskSensitiveValue(key, value)
if result != value {
t.Errorf("key %q: expected passthrough but got %q", key, result)
}
})
}
}
// TestClaudeBackendEnv tests that ClaudeBackend.Env returns correct env vars
func TestClaudeBackendEnv(t *testing.T) {
tests := []struct {
name string
baseURL string
apiKey string
expectKeys []string
expectNil bool
}{
{
name: "both base_url and api_key",
baseURL: "https://api.custom.com",
apiKey: "sk-test-key-12345",
expectKeys: []string{"ANTHROPIC_BASE_URL", "ANTHROPIC_AUTH_TOKEN"},
},
{
name: "only base_url",
baseURL: "https://api.custom.com",
apiKey: "",
expectKeys: []string{"ANTHROPIC_BASE_URL"},
},
{
name: "only api_key",
baseURL: "",
apiKey: "sk-test-key-12345",
expectKeys: []string{"ANTHROPIC_AUTH_TOKEN"},
},
{
name: "both empty",
baseURL: "",
apiKey: "",
expectNil: true,
},
{
name: "whitespace only",
baseURL: " ",
apiKey: " ",
expectNil: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
b := backend.ClaudeBackend{}
env := b.Env(tt.baseURL, tt.apiKey)
if tt.expectNil {
if env != nil {
t.Errorf("expected nil env, got %v", env)
}
return
}
if env == nil {
t.Fatal("expected non-nil env")
}
for _, key := range tt.expectKeys {
if _, ok := env[key]; !ok {
t.Errorf("expected key %q in env", key)
}
}
// Verify values are correct
if tt.baseURL != "" && strings.TrimSpace(tt.baseURL) != "" {
if env["ANTHROPIC_BASE_URL"] != strings.TrimSpace(tt.baseURL) {
t.Errorf("ANTHROPIC_BASE_URL = %q, want %q", env["ANTHROPIC_BASE_URL"], strings.TrimSpace(tt.baseURL))
}
}
if tt.apiKey != "" && strings.TrimSpace(tt.apiKey) != "" {
if env["ANTHROPIC_AUTH_TOKEN"] != strings.TrimSpace(tt.apiKey) {
t.Errorf("ANTHROPIC_AUTH_TOKEN = %q, want %q", env["ANTHROPIC_AUTH_TOKEN"], strings.TrimSpace(tt.apiKey))
}
}
})
}
}
// TestEnvLoggingIntegration tests that env vars are properly masked in logs
func TestEnvLoggingIntegration(t *testing.T) {
b := backend.ClaudeBackend{}
baseURL := "https://api.minimaxi.com/anthropic"
apiKey := "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.longjwttoken"
env := b.Env(baseURL, apiKey)
if env == nil {
t.Fatal("expected non-nil env")
}
// Verify that when we log these values, sensitive ones are masked
for k, v := range env {
masked := maskSensitiveValue(k, v)
if k == "ANTHROPIC_BASE_URL" {
// URL should not be masked
if masked != v {
t.Errorf("BASE_URL should not be masked: got %q, want %q", masked, v)
}
}
if k == "ANTHROPIC_AUTH_TOKEN" {
// API key should be masked
if masked == v {
t.Errorf("API_KEY should be masked, but got original value")
}
if !strings.Contains(masked, "****") {
t.Errorf("masked API_KEY should contain ****: got %q", masked)
}
// Should still show first 4 and last 4 chars
if !strings.HasPrefix(masked, v[:4]) {
t.Errorf("masked value should start with first 4 chars of original")
}
if !strings.HasSuffix(masked, v[len(v)-4:]) {
t.Errorf("masked value should end with last 4 chars of original")
}
}
}
}
// TestGeminiBackendEnv tests GeminiBackend.Env for comparison
func TestGeminiBackendEnv(t *testing.T) {
b := backend.GeminiBackend{}
env := b.Env("https://custom.api", "gemini-api-key-12345")
if env == nil {
t.Fatal("expected non-nil env")
}
// Check that GEMINI env vars are set
if _, ok := env["GOOGLE_GEMINI_BASE_URL"]; !ok {
t.Error("expected GOOGLE_GEMINI_BASE_URL in env")
}
if _, ok := env["GEMINI_API_KEY"]; !ok {
t.Error("expected GEMINI_API_KEY in env")
}
// Verify masking works for Gemini keys too
for k, v := range env {
masked := maskSensitiveValue(k, v)
if strings.Contains(strings.ToLower(k), "key") {
if masked == v && len(v) > 0 {
t.Errorf("key %q should be masked", k)
}
}
}
}
// TestCodexBackendEnv tests CodexBackend.Env
func TestCodexBackendEnv(t *testing.T) {
b := backend.CodexBackend{}
env := b.Env("https://custom.api", "codex-api-key-12345")
if env == nil {
t.Fatal("expected non-nil env for codex")
}
// Check for OPENAI env vars
if _, ok := env["OPENAI_BASE_URL"]; !ok {
t.Error("expected OPENAI_BASE_URL in env")
}
if _, ok := env["OPENAI_API_KEY"]; !ok {
t.Error("expected OPENAI_API_KEY in env")
}
}

View File

@@ -1,133 +0,0 @@
package executor
import (
"context"
"errors"
"io"
"os"
"path/filepath"
"strings"
"testing"
config "codeagent-wrapper/internal/config"
)
type fakeCmd struct {
env map[string]string
}
func (f *fakeCmd) Start() error { return nil }
func (f *fakeCmd) Wait() error { return nil }
func (f *fakeCmd) StdoutPipe() (io.ReadCloser, error) {
return io.NopCloser(strings.NewReader("")), nil
}
func (f *fakeCmd) StderrPipe() (io.ReadCloser, error) {
return nil, errors.New("fake stderr pipe error")
}
func (f *fakeCmd) StdinPipe() (io.WriteCloser, error) {
return nil, errors.New("fake stdin pipe error")
}
func (f *fakeCmd) SetStderr(io.Writer) {}
func (f *fakeCmd) SetDir(string) {}
func (f *fakeCmd) SetEnv(env map[string]string) {
if len(env) == 0 {
return
}
if f.env == nil {
f.env = make(map[string]string, len(env))
}
for k, v := range env {
f.env[k] = v
}
}
func (f *fakeCmd) Process() processHandle { return nil }
func TestEnvInjection_LogsToStderrAndMasksKey(t *testing.T) {
// Arrange ~/.codeagent/models.json via HOME override.
tmpDir := t.TempDir()
configDir := filepath.Join(tmpDir, ".codeagent")
if err := os.MkdirAll(configDir, 0o755); err != nil {
t.Fatal(err)
}
const baseURL = "https://api.minimaxi.com/anthropic"
const apiKey = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.test"
models := `{
"agents": {
"explore": {
"backend": "claude",
"model": "MiniMax-M2.1",
"base_url": "` + baseURL + `",
"api_key": "` + apiKey + `"
}
}
}`
if err := os.WriteFile(filepath.Join(configDir, "models.json"), []byte(models), 0o644); err != nil {
t.Fatal(err)
}
oldHome := os.Getenv("HOME")
if err := os.Setenv("HOME", tmpDir); err != nil {
t.Fatal(err)
}
defer func() { _ = os.Setenv("HOME", oldHome) }()
config.ResetModelsConfigCacheForTest()
defer config.ResetModelsConfigCacheForTest()
// Capture stderr (RunCodexTaskWithContext prints env injection lines there).
r, w, err := os.Pipe()
if err != nil {
t.Fatal(err)
}
oldStderr := os.Stderr
os.Stderr = w
defer func() { os.Stderr = oldStderr }()
readDone := make(chan string, 1)
go func() {
defer r.Close()
b, _ := io.ReadAll(r)
readDone <- string(b)
}()
var cmd *fakeCmd
restoreRunner := SetNewCommandRunner(func(ctx context.Context, name string, args ...string) CommandRunner {
cmd = &fakeCmd{}
return cmd
})
defer restoreRunner()
// Act: force an early return right after env injection by making StderrPipe fail.
_ = RunCodexTaskWithContext(
context.Background(),
TaskSpec{Task: "hi", WorkDir: ".", Backend: "claude", Agent: "explore"},
nil,
"claude",
nil,
nil,
false,
false,
1,
)
_ = w.Close()
got := <-readDone
// Assert: env was injected into the command and logging is present with masking.
if cmd == nil || cmd.env == nil {
t.Fatalf("expected cmd env to be set, got cmd=%v env=%v", cmd, nil)
}
if cmd.env["ANTHROPIC_BASE_URL"] != baseURL {
t.Fatalf("ANTHROPIC_BASE_URL=%q, want %q", cmd.env["ANTHROPIC_BASE_URL"], baseURL)
}
if cmd.env["ANTHROPIC_AUTH_TOKEN"] != apiKey {
t.Fatalf("ANTHROPIC_AUTH_TOKEN=%q, want %q", cmd.env["ANTHROPIC_AUTH_TOKEN"], apiKey)
}
if !strings.Contains(got, "Env: ANTHROPIC_BASE_URL="+baseURL) {
t.Fatalf("stderr missing base URL env log; stderr=%q", got)
}
if !strings.Contains(got, "Env: ANTHROPIC_AUTH_TOKEN=eyJh****test") {
t.Fatalf("stderr missing masked API key log; stderr=%q", got)
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,72 +0,0 @@
package executor
import (
"bytes"
"io"
"strings"
)
// geminiNoisePatterns contains stderr patterns to filter for gemini backend
var geminiNoisePatterns = []string{
"[STARTUP]",
"Session cleanup disabled",
"Warning:",
"(node:",
"(Use `node --trace-warnings",
"Loaded cached credentials",
"Loading extension:",
"YOLO mode is enabled",
}
// codexNoisePatterns contains stderr patterns to filter for codex backend
var codexNoisePatterns = []string{
"ERROR codex_core::codex: needs_follow_up:",
"ERROR codex_core::skills::loader:",
}
// filteringWriter wraps an io.Writer and filters out lines matching patterns
type filteringWriter struct {
w io.Writer
patterns []string
buf bytes.Buffer
}
func newFilteringWriter(w io.Writer, patterns []string) *filteringWriter {
return &filteringWriter{w: w, patterns: patterns}
}
func (f *filteringWriter) Write(p []byte) (n int, err error) {
f.buf.Write(p)
for {
line, err := f.buf.ReadString('\n')
if err != nil {
// incomplete line, put it back
f.buf.WriteString(line)
break
}
if !f.shouldFilter(line) {
_, _ = f.w.Write([]byte(line))
}
}
return len(p), nil
}
func (f *filteringWriter) shouldFilter(line string) bool {
for _, pattern := range f.patterns {
if strings.Contains(line, pattern) {
return true
}
}
return false
}
// Flush writes any remaining buffered content
func (f *filteringWriter) Flush() {
if f.buf.Len() > 0 {
remaining := f.buf.String()
if !f.shouldFilter(remaining) {
_, _ = f.w.Write([]byte(remaining))
}
f.buf.Reset()
}
}

View File

@@ -1,73 +0,0 @@
package executor
import (
"bytes"
"testing"
)
func TestFilteringWriter(t *testing.T) {
tests := []struct {
name string
patterns []string
input string
want string
}{
{
name: "filter STARTUP lines",
patterns: geminiNoisePatterns,
input: "[STARTUP] Recording metric\nHello World\n[STARTUP] Another line\n",
want: "Hello World\n",
},
{
name: "filter Warning lines",
patterns: geminiNoisePatterns,
input: "Warning: something bad\nActual output\n",
want: "Actual output\n",
},
{
name: "filter multiple patterns",
patterns: geminiNoisePatterns,
input: "YOLO mode is enabled\nSession cleanup disabled\nReal content\nLoading extension: foo\n",
want: "Real content\n",
},
{
name: "no filtering needed",
patterns: geminiNoisePatterns,
input: "Line 1\nLine 2\nLine 3\n",
want: "Line 1\nLine 2\nLine 3\n",
},
{
name: "empty input",
patterns: geminiNoisePatterns,
input: "",
want: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var buf bytes.Buffer
fw := newFilteringWriter(&buf, tt.patterns)
_, _ = fw.Write([]byte(tt.input))
fw.Flush()
if got := buf.String(); got != tt.want {
t.Errorf("got %q, want %q", got, tt.want)
}
})
}
}
func TestFilteringWriterPartialLines(t *testing.T) {
var buf bytes.Buffer
fw := newFilteringWriter(&buf, geminiNoisePatterns)
// Write partial line
_, _ = fw.Write([]byte("Hello "))
_, _ = fw.Write([]byte("World\n"))
fw.Flush()
if got := buf.String(); got != "Hello World\n" {
t.Errorf("got %q, want %q", got, "Hello World\n")
}
}

View File

@@ -1,124 +0,0 @@
package executor
import "bytes"
type logWriter struct {
prefix string
maxLen int
buf bytes.Buffer
dropped bool
}
func newLogWriter(prefix string, maxLen int) *logWriter {
if maxLen <= 0 {
maxLen = codexLogLineLimit
}
return &logWriter{prefix: prefix, maxLen: maxLen}
}
func (lw *logWriter) Write(p []byte) (int, error) {
if lw == nil {
return len(p), nil
}
total := len(p)
for len(p) > 0 {
if idx := bytes.IndexByte(p, '\n'); idx >= 0 {
lw.writeLimited(p[:idx])
lw.logLine(true)
p = p[idx+1:]
continue
}
lw.writeLimited(p)
break
}
return total, nil
}
func (lw *logWriter) Flush() {
if lw == nil || lw.buf.Len() == 0 {
return
}
lw.logLine(false)
}
func (lw *logWriter) logLine(force bool) {
if lw == nil {
return
}
line := lw.buf.String()
dropped := lw.dropped
lw.dropped = false
lw.buf.Reset()
if line == "" && !force {
return
}
if lw.maxLen > 0 {
if dropped {
if lw.maxLen > 3 {
line = line[:min(len(line), lw.maxLen-3)] + "..."
} else {
line = line[:min(len(line), lw.maxLen)]
}
} else if len(line) > lw.maxLen {
cutoff := lw.maxLen
if cutoff > 3 {
line = line[:cutoff-3] + "..."
} else {
line = line[:cutoff]
}
}
}
logInfo(lw.prefix + line)
}
func (lw *logWriter) writeLimited(p []byte) {
if lw == nil || len(p) == 0 {
return
}
if lw.maxLen <= 0 {
lw.buf.Write(p)
return
}
remaining := lw.maxLen - lw.buf.Len()
if remaining <= 0 {
lw.dropped = true
return
}
if len(p) <= remaining {
lw.buf.Write(p)
return
}
lw.buf.Write(p[:remaining])
lw.dropped = true
}
type tailBuffer struct {
limit int
data []byte
}
func (b *tailBuffer) Write(p []byte) (int, error) {
if b.limit <= 0 {
return len(p), nil
}
if len(p) >= b.limit {
b.data = append(b.data[:0], p[len(p)-b.limit:]...)
return len(p), nil
}
total := len(b.data) + len(p)
if total <= b.limit {
b.data = append(b.data, p...)
return len(p), nil
}
overflow := total - b.limit
b.data = append(b.data[overflow:], p...)
return len(p), nil
}
func (b *tailBuffer) String() string {
return string(b.data)
}

View File

@@ -1,36 +0,0 @@
package executor
import (
"os"
"strings"
"testing"
)
func TestLogWriterWriteLimitsBuffer(t *testing.T) {
logger, err := NewLogger()
if err != nil {
t.Fatalf("NewLogger error: %v", err)
}
setLogger(logger)
t.Cleanup(func() { _ = closeLogger() })
lw := newLogWriter("P:", 10)
_, _ = lw.Write([]byte(strings.Repeat("a", 100)))
if lw.buf.Len() != 10 {
t.Fatalf("logWriter buffer len=%d, want %d", lw.buf.Len(), 10)
}
if !lw.dropped {
t.Fatalf("expected logWriter to drop overlong line bytes")
}
lw.Flush()
logger.Flush()
data, err := os.ReadFile(logger.Path())
if err != nil {
t.Fatalf("ReadFile error: %v", err)
}
if !strings.Contains(string(data), "P:aaaaaaa...") {
t.Fatalf("log output missing truncated entry, got %q", string(data))
}
}

View File

@@ -1,135 +0,0 @@
package executor
import (
"bytes"
"fmt"
"strings"
config "codeagent-wrapper/internal/config"
)
func ParseParallelConfig(data []byte) (*ParallelConfig, error) {
trimmed := bytes.TrimSpace(data)
if len(trimmed) == 0 {
return nil, fmt.Errorf("parallel config is empty")
}
tasks := strings.Split(string(trimmed), "---TASK---")
var cfg ParallelConfig
seen := make(map[string]struct{})
taskIndex := 0
for _, taskBlock := range tasks {
taskBlock = strings.TrimSpace(taskBlock)
if taskBlock == "" {
continue
}
taskIndex++
parts := strings.SplitN(taskBlock, "---CONTENT---", 2)
if len(parts) != 2 {
return nil, fmt.Errorf("task block #%d missing ---CONTENT--- separator", taskIndex)
}
meta := strings.TrimSpace(parts[0])
content := strings.TrimSpace(parts[1])
task := TaskSpec{WorkDir: defaultWorkdir}
agentSpecified := false
for _, line := range strings.Split(meta, "\n") {
line = strings.TrimSpace(line)
if line == "" {
continue
}
kv := strings.SplitN(line, ":", 2)
if len(kv) != 2 {
continue
}
key := strings.TrimSpace(kv[0])
value := strings.TrimSpace(kv[1])
switch key {
case "id":
task.ID = value
case "workdir":
// Validate workdir: "-" is not a valid directory
if value == "-" {
return nil, fmt.Errorf("task block #%d has invalid workdir: '-' is not a valid directory path", taskIndex)
}
task.WorkDir = value
case "session_id":
task.SessionID = value
task.Mode = "resume"
case "backend":
task.Backend = value
case "model":
task.Model = value
case "reasoning_effort":
task.ReasoningEffort = value
case "agent":
agentSpecified = true
task.Agent = value
case "skip_permissions", "skip-permissions":
if value == "" {
task.SkipPermissions = true
continue
}
task.SkipPermissions = config.ParseBoolFlag(value, false)
case "dependencies":
for _, dep := range strings.Split(value, ",") {
dep = strings.TrimSpace(dep)
if dep != "" {
task.Dependencies = append(task.Dependencies, dep)
}
}
}
}
if task.Mode == "" {
task.Mode = "new"
}
if agentSpecified {
if strings.TrimSpace(task.Agent) == "" {
return nil, fmt.Errorf("task block #%d has empty agent field", taskIndex)
}
if err := config.ValidateAgentName(task.Agent); err != nil {
return nil, fmt.Errorf("task block #%d invalid agent name: %w", taskIndex, err)
}
backend, model, promptFile, reasoning, _, _, _ := config.ResolveAgentConfig(task.Agent)
if task.Backend == "" {
task.Backend = backend
}
if task.Model == "" {
task.Model = model
}
if task.ReasoningEffort == "" {
task.ReasoningEffort = reasoning
}
task.PromptFile = promptFile
}
if task.ID == "" {
return nil, fmt.Errorf("task block #%d missing id field", taskIndex)
}
if content == "" {
return nil, fmt.Errorf("task block #%d (%q) missing content", taskIndex, task.ID)
}
if task.Mode == "resume" && strings.TrimSpace(task.SessionID) == "" {
return nil, fmt.Errorf("task block #%d (%q) has empty session_id", taskIndex, task.ID)
}
if _, exists := seen[task.ID]; exists {
return nil, fmt.Errorf("task block #%d has duplicate id: %s", taskIndex, task.ID)
}
task.Task = content
cfg.Tasks = append(cfg.Tasks, task)
seen[task.ID] = struct{}{}
}
if len(cfg.Tasks) == 0 {
return nil, fmt.Errorf("no tasks found")
}
return &cfg, nil
}

View File

@@ -1,130 +0,0 @@
package executor
import (
"fmt"
"os"
"path/filepath"
"strings"
)
func ReadAgentPromptFile(path string, allowOutsideClaudeDir bool) (string, error) {
raw := strings.TrimSpace(path)
if raw == "" {
return "", nil
}
expanded := raw
if raw == "~" || strings.HasPrefix(raw, "~/") || strings.HasPrefix(raw, "~\\") {
home, err := os.UserHomeDir()
if err != nil {
return "", err
}
if raw == "~" {
expanded = home
} else {
expanded = home + raw[1:]
}
}
absPath, err := filepath.Abs(expanded)
if err != nil {
return "", err
}
absPath = filepath.Clean(absPath)
home, err := os.UserHomeDir()
if err != nil {
if !allowOutsideClaudeDir {
return "", err
}
logWarn(fmt.Sprintf("Failed to resolve home directory for prompt file validation: %v; proceeding without restriction", err))
} else {
allowedDirs := []string{
filepath.Clean(filepath.Join(home, ".claude")),
filepath.Clean(filepath.Join(home, ".codeagent", "agents")),
}
for i := range allowedDirs {
allowedAbs, err := filepath.Abs(allowedDirs[i])
if err == nil {
allowedDirs[i] = filepath.Clean(allowedAbs)
}
}
isWithinDir := func(path, dir string) bool {
rel, err := filepath.Rel(dir, path)
if err != nil {
return false
}
rel = filepath.Clean(rel)
if rel == "." {
return true
}
if rel == ".." {
return false
}
prefix := ".." + string(os.PathSeparator)
return !strings.HasPrefix(rel, prefix)
}
if !allowOutsideClaudeDir {
withinAllowed := false
for _, dir := range allowedDirs {
if isWithinDir(absPath, dir) {
withinAllowed = true
break
}
}
if !withinAllowed {
logWarn(fmt.Sprintf("Refusing to read prompt file outside allowed dirs (%s): %s", strings.Join(allowedDirs, ", "), absPath))
return "", fmt.Errorf("prompt file must be under ~/.claude or ~/.codeagent/agents")
}
resolvedPath, errPath := filepath.EvalSymlinks(absPath)
if errPath == nil {
resolvedPath = filepath.Clean(resolvedPath)
resolvedAllowed := make([]string, 0, len(allowedDirs))
for _, dir := range allowedDirs {
resolvedBase, errBase := filepath.EvalSymlinks(dir)
if errBase != nil {
continue
}
resolvedAllowed = append(resolvedAllowed, filepath.Clean(resolvedBase))
}
if len(resolvedAllowed) > 0 {
withinResolved := false
for _, dir := range resolvedAllowed {
if isWithinDir(resolvedPath, dir) {
withinResolved = true
break
}
}
if !withinResolved {
logWarn(fmt.Sprintf("Refusing to read prompt file outside allowed dirs (%s) (resolved): %s", strings.Join(resolvedAllowed, ", "), resolvedPath))
return "", fmt.Errorf("prompt file must be under ~/.claude or ~/.codeagent/agents")
}
}
}
} else {
withinAllowed := false
for _, dir := range allowedDirs {
if isWithinDir(absPath, dir) {
withinAllowed = true
break
}
}
if !withinAllowed {
logWarn(fmt.Sprintf("Reading prompt file outside allowed dirs (%s): %s", strings.Join(allowedDirs, ", "), absPath))
}
}
}
data, err := os.ReadFile(absPath)
if err != nil {
return "", err
}
return strings.TrimRight(string(data), "\r\n"), nil
}
func WrapTaskWithAgentPrompt(prompt string, task string) string {
return "<agent-prompt>\n" + prompt + "\n</agent-prompt>\n\n" + task
}

View File

@@ -1,186 +0,0 @@
package executor
import (
"os"
"path/filepath"
"runtime"
"strings"
"testing"
)
func TestWrapTaskWithAgentPrompt(t *testing.T) {
got := WrapTaskWithAgentPrompt("P", "do")
want := "<agent-prompt>\nP\n</agent-prompt>\n\ndo"
if got != want {
t.Fatalf("wrapTaskWithAgentPrompt mismatch:\n got=%q\nwant=%q", got, want)
}
}
func TestReadAgentPromptFile_EmptyPath(t *testing.T) {
for _, allowOutside := range []bool{false, true} {
got, err := ReadAgentPromptFile(" ", allowOutside)
if err != nil {
t.Fatalf("unexpected error (allowOutside=%v): %v", allowOutside, err)
}
if got != "" {
t.Fatalf("expected empty result (allowOutside=%v), got %q", allowOutside, got)
}
}
}
func TestReadAgentPromptFile_ExplicitAbsolutePath(t *testing.T) {
dir := t.TempDir()
path := filepath.Join(dir, "prompt.md")
if err := os.WriteFile(path, []byte("LINE1\n"), 0o644); err != nil {
t.Fatalf("WriteFile: %v", err)
}
got, err := ReadAgentPromptFile(path, true)
if err != nil {
t.Fatalf("readAgentPromptFile error: %v", err)
}
if got != "LINE1" {
t.Fatalf("got %q, want %q", got, "LINE1")
}
}
func TestReadAgentPromptFile_ExplicitTildeExpansion(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)
t.Setenv("USERPROFILE", home)
path := filepath.Join(home, "prompt.md")
if err := os.WriteFile(path, []byte("P\n"), 0o644); err != nil {
t.Fatalf("WriteFile: %v", err)
}
got, err := ReadAgentPromptFile("~/prompt.md", true)
if err != nil {
t.Fatalf("readAgentPromptFile error: %v", err)
}
if got != "P" {
t.Fatalf("got %q, want %q", got, "P")
}
}
func TestReadAgentPromptFile_RestrictedAllowsClaudeDir(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)
t.Setenv("USERPROFILE", home)
claudeDir := filepath.Join(home, ".claude")
if err := os.MkdirAll(claudeDir, 0o755); err != nil {
t.Fatalf("MkdirAll: %v", err)
}
path := filepath.Join(claudeDir, "prompt.md")
if err := os.WriteFile(path, []byte("OK\n"), 0o644); err != nil {
t.Fatalf("WriteFile: %v", err)
}
got, err := ReadAgentPromptFile("~/.claude/prompt.md", false)
if err != nil {
t.Fatalf("readAgentPromptFile error: %v", err)
}
if got != "OK" {
t.Fatalf("got %q, want %q", got, "OK")
}
}
func TestReadAgentPromptFile_RestrictedAllowsCodeagentAgentsDir(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)
t.Setenv("USERPROFILE", home)
agentDir := filepath.Join(home, ".codeagent", "agents")
if err := os.MkdirAll(agentDir, 0o755); err != nil {
t.Fatalf("MkdirAll: %v", err)
}
path := filepath.Join(agentDir, "sarsh.md")
if err := os.WriteFile(path, []byte("OK\n"), 0o644); err != nil {
t.Fatalf("WriteFile: %v", err)
}
got, err := ReadAgentPromptFile("~/.codeagent/agents/sarsh.md", false)
if err != nil {
t.Fatalf("readAgentPromptFile error: %v", err)
}
if got != "OK" {
t.Fatalf("got %q, want %q", got, "OK")
}
}
func TestReadAgentPromptFile_RestrictedRejectsOutsideClaudeDir(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)
t.Setenv("USERPROFILE", home)
path := filepath.Join(home, "prompt.md")
if err := os.WriteFile(path, []byte("NO\n"), 0o644); err != nil {
t.Fatalf("WriteFile: %v", err)
}
if _, err := ReadAgentPromptFile("~/prompt.md", false); err == nil {
t.Fatalf("expected error for prompt file outside ~/.claude, got nil")
}
}
func TestReadAgentPromptFile_RestrictedRejectsTraversal(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)
t.Setenv("USERPROFILE", home)
path := filepath.Join(home, "secret.md")
if err := os.WriteFile(path, []byte("SECRET\n"), 0o644); err != nil {
t.Fatalf("WriteFile: %v", err)
}
if _, err := ReadAgentPromptFile("~/.claude/../secret.md", false); err == nil {
t.Fatalf("expected traversal to be rejected, got nil")
}
}
func TestReadAgentPromptFile_NotFound(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)
t.Setenv("USERPROFILE", home)
claudeDir := filepath.Join(home, ".claude")
if err := os.MkdirAll(claudeDir, 0o755); err != nil {
t.Fatalf("MkdirAll: %v", err)
}
_, err := ReadAgentPromptFile("~/.claude/missing.md", false)
if err == nil || !os.IsNotExist(err) {
t.Fatalf("expected not-exist error, got %v", err)
}
}
func TestReadAgentPromptFile_PermissionDenied(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("chmod-based permission test is not reliable on Windows")
}
home := t.TempDir()
t.Setenv("HOME", home)
t.Setenv("USERPROFILE", home)
claudeDir := filepath.Join(home, ".claude")
if err := os.MkdirAll(claudeDir, 0o755); err != nil {
t.Fatalf("MkdirAll: %v", err)
}
path := filepath.Join(claudeDir, "private.md")
if err := os.WriteFile(path, []byte("PRIVATE\n"), 0o600); err != nil {
t.Fatalf("WriteFile: %v", err)
}
if err := os.Chmod(path, 0o000); err != nil {
t.Fatalf("Chmod: %v", err)
}
_, err := ReadAgentPromptFile("~/.claude/private.md", false)
if err == nil {
t.Fatalf("expected permission error, got nil")
}
if !os.IsPermission(err) && !strings.Contains(strings.ToLower(err.Error()), "permission") {
t.Fatalf("expected permission denied, got: %v", err)
}
}

View File

@@ -1,104 +0,0 @@
package executor
import "strings"
// extractCoverageGap extracts what's missing from coverage reports.
func extractCoverageGap(message string) string {
if message == "" {
return ""
}
lower := strings.ToLower(message)
lines := strings.Split(message, "\n")
for _, line := range lines {
lineLower := strings.ToLower(line)
line = strings.TrimSpace(line)
if strings.Contains(lineLower, "uncovered") ||
strings.Contains(lineLower, "not covered") ||
strings.Contains(lineLower, "missing coverage") ||
strings.Contains(lineLower, "lines not covered") {
if len(line) > 100 {
return line[:97] + "..."
}
return line
}
if strings.Contains(lineLower, "branch") && strings.Contains(lineLower, "not taken") {
if len(line) > 100 {
return line[:97] + "..."
}
return line
}
}
if strings.Contains(lower, "function") && strings.Contains(lower, "0%") {
for _, line := range lines {
if strings.Contains(strings.ToLower(line), "0%") && strings.Contains(line, "function") {
line = strings.TrimSpace(line)
if len(line) > 100 {
return line[:97] + "..."
}
return line
}
}
}
return ""
}
// extractErrorDetail extracts meaningful error context from task output.
func extractErrorDetail(message string, maxLen int) string {
if message == "" || maxLen <= 0 {
return ""
}
lines := strings.Split(message, "\n")
var errorLines []string
for _, line := range lines {
line = strings.TrimSpace(line)
if line == "" {
continue
}
lower := strings.ToLower(line)
if strings.HasPrefix(line, "at ") && strings.Contains(line, "(") {
if len(errorLines) > 0 && strings.HasPrefix(strings.ToLower(errorLines[len(errorLines)-1]), "at ") {
continue
}
}
if strings.Contains(lower, "error") ||
strings.Contains(lower, "fail") ||
strings.Contains(lower, "exception") ||
strings.Contains(lower, "assert") ||
strings.Contains(lower, "expected") ||
strings.Contains(lower, "timeout") ||
strings.Contains(lower, "not found") ||
strings.Contains(lower, "cannot") ||
strings.Contains(lower, "undefined") ||
strings.HasPrefix(line, "FAIL") ||
strings.HasPrefix(line, "●") {
errorLines = append(errorLines, line)
}
}
if len(errorLines) == 0 {
start := len(lines) - 5
if start < 0 {
start = 0
}
for _, line := range lines[start:] {
line = strings.TrimSpace(line)
if line != "" {
errorLines = append(errorLines, line)
}
}
}
result := strings.Join(errorLines, " | ")
return safeTruncate(result, maxLen)
}

View File

@@ -1,16 +0,0 @@
//go:build unix || darwin || linux
// +build unix darwin linux
package executor
import (
"syscall"
)
// sendTermSignal sends SIGTERM for graceful shutdown on Unix.
func sendTermSignal(proc processHandle) error {
if proc == nil {
return nil
}
return proc.Signal(syscall.SIGTERM)
}

View File

@@ -1,87 +0,0 @@
//go:build windows
// +build windows
package executor
import (
"io"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
)
// sendTermSignal on Windows directly kills the process.
// SIGTERM is not supported on Windows.
func sendTermSignal(proc processHandle) error {
if proc == nil {
return nil
}
pid := proc.Pid()
if pid > 0 {
// Kill the whole process tree to avoid leaving inheriting child processes around.
// This also helps prevent exec.Cmd.Wait() from blocking on stderr/stdout pipes held open by children.
taskkill := "taskkill"
if root := os.Getenv("SystemRoot"); root != "" {
taskkill = filepath.Join(root, "System32", "taskkill.exe")
}
cmd := exec.Command(taskkill, "/PID", strconv.Itoa(pid), "/T", "/F")
cmd.Stdout = io.Discard
cmd.Stderr = io.Discard
if err := cmd.Run(); err == nil {
return nil
}
if err := killProcessTree(pid); err == nil {
return nil
}
}
return proc.Kill()
}
func killProcessTree(pid int) error {
if pid <= 0 {
return nil
}
wmic := "wmic"
if root := os.Getenv("SystemRoot"); root != "" {
wmic = filepath.Join(root, "System32", "wbem", "WMIC.exe")
}
queryChildren := "(ParentProcessId=" + strconv.Itoa(pid) + ")"
listCmd := exec.Command(wmic, "process", "where", queryChildren, "get", "ProcessId", "/VALUE")
listCmd.Stderr = io.Discard
out, err := listCmd.Output()
if err == nil {
for _, childPID := range parseWMICPIDs(out) {
_ = killProcessTree(childPID)
}
}
querySelf := "(ProcessId=" + strconv.Itoa(pid) + ")"
termCmd := exec.Command(wmic, "process", "where", querySelf, "call", "terminate")
termCmd.Stdout = io.Discard
termCmd.Stderr = io.Discard
if termErr := termCmd.Run(); termErr != nil && err == nil {
err = termErr
}
return err
}
func parseWMICPIDs(out []byte) []int {
const prefix = "ProcessId="
var pids []int
for _, line := range strings.Split(string(out), "\n") {
line = strings.TrimSpace(line)
if !strings.HasPrefix(line, prefix) {
continue
}
n, err := strconv.Atoi(strings.TrimSpace(strings.TrimPrefix(line, prefix)))
if err != nil || n <= 0 {
continue
}
pids = append(pids, n)
}
return pids
}

View File

@@ -1,15 +0,0 @@
package executor
import "strings"
const stdinSpecialChars = "\n\\\"'`$"
func ShouldUseStdin(taskText string, piped bool) bool {
if piped {
return true
}
if len(taskText) > 800 {
return true
}
return strings.ContainsAny(taskText, stdinSpecialChars)
}

View File

@@ -1,46 +0,0 @@
package executor
import "context"
// ParallelConfig defines the JSON schema for parallel execution.
type ParallelConfig struct {
Tasks []TaskSpec `json:"tasks"`
GlobalBackend string `json:"backend,omitempty"`
}
// TaskSpec describes an individual task entry in the parallel config.
type TaskSpec struct {
ID string `json:"id"`
Task string `json:"task"`
WorkDir string `json:"workdir,omitempty"`
Dependencies []string `json:"dependencies,omitempty"`
SessionID string `json:"session_id,omitempty"`
Backend string `json:"backend,omitempty"`
Model string `json:"model,omitempty"`
ReasoningEffort string `json:"reasoning_effort,omitempty"`
Agent string `json:"agent,omitempty"`
PromptFile string `json:"prompt_file,omitempty"`
SkipPermissions bool `json:"skip_permissions,omitempty"`
Mode string `json:"-"`
UseStdin bool `json:"-"`
Context context.Context `json:"-"`
}
// TaskResult captures the execution outcome of a task.
type TaskResult struct {
TaskID string `json:"task_id"`
ExitCode int `json:"exit_code"`
Message string `json:"message"`
SessionID string `json:"session_id"`
Error string `json:"error"`
LogPath string `json:"log_path"`
// Structured report fields
Coverage string `json:"coverage,omitempty"` // extracted coverage percentage (e.g., "92%")
CoverageNum float64 `json:"coverage_num,omitempty"` // numeric coverage for comparison
CoverageTarget float64 `json:"coverage_target,omitempty"` // target coverage (default 90)
FilesChanged []string `json:"files_changed,omitempty"` // list of changed files
KeyOutput string `json:"key_output,omitempty"` // brief summary of what was done
TestsPassed int `json:"tests_passed,omitempty"` // number of tests passed
TestsFailed int `json:"tests_failed,omitempty"` // number of tests failed
sharedLog bool
}

View File

@@ -1,57 +0,0 @@
package executor
import (
"context"
"os/exec"
backend "codeagent-wrapper/internal/backend"
)
type CommandRunner = commandRunner
type ProcessHandle = processHandle
func SetForceKillDelay(seconds int32) (restore func()) {
prev := forceKillDelay.Load()
forceKillDelay.Store(seconds)
return func() { forceKillDelay.Store(prev) }
}
func SetSelectBackendFn(fn func(string) (Backend, error)) (restore func()) {
prev := selectBackendFn
if fn != nil {
selectBackendFn = fn
} else {
selectBackendFn = backend.Select
}
return func() { selectBackendFn = prev }
}
func SetCommandContextFn(fn func(context.Context, string, ...string) *exec.Cmd) (restore func()) {
prev := commandContext
if fn != nil {
commandContext = fn
} else {
commandContext = exec.CommandContext
}
return func() { commandContext = prev }
}
func SetNewCommandRunner(fn func(context.Context, string, ...string) CommandRunner) (restore func()) {
prev := newCommandRunner
if fn != nil {
newCommandRunner = fn
} else {
newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
return &realCmd{cmd: commandContext(ctx, name, args...)}
}
}
return func() { newCommandRunner = prev }
}
func WithTaskLogger(ctx context.Context, logger *Logger) context.Context {
return withTaskLogger(ctx, logger)
}
func TaskLoggerFromContext(ctx context.Context) *Logger {
return taskLoggerFromContext(ctx)
}

View File

@@ -1,59 +0,0 @@
package logger
import "sync/atomic"
var loggerPtr atomic.Pointer[Logger]
func setLogger(l *Logger) {
loggerPtr.Store(l)
}
func closeLogger() error {
logger := loggerPtr.Swap(nil)
if logger == nil {
return nil
}
return logger.Close()
}
func activeLogger() *Logger {
return loggerPtr.Load()
}
func logDebug(msg string) {
if logger := activeLogger(); logger != nil {
logger.Debug(msg)
}
}
func logInfo(msg string) {
if logger := activeLogger(); logger != nil {
logger.Info(msg)
}
}
func logWarn(msg string) {
if logger := activeLogger(); logger != nil {
logger.Warn(msg)
}
}
func logError(msg string) {
if logger := activeLogger(); logger != nil {
logger.Error(msg)
}
}
func SetLogger(l *Logger) { setLogger(l) }
func CloseLogger() error { return closeLogger() }
func ActiveLogger() *Logger { return activeLogger() }
func LogInfo(msg string) { logInfo(msg) }
func LogDebug(msg string) { logDebug(msg) }
func LogWarn(msg string) { logWarn(msg) }
func LogError(msg string) { logError(msg) }

View File

@@ -1,683 +0,0 @@
package logger
import (
"bufio"
"context"
"errors"
"fmt"
"hash/crc32"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/rs/zerolog"
)
// Logger writes log messages asynchronously to a temp file.
// It is intentionally minimal: a buffered channel + single worker goroutine
// to avoid contention while keeping ordering guarantees.
type Logger struct {
path string
file *os.File
writer *bufio.Writer
zlogger zerolog.Logger
ch chan logEntry
flushReq chan chan struct{}
done chan struct{}
closed atomic.Bool
closeOnce sync.Once
workerWG sync.WaitGroup
pendingWG sync.WaitGroup
flushMu sync.Mutex
workerErr error
errorEntries []string // Cache of recent ERROR/WARN entries
errorMu sync.Mutex
}
type logEntry struct {
msg string
level zerolog.Level
isError bool // true for ERROR or WARN levels
}
// CleanupStats captures the outcome of a cleanupOldLogs run.
type CleanupStats struct {
Scanned int
Deleted int
Kept int
Errors int
DeletedFiles []string
KeptFiles []string
}
var (
processRunningCheck = isProcessRunning
processStartTimeFn = getProcessStartTime
removeLogFileFn = os.Remove
globLogFiles = filepath.Glob
fileStatFn = os.Lstat // Use Lstat to detect symlinks
evalSymlinksFn = filepath.EvalSymlinks
)
const maxLogSuffixLen = 64
var logSuffixCounter atomic.Uint64
// NewLogger creates the async logger and starts the worker goroutine.
// The log file is created under os.TempDir() using the required naming scheme.
func NewLogger() (*Logger, error) {
return NewLoggerWithSuffix("")
}
// NewLoggerWithSuffix creates a logger with an optional suffix in the filename.
// Useful for tests that need isolated log files within the same process.
func NewLoggerWithSuffix(suffix string) (*Logger, error) {
pid := os.Getpid()
filename := fmt.Sprintf("%s-%d", PrimaryLogPrefix(), pid)
var safeSuffix string
if suffix != "" {
safeSuffix = sanitizeLogSuffix(suffix)
}
if safeSuffix != "" {
filename += "-" + safeSuffix
}
filename += ".log"
path := filepath.Clean(filepath.Join(os.TempDir(), filename))
if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
return nil, err
}
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o600)
if err != nil {
return nil, err
}
l := &Logger{
path: path,
file: f,
writer: bufio.NewWriterSize(f, 4096),
ch: make(chan logEntry, 1000),
flushReq: make(chan chan struct{}, 1),
done: make(chan struct{}),
}
l.zlogger = zerolog.New(l.writer).With().Timestamp().Logger()
l.workerWG.Add(1)
go l.run()
return l, nil
}
func sanitizeLogSuffix(raw string) string {
trimmed := strings.TrimSpace(raw)
if trimmed == "" {
return fallbackLogSuffix()
}
var b strings.Builder
changed := false
for _, r := range trimmed {
if isSafeLogRune(r) {
b.WriteRune(r)
} else {
changed = true
b.WriteByte('-')
}
if b.Len() >= maxLogSuffixLen {
changed = true
break
}
}
sanitized := strings.Trim(b.String(), "-.")
if sanitized != b.String() {
changed = true // Mark if trim removed any characters
}
if sanitized == "" {
return fallbackLogSuffix()
}
if changed || len(sanitized) > maxLogSuffixLen {
hash := crc32.ChecksumIEEE([]byte(trimmed))
hashStr := fmt.Sprintf("%x", hash)
maxPrefix := maxLogSuffixLen - len(hashStr) - 1
if maxPrefix < 1 {
maxPrefix = 1
}
if len(sanitized) > maxPrefix {
sanitized = sanitized[:maxPrefix]
}
sanitized = fmt.Sprintf("%s-%s", sanitized, hashStr)
}
return sanitized
}
func fallbackLogSuffix() string {
next := logSuffixCounter.Add(1)
return fmt.Sprintf("task-%d", next)
}
func isSafeLogRune(r rune) bool {
switch {
case r >= 'a' && r <= 'z':
return true
case r >= 'A' && r <= 'Z':
return true
case r >= '0' && r <= '9':
return true
case r == '-', r == '_', r == '.':
return true
default:
return false
}
}
// Path returns the underlying log file path (useful for tests/inspection).
func (l *Logger) Path() string {
if l == nil {
return ""
}
return l.path
}
func (l *Logger) IsClosed() bool {
if l == nil {
return true
}
return l.closed.Load()
}
// Info logs at INFO level.
func (l *Logger) Info(msg string) { l.logWithLevel(zerolog.InfoLevel, msg) }
// Warn logs at WARN level.
func (l *Logger) Warn(msg string) { l.logWithLevel(zerolog.WarnLevel, msg) }
// Debug logs at DEBUG level.
func (l *Logger) Debug(msg string) { l.logWithLevel(zerolog.DebugLevel, msg) }
// Error logs at ERROR level.
func (l *Logger) Error(msg string) { l.logWithLevel(zerolog.ErrorLevel, msg) }
// Close signals the worker to flush and close the log file.
// The log file is NOT removed, allowing inspection after program exit.
// It is safe to call multiple times.
// Waits up to CODEAGENT_LOGGER_CLOSE_TIMEOUT_MS (default: 5000) for shutdown; set to 0 to wait indefinitely.
// Returns an error if shutdown doesn't complete within the timeout.
func (l *Logger) Close() error {
if l == nil {
return nil
}
var closeErr error
l.closeOnce.Do(func() {
l.closed.Store(true)
close(l.done)
timeout := loggerCloseTimeout()
workerDone := make(chan struct{})
go func() {
l.workerWG.Wait()
close(workerDone)
}()
if timeout > 0 {
select {
case <-workerDone:
// Worker stopped gracefully
case <-time.After(timeout):
closeErr = fmt.Errorf("logger worker timeout during close")
return
}
} else {
<-workerDone
}
if l.workerErr != nil && closeErr == nil {
closeErr = l.workerErr
}
})
return closeErr
}
func loggerCloseTimeout() time.Duration {
const defaultTimeout = 5 * time.Second
raw := strings.TrimSpace(os.Getenv("CODEAGENT_LOGGER_CLOSE_TIMEOUT_MS"))
if raw == "" {
return defaultTimeout
}
ms, err := strconv.Atoi(raw)
if err != nil {
return defaultTimeout
}
if ms <= 0 {
return 0
}
return time.Duration(ms) * time.Millisecond
}
// RemoveLogFile removes the log file. Should only be called after Close().
func (l *Logger) RemoveLogFile() error {
if l == nil {
return nil
}
return os.Remove(l.path)
}
// ExtractRecentErrors returns the most recent ERROR and WARN entries from memory cache.
// Returns up to maxEntries entries in chronological order.
func (l *Logger) ExtractRecentErrors(maxEntries int) []string {
if l == nil || maxEntries <= 0 {
return nil
}
l.errorMu.Lock()
defer l.errorMu.Unlock()
if len(l.errorEntries) == 0 {
return nil
}
// Return last N entries
start := 0
if len(l.errorEntries) > maxEntries {
start = len(l.errorEntries) - maxEntries
}
result := make([]string, len(l.errorEntries)-start)
copy(result, l.errorEntries[start:])
return result
}
// Flush waits for all pending log entries to be written. Primarily for tests.
// Returns after a 5-second timeout to prevent indefinite blocking.
func (l *Logger) Flush() {
if l == nil {
return
}
l.flushMu.Lock()
defer l.flushMu.Unlock()
// Wait for pending entries with timeout
done := make(chan struct{})
go func() {
l.pendingWG.Wait()
close(done)
}()
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
select {
case <-done:
// All pending entries processed
case <-ctx.Done():
// Timeout - return without full flush
return
}
// Trigger writer flush
flushDone := make(chan struct{})
select {
case l.flushReq <- flushDone:
// Wait for flush to complete
select {
case <-flushDone:
// Flush completed
case <-time.After(1 * time.Second):
// Flush timeout
}
case <-l.done:
// Logger is closing
case <-time.After(1 * time.Second):
// Timeout sending flush request
}
}
func (l *Logger) logWithLevel(entryLevel zerolog.Level, msg string) {
if l == nil {
return
}
if l.closed.Load() {
return
}
isError := entryLevel == zerolog.WarnLevel || entryLevel == zerolog.ErrorLevel
entry := logEntry{msg: msg, level: entryLevel, isError: isError}
l.flushMu.Lock()
l.pendingWG.Add(1)
l.flushMu.Unlock()
select {
case l.ch <- entry:
// Successfully sent to channel
case <-l.done:
// Logger is closing, drop this entry
l.pendingWG.Done()
return
}
}
func (l *Logger) run() {
defer l.workerWG.Done()
ticker := time.NewTicker(500 * time.Millisecond)
defer ticker.Stop()
writeEntry := func(entry logEntry) {
l.zlogger.WithLevel(entry.level).Msg(entry.msg)
// Cache error/warn entries in memory for fast extraction
if entry.isError {
l.errorMu.Lock()
l.errorEntries = append(l.errorEntries, entry.msg)
if len(l.errorEntries) > 100 { // Keep last 100
l.errorEntries = l.errorEntries[1:]
}
l.errorMu.Unlock()
}
l.pendingWG.Done()
}
finalize := func() {
if err := l.writer.Flush(); err != nil && l.workerErr == nil {
l.workerErr = err
}
if err := l.file.Sync(); err != nil && l.workerErr == nil {
l.workerErr = err
}
if err := l.file.Close(); err != nil && l.workerErr == nil {
l.workerErr = err
}
}
for {
select {
case entry, ok := <-l.ch:
if !ok {
finalize()
return
}
writeEntry(entry)
case <-ticker.C:
_ = l.writer.Flush()
case flushDone := <-l.flushReq:
// Explicit flush request - flush writer and sync to disk
_ = l.writer.Flush()
_ = l.file.Sync()
close(flushDone)
case <-l.done:
for {
select {
case entry, ok := <-l.ch:
if !ok {
finalize()
return
}
writeEntry(entry)
default:
finalize()
return
}
}
}
}
}
// cleanupOldLogs scans os.TempDir() for wrapper log files and removes those
// whose owning process is no longer running (i.e., orphaned logs).
// It includes safety checks for:
// - PID reuse: Compares file modification time with process start time
// - Symlink attacks: Ensures files are within TempDir and not symlinks
func cleanupOldLogs() (CleanupStats, error) {
var stats CleanupStats
tempDir := os.TempDir()
prefixes := LogPrefixes()
seen := make(map[string]struct{})
var matches []string
for _, prefix := range prefixes {
pattern := filepath.Join(tempDir, fmt.Sprintf("%s-*.log", prefix))
found, err := globLogFiles(pattern)
if err != nil {
logWarn(fmt.Sprintf("cleanupOldLogs: failed to list logs: %v", err))
return stats, fmt.Errorf("cleanupOldLogs: %w", err)
}
for _, path := range found {
if _, ok := seen[path]; ok {
continue
}
seen[path] = struct{}{}
matches = append(matches, path)
}
}
var removeErr error
for _, path := range matches {
stats.Scanned++
filename := filepath.Base(path)
// Security check: Verify file is not a symlink and is within tempDir
if shouldSkipFile, reason := isUnsafeFile(path, tempDir); shouldSkipFile {
stats.Kept++
stats.KeptFiles = append(stats.KeptFiles, filename)
if reason != "" {
// Use Debug level to avoid polluting Recent Errors with cleanup noise
logDebug(fmt.Sprintf("cleanupOldLogs: skipping %s: %s", filename, reason))
}
continue
}
pid, ok := parsePIDFromLog(path)
if !ok {
stats.Kept++
stats.KeptFiles = append(stats.KeptFiles, filename)
continue
}
// Check if process is running
if !processRunningCheck(pid) {
// Process not running, safe to delete
if err := removeLogFileFn(path); err != nil {
if errors.Is(err, os.ErrNotExist) {
// File already deleted by another process, don't count as success
stats.Kept++
stats.KeptFiles = append(stats.KeptFiles, filename+" (already deleted)")
continue
}
stats.Errors++
logWarn(fmt.Sprintf("cleanupOldLogs: failed to remove %s: %v", filename, err))
removeErr = errors.Join(removeErr, fmt.Errorf("failed to remove %s: %w", filename, err))
continue
}
stats.Deleted++
stats.DeletedFiles = append(stats.DeletedFiles, filename)
continue
}
// Process is running, check for PID reuse
if isPIDReused(path, pid) {
// PID was reused, the log file is orphaned
if err := removeLogFileFn(path); err != nil {
if errors.Is(err, os.ErrNotExist) {
stats.Kept++
stats.KeptFiles = append(stats.KeptFiles, filename+" (already deleted)")
continue
}
stats.Errors++
logWarn(fmt.Sprintf("cleanupOldLogs: failed to remove %s (PID reused): %v", filename, err))
removeErr = errors.Join(removeErr, fmt.Errorf("failed to remove %s: %w", filename, err))
continue
}
stats.Deleted++
stats.DeletedFiles = append(stats.DeletedFiles, filename)
continue
}
// Process is running and owns this log file
stats.Kept++
stats.KeptFiles = append(stats.KeptFiles, filename)
}
if removeErr != nil {
return stats, fmt.Errorf("cleanupOldLogs: %w", removeErr)
}
return stats, nil
}
// isUnsafeFile checks if a file is unsafe to delete (symlink or outside tempDir).
// Returns (true, reason) if the file should be skipped.
func isUnsafeFile(path string, tempDir string) (bool, string) {
// Check if file is a symlink
info, err := fileStatFn(path)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return true, "" // File disappeared, skip silently
}
return true, fmt.Sprintf("stat failed: %v", err)
}
// Check if it's a symlink
if info.Mode()&os.ModeSymlink != 0 {
return true, "refusing to delete symlink"
}
// Resolve any path traversal and verify it's within tempDir
resolvedPath, err := evalSymlinksFn(path)
if err != nil {
return true, fmt.Sprintf("path resolution failed: %v", err)
}
// Get absolute path of tempDir
absTempDir, err := filepath.Abs(tempDir)
if err != nil {
return true, fmt.Sprintf("tempDir resolution failed: %v", err)
}
// Ensure resolved path is within tempDir
relPath, err := filepath.Rel(absTempDir, resolvedPath)
if err != nil || strings.HasPrefix(relPath, "..") {
return true, "file is outside tempDir"
}
return false, ""
}
// isPIDReused checks if a PID has been reused by comparing file modification time
// with process start time. Returns true if the log file was created by a different
// process that previously had the same PID.
func isPIDReused(logPath string, pid int) bool {
// Get file modification time (when log was last written)
info, err := fileStatFn(logPath)
if err != nil {
// If we can't stat the file, be conservative and keep it
return false
}
fileModTime := info.ModTime()
// Get process start time
procStartTime := processStartTimeFn(pid)
if procStartTime.IsZero() {
// Can't determine process start time
// Check if file is very old (>7 days), likely from a dead process
return time.Since(fileModTime) > 7*24*time.Hour
}
// If the log file was modified before the process started, PID was reused
// Add a small buffer (1 second) to account for clock skew and file system timing
return fileModTime.Add(1 * time.Second).Before(procStartTime)
}
func parsePIDFromLog(path string) (int, bool) {
name := filepath.Base(path)
prefixes := LogPrefixes()
for _, prefix := range prefixes {
prefixWithDash := fmt.Sprintf("%s-", prefix)
if !strings.HasPrefix(name, prefixWithDash) || !strings.HasSuffix(name, ".log") {
continue
}
core := strings.TrimSuffix(strings.TrimPrefix(name, prefixWithDash), ".log")
if core == "" {
continue
}
pidPart := core
if idx := strings.IndexRune(core, '-'); idx != -1 {
pidPart = core[:idx]
}
if pidPart == "" {
continue
}
pid, err := strconv.Atoi(pidPart)
if err != nil || pid <= 0 {
continue
}
return pid, true
}
return 0, false
}
func logConcurrencyPlanning(limit, total int) {
logger := activeLogger()
if logger == nil {
return
}
logger.Info(fmt.Sprintf("parallel: worker_limit=%s total_tasks=%d", renderWorkerLimit(limit), total))
}
func logConcurrencyState(event, taskID string, active, limit int) {
logger := activeLogger()
if logger == nil {
return
}
logger.Debug(fmt.Sprintf("parallel: %s task=%s active=%d limit=%s", event, taskID, active, renderWorkerLimit(limit)))
}
func renderWorkerLimit(limit int) string {
if limit <= 0 {
return "unbounded"
}
return strconv.Itoa(limit)
}
func CleanupOldLogs() (CleanupStats, error) { return cleanupOldLogs() }
func IsUnsafeFile(path string, tempDir string) (bool, string) { return isUnsafeFile(path, tempDir) }
func IsPIDReused(logPath string, pid int) bool { return isPIDReused(logPath, pid) }
func ParsePIDFromLog(path string) (int, bool) { return parsePIDFromLog(path) }
func LogConcurrencyPlanning(limit, total int) { logConcurrencyPlanning(limit, total) }
func LogConcurrencyState(event, taskID string, active, limit int) {
logConcurrencyState(event, taskID, active, limit)
}
func SanitizeLogSuffix(raw string) string { return sanitizeLogSuffix(raw) }

View File

@@ -1,158 +0,0 @@
package logger
import (
"fmt"
"os"
"path/filepath"
"strings"
"testing"
)
func TestLoggerNilReceiverNoop(t *testing.T) {
var logger *Logger
logger.Info("info")
logger.Warn("warn")
logger.Debug("debug")
logger.Error("error")
logger.Flush()
if err := logger.Close(); err != nil {
t.Fatalf("Close() on nil logger should return nil, got %v", err)
}
}
func TestLoggerConcurrencyLogHelpers(t *testing.T) {
setTempDirEnv(t, t.TempDir())
logger, err := NewLoggerWithSuffix("concurrency")
if err != nil {
t.Fatalf("NewLoggerWithSuffix error: %v", err)
}
setLogger(logger)
defer func() { _ = closeLogger() }()
logConcurrencyPlanning(0, 2)
logConcurrencyPlanning(3, 2)
logConcurrencyState("start", "task-1", 1, 0)
logConcurrencyState("done", "task-1", 0, 3)
logger.Flush()
data, err := os.ReadFile(logger.Path())
if err != nil {
t.Fatalf("failed to read log file: %v", err)
}
output := string(data)
checks := []string{
"parallel: worker_limit=unbounded total_tasks=2",
"parallel: worker_limit=3 total_tasks=2",
"parallel: start task=task-1 active=1 limit=unbounded",
"parallel: done task=task-1 active=0 limit=3",
}
for _, c := range checks {
if !strings.Contains(output, c) {
t.Fatalf("log output missing %q, got: %s", c, output)
}
}
}
func TestLoggerConcurrencyLogHelpersNoopWithoutActiveLogger(t *testing.T) {
_ = closeLogger()
logConcurrencyPlanning(1, 1)
logConcurrencyState("start", "task-1", 0, 1)
}
func TestLoggerCleanupOldLogsSkipsUnsafeAndHandlesAlreadyDeleted(t *testing.T) {
tempDir := setTempDirEnv(t, t.TempDir())
unsafePath := createTempLog(t, tempDir, fmt.Sprintf("%s-%d.log", PrimaryLogPrefix(), 222))
orphanPath := createTempLog(t, tempDir, fmt.Sprintf("%s-%d.log", PrimaryLogPrefix(), 111))
stubFileStat(t, func(path string) (os.FileInfo, error) {
if path == unsafePath {
return fakeFileInfo{mode: os.ModeSymlink}, nil
}
return os.Lstat(path)
})
stubProcessRunning(t, func(pid int) bool {
if pid == 111 {
_ = os.Remove(orphanPath)
}
return false
})
stats, err := cleanupOldLogs()
if err != nil {
t.Fatalf("cleanupOldLogs() unexpected error: %v", err)
}
if stats.Scanned != 2 {
t.Fatalf("scanned = %d, want %d", stats.Scanned, 2)
}
if stats.Deleted != 0 {
t.Fatalf("deleted = %d, want %d", stats.Deleted, 0)
}
if stats.Kept != 2 {
t.Fatalf("kept = %d, want %d", stats.Kept, 2)
}
if stats.Errors != 0 {
t.Fatalf("errors = %d, want %d", stats.Errors, 0)
}
hasSkip := false
hasAlreadyDeleted := false
for _, name := range stats.KeptFiles {
if strings.Contains(name, "already deleted") {
hasAlreadyDeleted = true
}
if strings.Contains(name, filepath.Base(unsafePath)) {
hasSkip = true
}
}
if !hasSkip {
t.Fatalf("expected kept files to include unsafe log %q, got %+v", filepath.Base(unsafePath), stats.KeptFiles)
}
if !hasAlreadyDeleted {
t.Fatalf("expected kept files to include already deleted marker, got %+v", stats.KeptFiles)
}
}
func TestLoggerIsUnsafeFileErrorPaths(t *testing.T) {
tempDir := t.TempDir()
t.Run("stat ErrNotExist", func(t *testing.T) {
stubFileStat(t, func(string) (os.FileInfo, error) {
return nil, os.ErrNotExist
})
unsafe, reason := isUnsafeFile("missing.log", tempDir)
if !unsafe || reason != "" {
t.Fatalf("expected missing file to be skipped silently, got unsafe=%v reason=%q", unsafe, reason)
}
})
t.Run("stat error", func(t *testing.T) {
stubFileStat(t, func(string) (os.FileInfo, error) {
return nil, fmt.Errorf("boom")
})
unsafe, reason := isUnsafeFile("broken.log", tempDir)
if !unsafe || !strings.Contains(reason, "stat failed") {
t.Fatalf("expected stat failure to be unsafe, got unsafe=%v reason=%q", unsafe, reason)
}
})
t.Run("EvalSymlinks error", func(t *testing.T) {
stubFileStat(t, func(string) (os.FileInfo, error) {
return fakeFileInfo{}, nil
})
stubEvalSymlinks(t, func(string) (string, error) {
return "", fmt.Errorf("resolve failed")
})
unsafe, reason := isUnsafeFile("cannot-resolve.log", tempDir)
if !unsafe || !strings.Contains(reason, "path resolution failed") {
t.Fatalf("expected resolution failure to be unsafe, got unsafe=%v reason=%q", unsafe, reason)
}
})
}

View File

@@ -1,115 +0,0 @@
package logger
import (
"fmt"
"os"
"path/filepath"
"strings"
"testing"
)
func TestLoggerWithSuffixNamingAndIsolation(t *testing.T) {
tempDir := setTempDirEnv(t, t.TempDir())
taskA := "task-1"
taskB := "task-2"
loggerA, err := NewLoggerWithSuffix(taskA)
if err != nil {
t.Fatalf("NewLoggerWithSuffix(%q) error = %v", taskA, err)
}
defer loggerA.Close()
loggerB, err := NewLoggerWithSuffix(taskB)
if err != nil {
t.Fatalf("NewLoggerWithSuffix(%q) error = %v", taskB, err)
}
defer loggerB.Close()
wantA := filepath.Join(tempDir, fmt.Sprintf("%s-%d-%s.log", PrimaryLogPrefix(), os.Getpid(), taskA))
if loggerA.Path() != wantA {
t.Fatalf("loggerA path = %q, want %q", loggerA.Path(), wantA)
}
wantB := filepath.Join(tempDir, fmt.Sprintf("%s-%d-%s.log", PrimaryLogPrefix(), os.Getpid(), taskB))
if loggerB.Path() != wantB {
t.Fatalf("loggerB path = %q, want %q", loggerB.Path(), wantB)
}
if loggerA.Path() == loggerB.Path() {
t.Fatalf("expected different log files, got %q", loggerA.Path())
}
loggerA.Info("from taskA")
loggerB.Info("from taskB")
loggerA.Flush()
loggerB.Flush()
dataA, err := os.ReadFile(loggerA.Path())
if err != nil {
t.Fatalf("failed to read loggerA file: %v", err)
}
dataB, err := os.ReadFile(loggerB.Path())
if err != nil {
t.Fatalf("failed to read loggerB file: %v", err)
}
if !strings.Contains(string(dataA), "from taskA") {
t.Fatalf("loggerA missing its message, got: %q", string(dataA))
}
if strings.Contains(string(dataA), "from taskB") {
t.Fatalf("loggerA contains loggerB message, got: %q", string(dataA))
}
if !strings.Contains(string(dataB), "from taskB") {
t.Fatalf("loggerB missing its message, got: %q", string(dataB))
}
if strings.Contains(string(dataB), "from taskA") {
t.Fatalf("loggerB contains loggerA message, got: %q", string(dataB))
}
}
func TestLoggerWithSuffixReturnsErrorWhenTempDirNotWritable(t *testing.T) {
base := t.TempDir()
noWrite := filepath.Join(base, "ro")
if err := os.Mkdir(noWrite, 0o500); err != nil {
t.Fatalf("failed to create read-only temp dir: %v", err)
}
t.Cleanup(func() { _ = os.Chmod(noWrite, 0o700) })
setTempDirEnv(t, noWrite)
logger, err := NewLoggerWithSuffix("task-err")
if err == nil {
_ = logger.Close()
t.Fatalf("expected error when temp dir is not writable")
}
}
func TestLoggerWithSuffixSanitizesUnsafeSuffix(t *testing.T) {
tempDir := setTempDirEnv(t, t.TempDir())
raw := "../bad id/with?chars"
safe := sanitizeLogSuffix(raw)
if safe == "" {
t.Fatalf("sanitizeLogSuffix returned empty string")
}
if strings.ContainsAny(safe, "/\\") {
t.Fatalf("sanitized suffix should not contain path separators, got %q", safe)
}
logger, err := NewLoggerWithSuffix(raw)
if err != nil {
t.Fatalf("NewLoggerWithSuffix(%q) error = %v", raw, err)
}
t.Cleanup(func() {
_ = logger.Close()
_ = os.Remove(logger.Path())
})
wantBase := fmt.Sprintf("%s-%d-%s.log", PrimaryLogPrefix(), os.Getpid(), safe)
if gotBase := filepath.Base(logger.Path()); gotBase != wantBase {
t.Fatalf("log filename = %q, want %q", gotBase, wantBase)
}
if dir := filepath.Dir(logger.Path()); dir != tempDir {
t.Fatalf("logger path dir = %q, want %q", dir, tempDir)
}
}

View File

@@ -1,903 +0,0 @@
package logger
import (
"bufio"
"errors"
"fmt"
"math"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"testing"
"time"
)
func compareCleanupStats(got, want CleanupStats) bool {
if got.Scanned != want.Scanned || got.Deleted != want.Deleted || got.Kept != want.Kept || got.Errors != want.Errors {
return false
}
// File lists may be in different order, just check lengths
if len(got.DeletedFiles) != want.Deleted || len(got.KeptFiles) != want.Kept {
return false
}
return true
}
func TestLoggerCreatesFileWithPID(t *testing.T) {
tempDir := t.TempDir()
t.Setenv("TMPDIR", tempDir)
logger, err := NewLogger()
if err != nil {
t.Fatalf("NewLogger() error = %v", err)
}
defer logger.Close()
expectedPath := filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", os.Getpid()))
if logger.Path() != expectedPath {
t.Fatalf("logger path = %s, want %s", logger.Path(), expectedPath)
}
if _, err := os.Stat(expectedPath); err != nil {
t.Fatalf("log file not created: %v", err)
}
}
func TestLoggerWritesLevels(t *testing.T) {
tempDir := t.TempDir()
t.Setenv("TMPDIR", tempDir)
logger, err := NewLogger()
if err != nil {
t.Fatalf("NewLogger() error = %v", err)
}
defer logger.Close()
logger.Info("info message")
logger.Warn("warn message")
logger.Debug("debug message")
logger.Error("error message")
logger.Flush()
data, err := os.ReadFile(logger.Path())
if err != nil {
t.Fatalf("failed to read log file: %v", err)
}
content := string(data)
checks := []string{"info message", "warn message", "debug message", "error message"}
for _, c := range checks {
if !strings.Contains(content, c) {
t.Fatalf("log file missing entry %q, content: %s", c, content)
}
}
}
func TestLoggerCloseStopsWorkerAndKeepsFile(t *testing.T) {
tempDir := t.TempDir()
t.Setenv("TMPDIR", tempDir)
logger, err := NewLogger()
if err != nil {
t.Fatalf("NewLogger() error = %v", err)
}
logger.Info("before close")
logger.Flush()
logPath := logger.Path()
if err := logger.Close(); err != nil {
t.Fatalf("Close() returned error: %v", err)
}
// After recent changes, log file is kept for debugging - NOT removed
if _, err := os.Stat(logPath); os.IsNotExist(err) {
t.Fatalf("log file should exist after Close for debugging, but got IsNotExist")
}
// Clean up manually for test
defer os.Remove(logPath)
}
func TestLoggerConcurrentWritesSafe(t *testing.T) {
tempDir := t.TempDir()
t.Setenv("TMPDIR", tempDir)
logger, err := NewLogger()
if err != nil {
t.Fatalf("NewLogger() error = %v", err)
}
defer logger.Close()
const goroutines = 10
const perGoroutine = 50
var wg sync.WaitGroup
wg.Add(goroutines)
for i := 0; i < goroutines; i++ {
go func(id int) {
defer wg.Done()
for j := 0; j < perGoroutine; j++ {
logger.Debug(fmt.Sprintf("g%d-%d", id, j))
}
}(i)
}
wg.Wait()
logger.Flush()
f, err := os.Open(logger.Path())
if err != nil {
t.Fatalf("failed to open log file: %v", err)
}
defer f.Close()
scanner := bufio.NewScanner(f)
count := 0
for scanner.Scan() {
count++
}
if err := scanner.Err(); err != nil {
t.Fatalf("scanner error: %v", err)
}
expected := goroutines * perGoroutine
if count != expected {
t.Fatalf("unexpected log line count: got %d, want %d", count, expected)
}
}
func TestLoggerCleanupOldLogsRemovesOrphans(t *testing.T) {
tempDir := setTempDirEnv(t, t.TempDir())
orphan1 := createTempLog(t, tempDir, "codeagent-wrapper-111.log")
orphan2 := createTempLog(t, tempDir, "codeagent-wrapper-222-suffix.log")
running1 := createTempLog(t, tempDir, "codeagent-wrapper-333.log")
running2 := createTempLog(t, tempDir, "codeagent-wrapper-444-extra-info.log")
untouched := createTempLog(t, tempDir, "unrelated.log")
runningPIDs := map[int]bool{333: true, 444: true}
stubProcessRunning(t, func(pid int) bool {
return runningPIDs[pid]
})
// Stub process start time to be in the past so files won't be considered as PID reused
stubProcessStartTime(t, func(pid int) time.Time {
if runningPIDs[pid] {
// Return a time before file creation
return time.Now().Add(-1 * time.Hour)
}
return time.Time{}
})
stats, err := cleanupOldLogs()
if err != nil {
t.Fatalf("cleanupOldLogs() unexpected error: %v", err)
}
want := CleanupStats{Scanned: 4, Deleted: 2, Kept: 2}
if !compareCleanupStats(stats, want) {
t.Fatalf("cleanup stats mismatch: got %+v, want %+v", stats, want)
}
if _, err := os.Stat(orphan1); !os.IsNotExist(err) {
t.Fatalf("expected orphan %s to be removed, err=%v", orphan1, err)
}
if _, err := os.Stat(orphan2); !os.IsNotExist(err) {
t.Fatalf("expected orphan %s to be removed, err=%v", orphan2, err)
}
if _, err := os.Stat(running1); err != nil {
t.Fatalf("expected running log %s to remain, err=%v", running1, err)
}
if _, err := os.Stat(running2); err != nil {
t.Fatalf("expected running log %s to remain, err=%v", running2, err)
}
if _, err := os.Stat(untouched); err != nil {
t.Fatalf("expected unrelated file %s to remain, err=%v", untouched, err)
}
}
func TestLoggerCleanupOldLogsHandlesInvalidNamesAndErrors(t *testing.T) {
tempDir := setTempDirEnv(t, t.TempDir())
invalid := []string{
"codeagent-wrapper-.log",
"codeagent-wrapper.log",
"codeagent-wrapper-foo-bar.txt",
"not-a-codex.log",
}
for _, name := range invalid {
createTempLog(t, tempDir, name)
}
target := createTempLog(t, tempDir, "codeagent-wrapper-555-extra.log")
var checked []int
stubProcessRunning(t, func(pid int) bool {
checked = append(checked, pid)
return false
})
stubProcessStartTime(t, func(pid int) time.Time {
return time.Time{} // Return zero time for processes not running
})
removeErr := errors.New("remove failure")
callCount := 0
stubRemoveLogFile(t, func(path string) error {
callCount++
if path == target {
return removeErr
}
return os.Remove(path)
})
stats, err := cleanupOldLogs()
if err == nil {
t.Fatalf("cleanupOldLogs() expected error")
}
if !errors.Is(err, removeErr) {
t.Fatalf("cleanupOldLogs error = %v, want %v", err, removeErr)
}
want := CleanupStats{Scanned: 2, Kept: 1, Errors: 1}
if !compareCleanupStats(stats, want) {
t.Fatalf("cleanup stats mismatch: got %+v, want %+v", stats, want)
}
if len(checked) != 1 || checked[0] != 555 {
t.Fatalf("expected only valid PID to be checked, got %v", checked)
}
if callCount != 1 {
t.Fatalf("expected remove to be called once, got %d", callCount)
}
if _, err := os.Stat(target); err != nil {
t.Fatalf("expected errored file %s to remain for manual cleanup, err=%v", target, err)
}
}
func TestLoggerCleanupOldLogsHandlesGlobFailures(t *testing.T) {
stubProcessRunning(t, func(pid int) bool {
t.Fatalf("process check should not run when glob fails")
return false
})
stubProcessStartTime(t, func(int) time.Time {
return time.Time{}
})
globErr := errors.New("glob failure")
stubGlobLogFiles(t, func(pattern string) ([]string, error) {
return nil, globErr
})
stats, err := cleanupOldLogs()
if err == nil {
t.Fatalf("cleanupOldLogs() expected error")
}
if !errors.Is(err, globErr) {
t.Fatalf("cleanupOldLogs error = %v, want %v", err, globErr)
}
if stats.Scanned != 0 || stats.Deleted != 0 || stats.Kept != 0 || stats.Errors != 0 || len(stats.DeletedFiles) != 0 || len(stats.KeptFiles) != 0 {
t.Fatalf("cleanup stats mismatch: got %+v, want zero", stats)
}
}
func TestLoggerCleanupOldLogsEmptyDirectoryStats(t *testing.T) {
setTempDirEnv(t, t.TempDir())
stubProcessRunning(t, func(int) bool {
t.Fatalf("process check should not run for empty directory")
return false
})
stubProcessStartTime(t, func(int) time.Time {
return time.Time{}
})
stats, err := cleanupOldLogs()
if err != nil {
t.Fatalf("cleanupOldLogs() unexpected error: %v", err)
}
if stats.Scanned != 0 || stats.Deleted != 0 || stats.Kept != 0 || stats.Errors != 0 || len(stats.DeletedFiles) != 0 || len(stats.KeptFiles) != 0 {
t.Fatalf("cleanup stats mismatch: got %+v, want zero", stats)
}
}
func TestLoggerCleanupOldLogsHandlesTempDirPermissionErrors(t *testing.T) {
tempDir := setTempDirEnv(t, t.TempDir())
paths := []string{
createTempLog(t, tempDir, "codeagent-wrapper-6100.log"),
createTempLog(t, tempDir, "codeagent-wrapper-6101.log"),
}
stubProcessRunning(t, func(int) bool { return false })
stubProcessStartTime(t, func(int) time.Time { return time.Time{} })
var attempts int
stubRemoveLogFile(t, func(path string) error {
attempts++
return &os.PathError{Op: "remove", Path: path, Err: os.ErrPermission}
})
stats, err := cleanupOldLogs()
if err == nil {
t.Fatalf("cleanupOldLogs() expected error")
}
if !errors.Is(err, os.ErrPermission) {
t.Fatalf("cleanupOldLogs error = %v, want permission", err)
}
want := CleanupStats{Scanned: len(paths), Errors: len(paths)}
if !compareCleanupStats(stats, want) {
t.Fatalf("cleanup stats mismatch: got %+v, want %+v", stats, want)
}
if attempts != len(paths) {
t.Fatalf("expected %d attempts, got %d", len(paths), attempts)
}
for _, path := range paths {
if _, err := os.Stat(path); err != nil {
t.Fatalf("expected protected file %s to remain, err=%v", path, err)
}
}
}
func TestLoggerCleanupOldLogsHandlesPermissionDeniedFile(t *testing.T) {
tempDir := setTempDirEnv(t, t.TempDir())
protected := createTempLog(t, tempDir, "codeagent-wrapper-6200.log")
deletable := createTempLog(t, tempDir, "codeagent-wrapper-6201.log")
stubProcessRunning(t, func(int) bool { return false })
stubProcessStartTime(t, func(int) time.Time { return time.Time{} })
stubRemoveLogFile(t, func(path string) error {
if path == protected {
return &os.PathError{Op: "remove", Path: path, Err: os.ErrPermission}
}
return os.Remove(path)
})
stats, err := cleanupOldLogs()
if err == nil {
t.Fatalf("cleanupOldLogs() expected error")
}
if !errors.Is(err, os.ErrPermission) {
t.Fatalf("cleanupOldLogs error = %v, want permission", err)
}
want := CleanupStats{Scanned: 2, Deleted: 1, Errors: 1}
if !compareCleanupStats(stats, want) {
t.Fatalf("cleanup stats mismatch: got %+v, want %+v", stats, want)
}
if _, err := os.Stat(protected); err != nil {
t.Fatalf("expected protected file to remain, err=%v", err)
}
if _, err := os.Stat(deletable); !os.IsNotExist(err) {
t.Fatalf("expected deletable file to be removed, err=%v", err)
}
}
func TestLoggerCleanupOldLogsPerformanceBound(t *testing.T) {
tempDir := setTempDirEnv(t, t.TempDir())
const fileCount = 400
fakePaths := make([]string, fileCount)
for i := 0; i < fileCount; i++ {
name := fmt.Sprintf("codeagent-wrapper-%d.log", 10000+i)
fakePaths[i] = createTempLog(t, tempDir, name)
}
stubGlobLogFiles(t, func(pattern string) ([]string, error) {
return fakePaths, nil
})
stubProcessRunning(t, func(int) bool { return false })
stubProcessStartTime(t, func(int) time.Time { return time.Time{} })
var removed int
stubRemoveLogFile(t, func(path string) error {
removed++
return nil
})
start := time.Now()
stats, err := cleanupOldLogs()
elapsed := time.Since(start)
if err != nil {
t.Fatalf("cleanupOldLogs() unexpected error: %v", err)
}
if removed != fileCount {
t.Fatalf("expected %d removals, got %d", fileCount, removed)
}
if elapsed > 100*time.Millisecond {
t.Fatalf("cleanup took too long: %v for %d files", elapsed, fileCount)
}
want := CleanupStats{Scanned: fileCount, Deleted: fileCount}
if !compareCleanupStats(stats, want) {
t.Fatalf("cleanup stats mismatch: got %+v, want %+v", stats, want)
}
}
func TestLoggerCleanupOldLogsKeepsCurrentProcessLog(t *testing.T) {
tempDir := setTempDirEnv(t, t.TempDir())
currentPID := os.Getpid()
currentLog := createTempLog(t, tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", currentPID))
stubProcessRunning(t, func(pid int) bool {
if pid != currentPID {
t.Fatalf("unexpected pid check: %d", pid)
}
return true
})
stubProcessStartTime(t, func(pid int) time.Time {
if pid == currentPID {
return time.Now().Add(-1 * time.Hour)
}
return time.Time{}
})
stats, err := cleanupOldLogs()
if err != nil {
t.Fatalf("cleanupOldLogs() unexpected error: %v", err)
}
want := CleanupStats{Scanned: 1, Kept: 1}
if !compareCleanupStats(stats, want) {
t.Fatalf("cleanup stats mismatch: got %+v, want %+v", stats, want)
}
if _, err := os.Stat(currentLog); err != nil {
t.Fatalf("expected current process log to remain, err=%v", err)
}
}
func TestLoggerIsPIDReusedScenarios(t *testing.T) {
now := time.Now()
tests := []struct {
name string
statErr error
modTime time.Time
startTime time.Time
want bool
}{
{"stat error", errors.New("stat failed"), time.Time{}, time.Time{}, false},
{"old file unknown start", nil, now.Add(-8 * 24 * time.Hour), time.Time{}, true},
{"recent file unknown start", nil, now.Add(-2 * time.Hour), time.Time{}, false},
{"pid reused", nil, now.Add(-2 * time.Hour), now.Add(-30 * time.Minute), true},
{"pid active", nil, now.Add(-30 * time.Minute), now.Add(-2 * time.Hour), false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
stubFileStat(t, func(string) (os.FileInfo, error) {
if tt.statErr != nil {
return nil, tt.statErr
}
return fakeFileInfo{modTime: tt.modTime}, nil
})
stubProcessStartTime(t, func(int) time.Time {
return tt.startTime
})
if got := isPIDReused("log", 1234); got != tt.want {
t.Fatalf("isPIDReused() = %v, want %v", got, tt.want)
}
})
}
}
func TestLoggerIsUnsafeFileSecurityChecks(t *testing.T) {
tempDir := t.TempDir()
absTempDir, err := filepath.Abs(tempDir)
if err != nil {
t.Fatalf("filepath.Abs() error = %v", err)
}
t.Run("symlink", func(t *testing.T) {
stubFileStat(t, func(string) (os.FileInfo, error) {
return fakeFileInfo{mode: os.ModeSymlink}, nil
})
stubEvalSymlinks(t, func(path string) (string, error) {
return filepath.Join(absTempDir, filepath.Base(path)), nil
})
unsafe, reason := isUnsafeFile(filepath.Join(absTempDir, "codeagent-wrapper-1.log"), tempDir)
if !unsafe || reason != "refusing to delete symlink" {
t.Fatalf("expected symlink to be rejected, got unsafe=%v reason=%q", unsafe, reason)
}
})
t.Run("path traversal", func(t *testing.T) {
stubFileStat(t, func(string) (os.FileInfo, error) {
return fakeFileInfo{}, nil
})
outside := filepath.Join(filepath.Dir(absTempDir), "etc", "passwd")
stubEvalSymlinks(t, func(string) (string, error) {
return outside, nil
})
unsafe, reason := isUnsafeFile(filepath.Join("..", "..", "etc", "passwd"), tempDir)
if !unsafe || reason != "file is outside tempDir" {
t.Fatalf("expected traversal path to be rejected, got unsafe=%v reason=%q", unsafe, reason)
}
})
t.Run("outside temp dir", func(t *testing.T) {
stubFileStat(t, func(string) (os.FileInfo, error) {
return fakeFileInfo{}, nil
})
otherDir := t.TempDir()
stubEvalSymlinks(t, func(string) (string, error) {
return filepath.Join(otherDir, "codeagent-wrapper-9.log"), nil
})
unsafe, reason := isUnsafeFile(filepath.Join(otherDir, "codeagent-wrapper-9.log"), tempDir)
if !unsafe || reason != "file is outside tempDir" {
t.Fatalf("expected outside file to be rejected, got unsafe=%v reason=%q", unsafe, reason)
}
})
}
func TestLoggerPathAndRemove(t *testing.T) {
tempDir := t.TempDir()
t.Setenv("TMPDIR", tempDir)
logger, err := NewLoggerWithSuffix("sample")
if err != nil {
t.Fatalf("NewLoggerWithSuffix() error = %v", err)
}
path := logger.Path()
if path == "" {
_ = logger.Close()
t.Fatalf("logger.Path() returned empty path")
}
if err := logger.Close(); err != nil {
t.Fatalf("Close() error = %v", err)
}
if err := logger.RemoveLogFile(); err != nil {
t.Fatalf("RemoveLogFile() error = %v", err)
}
if _, err := os.Stat(path); !os.IsNotExist(err) {
t.Fatalf("expected log file to be removed, err=%v", err)
}
var nilLogger *Logger
if nilLogger.Path() != "" {
t.Fatalf("nil logger Path() should be empty")
}
if err := nilLogger.RemoveLogFile(); err != nil {
t.Fatalf("nil logger RemoveLogFile() should return nil, got %v", err)
}
}
func TestLoggerParsePIDFromLog(t *testing.T) {
hugePID := strconv.FormatInt(math.MaxInt64, 10) + "0"
tests := []struct {
name string
pid int
ok bool
}{
{"codeagent-wrapper-123.log", 123, true},
{"codeagent-wrapper-999-extra.log", 999, true},
{"codeagent-wrapper-.log", 0, false},
{"invalid-name.log", 0, false},
{"codeagent-wrapper--5.log", 0, false},
{"codeagent-wrapper-0.log", 0, false},
{fmt.Sprintf("codeagent-wrapper-%s.log", hugePID), 0, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, ok := parsePIDFromLog(filepath.Join("/tmp", tt.name))
if ok != tt.ok {
t.Fatalf("parsePIDFromLog ok = %v, want %v", ok, tt.ok)
}
if ok && got != tt.pid {
t.Fatalf("pid = %d, want %d", got, tt.pid)
}
})
}
}
func createTempLog(t *testing.T, dir, name string) string {
t.Helper()
path := filepath.Join(dir, name)
if err := os.WriteFile(path, []byte("test"), 0o644); err != nil {
t.Fatalf("failed to create temp log %s: %v", path, err)
}
return path
}
func setTempDirEnv(t *testing.T, dir string) string {
t.Helper()
resolved := dir
if eval, err := filepath.EvalSymlinks(dir); err == nil {
resolved = eval
}
t.Setenv("TMPDIR", resolved)
t.Setenv("TEMP", resolved)
t.Setenv("TMP", resolved)
return resolved
}
func stubProcessRunning(t *testing.T, fn func(int) bool) {
t.Helper()
t.Cleanup(SetProcessRunningCheck(fn))
}
func stubProcessStartTime(t *testing.T, fn func(int) time.Time) {
t.Helper()
t.Cleanup(SetProcessStartTimeFn(fn))
}
func stubRemoveLogFile(t *testing.T, fn func(string) error) {
t.Helper()
t.Cleanup(SetRemoveLogFileFn(fn))
}
func stubGlobLogFiles(t *testing.T, fn func(string) ([]string, error)) {
t.Helper()
t.Cleanup(SetGlobLogFilesFn(fn))
}
func stubFileStat(t *testing.T, fn func(string) (os.FileInfo, error)) {
t.Helper()
t.Cleanup(SetFileStatFn(fn))
}
func stubEvalSymlinks(t *testing.T, fn func(string) (string, error)) {
t.Helper()
t.Cleanup(SetEvalSymlinksFn(fn))
}
type fakeFileInfo struct {
modTime time.Time
mode os.FileMode
}
func (f fakeFileInfo) Name() string { return "fake" }
func (f fakeFileInfo) Size() int64 { return 0 }
func (f fakeFileInfo) Mode() os.FileMode { return f.mode }
func (f fakeFileInfo) ModTime() time.Time { return f.modTime }
func (f fakeFileInfo) IsDir() bool { return false }
func (f fakeFileInfo) Sys() interface{} { return nil }
func TestLoggerExtractRecentErrors(t *testing.T) {
tests := []struct {
name string
logs []struct{ level, msg string }
maxEntries int
want []string
}{
{
name: "empty log",
logs: nil,
maxEntries: 10,
want: nil,
},
{
name: "no errors",
logs: []struct{ level, msg string }{
{"INFO", "started"},
{"DEBUG", "processing"},
},
maxEntries: 10,
want: nil,
},
{
name: "single error",
logs: []struct{ level, msg string }{
{"INFO", "started"},
{"ERROR", "something failed"},
},
maxEntries: 10,
want: []string{"something failed"},
},
{
name: "error and warn",
logs: []struct{ level, msg string }{
{"INFO", "started"},
{"WARN", "warning message"},
{"ERROR", "error message"},
},
maxEntries: 10,
want: []string{
"warning message",
"error message",
},
},
{
name: "truncate to max",
logs: []struct{ level, msg string }{
{"ERROR", "error 1"},
{"ERROR", "error 2"},
{"ERROR", "error 3"},
{"ERROR", "error 4"},
{"ERROR", "error 5"},
},
maxEntries: 3,
want: []string{
"error 3",
"error 4",
"error 5",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
logger, err := NewLoggerWithSuffix("extract-test")
if err != nil {
t.Fatalf("NewLoggerWithSuffix() error = %v", err)
}
defer logger.Close()
defer func() { _ = logger.RemoveLogFile() }()
// Write logs using logger methods
for _, entry := range tt.logs {
switch entry.level {
case "INFO":
logger.Info(entry.msg)
case "WARN":
logger.Warn(entry.msg)
case "ERROR":
logger.Error(entry.msg)
case "DEBUG":
logger.Debug(entry.msg)
}
}
logger.Flush()
got := logger.ExtractRecentErrors(tt.maxEntries)
if len(got) != len(tt.want) {
t.Fatalf("ExtractRecentErrors() got %d entries, want %d", len(got), len(tt.want))
}
for i, entry := range got {
if entry != tt.want[i] {
t.Errorf("entry[%d] = %q, want %q", i, entry, tt.want[i])
}
}
})
}
}
func TestLoggerExtractRecentErrorsNilLogger(t *testing.T) {
var logger *Logger
if got := logger.ExtractRecentErrors(10); got != nil {
t.Fatalf("nil logger ExtractRecentErrors() should return nil, got %v", got)
}
}
func TestLoggerExtractRecentErrorsEmptyPath(t *testing.T) {
logger := &Logger{}
if got := logger.ExtractRecentErrors(10); got != nil {
t.Fatalf("empty path ExtractRecentErrors() should return nil, got %v", got)
}
}
func TestLoggerExtractRecentErrorsFileNotExist(t *testing.T) {
logger := &Logger{}
if got := logger.ExtractRecentErrors(10); got != nil {
t.Fatalf("nonexistent file ExtractRecentErrors() should return nil, got %v", got)
}
}
func TestSanitizeLogSuffixNoDuplicates(t *testing.T) {
testCases := []string{
"task",
"task.",
".task",
"-task",
"task-",
"--task--",
"..task..",
}
seen := make(map[string]string)
for _, input := range testCases {
result := sanitizeLogSuffix(input)
if result == "" {
t.Fatalf("sanitizeLogSuffix(%q) returned empty string", input)
}
if prev, exists := seen[result]; exists {
t.Fatalf("collision detected: %q and %q both produce %q", input, prev, result)
}
seen[result] = input
// Verify result is safe for file names
if strings.ContainsAny(result, "/\\:*?\"<>|") {
t.Fatalf("sanitizeLogSuffix(%q) = %q contains unsafe characters", input, result)
}
}
}
func TestExtractRecentErrorsBoundaryCheck(t *testing.T) {
logger, err := NewLoggerWithSuffix("boundary-test")
if err != nil {
t.Fatalf("NewLoggerWithSuffix() error = %v", err)
}
defer logger.Close()
defer func() { _ = logger.RemoveLogFile() }()
// Write some errors
logger.Error("error 1")
logger.Warn("warn 1")
logger.Error("error 2")
logger.Flush()
// Test zero
result := logger.ExtractRecentErrors(0)
if result != nil {
t.Fatalf("ExtractRecentErrors(0) should return nil, got %v", result)
}
// Test negative
result = logger.ExtractRecentErrors(-5)
if result != nil {
t.Fatalf("ExtractRecentErrors(-5) should return nil, got %v", result)
}
// Test positive still works
result = logger.ExtractRecentErrors(10)
if len(result) != 3 {
t.Fatalf("ExtractRecentErrors(10) expected 3 entries, got %d", len(result))
}
}
func TestErrorEntriesMaxLimit(t *testing.T) {
logger, err := NewLoggerWithSuffix("max-limit-test")
if err != nil {
t.Fatalf("NewLoggerWithSuffix() error = %v", err)
}
defer logger.Close()
defer func() { _ = logger.RemoveLogFile() }()
// Write 150 error/warn entries
for i := 1; i <= 150; i++ {
if i%2 == 0 {
logger.Error(fmt.Sprintf("error-%03d", i))
} else {
logger.Warn(fmt.Sprintf("warn-%03d", i))
}
}
logger.Flush()
// Extract all cached errors
result := logger.ExtractRecentErrors(200) // Request more than cache size
// Should only have last 100 entries (entries 51-150 in sequence)
if len(result) != 100 {
t.Fatalf("expected 100 cached entries, got %d", len(result))
}
// Verify entries are the last 100 (entries 51-150)
if !strings.Contains(result[0], "051") {
t.Fatalf("first cached entry should be entry 51, got: %s", result[0])
}
if !strings.Contains(result[99], "150") {
t.Fatalf("last cached entry should be entry 150, got: %s", result[99])
}
// Verify order is preserved - simplified logic
for i := 0; i < len(result)-1; i++ {
expectedNum := 51 + i
nextNum := 51 + i + 1
expectedEntry := fmt.Sprintf("%03d", expectedNum)
nextEntry := fmt.Sprintf("%03d", nextNum)
if !strings.Contains(result[i], expectedEntry) {
t.Fatalf("entry at index %d should contain %s, got: %s", i, expectedEntry, result[i])
}
if !strings.Contains(result[i+1], nextEntry) {
t.Fatalf("entry at index %d should contain %s, got: %s", i+1, nextEntry, result[i+1])
}
}
}

View File

@@ -1,63 +0,0 @@
package logger
import (
"errors"
"math"
"time"
"github.com/shirou/gopsutil/v3/process"
)
func pidToInt32(pid int) (int32, bool) {
if pid <= 0 || pid > math.MaxInt32 {
return 0, false
}
return int32(pid), true
}
// isProcessRunning reports whether a process with the given pid appears to be running.
// It is intentionally conservative on errors to avoid deleting logs for live processes.
func isProcessRunning(pid int) bool {
pid32, ok := pidToInt32(pid)
if !ok {
return false
}
exists, err := process.PidExists(pid32)
if err == nil {
return exists
}
// If we can positively identify that the process doesn't exist, report false.
if errors.Is(err, process.ErrorProcessNotRunning) {
return false
}
// Permission/inspection failures: assume it's running to be safe.
return true
}
// getProcessStartTime returns the start time of a process.
// Returns zero time if the start time cannot be determined.
func getProcessStartTime(pid int) time.Time {
pid32, ok := pidToInt32(pid)
if !ok {
return time.Time{}
}
proc, err := process.NewProcess(pid32)
if err != nil {
return time.Time{}
}
ms, err := proc.CreateTime()
if err != nil || ms <= 0 {
return time.Time{}
}
return time.UnixMilli(ms)
}
func IsProcessRunning(pid int) bool { return isProcessRunning(pid) }
func GetProcessStartTime(pid int) time.Time { return getProcessStartTime(pid) }

View File

@@ -1,112 +0,0 @@
package logger
import (
"math"
"os"
"os/exec"
"runtime"
"strconv"
"testing"
"time"
)
func TestIsProcessRunning(t *testing.T) {
t.Run("boundary values", func(t *testing.T) {
if isProcessRunning(0) {
t.Fatalf("pid 0 should never be treated as running")
}
if isProcessRunning(-1) {
t.Fatalf("negative pid should never be treated as running")
}
})
t.Run("pid out of int32 range", func(t *testing.T) {
if strconv.IntSize <= 32 {
t.Skip("int cannot represent values above int32 range")
}
pid := int(int64(math.MaxInt32) + 1)
if isProcessRunning(pid) {
t.Fatalf("expected pid %d (out of int32 range) to be treated as not running", pid)
}
})
t.Run("current process", func(t *testing.T) {
if !isProcessRunning(os.Getpid()) {
t.Fatalf("expected current process (pid=%d) to be running", os.Getpid())
}
})
t.Run("fake pid", func(t *testing.T) {
const nonexistentPID = 1 << 30
if isProcessRunning(nonexistentPID) {
t.Fatalf("expected pid %d to be reported as not running", nonexistentPID)
}
})
t.Run("terminated process", func(t *testing.T) {
pid := exitedProcessPID(t)
if isProcessRunning(pid) {
t.Fatalf("expected exited child process (pid=%d) to be reported as not running", pid)
}
})
}
func exitedProcessPID(t *testing.T) int {
t.Helper()
var cmd *exec.Cmd
if runtime.GOOS == "windows" {
cmd = exec.Command("cmd", "/c", "exit 0")
} else {
cmd = exec.Command("sh", "-c", "exit 0")
}
if err := cmd.Start(); err != nil {
t.Fatalf("failed to start helper process: %v", err)
}
pid := cmd.Process.Pid
if err := cmd.Wait(); err != nil {
t.Fatalf("helper process did not exit cleanly: %v", err)
}
time.Sleep(50 * time.Millisecond)
return pid
}
func TestGetProcessStartTimeReadsProcStat(t *testing.T) {
start := getProcessStartTime(os.Getpid())
if start.IsZero() {
t.Fatalf("expected non-zero start time for current process")
}
if start.After(time.Now().Add(5 * time.Second)) {
t.Fatalf("start time is unexpectedly in the future: %v", start)
}
}
func TestGetProcessStartTimeInvalidData(t *testing.T) {
if !getProcessStartTime(0).IsZero() {
t.Fatalf("expected zero time for pid 0")
}
if !getProcessStartTime(-1).IsZero() {
t.Fatalf("expected zero time for negative pid")
}
if !getProcessStartTime(1 << 30).IsZero() {
t.Fatalf("expected zero time for non-existent pid")
}
if strconv.IntSize > 32 {
pid := int(int64(math.MaxInt32) + 1)
if !getProcessStartTime(pid).IsZero() {
t.Fatalf("expected zero time for pid %d (out of int32 range)", pid)
}
}
}
func TestGetBootTimeParsesBtime(t *testing.T) {
t.Skip("legacy boot-time probing removed; start time now uses gopsutil")
}
func TestGetBootTimeInvalidData(t *testing.T) {
t.Skip("legacy boot-time probing removed; start time now uses gopsutil")
}

View File

@@ -1,67 +0,0 @@
package logger
import (
"os"
"path/filepath"
"time"
)
func SetProcessRunningCheck(fn func(int) bool) (restore func()) {
prev := processRunningCheck
if fn != nil {
processRunningCheck = fn
} else {
processRunningCheck = isProcessRunning
}
return func() { processRunningCheck = prev }
}
func SetProcessStartTimeFn(fn func(int) time.Time) (restore func()) {
prev := processStartTimeFn
if fn != nil {
processStartTimeFn = fn
} else {
processStartTimeFn = getProcessStartTime
}
return func() { processStartTimeFn = prev }
}
func SetRemoveLogFileFn(fn func(string) error) (restore func()) {
prev := removeLogFileFn
if fn != nil {
removeLogFileFn = fn
} else {
removeLogFileFn = os.Remove
}
return func() { removeLogFileFn = prev }
}
func SetGlobLogFilesFn(fn func(string) ([]string, error)) (restore func()) {
prev := globLogFiles
if fn != nil {
globLogFiles = fn
} else {
globLogFiles = filepath.Glob
}
return func() { globLogFiles = prev }
}
func SetFileStatFn(fn func(string) (os.FileInfo, error)) (restore func()) {
prev := fileStatFn
if fn != nil {
fileStatFn = fn
} else {
fileStatFn = os.Lstat
}
return func() { fileStatFn = prev }
}
func SetEvalSymlinksFn(fn func(string) (string, error)) (restore func()) {
prev := evalSymlinksFn
if fn != nil {
evalSymlinksFn = fn
} else {
evalSymlinksFn = filepath.EvalSymlinks
}
return func() { evalSymlinksFn = prev }
}

View File

@@ -1,13 +0,0 @@
package logger
// WrapperName is the fixed name for this tool.
const WrapperName = "codeagent-wrapper"
// CurrentWrapperName returns the wrapper name (always "codeagent-wrapper").
func CurrentWrapperName() string { return WrapperName }
// LogPrefixes returns the log file name prefixes to look for.
func LogPrefixes() []string { return []string{WrapperName} }
// PrimaryLogPrefix returns the preferred filename prefix for log files.
func PrimaryLogPrefix() string { return WrapperName }

View File

@@ -1,74 +0,0 @@
package parser
import "github.com/goccy/go-json"
// JSONEvent represents a Codex JSON output event.
type JSONEvent struct {
Type string `json:"type"`
ThreadID string `json:"thread_id,omitempty"`
Item *EventItem `json:"item,omitempty"`
}
// EventItem represents the item field in a JSON event.
type EventItem struct {
Type string `json:"type"`
Text interface{} `json:"text"`
}
// ClaudeEvent for Claude stream-json format.
type ClaudeEvent struct {
Type string `json:"type"`
Subtype string `json:"subtype,omitempty"`
SessionID string `json:"session_id,omitempty"`
Result string `json:"result,omitempty"`
}
// GeminiEvent for Gemini stream-json format.
type GeminiEvent struct {
Type string `json:"type"`
SessionID string `json:"session_id,omitempty"`
Role string `json:"role,omitempty"`
Content string `json:"content,omitempty"`
Delta bool `json:"delta,omitempty"`
Status string `json:"status,omitempty"`
}
// UnifiedEvent combines all backend event formats into a single structure
// to avoid multiple JSON unmarshal operations per event.
type UnifiedEvent struct {
// Common fields
Type string `json:"type"`
// Codex-specific fields
ThreadID string `json:"thread_id,omitempty"`
Item json.RawMessage `json:"item,omitempty"` // Lazy parse
// Claude-specific fields
Subtype string `json:"subtype,omitempty"`
SessionID string `json:"session_id,omitempty"`
Result string `json:"result,omitempty"`
// Gemini-specific fields
Role string `json:"role,omitempty"`
Content string `json:"content,omitempty"`
Delta *bool `json:"delta,omitempty"`
Status string `json:"status,omitempty"`
// Opencode-specific fields (camelCase sessionID)
OpencodeSessionID string `json:"sessionID,omitempty"`
Part json.RawMessage `json:"part,omitempty"`
}
// OpencodePart represents the part field in opencode events.
type OpencodePart struct {
Type string `json:"type"`
Text string `json:"text,omitempty"`
Reason string `json:"reason,omitempty"`
SessionID string `json:"sessionID,omitempty"`
}
// ItemContent represents the parsed item.text field for Codex events.
type ItemContent struct {
Type string `json:"type"`
Text interface{} `json:"text"`
}

View File

@@ -1,436 +0,0 @@
package parser
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"strings"
"sync"
"github.com/goccy/go-json"
)
const (
jsonLineReaderSize = 64 * 1024
jsonLineMaxBytes = 10 * 1024 * 1024
jsonLinePreviewBytes = 256
)
type lineScratch struct {
buf []byte
preview []byte
}
const maxPooledLineScratchCap = 1 << 20 // 1 MiB
var lineScratchPool = sync.Pool{
New: func() any {
return &lineScratch{
buf: make([]byte, 0, jsonLineReaderSize),
preview: make([]byte, 0, jsonLinePreviewBytes),
}
},
}
func ParseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(string), onMessage func(), onComplete func()) (message, threadID string) {
reader := bufio.NewReaderSize(r, jsonLineReaderSize)
scratch := lineScratchPool.Get().(*lineScratch)
if scratch.buf == nil {
scratch.buf = make([]byte, 0, jsonLineReaderSize)
} else {
scratch.buf = scratch.buf[:0]
}
if scratch.preview == nil {
scratch.preview = make([]byte, 0, jsonLinePreviewBytes)
} else {
scratch.preview = scratch.preview[:0]
}
defer func() {
if cap(scratch.buf) > maxPooledLineScratchCap {
scratch.buf = nil
} else if scratch.buf != nil {
scratch.buf = scratch.buf[:0]
}
if cap(scratch.preview) > jsonLinePreviewBytes*4 {
scratch.preview = nil
} else if scratch.preview != nil {
scratch.preview = scratch.preview[:0]
}
lineScratchPool.Put(scratch)
}()
if warnFn == nil {
warnFn = func(string) {}
}
if infoFn == nil {
infoFn = func(string) {}
}
notifyMessage := func() {
if onMessage != nil {
onMessage()
}
}
notifyComplete := func() {
if onComplete != nil {
onComplete()
}
}
totalEvents := 0
var (
codexMessage string
claudeMessage string
geminiBuffer strings.Builder
opencodeMessage strings.Builder
)
for {
line, tooLong, err := readLineWithLimit(reader, jsonLineMaxBytes, jsonLinePreviewBytes, scratch)
if err != nil {
if errors.Is(err, io.EOF) {
break
}
warnFn("Read stdout error: " + err.Error())
break
}
line = bytes.TrimSpace(line)
if len(line) == 0 {
continue
}
totalEvents++
if tooLong {
warnFn(fmt.Sprintf("Skipped overlong JSON line (> %d bytes): %s", jsonLineMaxBytes, TruncateBytes(line, 100)))
continue
}
// Single unmarshal for all backend types
var event UnifiedEvent
if err := json.Unmarshal(line, &event); err != nil {
warnFn(fmt.Sprintf("Failed to parse event: %s", TruncateBytes(line, 100)))
continue
}
// Detect backend type by field presence
isCodex := event.ThreadID != ""
if !isCodex && len(event.Item) > 0 {
var itemHeader struct {
Type string `json:"type"`
}
if json.Unmarshal(event.Item, &itemHeader) == nil && itemHeader.Type != "" {
isCodex = true
}
}
// Codex-specific event types without thread_id or item
if !isCodex && (event.Type == "turn.started" || event.Type == "turn.completed") {
isCodex = true
}
isClaude := event.Subtype != "" || event.Result != ""
if !isClaude && event.Type == "result" && event.SessionID != "" && event.Status == "" {
isClaude = true
}
isGemini := (event.Type == "init" && event.SessionID != "") || event.Role != "" || event.Delta != nil || event.Status != ""
isOpencode := event.OpencodeSessionID != "" && len(event.Part) > 0
// Handle Opencode events first (most specific detection)
if isOpencode {
if threadID == "" {
threadID = event.OpencodeSessionID
}
var part OpencodePart
if err := json.Unmarshal(event.Part, &part); err != nil {
warnFn(fmt.Sprintf("Failed to parse opencode part: %s", err.Error()))
continue
}
// Extract sessionID from part if available
if part.SessionID != "" && threadID == "" {
threadID = part.SessionID
}
infoFn(fmt.Sprintf("Parsed Opencode event #%d type=%s part_type=%s", totalEvents, event.Type, part.Type))
if event.Type == "text" && part.Text != "" {
opencodeMessage.WriteString(part.Text)
notifyMessage()
}
if part.Type == "step-finish" && part.Reason == "stop" {
notifyComplete()
}
continue
}
// Handle Codex events
if isCodex {
var details []string
if event.ThreadID != "" {
details = append(details, fmt.Sprintf("thread_id=%s", event.ThreadID))
}
if len(details) > 0 {
infoFn(fmt.Sprintf("Parsed event #%d type=%s (%s)", totalEvents, event.Type, strings.Join(details, ", ")))
} else {
infoFn(fmt.Sprintf("Parsed event #%d type=%s", totalEvents, event.Type))
}
switch event.Type {
case "thread.started":
threadID = event.ThreadID
infoFn(fmt.Sprintf("thread.started event thread_id=%s", threadID))
case "thread.completed":
if event.ThreadID != "" && threadID == "" {
threadID = event.ThreadID
}
infoFn(fmt.Sprintf("thread.completed event thread_id=%s", event.ThreadID))
notifyComplete()
case "turn.completed":
infoFn("turn.completed event")
notifyComplete()
case "item.completed":
var itemType string
if len(event.Item) > 0 {
var itemHeader struct {
Type string `json:"type"`
}
if err := json.Unmarshal(event.Item, &itemHeader); err == nil {
itemType = itemHeader.Type
}
}
if itemType == "agent_message" && len(event.Item) > 0 {
// Lazy parse: only parse item content when needed
var item ItemContent
if err := json.Unmarshal(event.Item, &item); err == nil {
normalized := NormalizeText(item.Text)
infoFn(fmt.Sprintf("item.completed event item_type=%s message_len=%d", itemType, len(normalized)))
if normalized != "" {
codexMessage = normalized
notifyMessage()
}
} else {
warnFn(fmt.Sprintf("Failed to parse item content: %s", err.Error()))
}
} else {
infoFn(fmt.Sprintf("item.completed event item_type=%s", itemType))
}
}
continue
}
// Handle Claude events
if isClaude {
if event.SessionID != "" && threadID == "" {
threadID = event.SessionID
}
infoFn(fmt.Sprintf("Parsed Claude event #%d type=%s subtype=%s result_len=%d", totalEvents, event.Type, event.Subtype, len(event.Result)))
if event.Result != "" {
claudeMessage = event.Result
notifyMessage()
}
if event.Type == "result" {
notifyComplete()
}
continue
}
// Handle Gemini events
if isGemini {
if event.SessionID != "" && threadID == "" {
threadID = event.SessionID
}
if event.Content != "" {
geminiBuffer.WriteString(event.Content)
}
if event.Status != "" {
notifyMessage()
if event.Type == "result" && (event.Status == "success" || event.Status == "error" || event.Status == "complete" || event.Status == "failed") {
notifyComplete()
}
}
delta := false
if event.Delta != nil {
delta = *event.Delta
}
infoFn(fmt.Sprintf("Parsed Gemini event #%d type=%s role=%s delta=%t status=%s content_len=%d", totalEvents, event.Type, event.Role, delta, event.Status, len(event.Content)))
continue
}
// Unknown event format from other backends (turn.started/assistant/user); ignore.
continue
}
switch {
case opencodeMessage.Len() > 0:
message = opencodeMessage.String()
case geminiBuffer.Len() > 0:
message = geminiBuffer.String()
case claudeMessage != "":
message = claudeMessage
default:
message = codexMessage
}
infoFn(fmt.Sprintf("parseJSONStream completed: events=%d, message_len=%d, thread_id_found=%t", totalEvents, len(message), threadID != ""))
return message, threadID
}
func HasKey(m map[string]json.RawMessage, key string) bool {
_, ok := m[key]
return ok
}
func DiscardInvalidJSON(decoder *json.Decoder, reader *bufio.Reader) (*bufio.Reader, error) {
var buffered bytes.Buffer
if decoder != nil {
if buf := decoder.Buffered(); buf != nil {
_, _ = buffered.ReadFrom(buf)
}
}
line, err := reader.ReadBytes('\n')
buffered.Write(line)
data := buffered.Bytes()
newline := bytes.IndexByte(data, '\n')
if newline == -1 {
return reader, err
}
remaining := data[newline+1:]
if len(remaining) == 0 {
return reader, err
}
return bufio.NewReader(io.MultiReader(bytes.NewReader(remaining), reader)), err
}
func readLineWithLimit(r *bufio.Reader, maxBytes int, previewBytes int, scratch *lineScratch) (line []byte, tooLong bool, err error) {
if r == nil {
return nil, false, errors.New("reader is nil")
}
if maxBytes <= 0 {
return nil, false, errors.New("maxBytes must be > 0")
}
if previewBytes < 0 {
previewBytes = 0
}
part, isPrefix, err := r.ReadLine()
if err != nil {
return nil, false, err
}
if !isPrefix {
if len(part) > maxBytes {
return part[:min(len(part), previewBytes)], true, nil
}
return part, false, nil
}
if scratch == nil {
scratch = &lineScratch{}
}
if scratch.preview == nil {
scratch.preview = make([]byte, 0, min(previewBytes, len(part)))
}
if scratch.buf == nil {
scratch.buf = make([]byte, 0, min(maxBytes, len(part)*2))
}
preview := scratch.preview[:0]
if previewBytes > 0 {
preview = append(preview, part[:min(previewBytes, len(part))]...)
}
buf := scratch.buf[:0]
total := 0
if len(part) > maxBytes {
tooLong = true
} else {
buf = append(buf, part...)
total = len(part)
}
for isPrefix {
part, isPrefix, err = r.ReadLine()
if err != nil {
return nil, tooLong, err
}
if previewBytes > 0 && len(preview) < previewBytes {
preview = append(preview, part[:min(previewBytes-len(preview), len(part))]...)
}
if !tooLong {
if total+len(part) > maxBytes {
tooLong = true
continue
}
buf = append(buf, part...)
total += len(part)
}
}
if tooLong {
scratch.preview = preview
scratch.buf = buf
return preview, true, nil
}
scratch.preview = preview
scratch.buf = buf
return buf, false, nil
}
func TruncateBytes(b []byte, maxLen int) string {
if len(b) <= maxLen {
return string(b)
}
if maxLen < 0 {
return ""
}
return string(b[:maxLen]) + "..."
}
func NormalizeText(text interface{}) string {
switch v := text.(type) {
case string:
return v
case []interface{}:
var sb strings.Builder
for _, item := range v {
if s, ok := item.(string); ok {
sb.WriteString(s)
}
}
return sb.String()
default:
return ""
}
}
func min(a, b int) int {
if a < b {
return a
}
return b
}

View File

@@ -1,50 +0,0 @@
package parser
import (
"strings"
"testing"
)
func TestParseJSONStream_Opencode(t *testing.T) {
input := `{"type":"step_start","timestamp":1768187730683,"sessionID":"ses_44fced3c7ffe83sZpzY1rlQka3","part":{"id":"prt_bb0339afa001NTqoJ2NS8x91zP","sessionID":"ses_44fced3c7ffe83sZpzY1rlQka3","messageID":"msg_bb033866f0011oZxTqvfy0TKtS","type":"step-start","snapshot":"904f0fd58c125b79e60f0993e38f9d9f6200bf47"}}
{"type":"text","timestamp":1768187744432,"sessionID":"ses_44fced3c7ffe83sZpzY1rlQka3","part":{"id":"prt_bb0339cb5001QDd0Lh0PzFZpa3","sessionID":"ses_44fced3c7ffe83sZpzY1rlQka3","messageID":"msg_bb033866f0011oZxTqvfy0TKtS","type":"text","text":"Hello from opencode"}}
{"type":"step_finish","timestamp":1768187744471,"sessionID":"ses_44fced3c7ffe83sZpzY1rlQka3","part":{"id":"prt_bb033d0af0019VRZzpO2OVW1na","sessionID":"ses_44fced3c7ffe83sZpzY1rlQka3","messageID":"msg_bb033866f0011oZxTqvfy0TKtS","type":"step-finish","reason":"stop","snapshot":"904f0fd58c125b79e60f0993e38f9d9f6200bf47","cost":0}}`
message, threadID := ParseJSONStreamInternal(strings.NewReader(input), nil, nil, nil, nil)
if threadID != "ses_44fced3c7ffe83sZpzY1rlQka3" {
t.Errorf("threadID = %q, want %q", threadID, "ses_44fced3c7ffe83sZpzY1rlQka3")
}
if message != "Hello from opencode" {
t.Errorf("message = %q, want %q", message, "Hello from opencode")
}
}
func TestParseJSONStream_Opencode_MultipleTextEvents(t *testing.T) {
input := `{"type":"text","sessionID":"ses_123","part":{"type":"text","text":"Part 1"}}
{"type":"text","sessionID":"ses_123","part":{"type":"text","text":" Part 2"}}
{"type":"step_finish","sessionID":"ses_123","part":{"type":"step-finish","reason":"stop"}}`
message, threadID := ParseJSONStreamInternal(strings.NewReader(input), nil, nil, nil, nil)
if threadID != "ses_123" {
t.Errorf("threadID = %q, want %q", threadID, "ses_123")
}
if message != "Part 1 Part 2" {
t.Errorf("message = %q, want %q", message, "Part 1 Part 2")
}
}
func TestParseJSONStream_Opencode_NoStopReason(t *testing.T) {
input := `{"type":"text","sessionID":"ses_456","part":{"type":"text","text":"Content"}}
{"type":"step_finish","sessionID":"ses_456","part":{"type":"step-finish","reason":"tool-calls"}}`
message, threadID := ParseJSONStreamInternal(strings.NewReader(input), nil, nil, nil, nil)
if threadID != "ses_456" {
t.Errorf("threadID = %q, want %q", threadID, "ses_456")
}
if message != "Content" {
t.Errorf("message = %q, want %q", message, "Content")
}
}

View File

@@ -1,31 +0,0 @@
package parser
import (
"strings"
"testing"
)
func TestParseJSONStream_SkipsOverlongLineAndContinues(t *testing.T) {
// Exceed the 10MB bufio.Scanner limit in parseJSONStreamInternal.
tooLong := strings.Repeat("a", 11*1024*1024)
input := strings.Join([]string{
`{"type":"item.completed","item":{"type":"other_type","text":"` + tooLong + `"}}`,
`{"type":"thread.started","thread_id":"t-1"}`,
`{"type":"item.completed","item":{"type":"agent_message","text":"ok"}}`,
}, "\n")
var warns []string
warnFn := func(msg string) { warns = append(warns, msg) }
gotMessage, gotThreadID := ParseJSONStreamInternal(strings.NewReader(input), warnFn, nil, nil, nil)
if gotMessage != "ok" {
t.Fatalf("message=%q, want %q (warns=%v)", gotMessage, "ok", warns)
}
if gotThreadID != "t-1" {
t.Fatalf("threadID=%q, want %q (warns=%v)", gotThreadID, "t-1", warns)
}
if len(warns) == 0 || !strings.Contains(warns[0], "Skipped overlong JSON line") {
t.Fatalf("expected warning about overlong JSON line, got %v", warns)
}
}

View File

@@ -1,32 +0,0 @@
package parser
import (
"strings"
"testing"
)
func TestBackendParseJSONStream_UnknownEventsAreSilent(t *testing.T) {
input := strings.Join([]string{
`{"type":"turn.started"}`,
`{"type":"assistant","text":"hi"}`,
`{"type":"user","text":"yo"}`,
`{"type":"item.completed","item":{"type":"agent_message","text":"ok"}}`,
}, "\n")
var infos []string
infoFn := func(msg string) { infos = append(infos, msg) }
message, threadID := ParseJSONStreamInternal(strings.NewReader(input), nil, infoFn, nil, nil)
if message != "ok" {
t.Fatalf("message=%q, want %q (infos=%v)", message, "ok", infos)
}
if threadID != "" {
t.Fatalf("threadID=%q, want empty (infos=%v)", threadID, infos)
}
for _, msg := range infos {
if strings.Contains(msg, "Agent event:") {
t.Fatalf("unexpected log for unknown event: %q", msg)
}
}
}

View File

@@ -1,15 +0,0 @@
package parser
import "testing"
func TestTruncateBytes(t *testing.T) {
if got := TruncateBytes([]byte("abc"), 3); got != "abc" {
t.Fatalf("TruncateBytes() = %q, want %q", got, "abc")
}
if got := TruncateBytes([]byte("abcd"), 3); got != "abc..." {
t.Fatalf("TruncateBytes() = %q, want %q", got, "abc...")
}
if got := TruncateBytes([]byte("abcd"), -1); got != "" {
t.Fatalf("TruncateBytes() = %q, want empty string", got)
}
}

View File

@@ -1,8 +0,0 @@
package utils
func Min(a, b int) int {
if a < b {
return a
}
return b
}

View File

@@ -1,36 +0,0 @@
package utils
import "testing"
func TestMin(t *testing.T) {
tests := []struct {
name string
a, b int
want int
}{
{"a less than b", 1, 2, 1},
{"b less than a", 5, 3, 3},
{"equal values", 7, 7, 7},
{"negative a", -5, 3, -5},
{"negative b", 5, -3, -3},
{"both negative", -5, -3, -5},
{"zero and positive", 0, 5, 0},
{"zero and negative", 0, -5, -5},
{"large values", 1000000, 999999, 999999},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := Min(tt.a, tt.b)
if got != tt.want {
t.Errorf("Min(%d, %d) = %d, want %d", tt.a, tt.b, got, tt.want)
}
})
}
}
func BenchmarkMin(b *testing.B) {
for i := 0; i < b.N; i++ {
Min(i, i+1)
}
}

View File

@@ -1,62 +0,0 @@
package utils
import "strings"
func Truncate(s string, maxLen int) string {
if len(s) <= maxLen {
return s
}
if maxLen < 0 {
return ""
}
return s[:maxLen] + "..."
}
// SafeTruncate safely truncates string to maxLen, avoiding panic and UTF-8 corruption.
func SafeTruncate(s string, maxLen int) string {
if maxLen <= 0 || s == "" {
return ""
}
runes := []rune(s)
if len(runes) <= maxLen {
return s
}
if maxLen < 4 {
return string(runes[:1])
}
cutoff := maxLen - 3
if cutoff <= 0 {
return string(runes[:1])
}
if len(runes) <= cutoff {
return s
}
return string(runes[:cutoff]) + "..."
}
// SanitizeOutput removes ANSI escape sequences and control characters.
func SanitizeOutput(s string) string {
var result strings.Builder
inEscape := false
for i := 0; i < len(s); i++ {
if s[i] == '\x1b' && i+1 < len(s) && s[i+1] == '[' {
inEscape = true
i++ // skip '['
continue
}
if inEscape {
if (s[i] >= 'A' && s[i] <= 'Z') || (s[i] >= 'a' && s[i] <= 'z') {
inEscape = false
}
continue
}
// Keep printable chars and common whitespace.
if s[i] >= 32 || s[i] == '\n' || s[i] == '\t' {
result.WriteByte(s[i])
}
}
return result.String()
}

View File

@@ -1,122 +0,0 @@
package utils
import (
"strings"
"testing"
)
func TestTruncate(t *testing.T) {
tests := []struct {
name string
s string
maxLen int
want string
}{
{"empty string", "", 10, ""},
{"short string", "hello", 10, "hello"},
{"exact length", "hello", 5, "hello"},
{"needs truncation", "hello world", 5, "hello..."},
{"zero maxLen", "hello", 0, "..."},
{"negative maxLen", "hello", -1, ""},
{"maxLen 1", "hello", 1, "h..."},
{"unicode bytes truncate", "你好世界", 10, "你好世\xe7..."}, // Truncate works on bytes, not runes
{"mixed truncate", "hello世界abc", 7, "hello\xe4\xb8..."}, // byte-based truncation
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := Truncate(tt.s, tt.maxLen)
if got != tt.want {
t.Errorf("Truncate(%q, %d) = %q, want %q", tt.s, tt.maxLen, got, tt.want)
}
})
}
}
func TestSafeTruncate(t *testing.T) {
tests := []struct {
name string
s string
maxLen int
want string
}{
{"empty string", "", 10, ""},
{"zero maxLen", "hello", 0, ""},
{"negative maxLen", "hello", -1, ""},
{"short string", "hello", 10, "hello"},
{"exact length", "hello", 5, "hello"},
{"needs truncation", "hello world", 8, "hello..."},
{"maxLen 1", "hello", 1, "h"},
{"maxLen 2", "hello", 2, "h"},
{"maxLen 3", "hello", 3, "h"},
{"maxLen 4", "hello", 4, "h..."},
{"unicode preserved", "你好世界", 10, "你好世界"},
{"unicode exact", "你好世界", 4, "你好世界"},
{"unicode truncate", "你好世界test", 6, "你好世..."},
{"mixed unicode", "ab你好cd", 5, "ab..."},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := SafeTruncate(tt.s, tt.maxLen)
if got != tt.want {
t.Errorf("SafeTruncate(%q, %d) = %q, want %q", tt.s, tt.maxLen, got, tt.want)
}
})
}
}
func TestSanitizeOutput(t *testing.T) {
tests := []struct {
name string
s string
want string
}{
{"empty string", "", ""},
{"plain text", "hello world", "hello world"},
{"with newline", "hello\nworld", "hello\nworld"},
{"with tab", "hello\tworld", "hello\tworld"},
{"ANSI color red", "\x1b[31mred\x1b[0m", "red"},
{"ANSI bold", "\x1b[1mbold\x1b[0m", "bold"},
{"ANSI complex", "\x1b[1;31;40mtext\x1b[0m", "text"},
{"control chars", "hello\x00\x01\x02world", "helloworld"},
{"mixed ANSI and control", "\x1b[32m\x00ok\x1b[0m", "ok"},
{"multiple ANSI sequences", "\x1b[31mred\x1b[0m \x1b[32mgreen\x1b[0m", "red green"},
{"incomplete escape", "\x1b[", ""},
{"escape without bracket", "\x1bA", "A"},
{"cursor movement", "\x1b[2Aup\x1b[2Bdown", "updown"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := SanitizeOutput(tt.s)
if got != tt.want {
t.Errorf("SanitizeOutput(%q) = %q, want %q", tt.s, got, tt.want)
}
})
}
}
func BenchmarkTruncate(b *testing.B) {
s := strings.Repeat("hello world ", 100)
b.ResetTimer()
for i := 0; i < b.N; i++ {
Truncate(s, 50)
}
}
func BenchmarkSafeTruncate(b *testing.B) {
s := strings.Repeat("你好世界", 100)
b.ResetTimer()
for i := 0; i < b.N; i++ {
SafeTruncate(s, 50)
}
}
func BenchmarkSanitizeOutput(b *testing.B) {
s := strings.Repeat("\x1b[31mred\x1b[0m text ", 50)
b.ResetTimer()
for i := 0; i < b.N; i++ {
SanitizeOutput(s)
}
}

View File

@@ -1,43 +0,0 @@
#!/bin/bash
# Benchmark script for Claude CLI stability test
# Tests if the stdoutDrainTimeout fix resolves intermittent failures
set -euo pipefail
RUNS=${1:-100}
FAIL_COUNT=0
SUCCESS_COUNT=0
TIMEOUT_COUNT=0
echo "Running $RUNS iterations..."
echo "---"
for i in $(seq 1 $RUNS); do
result=$(timeout 30 codeagent --backend claude --skip-permissions 'say OK' 2>&1) || true
if echo "$result" | grep -q 'without agent_message'; then
((FAIL_COUNT++))
echo "[$i] FAIL: without agent_message"
elif echo "$result" | grep -q 'timeout'; then
((TIMEOUT_COUNT++))
echo "[$i] TIMEOUT"
elif echo "$result" | grep -q 'OK\|ok'; then
((SUCCESS_COUNT++))
printf "\r[$i] OK "
else
((FAIL_COUNT++))
echo "[$i] FAIL: unexpected output"
echo "$result" | head -3
fi
done
echo ""
echo "---"
echo "Results ($RUNS runs):"
echo " Success: $SUCCESS_COUNT ($(echo "scale=1; $SUCCESS_COUNT * 100 / $RUNS" | bc)%)"
echo " Fail: $FAIL_COUNT ($(echo "scale=1; $FAIL_COUNT * 100 / $RUNS" | bc)%)"
echo " Timeout: $TIMEOUT_COUNT ($(echo "scale=1; $TIMEOUT_COUNT * 100 / $RUNS" | bc)%)"
if [ $FAIL_COUNT -gt 0 ]; then
exit 1
fi

View File

@@ -1,223 +0,0 @@
{
"version": "1.0",
"install_dir": "~/.claude",
"log_file": "install.log",
"modules": {
"dev": {
"enabled": true,
"description": "Core dev workflow with Codex integration",
"operations": [
{
"type": "merge_dir",
"source": "dev-workflow",
"description": "Merge commands/ and agents/ into install dir"
},
{
"type": "copy_file",
"source": "memorys/CLAUDE.md",
"target": "CLAUDE.md",
"description": "Copy core role and guidelines"
},
{
"type": "copy_file",
"source": "skills/codeagent/SKILL.md",
"target": "skills/codeagent/SKILL.md",
"description": "Install codeagent skill"
},
{
"type": "copy_file",
"source": "skills/product-requirements/SKILL.md",
"target": "skills/product-requirements/SKILL.md",
"description": "Install product-requirements skill"
},
{
"type": "copy_file",
"source": "skills/prototype-prompt-generator/SKILL.md",
"target": "skills/prototype-prompt-generator/SKILL.md",
"description": "Install prototype-prompt-generator skill"
},
{
"type": "copy_file",
"source": "skills/prototype-prompt-generator/references/prompt-structure.md",
"target": "skills/prototype-prompt-generator/references/prompt-structure.md",
"description": "Install prototype-prompt-generator prompt structure reference"
},
{
"type": "copy_file",
"source": "skills/prototype-prompt-generator/references/design-systems.md",
"target": "skills/prototype-prompt-generator/references/design-systems.md",
"description": "Install prototype-prompt-generator design systems reference"
},
{
"type": "run_command",
"command": "bash install.sh",
"description": "Install codeagent-wrapper binary",
"env": {
"INSTALL_DIR": "${install_dir}"
}
}
]
},
"bmad": {
"enabled": false,
"description": "BMAD agile workflow with multi-agent orchestration",
"operations": [
{
"type": "merge_dir",
"source": "bmad-agile-workflow",
"description": "Merge BMAD commands and agents"
},
{
"type": "copy_file",
"source": "docs/BMAD-WORKFLOW.md",
"target": "docs/BMAD-WORKFLOW.md",
"description": "Copy BMAD workflow documentation"
}
]
},
"requirements": {
"enabled": false,
"description": "Requirements-driven development workflow",
"operations": [
{
"type": "merge_dir",
"source": "requirements-driven-workflow",
"description": "Merge requirements workflow commands and agents"
},
{
"type": "copy_file",
"source": "docs/REQUIREMENTS-WORKFLOW.md",
"target": "docs/REQUIREMENTS-WORKFLOW.md",
"description": "Copy requirements workflow documentation"
}
]
},
"essentials": {
"enabled": false,
"description": "Core development commands and utilities",
"operations": [
{
"type": "merge_dir",
"source": "development-essentials",
"description": "Merge essential development commands"
},
{
"type": "copy_file",
"source": "docs/DEVELOPMENT-COMMANDS.md",
"target": "docs/DEVELOPMENT-COMMANDS.md",
"description": "Copy development commands documentation"
}
]
},
"omo": {
"enabled": false,
"description": "OmO multi-agent orchestration with Sisyphus coordinator",
"operations": [
{
"type": "copy_file",
"source": "skills/omo/SKILL.md",
"target": "skills/omo/SKILL.md",
"description": "Install omo skill"
},
{
"type": "copy_file",
"source": "skills/omo/references/oracle.md",
"target": "skills/omo/references/oracle.md",
"description": "Install oracle agent prompt"
},
{
"type": "copy_file",
"source": "skills/omo/references/librarian.md",
"target": "skills/omo/references/librarian.md",
"description": "Install librarian agent prompt"
},
{
"type": "copy_file",
"source": "skills/omo/references/explore.md",
"target": "skills/omo/references/explore.md",
"description": "Install explore agent prompt"
},
{
"type": "copy_file",
"source": "skills/omo/references/frontend-ui-ux-engineer.md",
"target": "skills/omo/references/frontend-ui-ux-engineer.md",
"description": "Install frontend-ui-ux-engineer agent prompt"
},
{
"type": "copy_file",
"source": "skills/omo/references/document-writer.md",
"target": "skills/omo/references/document-writer.md",
"description": "Install document-writer agent prompt"
},
{
"type": "copy_file",
"source": "skills/omo/references/develop.md",
"target": "skills/omo/references/develop.md",
"description": "Install develop agent prompt"
}
]
},
"sparv": {
"enabled": false,
"description": "SPARV workflow (Specify→Plan→Act→Review→Vault) with 10-point gate",
"operations": [
{
"type": "copy_dir",
"source": "skills/sparv",
"target": "skills/sparv",
"description": "Install sparv skill with all scripts and hooks"
}
]
},
"do": {
"enabled": false,
"description": "7-phase feature development workflow with codeagent orchestration",
"operations": [
{
"type": "copy_dir",
"source": "skills/do",
"target": "skills/do",
"description": "Install do skill with hooks"
}
]
},
"course": {
"enabled": false,
"description": "课程开发工作流,包含 dev、产品需求和测试用例技能",
"operations": [
{
"type": "copy_dir",
"source": "skills/dev",
"target": "skills/dev",
"description": "Install dev skill with agents"
},
{
"type": "copy_file",
"source": "skills/product-requirements/SKILL.md",
"target": "skills/product-requirements/SKILL.md",
"description": "Install product-requirements skill"
},
{
"type": "copy_dir",
"source": "skills/test-cases",
"target": "skills/test-cases",
"description": "Install test-cases skill with references"
},
{
"type": "copy_file",
"source": "skills/codeagent/SKILL.md",
"target": "skills/codeagent/SKILL.md",
"description": "Install codeagent skill"
},
{
"type": "run_command",
"command": "bash install.sh",
"description": "Install codeagent-wrapper binary",
"env": {
"INSTALL_DIR": "${install_dir}"
}
}
]
}
}
}

Some files were not shown because too many files have changed in this diff Show More