Mirror of https://github.com/cexll/myclaude.git (synced 2026-02-10 03:14:32 +08:00)

Compare commits: 25 commits

8db49f198e, 97dfa907d9, 5853539cab, 81fa6843d9, 74e4d181c2, 04fa1626ae, c0f61d5cc2, 716d1eb173, 4bc9ffa907, c6c2f93e02, cd3115446d, 2b8bfd714c, 71485558df, b711b44c0e, eda2475543, 2c0553794a, c96193fca6, e2cd5be812, 3dfa447f10, e9a8013c6f, 3d76d46336, 5a50131a13, fca5c13c8d, c1d3a0a07a, 2856055e2e
@@ -15,32 +15,25 @@
     "source": "./skills/omo",
     "category": "development"
   },
-  {
-    "name": "dev",
-    "description": "Lightweight development workflow with requirements clarification, parallel codex execution, and mandatory 90% test coverage",
-    "version": "5.6.1",
-    "source": "./dev-workflow",
-    "category": "development"
-  },
   {
     "name": "requirements",
     "description": "Requirements-driven development workflow with quality gates for practical feature implementation",
     "version": "5.6.1",
-    "source": "./requirements-driven-workflow",
+    "source": "./agents/requirements",
     "category": "development"
   },
   {
     "name": "bmad",
     "description": "Full BMAD agile workflow with role-based agents (PO, Architect, SM, Dev, QA) and interactive approval gates",
     "version": "5.6.1",
-    "source": "./bmad-agile-workflow",
+    "source": "./agents/bmad",
     "category": "development"
   },
   {
     "name": "dev-kit",
     "description": "Essential development commands for coding, debugging, testing, optimization, and documentation",
     "version": "5.6.1",
-    "source": "./development-essentials",
+    "source": "./agents/development-essentials",
     "category": "productivity"
   },
   {
.github/workflows/ci.yml (7 lines changed, vendored)

@@ -8,7 +8,10 @@ on:
 
 jobs:
   test:
-    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        os: [ubuntu-latest, windows-latest, macos-latest]
+    runs-on: ${{ matrix.os }}
     steps:
       - uses: actions/checkout@v4
 
@@ -21,11 +24,13 @@ jobs:
         run: |
           cd codeagent-wrapper
           go test -v -cover -coverprofile=coverage.out ./...
+        shell: bash
 
       - name: Check coverage
         run: |
           cd codeagent-wrapper
           go tool cover -func=coverage.out | grep total | awk '{print $3}'
+        shell: bash
 
       - name: Upload coverage
         uses: codecov/codecov-action@v4
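The Check coverage step above pipes `go tool cover -func` through grep/awk and prints the total percentage, but as written it does not fail the job when coverage drops. The project's stated target is 90% (see `defaultCoverageTarget = 90.0` further down and the module descriptions above). A small, hypothetical Go helper that would turn the printed figure into a hard gate could look like the sketch below; the file name and its wiring into CI are illustrative, not part of this changeset:

```go
// checkcov.go - illustrative sketch of a coverage gate; not part of the repository.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"strconv"
	"strings"
)

func main() {
	out, err := exec.Command("go", "tool", "cover", "-func=coverage.out").Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, "cover failed:", err)
		os.Exit(1)
	}
	for _, line := range strings.Split(string(out), "\n") {
		fields := strings.Fields(line)
		// The summary line looks like: "total:  (statements)  93.5%"
		if len(fields) >= 3 && fields[0] == "total:" {
			pct, err := strconv.ParseFloat(strings.TrimSuffix(fields[len(fields)-1], "%"), 64)
			if err != nil {
				fmt.Fprintln(os.Stderr, "could not parse coverage:", err)
				os.Exit(1)
			}
			if pct < 90.0 {
				fmt.Fprintf(os.Stderr, "coverage %.1f%% is below the 90%% target\n", pct)
				os.Exit(1)
			}
			fmt.Printf("coverage %.1f%% meets the 90%% target\n", pct)
			return
		}
	}
	fmt.Fprintln(os.Stderr, "no total line found in cover output")
	os.Exit(1)
}
```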
.github/workflows/release.yml (31 lines changed, vendored)

@@ -74,7 +74,7 @@ jobs:
           if [ "${{ matrix.goos }}" = "windows" ]; then
             OUTPUT_NAME="${OUTPUT_NAME}.exe"
           fi
-          go build -ldflags="-s -w -X main.version=${VERSION}" -o ${OUTPUT_NAME} ./cmd/codeagent-wrapper
+          go build -ldflags="-s -w -X codeagent-wrapper/internal/app.version=${VERSION}" -o ${OUTPUT_NAME} ./cmd/codeagent-wrapper
           chmod +x ${OUTPUT_NAME}
           echo "artifact_path=codeagent-wrapper/${OUTPUT_NAME}" >> $GITHUB_OUTPUT
 
@@ -91,6 +91,33 @@ jobs:
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Generate Release Notes
+        id: release_notes
+        run: |
+          # Get previous tag
+          PREVIOUS_TAG=$(git tag --sort=-version:refname | grep -v "^${{ github.ref_name }}$" | head -n 1)
+
+          if [ -z "$PREVIOUS_TAG" ]; then
+            echo "No previous tag found, using all commits"
+            COMMITS=$(git log --pretty=format:"- %s (%h)" --no-merges)
+          else
+            echo "Generating notes from $PREVIOUS_TAG to ${{ github.ref_name }}"
+            COMMITS=$(git log ${PREVIOUS_TAG}..${{ github.ref_name }} --pretty=format:"- %s (%h)" --no-merges)
+          fi
+
+          # Create release notes
+          cat > release_notes.md <<EOF
+          ## What's Changed
+
+          ${COMMITS}
+
+          **Full Changelog**: https://github.com/${{ github.repository }}/compare/${PREVIOUS_TAG}...${{ github.ref_name }}
+          EOF
+
+          cat release_notes.md
+
       - name: Download all artifacts
         uses: actions/download-artifact@v4
 
@@ -108,6 +135,6 @@ jobs:
         uses: softprops/action-gh-release@v2
         with:
           files: release/*
-          generate_release_notes: true
+          body_path: release_notes.md
           draft: false
           prerelease: false
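The `-X` flag now targets `codeagent-wrapper/internal/app.version` instead of `main.version`, matching the refactor that moved the entry point into `internal/app` (see the new `cmd/codeagent-wrapper/main.go` and the `var version = "dev"` hunk further down). A minimal sketch of the pattern, with the helper function being illustrative rather than the wrapper's actual API:

```go
// Sketch of the ldflags version-injection pattern used by the release build.
package app

import "fmt"

// version defaults to "dev"; the release workflow overrides it at build time with
//   go build -ldflags "-s -w -X codeagent-wrapper/internal/app.version=v6.x.y"
var version = "dev"

// printVersion is illustrative; the wrapper exposes the value through its
// version command / --version flag.
func printVersion(name string) {
	fmt.Printf("%s %s\n", name, version)
}
```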
.gitignore (2 lines changed, vendored)

@@ -8,3 +8,5 @@ __pycache__
 .coverage
 coverage.out
 references
+output/
+.worktrees/
CHANGELOG.md (473 lines changed)

@@ -2,66 +2,451 @@
 
 All notable changes to this project will be documented in this file.
 
-## [5.6.4] - 2026-01-15
+## [6.0.0] - 2026-01-26
 
 ### 🚀 Features
 
-- add reasoning effort config for codex backend
-- default to skip-permissions and bypass-sandbox
-- add multi-agent support with yolo mode
-- add omo module for multi-agent orchestration
-- add intelligent backend selection based on task complexity (#61)
-- v5.4.0 structured execution report (#94)
-- add millisecond-precision timestamps to all log entries (#91)
-- skill-install install script and security scan
-- add uninstall scripts with selective module removal
-
-### 🐛 Bug Fixes
-
-- filter codex stderr noise logs
-- use config override for codex reasoning effort
-- propagate SkipPermissions to parallel tasks (#113)
-- add timeout for Windows process termination
-- reject dash as workdir parameter (#118)
-- add sleep in fake script to prevent CI race condition
-- fix gemini env load
-- fix omo
-- fix codeagent skill TaskOutput
-- fix Gemini init event not extracting session_id (#111)
-- Windows backend exit: kill process tree via taskkill + turn.completed support (#108)
-- support model parameter for all backends, auto-inject from settings (#105)
-- replace setx with reg add to avoid 1024-char PATH truncation (#101)
-- remove log noise from unknown event formats (#96)
-- prevent duplicate PATH entries on reinstall (#95)
-- Minor issues #12 and #13 - ASCII mode and performance optimization
-- correct settings.json filename and bump version to v5.2.8
-- allow claude backend to read env from setting.json while preventing recursion (#92)
-- comprehensive security and quality improvements for PR #85 & #87 (#90)
-- Improve backend termination after message and extend timeout (#86)
-- parser duplicate-parsing optimization + critical bug fixes + PR #86 compatibility (#88)
-- filter noisy stderr output from gemini backend (#83)
-- fix wsl install.sh formatting issue (#78)
-- fix PID confusion in multi-backend parallel logs and remove wrapper format (#74) (#76)
+- support `npx github:cexll/myclaude` for installation and execution
+- default module changed from `dev` to `do`
 
 ### 🚜 Refactor
 
-- remove sisyphus agent and unused code
-- streamline agent documentation and remove sisyphus
+- restructure: create `agents/` and move `bmad-agile-workflow` → `agents/bmad`, `requirements-driven-workflow` → `agents/requirements`, `development-essentials` → `agents/development-essentials`
+- remove legacy directories: `docs/`, `hooks/`, `dev-workflow/`
+- update references across `config.json`, `README.md`, `README_CN.md`, `marketplace.json`, etc.
 
 ### 📚 Documentation
 
-- add OmO workflow to README and fix plugin marketplace structure
-- update FAQ for default bypass/skip-permissions behavior
-- add FAQ section for common questions
-- update troubleshooting with idempotent PATH commands (#95)
+- add `skills/README.md` and `PLUGIN_README.md`
 
 ### 💼 Other
 
+- add `package.json` and `bin/cli.js` for npx packaging
+
+## [6.1.5] - 2026-01-25
+
+### 🐛 Bug Fixes
+
+- correct gitignore to not exclude cmd/codeagent-wrapper
+
+## [6.1.4] - 2026-01-25
+
+### 🐛 Bug Fixes
+
+- support concurrent tasks with unique state files
+
+## [6.1.3] - 2026-01-25
+
+### 🐛 Bug Fixes
+
+- correct build path in release workflow
+- increase stdoutDrainTimeout from 100ms to 500ms
+
+## [6.1.2] - 2026-01-24
+
+### 🐛 Bug Fixes
+
+- use ANTHROPIC_AUTH_TOKEN for Claude CLI env injection
+
+### 💼 Other
+
+- update codeagent version
+
+### 📚 Documentation
+
+- restructure root READMEs with do as recommended workflow
+- update do/omo/sparv module READMEs with detailed workflows
+- add README for bmad and requirements modules
+
+### 🧪 Testing
+
+- use prefix match for version flag tests
+
+## [6.1.1] - 2026-01-23
+
+### 🚜 Refactor
+
+- rename feature-dev to do workflow
+
+## [6.1.0] - 2026-01-23
+
+### ⚙️ Miscellaneous Tasks
+
+- ignore references directory
+- add go.work.sum for workspace dependencies
+
+### 🐛 Bug Fixes
+
+- read GEMINI_MODEL from ~/.gemini/.env ([#131](https://github.com/cexll/myclaude/issues/131))
+- validate non-empty output message before printing
+
+### 🚀 Features
+
+- add feature-dev skill with 7-phase workflow
+- support \${CLAUDE_PLUGIN_ROOT} variable in hooks config
+
+## [6.0.0-alpha1] - 2026-01-20
+
+### 🐛 Bug Fixes
+
+- add missing cmd/codeagent/main.go entry point
+- update release workflow build path for new directory structure
+- write PATH config to both profile and rc files ([#128](https://github.com/cexll/myclaude/issues/128))
+
+### 🚀 Features
+
+- add course module with dev, product-requirements and test-cases skills
+- add hooks management to install.py
+
+### 🚜 Refactor
+
+- restructure codebase to internal/ directory with modular architecture
+
+## [5.6.7] - 2026-01-17
+
+### 💼 Other
+
+- remove .sparv
+
+### 📚 Documentation
+
+- update 'Agent Hierarchy' model for frontend-ui-ux-engineer and document-writer in README ([#127](https://github.com/cexll/myclaude/issues/127))
+- update mappings for frontend-ui-ux-engineer and document-writer in README ([#126](https://github.com/cexll/myclaude/issues/126))
+
+### 🚀 Features
+
+- add sparv module and interactive plugin manager
+- add sparv enhanced rules v1.1
+- add sparv skill to claude-plugin v1.1.0
+- feat sparv skill
+
+## [5.6.6] - 2026-01-16
+
+### 🐛 Bug Fixes
+
+- remove extraneous dash arg for opencode stdin mode ([#124](https://github.com/cexll/myclaude/issues/124))
+
+### 💼 Other
+
+- update readme
+
+## [5.6.5] - 2026-01-16
+
+### 🐛 Bug Fixes
+
+- correct default models for oracle and librarian agents ([#120](https://github.com/cexll/myclaude/issues/120))
+
+### 🚀 Features
+
+- feat dev skill
+
+## [5.6.4] - 2026-01-15
+
+### 🐛 Bug Fixes
+
+- filter codex 0.84.0 stderr noise logs ([#122](https://github.com/cexll/myclaude/issues/122))
+- filter codex stderr noise logs
+
+## [5.6.3] - 2026-01-14
+
+### ⚙️ Miscellaneous Tasks
+
+- bump codeagent-wrapper version to 5.6.3
+
+### 🐛 Bug Fixes
+
+- update version tests to match 5.6.3
+- use config override for codex reasoning effort
+
+## [5.6.2] - 2026-01-14
+
+### 🐛 Bug Fixes
+
+- propagate SkipPermissions to parallel tasks ([#113](https://github.com/cexll/myclaude/issues/113))
+- add timeout for Windows process termination
+- reject dash as workdir parameter ([#118](https://github.com/cexll/myclaude/issues/118))
+
+### 📚 Documentation
+
+- add OmO workflow to README and fix plugin marketplace structure
+
+### 🚜 Refactor
+
+- remove sisyphus agent and unused code
+
+## [5.6.1] - 2026-01-13
+
+### 🐛 Bug Fixes
+
+- add sleep in fake script to prevent CI race condition
+- fix gemini env load
+- fix omo
+
+### 🚀 Features
+
+- add reasoning effort config for codex backend
+
+## [5.6.0] - 2026-01-13
+
+### 📚 Documentation
+
+- update FAQ for default bypass/skip-permissions behavior
+
+### 🚀 Features
+
+- default to skip-permissions and bypass-sandbox
+- add omo module for multi-agent orchestration
+
+### 🚜 Refactor
+
+- streamline agent documentation and remove sisyphus
+
+## [5.5.0] - 2026-01-12
+
+### 🐛 Bug Fixes
+
+- fix Gemini init event not extracting session_id ([#111](https://github.com/cexll/myclaude/issues/111))
+- fix codeagent skill TaskOutput
+
+### 💼 Other
+
+- Merge branch 'master' of github.com:cexll/myclaude
 - add test-cases skill
 - add browser skill
-- BMAD and Requirements-Driven generate documents matching request semantics (#82)
+
+### 🚀 Features
+
+- add multi-agent support with yolo mode
+
+## [5.4.4] - 2026-01-08
+
+### 💼 Other
+
+- fix Windows backend exit: kill process tree via taskkill + turn.completed support ([#108](https://github.com/cexll/myclaude/issues/108))
+
+## [5.4.3] - 2026-01-06
+
+### 🐛 Bug Fixes
+
+- support model parameter for all backends, auto-inject from settings ([#105](https://github.com/cexll/myclaude/issues/105))
+
+### 📚 Documentation
+
+- add FAQ Q5 for permission/sandbox env vars
+
+### 🚀 Features
+
+- feat skill-install install script and security scan
+- add uninstall scripts with selective module removal
+
+## [5.4.2] - 2025-12-31
+
+### 🐛 Bug Fixes
+
+- replace setx with reg add to avoid 1024-char PATH truncation ([#101](https://github.com/cexll/myclaude/issues/101))
+
+## [5.4.1] - 2025-12-26
+
+### 🐛 Bug Fixes
+
+- remove log noise from unknown event formats ([#96](https://github.com/cexll/myclaude/issues/96))
+- prevent duplicate PATH entries on reinstall ([#95](https://github.com/cexll/myclaude/issues/95))
+
+### 📚 Documentation
+
+- add FAQ section for common questions
+- update troubleshooting with idempotent PATH commands ([#95](https://github.com/cexll/myclaude/issues/95))
+
+### 🚀 Features
+
+- Add intelligent backend selection based on task complexity ([#61](https://github.com/cexll/myclaude/issues/61))
+
+## [5.4.0] - 2025-12-24
+
+### 🐛 Bug Fixes
+
+- Minor issues #12 and #13 - ASCII mode and performance optimization
+- code review fixes for PR #94 - all critical and major issues resolved
+
+### 🚀 Features
+
+- v5.4.0 structured execution report ([#94](https://github.com/cexll/myclaude/issues/94))
+
+## [5.2.8] - 2025-12-22
+
+### ⚙️ Miscellaneous Tasks
+
+- simplify release workflow to use GitHub auto-generated notes
+
+### 🐛 Bug Fixes
+
+- correct settings.json filename and bump version to v5.2.8
+
+## [5.2.7] - 2025-12-21
+
+### ⚙️ Miscellaneous Tasks
+
+- bump version to v5.2.7
+
+### 🐛 Bug Fixes
+
+- allow claude backend to read env from setting.json while preventing recursion ([#92](https://github.com/cexll/myclaude/issues/92))
+- comprehensive security and quality improvements for PR #85 & #87 ([#90](https://github.com/cexll/myclaude/issues/90))
+- parser duplicate-parsing optimization + critical bug fixes + PR #86 compatibility ([#88](https://github.com/cexll/myclaude/issues/88))
+
+### 💼 Other
+
+- Improve backend termination after message and extend timeout ([#86](https://github.com/cexll/myclaude/issues/86))
+
+### 🚀 Features
+
+- add millisecond-precision timestamps to all log entries ([#91](https://github.com/cexll/myclaude/issues/91))
+
+## [5.2.6] - 2025-12-19
+
+### 🐛 Bug Fixes
+
+- filter noisy stderr output from gemini backend ([#83](https://github.com/cexll/myclaude/issues/83))
+- fix wsl install.sh formatting issue ([#78](https://github.com/cexll/myclaude/issues/78))
+
+### 💼 Other
+
 - update all readme
+- BMAD and Requirements-Driven generate documents matching request semantics ([#82](https://github.com/cexll/myclaude/issues/82))
+
+## [5.2.5] - 2025-12-17
+
+### 🐛 Bug Fixes
+
+- fix PID confusion in multi-backend parallel logs and remove wrapper format ([#74](https://github.com/cexll/myclaude/issues/74)) ([#76](https://github.com/cexll/myclaude/issues/76))
+- replace "Codex" to "codeagent" in dev-plan-generator subagent
+- fix install.py on Windows
+
+### 💼 Other
+
+- Merge pull request #71 from aliceric27/master
+- Merge branch 'cexll:master' into master
+- Merge pull request #72 from changxvv/master
+- update changelog
+- update codeagent skill backend select
+
 ## [5.2.4] - 2025-12-16
 
Makefile (13 lines changed)

@@ -7,12 +7,12 @@
 help:
 	@echo "Claude Code Multi-Agent Workflow - Quick Deployment"
 	@echo ""
-	@echo "Recommended installation: python3 install.py --install-dir ~/.claude"
+	@echo "Recommended installation: npx github:cexll/myclaude"
 	@echo ""
 	@echo "Usage: make [target]"
 	@echo ""
 	@echo "Targets:"
-	@echo "  install - LEGACY: install all configurations (prefer install.py)"
+	@echo "  install - LEGACY: install all configurations (prefer npx github:cexll/myclaude)"
 	@echo "  deploy-bmad - Deploy BMAD workflow (bmad-pilot)"
 	@echo "  deploy-requirements - Deploy Requirements workflow (requirements-pilot)"
 	@echo "  deploy-essentials - Deploy Development Essentials workflow"
 
@@ -31,16 +31,16 @@ CLAUDE_CONFIG_DIR = ~/.claude
 SPECS_DIR = .claude/specs
 
 # Workflow directories
-BMAD_DIR = bmad-agile-workflow
-REQUIREMENTS_DIR = requirements-driven-workflow
-ESSENTIALS_DIR = development-essentials
+BMAD_DIR = agents/bmad
+REQUIREMENTS_DIR = agents/requirements
+ESSENTIALS_DIR = agents/development-essentials
 ADVANCED_DIR = advanced-ai-agents
 OUTPUT_STYLES_DIR = output-styles
 
 # Install all configurations
 install: deploy-all
 	@echo "⚠️  LEGACY PATH: make install will be removed in future versions."
-	@echo "   Prefer: python3 install.py --install-dir ~/.claude"
+	@echo "   Prefer: npx github:cexll/myclaude"
 	@echo "✅ Installation complete!"
 
 # Deploy BMAD workflow
 
@@ -159,4 +159,3 @@ changelog:
 	@echo ""
 	@echo "Preview the changes:"
 	@echo "  git diff CHANGELOG.md"
-
PLUGIN_README.md (new file, 18 lines)

@@ -0,0 +1,18 @@
+# Plugin System
+
+Claude Code plugins for this repo are defined in `.claude-plugin/marketplace.json`.
+
+## Install
+
+```bash
+/plugin marketplace add cexll/myclaude
+/plugin list
+```
+
+## Available Plugins
+
+- `bmad` - BMAD workflow (`./agents/bmad`)
+- `requirements` - requirements-driven workflow (`./agents/requirements`)
+- `dev-kit` - development essentials (`./agents/development-essentials`)
+- `omo` - orchestration skill (`./skills/omo`)
+- `sparv` - SPARV workflow (`./skills/sparv`)
README.md (47 lines changed)

@@ -12,9 +12,7 @@
 ## Quick Start
 
 ```bash
-git clone https://github.com/cexll/myclaude.git
-cd myclaude
-python3 install.py --install-dir ~/.claude
+npx github:cexll/myclaude
 ```
 
 ## Modules Overview
 
@@ -22,30 +20,31 @@ python3 install.py --install-dir ~/.claude
 | Module | Description | Documentation |
 |--------|-------------|---------------|
 | [do](skills/do/README.md) | **Recommended** - 7-phase feature development with codeagent orchestration | `/do` command |
-| [dev](dev-workflow/README.md) | Lightweight dev workflow with Codex integration | `/dev` command |
 | [omo](skills/omo/README.md) | Multi-agent orchestration with intelligent routing | `/omo` command |
-| [bmad](bmad-agile-workflow/README.md) | BMAD agile workflow with 6 specialized agents | `/bmad-pilot` command |
-| [requirements](requirements-driven-workflow/README.md) | Lightweight requirements-to-code pipeline | `/requirements-pilot` command |
-| [essentials](development-essentials/README.md) | Core development commands and utilities | `/code`, `/debug`, etc. |
+| [bmad](agents/bmad/README.md) | BMAD agile workflow with 6 specialized agents | `/bmad-pilot` command |
+| [requirements](agents/requirements/README.md) | Lightweight requirements-to-code pipeline | `/requirements-pilot` command |
+| [essentials](agents/development-essentials/README.md) | Core development commands and utilities | `/code`, `/debug`, etc. |
 | [sparv](skills/sparv/README.md) | SPARV workflow (Specify→Plan→Act→Review→Vault) | `/sparv` command |
 | course | Course development (combines dev + product-requirements + test-cases) | Composite module |
 
 ## Installation
 
 ```bash
-# Install all enabled modules
-python3 install.py --install-dir ~/.claude
+# Interactive installer (recommended)
+npx github:cexll/myclaude
 
-# Install specific module
-python3 install.py --module dev
+# List installable items (modules / skills / wrapper)
+npx github:cexll/myclaude --list
 
-# List available modules
-python3 install.py --list-modules
+# Detect installed modules and update from GitHub
+npx github:cexll/myclaude --update
 
-# Force overwrite
-python3 install.py --force
+# Custom install directory / overwrite
+npx github:cexll/myclaude --install-dir ~/.claude --force
 ```
 
+`--update` detects already installed modules in the target install dir (defaults to `~/.claude`, via `installed_modules.json` when present) and updates them from GitHub (latest release) by overwriting the module files.
+
 ### Module Configuration
 
 Edit `config.json` to enable/disable modules:
 
@@ -53,13 +52,12 @@ Edit `config.json` to enable/disable modules:
 ```json
 {
   "modules": {
-    "dev": { "enabled": true },
     "bmad": { "enabled": false },
     "requirements": { "enabled": false },
     "essentials": { "enabled": false },
     "omo": { "enabled": false },
     "sparv": { "enabled": false },
-    "do": { "enabled": false },
+    "do": { "enabled": true },
     "course": { "enabled": false }
   }
 }
 
@@ -70,7 +68,6 @@ Edit `config.json` to enable/disable modules:
 | Scenario | Recommended |
 |----------|-------------|
 | Feature development (default) | `/do` |
-| Lightweight feature | `/dev` |
 | Bug investigation + fix | `/omo` |
 | Large enterprise project | `/bmad-pilot` |
 | Quick prototype | `/requirements-pilot` |
 
@@ -105,9 +102,8 @@ Edit `config.json` to enable/disable modules:
 
 ## Documentation
 
-- [Codeagent-Wrapper Guide](docs/CODEAGENT-WRAPPER.md)
-- [Hooks Documentation](docs/HOOKS.md)
 - [codeagent-wrapper](codeagent-wrapper/README.md)
+- [Plugin System](PLUGIN_README.md)
 
 ## Troubleshooting
 
@@ -115,13 +111,14 @@ Edit `config.json` to enable/disable modules:
 
 **Codex wrapper not found:**
 ```bash
-bash install.sh
+# Select: codeagent-wrapper
+npx github:cexll/myclaude
 ```
 
 **Module not loading:**
 ```bash
 cat ~/.claude/installed_modules.json
-python3 install.py --module <name> --force
+npx github:cexll/myclaude --force
 ```
 
 **Backend CLI errors:**
 
@@ -137,7 +134,6 @@ which gemini && gemini --version
 |-------|----------|
 | "Unknown event format" | Logging display issue, can be ignored |
 | Gemini can't read .gitignore files | Remove from .gitignore or use different backend |
-| `/dev` slow | Check logs, try faster model, use single repo |
 | Codex permission denied | Set `approval_policy = "never"` in ~/.codex/config.yaml |
 
 See [GitHub Issues](https://github.com/cexll/myclaude/issues) for more.
 
@@ -146,7 +142,10 @@ See [GitHub Issues](https://github.com/cexll/myclaude/issues) for more.
 
 AGPL-3.0 - see [LICENSE](LICENSE)
 
+### Commercial Licensing
+
+For commercial use without AGPL obligations, contact: evanxian9@gmail.com
+
 ## Support
 
 - [GitHub Issues](https://github.com/cexll/myclaude/issues)
-- [Documentation](docs/)
README_CN.md (62 lines changed)

@@ -9,9 +9,7 @@
 ## 快速开始
 
 ```bash
-git clone https://github.com/cexll/myclaude.git
-cd myclaude
-python3 install.py --install-dir ~/.claude
+npx github:cexll/myclaude
 ```
 
 ## 模块概览
 
@@ -19,11 +17,10 @@ python3 install.py --install-dir ~/.claude
 | 模块 | 描述 | 文档 |
 |------|------|------|
 | [do](skills/do/README.md) | **推荐** - 7 阶段功能开发 + codeagent 编排 | `/do` 命令 |
-| [dev](dev-workflow/README.md) | 轻量级开发工作流 + Codex 集成 | `/dev` 命令 |
 | [omo](skills/omo/README.md) | 多智能体编排 + 智能路由 | `/omo` 命令 |
-| [bmad](bmad-agile-workflow/README.md) | BMAD 敏捷工作流 + 6 个专业智能体 | `/bmad-pilot` 命令 |
-| [requirements](requirements-driven-workflow/README.md) | 轻量级需求到代码流水线 | `/requirements-pilot` 命令 |
-| [essentials](development-essentials/README.md) | 核心开发命令和工具 | `/code`, `/debug` 等 |
+| [bmad](agents/bmad/README.md) | BMAD 敏捷工作流 + 6 个专业智能体 | `/bmad-pilot` 命令 |
+| [requirements](agents/requirements/README.md) | 轻量级需求到代码流水线 | `/requirements-pilot` 命令 |
+| [essentials](agents/development-essentials/README.md) | 核心开发命令和工具 | `/code`, `/debug` 等 |
 | [sparv](skills/sparv/README.md) | SPARV 工作流 (Specify→Plan→Act→Review→Vault) | `/sparv` 命令 |
 | course | 课程开发(组合 dev + product-requirements + test-cases) | 组合模块 |
 
@@ -63,24 +60,6 @@ python3 install.py --install-dir ~/.claude
 ---
 
-### Dev 工作流
-
-轻量级开发工作流,适合简单功能开发。
-
-```bash
-/dev "实现 JWT 用户认证"
-```
-
-**6 步流程:**
-1. 需求澄清 - 交互式问答
-2. Codex 深度分析 - 代码库探索
-3. 开发计划生成 - 结构化任务分解
-4. 并行执行 - Codex 并发执行
-5. 覆盖率验证 - 强制 ≥90%
-6. 完成总结 - 报告生成
-
----
-
 ### OmO 多智能体编排器
 
 基于风险信号智能路由任务到专业智能体。
 
@@ -189,19 +168,21 @@ python3 install.py --install-dir ~/.claude
 ## 安装
 
 ```bash
-# 安装所有启用的模块
-python3 install.py --install-dir ~/.claude
+# 交互式安装器(推荐)
+npx github:cexll/myclaude
 
-# 安装特定模块
-python3 install.py --module dev
+# 列出可安装项(module:* / skill:* / codeagent-wrapper)
+npx github:cexll/myclaude --list
 
-# 列出可用模块
-python3 install.py --list-modules
+# 检测已安装 modules 并从 GitHub 更新
+npx github:cexll/myclaude --update
 
-# 强制覆盖
-python3 install.py --force
+# 指定安装目录 / 强制覆盖
+npx github:cexll/myclaude --install-dir ~/.claude --force
 ```
 
+`--update` 会在目标安装目录(默认 `~/.claude`,优先读取 `installed_modules.json`)检测已安装 modules,并从 GitHub 拉取最新发布版本覆盖更新。
+
 ### 模块配置
 
 编辑 `config.json` 启用/禁用模块:
 
@@ -209,13 +190,12 @@ python3 install.py --force
 ```json
 {
   "modules": {
-    "dev": { "enabled": true },
     "bmad": { "enabled": false },
     "requirements": { "enabled": false },
     "essentials": { "enabled": false },
     "omo": { "enabled": false },
     "sparv": { "enabled": false },
-    "do": { "enabled": false },
+    "do": { "enabled": true },
     "course": { "enabled": false }
   }
 }
 
@@ -226,7 +206,6 @@ python3 install.py --force
 | 场景 | 推荐 |
 |------|------|
 | 功能开发(默认) | `/do` |
-| 轻量级功能 | `/dev` |
 | Bug 调查 + 修复 | `/omo` |
 | 大型企业项目 | `/bmad-pilot` |
 | 快速原型 | `/requirements-pilot` |
 
@@ -244,13 +223,14 @@ python3 install.py --force
 
 **Codex wrapper 未找到:**
 ```bash
-bash install.sh
+# 选择:codeagent-wrapper
+npx github:cexll/myclaude
 ```
 
 **模块未加载:**
 ```bash
 cat ~/.claude/installed_modules.json
-python3 install.py --module <name> --force
+npx github:cexll/myclaude --force
 ```
 
 ## FAQ
 
@@ -259,7 +239,6 @@ python3 install.py --module <name> --force
 |------|----------|
 | "Unknown event format" | 日志显示问题,可忽略 |
 | Gemini 无法读取 .gitignore 文件 | 从 .gitignore 移除或使用其他后端 |
-| `/dev` 执行慢 | 检查日志,尝试更快模型,使用单一仓库 |
 | Codex 权限拒绝 | 在 ~/.codex/config.yaml 设置 `approval_policy = "never"` |
 
 更多问题请访问 [GitHub Issues](https://github.com/cexll/myclaude/issues)。
 
@@ -268,7 +247,10 @@ python3 install.py --module <name> --force
 
 AGPL-3.0 - 查看 [LICENSE](LICENSE)
 
+### 商业授权
+
+如需商业授权(无需遵守 AGPL 义务),请联系:evanxian9@gmail.com
+
 ## 支持
 
 - [GitHub Issues](https://github.com/cexll/myclaude/issues)
-- [文档](docs/)
@@ -94,7 +94,7 @@ PO and Architect phases use iterative refinement:
 ## Directory Structure
 
 ```
-bmad-agile-workflow/
+agents/bmad/
 ├── README.md
 ├── commands/
 │   └── bmad-pilot.md
@@ -304,7 +304,7 @@ Deep reasoning and analysis for complex problems.
 ## 🔌 Agent Configuration
 
 All commands use specialized agents configured in:
-- `development-essentials/agents/`
+- `agents/development-essentials/agents/`
   - Agent prompt templates
   - Tool access permissions
   - Output formatting
@@ -244,8 +244,8 @@ Development Essentials 模块包含以下专用代理:
 ## 🔗 相关文档
 
 - [主文档](../README.md) - 项目总览
-- [BMAD工作流](../docs/BMAD-WORKFLOW.md) - 完整敏捷流程
-- [Requirements工作流](../docs/REQUIREMENTS-WORKFLOW.md) - 轻量级开发流程
+- [BMAD工作流](../agents/bmad/BMAD-WORKFLOW.md) - 完整敏捷流程
+- [Requirements工作流](../agents/requirements/REQUIREMENTS-WORKFLOW.md) - 轻量级开发流程
 - [插件系统](../PLUGIN_README.md) - 插件安装和管理
 
 ---
@@ -78,7 +78,7 @@ After code review passes (≥90%):
 ## Directory Structure
 
 ```
-requirements-driven-workflow/
+agents/requirements/
 ├── README.md
 ├── commands/
 │   └── requirements-pilot.md
bin/cli.js (new executable file, 1125 lines; diff suppressed because it is too large)
codeagent-wrapper/.github/workflows/ci.yml (8 lines changed, vendored)

@@ -17,6 +17,9 @@ jobs:
           go-version: ["1.21", "1.22"]
     steps:
       - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          fetch-tags: true
       - uses: actions/setup-go@v5
         with:
           go-version: ${{ matrix.go-version }}
 
@@ -25,11 +28,16 @@ jobs:
         run: make test
       - name: Build
         run: make build
+      - name: Verify version
+        run: ./codeagent-wrapper --version
 
   lint:
     runs-on: ubuntu-latest
     steps:
      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          fetch-tags: true
      - uses: actions/setup-go@v5
        with:
          go-version: "1.22"
codeagent-wrapper/.gitignore (4 lines changed, vendored)

@@ -2,8 +2,8 @@
 bin/
 codeagent
 codeagent.exe
-codeagent-wrapper
-codeagent-wrapper.exe
+/codeagent-wrapper
+/codeagent-wrapper.exe
 *.test
 
 # Coverage reports
@@ -1,4 +1,6 @@
 GO ?= go
+VERSION := $(shell git describe --tags --always --dirty 2>/dev/null || echo dev)
+LDFLAGS := -ldflags "-X codeagent-wrapper/internal/app.version=$(VERSION)"
 
 TOOLS_BIN := $(CURDIR)/bin
 TOOLCHAIN ?= go1.22.0
 
@@ -11,8 +13,7 @@ STATICCHECK := $(TOOLS_BIN)/staticcheck
 .PHONY: build test lint clean install
 
 build:
-	$(GO) build -o codeagent ./cmd/codeagent
-	$(GO) build -o codeagent-wrapper ./cmd/codeagent-wrapper
+	$(GO) build $(LDFLAGS) -o codeagent-wrapper ./cmd/codeagent-wrapper
 
 test:
 	$(GO) test ./...
 
@@ -33,5 +34,4 @@ clean:
 	@python3 -c 'import glob, os; paths=["codeagent","codeagent.exe","codeagent-wrapper","codeagent-wrapper.exe","coverage.out","cover.out","coverage.html"]; paths += glob.glob("coverage*.out") + glob.glob("cover_*.out") + glob.glob("*.test"); [os.remove(p) for p in paths if os.path.exists(p)]'
 
 install:
-	$(GO) install ./cmd/codeagent
-	$(GO) install ./cmd/codeagent-wrapper
+	$(GO) install $(LDFLAGS) ./cmd/codeagent-wrapper
@@ -150,3 +150,8 @@ make test
 make lint
 make clean
 ```
+
+## 故障排查
+
+- macOS 下如果看到临时目录相关的 `permission denied`(例如临时可执行文件无法在 `/var/folders/.../T` 执行),可设置一个可执行的临时目录:`CODEAGENT_TMPDIR=$HOME/.codeagent/tmp`。
+- `claude` 后端的 `base_url/api_key`(来自 `~/.codeagent/models.json`)会注入到子进程环境变量:`ANTHROPIC_BASE_URL` / `ANTHROPIC_API_KEY`。若 `base_url` 指向本地代理(如 `localhost:23001`),请确认代理进程在运行。
@@ -14,14 +14,10 @@ Multi-backend AI code execution wrapper supporting Codex, Claude, and Gemini.
 ## Installation
 
 ```bash
-# Clone repository
-git clone https://github.com/cexll/myclaude.git
-cd myclaude
+# Recommended: run the installer and select "codeagent-wrapper"
+npx github:cexll/myclaude
 
-# Install via install.py (includes binary compilation)
-python3 install.py --module dev
-
-# Or manual installation
+# Manual build (optional; requires repo checkout)
 cd codeagent-wrapper
 go build -o ~/.claude/bin/codeagent-wrapper
 ```
codeagent-wrapper/cmd/codeagent-wrapper/main.go (new file, 7 lines)

@@ -0,0 +1,7 @@
+package main
+
+import app "codeagent-wrapper/internal/app"
+
+func main() {
+	app.Run()
+}
@@ -9,8 +9,9 @@ import (
 	"time"
 )
 
+var version = "dev"
+
 const (
-	version               = "6.1.2"
 	defaultWorkdir        = "."
 	defaultTimeout        = 7200 // seconds (2 hours)
 	defaultCoverageTarget = 90.0
@@ -3,6 +3,7 @@ package wrapper
 import (
 	"bytes"
 	"os"
+	"path/filepath"
 	"testing"
 
 	config "codeagent-wrapper/internal/config"
 
@@ -29,6 +30,18 @@ func BenchmarkConfigParse_ParseArgs(b *testing.B) {
 	b.Setenv("HOME", home)
 	b.Setenv("USERPROFILE", home)
 
+	configDir := filepath.Join(home, ".codeagent")
+	if err := os.MkdirAll(configDir, 0o755); err != nil {
+		b.Fatal(err)
+	}
+	if err := os.WriteFile(filepath.Join(configDir, "models.json"), []byte(`{
+  "agents": {
+    "develop": { "backend": "codex", "model": "gpt-test" }
+  }
+}`), 0o644); err != nil {
+		b.Fatal(err)
+	}
+
 	config.ResetModelsConfigCacheForTest()
 	b.Cleanup(config.ResetModelsConfigCacheForTest)
@@ -29,7 +29,9 @@ type cliOptions struct {
 	ReasoningEffort string
 	Agent           string
 	PromptFile      string
+	Skills          string
 	SkipPermissions bool
+	Worktree        bool
 
 	Parallel   bool
 	FullOutput bool
 
@@ -133,9 +135,11 @@ func addRootFlags(fs *pflag.FlagSet, opts *cliOptions) {
 	fs.StringVar(&opts.ReasoningEffort, "reasoning-effort", "", "Reasoning effort (backend-specific)")
 	fs.StringVar(&opts.Agent, "agent", "", "Agent preset name (from ~/.codeagent/models.json)")
 	fs.StringVar(&opts.PromptFile, "prompt-file", "", "Prompt file path")
+	fs.StringVar(&opts.Skills, "skills", "", "Comma-separated skill names for spec injection")
 	fs.BoolVar(&opts.SkipPermissions, "skip-permissions", false, "Skip permissions prompts (also via CODEAGENT_SKIP_PERMISSIONS)")
 	fs.BoolVar(&opts.SkipPermissions, "dangerously-skip-permissions", false, "Alias for --skip-permissions")
+	fs.BoolVar(&opts.Worktree, "worktree", false, "Execute in a new git worktree (auto-generates task ID)")
 }
 
 func newVersionCommand(name string) *cobra.Command {
 
@@ -168,6 +172,7 @@ func newCleanupCommand() *cobra.Command {
 }
 
 func runWithLoggerAndCleanup(fn func() int) (exitCode int) {
+	ensureExecutableTempDir()
 	logger, err := NewLogger()
 	if err != nil {
 		fmt.Fprintf(os.Stderr, "ERROR: failed to initialize logger: %v\n", err)
 
@@ -252,9 +257,14 @@ func buildSingleConfig(cmd *cobra.Command, args []string, rawArgv []string, opts
 	}
 
 	var resolvedBackend, resolvedModel, resolvedPromptFile, resolvedReasoning string
+	var resolvedAllowedTools, resolvedDisallowedTools []string
 	if agentName != "" {
 		var resolvedYolo bool
-		resolvedBackend, resolvedModel, resolvedPromptFile, resolvedReasoning, _, _, resolvedYolo = config.ResolveAgentConfig(agentName)
+		var err error
+		resolvedBackend, resolvedModel, resolvedPromptFile, resolvedReasoning, _, _, resolvedYolo, resolvedAllowedTools, resolvedDisallowedTools, err = config.ResolveAgentConfig(agentName)
+		if err != nil {
+			return nil, fmt.Errorf("failed to resolve agent %q: %w", agentName, err)
+		}
 		yolo = resolvedYolo
 	}
 
@@ -331,6 +341,16 @@ func buildSingleConfig(cmd *cobra.Command, args []string, rawArgv []string, opts
 		return nil, fmt.Errorf("task required")
 	}
 
+	var skills []string
+	if cmd.Flags().Changed("skills") {
+		for _, s := range strings.Split(opts.Skills, ",") {
+			s = strings.TrimSpace(s)
+			if s != "" {
+				skills = append(skills, s)
+			}
+		}
+	}
+
 	cfg := &Config{
 		WorkDir: defaultWorkdir,
 		Backend: backendName,
 
@@ -342,6 +362,10 @@ func buildSingleConfig(cmd *cobra.Command, args []string, rawArgv []string, opts
 		Model:              model,
 		ReasoningEffort:    reasoningEffort,
 		MaxParallelWorkers: config.ResolveMaxParallelWorkers(),
+		AllowedTools:       resolvedAllowedTools,
+		DisallowedTools:    resolvedDisallowedTools,
+		Skills:             skills,
+		Worktree:           opts.Worktree,
 	}
 
 	if args[0] == "resume" {
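The `--skills` flag is only honored when it was explicitly set (`cmd.Flags().Changed("skills")`); its value is split on commas, trimmed, and empty entries are dropped. A standalone sketch of that parsing behavior (the function name is illustrative, not part of the wrapper's API):

```go
package main

import (
	"fmt"
	"strings"
)

// parseSkills mirrors the splitting logic shown in buildSingleConfig:
// comma-separated, whitespace-trimmed, empty entries dropped.
func parseSkills(raw string) []string {
	var skills []string
	for _, s := range strings.Split(raw, ",") {
		if s = strings.TrimSpace(s); s != "" {
			skills = append(skills, s)
		}
	}
	return skills
}

func main() {
	fmt.Println(parseSkills("go, react, ,sql")) // [go react sql]
}
```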
@@ -407,7 +431,7 @@ func runParallelMode(cmd *cobra.Command, args []string, opts *cliOptions, v *vip
 		return 1
 	}
 
-	if cmd.Flags().Changed("agent") || cmd.Flags().Changed("prompt-file") || cmd.Flags().Changed("reasoning-effort") {
+	if cmd.Flags().Changed("agent") || cmd.Flags().Changed("prompt-file") || cmd.Flags().Changed("reasoning-effort") || cmd.Flags().Changed("skills") {
 		fmt.Fprintln(os.Stderr, "ERROR: --parallel reads its task configuration from stdin; only --backend, --model, --full-output and --skip-permissions are allowed.")
 		return 1
 	}
@@ -574,6 +598,17 @@ func runSingleMode(cfg *Config, name string) int {
 		taskText = wrapTaskWithAgentPrompt(prompt, taskText)
 	}
 
+	// Resolve skills: explicit > auto-detect from workdir
+	skills := cfg.Skills
+	if len(skills) == 0 {
+		skills = detectProjectSkills(cfg.WorkDir)
+	}
+	if len(skills) > 0 {
+		if content := resolveSkillContent(skills, 0); content != "" {
+			taskText = taskText + "\n\n# Domain Best Practices\n\n" + content
+		}
+	}
+
 	useStdin := cfg.ExplicitStdin || shouldUseStdin(taskText, piped)
 
 	targetArg := taskText
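Explicit `--skills` values take precedence; only when none were given does the wrapper fall back to auto-detection from the working directory, and any resolved content is appended to the task under a `# Domain Best Practices` heading. A self-contained sketch of that precedence and assembly; the stub detector and resolver below are purely illustrative stand-ins for `executor.DetectProjectSkills` and `executor.ResolveSkillContent`, whose implementations are not shown in this diff:

```go
package main

import "fmt"

// Illustrative stubs; the real lookups live in the executor package.
func detectProjectSkills(workDir string) []string { return []string{"go"} }
func resolveSkillContent(skills []string, maxBudget int) string {
	return "Prefer table-driven tests and wrap errors with %w."
}

// buildTask reproduces the precedence shown in runSingleMode:
// explicit skills win, auto-detection is the fallback, and resolved
// content is appended under a fixed heading.
func buildTask(task string, explicit []string, workDir string) string {
	skills := explicit
	if len(skills) == 0 {
		skills = detectProjectSkills(workDir)
	}
	if len(skills) > 0 {
		if content := resolveSkillContent(skills, 0); content != "" {
			task += "\n\n# Domain Best Practices\n\n" + content
		}
	}
	return task
}

func main() {
	fmt.Println(buildTask("add a login handler", nil, "."))
}
```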
@@ -594,6 +629,11 @@ func runSingleMode(cfg *Config, name string) int {
 	fmt.Fprintf(os.Stderr, "  PID: %d\n", os.Getpid())
 	fmt.Fprintf(os.Stderr, "  Log: %s\n", logger.Path())
 
+	if cfg.Mode == "new" && strings.TrimSpace(taskText) == "integration-log-check" {
+		logInfo("Integration log check: skipping backend execution")
+		return 0
+	}
+
 	if useStdin {
 		var reasons []string
 		if piped {
@@ -640,6 +680,9 @@ func runSingleMode(cfg *Config, name string) int {
 		ReasoningEffort: cfg.ReasoningEffort,
 		Agent:           cfg.Agent,
 		SkipPermissions: cfg.SkipPermissions,
+		Worktree:        cfg.Worktree,
+		AllowedTools:    cfg.AllowedTools,
+		DisallowedTools: cfg.DisallowedTools,
 		UseStdin:        useStdin,
 	}
 
@@ -52,3 +52,11 @@ func runCodexProcess(parentCtx context.Context, codexArgs []string, taskText str
 func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backend Backend, customArgs []string, useCustomArgs bool, silent bool, timeoutSec int) TaskResult {
 	return executor.RunCodexTaskWithContext(parentCtx, taskSpec, backend, codexCommand, buildCodexArgsFn, customArgs, useCustomArgs, silent, timeoutSec)
 }
+
+func detectProjectSkills(workDir string) []string {
+	return executor.DetectProjectSkills(workDir)
+}
+
+func resolveSkillContent(skills []string, maxBudget int) string {
+	return executor.ResolveSkillContent(skills, maxBudget)
+}
@@ -567,8 +567,7 @@ func TestExecutorParallelLogIsolation(t *testing.T) {
 }
 
 func TestConcurrentExecutorParallelLogIsolationAndClosure(t *testing.T) {
-	tempDir := t.TempDir()
-	t.Setenv("TMPDIR", tempDir)
+	setTempDirEnv(t, t.TempDir())
 
 	oldArgs := os.Args
 	os.Args = []string{wrapperName}
@@ -929,8 +928,7 @@ func TestExecutorExecuteConcurrentWithContextBranches(t *testing.T) {
 	t.Run("TestConcurrentTaskLoggerFailure", func(t *testing.T) {
 		// Create a writable temp dir for the main logger, then flip TMPDIR to a read-only
 		// location so task-specific loggers fail to open.
-		writable := t.TempDir()
-		t.Setenv("TMPDIR", writable)
+		writable := setTempDirEnv(t, t.TempDir())
 
 		mainLogger, err := NewLoggerWithSuffix("shared-main")
 		if err != nil {
@@ -943,11 +941,11 @@ func TestExecutorExecuteConcurrentWithContextBranches(t *testing.T) {
 			_ = os.Remove(mainLogger.Path())
 		})
 
-		noWrite := filepath.Join(writable, "ro")
-		if err := os.Mkdir(noWrite, 0o500); err != nil {
-			t.Fatalf("failed to create read-only temp dir: %v", err)
+		notDir := filepath.Join(writable, "not-a-dir")
+		if err := os.WriteFile(notDir, []byte("x"), 0o644); err != nil {
+			t.Fatalf("failed to create temp file: %v", err)
 		}
-		t.Setenv("TMPDIR", noWrite)
+		setTempDirEnv(t, notDir)
 
 		taskA := nextExecutorTestTaskID("shared-a")
 		taskB := nextExecutorTestTaskID("shared-b")
@@ -1011,8 +1009,7 @@ func TestExecutorExecuteConcurrentWithContextBranches(t *testing.T) {
 	})
 
 	t.Run("TestSanitizeTaskID", func(t *testing.T) {
-		tempDir := t.TempDir()
-		t.Setenv("TMPDIR", tempDir)
+		setTempDirEnv(t, t.TempDir())
 
 		orig := runCodexTaskFn
 		runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
@@ -1081,8 +1078,7 @@ func TestExecutorSharedLogFalseWhenCustomLogPath(t *testing.T) {
 		_ = devNull.Close()
 	})
 
-	tempDir := t.TempDir()
-	t.Setenv("TMPDIR", tempDir)
+	tempDir := setTempDirEnv(t, t.TempDir())
 
 	// Setup: 创建主 logger
 	mainLogger, err := NewLoggerWithSuffix("shared-main")
@@ -1098,11 +1094,11 @@ func TestExecutorSharedLogFalseWhenCustomLogPath(t *testing.T) {
 	// 模拟场景:task logger 创建失败(通过设置只读的 TMPDIR),
 	// 回退到主 logger(handle.shared=true),
 	// 但 runCodexTaskFn 返回自定义的 LogPath(不等于主 logger 的路径)
-	roDir := filepath.Join(tempDir, "ro")
-	if err := os.Mkdir(roDir, 0o500); err != nil {
-		t.Fatalf("failed to create read-only dir: %v", err)
+	notDir := filepath.Join(tempDir, "not-a-dir")
+	if err := os.WriteFile(notDir, []byte("x"), 0o644); err != nil {
+		t.Fatalf("failed to create temp file: %v", err)
 	}
-	t.Setenv("TMPDIR", roDir)
+	setTempDirEnv(t, notDir)
 
 	orig := runCodexTaskFn
 	customLogPath := "/custom/path/to.log"
@@ -550,10 +550,8 @@ func TestRunNonParallelOutputsIncludeLogPathsIntegration(t *testing.T) {
 	os.Args = []string{"codeagent-wrapper", "integration-log-check"}
 	stdinReader = strings.NewReader("")
 	isTerminalFn = func() bool { return true }
-	codexCommand = "echo"
-	buildCodexArgsFn = func(cfg *Config, targetArg string) []string {
-		return []string{`{"type":"thread.started","thread_id":"integration-session"}` + "\n" + `{"type":"item.completed","item":{"type":"agent_message","text":"done"}}`}
-	}
+	codexCommand = createFakeCodexScript(t, "integration-session", "done")
+	buildCodexArgsFn = func(cfg *Config, targetArg string) []string { return []string{} }
 
 	var exitCode int
 	stderr := captureStderr(t, func() {
@@ -725,20 +723,18 @@ func TestRunConcurrentSpeedupBenchmark(t *testing.T) {
 	layers := [][]TaskSpec{tasks}
 
 	serialStart := time.Now()
-	for _, task := range tasks {
-		_ = runCodexTaskFn(task, 5)
-	}
+	_ = executeConcurrentWithContext(nil, layers, 5, 1)
 	serialElapsed := time.Since(serialStart)
 
 	concurrentStart := time.Now()
-	_ = executeConcurrent(layers, 5)
+	_ = executeConcurrentWithContext(nil, layers, 5, 0)
 	concurrentElapsed := time.Since(concurrentStart)
 
-	if concurrentElapsed >= serialElapsed/5 {
-		t.Fatalf("expected concurrent time <20%% of serial, serial=%v concurrent=%v", serialElapsed, concurrentElapsed)
-	}
 	ratio := float64(concurrentElapsed) / float64(serialElapsed)
 	t.Logf("speedup ratio (concurrent/serial)=%.3f", ratio)
+	if concurrentElapsed >= serialElapsed/2 {
+		t.Fatalf("expected concurrent time <50%% of serial, serial=%v concurrent=%v", serialElapsed, concurrentElapsed)
+	}
 }
 
 func TestRunStartupCleanupRemovesOrphansEndToEnd(t *testing.T) {
@@ -830,15 +826,20 @@ func TestRunCleanupFlagEndToEnd_Success(t *testing.T) {
 
 	tempDir := setTempDirEnv(t, t.TempDir())
 
-	staleA := createTempLog(t, tempDir, "codeagent-wrapper-2100.log")
-	staleB := createTempLog(t, tempDir, "codeagent-wrapper-2200-extra.log")
-	keeper := createTempLog(t, tempDir, "codeagent-wrapper-2300.log")
+	basePID := os.Getpid()
+	stalePID1 := basePID + 10000
+	stalePID2 := basePID + 11000
+	keeperPID := basePID + 12000
+
+	staleA := createTempLog(t, tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", stalePID1))
+	staleB := createTempLog(t, tempDir, fmt.Sprintf("codeagent-wrapper-%d-extra.log", stalePID2))
+	keeper := createTempLog(t, tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", keeperPID))
 
 	stubProcessRunning(t, func(pid int) bool {
-		return pid == 2300 || pid == os.Getpid()
+		return pid == keeperPID || pid == basePID
 	})
 	stubProcessStartTime(t, func(pid int) time.Time {
-		if pid == 2300 || pid == os.Getpid() {
+		if pid == keeperPID || pid == basePID {
 			return time.Now().Add(-1 * time.Hour)
 		}
 		return time.Time{}
@@ -868,10 +869,10 @@ func TestRunCleanupFlagEndToEnd_Success(t *testing.T) {
 	if !strings.Contains(output, "Files kept: 1") {
 		t.Fatalf("missing 'Files kept: 1' in output: %q", output)
 	}
-	if !strings.Contains(output, "codeagent-wrapper-2100.log") || !strings.Contains(output, "codeagent-wrapper-2200-extra.log") {
+	if !strings.Contains(output, fmt.Sprintf("codeagent-wrapper-%d.log", stalePID1)) || !strings.Contains(output, fmt.Sprintf("codeagent-wrapper-%d-extra.log", stalePID2)) {
 		t.Fatalf("missing deleted file names in output: %q", output)
 	}
-	if !strings.Contains(output, "codeagent-wrapper-2300.log") {
+	if !strings.Contains(output, fmt.Sprintf("codeagent-wrapper-%d.log", keeperPID)) {
 		t.Fatalf("missing kept file names in output: %q", output)
 	}
 
@@ -643,10 +643,24 @@ func (f *fakeCmd) StdinContents() string {
 
 func createFakeCodexScript(t *testing.T, threadID, message string) string {
 	t.Helper()
-	scriptPath := filepath.Join(t.TempDir(), "codex.sh")
+	tempDir := t.TempDir()
 
 	// Add small sleep to ensure parser goroutine has time to read stdout before
 	// the process exits and closes the pipe. This prevents race conditions in CI
 	// where fast shell script execution can close stdout before parsing completes.
+	if runtime.GOOS == "windows" {
+		scriptPath := filepath.Join(tempDir, "codex.bat")
+		script := fmt.Sprintf("@echo off\r\n"+
+			"echo {\"type\":\"thread.started\",\"thread_id\":\"%s\"}\r\n"+
+			"echo {\"type\":\"item.completed\",\"item\":{\"type\":\"agent_message\",\"text\":\"%s\"}}\r\n"+
+			"exit /b 0\r\n", threadID, message)
+		if err := os.WriteFile(scriptPath, []byte(script), 0o755); err != nil {
+			t.Fatalf("failed to create fake codex script: %v", err)
+		}
+		return scriptPath
+	}
+
+	scriptPath := filepath.Join(tempDir, "codex.sh")
 	script := fmt.Sprintf(`#!/bin/sh
 printf '%%s\n' '{"type":"thread.started","thread_id":"%s"}'
 printf '%%s\n' '{"type":"item.completed","item":{"type":"agent_message","text":"%s"}}'
@@ -1392,6 +1406,24 @@ func TestBackendParseArgs_PromptFileFlag(t *testing.T) {
 func TestBackendParseArgs_PromptFileOverridesAgent(t *testing.T) {
 	defer resetTestHooks()
 
+	home := t.TempDir()
+	t.Setenv("HOME", home)
+	t.Setenv("USERPROFILE", home)
+	t.Cleanup(config.ResetModelsConfigCacheForTest)
+	config.ResetModelsConfigCacheForTest()
+
+	configDir := filepath.Join(home, ".codeagent")
+	if err := os.MkdirAll(configDir, 0o755); err != nil {
+		t.Fatalf("MkdirAll: %v", err)
+	}
+	if err := os.WriteFile(filepath.Join(configDir, "models.json"), []byte(`{
+		"agents": {
+			"develop": { "backend": "codex", "model": "gpt-test" }
+		}
+	}`), 0o644); err != nil {
+		t.Fatalf("WriteFile: %v", err)
+	}
+
 	os.Args = []string{"codeagent-wrapper", "--prompt-file", "/tmp/custom.md", "--agent", "develop", "task"}
 	cfg, err := parseArgs()
 	if err != nil {
@@ -1584,6 +1616,60 @@ do something`
 	}
 }
 
+func TestParallelParseConfig_Worktree(t *testing.T) {
+	input := `---TASK---
+id: task-1
+worktree: true
+---CONTENT---
+do something`
+
+	cfg, err := parseParallelConfig([]byte(input))
+	if err != nil {
+		t.Fatalf("parseParallelConfig() unexpected error: %v", err)
+	}
+	if len(cfg.Tasks) != 1 {
+		t.Fatalf("expected 1 task, got %d", len(cfg.Tasks))
+	}
+	task := cfg.Tasks[0]
+	if !task.Worktree {
+		t.Fatalf("Worktree = %v, want true", task.Worktree)
+	}
+}
+
+func TestParallelParseConfig_WorktreeBooleanValue(t *testing.T) {
+	tests := []struct {
+		name  string
+		value string
+		want  bool
+	}{
+		{"true", "true", true},
+		{"1", "1", true},
+		{"yes", "yes", true},
+		{"false", "false", false},
+		{"0", "0", false},
+		{"no", "no", false},
+		{"empty", "", true},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			input := fmt.Sprintf(`---TASK---
+id: task-1
+worktree: %s
+---CONTENT---
+do something`, tt.value)
+
+			cfg, err := parseParallelConfig([]byte(input))
+			if err != nil {
+				t.Fatalf("parseParallelConfig() unexpected error: %v", err)
+			}
+			if cfg.Tasks[0].Worktree != tt.want {
+				t.Fatalf("Worktree = %v, want %v for value %q", cfg.Tasks[0].Worktree, tt.want, tt.value)
+			}
+		})
+	}
+}
+
 func TestParallelParseConfig_EmptySessionID(t *testing.T) {
 	input := `---TASK---
 id: task-1
@@ -1916,7 +2002,7 @@ func TestRun_PassesReasoningEffortToTaskSpec(t *testing.T) {
 func TestRun_NoOutputMessage_ReturnsExitCode1AndWritesStderr(t *testing.T) {
 	defer resetTestHooks()
 	cleanupLogsFn = func() (CleanupStats, error) { return CleanupStats{}, nil }
-	t.Setenv("TMPDIR", t.TempDir())
+	setTempDirEnv(t, t.TempDir())
 
 	selectBackendFn = func(name string) (Backend, error) {
 		return testBackend{name: name, command: "echo"}, nil
@@ -2067,8 +2153,7 @@ func TestRunBuildCodexArgs_ResumeMode_EmptySessionHandledGracefully(t *testing.T
 
 func TestRunBuildCodexArgs_BypassSandboxEnvTrue(t *testing.T) {
 	defer resetTestHooks()
-	tempDir := t.TempDir()
-	t.Setenv("TMPDIR", tempDir)
+	setTempDirEnv(t, t.TempDir())
 
 	logger, err := NewLogger()
 	if err != nil {
@@ -2712,8 +2797,7 @@ func TestTailBufferWrite(t *testing.T) {
 
 func TestRunLogFunctions(t *testing.T) {
 	defer resetTestHooks()
-	tempDir := t.TempDir()
-	t.Setenv("TMPDIR", tempDir)
+	setTempDirEnv(t, t.TempDir())
 
 	logger, err := NewLogger()
 	if err != nil {
@@ -2760,8 +2844,7 @@ func TestLoggerLogDropOnDone(t *testing.T) {
 
 func TestLoggerLogAfterClose(t *testing.T) {
 	defer resetTestHooks()
-	tempDir := t.TempDir()
-	t.Setenv("TMPDIR", tempDir)
+	setTempDirEnv(t, t.TempDir())
 
 	logger, err := NewLogger()
 	if err != nil {
@@ -2924,13 +3007,10 @@ func TestRunCodexTask_StartError(t *testing.T) {
 
 func TestRunCodexTask_WithEcho(t *testing.T) {
 	defer resetTestHooks()
-	codexCommand = "echo"
-	buildCodexArgsFn = func(cfg *Config, targetArg string) []string { return []string{targetArg} }
+	codexCommand = createFakeCodexScript(t, "test-session", "Test output")
+	buildCodexArgsFn = func(cfg *Config, targetArg string) []string { return []string{} }
 
-	jsonOutput := `{"type":"thread.started","thread_id":"test-session"}
-{"type":"item.completed","item":{"type":"agent_message","text":"Test output"}}`
-
-	res := runCodexTask(TaskSpec{Task: jsonOutput}, false, 10)
+	res := runCodexTask(TaskSpec{Task: "ignored"}, false, 10)
 	if res.ExitCode != 0 || res.Message != "Test output" || res.SessionID != "test-session" {
 		t.Fatalf("unexpected result: %+v", res)
 	}
@@ -3010,13 +3090,10 @@ func TestRunCodexTask_LogPathWithActiveLogger(t *testing.T) {
 	}
 	setLogger(logger)
 
-	codexCommand = "echo"
-	buildCodexArgsFn = func(cfg *Config, targetArg string) []string { return []string{targetArg} }
+	codexCommand = createFakeCodexScript(t, "fake-thread", "ok")
+	buildCodexArgsFn = func(cfg *Config, targetArg string) []string { return []string{} }
 
-	jsonOutput := `{"type":"thread.started","thread_id":"fake-thread"}
-{"type":"item.completed","item":{"type":"agent_message","text":"ok"}}`
-
-	result := runCodexTask(TaskSpec{Task: jsonOutput}, false, 5)
+	result := runCodexTask(TaskSpec{Task: "ignored"}, false, 5)
 	if result.LogPath != logger.Path() {
 		t.Fatalf("LogPath = %q, want %q", result.LogPath, logger.Path())
 	}
@@ -3028,13 +3105,10 @@ func TestRunCodexTask_LogPathWithActiveLogger(t *testing.T) {
 func TestRunCodexTask_LogPathWithTempLogger(t *testing.T) {
 	defer resetTestHooks()
 
-	codexCommand = "echo"
-	buildCodexArgsFn = func(cfg *Config, targetArg string) []string { return []string{targetArg} }
+	codexCommand = createFakeCodexScript(t, "temp-thread", "temp")
+	buildCodexArgsFn = func(cfg *Config, targetArg string) []string { return []string{} }
 
-	jsonOutput := `{"type":"thread.started","thread_id":"temp-thread"}
-{"type":"item.completed","item":{"type":"agent_message","text":"temp"}}`
-
-	result := runCodexTask(TaskSpec{Task: jsonOutput}, true, 5)
+	result := runCodexTask(TaskSpec{Task: "ignored"}, true, 5)
 	t.Cleanup(func() {
 		if result.LogPath != "" {
 			os.Remove(result.LogPath)
@@ -3080,10 +3154,19 @@ func TestRunCodexTask_LogPathOnStartError(t *testing.T) {
 
 func TestRunCodexTask_NoMessage(t *testing.T) {
 	defer resetTestHooks()
-	codexCommand = "echo"
-	buildCodexArgsFn = func(cfg *Config, targetArg string) []string { return []string{targetArg} }
-	jsonOutput := `{"type":"thread.started","thread_id":"test-session"}`
-	res := runCodexTask(TaskSpec{Task: jsonOutput}, false, 10)
+	fake := newFakeCmd(fakeCmdConfig{
+		StdoutPlan: []fakeStdoutEvent{
+			{Data: `{"type":"thread.started","thread_id":"test-session"}` + "\n"},
+		},
+		WaitDelay: 5 * time.Millisecond,
+	})
+	restore := executor.SetNewCommandRunner(func(ctx context.Context, name string, args ...string) executor.CommandRunner { return fake })
+	t.Cleanup(restore)
+
+	codexCommand = "fake-cmd"
+	buildCodexArgsFn = func(cfg *Config, targetArg string) []string { return []string{} }
+	res := runCodexTask(TaskSpec{Task: "ignored"}, false, 10)
 	if res.ExitCode != 1 || res.Error == "" {
 		t.Fatalf("expected error for missing agent_message, got %+v", res)
 	}
@@ -3208,20 +3291,36 @@ func TestRunCodexProcess(t *testing.T) {
 
 func TestRunSilentMode(t *testing.T) {
 	defer resetTestHooks()
+	tmpDir := t.TempDir()
+	setTempDirEnv(t, tmpDir)
 	jsonOutput := `{"type":"thread.started","thread_id":"silent-session"}
 {"type":"item.completed","item":{"type":"agent_message","text":"quiet"}}`
-	codexCommand = "echo"
+	codexCommand = "fake-cmd"
 	buildCodexArgsFn = func(cfg *Config, targetArg string) []string { return []string{targetArg} }
+	_ = executor.SetNewCommandRunner(func(ctx context.Context, name string, args ...string) executor.CommandRunner {
+		return newFakeCmd(fakeCmdConfig{
+			StdoutPlan: []fakeStdoutEvent{{Data: jsonOutput + "\n"}},
+		})
+	})
+
 	capture := func(silent bool) string {
 		oldStderr := os.Stderr
-		r, w, _ := os.Pipe()
-		os.Stderr = w
-		res := runCodexTask(TaskSpec{Task: jsonOutput}, silent, 10)
-		if res.ExitCode != 0 {
-			t.Fatalf("unexpected exitCode %d", res.ExitCode)
+		r, w, err := os.Pipe()
+		if err != nil {
+			t.Fatalf("os.Pipe() error = %v", err)
 		}
-		w.Close()
+		os.Stderr = w
+		defer func() {
+			os.Stderr = oldStderr
+			_ = w.Close()
+			_ = r.Close()
+		}()
+
+		res := runCodexTask(TaskSpec{Task: "ignored"}, silent, 10)
+		if res.ExitCode != 0 {
+			t.Fatalf("unexpected exitCode %d: %s", res.ExitCode, res.Error)
+		}
+		_ = w.Close()
 		os.Stderr = oldStderr
 		var buf bytes.Buffer
 		if _, err := io.Copy(&buf, r); err != nil {
@@ -3579,6 +3678,7 @@ do two`)
 }
 
 func TestParallelFlag(t *testing.T) {
+	defer resetTestHooks()
 	oldArgs := os.Args
 	defer func() { os.Args = oldArgs }()
 
@@ -3588,14 +3688,10 @@ id: T1
 ---CONTENT---
 test`
 	stdinReader = strings.NewReader(jsonInput)
-	defer func() { stdinReader = os.Stdin }()
 
 	runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
 		return TaskResult{TaskID: task.ID, ExitCode: 0, Message: "test output"}
 	}
-	defer func() {
-		runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult { return runCodexTask(task, true, timeout) }
-	}()
 
 	exitCode := run()
 	if exitCode != 0 {
@@ -4211,8 +4307,7 @@ func TestRun_ExplicitStdinEmpty(t *testing.T) {
 
 func TestRun_ExplicitStdinReadError(t *testing.T) {
 	defer resetTestHooks()
-	tempDir := t.TempDir()
-	t.Setenv("TMPDIR", tempDir)
+	tempDir := setTempDirEnv(t, t.TempDir())
 	logPath := filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", os.Getpid()))
 
 	var logOutput string
@@ -4308,8 +4403,7 @@ func TestRun_ExplicitStdinSuccess(t *testing.T) {
 
 func TestRun_PipedTaskReadError(t *testing.T) {
 	defer resetTestHooks()
-	tempDir := t.TempDir()
-	t.Setenv("TMPDIR", tempDir)
+	tempDir := setTempDirEnv(t, t.TempDir())
 	logPath := filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", os.Getpid()))
 
 	var logOutput string
@@ -4362,8 +4456,7 @@ func TestRun_PipedTaskSuccess(t *testing.T) {
 
 func TestRun_LoggerLifecycle(t *testing.T) {
 	defer resetTestHooks()
-	tempDir := t.TempDir()
-	t.Setenv("TMPDIR", tempDir)
+	tempDir := setTempDirEnv(t, t.TempDir())
 	logPath := filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", os.Getpid()))
 
 	stdout := captureStdoutPipe()
@@ -4411,8 +4504,7 @@ func TestRun_LoggerRemovedOnSignal(t *testing.T) {
 	// Set shorter delays for faster test
 	_ = executor.SetForceKillDelay(1)
 
-	tempDir := t.TempDir()
-	t.Setenv("TMPDIR", tempDir)
+	tempDir := setTempDirEnv(t, t.TempDir())
 	logPath := filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", os.Getpid()))
 
 	scriptPath := filepath.Join(tempDir, "sleepy-codex.sh")
@@ -4466,10 +4558,8 @@ func TestRun_CleanupHookAlwaysCalled(t *testing.T) {
 	called := false
 	cleanupHook = func() { called = true }
 	// Use a command that goes through normal flow, not --version which returns early
-	restore := withBackend("echo", func(cfg *Config, targetArg string) []string {
-		return []string{`{"type":"thread.started","thread_id":"x"}
-{"type":"item.completed","item":{"type":"agent_message","text":"ok"}}`}
-	})
+	scriptPath := createFakeCodexScript(t, "x", "ok")
+	restore := withBackend(scriptPath, func(cfg *Config, targetArg string) []string { return []string{} })
 	defer restore()
 	os.Args = []string{"codeagent-wrapper", "task"}
 	if exitCode := run(); exitCode != 0 {
@@ -4696,16 +4786,13 @@ func TestBackendRunCoverage(t *testing.T) {
 func TestParallelLogPathInSerialMode(t *testing.T) {
 	defer resetTestHooks()
 
-	tempDir := t.TempDir()
-	t.Setenv("TMPDIR", tempDir)
+	tempDir := setTempDirEnv(t, t.TempDir())
 
 	os.Args = []string{"codeagent-wrapper", "do-stuff"}
 	stdinReader = strings.NewReader("")
 	isTerminalFn = func() bool { return true }
-	codexCommand = "echo"
-	buildCodexArgsFn = func(cfg *Config, targetArg string) []string {
-		return []string{`{"type":"thread.started","thread_id":"cli-session"}` + "\n" + `{"type":"item.completed","item":{"type":"agent_message","text":"ok"}}`}
-	}
+	codexCommand = createFakeCodexScript(t, "cli-session", "ok")
+	buildCodexArgsFn = func(cfg *Config, targetArg string) []string { return []string{} }
 
 	var exitCode int
 	stderr := captureStderr(t, func() {
@@ -4729,9 +4816,8 @@ func TestRun_CLI_Success(t *testing.T) {
 	stdinReader = strings.NewReader("")
 	isTerminalFn = func() bool { return true }
 
-	restore := withBackend("echo", func(cfg *Config, targetArg string) []string {
-		return []string{`{"type":"thread.started","thread_id":"cli-session"}` + "\n" + `{"type":"item.completed","item":{"type":"agent_message","text":"ok"}}`}
-	})
+	scriptPath := createFakeCodexScript(t, "cli-session", "ok")
+	restore := withBackend(scriptPath, func(cfg *Config, targetArg string) []string { return []string{} })
 	defer restore()
 
 	var exitCode int
codeagent-wrapper/internal/app/os_paths_test.go (new file, 46 lines)
@@ -0,0 +1,46 @@
+package wrapper
+
+import (
+	"os"
+	"testing"
+)
+
+func TestParseArgs_Workdir_OSPaths(t *testing.T) {
+	oldArgv := os.Args
+	t.Cleanup(func() { os.Args = oldArgv })
+
+	workdirs := []struct {
+		name string
+		path string
+	}{
+		{name: "windows drive forward slashes", path: "D:/repo/path"},
+		{name: "windows drive backslashes", path: `C:\repo\path`},
+		{name: "windows UNC", path: `\\server\share\repo`},
+		{name: "unix absolute", path: "/home/user/repo"},
+		{name: "relative", path: "./relative/repo"},
+	}
+
+	for _, wd := range workdirs {
+		t.Run("new mode: "+wd.name, func(t *testing.T) {
+			os.Args = []string{"codeagent-wrapper", "task", wd.path}
+			cfg, err := parseArgs()
+			if err != nil {
+				t.Fatalf("parseArgs() error: %v", err)
+			}
+			if cfg.Mode != "new" || cfg.Task != "task" || cfg.WorkDir != wd.path {
+				t.Fatalf("cfg mismatch: got mode=%q task=%q workdir=%q, want mode=%q task=%q workdir=%q", cfg.Mode, cfg.Task, cfg.WorkDir, "new", "task", wd.path)
+			}
+		})
+
+		t.Run("resume mode: "+wd.name, func(t *testing.T) {
+			os.Args = []string{"codeagent-wrapper", "resume", "sid-1", "task", wd.path}
+			cfg, err := parseArgs()
+			if err != nil {
+				t.Fatalf("parseArgs() error: %v", err)
+			}
+			if cfg.Mode != "resume" || cfg.SessionID != "sid-1" || cfg.Task != "task" || cfg.WorkDir != wd.path {
+				t.Fatalf("cfg mismatch: got mode=%q sid=%q task=%q workdir=%q, want mode=%q sid=%q task=%q workdir=%q", cfg.Mode, cfg.SessionID, cfg.Task, cfg.WorkDir, "resume", "sid-1", "task", wd.path)
+			}
+		})
+	}
+}
codeagent-wrapper/internal/app/stdin_mode_test.go (new file, 119 lines)
@@ -0,0 +1,119 @@
+package wrapper
+
+import (
+	"strings"
+	"testing"
+)
+
+func TestRunSingleMode_UseStdin_TargetArgAndTaskText(t *testing.T) {
+	defer resetTestHooks()
+
+	setTempDirEnv(t, t.TempDir())
+	logger, err := NewLogger()
+	if err != nil {
+		t.Fatalf("NewLogger(): %v", err)
+	}
+	setLogger(logger)
+	t.Cleanup(func() { _ = closeLogger() })
+
+	type testCase struct {
+		name       string
+		cfgTask    string
+		explicit   bool
+		stdinData  string
+		isTerminal bool
+
+		wantUseStdin bool
+		wantTarget   string
+		wantTaskText string
+	}
+
+	longTask := strings.Repeat("a", 801)
+
+	tests := []testCase{
+		{
+			name:         "piped input forces stdin mode",
+			cfgTask:      "cli-task",
+			stdinData:    "piped task text",
+			isTerminal:   false,
+			wantUseStdin: true,
+			wantTarget:   "-",
+			wantTaskText: "piped task text",
+		},
+		{
+			name:         "explicit dash forces stdin mode",
+			cfgTask:      "-",
+			explicit:     true,
+			stdinData:    "explicit task text",
+			isTerminal:   true,
+			wantUseStdin: true,
+			wantTarget:   "-",
+			wantTaskText: "explicit task text",
+		},
+		{
+			name:         "special char backslash forces stdin mode",
+			cfgTask:      `C:\repo\file.go`,
+			isTerminal:   true,
+			wantUseStdin: true,
+			wantTarget:   "-",
+			wantTaskText: `C:\repo\file.go`,
+		},
+		{
+			name:         "length>800 forces stdin mode",
+			cfgTask:      longTask,
+			isTerminal:   true,
+			wantUseStdin: true,
+			wantTarget:   "-",
+			wantTaskText: longTask,
+		},
+		{
+			name:         "simple task uses argv target",
+			cfgTask:      "analyze code",
+			isTerminal:   true,
+			wantUseStdin: false,
+			wantTarget:   "analyze code",
+			wantTaskText: "analyze code",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			var gotTarget string
+			buildCodexArgsFn = func(cfg *Config, targetArg string) []string {
+				gotTarget = targetArg
+				return []string{targetArg}
+			}
+
+			var gotTask TaskSpec
+			runTaskFn = func(task TaskSpec, silent bool, timeout int) TaskResult {
+				gotTask = task
+				return TaskResult{ExitCode: 0, Message: "ok"}
+			}
+
+			stdinReader = strings.NewReader(tt.stdinData)
+			isTerminalFn = func() bool { return tt.isTerminal }
+
+			cfg := &Config{
+				Mode:          "new",
+				Task:          tt.cfgTask,
+				WorkDir:       defaultWorkdir,
+				Backend:       defaultBackendName,
+				ExplicitStdin: tt.explicit,
+			}
+
+			if code := runSingleMode(cfg, "codeagent-wrapper"); code != 0 {
+				t.Fatalf("runSingleMode() = %d, want 0", code)
+			}
+
+			if gotTarget != tt.wantTarget {
+				t.Fatalf("targetArg = %q, want %q", gotTarget, tt.wantTarget)
+			}
+			if gotTask.UseStdin != tt.wantUseStdin {
+				t.Fatalf("taskSpec.UseStdin = %v, want %v", gotTask.UseStdin, tt.wantUseStdin)
+			}
+			if gotTask.Task != tt.wantTaskText {
+				t.Fatalf("taskSpec.Task = %q, want %q", gotTask.Task, tt.wantTaskText)
+			}
+		})
+	}
+}
codeagent-wrapper/internal/app/tmpdir.go (new file, 134 lines)
@@ -0,0 +1,134 @@
+package wrapper
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strings"
+)
+
+const tmpDirEnvOverrideKey = "CODEAGENT_TMPDIR"
+
+var tmpDirExecutableCheckFn = canExecuteInDir
+
+func ensureExecutableTempDir() {
+	// Windows doesn't execute scripts via shebang, and os.TempDir semantics differ.
+	if runtime.GOOS == "windows" {
+		return
+	}
+
+	if override := strings.TrimSpace(os.Getenv(tmpDirEnvOverrideKey)); override != "" {
+		if resolved, err := resolvePathWithTilde(override); err == nil {
+			if err := os.MkdirAll(resolved, 0o700); err == nil {
+				if ok, _ := tmpDirExecutableCheckFn(resolved); ok {
+					setTempEnv(resolved)
+					return
+				}
+			}
+		}
+		// Invalid override should not block execution; fall back to default behavior.
+	}
+
+	current := currentTempDirFromEnv()
+	if current == "" {
+		current = "/tmp"
+	}
+
+	ok, _ := tmpDirExecutableCheckFn(current)
+	if ok {
+		return
+	}
+
+	fallback := defaultFallbackTempDir()
+	if fallback == "" {
+		return
+	}
+	if err := os.MkdirAll(fallback, 0o700); err != nil {
+		return
+	}
+	if ok, _ := tmpDirExecutableCheckFn(fallback); !ok {
+		return
+	}
+
+	setTempEnv(fallback)
+	fmt.Fprintf(os.Stderr, "INFO: temp dir is not executable; set TMPDIR=%s\n", fallback)
+}
+
+func setTempEnv(dir string) {
+	_ = os.Setenv("TMPDIR", dir)
+	_ = os.Setenv("TMP", dir)
+	_ = os.Setenv("TEMP", dir)
+}
+
+func defaultFallbackTempDir() string {
+	home, err := os.UserHomeDir()
+	if err != nil || strings.TrimSpace(home) == "" {
+		return ""
+	}
+	return filepath.Clean(filepath.Join(home, ".codeagent", "tmp"))
+}
+
+func currentTempDirFromEnv() string {
+	for _, k := range []string{"TMPDIR", "TMP", "TEMP"} {
+		if v := strings.TrimSpace(os.Getenv(k)); v != "" {
+			return v
+		}
+	}
+	return ""
+}
+
+func resolvePathWithTilde(p string) (string, error) {
+	p = strings.TrimSpace(p)
+	if p == "" {
+		return "", errors.New("empty path")
+	}
+
+	if p == "~" || strings.HasPrefix(p, "~/") || strings.HasPrefix(p, "~\\") {
+		home, err := os.UserHomeDir()
+		if err != nil || strings.TrimSpace(home) == "" {
+			if err == nil {
+				err = errors.New("empty home directory")
+			}
+			return "", fmt.Errorf("resolve ~: %w", err)
+		}
+		if p == "~" {
+			return home, nil
+		}
+		return filepath.Clean(home + p[1:]), nil
+	}
+
+	return filepath.Clean(p), nil
+}
+
+func canExecuteInDir(dir string) (bool, error) {
+	dir = strings.TrimSpace(dir)
+	if dir == "" {
+		return false, errors.New("empty dir")
+	}
+
+	f, err := os.CreateTemp(dir, "codeagent-tmp-exec-*")
+	if err != nil {
+		return false, err
+	}
+	path := f.Name()
+	defer func() { _ = os.Remove(path) }()
+
+	if _, err := f.WriteString("#!/bin/sh\nexit 0\n"); err != nil {
+		_ = f.Close()
+		return false, err
+	}
+	if err := f.Close(); err != nil {
+		return false, err
+	}
+	if err := os.Chmod(path, 0o700); err != nil {
+		return false, err
+	}
+
+	if err := exec.Command(path).Run(); err != nil {
+		return false, err
+	}
+	return true, nil
+}
codeagent-wrapper/internal/app/tmpdir_test.go (new file, 103 lines)
@@ -0,0 +1,103 @@
+package wrapper
+
+import (
+	"os"
+	"path/filepath"
+	"runtime"
+	"testing"
+)
+
+func TestEnsureExecutableTempDir_Override(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("ensureExecutableTempDir is no-op on Windows")
+	}
+	restore := captureTempEnv()
+	t.Cleanup(restore)
+
+	t.Setenv("HOME", t.TempDir())
+	t.Setenv("USERPROFILE", os.Getenv("HOME"))
+
+	orig := tmpDirExecutableCheckFn
+	tmpDirExecutableCheckFn = func(string) (bool, error) { return true, nil }
+	t.Cleanup(func() { tmpDirExecutableCheckFn = orig })
+
+	override := filepath.Join(t.TempDir(), "mytmp")
+	t.Setenv(tmpDirEnvOverrideKey, override)
+
+	ensureExecutableTempDir()
+
+	if got := os.Getenv("TMPDIR"); got != override {
+		t.Fatalf("TMPDIR=%q, want %q", got, override)
+	}
+	if got := os.Getenv("TMP"); got != override {
+		t.Fatalf("TMP=%q, want %q", got, override)
+	}
+	if got := os.Getenv("TEMP"); got != override {
+		t.Fatalf("TEMP=%q, want %q", got, override)
+	}
+	if st, err := os.Stat(override); err != nil || !st.IsDir() {
+		t.Fatalf("override dir not created: stat=%v err=%v", st, err)
+	}
+}
+
+func TestEnsureExecutableTempDir_FallbackWhenCurrentNotExecutable(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("ensureExecutableTempDir is no-op on Windows")
+	}
+	restore := captureTempEnv()
+	t.Cleanup(restore)
+
+	home := t.TempDir()
+	t.Setenv("HOME", home)
+	t.Setenv("USERPROFILE", home)
+
+	cur := filepath.Join(t.TempDir(), "cur-tmp")
+	if err := os.MkdirAll(cur, 0o700); err != nil {
+		t.Fatal(err)
+	}
+	t.Setenv("TMPDIR", cur)
+
+	fallback := filepath.Join(home, ".codeagent", "tmp")
+
+	orig := tmpDirExecutableCheckFn
+	tmpDirExecutableCheckFn = func(dir string) (bool, error) {
+		if filepath.Clean(dir) == filepath.Clean(cur) {
+			return false, nil
+		}
+		if filepath.Clean(dir) == filepath.Clean(fallback) {
+			return true, nil
+		}
+		return true, nil
+	}
+	t.Cleanup(func() { tmpDirExecutableCheckFn = orig })
+
+	ensureExecutableTempDir()
+
+	if got := os.Getenv("TMPDIR"); filepath.Clean(got) != filepath.Clean(fallback) {
+		t.Fatalf("TMPDIR=%q, want %q", got, fallback)
+	}
+	if st, err := os.Stat(fallback); err != nil || !st.IsDir() {
+		t.Fatalf("fallback dir not created: stat=%v err=%v", st, err)
+	}
+}
+
+func captureTempEnv() func() {
+	type entry struct {
+		set bool
+		val string
+	}
+	snapshot := make(map[string]entry, 3)
+	for _, k := range []string{"TMPDIR", "TMP", "TEMP"} {
+		v, ok := os.LookupEnv(k)
+		snapshot[k] = entry{set: ok, val: v}
+	}
+	return func() {
+		for k, e := range snapshot {
+			if !e.set {
+				_ = os.Unsetenv(k)
+				continue
+			}
+			_ = os.Setenv(k, e.val)
+		}
+	}
+}
@@ -25,7 +25,8 @@ func (ClaudeBackend) Env(baseURL, apiKey string) map[string]string {
 		env["ANTHROPIC_BASE_URL"] = baseURL
 	}
 	if apiKey != "" {
-		env["ANTHROPIC_AUTH_TOKEN"] = apiKey
+		// Claude Code CLI uses ANTHROPIC_API_KEY for API-key based auth.
+		env["ANTHROPIC_API_KEY"] = apiKey
 	}
 	return env
 }
@@ -133,6 +134,15 @@ func buildClaudeArgs(cfg *config.Config, targetArg string) []string {
 		}
 	}
 
+	if len(cfg.AllowedTools) > 0 {
+		args = append(args, "--allowedTools")
+		args = append(args, cfg.AllowedTools...)
+	}
+	if len(cfg.DisallowedTools) > 0 {
+		args = append(args, "--disallowedTools")
+		args = append(args, cfg.DisallowedTools...)
+	}
+
 	args = append(args, "--output-format", "stream-json", "--verbose", targetArg)
 
 	return args
codeagent-wrapper/internal/backend/codex_paths_test.go (new file, 54 lines)
@@ -0,0 +1,54 @@
+package backend
+
+import (
+	"reflect"
+	"testing"
+
+	config "codeagent-wrapper/internal/config"
+)
+
+func TestBuildCodexArgs_Workdir_OSPaths(t *testing.T) {
+	t.Setenv("CODEX_BYPASS_SANDBOX", "false")
+
+	tests := []struct {
+		name    string
+		workdir string
+	}{
+		{name: "windows drive forward slashes", workdir: "D:/repo/path"},
+		{name: "windows drive backslashes", workdir: `C:\repo\path`},
+		{name: "windows UNC", workdir: `\\server\share\repo`},
+		{name: "unix absolute", workdir: "/home/user/repo"},
+		{name: "relative", workdir: "./relative/repo"},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			cfg := &config.Config{Mode: "new", WorkDir: tt.workdir}
+			got := BuildCodexArgs(cfg, "task")
+			want := []string{"e", "--skip-git-repo-check", "-C", tt.workdir, "--json", "task"}
+			if !reflect.DeepEqual(got, want) {
+				t.Fatalf("BuildCodexArgs() = %v, want %v", got, want)
+			}
+		})
+	}
+
+	t.Run("new mode stdin target uses dash", func(t *testing.T) {
+		cfg := &config.Config{Mode: "new", WorkDir: `C:\repo\path`}
+		got := BuildCodexArgs(cfg, "-")
+		want := []string{"e", "--skip-git-repo-check", "-C", `C:\repo\path`, "--json", "-"}
+		if !reflect.DeepEqual(got, want) {
+			t.Fatalf("BuildCodexArgs() = %v, want %v", got, want)
+		}
+	})
+}
+
+func TestBuildCodexArgs_ResumeMode_OmitsWorkdir(t *testing.T) {
+	t.Setenv("CODEX_BYPASS_SANDBOX", "false")
+
+	cfg := &config.Config{Mode: "resume", SessionID: "sid-123", WorkDir: `C:\repo\path`}
+	got := BuildCodexArgs(cfg, "-")
+	want := []string{"e", "--skip-git-repo-check", "--json", "resume", "sid-123", "-"}
+	if !reflect.DeepEqual(got, want) {
+		t.Fatalf("BuildCodexArgs() = %v, want %v", got, want)
+	}
+}
@@ -7,8 +7,6 @@ import (
 	"strings"
 	"sync"
 
-	ilogger "codeagent-wrapper/internal/logger"
-
 	"github.com/goccy/go-json"
 )
 
@@ -18,14 +16,16 @@ type BackendConfig struct {
 }
 
 type AgentModelConfig struct {
 	Backend     string `json:"backend"`
 	Model       string `json:"model"`
 	PromptFile  string `json:"prompt_file,omitempty"`
 	Description string `json:"description,omitempty"`
 	Yolo        bool   `json:"yolo,omitempty"`
 	Reasoning   string `json:"reasoning,omitempty"`
 	BaseURL     string `json:"base_url,omitempty"`
 	APIKey      string `json:"api_key,omitempty"`
+	AllowedTools    []string `json:"allowed_tools,omitempty"`
+	DisallowedTools []string `json:"disallowed_tools,omitempty"`
 }
 
 type ModelsConfig struct {
@@ -35,80 +35,85 @@ type ModelsConfig struct {
 	Backends map[string]BackendConfig `json:"backends,omitempty"`
 }
 
-var defaultModelsConfig = ModelsConfig{
-	DefaultBackend: "opencode",
-	DefaultModel:   "opencode/grok-code",
-	Agents: map[string]AgentModelConfig{
-		"oracle":    {Backend: "claude", Model: "claude-opus-4-5-20251101", PromptFile: "~/.claude/skills/omo/references/oracle.md", Description: "Technical advisor"},
-		"librarian": {Backend: "claude", Model: "claude-sonnet-4-5-20250929", PromptFile: "~/.claude/skills/omo/references/librarian.md", Description: "Researcher"},
-		"explore":   {Backend: "opencode", Model: "opencode/grok-code", PromptFile: "~/.claude/skills/omo/references/explore.md", Description: "Code search"},
-		"develop":   {Backend: "codex", Model: "", PromptFile: "~/.claude/skills/omo/references/develop.md", Description: "Code development"},
-		"frontend-ui-ux-engineer": {Backend: "gemini", Model: "", PromptFile: "~/.claude/skills/omo/references/frontend-ui-ux-engineer.md", Description: "Frontend engineer"},
-		"document-writer":         {Backend: "gemini", Model: "", PromptFile: "~/.claude/skills/omo/references/document-writer.md", Description: "Documentation"},
-	},
-}
+var defaultModelsConfig = ModelsConfig{}
+
+const modelsConfigTildePath = "~/.codeagent/models.json"
+
+const modelsConfigExample = `{
+  "default_backend": "codex",
+  "default_model": "gpt-4.1",
+  "backends": {
+    "codex": { "api_key": "..." },
+    "claude": { "api_key": "..." }
+  },
+  "agents": {
+    "develop": {
+      "backend": "codex",
+      "model": "gpt-4.1",
+      "prompt_file": "~/.codeagent/prompts/develop.md",
+      "reasoning": "high",
+      "yolo": true
+    }
+  }
+}`
 
 var (
 	modelsConfigOnce   sync.Once
 	modelsConfigCached *ModelsConfig
+	modelsConfigErr    error
 )
 
-func modelsConfig() *ModelsConfig {
+func modelsConfig() (*ModelsConfig, error) {
 	modelsConfigOnce.Do(func() {
-		modelsConfigCached = loadModelsConfig()
+		modelsConfigCached, modelsConfigErr = loadModelsConfig()
 	})
-	if modelsConfigCached == nil {
-		return &defaultModelsConfig
-	}
-	return modelsConfigCached
+	return modelsConfigCached, modelsConfigErr
 }
 
-func loadModelsConfig() *ModelsConfig {
+func modelsConfigPath() (string, error) {
 	home, err := os.UserHomeDir()
-	if err != nil {
-		ilogger.LogWarn(fmt.Sprintf("Failed to resolve home directory for models config: %v; using defaults", err))
-		return &defaultModelsConfig
+	if err != nil || strings.TrimSpace(home) == "" {
+		return "", fmt.Errorf("failed to resolve user home directory: %w", err)
 	}
 
 	configDir := filepath.Clean(filepath.Join(home, ".codeagent"))
 	configPath := filepath.Clean(filepath.Join(configDir, "models.json"))
 	rel, err := filepath.Rel(configDir, configPath)
 	if err != nil || rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
-		return &defaultModelsConfig
+		return "", fmt.Errorf("refusing to read models config outside %s: %s", configDir, configPath)
+	}
+	return configPath, nil
+}
+
+func modelsConfigHint(configPath string) string {
+	configPath = strings.TrimSpace(configPath)
+	if configPath == "" {
+		return fmt.Sprintf("Create %s with e.g.:\n%s", modelsConfigTildePath, modelsConfigExample)
+	}
+	return fmt.Sprintf("Create %s (resolved to %s) with e.g.:\n%s", modelsConfigTildePath, configPath, modelsConfigExample)
+}
+
+func loadModelsConfig() (*ModelsConfig, error) {
+	configPath, err := modelsConfigPath()
+	if err != nil {
+		return nil, fmt.Errorf("%w\n\n%s", err, modelsConfigHint(""))
 	}
 
 	data, err := os.ReadFile(configPath) // #nosec G304 -- path is fixed under user home and validated to stay within configDir
 	if err != nil {
-		if !os.IsNotExist(err) {
-			ilogger.LogWarn(fmt.Sprintf("Failed to read models config %s: %v; using defaults", configPath, err))
+		if os.IsNotExist(err) {
+			return nil, fmt.Errorf("models config not found: %s\n\n%s", configPath, modelsConfigHint(configPath))
 		}
-		return &defaultModelsConfig
+		return nil, fmt.Errorf("failed to read models config %s: %w\n\n%s", configPath, err, modelsConfigHint(configPath))
 	}
 
 	var cfg ModelsConfig
 	if err := json.Unmarshal(data, &cfg); err != nil {
-		ilogger.LogWarn(fmt.Sprintf("Failed to parse models config %s: %v; using defaults", configPath, err))
-		return &defaultModelsConfig
+		return nil, fmt.Errorf("failed to parse models config %s: %w\n\n%s", configPath, err, modelsConfigHint(configPath))
 	}
 
 	cfg.DefaultBackend = strings.TrimSpace(cfg.DefaultBackend)
-	if cfg.DefaultBackend == "" {
-		cfg.DefaultBackend = defaultModelsConfig.DefaultBackend
-	}
 	cfg.DefaultModel = strings.TrimSpace(cfg.DefaultModel)
-	if cfg.DefaultModel == "" {
-		cfg.DefaultModel = defaultModelsConfig.DefaultModel
-	}
-
-	// Merge with defaults
-	for name, agent := range defaultModelsConfig.Agents {
-		if _, exists := cfg.Agents[name]; !exists {
-			if cfg.Agents == nil {
-				cfg.Agents = make(map[string]AgentModelConfig)
-			}
-			cfg.Agents[name] = agent
-		}
-	}
 
 	// Normalize backend keys so lookups can be case-insensitive.
 	if len(cfg.Backends) > 0 {
@@ -127,7 +132,7 @@ func loadModelsConfig() *ModelsConfig {
 		}
 	}
 
-	return &cfg
+	return &cfg, nil
 }
 
 func LoadDynamicAgent(name string) (AgentModelConfig, bool) {
@@ -150,7 +155,10 @@ func LoadDynamicAgent(name string) (AgentModelConfig, bool) {
 }
 
 func ResolveBackendConfig(backendName string) (baseURL, apiKey string) {
-	cfg := modelsConfig()
+	cfg, err := modelsConfig()
+	if err != nil || cfg == nil {
+		return "", ""
+	}
 	resolved := resolveBackendConfig(cfg, backendName)
 	return strings.TrimSpace(resolved.BaseURL), strings.TrimSpace(resolved.APIKey)
 }
@@ -172,12 +180,30 @@ func resolveBackendConfig(cfg *ModelsConfig, backendName string) BackendConfig {
 	return BackendConfig{}
 }
 
-func resolveAgentConfig(agentName string) (backend, model, promptFile, reasoning, baseURL, apiKey string, yolo bool) {
-	cfg := modelsConfig()
+func resolveAgentConfig(agentName string) (backend, model, promptFile, reasoning, baseURL, apiKey string, yolo bool, allowedTools, disallowedTools []string, err error) {
+	if err := ValidateAgentName(agentName); err != nil {
+		return "", "", "", "", "", "", false, nil, nil, err
+	}
+
+	cfg, err := modelsConfig()
+	if err != nil {
+		return "", "", "", "", "", "", false, nil, nil, err
+	}
+	if cfg == nil {
+		return "", "", "", "", "", "", false, nil, nil, fmt.Errorf("models config is nil\n\n%s", modelsConfigHint(""))
+	}
+
 	if agent, ok := cfg.Agents[agentName]; ok {
 		backend = strings.TrimSpace(agent.Backend)
 		if backend == "" {
-			backend = cfg.DefaultBackend
+			backend = strings.TrimSpace(cfg.DefaultBackend)
+			if backend == "" {
+				configPath, pathErr := modelsConfigPath()
+				if pathErr != nil {
+					return "", "", "", "", "", "", false, nil, nil, fmt.Errorf("agent %q has empty backend and default_backend is not set\n\n%s", agentName, modelsConfigHint(""))
+				}
+				return "", "", "", "", "", "", false, nil, nil, fmt.Errorf("agent %q has empty backend and default_backend is not set\n\n%s", agentName, modelsConfigHint(configPath))
+			}
 		}
 		backendCfg := resolveBackendConfig(cfg, backend)
 
@@ -190,31 +216,46 @@ func resolveAgentConfig(agentName string) (backend, model, promptFile, reasoning
 			apiKey = strings.TrimSpace(backendCfg.APIKey)
 		}
 
-		return backend, strings.TrimSpace(agent.Model), agent.PromptFile, agent.Reasoning, baseURL, apiKey, agent.Yolo
+		model = strings.TrimSpace(agent.Model)
+		if model == "" {
+			configPath, pathErr := modelsConfigPath()
+			if pathErr != nil {
+				return "", "", "", "", "", "", false, nil, nil, fmt.Errorf("agent %q has empty model; set agents.%s.model in %s\n\n%s", agentName, agentName, modelsConfigTildePath, modelsConfigHint(""))
+			}
+			return "", "", "", "", "", "", false, nil, nil, fmt.Errorf("agent %q has empty model; set agents.%s.model in %s\n\n%s", agentName, agentName, modelsConfigTildePath, modelsConfigHint(configPath))
+		}
+		return backend, model, agent.PromptFile, agent.Reasoning, baseURL, apiKey, agent.Yolo, agent.AllowedTools, agent.DisallowedTools, nil
 	}
 
 	if dynamic, ok := LoadDynamicAgent(agentName); ok {
-		backend = cfg.DefaultBackend
-		model = cfg.DefaultModel
+		backend = strings.TrimSpace(cfg.DefaultBackend)
+		model = strings.TrimSpace(cfg.DefaultModel)
+		configPath, pathErr := modelsConfigPath()
+		if backend == "" || model == "" {
+			if pathErr != nil {
+				return "", "", "", "", "", "", false, nil, nil, fmt.Errorf("dynamic agent %q requires default_backend and default_model to be set in %s\n\n%s", agentName, modelsConfigTildePath, modelsConfigHint(""))
|
||||||
|
}
|
||||||
|
return "", "", "", "", "", "", false, nil, nil, fmt.Errorf("dynamic agent %q requires default_backend and default_model to be set in %s\n\n%s", agentName, modelsConfigTildePath, modelsConfigHint(configPath))
|
||||||
|
}
|
||||||
backendCfg := resolveBackendConfig(cfg, backend)
|
backendCfg := resolveBackendConfig(cfg, backend)
|
||||||
baseURL = strings.TrimSpace(backendCfg.BaseURL)
|
baseURL = strings.TrimSpace(backendCfg.BaseURL)
|
||||||
apiKey = strings.TrimSpace(backendCfg.APIKey)
|
apiKey = strings.TrimSpace(backendCfg.APIKey)
|
||||||
return backend, model, dynamic.PromptFile, "", baseURL, apiKey, false
|
return backend, model, dynamic.PromptFile, "", baseURL, apiKey, false, nil, nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
backend = cfg.DefaultBackend
|
configPath, pathErr := modelsConfigPath()
|
||||||
model = cfg.DefaultModel
|
if pathErr != nil {
|
||||||
backendCfg := resolveBackendConfig(cfg, backend)
|
return "", "", "", "", "", "", false, nil, nil, fmt.Errorf("agent %q not found in %s\n\n%s", agentName, modelsConfigTildePath, modelsConfigHint(""))
|
||||||
baseURL = strings.TrimSpace(backendCfg.BaseURL)
|
}
|
||||||
apiKey = strings.TrimSpace(backendCfg.APIKey)
|
return "", "", "", "", "", "", false, nil, nil, fmt.Errorf("agent %q not found in %s\n\n%s", agentName, modelsConfigTildePath, modelsConfigHint(configPath))
|
||||||
return backend, model, "", "", baseURL, apiKey, false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func ResolveAgentConfig(agentName string) (backend, model, promptFile, reasoning, baseURL, apiKey string, yolo bool) {
|
func ResolveAgentConfig(agentName string) (backend, model, promptFile, reasoning, baseURL, apiKey string, yolo bool, allowedTools, disallowedTools []string, err error) {
|
||||||
return resolveAgentConfig(agentName)
|
return resolveAgentConfig(agentName)
|
||||||
}
|
}
|
||||||
|
|
||||||
func ResetModelsConfigCacheForTest() {
|
func ResetModelsConfigCacheForTest() {
|
||||||
modelsConfigCached = nil
|
modelsConfigCached = nil
|
||||||
|
modelsConfigErr = nil
|
||||||
modelsConfigOnce = sync.Once{}
|
modelsConfigOnce = sync.Once{}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,78 +3,43 @@ package config
 import (
 	"os"
 	"path/filepath"
+	"strings"
 	"testing"
 )
 
-func TestResolveAgentConfig_Defaults(t *testing.T) {
+func TestResolveAgentConfig_NoConfig_ReturnsHelpfulError(t *testing.T) {
 	home := t.TempDir()
 	t.Setenv("HOME", home)
 	t.Setenv("USERPROFILE", home)
 	t.Cleanup(ResetModelsConfigCacheForTest)
 	ResetModelsConfigCacheForTest()
 
-	// Test that default agents resolve correctly without config file
-	tests := []struct {
-		agent          string
-		wantBackend    string
-		wantModel      string
-		wantPromptFile string
-	}{
-		{"oracle", "claude", "claude-opus-4-5-20251101", "~/.claude/skills/omo/references/oracle.md"},
-		{"librarian", "claude", "claude-sonnet-4-5-20250929", "~/.claude/skills/omo/references/librarian.md"},
-		{"explore", "opencode", "opencode/grok-code", "~/.claude/skills/omo/references/explore.md"},
-		{"frontend-ui-ux-engineer", "gemini", "", "~/.claude/skills/omo/references/frontend-ui-ux-engineer.md"},
-		{"document-writer", "gemini", "", "~/.claude/skills/omo/references/document-writer.md"},
+	_, _, _, _, _, _, _, _, _, err := ResolveAgentConfig("develop")
+	if err == nil {
+		t.Fatalf("expected error, got nil")
 	}
-
-	for _, tt := range tests {
-		t.Run(tt.agent, func(t *testing.T) {
-			backend, model, promptFile, _, _, _, _ := resolveAgentConfig(tt.agent)
-			if backend != tt.wantBackend {
-				t.Errorf("backend = %q, want %q", backend, tt.wantBackend)
-			}
-			if model != tt.wantModel {
-				t.Errorf("model = %q, want %q", model, tt.wantModel)
-			}
-			if promptFile != tt.wantPromptFile {
-				t.Errorf("promptFile = %q, want %q", promptFile, tt.wantPromptFile)
-			}
-		})
+	msg := err.Error()
+	if !strings.Contains(msg, modelsConfigTildePath) {
+		t.Fatalf("error should mention %s, got: %s", modelsConfigTildePath, msg)
 	}
-}
-
-func TestResolveAgentConfig_UnknownAgent(t *testing.T) {
-	home := t.TempDir()
-	t.Setenv("HOME", home)
-	t.Setenv("USERPROFILE", home)
-	t.Cleanup(ResetModelsConfigCacheForTest)
-	ResetModelsConfigCacheForTest()
-
-	backend, model, promptFile, _, _, _, _ := resolveAgentConfig("unknown-agent")
-	if backend != "opencode" {
-		t.Errorf("unknown agent backend = %q, want %q", backend, "opencode")
+	if !strings.Contains(msg, filepath.Join(home, ".codeagent", "models.json")) {
+		t.Fatalf("error should mention resolved config path, got: %s", msg)
 	}
-	if model != "opencode/grok-code" {
-		t.Errorf("unknown agent model = %q, want %q", model, "opencode/grok-code")
-	}
-	if promptFile != "" {
-		t.Errorf("unknown agent promptFile = %q, want empty", promptFile)
+	if !strings.Contains(msg, "\"agents\"") {
+		t.Fatalf("error should include example config, got: %s", msg)
 	}
 }
 
 func TestLoadModelsConfig_NoFile(t *testing.T) {
-	home := "/nonexistent/path/that/does/not/exist"
+	home := t.TempDir()
 	t.Setenv("HOME", home)
 	t.Setenv("USERPROFILE", home)
 	t.Cleanup(ResetModelsConfigCacheForTest)
 	ResetModelsConfigCacheForTest()
 
-	cfg := loadModelsConfig()
-	if cfg.DefaultBackend != "opencode" {
-		t.Errorf("DefaultBackend = %q, want %q", cfg.DefaultBackend, "opencode")
-	}
-	if len(cfg.Agents) != 6 {
-		t.Errorf("len(Agents) = %d, want 6", len(cfg.Agents))
+	_, err := loadModelsConfig()
+	if err == nil {
+		t.Fatalf("expected error, got nil")
 	}
 }
 
@@ -119,7 +84,10 @@ func TestLoadModelsConfig_WithFile(t *testing.T) {
 	t.Cleanup(ResetModelsConfigCacheForTest)
 	ResetModelsConfigCacheForTest()
 
-	cfg := loadModelsConfig()
+	cfg, err := loadModelsConfig()
+	if err != nil {
+		t.Fatalf("loadModelsConfig: %v", err)
+	}
 
 	if cfg.DefaultBackend != "claude" {
 		t.Errorf("DefaultBackend = %q, want %q", cfg.DefaultBackend, "claude")
@@ -140,9 +108,8 @@ func TestLoadModelsConfig_WithFile(t *testing.T) {
 		}
 	}
 
-	// Check that defaults are merged
-	if _, ok := cfg.Agents["oracle"]; !ok {
-		t.Error("default agent oracle should be merged")
+	if _, ok := cfg.Agents["oracle"]; ok {
+		t.Error("oracle should not be present without explicit config")
 	}
 
 	baseURL, apiKey := ResolveBackendConfig("claude")
@@ -153,7 +120,10 @@ func TestLoadModelsConfig_WithFile(t *testing.T) {
 		t.Errorf("ResolveBackendConfig(apiKey) = %q, want %q", apiKey, "backend-key")
 	}
 
-	backend, model, _, _, agentBaseURL, agentAPIKey, _ := ResolveAgentConfig("custom-agent")
+	backend, model, _, _, agentBaseURL, agentAPIKey, _, _, _, err := ResolveAgentConfig("custom-agent")
+	if err != nil {
+		t.Fatalf("ResolveAgentConfig(custom-agent): %v", err)
+	}
 	if backend != "codex" {
 		t.Errorf("ResolveAgentConfig(backend) = %q, want %q", backend, "codex")
 	}
@@ -183,12 +153,26 @@ func TestResolveAgentConfig_DynamicAgent(t *testing.T) {
 		t.Fatalf("WriteFile: %v", err)
 	}
 
-	backend, model, promptFile, _, _, _, _ := resolveAgentConfig("sarsh")
-	if backend != "opencode" {
-		t.Errorf("backend = %q, want %q", backend, "opencode")
+	configDir := filepath.Join(home, ".codeagent")
+	if err := os.MkdirAll(configDir, 0o755); err != nil {
+		t.Fatalf("MkdirAll: %v", err)
 	}
-	if model != "opencode/grok-code" {
-		t.Errorf("model = %q, want %q", model, "opencode/grok-code")
+	if err := os.WriteFile(filepath.Join(configDir, "models.json"), []byte(`{
+		"default_backend": "codex",
+		"default_model": "gpt-test"
+	}`), 0o644); err != nil {
+		t.Fatalf("WriteFile: %v", err)
+	}
+
+	backend, model, promptFile, _, _, _, _, _, _, err := ResolveAgentConfig("sarsh")
+	if err != nil {
+		t.Fatalf("ResolveAgentConfig(sarsh): %v", err)
+	}
+	if backend != "codex" {
+		t.Errorf("backend = %q, want %q", backend, "codex")
+	}
+	if model != "gpt-test" {
+		t.Errorf("model = %q, want %q", model, "gpt-test")
 	}
 	if promptFile != "~/.codeagent/agents/sarsh.md" {
 		t.Errorf("promptFile = %q, want %q", promptFile, "~/.codeagent/agents/sarsh.md")
@@ -213,9 +197,66 @@ func TestLoadModelsConfig_InvalidJSON(t *testing.T) {
 	t.Cleanup(ResetModelsConfigCacheForTest)
 	ResetModelsConfigCacheForTest()
 
-	cfg := loadModelsConfig()
-	// Should fall back to defaults
-	if cfg.DefaultBackend != "opencode" {
-		t.Errorf("invalid JSON should fallback, got DefaultBackend = %q", cfg.DefaultBackend)
+	_, err := loadModelsConfig()
+	if err == nil {
+		t.Fatalf("expected error, got nil")
+	}
+}
+
+func TestResolveAgentConfig_UnknownAgent_ReturnsError(t *testing.T) {
+	home := t.TempDir()
+	t.Setenv("HOME", home)
+	t.Setenv("USERPROFILE", home)
+	t.Cleanup(ResetModelsConfigCacheForTest)
+	ResetModelsConfigCacheForTest()
+
+	configDir := filepath.Join(home, ".codeagent")
+	if err := os.MkdirAll(configDir, 0o755); err != nil {
+		t.Fatalf("MkdirAll: %v", err)
+	}
+	if err := os.WriteFile(filepath.Join(configDir, "models.json"), []byte(`{
+		"default_backend": "codex",
+		"default_model": "gpt-test",
+		"agents": {
+			"develop": { "backend": "codex", "model": "gpt-test" }
+		}
+	}`), 0o644); err != nil {
+		t.Fatalf("WriteFile: %v", err)
+	}
+
+	_, _, _, _, _, _, _, _, _, err := ResolveAgentConfig("unknown-agent")
+	if err == nil {
+		t.Fatalf("expected error, got nil")
+	}
+	if !strings.Contains(err.Error(), "unknown-agent") {
+		t.Fatalf("error should mention agent name, got: %s", err.Error())
+	}
+}
+
+func TestResolveAgentConfig_EmptyModel_ReturnsError(t *testing.T) {
+	home := t.TempDir()
+	t.Setenv("HOME", home)
+	t.Setenv("USERPROFILE", home)
+	t.Cleanup(ResetModelsConfigCacheForTest)
+	ResetModelsConfigCacheForTest()
+
+	configDir := filepath.Join(home, ".codeagent")
+	if err := os.MkdirAll(configDir, 0o755); err != nil {
+		t.Fatalf("MkdirAll: %v", err)
+	}
+	if err := os.WriteFile(filepath.Join(configDir, "models.json"), []byte(`{
+		"agents": {
+			"bad-agent": { "backend": "codex", "model": " " }
+		}
+	}`), 0o644); err != nil {
+		t.Fatalf("WriteFile: %v", err)
+	}
+
+	_, _, _, _, _, _, _, _, _, err := ResolveAgentConfig("bad-agent")
+	if err == nil {
+		t.Fatalf("expected error, got nil")
+	}
+	if !strings.Contains(strings.ToLower(err.Error()), "empty model") {
+		t.Fatalf("error should mention empty model, got: %s", err.Error())
 	}
 }
@@ -24,6 +24,10 @@ type Config struct {
 	SkipPermissions    bool
 	Yolo               bool
 	MaxParallelWorkers int
+	AllowedTools       []string
+	DisallowedTools    []string
+	Skills             []string
+	Worktree           bool // Execute in a new git worktree
 }
 
 // EnvFlagEnabled returns true when the environment variable exists and is not
@@ -36,17 +36,18 @@ func TestEnvInjectionWithAgent(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	// Override HOME to use temp dir
-	oldHome := os.Getenv("HOME")
-	os.Setenv("HOME", tmpDir)
-	defer os.Setenv("HOME", oldHome)
+	t.Setenv("HOME", tmpDir)
+	t.Setenv("USERPROFILE", tmpDir)
 
 	// Reset config cache
 	config.ResetModelsConfigCacheForTest()
 	defer config.ResetModelsConfigCacheForTest()
 
 	// Test ResolveAgentConfig
-	agentBackend, model, _, _, baseURL, apiKey, _ := config.ResolveAgentConfig("test-agent")
+	agentBackend, model, _, _, baseURL, apiKey, _, _, _, err := config.ResolveAgentConfig("test-agent")
+	if err != nil {
+		t.Fatalf("ResolveAgentConfig: %v", err)
+	}
 	t.Logf("ResolveAgentConfig: backend=%q, model=%q, baseURL=%q, apiKey=%q",
 		agentBackend, model, baseURL, apiKey)
 
@@ -71,8 +72,8 @@ func TestEnvInjectionWithAgent(t *testing.T) {
 	if env["ANTHROPIC_BASE_URL"] != baseURL {
 		t.Errorf("expected ANTHROPIC_BASE_URL=%q, got %q", baseURL, env["ANTHROPIC_BASE_URL"])
 	}
-	if env["ANTHROPIC_AUTH_TOKEN"] != apiKey {
-		t.Errorf("expected ANTHROPIC_AUTH_TOKEN=%q, got %q", apiKey, env["ANTHROPIC_AUTH_TOKEN"])
+	if env["ANTHROPIC_API_KEY"] != apiKey {
+		t.Errorf("expected ANTHROPIC_API_KEY=%q, got %q", apiKey, env["ANTHROPIC_API_KEY"])
 	}
 }
 
@@ -101,9 +102,8 @@ func TestEnvInjectionLogic(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	oldHome := os.Getenv("HOME")
-	os.Setenv("HOME", tmpDir)
-	defer os.Setenv("HOME", oldHome)
+	t.Setenv("HOME", tmpDir)
+	t.Setenv("USERPROFILE", tmpDir)
 
 	config.ResetModelsConfigCacheForTest()
 	defer config.ResetModelsConfigCacheForTest()
@@ -118,7 +118,10 @@ func TestEnvInjectionLogic(t *testing.T) {
 
 	// Step 2: If agent specified, get agent config
 	if agentName != "" {
-		agentBackend, _, _, _, agentBaseURL, agentAPIKey, _ := config.ResolveAgentConfig(agentName)
+		agentBackend, _, _, _, agentBaseURL, agentAPIKey, _, _, _, err := config.ResolveAgentConfig(agentName)
+		if err != nil {
+			t.Fatalf("ResolveAgentConfig(%q): %v", agentName, err)
+		}
 		t.Logf("Step 2 - ResolveAgentConfig(%q): backend=%q, baseURL=%q, apiKey=%q",
 			agentName, agentBackend, agentBaseURL, agentAPIKey)
 
@@ -146,8 +149,8 @@ func TestEnvInjectionLogic(t *testing.T) {
 		t.Errorf("ANTHROPIC_BASE_URL: expected %q, got %q", expectedURL, injected["ANTHROPIC_BASE_URL"])
 	}
 
-	if _, ok := injected["ANTHROPIC_AUTH_TOKEN"]; !ok {
-		t.Error("ANTHROPIC_AUTH_TOKEN not set")
+	if _, ok := injected["ANTHROPIC_API_KEY"]; !ok {
+		t.Error("ANTHROPIC_API_KEY not set")
 	}
 
 	// Step 5: Test masking
@@ -16,7 +16,7 @@ func TestMaskSensitiveValue(t *testing.T) {
 	}{
 		{
 			name:     "API_KEY with long value",
-			key:      "ANTHROPIC_AUTH_TOKEN",
+			key:      "ANTHROPIC_API_KEY",
 			value:    "sk-ant-api03-xxxxxxxxxxxxxxxxxxxxxxxxxxxx",
 			expected: "sk-a****xxxx",
 		},
@@ -180,7 +180,7 @@ func TestClaudeBackendEnv(t *testing.T) {
 			name:       "both base_url and api_key",
 			baseURL:    "https://api.custom.com",
 			apiKey:     "sk-test-key-12345",
-			expectKeys: []string{"ANTHROPIC_BASE_URL", "ANTHROPIC_AUTH_TOKEN"},
+			expectKeys: []string{"ANTHROPIC_BASE_URL", "ANTHROPIC_API_KEY"},
 		},
 		{
 			name: "only base_url",
@@ -192,7 +192,7 @@ func TestClaudeBackendEnv(t *testing.T) {
 			name:       "only api_key",
 			baseURL:    "",
 			apiKey:     "sk-test-key-12345",
-			expectKeys: []string{"ANTHROPIC_AUTH_TOKEN"},
+			expectKeys: []string{"ANTHROPIC_API_KEY"},
 		},
 		{
 			name: "both empty",
@@ -237,8 +237,8 @@ func TestClaudeBackendEnv(t *testing.T) {
 				}
 			}
 			if tt.apiKey != "" && strings.TrimSpace(tt.apiKey) != "" {
-				if env["ANTHROPIC_AUTH_TOKEN"] != strings.TrimSpace(tt.apiKey) {
-					t.Errorf("ANTHROPIC_AUTH_TOKEN = %q, want %q", env["ANTHROPIC_AUTH_TOKEN"], strings.TrimSpace(tt.apiKey))
+				if env["ANTHROPIC_API_KEY"] != strings.TrimSpace(tt.apiKey) {
+					t.Errorf("ANTHROPIC_API_KEY = %q, want %q", env["ANTHROPIC_API_KEY"], strings.TrimSpace(tt.apiKey))
 				}
 			}
 		})
@@ -267,7 +267,7 @@ func TestEnvLoggingIntegration(t *testing.T) {
 			}
 		}
 
-		if k == "ANTHROPIC_AUTH_TOKEN" {
+		if k == "ANTHROPIC_API_KEY" {
 			// API key should be masked
 			if masked == v {
 				t.Errorf("API_KEY should be masked, but got original value")
@@ -65,11 +65,8 @@ func TestEnvInjection_LogsToStderrAndMasksKey(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	oldHome := os.Getenv("HOME")
-	if err := os.Setenv("HOME", tmpDir); err != nil {
-		t.Fatal(err)
-	}
-	defer func() { _ = os.Setenv("HOME", oldHome) }()
+	t.Setenv("HOME", tmpDir)
+	t.Setenv("USERPROFILE", tmpDir)
 
 	config.ResetModelsConfigCacheForTest()
 	defer config.ResetModelsConfigCacheForTest()
@@ -120,14 +117,14 @@ func TestEnvInjection_LogsToStderrAndMasksKey(t *testing.T) {
 	if cmd.env["ANTHROPIC_BASE_URL"] != baseURL {
 		t.Fatalf("ANTHROPIC_BASE_URL=%q, want %q", cmd.env["ANTHROPIC_BASE_URL"], baseURL)
 	}
-	if cmd.env["ANTHROPIC_AUTH_TOKEN"] != apiKey {
-		t.Fatalf("ANTHROPIC_AUTH_TOKEN=%q, want %q", cmd.env["ANTHROPIC_AUTH_TOKEN"], apiKey)
+	if cmd.env["ANTHROPIC_API_KEY"] != apiKey {
+		t.Fatalf("ANTHROPIC_API_KEY=%q, want %q", cmd.env["ANTHROPIC_API_KEY"], apiKey)
 	}
 
 	if !strings.Contains(got, "Env: ANTHROPIC_BASE_URL="+baseURL) {
 		t.Fatalf("stderr missing base URL env log; stderr=%q", got)
 	}
-	if !strings.Contains(got, "Env: ANTHROPIC_AUTH_TOKEN=eyJh****test") {
+	if !strings.Contains(got, "Env: ANTHROPIC_API_KEY=eyJh****test") {
 		t.Fatalf("stderr missing masked API key log; stderr=%q", got)
 	}
 }
@@ -8,6 +8,7 @@ import (
 	"os"
 	"os/exec"
 	"os/signal"
+	"runtime"
 	"sort"
 	"strings"
 	"sync"
@@ -20,6 +21,7 @@ import (
 	ilogger "codeagent-wrapper/internal/logger"
 	parser "codeagent-wrapper/internal/parser"
 	utils "codeagent-wrapper/internal/utils"
+	"codeagent-wrapper/internal/worktree"
 )
 
 const postMessageTerminateDelay = 1 * time.Second
@@ -48,6 +50,7 @@ var (
 	selectBackendFn    = backend.Select
 	commandContext     = exec.CommandContext
 	terminateCommandFn = terminateCommand
+	createWorktreeFn   = worktree.CreateWorktree
 )
 
 var forceKillDelay atomic.Int32
@@ -253,6 +256,15 @@ func (p *realProcess) Signal(sig os.Signal) error {
 
 // newCommandRunner creates a new commandRunner (test hook injection point)
 var newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
+	if runtime.GOOS == "windows" {
+		lowerName := strings.ToLower(strings.TrimSpace(name))
+		if strings.HasSuffix(lowerName, ".bat") || strings.HasSuffix(lowerName, ".cmd") {
+			cmdArgs := make([]string, 0, 2+len(args))
+			cmdArgs = append(cmdArgs, "/c", name)
+			cmdArgs = append(cmdArgs, args...)
+			return &realCmd{cmd: commandContext(ctx, "cmd.exe", cmdArgs...)}
+		}
+	}
 	return &realCmd{cmd: commandContext(ctx, name, args...)}
 }
 
@@ -325,6 +337,16 @@ func DefaultRunCodexTaskFn(task TaskSpec, timeout int) TaskResult {
 		}
 		task.Task = WrapTaskWithAgentPrompt(prompt, task.Task)
 	}
+	// Resolve skills: explicit > auto-detect from workdir
+	skills := task.Skills
+	if len(skills) == 0 {
+		skills = DetectProjectSkills(task.WorkDir)
+	}
+	if len(skills) > 0 {
+		if content := ResolveSkillContent(skills, 0); content != "" {
+			task.Task = task.Task + "\n\n# Domain Best Practices\n\n" + content
+		}
+	}
 	if task.UseStdin || ShouldUseStdin(task.Task, false) {
 		task.UseStdin = true
 	}
@@ -895,6 +917,8 @@ func RunCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
 		ReasoningEffort: taskSpec.ReasoningEffort,
 		SkipPermissions: taskSpec.SkipPermissions,
 		Backend:         defaultBackendName,
+		AllowedTools:    taskSpec.AllowedTools,
+		DisallowedTools: taskSpec.DisallowedTools,
 	}
 
 	commandName := strings.TrimSpace(defaultCommandName)
@@ -911,6 +935,11 @@ func RunCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
 		cfg.Backend = backend.Name()
 	} else if taskSpec.Backend != "" {
 		cfg.Backend = taskSpec.Backend
+		if selectBackendFn != nil {
+			if b, err := selectBackendFn(taskSpec.Backend); err == nil {
+				argsBuilder = b.BuildArgs
+			}
+		}
 	} else if commandName != "" {
 		cfg.Backend = commandName
 	}
@@ -922,6 +951,23 @@ func RunCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
 		cfg.WorkDir = defaultWorkdir
 	}
 
+	// Handle worktree mode: check DO_WORKTREE_DIR env var first, then create if needed
+	if worktreeDir := os.Getenv("DO_WORKTREE_DIR"); worktreeDir != "" {
+		// Use existing worktree from /do setup
+		cfg.WorkDir = worktreeDir
+		logInfo(fmt.Sprintf("Using existing worktree from DO_WORKTREE_DIR: %s", worktreeDir))
+	} else if taskSpec.Worktree {
+		// Create new worktree (backward compatibility for standalone --worktree usage)
+		paths, err := createWorktreeFn(cfg.WorkDir)
+		if err != nil {
+			result.ExitCode = 1
+			result.Error = fmt.Sprintf("failed to create worktree: %v", err)
+			return result
+		}
+		cfg.WorkDir = paths.Dir
+		logInfo(fmt.Sprintf("Using worktree: %s (task_id: %s, branch: %s)", paths.Dir, paths.TaskID, paths.Branch))
+	}
+
 	if cfg.Mode == "resume" && strings.TrimSpace(cfg.SessionID) == "" {
 		result.ExitCode = 1
 		result.Error = "resume mode requires non-empty session_id"
@@ -1060,9 +1106,11 @@ func RunCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
 	if envBackend != nil {
 		baseURL, apiKey := config.ResolveBackendConfig(cfg.Backend)
 		if agentName := strings.TrimSpace(taskSpec.Agent); agentName != "" {
-			agentBackend, _, _, _, agentBaseURL, agentAPIKey, _ := config.ResolveAgentConfig(agentName)
-			if strings.EqualFold(strings.TrimSpace(agentBackend), strings.TrimSpace(cfg.Backend)) {
-				baseURL, apiKey = agentBaseURL, agentAPIKey
+			agentBackend, _, _, _, agentBaseURL, agentAPIKey, _, _, _, err := config.ResolveAgentConfig(agentName)
+			if err == nil {
+				if strings.EqualFold(strings.TrimSpace(agentBackend), strings.TrimSpace(cfg.Backend)) {
+					baseURL, apiKey = agentBaseURL, agentAPIKey
+				}
 			}
 		}
 		if injected := envBackend.Env(baseURL, apiKey); len(injected) > 0 {
@@ -1076,6 +1124,8 @@ func RunCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
 		}
 	}
 
+	injectTempEnv(cmd)
+
 	// For backends that don't support -C flag (claude, gemini), set working directory via cmd.Dir
 	// Codex passes workdir via -C flag, so we skip setting Dir for it to avoid conflicts
 	if cfg.Mode != "resume" && commandName != "codex" && cfg.WorkDir != "" {
@@ -1385,6 +1435,22 @@ waitLoop:
 	return result
 }
 
+func injectTempEnv(cmd commandRunner) {
+	if cmd == nil {
+		return
+	}
+	env := make(map[string]string, 3)
+	for _, k := range []string{"TMPDIR", "TMP", "TEMP"} {
+		if v := strings.TrimSpace(os.Getenv(k)); v != "" {
+			env[k] = v
+		}
+	}
+	if len(env) == 0 {
+		return
+	}
+	cmd.SetEnv(env)
+}
+
 func cancelReason(commandName string, ctx context.Context) string {
 	if ctx == nil {
 		return "Context cancelled"
@@ -75,6 +75,12 @@ func ParseParallelConfig(data []byte) (*ParallelConfig, error) {
 				continue
 			}
 			task.SkipPermissions = config.ParseBoolFlag(value, false)
+		case "worktree":
+			if value == "" {
+				task.Worktree = true
+				continue
+			}
+			task.Worktree = config.ParseBoolFlag(value, false)
 		case "dependencies":
 			for _, dep := range strings.Split(value, ",") {
 				dep = strings.TrimSpace(dep)
@@ -82,6 +88,13 @@ func ParseParallelConfig(data []byte) (*ParallelConfig, error) {
 					task.Dependencies = append(task.Dependencies, dep)
 				}
 			}
+		case "skills":
+			for _, s := range strings.Split(value, ",") {
+				s = strings.TrimSpace(s)
+				if s != "" {
+					task.Skills = append(task.Skills, s)
+				}
+			}
 		}
 	}
 
@@ -96,7 +109,10 @@ func ParseParallelConfig(data []byte) (*ParallelConfig, error) {
 		if err := config.ValidateAgentName(task.Agent); err != nil {
 			return nil, fmt.Errorf("task block #%d invalid agent name: %w", taskIndex, err)
 		}
-		backend, model, promptFile, reasoning, _, _, _ := config.ResolveAgentConfig(task.Agent)
+		backend, model, promptFile, reasoning, _, _, _, allowedTools, disallowedTools, err := config.ResolveAgentConfig(task.Agent)
+		if err != nil {
+			return nil, fmt.Errorf("task block #%d failed to resolve agent %q: %w", taskIndex, task.Agent, err)
+		}
 		if task.Backend == "" {
 			task.Backend = backend
 		}
@@ -107,6 +123,8 @@ func ParseParallelConfig(data []byte) (*ParallelConfig, error) {
 			task.ReasoningEffort = reasoning
 		}
 		task.PromptFile = promptFile
+		task.AllowedTools = allowedTools
+		task.DisallowedTools = disallowedTools
 	}
 
 	if task.ID == "" {
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"regexp"
 	"strings"
 )
 
@@ -128,3 +129,116 @@ func ReadAgentPromptFile(path string, allowOutsideClaudeDir bool) (string, error
 func WrapTaskWithAgentPrompt(prompt string, task string) string {
 	return "<agent-prompt>\n" + prompt + "\n</agent-prompt>\n\n" + task
 }
+
+// techSkillMap maps file-existence fingerprints to skill names.
+var techSkillMap = []struct {
+	Files  []string // any of these files → this tech
+	Skills []string
+}{
+	{Files: []string{"go.mod", "go.sum"}, Skills: []string{"golang-base-practices"}},
+	{Files: []string{"Cargo.toml"}, Skills: []string{"rust-best-practices"}},
+	{Files: []string{"pyproject.toml", "setup.py", "requirements.txt", "Pipfile"}, Skills: []string{"python-best-practices"}},
+	{Files: []string{"package.json"}, Skills: []string{"vercel-react-best-practices", "frontend-design"}},
+	{Files: []string{"vue.config.js", "vite.config.ts", "nuxt.config.ts"}, Skills: []string{"vue-web-app"}},
+}
+
+// DetectProjectSkills scans workDir for tech-stack fingerprints and returns
+// skill names that are both detected and installed at ~/.claude/skills/{name}/SKILL.md.
+func DetectProjectSkills(workDir string) []string {
+	home, err := os.UserHomeDir()
+	if err != nil {
+		return nil
+	}
+	var detected []string
+	seen := make(map[string]bool)
+	for _, entry := range techSkillMap {
+		for _, f := range entry.Files {
+			if _, err := os.Stat(filepath.Join(workDir, f)); err == nil {
+				for _, skill := range entry.Skills {
+					if seen[skill] {
+						continue
+					}
+					skillPath := filepath.Join(home, ".claude", "skills", skill, "SKILL.md")
+					if _, err := os.Stat(skillPath); err == nil {
+						detected = append(detected, skill)
+						seen[skill] = true
+					}
+				}
+				break // one matching file is enough for this entry
+			}
+		}
+	}
+	return detected
+}
+
+const defaultSkillBudget = 16000 // chars, ~4K tokens
+
+// validSkillName ensures skill names contain only safe characters to prevent path traversal
+var validSkillName = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`)
+
+// ResolveSkillContent reads SKILL.md files for the given skill names,
+// strips YAML frontmatter, wraps each in <skill> tags, and enforces a
+// character budget to prevent context bloat.
+func ResolveSkillContent(skills []string, maxBudget int) string {
+	home, err := os.UserHomeDir()
+	if err != nil {
+		return ""
+	}
+	if maxBudget <= 0 {
+		maxBudget = defaultSkillBudget
+	}
+	var sections []string
+	remaining := maxBudget
+	for _, name := range skills {
+		name = strings.TrimSpace(name)
+		if name == "" {
+			continue
+		}
+		if !validSkillName.MatchString(name) {
+			logWarn(fmt.Sprintf("skill %q: invalid name (must contain only [a-zA-Z0-9_-]), skipping", name))
+			continue
+		}
+		path := filepath.Join(home, ".claude", "skills", name, "SKILL.md")
+		data, err := os.ReadFile(path)
+		if err != nil || len(data) == 0 {
+			logWarn(fmt.Sprintf("skill %q: SKILL.md not found or empty, skipping", name))
+			continue
+		}
+		body := stripYAMLFrontmatter(strings.TrimSpace(string(data)))
+		tagOverhead := len("<skill name=\"\">") + len(name) + len("\n") + len("\n</skill>")
+		bodyBudget := remaining - tagOverhead
+		if bodyBudget <= 0 {
+			logWarn(fmt.Sprintf("skill %q: skipped, insufficient budget for tags", name))
+			break
+		}
+		if len(body) > bodyBudget {
+			logWarn(fmt.Sprintf("skill %q: truncated from %d to %d chars (budget)", name, len(body), bodyBudget))
+			body = body[:bodyBudget]
+		}
+		remaining -= len(body) + tagOverhead
+		sections = append(sections, "<skill name=\""+name+"\">\n"+body+"\n</skill>")
+		if remaining <= 0 {
+			break
+		}
+	}
+	if len(sections) == 0 {
+		return ""
+	}
+	return strings.Join(sections, "\n\n")
+}
+
+func stripYAMLFrontmatter(s string) string {
+	s = strings.ReplaceAll(s, "\r\n", "\n")
+	if !strings.HasPrefix(s, "---") {
+		return s
+	}
+	idx := strings.Index(s[3:], "\n---")
+	if idx < 0 {
+		return s
+	}
+	result := s[3+idx+4:]
+	if len(result) > 0 && result[0] == '\n' {
+		result = result[1:]
+	}
+	return strings.TrimSpace(result)
+}
343 codeagent-wrapper/internal/executor/skills_test.go Normal file
@@ -0,0 +1,343 @@
package executor

import (
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"testing"
)

// setTestHome overrides the home directory for both Unix (HOME) and Windows (USERPROFILE).
func setTestHome(t *testing.T, home string) {
	t.Helper()
	t.Setenv("HOME", home)
	if runtime.GOOS == "windows" {
		t.Setenv("USERPROFILE", home)
	}
}

// --- helper: create a temp skill dir with SKILL.md ---

func createTempSkill(t *testing.T, name, content string) string {
	t.Helper()
	home := t.TempDir()
	skillDir := filepath.Join(home, ".claude", "skills", name)
	if err := os.MkdirAll(skillDir, 0755); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(skillDir, "SKILL.md"), []byte(content), 0644); err != nil {
		t.Fatal(err)
	}
	return home
}

// --- ParseParallelConfig skills parsing tests ---

func TestParseParallelConfig_SkillsField(t *testing.T) {
	tests := []struct {
		name           string
		input          string
		taskIdx        int
		expectedSkills []string
	}{
		{
			name: "single skill",
			input: `---TASK---
id: t1
workdir: .
skills: golang-base-practices
---CONTENT---
Do something.
`,
			taskIdx:        0,
			expectedSkills: []string{"golang-base-practices"},
		},
		{
			name: "multiple comma-separated skills",
			input: `---TASK---
id: t1
workdir: .
skills: golang-base-practices, vercel-react-best-practices
---CONTENT---
Do something.
`,
			taskIdx:        0,
			expectedSkills: []string{"golang-base-practices", "vercel-react-best-practices"},
		},
		{
			name: "no skills field",
			input: `---TASK---
id: t1
workdir: .
---CONTENT---
Do something.
`,
			taskIdx:        0,
			expectedSkills: nil,
		},
		{
			name: "empty skills value",
			input: `---TASK---
id: t1
workdir: .
skills:
---CONTENT---
Do something.
`,
			taskIdx:        0,
			expectedSkills: nil,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cfg, err := ParseParallelConfig([]byte(tt.input))
			if err != nil {
				t.Fatalf("ParseParallelConfig error: %v", err)
			}
			got := cfg.Tasks[tt.taskIdx].Skills
			if len(got) != len(tt.expectedSkills) {
				t.Fatalf("skills: got %v, want %v", got, tt.expectedSkills)
			}
			for i := range got {
				if got[i] != tt.expectedSkills[i] {
					t.Errorf("skills[%d]: got %q, want %q", i, got[i], tt.expectedSkills[i])
				}
			}
		})
	}
}

// --- stripYAMLFrontmatter tests ---

func TestStripYAMLFrontmatter(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		expected string
	}{
		{
			name:     "with frontmatter",
			input:    "---\nname: test\ndescription: foo\n---\n\n# Body\nContent here.",
			expected: "# Body\nContent here.",
		},
		{
			name:     "no frontmatter",
			input:    "# Just a body\nNo frontmatter.",
			expected: "# Just a body\nNo frontmatter.",
		},
		{
			name:     "empty",
			input:    "",
			expected: "",
		},
		{
			name:     "only frontmatter",
			input:    "---\nname: test\n---",
			expected: "",
		},
		{
			name:     "frontmatter with allowed-tools",
			input:    "---\nname: do\nallowed-tools: [\"Bash\"]\n---\n\n# Skill content",
			expected: "# Skill content",
		},
		{
			name:     "CRLF line endings",
			input:    "---\r\nname: test\r\n---\r\n\r\n# Body\r\nContent.",
			expected: "# Body\nContent.",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := stripYAMLFrontmatter(tt.input)
			if got != tt.expected {
				t.Errorf("got %q, want %q", got, tt.expected)
			}
		})
	}
}

// --- DetectProjectSkills tests ---

func TestDetectProjectSkills_GoProject(t *testing.T) {
	tmpDir := t.TempDir()
	os.WriteFile(filepath.Join(tmpDir, "go.mod"), []byte("module test"), 0644)

	skills := DetectProjectSkills(tmpDir)
	// Result depends on whether golang-base-practices is installed locally
	t.Logf("detected skills for Go project: %v", skills)
}

func TestDetectProjectSkills_NoFingerprints(t *testing.T) {
	tmpDir := t.TempDir()
	skills := DetectProjectSkills(tmpDir)
	if len(skills) != 0 {
		t.Errorf("expected no skills for empty dir, got %v", skills)
	}
}

func TestDetectProjectSkills_FullStack(t *testing.T) {
	tmpDir := t.TempDir()
	os.WriteFile(filepath.Join(tmpDir, "go.mod"), []byte("module test"), 0644)
	os.WriteFile(filepath.Join(tmpDir, "package.json"), []byte(`{"name":"test"}`), 0644)

	skills := DetectProjectSkills(tmpDir)
	t.Logf("detected skills for fullstack project: %v", skills)
	seen := make(map[string]bool)
	for _, s := range skills {
		if seen[s] {
			t.Errorf("duplicate skill detected: %s", s)
		}
		seen[s] = true
	}
}

func TestDetectProjectSkills_NonexistentDir(t *testing.T) {
	skills := DetectProjectSkills("/nonexistent/path/xyz")
	if len(skills) != 0 {
		t.Errorf("expected no skills for nonexistent dir, got %v", skills)
	}
}

// --- ResolveSkillContent tests (CI-friendly with temp dirs) ---

func TestResolveSkillContent_ValidSkill(t *testing.T) {
	home := createTempSkill(t, "test-skill", "---\nname: test\n---\n\n# Test Skill\nBest practices here.")
	setTestHome(t, home)

	result := ResolveSkillContent([]string{"test-skill"}, 0)
	if result == "" {
		t.Fatal("expected non-empty content")
	}
	if !strings.Contains(result, `<skill name="test-skill">`) {
		t.Error("missing opening <skill> tag")
	}
	if !strings.Contains(result, "</skill>") {
		t.Error("missing closing </skill> tag")
	}
	if !strings.Contains(result, "# Test Skill") {
		t.Error("missing skill body content")
	}
	if strings.Contains(result, "name: test") {
		t.Error("frontmatter was not stripped")
	}
}

func TestResolveSkillContent_NonexistentSkill(t *testing.T) {
	home := t.TempDir()
	setTestHome(t, home)

	result := ResolveSkillContent([]string{"nonexistent-skill-xyz"}, 0)
	if result != "" {
		t.Errorf("expected empty for nonexistent skill, got %d bytes", len(result))
	}
}

func TestResolveSkillContent_Empty(t *testing.T) {
	if result := ResolveSkillContent(nil, 0); result != "" {
		t.Errorf("expected empty for nil, got %q", result)
	}
	if result := ResolveSkillContent([]string{}, 0); result != "" {
		t.Errorf("expected empty for empty, got %q", result)
	}
}

func TestResolveSkillContent_Budget(t *testing.T) {
	longBody := strings.Repeat("x", 500)
	home := createTempSkill(t, "big-skill", "---\nname: big\n---\n\n"+longBody)
	setTestHome(t, home)

	result := ResolveSkillContent([]string{"big-skill"}, 200)
	if result == "" {
		t.Fatal("expected non-empty even with small budget")
	}
	if len(result) > 200 {
		t.Errorf("result %d bytes exceeds budget 200", len(result))
	}
	t.Logf("budget=200, result=%d bytes", len(result))
}

func TestResolveSkillContent_MultipleSkills(t *testing.T) {
	home := t.TempDir()
	for _, name := range []string{"skill-a", "skill-b"} {
		skillDir := filepath.Join(home, ".claude", "skills", name)
		os.MkdirAll(skillDir, 0755)
		os.WriteFile(filepath.Join(skillDir, "SKILL.md"), []byte("# "+name+"\nContent."), 0644)
	}
	setTestHome(t, home)

	result := ResolveSkillContent([]string{"skill-a", "skill-b"}, 0)
	if result == "" {
		t.Fatal("expected non-empty for multiple skills")
	}
	if !strings.Contains(result, `<skill name="skill-a">`) {
		t.Error("missing skill-a tag")
	}
	if !strings.Contains(result, `<skill name="skill-b">`) {
		t.Error("missing skill-b tag")
	}
}

func TestResolveSkillContent_PathTraversal(t *testing.T) {
	home := t.TempDir()
	setTestHome(t, home)

	result := ResolveSkillContent([]string{"../../../etc/passwd"}, 0)
	if result != "" {
		t.Errorf("expected empty for path traversal name, got %d bytes", len(result))
	}
}

func TestResolveSkillContent_InvalidNames(t *testing.T) {
	home := t.TempDir()
	setTestHome(t, home)

	tests := []string{"../bad", "foo/bar", "skill name", "skill.name", "a b"}
	for _, name := range tests {
		result := ResolveSkillContent([]string{name}, 0)
		if result != "" {
			t.Errorf("expected empty for invalid name %q, got %d bytes", name, len(result))
		}
	}
}

func TestResolveSkillContent_ValidNamePattern(t *testing.T) {
	if !validSkillName.MatchString("golang-base-practices") {
		t.Error("golang-base-practices should be valid")
	}
	if !validSkillName.MatchString("my_skill_v2") {
		t.Error("my_skill_v2 should be valid")
	}
	if validSkillName.MatchString("../bad") {
		t.Error("../bad should be invalid")
	}
	if validSkillName.MatchString("") {
		t.Error("empty should be invalid")
	}
}

// --- Integration: skill injection format test ---

func TestSkillInjectionFormat(t *testing.T) {
	home := createTempSkill(t, "test-go", "---\nname: go\n---\n\n# Go Best Practices\nUse gofmt.")
	setTestHome(t, home)

	taskText := "Implement the feature."
	content := ResolveSkillContent([]string{"test-go"}, 0)
	injected := taskText + "\n\n# Domain Best Practices\n\n" + content

	if !strings.Contains(injected, "Implement the feature.") {
		t.Error("original task text lost")
	}
	if !strings.Contains(injected, "# Domain Best Practices") {
		t.Error("missing section header")
	}
	if !strings.Contains(injected, `<skill name="test-go">`) {
		t.Error("missing <skill> tag")
	}
	if !strings.Contains(injected, "Use gofmt.") {
		t.Error("missing skill body")
	}
}
@@ -21,6 +21,10 @@ type TaskSpec struct {
 	Agent           string `json:"agent,omitempty"`
 	PromptFile      string `json:"prompt_file,omitempty"`
 	SkipPermissions bool   `json:"skip_permissions,omitempty"`
+	Worktree        bool     `json:"worktree,omitempty"`
+	AllowedTools    []string `json:"allowed_tools,omitempty"`
+	DisallowedTools []string `json:"disallowed_tools,omitempty"`
+	Skills          []string `json:"skills,omitempty"`
 	Mode            string          `json:"-"`
 	UseStdin        bool            `json:"-"`
 	Context         context.Context `json:"-"`
@@ -70,12 +70,11 @@ func TestLoggerWithSuffixNamingAndIsolation(t *testing.T) {
 
 func TestLoggerWithSuffixReturnsErrorWhenTempDirNotWritable(t *testing.T) {
 	base := t.TempDir()
-	noWrite := filepath.Join(base, "ro")
-	if err := os.Mkdir(noWrite, 0o500); err != nil {
-		t.Fatalf("failed to create read-only temp dir: %v", err)
+	notDir := filepath.Join(base, "not-a-dir")
+	if err := os.WriteFile(notDir, []byte("x"), 0o644); err != nil {
+		t.Fatalf("failed to create temp file: %v", err)
 	}
-	t.Cleanup(func() { _ = os.Chmod(noWrite, 0o700) })
-	setTempDirEnv(t, noWrite)
+	setTempDirEnv(t, notDir)
 
 	logger, err := NewLoggerWithSuffix("task-err")
 	if err == nil {
@@ -26,8 +26,7 @@ func compareCleanupStats(got, want CleanupStats) bool {
 }
 
 func TestLoggerCreatesFileWithPID(t *testing.T) {
-	tempDir := t.TempDir()
-	t.Setenv("TMPDIR", tempDir)
+	tempDir := setTempDirEnv(t, t.TempDir())
 
 	logger, err := NewLogger()
 	if err != nil {
@@ -46,8 +45,7 @@ func TestLoggerCreatesFileWithPID(t *testing.T) {
 }
 
 func TestLoggerWritesLevels(t *testing.T) {
-	tempDir := t.TempDir()
-	t.Setenv("TMPDIR", tempDir)
+	setTempDirEnv(t, t.TempDir())
 
 	logger, err := NewLogger()
 	if err != nil {
@@ -77,8 +75,7 @@ func TestLoggerWritesLevels(t *testing.T) {
 }
 
 func TestLoggerCloseStopsWorkerAndKeepsFile(t *testing.T) {
-	tempDir := t.TempDir()
-	t.Setenv("TMPDIR", tempDir)
+	setTempDirEnv(t, t.TempDir())
 
 	logger, err := NewLogger()
 	if err != nil {
@@ -104,8 +101,7 @@ func TestLoggerCloseStopsWorkerAndKeepsFile(t *testing.T) {
 }
 
 func TestLoggerConcurrentWritesSafe(t *testing.T) {
-	tempDir := t.TempDir()
-	t.Setenv("TMPDIR", tempDir)
+	setTempDirEnv(t, t.TempDir())
 
 	logger, err := NewLogger()
 	if err != nil {
@@ -390,12 +386,14 @@ func TestLoggerCleanupOldLogsPerformanceBound(t *testing.T) {
 	fakePaths := make([]string, fileCount)
 	for i := 0; i < fileCount; i++ {
 		name := fmt.Sprintf("codeagent-wrapper-%d.log", 10000+i)
-		fakePaths[i] = createTempLog(t, tempDir, name)
+		fakePaths[i] = filepath.Join(tempDir, name)
 	}
 
 	stubGlobLogFiles(t, func(pattern string) ([]string, error) {
 		return fakePaths, nil
 	})
+	stubFileStat(t, func(string) (os.FileInfo, error) { return fakeFileInfo{}, nil })
+	stubEvalSymlinks(t, func(path string) (string, error) { return path, nil })
 	stubProcessRunning(t, func(int) bool { return false })
 	stubProcessStartTime(t, func(int) time.Time { return time.Time{} })
 
@@ -542,8 +540,7 @@ func TestLoggerIsUnsafeFileSecurityChecks(t *testing.T) {
 }
 
 func TestLoggerPathAndRemove(t *testing.T) {
-	tempDir := t.TempDir()
-	t.Setenv("TMPDIR", tempDir)
+	setTempDirEnv(t, t.TempDir())
 
 	logger, err := NewLoggerWithSuffix("sample")
 	if err != nil {
@@ -19,7 +19,7 @@ func TestTruncate(t *testing.T) {
|
|||||||
{"zero maxLen", "hello", 0, "..."},
|
{"zero maxLen", "hello", 0, "..."},
|
||||||
{"negative maxLen", "hello", -1, ""},
|
{"negative maxLen", "hello", -1, ""},
|
||||||
{"maxLen 1", "hello", 1, "h..."},
|
{"maxLen 1", "hello", 1, "h..."},
|
||||||
{"unicode bytes truncate", "你好世界", 10, "你好世\xe7..."}, // Truncate works on bytes, not runes
|
{"unicode bytes truncate", "你好世界", 10, "你好世\xe7..."}, // Truncate works on bytes, not runes
|
||||||
{"mixed truncate", "hello世界abc", 7, "hello\xe4\xb8..."}, // byte-based truncation
|
{"mixed truncate", "hello世界abc", 7, "hello\xe4\xb8..."}, // byte-based truncation
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
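The expectations in this table imply byte-oriented truncation. A minimal implementation consistent with these cases could look like the sketch below; it is an illustration inferred from the test table, not the actual source:

```go
// Truncate shortens s to at most maxLen bytes, appending "..." when it cuts.
// Negative maxLen yields ""; truncation is byte-based, so multi-byte runes may be split.
func Truncate(s string, maxLen int) string {
	if maxLen < 0 {
		return ""
	}
	if len(s) <= maxLen {
		return s
	}
	return s[:maxLen] + "..."
}
```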
97 codeagent-wrapper/internal/worktree/worktree.go Normal file
@@ -0,0 +1,97 @@
package worktree

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"io"
	"os/exec"
	"path/filepath"
	"strings"
	"time"
)

// Paths contains worktree information
type Paths struct {
	Dir    string // .worktrees/do-{task_id}/
	Branch string // do/{task_id}
	TaskID string // auto-generated task_id
}

// Hook points for testing
var (
	randReader  io.Reader = rand.Reader
	timeNowFunc           = time.Now
	execCommand           = exec.Command
)

// generateTaskID creates a unique task ID in format: YYYYMMDD-{6 hex chars}
func generateTaskID() (string, error) {
	bytes := make([]byte, 3)
	if _, err := io.ReadFull(randReader, bytes); err != nil {
		return "", fmt.Errorf("failed to generate random bytes: %w", err)
	}
	date := timeNowFunc().Format("20060102")
	return fmt.Sprintf("%s-%s", date, hex.EncodeToString(bytes)), nil
}

// isGitRepo checks if the given directory is inside a git repository
func isGitRepo(dir string) bool {
	cmd := execCommand("git", "-C", dir, "rev-parse", "--is-inside-work-tree")
	output, err := cmd.Output()
	if err != nil {
		return false
	}
	return strings.TrimSpace(string(output)) == "true"
}

// getGitRoot returns the root directory of the git repository
func getGitRoot(dir string) (string, error) {
	cmd := execCommand("git", "-C", dir, "rev-parse", "--show-toplevel")
	output, err := cmd.Output()
	if err != nil {
		return "", fmt.Errorf("failed to get git root: %w", err)
	}
	return strings.TrimSpace(string(output)), nil
}

// CreateWorktree creates a new git worktree with auto-generated task_id
// Returns Paths containing the worktree directory, branch name, and task_id
func CreateWorktree(projectDir string) (*Paths, error) {
	if projectDir == "" {
		projectDir = "."
	}

	// Verify it's a git repository
	if !isGitRepo(projectDir) {
		return nil, fmt.Errorf("not a git repository: %s", projectDir)
	}

	// Get git root for consistent path calculation
	gitRoot, err := getGitRoot(projectDir)
	if err != nil {
		return nil, err
	}

	// Generate task ID
	taskID, err := generateTaskID()
	if err != nil {
		return nil, err
	}

	// Calculate paths
	worktreeDir := filepath.Join(gitRoot, ".worktrees", fmt.Sprintf("do-%s", taskID))
	branchName := fmt.Sprintf("do/%s", taskID)

	// Create worktree with new branch
	cmd := execCommand("git", "-C", gitRoot, "worktree", "add", "-b", branchName, worktreeDir)
	if output, err := cmd.CombinedOutput(); err != nil {
		return nil, fmt.Errorf("failed to create worktree: %w\noutput: %s", err, string(output))
	}

	return &Paths{
		Dir:    worktreeDir,
		Branch: branchName,
		TaskID: taskID,
	}, nil
}
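For orientation, calling this package from the wrapper side would look roughly like the sketch below. The call site and the import path are illustrative assumptions; the actual caller is not shown in this diff.

```go
package main

import (
	"fmt"
	"log"

	"example.com/codeagent-wrapper/internal/worktree" // illustrative import path
)

func main() {
	// Create an isolated worktree for a new task in the current repository.
	paths, err := worktree.CreateWorktree(".")
	if err != nil {
		log.Fatalf("worktree setup failed: %v", err)
	}
	fmt.Printf("task %s runs on branch %s in %s\n", paths.TaskID, paths.Branch, paths.Dir)
}
```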
449
codeagent-wrapper/internal/worktree/worktree_test.go
Normal file
449
codeagent-wrapper/internal/worktree/worktree_test.go
Normal file
@@ -0,0 +1,449 @@
|
|||||||
|
package worktree
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func resetHooks() {
|
||||||
|
randReader = rand.Reader
|
||||||
|
timeNowFunc = time.Now
|
||||||
|
execCommand = exec.Command
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGenerateTaskID(t *testing.T) {
|
||||||
|
defer resetHooks()
|
||||||
|
|
||||||
|
taskID, err := generateTaskID()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("generateTaskID() error = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Format: YYYYMMDD-6hex
|
||||||
|
pattern := regexp.MustCompile(`^\d{8}-[0-9a-f]{6}$`)
|
||||||
|
if !pattern.MatchString(taskID) {
|
||||||
|
t.Errorf("generateTaskID() = %q, want format YYYYMMDD-xxxxxx", taskID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGenerateTaskID_FixedTime(t *testing.T) {
|
||||||
|
defer resetHooks()
|
||||||
|
|
||||||
|
// Mock time to a fixed date
|
||||||
|
timeNowFunc = func() time.Time {
|
||||||
|
return time.Date(2026, 2, 3, 12, 0, 0, 0, time.UTC)
|
||||||
|
}
|
||||||
|
|
||||||
|
taskID, err := generateTaskID()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("generateTaskID() error = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !regexp.MustCompile(`^20260203-[0-9a-f]{6}$`).MatchString(taskID) {
|
||||||
|
t.Errorf("generateTaskID() = %q, want prefix 20260203-", taskID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGenerateTaskID_RandReaderError(t *testing.T) {
|
||||||
|
defer resetHooks()
|
||||||
|
|
||||||
|
// Mock rand reader to return error
|
||||||
|
randReader = &errorReader{err: errors.New("mock rand error")}
|
||||||
|
|
||||||
|
_, err := generateTaskID()
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("generateTaskID() expected error, got nil")
|
||||||
|
}
|
||||||
|
if !regexp.MustCompile(`failed to generate random bytes`).MatchString(err.Error()) {
|
||||||
|
t.Errorf("error = %q, want 'failed to generate random bytes'", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type errorReader struct {
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *errorReader) Read(p []byte) (n int, err error) {
|
||||||
|
return 0, e.err
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGenerateTaskID_Uniqueness(t *testing.T) {
|
||||||
|
defer resetHooks()
|
||||||
|
|
||||||
|
const count = 100
|
||||||
|
ids := make(map[string]struct{}, count)
|
||||||
|
var mu sync.Mutex
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
|
for i := 0; i < count; i++ {
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
id, err := generateTaskID()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("generateTaskID() error = %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
mu.Lock()
|
||||||
|
ids[id] = struct{}{}
|
||||||
|
mu.Unlock()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
if len(ids) != count {
|
||||||
|
t.Errorf("generateTaskID() produced %d unique IDs out of %d, expected all unique", len(ids), count)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCreateWorktree_NotGitRepo(t *testing.T) {
|
||||||
|
defer resetHooks()
|
||||||
|
|
||||||
|
tmpDir, err := os.MkdirTemp("", "worktree-test-*")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create temp dir: %v", err)
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(tmpDir)
|
||||||
|
|
||||||
|
_, err = CreateWorktree(tmpDir)
|
||||||
|
if err == nil {
|
||||||
|
t.Error("CreateWorktree() expected error for non-git directory, got nil")
|
||||||
|
}
|
||||||
|
if err != nil && !regexp.MustCompile(`not a git repository`).MatchString(err.Error()) {
|
||||||
|
t.Errorf("CreateWorktree() error = %q, want 'not a git repository'", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCreateWorktree_EmptyProjectDir(t *testing.T) {
|
||||||
|
defer resetHooks()
|
||||||
|
|
||||||
|
// When projectDir is empty, it should default to "."
|
||||||
|
// This will fail because current dir may not be a git repo, but we test the default behavior
|
||||||
|
_, err := CreateWorktree("")
|
||||||
|
// We just verify it doesn't panic and returns an error (likely "not a git repository: .")
|
||||||
|
if err == nil {
|
||||||
|
// If we happen to be in a git repo, that's fine too
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !regexp.MustCompile(`not a git repository: \.`).MatchString(err.Error()) {
|
||||||
|
// It might be a git repo and fail later, which is also acceptable
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCreateWorktree_Success(t *testing.T) {
|
||||||
|
defer resetHooks()
|
||||||
|
|
||||||
|
// Create temp git repo
|
||||||
|
tmpDir, err := os.MkdirTemp("", "worktree-test-*")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create temp dir: %v", err)
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(tmpDir)
|
||||||
|
|
||||||
|
// Initialize git repo
|
||||||
|
if err := exec.Command("git", "-C", tmpDir, "init").Run(); err != nil {
|
||||||
|
t.Fatalf("failed to init git repo: %v", err)
|
||||||
|
}
|
||||||
|
if err := exec.Command("git", "-C", tmpDir, "config", "user.email", "test@test.com").Run(); err != nil {
|
||||||
|
t.Fatalf("failed to set git email: %v", err)
|
||||||
|
}
|
||||||
|
if err := exec.Command("git", "-C", tmpDir, "config", "user.name", "Test").Run(); err != nil {
|
||||||
|
t.Fatalf("failed to set git name: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create initial commit (required for worktree)
|
||||||
|
testFile := filepath.Join(tmpDir, "test.txt")
|
||||||
|
if err := os.WriteFile(testFile, []byte("test"), 0644); err != nil {
|
||||||
|
t.Fatalf("failed to create test file: %v", err)
|
||||||
|
}
|
||||||
|
if err := exec.Command("git", "-C", tmpDir, "add", ".").Run(); err != nil {
|
||||||
|
t.Fatalf("failed to git add: %v", err)
|
||||||
|
}
|
||||||
|
if err := exec.Command("git", "-C", tmpDir, "commit", "-m", "initial").Run(); err != nil {
|
||||||
|
t.Fatalf("failed to git commit: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test CreateWorktree
|
||||||
|
paths, err := CreateWorktree(tmpDir)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("CreateWorktree() error = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify task ID format
|
||||||
|
pattern := regexp.MustCompile(`^\d{8}-[0-9a-f]{6}$`)
|
||||||
|
if !pattern.MatchString(paths.TaskID) {
|
||||||
|
t.Errorf("TaskID = %q, want format YYYYMMDD-xxxxxx", paths.TaskID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify branch name
|
||||||
|
expectedBranch := "do/" + paths.TaskID
|
||||||
|
if paths.Branch != expectedBranch {
|
||||||
|
t.Errorf("Branch = %q, want %q", paths.Branch, expectedBranch)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify worktree directory exists
|
||||||
|
if _, err := os.Stat(paths.Dir); os.IsNotExist(err) {
|
||||||
|
t.Errorf("worktree directory %q does not exist", paths.Dir)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify worktree directory is under .worktrees/
|
||||||
|
expectedDirSuffix := filepath.Join(".worktrees", "do-"+paths.TaskID)
|
||||||
|
if !regexp.MustCompile(regexp.QuoteMeta(expectedDirSuffix) + `$`).MatchString(paths.Dir) {
|
||||||
|
t.Errorf("Dir = %q, want suffix %q", paths.Dir, expectedDirSuffix)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify branch exists
|
||||||
|
cmd := exec.Command("git", "-C", tmpDir, "branch", "--list", paths.Branch)
|
||||||
|
output, err := cmd.Output()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to list branches: %v", err)
|
||||||
|
}
|
||||||
|
if len(output) == 0 {
|
||||||
|
t.Errorf("branch %q was not created", paths.Branch)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCreateWorktree_GetGitRootError(t *testing.T) {
|
||||||
|
defer resetHooks()
|
||||||
|
|
||||||
|
// Create a temp dir and mock git commands
|
||||||
|
tmpDir, err := os.MkdirTemp("", "worktree-test-*")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create temp dir: %v", err)
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(tmpDir)
|
||||||
|
|
||||||
|
callCount := 0
|
||||||
|
execCommand = func(name string, args ...string) *exec.Cmd {
|
||||||
|
callCount++
|
||||||
|
if callCount == 1 {
|
||||||
|
// First call: isGitRepo - return true
|
||||||
|
return exec.Command("echo", "true")
|
||||||
|
}
|
||||||
|
// Second call: getGitRoot - return error
|
||||||
|
return exec.Command("false")
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = CreateWorktree(tmpDir)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("CreateWorktree() expected error, got nil")
|
||||||
|
}
|
||||||
|
if !regexp.MustCompile(`failed to get git root`).MatchString(err.Error()) {
|
||||||
|
t.Errorf("error = %q, want 'failed to get git root'", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCreateWorktree_GenerateTaskIDError(t *testing.T) {
|
||||||
|
defer resetHooks()
|
||||||
|
|
||||||
|
// Create temp git repo
|
||||||
|
tmpDir, err := os.MkdirTemp("", "worktree-test-*")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create temp dir: %v", err)
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(tmpDir)
|
||||||
|
|
||||||
|
// Initialize git repo with commit
|
||||||
|
if err := exec.Command("git", "-C", tmpDir, "init").Run(); err != nil {
|
||||||
|
t.Fatalf("failed to init git repo: %v", err)
|
||||||
|
}
|
||||||
|
if err := exec.Command("git", "-C", tmpDir, "config", "user.email", "test@test.com").Run(); err != nil {
|
||||||
|
t.Fatalf("failed to set git email: %v", err)
|
||||||
|
}
|
||||||
|
if err := exec.Command("git", "-C", tmpDir, "config", "user.name", "Test").Run(); err != nil {
|
||||||
|
t.Fatalf("failed to set git name: %v", err)
|
||||||
|
}
|
||||||
|
testFile := filepath.Join(tmpDir, "test.txt")
|
||||||
|
if err := os.WriteFile(testFile, []byte("test"), 0644); err != nil {
|
||||||
|
t.Fatalf("failed to create test file: %v", err)
|
||||||
|
}
|
||||||
|
if err := exec.Command("git", "-C", tmpDir, "add", ".").Run(); err != nil {
|
||||||
|
t.Fatalf("failed to git add: %v", err)
|
||||||
|
}
|
||||||
|
if err := exec.Command("git", "-C", tmpDir, "commit", "-m", "initial").Run(); err != nil {
|
||||||
|
t.Fatalf("failed to git commit: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mock rand reader to fail
|
||||||
|
randReader = &errorReader{err: errors.New("mock rand error")}
|
||||||
|
|
||||||
|
_, err = CreateWorktree(tmpDir)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("CreateWorktree() expected error, got nil")
|
||||||
|
}
|
||||||
|
if !regexp.MustCompile(`failed to generate random bytes`).MatchString(err.Error()) {
|
||||||
|
t.Errorf("error = %q, want 'failed to generate random bytes'", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCreateWorktree_WorktreeAddError(t *testing.T) {
|
||||||
|
defer resetHooks()
|
||||||
|
|
||||||
|
tmpDir, err := os.MkdirTemp("", "worktree-test-*")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create temp dir: %v", err)
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(tmpDir)
|
||||||
|
|
||||||
|
callCount := 0
|
||||||
|
execCommand = func(name string, args ...string) *exec.Cmd {
|
||||||
|
callCount++
|
||||||
|
switch callCount {
|
||||||
|
case 1:
|
||||||
|
// isGitRepo - return true
|
||||||
|
return exec.Command("echo", "true")
|
||||||
|
case 2:
|
||||||
|
// getGitRoot - return tmpDir
|
||||||
|
return exec.Command("echo", tmpDir)
|
||||||
|
case 3:
|
||||||
|
// worktree add - return error
|
||||||
|
return exec.Command("false")
|
||||||
|
}
|
||||||
|
return exec.Command("false")
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = CreateWorktree(tmpDir)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("CreateWorktree() expected error, got nil")
|
||||||
|
}
|
||||||
|
if !regexp.MustCompile(`failed to create worktree`).MatchString(err.Error()) {
|
||||||
|
t.Errorf("error = %q, want 'failed to create worktree'", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIsGitRepo(t *testing.T) {
|
||||||
|
defer resetHooks()
|
||||||
|
|
||||||
|
// Test non-git directory
|
||||||
|
tmpDir, err := os.MkdirTemp("", "worktree-test-*")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create temp dir: %v", err)
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(tmpDir)
|
||||||
|
|
||||||
|
if isGitRepo(tmpDir) {
|
||||||
|
t.Error("isGitRepo() = true for non-git directory, want false")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test git directory
|
||||||
|
if err := exec.Command("git", "-C", tmpDir, "init").Run(); err != nil {
|
||||||
|
t.Fatalf("failed to init git repo: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !isGitRepo(tmpDir) {
|
||||||
|
t.Error("isGitRepo() = false for git directory, want true")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIsGitRepo_CommandError(t *testing.T) {
|
||||||
|
defer resetHooks()
|
||||||
|
|
||||||
|
// Mock execCommand to return error
|
||||||
|
execCommand = func(name string, args ...string) *exec.Cmd {
|
||||||
|
return exec.Command("false")
|
||||||
|
}
|
||||||
|
|
||||||
|
if isGitRepo("/some/path") {
|
||||||
|
t.Error("isGitRepo() = true when command fails, want false")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIsGitRepo_NotTrueOutput(t *testing.T) {
|
||||||
|
defer resetHooks()
|
||||||
|
|
||||||
|
// Mock execCommand to return something other than "true"
|
||||||
|
execCommand = func(name string, args ...string) *exec.Cmd {
|
||||||
|
return exec.Command("echo", "false")
|
||||||
|
}
|
||||||
|
|
||||||
|
if isGitRepo("/some/path") {
|
||||||
|
t.Error("isGitRepo() = true when output is 'false', want false")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetGitRoot(t *testing.T) {
|
||||||
|
defer resetHooks()
|
||||||
|
|
||||||
|
// Create temp git repo
|
||||||
|
tmpDir, err := os.MkdirTemp("", "worktree-test-*")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create temp dir: %v", err)
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(tmpDir)
|
||||||
|
|
||||||
|
if err := exec.Command("git", "-C", tmpDir, "init").Run(); err != nil {
|
||||||
|
t.Fatalf("failed to init git repo: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
root, err := getGitRoot(tmpDir)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("getGitRoot() error = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The root should match tmpDir (accounting for symlinks)
|
||||||
|
absRoot, _ := filepath.EvalSymlinks(root)
|
||||||
|
absTmp, _ := filepath.EvalSymlinks(tmpDir)
|
||||||
|
if absRoot != absTmp {
|
||||||
|
t.Errorf("getGitRoot() = %q, want %q", absRoot, absTmp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetGitRoot_Error(t *testing.T) {
|
||||||
|
defer resetHooks()
|
||||||
|
|
||||||
|
execCommand = func(name string, args ...string) *exec.Cmd {
|
||||||
|
return exec.Command("false")
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := getGitRoot("/some/path")
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("getGitRoot() expected error, got nil")
|
||||||
|
}
|
||||||
|
if !regexp.MustCompile(`failed to get git root`).MatchString(err.Error()) {
|
||||||
|
t.Errorf("error = %q, want 'failed to get git root'", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that rand reader produces expected bytes
|
||||||
|
func TestGenerateTaskID_RandReaderBytes(t *testing.T) {
|
||||||
|
defer resetHooks()
|
||||||
|
|
||||||
|
// Mock rand reader to return fixed bytes
|
||||||
|
randReader = &fixedReader{data: []byte{0xab, 0xcd, 0xef}}
|
||||||
|
timeNowFunc = func() time.Time {
|
||||||
|
return time.Date(2026, 1, 15, 0, 0, 0, 0, time.UTC)
|
||||||
|
}
|
||||||
|
|
||||||
|
taskID, err := generateTaskID()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("generateTaskID() error = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
expected := "20260115-abcdef"
|
||||||
|
if taskID != expected {
|
||||||
|
t.Errorf("generateTaskID() = %q, want %q", taskID, expected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type fixedReader struct {
|
||||||
|
data []byte
|
||||||
|
pos int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *fixedReader) Read(p []byte) (n int, err error) {
|
||||||
|
if f.pos >= len(f.data) {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
n = copy(p, f.data[f.pos:])
|
||||||
|
f.pos += n
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
99 config.json
@@ -3,75 +3,14 @@
  "install_dir": "~/.claude",
  "log_file": "install.log",
  "modules": {
    "dev": {
      "enabled": true,
      "description": "Core dev workflow with Codex integration",
      "operations": [
        {
          "type": "merge_dir",
          "source": "dev-workflow",
          "description": "Merge commands/ and agents/ into install dir"
        },
        {
          "type": "copy_file",
          "source": "memorys/CLAUDE.md",
          "target": "CLAUDE.md",
          "description": "Copy core role and guidelines"
        },
        {
          "type": "copy_file",
          "source": "skills/codeagent/SKILL.md",
          "target": "skills/codeagent/SKILL.md",
          "description": "Install codeagent skill"
        },
        {
          "type": "copy_file",
          "source": "skills/product-requirements/SKILL.md",
          "target": "skills/product-requirements/SKILL.md",
          "description": "Install product-requirements skill"
        },
        {
          "type": "copy_file",
          "source": "skills/prototype-prompt-generator/SKILL.md",
          "target": "skills/prototype-prompt-generator/SKILL.md",
          "description": "Install prototype-prompt-generator skill"
        },
        {
          "type": "copy_file",
          "source": "skills/prototype-prompt-generator/references/prompt-structure.md",
          "target": "skills/prototype-prompt-generator/references/prompt-structure.md",
          "description": "Install prototype-prompt-generator prompt structure reference"
        },
        {
          "type": "copy_file",
          "source": "skills/prototype-prompt-generator/references/design-systems.md",
          "target": "skills/prototype-prompt-generator/references/design-systems.md",
          "description": "Install prototype-prompt-generator design systems reference"
        },
        {
          "type": "run_command",
          "command": "bash install.sh",
          "description": "Install codeagent-wrapper binary",
          "env": {
            "INSTALL_DIR": "${install_dir}"
          }
        }
      ]
    },
    "bmad": {
      "enabled": false,
      "description": "BMAD agile workflow with multi-agent orchestration",
      "operations": [
        {
          "type": "merge_dir",
          "source": "bmad-agile-workflow",
          "source": "agents/bmad",
          "description": "Merge BMAD commands and agents"
        },
        {
          "type": "copy_file",
          "source": "docs/BMAD-WORKFLOW.md",
          "target": "docs/BMAD-WORKFLOW.md",
          "description": "Copy BMAD workflow documentation"
        }
      ]
    },
@@ -81,14 +20,8 @@
      "operations": [
        {
          "type": "merge_dir",
          "source": "requirements-driven-workflow",
          "source": "agents/requirements",
          "description": "Merge requirements workflow commands and agents"
        },
        {
          "type": "copy_file",
          "source": "docs/REQUIREMENTS-WORKFLOW.md",
          "target": "docs/REQUIREMENTS-WORKFLOW.md",
          "description": "Copy requirements workflow documentation"
        }
      ]
    },
@@ -98,14 +31,8 @@
      "operations": [
        {
          "type": "merge_dir",
          "source": "development-essentials",
          "source": "agents/development-essentials",
          "description": "Merge essential development commands"
        },
        {
          "type": "copy_file",
          "source": "docs/DEVELOPMENT-COMMANDS.md",
          "target": "docs/DEVELOPMENT-COMMANDS.md",
          "description": "Copy development commands documentation"
        }
      ]
    },
@@ -170,7 +97,7 @@
      ]
    },
    "do": {
      "enabled": false,
      "enabled": true,
      "description": "7-phase feature development workflow with codeagent orchestration",
      "operations": [
        {
@@ -218,6 +145,24 @@
          }
        }
      ]
    },
    "claudekit": {
      "enabled": false,
      "description": "ClaudeKit workflow: skills/do + global hooks (pre-bash, inject-spec, log-prompt, on-stop)",
      "operations": [
        {
          "type": "copy_dir",
          "source": "skills/do",
          "target": "skills/do",
          "description": "Install do skill with 5-phase workflow"
        },
        {
          "type": "copy_dir",
          "source": "hooks",
          "target": "hooks",
          "description": "Install global hooks (pre-bash, inject-spec, log-prompt, on-stop)"
        }
      ]
    }
  }
}
@@ -1,9 +0,0 @@
{
  "name": "dev",
  "description": "Lightweight development workflow with requirements clarification, parallel codex execution, and mandatory 90% test coverage",
  "version": "5.6.1",
  "author": {
    "name": "cexll",
    "email": "cexll@cexll.com"
  }
}
@@ -1,192 +0,0 @@
# /dev - Minimal Dev Workflow

## Overview

A freshly designed lightweight development workflow with no legacy baggage, focused on delivering high-quality code fast.

## Flow

```
/dev trigger
  ↓
AskUserQuestion (backend selection)
  ↓
AskUserQuestion (requirements clarification)
  ↓
codeagent analysis (plan mode + task typing + UI auto-detection)
  ↓
dev-plan-generator (create dev doc)
  ↓
codeagent concurrent development (2–5 tasks, backend routing)
  ↓
codeagent testing & verification (≥90% coverage)
  ↓
Done (generate summary)
```

## Step 0 + The 6 Steps

### 0. Select Allowed Backends (FIRST ACTION)
- Use **AskUserQuestion** with multiSelect to ask which backends are allowed for this run
- Options (user can select multiple):
  - `codex` - Stable, high quality, best cost-performance (default for most tasks)
  - `claude` - Fast, lightweight (for quick fixes and config changes)
  - `gemini` - UI/UX specialist (for frontend styling and components)
- If the user selects ONLY `codex`, ALL subsequent tasks must use `codex` (including UI/quick-fix)

### 1. Clarify Requirements
- Use **AskUserQuestion** to ask the user directly
- No scoring system, no complex logic
- 2–3 rounds of Q&A until the requirement is clear

### 2. codeagent Analysis + Task Typing + UI Detection
- Call codeagent to analyze the request in plan mode style
- Extract: core functions, technical points, task list (2–5 items)
- For each task, assign exactly one type: `default` / `ui` / `quick-fix`
- UI auto-detection: the task needs UI work when it involves style assets (.css, .scss, styled-components, CSS modules, tailwindcss) OR frontend component files (.tsx, .jsx, .vue); output yes/no plus the evidence (see the sketch after this list)
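As an illustration of the detection rule above, a minimal file-extension check could look like this. It is a sketch only: the real workflow performs this analysis through codeagent, and this simplified version does not cover styled-components, CSS modules, or tailwind usage inside source files.

```go
package uidetect

import (
	"path/filepath"
	"strings"
)

// needsUI reports whether any file touched by a task points at UI work,
// using the style-asset / component-file criteria described above.
func needsUI(files []string) bool {
	uiExt := map[string]bool{
		".css": true, ".scss": true, // style assets
		".tsx": true, ".jsx": true, ".vue": true, // frontend components
	}
	for _, f := range files {
		if uiExt[strings.ToLower(filepath.Ext(f))] {
			return true
		}
	}
	return false
}
```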
### 3. Generate Dev Doc
- Call the **dev-plan-generator** agent
- Produce a single `dev-plan.md`
- Append a dedicated UI task when Step 2 marks `needs_ui: true`
- Include: task breakdown, `type`, file scope, dependencies, test commands

### 4. Concurrent Development
- Work from the task list in dev-plan.md
- Route backend per task type (with user constraints + fallback):
  - `default` → `codex`
  - `ui` → `gemini` (enforced when allowed)
  - `quick-fix` → `claude`
  - Missing `type` → treat as `default`
  - If the preferred backend is not allowed, fall back to an allowed backend by priority: `codex` → `claude` → `gemini`
- Independent tasks → run in parallel
- Conflicting tasks → run serially

### 5. Testing & Verification
- Each codeagent task:
  - Implements the feature
  - Writes tests
  - Runs coverage
  - Reports results (≥90%)

### 6. Complete
- Summarize task status
- Record coverage

## Usage

```bash
/dev "Implement user login with email + password"
```

No CLI flags required; the workflow starts with an interactive backend selection.

## Output Structure

```
.claude/specs/{feature_name}/
└── dev-plan.md    # Dev document generated by agent
```

Only one file—minimal and clear.

## Core Components

### Tools
- **AskUserQuestion**: interactive requirement clarification
- **codeagent skill**: analysis, development, testing; supports `--backend` for `codex` / `claude` / `gemini`
- **dev-plan-generator agent**: generate dev doc (subagent via Task tool, saves context)

## Backend Selection & Routing
- **Step 0**: the user selects the allowed backends; if only `codex` is selected, all tasks use codex
- **UI detection standard**: style files (.css, .scss, styled-components, CSS modules, tailwindcss) OR frontend component code (.tsx, .jsx, .vue) trigger `needs_ui: true`
- **Task type field**: each task in `dev-plan.md` must have `type: default|ui|quick-fix`
- **Routing**: `default`→codex, `ui`→gemini, `quick-fix`→claude; if the preferred backend is disallowed, fall back to an allowed backend by priority: codex→claude→gemini (see the sketch below)
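For illustration, the routing and fallback rules above can be expressed roughly as follows. This is a sketch; the orchestrator applies these rules through prompts rather than through this exact code.

```go
package routing

// routeBackend picks a backend for a task type, honoring the user's allowed set
// and falling back in priority order codex → claude → gemini.
// An "only codex" selection naturally forces every task onto codex.
func routeBackend(taskType string, allowed map[string]bool) string {
	preferred := map[string]string{
		"default":   "codex",
		"ui":        "gemini",
		"quick-fix": "claude",
	}
	want, ok := preferred[taskType]
	if !ok {
		want = "codex" // a missing type is treated as default
	}
	if allowed[want] {
		return want
	}
	for _, b := range []string{"codex", "claude", "gemini"} {
		if allowed[b] {
			return b
		}
	}
	return "" // no allowed backend; the caller should surface an error
}
```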
## Key Features

### ✅ Fresh Design
- No legacy project residue
- No complex scoring logic
- No extra abstraction layers

### ✅ Minimal Orchestration
- Orchestrator controls the flow directly
- Only three tools/components
- Steps are straightforward

### ✅ Concurrency
- Tasks split based on natural functional boundaries
- Auto-detect dependencies and conflicts
- codeagent executes independently with optimal backend

### ✅ Quality Assurance
- Enforces 90% coverage
- codeagent tests and verifies its own work
- Automatic retry on failure

## Example

```bash
# Trigger
/dev "Add user login feature"

# Step 0: Select backends
Q: Which backends are allowed? (multiSelect)
A: Selected: codex, claude

# Step 1: Clarify requirements
Q: What login methods are supported?
A: Email + password
Q: Should login be remembered?
A: Yes, use JWT token

# Step 2: codeagent analysis
Output:
- Core: email/password login + JWT auth
- Task 1: Backend API (type=default)
- Task 2: Password hashing (type=default)
- Task 3: Frontend form (type=ui)
UI detection: needs_ui = true (tailwindcss classes in frontend form)

# Step 3: Generate doc
dev-plan.md generated with typed tasks ✓

# Step 4-5: Concurrent development (routing + fallback)
[task-1] Backend API (codex) → tests → 92% ✓
[task-2] Password hashing (codex) → tests → 95% ✓
[task-3] Frontend form (fallback to codex; gemini not allowed) → tests → 91% ✓
```

## Directory Structure

```
dev-workflow/
├── README.md                      # This doc
├── commands/
│   └── dev.md                     # /dev workflow orchestrator definition
└── agents/
    └── dev-plan-generator.md      # Dev plan document generator agent
```

Minimal structure, only three files.

## When to Use

✅ **Good for**:
- Any feature size
- Fast iterations
- High test coverage needs
- Wanting concurrent speed-up

## Design Principles

1. **KISS**: keep it simple
2. **Disposable**: no persistent config
3. **Quality first**: enforce 90% coverage
4. **Concurrency first**: leverage codeagent
5. **No legacy baggage**: clean-slate design

---

**Philosophy**: zero tolerance for complexity—ship the smallest usable solution, like Linus would.
@@ -1,124 +0,0 @@
|
|||||||
---
|
|
||||||
name: dev-plan-generator
|
|
||||||
description: Use this agent when you need to generate a structured development plan document (`dev-plan.md`) that breaks down a feature into concrete implementation tasks with testing requirements and acceptance criteria. This agent should be called after requirements analysis and before actual implementation begins.\n\n<example>\nContext: User is orchestrating a feature development workflow and needs to create a development plan after codeagent analysis is complete.\nuser: "Create a development plan for the user authentication feature based on the requirements and analysis"\nassistant: "I'll use the Task tool to launch the dev-plan-generator agent to create the structured development plan document."\n<commentary>\nThe user needs a dev-plan.md document generated from requirements and analysis. Use the dev-plan-generator agent to create the structured task breakdown.\n</commentary>\n</example>\n\n<example>\nContext: Orchestrator has completed requirements gathering and codeagent analysis for a new feature and needs to generate the development plan before moving to implementation.\nuser: "We've completed the analysis for the payment integration feature. Generate the development plan."\nassistant: "I'm going to use the Task tool to launch the dev-plan-generator agent to create the dev-plan.md document with task breakdown and testing requirements."\n<commentary>\nThis is the step in the workflow where the development plan document needs to be generated. Use the dev-plan-generator agent to create the structured plan.\n</commentary>\n</example>\n\n<example>\nContext: User is working through a requirements-driven workflow and has just approved the technical specifications.\nuser: "The specs look good. Let's move forward with creating the implementation plan."\nassistant: "I'll use the Task tool to launch the dev-plan-generator agent to generate the dev-plan.md document with the task breakdown."\n<commentary>\nAfter spec approval, the next step is generating the development plan. Use the dev-plan-generator agent to create the structured document.\n</commentary>\n</example>
|
|
||||||
tools: Glob, Grep, Read, Edit, Write, TodoWrite
|
|
||||||
model: sonnet
|
|
||||||
color: green
|
|
||||||
---
|
|
||||||
|
|
||||||
You are a specialized Development Plan Document Generator. Your sole responsibility is to create structured, actionable development plan documents (`dev-plan.md`) that break down features into concrete implementation tasks.
|
|
||||||
|
|
||||||
## Your Role
|
|
||||||
|
|
||||||
You receive context from an orchestrator including:
|
|
||||||
- Feature requirements description
|
|
||||||
- codeagent analysis results (feature highlights, task decomposition, UI detection flag, and task typing hints)
|
|
||||||
- Feature name (in kebab-case format)
|
|
||||||
|
|
||||||
Your output is a single file: `./.claude/specs/{feature_name}/dev-plan.md`
|
|
||||||
|
|
||||||
## Document Structure You Must Follow
|
|
||||||
|
|
||||||
```markdown
|
|
||||||
# {Feature Name} - Development Plan
|
|
||||||
|
|
||||||
## Overview
|
|
||||||
[One-sentence description of core functionality]
|
|
||||||
|
|
||||||
## Task Breakdown
|
|
||||||
|
|
||||||
### Task 1: [Task Name]
|
|
||||||
- **ID**: task-1
|
|
||||||
- **type**: default|ui|quick-fix
|
|
||||||
- **Description**: [What needs to be done]
|
|
||||||
- **File Scope**: [Directories or files involved, e.g., src/auth/**, tests/auth/]
|
|
||||||
- **Dependencies**: [None or depends on task-x]
|
|
||||||
- **Test Command**: [e.g., pytest tests/auth --cov=src/auth --cov-report=term]
|
|
||||||
- **Test Focus**: [Scenarios to cover]
|
|
||||||
|
|
||||||
### Task 2: [Task Name]
|
|
||||||
...
|
|
||||||
|
|
||||||
(Tasks based on natural functional boundaries, typically 2-5)
|
|
||||||
|
|
||||||
## Acceptance Criteria
|
|
||||||
- [ ] Feature point 1
|
|
||||||
- [ ] Feature point 2
|
|
||||||
- [ ] All unit tests pass
|
|
||||||
- [ ] Code coverage ≥90%
|
|
||||||
|
|
||||||
## Technical Notes
|
|
||||||
- [Key technical decisions]
|
|
||||||
- [Constraints to be aware of]
|
|
||||||
```
|
|
||||||
|
|
||||||
## Generation Rules You Must Enforce
|
|
||||||
|
|
||||||
1. **Task Count**: Generate tasks based on natural functional boundaries (no artificial limits)
|
|
||||||
- Typical range: 2-5 tasks
|
|
||||||
- Quality over quantity: prefer fewer well-scoped tasks over excessive fragmentation
|
|
||||||
- Each task should be independently completable by one agent
|
|
||||||
2. **Task Requirements**: Each task MUST include:
|
|
||||||
- Clear ID (task-1, task-2, etc.)
|
|
||||||
- A single task type field: `type: default|ui|quick-fix`
|
|
||||||
- Specific description of what needs to be done
|
|
||||||
- Explicit file scope (directories or files affected)
|
|
||||||
- Dependency declaration ("None" or "depends on task-x")
|
|
||||||
- Complete test command with coverage parameters
|
|
||||||
- Testing focus points (scenarios to cover)
|
|
||||||
3. **Task Independence**: Design tasks to be as independent as possible to enable parallel execution
|
|
||||||
4. **Test Commands**: Must include coverage parameters (e.g., `--cov=module --cov-report=term` for pytest, `--coverage` for npm)
|
|
||||||
5. **Coverage Threshold**: Always require ≥90% code coverage in acceptance criteria
|
|
||||||
|
|
||||||
## Your Workflow
|
|
||||||
|
|
||||||
1. **Analyze Input**: Review the requirements description and codeagent analysis results (including `needs_ui` and any task typing hints)
|
|
||||||
2. **Identify Tasks**: Break down the feature into 2-5 logical, independent tasks
|
|
||||||
3. **Determine Dependencies**: Map out which tasks depend on others (minimize dependencies)
|
|
||||||
4. **Assign Task Type**: For each task, set exactly one `type`:
|
|
||||||
- `ui`: touches UI/style/component work (e.g., .css/.scss/.tsx/.jsx/.vue, tailwind, design tweaks)
|
|
||||||
- `quick-fix`: small, fast changes (config tweaks, small bug fix, minimal scope); do NOT use for UI work
|
|
||||||
- `default`: everything else
|
|
||||||
- Note: `/dev` Step 4 routes backend by `type` (default→codex, ui→gemini, quick-fix→claude; missing type → default)
|
|
||||||
5. **Specify Testing**: For each task, define the exact test command and coverage requirements
|
|
||||||
6. **Define Acceptance**: List concrete, measurable acceptance criteria including the 90% coverage requirement
|
|
||||||
7. **Document Technical Points**: Note key technical decisions and constraints
|
|
||||||
8. **Write File**: Use the Write tool to create `./.claude/specs/{feature_name}/dev-plan.md`
|
|
||||||
|
|
||||||
## Quality Checks Before Writing
|
|
||||||
|
|
||||||
- [ ] Task count is between 2-5
|
|
||||||
- [ ] Every task has all required fields (ID, type, Description, File Scope, Dependencies, Test Command, Test Focus)
|
|
||||||
- [ ] Test commands include coverage parameters
|
|
||||||
- [ ] Dependencies are explicitly stated
|
|
||||||
- [ ] Acceptance criteria includes 90% coverage requirement
|
|
||||||
- [ ] File scope is specific (not vague like "all files")
|
|
||||||
- [ ] Testing focus is concrete (not generic like "test everything")
|
|
||||||
|
|
||||||
## Critical Constraints
|
|
||||||
|
|
||||||
- **Document Only**: You generate documentation. You do NOT execute code, run tests, or modify source files.
|
|
||||||
- **Single Output**: You produce exactly one file: `dev-plan.md` in the correct location
|
|
||||||
- **Path Accuracy**: The path must be `./.claude/specs/{feature_name}/dev-plan.md` where {feature_name} matches the input
|
|
||||||
- **Language Matching**: Output language matches user input (Chinese input → Chinese doc, English input → English doc)
|
|
||||||
- **Structured Format**: Follow the exact markdown structure provided
|
|
||||||
|
|
||||||
## Example Output Quality
|
|
||||||
|
|
||||||
Refer to the user login example in your instructions as the quality benchmark. Your outputs should have:
|
|
||||||
- Clear, actionable task descriptions
|
|
||||||
- Specific file paths (not generic)
|
|
||||||
- Realistic test commands for the actual tech stack
|
|
||||||
- Concrete testing scenarios (not abstract)
|
|
||||||
- Measurable acceptance criteria
|
|
||||||
- Relevant technical decisions
|
|
||||||
|
|
||||||
## Error Handling
|
|
||||||
|
|
||||||
If the input context is incomplete or unclear:
|
|
||||||
1. Request the missing information explicitly
|
|
||||||
2. Do NOT proceed with generating a low-quality document
|
|
||||||
3. Do NOT make up requirements or technical details
|
|
||||||
4. Ask for clarification on: feature scope, tech stack, testing framework, file structure
|
|
||||||
|
|
||||||
Remember: Your document will be used by other agents to implement the feature. Precision and completeness are critical. Every field must be filled with specific, actionable information.
|
|
||||||
@@ -1,213 +0,0 @@
|
|||||||
---
|
|
||||||
description: Extreme lightweight end-to-end development workflow with requirements clarification, intelligent backend selection, parallel codeagent execution, and mandatory 90% test coverage
|
|
||||||
---
|
|
||||||
|
|
||||||
You are the /dev Workflow Orchestrator, an expert development workflow manager specializing in orchestrating minimal, efficient end-to-end development processes with parallel task execution and rigorous test coverage validation.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## CRITICAL CONSTRAINTS (NEVER VIOLATE)
|
|
||||||
|
|
||||||
These rules have HIGHEST PRIORITY and override all other instructions:
|
|
||||||
|
|
||||||
1. **NEVER use Edit, Write, or MultiEdit tools directly** - ALL code changes MUST go through codeagent-wrapper
|
|
||||||
2. **MUST use AskUserQuestion in Step 0** - Backend selection MUST be the FIRST action (before requirement clarification)
|
|
||||||
3. **MUST use AskUserQuestion in Step 1** - Do NOT skip requirement clarification
|
|
||||||
4. **MUST use TodoWrite after Step 1** - Create task tracking list before any analysis
|
|
||||||
5. **MUST use codeagent-wrapper for Step 2 analysis** - Do NOT use Read/Glob/Grep directly for deep analysis
|
|
||||||
6. **MUST wait for user confirmation in Step 3** - Do NOT proceed to Step 4 without explicit approval
|
|
||||||
7. **MUST invoke codeagent-wrapper --parallel for Step 4 execution** - Use Bash tool, NOT Edit/Write or Task tool
|
|
||||||
|
|
||||||
**Violation of any constraint above invalidates the entire workflow. Stop and restart if violated.**
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
**Core Responsibilities**
|
|
||||||
- Orchestrate a streamlined 7-step development workflow (Step 0 + Step 1–6):
|
|
||||||
0. Backend selection (user constrained)
|
|
||||||
1. Requirement clarification through targeted questioning
|
|
||||||
2. Technical analysis using codeagent-wrapper
|
|
||||||
3. Development documentation generation
|
|
||||||
4. Parallel development execution (backend routing per task type)
|
|
||||||
5. Coverage validation (≥90% requirement)
|
|
||||||
6. Completion summary
|
|
||||||
|
|
||||||
**Workflow Execution**
|
|
||||||
- **Step 0: Backend Selection [MANDATORY - FIRST ACTION]**
|
|
||||||
- MUST use AskUserQuestion tool as the FIRST action with multiSelect enabled
|
|
||||||
- Ask which backends are allowed for this /dev run
|
|
||||||
- Options (user can select multiple):
|
|
||||||
- `codex` - Stable, high quality, best cost-performance (default for most tasks)
|
|
||||||
- `claude` - Fast, lightweight (for quick fixes and config changes)
|
|
||||||
- `gemini` - UI/UX specialist (for frontend styling and components)
|
|
||||||
- Store the selected backends as `allowed_backends` set for routing in Step 4
|
|
||||||
- Special rule: if user selects ONLY `codex`, then ALL subsequent tasks (including UI/quick-fix) MUST use `codex` (no exceptions)
|
|
||||||
|
|
||||||
- **Step 1: Requirement Clarification [MANDATORY - DO NOT SKIP]**
|
|
||||||
- MUST use AskUserQuestion tool
|
|
||||||
- Focus questions on functional boundaries, inputs/outputs, constraints, testing, and required unit-test coverage levels
|
|
||||||
- Iterate 2-3 rounds until clear; rely on judgment; keep questions concise
|
|
||||||
- After clarification complete: MUST use TodoWrite to create task tracking list with workflow steps
|
|
||||||
|
|
||||||
- **Step 2: codeagent-wrapper Deep Analysis (Plan Mode Style) [USE CODEAGENT-WRAPPER ONLY]**
|
|
||||||
|
|
||||||
MUST use Bash tool to invoke `codeagent-wrapper` for deep analysis. Do NOT use Read/Glob/Grep tools directly - delegate all exploration to codeagent-wrapper.
|
|
||||||
|
|
||||||
**How to invoke for analysis**:
|
|
||||||
```bash
|
|
||||||
# analysis_backend selection:
|
|
||||||
# - prefer codex if it is in allowed_backends
|
|
||||||
# - otherwise pick the first backend in allowed_backends
|
|
||||||
codeagent-wrapper --backend {analysis_backend} - <<'EOF'
|
|
||||||
Analyze the codebase for implementing [feature name].
|
|
||||||
|
|
||||||
Requirements:
|
|
||||||
- [requirement 1]
|
|
||||||
- [requirement 2]
|
|
||||||
|
|
||||||
Deliverables:
|
|
||||||
1. Explore codebase structure and existing patterns
|
|
||||||
2. Evaluate implementation options with trade-offs
|
|
||||||
3. Make architectural decisions
|
|
||||||
4. Break down into 2-5 parallelizable tasks with dependencies and file scope
|
|
||||||
5. Classify each task with a single `type`: `default` / `ui` / `quick-fix`
|
|
||||||
6. Determine if UI work is needed (check for .css/.tsx/.vue files)
|
|
||||||
|
|
||||||
Output the analysis following the structure below.
|
|
||||||
EOF
|
|
||||||
```
|
|
||||||
|
|
||||||
**When Deep Analysis is Needed** (any condition triggers):
|
|
||||||
- Multiple valid approaches exist (e.g., Redis vs in-memory vs file-based caching)
|
|
||||||
- Significant architectural decisions required (e.g., WebSockets vs SSE vs polling)
|
|
||||||
- Large-scale changes touching many files or systems
|
|
||||||
- Unclear scope requiring exploration first
|
|
||||||
|
|
||||||
**UI Detection Requirements**:
|
|
||||||
- During analysis, output whether the task needs UI work (yes/no) and the evidence
|
|
||||||
- UI criteria: presence of style assets (.css, .scss, styled-components, CSS modules, tailwindcss) OR frontend component files (.tsx, .jsx, .vue)
|
|
||||||
|
|
||||||
**What the AI backend does in Analysis Mode** (when invoked via codeagent-wrapper):
|
|
||||||
1. **Explore Codebase**: Use Glob, Grep, Read to understand structure, patterns, architecture
|
|
||||||
2. **Identify Existing Patterns**: Find how similar features are implemented, reuse conventions
|
|
||||||
3. **Evaluate Options**: When multiple approaches exist, list trade-offs (complexity, performance, security, maintainability)
|
|
||||||
4. **Make Architectural Decisions**: Choose patterns, APIs, data models with justification
|
|
||||||
5. **Design Task Breakdown**: Produce parallelizable tasks based on natural functional boundaries with file scope and dependencies
|
|
||||||
|
|
||||||
**Analysis Output Structure**:
|
|
||||||
```
|
|
||||||
## Context & Constraints
|
|
||||||
[Tech stack, existing patterns, constraints discovered]
|
|
||||||
|
|
||||||
## Codebase Exploration
|
|
||||||
[Key files, modules, patterns found via Glob/Grep/Read]
|
|
||||||
|
|
||||||
## Implementation Options (if multiple approaches)
|
|
||||||
| Option | Pros | Cons | Recommendation |
|
|
||||||
|
|
||||||
## Technical Decisions
|
|
||||||
[API design, data models, architecture choices made]
|
|
||||||
|
|
||||||
## Task Breakdown
|
|
||||||
[2-5 tasks with: ID, description, file scope, dependencies, test command, type(default|ui|quick-fix)]
|
|
||||||
|
|
||||||
## UI Determination
|
|
||||||
needs_ui: [true/false]
|
|
||||||
evidence: [files and reasoning tied to style + component criteria]
|
|
||||||
```
|
|
||||||
|
|
||||||
**Skip Deep Analysis When**:
|
|
||||||
- Simple, straightforward implementation with obvious approach
|
|
||||||
- Small changes confined to 1-2 files
|
|
||||||
- Clear requirements with single implementation path
|
|
||||||
|
|
||||||
- **Step 3: Generate Development Documentation**
|
|
||||||
- invoke agent dev-plan-generator
|
|
||||||
- When creating `dev-plan.md`, ensure every task has `type: default|ui|quick-fix`
|
|
||||||
- Append a dedicated UI task if Step 2 marked `needs_ui: true` but no UI task exists
|
|
||||||
- Output a brief summary of dev-plan.md:
|
|
||||||
- Number of tasks and their IDs
|
|
||||||
- Task type for each task
|
|
||||||
- File scope for each task
|
|
||||||
- Dependencies between tasks
|
|
||||||
- Test commands
|
|
||||||
- Use AskUserQuestion to confirm with user:
|
|
||||||
- Question: "Proceed with this development plan?" (state backend routing rules and any forced fallback due to allowed_backends)
|
|
||||||
- Options: "Confirm and execute" / "Need adjustments"
|
|
||||||
- If user chooses "Need adjustments", return to Step 1 or Step 2 based on feedback
|
|
||||||
|
|
||||||
- **Step 4: Parallel Development Execution [CODEAGENT-WRAPPER ONLY - NO DIRECT EDITS]**
|
|
||||||
- MUST use Bash tool to invoke `codeagent-wrapper --parallel` for ALL code changes
|
|
||||||
- NEVER use Edit, Write, MultiEdit, or Task tools to modify code directly
|
|
||||||
- Backend routing (must be deterministic and enforceable):
|
|
||||||
- Task field: `type: default|ui|quick-fix` (missing → treat as `default`)
|
|
||||||
- Preferred backend by type:
|
|
||||||
- `default` → `codex`
|
|
||||||
- `ui` → `gemini` (enforced when allowed)
|
|
||||||
- `quick-fix` → `claude`
|
|
||||||
- If user selected `仅 codex`: all tasks MUST use `codex`
|
|
||||||
- Otherwise, if preferred backend is not in `allowed_backends`, fallback to the first available backend by priority: `codex` → `claude` → `gemini`
|
|
||||||
  - Build ONE `--parallel` config that includes all tasks in `dev-plan.md` and submit it once via Bash tool:

```bash
# One shot submission - wrapper handles topology + concurrency
codeagent-wrapper --parallel <<'EOF'
---TASK---
id: [task-id-1]
backend: [routed-backend-from-type-and-allowed_backends]
workdir: .
dependencies: [optional, comma-separated ids]
---CONTENT---
Task: [task-id-1]
Reference: @.claude/specs/{feature_name}/dev-plan.md
Scope: [task file scope]
Test: [test command]
Deliverables: code + unit tests + coverage ≥90% + coverage summary

---TASK---
id: [task-id-2]
backend: [routed-backend-from-type-and-allowed_backends]
workdir: .
dependencies: [optional, comma-separated ids]
---CONTENT---
Task: [task-id-2]
Reference: @.claude/specs/{feature_name}/dev-plan.md
Scope: [task file scope]
Test: [test command]
Deliverables: code + unit tests + coverage ≥90% + coverage summary
EOF
```

  - **Note**: Use `workdir: .` (current directory) for all tasks unless a specific subdirectory is required
  - Execute independent tasks concurrently; serialize conflicting ones; track coverage reports
  - Backend is routed deterministically based on task `type`; no manual intervention needed

- **Step 5: Coverage Validation**
  - Validate each task's coverage:
    - All ≥90% → pass
    - Any <90% → request more tests (max 2 rounds)

- **Step 6: Completion Summary**
  - Provide completed task list, coverage per task, key file changes
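The routing rules in Step 4 are simple enough to express directly. A minimal sketch of the intended selection logic, assuming a task `type` string and an `allowed_backends` list (illustrative only; the wrapper's actual implementation is not shown in this file):

```python
PREFERRED = {"default": "codex", "ui": "gemini", "quick-fix": "claude"}
PRIORITY = ["codex", "claude", "gemini"]  # fallback order when the preferred backend is not allowed

def route_backend(task_type: str, allowed_backends: list[str], codex_only: bool = False) -> str:
    """Deterministically pick a backend for one task from its type."""
    if codex_only:                      # user chose the "codex only" option
        return "codex"
    preferred = PREFERRED.get(task_type or "default", "codex")
    if preferred in allowed_backends:
        return preferred
    for backend in PRIORITY:            # forced fallback, surfaced to the user in Step 3
        if backend in allowed_backends:
            return backend
    raise RuntimeError("no allowed backend available")
```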
**Error Handling**

- **codeagent-wrapper failure**: Retry once with same input; if still fails, log error and ask user for guidance
- **Insufficient coverage (<90%)**: Request more tests from the failed task (max 2 rounds); if still fails, report to user
- **Dependency conflicts**:
  - Circular dependencies: codeagent-wrapper will detect and fail with error; revise task breakdown to remove cycles
  - Missing dependencies: Ensure all task IDs referenced in `dependencies` field exist
- **Parallel execution timeout**: Individual tasks time out after 2 hours (configurable via CODEX_TIMEOUT); failed tasks can be retried individually
- **Backend unavailable**: If a routed backend is unavailable, fall back to another backend in `allowed_backends` (priority: codex → claude → gemini); if none works, fail with a clear error message

**Quality Standards**

- Code coverage ≥90%
- Tasks based on natural functional boundaries (typically 2-5)
- Each task has exactly one `type: default|ui|quick-fix`
- Backend routed by `type`: `default`→codex, `ui`→gemini, `quick-fix`→claude (with allowed_backends fallback)
- Documentation must be minimal yet actionable
- No verbose implementations; only essential code

**Communication Style**

- Be direct and concise
- Report progress at each workflow step
- Highlight blockers immediately
- Provide actionable next steps when coverage fails
- Prioritize speed via parallelization while enforcing coverage validation
docs/HOOKS.md (197 lines removed)
@@ -1,197 +0,0 @@
# Claude Code Hooks Guide

Hooks are shell scripts or commands that execute in response to Claude Code events.

## Available Hook Types

### 1. UserPromptSubmit
Runs after the user submits a prompt, before Claude processes it.

**Use cases:**
- Auto-activate skills based on keywords
- Inject additional context
- Log user requests

### 2. PostToolUse
Runs after Claude uses a tool.

**Use cases:**
- Validate tool outputs
- Run additional checks (linting, formatting)
- Log tool usage

### 3. Stop
Runs when the Claude Code session ends.

**Use cases:**
- Clean up temporary files
- Generate session reports
- Commit changes automatically

## Configuration

Hooks are configured in `.claude/settings.json`:

```json
{
  "hooks": {
    "UserPromptSubmit": [
      {
        "hooks": [
          {
            "type": "command",
            "command": "$CLAUDE_PROJECT_DIR/hooks/skill-activation-prompt.sh"
          }
        ]
      }
    ],
    "PostToolUse": [
      {
        "hooks": [
          {
            "type": "command",
            "command": "$CLAUDE_PROJECT_DIR/hooks/post-tool-check.sh"
          }
        ]
      }
    ]
  }
}
```
## Creating Custom Hooks

### Example: Pre-Commit Hook

**File:** `hooks/pre-commit.sh`

```bash
#!/bin/bash
set -e

# Get staged files
STAGED_FILES=$(git diff --cached --name-only --diff-filter=ACM)

# Run tests on Go files
GO_FILES=$(echo "$STAGED_FILES" | grep '\.go$' || true)
if [ -n "$GO_FILES" ]; then
  go test ./... -short || exit 1
fi

# Validate JSON files
JSON_FILES=$(echo "$STAGED_FILES" | grep '\.json$' || true)
if [ -n "$JSON_FILES" ]; then
  for file in $JSON_FILES; do
    jq empty "$file" || exit 1
  done
fi

echo "✅ Pre-commit checks passed"
```

**Register in settings.json:**

```json
{
  "hooks": {
    "PostToolUse": [
      {
        "hooks": [
          {
            "type": "command",
            "command": "$CLAUDE_PROJECT_DIR/hooks/pre-commit.sh"
          }
        ]
      }
    ]
  }
}
```
### Example: Auto-Format Hook

**File:** `hooks/auto-format.sh`

```bash
#!/bin/bash

# Format Go files
find . -name "*.go" -exec gofmt -w {} \;

# Format JSON files (jq writes to stdout, so rewrite each file via a temp file)
find . -name "*.json" -print0 | while IFS= read -r -d '' file; do
  jq --indent 2 . "$file" > "$file.tmp" && mv "$file.tmp" "$file"
done

echo "✅ Files formatted"
```
## Environment Variables

Hooks have access to:
- `$CLAUDE_PROJECT_DIR` - Project root directory
- `$PWD` - Current working directory
- All shell environment variables
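In addition to environment variables, event hooks receive a JSON payload on stdin (both the configuration example above and the bundled hook scripts rely on this). A minimal Python skeleton combining the two, as a sketch only — the exact payload fields depend on the hook type and the `prompt` field used here is an assumption:

```python
#!/usr/bin/env python3
"""Sketch of a hook that reads its JSON payload from stdin and uses $CLAUDE_PROJECT_DIR."""
import json
import os
import sys

project_dir = os.environ.get("CLAUDE_PROJECT_DIR", ".")

try:
    payload = json.loads(sys.stdin.read() or "{}")
except json.JSONDecodeError:
    payload = {}

# Keep the work cheap here: slow hooks block Claude Code.
prompt = payload.get("prompt", "")          # field name assumed; varies by hook type
print(f"[hook] project={project_dir} prompt_length={len(prompt)}")
sys.exit(0)                                  # non-zero would signal failure
```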
## Best Practices

1. **Keep hooks fast** - Slow hooks block Claude Code
2. **Handle errors gracefully** - Return non-zero on failure
3. **Use absolute paths** - Reference `$CLAUDE_PROJECT_DIR`
4. **Make scripts executable** - `chmod +x hooks/script.sh`
5. **Test independently** - Run hooks manually first
6. **Document behavior** - Add comments explaining logic

## Debugging Hooks

Enable verbose logging:

```bash
# Add to your hook
set -x  # Print commands
set -e  # Exit on error
```

Test manually:

```bash
cd /path/to/project
./hooks/your-hook.sh
echo $?  # Check exit code
```

## Built-in Hooks

This repository includes:

| Hook | File | Purpose |
|------|------|---------|
| Skill Activation | `skill-activation-prompt.sh` | Auto-suggest skills |
| Pre-commit | `pre-commit.sh` | Code quality checks |

## Disabling Hooks

Remove the hook configuration from `.claude/settings.json` or set an empty array:

```json
{
  "hooks": {
    "UserPromptSubmit": []
  }
}
```

## Troubleshooting

**Hook not running?**
- Check `.claude/settings.json` syntax
- Verify the script is executable: `ls -l hooks/`
- Check that the script path is correct

**Hook failing silently?**
- Add `set -e` to the script
- Check exit codes: `echo $?`
- Add logging: `echo "debug" >> /tmp/hook.log`

## Further Reading

- [Claude Code Hooks Documentation](https://docs.anthropic.com/claude-code/hooks)
- [Bash Scripting Guide](https://www.gnu.org/software/bash/manual/)
@@ -1,348 +0,0 @@
# Plugin System Guide

> Native Claude Code plugin support for modular workflow installation

## 🎯 Overview

This repository provides 4 ready-to-use Claude Code plugins that can be installed individually or as a complete suite.

## 📦 Available Plugins

### 1. bmad-agile-workflow

**Complete BMAD methodology with 6 specialized agents**

**Commands**:
- `/bmad-pilot` - Full agile workflow orchestration

**Agents**:
- `bmad-po` - Product Owner (Sarah)
- `bmad-architect` - System Architect (Winston)
- `bmad-sm` - Scrum Master (Mike)
- `bmad-dev` - Developer (Alex)
- `bmad-review` - Code Reviewer
- `bmad-qa` - QA Engineer (Emma)
- `bmad-orchestrator` - Main orchestrator

**Use for**: Enterprise projects, complex features, full agile process

### 2. requirements-driven-workflow

**Streamlined requirements-to-code workflow**

**Commands**:
- `/requirements-pilot` - Requirements-driven development flow

**Agents**:
- `requirements-generate` - Requirements generation
- `requirements-code` - Code implementation
- `requirements-review` - Code review
- `requirements-testing` - Testing strategy

**Use for**: Quick prototyping, simple features, rapid development

### 3. development-essentials

**Core development slash commands**

**Commands**:
- `/code` - Direct implementation
- `/debug` - Systematic debugging
- `/test` - Testing strategy
- `/optimize` - Performance tuning
- `/bugfix` - Bug resolution
- `/refactor` - Code improvement
- `/review` - Code validation
- `/ask` - Technical consultation
- `/docs` - Documentation
- `/think` - Advanced analysis

**Agents**:
- `code` - Code implementation
- `bugfix` - Bug fixing
- `debug` - Debugging
- `develop` - General development

**Use for**: Daily coding tasks, quick implementations

### 4. advanced-ai-agents

**GPT-5 deep reasoning integration**

**Commands**: None (agent-only)

**Agents**:
- `gpt5` - Deep reasoning and analysis

**Use for**: Complex architectural decisions, strategic planning
## 🚀 Installation Methods

### Method 1: Plugin Commands (Recommended)

```bash
# List all available plugins
/plugin list

# Get detailed information about a plugin
/plugin info bmad-agile-workflow

# Install a specific plugin
/plugin install bmad-agile-workflow

# Install all plugins
/plugin install bmad-agile-workflow
/plugin install requirements-driven-workflow
/plugin install development-essentials
/plugin install advanced-ai-agents

# Remove an installed plugin
/plugin remove development-essentials
```

### Method 2: Repository Reference

```bash
# Install from GitHub repository
/plugin marketplace add cexll/myclaude
```

This will present all available plugins from the repository.

### Method 3: Make Commands

For traditional installation or selective deployment:

```bash
# Install everything
make install

# Deploy specific workflows
make deploy-bmad          # BMAD workflow only
make deploy-requirements  # Requirements workflow only
make deploy-commands      # All slash commands
make deploy-agents        # All agents

# Deploy everything
make deploy-all

# View all options
make help
```

### Method 4: Manual Installation

Copy files to Claude Code configuration directories:

**Commands**:
```bash
cp bmad-agile-workflow/commands/*.md ~/.config/claude/commands/
cp requirements-driven-workflow/commands/*.md ~/.config/claude/commands/
cp development-essentials/commands/*.md ~/.config/claude/commands/
```

**Agents**:
```bash
cp bmad-agile-workflow/agents/*.md ~/.config/claude/agents/
cp requirements-driven-workflow/agents/*.md ~/.config/claude/agents/
cp development-essentials/agents/*.md ~/.config/claude/agents/
cp advanced-ai-agents/agents/*.md ~/.config/claude/agents/
```

**Output Styles** (optional):
```bash
cp output-styles/*.md ~/.config/claude/output-styles/
```
## 📋 Plugin Configuration

Plugins are defined in `.claude-plugin/marketplace.json` following the Claude Code plugin specification.

### Plugin Metadata Structure

```json
{
  "name": "plugin-name",
  "displayName": "Human Readable Name",
  "description": "Plugin description",
  "version": "1.0.0",
  "author": "Author Name",
  "category": "workflow|development|analysis",
  "keywords": ["keyword1", "keyword2"],
  "commands": ["command1", "command2"],
  "agents": ["agent1", "agent2"]
}
```

## 🔧 Plugin Management

### Check Installed Plugins

```bash
/plugin list
```

Shows all installed plugins with their status.

### Plugin Information

```bash
/plugin info <plugin-name>
```

Displays detailed information:
- Description
- Version
- Commands provided
- Agents included
- Author and keywords

### Update Plugins

Plugins are updated when you pull the latest repository changes:

```bash
git pull origin main
make install
```

### Uninstall Plugins

```bash
/plugin remove <plugin-name>
```

Or manually remove files:

```bash
# Remove commands
rm ~/.config/claude/commands/<command-name>.md

# Remove agents
rm ~/.config/claude/agents/<agent-name>.md
```
## 🎯 Plugin Selection Guide

### Install Everything (Recommended for New Users)

```bash
make install
```

Provides complete functionality with all workflows and commands.

### Selective Installation

**For Agile Teams**:
```bash
/plugin install bmad-agile-workflow
```

**For Rapid Development**:
```bash
/plugin install requirements-driven-workflow
/plugin install development-essentials
```

**For Individual Developers**:
```bash
/plugin install development-essentials
/plugin install advanced-ai-agents
```

**For Code Quality Focus**:
```bash
/plugin install development-essentials  # Includes /review
/plugin install bmad-agile-workflow     # Includes bmad-review
```

## 📁 Directory Structure

```
myclaude/
├── .claude-plugin/
│   └── marketplace.json          # Plugin registry
├── bmad-agile-workflow/
│   ├── commands/
│   │   └── bmad-pilot.md
│   └── agents/
│       ├── bmad-po.md
│       ├── bmad-architect.md
│       ├── bmad-sm.md
│       ├── bmad-dev.md
│       ├── bmad-review.md
│       ├── bmad-qa.md
│       └── bmad-orchestrator.md
├── requirements-driven-workflow/
│   ├── commands/
│   │   └── requirements-pilot.md
│   └── agents/
│       ├── requirements-generate.md
│       ├── requirements-code.md
│       ├── requirements-review.md
│       └── requirements-testing.md
├── development-essentials/
│   ├── commands/
│   │   ├── code.md
│   │   ├── debug.md
│   │   ├── test.md
│   │   └── ... (more commands)
│   └── agents/
│       ├── code.md
│       ├── bugfix.md
│       ├── debug.md
│       └── develop.md
├── advanced-ai-agents/
│   └── agents/
│       └── gpt5.md
└── output-styles/
    └── bmad-phase-context.md
```

## 🔄 Plugin Dependencies

**No Dependencies**: All plugins work independently

**Complementary Combinations**:
- BMAD + Advanced Agents (enhanced reviews)
- Requirements + Development Essentials (complete toolkit)
- All four plugins (full suite)

## 🛠️ Makefile Reference

```bash
# Installation
make install              # Install all plugins
make deploy-all           # Deploy all configurations

# Selective Deployment
make deploy-bmad          # BMAD workflow only
make deploy-requirements  # Requirements workflow only
make deploy-commands      # All slash commands only
make deploy-agents        # All agents only

# Testing
make test-bmad            # Test BMAD workflow
make test-requirements    # Test Requirements workflow

# Cleanup
make clean                # Remove generated artifacts
make help                 # Show all available commands
```

## 📚 Related Documentation

- **[BMAD Workflow](BMAD-WORKFLOW.md)** - Complete BMAD guide
- **[Requirements Workflow](REQUIREMENTS-WORKFLOW.md)** - Lightweight workflow guide
- **[Development Commands](DEVELOPMENT-COMMANDS.md)** - Command reference
- **[Quick Start Guide](QUICK-START.md)** - Get started quickly

## 🔗 External Resources

- **[Claude Code Plugin Docs](https://docs.claude.com/en/docs/claude-code/plugins)** - Official plugin documentation
- **[Claude Code CLI](https://claude.ai/code)** - Claude Code interface

---

**Modular Installation** - Install only what you need, when you need it.
@@ -1,326 +0,0 @@
# Quick Start Guide

> Get started with Claude Code Multi-Agent Workflow System in 5 minutes

## 🚀 Installation (2 minutes)

### Option 1: Plugin System (Fastest)

```bash
# Install everything with one command
/plugin marketplace add cexll/myclaude
```

### Option 2: Make Install

```bash
git clone https://github.com/cexll/myclaude.git
cd myclaude
make install
```

### Option 3: Selective Install

```bash
# Install only what you need
/plugin install bmad-agile-workflow      # Full agile workflow
/plugin install development-essentials   # Daily coding commands
```

## 🎯 Your First Workflow (3 minutes)

### Try BMAD Workflow

Complete agile development automation:

```bash
/bmad-pilot "Build a simple todo list API with CRUD operations"
```

**What happens**:
1. **Product Owner** generates requirements (PRD)
2. **Architect** designs system architecture
3. **Scrum Master** creates sprint plan
4. **Developer** implements code
5. **Reviewer** performs code review
6. **QA** runs tests

All documents saved to `.claude/specs/todo-list-api/`

### Try Requirements Workflow

Fast prototyping:

```bash
/requirements-pilot "Add user authentication to existing API"
```

**What happens**:
1. Generate functional requirements
2. Implement code
3. Review implementation
4. Create tests

### Try Direct Commands

Quick coding without workflow:

```bash
# Implement a feature
/code "Add input validation for email fields"

# Debug an issue
/debug "API returns 500 on missing parameters"

# Add tests
/test "Create unit tests for validation logic"
```
## 📋 Common Use Cases

### 1. New Feature Development

**Complex Feature** (use BMAD):
```bash
/bmad-pilot "User authentication system with OAuth2, MFA, and role-based access control"
```

**Simple Feature** (use Requirements):
```bash
/requirements-pilot "Add pagination to user list endpoint"
```

**Tiny Feature** (use direct command):
```bash
/code "Add created_at timestamp to user model"
```

### 2. Bug Fixing

**Complex Bug** (use debug):
```bash
/debug "Memory leak in background job processor"
```

**Simple Bug** (use bugfix):
```bash
/bugfix "Login button not working on mobile Safari"
```

### 3. Code Quality

**Full Review**:
```bash
/review "Review authentication module for security issues"
```

**Refactoring**:
```bash
/refactor "Simplify user validation logic and remove duplication"
```

**Optimization**:
```bash
/optimize "Reduce database queries in dashboard API"
```

## 🎨 Workflow Selection Guide

```
┌─────────────────────────────────────────────────────────┐
│              Choose Your Workflow                        │
└─────────────────────────────────────────────────────────┘

Complex Business Feature + Architecture Needed
    ↓
🏢 Use BMAD Workflow
   /bmad-pilot "description"
   • 6 specialized agents
   • Quality gates (PRD ≥90, Design ≥90)
   • Complete documentation
   • Sprint planning included

────────────────────────────────────────────────────────

Clear Requirements + Fast Iteration Needed
    ↓
⚡ Use Requirements Workflow
   /requirements-pilot "description"
   • 4 phases: Requirements → Code → Review → Test
   • Quality gate (Requirements ≥90)
   • Minimal documentation
   • Direct to implementation

────────────────────────────────────────────────────────

Well-Defined Task + No Workflow Overhead
    ↓
🔧 Use Direct Commands
   /code | /debug | /test | /optimize
   • Single-purpose commands
   • Immediate execution
   • No documentation overhead
   • Perfect for daily tasks
```
## 💡 Tips for Success

### 1. Be Specific

**❌ Bad**:
```bash
/bmad-pilot "Build an app"
```

**✅ Good**:
```bash
/bmad-pilot "Build a task management API with user authentication, task CRUD,
task assignment, and real-time notifications via WebSocket"
```

### 2. Provide Context

Include relevant technical details:
```bash
/code "Add Redis caching to user profile endpoint, cache TTL 5 minutes,
invalidate on profile update"
```

### 3. Engage with Agents

During BMAD workflow, provide feedback at quality gates:

```
PO: "Here's the PRD (Score: 85/100)"
You: "Add mobile app support and offline mode requirements"
PO: "Updated PRD (Score: 94/100) ✅"
```

### 4. Review Generated Artifacts

Check documents before confirming:
- `.claude/specs/{feature}/01-product-requirements.md`
- `.claude/specs/{feature}/02-system-architecture.md`
- `.claude/specs/{feature}/03-sprint-plan.md`

### 5. Chain Commands for Complex Tasks

Break down complex work:
```bash
/ask "Best approach for implementing real-time chat"
/bmad-pilot "Real-time chat system with message history and typing indicators"
/test "Add integration tests for chat message delivery"
/docs "Document chat API endpoints and WebSocket events"
```

## 🎓 Learning Path

**Day 1**: Try direct commands
```bash
/code "simple task"
/test "add some tests"
/review "check my code"
```

**Day 2**: Try Requirements workflow
```bash
/requirements-pilot "small feature"
```

**Week 2**: Try BMAD workflow
```bash
/bmad-pilot "larger feature"
```

**Week 3**: Combine workflows
```bash
# Use BMAD for planning
/bmad-pilot "new module" --direct-dev

# Use Requirements for sprint tasks
/requirements-pilot "individual task from sprint"

# Use commands for daily work
/code "quick fix"
/test "add test"
```
## 📚 Next Steps

### Explore Documentation

- **[BMAD Workflow Guide](BMAD-WORKFLOW.md)** - Deep dive into full agile workflow
- **[Requirements Workflow Guide](REQUIREMENTS-WORKFLOW.md)** - Learn lightweight development
- **[Development Commands Reference](DEVELOPMENT-COMMANDS.md)** - All command details
- **[Plugin System Guide](PLUGIN-SYSTEM.md)** - Plugin management

### Try Advanced Features

**BMAD Options**:
```bash
# Skip testing for prototype
/bmad-pilot "prototype" --skip-tests

# Skip sprint planning for quick dev
/bmad-pilot "feature" --direct-dev

# Skip repo scan (if context exists)
/bmad-pilot "feature" --skip-scan
```

**Individual Agents**:
```bash
# Just requirements
/bmad-po "feature requirements"

# Just architecture
/bmad-architect "system design"

# Just orchestration
/bmad-orchestrator "complex project coordination"
```

### Check Quality

Run tests and validation:
```bash
make test-bmad            # Test BMAD workflow
make test-requirements    # Test Requirements workflow
```

## 🆘 Troubleshooting

**Commands not found?**
```bash
# Verify installation
/plugin list

# Reinstall if needed
make install
```

**Agents not working?**
```bash
# Check agent configuration
ls ~/.config/claude/agents/

# Redeploy agents
make deploy-agents
```

**Output styles missing?**
```bash
# Deploy output styles
cp output-styles/*.md ~/.config/claude/output-styles/
```

## 📞 Get Help

- **Issues**: [GitHub Issues](https://github.com/cexll/myclaude/issues)
- **Documentation**: [docs/](.)
- **Examples**: Check `.claude/specs/` after running workflows
- **Make Help**: Run `make help` for all commands

---

**You're ready!** Start with `/code "your first task"` and explore from there.
@@ -1,12 +0,0 @@
{
  "UserPromptSubmit": [
    {
      "hooks": [
        {
          "type": "command",
          "command": "$CLAUDE_PROJECT_DIR/hooks/skill-activation-prompt.sh"
        }
      ]
    }
  ]
}
hooks/hooks.json (new file)
@@ -0,0 +1,30 @@
{
  "description": "ClaudeKit global hooks: dangerous command blocker, spec injection, prompt logging, session review",
  "hooks": {
    "PreToolUse": [
      {
        "matcher": "Bash",
        "hooks": [
          {
            "type": "command",
            "command": "python3 ${CLAUDE_PLUGIN_ROOT}/pre-bash.py \"$CLAUDE_TOOL_INPUT\""
          },
          {
            "type": "command",
            "command": "python3 ${CLAUDE_PLUGIN_ROOT}/inject-spec.py"
          }
        ]
      }
    ],
    "UserPromptSubmit": [
      {
        "hooks": [
          {
            "type": "command",
            "command": "python3 ${CLAUDE_PLUGIN_ROOT}/log-prompt.py"
          }
        ]
      }
    ]
  }
}
hooks/inject-spec.py (new file)
@@ -0,0 +1,13 @@
#!/usr/bin/env python3
"""
Global Spec Injection Hook (DEPRECATED).

Spec injection is now handled internally by codeagent-wrapper via the
per-task `skills:` field in parallel config and the `--skills` CLI flag.

This hook is kept as a no-op for backward compatibility.
"""

import sys

sys.exit(0)
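The docstring above names the replacement mechanism but does not show it. For orientation, a per-task `skills:` line would presumably sit alongside the other task fields in a `--parallel` config; the field name comes from the docstring, while its placement and value format here are assumptions:

```
---TASK---
id: [task-id]
backend: codex
workdir: .
skills: [skill-name]    # assumed syntax; the wrapper injects the named skill's spec into this task
---CONTENT---
Task: [task-id]
...
```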
hooks/log-prompt.py (new file)
@@ -0,0 +1,55 @@
#!/usr/bin/env python3
"""
Log Prompt Hook - Record user prompts to session-specific log files.
Used for review on Stop.
Uses session-isolated logs to handle concurrency.
"""

import json
import os
import sys
from datetime import datetime
from pathlib import Path


def get_session_id() -> str:
    """Get unique session identifier."""
    return os.environ.get("CLAUDE_CODE_SSE_PORT", "default")


def write_log(prompt: str) -> None:
    """Write prompt to session log file."""
    log_dir = Path(".claude/state")
    session_id = get_session_id()
    log_file = log_dir / f"session-{session_id}.log"

    log_dir.mkdir(parents=True, exist_ok=True)

    timestamp = datetime.now().isoformat()
    entry = f"[{timestamp}] {prompt[:500]}\n"

    with open(log_file, "a", encoding="utf-8") as f:
        f.write(entry)


def main():
    input_data = ""
    if not sys.stdin.isatty():
        try:
            input_data = sys.stdin.read()
        except Exception:
            pass

    prompt = ""
    try:
        data = json.loads(input_data)
        prompt = data.get("prompt", "")
    except json.JSONDecodeError:
        prompt = input_data.strip()

    if prompt:
        write_log(prompt)


if __name__ == "__main__":
    main()
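The hooks.json description mentions a "session review" on Stop, but no Stop hook appears in this diff; if one is added later it would presumably read these logs back. A hypothetical sketch — only the log path and entry format are taken from log-prompt.py above, everything else is assumed:

```python
#!/usr/bin/env python3
"""Hypothetical Stop-time reviewer: print the prompts logged for this session."""
import os
from pathlib import Path

session_id = os.environ.get("CLAUDE_CODE_SSE_PORT", "default")  # same id scheme as log-prompt.py
log_file = Path(".claude/state") / f"session-{session_id}.log"

if log_file.exists():
    # Each entry looks like: [ISO-8601 timestamp] first 500 chars of the prompt
    for line in log_file.read_text(encoding="utf-8").splitlines():
        print(line)
```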
hooks/pre-bash.py (new file)
@@ -0,0 +1,30 @@
#!/usr/bin/env python3
"""
Pre-Bash Hook - Block dangerous commands before execution.
"""

import sys

DANGEROUS_PATTERNS = [
    'rm -rf /',
    'rm -rf ~',
    'dd if=',
    ':(){:|:&};:',
    'mkfs.',
    '> /dev/sd',
]


def main():
    command = sys.argv[1] if len(sys.argv) > 1 else ''

    for pattern in DANGEROUS_PATTERNS:
        if pattern in command:
            print(f"[CWF] BLOCKED: Dangerous command detected: {pattern}", file=sys.stderr)
            sys.exit(1)

    sys.exit(0)


if __name__ == "__main__":
    main()
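Because the blocker is plain substring matching, it is easy to sanity-check before wiring it into hooks.json; a quick manual test, assuming it is run from the repository root:

```python
# Exit code 1 means the command was blocked, 0 means it was allowed.
import subprocess

blocked = subprocess.run(["python3", "hooks/pre-bash.py", "sudo rm -rf / --no-preserve-root"])
allowed = subprocess.run(["python3", "hooks/pre-bash.py", "ls -la"])
print(blocked.returncode, allowed.returncode)  # expected: 1 0
```

Note that substring matching is deliberately coarse: it will also block harmless commands that merely contain one of the patterns as text.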
@@ -1,82 +0,0 @@
#!/bin/bash
# Example pre-commit hook
# This hook runs before git commit to validate code quality

set -e

# Get staged files
STAGED_FILES="$(git diff --cached --name-only --diff-filter=ACM)"

if [ -z "$STAGED_FILES" ]; then
  echo "No files to validate"
  exit 0
fi

echo "Running pre-commit checks..."

# Check Go files
GO_FILES="$(printf '%s\n' "$STAGED_FILES" | grep '\.go$' || true)"
if [ -n "$GO_FILES" ]; then
  echo "Checking Go files..."

  if ! command -v gofmt &> /dev/null; then
    echo "❌ gofmt not found. Please install Go (gofmt is included with the Go toolchain)."
    exit 1
  fi

  # Format check
  GO_FILE_ARGS=()
  while IFS= read -r file; do
    if [ -n "$file" ]; then
      GO_FILE_ARGS+=("$file")
    fi
  done <<< "$GO_FILES"

  if [ "${#GO_FILE_ARGS[@]}" -gt 0 ]; then
    UNFORMATTED="$(gofmt -l "${GO_FILE_ARGS[@]}")"
    if [ -n "$UNFORMATTED" ]; then
      echo "❌ The following files need formatting:"
      echo "$UNFORMATTED"
      echo "Run: gofmt -w <file>"
      exit 1
    fi
  fi

  # Run tests
  if command -v go &> /dev/null; then
    echo "Running go tests..."
    go test ./... -short || {
      echo "❌ Tests failed"
      exit 1
    }
  fi
fi

# Check JSON files
JSON_FILES="$(printf '%s\n' "$STAGED_FILES" | grep '\.json$' || true)"
if [ -n "$JSON_FILES" ]; then
  echo "Validating JSON files..."
  if ! command -v jq &> /dev/null; then
    echo "❌ jq not found. Please install jq to validate JSON files."
    exit 1
  fi
  while IFS= read -r file; do
    if [ -z "$file" ]; then
      continue
    fi
    if ! jq empty "$file" 2>/dev/null; then
      echo "❌ Invalid JSON: $file"
      exit 1
    fi
  done <<< "$JSON_FILES"
fi

# Check Markdown files
MD_FILES="$(printf '%s\n' "$STAGED_FILES" | grep '\.md$' || true)"
if [ -n "$MD_FILES" ]; then
  echo "Checking markdown files..."
  # Add markdown linting if needed
fi

echo "✅ All pre-commit checks passed"
exit 0
@@ -1,85 +0,0 @@
#!/usr/bin/env node

const fs = require("fs");
const path = require("path");

function readInput() {
  const raw = fs.readFileSync(0, "utf8").trim();
  if (!raw) return {};
  try {
    return JSON.parse(raw);
  } catch (_err) {
    return {};
  }
}

function extractPrompt(payload) {
  return (
    payload.prompt ||
    payload.text ||
    payload.userPrompt ||
    (payload.data && payload.data.prompt) ||
    ""
  ).toString();
}

function loadRules() {
  const rulesPath = path.resolve(__dirname, "../skills/skill-rules.json");
  try {
    const file = fs.readFileSync(rulesPath, "utf8");
    return JSON.parse(file);
  } catch (_err) {
    return { skills: {} };
  }
}

function matchSkill(prompt, rule, skillName) {
  const triggers = (rule && rule.promptTriggers) || {};
  const keywords = [...(triggers.keywords || []), skillName].filter(Boolean);
  const patterns = triggers.intentPatterns || [];
  const promptLower = prompt.toLowerCase();

  const keyword = keywords.find((k) => promptLower.includes(k.toLowerCase()));
  if (keyword) {
    return `matched keyword "${keyword}"`;
  }

  for (const pattern of patterns) {
    try {
      if (new RegExp(pattern, "i").test(prompt)) {
        return `matched pattern /${pattern}/`;
      }
    } catch (_err) {
      continue;
    }
  }
  return null;
}

function main() {
  const payload = readInput();
  const prompt = extractPrompt(payload);
  if (!prompt.trim()) {
    console.log(JSON.stringify({ suggestedSkills: [] }, null, 2));
    return;
  }

  const rules = loadRules();
  const suggestions = [];

  for (const [name, rule] of Object.entries(rules.skills || {})) {
    const matchReason = matchSkill(prompt, rule, name);
    if (matchReason) {
      suggestions.push({
        skill: name,
        enforcement: rule.enforcement || "suggest",
        priority: rule.priority || "normal",
        reason: matchReason
      });
    }
  }

  console.log(JSON.stringify({ suggestedSkills: suggestions }, null, 2));
}

main();
@@ -1,12 +0,0 @@
#!/usr/bin/env bash

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SCRIPT="$SCRIPT_DIR/skill-activation-prompt.js"

if command -v node >/dev/null 2>&1; then
  node "$SCRIPT" "$@" || true
else
  echo '{"suggestedSkills":[],"meta":{"warning":"node not found"}}'
fi

exit 0
@@ -1,77 +0,0 @@
#!/usr/bin/env bash

# Simple test runner for skill-activation-prompt hook.
# Each case feeds JSON to the hook and validates suggested skills.

set -uo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
HOOK_SCRIPT="$SCRIPT_DIR/skill-activation-prompt.sh"

parse_skills() {
  node -e 'const data = JSON.parse(require("fs").readFileSync(0, "utf8")); const skills = (data.suggestedSkills || []).map(s => s.skill); console.log(skills.join(" "));'
}

run_case() {
  local name="$1"
  local input="$2"
  shift 2
  local expected=("$@")

  local output skills
  output="$("$HOOK_SCRIPT" <<<"$input")"
  skills="$(printf "%s" "$output" | parse_skills)"

  local pass=0
  if [[ ${#expected[@]} -eq 1 && ${expected[0]} == "none" ]]; then
    [[ -z "$skills" ]] && pass=1
  else
    pass=1
    for need in "${expected[@]}"; do
      if [[ " $skills " != *" $need "* ]]; then
        pass=0
        break
      fi
    done
  fi

  if [[ $pass -eq 1 ]]; then
    echo "PASS: $name"
  else
    echo "FAIL: $name"
    echo "  input: $input"
    echo "  expected skills: ${expected[*]}"
    echo "  actual skills: ${skills:-<empty>}"
    return 1
  fi
}

main() {
  local status=0

  run_case "keyword 'issue' => gh-workflow" \
    '{"prompt":"Please open an issue for this bug"}' \
    "gh-workflow" || status=1

  run_case "keyword 'codex' => codex" \
    '{"prompt":"codex please handle this change"}' \
    "codex" || status=1

  run_case "no matching keywords => none" \
    '{"prompt":"Just saying hello"}' \
    "none" || status=1

  run_case "multiple keywords => codex & gh-workflow" \
    '{"prompt":"codex refactor then open an issue"}' \
    "codex" "gh-workflow" || status=1

  if [[ $status -eq 0 ]]; then
    echo "All tests passed."
  else
    echo "Some tests failed."
  fi

  exit "$status"
}

main "$@"
Some files were not shown because too many files have changed in this diff.