mirror of
https://github.com/cexll/myclaude.git
synced 2026-03-02 15:23:16 +08:00
Compare commits
13 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
08877af530 | ||
|
|
683409464c | ||
|
|
62309d1429 | ||
|
|
33a94d2bc4 | ||
|
|
b204ca94e2 | ||
|
|
a39bf72bc2 | ||
|
|
f43244ec3e | ||
|
|
4c25dd8d2f | ||
|
|
19d411a6a2 | ||
|
|
791bd03724 | ||
|
|
8252b67567 | ||
|
|
207d3c5436 | ||
|
|
5fe8c24f55 |
102
CHANGELOG.md
102
CHANGELOG.md
@@ -47,7 +47,7 @@ All notable changes to this project will be documented in this file.
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
- support `npx github:cexll/myclaude` for installation and execution
|
||||
- support `npx github:stellarlinkco/myclaude` for installation and execution
|
||||
- default module changed from `dev` to `do`
|
||||
|
||||
### 🚜 Refactor
|
||||
@@ -138,7 +138,7 @@ All notable changes to this project will be documented in this file.
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
|
||||
- read GEMINI_MODEL from ~/.gemini/.env ([#131](https://github.com/cexll/myclaude/issues/131))
|
||||
- read GEMINI_MODEL from ~/.gemini/.env ([#131](https://github.com/stellarlinkco/myclaude/issues/131))
|
||||
|
||||
- validate non-empty output message before printing
|
||||
|
||||
@@ -159,7 +159,7 @@ All notable changes to this project will be documented in this file.
|
||||
|
||||
- update release workflow build path for new directory structure
|
||||
|
||||
- write PATH config to both profile and rc files ([#128](https://github.com/cexll/myclaude/issues/128))
|
||||
- write PATH config to both profile and rc files ([#128](https://github.com/stellarlinkco/myclaude/issues/128))
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
@@ -184,9 +184,9 @@ All notable changes to this project will be documented in this file.
|
||||
### 📚 Documentation
|
||||
|
||||
|
||||
- update 'Agent Hierarchy' model for frontend-ui-ux-engineer and document-writer in README ([#127](https://github.com/cexll/myclaude/issues/127))
|
||||
- update 'Agent Hierarchy' model for frontend-ui-ux-engineer and document-writer in README ([#127](https://github.com/stellarlinkco/myclaude/issues/127))
|
||||
|
||||
- update mappings for frontend-ui-ux-engineer and document-writer in README ([#126](https://github.com/cexll/myclaude/issues/126))
|
||||
- update mappings for frontend-ui-ux-engineer and document-writer in README ([#126](https://github.com/stellarlinkco/myclaude/issues/126))
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
@@ -205,7 +205,7 @@ All notable changes to this project will be documented in this file.
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
|
||||
- remove extraneous dash arg for opencode stdin mode ([#124](https://github.com/cexll/myclaude/issues/124))
|
||||
- remove extraneous dash arg for opencode stdin mode ([#124](https://github.com/stellarlinkco/myclaude/issues/124))
|
||||
|
||||
### 💼 Other
|
||||
|
||||
@@ -218,7 +218,7 @@ All notable changes to this project will be documented in this file.
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
|
||||
- correct default models for oracle and librarian agents ([#120](https://github.com/cexll/myclaude/issues/120))
|
||||
- correct default models for oracle and librarian agents ([#120](https://github.com/stellarlinkco/myclaude/issues/120))
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
@@ -231,7 +231,7 @@ All notable changes to this project will be documented in this file.
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
|
||||
- filter codex 0.84.0 stderr noise logs ([#122](https://github.com/cexll/myclaude/issues/122))
|
||||
- filter codex 0.84.0 stderr noise logs ([#122](https://github.com/stellarlinkco/myclaude/issues/122))
|
||||
|
||||
- filter codex stderr noise logs
|
||||
|
||||
@@ -256,11 +256,11 @@ All notable changes to this project will be documented in this file.
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
|
||||
- propagate SkipPermissions to parallel tasks ([#113](https://github.com/cexll/myclaude/issues/113))
|
||||
- propagate SkipPermissions to parallel tasks ([#113](https://github.com/stellarlinkco/myclaude/issues/113))
|
||||
|
||||
- add timeout for Windows process termination
|
||||
|
||||
- reject dash as workdir parameter ([#118](https://github.com/cexll/myclaude/issues/118))
|
||||
- reject dash as workdir parameter ([#118](https://github.com/stellarlinkco/myclaude/issues/118))
|
||||
|
||||
### 📚 Documentation
|
||||
|
||||
@@ -315,14 +315,14 @@ All notable changes to this project will be documented in this file.
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
|
||||
- 修复 Gemini init 事件 session_id 未提取的问题 ([#111](https://github.com/cexll/myclaude/issues/111))
|
||||
- 修复 Gemini init 事件 session_id 未提取的问题 ([#111](https://github.com/stellarlinkco/myclaude/issues/111))
|
||||
|
||||
- fix codeagent skill TaskOutput
|
||||
|
||||
### 💼 Other
|
||||
|
||||
|
||||
- Merge branch 'master' of github.com:cexll/myclaude
|
||||
- Merge branch 'master' of github.com:stellarlinkco/myclaude
|
||||
|
||||
- add test-cases skill
|
||||
|
||||
@@ -339,7 +339,7 @@ All notable changes to this project will be documented in this file.
|
||||
### 💼 Other
|
||||
|
||||
|
||||
- 修复 Windows 后端退出:taskkill 结束进程树 + turn.completed 支持 ([#108](https://github.com/cexll/myclaude/issues/108))
|
||||
- 修复 Windows 后端退出:taskkill 结束进程树 + turn.completed 支持 ([#108](https://github.com/stellarlinkco/myclaude/issues/108))
|
||||
|
||||
## [5.4.3] - 2026-01-06
|
||||
|
||||
@@ -347,7 +347,7 @@ All notable changes to this project will be documented in this file.
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
|
||||
- support model parameter for all backends, auto-inject from settings ([#105](https://github.com/cexll/myclaude/issues/105))
|
||||
- support model parameter for all backends, auto-inject from settings ([#105](https://github.com/stellarlinkco/myclaude/issues/105))
|
||||
|
||||
### 📚 Documentation
|
||||
|
||||
@@ -367,7 +367,7 @@ All notable changes to this project will be documented in this file.
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
|
||||
- replace setx with reg add to avoid 1024-char PATH truncation ([#101](https://github.com/cexll/myclaude/issues/101))
|
||||
- replace setx with reg add to avoid 1024-char PATH truncation ([#101](https://github.com/stellarlinkco/myclaude/issues/101))
|
||||
|
||||
## [5.4.1] - 2025-12-26
|
||||
|
||||
@@ -375,21 +375,21 @@ All notable changes to this project will be documented in this file.
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
|
||||
- 移除未知事件格式的日志噪声 ([#96](https://github.com/cexll/myclaude/issues/96))
|
||||
- 移除未知事件格式的日志噪声 ([#96](https://github.com/stellarlinkco/myclaude/issues/96))
|
||||
|
||||
- prevent duplicate PATH entries on reinstall ([#95](https://github.com/cexll/myclaude/issues/95))
|
||||
- prevent duplicate PATH entries on reinstall ([#95](https://github.com/stellarlinkco/myclaude/issues/95))
|
||||
|
||||
### 📚 Documentation
|
||||
|
||||
|
||||
- 添加 FAQ 常见问题章节
|
||||
|
||||
- update troubleshooting with idempotent PATH commands ([#95](https://github.com/cexll/myclaude/issues/95))
|
||||
- update troubleshooting with idempotent PATH commands ([#95](https://github.com/stellarlinkco/myclaude/issues/95))
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
|
||||
- Add intelligent backend selection based on task complexity ([#61](https://github.com/cexll/myclaude/issues/61))
|
||||
- Add intelligent backend selection based on task complexity ([#61](https://github.com/stellarlinkco/myclaude/issues/61))
|
||||
|
||||
## [5.4.0] - 2025-12-24
|
||||
|
||||
@@ -404,7 +404,7 @@ All notable changes to this project will be documented in this file.
|
||||
### 🚀 Features
|
||||
|
||||
|
||||
- v5.4.0 structured execution report ([#94](https://github.com/cexll/myclaude/issues/94))
|
||||
- v5.4.0 structured execution report ([#94](https://github.com/stellarlinkco/myclaude/issues/94))
|
||||
|
||||
## [5.2.8] - 2025-12-22
|
||||
|
||||
@@ -430,21 +430,21 @@ All notable changes to this project will be documented in this file.
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
|
||||
- allow claude backend to read env from setting.json while preventing recursion ([#92](https://github.com/cexll/myclaude/issues/92))
|
||||
- allow claude backend to read env from setting.json while preventing recursion ([#92](https://github.com/stellarlinkco/myclaude/issues/92))
|
||||
|
||||
- comprehensive security and quality improvements for PR #85 & #87 ([#90](https://github.com/cexll/myclaude/issues/90))
|
||||
- comprehensive security and quality improvements for PR #85 & #87 ([#90](https://github.com/stellarlinkco/myclaude/issues/90))
|
||||
|
||||
- Parser重复解析优化 + 严重bug修复 + PR #86兼容性 ([#88](https://github.com/cexll/myclaude/issues/88))
|
||||
- Parser重复解析优化 + 严重bug修复 + PR #86兼容性 ([#88](https://github.com/stellarlinkco/myclaude/issues/88))
|
||||
|
||||
### 💼 Other
|
||||
|
||||
|
||||
- Improve backend termination after message and extend timeout ([#86](https://github.com/cexll/myclaude/issues/86))
|
||||
- Improve backend termination after message and extend timeout ([#86](https://github.com/stellarlinkco/myclaude/issues/86))
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
|
||||
- add millisecond-precision timestamps to all log entries ([#91](https://github.com/cexll/myclaude/issues/91))
|
||||
- add millisecond-precision timestamps to all log entries ([#91](https://github.com/stellarlinkco/myclaude/issues/91))
|
||||
|
||||
## [5.2.6] - 2025-12-19
|
||||
|
||||
@@ -452,16 +452,16 @@ All notable changes to this project will be documented in this file.
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
|
||||
- filter noisy stderr output from gemini backend ([#83](https://github.com/cexll/myclaude/issues/83))
|
||||
- filter noisy stderr output from gemini backend ([#83](https://github.com/stellarlinkco/myclaude/issues/83))
|
||||
|
||||
- 修復 wsl install.sh 格式問題 ([#78](https://github.com/cexll/myclaude/issues/78))
|
||||
- 修復 wsl install.sh 格式問題 ([#78](https://github.com/stellarlinkco/myclaude/issues/78))
|
||||
|
||||
### 💼 Other
|
||||
|
||||
|
||||
- update all readme
|
||||
|
||||
- BMADh和Requirements-Driven支持根据语义生成对应的文档 ([#82](https://github.com/cexll/myclaude/issues/82))
|
||||
- BMADh和Requirements-Driven支持根据语义生成对应的文档 ([#82](https://github.com/stellarlinkco/myclaude/issues/82))
|
||||
|
||||
## [5.2.5] - 2025-12-17
|
||||
|
||||
@@ -469,7 +469,7 @@ All notable changes to this project will be documented in this file.
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
|
||||
- 修复多 backend 并行日志 PID 混乱并移除包装格式 ([#74](https://github.com/cexll/myclaude/issues/74)) ([#76](https://github.com/cexll/myclaude/issues/76))
|
||||
- 修复多 backend 并行日志 PID 混乱并移除包装格式 ([#74](https://github.com/stellarlinkco/myclaude/issues/74)) ([#76](https://github.com/stellarlinkco/myclaude/issues/76))
|
||||
|
||||
- replace "Codex" to "codeagent" in dev-plan-generator subagent
|
||||
|
||||
@@ -480,7 +480,7 @@ All notable changes to this project will be documented in this file.
|
||||
|
||||
- Merge pull request #71 from aliceric27/master
|
||||
|
||||
- Merge branch 'cexll:master' into master
|
||||
- Merge branch 'stellarlinkco:master' into master
|
||||
|
||||
- Merge pull request #72 from changxvv/master
|
||||
|
||||
@@ -508,13 +508,13 @@ All notable changes to this project will be documented in this file.
|
||||
### 💼 Other
|
||||
|
||||
|
||||
- Merge pull request #70 from cexll/fix/prevent-codeagent-infinite-recursion
|
||||
- Merge pull request #70 from stellarlinkco/fix/prevent-codeagent-infinite-recursion
|
||||
|
||||
- Merge pull request #69 from cexll/myclaude-master-20251215-073053-338465000
|
||||
- Merge pull request #69 from stellarlinkco/myclaude-master-20251215-073053-338465000
|
||||
|
||||
- update CHANGELOG.md
|
||||
|
||||
- Merge pull request #65 from cexll/fix/issue-64-buffer-overflow
|
||||
- Merge pull request #65 from stellarlinkco/fix/issue-64-buffer-overflow
|
||||
|
||||
## [5.2.3] - 2025-12-15
|
||||
|
||||
@@ -522,7 +522,7 @@ All notable changes to this project will be documented in this file.
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
|
||||
- 修复 bufio.Scanner token too long 错误 ([#64](https://github.com/cexll/myclaude/issues/64))
|
||||
- 修复 bufio.Scanner token too long 错误 ([#64](https://github.com/stellarlinkco/myclaude/issues/64))
|
||||
|
||||
### 💼 Other
|
||||
|
||||
@@ -609,7 +609,7 @@ All notable changes to this project will be documented in this file.
|
||||
|
||||
- Merge rc/5.2 into master: v5.2.0 release improvements
|
||||
|
||||
- Merge pull request #53 from cexll/rc/5.2
|
||||
- Merge pull request #53 from stellarlinkco/rc/5.2
|
||||
|
||||
- remove docs
|
||||
|
||||
@@ -627,7 +627,7 @@ All notable changes to this project will be documented in this file.
|
||||
|
||||
- Merge branch 'master' into rc/5.2
|
||||
|
||||
- Merge pull request #52 from cexll/fix/parallel-log-path-on-startup
|
||||
- Merge pull request #52 from stellarlinkco/fix/parallel-log-path-on-startup
|
||||
|
||||
### 📚 Documentation
|
||||
|
||||
@@ -684,7 +684,7 @@ All notable changes to this project will be documented in this file.
|
||||
### 💼 Other
|
||||
|
||||
|
||||
- Merge pull request #51 from cexll/fix/channel-sync-race-conditions
|
||||
- Merge pull request #51 from stellarlinkco/fix/channel-sync-race-conditions
|
||||
|
||||
- change codex-wrapper version
|
||||
|
||||
@@ -701,7 +701,7 @@ All notable changes to this project will be documented in this file.
|
||||
### 💼 Other
|
||||
|
||||
|
||||
- Merge pull request #49 from cexll/freespace8/master
|
||||
- Merge pull request #49 from stellarlinkco/freespace8/master
|
||||
|
||||
- resolve signal handling conflict preserving testability and Windows support
|
||||
|
||||
@@ -751,7 +751,7 @@ All notable changes to this project will be documented in this file.
|
||||
### 💼 Other
|
||||
|
||||
|
||||
- Merge branch 'master' of github.com:cexll/myclaude
|
||||
- Merge branch 'master' of github.com:stellarlinkco/myclaude
|
||||
|
||||
- Merge pull request #43 from gurdasnijor/smithery/add-badge
|
||||
|
||||
@@ -795,7 +795,7 @@ All notable changes to this project will be documented in this file.
|
||||
### 💼 Other
|
||||
|
||||
|
||||
- Merge pull request #41 from cexll/fix-async-log
|
||||
- Merge pull request #41 from stellarlinkco/fix-async-log
|
||||
|
||||
- remove test case 90
|
||||
|
||||
@@ -840,7 +840,7 @@ All notable changes to this project will be documented in this file.
|
||||
### 💼 Other
|
||||
|
||||
|
||||
- Merge pull request #34 from cexll/cce-worktree-master-20251129-111802-997076000
|
||||
- Merge pull request #34 from stellarlinkco/cce-worktree-master-20251129-111802-997076000
|
||||
|
||||
- update CLAUDE.md and codex skill
|
||||
|
||||
@@ -892,7 +892,7 @@ All notable changes to this project will be documented in this file.
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
|
||||
- update repository URLs to cexll/myclaude
|
||||
- update repository URLs to stellarlinkco/myclaude
|
||||
|
||||
## [4.7-alpha1] - 2025-11-27
|
||||
|
||||
@@ -905,7 +905,7 @@ All notable changes to this project will be documented in this file.
|
||||
### 💼 Other
|
||||
|
||||
|
||||
- Merge pull request #29 from cexll/feat/codex-wrapper
|
||||
- Merge pull request #29 from stellarlinkco/feat/codex-wrapper
|
||||
|
||||
- Add codex-wrapper Go implementation
|
||||
|
||||
@@ -957,9 +957,9 @@ All notable changes to this project will be documented in this file.
|
||||
|
||||
- update codex skills model config
|
||||
|
||||
- Merge branch 'master' of github.com:cexll/myclaude
|
||||
- Merge branch 'master' of github.com:stellarlinkco/myclaude
|
||||
|
||||
- Merge pull request #24 from cexll/swe-agent/23-1763544297
|
||||
- Merge pull request #24 from stellarlinkco/swe-agent/23-1763544297
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
@@ -1025,7 +1025,7 @@ All notable changes to this project will be documented in this file.
|
||||
|
||||
- optimize codex skills
|
||||
|
||||
- Merge branch 'master' of github.com:cexll/myclaude
|
||||
- Merge branch 'master' of github.com:stellarlinkco/myclaude
|
||||
|
||||
- Rename SKILLS.md to SKILL.md
|
||||
|
||||
@@ -1062,9 +1062,9 @@ All notable changes to this project will be documented in this file.
|
||||
### 💼 Other
|
||||
|
||||
|
||||
- Merge branch 'master' of github.com:cexll/myclaude
|
||||
- Merge branch 'master' of github.com:stellarlinkco/myclaude
|
||||
|
||||
- Merge pull request #18 from cexll/swe-agent/17-1760969135
|
||||
- Merge pull request #18 from stellarlinkco/swe-agent/17-1760969135
|
||||
|
||||
- update requirements clarity
|
||||
|
||||
@@ -1092,13 +1092,13 @@ All notable changes to this project will be documented in this file.
|
||||
### 💼 Other
|
||||
|
||||
|
||||
- Merge pull request #15 from cexll/swe-agent/13-1760944712
|
||||
- Merge pull request #15 from stellarlinkco/swe-agent/13-1760944712
|
||||
|
||||
- Fix #13: Clean up redundant README files
|
||||
|
||||
- Optimize README structure - Solution A (modular)
|
||||
|
||||
- Merge pull request #14 from cexll/swe-agent/12-1760944588
|
||||
- Merge pull request #14 from stellarlinkco/swe-agent/12-1760944588
|
||||
|
||||
- Fix #12: Update Makefile install paths for new directory structure
|
||||
|
||||
@@ -1108,7 +1108,7 @@ All notable changes to this project will be documented in this file.
|
||||
### 💼 Other
|
||||
|
||||
|
||||
- Merge pull request #11 from cexll/swe-agent/10-1760752533
|
||||
- Merge pull request #11 from stellarlinkco/swe-agent/10-1760752533
|
||||
|
||||
- Fix marketplace metadata references
|
||||
|
||||
|
||||
6
Makefile
6
Makefile
@@ -7,12 +7,12 @@
|
||||
help:
|
||||
@echo "Claude Code Multi-Agent Workflow - Quick Deployment"
|
||||
@echo ""
|
||||
@echo "Recommended installation: npx github:cexll/myclaude"
|
||||
@echo "Recommended installation: npx github:stellarlinkco/myclaude"
|
||||
@echo ""
|
||||
@echo "Usage: make [target]"
|
||||
@echo ""
|
||||
@echo "Targets:"
|
||||
@echo " install - LEGACY: install all configurations (prefer npx github:cexll/myclaude)"
|
||||
@echo " install - LEGACY: install all configurations (prefer npx github:stellarlinkco/myclaude)"
|
||||
@echo " deploy-bmad - Deploy BMAD workflow (bmad-pilot)"
|
||||
@echo " deploy-requirements - Deploy Requirements workflow (requirements-pilot)"
|
||||
@echo " deploy-essentials - Deploy Development Essentials workflow"
|
||||
@@ -40,7 +40,7 @@ OUTPUT_STYLES_DIR = output-styles
|
||||
# Install all configurations
|
||||
install: deploy-all
|
||||
@echo "⚠️ LEGACY PATH: make install will be removed in future versions."
|
||||
@echo " Prefer: npx github:cexll/myclaude"
|
||||
@echo " Prefer: npx github:stellarlinkco/myclaude"
|
||||
@echo "✅ Installation complete!"
|
||||
|
||||
# Deploy BMAD workflow
|
||||
|
||||
@@ -5,7 +5,7 @@ Claude Code plugins for this repo are defined in `.claude-plugin/marketplace.jso
|
||||
## Install
|
||||
|
||||
```bash
|
||||
/plugin marketplace add cexll/myclaude
|
||||
/plugin marketplace add stellarlinkco/myclaude
|
||||
/plugin list
|
||||
```
|
||||
|
||||
|
||||
26
README.md
26
README.md
@@ -2,17 +2,17 @@
|
||||
|
||||
# Claude Code Multi-Agent Workflow System
|
||||
|
||||
[](https://smithery.ai/skills?ns=cexll&utm_source=github&utm_medium=badge)
|
||||
[](https://smithery.ai/skills?ns=stellarlinkco&utm_source=github&utm_medium=badge)
|
||||
[](https://www.gnu.org/licenses/agpl-3.0)
|
||||
[](https://claude.ai/code)
|
||||
[](https://github.com/cexll/myclaude)
|
||||
[](https://github.com/stellarlinkco/myclaude)
|
||||
|
||||
> AI-powered development automation with multi-backend execution (Codex/Claude/Gemini/OpenCode)
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
npx github:cexll/myclaude
|
||||
npx github:stellarlinkco/myclaude
|
||||
```
|
||||
|
||||
## Modules Overview
|
||||
@@ -30,7 +30,7 @@ npx github:cexll/myclaude
|
||||
|
||||
### Available Skills
|
||||
|
||||
Individual skills can be installed separately via `npx github:cexll/myclaude --list` (skills bundled in modules like do, omo, sparv are listed above):
|
||||
Individual skills can be installed separately via `npx github:stellarlinkco/myclaude --list` (skills bundled in modules like do, omo, sparv are listed above):
|
||||
|
||||
| Skill | Description |
|
||||
|-------|-------------|
|
||||
@@ -48,16 +48,16 @@ Individual skills can be installed separately via `npx github:cexll/myclaude --l
|
||||
|
||||
```bash
|
||||
# Interactive installer (recommended)
|
||||
npx github:cexll/myclaude
|
||||
npx github:stellarlinkco/myclaude
|
||||
|
||||
# List installable items (modules / skills / wrapper)
|
||||
npx github:cexll/myclaude --list
|
||||
npx github:stellarlinkco/myclaude --list
|
||||
|
||||
# Detect installed modules and update from GitHub
|
||||
npx github:cexll/myclaude --update
|
||||
npx github:stellarlinkco/myclaude --update
|
||||
|
||||
# Custom install directory / overwrite
|
||||
npx github:cexll/myclaude --install-dir ~/.claude --force
|
||||
npx github:stellarlinkco/myclaude --install-dir ~/.claude --force
|
||||
```
|
||||
|
||||
`--update` detects already installed modules in the target install dir (defaults to `~/.claude`, via `installed_modules.json` when present) and updates them from GitHub (latest release) by overwriting the module files.
|
||||
@@ -132,13 +132,13 @@ Edit `config.json` to enable/disable modules:
|
||||
**Codex wrapper not found:**
|
||||
```bash
|
||||
# Select: codeagent-wrapper
|
||||
npx github:cexll/myclaude
|
||||
npx github:stellarlinkco/myclaude
|
||||
```
|
||||
|
||||
**Module not loading:**
|
||||
```bash
|
||||
cat ~/.claude/installed_modules.json
|
||||
npx github:cexll/myclaude --force
|
||||
npx github:stellarlinkco/myclaude --force
|
||||
```
|
||||
|
||||
**Backend CLI errors:**
|
||||
@@ -156,7 +156,7 @@ which gemini && gemini --version
|
||||
| Gemini can't read .gitignore files | Remove from .gitignore or use different backend |
|
||||
| Codex permission denied | Set `approval_policy = "never"` in ~/.codex/config.yaml |
|
||||
|
||||
See [GitHub Issues](https://github.com/cexll/myclaude/issues) for more.
|
||||
See [GitHub Issues](https://github.com/stellarlinkco/myclaude/issues) for more.
|
||||
|
||||
## License
|
||||
|
||||
@@ -164,8 +164,8 @@ AGPL-3.0 - see [LICENSE](LICENSE)
|
||||
|
||||
### Commercial Licensing
|
||||
|
||||
For commercial use without AGPL obligations, contact: evanxian9@gmail.com
|
||||
For commercial use without AGPL obligations, contact: support@stellarlink.co
|
||||
|
||||
## Support
|
||||
|
||||
- [GitHub Issues](https://github.com/cexll/myclaude/issues)
|
||||
- [GitHub Issues](https://github.com/stellarlinkco/myclaude/issues)
|
||||
|
||||
24
README_CN.md
24
README_CN.md
@@ -2,14 +2,14 @@
|
||||
|
||||
[](https://www.gnu.org/licenses/agpl-3.0)
|
||||
[](https://claude.ai/code)
|
||||
[](https://github.com/cexll/myclaude)
|
||||
[](https://github.com/stellarlinkco/myclaude)
|
||||
|
||||
> AI 驱动的开发自动化 - 多后端执行架构 (Codex/Claude/Gemini/OpenCode)
|
||||
|
||||
## 快速开始
|
||||
|
||||
```bash
|
||||
npx github:cexll/myclaude
|
||||
npx github:stellarlinkco/myclaude
|
||||
```
|
||||
|
||||
## 模块概览
|
||||
@@ -27,7 +27,7 @@ npx github:cexll/myclaude
|
||||
|
||||
### 可用技能
|
||||
|
||||
可通过 `npx github:cexll/myclaude --list` 单独安装技能(模块内置技能如 do、omo、sparv 见上表):
|
||||
可通过 `npx github:stellarlinkco/myclaude --list` 单独安装技能(模块内置技能如 do、omo、sparv 见上表):
|
||||
|
||||
| 技能 | 描述 |
|
||||
|------|------|
|
||||
@@ -188,16 +188,16 @@ npx github:cexll/myclaude
|
||||
|
||||
```bash
|
||||
# 交互式安装器(推荐)
|
||||
npx github:cexll/myclaude
|
||||
npx github:stellarlinkco/myclaude
|
||||
|
||||
# 列出可安装项(module:* / skill:* / codeagent-wrapper)
|
||||
npx github:cexll/myclaude --list
|
||||
npx github:stellarlinkco/myclaude --list
|
||||
|
||||
# 检测已安装 modules 并从 GitHub 更新
|
||||
npx github:cexll/myclaude --update
|
||||
npx github:stellarlinkco/myclaude --update
|
||||
|
||||
# 指定安装目录 / 强制覆盖
|
||||
npx github:cexll/myclaude --install-dir ~/.claude --force
|
||||
npx github:stellarlinkco/myclaude --install-dir ~/.claude --force
|
||||
```
|
||||
|
||||
`--update` 会在目标安装目录(默认 `~/.claude`,优先读取 `installed_modules.json`)检测已安装 modules,并从 GitHub 拉取最新发布版本覆盖更新。
|
||||
@@ -244,13 +244,13 @@ npx github:cexll/myclaude --install-dir ~/.claude --force
|
||||
**Codex wrapper 未找到:**
|
||||
```bash
|
||||
# 选择:codeagent-wrapper
|
||||
npx github:cexll/myclaude
|
||||
npx github:stellarlinkco/myclaude
|
||||
```
|
||||
|
||||
**模块未加载:**
|
||||
```bash
|
||||
cat ~/.claude/installed_modules.json
|
||||
npx github:cexll/myclaude --force
|
||||
npx github:stellarlinkco/myclaude --force
|
||||
```
|
||||
|
||||
## FAQ
|
||||
@@ -261,7 +261,7 @@ npx github:cexll/myclaude --force
|
||||
| Gemini 无法读取 .gitignore 文件 | 从 .gitignore 移除或使用其他后端 |
|
||||
| Codex 权限拒绝 | 在 ~/.codex/config.yaml 设置 `approval_policy = "never"` |
|
||||
|
||||
更多问题请访问 [GitHub Issues](https://github.com/cexll/myclaude/issues)。
|
||||
更多问题请访问 [GitHub Issues](https://github.com/stellarlinkco/myclaude/issues)。
|
||||
|
||||
## 许可证
|
||||
|
||||
@@ -269,8 +269,8 @@ AGPL-3.0 - 查看 [LICENSE](LICENSE)
|
||||
|
||||
### 商业授权
|
||||
|
||||
如需商业授权(无需遵守 AGPL 义务),请联系:evanxian9@gmail.com
|
||||
如需商业授权(无需遵守 AGPL 义务),请联系:support@stellarlink.co
|
||||
|
||||
## 支持
|
||||
|
||||
- [GitHub Issues](https://github.com/cexll/myclaude/issues)
|
||||
- [GitHub Issues](https://github.com/stellarlinkco/myclaude/issues)
|
||||
|
||||
126
bin/cli.js
126
bin/cli.js
@@ -10,11 +10,13 @@ const readline = require("readline");
|
||||
const zlib = require("zlib");
|
||||
const { spawn, spawnSync } = require("child_process");
|
||||
|
||||
const REPO = { owner: "cexll", name: "myclaude" };
|
||||
const REPO = { owner: "stellarlinkco", name: "myclaude" };
|
||||
const API_HEADERS = {
|
||||
"User-Agent": "myclaude-npx",
|
||||
Accept: "application/vnd.github+json",
|
||||
};
|
||||
const WRAPPER_REQUIRED_MODULES = new Set(["do", "omo"]);
|
||||
const WRAPPER_REQUIRED_SKILLS = new Set(["dev"]);
|
||||
|
||||
function parseArgs(argv) {
|
||||
const out = {
|
||||
@@ -58,12 +60,12 @@ function printHelp() {
|
||||
"myclaude (npx installer)",
|
||||
"",
|
||||
"Usage:",
|
||||
" npx github:cexll/myclaude",
|
||||
" npx github:cexll/myclaude --list",
|
||||
" npx github:cexll/myclaude --update",
|
||||
" npx github:cexll/myclaude --install-dir ~/.claude --force",
|
||||
" npx github:cexll/myclaude uninstall",
|
||||
" npx github:cexll/myclaude uninstall --module bmad,do -y",
|
||||
" npx github:stellarlinkco/myclaude",
|
||||
" npx github:stellarlinkco/myclaude --list",
|
||||
" npx github:stellarlinkco/myclaude --update",
|
||||
" npx github:stellarlinkco/myclaude --install-dir ~/.claude --force",
|
||||
" npx github:stellarlinkco/myclaude uninstall",
|
||||
" npx github:stellarlinkco/myclaude uninstall --module bmad,do -y",
|
||||
"",
|
||||
"Options:",
|
||||
" --install-dir <path> Default: ~/.claude",
|
||||
@@ -499,9 +501,19 @@ async function updateInstalledModules(installDir, tag, config, dryRun) {
|
||||
}
|
||||
|
||||
await fs.promises.mkdir(installDir, { recursive: true });
|
||||
const installState = { wrapperInstalled: false };
|
||||
|
||||
async function ensureWrapperInstalled() {
|
||||
if (installState.wrapperInstalled) return;
|
||||
process.stdout.write("Installing codeagent-wrapper...\n");
|
||||
await runInstallSh(repoRoot, installDir, tag);
|
||||
installState.wrapperInstalled = true;
|
||||
}
|
||||
|
||||
for (const name of toUpdate) {
|
||||
if (WRAPPER_REQUIRED_MODULES.has(name)) await ensureWrapperInstalled();
|
||||
process.stdout.write(`Updating module: ${name}\n`);
|
||||
const r = await applyModule(name, config, repoRoot, installDir, true, tag);
|
||||
const r = await applyModule(name, config, repoRoot, installDir, true, tag, installState);
|
||||
upsertModuleStatus(installDir, r);
|
||||
}
|
||||
} finally {
|
||||
@@ -777,7 +789,57 @@ async function rmTree(p) {
|
||||
await fs.promises.rmdir(p, { recursive: true });
|
||||
}
|
||||
|
||||
async function applyModule(moduleName, config, repoRoot, installDir, force, tag) {
|
||||
function defaultModelsConfig() {
|
||||
return {
|
||||
default_backend: "codex",
|
||||
default_model: "gpt-4.1",
|
||||
backends: {},
|
||||
agents: {},
|
||||
};
|
||||
}
|
||||
|
||||
function mergeModuleAgentsToModels(moduleName, mod, repoRoot) {
|
||||
const moduleAgents = mod && mod.agents;
|
||||
if (!isPlainObject(moduleAgents) || !Object.keys(moduleAgents).length) return false;
|
||||
|
||||
const modelsPath = path.join(os.homedir(), ".codeagent", "models.json");
|
||||
fs.mkdirSync(path.dirname(modelsPath), { recursive: true });
|
||||
|
||||
let models;
|
||||
if (fs.existsSync(modelsPath)) {
|
||||
models = JSON.parse(fs.readFileSync(modelsPath, "utf8"));
|
||||
} else {
|
||||
const templatePath = path.join(repoRoot, "templates", "models.json.example");
|
||||
if (fs.existsSync(templatePath)) {
|
||||
models = JSON.parse(fs.readFileSync(templatePath, "utf8"));
|
||||
if (!isPlainObject(models)) models = defaultModelsConfig();
|
||||
models.agents = {};
|
||||
} else {
|
||||
models = defaultModelsConfig();
|
||||
}
|
||||
}
|
||||
|
||||
if (!isPlainObject(models)) models = defaultModelsConfig();
|
||||
if (!isPlainObject(models.agents)) models.agents = {};
|
||||
|
||||
let modified = false;
|
||||
for (const [agentName, agentCfg] of Object.entries(moduleAgents)) {
|
||||
if (!isPlainObject(agentCfg)) continue;
|
||||
const existing = models.agents[agentName];
|
||||
const canOverwrite = !isPlainObject(existing) || Object.prototype.hasOwnProperty.call(existing, "__module__");
|
||||
if (!canOverwrite) continue;
|
||||
const next = { ...agentCfg, __module__: moduleName };
|
||||
if (!deepEqual(existing, next)) {
|
||||
models.agents[agentName] = next;
|
||||
modified = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (modified) fs.writeFileSync(modelsPath, JSON.stringify(models, null, 2) + "\n", "utf8");
|
||||
return modified;
|
||||
}
|
||||
|
||||
async function applyModule(moduleName, config, repoRoot, installDir, force, tag, installState) {
|
||||
const mod = config && config.modules && config.modules[moduleName];
|
||||
if (!mod) throw new Error(`Unknown module: ${moduleName}`);
|
||||
const ops = Array.isArray(mod.operations) ? mod.operations : [];
|
||||
@@ -803,7 +865,12 @@ async function applyModule(moduleName, config, repoRoot, installDir, force, tag)
|
||||
if (cmd !== "bash install.sh") {
|
||||
throw new Error(`Refusing run_command: ${cmd || "(empty)"}`);
|
||||
}
|
||||
if (installState && installState.wrapperInstalled) {
|
||||
result.operations.push({ type, status: "success", skipped: true });
|
||||
continue;
|
||||
}
|
||||
await runInstallSh(repoRoot, installDir, tag);
|
||||
if (installState) installState.wrapperInstalled = true;
|
||||
} else {
|
||||
throw new Error(`Unsupported operation type: ${type}`);
|
||||
}
|
||||
@@ -834,6 +901,19 @@ async function applyModule(moduleName, config, repoRoot, installDir, force, tag)
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
if (mergeModuleAgentsToModels(moduleName, mod, repoRoot)) {
|
||||
result.has_agents = true;
|
||||
result.operations.push({ type: "merge_agents", status: "success" });
|
||||
}
|
||||
} catch (err) {
|
||||
result.operations.push({
|
||||
type: "merge_agents",
|
||||
status: "failed",
|
||||
error: err && err.message ? err.message : String(err),
|
||||
});
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@@ -1006,7 +1086,7 @@ async function installSelected(picks, tag, config, installDir, force, dryRun) {
|
||||
try {
|
||||
let repoRoot = repoRootFromHere();
|
||||
if (needRepo || needWrapper) {
|
||||
if (!tag) throw new Error("No tag available to download");
|
||||
if (tag) {
|
||||
const archive = path.join(tmp, "src.tgz");
|
||||
const url = `https://codeload.github.com/${REPO.owner}/${REPO.name}/tar.gz/refs/tags/${encodeURIComponent(
|
||||
tag
|
||||
@@ -1017,23 +1097,43 @@ async function installSelected(picks, tag, config, installDir, force, dryRun) {
|
||||
const extracted = path.join(tmp, "src");
|
||||
await extractTarGz(archive, extracted);
|
||||
repoRoot = extracted;
|
||||
} else {
|
||||
process.stdout.write("Offline mode: installing from local package contents.\n");
|
||||
}
|
||||
}
|
||||
|
||||
await fs.promises.mkdir(installDir, { recursive: true });
|
||||
const installState = { wrapperInstalled: false };
|
||||
|
||||
async function ensureWrapperInstalled() {
|
||||
if (installState.wrapperInstalled) return;
|
||||
process.stdout.write("Installing codeagent-wrapper...\n");
|
||||
await runInstallSh(repoRoot, installDir, tag);
|
||||
installState.wrapperInstalled = true;
|
||||
}
|
||||
|
||||
for (const p of picks) {
|
||||
if (p.kind === "wrapper") {
|
||||
process.stdout.write("Installing codeagent-wrapper...\n");
|
||||
await runInstallSh(repoRoot, installDir, tag);
|
||||
await ensureWrapperInstalled();
|
||||
continue;
|
||||
}
|
||||
if (p.kind === "module") {
|
||||
if (WRAPPER_REQUIRED_MODULES.has(p.moduleName)) await ensureWrapperInstalled();
|
||||
process.stdout.write(`Installing module: ${p.moduleName}\n`);
|
||||
const r = await applyModule(p.moduleName, config, repoRoot, installDir, force, tag);
|
||||
const r = await applyModule(
|
||||
p.moduleName,
|
||||
config,
|
||||
repoRoot,
|
||||
installDir,
|
||||
force,
|
||||
tag,
|
||||
installState
|
||||
);
|
||||
upsertModuleStatus(installDir, r);
|
||||
continue;
|
||||
}
|
||||
if (p.kind === "skill") {
|
||||
if (WRAPPER_REQUIRED_SKILLS.has(p.skillName)) await ensureWrapperInstalled();
|
||||
process.stdout.write(`Installing skill: ${p.skillName}\n`);
|
||||
await copyDirRecursive(
|
||||
path.join(repoRoot, "skills", p.skillName),
|
||||
|
||||
@@ -39,7 +39,7 @@ filter_unconventional = false
|
||||
split_commits = false
|
||||
# regex for preprocessing the commit messages
|
||||
commit_preprocessors = [
|
||||
{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](https://github.com/cexll/myclaude/issues/${2}))" },
|
||||
{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](https://github.com/stellarlinkco/myclaude/issues/${2}))" },
|
||||
]
|
||||
# regex for parsing and grouping commits
|
||||
commit_parsers = [
|
||||
|
||||
@@ -28,7 +28,7 @@ Entry point: `cmd/codeagent-wrapper/main.go` (binary: `codeagent-wrapper`).
|
||||
### Recommended (interactive installer)
|
||||
|
||||
```bash
|
||||
npx github:cexll/myclaude
|
||||
npx github:stellarlinkco/myclaude
|
||||
```
|
||||
|
||||
Select the `codeagent-wrapper` module to install.
|
||||
|
||||
@@ -28,7 +28,7 @@
|
||||
### 推荐方式(交互式安装器)
|
||||
|
||||
```bash
|
||||
npx github:cexll/myclaude
|
||||
npx github:stellarlinkco/myclaude
|
||||
```
|
||||
|
||||
选择 `codeagent-wrapper` 模块进行安装。
|
||||
|
||||
@@ -15,7 +15,7 @@ Multi-backend AI code execution wrapper supporting Codex, Claude, Gemini, and Op
|
||||
|
||||
```bash
|
||||
# Recommended: run the installer and select "codeagent-wrapper"
|
||||
npx github:cexll/myclaude
|
||||
npx github:stellarlinkco/myclaude
|
||||
|
||||
# Manual build (optional; requires repo checkout)
|
||||
cd codeagent-wrapper
|
||||
|
||||
@@ -29,6 +29,7 @@ type cliOptions struct {
|
||||
ReasoningEffort string
|
||||
Agent string
|
||||
PromptFile string
|
||||
Output string
|
||||
Skills string
|
||||
SkipPermissions bool
|
||||
Worktree bool
|
||||
@@ -135,6 +136,7 @@ func addRootFlags(fs *pflag.FlagSet, opts *cliOptions) {
|
||||
fs.StringVar(&opts.ReasoningEffort, "reasoning-effort", "", "Reasoning effort (backend-specific)")
|
||||
fs.StringVar(&opts.Agent, "agent", "", "Agent preset name (from ~/.codeagent/models.json)")
|
||||
fs.StringVar(&opts.PromptFile, "prompt-file", "", "Prompt file path")
|
||||
fs.StringVar(&opts.Output, "output", "", "Write structured JSON output to file")
|
||||
fs.StringVar(&opts.Skills, "skills", "", "Comma-separated skill names for spec injection")
|
||||
|
||||
fs.BoolVar(&opts.SkipPermissions, "skip-permissions", false, "Skip permissions prompts (also via CODEAGENT_SKIP_PERMISSIONS)")
|
||||
@@ -198,10 +200,9 @@ func runWithLoggerAndCleanup(fn func() int) (exitCode int) {
|
||||
for _, entry := range entries {
|
||||
fmt.Fprintln(os.Stderr, entry)
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "Log file: %s (deleted)\n", logger.Path())
|
||||
fmt.Fprintf(os.Stderr, "Log file: %s\n", logger.Path())
|
||||
}
|
||||
}
|
||||
_ = logger.RemoveLogFile()
|
||||
}()
|
||||
defer runCleanupHook()
|
||||
|
||||
@@ -237,6 +238,7 @@ func buildSingleConfig(cmd *cobra.Command, args []string, rawArgv []string, opts
|
||||
agentName := ""
|
||||
promptFile := ""
|
||||
promptFileExplicit := false
|
||||
outputPath := ""
|
||||
yolo := false
|
||||
|
||||
if cmd.Flags().Changed("agent") {
|
||||
@@ -281,6 +283,15 @@ func buildSingleConfig(cmd *cobra.Command, args []string, rawArgv []string, opts
|
||||
promptFile = resolvedPromptFile
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed("output") {
|
||||
outputPath = strings.TrimSpace(opts.Output)
|
||||
if outputPath == "" {
|
||||
return nil, fmt.Errorf("--output flag requires a value")
|
||||
}
|
||||
} else if val := strings.TrimSpace(v.GetString("output")); val != "" {
|
||||
outputPath = val
|
||||
}
|
||||
|
||||
agentFlagChanged := cmd.Flags().Changed("agent")
|
||||
backendFlagChanged := cmd.Flags().Changed("backend")
|
||||
if backendFlagChanged {
|
||||
@@ -357,6 +368,7 @@ func buildSingleConfig(cmd *cobra.Command, args []string, rawArgv []string, opts
|
||||
Agent: agentName,
|
||||
PromptFile: promptFile,
|
||||
PromptFileExplicit: promptFileExplicit,
|
||||
OutputPath: outputPath,
|
||||
SkipPermissions: skipPermissions,
|
||||
Yolo: yolo,
|
||||
Model: model,
|
||||
@@ -432,7 +444,7 @@ func runParallelMode(cmd *cobra.Command, args []string, opts *cliOptions, v *vip
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed("agent") || cmd.Flags().Changed("prompt-file") || cmd.Flags().Changed("reasoning-effort") || cmd.Flags().Changed("skills") {
|
||||
fmt.Fprintln(os.Stderr, "ERROR: --parallel reads its task configuration from stdin; only --backend, --model, --full-output and --skip-permissions are allowed.")
|
||||
fmt.Fprintln(os.Stderr, "ERROR: --parallel reads its task configuration from stdin; only --backend, --model, --output, --full-output and --skip-permissions are allowed.")
|
||||
return 1
|
||||
}
|
||||
|
||||
@@ -463,6 +475,17 @@ func runParallelMode(cmd *cobra.Command, args []string, opts *cliOptions, v *vip
|
||||
fullOutput = v.GetBool("full-output")
|
||||
}
|
||||
|
||||
outputPath := ""
|
||||
if cmd.Flags().Changed("output") {
|
||||
outputPath = strings.TrimSpace(opts.Output)
|
||||
if outputPath == "" {
|
||||
fmt.Fprintln(os.Stderr, "ERROR: --output flag requires a value")
|
||||
return 1
|
||||
}
|
||||
} else if val := strings.TrimSpace(v.GetString("output")); val != "" {
|
||||
outputPath = val
|
||||
}
|
||||
|
||||
skipChanged := cmd.Flags().Changed("skip-permissions") || cmd.Flags().Changed("dangerously-skip-permissions")
|
||||
skipPermissions := false
|
||||
if skipChanged {
|
||||
@@ -525,6 +548,11 @@ func runParallelMode(cmd *cobra.Command, args []string, opts *cliOptions, v *vip
|
||||
results[i].KeyOutput = extractKeyOutputFromLines(lines, 150)
|
||||
}
|
||||
|
||||
if err := writeStructuredOutput(outputPath, results); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
|
||||
return 1
|
||||
}
|
||||
|
||||
fmt.Println(generateFinalOutputWithMode(results, !fullOutput))
|
||||
|
||||
exitCode := 0
|
||||
@@ -688,16 +716,32 @@ func runSingleMode(cfg *Config, name string) int {
|
||||
|
||||
result := runTaskFn(taskSpec, false, cfg.Timeout)
|
||||
|
||||
if result.ExitCode != 0 {
|
||||
return result.ExitCode
|
||||
exitCode := result.ExitCode
|
||||
if exitCode == 0 && strings.TrimSpace(result.Message) == "" {
|
||||
errMsg := fmt.Sprintf("no output message: backend=%s returned empty result.Message with exit_code=0", cfg.Backend)
|
||||
logError(errMsg)
|
||||
exitCode = 1
|
||||
if strings.TrimSpace(result.Error) == "" {
|
||||
result.Error = errMsg
|
||||
}
|
||||
}
|
||||
|
||||
// Validate that we got a meaningful output message
|
||||
if strings.TrimSpace(result.Message) == "" {
|
||||
logError(fmt.Sprintf("no output message: backend=%s returned empty result.Message with exit_code=0", cfg.Backend))
|
||||
if err := writeStructuredOutput(cfg.OutputPath, []TaskResult{result}); err != nil {
|
||||
logError(err.Error())
|
||||
return 1
|
||||
}
|
||||
|
||||
if exitCode != 0 {
|
||||
// Surface any parsed backend output even on non-zero exit to avoid "(no output)" in tool runners.
|
||||
if strings.TrimSpace(result.Message) != "" {
|
||||
fmt.Println(result.Message)
|
||||
if result.SessionID != "" {
|
||||
fmt.Printf("\n---\nSESSION_ID: %s\n", result.SessionID)
|
||||
}
|
||||
}
|
||||
return exitCode
|
||||
}
|
||||
|
||||
fmt.Println(result.Message)
|
||||
if result.SessionID != "" {
|
||||
fmt.Printf("\n---\nSESSION_ID: %s\n", result.SessionID)
|
||||
|
||||
@@ -169,6 +169,12 @@ func (f *execFakeRunner) Process() executor.ProcessHandle {
|
||||
return &execFakeProcess{pid: 1}
|
||||
}
|
||||
|
||||
func (f *execFakeRunner) UnsetEnv(keys ...string) {
|
||||
for _, k := range keys {
|
||||
delete(f.env, k)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecutorRunCodexTaskWithContext(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
|
||||
|
||||
@@ -274,6 +274,10 @@ func (d *drainBlockingCmd) Process() executor.ProcessHandle {
|
||||
return d.inner.Process()
|
||||
}
|
||||
|
||||
func (d *drainBlockingCmd) UnsetEnv(keys ...string) {
|
||||
d.inner.UnsetEnv(keys...)
|
||||
}
|
||||
|
||||
type bufferWriteCloser struct {
|
||||
buf bytes.Buffer
|
||||
mu sync.Mutex
|
||||
@@ -568,6 +572,14 @@ func (f *fakeCmd) Process() executor.ProcessHandle {
|
||||
return f.process
|
||||
}
|
||||
|
||||
func (f *fakeCmd) UnsetEnv(keys ...string) {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
for _, k := range keys {
|
||||
delete(f.env, k)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *fakeCmd) runStdoutScript() {
|
||||
if len(f.stdoutPlan) == 0 {
|
||||
if !f.keepStdoutOpen {
|
||||
@@ -1443,6 +1455,60 @@ func TestBackendParseArgs_PromptFileOverridesAgent(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackendParseArgs_OutputFlag(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args []string
|
||||
want string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "output flag",
|
||||
args: []string{"codeagent-wrapper", "--output", "/tmp/out.json", "task"},
|
||||
want: "/tmp/out.json",
|
||||
},
|
||||
{
|
||||
name: "output equals syntax",
|
||||
args: []string{"codeagent-wrapper", "--output=/tmp/out.json", "task"},
|
||||
want: "/tmp/out.json",
|
||||
},
|
||||
{
|
||||
name: "output trimmed",
|
||||
args: []string{"codeagent-wrapper", "--output", " /tmp/out.json ", "task"},
|
||||
want: "/tmp/out.json",
|
||||
},
|
||||
{
|
||||
name: "output missing value",
|
||||
args: []string{"codeagent-wrapper", "--output"},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "output equals missing value",
|
||||
args: []string{"codeagent-wrapper", "--output=", "task"},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
os.Args = tt.args
|
||||
cfg, err := parseArgs()
|
||||
if tt.wantErr {
|
||||
if err == nil {
|
||||
t.Fatalf("expected error, got nil")
|
||||
}
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if cfg.OutputPath != tt.want {
|
||||
t.Fatalf("OutputPath = %q, want %q", cfg.OutputPath, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackendParseArgs_SkipPermissions(t *testing.T) {
|
||||
const envKey = "CODEAGENT_SKIP_PERMISSIONS"
|
||||
t.Setenv(envKey, "true")
|
||||
@@ -3739,6 +3805,245 @@ noop`)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunSingleWithOutputFile(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
outputPath := filepath.Join(tempDir, "single-output.json")
|
||||
|
||||
oldArgs := os.Args
|
||||
t.Cleanup(func() { os.Args = oldArgs })
|
||||
os.Args = []string{"codeagent-wrapper", "--output", outputPath, "task"}
|
||||
|
||||
stdinReader = strings.NewReader("")
|
||||
isTerminalFn = func() bool { return true }
|
||||
|
||||
origRunTaskFn := runTaskFn
|
||||
runTaskFn = func(taskSpec TaskSpec, silent bool, timeoutSec int) TaskResult {
|
||||
return TaskResult{
|
||||
TaskID: "single-task",
|
||||
ExitCode: 0,
|
||||
Message: "single-result",
|
||||
SessionID: "sid-single",
|
||||
}
|
||||
}
|
||||
t.Cleanup(func() { runTaskFn = origRunTaskFn })
|
||||
|
||||
if code := run(); code != 0 {
|
||||
t.Fatalf("run exit = %d, want 0", code)
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(outputPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read output file: %v", err)
|
||||
}
|
||||
if len(data) == 0 || data[len(data)-1] != '\n' {
|
||||
t.Fatalf("output file should end with newline, got %q", string(data))
|
||||
}
|
||||
|
||||
var payload struct {
|
||||
Results []TaskResult `json:"results"`
|
||||
Summary struct {
|
||||
Total int `json:"total"`
|
||||
Success int `json:"success"`
|
||||
Failed int `json:"failed"`
|
||||
} `json:"summary"`
|
||||
}
|
||||
if err := json.Unmarshal(data, &payload); err != nil {
|
||||
t.Fatalf("failed to unmarshal output json: %v", err)
|
||||
}
|
||||
|
||||
if payload.Summary.Total != 1 || payload.Summary.Success != 1 || payload.Summary.Failed != 0 {
|
||||
t.Fatalf("unexpected summary: %+v", payload.Summary)
|
||||
}
|
||||
if len(payload.Results) != 1 {
|
||||
t.Fatalf("results length = %d, want 1", len(payload.Results))
|
||||
}
|
||||
if payload.Results[0].Message != "single-result" {
|
||||
t.Fatalf("result message = %q, want %q", payload.Results[0].Message, "single-result")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunSingleWithOutputFileOnFailureExitCode(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
outputPath := filepath.Join(tempDir, "single-output-failed.json")
|
||||
|
||||
oldArgs := os.Args
|
||||
t.Cleanup(func() { os.Args = oldArgs })
|
||||
os.Args = []string{"codeagent-wrapper", "--output", outputPath, "task"}
|
||||
|
||||
stdinReader = strings.NewReader("")
|
||||
isTerminalFn = func() bool { return true }
|
||||
|
||||
origRunTaskFn := runTaskFn
|
||||
runTaskFn = func(taskSpec TaskSpec, silent bool, timeoutSec int) TaskResult {
|
||||
return TaskResult{
|
||||
TaskID: "single-task",
|
||||
ExitCode: 7,
|
||||
Message: "failed-result",
|
||||
Error: "backend error",
|
||||
}
|
||||
}
|
||||
t.Cleanup(func() { runTaskFn = origRunTaskFn })
|
||||
|
||||
if code := run(); code != 7 {
|
||||
t.Fatalf("run exit = %d, want 7", code)
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(outputPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read output file: %v", err)
|
||||
}
|
||||
if len(data) == 0 || data[len(data)-1] != '\n' {
|
||||
t.Fatalf("output file should end with newline, got %q", string(data))
|
||||
}
|
||||
|
||||
var payload struct {
|
||||
Results []TaskResult `json:"results"`
|
||||
Summary struct {
|
||||
Total int `json:"total"`
|
||||
Success int `json:"success"`
|
||||
Failed int `json:"failed"`
|
||||
} `json:"summary"`
|
||||
}
|
||||
if err := json.Unmarshal(data, &payload); err != nil {
|
||||
t.Fatalf("failed to unmarshal output json: %v", err)
|
||||
}
|
||||
|
||||
if payload.Summary.Total != 1 || payload.Summary.Success != 0 || payload.Summary.Failed != 1 {
|
||||
t.Fatalf("unexpected summary: %+v", payload.Summary)
|
||||
}
|
||||
if len(payload.Results) != 1 {
|
||||
t.Fatalf("results length = %d, want 1", len(payload.Results))
|
||||
}
|
||||
if payload.Results[0].ExitCode != 7 {
|
||||
t.Fatalf("result exit_code = %d, want 7", payload.Results[0].ExitCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunSingleWithOutputFileOnEmptyMessage(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
outputPath := filepath.Join(tempDir, "single-output-empty.json")
|
||||
|
||||
oldArgs := os.Args
|
||||
t.Cleanup(func() { os.Args = oldArgs })
|
||||
os.Args = []string{"codeagent-wrapper", "--output", outputPath, "task"}
|
||||
|
||||
stdinReader = strings.NewReader("")
|
||||
isTerminalFn = func() bool { return true }
|
||||
|
||||
origRunTaskFn := runTaskFn
|
||||
runTaskFn = func(taskSpec TaskSpec, silent bool, timeoutSec int) TaskResult {
|
||||
return TaskResult{
|
||||
TaskID: "single-task",
|
||||
ExitCode: 0,
|
||||
}
|
||||
}
|
||||
t.Cleanup(func() { runTaskFn = origRunTaskFn })
|
||||
|
||||
if code := run(); code != 1 {
|
||||
t.Fatalf("run exit = %d, want 1", code)
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(outputPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read output file: %v", err)
|
||||
}
|
||||
if len(data) == 0 || data[len(data)-1] != '\n' {
|
||||
t.Fatalf("output file should end with newline, got %q", string(data))
|
||||
}
|
||||
|
||||
var payload struct {
|
||||
Results []TaskResult `json:"results"`
|
||||
Summary struct {
|
||||
Total int `json:"total"`
|
||||
Success int `json:"success"`
|
||||
Failed int `json:"failed"`
|
||||
} `json:"summary"`
|
||||
}
|
||||
if err := json.Unmarshal(data, &payload); err != nil {
|
||||
t.Fatalf("failed to unmarshal output json: %v", err)
|
||||
}
|
||||
|
||||
if payload.Summary.Total != 1 || payload.Summary.Success != 0 || payload.Summary.Failed != 1 {
|
||||
t.Fatalf("unexpected summary: %+v", payload.Summary)
|
||||
}
|
||||
if len(payload.Results) != 1 {
|
||||
t.Fatalf("results length = %d, want 1", len(payload.Results))
|
||||
}
|
||||
if !strings.Contains(payload.Results[0].Error, "no output message:") {
|
||||
t.Fatalf("result error = %q, want no output message", payload.Results[0].Error)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunParallelWithOutputFile(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
cleanupLogsFn = func() (CleanupStats, error) { return CleanupStats{}, nil }
|
||||
|
||||
tempDir := t.TempDir()
|
||||
outputPath := filepath.Join(tempDir, "parallel-output.json")
|
||||
|
||||
oldArgs := os.Args
|
||||
t.Cleanup(func() { os.Args = oldArgs })
|
||||
os.Args = []string{"codeagent-wrapper", "--parallel", "--output", outputPath}
|
||||
|
||||
stdinReader = strings.NewReader(`---TASK---
|
||||
id: T1
|
||||
---CONTENT---
|
||||
noop`)
|
||||
t.Cleanup(func() { stdinReader = os.Stdin })
|
||||
|
||||
origRunCodexTaskFn := runCodexTaskFn
|
||||
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
|
||||
return TaskResult{TaskID: task.ID, ExitCode: 0, Message: "parallel output marker"}
|
||||
}
|
||||
t.Cleanup(func() { runCodexTaskFn = origRunCodexTaskFn })
|
||||
|
||||
out := captureOutput(t, func() {
|
||||
if code := run(); code != 0 {
|
||||
t.Fatalf("run exit = %d, want 0", code)
|
||||
}
|
||||
})
|
||||
|
||||
if !strings.Contains(out, "=== Execution Report ===") {
|
||||
t.Fatalf("stdout should keep summary format, got %q", out)
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(outputPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read output file: %v", err)
|
||||
}
|
||||
if len(data) == 0 || data[len(data)-1] != '\n' {
|
||||
t.Fatalf("output file should end with newline, got %q", string(data))
|
||||
}
|
||||
|
||||
var payload struct {
|
||||
Results []TaskResult `json:"results"`
|
||||
Summary struct {
|
||||
Total int `json:"total"`
|
||||
Success int `json:"success"`
|
||||
Failed int `json:"failed"`
|
||||
} `json:"summary"`
|
||||
}
|
||||
if err := json.Unmarshal(data, &payload); err != nil {
|
||||
t.Fatalf("failed to unmarshal output json: %v", err)
|
||||
}
|
||||
|
||||
if payload.Summary.Total != 1 || payload.Summary.Success != 1 || payload.Summary.Failed != 0 {
|
||||
t.Fatalf("unexpected summary: %+v", payload.Summary)
|
||||
}
|
||||
if len(payload.Results) != 1 {
|
||||
t.Fatalf("results length = %d, want 1", len(payload.Results))
|
||||
}
|
||||
if payload.Results[0].TaskID != "T1" {
|
||||
t.Fatalf("result task_id = %q, want %q", payload.Results[0].TaskID, "T1")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParallelInvalidBackend(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
cleanupLogsFn = func() (CleanupStats, error) { return CleanupStats{}, nil }
|
||||
@@ -4330,9 +4635,9 @@ func TestRun_ExplicitStdinReadError(t *testing.T) {
|
||||
if !strings.Contains(logOutput, "Failed to read stdin: broken stdin") {
|
||||
t.Fatalf("log missing read error entry, got %q", logOutput)
|
||||
}
|
||||
// Log file is always removed after completion (new behavior)
|
||||
if _, err := os.Stat(logPath); !os.IsNotExist(err) {
|
||||
t.Fatalf("log file should be removed after completion")
|
||||
// Log file should remain for inspection; cleanup is handled via `codeagent-wrapper cleanup`.
|
||||
if _, err := os.Stat(logPath); err != nil {
|
||||
t.Fatalf("expected log file to exist after completion: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4348,6 +4653,51 @@ func TestRun_CommandFails(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestRun_NonZeroExitPrintsParsedMessage(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
var scriptPath string
|
||||
if runtime.GOOS == "windows" {
|
||||
scriptPath = filepath.Join(tempDir, "codex.bat")
|
||||
script := "@echo off\r\n" +
|
||||
"echo {\"type\":\"thread.started\",\"thread_id\":\"tid\"}\r\n" +
|
||||
"echo {\"type\":\"item.completed\",\"item\":{\"type\":\"agent_message\",\"text\":\"parsed-error\"}}\r\n" +
|
||||
"exit /b 1\r\n"
|
||||
if err := os.WriteFile(scriptPath, []byte(script), 0o755); err != nil {
|
||||
t.Fatalf("failed to write script: %v", err)
|
||||
}
|
||||
} else {
|
||||
scriptPath = filepath.Join(tempDir, "codex.sh")
|
||||
script := `#!/bin/sh
|
||||
printf '%s\n' '{"type":"thread.started","thread_id":"tid"}'
|
||||
printf '%s\n' '{"type":"item.completed","item":{"type":"agent_message","text":"parsed-error"}}'
|
||||
sleep 0.05
|
||||
exit 1
|
||||
`
|
||||
if err := os.WriteFile(scriptPath, []byte(script), 0o755); err != nil {
|
||||
t.Fatalf("failed to write script: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
restore := withBackend(scriptPath, func(cfg *Config, targetArg string) []string { return []string{} })
|
||||
defer restore()
|
||||
|
||||
os.Args = []string{"codeagent-wrapper", "task"}
|
||||
stdinReader = strings.NewReader("")
|
||||
isTerminalFn = func() bool { return true }
|
||||
|
||||
var exitCode int
|
||||
output := captureOutput(t, func() { exitCode = run() })
|
||||
if exitCode != 1 {
|
||||
t.Fatalf("exit=%d, want 1", exitCode)
|
||||
}
|
||||
|
||||
if !strings.Contains(output, "parsed-error") {
|
||||
t.Fatalf("stdout=%q, want parsed backend message", output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRun_InvalidBackend(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
os.Args = []string{"codeagent-wrapper", "--backend", "unknown", "task"}
|
||||
@@ -4427,9 +4777,9 @@ func TestRun_PipedTaskReadError(t *testing.T) {
|
||||
if !strings.Contains(logOutput, "Failed to read piped stdin: read stdin: pipe failure") {
|
||||
t.Fatalf("log missing piped read error, got %q", logOutput)
|
||||
}
|
||||
// Log file is always removed after completion (new behavior)
|
||||
if _, err := os.Stat(logPath); !os.IsNotExist(err) {
|
||||
t.Fatalf("log file should be removed after completion")
|
||||
// Log file should remain for inspection; cleanup is handled via `codeagent-wrapper cleanup`.
|
||||
if _, err := os.Stat(logPath); err != nil {
|
||||
t.Fatalf("expected log file to exist after completion: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4483,12 +4833,12 @@ func TestRun_LoggerLifecycle(t *testing.T) {
|
||||
if !fileExisted {
|
||||
t.Fatalf("log file was not present during run")
|
||||
}
|
||||
if _, err := os.Stat(logPath); !os.IsNotExist(err) {
|
||||
t.Fatalf("log file should be removed on success, but it exists")
|
||||
if _, err := os.Stat(logPath); err != nil {
|
||||
t.Fatalf("expected log file to exist on success: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRun_LoggerRemovedOnSignal(t *testing.T) {
|
||||
func TestRun_LoggerKeptOnSignal(t *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("signal-based test is not supported on Windows")
|
||||
}
|
||||
@@ -4502,7 +4852,8 @@ func TestRun_LoggerRemovedOnSignal(t *testing.T) {
|
||||
defer signal.Reset(syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
// Set shorter delays for faster test
|
||||
_ = executor.SetForceKillDelay(1)
|
||||
restoreForceKillDelay := executor.SetForceKillDelay(1)
|
||||
defer restoreForceKillDelay()
|
||||
|
||||
tempDir := setTempDirEnv(t, t.TempDir())
|
||||
logPath := filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", os.Getpid()))
|
||||
@@ -4525,13 +4876,19 @@ printf '%s\n' '{"type":"item.completed","item":{"type":"agent_message","text":"l
|
||||
exitCh := make(chan int, 1)
|
||||
go func() { exitCh <- run() }()
|
||||
|
||||
deadline := time.Now().Add(1 * time.Second)
|
||||
ready := false
|
||||
deadline := time.Now().Add(2 * time.Second)
|
||||
for time.Now().Before(deadline) {
|
||||
if _, err := os.Stat(logPath); err == nil {
|
||||
data, err := os.ReadFile(logPath)
|
||||
if err == nil && strings.Contains(string(data), "Starting ") {
|
||||
ready = true
|
||||
break
|
||||
}
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
if !ready {
|
||||
t.Fatalf("logger did not become ready before deadline")
|
||||
}
|
||||
|
||||
if proc, err := os.FindProcess(os.Getpid()); err == nil && proc != nil {
|
||||
_ = proc.Signal(syscall.SIGINT)
|
||||
@@ -4547,9 +4904,9 @@ printf '%s\n' '{"type":"item.completed","item":{"type":"agent_message","text":"l
|
||||
if exitCode != 130 {
|
||||
t.Fatalf("exit code = %d, want 130", exitCode)
|
||||
}
|
||||
// Log file is always removed after completion (new behavior)
|
||||
if _, err := os.Stat(logPath); !os.IsNotExist(err) {
|
||||
t.Fatalf("log file should be removed after completion")
|
||||
// Log file should remain for inspection; cleanup is handled via `codeagent-wrapper cleanup`.
|
||||
if _, err := os.Stat(logPath); err != nil {
|
||||
t.Fatalf("expected log file to exist after completion: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4810,6 +5167,34 @@ func TestParallelLogPathInSerialMode(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestRun_KeptLogFileOnSuccess(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
|
||||
tempDir := setTempDirEnv(t, t.TempDir())
|
||||
|
||||
os.Args = []string{"codeagent-wrapper", "do-stuff"}
|
||||
stdinReader = strings.NewReader("")
|
||||
isTerminalFn = func() bool { return true }
|
||||
codexCommand = createFakeCodexScript(t, "cli-session", "ok")
|
||||
buildCodexArgsFn = func(cfg *Config, targetArg string) []string { return []string{} }
|
||||
cleanupLogsFn = nil
|
||||
|
||||
var exitCode int
|
||||
_ = captureStderr(t, func() {
|
||||
_ = captureOutput(t, func() {
|
||||
exitCode = run()
|
||||
})
|
||||
})
|
||||
if exitCode != 0 {
|
||||
t.Fatalf("run() exit = %d, want 0", exitCode)
|
||||
}
|
||||
|
||||
expectedLog := filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", os.Getpid()))
|
||||
if _, err := os.Stat(expectedLog); err != nil {
|
||||
t.Fatalf("expected log file to exist: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRun_CLI_Success(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
os.Args = []string{"codeagent-wrapper", "do-things"}
|
||||
|
||||
65
codeagent-wrapper/internal/app/output_file.go
Normal file
65
codeagent-wrapper/internal/app/output_file.go
Normal file
@@ -0,0 +1,65 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/goccy/go-json"
|
||||
)
|
||||
|
||||
type outputSummary struct {
|
||||
Total int `json:"total"`
|
||||
Success int `json:"success"`
|
||||
Failed int `json:"failed"`
|
||||
}
|
||||
|
||||
type outputPayload struct {
|
||||
Results []TaskResult `json:"results"`
|
||||
Summary outputSummary `json:"summary"`
|
||||
}
|
||||
|
||||
func writeStructuredOutput(path string, results []TaskResult) error {
|
||||
path = strings.TrimSpace(path)
|
||||
if path == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
cleanPath := filepath.Clean(path)
|
||||
dir := filepath.Dir(cleanPath)
|
||||
if err := os.MkdirAll(dir, 0o755); err != nil {
|
||||
return fmt.Errorf("failed to create output directory for %q: %w", cleanPath, err)
|
||||
}
|
||||
|
||||
f, err := os.Create(cleanPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create output file %q: %w", cleanPath, err)
|
||||
}
|
||||
|
||||
encodeErr := json.NewEncoder(f).Encode(outputPayload{
|
||||
Results: results,
|
||||
Summary: summarizeResults(results),
|
||||
})
|
||||
closeErr := f.Close()
|
||||
|
||||
if encodeErr != nil {
|
||||
return fmt.Errorf("failed to write structured output to %q: %w", cleanPath, encodeErr)
|
||||
}
|
||||
if closeErr != nil {
|
||||
return fmt.Errorf("failed to close output file %q: %w", cleanPath, closeErr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func summarizeResults(results []TaskResult) outputSummary {
|
||||
summary := outputSummary{Total: len(results)}
|
||||
for _, res := range results {
|
||||
if res.ExitCode == 0 && res.Error == "" {
|
||||
summary.Success++
|
||||
} else {
|
||||
summary.Failed++
|
||||
}
|
||||
}
|
||||
return summary
|
||||
}
|
||||
@@ -13,6 +13,7 @@ type Config struct {
|
||||
Task string
|
||||
SessionID string
|
||||
WorkDir string
|
||||
OutputPath string
|
||||
Model string
|
||||
ReasoningEffort string
|
||||
ExplicitStdin bool
|
||||
|
||||
@@ -41,6 +41,11 @@ func (f *fakeCmd) SetEnv(env map[string]string) {
|
||||
}
|
||||
}
|
||||
func (f *fakeCmd) Process() processHandle { return nil }
|
||||
func (f *fakeCmd) UnsetEnv(keys ...string) {
|
||||
for _, k := range keys {
|
||||
delete(f.env, k)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnvInjection_LogsToStderrAndMasksKey(t *testing.T) {
|
||||
// Arrange ~/.codeagent/models.json via HOME override.
|
||||
@@ -120,6 +125,9 @@ func TestEnvInjection_LogsToStderrAndMasksKey(t *testing.T) {
|
||||
if cmd.env["ANTHROPIC_API_KEY"] != apiKey {
|
||||
t.Fatalf("ANTHROPIC_API_KEY=%q, want %q", cmd.env["ANTHROPIC_API_KEY"], apiKey)
|
||||
}
|
||||
if cmd.env["CLAUDE_CODE_TMPDIR"] == "" {
|
||||
t.Fatalf("expected CLAUDE_CODE_TMPDIR to be set for nested claude, got empty")
|
||||
}
|
||||
|
||||
if !strings.Contains(got, "Env: ANTHROPIC_BASE_URL="+baseURL) {
|
||||
t.Fatalf("stderr missing base URL env log; stderr=%q", got)
|
||||
@@ -127,4 +135,7 @@ func TestEnvInjection_LogsToStderrAndMasksKey(t *testing.T) {
|
||||
if !strings.Contains(got, "Env: ANTHROPIC_API_KEY=eyJh****test") {
|
||||
t.Fatalf("stderr missing masked API key log; stderr=%q", got)
|
||||
}
|
||||
if !strings.Contains(got, "CLAUDE_CODE_TMPDIR: ") {
|
||||
t.Fatalf("stderr missing CLAUDE_CODE_TMPDIR log; stderr=%q", got)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -113,6 +113,7 @@ type commandRunner interface {
|
||||
SetStderr(io.Writer)
|
||||
SetDir(string)
|
||||
SetEnv(env map[string]string)
|
||||
UnsetEnv(keys ...string)
|
||||
Process() processHandle
|
||||
}
|
||||
|
||||
@@ -221,6 +222,33 @@ func (r *realCmd) SetEnv(env map[string]string) {
|
||||
r.cmd.Env = out
|
||||
}
|
||||
|
||||
func (r *realCmd) UnsetEnv(keys ...string) {
|
||||
if r == nil || r.cmd == nil || len(keys) == 0 {
|
||||
return
|
||||
}
|
||||
// If cmd.Env is nil, Go inherits all parent env vars.
|
||||
// Populate explicitly so we can selectively remove keys.
|
||||
if r.cmd.Env == nil {
|
||||
r.cmd.Env = os.Environ()
|
||||
}
|
||||
drop := make(map[string]struct{}, len(keys))
|
||||
for _, k := range keys {
|
||||
drop[k] = struct{}{}
|
||||
}
|
||||
filtered := make([]string, 0, len(r.cmd.Env))
|
||||
for _, kv := range r.cmd.Env {
|
||||
idx := strings.IndexByte(kv, '=')
|
||||
name := kv
|
||||
if idx >= 0 {
|
||||
name = kv[:idx]
|
||||
}
|
||||
if _, ok := drop[name]; !ok {
|
||||
filtered = append(filtered, kv)
|
||||
}
|
||||
}
|
||||
r.cmd.Env = filtered
|
||||
}
|
||||
|
||||
func (r *realCmd) Process() processHandle {
|
||||
if r == nil || r.cmd == nil || r.cmd.Process == nil {
|
||||
return nil
|
||||
@@ -1126,6 +1154,26 @@ func RunCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
|
||||
|
||||
injectTempEnv(cmd)
|
||||
|
||||
if commandName == "claude" {
|
||||
// Claude 2.1.45+ calls Nz7() on startup to clean its tasks directory,
|
||||
// which deletes the parent session's *.output files and causes "(no output)".
|
||||
// Assign each nested claude its own isolated tmpdir so it only cleans its own files.
|
||||
nestedTmpDir, err := os.MkdirTemp("", fmt.Sprintf("cc-nested-%d-", os.Getpid()))
|
||||
if err != nil {
|
||||
logWarnFn("Failed to create isolated CLAUDE_CODE_TMPDIR: " + err.Error())
|
||||
} else {
|
||||
cmd.SetEnv(map[string]string{"CLAUDE_CODE_TMPDIR": nestedTmpDir})
|
||||
defer os.RemoveAll(nestedTmpDir) //nolint:errcheck
|
||||
logInfoFn("CLAUDE_CODE_TMPDIR: " + nestedTmpDir)
|
||||
fmt.Fprintln(os.Stderr, " CLAUDE_CODE_TMPDIR: "+nestedTmpDir)
|
||||
}
|
||||
|
||||
// Claude Code sets CLAUDECODE=1 in its child processes. If we don't
|
||||
// remove it, the spawned `claude -p` detects the variable and refuses
|
||||
// to start ("cannot be launched inside another Claude Code session").
|
||||
cmd.UnsetEnv("CLAUDECODE")
|
||||
}
|
||||
|
||||
// For backends that don't support -C flag (claude, gemini), set working directory via cmd.Dir
|
||||
// Codex passes workdir via -C flag, so we skip setting Dir for it to avoid conflicts
|
||||
if cfg.Mode != "resume" && commandName != "codex" && cfg.WorkDir != "" {
|
||||
@@ -1400,6 +1448,15 @@ waitLoop:
|
||||
logErrorFn(fmt.Sprintf("%s exited with status %d", commandName, code))
|
||||
result.ExitCode = code
|
||||
result.Error = attachStderr(fmt.Sprintf("%s exited with status %d", commandName, code))
|
||||
// Preserve parsed output when the backend exits non-zero (e.g. API error with stream-json output).
|
||||
result.Message = parsed.message
|
||||
result.SessionID = parsed.threadID
|
||||
if stdoutLogger != nil {
|
||||
stdoutLogger.Flush()
|
||||
}
|
||||
if stderrLogger != nil {
|
||||
stderrLogger.Flush()
|
||||
}
|
||||
return result
|
||||
}
|
||||
logErrorFn(commandName + " error: " + waitErr.Error())
|
||||
|
||||
@@ -20,8 +20,7 @@ var geminiNoisePatterns = []string{
|
||||
|
||||
// codexNoisePatterns contains stderr patterns to filter for codex backend
|
||||
var codexNoisePatterns = []string{
|
||||
"ERROR codex_core::codex: needs_follow_up:",
|
||||
"ERROR codex_core::skills::loader:",
|
||||
"ERROR codex_core::",
|
||||
}
|
||||
|
||||
// filteringWriter wraps an io.Writer and filters out lines matching patterns
|
||||
|
||||
@@ -71,3 +71,35 @@ func TestFilteringWriterPartialLines(t *testing.T) {
|
||||
t.Errorf("got %q, want %q", got, "Hello World\n")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilteringWriterCodexNoise(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "filter all codex_core errors",
|
||||
input: "ERROR codex_core::rollout::list: state db missing rollout path for thread 123\nERROR codex_core::skills::loader: missing skill\nVisible output\n",
|
||||
want: "Visible output\n",
|
||||
},
|
||||
{
|
||||
name: "keep non codex_core errors",
|
||||
input: "ERROR another_module::state: real failure\nERROR codex_core::codex: needs_follow_up: true\nDone\n",
|
||||
want: "ERROR another_module::state: real failure\nDone\n",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
fw := newFilteringWriter(&buf, codexNoisePatterns)
|
||||
_, _ = fw.Write([]byte(tt.input))
|
||||
fw.Flush()
|
||||
|
||||
if got := buf.String(); got != tt.want {
|
||||
t.Errorf("got %q, want %q", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -569,11 +569,17 @@ func isUnsafeFile(path string, tempDir string) (bool, string) {
|
||||
return true, fmt.Sprintf("path resolution failed: %v", err)
|
||||
}
|
||||
|
||||
// Get absolute path of tempDir
|
||||
absTempDir, err := filepath.Abs(tempDir)
|
||||
// Get canonical path of tempDir, resolving symlinks to match resolvedPath.
|
||||
// On macOS, os.TempDir() returns /var/folders/... but EvalSymlinks resolves
|
||||
// files to /private/var/folders/..., causing a spurious "outside tempDir" mismatch.
|
||||
absTempDir, err := evalSymlinksFn(tempDir)
|
||||
if err != nil {
|
||||
// Fallback to Abs if symlink resolution fails
|
||||
absTempDir, err = filepath.Abs(tempDir)
|
||||
if err != nil {
|
||||
return true, fmt.Sprintf("tempDir resolution failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure resolved path is within tempDir
|
||||
relPath, err := filepath.Rel(absTempDir, resolvedPath)
|
||||
|
||||
@@ -515,7 +515,10 @@ func TestLoggerIsUnsafeFileSecurityChecks(t *testing.T) {
|
||||
return fakeFileInfo{}, nil
|
||||
})
|
||||
outside := filepath.Join(filepath.Dir(absTempDir), "etc", "passwd")
|
||||
stubEvalSymlinks(t, func(string) (string, error) {
|
||||
stubEvalSymlinks(t, func(p string) (string, error) {
|
||||
if p == tempDir {
|
||||
return absTempDir, nil
|
||||
}
|
||||
return outside, nil
|
||||
})
|
||||
unsafe, reason := isUnsafeFile(filepath.Join("..", "..", "etc", "passwd"), tempDir)
|
||||
@@ -529,16 +532,73 @@ func TestLoggerIsUnsafeFileSecurityChecks(t *testing.T) {
|
||||
return fakeFileInfo{}, nil
|
||||
})
|
||||
otherDir := t.TempDir()
|
||||
stubEvalSymlinks(t, func(string) (string, error) {
|
||||
return filepath.Join(otherDir, "codeagent-wrapper-9.log"), nil
|
||||
outsidePath := filepath.Join(otherDir, "codeagent-wrapper-9.log")
|
||||
stubEvalSymlinks(t, func(p string) (string, error) {
|
||||
if p == tempDir {
|
||||
return absTempDir, nil
|
||||
}
|
||||
return outsidePath, nil
|
||||
})
|
||||
unsafe, reason := isUnsafeFile(filepath.Join(otherDir, "codeagent-wrapper-9.log"), tempDir)
|
||||
unsafe, reason := isUnsafeFile(outsidePath, tempDir)
|
||||
if !unsafe || reason != "file is outside tempDir" {
|
||||
t.Fatalf("expected outside file to be rejected, got unsafe=%v reason=%q", unsafe, reason)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestLoggerIsUnsafeFileCanonicalizesTempDir(t *testing.T) {
|
||||
stubFileStat(t, func(string) (os.FileInfo, error) {
|
||||
return fakeFileInfo{}, nil
|
||||
})
|
||||
|
||||
tempDir := filepath.FromSlash("/var/folders/abc/T")
|
||||
canonicalTempDir := filepath.FromSlash("/private/var/folders/abc/T")
|
||||
logPath := filepath.Join(tempDir, "codeagent-wrapper-1.log")
|
||||
canonicalLogPath := filepath.Join(canonicalTempDir, "codeagent-wrapper-1.log")
|
||||
|
||||
stubEvalSymlinks(t, func(p string) (string, error) {
|
||||
switch p {
|
||||
case tempDir:
|
||||
return canonicalTempDir, nil
|
||||
case logPath:
|
||||
return canonicalLogPath, nil
|
||||
default:
|
||||
return p, nil
|
||||
}
|
||||
})
|
||||
|
||||
unsafe, reason := isUnsafeFile(logPath, tempDir)
|
||||
if unsafe {
|
||||
t.Fatalf("expected canonicalized tempDir to be accepted, got unsafe=%v reason=%q", unsafe, reason)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoggerIsUnsafeFileFallsBackToAbsOnTempDirEvalFailure(t *testing.T) {
|
||||
stubFileStat(t, func(string) (os.FileInfo, error) {
|
||||
return fakeFileInfo{}, nil
|
||||
})
|
||||
|
||||
tempDir := t.TempDir()
|
||||
absTempDir, err := filepath.Abs(tempDir)
|
||||
if err != nil {
|
||||
t.Fatalf("filepath.Abs() error = %v", err)
|
||||
}
|
||||
logPath := filepath.Join(tempDir, "codeagent-wrapper-1.log")
|
||||
absLogPath := filepath.Join(absTempDir, "codeagent-wrapper-1.log")
|
||||
|
||||
stubEvalSymlinks(t, func(p string) (string, error) {
|
||||
if p == tempDir {
|
||||
return "", errors.New("boom")
|
||||
}
|
||||
return absLogPath, nil
|
||||
})
|
||||
|
||||
unsafe, reason := isUnsafeFile(logPath, tempDir)
|
||||
if unsafe {
|
||||
t.Fatalf("expected Abs fallback to allow file, got unsafe=%v reason=%q", unsafe, reason)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoggerPathAndRemove(t *testing.T) {
|
||||
setTempDirEnv(t, t.TempDir())
|
||||
|
||||
|
||||
12
config.json
12
config.json
@@ -196,6 +196,18 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"harness": {
|
||||
"enabled": false,
|
||||
"description": "Multi-session autonomous agent harness with progress checkpointing, failure recovery, task dependencies, and post-completion self-reflection",
|
||||
"operations": [
|
||||
{
|
||||
"type": "copy_dir",
|
||||
"source": "skills/harness",
|
||||
"target": "skills/harness",
|
||||
"description": "Install harness skill with hooks (Stop, SessionStart, TeammateIdle, SubagentStop, self-reflect)"
|
||||
}
|
||||
]
|
||||
},
|
||||
"claudekit": {
|
||||
"enabled": false,
|
||||
"description": "ClaudeKit workflow: skills/do + global hooks (pre-bash, inject-spec, log-prompt)",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"$schema": "https://json-schema.org/draft/2020-12/schema",
|
||||
"$id": "https://github.com/cexll/myclaude/config.schema.json",
|
||||
"$id": "https://github.com/stellarlinkco/myclaude/config.schema.json",
|
||||
"title": "Modular Installation Config",
|
||||
"type": "object",
|
||||
"additionalProperties": false,
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
setlocal enabledelayedexpansion
|
||||
|
||||
set "EXIT_CODE=0"
|
||||
set "REPO=cexll/myclaude"
|
||||
set "REPO=stellarlinkco/myclaude"
|
||||
set "VERSION=latest"
|
||||
set "OS=windows"
|
||||
|
||||
|
||||
44
install.py
44
install.py
@@ -24,6 +24,7 @@ except ImportError: # pragma: no cover
|
||||
|
||||
DEFAULT_INSTALL_DIR = "~/.claude"
|
||||
SETTINGS_FILE = "settings.json"
|
||||
WRAPPER_REQUIRED_MODULES = {"do", "omo"}
|
||||
|
||||
|
||||
def _ensure_list(ctx: Dict[str, Any], key: str) -> List[Any]:
|
||||
@@ -898,6 +899,24 @@ def execute_module(name: str, cfg: Dict[str, Any], ctx: Dict[str, Any]) -> Dict[
|
||||
"installed_at": datetime.now().isoformat(),
|
||||
}
|
||||
|
||||
if name in WRAPPER_REQUIRED_MODULES:
|
||||
try:
|
||||
ensure_wrapper_installed(ctx)
|
||||
result["operations"].append({"type": "ensure_wrapper", "status": "success"})
|
||||
except Exception as exc: # noqa: BLE001
|
||||
result["status"] = "failed"
|
||||
result["operations"].append(
|
||||
{"type": "ensure_wrapper", "status": "failed", "error": str(exc)}
|
||||
)
|
||||
write_log(
|
||||
{
|
||||
"level": "ERROR",
|
||||
"message": f"Module {name} failed on ensure_wrapper: {exc}",
|
||||
},
|
||||
ctx,
|
||||
)
|
||||
raise
|
||||
|
||||
for op in cfg.get("operations", []):
|
||||
op_type = op.get("type")
|
||||
try:
|
||||
@@ -1081,8 +1100,13 @@ def op_run_command(op: Dict[str, Any], ctx: Dict[str, Any]) -> None:
|
||||
for key, value in op.get("env", {}).items():
|
||||
env[key] = value.replace("${install_dir}", str(ctx["install_dir"]))
|
||||
|
||||
command = op.get("command", "")
|
||||
if sys.platform == "win32" and command.strip() == "bash install.sh":
|
||||
raw_command = str(op.get("command", "")).strip()
|
||||
if raw_command == "bash install.sh" and ctx.get("_wrapper_installed"):
|
||||
write_log({"level": "INFO", "message": "Skip wrapper install; already installed in this run"}, ctx)
|
||||
return
|
||||
|
||||
command = raw_command
|
||||
if sys.platform == "win32" and raw_command == "bash install.sh":
|
||||
command = "cmd /c install.bat"
|
||||
|
||||
# Stream output in real-time while capturing for logging
|
||||
@@ -1156,6 +1180,22 @@ def op_run_command(op: Dict[str, Any], ctx: Dict[str, Any]) -> None:
|
||||
if process.returncode != 0:
|
||||
raise RuntimeError(f"Command failed with code {process.returncode}: {command}")
|
||||
|
||||
if raw_command == "bash install.sh":
|
||||
ctx["_wrapper_installed"] = True
|
||||
|
||||
|
||||
def ensure_wrapper_installed(ctx: Dict[str, Any]) -> None:
|
||||
if ctx.get("_wrapper_installed"):
|
||||
return
|
||||
op_run_command(
|
||||
{
|
||||
"type": "run_command",
|
||||
"command": "bash install.sh",
|
||||
"env": {"INSTALL_DIR": "${install_dir}"},
|
||||
},
|
||||
ctx,
|
||||
)
|
||||
|
||||
|
||||
def write_log(entry: Dict[str, Any], ctx: Dict[str, Any]) -> None:
|
||||
log_path = Path(ctx["log_file"])
|
||||
|
||||
@@ -4,7 +4,7 @@ set -e
|
||||
if [ -z "${SKIP_WARNING:-}" ]; then
|
||||
echo "⚠️ WARNING: install.sh is LEGACY and will be removed in future versions."
|
||||
echo "Please use the new installation method:"
|
||||
echo " npx github:cexll/myclaude"
|
||||
echo " npx github:stellarlinkco/myclaude"
|
||||
echo ""
|
||||
echo "Set SKIP_WARNING=1 to bypass this message"
|
||||
echo "Continuing with legacy installation in 5 seconds..."
|
||||
@@ -23,7 +23,7 @@ case "$ARCH" in
|
||||
esac
|
||||
|
||||
# Build download URL
|
||||
REPO="cexll/myclaude"
|
||||
REPO="stellarlinkco/myclaude"
|
||||
VERSION="${CODEAGENT_WRAPPER_VERSION:-latest}"
|
||||
BINARY_NAME="codeagent-wrapper-${OS}-${ARCH}"
|
||||
if [ "$VERSION" = "latest" ]; then
|
||||
|
||||
@@ -7,17 +7,17 @@ This directory contains agent skills (each skill lives in its own folder with a
|
||||
List installable items:
|
||||
|
||||
```bash
|
||||
npx github:cexll/myclaude --list
|
||||
npx github:stellarlinkco/myclaude --list
|
||||
```
|
||||
|
||||
Install (interactive; pick `skill:<name>`):
|
||||
|
||||
```bash
|
||||
npx github:cexll/myclaude
|
||||
npx github:stellarlinkco/myclaude
|
||||
```
|
||||
|
||||
Force overwrite / custom install directory:
|
||||
|
||||
```bash
|
||||
npx github:cexll/myclaude --install-dir ~/.claude --force
|
||||
npx github:stellarlinkco/myclaude --install-dir ~/.claude --force
|
||||
```
|
||||
|
||||
@@ -1,334 +0,0 @@
|
||||
---
|
||||
name: codex
|
||||
description: Execute Codex CLI for code analysis, refactoring, and automated code changes. Use when you need to delegate complex code tasks to Codex AI with file references (@syntax) and structured output.
|
||||
---
|
||||
|
||||
# Codex CLI Integration
|
||||
|
||||
## Overview
|
||||
|
||||
Execute Codex CLI commands and parse structured JSON responses. Supports file references via `@` syntax, multiple models, and sandbox controls.
|
||||
|
||||
## When to Use
|
||||
|
||||
- Complex code analysis requiring deep understanding
|
||||
- Large-scale refactoring across multiple files
|
||||
- Automated code generation with safety controls
|
||||
|
||||
## Fallback Policy
|
||||
|
||||
Codex is the **primary execution method** for all code edits and tests. Direct execution is only permitted when:
|
||||
|
||||
1. Codex is unavailable (service down, network issues)
|
||||
2. Codex fails **twice consecutively** on the same task
|
||||
|
||||
When falling back to direct execution:
|
||||
- Log `CODEX_FALLBACK` with the reason
|
||||
- Retry Codex on the next task (don't permanently switch)
|
||||
- Document the fallback in the final summary
|
||||
|
||||
## Usage
|
||||
|
||||
**Mandatory**: Run every automated invocation through the Bash tool in the foreground with **HEREDOC syntax** to avoid shell quoting issues, keeping the `timeout` parameter fixed at `7200000` milliseconds (do not change it or use any other entry point).
|
||||
|
||||
```bash
|
||||
codex-wrapper - [working_dir] <<'EOF'
|
||||
<task content here>
|
||||
EOF
|
||||
```
|
||||
|
||||
**Why HEREDOC?** Tasks often contain code blocks, nested quotes, shell metacharacters (`$`, `` ` ``, `\`), and multiline text. HEREDOC (Here Document) syntax passes these safely without shell interpretation, eliminating quote-escaping nightmares.
|
||||
|
||||
**Foreground only (no background/BashOutput)**: Never set `background: true`, never accept Claude's "Running in the background" mode, and avoid `BashOutput` streaming loops. Keep a single foreground Bash call per Codex task; if work might be long, split it into smaller foreground runs instead of offloading to background execution.
|
||||
|
||||
**Simple tasks** (backward compatibility):
|
||||
For simple single-line tasks without special characters, you can still use direct quoting:
|
||||
```bash
|
||||
codex-wrapper "simple task here" [working_dir]
|
||||
```
|
||||
|
||||
**Resume a session with HEREDOC:**
|
||||
```bash
|
||||
codex-wrapper resume <session_id> - [working_dir] <<'EOF'
|
||||
<task content>
|
||||
EOF
|
||||
```
|
||||
|
||||
**Cross-platform notes:**
|
||||
- **Bash/Zsh**: Use `<<'EOF'` (single quotes prevent variable expansion)
|
||||
- **PowerShell 5.1+**: Use `@'` and `'@` (here-string syntax)
|
||||
```powershell
|
||||
codex-wrapper - @'
|
||||
task content
|
||||
'@
|
||||
```
|
||||
|
||||
## Environment Variables
|
||||
|
||||
- **CODEX_TIMEOUT**: Override timeout in milliseconds (default: 7200000 = 2 hours)
|
||||
- Example: `export CODEX_TIMEOUT=3600000` for 1 hour
|
||||
|
||||
## Timeout Control
|
||||
|
||||
- **Built-in**: Binary enforces 2-hour timeout by default
|
||||
- **Override**: Set `CODEX_TIMEOUT` environment variable (in milliseconds, e.g., `CODEX_TIMEOUT=3600000` for 1 hour)
|
||||
- **Behavior**: On timeout, sends SIGTERM, then SIGKILL after 5s if process doesn't exit
|
||||
- **Exit code**: Returns 124 on timeout (consistent with GNU timeout)
|
||||
- **Bash tool**: Always set `timeout: 7200000` parameter for double protection
|
||||
|
||||
### Parameters
|
||||
|
||||
- `task` (required): Task description, supports `@file` references
|
||||
- `working_dir` (optional): Working directory (default: current)
|
||||
|
||||
### Return Format
|
||||
|
||||
Extracts `agent_message` from Codex JSON stream and appends session ID:
|
||||
```
|
||||
Agent response text here...
|
||||
|
||||
---
|
||||
SESSION_ID: 019a7247-ac9d-71f3-89e2-a823dbd8fd14
|
||||
```
|
||||
|
||||
Error format (stderr):
|
||||
```
|
||||
ERROR: Error message
|
||||
```
|
||||
|
||||
Return only the final agent message and session ID—do not paste raw `BashOutput` logs or background-task chatter into the conversation.
|
||||
|
||||
### Invocation Pattern
|
||||
|
||||
All automated executions must use HEREDOC syntax through the Bash tool in the foreground, with `timeout` fixed at `7200000` (non-negotiable):
|
||||
|
||||
```
|
||||
Bash tool parameters:
|
||||
- command: codex-wrapper - [working_dir] <<'EOF'
|
||||
<task content>
|
||||
EOF
|
||||
- timeout: 7200000
|
||||
- description: <brief description of the task>
|
||||
```
|
||||
|
||||
Run every call in the foreground—never append `&` to background it—so logs and errors stay visible for timely interruption or diagnosis.
|
||||
|
||||
**Important:** Use HEREDOC (`<<'EOF'`) for all but the simplest tasks. This prevents shell interpretation of quotes, variables, and special characters.
|
||||
|
||||
### Examples
|
||||
|
||||
**Basic code analysis:**
|
||||
```bash
|
||||
# Recommended: with HEREDOC (handles any special characters)
|
||||
codex-wrapper - <<'EOF'
|
||||
explain @src/main.ts
|
||||
EOF
|
||||
# timeout: 7200000
|
||||
|
||||
# Alternative: simple direct quoting (if task is simple)
|
||||
codex-wrapper "explain @src/main.ts"
|
||||
```
|
||||
|
||||
**Refactoring with multiline instructions:**
|
||||
```bash
|
||||
codex-wrapper - <<'EOF'
|
||||
refactor @src/utils for performance:
|
||||
- Extract duplicate code into helpers
|
||||
- Use memoization for expensive calculations
|
||||
- Add inline comments for non-obvious logic
|
||||
EOF
|
||||
# timeout: 7200000
|
||||
```
|
||||
|
||||
**Multi-file analysis:**
|
||||
```bash
|
||||
codex-wrapper - "/path/to/project" <<'EOF'
|
||||
analyze @. and find security issues:
|
||||
1. Check for SQL injection vulnerabilities
|
||||
2. Identify XSS risks in templates
|
||||
3. Review authentication/authorization logic
|
||||
4. Flag hardcoded credentials or secrets
|
||||
EOF
|
||||
# timeout: 7200000
|
||||
```
|
||||
|
||||
**Resume previous session:**
|
||||
```bash
|
||||
# First session
|
||||
codex-wrapper - <<'EOF'
|
||||
add comments to @utils.js explaining the caching logic
|
||||
EOF
|
||||
# Output includes: SESSION_ID: 019a7247-ac9d-71f3-89e2-a823dbd8fd14
|
||||
|
||||
# Continue the conversation with more context
|
||||
codex-wrapper resume 019a7247-ac9d-71f3-89e2-a823dbd8fd14 - <<'EOF'
|
||||
now add TypeScript type hints and handle edge cases where cache is null
|
||||
EOF
|
||||
# timeout: 7200000
|
||||
```
|
||||
|
||||
**Task with code snippets and special characters:**
|
||||
```bash
|
||||
codex-wrapper - <<'EOF'
|
||||
Fix the bug in @app.js where the regex /\d+/ doesn't match "123"
|
||||
The current code is:
|
||||
const re = /\d+/;
|
||||
if (re.test(input)) { ... }
|
||||
Add proper escaping and handle $variables correctly.
|
||||
EOF
|
||||
```
|
||||
|
||||
### Parallel Execution
|
||||
|
||||
> Important:
|
||||
> - `--parallel` only reads task definitions from stdin.
|
||||
> - It does not accept extra command-line arguments (no inline `workdir`, `task`, or other params).
|
||||
> - Put all task metadata and content in stdin; nothing belongs after `--parallel` on the command line.
|
||||
|
||||
**Correct vs Incorrect Usage**
|
||||
|
||||
**Correct:**
|
||||
```bash
|
||||
# Option 1: file redirection
|
||||
codex-wrapper --parallel < tasks.txt
|
||||
|
||||
# Option 2: heredoc (recommended for multiple tasks)
|
||||
codex-wrapper --parallel <<'EOF'
|
||||
---TASK---
|
||||
id: task1
|
||||
workdir: /path/to/dir
|
||||
---CONTENT---
|
||||
task content
|
||||
EOF
|
||||
|
||||
# Option 3: pipe
|
||||
echo "---TASK---..." | codex-wrapper --parallel
|
||||
```
|
||||
|
||||
**Incorrect (will trigger shell parsing errors):**
|
||||
```bash
|
||||
# Bad: no extra args allowed after --parallel
|
||||
codex-wrapper --parallel - /path/to/dir <<'EOF'
|
||||
...
|
||||
EOF
|
||||
|
||||
# Bad: --parallel does not take a task argument
|
||||
codex-wrapper --parallel "task description"
|
||||
|
||||
# Bad: workdir must live inside the task config
|
||||
codex-wrapper --parallel /path/to/dir < tasks.txt
|
||||
```
|
||||
|
||||
For multiple independent or dependent tasks, use `--parallel` mode with delimiter format:
|
||||
|
||||
**Typical Workflow (analyze → implement → test, chained in a single parallel call)**:
|
||||
```bash
|
||||
codex-wrapper --parallel <<'EOF'
|
||||
---TASK---
|
||||
id: analyze_1732876800
|
||||
workdir: /home/user/project
|
||||
---CONTENT---
|
||||
analyze @spec.md and summarize API and UI requirements
|
||||
---TASK---
|
||||
id: implement_1732876801
|
||||
workdir: /home/user/project
|
||||
dependencies: analyze_1732876800
|
||||
---CONTENT---
|
||||
implement features from analyze_1732876800 summary in backend @services and frontend @ui
|
||||
---TASK---
|
||||
id: test_1732876802
|
||||
workdir: /home/user/project
|
||||
dependencies: implement_1732876801
|
||||
---CONTENT---
|
||||
add and run regression tests covering the new endpoints and UI flows
|
||||
EOF
|
||||
```
|
||||
A single `codex-wrapper --parallel` call schedules all three stages concurrently, using `dependencies` to enforce sequential ordering without multiple invocations.
|
||||
|
||||
```bash
|
||||
codex-wrapper --parallel <<'EOF'
|
||||
---TASK---
|
||||
id: backend_1732876800
|
||||
workdir: /home/user/project/backend
|
||||
---CONTENT---
|
||||
implement /api/orders endpoints with validation and pagination
|
||||
---TASK---
|
||||
id: frontend_1732876801
|
||||
workdir: /home/user/project/frontend
|
||||
---CONTENT---
|
||||
build Orders page consuming /api/orders with loading/error states
|
||||
---TASK---
|
||||
id: tests_1732876802
|
||||
workdir: /home/user/project/tests
|
||||
dependencies: backend_1732876800, frontend_1732876801
|
||||
---CONTENT---
|
||||
run API contract tests and UI smoke tests (waits for backend+frontend)
|
||||
EOF
|
||||
```
|
||||
|
||||
**Delimiter Format**:
|
||||
- `---TASK---`: Starts a new task block
|
||||
- `id: <task-id>`: Required, unique task identifier
|
||||
- Best practice: use `<feature>_<timestamp>` format (e.g., `auth_1732876800`, `api_test_1732876801`)
|
||||
- Ensures uniqueness across runs and makes tasks traceable
|
||||
- `workdir: <path>`: Optional, working directory (default: `.`)
|
||||
- Best practice: use absolute paths (e.g., `/home/user/project/backend`)
|
||||
- Avoids ambiguity and ensures consistent behavior across environments
|
||||
- Must be specified inside each task block; do not pass `workdir` as a CLI argument to `--parallel`
|
||||
- Each task can set its own `workdir` when different directories are needed
|
||||
- `dependencies: <id1>, <id2>`: Optional, comma-separated task IDs
|
||||
- `session_id: <uuid>`: Optional, resume a previous session
|
||||
- `---CONTENT---`: Separates metadata from task content
|
||||
- Task content: Any text, code, special characters (no escaping needed)
|
||||
|
||||
**Dependencies Best Practices**
|
||||
|
||||
- Avoid multiple invocations: Place "analyze then implement" in a single `codex-wrapper --parallel` call, chaining them via `dependencies`, rather than running analysis first and then launching implementation separately.
|
||||
- Naming convention: Use `<action>_<timestamp>` format (e.g., `analyze_1732876800`, `implement_1732876801`), where action names map to features/stages and timestamps ensure uniqueness and sortability.
|
||||
- Dependency chain design: Keep chains short; only add dependencies for tasks that truly require ordering, let others run in parallel, avoiding over-serialization that reduces throughput.
|
||||
|
||||
**Resume Failed Tasks**:
|
||||
```bash
|
||||
# Use session_id from previous output to resume
|
||||
codex-wrapper --parallel <<'EOF'
|
||||
---TASK---
|
||||
id: T2
|
||||
session_id: 019xxx-previous-session-id
|
||||
---CONTENT---
|
||||
fix the previous error and retry
|
||||
EOF
|
||||
```
|
||||
|
||||
**Output**: Human-readable text format
|
||||
```
|
||||
=== Parallel Execution Summary ===
|
||||
Total: 3 | Success: 2 | Failed: 1
|
||||
|
||||
--- Task: T1 ---
|
||||
Status: SUCCESS
|
||||
Session: 019xxx
|
||||
|
||||
Task output message...
|
||||
|
||||
--- Task: T2 ---
|
||||
Status: FAILED (exit code 1)
|
||||
Error: some error message
|
||||
```
|
||||
|
||||
**Features**:
|
||||
- Automatic topological sorting based on dependencies
|
||||
- Unlimited concurrency for independent tasks
|
||||
- Error isolation (failed tasks don't stop others)
|
||||
- Dependency blocking (dependent tasks skip if parent fails)
|
||||
|
||||
## Notes
|
||||
|
||||
- **Binary distribution**: Single Go binary, zero dependencies
|
||||
- **Installation**: Download from GitHub Releases or use install.sh
|
||||
- **Cross-platform compatible**: Linux (amd64/arm64), macOS (amd64/arm64)
|
||||
- All automated runs must use the Bash tool with the fixed timeout to provide dual timeout protection and unified logging/exit semantics
|
||||
for automation (new sessions only)
|
||||
- Uses `--skip-git-repo-check` to work in any directory
|
||||
- Streams progress, returns only final agent message
|
||||
- Every execution returns a session ID for resuming conversations
|
||||
- Requires Codex CLI installed and authenticated
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: do
|
||||
description: This skill should be used for structured feature development with codebase understanding. Triggers on /do command. Provides a 5-phase workflow (Understand, Clarify, Design, Implement, Complete) using codeagent-wrapper to orchestrate code-explorer, code-architect, code-reviewer, and develop agents in parallel.
|
||||
allowed-tools: ["Bash(.claude/skills/do/scripts/setup-do.py:*)", "Bash(.claude/skills/do/scripts/task.py:*)"]
|
||||
allowed-tools: ["Bash(python3:*/.claude/skills/do/scripts/setup-do.py*)", "Bash(python3:*/.claude/skills/do/scripts/task.py*)"]
|
||||
---
|
||||
|
||||
# do - Feature Development Orchestrator
|
||||
@@ -13,7 +13,7 @@ An orchestrator for systematic feature development. Invoke agents via `codeagent
|
||||
When triggered via `/do <task>`, initialize the task directory immediately without asking about worktree:
|
||||
|
||||
```bash
|
||||
python3 ".claude/skills/do/scripts/setup-do.py" "<task description>"
|
||||
python3 "$HOME/.claude/skills/do/scripts/setup-do.py" "<task description>"
|
||||
```
|
||||
|
||||
This creates a task directory under `.claude/do-tasks/` with:
|
||||
@@ -27,13 +27,13 @@ Use `task.py` to manage task state:
|
||||
|
||||
```bash
|
||||
# Update phase
|
||||
python3 ".claude/skills/do/scripts/task.py" update-phase 2
|
||||
python3 "$HOME/.claude/skills/do/scripts/task.py" update-phase 2
|
||||
|
||||
# Check status
|
||||
python3 ".claude/skills/do/scripts/task.py" status
|
||||
python3 "$HOME/.claude/skills/do/scripts/task.py" status
|
||||
|
||||
# List all tasks
|
||||
python3 ".claude/skills/do/scripts/task.py" list
|
||||
python3 "$HOME/.claude/skills/do/scripts/task.py" list
|
||||
```
|
||||
|
||||
## Worktree Mode
|
||||
@@ -42,7 +42,7 @@ The worktree is created **only when needed** (right before Phase 4: Implement).
|
||||
|
||||
1. Run setup with `--worktree` flag to create the worktree:
|
||||
```bash
|
||||
python3 ".claude/skills/do/scripts/setup-do.py" --worktree "<task description>"
|
||||
python3 "$HOME/.claude/skills/do/scripts/setup-do.py" --worktree "<task description>"
|
||||
```
|
||||
|
||||
2. Use the `DO_WORKTREE_DIR` environment variable to direct `codeagent-wrapper` develop agent into the worktree. **Do NOT pass `--worktree` to subsequent calls** — that creates a new worktree each time.
|
||||
@@ -181,7 +181,7 @@ Develop in a separate worktree? (Isolates changes from main branch)
|
||||
|
||||
If user chooses worktree:
|
||||
```bash
|
||||
python3 ".claude/skills/do/scripts/setup-do.py" --worktree "<task description>"
|
||||
python3 "$HOME/.claude/skills/do/scripts/setup-do.py" --worktree "<task description>"
|
||||
# Save the worktree path from output for DO_WORKTREE_DIR
|
||||
```
|
||||
|
||||
|
||||
@@ -1,120 +0,0 @@
|
||||
---
|
||||
name: gemini
|
||||
description: Execute Gemini CLI for AI-powered code analysis and generation. Use when you need to leverage Google's Gemini models for complex reasoning tasks.
|
||||
---
|
||||
|
||||
# Gemini CLI Integration
|
||||
|
||||
## Overview
|
||||
|
||||
Execute Gemini CLI commands with support for multiple models and flexible prompt input. Integrates Google's Gemini AI models into Claude Code workflows.
|
||||
|
||||
## When to Use
|
||||
|
||||
- Complex reasoning tasks requiring advanced AI capabilities
|
||||
- Code generation and analysis with Gemini models
|
||||
- Tasks requiring Google's latest AI technology
|
||||
- Alternative perspective on code problems
|
||||
|
||||
## Usage
|
||||
**Mandatory**: Run via uv with fixed timeout 7200000ms (foreground):
|
||||
```bash
|
||||
uv run ~/.claude/skills/gemini/scripts/gemini.py "<prompt>" [working_dir]
|
||||
```
|
||||
|
||||
**Optional** (direct execution or using Python):
|
||||
```bash
|
||||
~/.claude/skills/gemini/scripts/gemini.py "<prompt>" [working_dir]
|
||||
# or
|
||||
python3 ~/.claude/skills/gemini/scripts/gemini.py "<prompt>" [working_dir]
|
||||
```
|
||||
|
||||
## Environment Variables
|
||||
|
||||
- **GEMINI_MODEL**: Configure model (default: `gemini-3-pro-preview`)
|
||||
- Example: `export GEMINI_MODEL=gemini-3`
|
||||
|
||||
## Timeout Control
|
||||
|
||||
- **Fixed**: 7200000 milliseconds (2 hours), immutable
|
||||
- **Bash tool**: Always set `timeout: 7200000` for double protection
|
||||
|
||||
### Parameters
|
||||
|
||||
- `prompt` (required): Task prompt or question
|
||||
- `working_dir` (optional): Working directory (default: current directory)
|
||||
|
||||
### Return Format
|
||||
|
||||
Plain text output from Gemini:
|
||||
|
||||
```text
|
||||
Model response text here...
|
||||
```
|
||||
|
||||
Error format (stderr):
|
||||
|
||||
```text
|
||||
ERROR: Error message
|
||||
```
|
||||
|
||||
### Invocation Pattern
|
||||
|
||||
When calling via Bash tool, always include the timeout parameter:
|
||||
|
||||
```yaml
|
||||
Bash tool parameters:
|
||||
- command: uv run ~/.claude/skills/gemini/scripts/gemini.py "<prompt>"
|
||||
- timeout: 7200000
|
||||
- description: <brief description of the task>
|
||||
```
|
||||
|
||||
Alternatives:
|
||||
|
||||
```yaml
|
||||
# Direct execution (simplest)
|
||||
- command: ~/.claude/skills/gemini/scripts/gemini.py "<prompt>"
|
||||
|
||||
# Using python3
|
||||
- command: python3 ~/.claude/skills/gemini/scripts/gemini.py "<prompt>"
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
**Basic query:**
|
||||
|
||||
```bash
|
||||
uv run ~/.claude/skills/gemini/scripts/gemini.py "explain quantum computing"
|
||||
# timeout: 7200000
|
||||
```
|
||||
|
||||
**Code analysis:**
|
||||
|
||||
```bash
|
||||
uv run ~/.claude/skills/gemini/scripts/gemini.py "review this code for security issues: $(cat app.py)"
|
||||
# timeout: 7200000
|
||||
```
|
||||
|
||||
**With specific working directory:**
|
||||
|
||||
```bash
|
||||
uv run ~/.claude/skills/gemini/scripts/gemini.py "analyze project structure" "/path/to/project"
|
||||
# timeout: 7200000
|
||||
```
|
||||
|
||||
**Using python3 directly (alternative):**
|
||||
|
||||
```bash
|
||||
python3 ~/.claude/skills/gemini/scripts/gemini.py "your prompt here"
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
- **Recommended**: Use `uv run` for automatic Python environment management (requires uv installed)
|
||||
- **Alternative**: Direct execution `./gemini.py` (uses system Python via shebang)
|
||||
- Python implementation using standard library (zero dependencies)
|
||||
- Cross-platform compatible (Windows/macOS/Linux)
|
||||
- PEP 723 compliant (inline script metadata)
|
||||
- Requires Gemini CLI installed and authenticated
|
||||
- Supports all Gemini model variants (configure via `GEMINI_MODEL` environment variable)
|
||||
- Output is streamed directly from Gemini CLI
|
||||
@@ -1,140 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# /// script
|
||||
# requires-python = ">=3.8"
|
||||
# dependencies = []
|
||||
# ///
|
||||
"""
|
||||
Gemini CLI wrapper with cross-platform support.
|
||||
|
||||
Usage:
|
||||
uv run gemini.py "<prompt>" [workdir]
|
||||
python3 gemini.py "<prompt>"
|
||||
./gemini.py "your prompt"
|
||||
"""
|
||||
import subprocess
|
||||
import sys
|
||||
import os
|
||||
|
||||
# Gemini model to invoke; override via the GEMINI_MODEL environment variable.
DEFAULT_MODEL = os.environ.get('GEMINI_MODEL', 'gemini-3-pro-preview')
# Default working directory ("." = caller's cwd) when no workdir argument is given.
DEFAULT_WORKDIR = '.'
TIMEOUT_MS = 7_200_000  # Fixed 2-hour limit, in milliseconds
DEFAULT_TIMEOUT = TIMEOUT_MS // 1000  # Same limit converted to whole seconds
FORCE_KILL_DELAY = 5  # Seconds to wait for a killed/terminated child process to exit
|
||||
|
||||
|
||||
def log_error(message: str):
    """Write an ERROR-prefixed line to stderr."""
    sys.stderr.write("ERROR: " + message + "\n")
|
||||
|
||||
|
||||
def log_warn(message: str):
    """Write a WARN-prefixed line to stderr."""
    sys.stderr.write("WARN: " + message + "\n")
|
||||
|
||||
|
||||
def log_info(message: str):
    """Write an INFO-prefixed line to stderr (stdout is reserved for Gemini output)."""
    sys.stderr.write("INFO: " + message + "\n")
|
||||
|
||||
|
||||
def parse_args():
    """Read positional CLI arguments: a required prompt, an optional workdir.

    Exits with status 1 (after logging) when no prompt is supplied.
    Returns a dict with 'prompt' and 'workdir' keys.
    """
    if len(sys.argv) < 2:
        log_error('Prompt required')
        sys.exit(1)

    workdir = sys.argv[2] if len(sys.argv) > 2 else DEFAULT_WORKDIR
    return {'prompt': sys.argv[1], 'workdir': workdir}
|
||||
|
||||
|
||||
def build_gemini_args(args) -> list:
    """Assemble the gemini CLI argv: binary name, model flag, prompt flag."""
    command = ['gemini']
    command += ['-m', DEFAULT_MODEL]
    command += ['-p', args['prompt']]
    return command
|
||||
|
||||
|
||||
def main():
    """Run the Gemini CLI with the given prompt, streaming its stdout live.

    Exit codes: child's own code on failure, 0 on success, 124 on timeout,
    127 when the gemini binary is missing, 130 on Ctrl-C.
    """
    log_info('Script started')
    args = parse_args()
    log_info(f"Prompt length: {len(args['prompt'])}")
    log_info(f"Working dir: {args['workdir']}")
    gemini_args = build_gemini_args(args)
    timeout_sec = DEFAULT_TIMEOUT
    log_info(f"Timeout: {timeout_sec}s")

    # If a working directory was specified, switch into it before spawning.
    if args['workdir'] != DEFAULT_WORKDIR:
        try:
            os.chdir(args['workdir'])
        except FileNotFoundError:
            log_error(f"Working directory not found: {args['workdir']}")
            sys.exit(1)
        except PermissionError:
            log_error(f"Permission denied: {args['workdir']}")
            sys.exit(1)
        log_info('Changed working directory')

    try:
        log_info(f"Starting gemini with model {DEFAULT_MODEL}")
        process = None
        # Spawn the gemini subprocess, capturing stdout and stderr as text.
        # NOTE(review): both streams use PIPE but only stdout is drained while
        # the child runs — a child that fills the stderr pipe buffer could
        # block; confirm gemini's stderr volume stays small.
        process = subprocess.Popen(
            gemini_args,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            bufsize=1  # line-buffered
        )

        # Stream stdout line-by-line as it arrives.
        for line in process.stdout:
            sys.stdout.write(line)
            sys.stdout.flush()

        # Wait for the child to exit.
        # NOTE(review): the timeout only bounds this wait after stdout hits
        # EOF, not the streaming loop above — confirm that is the intent.
        returncode = process.wait(timeout=timeout_sec)

        # Drain and forward whatever the child wrote to stderr.
        stderr_output = process.stderr.read()
        if stderr_output:
            sys.stderr.write(stderr_output)

        # Propagate a non-zero child exit code as our own.
        if returncode != 0:
            log_error(f'Gemini exited with status {returncode}')
            sys.exit(returncode)

        sys.exit(0)

    except subprocess.TimeoutExpired:
        # Conventional "timed out" exit code (matches coreutils `timeout`).
        log_error(f'Gemini execution timeout ({timeout_sec}s)')
        if process is not None:
            process.kill()
            try:
                process.wait(timeout=FORCE_KILL_DELAY)
            except subprocess.TimeoutExpired:
                pass
        sys.exit(124)

    except FileNotFoundError:
        # The gemini executable itself was not found (127 = command not found).
        # NOTE(review): this URL points at the generative-ai-python SDK repo,
        # not the Gemini CLI — confirm the intended install link.
        log_error("gemini command not found in PATH")
        log_error("Please install Gemini CLI: https://github.com/google/generative-ai-python")
        sys.exit(127)

    except KeyboardInterrupt:
        # Ctrl-C: ask the child to stop, escalate to kill, exit 128+SIGINT.
        if process is not None:
            process.terminate()
            try:
                process.wait(timeout=FORCE_KILL_DELAY)
            except subprocess.TimeoutExpired:
                process.kill()
        sys.exit(130)
|
||||
|
||||
|
||||
# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':
    main()
|
||||
@@ -26,6 +26,15 @@ Executable protocol enabling any agent task to run continuously across multiple
|
||||
/harness add "task description" # Add a task to the list
|
||||
```
|
||||
|
||||
## Activation Marker
|
||||
|
||||
Hooks only take effect when `.harness-active` marker file exists in the harness root (same directory as `harness-tasks.json`).
|
||||
Hook registration is configured in `hooks/hooks.json`.
|
||||
|
||||
- `/harness init` and `/harness run` MUST create this marker: `touch <project-path>/.harness-active`
|
||||
- When all tasks complete (no pending/in_progress/retryable left), remove it: `rm <project-path>/.harness-active`
|
||||
- Without this marker, all hooks are no-ops — they exit 0 immediately
|
||||
|
||||
## Progress Persistence (Dual-File System)
|
||||
|
||||
Maintain two files in the project working directory:
|
||||
@@ -54,6 +63,7 @@ Free-text log of all agent actions across sessions. Never truncate.
|
||||
"version": 2,
|
||||
"created": "2025-07-01T10:00:00Z",
|
||||
"session_config": {
|
||||
"concurrency_mode": "exclusive",
|
||||
"max_tasks_per_session": 20,
|
||||
"max_sessions": 50
|
||||
},
|
||||
@@ -126,6 +136,8 @@ Free-text log of all agent actions across sessions. Never truncate.
|
||||
|
||||
Task statuses: `pending` → `in_progress` (transient, set only during active execution) → `completed` or `failed`. A task found as `in_progress` at session start means the previous session was interrupted — handle via Context Window Recovery Protocol.
|
||||
|
||||
In concurrent mode (see Concurrency Control), tasks may also carry claim metadata: `claimed_by` and `lease_expires_at` (ISO timestamp).
|
||||
|
||||
**Session boundary**: A session starts when the agent begins executing the Session Start protocol and ends when a Stopping Condition is met or the context window resets. Each session gets a unique `SESSION-N` identifier (N = `session_count` after increment).
|
||||
|
||||
## Concurrency Control
|
||||
@@ -134,7 +146,23 @@ Before modifying `harness-tasks.json`, acquire an exclusive lock using portable
|
||||
|
||||
```bash
|
||||
# Acquire lock (fail fast if another agent is running)
|
||||
LOCKDIR="/tmp/harness-$(printf '%s' "$(pwd)" | shasum -a 256 2>/dev/null || sha256sum | cut -c1-8).lock"
|
||||
# Lock key must be stable even if invoked from a subdirectory.
|
||||
ROOT="$PWD"
|
||||
SEARCH="$PWD"
|
||||
while [ "$SEARCH" != "/" ] && [ ! -f "$SEARCH/harness-tasks.json" ]; do
|
||||
SEARCH="$(dirname "$SEARCH")"
|
||||
done
|
||||
if [ -f "$SEARCH/harness-tasks.json" ]; then
|
||||
ROOT="$SEARCH"
|
||||
fi
|
||||
|
||||
PWD_HASH="$(
|
||||
printf '%s' "$ROOT" |
|
||||
(shasum -a 256 2>/dev/null || sha256sum 2>/dev/null) |
|
||||
awk '{print $1}' |
|
||||
cut -c1-16
|
||||
)"
|
||||
LOCKDIR="/tmp/harness-${PWD_HASH:-unknown}.lock"
|
||||
if ! mkdir "$LOCKDIR" 2>/dev/null; then
|
||||
# Check if lock holder is still alive
|
||||
LOCK_PID=$(cat "$LOCKDIR/pid" 2>/dev/null)
|
||||
@@ -158,7 +186,16 @@ trap 'rm -rf "$LOCKDIR"' EXIT
|
||||
Log lock acquisition: `[timestamp] [SESSION-N] LOCK acquired (pid=<PID>)`
|
||||
Log lock release: `[timestamp] [SESSION-N] LOCK released`
|
||||
|
||||
The lock is held for the entire session. The `trap EXIT` handler releases it automatically on normal exit, errors, or signals. Never release the lock between tasks within a session.
|
||||
Modes:
|
||||
|
||||
- **Exclusive (default)**: hold the lock for the entire session (the `trap EXIT` handler releases it automatically). Any second session in the same state root fails fast.
|
||||
- **Concurrent (opt-in via `session_config.concurrency_mode: "concurrent"`)**: treat this as a **state transaction lock**. Hold it only while reading/modifying/writing `harness-tasks.json` (including `.bak`/`.tmp`) and appending to `harness-progress.txt`. Release it immediately before doing real work.
|
||||
|
||||
Concurrent mode invariants:
|
||||
|
||||
- All workers MUST point at the same state root (the directory that contains `harness-tasks.json`). If you are using separate worktrees/clones, pin it explicitly (e.g., `HARNESS_STATE_ROOT=/abs/path/to/state-root`).
|
||||
- Task selection is advisory; the real gate is **atomic claim** under the lock: set `status="in_progress"`, set `claimed_by` (stable worker id, e.g., `HARNESS_WORKER_ID`), set `lease_expires_at`. If claim fails (already `in_progress` with a valid lease), pick another eligible task and retry.
|
||||
- Never run two workers in the same git working directory. Use separate worktrees/clones. Otherwise rollback (`git reset --hard` / `git clean -fd`) will destroy other workers.
|
||||
|
||||
## Infinite Loop Protocol
|
||||
|
||||
@@ -166,7 +203,7 @@ The lock is held for the entire session. The `trap EXIT` handler releases it aut
|
||||
|
||||
1. **Read state**: Read last 200 lines of `harness-progress.txt` + full `harness-tasks.json`. If JSON is unparseable, see JSON corruption recovery in Error Handling.
|
||||
2. **Read git**: Run `git log --oneline -20` and `git diff --stat` to detect uncommitted work
|
||||
3. **Acquire lock**: Fail if another session is active
|
||||
3. **Acquire lock** (mode-dependent): Exclusive mode fails if another session is active. Concurrent mode uses the lock only for state transactions.
|
||||
4. **Recover interrupted tasks** (see Context Window Recovery below)
|
||||
5. **Health check**: Run `harness-init.sh` if it exists
|
||||
6. **Track session**: Increment `session_count` in JSON. Check `session_count` against `max_sessions` — if reached, log STATS and STOP. Initialize per-session task counter to 0.
|
||||
@@ -189,13 +226,13 @@ Then pick the next task in this priority order:
|
||||
|
||||
For each task, execute this exact sequence:
|
||||
|
||||
1. **Claim**: Record `started_at_commit` = current HEAD hash. Set status to `in_progress`, log `Starting [<task-id>] <title> (base=<hash>)`
|
||||
1. **Claim** (atomic, under lock): Record `started_at_commit` = current HEAD hash. Set status to `in_progress`, set `claimed_by`, set `lease_expires_at`, log `Starting [<task-id>] <title> (base=<hash>)`. If the task is already claimed (`in_progress` with a valid lease), pick another eligible task and retry.
|
||||
2. **Execute with checkpoints**: Perform the work. After each significant step, log:
|
||||
```
|
||||
[timestamp] [SESSION-N] CHECKPOINT [task-id] step=M/N "description of what was done"
|
||||
```
|
||||
Also append to the task's `checkpoints` array: `{ "step": M, "total": N, "description": "...", "timestamp": "ISO" }`
|
||||
3. **Validate**: Run the task's `validation.command` wrapped with `timeout`: `timeout <timeout_seconds> <command>`. If no validation command, skip. Before running, verify the command exists (e.g., `command -v <binary>`) — if missing, treat as `ENV_SETUP` error.
|
||||
Also append to the task's `checkpoints` array: `{ "step": M, "total": N, "description": "...", "timestamp": "ISO" }`. In concurrent mode, renew the lease at each checkpoint (push `lease_expires_at` forward).
|
||||
3. **Validate**: Run the task's `validation.command` with a timeout wrapper (prefer `timeout`; on macOS use `gtimeout` from coreutils). If `validation.command` is empty/null, log `ERROR [<task-id>] [CONFIG] Missing validation.command` and STOP — do not declare completion without an objective check. Before running, verify the command exists (e.g., `command -v <binary>`) — if missing, treat as `ENV_SETUP` error.
|
||||
- Command exits 0 → PASS
|
||||
- Command exits non-zero → FAIL
|
||||
- Command exceeds timeout → TIMEOUT
|
||||
@@ -217,6 +254,9 @@ For each task, execute this exact sequence:
|
||||
|
||||
When a new session starts and finds a task with `status: "in_progress"`:
|
||||
|
||||
- Exclusive mode: treat this as an interrupted previous session and run the Recovery Protocol below.
|
||||
- Concurrent mode: only recover a task if either (a) `claimed_by` matches this worker, or (b) `lease_expires_at` is in the past (stale lease). Otherwise, treat it as owned by another worker and do not modify it.
|
||||
|
||||
1. **Check git state**:
|
||||
```bash
|
||||
git diff --stat # Uncommitted changes?
|
||||
@@ -243,6 +283,7 @@ Each error category has a default recovery strategy:
|
||||
| Category | Default Recovery | Agent Action |
|
||||
|----------|-----------------|--------------|
|
||||
| `ENV_SETUP` | Re-run init, then STOP if still failing | Run `harness-init.sh` again immediately. If fails twice, log and stop — environment is broken |
|
||||
| `CONFIG` | STOP (requires human fix) | Log the config error precisely (file + field), then STOP. Do not guess or auto-mutate task metadata |
|
||||
| `TASK_EXEC` | Rollback via `git reset --hard <started_at_commit>`, retry | Verify `started_at_commit` exists (`git cat-file -t <hash>`). If missing, mark failed at max_attempts. Otherwise reset, run `on_failure.cleanup` if defined, retry if attempts < max_attempts |
|
||||
| `TEST_FAIL` | Rollback via `git reset --hard <started_at_commit>`, retry | Reset to `started_at_commit`, analyze test output to identify fix, retry with targeted changes |
|
||||
| `TIMEOUT` | Kill process, execute cleanup, retry | Wrap validation with `timeout <seconds> <command>`. On timeout, run `on_failure.cleanup`, retry (consider splitting task if repeated) |
|
||||
@@ -251,7 +292,7 @@ Each error category has a default recovery strategy:
|
||||
|
||||
**JSON corruption**: If `harness-tasks.json` cannot be parsed, check for `harness-tasks.json.bak` (written before each modification). If backup exists and is valid, restore from it. If no valid backup, log `ERROR [ENV_SETUP] harness-tasks.json corrupted and unrecoverable` and STOP — task metadata (validation commands, dependencies, cleanup) cannot be reconstructed from logs alone.
|
||||
|
||||
**Backup protocol**: Before every write to `harness-tasks.json`, copy the current file to `harness-tasks.json.bak`.
|
||||
**Backup protocol**: Before every write to `harness-tasks.json`, copy the current file to `harness-tasks.json.bak`. Write updates atomically: write JSON to `harness-tasks.json.tmp` then `mv` it into place (readers should never see a partial file).
|
||||
|
||||
## Environment Initialization
|
||||
|
||||
@@ -279,7 +320,7 @@ All log entries use grep-friendly format on a single line:
|
||||
|
||||
Types: `INIT`, `Starting`, `Completed`, `ERROR`, `CHECKPOINT`, `ROLLBACK`, `RECOVERY`, `STATS`, `LOCK`, `WARN`
|
||||
|
||||
Error categories: `ENV_SETUP`, `TASK_EXEC`, `TEST_FAIL`, `TIMEOUT`, `DEPENDENCY`, `SESSION_TIMEOUT`
|
||||
Error categories: `ENV_SETUP`, `CONFIG`, `TASK_EXEC`, `TEST_FAIL`, `TIMEOUT`, `DEPENDENCY`, `SESSION_TIMEOUT`
|
||||
|
||||
Filtering:
|
||||
```bash
|
||||
@@ -293,7 +334,7 @@ grep "RECOVERY" harness-progress.txt # All recovery actions
|
||||
|
||||
## Session Statistics
|
||||
|
||||
At session end, update `harness-tasks.json`: increment `session_count`, set `last_session` to current timestamp. Then append:
|
||||
At session end, update `harness-tasks.json`: set `last_session` to current timestamp. (Do NOT increment `session_count` here — it is incremented at Session Start.) Then append:
|
||||
|
||||
```
|
||||
[timestamp] [SESSION-N] STATS tasks_total=10 completed=7 failed=1 pending=2 blocked=0 attempts_total=12 checkpoints=23
|
||||
@@ -321,9 +362,11 @@ Does NOT acquire the lock (read-only operation).
|
||||
|
||||
## Add Command (`/harness add`)
|
||||
|
||||
Append a new task to `harness-tasks.json` with auto-incremented id (`task-NNN`), status `pending`, default `max_attempts: 3`, empty `depends_on`, and no validation command. Prompt user for optional fields: `priority`, `depends_on`, `validation.command`, `timeout_seconds`. Requires lock acquisition (modifies JSON).
|
||||
Append a new task to `harness-tasks.json` with auto-incremented id (`task-NNN`), status `pending`, default `max_attempts: 3`, empty `depends_on`, and no validation command (required before the task can be completed). Prompt user for optional fields: `priority`, `depends_on`, `validation.command`, `timeout_seconds`. Requires lock acquisition (modifies JSON).
|
||||
|
||||
## Tool Dependencies
|
||||
|
||||
Requires: Bash, file read/write, git. All harness operations must be executed from the project root directory.
|
||||
Does NOT require: specific MCP servers, programming languages, or test frameworks.
|
||||
|
||||
Concurrent mode requires isolated working directories (`git worktree` or separate clones). Do not run concurrent workers in the same working tree.
|
||||
|
||||
410
skills/harness/hooks/_harness_common.py
Normal file
410
skills/harness/hooks/_harness_common.py
Normal file
@@ -0,0 +1,410 @@
|
||||
"""Shared utilities for harness hooks.
|
||||
|
||||
Consolidates duplicated logic: payload reading, state root discovery,
|
||||
JSON I/O, lock primitives, task eligibility, and ISO time helpers.
|
||||
|
||||
Ported from Codex harness hooks to Claude Code.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import datetime as _dt
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Time helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def utc_now() -> _dt.datetime:
    """Return the current moment as a timezone-aware UTC datetime."""
    return _dt.datetime.now(_dt.timezone.utc)
|
||||
|
||||
|
||||
def iso_z(dt: _dt.datetime) -> str:
    """Format *dt* as a second-precision UTC ISO-8601 string with a ``Z`` suffix."""
    normalized = dt.astimezone(_dt.timezone.utc).replace(microsecond=0)
    text = normalized.isoformat()
    return text[:-6] + "Z" if text.endswith("+00:00") else text
|
||||
|
||||
|
||||
def parse_iso(ts: Any) -> Optional[_dt.datetime]:
    """Parse an ISO-8601 timestamp (``Z`` suffix allowed) into an aware UTC datetime.

    Returns None for non-strings, blank strings, and unparseable input.
    Naive timestamps are assumed to be UTC.
    """
    if not isinstance(ts, str):
        return None
    text = ts.strip()
    if not text:
        return None
    if text.endswith("Z"):
        text = text[:-1] + "+00:00"
    try:
        parsed = _dt.datetime.fromisoformat(text)
    except Exception:
        return None
    if parsed.tzinfo is None:
        parsed = parsed.replace(tzinfo=_dt.timezone.utc)
    return parsed.astimezone(_dt.timezone.utc)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Hook payload
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def read_hook_payload() -> dict[str, Any]:
    """Return the JSON object Claude Code pipes to command hooks on stdin.

    Any failure mode (empty stdin, invalid JSON, non-object payload)
    yields an empty dict rather than raising.
    """
    text = sys.stdin.read().strip()
    if not text:
        return {}
    try:
        payload = json.loads(text)
    except Exception:
        return {}
    return payload if isinstance(payload, dict) else {}
|
||||
|
||||
|
||||
def maybe_log_hook_event(root: Path, payload: dict[str, Any], hook_script: str) -> None:
    """Append a one-line JSON record of this hook run to HARNESS_HOOK_LOG.

    Opt-in debugging aid: a no-op unless the HARNESS_HOOK_LOG env var is set.
    Call only after the .harness-active guard has passed. Any logging failure
    is swallowed — debugging must never break the hook itself.
    """
    target = os.environ.get("HARNESS_HOOK_LOG")
    if not target:
        return

    record: dict[str, Any] = {
        "ts": iso_z(utc_now()),
        "hook_script": hook_script,
        "hook_event_name": payload.get("hook_event_name"),
        "harness_root": str(root),
    }
    passthrough = (
        "session_id",
        "cwd",
        "source",
        "reason",
        "teammate_name",
        "team_name",
        "agent_id",
        "agent_type",
        "stop_hook_active",
    )
    record.update({key: payload[key] for key in passthrough if key in payload})

    try:
        dest = Path(target).expanduser()
        dest.parent.mkdir(parents=True, exist_ok=True)
        with dest.open("a", encoding="utf-8") as fh:
            fh.write(json.dumps(record, ensure_ascii=False) + "\n")
    except Exception:
        return
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# State root discovery
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def find_harness_root(payload: dict[str, Any]) -> Optional[Path]:
    """Locate the directory that holds ``harness-tasks.json``.

    Search order:
    1. HARNESS_STATE_ROOT env var (the file must live directly in it)
    2. CLAUDE_PROJECT_DIR env var, walking up through parents
    3. payload["cwd"] (or the process cwd), walking up through parents

    Returns None when no candidate directory contains the file.
    """
    pinned = os.environ.get("HARNESS_STATE_ROOT")
    if pinned:
        root = Path(pinned)
        if (root / "harness-tasks.json").is_file():
            try:
                return root.resolve()
            except Exception:
                return root

    bases: list[Path] = []
    project_dir = os.environ.get("CLAUDE_PROJECT_DIR")
    if project_dir:
        bases.append(Path(project_dir))
    bases.append(Path(payload.get("cwd") or os.getcwd()))

    visited: set[str] = set()
    for base in bases:
        try:
            base = base.resolve()
        except Exception:
            continue
        key = str(base)
        if key in visited:
            continue
        visited.add(key)
        # Check the base itself plus at most 8 ancestor directories.
        for candidate in (base, *list(base.parents)[:8]):
            if (candidate / "harness-tasks.json").is_file():
                return candidate
    return None
|
||||
|
||||
|
||||
def is_harness_active(root: Path) -> bool:
    """Report whether the ``.harness-active`` marker file exists under *root*.

    Hooks are live only while this marker is present; otherwise they no-op.
    """
    marker = root / ".harness-active"
    return marker.is_file()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# JSON I/O
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def load_json(path: Path) -> dict[str, Any]:
    """Load *path* as JSON, requiring the top-level value to be an object."""
    data = json.loads(path.read_text(encoding="utf-8"))
    if not isinstance(data, dict):
        raise ValueError(f"{path.name} must be a JSON object")
    return data
|
||||
|
||||
|
||||
def atomic_write_json(path: Path, data: dict[str, Any]) -> None:
    """Replace *path* with pretty-printed JSON, atomically.

    Protocol: copy the current file to ``<name>.bak``, write the new payload
    to ``<name>.tmp``, then rename the tmp over the original so readers never
    observe a partially-written file. *path* must already exist (the backup
    copy raises otherwise).
    """
    backup = path.with_name(f"{path.name}.bak")
    scratch = path.with_name(f"{path.name}.tmp")
    shutil.copy2(path, backup)
    payload = json.dumps(data, ensure_ascii=False, indent=2) + "\n"
    scratch.write_text(payload, encoding="utf-8")
    os.replace(scratch, path)
|
||||
|
||||
|
||||
def tail_text(path: Path, max_bytes: int = 200_000) -> str:
    """Return up to the final *max_bytes* bytes of *path*, decoded as UTF-8.

    Undecodable bytes (including a multi-byte char split at the cut point)
    are replaced rather than raising. An unseekable stream falls back to
    reading from the start.
    """
    with path.open("rb") as fh:
        try:
            fh.seek(0, os.SEEK_END)
            total = fh.tell()
            fh.seek(max(0, total - max_bytes), os.SEEK_SET)
        except Exception:
            fh.seek(0, os.SEEK_SET)
        raw = fh.read()
    return raw.decode("utf-8", errors="replace")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Lock primitives (mkdir-based, POSIX-portable)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def lockdir_for_root(root: Path) -> Path:
    """Map a state root to its /tmp lock directory (stable sha256-derived name)."""
    digest = hashlib.sha256(str(root).encode("utf-8")).hexdigest()
    return Path(f"/tmp/harness-{digest[:16]}.lock")
|
||||
|
||||
|
||||
def _pid_alive(pid: int) -> bool:
    """Return True when signal 0 can be delivered to *pid*.

    Any failure — including PermissionError for a live process owned by
    another user — is reported as not-alive; callers treat that as a
    stale lock holder.
    """
    try:
        os.kill(pid, 0)
    except Exception:
        return False
    return True
|
||||
|
||||
|
||||
def _read_pid(lockdir: Path) -> Optional[int]:
    """Parse the owning PID recorded in ``<lockdir>/pid``; None if unreadable/empty."""
    try:
        text = (lockdir / "pid").read_text("utf-8").strip()
        return int(text) if text else None
    except Exception:
        return None
|
||||
|
||||
|
||||
def acquire_lock(lockdir: Path, timeout_seconds: float = 5.0) -> None:
    """Acquire the mkdir-based lock, stealing it only from dead holders.

    Spins (50 ms steps) until mkdir succeeds or *timeout_seconds* elapses,
    then raises TimeoutError. A lock whose recorded pid is missing for over
    one second, or whose pid is not alive, is treated as stale and removed.
    """
    deadline = time.time() + timeout_seconds
    # Timestamp of when we first saw the lock with no readable pid file;
    # gives a racing locker ~1s to finish writing its pid before we steal.
    missing_pid_since: Optional[float] = None
    while True:
        try:
            # mkdir is atomic: success means we own the lock.
            lockdir.mkdir(mode=0o700)
            (lockdir / "pid").write_text(str(os.getpid()), encoding="utf-8")
            return
        except FileExistsError:
            pid = _read_pid(lockdir)
            if pid is None:
                if missing_pid_since is None:
                    missing_pid_since = time.time()
                # Grace period: the pid file may simply not be written yet.
                if time.time() - missing_pid_since < 1.0:
                    if time.time() >= deadline:
                        raise TimeoutError("lock busy (pid missing)")
                    time.sleep(0.05)
                    continue
            else:
                missing_pid_since = None
                if _pid_alive(pid):
                    # Live holder: keep waiting until the deadline.
                    if time.time() >= deadline:
                        raise TimeoutError(f"lock busy (pid={pid})")
                    time.sleep(0.05)
                    continue

            # Stale lock (dead pid, or pid file missing past the grace period):
            # rename it aside atomically so only one stealer wins, then delete.
            stale = lockdir.with_name(
                f"{lockdir.name}.stale.{os.getpid()}.{int(time.time())}"
            )
            try:
                lockdir.rename(stale)
            except Exception:
                # Someone else stole or released it first; retry the mkdir.
                if time.time() >= deadline:
                    raise TimeoutError("lock contention")
                time.sleep(0.05)
                continue
            shutil.rmtree(stale, ignore_errors=True)
            missing_pid_since = None
            continue
|
||||
|
||||
|
||||
def release_lock(lockdir: Path) -> None:
    """Drop the mkdir-based lock; tolerates the directory already being gone."""
    shutil.rmtree(lockdir, ignore_errors=True)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Task helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def priority_rank(v: Any) -> int:
    """Map a priority label to a sort rank: P0 < P1 < P2 < everything else (9)."""
    ranks = {"P0": 0, "P1": 1, "P2": 2}
    return ranks.get(str(v or ""), 9)
|
||||
|
||||
|
||||
def task_attempts(t: dict[str, Any]) -> int:
    """Read a task's attempt counter, coercing missing/invalid values to 0."""
    raw = t.get("attempts") or 0
    try:
        return int(raw)
    except Exception:
        return 0
|
||||
|
||||
|
||||
def task_max_attempts(t: dict[str, Any]) -> int:
    """Read a task's max_attempts, defaulting to 3 when missing or invalid."""
    raw = t.get("max_attempts")
    if raw is None:
        return 3
    try:
        return int(raw)
    except Exception:
        return 3
|
||||
|
||||
|
||||
def deps_completed(t: dict[str, Any], completed_ids: set[str]) -> bool:
    """True when every id in the task's depends_on list is already completed.

    A non-list depends_on value blocks the task (returns False); a missing
    or empty list means no dependencies.
    """
    deps = t.get("depends_on") or []
    if not isinstance(deps, list):
        return False
    return all(str(dep) in completed_ids for dep in deps)
|
||||
|
||||
|
||||
def parse_tasks(state: dict[str, Any]) -> list[dict[str, Any]]:
    """Return the dict-shaped entries of state["tasks"], dropping anything else.

    Raises ValueError when the tasks field is present but not a list.
    """
    raw = state.get("tasks") or []
    if not isinstance(raw, list):
        raise ValueError("tasks must be a list")
    return [entry for entry in raw if isinstance(entry, dict)]
|
||||
|
||||
|
||||
def completed_ids(tasks: list[dict[str, Any]]) -> set[str]:
    """Collect the (stringified) ids of all tasks whose status is "completed"."""
    done: set[str] = set()
    for task in tasks:
        if str(task.get("status", "")) == "completed":
            done.add(str(task.get("id", "")))
    return done
|
||||
|
||||
|
||||
def eligible_tasks(
    tasks: list[dict[str, Any]],
) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]:
    """Split tasks into (pending_eligible, retryable_failed).

    Both lists contain only tasks whose dependencies are all completed, and
    both are sorted by priority rank then id. "Retryable" means failed with
    attempts still below max_attempts.
    """
    done = completed_ids(tasks)

    def order(task: dict[str, Any]) -> tuple[int, str]:
        return (priority_rank(task.get("priority")), str(task.get("id", "")))

    pending_ok: list[dict[str, Any]] = []
    retryable: list[dict[str, Any]] = []
    for task in tasks:
        if not deps_completed(task, done):
            continue
        status = str(task.get("status", ""))
        if status == "pending":
            pending_ok.append(task)
        elif status == "failed" and task_attempts(task) < task_max_attempts(task):
            retryable.append(task)
    return sorted(pending_ok, key=order), sorted(retryable, key=order)
|
||||
|
||||
|
||||
def pick_next(
    pending: list[dict[str, Any]], retry: list[dict[str, Any]]
) -> Optional[dict[str, Any]]:
    """Choose the next task: first pending if any, else first retryable, else None."""
    for bucket in (pending, retry):
        if bucket:
            return bucket[0]
    return None
|
||||
|
||||
|
||||
def status_counts(tasks: list[dict[str, Any]]) -> dict[str, int]:
    """Tally tasks by status; a missing/empty status counts as "pending"."""
    tally: dict[str, int] = {}
    for task in tasks:
        label = str(task.get("status") or "pending")
        tally[label] = tally.get(label, 0) + 1
    return tally
|
||||
|
||||
|
||||
def reap_stale_leases(
    tasks: list[dict[str, Any]], now: _dt.datetime
) -> bool:
    """Fail in_progress tasks whose lease has expired; report whether any changed.

    For each expired lease: bump attempts, append a SESSION_TIMEOUT entry to
    the task's error_log, set status to "failed", and strip the claim fields.
    Tasks without a parseable lease_expires_at are left untouched.
    """
    changed = False
    for task in tasks:
        if str(task.get("status", "")) != "in_progress":
            continue
        expires = parse_iso(task.get("lease_expires_at"))
        if expires is None or expires > now:
            continue

        task["attempts"] = task_attempts(task) + 1
        note = f"[SESSION_TIMEOUT] Lease expired (claimed_by={task.get('claimed_by')})"
        existing = task.get("error_log")
        if isinstance(existing, list):
            existing.append(note)
        else:
            task["error_log"] = [note]

        task["status"] = "failed"
        for claim_key in ("claimed_by", "lease_expires_at", "claimed_at"):
            task.pop(claim_key, None)
        changed = True
    return changed
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Session config helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def get_session_config(state: dict[str, Any]) -> dict[str, Any]:
    """Return state["session_config"] when it is a dict, else an empty dict."""
    cfg = state.get("session_config")
    if isinstance(cfg, dict):
        return cfg
    return {}
|
||||
|
||||
|
||||
def is_concurrent(cfg: dict[str, Any]) -> bool:
    """True when session_config opts into concurrent mode (default: exclusive)."""
    mode = cfg.get("concurrency_mode") or "exclusive"
    return str(mode) == "concurrent"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Hook output helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def emit_block(reason: str) -> None:
    """Write a {"decision": "block", "reason": ...} JSON decision to stdout."""
    decision = {"decision": "block", "reason": reason}
    print(json.dumps(decision, ensure_ascii=False))
|
||||
|
||||
|
||||
def emit_allow(reason: str = "") -> None:
|
||||
"""Print a JSON allow decision to stdout and exit 0."""
|
||||
out: dict[str, Any] = {"decision": "allow"}
|
||||
if reason:
|
||||
out["reason"] = reason
|
||||
print(json.dumps(out, ensure_ascii=False))
|
||||
|
||||
|
||||
def emit_context(context: str) -> None:
    """Inject extra model context via the hookSpecificOutput channel."""
    payload = {"hookSpecificOutput": {"additionalContext": context}}
    print(json.dumps(payload, ensure_ascii=False))
|
||||
|
||||
|
||||
def emit_json(data: dict[str, Any]) -> None:
    """Serialize *data* as JSON (non-ASCII preserved) and print it to stdout."""
    text = json.dumps(data, ensure_ascii=False)
    print(text)
|
||||
301
skills/harness/hooks/harness-claim.py
Executable file
301
skills/harness/hooks/harness-claim.py
Executable file
@@ -0,0 +1,301 @@
|
||||
#!/usr/bin/env python3
|
||||
from __future__ import annotations
|
||||
|
||||
import datetime as _dt
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import socket
|
||||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional
|
||||
|
||||
|
||||
def _utc_now() -> _dt.datetime:
    """Return the current moment as a timezone-aware UTC datetime."""
    return _dt.datetime.now(_dt.timezone.utc)
|
||||
|
||||
|
||||
def _iso_z(dt: _dt.datetime) -> str:
    """Format *dt* as a second-precision UTC ISO-8601 string with a ``Z`` suffix."""
    normalized = dt.astimezone(_dt.timezone.utc).replace(microsecond=0)
    text = normalized.isoformat()
    return text[:-6] + "Z" if text.endswith("+00:00") else text
|
||||
|
||||
|
||||
def _parse_iso(ts: Any) -> Optional[_dt.datetime]:
    """Parse an ISO-8601 timestamp (``Z`` suffix allowed) into an aware UTC datetime.

    Returns None for non-strings, blank strings, and unparseable input.
    Naive timestamps are assumed to be UTC.
    """
    if not isinstance(ts, str):
        return None
    text = ts.strip()
    if not text:
        return None
    if text.endswith("Z"):
        text = text[:-1] + "+00:00"
    try:
        parsed = _dt.datetime.fromisoformat(text)
    except Exception:
        return None
    if parsed.tzinfo is None:
        parsed = parsed.replace(tzinfo=_dt.timezone.utc)
    return parsed.astimezone(_dt.timezone.utc)
|
||||
|
||||
|
||||
def _read_payload() -> dict[str, Any]:
    """Return the JSON object piped to this hook on stdin; {} on any failure."""
    text = sys.stdin.read().strip()
    if not text:
        return {}
    try:
        payload = json.loads(text)
    except Exception:
        return {}
    return payload if isinstance(payload, dict) else {}
|
||||
|
||||
|
||||
def _find_state_root(payload: dict[str, Any]) -> Optional[Path]:
    """Locate the directory that holds ``harness-tasks.json``.

    Search order: HARNESS_STATE_ROOT (file must live directly in it), then
    CLAUDE_PROJECT_DIR and payload["cwd"]/os.getcwd(), each walked up through
    at most 8 parent directories. Returns None when nothing matches.
    """
    pinned = os.environ.get("HARNESS_STATE_ROOT")
    if pinned:
        root = Path(pinned)
        if (root / "harness-tasks.json").is_file():
            try:
                return root.resolve()
            except Exception:
                return root

    bases: list[Path] = []
    project_dir = os.environ.get("CLAUDE_PROJECT_DIR")
    if project_dir:
        bases.append(Path(project_dir))
    bases.append(Path(payload.get("cwd") or os.getcwd()))

    visited: set[str] = set()
    for base in bases:
        try:
            base = base.resolve()
        except Exception:
            continue
        key = str(base)
        if key in visited:
            continue
        visited.add(key)
        for candidate in (base, *list(base.parents)[:8]):
            if (candidate / "harness-tasks.json").is_file():
                return candidate
    return None
|
||||
|
||||
|
||||
def _lockdir_for_root(root: Path) -> Path:
|
||||
h = hashlib.sha256(str(root).encode("utf-8")).hexdigest()[:16]
|
||||
return Path("/tmp") / f"harness-{h}.lock"
|
||||
|
||||
|
||||
def _pid_alive(pid: int) -> bool:
|
||||
try:
|
||||
os.kill(pid, 0)
|
||||
return True
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
def _read_pid(lockdir: Path) -> Optional[int]:
|
||||
try:
|
||||
raw = (lockdir / "pid").read_text("utf-8").strip()
|
||||
return int(raw) if raw else None
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def _acquire_lock(lockdir: Path, timeout_seconds: float) -> None:
    """Acquire an exclusive cross-process lock by creating *lockdir*.

    mkdir is atomic, so whoever creates the directory owns the lock; the
    owner's PID is then recorded in lockdir/pid. On contention we poll
    every 50ms until *timeout_seconds* elapses, raising TimeoutError.
    A lock whose recorded owner is dead — or whose pid file has been
    missing for over 1 second — is treated as stale and stolen.
    """
    deadline = time.time() + timeout_seconds
    # When the pid file is absent, remember when we first noticed: the
    # current owner may be between mkdir and write_text, so it gets a
    # 1-second grace window before the lock is considered stale.
    missing_pid_since: Optional[float] = None
    while True:
        try:
            lockdir.mkdir(mode=0o700)
            (lockdir / "pid").write_text(str(os.getpid()), encoding="utf-8")
            return
        except FileExistsError:
            pid = _read_pid(lockdir)
            if pid is None:
                if missing_pid_since is None:
                    missing_pid_since = time.time()
                if time.time() - missing_pid_since < 1.0:
                    # Still inside the grace window — wait and retry.
                    if time.time() >= deadline:
                        raise TimeoutError("lock busy (pid missing)")
                    time.sleep(0.05)
                    continue
            else:
                missing_pid_since = None
                if _pid_alive(pid):
                    # Lock is legitimately held by a live process.
                    if time.time() >= deadline:
                        raise TimeoutError(f"lock busy (pid={pid})")
                    time.sleep(0.05)
                    continue

            # Stale lock: rename it aside first so that only one stealer
            # can win the race, then remove the renamed directory.
            stale = lockdir.with_name(f"{lockdir.name}.stale.{os.getpid()}.{int(time.time())}")
            try:
                lockdir.rename(stale)
            except Exception:
                # Another process renamed/stole it first — retry the loop.
                if time.time() >= deadline:
                    raise TimeoutError("lock contention")
                time.sleep(0.05)
                continue
            shutil.rmtree(stale, ignore_errors=True)
            missing_pid_since = None
            continue
|
||||
|
||||
|
||||
def _release_lock(lockdir: Path) -> None:
|
||||
shutil.rmtree(lockdir, ignore_errors=True)
|
||||
|
||||
|
||||
def _load_state(path: Path) -> dict[str, Any]:
|
||||
with path.open("r", encoding="utf-8") as f:
|
||||
data = json.load(f)
|
||||
if not isinstance(data, dict):
|
||||
raise ValueError("harness-tasks.json must be an object")
|
||||
return data
|
||||
|
||||
|
||||
def _atomic_write_json(path: Path, data: dict[str, Any]) -> None:
|
||||
bak = path.with_name(f"{path.name}.bak")
|
||||
tmp = path.with_name(f"{path.name}.tmp")
|
||||
shutil.copy2(path, bak)
|
||||
tmp.write_text(json.dumps(data, ensure_ascii=False, indent=2) + "\n", encoding="utf-8")
|
||||
os.replace(tmp, path)
|
||||
|
||||
|
||||
def _priority_rank(v: Any) -> int:
|
||||
return {"P0": 0, "P1": 1, "P2": 2}.get(str(v or ""), 9)
|
||||
|
||||
|
||||
def _eligible_tasks(tasks: list[dict[str, Any]]) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]:
    """Split *tasks* into (pending, retryable-failed) lists of runnable tasks.

    A task is runnable when every id in its depends_on list is completed
    (a non-list depends_on disqualifies it). Failed tasks are retryable
    while attempts < max_attempts (default 3). Both lists come back
    sorted by priority rank, then id.
    """
    done = {str(t.get("id", "")) for t in tasks if str(t.get("status", "")) == "completed"}

    def _deps_met(task: dict[str, Any]) -> bool:
        deps = task.get("depends_on") or []
        return isinstance(deps, list) and all(str(d) in done for d in deps)

    def _tries(task: dict[str, Any]) -> int:
        try:
            return int(task.get("attempts") or 0)
        except Exception:
            return 0

    def _try_limit(task: dict[str, Any]) -> int:
        try:
            limit = task.get("max_attempts")
            return int(limit) if limit is not None else 3
        except Exception:
            return 3

    def _order(task: dict[str, Any]) -> tuple[int, str]:
        return (_priority_rank(task.get("priority")), str(task.get("id", "")))

    ready = sorted(
        (t for t in tasks if str(t.get("status", "")) == "pending" and _deps_met(t)),
        key=_order,
    )
    retryable = sorted(
        (
            t
            for t in tasks
            if str(t.get("status", "")) == "failed"
            and _tries(t) < _try_limit(t)
            and _deps_met(t)
        ),
        key=_order,
    )
    return ready, retryable
|
||||
|
||||
|
||||
def _reap_stale_leases(tasks: list[dict[str, Any]], now: _dt.datetime) -> bool:
    """Fail every in_progress task whose lease expired at or before *now*.

    Each expired task gets its attempts counter incremented, a
    [SESSION_TIMEOUT] entry appended to error_log, status set to 'failed',
    and its claim fields removed. Returns True when any task was changed.
    """
    mutated = False
    for task in tasks:
        if str(task.get("status", "")) != "in_progress":
            continue
        expires = _parse_iso(task.get("lease_expires_at"))
        if expires is None or expires > now:
            continue

        try:
            task["attempts"] = int(task.get("attempts") or 0) + 1
        except Exception:
            task["attempts"] = 1

        message = f"[SESSION_TIMEOUT] Lease expired (claimed_by={task.get('claimed_by')})"
        log = task.get("error_log")
        if isinstance(log, list):
            log.append(message)
        else:
            task["error_log"] = [message]

        task["status"] = "failed"
        for stale_key in ("claimed_by", "lease_expires_at", "claimed_at"):
            task.pop(stale_key, None)
        mutated = True
    return mutated
|
||||
|
||||
|
||||
def main() -> int:
    """Claim the next eligible harness task for this worker.

    Reads the hook payload from stdin, locates the state root, reaps
    expired leases, then claims the highest-priority eligible task by
    stamping it in_progress with a fresh lease. Prints a one-line JSON
    result and always returns 0 so the hook never hard-fails the session.
    """
    payload = _read_payload()
    root = _find_state_root(payload)
    if root is None:
        print(json.dumps({"claimed": False, "error": "state root not found"}, ensure_ascii=False))
        return 0

    tasks_path = root / "harness-tasks.json"
    lockdir = _lockdir_for_root(root)

    timeout_s = float(os.environ.get("HARNESS_LOCK_TIMEOUT_SECONDS") or "5")
    # Fix: report lock-acquisition failures (TimeoutError) as a JSON result
    # instead of crashing the hook with a traceback — consistent with the
    # harness-renew hook's handling.
    try:
        _acquire_lock(lockdir, timeout_s)
    except Exception as e:
        print(json.dumps({"claimed": False, "error": str(e)}, ensure_ascii=False))
        return 0
    try:
        state = _load_state(tasks_path)
        session_config = state.get("session_config") or {}
        if not isinstance(session_config, dict):
            session_config = {}
        concurrency_mode = str(session_config.get("concurrency_mode") or "exclusive")
        is_concurrent = concurrency_mode == "concurrent"
        tasks_raw = state.get("tasks") or []
        if not isinstance(tasks_raw, list):
            raise ValueError("tasks must be a list")
        tasks = [t for t in tasks_raw if isinstance(t, dict)]

        now = _utc_now()
        # Expired leases are failed (and persisted) before selection so the
        # freed tasks become eligible for retry in this very call.
        if _reap_stale_leases(tasks, now):
            state["tasks"] = tasks
            _atomic_write_json(tasks_path, state)

        pending, retry = _eligible_tasks(tasks)
        task = pending[0] if pending else (retry[0] if retry else None)
        if task is None:
            print(json.dumps({"claimed": False}, ensure_ascii=False))
            return 0

        worker_id = os.environ.get("HARNESS_WORKER_ID") or ""
        if is_concurrent and not worker_id:
            # Concurrent mode requires an explicit, stable worker identity.
            print(json.dumps({"claimed": False, "error": "missing HARNESS_WORKER_ID"}, ensure_ascii=False))
            return 0
        if not worker_id:
            worker_id = f"{socket.gethostname()}:{os.getpid()}"
        lease_seconds = int(os.environ.get("HARNESS_LEASE_SECONDS") or "1800")
        exp = now + _dt.timedelta(seconds=lease_seconds)

        task["status"] = "in_progress"
        task["claimed_by"] = worker_id
        task["claimed_at"] = _iso_z(now)
        task["lease_expires_at"] = _iso_z(exp)
        state["tasks"] = tasks
        _atomic_write_json(tasks_path, state)

        out = {
            "claimed": True,
            "worker_id": worker_id,
            "task_id": str(task.get("id") or ""),
            "title": str(task.get("title") or ""),
            "lease_expires_at": task["lease_expires_at"],
        }
        print(json.dumps(out, ensure_ascii=False))
        return 0
    finally:
        _release_lock(lockdir)
|
||||
|
||||
|
||||
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == "__main__":
    raise SystemExit(main())
|
||||
214
skills/harness/hooks/harness-renew.py
Executable file
214
skills/harness/hooks/harness-renew.py
Executable file
@@ -0,0 +1,214 @@
|
||||
#!/usr/bin/env python3
|
||||
from __future__ import annotations
|
||||
|
||||
import datetime as _dt
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional
|
||||
|
||||
|
||||
def _utc_now() -> _dt.datetime:
|
||||
return _dt.datetime.now(tz=_dt.timezone.utc)
|
||||
|
||||
|
||||
def _iso_z(dt: _dt.datetime) -> str:
|
||||
dt = dt.astimezone(_dt.timezone.utc).replace(microsecond=0)
|
||||
return dt.isoformat().replace("+00:00", "Z")
|
||||
|
||||
|
||||
def _read_payload() -> dict[str, Any]:
|
||||
raw = sys.stdin.read()
|
||||
if not raw.strip():
|
||||
return {}
|
||||
try:
|
||||
data = json.loads(raw)
|
||||
return data if isinstance(data, dict) else {}
|
||||
except Exception:
|
||||
return {}
|
||||
|
||||
|
||||
def _find_state_root(payload: dict[str, Any]) -> Optional[Path]:
|
||||
state_root = os.environ.get("HARNESS_STATE_ROOT")
|
||||
if state_root:
|
||||
p = Path(state_root)
|
||||
if (p / "harness-tasks.json").is_file():
|
||||
try:
|
||||
return p.resolve()
|
||||
except Exception:
|
||||
return p
|
||||
|
||||
candidates: list[Path] = []
|
||||
env_dir = os.environ.get("CLAUDE_PROJECT_DIR")
|
||||
if env_dir:
|
||||
candidates.append(Path(env_dir))
|
||||
|
||||
cwd = payload.get("cwd") or os.getcwd()
|
||||
candidates.append(Path(cwd))
|
||||
|
||||
seen: set[str] = set()
|
||||
for base in candidates:
|
||||
try:
|
||||
base = base.resolve()
|
||||
except Exception:
|
||||
continue
|
||||
if str(base) in seen:
|
||||
continue
|
||||
seen.add(str(base))
|
||||
for parent in [base, *list(base.parents)[:8]]:
|
||||
if (parent / "harness-tasks.json").is_file():
|
||||
return parent
|
||||
return None
|
||||
|
||||
|
||||
def _lockdir_for_root(root: Path) -> Path:
|
||||
h = hashlib.sha256(str(root).encode("utf-8")).hexdigest()[:16]
|
||||
return Path("/tmp") / f"harness-{h}.lock"
|
||||
|
||||
|
||||
def _pid_alive(pid: int) -> bool:
|
||||
try:
|
||||
os.kill(pid, 0)
|
||||
return True
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
def _read_pid(lockdir: Path) -> Optional[int]:
|
||||
try:
|
||||
raw = (lockdir / "pid").read_text("utf-8").strip()
|
||||
return int(raw) if raw else None
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def _acquire_lock(lockdir: Path, timeout_seconds: float) -> None:
    """Acquire an exclusive cross-process lock by creating *lockdir*.

    mkdir is atomic, so whoever creates the directory owns the lock; the
    owner's PID is then recorded in lockdir/pid. On contention we poll
    every 50ms until *timeout_seconds* elapses, raising TimeoutError.
    A lock whose recorded owner is dead — or whose pid file has been
    missing for over 1 second — is treated as stale and stolen.
    """
    deadline = time.time() + timeout_seconds
    # When the pid file is absent, remember when we first noticed: the
    # current owner may be between mkdir and write_text, so it gets a
    # 1-second grace window before the lock is considered stale.
    missing_pid_since: Optional[float] = None
    while True:
        try:
            lockdir.mkdir(mode=0o700)
            (lockdir / "pid").write_text(str(os.getpid()), encoding="utf-8")
            return
        except FileExistsError:
            pid = _read_pid(lockdir)
            if pid is None:
                if missing_pid_since is None:
                    missing_pid_since = time.time()
                if time.time() - missing_pid_since < 1.0:
                    # Still inside the grace window — wait and retry.
                    if time.time() >= deadline:
                        raise TimeoutError("lock busy (pid missing)")
                    time.sleep(0.05)
                    continue
            else:
                missing_pid_since = None
                if _pid_alive(pid):
                    # Lock is legitimately held by a live process.
                    if time.time() >= deadline:
                        raise TimeoutError(f"lock busy (pid={pid})")
                    time.sleep(0.05)
                    continue

            # Stale lock: rename it aside first so that only one stealer
            # can win the race, then remove the renamed directory.
            stale = lockdir.with_name(f"{lockdir.name}.stale.{os.getpid()}.{int(time.time())}")
            try:
                lockdir.rename(stale)
            except Exception:
                # Another process renamed/stole it first — retry the loop.
                if time.time() >= deadline:
                    raise TimeoutError("lock contention")
                time.sleep(0.05)
                continue
            shutil.rmtree(stale, ignore_errors=True)
            missing_pid_since = None
            continue
|
||||
|
||||
|
||||
def _release_lock(lockdir: Path) -> None:
|
||||
shutil.rmtree(lockdir, ignore_errors=True)
|
||||
|
||||
|
||||
def _load_state(path: Path) -> dict[str, Any]:
|
||||
with path.open("r", encoding="utf-8") as f:
|
||||
data = json.load(f)
|
||||
if not isinstance(data, dict):
|
||||
raise ValueError("harness-tasks.json must be an object")
|
||||
return data
|
||||
|
||||
|
||||
def _atomic_write_json(path: Path, data: dict[str, Any]) -> None:
|
||||
bak = path.with_name(f"{path.name}.bak")
|
||||
tmp = path.with_name(f"{path.name}.tmp")
|
||||
shutil.copy2(path, bak)
|
||||
tmp.write_text(json.dumps(data, ensure_ascii=False, indent=2) + "\n", encoding="utf-8")
|
||||
os.replace(tmp, path)
|
||||
|
||||
|
||||
def main() -> int:
    """Renew the lease on an in_progress harness task owned by this worker.

    Inputs come from the hook payload on stdin plus env vars:
    HARNESS_TASK_ID (or payload task_id), HARNESS_WORKER_ID (required),
    HARNESS_LEASE_SECONDS (default 1800), HARNESS_LOCK_TIMEOUT_SECONDS
    (default 5). Prints a one-line JSON result and always returns 0 so
    the hook never hard-fails the session.
    """
    payload = _read_payload()
    root = _find_state_root(payload)
    if root is None:
        print(json.dumps({"renewed": False, "error": "state root not found"}, ensure_ascii=False))
        return 0

    task_id = os.environ.get("HARNESS_TASK_ID") or str(payload.get("task_id") or "").strip()
    if not task_id:
        print(json.dumps({"renewed": False, "error": "missing task_id"}, ensure_ascii=False))
        return 0

    worker_id = os.environ.get("HARNESS_WORKER_ID") or ""
    if not worker_id:
        print(json.dumps({"renewed": False, "error": "missing HARNESS_WORKER_ID"}, ensure_ascii=False))
        return 0
    lease_seconds = int(os.environ.get("HARNESS_LEASE_SECONDS") or "1800")

    tasks_path = root / "harness-tasks.json"
    lockdir = _lockdir_for_root(root)

    timeout_s = float(os.environ.get("HARNESS_LOCK_TIMEOUT_SECONDS") or "5")
    # Lock contention is reported as a JSON failure, not a crash.
    try:
        _acquire_lock(lockdir, timeout_s)
    except Exception as e:
        print(json.dumps({"renewed": False, "error": str(e)}, ensure_ascii=False))
        return 0

    try:
        state = _load_state(tasks_path)
        tasks_raw = state.get("tasks") or []
        if not isinstance(tasks_raw, list):
            raise ValueError("tasks must be a list")
        tasks = [t for t in tasks_raw if isinstance(t, dict)]

        task = next((t for t in tasks if str(t.get("id") or "") == task_id), None)
        if task is None:
            print(json.dumps({"renewed": False, "error": "task not found"}, ensure_ascii=False))
            return 0

        if str(task.get("status") or "") != "in_progress":
            print(json.dumps({"renewed": False, "error": "task not in_progress"}, ensure_ascii=False))
            return 0

        claimed_by = str(task.get("claimed_by") or "")
        if claimed_by and claimed_by != worker_id:
            # Never extend a lease on another worker's claim.
            print(json.dumps({"renewed": False, "error": "task owned by other worker"}, ensure_ascii=False))
            return 0

        now = _utc_now()
        exp = now + _dt.timedelta(seconds=lease_seconds)
        task["lease_expires_at"] = _iso_z(exp)
        # Also (re)stamp ownership, adopting unclaimed in_progress tasks.
        task["claimed_by"] = worker_id
        state["tasks"] = tasks
        _atomic_write_json(tasks_path, state)

        print(json.dumps({"renewed": True, "task_id": task_id, "lease_expires_at": task["lease_expires_at"]}, ensure_ascii=False))
        return 0
    except Exception as e:
        print(json.dumps({"renewed": False, "error": str(e)}, ensure_ascii=False))
        return 0
    finally:
        _release_lock(lockdir)
|
||||
|
||||
|
||||
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == "__main__":
    raise SystemExit(main())
|
||||
186
skills/harness/hooks/harness-sessionstart.py
Executable file
186
skills/harness/hooks/harness-sessionstart.py
Executable file
@@ -0,0 +1,186 @@
|
||||
#!/usr/bin/env python3
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional
|
||||
|
||||
|
||||
def _read_hook_payload() -> dict[str, Any]:
|
||||
raw = sys.stdin.read()
|
||||
if not raw.strip():
|
||||
return {}
|
||||
try:
|
||||
data = json.loads(raw)
|
||||
return data if isinstance(data, dict) else {}
|
||||
except Exception:
|
||||
return {"_invalid_json": True}
|
||||
|
||||
|
||||
def _find_harness_root(payload: dict[str, Any]) -> Optional[Path]:
|
||||
state_root = os.environ.get("HARNESS_STATE_ROOT")
|
||||
if state_root:
|
||||
p = Path(state_root)
|
||||
if (p / "harness-tasks.json").is_file():
|
||||
try:
|
||||
return p.resolve()
|
||||
except Exception:
|
||||
return p
|
||||
|
||||
candidates: list[Path] = []
|
||||
env_dir = os.environ.get("CLAUDE_PROJECT_DIR")
|
||||
if env_dir:
|
||||
candidates.append(Path(env_dir))
|
||||
|
||||
cwd = payload.get("cwd") or os.getcwd()
|
||||
candidates.append(Path(cwd))
|
||||
|
||||
seen: set[str] = set()
|
||||
for base in candidates:
|
||||
try:
|
||||
base = base.resolve()
|
||||
except Exception:
|
||||
continue
|
||||
if str(base) in seen:
|
||||
continue
|
||||
seen.add(str(base))
|
||||
for parent in [base, *list(base.parents)[:8]]:
|
||||
if (parent / "harness-tasks.json").is_file():
|
||||
return parent
|
||||
return None
|
||||
|
||||
|
||||
def _load_json(path: Path) -> dict[str, Any]:
|
||||
with path.open("r", encoding="utf-8") as f:
|
||||
data = json.load(f)
|
||||
if not isinstance(data, dict):
|
||||
raise ValueError(f"{path.name} must be a JSON object")
|
||||
return data
|
||||
|
||||
|
||||
def _tail_text(path: Path, max_bytes: int = 8192) -> str:
|
||||
with path.open("rb") as f:
|
||||
try:
|
||||
f.seek(0, os.SEEK_END)
|
||||
size = f.tell()
|
||||
f.seek(max(0, size - max_bytes), os.SEEK_SET)
|
||||
except Exception:
|
||||
f.seek(0, os.SEEK_SET)
|
||||
chunk = f.read()
|
||||
return chunk.decode("utf-8", errors="replace")
|
||||
|
||||
|
||||
def _priority_rank(v: Any) -> int:
|
||||
return {"P0": 0, "P1": 1, "P2": 2}.get(str(v or ""), 9)
|
||||
|
||||
|
||||
def _pick_next_eligible(tasks: list[dict[str, Any]]) -> Optional[dict[str, Any]]:
    """Return the next runnable task, or None when nothing is eligible.

    Pending tasks whose dependencies are all completed take precedence
    over failed tasks that still have retry budget (attempts <
    max_attempts, default 3); ties break by priority rank, then id.
    """
    done = {str(t.get("id", "")) for t in tasks if str(t.get("status", "")) == "completed"}

    def _deps_met(task: dict[str, Any]) -> bool:
        deps = task.get("depends_on") or []
        return isinstance(deps, list) and all(str(d) in done for d in deps)

    def _tries(task: dict[str, Any]) -> int:
        try:
            return int(task.get("attempts") or 0)
        except Exception:
            return 0

    def _try_limit(task: dict[str, Any]) -> int:
        try:
            limit = task.get("max_attempts")
            return int(limit) if limit is not None else 3
        except Exception:
            return 3

    def _order(task: dict[str, Any]) -> tuple[int, str]:
        return (_priority_rank(task.get("priority")), str(task.get("id", "")))

    ready = sorted(
        (t for t in tasks if str(t.get("status", "")) == "pending" and _deps_met(t)),
        key=_order,
    )
    retryable = sorted(
        (
            t
            for t in tasks
            if str(t.get("status", "")) == "failed"
            and _tries(t) < _try_limit(t)
            and _deps_met(t)
        ),
        key=_order,
    )
    if ready:
        return ready[0]
    return retryable[0] if retryable else None
|
||||
|
||||
|
||||
def _is_harness_active(root: Path) -> bool:
|
||||
"""Check if harness skill is actively running (marker file exists)."""
|
||||
return (root / ".harness-active").is_file()
|
||||
|
||||
|
||||
def main() -> int:
    """SessionStart hook: inject a one-line harness status summary.

    Emits {"hookSpecificOutput": {"additionalContext": ...}} with task
    counts by status, the next eligible task, and the most recent
    progress-log line. Silent (return 0, no output) when no harness
    project is found or the harness skill is not active.
    """
    payload = _read_hook_payload()
    root = _find_harness_root(payload)
    if root is None:
        return 0

    # Guard: only active when harness skill is triggered
    if not _is_harness_active(root):
        return 0

    tasks_path = root / "harness-tasks.json"
    progress_path = root / "harness-progress.txt"

    try:
        state = _load_json(tasks_path)
        tasks_raw = state.get("tasks") or []
        if not isinstance(tasks_raw, list):
            raise ValueError("tasks must be a list")
        tasks = [t for t in tasks_raw if isinstance(t, dict)]
    except Exception as e:
        # Surface config corruption as context instead of failing the hook.
        context = f"HARNESS: CONFIG error: cannot read {tasks_path.name}: {e}"
        print(json.dumps({"hookSpecificOutput": {"additionalContext": context}}, ensure_ascii=False))
        return 0

    # Tally tasks by status; a missing status counts as "pending".
    counts: dict[str, int] = {}
    for t in tasks:
        s = str(t.get("status") or "pending")
        counts[s] = counts.get(s, 0) + 1

    next_task = _pick_next_eligible(tasks)
    next_hint = ""
    if next_task is not None:
        tid = str(next_task.get("id") or "")
        title = str(next_task.get("title") or "").strip()
        next_hint = f" next={tid}{(': ' + title) if title else ''}"

    # Pull the most recent STATS line (or failing that, the last non-blank
    # line) from the tail of the progress log, truncated to ~220 chars.
    last_stats = ""
    if progress_path.is_file():
        tail = _tail_text(progress_path)
        lines = [ln.strip() for ln in tail.splitlines() if ln.strip()]
        for ln in reversed(lines[-200:]):
            if " STATS " in f" {ln} " or ln.endswith(" STATS"):
                last_stats = ln
                break
        if not last_stats and lines:
            last_stats = lines[-1]
        if len(last_stats) > 220:
            last_stats = last_stats[:217] + "..."

    summary = (
        "HARNESS: "
        + " ".join(f"{k}={v}" for k, v in sorted(counts.items()))
        + f" total={len(tasks)}"
        + next_hint
    ).strip()
    if last_stats:
        summary += f"\nHARNESS: last_log={last_stats}"

    print(json.dumps({"hookSpecificOutput": {"additionalContext": summary}}, ensure_ascii=False))
    return 0
|
||||
|
||||
|
||||
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == "__main__":
    raise SystemExit(main())
|
||||
319
skills/harness/hooks/harness-stop.py
Executable file
319
skills/harness/hooks/harness-stop.py
Executable file
@@ -0,0 +1,319 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Harness Stop hook — blocks Claude from stopping when eligible tasks remain.
|
||||
|
||||
Uses `stop_hook_active` field and a consecutive-block counter to prevent
|
||||
infinite loops. If the hook blocks N times in a row without any task
|
||||
completing, it allows the stop with a warning.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional
|
||||
|
||||
MAX_CONSECUTIVE_BLOCKS = 8 # safety valve
|
||||
|
||||
|
||||
def _read_hook_payload() -> dict[str, Any]:
|
||||
raw = sys.stdin.read()
|
||||
if not raw.strip():
|
||||
return {}
|
||||
try:
|
||||
data = json.loads(raw)
|
||||
return data if isinstance(data, dict) else {}
|
||||
except Exception:
|
||||
return {"_invalid_json": True}
|
||||
|
||||
|
||||
def _find_harness_root(payload: dict[str, Any]) -> Optional[Path]:
|
||||
state_root = os.environ.get("HARNESS_STATE_ROOT")
|
||||
if state_root:
|
||||
p = Path(state_root)
|
||||
if (p / "harness-tasks.json").is_file():
|
||||
try:
|
||||
return p.resolve()
|
||||
except Exception:
|
||||
return p
|
||||
|
||||
candidates: list[Path] = []
|
||||
env_dir = os.environ.get("CLAUDE_PROJECT_DIR")
|
||||
if env_dir:
|
||||
candidates.append(Path(env_dir))
|
||||
cwd = payload.get("cwd") or os.getcwd()
|
||||
candidates.append(Path(cwd))
|
||||
|
||||
seen: set[str] = set()
|
||||
for base in candidates:
|
||||
try:
|
||||
base = base.resolve()
|
||||
except Exception:
|
||||
continue
|
||||
if str(base) in seen:
|
||||
continue
|
||||
seen.add(str(base))
|
||||
for parent in [base, *list(base.parents)[:8]]:
|
||||
if (parent / "harness-tasks.json").is_file():
|
||||
return parent
|
||||
return None
|
||||
|
||||
|
||||
def _load_json(path: Path) -> dict[str, Any]:
|
||||
with path.open("r", encoding="utf-8") as f:
|
||||
data = json.load(f)
|
||||
if not isinstance(data, dict):
|
||||
raise ValueError(f"{path.name} must be a JSON object")
|
||||
return data
|
||||
|
||||
|
||||
def _tail_text(path: Path, max_bytes: int = 200_000) -> str:
|
||||
with path.open("rb") as f:
|
||||
try:
|
||||
f.seek(0, os.SEEK_END)
|
||||
size = f.tell()
|
||||
f.seek(max(0, size - max_bytes), os.SEEK_SET)
|
||||
except Exception:
|
||||
f.seek(0, os.SEEK_SET)
|
||||
chunk = f.read()
|
||||
return chunk.decode("utf-8", errors="replace")
|
||||
|
||||
|
||||
def _priority_rank(v: Any) -> int:
|
||||
return {"P0": 0, "P1": 1, "P2": 2}.get(str(v or ""), 9)
|
||||
|
||||
|
||||
def _deps_completed(t: dict[str, Any], completed: set[str]) -> bool:
|
||||
deps = t.get("depends_on") or []
|
||||
if not isinstance(deps, list):
|
||||
return False
|
||||
return all(str(d) in completed for d in deps)
|
||||
|
||||
|
||||
def _attempts(t: dict[str, Any]) -> int:
|
||||
try:
|
||||
return int(t.get("attempts") or 0)
|
||||
except Exception:
|
||||
return 0
|
||||
|
||||
|
||||
def _max_attempts(t: dict[str, Any]) -> int:
|
||||
try:
|
||||
v = t.get("max_attempts")
|
||||
return int(v) if v is not None else 3
|
||||
except Exception:
|
||||
return 3
|
||||
|
||||
|
||||
def _pick_next(pending: list[dict[str, Any]], retry: list[dict[str, Any]]) -> Optional[dict[str, Any]]:
    """Sort both lists in place by (priority rank, id) and return the best task.

    Pending tasks always win over retryable ones; returns None when both
    lists are empty.
    """
    def _order(t: dict[str, Any]) -> tuple[int, str]:
        return (_priority_rank(t.get("priority")), str(t.get("id", "")))

    pending.sort(key=_order)
    retry.sort(key=_order)
    if pending:
        return pending[0]
    return retry[0] if retry else None
|
||||
|
||||
|
||||
def _block_counter_path(root: Path) -> Path:
|
||||
return root / ".harness-stop-counter"
|
||||
|
||||
|
||||
def _read_block_counter(root: Path) -> tuple[int, int]:
    """Read (consecutive_blocks, last_completed_count) from the counter file.

    The file format is "blocks,completed"; a missing second field means 0,
    and any read/parse failure yields (0, 0).
    """
    try:
        raw = _block_counter_path(root).read_text("utf-8").strip()
        fields = raw.split(",")
        blocks = int(fields[0])
        completed = int(fields[1]) if len(fields) > 1 else 0
        return blocks, completed
    except Exception:
        return 0, 0
|
||||
|
||||
|
||||
def _write_block_counter(root: Path, blocks: int, completed: int) -> None:
    """Atomically persist the counter as "blocks,completed"; failures are swallowed.

    Writes to a pid-suffixed temp file and swaps it in with os.replace;
    on failure the temp file is best-effort cleaned up.
    """
    target = _block_counter_path(root)
    staging = target.with_name(f"{target.name}.tmp.{os.getpid()}")
    try:
        staging.write_text(f"{blocks},{completed}", encoding="utf-8")
        os.replace(staging, target)
    except Exception:
        try:
            staging.unlink(missing_ok=True)
        except Exception:
            pass
|
||||
|
||||
|
||||
def _reset_block_counter(root: Path) -> None:
    """Delete the counter file if present; never raises."""
    try:
        _block_counter_path(root).unlink(missing_ok=True)
    except Exception:
        pass
|
||||
|
||||
|
||||
def _is_harness_active(root: Path) -> bool:
|
||||
"""Check if harness skill is actively running (marker file exists)."""
|
||||
return (root / ".harness-active").is_file()
|
||||
|
||||
|
||||
def main() -> int:
    """Stop hook: block Claude from stopping while eligible harness tasks remain.

    Allows the stop when any of: no harness project / skill inactive,
    session or per-session task limits reached, nothing left to do, or
    the safety valve trips (more than MAX_CONSECUTIVE_BLOCKS blocks with
    no new completions while stop_hook_active). Otherwise prints
    {"decision": "block", "reason": ...} and returns 0.
    """
    payload = _read_hook_payload()

    # Safety: if stop_hook_active is True, Claude is already continuing
    # from a previous Stop hook block. Check if we should allow stop
    # to prevent infinite loops.
    stop_hook_active = payload.get("stop_hook_active", False)

    root = _find_harness_root(payload)
    if root is None:
        return 0  # no harness project, allow stop

    # Guard: only active when harness skill is triggered
    if not _is_harness_active(root):
        return 0

    tasks_path = root / "harness-tasks.json"
    progress_path = root / "harness-progress.txt"
    try:
        state = _load_json(tasks_path)
        tasks_raw = state.get("tasks") or []
        if not isinstance(tasks_raw, list):
            raise ValueError("tasks must be a list")
        tasks = [t for t in tasks_raw if isinstance(t, dict)]
    except Exception as e:
        # Corrupt state file: if we already blocked once, allow the stop
        # rather than looping; otherwise block with recovery instructions.
        if stop_hook_active:
            sys.stderr.write(
                "HARNESS: WARN — harness-tasks.json 无法解析且 stop_hook_active=True,"
                "为避免无限循环,本次允许停止。\n"
            )
            return 0
        reason = (
            "HARNESS: 检测到配置损坏,无法解析 harness-tasks.json。\n"
            f"HARNESS: error={e}\n"
            "按 SKILL.md 的 JSON corruption 恢复:优先用 harness-tasks.json.bak 还原;无法还原则停止并要求人工修复。"
        )
        print(json.dumps({"decision": "block", "reason": reason}, ensure_ascii=False))
        return 0

    session_config = state.get("session_config") or {}
    if not isinstance(session_config, dict):
        session_config = {}

    concurrency_mode = str(session_config.get("concurrency_mode") or "exclusive")
    is_concurrent = concurrency_mode == "concurrent"
    worker_id = os.environ.get("HARNESS_WORKER_ID") or None

    # Check session limits
    try:
        session_count = int(state.get("session_count") or 0)
    except Exception:
        session_count = 0
    try:
        max_sessions = int(session_config.get("max_sessions") or 0)
    except Exception:
        max_sessions = 0
    if max_sessions > 0 and session_count >= max_sessions:
        _reset_block_counter(root)
        return 0  # session limit reached, allow stop

    # Check per-session task limit (exclusive mode only): count finished
    # task lines tagged with the current session in the progress log.
    try:
        max_tasks_per_session = int(session_config.get("max_tasks_per_session") or 0)
    except Exception:
        max_tasks_per_session = 0
    if not is_concurrent and max_tasks_per_session > 0 and session_count > 0 and progress_path.is_file():
        tail = _tail_text(progress_path)
        tag = f"[SESSION-{session_count}]"
        finished = 0
        for ln in tail.splitlines():
            if tag not in ln:
                continue
            if " Completed [" in ln or (" ERROR [" in ln and "[task-" in ln):
                finished += 1
        if finished >= max_tasks_per_session:
            _reset_block_counter(root)
            return 0  # per-session limit reached, allow stop

    # Compute eligible tasks
    counts: dict[str, int] = {}
    for t in tasks:
        s = str(t.get("status") or "pending")
        counts[s] = counts.get(s, 0) + 1

    completed_ids = {str(t.get("id", "")) for t in tasks if str(t.get("status", "")) == "completed"}
    completed_count = len(completed_ids)

    pending_eligible = [t for t in tasks if str(t.get("status", "")) == "pending" and _deps_completed(t, completed_ids)]
    retryable = [
        t for t in tasks
        if str(t.get("status", "")) == "failed"
        and _attempts(t) < _max_attempts(t)
        and _deps_completed(t, completed_ids)
    ]
    in_progress_any = [t for t in tasks if str(t.get("status", "")) == "in_progress"]
    if is_concurrent and worker_id:
        # In concurrent mode only our own (or unclaimed) in_progress tasks
        # keep this worker from stopping.
        in_progress_blocking = [
            t for t in in_progress_any
            if str(t.get("claimed_by") or "") == worker_id or not t.get("claimed_by")
        ]
    else:
        in_progress_blocking = in_progress_any

    # If nothing left to do, allow stop
    if not pending_eligible and not retryable and not in_progress_blocking:
        _reset_block_counter(root)
        # Signal self-reflect hook BEFORE removing active marker
        try:
            (root / ".harness-reflect").touch()
        except Exception:
            pass
        try:
            (root / ".harness-active").unlink(missing_ok=True)
        except Exception:
            pass
        return 0

    # Safety valve: track consecutive blocks without progress
    prev_blocks, prev_completed = _read_block_counter(root)
    if completed_count > prev_completed:
        # Progress was made, reset counter
        prev_blocks = 0
    consecutive = prev_blocks + 1
    _write_block_counter(root, consecutive, completed_count)

    if stop_hook_active and consecutive > MAX_CONSECUTIVE_BLOCKS:
        # Too many consecutive blocks without progress — allow stop to prevent infinite loop
        _reset_block_counter(root)
        sys.stderr.write(
            f"HARNESS: WARN — Stop hook blocked {consecutive} times without progress. "
            "Allowing stop to prevent infinite loop. Check task definitions and validation commands.\n"
        )
        return 0

    # Block the stop — tasks remain
    next_task = _pick_next(pending_eligible, retryable)
    next_hint = ""
    if next_task is not None:
        tid = str(next_task.get("id") or "")
        title = str(next_task.get("title") or "").strip()
        next_hint = f"next={tid}{(': ' + title) if title else ''}"

    summary = (
        "HARNESS: 未满足停止条件,继续执行。\n"
        + "HARNESS: "
        + " ".join(f"{k}={v}" for k, v in sorted(counts.items()))
        + f" total={len(tasks)}"
        + (f" {next_hint}" if next_hint else "")
    ).strip()

    reason = (
        summary
        + "\n"
        + "请按 SKILL.md 的 Task Selection Algorithm 选择下一个 eligible 任务,并完整执行 Task Execution Cycle:"
        "Claim → Checkpoint → Validate → Record outcome → STATS(如需)→ Continue。"
    )

    print(json.dumps({"decision": "block", "reason": reason}, ensure_ascii=False))
    return 0
|
||||
|
||||
|
||||
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == "__main__":
    raise SystemExit(main())
|
||||
137
skills/harness/hooks/harness-subagentstop.py
Executable file
137
skills/harness/hooks/harness-subagentstop.py
Executable file
@@ -0,0 +1,137 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Harness SubagentStop hook — blocks subagents from stopping when they
|
||||
have assigned harness tasks still in progress.
|
||||
|
||||
Uses the same decision format as Stop hooks:
|
||||
{"decision": "block", "reason": "..."}
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional
|
||||
|
||||
|
||||
def _read_hook_payload() -> dict[str, Any]:
    """Parse the hook payload JSON from stdin; return {} on any failure."""
    text = sys.stdin.read()
    if not text.strip():
        return {}
    try:
        parsed = json.loads(text)
    except Exception:
        return {}
    return parsed if isinstance(parsed, dict) else {}
|
||||
|
||||
|
||||
def _find_harness_root(payload: dict[str, Any]) -> Optional[Path]:
|
||||
state_root = os.environ.get("HARNESS_STATE_ROOT")
|
||||
if state_root:
|
||||
p = Path(state_root)
|
||||
if (p / "harness-tasks.json").is_file():
|
||||
try:
|
||||
return p.resolve()
|
||||
except Exception:
|
||||
return p
|
||||
candidates: list[Path] = []
|
||||
env_dir = os.environ.get("CLAUDE_PROJECT_DIR")
|
||||
if env_dir:
|
||||
candidates.append(Path(env_dir))
|
||||
cwd = payload.get("cwd") or os.getcwd()
|
||||
candidates.append(Path(cwd))
|
||||
seen: set[str] = set()
|
||||
for base in candidates:
|
||||
try:
|
||||
base = base.resolve()
|
||||
except Exception:
|
||||
continue
|
||||
if str(base) in seen:
|
||||
continue
|
||||
seen.add(str(base))
|
||||
for parent in [base, *list(base.parents)[:8]]:
|
||||
if (parent / "harness-tasks.json").is_file():
|
||||
return parent
|
||||
return None
|
||||
|
||||
|
||||
def _load_json(path: Path) -> dict[str, Any]:
|
||||
with path.open("r", encoding="utf-8") as f:
|
||||
data = json.load(f)
|
||||
if not isinstance(data, dict):
|
||||
raise ValueError(f"{path.name} must be a JSON object")
|
||||
return data
|
||||
|
||||
|
||||
def _is_harness_active(root: Path) -> bool:
|
||||
"""Check if harness skill is actively running (marker file exists)."""
|
||||
return (root / ".harness-active").is_file()
|
||||
|
||||
|
||||
def main() -> int:
    """SubagentStop hook entry point.

    Prints {"decision": "block", "reason": ...} on stdout when this
    subagent still owns in-progress harness tasks (or, in concurrent
    mode, when its identity cannot be determined); otherwise emits
    nothing. Always returns exit code 0 — the block decision travels
    in the JSON, not the exit status.
    """
    payload = _read_hook_payload()

    # Safety: respect stop_hook_active to prevent infinite loops
    if payload.get("stop_hook_active", False):
        return 0

    root = _find_harness_root(payload)
    if root is None:
        return 0  # no harness project, allow stop

    # Guard: only active when harness skill is triggered
    if not _is_harness_active(root):
        return 0

    tasks_path = root / "harness-tasks.json"
    try:
        state = _load_json(tasks_path)
        session_config = state.get("session_config") or {}
        if not isinstance(session_config, dict):
            session_config = {}
        # Anything other than an explicit "concurrent" mode is treated as exclusive.
        is_concurrent = str(session_config.get("concurrency_mode") or "exclusive") == "concurrent"
        tasks_raw = state.get("tasks") or []
        if not isinstance(tasks_raw, list):
            return 0
        tasks = [t for t in tasks_raw if isinstance(t, dict)]
    except Exception:
        # Unreadable/malformed state: fail open and allow the stop.
        return 0

    in_progress = [t for t in tasks if str(t.get("status", "")) == "in_progress"]
    # This worker's identity may come from the environment or the hook payload.
    worker_id = str(os.environ.get("HARNESS_WORKER_ID") or "").strip()
    agent_id = str(payload.get("agent_id") or "").strip()
    teammate_name = str(payload.get("teammate_name") or "").strip()
    identities = {x for x in (worker_id, agent_id, teammate_name) if x}

    # In concurrent mode with work in flight but no identity, block defensively:
    # we cannot tell whether the in-progress tasks belong to this subagent.
    if is_concurrent and in_progress and not identities:
        reason = (
            "HARNESS: concurrent 模式缺少 worker identity(HARNESS_WORKER_ID/agent_id)。"
            "为避免误停导致任务悬空,本次阻止停止。"
        )
        print(json.dumps({"decision": "block", "reason": reason}, ensure_ascii=False))
        return 0

    if is_concurrent:
        owned = [
            t for t in in_progress
            if str(t.get("claimed_by") or "") in identities
        ] if identities else []
    else:
        # Exclusive mode: every in-progress task is implicitly ours.
        owned = in_progress

    # Only block when this subagent still owns in-progress work.
    if owned:
        tid = str(owned[0].get("id") or "")
        title = str(owned[0].get("title") or "")
        reason = (
            f"HARNESS: 子代理仍有进行中的任务 [{tid}] {title}。"
            "请完成当前任务的验证和记录后再停止。"
        )
        print(json.dumps({"decision": "block", "reason": reason}, ensure_ascii=False))
        return 0

    return 0  # all done, allow stop
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
raise SystemExit(main())
|
||||
160
skills/harness/hooks/harness-teammateidle.py
Executable file
160
skills/harness/hooks/harness-teammateidle.py
Executable file
@@ -0,0 +1,160 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Harness TeammateIdle hook — prevents teammates from going idle when
|
||||
harness tasks remain eligible for execution.
|
||||
|
||||
Exit code 2 + stderr message keeps the teammate working.
|
||||
Exit code 0 allows the teammate to go idle.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional
|
||||
|
||||
|
||||
def _read_hook_payload() -> dict[str, Any]:
    """Decode the JSON hook payload from stdin; {} on empty/invalid input."""
    blob = sys.stdin.read()
    if not blob.strip():
        return {}
    try:
        decoded = json.loads(blob)
    except Exception:
        return {}
    if isinstance(decoded, dict):
        return decoded
    return {}
|
||||
|
||||
|
||||
def _find_harness_root(payload: dict[str, Any]) -> Optional[Path]:
|
||||
state_root = os.environ.get("HARNESS_STATE_ROOT")
|
||||
if state_root:
|
||||
p = Path(state_root)
|
||||
if (p / "harness-tasks.json").is_file():
|
||||
try:
|
||||
return p.resolve()
|
||||
except Exception:
|
||||
return p
|
||||
candidates: list[Path] = []
|
||||
env_dir = os.environ.get("CLAUDE_PROJECT_DIR")
|
||||
if env_dir:
|
||||
candidates.append(Path(env_dir))
|
||||
cwd = payload.get("cwd") or os.getcwd()
|
||||
candidates.append(Path(cwd))
|
||||
seen: set[str] = set()
|
||||
for base in candidates:
|
||||
try:
|
||||
base = base.resolve()
|
||||
except Exception:
|
||||
continue
|
||||
if str(base) in seen:
|
||||
continue
|
||||
seen.add(str(base))
|
||||
for parent in [base, *list(base.parents)[:8]]:
|
||||
if (parent / "harness-tasks.json").is_file():
|
||||
return parent
|
||||
return None
|
||||
|
||||
|
||||
def _load_json(path: Path) -> dict[str, Any]:
|
||||
with path.open("r", encoding="utf-8") as f:
|
||||
data = json.load(f)
|
||||
if not isinstance(data, dict):
|
||||
raise ValueError(f"{path.name} must be a JSON object")
|
||||
return data
|
||||
|
||||
|
||||
def _priority_rank(v: Any) -> int:
|
||||
return {"P0": 0, "P1": 1, "P2": 2}.get(str(v or ""), 9)
|
||||
|
||||
|
||||
def _is_harness_active(root: Path) -> bool:
|
||||
"""Check if harness skill is actively running (marker file exists)."""
|
||||
return (root / ".harness-active").is_file()
|
||||
|
||||
|
||||
def main() -> int:
    """TeammateIdle hook entry point.

    Returns 2 (with an explanation on stderr) to keep the teammate
    working while it owns in-progress tasks or eligible work remains;
    returns 0 to allow idle.

    NOTE(review): unlike the Stop/SubagentStop hooks there is no
    stop_hook_active guard here — presumably TeammateIdle payloads
    never carry that flag; confirm against the hook documentation.
    """
    payload = _read_hook_payload()
    root = _find_harness_root(payload)
    if root is None:
        return 0  # no harness project, allow idle

    # Guard: only active when harness skill is triggered
    if not _is_harness_active(root):
        return 0

    tasks_path = root / "harness-tasks.json"
    try:
        state = _load_json(tasks_path)
        tasks_raw = state.get("tasks") or []
        if not isinstance(tasks_raw, list):
            return 0
        tasks = [t for t in tasks_raw if isinstance(t, dict)]
    except Exception:
        return 0  # can't read state, allow idle

    completed = {str(t.get("id", "")) for t in tasks if str(t.get("status", "")) == "completed"}

    def deps_ok(t: dict[str, Any]) -> bool:
        # A task is eligible only when every dependency id is completed.
        deps = t.get("depends_on") or []
        if not isinstance(deps, list):
            return False
        return all(str(d) in completed for d in deps)

    def attempts(t: dict[str, Any]) -> int:
        # Attempt counter, tolerant of missing/non-numeric values.
        try:
            return int(t.get("attempts") or 0)
        except Exception:
            return 0

    def max_attempts(t: dict[str, Any]) -> int:
        # Per-task retry budget; defaults to 3 when absent or malformed.
        try:
            v = t.get("max_attempts")
            return int(v) if v is not None else 3
        except Exception:
            return 3

    pending = [t for t in tasks if str(t.get("status", "")) == "pending" and deps_ok(t)]
    retryable = [
        t for t in tasks
        if str(t.get("status", "")) == "failed"
        and attempts(t) < max_attempts(t)
        and deps_ok(t)
    ]
    def key(t: dict[str, Any]) -> tuple[int, str]:
        # Sort by priority rank, then id, for deterministic selection.
        return (_priority_rank(t.get("priority")), str(t.get("id", "")))
    pending.sort(key=key)
    retryable.sort(key=key)
    in_progress = [t for t in tasks if str(t.get("status", "")) == "in_progress"]

    # Check if this teammate owns any in-progress tasks
    worker_id = os.environ.get("HARNESS_WORKER_ID") or ""
    teammate_name = payload.get("teammate_name", "")
    owned = [
        t for t in in_progress
        if str(t.get("claimed_by") or "") in (worker_id, teammate_name)
    ] if (worker_id or teammate_name) else []

    if owned:
        tid = str(owned[0].get("id") or "")
        title = str(owned[0].get("title") or "")
        sys.stderr.write(
            f"HARNESS: 你仍有进行中的任务 [{tid}] {title}。"
            "请继续执行或完成该任务后再停止。\n"
        )
        return 2  # block idle

    if pending or retryable:
        # Eligible work remains — point the teammate at the next task.
        next_t = pending[0] if pending else retryable[0]
        tid = str(next_t.get("id") or "")
        title = str(next_t.get("title") or "")
        sys.stderr.write(
            f"HARNESS: 仍有 {len(pending)} 个待执行 + {len(retryable)} 个可重试任务。"
            f"下一个: [{tid}] {title}。请继续执行。\n"
        )
        return 2  # block idle

    return 0  # all done, allow idle
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
raise SystemExit(main())
|
||||
60
skills/harness/hooks/hooks.json
Normal file
60
skills/harness/hooks/hooks.json
Normal file
@@ -0,0 +1,60 @@
|
||||
{
|
||||
"description": "Harness hooks: prevent premature stop, self-reflection iteration, inject task context on session start, keep teammates working, block subagent stop when tasks remain",
|
||||
"hooks": {
|
||||
"Stop": [
|
||||
{
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "python3 \"${CLAUDE_PLUGIN_ROOT}/hooks/harness-stop.py\"",
|
||||
"timeout": 10
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "python3 \"${CLAUDE_PLUGIN_ROOT}/hooks/self-reflect-stop.py\"",
|
||||
"timeout": 15,
|
||||
"statusMessage": "Self-reflecting on task completion..."
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"SessionStart": [
|
||||
{
|
||||
"matcher": "startup|resume|compact",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "python3 \"${CLAUDE_PLUGIN_ROOT}/hooks/harness-sessionstart.py\"",
|
||||
"timeout": 10
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"TeammateIdle": [
|
||||
{
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "python3 \"${CLAUDE_PLUGIN_ROOT}/hooks/harness-teammateidle.py\"",
|
||||
"timeout": 10
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"SubagentStop": [
|
||||
{
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "python3 \"${CLAUDE_PLUGIN_ROOT}/hooks/harness-subagentstop.py\"",
|
||||
"timeout": 10
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
216
skills/harness/hooks/self-reflect-stop.py
Normal file
216
skills/harness/hooks/self-reflect-stop.py
Normal file
@@ -0,0 +1,216 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Self-reflection Stop hook — harness 任务循环完成后注入自省 prompt。
|
||||
|
||||
仅在以下条件同时满足时生效:
|
||||
1. harness-tasks.json 存在(harness 曾被初始化)
|
||||
2. .harness-active 不存在(harness 任务已全部完成)
|
||||
|
||||
当 harness 未曾启动时,本 hook 是完全的 no-op。
|
||||
|
||||
配置:
|
||||
- REFLECT_MAX_ITERATIONS 环境变量(默认 5)
|
||||
- 设为 0 可禁用
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional
|
||||
|
||||
# Add hooks directory to sys.path for _harness_common import
|
||||
sys.path.insert(0, str(Path(__file__).resolve().parent))
|
||||
try:
|
||||
import _harness_common as hc
|
||||
except ImportError:
|
||||
hc = None # type: ignore[assignment]
|
||||
|
||||
DEFAULT_MAX_ITERATIONS = 5
|
||||
|
||||
|
||||
def _read_payload() -> dict[str, Any]:
    """Read and parse the hook's JSON payload from stdin; {} on failure."""
    stdin_data = sys.stdin.read()
    if not stdin_data.strip():
        return {}
    try:
        obj = json.loads(stdin_data)
    except Exception:
        return {}
    return obj if isinstance(obj, dict) else {}
|
||||
|
||||
|
||||
def _find_harness_root(payload: dict[str, Any]) -> Optional[Path]:
    """Locate the directory holding harness-tasks.json; its presence means harness was used.

    Delegates to _harness_common when importable; otherwise performs
    the same discovery inline: HARNESS_STATE_ROOT override, then
    CLAUDE_PROJECT_DIR and the payload cwd, each walked up through at
    most 8 parent directories.
    """
    if hc is not None:
        return hc.find_harness_root(payload)

    # Fallback: inline discovery when _harness_common is unavailable.
    override = os.environ.get("HARNESS_STATE_ROOT")
    if override:
        override_path = Path(override)
        if (override_path / "harness-tasks.json").is_file():
            try:
                return override_path.resolve()
            except Exception:
                return override_path

    bases: list[Path] = []
    project_dir = os.environ.get("CLAUDE_PROJECT_DIR")
    if project_dir:
        bases.append(Path(project_dir))
    bases.append(Path(payload.get("cwd") or os.getcwd()))

    visited: set[str] = set()
    for base in bases:
        try:
            base = base.resolve()
        except Exception:
            continue
        if str(base) in visited:
            continue
        visited.add(str(base))
        for directory in (base, *list(base.parents)[:8]):
            if (directory / "harness-tasks.json").is_file():
                return directory
    return None
|
||||
|
||||
|
||||
def _counter_path(session_id: str) -> Path:
|
||||
"""每个 session 独立计数文件。"""
|
||||
return Path(tempfile.gettempdir()) / f"claude-reflect-{session_id}"
|
||||
|
||||
|
||||
def _read_counter(session_id: str) -> int:
    """Return the stored iteration count for this session, or 0 when unreadable."""
    counter_file = _counter_path(session_id)
    try:
        first_line = counter_file.read_text("utf-8").strip().splitlines()[0]
        return int(first_line)
    except Exception:
        return 0
|
||||
|
||||
|
||||
def _write_counter(session_id: str, count: int) -> None:
    """Persist *count* for this session; failures are silently ignored (best effort)."""
    try:
        _counter_path(session_id).write_text(str(count), encoding="utf-8")
    except Exception:
        pass
|
||||
|
||||
|
||||
def _extract_original_prompt(transcript_path: str, max_bytes: int = 100_000) -> str:
|
||||
"""从 transcript JSONL 中提取第一条用户消息作为原始 prompt。"""
|
||||
try:
|
||||
p = Path(transcript_path)
|
||||
if not p.is_file():
|
||||
return ""
|
||||
with p.open("r", encoding="utf-8") as f:
|
||||
# JSONL 格式,逐行解析找第一条 user message
|
||||
for line in f:
|
||||
line = line.strip()
|
||||
if not line:
|
||||
continue
|
||||
try:
|
||||
entry = json.loads(line)
|
||||
except Exception:
|
||||
continue
|
||||
if not isinstance(entry, dict):
|
||||
continue
|
||||
# Claude Code transcript 格式:role + content
|
||||
role = entry.get("role") or entry.get("type", "")
|
||||
if role == "user":
|
||||
content = entry.get("content", "")
|
||||
if isinstance(content, list):
|
||||
# content 可能是 list of blocks
|
||||
texts = []
|
||||
for block in content:
|
||||
if isinstance(block, dict):
|
||||
t = block.get("text", "")
|
||||
if t:
|
||||
texts.append(t)
|
||||
elif isinstance(block, str):
|
||||
texts.append(block)
|
||||
content = "\n".join(texts)
|
||||
if isinstance(content, str) and content.strip():
|
||||
# 截断过长的 prompt
|
||||
if len(content) > 2000:
|
||||
content = content[:2000] + "..."
|
||||
return content.strip()
|
||||
except Exception:
|
||||
pass
|
||||
return ""
|
||||
|
||||
|
||||
def main() -> int:
    """Stop-hook entry point: inject a self-reflection prompt after harness completes.

    Active only when the `.harness-reflect` marker exists. Emits
    {"decision": "block", "reason": <reflection prompt>} up to
    REFLECT_MAX_ITERATIONS times per session, then removes the marker
    and allows the stop. Always returns exit code 0.
    """
    payload = _read_payload()
    session_id = payload.get("session_id", "")
    if not session_id:
        return 0  # no session_id — allow stop

    # Guard: only trigger reflection after harness finished all tasks
    # (the .harness-reflect marker exists). This avoids two problems:
    # 1. a stale harness-tasks.json causing a false positive
    # 2. harness-stop.py removing .harness-active causing Claude Code to
    #    skip later hooks (false negative)
    root = _find_harness_root(payload)
    if root is None:
        return 0

    if not (root / ".harness-reflect").is_file():
        return 0

    # Read the maximum iteration count from the environment.
    try:
        max_iter = int(os.environ.get("REFLECT_MAX_ITERATIONS", DEFAULT_MAX_ITERATIONS))
    except (ValueError, TypeError):
        max_iter = DEFAULT_MAX_ITERATIONS

    # Zero or negative disables self-reflection entirely.
    if max_iter <= 0:
        return 0

    # Current per-session iteration count.
    count = _read_counter(session_id)

    # Past the limit: clean up the marker and allow the stop.
    if count >= max_iter:
        try:
            (root / ".harness-reflect").unlink(missing_ok=True)
        except Exception:
            pass
        return 0

    # Record this iteration before emitting the prompt.
    _write_counter(session_id, count + 1)

    # Pull the original user request from the transcript for context.
    transcript_path = payload.get("transcript_path", "")
    original_prompt = _extract_original_prompt(transcript_path)
    last_message = payload.get("last_assistant_message", "")
    if last_message and len(last_message) > 3000:
        last_message = last_message[:3000] + "..."
    # NOTE(review): last_message is truncated but never included in the
    # reflection prompt below — confirm whether it was meant to be appended.

    # Assemble the self-reflection prompt.
    parts = [
        f"[Self-Reflect] 迭代 {count + 1}/{max_iter} — 请在继续之前进行自省检查:",
    ]

    if original_prompt:
        parts.append(f"\n📋 原始请求:\n{original_prompt}")

    parts.append(
        "\n🔍 自省清单:"
        "\n1. 对照原始请求,逐项确认每个需求点是否已完整实现"
        "\n2. 检查是否有遗漏的边界情况、错误处理或异常场景"
        "\n3. 代码质量:是否有可以改进的地方(可读性、性能、安全性)"
        "\n4. 是否需要补充测试或文档"
        "\n5. 最终确认:所有改动是否一致且不互相冲突"
        "\n\n如果一切已完成,简要总结成果即可结束。如果发现问题,继续修复。"
    )

    reason = "\n".join(parts)

    print(json.dumps({"decision": "block", "reason": reason}, ensure_ascii=False))
    return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
raise SystemExit(main())
|
||||
178
skills/harness/tests/e2e-100tasks.sh
Executable file
178
skills/harness/tests/e2e-100tasks.sh
Executable file
@@ -0,0 +1,178 @@
|
||||
#!/usr/bin/env bash
set -euo pipefail

# E2E test: 100 harness tasks + 5 self-reflection iterations via claude -p
# Usage: bash e2e-100tasks.sh

# NOTE(review): SCRIPT_DIR is currently unused — presumably kept for
# future relative-path lookups; confirm or remove.
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PROJECT_DIR="$(mktemp -d /tmp/harness-e2e-XXXXXX)"
LOG_FILE="${PROJECT_DIR}/test-output.log"

echo "=== Harness E2E Test: 100 tasks + 5 self-reflect ==="
echo "Project dir: ${PROJECT_DIR}"
echo ""

# --- 1. Generate harness-tasks.json with 100 trivial tasks ---
# Each task just requires a file task-NNN.txt containing "done-task-NNN".
python3 - "${PROJECT_DIR}" <<'PYEOF'
import json, sys

root = sys.argv[1]
tasks = []
for i in range(1, 101):
    tid = f"task-{i:03d}"
    tasks.append({
        "id": tid,
        "title": f"Create file {tid}.txt",
        "status": "pending",
        "priority": "P1",
        "depends_on": [],
        "attempts": 0,
        "max_attempts": 3,
        "started_at_commit": None,
        "validation": {
            "command": f"test -f {tid}.txt && grep -q 'done-{tid}' {tid}.txt",
            "timeout_seconds": 10
        },
        "on_failure": {"cleanup": None},
        "error_log": [],
        "checkpoints": [],
        "completed_at": None
    })

state = {
    "version": 2,
    "created": "2026-03-01T00:00:00Z",
    "session_config": {
        "concurrency_mode": "exclusive",
        "max_tasks_per_session": 100,
        "max_sessions": 50,
        "max_reflect_iterations": 5
    },
    "tasks": tasks,
    "session_count": 0,
    "last_session": None
}

with open(f"{root}/harness-tasks.json", "w") as f:
    json.dump(state, f, indent=2, ensure_ascii=False)

print(f"Generated {len(tasks)} tasks")
PYEOF

# --- 2. Create progress log ---
touch "${PROJECT_DIR}/harness-progress.txt"

# --- 3. Create .harness-active marker ---
touch "${PROJECT_DIR}/.harness-active"

# --- 4. Init git repo (required for harness commit tracking) ---
cd "${PROJECT_DIR}"
git init -q
git add harness-tasks.json harness-progress.txt .harness-active
git commit -q -m "harness init"

echo "Setup complete. Running claude -p ..."
echo ""

# --- 5. Build the prompt ---
PROMPT="$(cat <<'PROMPT_EOF'
You are in a project with a harness setup. Run /harness run to execute all tasks.

The project is at the current working directory. There are 100 tasks in harness-tasks.json.
Each task requires creating a file: for task-001, create task-001.txt with content "done-task-001".

Execute the harness infinite loop protocol:
1. Read harness-tasks.json and harness-progress.txt
2. Pick next eligible task by priority
3. For each task: create the file with the required content, run validation, mark completed
4. Continue until all tasks are done
5. After completion, the self-reflect stop hook will trigger 5 times — complete those iterations

IMPORTANT: Do NOT use any skill tools. Just directly create files and update harness state.
For efficiency, you can batch multiple file creations in a single command.
After creating files, update harness-tasks.json to mark them completed.
Do all work directly — no planning mode, no subagents.
PROMPT_EOF
)"

# --- 6. Run claude -p ---
START_TIME=$(date +%s)

cd "${PROJECT_DIR}"
# unset CLAUDECODE so the CLI doesn't think it is nested inside another session.
unset CLAUDECODE
REFLECT_MAX_ITERATIONS=5 \
HARNESS_STATE_ROOT="${PROJECT_DIR}" \
claude -p "${PROMPT}" \
  --model sonnet \
  --dangerously-skip-permissions \
  --disable-slash-commands \
  --no-session-persistence \
  --max-budget-usd 5 \
  --allowedTools 'Bash(*)' 'Read' 'Write' 'Glob' 'Grep' 'Edit' \
  2>&1 | tee "${LOG_FILE}"

END_TIME=$(date +%s)
ELAPSED=$((END_TIME - START_TIME))

echo ""
echo "=== Test Results ==="
echo "Duration: ${ELAPSED}s"
echo ""

# --- 7. Verify results ---
# Exit 0 when >= 95% of tasks produced their file and were marked completed.
python3 - "${PROJECT_DIR}" <<'VERIFY_EOF'
import json, sys, os
from pathlib import Path

root = Path(sys.argv[1])
tasks_path = root / "harness-tasks.json"
progress_path = root / "harness-progress.txt"

# Check task files created
created = 0
for i in range(1, 101):
    tid = f"task-{i:03d}"
    fpath = root / f"{tid}.txt"
    if fpath.is_file():
        content = fpath.read_text().strip()
        if f"done-{tid}" in content:
            created += 1

# Check task statuses
with tasks_path.open() as f:
    state = json.load(f)
tasks = state.get("tasks", [])
completed = sum(1 for t in tasks if t.get("status") == "completed")
failed = sum(1 for t in tasks if t.get("status") == "failed")
pending = sum(1 for t in tasks if t.get("status") == "pending")
in_progress = sum(1 for t in tasks if t.get("status") == "in_progress")

# Check .harness-active removed
marker_removed = not (root / ".harness-active").is_file()

# Check progress log
progress_lines = 0
if progress_path.is_file():
    progress_lines = len([l for l in progress_path.read_text().splitlines() if l.strip()])

print(f"Files created: {created}/100")
print(f"Tasks completed: {completed}/100")
print(f"Tasks failed: {failed}")
print(f"Tasks pending: {pending}")
print(f"Tasks in_progress: {in_progress}")
print(f"Marker removed: {marker_removed}")
print(f"Progress log lines: {progress_lines}")
print()

if created >= 95 and completed >= 95:
    print("PASS: >= 95% tasks completed successfully")
    sys.exit(0)
else:
    print(f"PARTIAL: {created} files, {completed} completed")
    print("Check the log for details")
    sys.exit(1)
VERIFY_EOF

echo ""
echo "Log: ${LOG_FILE}"
echo "Project: ${PROJECT_DIR}"
803
skills/harness/tests/test_hooks.py
Normal file
803
skills/harness/tests/test_hooks.py
Normal file
@@ -0,0 +1,803 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Unit tests for harness hook scripts.
|
||||
|
||||
Tests the activation guard (.harness-active marker), task state logic,
|
||||
and edge cases for all 4 hooks: Stop, SessionStart, TeammateIdle, SubagentStop.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import unittest
|
||||
from pathlib import Path
|
||||
|
||||
HOOKS_DIR = Path(__file__).resolve().parent.parent / "hooks"
|
||||
STOP_HOOK = HOOKS_DIR / "harness-stop.py"
|
||||
SESSION_HOOK = HOOKS_DIR / "harness-sessionstart.py"
|
||||
IDLE_HOOK = HOOKS_DIR / "harness-teammateidle.py"
|
||||
SUBAGENT_HOOK = HOOKS_DIR / "harness-subagentstop.py"
|
||||
|
||||
|
||||
def build_hook_env(env_extra: dict | None = None) -> dict[str, str]:
    """Return a copy of os.environ with harness variables removed.

    Optional *env_extra* entries are layered on top afterwards, so a
    test can opt back in to specific variables.
    """
    env = dict(os.environ)
    # Drop harness-related variables so they cannot leak into the hook under test.
    for name in ("HARNESS_STATE_ROOT", "HARNESS_WORKER_ID", "CLAUDE_PROJECT_DIR"):
        env.pop(name, None)
    if env_extra:
        env.update(env_extra)
    return env
|
||||
|
||||
|
||||
def run_hook(script: Path, payload: dict, env_extra: dict | None = None) -> tuple[int, str, str]:
    """Execute *script* with *payload* serialized as JSON on stdin.

    Returns (exit_code, stripped stdout, stripped stderr); a 10-second
    timeout guards against a hung hook.
    """
    completed = subprocess.run(
        [sys.executable, str(script)],
        input=json.dumps(payload),
        capture_output=True,
        text=True,
        timeout=10,
        env=build_hook_env(env_extra),
    )
    return completed.returncode, completed.stdout.strip(), completed.stderr.strip()
|
||||
|
||||
|
||||
def write_tasks(root: Path, tasks: list[dict], **extra) -> None:
    """Write a minimal harness state file into *root*.

    The state is {"tasks": tasks} merged with **extra (extra keys win,
    matching dict-literal spread semantics).
    """
    state: dict = {"tasks": tasks}
    state.update(extra)
    (root / "harness-tasks.json").write_text(json.dumps(state), encoding="utf-8")
|
||||
|
||||
|
||||
def activate(root: Path) -> None:
    """Create the `.harness-active` marker so hooks treat harness as running."""
    marker = root / ".harness-active"
    marker.touch()
|
||||
|
||||
|
||||
def deactivate(root: Path) -> None:
    """Remove the `.harness-active` marker; no-op when it is absent."""
    try:
        (root / ".harness-active").unlink()
    except FileNotFoundError:
        pass
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Activation Guard Tests (shared across all hooks)
|
||||
# ---------------------------------------------------------------------------
|
||||
class TestActivationGuard(unittest.TestCase):
    """All hooks must be no-ops when .harness-active is absent.

    Each test runs the real hook script in a subprocess against a
    temporary project directory, toggling the marker via activate() /
    deactivate() and asserting on exit code, stdout JSON, and stderr.
    """

    def setUp(self):
        # Fresh project dir with one eligible pending task and a progress log.
        self.tmpdir = tempfile.mkdtemp()
        self.root = Path(self.tmpdir)
        write_tasks(self.root, [
            {"id": "t1", "title": "Pending task", "status": "pending", "priority": "P0", "depends_on": []},
        ])
        (self.root / "harness-progress.txt").write_text("[SESSION-1] INIT\n")

    def tearDown(self):
        import shutil
        shutil.rmtree(self.tmpdir, ignore_errors=True)

    def _payload(self, **extra):
        # Minimal hook payload pointing the hook at the temp project dir.
        return {"cwd": self.tmpdir, **extra}

    def test_stop_inactive_allows(self):
        """Stop hook allows stop when .harness-active is absent."""
        deactivate(self.root)
        code, stdout, stderr = run_hook(STOP_HOOK, self._payload())
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "")

    def test_stop_active_blocks(self):
        """Stop hook blocks when .harness-active is present and tasks remain."""
        activate(self.root)
        code, stdout, stderr = run_hook(STOP_HOOK, self._payload())
        self.assertEqual(code, 0)
        data = json.loads(stdout)
        self.assertEqual(data["decision"], "block")

    def test_sessionstart_inactive_noop(self):
        """SessionStart hook produces no output when inactive."""
        deactivate(self.root)
        code, stdout, stderr = run_hook(SESSION_HOOK, self._payload())
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "")

    def test_sessionstart_active_injects(self):
        """SessionStart hook injects context when active."""
        activate(self.root)
        code, stdout, stderr = run_hook(SESSION_HOOK, self._payload())
        self.assertEqual(code, 0)
        data = json.loads(stdout)
        self.assertIn("additionalContext", data.get("hookSpecificOutput", {}))

    def test_teammateidle_inactive_allows(self):
        """TeammateIdle hook allows idle when inactive."""
        deactivate(self.root)
        code, stdout, stderr = run_hook(IDLE_HOOK, self._payload())
        self.assertEqual(code, 0)
        self.assertEqual(stderr, "")

    def test_teammateidle_active_blocks(self):
        """TeammateIdle hook blocks idle when active and tasks remain."""
        activate(self.root)
        code, stdout, stderr = run_hook(IDLE_HOOK, self._payload())
        # TeammateIdle signals "keep working" via exit code 2 + stderr.
        self.assertEqual(code, 2)
        self.assertIn("HARNESS", stderr)

    def test_subagentstop_inactive_allows(self):
        """SubagentStop hook allows stop when inactive."""
        deactivate(self.root)
        code, stdout, stderr = run_hook(SUBAGENT_HOOK, self._payload())
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "")

    def test_subagentstop_active_blocks(self):
        """SubagentStop hook blocks when active and tasks in progress."""
        # SubagentStop only blocks for in_progress tasks, so rewrite state.
        write_tasks(self.root, [
            {"id": "t1", "title": "Working task", "status": "in_progress", "priority": "P0", "depends_on": []},
        ])
        activate(self.root)
        code, stdout, stderr = run_hook(SUBAGENT_HOOK, self._payload())
        self.assertEqual(code, 0)
        data = json.loads(stdout)
        self.assertEqual(data["decision"], "block")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# No Harness Root Tests
|
||||
# ---------------------------------------------------------------------------
|
||||
class TestNoHarnessRoot(unittest.TestCase):
    """All hooks must be no-ops when no harness-tasks.json exists.

    Uses an empty temp directory as cwd so root discovery finds nothing.
    """

    def setUp(self):
        # Deliberately empty: no harness-tasks.json, no marker.
        self.tmpdir = tempfile.mkdtemp()

    def tearDown(self):
        import shutil
        shutil.rmtree(self.tmpdir, ignore_errors=True)

    def test_stop_no_root(self):
        code, stdout, _ = run_hook(STOP_HOOK, {"cwd": self.tmpdir})
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "")

    def test_sessionstart_no_root(self):
        code, stdout, _ = run_hook(SESSION_HOOK, {"cwd": self.tmpdir})
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "")

    def test_teammateidle_no_root(self):
        code, _, stderr = run_hook(IDLE_HOOK, {"cwd": self.tmpdir})
        self.assertEqual(code, 0)
        self.assertEqual(stderr, "")

    def test_subagentstop_no_root(self):
        code, stdout, _ = run_hook(SUBAGENT_HOOK, {"cwd": self.tmpdir})
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Stop Hook — Task State Logic
|
||||
# ---------------------------------------------------------------------------
|
||||
class TestStopHookTaskLogic(unittest.TestCase):
    """Stop hook task selection, completion detection, and safety valve."""

    def setUp(self):
        # Fresh temp dir serves as the harness state root; activate() marks it live.
        self.tmpdir = tempfile.mkdtemp()
        self.root = Path(self.tmpdir)
        (self.root / "harness-progress.txt").write_text("")
        activate(self.root)

    def tearDown(self):
        import shutil
        shutil.rmtree(self.tmpdir, ignore_errors=True)

    def _payload(self, **extra):
        """Build the hook's stdin payload rooted at this test's temp dir."""
        return {"cwd": self.tmpdir, **extra}

    def test_all_completed_allows_stop(self):
        """When all tasks are completed, stop is allowed and .harness-reflect created."""
        write_tasks(self.root, [
            {"id": "t1", "status": "completed"},
            {"id": "t2", "status": "completed"},
        ])
        code, stdout, _ = run_hook(STOP_HOOK, self._payload())
        self.assertEqual(code, 0)
        # Empty stdout means "no decision emitted" — the stop proceeds.
        self.assertEqual(stdout, "")
        # Completion should also deactivate the harness root.
        self.assertFalse((self.root / ".harness-active").exists())
        self.assertTrue(
            (self.root / ".harness-reflect").exists(),
            ".harness-reflect should be created when all tasks complete",
        )

    def test_pending_with_unmet_deps_allows_stop(self):
        """Pending tasks with unmet dependencies don't block stop."""
        # t1 is terminally failed (attempts == max), so t2's dependency can never be met.
        write_tasks(self.root, [
            {"id": "t1", "status": "failed", "attempts": 3, "max_attempts": 3},
            {"id": "t2", "status": "pending", "depends_on": ["t1"]},
        ])
        code, stdout, _ = run_hook(STOP_HOOK, self._payload())
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "")

    def test_retryable_failed_blocks(self):
        """Failed task with attempts < max_attempts blocks stop."""
        write_tasks(self.root, [
            {"id": "t1", "status": "failed", "attempts": 1, "max_attempts": 3, "priority": "P0", "depends_on": [], "title": "Retry me"},
        ])
        code, stdout, _ = run_hook(STOP_HOOK, self._payload())
        # A block decision is emitted as JSON on stdout.
        data = json.loads(stdout)
        self.assertEqual(data["decision"], "block")
        self.assertIn("Retry me", data["reason"])

    def test_exhausted_retries_allows_stop(self):
        """Failed task with attempts >= max_attempts allows stop."""
        write_tasks(self.root, [
            {"id": "t1", "status": "failed", "attempts": 3, "max_attempts": 3, "depends_on": []},
        ])
        code, stdout, _ = run_hook(STOP_HOOK, self._payload())
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "")

    def test_in_progress_blocks(self):
        """In-progress tasks block stop."""
        write_tasks(self.root, [
            {"id": "t1", "status": "in_progress", "priority": "P0"},
        ])
        code, stdout, _ = run_hook(STOP_HOOK, self._payload())
        data = json.loads(stdout)
        self.assertEqual(data["decision"], "block")

    def test_session_limit_allows_stop(self):
        """Session limit reached allows stop even with pending tasks."""
        write_tasks(self.root, [
            {"id": "t1", "status": "pending", "depends_on": [], "priority": "P0"},
        ], session_count=5, session_config={"max_sessions": 5})
        code, stdout, _ = run_hook(STOP_HOOK, self._payload())
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "")

    def test_max_tasks_per_session_limit_allows_stop(self):
        """Per-session completed-task cap allows stop when reached."""
        write_tasks(self.root, [
            {"id": "t1", "status": "pending", "depends_on": [], "priority": "P0"},
        ], session_count=2, session_config={"max_tasks_per_session": 1})
        # One completed entry logged for the current session (SESSION-2) hits the cap of 1.
        (self.root / "harness-progress.txt").write_text("[SESSION-2] Completed [task-1]\n")
        code, stdout, _ = run_hook(STOP_HOOK, self._payload())
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "")

    def test_concurrent_other_worker_in_progress_allows_stop(self):
        """Concurrent mode should not block on another worker's in-progress task."""
        write_tasks(self.root, [
            {"id": "t1", "status": "in_progress", "claimed_by": "worker-a", "priority": "P0"},
        ], session_config={"concurrency_mode": "concurrent"})
        code, stdout, _ = run_hook(
            STOP_HOOK, self._payload(),
            env_extra={"HARNESS_WORKER_ID": "worker-b"},
        )
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "")

    def test_priority_ordering_in_block_reason(self):
        """Block reason shows highest priority task as next."""
        # P0 outranks P2, so t2 should be named as the next task.
        write_tasks(self.root, [
            {"id": "t1", "status": "pending", "priority": "P2", "depends_on": [], "title": "Low"},
            {"id": "t2", "status": "pending", "priority": "P0", "depends_on": [], "title": "High"},
        ])
        code, stdout, _ = run_hook(STOP_HOOK, self._payload())
        data = json.loads(stdout)
        self.assertIn("t2", data["reason"])
        self.assertIn("High", data["reason"])

    def test_stop_hook_active_safety_valve(self):
        """After MAX_CONSECUTIVE_BLOCKS with stop_hook_active, allows stop."""
        write_tasks(self.root, [
            {"id": "t1", "status": "pending", "depends_on": [], "priority": "P0"},
        ])
        # Counter format appears to be "consecutive_blocks,completed_count" —
        # TODO confirm against the stop hook's source.
        (self.root / ".harness-stop-counter").write_text("9,0")
        code, stdout, stderr = run_hook(STOP_HOOK, self._payload(stop_hook_active=True))
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "")
        self.assertIn("WARN", stderr)

    def test_stop_hook_active_below_threshold_blocks(self):
        """Below MAX_CONSECUTIVE_BLOCKS with stop_hook_active still blocks."""
        write_tasks(self.root, [
            {"id": "t1", "status": "pending", "depends_on": [], "priority": "P0"},
        ])
        (self.root / ".harness-stop-counter").write_text("2,0")
        code, stdout, _ = run_hook(STOP_HOOK, self._payload(stop_hook_active=True))
        data = json.loads(stdout)
        self.assertEqual(data["decision"], "block")

    def test_progress_resets_block_counter(self):
        """When completed count increases, block counter resets."""
        write_tasks(self.root, [
            {"id": "t1", "status": "completed"},
            {"id": "t2", "status": "pending", "depends_on": [], "priority": "P0"},
        ])
        # Stored completed count (0) is behind the actual completed count (1),
        # so progress was made and the block counter should restart at 1.
        (self.root / ".harness-stop-counter").write_text("7,0")
        code, stdout, _ = run_hook(STOP_HOOK, self._payload(stop_hook_active=True))
        data = json.loads(stdout)
        self.assertEqual(data["decision"], "block")
        counter = (self.root / ".harness-stop-counter").read_text().strip()
        self.assertEqual(counter, "1,1")

    def test_corrupt_json_with_stop_hook_active_allows(self):
        """Corrupt config + stop_hook_active should allow stop to avoid loop."""
        (self.root / "harness-tasks.json").write_text("{invalid json")
        code, stdout, stderr = run_hook(STOP_HOOK, self._payload(stop_hook_active=True))
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "")
        self.assertIn("WARN", stderr)
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# SessionStart Hook — Context Injection
|
||||
# ---------------------------------------------------------------------------
|
||||
class TestSessionStartHook(unittest.TestCase):
    """SessionStart hook injects a harness task summary into session context."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp()
        self.root = Path(self.tmpdir)
        activate(self.root)

    def tearDown(self):
        import shutil
        shutil.rmtree(self.tmpdir, ignore_errors=True)

    def _payload(self):
        return {"cwd": self.tmpdir}

    def _run(self):
        """Run the hook once; return (exit_code, additionalContext string)."""
        exit_code, out, _ = run_hook(SESSION_HOOK, self._payload())
        parsed = json.loads(out)
        return exit_code, parsed["hookSpecificOutput"]["additionalContext"]

    def test_summary_includes_counts(self):
        """Context summary reports per-status counts and the total."""
        write_tasks(self.root, [
            {"id": "t1", "status": "completed"},
            {"id": "t2", "status": "pending", "depends_on": ["t1"]},
            {"id": "t3", "status": "failed", "depends_on": []},
        ])
        (self.root / "harness-progress.txt").write_text("[SESSION-1] STATS total=3\n")
        _, context = self._run()
        for fragment in ("completed=1", "pending=1", "failed=1", "total=3"):
            self.assertIn(fragment, context)

    def test_next_task_hint(self):
        """Context names the next eligible task and its title."""
        write_tasks(self.root, [
            {"id": "t1", "status": "completed"},
            {"id": "t2", "status": "pending", "priority": "P0", "depends_on": ["t1"], "title": "Do stuff"},
        ])
        (self.root / "harness-progress.txt").write_text("")
        _, context = self._run()
        self.assertIn("next=t2", context)
        self.assertIn("Do stuff", context)

    def test_empty_tasks_no_crash(self):
        """An empty task list yields a zero-total summary rather than a crash."""
        write_tasks(self.root, [])
        (self.root / "harness-progress.txt").write_text("")
        exit_code, context = self._run()
        self.assertEqual(0, exit_code)
        self.assertIn("total=0", context)

    def test_corrupt_json_reports_error(self):
        """Corrupt harness-tasks.json surfaces an error note in the context."""
        (self.root / "harness-tasks.json").write_text("{invalid json")
        (self.root / "harness-progress.txt").write_text("")
        exit_code, context = self._run()
        self.assertEqual(0, exit_code)
        self.assertIn("error", context.lower())

    def test_invalid_attempt_fields_no_crash(self):
        """Non-numeric attempts/max_attempts fields do not crash the hook."""
        write_tasks(self.root, [
            {"id": "t1", "status": "failed", "attempts": "oops", "max_attempts": "bad", "depends_on": []},
        ])
        (self.root / "harness-progress.txt").write_text("")
        exit_code, context = self._run()
        self.assertEqual(0, exit_code)
        self.assertIn("total=1", context)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# TeammateIdle Hook — Ownership & Task State
|
||||
# ---------------------------------------------------------------------------
|
||||
class TestTeammateIdleHook(unittest.TestCase):
    """TeammateIdle hook: ownership checks and task-state gating of idle."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp()
        self.root = Path(self.tmpdir)
        activate(self.root)

    def tearDown(self):
        import shutil
        shutil.rmtree(self.tmpdir, ignore_errors=True)

    def _idle(self, payload_extra=None, env_extra=None):
        """Invoke the idle hook; return (exit_code, stderr)."""
        payload = {"cwd": self.tmpdir}
        if payload_extra:
            payload.update(payload_extra)
        if env_extra is None:
            exit_code, _, err = run_hook(IDLE_HOOK, payload)
        else:
            exit_code, _, err = run_hook(IDLE_HOOK, payload, env_extra=env_extra)
        return exit_code, err

    def test_owned_in_progress_blocks(self):
        """A teammate owning an in-progress task may not go idle (exit code 2)."""
        write_tasks(self.root, [
            {"id": "t1", "status": "in_progress", "claimed_by": "alice", "title": "My task"},
        ])
        exit_code, err = self._idle({"teammate_name": "alice"})
        self.assertEqual(2, exit_code)
        self.assertIn("t1", err)

    def test_unowned_in_progress_allows(self):
        """A task claimed by someone else does not block this teammate's idle."""
        write_tasks(self.root, [
            {"id": "t1", "status": "in_progress", "claimed_by": "bob"},
        ])
        exit_code, _ = self._idle({"teammate_name": "alice"})
        self.assertEqual(0, exit_code)

    def test_pending_tasks_block(self):
        """Eligible pending work blocks idle even without ownership."""
        write_tasks(self.root, [
            {"id": "t1", "status": "pending", "depends_on": [], "title": "Next up"},
        ])
        exit_code, err = self._idle()
        self.assertEqual(2, exit_code)
        self.assertIn("t1", err)

    def test_all_completed_allows(self):
        """A fully completed task list allows idle with no diagnostics."""
        write_tasks(self.root, [
            {"id": "t1", "status": "completed"},
            {"id": "t2", "status": "completed"},
        ])
        exit_code, err = self._idle()
        self.assertEqual(0, exit_code)
        self.assertEqual("", err)

    def test_failed_retryable_blocks(self):
        """A failed task with retries remaining blocks idle."""
        write_tasks(self.root, [
            {"id": "t1", "status": "failed", "attempts": 1, "max_attempts": 3, "depends_on": [], "title": "Retry"},
        ])
        exit_code, err = self._idle()
        self.assertEqual(2, exit_code)
        self.assertIn("t1", err)

    def test_worker_id_env_matches(self):
        """Ownership may be established via the HARNESS_WORKER_ID env var."""
        write_tasks(self.root, [
            {"id": "t1", "status": "in_progress", "claimed_by": "w-123"},
        ])
        exit_code, err = self._idle(env_extra={"HARNESS_WORKER_ID": "w-123"})
        self.assertEqual(2, exit_code)
        self.assertIn("t1", err)
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# SubagentStop Hook — Stop Guard & stop_hook_active
|
||||
# ---------------------------------------------------------------------------
|
||||
class TestSubagentStopHook(unittest.TestCase):
    """SubagentStop hook: stop-guard decisions and the stop_hook_active bypass."""

    def setUp(self):
        # Fresh temp dir acts as the harness state root for each test.
        self.tmpdir = tempfile.mkdtemp()
        self.root = Path(self.tmpdir)
        activate(self.root)

    def tearDown(self):
        import shutil
        shutil.rmtree(self.tmpdir, ignore_errors=True)

    def test_in_progress_blocks(self):
        # An in-progress task must yield a JSON block decision on stdout.
        write_tasks(self.root, [
            {"id": "t1", "status": "in_progress", "title": "Working"},
        ])
        code, stdout, _ = run_hook(SUBAGENT_HOOK, {"cwd": self.tmpdir})
        data = json.loads(stdout)
        self.assertEqual(data["decision"], "block")
        self.assertIn("Working", data["reason"])

    def test_pending_allows(self):
        # A pending task whose dependency is already completed does not block
        # subagent stop; empty stdout means no decision was emitted.
        write_tasks(self.root, [
            {"id": "t1", "status": "completed"},
            {"id": "t2", "status": "pending", "depends_on": ["t1"], "title": "Next"},
        ])
        code, stdout, _ = run_hook(SUBAGENT_HOOK, {"cwd": self.tmpdir})
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "")

    def test_all_done_allows(self):
        # All tasks completed: silent success.
        write_tasks(self.root, [
            {"id": "t1", "status": "completed"},
            {"id": "t2", "status": "completed"},
        ])
        code, stdout, _ = run_hook(SUBAGENT_HOOK, {"cwd": self.tmpdir})
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "")

    def test_stop_hook_active_allows(self):
        """stop_hook_active=True bypasses all checks to prevent infinite loop."""
        write_tasks(self.root, [
            {"id": "t1", "status": "in_progress"},
        ])
        code, stdout, _ = run_hook(SUBAGENT_HOOK, {"cwd": self.tmpdir, "stop_hook_active": True})
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "")

    def test_blocked_deps_not_counted(self):
        """Pending tasks with unmet deps don't trigger block."""
        # t1 has exhausted retries, so t2's dependency can never be satisfied.
        write_tasks(self.root, [
            {"id": "t1", "status": "failed", "attempts": 3, "max_attempts": 3},
            {"id": "t2", "status": "pending", "depends_on": ["t1"]},
        ])
        code, stdout, _ = run_hook(SUBAGENT_HOOK, {"cwd": self.tmpdir})
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "")

    def test_concurrent_owned_in_progress_blocks(self):
        # In concurrent mode, this worker's own in-progress task blocks its stop.
        write_tasks(self.root, [
            {"id": "t1", "status": "in_progress", "claimed_by": "worker-a", "title": "Mine"},
        ], session_config={"concurrency_mode": "concurrent"})
        code, stdout, _ = run_hook(
            SUBAGENT_HOOK, {"cwd": self.tmpdir},
            env_extra={"HARNESS_WORKER_ID": "worker-a"},
        )
        data = json.loads(stdout)
        self.assertEqual(data["decision"], "block")
        self.assertIn("Mine", data["reason"])

    def test_concurrent_other_worker_in_progress_allows(self):
        # Another worker's in-progress task must not block this worker's stop.
        write_tasks(self.root, [
            {"id": "t1", "status": "in_progress", "claimed_by": "worker-a", "title": "Other"},
        ], session_config={"concurrency_mode": "concurrent"})
        code, stdout, _ = run_hook(
            SUBAGENT_HOOK, {"cwd": self.tmpdir},
            env_extra={"HARNESS_WORKER_ID": "worker-b"},
        )
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "")

    def test_concurrent_missing_identity_blocks(self):
        # Without HARNESS_WORKER_ID in concurrent mode the hook cannot attribute
        # ownership, so it blocks and explains the missing worker identity.
        write_tasks(self.root, [
            {"id": "t1", "status": "in_progress", "claimed_by": "worker-a", "title": "Other"},
        ], session_config={"concurrency_mode": "concurrent"})
        code, stdout, _ = run_hook(SUBAGENT_HOOK, {"cwd": self.tmpdir})
        data = json.loads(stdout)
        self.assertEqual(data["decision"], "block")
        self.assertIn("worker identity", data["reason"])
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Edge Cases
|
||||
# ---------------------------------------------------------------------------
|
||||
class TestEdgeCases(unittest.TestCase):
    """Malformed stdin and environment-override edge cases across all hooks."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp()
        self.root = Path(self.tmpdir)

    def tearDown(self):
        import shutil
        shutil.rmtree(self.tmpdir, ignore_errors=True)

    def _assert_hooks_survive_stdin(self, raw, label):
        """Feed raw stdin to every hook; require exit 0/2 and no traceback."""
        write_tasks(self.root, [{"id": "t1", "status": "pending", "depends_on": []}])
        activate(self.root)
        for hook in (STOP_HOOK, SESSION_HOOK, IDLE_HOOK, SUBAGENT_HOOK):
            proc = subprocess.run(
                [sys.executable, str(hook)],
                input=raw,
                capture_output=True, text=True, timeout=10,
                cwd=self.tmpdir,
                env=build_hook_env(),
            )
            self.assertIn(proc.returncode, {0, 2}, f"{hook.name} {label}")
            self.assertNotIn("Traceback", proc.stderr)

    def test_empty_stdin(self):
        """Hooks handle empty stdin gracefully."""
        self._assert_hooks_survive_stdin("", "failed on empty stdin")

    def test_invalid_json_stdin(self):
        """Hooks handle invalid JSON stdin gracefully."""
        self._assert_hooks_survive_stdin("not json at all", "crashed on invalid JSON")

    def test_harness_state_root_env(self):
        """HARNESS_STATE_ROOT overrides cwd-based root discovery."""
        write_tasks(self.root, [
            {"id": "t1", "status": "pending", "depends_on": [], "priority": "P0"},
        ])
        activate(self.root)
        (self.root / "harness-progress.txt").write_text("")
        _, out, _ = run_hook(
            STOP_HOOK, {"cwd": "/nonexistent"},
            env_extra={"HARNESS_STATE_ROOT": self.tmpdir},
        )
        verdict = json.loads(out)
        self.assertEqual("block", verdict["decision"])

    def test_tasks_not_a_list(self):
        """A non-list tasks field is handled without crashing (hook still blocks)."""
        (self.root / "harness-tasks.json").write_text('{"tasks": "not a list"}')
        activate(self.root)
        (self.root / "harness-progress.txt").write_text("")
        exit_code, out, _ = run_hook(STOP_HOOK, {"cwd": self.tmpdir})
        self.assertEqual(0, exit_code)
        verdict = json.loads(out)
        self.assertEqual("block", verdict["decision"])
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Self-Reflect Stop Hook — Only triggers after harness completes
|
||||
# ---------------------------------------------------------------------------
|
||||
# Hook script exercised by TestSelfReflectStopHook; resolved relative to the
# same hooks directory used for the other hook-path constants in this module.
REFLECT_HOOK = HOOKS_DIR / "self-reflect-stop.py"
|
||||
|
||||
class TestSelfReflectStopHook(unittest.TestCase):
    """self-reflect-stop.py must only trigger when .harness-reflect marker exists."""

    def setUp(self):
        # Deliberately no activate() here: individual tests choose whether the
        # harness root is active, inactive, or absent.
        self.tmpdir = tempfile.mkdtemp()
        self.root = Path(self.tmpdir)

    def tearDown(self):
        import shutil
        shutil.rmtree(self.tmpdir, ignore_errors=True)
        # Clean up counter files
        # (the hook keeps per-session iteration counters in the system temp dir)
        for p in Path(tempfile.gettempdir()).glob("claude-reflect-test-*"):
            try:
                p.unlink()
            except Exception:
                pass

    def _payload(self, session_id="test-reflect-001", **extra):
        """Hook stdin payload with a session id (counters are keyed by it)."""
        return {"cwd": self.tmpdir, "session_id": session_id, **extra}

    def _set_reflect(self):
        """Create .harness-reflect marker (simulates harness completion)."""
        (self.root / ".harness-reflect").touch()

    def test_no_harness_root_is_noop(self):
        """When harness-tasks.json doesn't exist, hook is a complete no-op."""
        code, stdout, stderr = run_hook(REFLECT_HOOK, self._payload())
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "", "Should produce no output when harness never used")

    def test_harness_active_no_reflect_marker(self):
        """When .harness-active exists but no .harness-reflect, hook is no-op."""
        write_tasks(self.root, [
            {"id": "t1", "status": "pending", "depends_on": []},
        ])
        activate(self.root)
        code, stdout, _ = run_hook(REFLECT_HOOK, self._payload())
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "", "Should not self-reflect while harness is active")

    def test_stale_tasks_without_reflect_marker_is_noop(self):
        """Stale harness-tasks.json without .harness-reflect does NOT trigger (fixes false positive)."""
        write_tasks(self.root, [
            {"id": "t1", "status": "completed"},
        ])
        deactivate(self.root)
        # No .harness-reflect marker — this is a stale file from a previous run
        code, stdout, _ = run_hook(REFLECT_HOOK, self._payload(session_id="test-stale"))
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "", "Stale harness-tasks.json should NOT trigger self-reflect")

    def test_harness_completed_triggers_reflection(self):
        """When .harness-reflect marker exists, triggers self-reflection."""
        write_tasks(self.root, [
            {"id": "t1", "status": "completed"},
        ])
        deactivate(self.root)
        self._set_reflect()
        sid = "test-reflect-trigger"
        code, stdout, _ = run_hook(REFLECT_HOOK, self._payload(session_id=sid))
        self.assertEqual(code, 0)
        # Reflection is delivered as a block decision whose reason carries the prompt.
        data = json.loads(stdout)
        self.assertEqual(data["decision"], "block")
        self.assertIn("Self-Reflect", data["reason"])

    def test_counter_increments(self):
        """Each invocation increments the iteration counter."""
        write_tasks(self.root, [{"id": "t1", "status": "completed"}])
        deactivate(self.root)
        self._set_reflect()
        sid = "test-reflect-counter"

        # First call: iteration 1
        code, stdout, _ = run_hook(REFLECT_HOOK, self._payload(session_id=sid))
        data = json.loads(stdout)
        self.assertIn("1/5", data["reason"])

        # Second call: iteration 2
        code, stdout, _ = run_hook(REFLECT_HOOK, self._payload(session_id=sid))
        data = json.loads(stdout)
        self.assertIn("2/5", data["reason"])

    def test_max_iterations_allows_stop_and_cleans_marker(self):
        """After max iterations, hook allows stop and removes .harness-reflect."""
        write_tasks(self.root, [{"id": "t1", "status": "completed"}])
        deactivate(self.root)
        self._set_reflect()
        sid = "test-reflect-max"

        # Write counter at max
        # (counter lives at <tmp>/claude-reflect-<session_id>)
        counter_path = Path(tempfile.gettempdir()) / f"claude-reflect-{sid}"
        counter_path.write_text("5", encoding="utf-8")

        code, stdout, _ = run_hook(REFLECT_HOOK, self._payload(session_id=sid))
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "", "Should allow stop after max iterations")
        self.assertFalse(
            (self.root / ".harness-reflect").exists(),
            ".harness-reflect should be cleaned up after max iterations",
        )

    def test_disabled_via_env(self):
        """REFLECT_MAX_ITERATIONS=0 disables self-reflection."""
        write_tasks(self.root, [{"id": "t1", "status": "completed"}])
        deactivate(self.root)
        self._set_reflect()
        code, stdout, _ = run_hook(
            REFLECT_HOOK,
            self._payload(session_id="test-reflect-disabled"),
            env_extra={"REFLECT_MAX_ITERATIONS": "0"},
        )
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "", "Should be disabled when max=0")

    def test_no_session_id_is_noop(self):
        """Missing session_id makes hook a no-op."""
        write_tasks(self.root, [{"id": "t1", "status": "completed"}])
        deactivate(self.root)
        self._set_reflect()
        code, stdout, _ = run_hook(REFLECT_HOOK, {"cwd": self.tmpdir})
        self.assertEqual(code, 0)
        self.assertEqual(stdout, "")

    def test_empty_stdin_no_crash(self):
        """Empty stdin doesn't crash."""
        write_tasks(self.root, [{"id": "t1", "status": "completed"}])
        self._set_reflect()
        proc = subprocess.run(
            [sys.executable, str(REFLECT_HOOK)],
            input="",
            capture_output=True, text=True, timeout=10,
            cwd=self.tmpdir,
            env=build_hook_env(),
        )
        self.assertEqual(proc.returncode, 0)
        self.assertNotIn("Traceback", proc.stderr)

    def test_harness_state_root_env_respected(self):
        """HARNESS_STATE_ROOT env var is used for root discovery."""
        write_tasks(self.root, [{"id": "t1", "status": "completed"}])
        deactivate(self.root)
        self._set_reflect()
        sid = "test-reflect-env"
        code, stdout, _ = run_hook(
            REFLECT_HOOK,
            {"cwd": "/nonexistent", "session_id": sid},
            env_extra={"HARNESS_STATE_ROOT": self.tmpdir},
        )
        data = json.loads(stdout)
        self.assertEqual(data["decision"], "block")
||||
|
||||
if __name__ == "__main__":
    # Allow running this test module directly (python <file>.py).
    unittest.main()
|
||||
42
uninstall.py
42
uninstall.py
@@ -12,6 +12,7 @@ from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Set
|
||||
|
||||
DEFAULT_INSTALL_DIR = "~/.claude"
|
||||
SETTINGS_FILE = "settings.json"
|
||||
|
||||
# Files created by installer itself (not by modules)
|
||||
INSTALLER_FILES = ["install.log", "installed_modules.json", "installed_modules.json.bak"]
|
||||
@@ -80,6 +81,42 @@ def load_config(install_dir: Path) -> Dict[str, Any]:
|
||||
return {}
|
||||
|
||||
|
||||
def unmerge_hooks_from_settings(module_name: str, install_dir: Path) -> bool:
    """Remove hooks tagged with __module__=module_name from settings.json.

    Hook entries installed by a module carry a "__module__" marker key; this
    strips exactly those entries, drops hook-type arrays that become empty,
    and rewrites the file only when something actually changed.

    Fix: settings.json is user-editable, so malformed shapes (a non-dict
    "hooks" section, a non-list hook-type value, or non-dict entries) must
    not raise out of the uninstall loop. Non-dict entries are always kept —
    they were not installed by us.

    Args:
        module_name: Module whose tagged hooks should be removed.
        install_dir: Install directory containing settings.json.

    Returns:
        True if settings.json was modified and rewritten, False otherwise.
    """
    settings_path = install_dir / SETTINGS_FILE
    if not settings_path.exists():
        return False

    try:
        with settings_path.open("r", encoding="utf-8") as f:
            settings = json.load(f)
    except (json.JSONDecodeError, OSError):
        return False

    hooks = settings.get("hooks")
    if not isinstance(hooks, dict):
        # Missing or malformed "hooks" section — nothing we can safely unmerge.
        return False

    modified = False
    for hook_type in list(hooks.keys()):
        entries = hooks[hook_type]
        if not isinstance(entries, list):
            continue  # unknown shape — leave untouched
        kept = [
            entry for entry in entries
            if not (isinstance(entry, dict) and entry.get("__module__") == module_name)
        ]
        if len(kept) < len(entries):
            hooks[hook_type] = kept
            modified = True
        # Remove empty hook type arrays
        if not hooks[hook_type]:
            del hooks[hook_type]

    if modified:
        try:
            with settings_path.open("w", encoding="utf-8") as f:
                json.dump(settings, f, indent=2, ensure_ascii=False)
                f.write("\n")
        except OSError:
            # Write failed: the on-disk file is unchanged, so report no change.
            return False

    return modified
||||
|
||||
def get_module_files(module_name: str, config: Dict[str, Any]) -> Set[str]:
|
||||
"""Extract files/dirs that a module installs based on config.json operations."""
|
||||
files: Set[str] = set()
|
||||
@@ -261,6 +298,11 @@ def main(argv: Optional[List[str]] = None) -> int:
|
||||
except OSError as e:
|
||||
print(f" ✗ Failed to remove {item}: {e}", file=sys.stderr)
|
||||
|
||||
# Remove module hooks from settings.json
|
||||
for m in selected:
|
||||
if unmerge_hooks_from_settings(m, install_dir):
|
||||
print(f" ✓ Removed hooks for module '{m}' from settings.json")
|
||||
|
||||
# Update installed_modules.json
|
||||
status_file = install_dir / "installed_modules.json"
|
||||
if status_file.exists() and selected != list(installed_modules.keys()):
|
||||
|
||||
Reference in New Issue
Block a user