Mirror of https://github.com/cexll/myclaude.git (synced 2026-02-06 02:34:09 +08:00)
Compare commits
30 Commits
| SHA1 |
|---|
| f08fa88d71 |
| 95408e7fa7 |
| 22987b5f74 |
| 90f9a131fe |
| 017ad5e4d9 |
| 15b4176afb |
| 1533e08425 |
| c3dd5b567f |
| 386937cfb3 |
| c89ad3df2d |
| 2b8efd42a9 |
| d4104214ff |
| 802efb5358 |
| 767b137c58 |
| 8eecf103ef |
| 77822cf062 |
| 007c27879d |
| 368831da4c |
| eb84dfa574 |
| 3bc8342929 |
| cfc64e8515 |
| 7a40c9d492 |
| d51a2f12f8 |
| 8a8771076d |
| e637b26151 |
| 595fa8da96 |
| 9ba6950d21 |
| 7f790fbe15 |
| 06f14aa695 |
| 246674c388 |
```diff
@@ -124,58 +124,6 @@
       "./agents/debug.md"
     ]
   },
-  {
-    "name": "advanced-ai-agents",
-    "source": "./advanced-ai-agents/",
-    "description": "Advanced AI agent for complex problem solving and deep analysis with GPT-5 integration",
-    "version": "1.0.0",
-    "author": {
-      "name": "Claude Code Dev Workflows",
-      "url": "https://github.com/cexll/myclaude"
-    },
-    "homepage": "https://github.com/cexll/myclaude",
-    "repository": "https://github.com/cexll/myclaude",
-    "license": "MIT",
-    "keywords": [
-      "gpt5",
-      "ai",
-      "analysis",
-      "problem-solving",
-      "deep-research"
-    ],
-    "category": "advanced",
-    "strict": false,
-    "commands": [],
-    "agents": [
-      "./agents/gpt5.md"
-    ]
-  },
-  {
-    "name": "requirements-clarity",
-    "source": "./requirements-clarity/",
-    "description": "Transforms vague requirements into actionable PRDs through systematic clarification with 100-point scoring system",
-    "version": "1.0.0",
-    "author": {
-      "name": "Claude Code Dev Workflows",
-      "url": "https://github.com/cexll/myclaude"
-    },
-    "homepage": "https://github.com/cexll/myclaude",
-    "repository": "https://github.com/cexll/myclaude",
-    "license": "MIT",
-    "keywords": [
-      "requirements",
-      "clarification",
-      "prd",
-      "specifications",
-      "quality-gates",
-      "requirements-engineering"
-    ],
-    "category": "essentials",
-    "strict": false,
-    "skills": [
-      "./skills/SKILL.md"
-    ]
-  },
+  {
+    "name": "codex-cli",
+    "source": "./skills/codex/",
```
13 .github/workflows/release.yml (vendored)

```diff
@@ -47,6 +47,10 @@ jobs:
           goarch: amd64
         - goos: darwin
           goarch: arm64
+        - goos: windows
+          goarch: amd64
+        - goos: windows
+          goarch: arm64
 
     steps:
       - name: Checkout code
@@ -58,6 +62,7 @@ jobs:
           go-version: '1.21'
 
       - name: Build binary
+        id: build
         working-directory: codex-wrapper
         env:
           GOOS: ${{ matrix.goos }}
@@ -66,14 +71,18 @@ jobs:
         run: |
           VERSION=${GITHUB_REF#refs/tags/}
           OUTPUT_NAME=codex-wrapper-${{ matrix.goos }}-${{ matrix.goarch }}
+          if [ "${{ matrix.goos }}" = "windows" ]; then
+            OUTPUT_NAME="${OUTPUT_NAME}.exe"
+          fi
           go build -ldflags="-s -w -X main.version=${VERSION}" -o ${OUTPUT_NAME} .
           chmod +x ${OUTPUT_NAME}
+          echo "artifact_path=codex-wrapper/${OUTPUT_NAME}" >> $GITHUB_OUTPUT
 
       - name: Upload artifact
         uses: actions/upload-artifact@v4
         with:
           name: codex-wrapper-${{ matrix.goos }}-${{ matrix.goarch }}
-          path: codex-wrapper/codex-wrapper-${{ matrix.goos }}-${{ matrix.goarch }}
+          path: ${{ steps.build.outputs.artifact_path }}
 
   release:
     name: Create Release
@@ -92,7 +101,7 @@ jobs:
         run: |
           mkdir -p release
           find artifacts -type f -name "codex-wrapper-*" -exec mv {} release/ \;
-          cp install.sh release/
+          cp install.sh install.bat release/
           ls -la release/
 
       - name: Create Release
```
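A release binary can be reproduced locally with the same flags the workflow uses above (a sketch; the tag value `v4.8.2` is only illustrative, taken from the version constant later in this diff):

```bash
# Cross-compile one target exactly as the release job does
cd codex-wrapper
GOOS=darwin GOARCH=arm64 \
  go build -ldflags="-s -w -X main.version=v4.8.2" -o codex-wrapper-darwin-arm64 .
```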
4 .gitignore (vendored)

```diff
@@ -1,2 +1,6 @@
 .claude/
 .claude-trace
+.venv
+.pytest_cache
+__pycache__
+.coverage
```
8 Makefile

```diff
@@ -7,10 +7,12 @@
 help:
 	@echo "Claude Code Multi-Agent Workflow - Quick Deployment"
 	@echo ""
+	@echo "Recommended installation: python3 install.py --install-dir ~/.claude"
+	@echo ""
 	@echo "Usage: make [target]"
 	@echo ""
 	@echo "Targets:"
-	@echo "  install              - Install all configurations to Claude Code"
+	@echo "  install              - LEGACY: install all configurations (prefer install.py)"
 	@echo "  deploy-bmad          - Deploy BMAD workflow (bmad-pilot)"
 	@echo "  deploy-requirements  - Deploy Requirements workflow (requirements-pilot)"
 	@echo "  deploy-essentials    - Deploy Development Essentials workflow"
@@ -36,6 +38,8 @@ OUTPUT_STYLES_DIR = output-styles
 
 # Install all configurations
 install: deploy-all
+	@echo "⚠️  LEGACY PATH: make install will be removed in future versions."
+	@echo "   Prefer: python3 install.py --install-dir ~/.claude"
 	@echo "✅ Installation complete!"
 
 # Deploy BMAD workflow
@@ -140,4 +144,4 @@ all: deploy-all
 # Version info
 version:
 	@echo "Claude Code Multi-Agent Workflow System v3.1"
-	@echo "BMAD + Requirements-Driven Development"
+	@echo "BMAD + Requirements-Driven Development"
```
403 README.md

@@ -1,128 +1,323 @@

# Claude Code Multi-Agent Workflow System

[](https://smithery.ai/skills?ns=cexll&utm_source=github&utm_medium=badge)

[](https://opensource.org/licenses/MIT)
[](https://claude.ai/code)
[](https://github.com/cexll/myclaude)
[](https://docs.claude.com/en/docs/claude-code/plugins)
[](https://github.com/cexll/myclaude)

> Enterprise-grade agile development automation with AI-powered multi-agent orchestration
> AI-powered development automation with Claude Code + Codex collaboration

[中文文档](README_CN.md) | [Documentation](docs/)

## Core Concept: Claude Code + Codex

## 🚀 Quick Start

This system leverages a **dual-agent architecture**:

### Installation

| Role | Agent | Responsibility |
|------|-------|----------------|
| **Orchestrator** | Claude Code | Planning, context gathering, verification, user interaction |
| **Executor** | Codex | Code editing, test execution, file operations |

**Plugin System (Recommended)**
```bash
/plugin marketplace add cexll/myclaude
```

**Why this separation?**
- Claude Code excels at understanding context and orchestrating complex workflows
- Codex excels at focused code generation and execution
- Together they provide better results than either alone
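Concretely, the handoff is small: Claude Code plans and gathers context, then pipes a focused task to `codex-wrapper`, which drives Codex (this is the same invocation shown in the Codex Integration section further down):

```bash
# Orchestrator (Claude Code) delegates a focused edit to the executor (Codex)
codex-wrapper - <<'EOF'
implement @src/auth.ts with JWT validation
EOF
```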
## Quick Start (on Windows, run these commands in PowerShell)

**Traditional Installation**
```bash
git clone https://github.com/cexll/myclaude.git
cd myclaude
make install
python3 install.py --install-dir ~/.claude
```

### Basic Usage

## Workflows Overview

### 1. Dev Workflow (Recommended)

**The primary workflow for most development tasks.**

```bash
# Full agile workflow
/bmad-pilot "Build user authentication with OAuth2 and MFA"

# Lightweight development
/requirements-pilot "Implement JWT token refresh"

# Direct development commands
/code "Add API rate limiting"
/dev "implement user authentication with JWT"
```

## 📦 Plugin Modules

**6-Step Process:**
1. **Requirements Clarification** - Interactive Q&A to clarify scope
2. **Codex Deep Analysis** - Codebase exploration and architecture decisions
3. **Dev Plan Generation** - Structured task breakdown with test requirements
4. **Parallel Execution** - Codex executes tasks concurrently
5. **Coverage Validation** - Enforce ≥90% test coverage (a sample check follows this list)
6. **Completion Summary** - Report with file changes and coverage stats
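The README does not pin down how the coverage gate is measured; for a Go module, one plausible check looks like this (a sketch using standard `go test` coverage tooling; the 90 threshold mirrors step 5 above):

```bash
# Fail the build if total statement coverage drops below 90%
go test ./... -coverprofile=cover.out
total=$(go tool cover -func=cover.out | awk '/^total:/ { sub(/%/, "", $3); print $3 }')
awk -v t="$total" 'BEGIN { exit (t >= 90) ? 0 : 1 }' ||
  { echo "coverage ${total}% is below the 90% gate" >&2; exit 1; }
```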
| Plugin | Description | Key Commands |
|--------|-------------|--------------|
| **[bmad-agile-workflow](docs/BMAD-WORKFLOW.md)** | Complete BMAD methodology with 6 specialized agents | `/bmad-pilot` |
| **[requirements-driven-workflow](docs/REQUIREMENTS-WORKFLOW.md)** | Streamlined requirements-to-code workflow | `/requirements-pilot` |
| **[dev-workflow](dev-workflow/README.md)** | Extremely lightweight end-to-end development workflow | `/dev` |
| **[codex-wrapper](codex-wrapper/)** | Go binary wrapper for Codex CLI integration | `codex-wrapper` |
| **[development-essentials](docs/DEVELOPMENT-COMMANDS.md)** | Core development slash commands | `/code` `/debug` `/test` `/optimize` |
| **[advanced-ai-agents](docs/ADVANCED-AGENTS.md)** | GPT-5 deep reasoning integration | Agent: `gpt5` |
| **[requirements-clarity](docs/REQUIREMENTS-CLARITY.md)** | Automated requirements clarification with 100-point scoring | Auto-activated skill |

**Key Features:**
- Claude Code orchestrates, Codex executes all code changes
- Automatic task parallelization for speed
- Mandatory 90% test coverage gate
- Rollback on failure

## 💡 Use Cases

**BMAD Workflow** - Full agile process automation
- Product requirements → Architecture design → Sprint planning → Development → Code review → QA testing
- Quality gates with 90% thresholds
- Automated document generation

**Requirements Workflow** - Fast prototyping
- Requirements generation → Implementation → Review → Testing
- Lightweight and practical

**Development Commands** - Daily coding
- Direct implementation, debugging, testing, optimization
- No workflow overhead

**Requirements Clarity** - Automated requirements engineering
- Auto-detects vague requirements and initiates clarification
- 100-point quality scoring system
- Generates complete PRD documents

## 🎯 Key Features

- **🤖 Role-Based Agents**: Specialized AI agents for each development phase
- **📊 Quality Gates**: Automatic quality scoring with iterative refinement
- **✅ Approval Points**: User confirmation at critical workflow stages
- **📁 Persistent Artifacts**: All specs saved to `.claude/specs/`
- **🔌 Plugin System**: Native Claude Code plugin support
- **🔄 Flexible Workflows**: Choose full agile or lightweight development
- **🎯 Requirements Clarity**: Automated requirements clarification with quality scoring

## 📚 Documentation

- **[BMAD Workflow Guide](docs/BMAD-WORKFLOW.md)** - Complete methodology and agent roles
- **[Requirements Workflow](docs/REQUIREMENTS-WORKFLOW.md)** - Lightweight development process
- **[Development Commands](docs/DEVELOPMENT-COMMANDS.md)** - Slash command reference
- **[Plugin System](docs/PLUGIN-SYSTEM.md)** - Installation and configuration
- **[Quick Start Guide](docs/QUICK-START.md)** - Get started in 5 minutes

## 🛠️ Installation Methods

**Codex Wrapper** (Go binary for Codex CLI)
```bash
curl -fsSL https://raw.githubusercontent.com/cexll/myclaude/refs/heads/master/install.sh | bash
```

**Method 1: Plugin Install** (One command)
```bash
/plugin install bmad-agile-workflow
```

**Method 2: Make Commands** (Selective installation)
```bash
make deploy-bmad          # BMAD workflow only
make deploy-requirements  # Requirements workflow only
make deploy-all           # Everything
```

**Method 3: Manual Setup**
- Copy `./commands/*.md` to `~/.config/claude/commands/`
- Copy `./agents/*.md` to `~/.config/claude/agents/`

Run `make help` for all options.

## 📄 License

MIT License - see [LICENSE](LICENSE)

## 🙋 Support

- **Issues**: [GitHub Issues](https://github.com/cexll/myclaude/issues)
- **Documentation**: [docs/](docs/)
- **Plugin Guide**: [PLUGIN_README.md](PLUGIN_README.md)

**Best For:** Feature development, refactoring, bug fixes with tests

---

**Transform your development with AI-powered automation** - One command, complete workflow, quality assured.

### 2. BMAD Agile Workflow

**Full enterprise agile methodology with 6 specialized agents.**

```bash
/bmad-pilot "build e-commerce checkout system"
```

**Agents:**
| Agent | Role |
|-------|------|
| Product Owner | Requirements & user stories |
| Architect | System design & tech decisions |
| Tech Lead | Sprint planning & task breakdown |
| Developer | Implementation |
| Code Reviewer | Quality assurance |
| QA Engineer | Testing & validation |

**Process:**
```
Requirements → Architecture → Sprint Plan → Development → Review → QA
     ↓              ↓             ↓             ↓           ↓       ↓
   PRD.md       DESIGN.md     SPRINT.md       Code     REVIEW.md TEST.md
```

**Best For:** Large features, team coordination, enterprise projects

---

### 3. Requirements-Driven Workflow

**Lightweight requirements-to-code pipeline.**

```bash
/requirements-pilot "implement API rate limiting"
```

**Process:**
1. Requirements generation with quality scoring
2. Implementation planning
3. Code generation
4. Review and testing

**Best For:** Quick prototypes, well-defined features

---

### 4. Development Essentials

**Direct commands for daily coding tasks.**

| Command | Purpose |
|---------|---------|
| `/code` | Implement a feature |
| `/debug` | Debug an issue |
| `/test` | Write tests |
| `/review` | Code review |
| `/optimize` | Performance optimization |
| `/refactor` | Code refactoring |
| `/docs` | Documentation |

**Best For:** Quick tasks, no workflow overhead needed

---

## Installation

### Modular Installation (Recommended)

```bash
# Install all enabled modules (dev + essentials by default)
python3 install.py --install-dir ~/.claude

# Install a specific module
python3 install.py --module dev

# List available modules
python3 install.py --list-modules

# Force overwrite existing files
python3 install.py --force
```

### Available Modules

| Module | Default | Description |
|--------|---------|-------------|
| `dev` | ✓ Enabled | Dev workflow + Codex integration |
| `essentials` | ✓ Enabled | Core development commands |
| `bmad` | Disabled | Full BMAD agile workflow |
| `requirements` | Disabled | Requirements-driven workflow |

### What Gets Installed

```
~/.claude/
├── CLAUDE.md                  # Core instructions and role definition
├── commands/                  # Slash commands (/dev, /code, etc.)
├── agents/                    # Agent definitions
├── skills/
│   └── codex/
│       └── SKILL.md           # Codex integration skill
└── installed_modules.json     # Installation status
```

### Configuration

Edit `config.json` to customize:

```json
{
  "version": "1.0",
  "install_dir": "~/.claude",
  "modules": {
    "dev": {
      "enabled": true,
      "operations": [
        {"type": "merge_dir", "source": "dev-workflow"},
        {"type": "copy_file", "source": "memorys/CLAUDE.md", "target": "CLAUDE.md"},
        {"type": "copy_file", "source": "skills/codex/SKILL.md", "target": "skills/codex/SKILL.md"},
        {"type": "run_command", "command": "bash install.sh"}
      ]
    }
  }
}
```

**Operation Types** (a shell-level sketch follows this table):
| Type | Description |
|------|-------------|
| `merge_dir` | Merge subdirectories (commands/, agents/) into the install dir |
| `copy_dir` | Copy an entire directory |
| `copy_file` | Copy a single file to its target path |
| `run_command` | Execute a shell command |
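In shell terms, the four operation types map roughly onto the commands below (an illustrative sketch, not the installer's actual implementation; the paths come from the config example above):

```bash
# merge_dir: merge a module's subdirectories into the install dir
mkdir -p ~/.claude/commands ~/.claude/agents
cp -R dev-workflow/commands/. ~/.claude/commands/
cp -R dev-workflow/agents/.   ~/.claude/agents/

# copy_dir: copy an entire directory
cp -R dev-workflow ~/.claude/dev-workflow

# copy_file: copy a single file to its target path
mkdir -p ~/.claude && cp memorys/CLAUDE.md ~/.claude/CLAUDE.md

# run_command: execute a shell command
bash install.sh
```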
---

## Codex Integration

The `codex` skill enables Claude Code to delegate code execution to Codex CLI.

### Usage in Workflows

```bash
# Codex is invoked via the skill
codex-wrapper - <<'EOF'
implement @src/auth.ts with JWT validation
EOF
```

### Parallel Execution

```bash
codex-wrapper --parallel <<'EOF'
---TASK---
id: backend_api
workdir: /project/backend
---CONTENT---
implement REST endpoints for /api/users

---TASK---
id: frontend_ui
workdir: /project/frontend
dependencies: backend_api
---CONTENT---
create React components consuming the API
EOF
```
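The `dependencies` field makes `frontend_ui` wait for `backend_api`, while independent tasks run concurrently. The task blocks can also live in a file and be fed on stdin (this file-based form appears in the wrapper's own `--parallel` usage text):

```bash
# Same ---TASK--- format, read from a file instead of a heredoc
codex-wrapper --parallel < tasks.txt
```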
### Install Codex Wrapper

```bash
# Automatic (via dev module)
python3 install.py --module dev

# Manual
bash install.sh
```

#### Windows

Windows installs place `codex-wrapper.exe` in `%USERPROFILE%\bin`.

```powershell
# PowerShell (recommended)
powershell -ExecutionPolicy Bypass -File install.ps1

# Batch (cmd)
install.bat
```

**Add to PATH** (if the installer doesn't add it automatically):

```powershell
# PowerShell - persistent for current user
[Environment]::SetEnvironmentVariable('PATH', "$HOME\bin;" + [Environment]::GetEnvironmentVariable('PATH','User'), 'User')

# PowerShell - current session only
$Env:PATH = "$HOME\bin;$Env:PATH"
```

```batch
REM cmd.exe - persistent for current user
setx PATH "%USERPROFILE%\bin;%PATH%"
```
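After any install method, a quick sanity check works on every platform (the wrapper supports `--version`/`-v`):

```bash
# Confirm the binary resolves on PATH and prints its version
codex-wrapper --version
```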
---

## Workflow Selection Guide

| Scenario | Recommended Workflow |
|----------|---------------------|
| New feature with tests | `/dev` |
| Quick bug fix | `/debug` or `/code` |
| Large multi-sprint feature | `/bmad-pilot` |
| Prototype or POC | `/requirements-pilot` |
| Code review | `/review` |
| Performance issue | `/optimize` |

---

## Troubleshooting

### Common Issues

**Codex wrapper not found:**
```bash
# Check PATH
echo $PATH | grep -q "$HOME/bin" || echo 'export PATH="$HOME/bin:$PATH"' >> ~/.zshrc

# Reinstall
bash install.sh
```

**Permission denied:**
```bash
python3 install.py --install-dir ~/.claude --force
```

**Module not loading:**
```bash
# Check installation status
cat ~/.claude/installed_modules.json

# Reinstall specific module
python3 install.py --module dev --force
```

---

## License

MIT License - see [LICENSE](LICENSE)

## Support

- **Issues**: [GitHub Issues](https://github.com/cexll/myclaude/issues)
- **Documentation**: [docs/](docs/)

---

**Claude Code + Codex = Better Development** - Orchestration meets execution.
394 README_CN.md

@@ -2,121 +2,319 @@

[](https://opensource.org/licenses/MIT)
[](https://claude.ai/code)
[](https://github.com/cexll/myclaude)
[](https://docs.claude.com/en/docs/claude-code/plugins)
[](https://github.com/cexll/myclaude)

> Enterprise-grade agile development automation with AI-powered multi-agent orchestration
> AI-powered development automation: Claude Code + Codex collaboration

[English](README.md) | [Documentation](docs/)

## Core Concept: Claude Code + Codex

## 🚀 Quick Start

This system uses a **dual-agent architecture**:

### Installation

| Role | Agent | Responsibility |
|------|-------|------|
| **Orchestrator** | Claude Code | Planning, context gathering, verification, user interaction |
| **Executor** | Codex | Code editing, test execution, file operations |

**Plugin System (Recommended)**
```bash
/plugin marketplace add cexll/myclaude
```

**Why separate the roles?**
- Claude Code excels at understanding context and orchestrating complex workflows
- Codex excels at focused code generation and execution
- The combination works better than either alone

## Quick Start (on Windows, run these commands in PowerShell)

**Traditional Installation**
```bash
git clone https://github.com/cexll/myclaude.git
cd myclaude
make install
python3 install.py --install-dir ~/.claude
```

### Basic Usage

## Workflows Overview

### 1. Dev Workflow (Recommended)

**The preferred workflow for most development tasks.**

```bash
# Full agile workflow
/bmad-pilot "Build a user authentication system with OAuth2 and multi-factor authentication"

# Lightweight development
/requirements-pilot "Implement JWT token refresh"

# Direct development commands
/code "Add API rate limiting"
/dev "Implement JWT user authentication"
```

## 📦 Plugin Modules

**6-Step Process:**
1. **Requirements Clarification** - Interactive Q&A to pin down scope
2. **Codex Deep Analysis** - Codebase exploration and architecture decisions
3. **Dev Plan Generation** - Structured task breakdown and test requirements
4. **Parallel Execution** - Codex executes tasks concurrently
5. **Coverage Validation** - Enforce ≥90% test coverage
6. **Completion Summary** - Report of file changes and coverage

| Plugin | Description | Key Commands |
|------|------|---------|
| **[bmad-agile-workflow](docs/BMAD-WORKFLOW.md)** | Complete BMAD methodology with 6 specialized agents | `/bmad-pilot` |
| **[requirements-driven-workflow](docs/REQUIREMENTS-WORKFLOW.md)** | Streamlined requirements-to-code workflow | `/requirements-pilot` |
| **[dev-workflow](dev-workflow/README.md)** | Minimal end-to-end development workflow | `/dev` |
| **[development-essentials](docs/DEVELOPMENT-COMMANDS.md)** | Core development slash commands | `/code` `/debug` `/test` `/optimize` |
| **[advanced-ai-agents](docs/ADVANCED-AGENTS.md)** | GPT-5 deep reasoning integration | Agent: `gpt5` |
| **[requirements-clarity](docs/REQUIREMENTS-CLARITY.md)** | Automatic requirements clarification with 100-point quality scoring | Auto-activated skill |

**Key Features:**
- Claude Code orchestrates; Codex executes all code changes
- Automatic task parallelization for speed
- Mandatory 90% test coverage gate
- Automatic rollback on failure

## 💡 Use Cases

**BMAD Workflow** - Full agile process automation
- Product requirements → Architecture design → Sprint planning → Development → Code review → QA testing
- Quality gates with 90% thresholds
- Automatic document generation

**Requirements Workflow** - Fast prototyping
- Requirements generation → Implementation → Review → Testing
- Lightweight and pragmatic

**Development Commands** - Daily coding
- Direct implementation, debugging, testing, optimization
- No workflow overhead

**Requirements Clarity** - Automated requirements engineering
- Auto-detects vague requirements and starts clarification
- 100-point quality scoring system
- Generates complete PRD documents

## 🎯 Key Features

- **🤖 Role-Based Agents**: Specialized AI agents for each development phase
- **📊 Quality Gates**: Automatic quality scoring with iterative refinement
- **✅ Approval Points**: User confirmation at critical workflow stages
- **📁 Persistent Artifacts**: All specs saved to `.claude/specs/`
- **🔌 Plugin System**: Native Claude Code plugin support
- **🔄 Flexible Workflows**: Choose full agile or lightweight development
- **🎯 Requirements Clarity**: Automated requirements clarification with quality scoring

## 📚 Documentation

- **[BMAD Workflow Guide](docs/BMAD-WORKFLOW.md)** - Complete methodology and agent roles
- **[Requirements Workflow](docs/REQUIREMENTS-WORKFLOW.md)** - Lightweight development process
- **[Development Command Reference](docs/DEVELOPMENT-COMMANDS.md)** - Slash command guide
- **[Plugin System](docs/PLUGIN-SYSTEM.md)** - Installation and configuration
- **[Quick Start](docs/QUICK-START.md)** - Get started in 5 minutes

## 🛠️ Installation Methods

**Method 1: Plugin Install** (one command)
```bash
/plugin install bmad-agile-workflow
```

**Method 2: Make Commands** (selective installation)
```bash
make deploy-bmad          # BMAD workflow only
make deploy-requirements  # Requirements workflow only
make deploy-all           # Install everything
```

**Method 3: Manual Setup**
- Copy `./commands/*.md` to `~/.config/claude/commands/`
- Copy `./agents/*.md` to `~/.config/claude/agents/`

Run `make help` to see all options.

## 📄 License

MIT License - see [LICENSE](LICENSE)

## 🙋 Support

- **Issues**: [GitHub Issues](https://github.com/cexll/myclaude/issues)
- **Documentation**: [docs/](docs/)
- **Plugin Guide**: [PLUGIN_README.md](PLUGIN_README.md)

**Best For:** Feature development, refactoring, bug fixes with tests

---

**Transform your development with AI-powered automation** - One command, a complete workflow, quality assured.

### 2. BMAD Agile Workflow

**A complete enterprise agile methodology with 6 specialized agents.**

```bash
/bmad-pilot "Build an e-commerce checkout system"
```

**Agents:**
| Agent | Role |
|-------|------|
| Product Owner | Requirements & user stories |
| Architect | System design & technical decisions |
| Tech Lead | Sprint planning & task breakdown |
| Developer | Implementation |
| Code Reviewer | Quality assurance |
| QA Engineer | Testing & validation |

**Process:**
```
Requirements → Architecture → Sprint Plan → Development → Review → QA
     ↓              ↓             ↓             ↓           ↓       ↓
   PRD.md       DESIGN.md     SPRINT.md       Code     REVIEW.md TEST.md
```

**Best For:** Large features, team coordination, enterprise projects

---

### 3. Requirements-Driven Workflow

**A lightweight requirements-to-code pipeline.**

```bash
/requirements-pilot "Implement API rate limiting"
```

**Process:**
1. Requirements generation with quality scoring
2. Implementation planning
3. Code generation
4. Review and testing

**Best For:** Quick prototypes, well-defined features

---

### 4. Development Essentials

**Direct commands for daily coding tasks.**

| Command | Purpose |
|------|------|
| `/code` | Implement a feature |
| `/debug` | Debug an issue |
| `/test` | Write tests |
| `/review` | Code review |
| `/optimize` | Performance optimization |
| `/refactor` | Code refactoring |
| `/docs` | Write documentation |

**Best For:** Quick tasks with no workflow overhead

---

## Installation

### Modular Installation (Recommended)

```bash
# Install all enabled modules (default: dev + essentials)
python3 install.py --install-dir ~/.claude

# Install a specific module
python3 install.py --module dev

# List available modules
python3 install.py --list-modules

# Force-overwrite existing files
python3 install.py --force
```

### Available Modules

| Module | Default | Description |
|------|------|------|
| `dev` | ✓ Enabled | Dev workflow + Codex integration |
| `essentials` | ✓ Enabled | Core development commands |
| `bmad` | Disabled | Full BMAD agile workflow |
| `requirements` | Disabled | Requirements-driven workflow |

### What Gets Installed

```
~/.claude/
├── CLAUDE.md                  # Core instructions and role definition
├── commands/                  # Slash commands (/dev, /code, etc.)
├── agents/                    # Agent definitions
├── skills/
│   └── codex/
│       └── SKILL.md           # Codex integration skill
└── installed_modules.json     # Installation status
```

### Configuration

Edit `config.json` to customize:

```json
{
  "version": "1.0",
  "install_dir": "~/.claude",
  "modules": {
    "dev": {
      "enabled": true,
      "operations": [
        {"type": "merge_dir", "source": "dev-workflow"},
        {"type": "copy_file", "source": "memorys/CLAUDE.md", "target": "CLAUDE.md"},
        {"type": "copy_file", "source": "skills/codex/SKILL.md", "target": "skills/codex/SKILL.md"},
        {"type": "run_command", "command": "bash install.sh"}
      ]
    }
  }
}
```

**Operation Types:**
| Type | Description |
|------|------|
| `merge_dir` | Merge subdirectories (commands/, agents/) into the install dir |
| `copy_dir` | Copy an entire directory |
| `copy_file` | Copy a single file to its target path |
| `run_command` | Execute a shell command |

---

## Codex Integration

The `codex` skill lets Claude Code delegate code execution to the Codex CLI.

### Usage in Workflows

```bash
# Codex is invoked via the skill
codex-wrapper - <<'EOF'
implement JWT validation in @src/auth.ts
EOF
```

### Parallel Execution

```bash
codex-wrapper --parallel <<'EOF'
---TASK---
id: backend_api
workdir: /project/backend
---CONTENT---
implement REST endpoints for /api/users

---TASK---
id: frontend_ui
workdir: /project/frontend
dependencies: backend_api
---CONTENT---
create React components that consume the API
EOF
```

### Install Codex Wrapper

```bash
# Automatic (via the dev module)
python3 install.py --module dev

# Manual
bash install.sh
```

#### Windows

On Windows the installer places `codex-wrapper.exe` in `%USERPROFILE%\bin`.

```powershell
# PowerShell (recommended)
powershell -ExecutionPolicy Bypass -File install.ps1

# Batch (cmd)
install.bat
```

**Add to PATH** (if the installer doesn't do it automatically):

```powershell
# PowerShell - persistent for the current user
[Environment]::SetEnvironmentVariable('PATH', "$HOME\bin;" + [Environment]::GetEnvironmentVariable('PATH','User'), 'User')

# PowerShell - current session only
$Env:PATH = "$HOME\bin;$Env:PATH"
```

```batch
REM cmd.exe - persistent for the current user
setx PATH "%USERPROFILE%\bin;%PATH%"
```

---

## Workflow Selection Guide

| Scenario | Recommended Workflow |
|------|----------|
| New feature with tests | `/dev` |
| Quick bug fix | `/debug` or `/code` |
| Large multi-sprint feature | `/bmad-pilot` |
| Prototype or POC | `/requirements-pilot` |
| Code review | `/review` |
| Performance issue | `/optimize` |

---

## Troubleshooting

### Common Issues

**Codex wrapper not found:**
```bash
# Check PATH
echo $PATH | grep -q "$HOME/bin" || echo 'export PATH="$HOME/bin:$PATH"' >> ~/.zshrc

# Reinstall
bash install.sh
```

**Permission denied:**
```bash
python3 install.py --install-dir ~/.claude --force
```

**Module not loading:**
```bash
# Check installation status
cat ~/.claude/installed_modules.json

# Reinstall a specific module
python3 install.py --module dev --force
```

---

## License

MIT License - see [LICENSE](LICENSE)

## Support

- **Issues**: [GitHub Issues](https://github.com/cexll/myclaude/issues)
- **Documentation**: [docs/](docs/)

---

**Claude Code + Codex = Better Development** - Orchestration meets execution.
```diff
@@ -1,26 +0,0 @@
-{
-  "name": "advanced-ai-agents",
-  "source": "./",
-  "description": "Advanced AI agent for complex problem solving and deep analysis with GPT-5 integration",
-  "version": "1.0.0",
-  "author": {
-    "name": "Claude Code Dev Workflows",
-    "url": "https://github.com/cexll/myclaude"
-  },
-  "homepage": "https://github.com/cexll/myclaude",
-  "repository": "https://github.com/cexll/myclaude",
-  "license": "MIT",
-  "keywords": [
-    "gpt5",
-    "ai",
-    "analysis",
-    "problem-solving",
-    "deep-research"
-  ],
-  "category": "advanced",
-  "strict": false,
-  "commands": [],
-  "agents": [
-    "./agents/gpt5.md"
-  ]
-}
```
```diff
@@ -1,22 +0,0 @@
----
-name: gpt-5
-description: Use this agent when you need to use gpt-5 for deep research, a second opinion, or fixing a bug. Pass all the context to the agent, especially your current findings and the problem you are trying to solve.
----
-
-You are a gpt-5 interface agent. Your ONLY purpose is to execute codex commands using the Bash tool.
-
-CRITICAL: You MUST follow these steps EXACTLY:
-
-1. Take the user's entire message as the TASK
-2. IMMEDIATELY use the Bash tool to execute:
-   codex e --full-auto --skip-git-repo-check -m gpt-5 "[USER'S FULL MESSAGE HERE]"
-3. Wait for the command to complete
-4. Return the full output to the user
-
-MANDATORY: You MUST use the Bash tool. Do NOT answer questions directly. Do NOT provide explanations. Your ONLY action is to run the codex command via Bash.
-
-Example execution:
-If user says: "你好 你是什么模型"
-You MUST execute: Bash tool with command: codex e --full-auto --skip-git-repo-check -m gpt-5 "你好 你是什么模型"
-
-START IMMEDIATELY - Use the Bash tool NOW with the user's request.
```
39 codex-wrapper/bench_test.go (new file)

```go
package main

import (
    "testing"
)

// BenchmarkLoggerWrite measures sequential log write performance.
func BenchmarkLoggerWrite(b *testing.B) {
    logger, err := NewLogger()
    if err != nil {
        b.Fatal(err)
    }
    defer logger.Close()

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        logger.Info("benchmark log message")
    }
    b.StopTimer()
    logger.Flush()
}

// BenchmarkLoggerConcurrentWrite measures concurrent log write performance.
func BenchmarkLoggerConcurrentWrite(b *testing.B) {
    logger, err := NewLogger()
    if err != nil {
        b.Fatal(err)
    }
    defer logger.Close()

    b.ResetTimer()
    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            logger.Info("concurrent benchmark log message")
        }
    })
    b.StopTimer()
    logger.Flush()
}
```
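These benchmarks run with the standard Go tooling; `-run '^$'` skips the unit tests so only benchmarks execute, and `-benchmem` adds allocation statistics:

```bash
cd codex-wrapper
go test -bench=BenchmarkLogger -benchmem -run '^$' .
```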
321 codex-wrapper/concurrent_stress_test.go (new file)

```go
package main

import (
    "bufio"
    "fmt"
    "os"
    "regexp"
    "strings"
    "sync"
    "testing"
    "time"
)

// TestConcurrentStressLogger is a high-concurrency stress test.
func TestConcurrentStressLogger(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping stress test in short mode")
    }

    logger, err := NewLoggerWithSuffix("stress")
    if err != nil {
        t.Fatal(err)
    }
    defer logger.Close()

    t.Logf("Log file: %s", logger.Path())

    const (
        numGoroutines  = 100  // number of concurrent goroutines
        logsPerRoutine = 1000 // log entries written per goroutine
        totalExpected  = numGoroutines * logsPerRoutine
    )

    var wg sync.WaitGroup
    start := time.Now()

    // Start the concurrent writers.
    for i := 0; i < numGoroutines; i++ {
        wg.Add(1)
        go func(id int) {
            defer wg.Done()
            for j := 0; j < logsPerRoutine; j++ {
                logger.Info(fmt.Sprintf("goroutine-%d-msg-%d", id, j))
            }
        }(i)
    }

    wg.Wait()
    logger.Flush()
    elapsed := time.Since(start)

    // Read the log file back for verification.
    data, err := os.ReadFile(logger.Path())
    if err != nil {
        t.Fatalf("failed to read log file: %v", err)
    }

    lines := strings.Split(strings.TrimSpace(string(data)), "\n")
    actualCount := len(lines)

    t.Logf("Concurrent stress test results:")
    t.Logf("  Goroutines: %d", numGoroutines)
    t.Logf("  Logs per goroutine: %d", logsPerRoutine)
    t.Logf("  Total expected: %d", totalExpected)
    t.Logf("  Total actual: %d", actualCount)
    t.Logf("  Duration: %v", elapsed)
    t.Logf("  Throughput: %.2f logs/sec", float64(totalExpected)/elapsed.Seconds())

    // Verify the number of persisted entries.
    if actualCount < totalExpected/10 {
        t.Errorf("too many logs lost: got %d, want at least %d (10%% of %d)",
            actualCount, totalExpected/10, totalExpected)
    }
    t.Logf("Successfully wrote %d/%d logs (%.1f%%)",
        actualCount, totalExpected, float64(actualCount)/float64(totalExpected)*100)

    // Verify the log line format.
    formatRE := regexp.MustCompile(`^\[\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}\] \[PID:\d+\] INFO: goroutine-`)
    for i, line := range lines[:min(10, len(lines))] {
        if !formatRE.MatchString(line) {
            t.Errorf("line %d has invalid format: %s", i, line)
        }
    }
}

// TestConcurrentBurstLogger simulates bursty traffic.
func TestConcurrentBurstLogger(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping burst test in short mode")
    }

    logger, err := NewLoggerWithSuffix("burst")
    if err != nil {
        t.Fatal(err)
    }
    defer logger.Close()

    t.Logf("Log file: %s", logger.Path())

    const (
        numBursts          = 10
        goroutinesPerBurst = 50
        logsPerGoroutine   = 100
    )

    totalLogs := 0
    start := time.Now()

    // Simulate traffic bursts.
    for burst := 0; burst < numBursts; burst++ {
        var wg sync.WaitGroup
        for i := 0; i < goroutinesPerBurst; i++ {
            wg.Add(1)
            totalLogs += logsPerGoroutine
            go func(b, g int) {
                defer wg.Done()
                for j := 0; j < logsPerGoroutine; j++ {
                    logger.Info(fmt.Sprintf("burst-%d-goroutine-%d-msg-%d", b, g, j))
                }
            }(burst, i)
        }
        wg.Wait()
        time.Sleep(10 * time.Millisecond) // pause between bursts
    }

    logger.Flush()
    elapsed := time.Since(start)

    // Verify.
    data, err := os.ReadFile(logger.Path())
    if err != nil {
        t.Fatalf("failed to read log file: %v", err)
    }

    lines := strings.Split(strings.TrimSpace(string(data)), "\n")
    actualCount := len(lines)

    t.Logf("Burst test results:")
    t.Logf("  Total bursts: %d", numBursts)
    t.Logf("  Goroutines per burst: %d", goroutinesPerBurst)
    t.Logf("  Expected logs: %d", totalLogs)
    t.Logf("  Actual logs: %d", actualCount)
    t.Logf("  Duration: %v", elapsed)
    t.Logf("  Throughput: %.2f logs/sec", float64(totalLogs)/elapsed.Seconds())

    if actualCount < totalLogs/10 {
        t.Errorf("too many logs lost: got %d, want at least %d (10%% of %d)", actualCount, totalLogs/10, totalLogs)
    }
    t.Logf("Successfully wrote %d/%d logs (%.1f%%)",
        actualCount, totalLogs, float64(actualCount)/float64(totalLogs)*100)
}

// TestLoggerChannelCapacity pushes past the channel capacity limit.
func TestLoggerChannelCapacity(t *testing.T) {
    logger, err := NewLoggerWithSuffix("capacity")
    if err != nil {
        t.Fatal(err)
    }
    defer logger.Close()

    const rapidLogs = 2000 // exceeds the channel capacity (1000)

    start := time.Now()
    for i := 0; i < rapidLogs; i++ {
        logger.Info(fmt.Sprintf("rapid-log-%d", i))
    }
    sendDuration := time.Since(start)

    logger.Flush()
    flushDuration := time.Since(start) - sendDuration

    t.Logf("Channel capacity test:")
    t.Logf("  Logs sent: %d", rapidLogs)
    t.Logf("  Send duration: %v", sendDuration)
    t.Logf("  Flush duration: %v", flushDuration)

    // Verify a reasonable fraction was still persisted (non-blocking mode may drop entries).
    data, err := os.ReadFile(logger.Path())
    if err != nil {
        t.Fatal(err)
    }
    lines := strings.Split(strings.TrimSpace(string(data)), "\n")
    actualCount := len(lines)

    if actualCount < rapidLogs/10 {
        t.Errorf("too many logs lost: got %d, want at least %d (10%% of %d)", actualCount, rapidLogs/10, rapidLogs)
    }
    t.Logf("Logs persisted: %d/%d (%.1f%%)", actualCount, rapidLogs, float64(actualCount)/float64(rapidLogs)*100)
}

// TestLoggerMemoryUsage exercises memory and disk usage with large messages.
func TestLoggerMemoryUsage(t *testing.T) {
    logger, err := NewLoggerWithSuffix("memory")
    if err != nil {
        t.Fatal(err)
    }
    defer logger.Close()

    const numLogs = 20000
    longMessage := strings.Repeat("x", 500) // 500-byte message

    start := time.Now()
    for i := 0; i < numLogs; i++ {
        logger.Info(fmt.Sprintf("log-%d-%s", i, longMessage))
    }
    logger.Flush()
    elapsed := time.Since(start)

    // Check the resulting file size.
    info, err := os.Stat(logger.Path())
    if err != nil {
        t.Fatal(err)
    }

    expectedTotalSize := int64(numLogs * 500) // theoretical minimum total bytes
    expectedMinSize := expectedTotalSize / 10 // tolerate up to 90% loss
    actualSize := info.Size()

    t.Logf("Memory/disk usage test:")
    t.Logf("  Logs written: %d", numLogs)
    t.Logf("  Message size: 500 bytes")
    t.Logf("  File size: %.2f MB", float64(actualSize)/1024/1024)
    t.Logf("  Duration: %v", elapsed)
    t.Logf("  Write speed: %.2f MB/s", float64(actualSize)/1024/1024/elapsed.Seconds())
    t.Logf("  Persistence ratio: %.1f%%", float64(actualSize)/float64(expectedTotalSize)*100)

    if actualSize < expectedMinSize {
        t.Errorf("file size too small: got %d bytes, expected at least %d", actualSize, expectedMinSize)
    }
}

// TestLoggerFlushTimeout verifies the Flush timeout mechanism.
func TestLoggerFlushTimeout(t *testing.T) {
    logger, err := NewLoggerWithSuffix("flush")
    if err != nil {
        t.Fatal(err)
    }
    defer logger.Close()

    // Write some entries.
    for i := 0; i < 100; i++ {
        logger.Info(fmt.Sprintf("test-log-%d", i))
    }

    // Flush should complete within a reasonable time.
    start := time.Now()
    logger.Flush()
    duration := time.Since(start)

    t.Logf("Flush duration: %v", duration)

    if duration > 6*time.Second {
        t.Errorf("Flush took too long: %v (expected < 6s)", duration)
    }
}

// TestLoggerOrderPreservation verifies per-goroutine log ordering.
func TestLoggerOrderPreservation(t *testing.T) {
    logger, err := NewLoggerWithSuffix("order")
    if err != nil {
        t.Fatal(err)
    }
    defer logger.Close()

    const numGoroutines = 10
    const logsPerRoutine = 100

    var wg sync.WaitGroup
    for i := 0; i < numGoroutines; i++ {
        wg.Add(1)
        go func(id int) {
            defer wg.Done()
            for j := 0; j < logsPerRoutine; j++ {
                logger.Info(fmt.Sprintf("G%d-SEQ%04d", id, j))
            }
        }(i)
    }

    wg.Wait()
    logger.Flush()

    // Read back and verify each goroutine's sequence order.
    data, err := os.ReadFile(logger.Path())
    if err != nil {
        t.Fatal(err)
    }

    scanner := bufio.NewScanner(strings.NewReader(string(data)))
    sequences := make(map[int][]int) // goroutine ID -> sequence numbers

    for scanner.Scan() {
        line := scanner.Text()
        var gid, seq int
        parts := strings.SplitN(line, " INFO: ", 2)
        if len(parts) != 2 {
            t.Errorf("invalid log format: %s", line)
            continue
        }
        if _, err := fmt.Sscanf(parts[1], "G%d-SEQ%d", &gid, &seq); err == nil {
            sequences[gid] = append(sequences[gid], seq)
        } else {
            t.Errorf("failed to parse sequence from line: %s", line)
        }
    }

    // Verify in-order delivery within each goroutine.
    for gid, seqs := range sequences {
        for i := 0; i < len(seqs)-1; i++ {
            if seqs[i] >= seqs[i+1] {
                t.Errorf("Goroutine %d: out of order at index %d: %d >= %d",
                    gid, i, seqs[i], seqs[i+1])
            }
        }
        if len(seqs) != logsPerRoutine {
            t.Errorf("Goroutine %d: missing logs, got %d, want %d",
                gid, len(seqs), logsPerRoutine)
        }
    }

    t.Logf("Order preservation test: all %d goroutines maintained sequence order", len(sequences))
}
```
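The stress and burst tests check `testing.Short()`, so they can be skipped during quick iteration and run in full when needed:

```bash
cd codex-wrapper
go test -short .                               # skips the stress and burst tests
go test -v -run 'TestConcurrent|TestLogger' .  # runs the full suite verbosely
```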
codex-wrapper/go.mod

```diff
@@ -1,3 +1,3 @@
 module codex-wrapper
 
-go 1.25.3
+go 1.21
```
243 codex-wrapper/logger.go (new file)

```go
package main

import (
    "bufio"
    "context"
    "fmt"
    "os"
    "path/filepath"
    "sync"
    "sync/atomic"
    "time"
)

// Logger writes log messages asynchronously to a temp file.
// It is intentionally minimal: a buffered channel + single worker goroutine
// to avoid contention while keeping ordering guarantees.
type Logger struct {
    path      string
    file      *os.File
    writer    *bufio.Writer
    ch        chan logEntry
    flushReq  chan chan struct{}
    done      chan struct{}
    closed    atomic.Bool
    closeOnce sync.Once
    workerWG  sync.WaitGroup
    pendingWG sync.WaitGroup
}

type logEntry struct {
    level string
    msg   string
}

// NewLogger creates the async logger and starts the worker goroutine.
// The log file is created under os.TempDir() using the required naming scheme.
func NewLogger() (*Logger, error) {
    return NewLoggerWithSuffix("")
}

// NewLoggerWithSuffix creates a logger with an optional suffix in the filename.
// Useful for tests that need isolated log files within the same process.
func NewLoggerWithSuffix(suffix string) (*Logger, error) {
    filename := fmt.Sprintf("codex-wrapper-%d", os.Getpid())
    if suffix != "" {
        filename += "-" + suffix
    }
    filename += ".log"

    path := filepath.Join(os.TempDir(), filename)

    f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
    if err != nil {
        return nil, err
    }

    l := &Logger{
        path:     path,
        file:     f,
        writer:   bufio.NewWriterSize(f, 4096),
        ch:       make(chan logEntry, 1000),
        flushReq: make(chan chan struct{}, 1),
        done:     make(chan struct{}),
    }

    l.workerWG.Add(1)
    go l.run()

    return l, nil
}

// Path returns the underlying log file path (useful for tests/inspection).
func (l *Logger) Path() string {
    if l == nil {
        return ""
    }
    return l.path
}

// Info logs at INFO level.
func (l *Logger) Info(msg string) { l.log("INFO", msg) }

// Warn logs at WARN level.
func (l *Logger) Warn(msg string) { l.log("WARN", msg) }

// Debug logs at DEBUG level.
func (l *Logger) Debug(msg string) { l.log("DEBUG", msg) }

// Error logs at ERROR level.
func (l *Logger) Error(msg string) { l.log("ERROR", msg) }

// Close stops the worker and syncs the log file.
// The log file is NOT removed, allowing inspection after program exit.
// It is safe to call multiple times.
// Returns after a 5-second timeout if the worker doesn't stop gracefully.
func (l *Logger) Close() error {
    if l == nil {
        return nil
    }

    var closeErr error

    l.closeOnce.Do(func() {
        l.closed.Store(true)
        close(l.done)
        close(l.ch)

        // Wait for the worker with a timeout.
        workerDone := make(chan struct{})
        go func() {
            l.workerWG.Wait()
            close(workerDone)
        }()

        select {
        case <-workerDone:
            // Worker stopped gracefully.
        case <-time.After(5 * time.Second):
            // Worker timeout - proceed with cleanup anyway.
            closeErr = fmt.Errorf("logger worker timeout during close")
        }

        if err := l.writer.Flush(); err != nil && closeErr == nil {
            closeErr = err
        }

        if err := l.file.Sync(); err != nil && closeErr == nil {
            closeErr = err
        }

        if err := l.file.Close(); err != nil && closeErr == nil {
            closeErr = err
        }

        // The log file is kept for debugging - NOT removed.
        // Users can manually clean up /tmp/codex-wrapper-*.log files.
    })

    return closeErr
}

// RemoveLogFile removes the log file. Should only be called after Close().
func (l *Logger) RemoveLogFile() error {
    if l == nil {
        return nil
    }
    return os.Remove(l.path)
}

// Flush waits for all pending log entries to be written. Primarily for tests.
// Returns after a 5-second timeout to prevent indefinite blocking.
func (l *Logger) Flush() {
    if l == nil {
        return
    }

    // Wait for pending entries with a timeout.
    done := make(chan struct{})
    go func() {
        l.pendingWG.Wait()
        close(done)
    }()

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    select {
    case <-done:
        // All pending entries processed.
    case <-ctx.Done():
        // Timeout - return without a full flush.
        return
    }

    // Trigger a writer flush.
    flushDone := make(chan struct{})
    select {
    case l.flushReq <- flushDone:
        // Wait for the flush to complete.
        select {
        case <-flushDone:
            // Flush completed.
        case <-time.After(1 * time.Second):
            // Flush timeout.
        }
    case <-l.done:
        // Logger is closing.
    case <-time.After(1 * time.Second):
        // Timeout sending the flush request.
    }
}

func (l *Logger) log(level, msg string) {
    if l == nil {
        return
    }
    if l.closed.Load() {
        return
    }

    entry := logEntry{level: level, msg: msg}
    l.pendingWG.Add(1)

    select {
    case l.ch <- entry:
        // Successfully sent to the channel.
    case <-l.done:
        // Logger is closing; drop this entry.
        l.pendingWG.Done()
        return
    }
}

func (l *Logger) run() {
    defer l.workerWG.Done()

    ticker := time.NewTicker(500 * time.Millisecond)
    defer ticker.Stop()

    for {
        select {
        case entry, ok := <-l.ch:
            if !ok {
                // Channel closed; final flush.
                l.writer.Flush()
                return
            }
            timestamp := time.Now().Format("2006-01-02 15:04:05.000")
            pid := os.Getpid()
            fmt.Fprintf(l.writer, "[%s] [PID:%d] %s: %s\n", timestamp, pid, entry.level, entry.msg)
            l.pendingWG.Done()

        case <-ticker.C:
            l.writer.Flush()

        case flushDone := <-l.flushReq:
            // Explicit flush request - flush the writer and sync to disk.
            l.writer.Flush()
            l.file.Sync()
            close(flushDone)
        }
    }
}
```
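Because `Close` keeps the file (only `run()` in the main program removes it after a normal exit), logs from tests or aborted runs can be inspected afterwards; filenames follow the `codex-wrapper-<pid>.log` scheme from `NewLoggerWithSuffix`:

```bash
# List recent wrapper logs in the temp directory and dump the newest one
ls -lt "${TMPDIR:-/tmp}"/codex-wrapper-*.log | head -n 3
tail -n 50 "$(ls -t "${TMPDIR:-/tmp}"/codex-wrapper-*.log | head -n 1)"
```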
186 codex-wrapper/logger_test.go (new file)

```go
package main

import (
    "bufio"
    "fmt"
    "os"
    "os/exec"
    "path/filepath"
    "strings"
    "sync"
    "testing"
    "time"
)

func TestLoggerCreatesFileWithPID(t *testing.T) {
    tempDir := t.TempDir()
    t.Setenv("TMPDIR", tempDir)

    logger, err := NewLogger()
    if err != nil {
        t.Fatalf("NewLogger() error = %v", err)
    }
    defer logger.Close()

    expectedPath := filepath.Join(tempDir, fmt.Sprintf("codex-wrapper-%d.log", os.Getpid()))
    if logger.Path() != expectedPath {
        t.Fatalf("logger path = %s, want %s", logger.Path(), expectedPath)
    }

    if _, err := os.Stat(expectedPath); err != nil {
        t.Fatalf("log file not created: %v", err)
    }
}

func TestLoggerWritesLevels(t *testing.T) {
    tempDir := t.TempDir()
    t.Setenv("TMPDIR", tempDir)

    logger, err := NewLogger()
    if err != nil {
        t.Fatalf("NewLogger() error = %v", err)
    }
    defer logger.Close()

    logger.Info("info message")
    logger.Warn("warn message")
    logger.Debug("debug message")
    logger.Error("error message")

    logger.Flush()

    data, err := os.ReadFile(logger.Path())
    if err != nil {
        t.Fatalf("failed to read log file: %v", err)
    }

    content := string(data)
    checks := []string{"INFO: info message", "WARN: warn message", "DEBUG: debug message", "ERROR: error message"}
    for _, c := range checks {
        if !strings.Contains(content, c) {
            t.Fatalf("log file missing entry %q, content: %s", c, content)
        }
    }
}

func TestLoggerCloseRemovesFileAndStopsWorker(t *testing.T) {
    tempDir := t.TempDir()
    t.Setenv("TMPDIR", tempDir)

    logger, err := NewLogger()
    if err != nil {
        t.Fatalf("NewLogger() error = %v", err)
    }

    logger.Info("before close")
    logger.Flush()

    logPath := logger.Path()

    if err := logger.Close(); err != nil {
        t.Fatalf("Close() returned error: %v", err)
    }

    // After recent changes, the log file is kept for debugging - NOT removed.
    if _, err := os.Stat(logPath); os.IsNotExist(err) {
        t.Fatalf("log file should exist after Close for debugging, but got IsNotExist")
    }

    // Clean up manually for the test.
    defer os.Remove(logPath)

    done := make(chan struct{})
    go func() {
        logger.workerWG.Wait()
        close(done)
    }()

    select {
    case <-done:
    case <-time.After(200 * time.Millisecond):
        t.Fatalf("worker goroutine did not exit after Close")
    }
}

func TestLoggerConcurrentWritesSafe(t *testing.T) {
    tempDir := t.TempDir()
    t.Setenv("TMPDIR", tempDir)

    logger, err := NewLogger()
    if err != nil {
        t.Fatalf("NewLogger() error = %v", err)
    }
    defer logger.Close()

    const goroutines = 10
    const perGoroutine = 50

    var wg sync.WaitGroup
    wg.Add(goroutines)

    for i := 0; i < goroutines; i++ {
        go func(id int) {
            defer wg.Done()
            for j := 0; j < perGoroutine; j++ {
                logger.Debug(fmt.Sprintf("g%d-%d", id, j))
            }
        }(i)
    }

    wg.Wait()
    logger.Flush()

    f, err := os.Open(logger.Path())
    if err != nil {
        t.Fatalf("failed to open log file: %v", err)
    }
    defer f.Close()

    scanner := bufio.NewScanner(f)
    count := 0
    for scanner.Scan() {
        count++
    }
    if err := scanner.Err(); err != nil {
        t.Fatalf("scanner error: %v", err)
    }

    expected := goroutines * perGoroutine
    if count != expected {
        t.Fatalf("unexpected log line count: got %d, want %d", count, expected)
    }
}

func TestLoggerTerminateProcessActive(t *testing.T) {
    cmd := exec.Command("sleep", "5")
    if err := cmd.Start(); err != nil {
        t.Skipf("cannot start sleep command: %v", err)
    }

    timer := terminateProcess(cmd)
    if timer == nil {
        t.Fatalf("terminateProcess returned nil timer for active process")
    }
    defer timer.Stop()

    done := make(chan error, 1)
    go func() {
        done <- cmd.Wait()
    }()

    select {
    case <-time.After(500 * time.Millisecond):
        t.Fatalf("process not terminated promptly")
    case <-done:
    }

    // Force the timer callback to run immediately to cover the kill branch.
    timer.Reset(0)
    time.Sleep(10 * time.Millisecond)
}

// Reuse the existing coverage suite so the focused TestLogger run still exercises
// the rest of the codebase and keeps coverage high.
func TestLoggerCoverageSuite(t *testing.T) {
    TestParseJSONStream_CoverageSuite(t)
}
```
```diff
@@ -5,35 +5,43 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
 	"os"
 	"os/exec"
 	"os/signal"
 	"runtime"
 	"sort"
 	"strconv"
 	"strings"
 	"sync"
 	"sync/atomic"
 	"syscall"
 	"time"
 )
 
 const (
-	version           = "1.0.0"
-	defaultWorkdir    = "."
-	defaultTimeout    = 7200 // seconds
-	forceKillDelay    = 5    // seconds
-	stdinSpecialChars = "\n\\\"'`$"
+	version            = "4.8.2"
+	defaultWorkdir     = "."
+	defaultTimeout     = 7200 // seconds
+	codexLogLineLimit  = 1000
+	stdinSpecialChars  = "\n\\\"'`$"
+	stderrCaptureLimit = 4 * 1024
 )
 
 // Test hooks for dependency injection
 var (
-	stdinReader      io.Reader = os.Stdin
-	isTerminalFn               = defaultIsTerminal
-	codexCommand               = "codex"
-	buildCodexArgsFn           = buildCodexArgs
-	commandContext             = exec.CommandContext
-	jsonMarshal                = json.Marshal
+	stdinReader  io.Reader = os.Stdin
+	isTerminalFn           = defaultIsTerminal
+	codexCommand           = "codex"
+	cleanupHook  func()
+	loggerPtr    atomic.Pointer[Logger]
+
+	buildCodexArgsFn = buildCodexArgs
+	commandContext   = exec.CommandContext
+	jsonMarshal      = json.Marshal
+	forceKillDelay   = 5 // seconds - made variable for testability
 )
 
 // Config holds CLI configuration
@@ -352,8 +360,8 @@ func main() {
 }
 
 // run is the main logic, returns exit code for testability
-func run() int {
-	// Handle --version and --help first
+func run() (exitCode int) {
+	// Handle --version and --help first (no logger needed)
 	if len(os.Args) > 1 {
 		switch os.Args[1] {
 		case "--version", "-v":
@@ -362,8 +370,46 @@
 		case "--help", "-h":
 			printHelp()
 			return 0
 		}
 	}
 
+	// Initialize logger for all other commands
+	logger, err := NewLogger()
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "ERROR: failed to initialize logger: %v\n", err)
+		return 1
+	}
+	setLogger(logger)
+
+	defer func() {
+		logger := activeLogger()
+		if logger != nil {
+			logger.Flush()
+		}
+		if err := closeLogger(); err != nil {
+			fmt.Fprintf(os.Stderr, "ERROR: failed to close logger: %v\n", err)
+		}
+		// Always remove log file after completion
+		if logger != nil {
+			if err := logger.RemoveLogFile(); err != nil && !os.IsNotExist(err) {
+				// Silently ignore removal errors
+			}
+		}
+	}()
+	defer runCleanupHook()
+
+	// Handle remaining commands
 	if len(os.Args) > 1 {
 		switch os.Args[1] {
 		case "--parallel":
 			// Parallel mode: read task config from stdin
+			if len(os.Args) > 2 {
+				fmt.Fprintln(os.Stderr, "ERROR: --parallel reads its task configuration from stdin and does not accept additional arguments.")
+				fmt.Fprintln(os.Stderr, "Usage examples:")
+				fmt.Fprintln(os.Stderr, "  codex-wrapper --parallel < tasks.txt")
+				fmt.Fprintln(os.Stderr, "  echo '...' | codex-wrapper --parallel")
+				fmt.Fprintln(os.Stderr, "  codex-wrapper --parallel <<'EOF'")
+				return 1
+			}
 			data, err := io.ReadAll(stdinReader)
 			if err != nil {
 				fmt.Fprintf(os.Stderr, "ERROR: failed to read stdin: %v\n", err)
@@ -386,7 +432,7 @@
 			results := executeConcurrent(layers, timeoutSec)
 			fmt.Println(generateFinalOutput(results))
 
-			exitCode := 0
+			exitCode = 0
 			for _, res := range results {
 				if res.ExitCode != 0 {
 					exitCode = res.ExitCode
@@ -410,7 +456,6 @@
 	logInfo(fmt.Sprintf("Timeout: %ds", timeoutSec))
 	cfg.Timeout = timeoutSec
 
-	// Determine task text and stdin mode
 	var taskText string
 	var piped bool
 
@@ -428,7 +473,11 @@
 		}
 		piped = !isTerminal()
 	} else {
-		pipedTask := readPipedTask()
+		pipedTask, err := readPipedTask()
+		if err != nil {
+			logError("Failed to read piped stdin: " + err.Error())
+			return 1
+		}
 		piped = pipedTask != ""
 		if piped {
 			taskText = pipedTask
@@ -439,6 +488,18 @@
 
 	useStdin := cfg.ExplicitStdin || shouldUseStdin(taskText, piped)
 
+	targetArg := taskText
+	if useStdin {
+		targetArg = "-"
+	}
+	codexArgs := buildCodexArgsFn(cfg, targetArg)
+
+	// Print startup information to stderr
+	fmt.Fprintf(os.Stderr, "[codex-wrapper]\n")
+	fmt.Fprintf(os.Stderr, "  Command: %s %s\n", codexCommand, strings.Join(codexArgs, " "))
+	fmt.Fprintf(os.Stderr, "  PID: %d\n", os.Getpid())
+	fmt.Fprintf(os.Stderr, "  Log: %s\n", logger.Path())
+
 	if useStdin {
 		var reasons []string
 		if piped {
@@ -489,10 +550,7 @@
 		return result.ExitCode
 	}
 
-	// Output agent_message
 	fmt.Println(result.Message)
-
-	// Output session_id if present
 	if result.SessionID != "" {
 		fmt.Printf("\n---\nSESSION_ID: %s\n", result.SessionID)
```
|
||||
}
|
||||
@@ -506,11 +564,8 @@ func parseArgs() (*Config, error) {
|
||||
return nil, fmt.Errorf("task required")
|
||||
}
|
||||
|
||||
cfg := &Config{
|
||||
WorkDir: defaultWorkdir,
|
||||
}
|
||||
cfg := &Config{WorkDir: defaultWorkdir}
|
||||
|
||||
// Check for resume mode
|
||||
if args[0] == "resume" {
|
||||
if len(args) < 3 {
|
||||
return nil, fmt.Errorf("resume mode requires: resume <session_id> <task>")
|
||||
@@ -534,19 +589,22 @@ func parseArgs() (*Config, error) {
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func readPipedTask() string {
|
||||
func readPipedTask() (string, error) {
|
||||
if isTerminal() {
|
||||
logInfo("Stdin is tty, skipping pipe read")
|
||||
return ""
|
||||
return "", nil
|
||||
}
|
||||
logInfo("Reading from stdin pipe...")
|
||||
data, err := io.ReadAll(stdinReader)
|
||||
if err != nil || len(data) == 0 {
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("read stdin: %w", err)
|
||||
}
|
||||
if len(data) == 0 {
|
||||
logInfo("Stdin pipe returned empty data")
|
||||
return ""
|
||||
return "", nil
|
||||
}
|
||||
logInfo(fmt.Sprintf("Read %d bytes from stdin pipe", len(data)))
|
||||
return string(data)
|
||||
return string(data), nil
|
||||
}
|
||||
|
||||
func shouldUseStdin(taskText string, piped bool) bool {
|
||||
@@ -579,10 +637,22 @@ func buildCodexArgs(cfg *Config, targetArg string) []string {
|
||||
}
|
||||
}
|
||||
|
||||
type parseResult struct {
|
||||
message string
|
||||
threadID string
|
||||
}
|
||||
|
||||
func runCodexTask(taskSpec TaskSpec, silent bool, timeoutSec int) TaskResult {
|
||||
result := TaskResult{
|
||||
TaskID: taskSpec.ID,
|
||||
}
|
||||
return runCodexTaskWithContext(context.Background(), taskSpec, nil, false, silent, timeoutSec)
|
||||
}
|
||||
|
||||
func runCodexProcess(parentCtx context.Context, codexArgs []string, taskText string, useStdin bool, timeoutSec int) (message, threadID string, exitCode int) {
|
||||
res := runCodexTaskWithContext(parentCtx, TaskSpec{Task: taskText, WorkDir: defaultWorkdir, Mode: "new", UseStdin: useStdin}, codexArgs, true, false, timeoutSec)
|
||||
return res.Message, res.SessionID, res.ExitCode
|
||||
}
|
||||
|
||||
func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, customArgs []string, useCustomArgs bool, silent bool, timeoutSec int) TaskResult {
|
||||
result := TaskResult{TaskID: taskSpec.ID}
|
||||
|
||||
cfg := &Config{
|
||||
Mode: taskSpec.Mode,
|
||||
@@ -603,26 +673,99 @@ func runCodexTask(taskSpec TaskSpec, silent bool, timeoutSec int) TaskResult {
|
||||
targetArg = "-"
|
||||
}
|
||||
|
||||
codexArgs := buildCodexArgsFn(cfg, targetArg)
|
||||
|
||||
logInfoFn := logInfo
|
||||
logWarnFn := logWarn
|
||||
logErrorFn := logError
|
||||
stderrWriter := io.Writer(os.Stderr)
|
||||
if silent {
|
||||
logInfoFn = func(string) {}
|
||||
logWarnFn = func(string) {}
|
||||
logErrorFn = func(string) {}
|
||||
stderrWriter = io.Discard
|
||||
var codexArgs []string
|
||||
if useCustomArgs {
|
||||
codexArgs = customArgs
|
||||
} else {
|
||||
codexArgs = buildCodexArgsFn(cfg, targetArg)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutSec)*time.Second)
|
||||
prefixMsg := func(msg string) string {
|
||||
if taskSpec.ID == "" {
|
||||
return msg
|
||||
}
|
||||
return fmt.Sprintf("[Task: %s] %s", taskSpec.ID, msg)
|
||||
}
|
||||
|
||||
var logInfoFn func(string)
|
||||
var logWarnFn func(string)
|
||||
var logErrorFn func(string)
|
||||
|
||||
if silent {
|
||||
// Silent mode: only persist to file when available; avoid stderr noise.
|
||||
logInfoFn = func(msg string) {
|
||||
if logger := activeLogger(); logger != nil {
|
||||
logger.Info(prefixMsg(msg))
|
||||
}
|
||||
}
|
||||
logWarnFn = func(msg string) {
|
||||
if logger := activeLogger(); logger != nil {
|
||||
logger.Warn(prefixMsg(msg))
|
||||
}
|
||||
}
|
||||
logErrorFn = func(msg string) {
|
||||
if logger := activeLogger(); logger != nil {
|
||||
logger.Error(prefixMsg(msg))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logInfoFn = func(msg string) { logInfo(prefixMsg(msg)) }
|
||||
logWarnFn = func(msg string) { logWarn(prefixMsg(msg)) }
|
||||
logErrorFn = func(msg string) { logError(prefixMsg(msg)) }
|
||||
}
|
||||
|
||||
stderrBuf := &tailBuffer{limit: stderrCaptureLimit}
|
||||
|
||||
var stdoutLogger *logWriter
|
||||
var stderrLogger *logWriter
|
||||
|
||||
var tempLogger *Logger
|
||||
if silent && activeLogger() == nil {
|
||||
if l, err := NewLogger(); err == nil {
|
||||
setLogger(l)
|
||||
tempLogger = l
|
||||
}
|
||||
}
|
||||
defer func() {
|
||||
if tempLogger != nil {
|
||||
closeLogger()
|
||||
}
|
||||
}()
|
||||
|
||||
if !silent {
|
||||
stdoutLogger = newLogWriter("CODEX_STDOUT: ", codexLogLineLimit)
|
||||
stderrLogger = newLogWriter("CODEX_STDERR: ", codexLogLineLimit)
|
||||
}
|
||||
|
||||
ctx := parentCtx
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSec)*time.Second)
|
||||
defer cancel()
|
||||
ctx, stop := signal.NotifyContext(ctx, syscall.SIGINT, syscall.SIGTERM)
|
||||
defer stop()
|
||||
|
||||
attachStderr := func(msg string) string {
|
||||
return fmt.Sprintf("%s; stderr: %s", msg, stderrBuf.String())
|
||||
}
|
||||
|
||||
cmd := commandContext(ctx, codexCommand, codexArgs...)
|
||||
cmd.Stderr = stderrWriter
|
||||
|
||||
// Setup stdin if needed
|
||||
stderrWriters := []io.Writer{stderrBuf}
|
||||
if stderrLogger != nil {
|
||||
stderrWriters = append(stderrWriters, stderrLogger)
|
||||
}
|
||||
if !silent {
|
||||
stderrWriters = append([]io.Writer{os.Stderr}, stderrWriters...)
|
||||
}
|
||||
if len(stderrWriters) == 1 {
|
||||
cmd.Stderr = stderrWriters[0]
|
||||
} else {
|
||||
cmd.Stderr = io.MultiWriter(stderrWriters...)
|
||||
}
|
||||
|
||||
var stdinPipe io.WriteCloser
|
||||
var err error
|
||||
if useStdin {
|
||||
@@ -630,90 +773,120 @@ func runCodexTask(taskSpec TaskSpec, silent bool, timeoutSec int) TaskResult {
|
||||
if err != nil {
|
||||
logErrorFn("Failed to create stdin pipe: " + err.Error())
|
||||
result.ExitCode = 1
|
||||
result.Error = "failed to create stdin pipe: " + err.Error()
|
||||
result.Error = attachStderr("failed to create stdin pipe: " + err.Error())
|
||||
return result
|
||||
}
|
||||
}
|
||||
|
||||
// Setup stdout
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
logErrorFn("Failed to create stdout pipe: " + err.Error())
|
||||
result.ExitCode = 1
|
||||
result.Error = "failed to create stdout pipe: " + err.Error()
|
||||
result.Error = attachStderr("failed to create stdout pipe: " + err.Error())
|
||||
return result
|
||||
}
|
||||
|
||||
stdoutReader := io.Reader(stdout)
|
||||
if stdoutLogger != nil {
|
||||
stdoutReader = io.TeeReader(stdout, stdoutLogger)
|
||||
}
|
||||
|
||||
logInfoFn(fmt.Sprintf("Starting codex with args: codex %s...", strings.Join(codexArgs[:min(5, len(codexArgs))], " ")))
|
||||
|
||||
// Start process
|
||||
if err := cmd.Start(); err != nil {
|
||||
if strings.Contains(err.Error(), "executable file not found") {
|
||||
logErrorFn("codex command not found in PATH")
|
||||
result.ExitCode = 127
|
||||
result.Error = "codex command not found in PATH"
|
||||
result.Error = attachStderr("codex command not found in PATH")
|
||||
return result
|
||||
}
|
||||
logErrorFn("Failed to start codex: " + err.Error())
|
||||
result.ExitCode = 1
|
||||
result.Error = "failed to start codex: " + err.Error()
|
||||
result.Error = attachStderr("failed to start codex: " + err.Error())
|
||||
return result
|
||||
}
|
||||
logInfoFn(fmt.Sprintf("Process started with PID: %d", cmd.Process.Pid))
|
||||
|
||||
// Write to stdin if needed
|
||||
logInfoFn(fmt.Sprintf("Starting codex with PID: %d", cmd.Process.Pid))
|
||||
if logger := activeLogger(); logger != nil {
|
||||
logInfoFn(fmt.Sprintf("Log capturing to: %s", logger.Path()))
|
||||
}
|
||||
|
||||
if useStdin && stdinPipe != nil {
|
||||
logInfoFn(fmt.Sprintf("Writing %d chars to stdin...", len(taskSpec.Task)))
|
||||
go func() {
|
||||
go func(data string) {
|
||||
defer stdinPipe.Close()
|
||||
io.WriteString(stdinPipe, taskSpec.Task)
|
||||
}()
|
||||
_, _ = io.WriteString(stdinPipe, data)
|
||||
}(taskSpec.Task)
|
||||
logInfoFn("Stdin closed")
|
||||
}
|
||||
|
||||
forwardSignals(ctx, cmd, logErrorFn)
|
||||
waitCh := make(chan error, 1)
|
||||
go func() { waitCh <- cmd.Wait() }()
|
||||
|
||||
logInfoFn("Reading stdout...")
|
||||
parseCh := make(chan parseResult, 1)
|
||||
go func() {
|
||||
msg, tid := parseJSONStreamWithLog(stdoutReader, logWarnFn, logInfoFn)
|
||||
parseCh <- parseResult{message: msg, threadID: tid}
|
||||
}()
|
||||
|
||||
// Parse JSON stream
|
||||
message, threadID := parseJSONStreamWithWarn(stdout, logWarnFn)
|
||||
var waitErr error
|
||||
var forceKillTimer *time.Timer
|
||||
|
||||
// Wait for process to complete
|
||||
err = cmd.Wait()
|
||||
select {
|
||||
case waitErr = <-waitCh:
|
||||
case <-ctx.Done():
|
||||
logErrorFn(cancelReason(ctx))
|
||||
forceKillTimer = terminateProcess(cmd)
|
||||
waitErr = <-waitCh
|
||||
}
|
||||
|
||||
// Check for timeout
|
||||
if ctx.Err() == context.DeadlineExceeded {
|
||||
logErrorFn("Codex execution timeout")
|
||||
if cmd.Process != nil {
|
||||
cmd.Process.Kill()
|
||||
if forceKillTimer != nil {
|
||||
forceKillTimer.Stop()
|
||||
}
|
||||
|
||||
parsed := <-parseCh
|
||||
|
||||
if ctxErr := ctx.Err(); ctxErr != nil {
|
||||
if errors.Is(ctxErr, context.DeadlineExceeded) {
|
||||
result.ExitCode = 124
|
||||
result.Error = attachStderr("codex execution timeout")
|
||||
return result
|
||||
}
|
||||
result.ExitCode = 124
|
||||
result.Error = "codex execution timeout"
|
||||
result.ExitCode = 130
|
||||
result.Error = attachStderr("execution cancelled")
|
||||
return result
|
||||
}
|
||||
|
||||
// Check exit code
|
||||
if err != nil {
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
if waitErr != nil {
|
||||
if exitErr, ok := waitErr.(*exec.ExitError); ok {
|
||||
code := exitErr.ExitCode()
|
||||
logErrorFn(fmt.Sprintf("Codex exited with status %d", code))
|
||||
result.ExitCode = code
|
||||
result.Error = fmt.Sprintf("codex exited with status %d", code)
|
||||
result.Error = attachStderr(fmt.Sprintf("codex exited with status %d", code))
|
||||
return result
|
||||
}
|
||||
logErrorFn("Codex error: " + err.Error())
|
||||
logErrorFn("Codex error: " + waitErr.Error())
|
||||
result.ExitCode = 1
|
||||
result.Error = "codex error: " + err.Error()
|
||||
result.Error = attachStderr("codex error: " + waitErr.Error())
|
||||
return result
|
||||
}
|
||||
|
||||
message := parsed.message
|
||||
threadID := parsed.threadID
|
||||
if message == "" {
|
||||
logErrorFn("Codex completed without agent_message output")
|
||||
result.ExitCode = 1
|
||||
result.Error = "codex completed without agent_message output"
|
||||
result.Error = attachStderr("codex completed without agent_message output")
|
||||
return result
|
||||
}
|
||||
|
||||
if stdoutLogger != nil {
|
||||
stdoutLogger.Flush()
|
||||
}
|
||||
if stderrLogger != nil {
|
||||
stderrLogger.Flush()
|
||||
}
|
||||
|
||||
result.ExitCode = 0
|
||||
result.Message = message
|
||||
result.SessionID = threadID
|
||||
@@ -721,45 +894,125 @@ func runCodexTask(taskSpec TaskSpec, silent bool, timeoutSec int) TaskResult {
|
||||
return result
|
||||
}
|
||||
|
||||
type tailBuffer struct {
|
||||
limit int
|
||||
data []byte
|
||||
}
|
||||
|
||||
func (b *tailBuffer) Write(p []byte) (int, error) {
|
||||
if b.limit <= 0 {
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
if len(p) >= b.limit {
|
||||
b.data = append(b.data[:0], p[len(p)-b.limit:]...)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
total := len(b.data) + len(p)
|
||||
if total <= b.limit {
|
||||
b.data = append(b.data, p...)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
overflow := total - b.limit
|
||||
b.data = append(b.data[overflow:], p...)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func (b *tailBuffer) String() string {
|
||||
return string(b.data)
|
||||
}
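The `tailBuffer` above retains only the trailing `limit` bytes ever written to it, which is what keeps the stderr excerpt produced by `attachStderr` bounded. A minimal sketch of that behavior as a package-local test (illustrative only, not part of this diff):

```go
// Illustrative sketch, not part of the diff: exercises tailBuffer's
// last-N-bytes semantics from within the same package.
func TestTailBufferKeepsOnlyTail(t *testing.T) {
	buf := &tailBuffer{limit: 8}
	io.WriteString(buf, "hello, ") // 7 bytes, fits entirely
	io.WriteString(buf, "world!")  // total hits 13; the oldest 5 bytes drop
	if got, want := buf.String(), ", world!"; got != want {
		t.Fatalf("got %q, want %q", got, want)
	}
}
```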

func forwardSignals(ctx context.Context, cmd *exec.Cmd, logErrorFn func(string)) {
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
	signals := []os.Signal{syscall.SIGINT}
	if runtime.GOOS != "windows" {
		signals = append(signals, syscall.SIGTERM)
	}
	signal.Notify(sigCh, signals...)

	go func() {
		defer signal.Stop(sigCh)
		select {
		case sig := <-sigCh:
			logErrorFn(fmt.Sprintf("Received signal: %v", sig))
			if cmd.Process != nil {
				cmd.Process.Signal(syscall.SIGTERM)
				time.AfterFunc(time.Duration(forceKillDelay)*time.Second, func() {
					if cmd.Process != nil {
						cmd.Process.Kill()
					}
				})
			if cmd.Process == nil {
				return
			}
			if runtime.GOOS == "windows" {
				_ = cmd.Process.Kill()
				return
			}
			_ = cmd.Process.Signal(syscall.SIGTERM)
			time.AfterFunc(time.Duration(forceKillDelay)*time.Second, func() {
				if cmd.Process != nil {
					_ = cmd.Process.Kill()
				}
			})
		case <-ctx.Done():
		}
	}()
}

func cancelReason(ctx context.Context) string {
	if ctx == nil {
		return "Context cancelled"
	}

	if errors.Is(ctx.Err(), context.DeadlineExceeded) {
		return "Codex execution timeout"
	}

	return "Execution cancelled, terminating codex process"
}

func terminateProcess(cmd *exec.Cmd) *time.Timer {
	if cmd == nil || cmd.Process == nil {
		return nil
	}

	if runtime.GOOS == "windows" {
		_ = cmd.Process.Kill()
		return nil
	}

	_ = cmd.Process.Signal(syscall.SIGTERM)

	return time.AfterFunc(time.Duration(forceKillDelay)*time.Second, func() {
		if cmd.Process != nil {
			_ = cmd.Process.Kill()
		}
	})
}

func parseJSONStream(r io.Reader) (message, threadID string) {
	return parseJSONStreamWithWarn(r, logWarn)
	return parseJSONStreamWithLog(r, logWarn, logInfo)
}

func parseJSONStreamWithWarn(r io.Reader, warnFn func(string)) (message, threadID string) {
	return parseJSONStreamWithLog(r, warnFn, logInfo)
}

func parseJSONStreamWithLog(r io.Reader, warnFn func(string), infoFn func(string)) (message, threadID string) {
	scanner := bufio.NewScanner(r)
	scanner.Buffer(make([]byte, 64*1024), 10*1024*1024)

	if warnFn == nil {
		warnFn = func(string) {}
	}
	if infoFn == nil {
		infoFn = func(string) {}
	}

	totalEvents := 0

	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" {
			continue
		}
		totalEvents++

		var event JSONEvent
		if err := json.Unmarshal([]byte(line), &event); err != nil {
@@ -767,26 +1020,71 @@ func parseJSONStreamWithWarn(r io.Reader, warnFn func(string)) (message, threadI
			continue
		}

		// Capture thread_id
		if event.Type == "thread.started" {
			threadID = event.ThreadID
		var details []string
		if event.ThreadID != "" {
			details = append(details, fmt.Sprintf("thread_id=%s", event.ThreadID))
		}
		if event.Item != nil && event.Item.Type != "" {
			details = append(details, fmt.Sprintf("item_type=%s", event.Item.Type))
		}
		if len(details) > 0 {
			infoFn(fmt.Sprintf("Parsed event #%d type=%s (%s)", totalEvents, event.Type, strings.Join(details, ", ")))
		} else {
			infoFn(fmt.Sprintf("Parsed event #%d type=%s", totalEvents, event.Type))
		}

		// Capture agent_message
		if event.Type == "item.completed" && event.Item != nil && event.Item.Type == "agent_message" {
			if text := normalizeText(event.Item.Text); text != "" {
				message = text
		switch event.Type {
		case "thread.started":
			threadID = event.ThreadID
			infoFn(fmt.Sprintf("thread.started event thread_id=%s", threadID))
		case "item.completed":
			var itemType string
			var normalized string
			if event.Item != nil {
				itemType = event.Item.Type
				normalized = normalizeText(event.Item.Text)
			}
			infoFn(fmt.Sprintf("item.completed event item_type=%s message_len=%d", itemType, len(normalized)))
			if event.Item != nil && event.Item.Type == "agent_message" && normalized != "" {
				message = normalized
			}
		}
	}

	if err := scanner.Err(); err != nil && err != io.EOF {
	if err := scanner.Err(); err != nil && !errors.Is(err, io.EOF) {
		warnFn("Read stdout error: " + err.Error())
	}

	infoFn(fmt.Sprintf("parseJSONStream completed: events=%d, message_len=%d, thread_id_found=%t", totalEvents, len(message), threadID != ""))
	return message, threadID
}
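For reference, a sketch of the newline-delimited event stream this parser accepts, written as a same-package test. The JSON field names (`thread_id`, `item`, `text`) are assumptions inferred from how `JSONEvent` is consumed above; the real codex output schema may carry more fields:

```go
// Hypothetical sketch, not part of the diff. Feeds a minimal two-event
// NDJSON stream through parseJSONStream and checks what it extracts.
func TestParseJSONStreamSketch(t *testing.T) {
	input := strings.NewReader(
		`{"type":"thread.started","thread_id":"thr_123"}` + "\n" +
			`{"type":"item.completed","item":{"type":"agent_message","text":"done"}}` + "\n")
	msg, tid := parseJSONStream(input)
	if msg != "done" || tid != "thr_123" {
		t.Fatalf("msg=%q tid=%q", msg, tid)
	}
}
```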

func discardInvalidJSON(decoder *json.Decoder, reader *bufio.Reader) (*bufio.Reader, error) {
	var buffered bytes.Buffer

	if decoder != nil {
		if buf := decoder.Buffered(); buf != nil {
			_, _ = buffered.ReadFrom(buf)
		}
	}

	line, err := reader.ReadBytes('\n')
	buffered.Write(line)

	data := buffered.Bytes()
	newline := bytes.IndexByte(data, '\n')
	if newline == -1 {
		return reader, err
	}

	remaining := data[newline+1:]
	if len(remaining) == 0 {
		return reader, err
	}

	return bufio.NewReader(io.MultiReader(bytes.NewReader(remaining), reader)), err
}

func normalizeText(text interface{}) string {
	switch v := text.(type) {
	case string:
@@ -816,7 +1114,6 @@ func resolveTimeout() int {
		return defaultTimeout
	}

	// Environment variable is in milliseconds if > 10000, convert to seconds
	if parsed > 10000 {
		return parsed / 1000
	}
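The `> 10000` heuristic means a `CODEX_TIMEOUT` value that looks like milliseconds is converted to seconds, while small values pass through unchanged. A rough sketch of the expected behavior, assuming the surrounding function (not fully shown in this hunk) reads `CODEX_TIMEOUT` from the environment as the help text below indicates:

```go
// Sketch only — assumes resolveTimeout parses the CODEX_TIMEOUT
// environment variable, which the truncated hunk does not show in full.
func TestResolveTimeoutUnitsSketch(t *testing.T) {
	t.Setenv("CODEX_TIMEOUT", "7200000") // treated as milliseconds
	if got := resolveTimeout(); got != 7200 {
		t.Fatalf("got %d, want 7200", got)
	}
	t.Setenv("CODEX_TIMEOUT", "600") // small values pass through as seconds
	if got := resolveTimeout(); got != 600 {
		t.Fatalf("got %d, want 600", got)
	}
}
```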

@@ -842,10 +1139,71 @@ func getEnv(key, defaultValue string) {
	return defaultValue
}

type logWriter struct {
	prefix string
	maxLen int
	buf    bytes.Buffer
}

func newLogWriter(prefix string, maxLen int) *logWriter {
	if maxLen <= 0 {
		maxLen = codexLogLineLimit
	}
	return &logWriter{prefix: prefix, maxLen: maxLen}
}

func (lw *logWriter) Write(p []byte) (int, error) {
	if lw == nil {
		return len(p), nil
	}
	total := len(p)
	for len(p) > 0 {
		if idx := bytes.IndexByte(p, '\n'); idx >= 0 {
			lw.buf.Write(p[:idx])
			lw.logLine(true)
			p = p[idx+1:]
			continue
		}
		lw.buf.Write(p)
		break
	}
	return total, nil
}

func (lw *logWriter) Flush() {
	if lw == nil || lw.buf.Len() == 0 {
		return
	}
	lw.logLine(false)
}

func (lw *logWriter) logLine(force bool) {
	if lw == nil {
		return
	}
	line := lw.buf.String()
	lw.buf.Reset()
	if line == "" && !force {
		return
	}
	if lw.maxLen > 0 && len(line) > lw.maxLen {
		cutoff := lw.maxLen
		if cutoff > 3 {
			line = line[:cutoff-3] + "..."
		} else {
			line = line[:cutoff]
		}
	}
	logInfo(lw.prefix + line)
}

func truncate(s string, maxLen int) string {
	if len(s) <= maxLen {
		return s
	}
	if maxLen < 0 {
		return ""
	}
	return s[:maxLen] + "..."
}
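One edge of `truncate` worth noting: the appended `"..."` means the result can exceed `maxLen` by three bytes. A quick same-package check (illustrative, not part of the diff):

```go
// Illustrative check of truncate's boundary behavior.
func TestTruncateSketch(t *testing.T) {
	if got := truncate("abcdef", 4); got != "abcd..." { // 7 bytes, over maxLen
		t.Fatalf("got %q", got)
	}
	if got := truncate("abc", 4); got != "abc" { // short strings pass through
		t.Fatalf("got %q", got)
	}
}
```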

@@ -856,6 +1214,22 @@ func min(a, b int) int {
	return b
}

func setLogger(l *Logger) {
	loggerPtr.Store(l)
}

func closeLogger() error {
	logger := loggerPtr.Swap(nil)
	if logger == nil {
		return nil
	}
	return logger.Close()
}

func activeLogger() *Logger {
	return loggerPtr.Load()
}

func hello() string {
	return "hello world"
}
@@ -869,15 +1243,30 @@ func farewell(name string) string {
}

func logInfo(msg string) {
	fmt.Fprintf(os.Stderr, "INFO: %s\n", msg)
	if logger := activeLogger(); logger != nil {
		logger.Info(msg)
	}
}

func logWarn(msg string) {
	fmt.Fprintf(os.Stderr, "WARN: %s\n", msg)
	if logger := activeLogger(); logger != nil {
		logger.Warn(msg)
	}
}

func logError(msg string) {
	fmt.Fprintf(os.Stderr, "ERROR: %s\n", msg)
	if logger := activeLogger(); logger != nil {
		logger.Error(msg)
	}
}

func runCleanupHook() {
	if logger := activeLogger(); logger != nil {
		logger.Flush()
	}
	if cleanupHook != nil {
		cleanupHook()
	}
}

func printHelp() {
@@ -888,9 +1277,15 @@ Usage:
  codex-wrapper - [workdir]              Read task from stdin
  codex-wrapper resume <session_id> "task" [workdir]
  codex-wrapper resume <session_id> - [workdir]
  codex-wrapper --parallel               Run tasks in parallel (config from stdin)
  codex-wrapper --version
  codex-wrapper --help

Parallel mode examples:
  codex-wrapper --parallel < tasks.txt
  echo '...' | codex-wrapper --parallel
  codex-wrapper --parallel <<'EOF'

Environment Variables:
  CODEX_TIMEOUT    Timeout in milliseconds (default: 7200000)
File diff suppressed because it is too large

config.json (new file, 89 lines)
@@ -0,0 +1,89 @@
{
  "version": "1.0",
  "install_dir": "~/.claude",
  "log_file": "install.log",
  "modules": {
    "dev": {
      "enabled": true,
      "description": "Core dev workflow with Codex integration",
      "operations": [
        {
          "type": "merge_dir",
          "source": "dev-workflow",
          "description": "Merge commands/ and agents/ into install dir"
        },
        {
          "type": "copy_file",
          "source": "memorys/CLAUDE.md",
          "target": "CLAUDE.md",
          "description": "Copy core role and guidelines"
        },
        {
          "type": "copy_file",
          "source": "skills/codex/SKILL.md",
          "target": "skills/codex/SKILL.md",
          "description": "Install codex skill"
        },
        {
          "type": "run_command",
          "command": "bash install.sh",
          "description": "Install codex-wrapper binary",
          "env": {
            "INSTALL_DIR": "${install_dir}"
          }
        }
      ]
    },
    "bmad": {
      "enabled": false,
      "description": "BMAD agile workflow with multi-agent orchestration",
      "operations": [
        {
          "type": "merge_dir",
          "source": "bmad-agile-workflow",
          "description": "Merge BMAD commands and agents"
        },
        {
          "type": "copy_file",
          "source": "docs/BMAD-WORKFLOW.md",
          "target": "docs/BMAD-WORKFLOW.md",
          "description": "Copy BMAD workflow documentation"
        }
      ]
    },
    "requirements": {
      "enabled": false,
      "description": "Requirements-driven development workflow",
      "operations": [
        {
          "type": "merge_dir",
          "source": "requirements-driven-workflow",
          "description": "Merge requirements workflow commands and agents"
        },
        {
          "type": "copy_file",
          "source": "docs/REQUIREMENTS-WORKFLOW.md",
          "target": "docs/REQUIREMENTS-WORKFLOW.md",
          "description": "Copy requirements workflow documentation"
        }
      ]
    },
    "essentials": {
      "enabled": true,
      "description": "Core development commands and utilities",
      "operations": [
        {
          "type": "merge_dir",
          "source": "development-essentials",
          "description": "Merge essential development commands"
        },
        {
          "type": "copy_file",
          "source": "docs/DEVELOPMENT-COMMANDS.md",
          "target": "docs/DEVELOPMENT-COMMANDS.md",
          "description": "Copy development commands documentation"
        }
      ]
    }
  }
}
config.schema.json (new file, 109 lines)
@@ -0,0 +1,109 @@
{
  "$schema": "https://json-schema.org/draft/2020-12/schema",
  "$id": "https://github.com/cexll/myclaude/config.schema.json",
  "title": "Modular Installation Config",
  "type": "object",
  "additionalProperties": false,
  "required": ["version", "install_dir", "log_file", "modules"],
  "properties": {
    "version": {
      "type": "string",
      "pattern": "^[0-9]+\\.[0-9]+(\\.[0-9]+)?$"
    },
    "install_dir": {
      "type": "string",
      "minLength": 1,
      "description": "Target installation directory, supports ~/ expansion"
    },
    "log_file": {
      "type": "string",
      "minLength": 1
    },
    "modules": {
      "type": "object",
"description": "可自定义的模块定义,每个模块名称可任意指定",
      "patternProperties": {
        "^[a-zA-Z0-9_-]+$": { "$ref": "#/$defs/module" }
      },
      "additionalProperties": false,
      "minProperties": 1
    }
  },
  "$defs": {
    "module": {
      "type": "object",
      "additionalProperties": false,
      "required": ["enabled", "description", "operations"],
      "properties": {
        "enabled": { "type": "boolean", "default": false },
        "description": { "type": "string", "minLength": 3 },
        "operations": {
          "type": "array",
          "minItems": 1,
          "items": { "$ref": "#/$defs/operation" }
        }
      }
    },
    "operation": {
      "oneOf": [
        { "$ref": "#/$defs/op_copy_dir" },
        { "$ref": "#/$defs/op_copy_file" },
        { "$ref": "#/$defs/op_merge_dir" },
        { "$ref": "#/$defs/op_run_command" }
      ]
    },
    "common_operation_fields": {
      "type": "object",
      "properties": {
        "description": { "type": "string" }
      },
      "additionalProperties": true
    },
    "op_copy_dir": {
      "type": "object",
      "additionalProperties": false,
      "required": ["type", "source", "target"],
      "properties": {
        "type": { "const": "copy_dir" },
        "source": { "type": "string", "minLength": 1 },
        "target": { "type": "string", "minLength": 1 },
        "description": { "type": "string" }
      }
    },
    "op_copy_file": {
      "type": "object",
      "additionalProperties": false,
      "required": ["type", "source", "target"],
      "properties": {
        "type": { "const": "copy_file" },
        "source": { "type": "string", "minLength": 1 },
        "target": { "type": "string", "minLength": 1 },
        "description": { "type": "string" }
      }
    },
    "op_merge_dir": {
      "type": "object",
      "additionalProperties": false,
      "required": ["type", "source"],
      "properties": {
        "type": { "const": "merge_dir" },
        "source": { "type": "string", "minLength": 1 },
        "description": { "type": "string" }
      }
    },
    "op_run_command": {
      "type": "object",
      "additionalProperties": false,
      "required": ["type", "command"],
      "properties": {
        "type": { "const": "run_command" },
        "command": { "type": "string", "minLength": 1 },
        "description": { "type": "string" },
        "env": {
          "type": "object",
          "additionalProperties": { "type": "string" }
        }
      }
    }
  }
}
@@ -17,7 +17,7 @@ You are the /dev Workflow Orchestrator, an expert development workflow manager s
**Workflow Execution**
- **Step 1: Requirement Clarification**
  - Use AskUserQuestion to clarify requirements directly
  - Focus questions on functional boundaries, inputs/outputs, constraints, testing
  - Focus questions on functional boundaries, inputs/outputs, constraints, testing, and required unit-test coverage levels
  - Iterate 2-3 rounds until clear; rely on judgment; keep questions concise

- **Step 2: Codex Deep Analysis (Plan Mode Style)**
@@ -1,315 +0,0 @@
# Advanced AI Agents Guide

> GPT-5 deep reasoning integration for complex analysis and architectural decisions

## 🎯 Overview

The Advanced AI Agents plugin provides access to GPT-5's deep reasoning capabilities through the `gpt5` agent, designed for complex problem-solving that requires multi-step thinking and comprehensive analysis.

## 🤖 GPT-5 Agent

### Capabilities

The `gpt5` agent excels at:

- **Architectural Analysis**: Evaluating system designs and scalability concerns
- **Strategic Planning**: Breaking down complex initiatives into actionable plans
- **Trade-off Analysis**: Comparing multiple approaches with detailed pros/cons
- **Problem Decomposition**: Breaking complex problems into manageable components
- **Deep Reasoning**: Multi-step logical analysis for non-obvious solutions
- **Technology Evaluation**: Assessing technologies, frameworks, and tools

### When to Use

**Use GPT-5 agent** when:
- Problem requires deep, multi-step reasoning
- Multiple solution approaches need evaluation
- Architectural decisions have long-term impact
- Trade-offs are complex and multifaceted
- Standard agents provide insufficient depth

**Use standard agents** when:
- Task is straightforward implementation
- Requirements are clear and well-defined
- Quick turnaround is priority
- Problem is domain-specific (code, tests, etc.)

## 🚀 Usage

### Via `/think` Command

The easiest way to access GPT-5:

```bash
/think "Analyze scalability bottlenecks in current microservices architecture"
/think "Evaluate migration strategy from monolith to microservices"
/think "Design data synchronization approach for offline-first mobile app"
```

### Direct Agent Invocation

For advanced usage:

```bash
# Use @gpt5 to invoke the agent directly
@gpt5 "Complex architectural question or analysis request"
```

## 💡 Example Use Cases

### 1. Architecture Evaluation

```bash
/think "Current system uses REST API with polling for real-time updates.
Evaluate whether to migrate to WebSocket, Server-Sent Events, or GraphQL
subscriptions. Consider: team experience, existing infrastructure, client
support, scalability, and implementation effort."
```

**GPT-5 provides**:
- Detailed analysis of each option
- Pros and cons for your specific context
- Migration complexity assessment
- Performance implications
- Recommended approach with justification

### 2. Migration Strategy

```bash
/think "Plan migration from PostgreSQL to multi-region distributed database.
System has 50M users, 200M rows, 1000 req/sec. Must maintain 99.9% uptime.
What's the safest migration path?"
```

**GPT-5 provides**:
- Step-by-step migration plan
- Risk assessment for each phase
- Rollback strategies
- Data consistency approaches
- Timeline estimation

### 3. Problem Decomposition

```bash
/think "Design a recommendation engine that learns user preferences, handles
cold start, provides explainable results, and scales to 10M users. Break this
down into implementation phases with clear milestones."
```

**GPT-5 provides**:
- Problem breakdown into components
- Phased implementation plan
- Technical approach for each phase
- Dependencies between phases
- Success criteria and metrics

### 4. Technology Selection

```bash
/think "Choosing between Redis, Memcached, and Hazelcast for distributed
caching. System needs: persistence, pub/sub, clustering, and complex data
structures. Existing stack: Java, Kubernetes, AWS."
```

**GPT-5 provides**:
- Comparison matrix across requirements
- Integration considerations
- Operational complexity analysis
- Cost implications
- Recommendation with rationale

### 5. Performance Optimization

```bash
/think "API response time increased from 100ms to 800ms after scaling from
100 to 10,000 users. Database queries look optimized. What are the likely
bottlenecks and systematic approach to identify them?"
```

**GPT-5 provides**:
- Hypothesis generation (N+1 queries, connection pooling, etc.)
- Systematic debugging approach
- Profiling strategy
- Likely root causes ranked by probability
- Optimization recommendations

## 🎨 Integration with BMAD

### Enhanced Code Review

BMAD's `bmad-review` agent can optionally use GPT-5 for deeper analysis:

**Configuration**:
```bash
# Enable enhanced review mode (via environment or BMAD config)
BMAD_REVIEW_MODE=enhanced /bmad-pilot "feature description"
```

**What changes**:
- Standard review: Fast, focuses on code quality and obvious issues
- Enhanced review: Deep analysis including:
  - Architectural impact
  - Security implications
  - Performance considerations
  - Scalability concerns
  - Design pattern appropriateness

### Architecture Phase Support

Use `/think` during BMAD architecture phase:

```bash
# Start BMAD workflow
/bmad-pilot "E-commerce platform with real-time inventory"

# During Architecture phase, get deep analysis
/think "Evaluate architecture approaches for real-time inventory
synchronization across warehouses, online store, and mobile apps"

# Continue with BMAD using insights
```

## 📋 Best Practices

### 1. Provide Complete Context

**❌ Insufficient**:
```bash
/think "Should we use microservices?"
```

**✅ Complete**:
```bash
/think "Current monolith: 100K LOC, 8 developers, 50K users, 200ms avg
response time. Pain points: slow deployments (1hr), difficult to scale
components independently. Should we migrate to microservices? What's the
ROI and risk?"
```

### 2. Ask Specific Questions

**❌ Too broad**:
```bash
/think "How to build a scalable system?"
```

**✅ Specific**:
```bash
/think "Current system handles 1K req/sec. Need to scale to 10K. Bottleneck
is database writes. Evaluate: sharding, read replicas, CQRS, or caching.
Database: PostgreSQL, stack: Node.js, deployment: Kubernetes."
```

### 3. Include Constraints

Always mention:
- Team skills and size
- Timeline and budget
- Existing infrastructure
- Business requirements
- Technical constraints

**Example**:
```bash
/think "Design real-time chat system. Constraints: team of 3 backend
developers (Node.js), 6-month timeline, AWS deployment, must integrate
with existing REST API, budget for managed services OK."
```

### 4. Request Specific Outputs

Tell GPT-5 what format you need:

```bash
/think "Compare Kafka vs RabbitMQ for event streaming.
Provide: comparison table, recommendation, migration complexity from current
RabbitMQ setup, and estimated effort in developer-weeks."
```

### 5. Iterate and Refine

Follow up for deeper analysis:

```bash
# Initial question
/think "Evaluate caching strategies for user profile API"

# Follow-up based on response
/think "You recommended Redis with write-through caching. How to handle
cache invalidation when user updates profile from mobile app?"
```

## 🔧 Technical Details

### Sequential Thinking

GPT-5 agent uses sequential thinking for complex problems:

1. **Problem Understanding**: Clarify requirements and constraints
2. **Hypothesis Generation**: Identify possible solutions
3. **Analysis**: Evaluate each option systematically
4. **Trade-off Assessment**: Compare pros/cons
5. **Recommendation**: Provide justified conclusion

### Reasoning Transparency

GPT-5 shows its thinking process:
- Assumptions made
- Factors considered
- Why certain options were eliminated
- Confidence level in recommendations

## 🎯 Comparison: GPT-5 vs Standard Agents

| Aspect | GPT-5 Agent | Standard Agents |
|--------|-------------|-----------------|
| **Depth** | Deep, multi-step reasoning | Focused, domain-specific |
| **Speed** | Slower (comprehensive analysis) | Faster (direct implementation) |
| **Use Case** | Strategic decisions, architecture | Implementation, coding, testing |
| **Output** | Analysis, recommendations, plans | Code, tests, documentation |
| **Best For** | Complex problems, trade-offs | Clear tasks, defined scope |
| **Invocation** | `/think` or `@gpt5` | `/code`, `/test`, etc. |

## 📚 Related Documentation

- **[BMAD Workflow](BMAD-WORKFLOW.md)** - Integration with full agile workflow
- **[Development Commands](DEVELOPMENT-COMMANDS.md)** - Standard command reference
- **[Quick Start Guide](QUICK-START.md)** - Get started quickly

## 💡 Advanced Patterns

### Pre-Implementation Analysis

```bash
# 1. Deep analysis with GPT-5
/think "Design approach for X with constraints Y and Z"

# 2. Use analysis in BMAD workflow
/bmad-pilot "Implement X based on approach from analysis"
```

### Architecture Validation

```bash
# 1. Get initial architecture from BMAD
/bmad-pilot "Feature X"  # Generates 02-system-architecture.md

# 2. Validate with GPT-5
/think "Review architecture in .claude/specs/feature-x/02-system-architecture.md
Evaluate for scalability, security, and maintainability"

# 3. Refine architecture based on feedback
```

### Decision Documentation

```bash
# Use GPT-5 to document architectural decisions
/think "Document decision to use Event Sourcing for order management.
Include: context, options considered, decision rationale, consequences,
and format as Architecture Decision Record (ADR)"
```

---

**Advanced AI Agents** - Deep reasoning for complex problems that require comprehensive analysis.
install.bat (new file, 163 lines)
@@ -0,0 +1,163 @@
@echo off
setlocal enabledelayedexpansion

set "EXIT_CODE=0"
set "REPO=cexll/myclaude"
set "VERSION=latest"
set "OS=windows"

call :detect_arch
if errorlevel 1 goto :fail

set "BINARY_NAME=codex-wrapper-%OS%-%ARCH%.exe"
set "URL=https://github.com/%REPO%/releases/%VERSION%/download/%BINARY_NAME%"
set "TEMP_FILE=%TEMP%\codex-wrapper-%ARCH%-%RANDOM%.exe"
set "DEST_DIR=%USERPROFILE%\bin"
set "DEST=%DEST_DIR%\codex-wrapper.exe"

echo Downloading codex-wrapper for %ARCH% ...
echo %URL%
call :download
if errorlevel 1 goto :fail

if not exist "%TEMP_FILE%" (
    echo ERROR: download failed to produce "%TEMP_FILE%".
    goto :fail
)

echo Installing to "%DEST%" ...
if not exist "%DEST_DIR%" (
    mkdir "%DEST_DIR%" >nul 2>nul || goto :fail
)

move /y "%TEMP_FILE%" "%DEST%" >nul 2>nul
if errorlevel 1 (
    echo ERROR: unable to place file in "%DEST%".
    goto :fail
)

"%DEST%" --version >nul 2>nul
if errorlevel 1 (
    echo ERROR: installation verification failed.
    goto :fail
)

echo.
echo codex-wrapper installed successfully at:
echo %DEST%

rem Automatically ensure %USERPROFILE%\bin is in the USER (HKCU) PATH
rem 1) Read current user PATH from registry (REG_SZ or REG_EXPAND_SZ)
set "USER_PATH_RAW="
set "USER_PATH_TYPE="
for /f "tokens=1,2,*" %%A in ('reg query "HKCU\Environment" /v Path 2^>nul ^| findstr /I /R "^ *Path *REG_"') do (
    set "USER_PATH_TYPE=%%B"
    set "USER_PATH_RAW=%%C"
)
rem Trim leading spaces from USER_PATH_RAW
for /f "tokens=* delims= " %%D in ("!USER_PATH_RAW!") do set "USER_PATH_RAW=%%D"

rem Normalize DEST_DIR by removing a trailing backslash if present
if "!DEST_DIR:~-1!"=="\" set "DEST_DIR=!DEST_DIR:~0,-1!"

rem Build search tokens (expanded and literal)
set "PCT=%%"
set "SEARCH_EXP=;!DEST_DIR!;"
set "SEARCH_EXP2=;!DEST_DIR!\;"
set "SEARCH_LIT=;!PCT!USERPROFILE!PCT!\bin;"
set "SEARCH_LIT2=;!PCT!USERPROFILE!PCT!\bin\;"

rem Prepare user PATH variants for containment tests
set "CHECK_RAW=;!USER_PATH_RAW!;"
set "USER_PATH_EXP=!USER_PATH_RAW!"
if defined USER_PATH_EXP call set "USER_PATH_EXP=%%USER_PATH_EXP%%"
set "CHECK_EXP=;!USER_PATH_EXP!;"

rem Check if already present in user PATH (literal or expanded, with/without trailing backslash)
set "ALREADY_IN_USERPATH=0"
echo !CHECK_RAW! | findstr /I /C:"!SEARCH_LIT!" /C:"!SEARCH_LIT2!" >nul && set "ALREADY_IN_USERPATH=1"
if "!ALREADY_IN_USERPATH!"=="0" (
    echo !CHECK_EXP! | findstr /I /C:"!SEARCH_EXP!" /C:"!SEARCH_EXP2!" >nul && set "ALREADY_IN_USERPATH=1"
)

if "!ALREADY_IN_USERPATH!"=="1" (
    echo User PATH already includes %%USERPROFILE%%\bin.
) else (
    rem Not present: append to user PATH using setx without duplicating system PATH
    if defined USER_PATH_RAW (
        set "USER_PATH_NEW=!USER_PATH_RAW!"
        if not "!USER_PATH_NEW:~-1!"==";" set "USER_PATH_NEW=!USER_PATH_NEW!;"
        set "USER_PATH_NEW=!USER_PATH_NEW!!PCT!USERPROFILE!PCT!\bin"
    ) else (
        set "USER_PATH_NEW=!PCT!USERPROFILE!PCT!\bin"
    )
    rem Persist update to HKCU\Environment\Path (user scope)
    setx PATH "!USER_PATH_NEW!" >nul
    if errorlevel 1 (
        echo WARNING: Failed to append %%USERPROFILE%%\bin to your user PATH.
    ) else (
        echo Added %%USERPROFILE%%\bin to your user PATH.
    )
)

rem Update current session PATH so codex-wrapper is immediately available
set "CURPATH=;%PATH%;"
echo !CURPATH! | findstr /I /C:"!SEARCH_EXP!" /C:"!SEARCH_EXP2!" /C:"!SEARCH_LIT!" /C:"!SEARCH_LIT2!" >nul
if errorlevel 1 set "PATH=!DEST_DIR!;!PATH!"

goto :cleanup

:detect_arch
set "ARCH=%PROCESSOR_ARCHITECTURE%"
if defined PROCESSOR_ARCHITEW6432 set "ARCH=%PROCESSOR_ARCHITEW6432%"

if /I "%ARCH%"=="AMD64" (
    set "ARCH=amd64"
    exit /b 0
) else if /I "%ARCH%"=="ARM64" (
    set "ARCH=arm64"
    exit /b 0
) else (
    echo ERROR: unsupported architecture "%ARCH%". 64-bit Windows on AMD64 or ARM64 is required.
    set "EXIT_CODE=1"
    exit /b 1
)

:download
where curl >nul 2>nul
if %errorlevel%==0 (
    echo Using curl ...
    curl -fL --retry 3 --connect-timeout 10 "%URL%" -o "%TEMP_FILE%"
    if errorlevel 1 (
        echo ERROR: curl download failed.
        set "EXIT_CODE=1"
        exit /b 1
    )
    exit /b 0
)

where powershell >nul 2>nul
if %errorlevel%==0 (
    echo Using PowerShell ...
    powershell -NoLogo -NoProfile -Command " $ErrorActionPreference='Stop'; try { [Net.ServicePointManager]::SecurityProtocol = [Net.ServicePointManager]::SecurityProtocol -bor 3072 -bor 768 -bor 192 } catch {} ; $wc = New-Object System.Net.WebClient; $wc.DownloadFile('%URL%','%TEMP_FILE%') "
    if errorlevel 1 (
        echo ERROR: PowerShell download failed.
        set "EXIT_CODE=1"
        exit /b 1
    )
    exit /b 0
)

echo ERROR: neither curl nor PowerShell is available to download the installer.
set "EXIT_CODE=1"
exit /b 1

:fail
echo Installation failed.
set "EXIT_CODE=1"
goto :cleanup

:cleanup
if exist "%TEMP_FILE%" del /f /q "%TEMP_FILE%" >nul 2>nul
set "CODE=%EXIT_CODE%"
endlocal & exit /b %CODE%
install.py (new file, 427 lines)
@@ -0,0 +1,427 @@
#!/usr/bin/env python3
"""JSON-driven modular installer.

Keep it simple: validate config, expand paths, run the four operation types,
and record what happened. Designed to be small, readable, and predictable.
"""

from __future__ import annotations

import argparse
import json
import os
import shutil
import subprocess
import sys
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional

import jsonschema

DEFAULT_INSTALL_DIR = "~/.claude"


def _ensure_list(ctx: Dict[str, Any], key: str) -> List[Any]:
    ctx.setdefault(key, [])
    return ctx[key]


def parse_args(argv: Optional[Iterable[str]] = None) -> argparse.Namespace:
    """Parse CLI arguments.

    The default install dir must remain "~/.claude" to match docs/tests.
    """

    parser = argparse.ArgumentParser(
        description="JSON-driven modular installation system"
    )
    parser.add_argument(
        "--install-dir",
        default=DEFAULT_INSTALL_DIR,
        help="Installation directory (defaults to ~/.claude)",
    )
    parser.add_argument(
        "--module",
        help="Comma-separated modules to install, or 'all' for all enabled",
    )
    parser.add_argument(
        "--config",
        default="config.json",
        help="Path to configuration file",
    )
    parser.add_argument(
        "--list-modules",
        action="store_true",
        help="List available modules and exit",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Force overwrite existing files",
    )
    return parser.parse_args(argv)


def _load_json(path: Path) -> Any:
    try:
        with path.open("r", encoding="utf-8") as fh:
            return json.load(fh)
    except FileNotFoundError as exc:
        raise FileNotFoundError(f"File not found: {path}") from exc
    except json.JSONDecodeError as exc:
        raise ValueError(f"Invalid JSON in {path}: {exc}") from exc


def load_config(path: str) -> Dict[str, Any]:
    """Load config and validate against JSON Schema.

    Schema is searched in the config directory first, then alongside this file.
    """

    config_path = Path(path).expanduser().resolve()
    config = _load_json(config_path)

    schema_candidates = [
        config_path.parent / "config.schema.json",
        Path(__file__).resolve().with_name("config.schema.json"),
    ]
    schema_path = next((p for p in schema_candidates if p.exists()), None)
    if schema_path is None:
        raise FileNotFoundError("config.schema.json not found")

    schema = _load_json(schema_path)
    try:
        jsonschema.validate(config, schema)
    except jsonschema.ValidationError as exc:
        raise ValueError(f"Config validation failed: {exc.message}") from exc

    return config


def resolve_paths(config: Dict[str, Any], args: argparse.Namespace) -> Dict[str, Any]:
    """Resolve all filesystem paths to absolute Path objects."""

    config_dir = Path(args.config).expanduser().resolve().parent

    if args.install_dir and args.install_dir != DEFAULT_INSTALL_DIR:
        install_dir_raw = args.install_dir
    elif config.get("install_dir"):
        install_dir_raw = config.get("install_dir")
    else:
        install_dir_raw = DEFAULT_INSTALL_DIR

    install_dir = Path(install_dir_raw).expanduser().resolve()

    log_file_raw = config.get("log_file", "install.log")
    log_file = Path(log_file_raw).expanduser()
    if not log_file.is_absolute():
        log_file = install_dir / log_file

    return {
        "install_dir": install_dir,
        "log_file": log_file,
        "status_file": install_dir / "installed_modules.json",
        "config_dir": config_dir,
        "force": bool(getattr(args, "force", False)),
        "applied_paths": [],
        "status_backup": None,
    }


def list_modules(config: Dict[str, Any]) -> None:
    print("Available Modules:")
    print(f"{'Name':<15} {'Enabled':<8} Description")
    print("-" * 60)
    for name, cfg in config.get("modules", {}).items():
        enabled = "✓" if cfg.get("enabled", False) else "✗"
        desc = cfg.get("description", "")
        print(f"{name:<15} {enabled:<8} {desc}")


def select_modules(config: Dict[str, Any], module_arg: Optional[str]) -> Dict[str, Any]:
    modules = config.get("modules", {})
    if not module_arg:
        return {k: v for k, v in modules.items() if v.get("enabled", False)}

    if module_arg.strip().lower() == "all":
        return {k: v for k, v in modules.items() if v.get("enabled", False)}

    selected: Dict[str, Any] = {}
    for name in (part.strip() for part in module_arg.split(",")):
        if not name:
            continue
        if name not in modules:
            raise ValueError(f"Module '{name}' not found")
        selected[name] = modules[name]
    return selected


def ensure_install_dir(path: Path) -> None:
    path = Path(path)
    if path.exists() and not path.is_dir():
        raise NotADirectoryError(f"Install path exists and is not a directory: {path}")
    path.mkdir(parents=True, exist_ok=True)
    if not os.access(path, os.W_OK):
        raise PermissionError(f"No write permission for install dir: {path}")


def execute_module(name: str, cfg: Dict[str, Any], ctx: Dict[str, Any]) -> Dict[str, Any]:
    result: Dict[str, Any] = {
        "module": name,
        "status": "success",
        "operations": [],
        "installed_at": datetime.now().isoformat(),
    }

    for op in cfg.get("operations", []):
        op_type = op.get("type")
        try:
            if op_type == "copy_dir":
                op_copy_dir(op, ctx)
            elif op_type == "copy_file":
                op_copy_file(op, ctx)
            elif op_type == "merge_dir":
                op_merge_dir(op, ctx)
            elif op_type == "run_command":
                op_run_command(op, ctx)
            else:
                raise ValueError(f"Unknown operation type: {op_type}")

            result["operations"].append({"type": op_type, "status": "success"})
        except Exception as exc:  # noqa: BLE001
            result["status"] = "failed"
            result["operations"].append(
                {"type": op_type, "status": "failed", "error": str(exc)}
            )
            write_log(
                {
                    "level": "ERROR",
                    "message": f"Module {name} failed on {op_type}: {exc}",
                },
                ctx,
            )
            raise

    return result


def _source_path(op: Dict[str, Any], ctx: Dict[str, Any]) -> Path:
    return (ctx["config_dir"] / op["source"]).expanduser().resolve()


def _target_path(op: Dict[str, Any], ctx: Dict[str, Any]) -> Path:
    return (ctx["install_dir"] / op["target"]).expanduser().resolve()


def _record_created(path: Path, ctx: Dict[str, Any]) -> None:
    install_dir = Path(ctx["install_dir"]).resolve()
    resolved = Path(path).resolve()
    if resolved == install_dir or install_dir not in resolved.parents:
        return
    applied = _ensure_list(ctx, "applied_paths")
    if resolved not in applied:
        applied.append(resolved)


def op_copy_dir(op: Dict[str, Any], ctx: Dict[str, Any]) -> None:
    src = _source_path(op, ctx)
    dst = _target_path(op, ctx)

    existed_before = dst.exists()
    if existed_before and not ctx.get("force", False):
        write_log({"level": "INFO", "message": f"Skip existing dir: {dst}"}, ctx)
        return

    dst.parent.mkdir(parents=True, exist_ok=True)
    shutil.copytree(src, dst, dirs_exist_ok=True)
    if not existed_before:
        _record_created(dst, ctx)
    write_log({"level": "INFO", "message": f"Copied dir {src} -> {dst}"}, ctx)


def op_merge_dir(op: Dict[str, Any], ctx: Dict[str, Any]) -> None:
    """Merge source dir's subdirs (commands/, agents/, etc.) into install_dir."""
    src = _source_path(op, ctx)
    install_dir = ctx["install_dir"]
    force = ctx.get("force", False)
    merged = []

    for subdir in src.iterdir():
        if not subdir.is_dir():
            continue
        target_subdir = install_dir / subdir.name
        target_subdir.mkdir(parents=True, exist_ok=True)
        for f in subdir.iterdir():
            if f.is_file():
                dst = target_subdir / f.name
                if dst.exists() and not force:
                    continue
                shutil.copy2(f, dst)
                merged.append(f"{subdir.name}/{f.name}")

    write_log({"level": "INFO", "message": f"Merged {src.name}: {', '.join(merged) or 'no files'}"}, ctx)


def op_copy_file(op: Dict[str, Any], ctx: Dict[str, Any]) -> None:
    src = _source_path(op, ctx)
    dst = _target_path(op, ctx)

    existed_before = dst.exists()
    if existed_before and not ctx.get("force", False):
        write_log({"level": "INFO", "message": f"Skip existing file: {dst}"}, ctx)
        return

    dst.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy2(src, dst)
    if not existed_before:
        _record_created(dst, ctx)
    write_log({"level": "INFO", "message": f"Copied file {src} -> {dst}"}, ctx)


def op_run_command(op: Dict[str, Any], ctx: Dict[str, Any]) -> None:
    env = os.environ.copy()
    for key, value in op.get("env", {}).items():
        env[key] = value.replace("${install_dir}", str(ctx["install_dir"]))

    command = op.get("command", "")
    if sys.platform == "win32" and command.strip() == "bash install.sh":
        command = "cmd /c install.bat"
    result = subprocess.run(
        command,
        shell=True,
        cwd=ctx["config_dir"],
        env=env,
        capture_output=True,
        text=True,
    )

    write_log(
        {
            "level": "INFO",
            "message": f"Command: {command}",
            "stdout": result.stdout,
            "stderr": result.stderr,
            "returncode": result.returncode,
        },
        ctx,
    )

    if result.returncode != 0:
        raise RuntimeError(f"Command failed with code {result.returncode}: {command}")


def write_log(entry: Dict[str, Any], ctx: Dict[str, Any]) -> None:
    log_path = Path(ctx["log_file"])
    log_path.parent.mkdir(parents=True, exist_ok=True)

    ts = datetime.now().isoformat()
    level = entry.get("level", "INFO")
    message = entry.get("message", "")

    with log_path.open("a", encoding="utf-8") as fh:
        fh.write(f"[{ts}] {level}: {message}\n")
        for key in ("stdout", "stderr", "returncode"):
            if key in entry and entry[key] not in (None, ""):
                fh.write(f"  {key}: {entry[key]}\n")


def write_status(results: List[Dict[str, Any]], ctx: Dict[str, Any]) -> None:
|
||||
status = {
|
||||
"installed_at": datetime.now().isoformat(),
|
||||
"modules": {item["module"]: item for item in results},
|
||||
}
|
||||
|
||||
status_path = Path(ctx["status_file"])
|
||||
status_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
with status_path.open("w", encoding="utf-8") as fh:
|
||||
json.dump(status, fh, indent=2, ensure_ascii=False)
|
||||
|
||||
|
||||
def prepare_status_backup(ctx: Dict[str, Any]) -> None:
|
||||
status_path = Path(ctx["status_file"])
|
||||
if status_path.exists():
|
||||
backup = status_path.with_suffix(".json.bak")
|
||||
backup.parent.mkdir(parents=True, exist_ok=True)
|
||||
shutil.copy2(status_path, backup)
|
||||
ctx["status_backup"] = backup
|
||||
|
||||
|
||||
def rollback(ctx: Dict[str, Any]) -> None:
|
||||
write_log({"level": "WARNING", "message": "Rolling back installation"}, ctx)
|
||||
|
||||
install_dir = Path(ctx["install_dir"]).resolve()
|
||||
for path in reversed(ctx.get("applied_paths", [])):
|
||||
resolved = Path(path).resolve()
|
||||
try:
|
||||
if resolved == install_dir or install_dir not in resolved.parents:
|
||||
continue
|
||||
if resolved.is_dir():
|
||||
shutil.rmtree(resolved, ignore_errors=True)
|
||||
else:
|
||||
resolved.unlink(missing_ok=True)
|
||||
except Exception as exc: # noqa: BLE001
|
||||
write_log(
|
||||
{
|
||||
"level": "ERROR",
|
||||
"message": f"Rollback skipped {resolved}: {exc}",
|
||||
},
|
||||
ctx,
|
||||
)
|
||||
|
||||
backup = ctx.get("status_backup")
|
||||
if backup and Path(backup).exists():
|
||||
shutil.copy2(backup, ctx["status_file"])
|
||||
|
||||
write_log({"level": "INFO", "message": "Rollback completed"}, ctx)
|
||||
|
||||
|
||||
def main(argv: Optional[Iterable[str]] = None) -> int:
|
||||
args = parse_args(argv)
|
||||
try:
|
||||
config = load_config(args.config)
|
||||
except Exception as exc: # noqa: BLE001
|
||||
print(f"Error loading config: {exc}", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
ctx = resolve_paths(config, args)
|
||||
|
||||
if getattr(args, "list_modules", False):
|
||||
list_modules(config)
|
||||
return 0
|
||||
|
||||
modules = select_modules(config, args.module)
|
||||
|
||||
try:
|
||||
ensure_install_dir(ctx["install_dir"])
|
||||
except Exception as exc: # noqa: BLE001
|
||||
print(f"Failed to prepare install dir: {exc}", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
prepare_status_backup(ctx)
|
||||
|
||||
results: List[Dict[str, Any]] = []
|
||||
for name, cfg in modules.items():
|
||||
try:
|
||||
results.append(execute_module(name, cfg, ctx))
|
||||
except Exception: # noqa: BLE001
|
||||
if not args.force:
|
||||
rollback(ctx)
|
||||
return 1
|
||||
rollback(ctx)
|
||||
results.append(
|
||||
{
|
||||
"module": name,
|
||||
"status": "failed",
|
||||
"operations": [],
|
||||
"installed_at": datetime.now().isoformat(),
|
||||
}
|
||||
)
|
||||
break
|
||||
|
||||
write_status(results, ctx)
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__": # pragma: no cover
|
||||
sys.exit(main())
|
||||
@@ -1,6 +1,13 @@
#!/bin/bash
set -e

echo "⚠️  WARNING: install.sh is LEGACY and will be removed in future versions."
echo "Please use the new installation method:"
echo "  python3 install.py --install-dir ~/.claude"
echo ""
echo "Continuing with legacy installation in 5 seconds..."
sleep 5

# Detect platform
OS=$(uname -s | tr '[:upper:]' '[:lower:]')
ARCH=$(uname -m)
@@ -39,7 +39,7 @@ Before any tool call, restate the user goal and outline the current plan. While
</tool_preambles>

<self_reflection>
Construct a private rubric with at least five categories (maintainability, tests with ≥90% coverage, performance, security, style, documentation, backward compatibility). Evaluate the work before finalizing; revisit the implementation if any category misses the bar.
Construct a private rubric with at least five categories (maintainability, performance, security, style, documentation, backward compatibility). Evaluate the work before finalizing; revisit the implementation if any category misses the bar.
</self_reflection>

<output_verbosity>
@@ -1,121 +0,0 @@
---
name: BMAD
description:
  Orchestrate BMAD (PO → Architect → SM → Dev → QA).
  PO/Architect/SM run locally; Dev/QA via bash Codex CLI. Explicit approval gates and repo-aware artifacts.
---

# BMAD Output Style

<role>
You are the BMAD Orchestrator coordinating a full-stack Agile workflow with five roles: Product Owner (PO), System Architect, Scrum Master (SM), Developer (Dev), and QA. You do not take over their domain work; instead, you guide the flow, ask targeted questions, enforce approval gates, and save outputs when confirmed.

PO/Architect/SM phases run locally as interactive loops (no external Codex calls). Dev/QA phases may use the bash Codex CLI when implementation or execution is needed.
</role>

<important_instructions>
1. Use UltraThink: hypotheses → evidence → patterns → synthesis → validation.
2. Follow KISS, YAGNI, DRY, and SOLID principles across deliverables.
3. Enforce approval gates (Phases 1–3 only): PRD ≥ 90; Architecture ≥ 90; SM plan confirmed. At these gates, REQUIRE the user to reply with the literal "yes" (case-insensitive) to save the document AND proceed to the next phase; any other reply = do not save and do not proceed. Phase 0 has no gate.
4. Language follows the user's input language for all prompts and confirmations.
5. Retry Codex up to 5 times on transient failure; if it still fails, stop and report clearly.
6. Prefer "summarize + user confirmation" for long contexts before expansion; chunk only when necessary.
7. Saving is performed by the Orchestrator by default. In save phases, Dev/QA may also write files. Only one task runs at a time (no concurrent writes).
8. Use kebab-case `feature_name`. If there is no clear title, use `feat-YYYYMMDD-<short-summary>`.
9. Store artifacts under `./.claude/specs/{feature_name}/` with canonical filenames.
</important_instructions>

<global_instructions>
- Inputs may include options: `--skip-tests`, `--direct-dev`, `--skip-scan`.
- Derive `feature_name` from the feature title; compute `spec_dir=./.claude/specs/{feature_name}/`.
- Artifacts:
  - `00-repo-scan.md` (unless `--skip-scan`)
  - `01-product-requirements.md` (PRD, after approval)
  - `02-system-architecture.md` (Architecture, after approval)
  - `03-sprint-plan.md` (SM plan, after approval; skipped if `--direct-dev`)
- Always echo saved paths after writing.
</global_instructions>

<coding_instructions>
- The Dev phase must execute tasks via the bash Codex CLI: `codex e --full-auto --skip-git-repo-check -m gpt-5 "<TASK with brief CONTEXT>"`.
- The QA phase must execute tasks via the bash Codex CLI: `codex e --full-auto --skip-git-repo-check -m gpt-5 "<TASK with brief CONTEXT>"`.
- Treat `-m gpt-5` purely as a model parameter; avoid "agent" wording.
- Keep Codex prompts concise and include necessary paths and short summaries.
- Apply the global retry policy (up to 5 attempts); if it still fails, stop and report.
</coding_instructions>

<result_instructions>
- Provide concise progress updates between phases.
- Before each approval gate, present: a short summary + quality score (if applicable) + a clear confirmation question.
- Gates apply to Phases 1–3 (PO/Architect/SM) only. Proceed only on an explicit "yes" (case-insensitive). On "yes": save to the canonical path, echo it, and advance to the next phase.
- Any non-"yes" reply: do not save and do not proceed; offer refinement, re-asking, or cancellation options.
- Phase 0 has no gate: save the scan summary (unless `--skip-scan`) and continue automatically to Phase 1.
</result_instructions>

<thinking_instructions>
- Identify the lowest-confidence or lowest-scoring areas and focus questions there (2–3 at a time max).
- Make assumptions explicit and request confirmation for high-impact items.
- Cross-check consistency across the PRD, Architecture, and SM plan before moving to Dev.
</thinking_instructions>

<context>
- Repository-aware behavior: if not `--skip-scan`, perform a local repository scan first and cache the summary as `00-repo-scan.md` for downstream use.
- Reference internal guidance implicitly (PO/Architect/SM/Dev/QA responsibilities), but avoid copying long texts verbatim. Embed essential behaviors in the prompts below.
</context>

<workflows>
1) Phase 0 — Repository Scan (optional, default on)
- Run locally if not `--skip-scan`.
- Task: Analyze project structure, stack, patterns, documentation, and workflows using UltraThink.
- Output: a succinct Markdown summary.
- Save and proceed automatically: write `spec_dir/00-repo-scan.md` and then continue to Phase 1 (no confirmation required).

2) Phase 1 — Product Requirements (PO)
- Goal: PRD quality ≥ 90 with category breakdown.
- Local prompt:
  - Role: Sarah (BMAD PO) — meticulous, analytical, user-focused.
  - Include: user request; scan summary/path if available.
  - Produce: PRD draft (exec summary, business objectives, personas, functional epics/stories+AC, non-functional, constraints, scope & phasing, risks, dependencies, appendix).
  - Score: 100-point breakdown (Business Value & Goals 30; Functional 25; UX 20; Technical Constraints 15; Scope & Priorities 10) + rationale.
  - Ask: 2–5 focused clarification questions on the lowest-scoring areas.
  - No saving during drafting.
- Loop: Ask the user, refine, rescore until ≥ 90.
- Gate: Ask for confirmation (user language). Only if the user replies "yes": save `01-product-requirements.md` and move to Phase 2; otherwise stay here and continue refinement.

3) Phase 2 — System Architecture (Architect)
- Goal: Architecture quality ≥ 90 with category breakdown.
- Local prompt:
  - Role: Winston (BMAD Architect) — comprehensive, pragmatic; trade-offs; constraint-aware.
  - Include: PRD content; scan summary/path.
  - Produce: initial architecture (components/boundaries, data flows, security model, deployment, tech choices with justifications, diagram guidance, implementation guidance).
  - Score: 100-point breakdown (Design 30; Tech Selection 25; Scalability/Performance 20; Security/Reliability 15; Feasibility 10) + rationale.
  - Ask: targeted technical questions for critical decisions.
  - No saving during drafting.
- Loop: Ask the user, refine, rescore until ≥ 90.
- Gate: Ask for confirmation (user language). Only if the user replies "yes": save `02-system-architecture.md` and move to Phase 3; otherwise stay here and continue refinement.

4) Phase 3 — Sprint Planning (SM; skipped if `--direct-dev`)
- Goal: An actionable sprint plan (stories, 4–8h tasks, estimates, dependencies, risks).
- Local prompt:
  - Role: BMAD SM — organized, methodical; dependency mapping; capacity & risk aware.
  - Include: scan summary/path; PRD path; Architecture path.
  - Produce: exec summary; epic breakdown; detailed stories (AC, tech notes, tasks, DoD); sprint plan; critical path; assumptions/questions (2–4).
  - No saving during drafting.
- Gate: Ask for confirmation (user language). Only if the user replies "yes": save `03-sprint-plan.md` and move to Phase 4; otherwise stay here and continue refinement.

5) Phase 4 — Development (Dev)
- Goal: Implement per the PRD/Architecture/SM plan with tests; report progress.
- Execute via the bash Codex CLI (required):
  - Command: `codex e --full-auto --skip-git-repo-check -m gpt-5 "Implement per PRD/Architecture/Sprint plan with tests; report progress and blockers. Context: [paths + brief summaries]."`
  - Include paths: `00-repo-scan.md` (if it exists), `01-product-requirements.md`, `02-system-architecture.md`, `03-sprint-plan.md` (if it exists).
  - Follow the retry policy (5 attempts); if it still fails, stop and report.
- The Orchestrator remains responsible for approvals and saving as needed.

6) Phase 5 — Quality Assurance (QA; skipped if `--skip-tests`)
- Goal: Validate acceptance criteria; report results.
- Execute via the bash Codex CLI (required):
  - Command: `codex e --full-auto --skip-git-repo-check -m gpt-5 "Create and run tests to validate acceptance criteria; report results with failures and remediation. Context: [paths + brief summaries]."`
  - Include paths: same as Dev.
  - Follow the retry policy (5 attempts); if it still fails, stop and report.
- The Orchestrator collects results and summarizes quality status.
</workflows>
@@ -1,26 +0,0 @@
{
  "name": "requirements-clarity",
  "source": "./",
  "description": "Transforms vague requirements into actionable PRDs through systematic clarification with 100-point scoring system",
  "version": "1.0.0",
  "author": {
    "name": "Claude Code Dev Workflows",
    "url": "https://github.com/cexll/myclaude"
  },
  "homepage": "https://github.com/cexll/myclaude",
  "repository": "https://github.com/cexll/myclaude",
  "license": "MIT",
  "keywords": [
    "requirements",
    "clarification",
    "prd",
    "specifications",
    "quality-gates",
    "requirements-engineering"
  ],
  "category": "essentials",
  "strict": false,
  "skills": [
    "./skills/SKILL.md"
  ]
}
@@ -1,323 +0,0 @@
---
name: Requirements Clarity
description: Clarify ambiguous requirements through focused dialogue before implementation. Use when requirements are unclear, features are complex (>2 days), or involve cross-team coordination. Ask two core questions - Why? (YAGNI check) and Simpler? (KISS check) - to ensure clarity before coding.
---

# Requirements Clarity Skill

## Description

Automatically transforms vague requirements into actionable PRDs through systematic clarification with a 100-point scoring system.

## Activation

Auto-activate when detecting vague requirements:

1. **Vague Feature Requests**
   - User says: "add login feature", "implement payment", "create dashboard"
   - Missing: How, with what technology, what constraints?

2. **Missing Technical Context**
   - No technology stack mentioned
   - No integration points identified
   - No performance/security constraints

3. **Incomplete Specifications**
   - No acceptance criteria
   - No success metrics
   - No edge cases considered
   - No error handling mentioned

4. **Ambiguous Scope**
   - Unclear boundaries ("user management" - what exactly?)
   - No distinction between MVP and future enhancements
   - Missing "what's NOT included"

**Do NOT activate when**:
- Specific file paths mentioned (e.g., "auth.go:45")
- Code snippets included
- Existing functions/classes referenced
- Bug fixes with clear reproduction steps

## Core Principles

1. **Systematic Questioning**
   - Ask focused, specific questions
   - One category at a time (2-3 questions per round)
   - Build on previous answers
   - Avoid overwhelming users

2. **Quality-Driven Iteration**
   - Continuously assess the clarity score (0-100)
   - Identify gaps systematically
   - Iterate until ≥ 90 points
   - Document all clarification rounds

3. **Actionable Output**
   - Generate concrete specifications
   - Include measurable acceptance criteria
   - Provide executable phases
   - Enable direct implementation

## Clarification Process

### Step 1: Initial Requirement Analysis

**Input**: User's requirement description

**Tasks**:
1. Parse and understand the core requirement
2. Generate a feature name (kebab-case format)
3. Determine the document version (default `1.0` unless the user specifies otherwise)
4. Ensure `./docs/prds/` exists for PRD output
5. Perform an initial clarity assessment (0-100)

**Assessment Rubric**:
```
Functional Clarity: /30 points
- Clear inputs/outputs: 10 pts
- User interaction defined: 10 pts
- Success criteria stated: 10 pts

Technical Specificity: /25 points
- Technology stack mentioned: 8 pts
- Integration points identified: 8 pts
- Constraints specified: 9 pts

Implementation Completeness: /25 points
- Edge cases considered: 8 pts
- Error handling mentioned: 9 pts
- Data validation specified: 8 pts

Business Context: /20 points
- Problem statement clear: 7 pts
- Target users identified: 7 pts
- Success metrics defined: 6 pts
```
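
To make the rubric concrete, here is a hypothetical first-pass assessment of a request like "add login feature" (illustrative numbers, not a prescribed scoring):

```
Functional Clarity:          12/30  (inputs/outputs vague, no success criteria)
Technical Specificity:        0/25  (no stack, integrations, or constraints)
Implementation Completeness:  0/25  (no edge cases, errors, or validation)
Business Context:             7/20  (problem implied, users and metrics missing)
Total:                       19/100 → well below 90, so clarification continues
```
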
**Initial Response Format**:
```markdown
I understand your requirement. Let me help you refine this specification.

**Current Clarity Score**: X/100

**Clear Aspects**:
- [List what's clear]

**Needs Clarification**:
- [List gaps]

Let me systematically clarify these points...
```

### Step 2: Gap Analysis

Identify missing information across four dimensions:

**1. Functional Scope**
- What is the core functionality?
- What are the boundaries?
- What is out of scope?
- What are the edge cases?

**2. User Interaction**
- How do users interact?
- What are the inputs?
- What are the outputs?
- What are the success/failure scenarios?

**3. Technical Constraints**
- Performance requirements?
- Compatibility requirements?
- Security considerations?
- Scalability needs?

**4. Business Value**
- What problem does this solve?
- Who are the target users?
- What are the success metrics?
- What is the priority?

### Step 3: Interactive Clarification

**Question Strategy**:
1. Start with the highest-impact gaps
2. Ask 2-3 questions per round
3. Build context progressively
4. Use the user's language
5. Provide examples when helpful

**Question Format**:
```markdown
I need to clarify the following points to complete the requirements document:

1. **[Category]**: [Specific question]?
   - For example: [Example if helpful]

2. **[Category]**: [Specific question]?

3. **[Category]**: [Specific question]?

Please provide your answers, and I'll continue refining the PRD.
```

**After Each User Response**:
1. Update the clarity score
2. Capture new information in the working PRD outline
3. Identify remaining gaps
4. If score < 90: Continue with the next round of questions
5. If score ≥ 90: Proceed to PRD generation

**Score Update Format**:
```markdown
Thank you for the additional information!

**Clarity Score Update**: X/100 → Y/100

**Newly Clarified Content**:
- [Summarize new information]

**Remaining Points to Clarify**:
- [List remaining gaps if score < 90]

[If score < 90: Continue with next round of questions]
[If score ≥ 90: "Perfect! I will now generate the complete PRD document..."]
```

### Step 4: PRD Generation

Once the clarity score is ≥ 90, generate the comprehensive PRD.

**Output File**:

1. **Final PRD**: `./docs/prds/{feature_name}-v{version}-prd.md`

Use the `Write` tool to create or update this file. Derive `{version}` from the document version recorded in the PRD (default `1.0`).

## PRD Document Structure

```markdown
# {Feature Name} - Product Requirements Document (PRD)

## Requirements Description

### Background
- **Business Problem**: [Describe the business problem to solve]
- **Target Users**: [Target user groups]
- **Value Proposition**: [Value this feature brings]

### Feature Overview
- **Core Features**: [List of main features]
- **Feature Boundaries**: [What is and isn't included]
- **User Scenarios**: [Typical usage scenarios]

### Detailed Requirements
- **Input/Output**: [Specific input/output specifications]
- **User Interaction**: [User operation flow]
- **Data Requirements**: [Data structures and validation rules]
- **Edge Cases**: [Edge case handling]

## Design Decisions

### Technical Approach
- **Architecture Choice**: [Technical architecture decisions and rationale]
- **Key Components**: [List of main technical components]
- **Data Storage**: [Data models and storage solutions]
- **Interface Design**: [API/interface specifications]

### Constraints
- **Performance Requirements**: [Response time, throughput, etc.]
- **Compatibility**: [System compatibility requirements]
- **Security**: [Security considerations]
- **Scalability**: [Future expansion considerations]

### Risk Assessment
- **Technical Risks**: [Potential technical risks and mitigation plans]
- **Dependency Risks**: [External dependencies and alternatives]
- **Schedule Risks**: [Timeline risks and response strategies]

## Acceptance Criteria

### Functional Acceptance
- [ ] Feature 1: [Specific acceptance conditions]
- [ ] Feature 2: [Specific acceptance conditions]
- [ ] Feature 3: [Specific acceptance conditions]

### Quality Standards
- [ ] Code Quality: [Code standards and review requirements]
- [ ] Test Coverage: [Testing requirements and coverage]
- [ ] Performance Metrics: [Performance test pass criteria]
- [ ] Security Review: [Security review requirements]

### User Acceptance
- [ ] User Experience: [UX acceptance criteria]
- [ ] Documentation: [Documentation delivery requirements]
- [ ] Training Materials: [If needed, training material requirements]

## Execution Phases

### Phase 1: Preparation
**Goal**: Environment preparation and technical validation
- [ ] Task 1: [Specific task description]
- [ ] Task 2: [Specific task description]
- **Deliverables**: [Phase deliverables]
- **Time**: [Estimated time]

### Phase 2: Core Development
**Goal**: Implement core functionality
- [ ] Task 1: [Specific task description]
- [ ] Task 2: [Specific task description]
- **Deliverables**: [Phase deliverables]
- **Time**: [Estimated time]

### Phase 3: Integration & Testing
**Goal**: Integration and quality assurance
- [ ] Task 1: [Specific task description]
- [ ] Task 2: [Specific task description]
- **Deliverables**: [Phase deliverables]
- **Time**: [Estimated time]

### Phase 4: Deployment
**Goal**: Release and monitoring
- [ ] Task 1: [Specific task description]
- [ ] Task 2: [Specific task description]
- **Deliverables**: [Phase deliverables]
- **Time**: [Estimated time]

---

**Document Version**: 1.0
**Created**: {timestamp}
**Clarification Rounds**: {clarification_rounds}
**Quality Score**: {quality_score}/100
```

## Behavioral Guidelines

### DO
- Ask specific, targeted questions
- Build on previous answers
- Provide examples to guide users
- Maintain a conversational tone
- Summarize clarification rounds within the PRD
- Use clear, professional English
- Generate concrete specifications
- Stay in clarification mode until the score is ≥ 90

### DON'T
- Ask all questions at once
- Make assumptions without confirmation
- Generate the PRD before a 90+ score
- Skip any required sections
- Use vague or abstract language
- Proceed without user responses
- Exit skill mode prematurely

## Success Criteria

- Clarity score ≥ 90/100
- All PRD sections complete with substance
- Acceptance criteria expressed as checklists (using `- [ ]` format)
- Execution phases actionable with concrete tasks
- User approves the final PRD
- Ready for development handoff
@@ -180,11 +180,50 @@ EOF

### Parallel Execution

> Important:
> - `--parallel` only reads task definitions from stdin.
> - It does not accept extra command-line arguments (no inline `workdir`, `task`, or other params).
> - Put all task metadata and content in stdin; nothing belongs after `--parallel` on the command line.

**Correct vs Incorrect Usage**

**Correct:**
```bash
# Option 1: file redirection
codex-wrapper --parallel < tasks.txt

# Option 2: heredoc (recommended for multiple tasks)
codex-wrapper --parallel <<'EOF'
---TASK---
id: task1
workdir: /path/to/dir
---CONTENT---
task content
EOF

# Option 3: pipe
echo "---TASK---..." | codex-wrapper --parallel
```

**Incorrect (will trigger shell parsing errors):**
```bash
# Bad: no extra args allowed after --parallel
codex-wrapper --parallel - /path/to/dir <<'EOF'
...
EOF

# Bad: --parallel does not take a task argument
codex-wrapper --parallel "task description"

# Bad: workdir must live inside the task config
codex-wrapper --parallel /path/to/dir < tasks.txt
```

For multiple independent or dependent tasks, use `--parallel` mode with the delimiter format:

**Typical Workflow (analyze → implement → test, chained in a single parallel call)**:
```bash
codex-wrapper --parallel - <<'EOF'
codex-wrapper --parallel <<'EOF'
---TASK---
id: analyze_1732876800
workdir: /home/user/project
@@ -207,7 +246,7 @@ EOF
A single `codex-wrapper --parallel` call schedules all three stages concurrently, using `dependencies` to enforce sequential ordering without multiple invocations.

```bash
codex-wrapper --parallel - <<'EOF'
codex-wrapper --parallel <<'EOF'
---TASK---
id: backend_1732876800
workdir: /home/user/project/backend
@@ -235,6 +274,8 @@ EOF
- `workdir: <path>`: Optional, working directory (default: `.`)
  - Best practice: use absolute paths (e.g., `/home/user/project/backend`)
  - Avoids ambiguity and ensures consistent behavior across environments
  - Must be specified inside each task block; do not pass `workdir` as a CLI argument to `--parallel`
  - Each task can set its own `workdir` when different directories are needed
- `dependencies: <id1>, <id2>`: Optional, comma-separated task IDs
- `session_id: <uuid>`: Optional, resume a previous session
- `---CONTENT---`: Separates metadata from task content
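
As a consolidated sketch of these fields (the task IDs, paths, and session ID here are hypothetical), a two-task payload where the second task depends on the first and resumes an earlier session might look like:

```bash
codex-wrapper --parallel <<'EOF'
---TASK---
id: build_1
workdir: /home/user/project
---CONTENT---
Build the project and report any compile errors.
---TASK---
id: verify_1
workdir: /home/user/project
dependencies: build_1
session_id: 019xxx-previous-session-id
---CONTENT---
Re-run the checks from the earlier session once build_1 completes.
EOF
```
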
@@ -249,7 +290,7 @@ EOF
**Resume Failed Tasks**:
```bash
# Use the session_id from previous output to resume
codex-wrapper --parallel - <<'EOF'
codex-wrapper --parallel <<'EOF'
---TASK---
id: T2
session_id: 019xxx-previous-session-id
@@ -1,332 +0,0 @@
#!/usr/bin/env python3
# /// script
# requires-python = ">=3.8"
# dependencies = []
# ///
"""
Codex CLI wrapper with cross-platform support and session management.
**FIXED**: Auto-detect long inputs and use stdin mode to avoid shell argument issues.

Usage:
    New session: uv run codex.py "task" [workdir]
    Stdin mode: uv run codex.py - [workdir]
    Resume: uv run codex.py resume <session_id> "task" [workdir]
    Resume stdin: uv run codex.py resume <session_id> - [workdir]
    Alternative: python3 codex.py "task"
    Direct exec: ./codex.py "task"

Model configuration: Set the CODEX_MODEL environment variable (default: gpt-5.1-codex)
"""
import subprocess
import json
import sys
import os
from typing import Optional

DEFAULT_MODEL = os.environ.get('CODEX_MODEL', 'gpt-5.1-codex')
DEFAULT_WORKDIR = '.'
DEFAULT_TIMEOUT = 7200  # 2 hours in seconds
FORCE_KILL_DELAY = 5


def log_error(message: str):
    """Write an error message to stderr."""
    sys.stderr.write(f"ERROR: {message}\n")


def log_warn(message: str):
    """Write a warning message to stderr."""
    sys.stderr.write(f"WARN: {message}\n")


def log_info(message: str):
    """Write an info message to stderr."""
    sys.stderr.write(f"INFO: {message}\n")


def resolve_timeout() -> int:
    """Resolve the timeout configuration (in seconds)."""
    raw = os.environ.get('CODEX_TIMEOUT', '')
    if not raw:
        return DEFAULT_TIMEOUT

    try:
        parsed = int(raw)
        if parsed <= 0:
            log_warn(f"Invalid CODEX_TIMEOUT '{raw}', falling back to {DEFAULT_TIMEOUT}s")
            return DEFAULT_TIMEOUT
        # The environment variable may be in milliseconds; convert to seconds
        return parsed // 1000 if parsed > 10000 else parsed
    except ValueError:
        log_warn(f"Invalid CODEX_TIMEOUT '{raw}', falling back to {DEFAULT_TIMEOUT}s")
        return DEFAULT_TIMEOUT


def normalize_text(text) -> Optional[str]:
    """Normalize text: accepts a string or a list of strings."""
    if isinstance(text, str):
        return text
    if isinstance(text, list):
        return ''.join(text)
    return None


def parse_args():
    """Parse command-line arguments."""
    if len(sys.argv) < 2:
        log_error('Task required')
        sys.exit(1)

    # Detect resume mode
    if sys.argv[1] == 'resume':
        if len(sys.argv) < 4:
            log_error('Resume mode requires: resume <session_id> <task>')
            sys.exit(1)
        task_arg = sys.argv[3]
        return {
            'mode': 'resume',
            'session_id': sys.argv[2],
            'task': task_arg,
            'explicit_stdin': task_arg == '-',
            'workdir': sys.argv[4] if len(sys.argv) > 4 else DEFAULT_WORKDIR,
        }

    task_arg = sys.argv[1]
    return {
        'mode': 'new',
        'task': task_arg,
        'explicit_stdin': task_arg == '-',
        'workdir': sys.argv[2] if len(sys.argv) > 2 else DEFAULT_WORKDIR,
    }


def read_piped_task() -> Optional[str]:
    """
    Read task text from stdin:
    - If stdin is a pipe (not a tty) and has content, return the string read
    - Otherwise return None
    """
    stdin = sys.stdin
    if stdin is None or stdin.isatty():
        log_info("Stdin is tty or None, skipping pipe read")
        return None
    log_info("Reading from stdin pipe...")
    data = stdin.read()
    if not data:
        log_info("Stdin pipe returned empty data")
        return None

    log_info(f"Read {len(data)} bytes from stdin pipe")
    return data


def should_stream_via_stdin(task_text: str, piped: bool) -> bool:
    """
    Decide whether to pass the task via stdin when any of these hold:
    - There is piped input
    - The text contains a newline
    - The text contains a backslash
    - The text is longer than 800 characters
    """
    if piped:
        return True
    if '\n' in task_text:
        return True
    if '\\' in task_text:
        return True
    if len(task_text) > 800:
        return True
    return False


def build_codex_args(params: dict, target_arg: str) -> list:
    """
    Build the codex CLI arguments.

    Args:
        params: parameter dict
        target_arg: the final argument passed to codex ('-' or the literal task text)
    """
    if params['mode'] == 'resume':
        return [
            'codex', 'e',
            '-m', DEFAULT_MODEL,
            '--skip-git-repo-check',
            '--json',
            'resume',
            params['session_id'],
            target_arg
        ]
    else:
        base_args = [
            'codex', 'e',
            '-m', DEFAULT_MODEL,
            '--dangerously-bypass-approvals-and-sandbox',
            '--skip-git-repo-check',
            '-C', params['workdir'],
            '--json',
            target_arg
        ]

        return base_args


def run_codex_process(codex_args, task_text: str, use_stdin: bool, timeout_sec: int):
    """
    Start the codex subprocess, handle stdin / JSON-line output and errors, and
    return (last_agent_message, thread_id) on success.
    Failure paths are responsible for logging and exit codes.
    """
    thread_id: Optional[str] = None
    last_agent_message: Optional[str] = None
    process: Optional[subprocess.Popen] = None

    try:
        # Start the codex subprocess (text-mode pipes)
        log_info(f"Starting codex with args: {' '.join(codex_args[:5])}...")
        process = subprocess.Popen(
            codex_args,
            stdin=subprocess.PIPE if use_stdin else None,
            stdout=subprocess.PIPE,
            stderr=sys.stderr,
            text=True,
            bufsize=1,
        )
        log_info(f"Process started with PID: {process.pid}")

        # In stdin mode, write the task to stdin and close it
        if use_stdin and process.stdin is not None:
            log_info(f"Writing {len(task_text)} chars to stdin...")
            process.stdin.write(task_text)
            process.stdin.flush()  # Force-flush the buffer to avoid deadlocks on large tasks
            process.stdin.close()
            log_info("Stdin closed")

        # Parse the JSON output line by line
        if process.stdout is None:
            log_error('Codex stdout pipe not available')
            sys.exit(1)

        log_info("Reading stdout...")

        for line in process.stdout:
            line = line.strip()
            if not line:
                continue

            try:
                event = json.loads(line)

                # Capture the thread_id
                if event.get('type') == 'thread.started':
                    thread_id = event.get('thread_id')

                # Capture the agent_message
                if (event.get('type') == 'item.completed' and
                        event.get('item', {}).get('type') == 'agent_message'):
                    text = normalize_text(event['item'].get('text'))
                    if text:
                        last_agent_message = text

            except json.JSONDecodeError:
                log_warn(f"Failed to parse line: {line}")

        # Wait for the process to exit and check the return code
        returncode = process.wait(timeout=timeout_sec)
        if returncode != 0:
            log_error(f'Codex exited with status {returncode}')
            sys.exit(returncode)

        if not last_agent_message:
            log_error('Codex completed without agent_message output')
            sys.exit(1)

        return last_agent_message, thread_id

    except subprocess.TimeoutExpired:
        log_error('Codex execution timeout')
        if process is not None:
            process.kill()
            try:
                process.wait(timeout=FORCE_KILL_DELAY)
            except subprocess.TimeoutExpired:
                pass
        sys.exit(124)

    except FileNotFoundError:
        log_error("codex command not found in PATH")
        sys.exit(127)

    except KeyboardInterrupt:
        log_error("Codex interrupted by user")
        if process is not None:
            process.terminate()
            try:
                process.wait(timeout=FORCE_KILL_DELAY)
            except subprocess.TimeoutExpired:
                process.kill()
        sys.exit(130)


def main():
    log_info("Script started")
    params = parse_args()
    log_info(f"Parsed args: mode={params['mode']}, task_len={len(params['task'])}")
    timeout_sec = resolve_timeout()
    log_info(f"Timeout: {timeout_sec}s")

    explicit_stdin = params.get('explicit_stdin', False)

    if explicit_stdin:
        log_info("Explicit stdin mode: reading task from stdin")
        task_text = sys.stdin.read()
        if not task_text:
            log_error("Explicit stdin mode requires task input from stdin")
            sys.exit(1)
        piped = not sys.stdin.isatty()
    else:
        piped_task = read_piped_task()
        piped = piped_task is not None
        task_text = piped_task if piped else params['task']

    use_stdin = explicit_stdin or should_stream_via_stdin(task_text, piped)

    if use_stdin:
        reasons = []
        if piped:
            reasons.append('piped input')
        if explicit_stdin:
            reasons.append('explicit "-"')
        if '\n' in task_text:
            reasons.append('newline')
        if '\\' in task_text:
            reasons.append('backslash')
        if len(task_text) > 800:
            reasons.append('length>800')

        if reasons:
            log_warn(f"Using stdin mode for task due to: {', '.join(reasons)}")

    target_arg = '-' if use_stdin else params['task']
    codex_args = build_codex_args(params, target_arg)

    log_info('codex running...')

    last_agent_message, thread_id = run_codex_process(
        codex_args=codex_args,
        task_text=task_text,
        use_stdin=use_stdin,
        timeout_sec=timeout_sec,
    )

    # Print the agent_message
    sys.stdout.write(f"{last_agent_message}\n")

    # Print the session_id (if present)
    if thread_id:
        sys.stdout.write(f"\n---\nSESSION_ID: {thread_id}\n")

    sys.exit(0)


if __name__ == '__main__':
    main()
76
tests/test_config.cover
Normal file
@@ -0,0 +1,76 @@
    1: import copy
    1: import json
    1: import unittest
    1: from pathlib import Path

    1: import jsonschema


    1: CONFIG_PATH = Path(__file__).resolve().parents[1] / "config.json"
    1: SCHEMA_PATH = Path(__file__).resolve().parents[1] / "config.schema.json"
    1: ROOT = CONFIG_PATH.parent


    1: def load_config():
           with CONFIG_PATH.open(encoding="utf-8") as f:
               return json.load(f)


    1: def load_schema():
           with SCHEMA_PATH.open(encoding="utf-8") as f:
               return json.load(f)


    2: class ConfigSchemaTest(unittest.TestCase):
    1:     def test_config_matches_schema(self):
               config = load_config()
               schema = load_schema()
               jsonschema.validate(config, schema)

    1:     def test_required_modules_present(self):
               modules = load_config()["modules"]
               self.assertEqual(set(modules.keys()), {"dev", "bmad", "requirements", "essentials", "advanced"})

    1:     def test_enabled_defaults_and_flags(self):
               modules = load_config()["modules"]
               self.assertTrue(modules["dev"]["enabled"])
               self.assertTrue(modules["essentials"]["enabled"])
               self.assertFalse(modules["bmad"]["enabled"])
               self.assertFalse(modules["requirements"]["enabled"])
               self.assertFalse(modules["advanced"]["enabled"])

    1:     def test_operations_have_expected_shape(self):
               config = load_config()
               for name, module in config["modules"].items():
                   self.assertTrue(module["operations"], f"{name} should declare at least one operation")
                   for op in module["operations"]:
                       self.assertIn("type", op)
                       if op["type"] in {"copy_dir", "copy_file"}:
                           self.assertTrue(op.get("source"), f"{name} operation missing source")
                           self.assertTrue(op.get("target"), f"{name} operation missing target")
                       elif op["type"] == "run_command":
                           self.assertTrue(op.get("command"), f"{name} run_command missing command")
                           if "env" in op:
                               self.assertIsInstance(op["env"], dict)
                       else:
                           self.fail(f"Unsupported operation type: {op['type']}")

    1:     def test_operation_sources_exist_on_disk(self):
               config = load_config()
               for module in config["modules"].values():
                   for op in module["operations"]:
                       if op["type"] in {"copy_dir", "copy_file"}:
                           path = (ROOT / op["source"]).expanduser()
                           self.assertTrue(path.exists(), f"Source path not found: {path}")

    1:     def test_schema_rejects_invalid_operation_type(self):
               config = load_config()
               invalid = copy.deepcopy(config)
               invalid["modules"]["dev"]["operations"][0]["type"] = "unknown_op"
               schema = load_schema()
               with self.assertRaises(jsonschema.exceptions.ValidationError):
                   jsonschema.validate(invalid, schema)


    1: if __name__ == "__main__":
    1:     unittest.main()
76
tests/test_config.py
Normal file
@@ -0,0 +1,76 @@
import copy
import json
import unittest
from pathlib import Path

import jsonschema


CONFIG_PATH = Path(__file__).resolve().parents[1] / "config.json"
SCHEMA_PATH = Path(__file__).resolve().parents[1] / "config.schema.json"
ROOT = CONFIG_PATH.parent


def load_config():
    with CONFIG_PATH.open(encoding="utf-8") as f:
        return json.load(f)


def load_schema():
    with SCHEMA_PATH.open(encoding="utf-8") as f:
        return json.load(f)


class ConfigSchemaTest(unittest.TestCase):
    def test_config_matches_schema(self):
        config = load_config()
        schema = load_schema()
        jsonschema.validate(config, schema)

    def test_required_modules_present(self):
        modules = load_config()["modules"]
        self.assertEqual(set(modules.keys()), {"dev", "bmad", "requirements", "essentials", "advanced"})

    def test_enabled_defaults_and_flags(self):
        modules = load_config()["modules"]
        self.assertTrue(modules["dev"]["enabled"])
        self.assertTrue(modules["essentials"]["enabled"])
        self.assertFalse(modules["bmad"]["enabled"])
        self.assertFalse(modules["requirements"]["enabled"])
        self.assertFalse(modules["advanced"]["enabled"])

    def test_operations_have_expected_shape(self):
        config = load_config()
        for name, module in config["modules"].items():
            self.assertTrue(module["operations"], f"{name} should declare at least one operation")
            for op in module["operations"]:
                self.assertIn("type", op)
                if op["type"] in {"copy_dir", "copy_file"}:
                    self.assertTrue(op.get("source"), f"{name} operation missing source")
                    self.assertTrue(op.get("target"), f"{name} operation missing target")
                elif op["type"] == "run_command":
                    self.assertTrue(op.get("command"), f"{name} run_command missing command")
                    if "env" in op:
                        self.assertIsInstance(op["env"], dict)
                else:
                    self.fail(f"Unsupported operation type: {op['type']}")

    def test_operation_sources_exist_on_disk(self):
        config = load_config()
        for module in config["modules"].values():
            for op in module["operations"]:
                if op["type"] in {"copy_dir", "copy_file"}:
                    path = (ROOT / op["source"]).expanduser()
                    self.assertTrue(path.exists(), f"Source path not found: {path}")

    def test_schema_rejects_invalid_operation_type(self):
        config = load_config()
        invalid = copy.deepcopy(config)
        invalid["modules"]["dev"]["operations"][0]["type"] = "unknown_op"
        schema = load_schema()
        with self.assertRaises(jsonschema.exceptions.ValidationError):
            jsonschema.validate(invalid, schema)


if __name__ == "__main__":
    unittest.main()
458
tests/test_install.py
Normal file
@@ -0,0 +1,458 @@
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
import install
|
||||
|
||||
|
||||
ROOT = Path(__file__).resolve().parents[1]
|
||||
SCHEMA_PATH = ROOT / "config.schema.json"
|
||||
|
||||
|
||||
def write_config(tmp_path: Path, config: dict) -> Path:
|
||||
cfg_path = tmp_path / "config.json"
|
||||
cfg_path.write_text(json.dumps(config), encoding="utf-8")
|
||||
shutil.copy(SCHEMA_PATH, tmp_path / "config.schema.json")
|
||||
return cfg_path
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def valid_config(tmp_path):
|
||||
sample_file = tmp_path / "sample.txt"
|
||||
sample_file.write_text("hello", encoding="utf-8")
|
||||
|
||||
sample_dir = tmp_path / "sample_dir"
|
||||
sample_dir.mkdir()
|
||||
(sample_dir / "f.txt").write_text("dir", encoding="utf-8")
|
||||
|
||||
config = {
|
||||
"version": "1.0",
|
||||
"install_dir": "~/.fromconfig",
|
||||
"log_file": "install.log",
|
||||
"modules": {
|
||||
"dev": {
|
||||
"enabled": True,
|
||||
"description": "dev module",
|
||||
"operations": [
|
||||
{"type": "copy_dir", "source": "sample_dir", "target": "devcopy"}
|
||||
],
|
||||
},
|
||||
"bmad": {
|
||||
"enabled": False,
|
||||
"description": "bmad",
|
||||
"operations": [
|
||||
{"type": "copy_file", "source": "sample.txt", "target": "bmad.txt"}
|
||||
],
|
||||
},
|
||||
"requirements": {
|
||||
"enabled": False,
|
||||
"description": "reqs",
|
||||
"operations": [
|
||||
{"type": "copy_file", "source": "sample.txt", "target": "req.txt"}
|
||||
],
|
||||
},
|
||||
"essentials": {
|
||||
"enabled": True,
|
||||
"description": "ess",
|
||||
"operations": [
|
||||
{"type": "copy_file", "source": "sample.txt", "target": "ess.txt"}
|
||||
],
|
||||
},
|
||||
"advanced": {
|
||||
"enabled": False,
|
||||
"description": "adv",
|
||||
"operations": [
|
||||
{"type": "copy_file", "source": "sample.txt", "target": "adv.txt"}
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
cfg_path = write_config(tmp_path, config)
|
||||
return cfg_path, config
|
||||
|
||||
|
||||
def make_ctx(tmp_path: Path) -> dict:
|
||||
install_dir = tmp_path / "install"
|
||||
return {
|
||||
"install_dir": install_dir,
|
||||
"log_file": install_dir / "install.log",
|
||||
"status_file": install_dir / "installed_modules.json",
|
||||
"config_dir": tmp_path,
|
||||
"force": False,
|
||||
}
|
||||
|
||||
|
||||
def test_parse_args_defaults():
|
||||
args = install.parse_args([])
|
||||
assert args.install_dir == install.DEFAULT_INSTALL_DIR
|
||||
assert args.config == "config.json"
|
||||
assert args.module is None
|
||||
assert args.list_modules is False
|
||||
assert args.force is False
|
||||
|
||||
|
||||
def test_parse_args_custom():
|
||||
args = install.parse_args(
|
||||
[
|
||||
"--install-dir",
|
||||
"/tmp/custom",
|
||||
"--module",
|
||||
"dev,bmad",
|
||||
"--config",
|
||||
"/tmp/cfg.json",
|
||||
"--list-modules",
|
||||
"--force",
|
||||
]
|
||||
)
|
||||
|
||||
assert args.install_dir == "/tmp/custom"
|
||||
assert args.module == "dev,bmad"
|
||||
assert args.config == "/tmp/cfg.json"
|
||||
assert args.list_modules is True
|
||||
assert args.force is True
|
||||
|
||||
|
||||
def test_load_config_success(valid_config):
|
||||
cfg_path, config_data = valid_config
|
||||
loaded = install.load_config(str(cfg_path))
|
||||
assert loaded["modules"]["dev"]["description"] == config_data["modules"]["dev"]["description"]
|
||||
|
||||
|
||||
def test_load_config_invalid_json(tmp_path):
|
||||
bad = tmp_path / "bad.json"
|
||||
bad.write_text("{broken", encoding="utf-8")
|
||||
shutil.copy(SCHEMA_PATH, tmp_path / "config.schema.json")
|
||||
with pytest.raises(ValueError):
|
||||
install.load_config(str(bad))
|
||||
|
||||
|
||||
def test_load_config_schema_error(tmp_path):
|
||||
cfg = tmp_path / "cfg.json"
|
||||
cfg.write_text(json.dumps({"version": "1.0"}), encoding="utf-8")
|
||||
shutil.copy(SCHEMA_PATH, tmp_path / "config.schema.json")
|
||||
with pytest.raises(ValueError):
|
||||
install.load_config(str(cfg))
|
||||
|
||||
|
||||
def test_resolve_paths_respects_priority(tmp_path):
|
||||
config = {
|
||||
"install_dir": str(tmp_path / "from_config"),
|
||||
"log_file": "logs/install.log",
|
||||
"modules": {},
|
||||
"version": "1.0",
|
||||
}
|
||||
cfg_path = write_config(tmp_path, config)
|
||||
args = install.parse_args(["--config", str(cfg_path)])
|
||||
|
||||
ctx = install.resolve_paths(config, args)
|
||||
assert ctx["install_dir"] == (tmp_path / "from_config").resolve()
|
||||
assert ctx["log_file"] == (tmp_path / "from_config" / "logs" / "install.log").resolve()
|
||||
assert ctx["config_dir"] == tmp_path.resolve()
|
||||
|
||||
cli_args = install.parse_args(
|
||||
["--install-dir", str(tmp_path / "cli_dir"), "--config", str(cfg_path)]
|
||||
)
|
||||
ctx_cli = install.resolve_paths(config, cli_args)
|
||||
assert ctx_cli["install_dir"] == (tmp_path / "cli_dir").resolve()
|
||||
|
||||
|
||||
def test_list_modules_output(valid_config, capsys):
|
||||
_, config_data = valid_config
|
||||
install.list_modules(config_data)
|
||||
captured = capsys.readouterr().out
|
||||
assert "dev" in captured
|
||||
assert "essentials" in captured
|
||||
assert "✓" in captured
|
||||
|
||||
|
||||
def test_select_modules_behaviour(valid_config):
|
||||
_, config_data = valid_config
|
||||
|
||||
selected_default = install.select_modules(config_data, None)
|
||||
assert set(selected_default.keys()) == {"dev", "essentials"}
|
||||
|
||||
selected_specific = install.select_modules(config_data, "bmad")
|
||||
assert set(selected_specific.keys()) == {"bmad"}
|
||||
|
||||
with pytest.raises(ValueError):
|
||||
install.select_modules(config_data, "missing")
|
||||
|
||||
|
||||
def test_ensure_install_dir(tmp_path, monkeypatch):
|
||||
target = tmp_path / "install_here"
|
||||
install.ensure_install_dir(target)
|
||||
assert target.is_dir()
|
||||
|
||||
file_path = tmp_path / "conflict"
|
||||
file_path.write_text("x", encoding="utf-8")
|
||||
with pytest.raises(NotADirectoryError):
|
||||
install.ensure_install_dir(file_path)
|
||||
|
||||
blocked = tmp_path / "blocked"
|
||||
real_access = os.access
|
||||
|
||||
def fake_access(path, mode):
|
||||
if Path(path) == blocked:
|
||||
return False
|
||||
return real_access(path, mode)
|
||||
|
||||
monkeypatch.setattr(os, "access", fake_access)
|
||||
with pytest.raises(PermissionError):
|
||||
install.ensure_install_dir(blocked)
|
||||
|
||||
|
||||
def test_op_copy_dir_respects_force(tmp_path):
|
||||
ctx = make_ctx(tmp_path)
|
||||
install.ensure_install_dir(ctx["install_dir"])
|
||||
|
||||
src = tmp_path / "src"
|
||||
src.mkdir()
|
||||
(src / "a.txt").write_text("one", encoding="utf-8")
|
||||
|
||||
op = {"type": "copy_dir", "source": "src", "target": "dest"}
|
||||
install.op_copy_dir(op, ctx)
|
||||
target_file = ctx["install_dir"] / "dest" / "a.txt"
|
||||
assert target_file.read_text(encoding="utf-8") == "one"
|
||||
|
||||
(src / "a.txt").write_text("two", encoding="utf-8")
|
||||
install.op_copy_dir(op, ctx)
|
||||
assert target_file.read_text(encoding="utf-8") == "one"
|
||||
|
||||
ctx["force"] = True
|
||||
install.op_copy_dir(op, ctx)
|
||||
assert target_file.read_text(encoding="utf-8") == "two"
|
||||
|
||||
|
||||
def test_op_copy_file_behaviour(tmp_path):
|
||||
ctx = make_ctx(tmp_path)
|
||||
install.ensure_install_dir(ctx["install_dir"])
|
||||
|
||||
src = tmp_path / "file.txt"
|
||||
src.write_text("first", encoding="utf-8")
|
||||
|
||||
op = {"type": "copy_file", "source": "file.txt", "target": "out/file.txt"}
|
||||
install.op_copy_file(op, ctx)
|
||||
dst = ctx["install_dir"] / "out" / "file.txt"
|
||||
assert dst.read_text(encoding="utf-8") == "first"
|
||||
|
||||
src.write_text("second", encoding="utf-8")
|
||||
install.op_copy_file(op, ctx)
|
||||
assert dst.read_text(encoding="utf-8") == "first"
|
||||
|
||||
ctx["force"] = True
|
||||
install.op_copy_file(op, ctx)
|
||||
assert dst.read_text(encoding="utf-8") == "second"
|
||||
|
||||
|
||||
def test_op_run_command_success(tmp_path):
|
||||
ctx = make_ctx(tmp_path)
|
||||
install.ensure_install_dir(ctx["install_dir"])
|
||||
install.op_run_command({"type": "run_command", "command": "echo hello"}, ctx)
|
||||
log_content = ctx["log_file"].read_text(encoding="utf-8")
|
||||
assert "hello" in log_content
|
||||
|
||||
|
||||
def test_op_run_command_failure(tmp_path):
|
||||
ctx = make_ctx(tmp_path)
|
||||
install.ensure_install_dir(ctx["install_dir"])
|
||||
with pytest.raises(RuntimeError):
|
||||
install.op_run_command(
|
||||
{"type": "run_command", "command": f"{sys.executable} -c 'import sys; sys.exit(2)'"},
|
||||
ctx,
|
||||
)
|
||||
log_content = ctx["log_file"].read_text(encoding="utf-8")
|
||||
assert "returncode: 2" in log_content


def test_execute_module_success(tmp_path):
    ctx = make_ctx(tmp_path)
    install.ensure_install_dir(ctx["install_dir"])
    src = tmp_path / "src.txt"
    src.write_text("data", encoding="utf-8")

    cfg = {"operations": [{"type": "copy_file", "source": "src.txt", "target": "out.txt"}]}
    result = install.execute_module("demo", cfg, ctx)
    assert result["status"] == "success"
    assert (ctx["install_dir"] / "out.txt").read_text(encoding="utf-8") == "data"


def test_execute_module_failure_logs_and_stops(tmp_path):
    ctx = make_ctx(tmp_path)
    install.ensure_install_dir(ctx["install_dir"])
    cfg = {"operations": [{"type": "unknown", "source": "", "target": ""}]}

    with pytest.raises(ValueError):
        install.execute_module("demo", cfg, ctx)

    log_content = ctx["log_file"].read_text(encoding="utf-8")
    assert "failed on unknown" in log_content


def test_write_log_and_status(tmp_path):
    ctx = make_ctx(tmp_path)
    install.ensure_install_dir(ctx["install_dir"])

    install.write_log({"level": "INFO", "message": "hello"}, ctx)
    content = ctx["log_file"].read_text(encoding="utf-8")
    assert "hello" in content

    results = [
        {"module": "dev", "status": "success", "operations": [], "installed_at": "ts"}
    ]
    install.write_status(results, ctx)
    status_data = json.loads(ctx["status_file"].read_text(encoding="utf-8"))
    assert status_data["modules"]["dev"]["status"] == "success"


def test_main_success(valid_config, tmp_path):
    cfg_path, _ = valid_config
    install_dir = tmp_path / "install_final"
    rc = install.main(
        [
            "--config",
            str(cfg_path),
            "--install-dir",
            str(install_dir),
            "--module",
            "dev",
        ]
    )

    assert rc == 0
    assert (install_dir / "devcopy" / "f.txt").exists()
    assert (install_dir / "installed_modules.json").exists()


def test_main_failure_without_force(tmp_path):
    cfg = {
        "version": "1.0",
        "install_dir": "~/.claude",
        "log_file": "install.log",
        "modules": {
            "dev": {
                "enabled": True,
                "description": "dev",
                "operations": [
                    {
                        "type": "run_command",
                        "command": f"{sys.executable} -c 'import sys; sys.exit(3)'",
                    }
                ],
            },
            "bmad": {
                "enabled": False,
                "description": "bmad",
                "operations": [
                    {"type": "copy_file", "source": "s.txt", "target": "t.txt"}
                ],
            },
            "requirements": {
                "enabled": False,
                "description": "reqs",
                "operations": [
                    {"type": "copy_file", "source": "s.txt", "target": "r.txt"}
                ],
            },
            "essentials": {
                "enabled": False,
                "description": "ess",
                "operations": [
                    {"type": "copy_file", "source": "s.txt", "target": "e.txt"}
                ],
            },
            "advanced": {
                "enabled": False,
                "description": "adv",
                "operations": [
                    {"type": "copy_file", "source": "s.txt", "target": "a.txt"}
                ],
            },
        },
    }

    cfg_path = write_config(tmp_path, cfg)
    install_dir = tmp_path / "fail_install"
    rc = install.main(
        [
            "--config",
            str(cfg_path),
            "--install-dir",
            str(install_dir),
            "--module",
            "dev",
        ]
    )

    assert rc == 1
    assert not (install_dir / "installed_modules.json").exists()


def test_main_force_records_failure(tmp_path):
    cfg = {
        "version": "1.0",
        "install_dir": "~/.claude",
        "log_file": "install.log",
        "modules": {
            "dev": {
                "enabled": True,
                "description": "dev",
                "operations": [
                    {
                        "type": "run_command",
                        "command": f"{sys.executable} -c 'import sys; sys.exit(4)'",
                    }
                ],
            },
            "bmad": {
                "enabled": False,
                "description": "bmad",
                "operations": [
                    {"type": "copy_file", "source": "s.txt", "target": "t.txt"}
                ],
            },
            "requirements": {
                "enabled": False,
                "description": "reqs",
                "operations": [
                    {"type": "copy_file", "source": "s.txt", "target": "r.txt"}
                ],
            },
            "essentials": {
                "enabled": False,
                "description": "ess",
                "operations": [
                    {"type": "copy_file", "source": "s.txt", "target": "e.txt"}
                ],
            },
            "advanced": {
                "enabled": False,
                "description": "adv",
                "operations": [
                    {"type": "copy_file", "source": "s.txt", "target": "a.txt"}
                ],
            },
        },
    }

    cfg_path = write_config(tmp_path, cfg)
    install_dir = tmp_path / "force_install"
    rc = install.main(
        [
            "--config",
            str(cfg_path),
            "--install-dir",
            str(install_dir),
            "--module",
            "dev",
            "--force",
        ]
    )

    assert rc == 0
    status = json.loads((install_dir / "installed_modules.json").read_text(encoding="utf-8"))
    assert status["modules"]["dev"]["status"] == "failed"

224
tests/test_modules.py
Normal file
@@ -0,0 +1,224 @@
import json
import shutil
import sys
from pathlib import Path

import pytest

import install


ROOT = Path(__file__).resolve().parents[1]
SCHEMA_PATH = ROOT / "config.schema.json"


def _write_schema(target_dir: Path) -> None:
    shutil.copy(SCHEMA_PATH, target_dir / "config.schema.json")


def _base_config(install_dir: Path, modules: dict) -> dict:
    return {
        "version": "1.0",
        "install_dir": str(install_dir),
        "log_file": "install.log",
        "modules": modules,
    }


def _prepare_env(tmp_path: Path, modules: dict) -> tuple[Path, Path, Path]:
    """Create a temp config directory with schema and config.json."""

    config_dir = tmp_path / "config"
    install_dir = tmp_path / "install"
    config_dir.mkdir()
    _write_schema(config_dir)

    cfg_path = config_dir / "config.json"
    cfg_path.write_text(
        json.dumps(_base_config(install_dir, modules)), encoding="utf-8"
    )
    return cfg_path, install_dir, config_dir


def _sample_sources(config_dir: Path) -> dict:
    sample_dir = config_dir / "sample_dir"
    sample_dir.mkdir()
    (sample_dir / "nested.txt").write_text("dir-content", encoding="utf-8")

    sample_file = config_dir / "sample.txt"
    sample_file.write_text("file-content", encoding="utf-8")

    return {"dir": sample_dir, "file": sample_file}


def _read_status(install_dir: Path) -> dict:
    return json.loads((install_dir / "installed_modules.json").read_text(encoding="utf-8"))


def test_single_module_full_flow(tmp_path):
    cfg_path, install_dir, config_dir = _prepare_env(
        tmp_path,
        {
            "solo": {
                "enabled": True,
                "description": "single module",
                "operations": [
                    {"type": "copy_dir", "source": "sample_dir", "target": "payload"},
                    {
                        "type": "copy_file",
                        "source": "sample.txt",
                        "target": "payload/sample.txt",
                    },
                    {
                        "type": "run_command",
                        "command": f"{sys.executable} -c \"from pathlib import Path; Path('run.txt').write_text('ok', encoding='utf-8')\"",
                    },
                ],
            }
        },
    )

    _sample_sources(config_dir)
    rc = install.main(["--config", str(cfg_path), "--module", "solo"])

    assert rc == 0
    assert (install_dir / "payload" / "nested.txt").read_text(encoding="utf-8") == "dir-content"
    assert (install_dir / "payload" / "sample.txt").read_text(encoding="utf-8") == "file-content"
    assert (install_dir / "run.txt").read_text(encoding="utf-8") == "ok"

    status = _read_status(install_dir)
    assert status["modules"]["solo"]["status"] == "success"
    assert len(status["modules"]["solo"]["operations"]) == 3


def test_multi_module_install_and_status(tmp_path):
    modules = {
        "alpha": {
            "enabled": True,
            "description": "alpha",
            "operations": [
                {
                    "type": "copy_file",
                    "source": "sample.txt",
                    "target": "alpha.txt",
                }
            ],
        },
        "beta": {
            "enabled": True,
            "description": "beta",
            "operations": [
                {
                    "type": "copy_dir",
                    "source": "sample_dir",
                    "target": "beta_dir",
                }
            ],
        },
    }

    cfg_path, install_dir, config_dir = _prepare_env(tmp_path, modules)
    _sample_sources(config_dir)

    rc = install.main(["--config", str(cfg_path)])
    assert rc == 0

    assert (install_dir / "alpha.txt").read_text(encoding="utf-8") == "file-content"
    assert (install_dir / "beta_dir" / "nested.txt").exists()

    status = _read_status(install_dir)
    assert set(status["modules"].keys()) == {"alpha", "beta"}
    assert all(mod["status"] == "success" for mod in status["modules"].values())


def test_force_overwrites_existing_files(tmp_path):
    modules = {
        "forcey": {
            "enabled": True,
            "description": "force copy",
            "operations": [
                {
                    "type": "copy_file",
                    "source": "sample.txt",
                    "target": "target.txt",
                }
            ],
        }
    }

    cfg_path, install_dir, config_dir = _prepare_env(tmp_path, modules)
    sources = _sample_sources(config_dir)

    install.main(["--config", str(cfg_path), "--module", "forcey"])
    assert (install_dir / "target.txt").read_text(encoding="utf-8") == "file-content"

    sources["file"].write_text("new-content", encoding="utf-8")

    rc = install.main(["--config", str(cfg_path), "--module", "forcey", "--force"])
    assert rc == 0
    assert (install_dir / "target.txt").read_text(encoding="utf-8") == "new-content"

    status = _read_status(install_dir)
    assert status["modules"]["forcey"]["status"] == "success"


def test_failure_triggers_rollback_and_restores_status(tmp_path):
    # First successful run to create a known-good status file.
    ok_modules = {
        "stable": {
            "enabled": True,
            "description": "stable",
            "operations": [
                {
                    "type": "copy_file",
                    "source": "sample.txt",
                    "target": "stable.txt",
                }
            ],
        }
    }

    cfg_path, install_dir, config_dir = _prepare_env(tmp_path, ok_modules)
    _sample_sources(config_dir)
    assert install.main(["--config", str(cfg_path)]) == 0
    pre_status = _read_status(install_dir)
    assert "stable" in pre_status["modules"]

    # Rewrite config to introduce a failing module.
    failing_modules = {
        **ok_modules,
        "broken": {
            "enabled": True,
            "description": "will fail",
            "operations": [
                {
                    "type": "copy_file",
                    "source": "sample.txt",
                    "target": "broken.txt",
                },
                {
                    "type": "run_command",
                    "command": f"{sys.executable} -c 'import sys; sys.exit(5)'",
                },
            ],
        },
    }

    cfg_path.write_text(
        json.dumps(_base_config(install_dir, failing_modules)), encoding="utf-8"
    )

    rc = install.main(["--config", str(cfg_path)])
    assert rc == 1

    # The failed module's file should have been removed by rollback.
    assert not (install_dir / "broken.txt").exists()
    # Previously installed files remain.
    assert (install_dir / "stable.txt").exists()

    restored_status = _read_status(install_dir)
    assert restored_status == pre_status

    log_content = (install_dir / "install.log").read_text(encoding="utf-8")
    assert "Rolling back" in log_content