mirror of
https://github.com/cexll/myclaude.git
synced 2026-02-05 02:30:26 +08:00
Compare commits
265 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
cd3115446d | ||
|
|
2b8bfd714c | ||
|
|
71485558df | ||
|
|
b711b44c0e | ||
|
|
eda2475543 | ||
|
|
2c0553794a | ||
|
|
c96193fca6 | ||
|
|
e2cd5be812 | ||
|
|
3dfa447f10 | ||
|
|
e9a8013c6f | ||
|
|
3d76d46336 | ||
|
|
5a50131a13 | ||
|
|
fca5c13c8d | ||
|
|
c1d3a0a07a | ||
|
|
2856055e2e | ||
|
|
a9c1e8178f | ||
|
|
1afeca88ae | ||
|
|
326ad85c74 | ||
|
|
e66bec0083 | ||
|
|
eb066395c2 | ||
|
|
b49dad842a | ||
|
|
d98086c661 | ||
|
|
0420646258 | ||
|
|
19a8d8e922 | ||
|
|
669b1d82ce | ||
|
|
a21c31fd89 | ||
|
|
773f133111 | ||
|
|
4f5d24531c | ||
|
|
cc24d43c8b | ||
|
|
27d4ac8afd | ||
|
|
2e5d12570d | ||
|
|
7c89c40e8f | ||
|
|
fa617d1599 | ||
|
|
90c630e30e | ||
|
|
25bbbc32a7 | ||
|
|
d8304bf2b9 | ||
|
|
7240e08900 | ||
|
|
e122d8ff25 | ||
|
|
6985a30a6a | ||
|
|
dd4c12b8e2 | ||
|
|
a88315d92d | ||
|
|
d1f13b3379 | ||
|
|
5d362852ab | ||
|
|
238c7b9a13 | ||
|
|
0986fa82ee | ||
|
|
a989ce343c | ||
|
|
abe0839249 | ||
|
|
d75c973f32 | ||
|
|
e7f329940b | ||
|
|
0fc5eaaa2d | ||
|
|
420eb857ff | ||
|
|
661656c587 | ||
|
|
ed4b088631 | ||
|
|
55a574280a | ||
|
|
8f05626075 | ||
|
|
4395c5785d | ||
|
|
b0d7a09ff2 | ||
|
|
f7aeaa5c7e | ||
|
|
c8f75faf84 | ||
|
|
b8b06257ff | ||
|
|
369a3319f9 | ||
|
|
75f08ab81f | ||
|
|
23282ef460 | ||
|
|
c7cb28a1da | ||
|
|
0a4982e96d | ||
|
|
17e52d78d2 | ||
|
|
55246ce9c4 | ||
|
|
890fec81bf | ||
|
|
81f298c2ea | ||
|
|
8ea6d10be5 | ||
|
|
bdf62d0f1c | ||
|
|
40e2d00d35 | ||
|
|
13465b12e5 | ||
|
|
cf93a0ada9 | ||
|
|
b81953a1d7 | ||
|
|
1d2f28101a | ||
|
|
81e95777a8 | ||
|
|
993249acb1 | ||
|
|
0d28e70026 | ||
|
|
7560ce1976 | ||
|
|
683d18e6bb | ||
|
|
a7147f692c | ||
|
|
b71d74f01f | ||
|
|
af1c860f54 | ||
|
|
70b1896011 | ||
|
|
3fd3c67749 | ||
|
|
156a072a0b | ||
|
|
0ceb819419 | ||
|
|
4d69c8aef1 | ||
|
|
eec844d850 | ||
|
|
1f42bcc1c6 | ||
|
|
0f359b048f | ||
|
|
4e2df6a80e | ||
|
|
a30f434b5d | ||
|
|
41f4e21268 | ||
|
|
a67aa00c9a | ||
|
|
d61a0f9ffd | ||
|
|
fe5508228f | ||
|
|
50093036c3 | ||
|
|
0cae0ede08 | ||
|
|
4613b57240 | ||
|
|
7535a7b101 | ||
|
|
f6bb97eba9 | ||
|
|
78a411462b | ||
|
|
9471a981e3 | ||
|
|
3d27d44676 | ||
|
|
6a66c9741f | ||
|
|
a09c103cfb | ||
|
|
1dec763e26 | ||
|
|
f57ea2df59 | ||
|
|
d215c33549 | ||
|
|
b3f8fcfea6 | ||
|
|
806bb04a35 | ||
|
|
b1156038de | ||
|
|
0c93bbe574 | ||
|
|
6f4f4e701b | ||
|
|
ff301507fe | ||
|
|
93b72eba42 | ||
|
|
b01758e7e1 | ||
|
|
c51b38c671 | ||
|
|
b227fee225 | ||
|
|
2b7569335b | ||
|
|
9e667f0895 | ||
|
|
4759eb2c42 | ||
|
|
edbf168b57 | ||
|
|
9bfea81ca6 | ||
|
|
a9bcea45f5 | ||
|
|
8554da6e2f | ||
|
|
b2f941af5f | ||
|
|
6861a9d057 | ||
|
|
18189f095c | ||
|
|
f1c306cb23 | ||
|
|
0dc6df4e71 | ||
|
|
21bb45a7af | ||
|
|
e7464d1286 | ||
|
|
373d75cc36 | ||
|
|
0bbcc6c68e | ||
|
|
3c6f22ca48 | ||
|
|
87016ce331 | ||
|
|
86d18ca19a | ||
|
|
4edd2d2d2d | ||
|
|
ef47ed57e9 | ||
|
|
b2e3f416bc | ||
|
|
7231c6d2c4 | ||
|
|
fa342f98c2 | ||
|
|
90478d2049 | ||
|
|
e1ad08fcc1 | ||
|
|
cf2e4fefa4 | ||
|
|
d7bb28a9ce | ||
|
|
b41b223fc8 | ||
|
|
a86ee9340c | ||
|
|
c6cd20d2fd | ||
|
|
132df6cb28 | ||
|
|
d7c514e869 | ||
|
|
3ef288bfaa | ||
|
|
d86a5b67b6 | ||
|
|
8f3941adae | ||
|
|
18c6c32628 | ||
|
|
1ad2cfe629 | ||
|
|
7bad716fbc | ||
|
|
220be6eb5c | ||
|
|
ead11d6996 | ||
|
|
fec4b7dba3 | ||
|
|
da257b860b | ||
|
|
9452b77307 | ||
|
|
85303126d6 | ||
|
|
f08fa88d71 | ||
|
|
33149d9615 | ||
|
|
95408e7fa7 | ||
|
|
22987b5f74 | ||
|
|
90f9a131fe | ||
|
|
017ad5e4d9 | ||
|
|
15b4176afb | ||
|
|
1533e08425 | ||
|
|
c3dd5b567f | ||
|
|
386937cfb3 | ||
|
|
c89ad3df2d | ||
|
|
2b8efd42a9 | ||
|
|
d4104214ff | ||
|
|
802efb5358 | ||
|
|
767b137c58 | ||
|
|
8eecf103ef | ||
|
|
77822cf062 | ||
|
|
007c27879d | ||
|
|
368831da4c | ||
|
|
eb84dfa574 | ||
|
|
3bc8342929 | ||
|
|
cfc64e8515 | ||
|
|
7a40c9d492 | ||
|
|
d51a2f12f8 | ||
|
|
8a8771076d | ||
|
|
e637b26151 | ||
|
|
595fa8da96 | ||
|
|
9ba6950d21 | ||
|
|
7f790fbe15 | ||
|
|
06f14aa695 | ||
|
|
9fa872a1f0 | ||
|
|
6d263fe8c9 | ||
|
|
e55b13c2c5 | ||
|
|
f95f5f5e88 | ||
|
|
246674c388 | ||
|
|
23c212f8be | ||
|
|
90477abb81 | ||
|
|
11afae2dff | ||
|
|
3df4fec6dd | ||
|
|
aea19f0e1f | ||
|
|
291a4e3d0a | ||
|
|
957b737126 | ||
|
|
3e30f4e207 | ||
|
|
b172343235 | ||
|
|
c8a652ec15 | ||
|
|
12e47affa9 | ||
|
|
612150f72e | ||
|
|
77d9870094 | ||
|
|
c96c07be2a | ||
|
|
cee467fc0e | ||
|
|
71305da77e | ||
|
|
c4021cf58a | ||
|
|
9a18a03061 | ||
|
|
b5183c7711 | ||
|
|
3fab18a6bb | ||
|
|
12af992d8c | ||
|
|
bbd2f50c38 | ||
|
|
3f7652f992 | ||
|
|
2cbe36b532 | ||
|
|
fdb152872d | ||
|
|
916b970665 | ||
|
|
10070a9bef | ||
|
|
b18439f268 | ||
|
|
4230479ff4 | ||
|
|
18c26a252a | ||
|
|
f6fc9a338f | ||
|
|
6223d59042 | ||
|
|
e6b229645a | ||
|
|
9dc3e8f43d | ||
|
|
e9faa0bc2d | ||
|
|
70caa8d7fc | ||
|
|
4f74d5afa1 | ||
|
|
7f61437eea | ||
|
|
ed604f6db7 | ||
|
|
fb66b52b68 | ||
|
|
05e32203ee | ||
|
|
1bf7dd9a83 | ||
|
|
19aa237d47 | ||
|
|
5cd1103b85 | ||
|
|
e2f80508b5 | ||
|
|
86cb8f6611 | ||
|
|
04cffd2d21 | ||
|
|
74b47a6f5a | ||
|
|
32514920da | ||
|
|
a36b37c66d | ||
|
|
b4a80f833a | ||
|
|
cc1d22167a | ||
|
|
c080eea98c | ||
|
|
95b43c68fe | ||
|
|
6b06403014 | ||
|
|
4d3789d0dc | ||
|
|
4110ee4600 | ||
|
|
9d16cb4406 | ||
|
|
daa50177f3 | ||
|
|
9c2c91bb1a | ||
|
|
34f1557f83 | ||
|
|
41d776c09e | ||
|
|
9dea5d37ef | ||
|
|
656bdd27c5 |
@@ -1,154 +1,47 @@
|
||||
{
|
||||
"name": "claude-code-dev-workflows",
|
||||
"$schema": "https://anthropic.com/claude-code/marketplace.schema.json",
|
||||
"name": "myclaude",
|
||||
"version": "5.6.1",
|
||||
"description": "Professional multi-agent development workflows with OmO orchestration, Requirements-Driven and BMAD methodologies",
|
||||
"owner": {
|
||||
"name": "Claude Code Dev Workflows",
|
||||
"email": "contact@example.com",
|
||||
"url": "https://github.com/cexll/myclaude"
|
||||
},
|
||||
"metadata": {
|
||||
"description": "Professional multi-agent development workflows with Requirements-Driven and BMAD methodologies, featuring 16+ specialized agents and 12+ commands",
|
||||
"version": "1.0.0"
|
||||
"name": "cexll",
|
||||
"email": "evanxian9@gmail.com"
|
||||
},
|
||||
"plugins": [
|
||||
{
|
||||
"name": "requirements-driven-development",
|
||||
"source": "./requirements-driven-workflow/",
|
||||
"description": "Streamlined requirements-driven development workflow with 90% quality gates for practical feature implementation",
|
||||
"version": "1.0.0",
|
||||
"author": {
|
||||
"name": "Claude Code Dev Workflows",
|
||||
"url": "https://github.com/cexll/myclaude"
|
||||
},
|
||||
"homepage": "https://github.com/cexll/myclaude",
|
||||
"repository": "https://github.com/cexll/myclaude",
|
||||
"license": "MIT",
|
||||
"keywords": [
|
||||
"requirements",
|
||||
"workflow",
|
||||
"automation",
|
||||
"quality-gates",
|
||||
"feature-development",
|
||||
"agile",
|
||||
"specifications"
|
||||
],
|
||||
"category": "workflows",
|
||||
"strict": false,
|
||||
"commands": [
|
||||
"./commands/requirements-pilot.md"
|
||||
],
|
||||
"agents": [
|
||||
"./agents/requirements-generate.md",
|
||||
"./agents/requirements-code.md",
|
||||
"./agents/requirements-testing.md",
|
||||
"./agents/requirements-review.md"
|
||||
]
|
||||
"name": "omo",
|
||||
"description": "Multi-agent orchestration for code analysis, bug investigation, fix planning, and implementation with intelligent routing to specialized agents",
|
||||
"version": "5.6.1",
|
||||
"source": "./skills/omo",
|
||||
"category": "development"
|
||||
},
|
||||
{
|
||||
"name": "bmad-agile-workflow",
|
||||
"source": "./bmad-agile-workflow/",
|
||||
"name": "requirements",
|
||||
"description": "Requirements-driven development workflow with quality gates for practical feature implementation",
|
||||
"version": "5.6.1",
|
||||
"source": "./agents/requirements",
|
||||
"category": "development"
|
||||
},
|
||||
{
|
||||
"name": "bmad",
|
||||
"description": "Full BMAD agile workflow with role-based agents (PO, Architect, SM, Dev, QA) and interactive approval gates",
|
||||
"version": "1.0.0",
|
||||
"author": {
|
||||
"name": "Claude Code Dev Workflows",
|
||||
"url": "https://github.com/cexll/myclaude"
|
||||
},
|
||||
"homepage": "https://github.com/cexll/myclaude",
|
||||
"repository": "https://github.com/cexll/myclaude",
|
||||
"license": "MIT",
|
||||
"keywords": [
|
||||
"bmad",
|
||||
"agile",
|
||||
"scrum",
|
||||
"product-owner",
|
||||
"architect",
|
||||
"developer",
|
||||
"qa",
|
||||
"workflow-orchestration"
|
||||
],
|
||||
"category": "workflows",
|
||||
"strict": false,
|
||||
"commands": [
|
||||
"./commands/bmad-pilot.md"
|
||||
],
|
||||
"agents": [
|
||||
"./agents/bmad-po.md",
|
||||
"./agents/bmad-architect.md",
|
||||
"./agents/bmad-sm.md",
|
||||
"./agents/bmad-dev.md",
|
||||
"./agents/bmad-qa.md",
|
||||
"./agents/bmad-orchestrator.md",
|
||||
"./agents/bmad-review.md"
|
||||
]
|
||||
"version": "5.6.1",
|
||||
"source": "./agents/bmad",
|
||||
"category": "development"
|
||||
},
|
||||
{
|
||||
"name": "development-essentials",
|
||||
"source": "./development-essentials/",
|
||||
"name": "dev-kit",
|
||||
"description": "Essential development commands for coding, debugging, testing, optimization, and documentation",
|
||||
"version": "1.0.0",
|
||||
"author": {
|
||||
"name": "Claude Code Dev Workflows",
|
||||
"url": "https://github.com/cexll/myclaude"
|
||||
},
|
||||
"homepage": "https://github.com/cexll/myclaude",
|
||||
"repository": "https://github.com/cexll/myclaude",
|
||||
"license": "MIT",
|
||||
"keywords": [
|
||||
"code",
|
||||
"debug",
|
||||
"test",
|
||||
"optimize",
|
||||
"review",
|
||||
"bugfix",
|
||||
"refactor",
|
||||
"documentation"
|
||||
],
|
||||
"category": "essentials",
|
||||
"strict": false,
|
||||
"commands": [
|
||||
"./commands/code.md",
|
||||
"./commands/debug.md",
|
||||
"./commands/test.md",
|
||||
"./commands/optimize.md",
|
||||
"./commands/review.md",
|
||||
"./commands/bugfix.md",
|
||||
"./commands/refactor.md",
|
||||
"./commands/docs.md",
|
||||
"./commands/ask.md",
|
||||
"./commands/think.md"
|
||||
],
|
||||
"agents": [
|
||||
"./agents/code.md",
|
||||
"./agents/bugfix.md",
|
||||
"./agents/bugfix-verify.md",
|
||||
"./agents/optimize.md",
|
||||
"./agents/debug.md"
|
||||
]
|
||||
"version": "5.6.1",
|
||||
"source": "./agents/development-essentials",
|
||||
"category": "productivity"
|
||||
},
|
||||
{
|
||||
"name": "advanced-ai-agents",
|
||||
"source": "./advanced-ai-agents/",
|
||||
"description": "Advanced AI agent for complex problem solving and deep analysis with GPT-5 integration",
|
||||
"version": "1.0.0",
|
||||
"author": {
|
||||
"name": "Claude Code Dev Workflows",
|
||||
"url": "https://github.com/cexll/myclaude"
|
||||
},
|
||||
"homepage": "https://github.com/cexll/myclaude",
|
||||
"repository": "https://github.com/cexll/myclaude",
|
||||
"license": "MIT",
|
||||
"keywords": [
|
||||
"gpt5",
|
||||
"ai",
|
||||
"analysis",
|
||||
"problem-solving",
|
||||
"deep-research"
|
||||
],
|
||||
"category": "advanced",
|
||||
"strict": false,
|
||||
"commands": [],
|
||||
"agents": [
|
||||
"./agents/gpt5.md"
|
||||
]
|
||||
"name": "sparv",
|
||||
"description": "Minimal SPARV workflow (Specify→Plan→Act→Review→Vault) with 10-point spec gate, unified journal, 2-action saves, 3-failure protocol, and EHRB risk detection",
|
||||
"version": "1.1.0",
|
||||
"source": "./skills/sparv",
|
||||
"category": "development"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
22
.gitattributes
vendored
Normal file
22
.gitattributes
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
# Ensure shell scripts always use LF line endings on all platforms
|
||||
*.sh text eol=lf
|
||||
|
||||
# Ensure Python files use LF line endings
|
||||
*.py text eol=lf
|
||||
|
||||
# Auto-detect text files and normalize line endings to LF
|
||||
* text=auto eol=lf
|
||||
|
||||
# Explicitly declare files that should always be treated as binary
|
||||
*.exe binary
|
||||
*.png binary
|
||||
*.jpg binary
|
||||
*.jpeg binary
|
||||
*.gif binary
|
||||
*.ico binary
|
||||
*.mov binary
|
||||
*.mp4 binary
|
||||
*.mp3 binary
|
||||
*.zip binary
|
||||
*.gz binary
|
||||
*.tar binary
|
||||
39
.github/workflows/ci.yml
vendored
Normal file
39
.github/workflows/ci.yml
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [master, rc/*]
|
||||
pull_request:
|
||||
branches: [master, rc/*]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-latest, windows-latest, macos-latest]
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.21'
|
||||
|
||||
- name: Run tests
|
||||
run: |
|
||||
cd codeagent-wrapper
|
||||
go test -v -cover -coverprofile=coverage.out ./...
|
||||
shell: bash
|
||||
|
||||
- name: Check coverage
|
||||
run: |
|
||||
cd codeagent-wrapper
|
||||
go tool cover -func=coverage.out | grep total | awk '{print $3}'
|
||||
shell: bash
|
||||
|
||||
- name: Upload coverage
|
||||
uses: codecov/codecov-action@v4
|
||||
with:
|
||||
file: codeagent-wrapper/coverage.out
|
||||
continue-on-error: true
|
||||
113
.github/workflows/release.yml
vendored
Normal file
113
.github/workflows/release.yml
vendored
Normal file
@@ -0,0 +1,113 @@
|
||||
name: Release codeagent-wrapper
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*'
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
jobs:
|
||||
test:
|
||||
name: Test
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.21'
|
||||
|
||||
- name: Run tests
|
||||
working-directory: codeagent-wrapper
|
||||
run: go test -v -coverprofile=cover.out ./...
|
||||
|
||||
- name: Check coverage
|
||||
working-directory: codeagent-wrapper
|
||||
run: |
|
||||
go tool cover -func=cover.out | grep total
|
||||
COVERAGE=$(go tool cover -func=cover.out | grep total | awk '{print $3}' | sed 's/%//')
|
||||
echo "Coverage: ${COVERAGE}%"
|
||||
|
||||
build:
|
||||
name: Build
|
||||
needs: test
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- goos: linux
|
||||
goarch: amd64
|
||||
- goos: linux
|
||||
goarch: arm64
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- goos: darwin
|
||||
goarch: arm64
|
||||
- goos: windows
|
||||
goarch: amd64
|
||||
- goos: windows
|
||||
goarch: arm64
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.21'
|
||||
|
||||
- name: Build binary
|
||||
id: build
|
||||
working-directory: codeagent-wrapper
|
||||
env:
|
||||
GOOS: ${{ matrix.goos }}
|
||||
GOARCH: ${{ matrix.goarch }}
|
||||
CGO_ENABLED: 0
|
||||
run: |
|
||||
VERSION=${GITHUB_REF#refs/tags/}
|
||||
OUTPUT_NAME=codeagent-wrapper-${{ matrix.goos }}-${{ matrix.goarch }}
|
||||
if [ "${{ matrix.goos }}" = "windows" ]; then
|
||||
OUTPUT_NAME="${OUTPUT_NAME}.exe"
|
||||
fi
|
||||
go build -ldflags="-s -w -X main.version=${VERSION}" -o ${OUTPUT_NAME} ./cmd/codeagent-wrapper
|
||||
chmod +x ${OUTPUT_NAME}
|
||||
echo "artifact_path=codeagent-wrapper/${OUTPUT_NAME}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: codeagent-wrapper-${{ matrix.goos }}-${{ matrix.goarch }}
|
||||
path: ${{ steps.build.outputs.artifact_path }}
|
||||
|
||||
release:
|
||||
name: Create Release
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Download all artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: artifacts
|
||||
|
||||
- name: Prepare release files
|
||||
run: |
|
||||
mkdir -p release
|
||||
find artifacts -type f -name "codeagent-wrapper-*" -exec mv {} release/ \;
|
||||
cp install.sh install.bat release/
|
||||
ls -la release/
|
||||
|
||||
- name: Create Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
files: release/*
|
||||
generate_release_notes: true
|
||||
draft: false
|
||||
prerelease: false
|
||||
12
.gitignore
vendored
12
.gitignore
vendored
@@ -1,3 +1,11 @@
|
||||
CLAUDE.md
|
||||
|
||||
.claude/
|
||||
.claude-trace
|
||||
.DS_Store
|
||||
**/.DS_Store
|
||||
.venv
|
||||
.pytest_cache
|
||||
__pycache__
|
||||
.coverage
|
||||
coverage.out
|
||||
references
|
||||
output/
|
||||
|
||||
@@ -1,163 +0,0 @@
|
||||
# BMAD Pilot 使用指南
|
||||
|
||||
本指南介绍如何使用 BMAD Pilot 工作流,编排一组协作 AI 角色(PO/Architect/SM/Dev/QA)在仓库上下文中完成:01 产品需求文档、02 系统设计规范、03 冲刺计划,并自动进入开发与测试,整个过程包含多次用户确认门与质量评分。
|
||||
|
||||
参考阅读:BMAD-README.md(BMAD 方法概览)、BMAD-INTEGRATION-GUIDE.md(进阶集成)。
|
||||
|
||||
---
|
||||
|
||||
## 命令总览
|
||||
|
||||
- 命令:`/bmad-pilot <PROJECT_DESCRIPTION> [OPTIONS]`
|
||||
- 作用:在仓库上下文中,按阶段编排 `bmad-po → bmad-architect → bmad-sm → bmad-dev → bmad-qa`。
|
||||
- Orchestrator:由工作流统一编排(使用 bmad-orchestrator 进行仓库扫描)。
|
||||
|
||||
### Options
|
||||
- `--skip-tests`:跳过 QA 阶段
|
||||
- `--direct-dev`:跳过 SM 冲刺计划,架构后直接进入开发
|
||||
- `--skip-scan`:跳过初始仓库扫描(不推荐)
|
||||
|
||||
### 输出目录
|
||||
- 所有产出归档在:`./.claude/specs/{feature_name}/`
|
||||
- `00-repo-scan.md` — 仓库扫描摘要(自动生成)
|
||||
- `01-product-requirements.md` — 产品需求文档(确认后保存)
|
||||
- `02-system-architecture.md` — 系统设计规范(确认后保存)
|
||||
- `03-sprint-plan.md` — 冲刺计划(确认后保存;`--direct-dev` 时跳过)
|
||||
|
||||
`{feature_name}` 由 `<PROJECT_DESCRIPTION>` 生成(kebab-case:小写,空格/标点转 `-`,连续合并,首尾去除)。
|
||||
|
||||
---
|
||||
|
||||
## 快速开始
|
||||
|
||||
1) 执行 Pilot:
|
||||
```
|
||||
/bmad-pilot 为现有项目新增看板模块,支持多用户权限与移动端适配
|
||||
```
|
||||
2) 与 PO 交互澄清,直至 PRD ≥ 90 分 → 确认保存。
|
||||
3) 与 Architect 讨论技术决策,直至架构 ≥ 90 分 → 确认保存。
|
||||
4) 审阅并确认 SM 的冲刺计划(或使用 `--direct-dev` 跳过该阶段)。
|
||||
5) Dev 基于文档实现;QA 基于文档与实现测试(除非 `--skip-tests`)。
|
||||
6) 查看产出目录:`./.claude/specs/{feature_name}/`。
|
||||
|
||||
---
|
||||
|
||||
## 工作流阶段
|
||||
|
||||
- Phase 0:仓库扫描(自动,除非 `--skip-scan`)
|
||||
- Agent:`bmad-orchestrator`
|
||||
- 结果:扫描摘要返回并写入 `00-repo-scan.md`
|
||||
- 内容:项目类型、技术栈、代码组织、惯例、集成点、约束与注意事项
|
||||
|
||||
- Phase 1:产品需求(交互)
|
||||
- Agent:`bmad-po`
|
||||
- 循环:澄清问题 → 更新 PRD → 评分(目标 ≥ 90)
|
||||
- 确认门:PRD ≥ 90 分后,需要用户明确确认再继续
|
||||
- 保存:`01-product-requirements.md`
|
||||
|
||||
- Phase 2:系统架构(交互)
|
||||
- Agent:`bmad-architect`
|
||||
- 循环:技术选型与设计澄清 → 更新架构 → 评分(目标 ≥ 90)
|
||||
- 确认门:架构 ≥ 90 分后,需要用户明确确认再继续
|
||||
- 保存:`02-system-architecture.md`
|
||||
|
||||
- Phase 3:冲刺计划(交互,除非 `--direct-dev`)
|
||||
- Agent:`bmad-sm`
|
||||
- 循环:计划要点与问题澄清 → 更新计划 → 确认保存
|
||||
- 保存:`03-sprint-plan.md`
|
||||
|
||||
- Phase 4:开发实现(自动)
|
||||
- Agent:`bmad-dev`
|
||||
- 输入:PRD、架构、冲刺计划、`00-repo-scan.md`
|
||||
|
||||
- Phase 5:质量保障(自动,除非 `--skip-tests`)
|
||||
- Agent:`bmad-qa`
|
||||
- 输入:PRD、架构、冲刺计划、实现、`00-repo-scan.md`
|
||||
|
||||
---
|
||||
|
||||
## 交互与质量门
|
||||
|
||||
- 质控阈值:PRD 与架构质量评分需达到 ≥ 90 分。
|
||||
- 强制确认门:每个关键阶段完成后,Orchestrator 会停下等待你的“继续/确认”。
|
||||
- 迭代澄清:PO/Architect/SM 会提出 2-5 个精准问题,Orchestrator 转述并汇总你的回答以供下一轮完善。
|
||||
|
||||
---
|
||||
|
||||
## 仓库上下文
|
||||
|
||||
- 首次扫描:由工作流触发的 orchestrator 扫描(`bmad-orchestrator`)自动分析当前仓库(`--skip-scan` 可跳过)。
|
||||
- 缓存路径:`./.claude/specs/{feature_name}/00-repo-scan.md`(供所有后续 Agent 引用)。
|
||||
- 作用:提供技术栈识别、约定、测试模式、集成点,避免上下文丢失并保持一致性。
|
||||
|
||||
---
|
||||
|
||||
## 角色职责
|
||||
|
||||
- `bmad-po`:需求澄清与 PRD 产出,评分与问题驱动迭代。
|
||||
- `bmad-architect`:技术架构与关键决策,评分与问题驱动迭代。
|
||||
- `bmad-sm`:冲刺计划、任务拆分、依赖/风险/节奏规划。
|
||||
- `bmad-dev`:按文档实现、测试、日志/安全/性能与同构风格。
|
||||
- `bmad-qa`:基于需求与实现的全维度测试(单测/集成/E2E/性能/安全)。
|
||||
|
||||
---
|
||||
|
||||
## 示例
|
||||
|
||||
- 基础运行:
|
||||
```
|
||||
/bmad-pilot 在线商城结算流程升级,支持优惠券与发票
|
||||
```
|
||||
|
||||
- 跳过测试:
|
||||
```
|
||||
/bmad-pilot H5 活动页生成器 --skip-tests
|
||||
```
|
||||
|
||||
- 直接从架构进入开发(跳过 SM):
|
||||
```
|
||||
/bmad-pilot 小程序客服模块重构 --direct-dev
|
||||
```
|
||||
|
||||
- 跳过扫描(不推荐):
|
||||
```
|
||||
/bmad-pilot 部署流水线可视化 --skip-scan
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 目录结构
|
||||
|
||||
```
|
||||
.claude/
|
||||
specs/
|
||||
{feature_name}/
|
||||
00-repo-scan.md
|
||||
01-product-requirements.md
|
||||
02-system-architecture.md
|
||||
03-sprint-plan.md
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Tips & 常见问题
|
||||
|
||||
- 分数上不去:优先补齐评分分项的缺口(业务指标、关键流程、性能/安全约束等)。
|
||||
- 上下文不一致:检查并引用 `00-repo-scan.md` 的关键约定与模式,保证 PRD/架构/计划一致。
|
||||
- 依赖/网络受限:Dev/QA 的实际执行受环境影响;请在项目内准备依赖与测试环境,或先提交伪实现/测试策略。
|
||||
- 文档路径:确保在项目根目录执行,Pilot 会将文件写入 `./.claude/specs/{feature_name}/`。
|
||||
|
||||
---
|
||||
|
||||
## 最佳实践
|
||||
|
||||
- 小步快跑:每轮补充最关键信息,快速达成 ≥ 90 分文档。
|
||||
- 统一术语:在 PRD 固定术语词表;架构与代码沿用同名。
|
||||
- 用例先行:PRD 的验收标准应转化为 QA 的关键测试用例。
|
||||
- 复用模式:尽量沿用扫描识别的现有代码/测试模式,减少偏差。
|
||||
|
||||
---
|
||||
|
||||
## 版本记录
|
||||
|
||||
- 2025-08-11:新增仓库扫描摘要缓存 `00-repo-scan.md`,统一路径与跨阶段引用;明确确认门与目录预创建说明。
|
||||
339
BMAD-README.md
339
BMAD-README.md
@@ -1,339 +0,0 @@
|
||||
# BMAD方法论 Claude Code 使用指南
|
||||
|
||||
[](https://github.com/bmadcode/BMAD-METHOD)
|
||||
[](https://claude.ai/code)
|
||||
|
||||
> 从产品理念到代码实现的完整AI驱动敏捷开发工作流
|
||||
|
||||
## 🎯 什么是BMAD方法论?
|
||||
|
||||
BMAD (Business, Market, Architecture, Development) 是一个AI驱动的敏捷开发方法论,通过专业化代理团队实现从商业需求到技术实现的完整工作流程。
|
||||
|
||||
### 核心理念
|
||||
- **智能体规划**: 专门代理协作创建详细、一致的PRD和架构文档
|
||||
- **上下文工程开发**: 将详细计划转换为超详细的开发故事
|
||||
- **角色专业化**: 每个代理专注特定领域,避免角色切换导致的质量下降
|
||||
|
||||
## 🏗️ BMAD代理体系
|
||||
|
||||
### 代理角色说明
|
||||
- **PO (Product Owner)** - 产品负责人Sarah:需求分析、用户故事、验收标准
|
||||
- **Analyst** - 业务分析师Mary:市场研究、竞争分析、商业案例
|
||||
- **Architect** - 系统架构师Winston:技术架构、系统设计、技术选择
|
||||
- **SM (Scrum Master)** - 敏捷教练:任务分解、冲刺规划、流程协调
|
||||
- **Dev (Developer)** - 开发工程师:代码实现、技术文档
|
||||
- **QA (Quality Assurance)** - 质量保证:测试策略、质量验证
|
||||
- **UX Expert** - 用户体验专家:交互设计、可用性测试
|
||||
|
||||
## 🚀 快速开始
|
||||
|
||||
### 安装配置
|
||||
BMAD方法论已集成到您的Claude Code系统中,无需额外安装。
|
||||
|
||||
### 基本使用方法
|
||||
|
||||
#### 1. 完整BMAD工作流
|
||||
```bash
|
||||
# 一键执行完整开发流程
|
||||
/bmad-pilot "实现企业级用户管理系统,支持RBAC权限控制和LDAP集成"
|
||||
|
||||
# 执行流程:PO → Architect → SM → Dev → QA
|
||||
```
|
||||
|
||||
#### 2. 常用选项
|
||||
```bash
|
||||
# 跳过测试(PO → Architect → SM → Dev)
|
||||
/bmad-pilot "实现支付网关API" --skip-tests
|
||||
|
||||
# 直接从架构进入开发(跳过 SM 规划)
|
||||
/bmad-pilot "设计微服务电商平台" --direct-dev
|
||||
|
||||
# 跳过仓库扫描(不推荐)
|
||||
/bmad-pilot "用户界面优化" --skip-scan
|
||||
```
|
||||
|
||||
#### 3. 直接开发与部分流程
|
||||
```bash
|
||||
# 技术焦点(架构后直接进入开发与测试)
|
||||
/bmad-pilot "API网关实现" --direct-dev
|
||||
|
||||
# 完整设计流程(需求→架构→规划→开发→测试)
|
||||
/bmad-pilot "系统重构规划"
|
||||
|
||||
# 仅业务相关分析 → 请使用下方“独立代理使用”中的 /bmad-po 与 /bmad-analyst
|
||||
```
|
||||
|
||||
#### 4. 独立代理使用
|
||||
```bash
|
||||
# 产品需求分析
|
||||
/bmad-po "企业CRM系统功能需求定义"
|
||||
|
||||
# 市场调研分析
|
||||
/bmad-analyst "SaaS市场竞争格局和机会分析"
|
||||
|
||||
# 系统架构设计
|
||||
/bmad-architect "高并发分布式系统架构设计"
|
||||
|
||||
# 主协调器(可转换为任意代理)
|
||||
/bmad-orchestrator "协调多代理完成复杂项目"
|
||||
```
|
||||
|
||||
## 📋 详细命令说明
|
||||
|
||||
### `/bmad-pilot` - 完整工作流执行
|
||||
**用法**: `/bmad-pilot <项目描述> [选项]`
|
||||
|
||||
**选项**:
|
||||
- `--skip-tests`: 跳过 QA 阶段
|
||||
- `--direct-dev`: 跳过 SM 冲刺计划,架构后直接进入开发
|
||||
- `--skip-scan`: 跳过初始仓库扫描(不推荐)
|
||||
|
||||
**示例**:
|
||||
```bash
|
||||
/bmad-pilot "构建在线教育平台,支持直播、录播、作业系统"
|
||||
/bmad-pilot "API网关设计" --direct-dev
|
||||
/bmad-pilot "支付模块" --skip-tests
|
||||
```
|
||||
|
||||
### `/bmad-po` - 产品负责人
|
||||
**角色**: Sarah - 技术产品负责人 & 流程管家
|
||||
**专长**: 需求分析、用户故事、验收标准、冲刺规划
|
||||
|
||||
**用法**: `/bmad-po <需求描述>`
|
||||
|
||||
**工作流程**:
|
||||
1. 需求分解和功能点识别
|
||||
2. 用户故事创建(As a... I want... So that...)
|
||||
3. 验收标准定义和优先级排序
|
||||
4. 利益相关者验证和签署
|
||||
|
||||
**示例**:
|
||||
```bash
|
||||
/bmad-po "设计企业级权限管理系统,支持多租户和细粒度权限控制"
|
||||
/bmad-po "移动端电商APP功能需求分析"
|
||||
```
|
||||
|
||||
### `/bmad-analyst` - 业务分析师
|
||||
**角色**: Mary - 洞察分析师 & 战略合作伙伴
|
||||
**专长**: 市场研究、竞争分析、商业案例开发、利益相关者分析
|
||||
|
||||
**用法**: `/bmad-analyst <分析主题>`
|
||||
|
||||
**工作流程**:
|
||||
1. 市场格局和竞争对手分析
|
||||
2. 商业案例开发和ROI分析
|
||||
3. 利益相关者分析和需求收集
|
||||
4. 项目简报和战略建议
|
||||
|
||||
**示例**:
|
||||
```bash
|
||||
/bmad-analyst "企业级认证市场分析,JWT vs OAuth2.0 vs SAML"
|
||||
/bmad-analyst "云原生架构迁移的商业价值和风险评估"
|
||||
```
|
||||
|
||||
### `/bmad-architect` - 系统架构师
|
||||
**角色**: Winston - 全栈系统架构师 & 技术领导者
|
||||
**专长**: 系统设计、技术选择、API设计、基础架构规划
|
||||
|
||||
**用法**: `/bmad-architect <系统设计需求>`
|
||||
|
||||
**工作流程**:
|
||||
1. 系统需求和约束分析
|
||||
2. 技术栈和架构模式选择
|
||||
3. 组件设计和系统架构图
|
||||
4. 实施策略和开发指导
|
||||
|
||||
**示例**:
|
||||
```bash
|
||||
/bmad-architect "微服务架构设计,支持事件驱动和最终一致性"
|
||||
/bmad-architect "高可用API网关架构,支持限流、熔断、监控"
|
||||
```
|
||||
|
||||
### `/bmad-orchestrator` - 主协调器
|
||||
**角色**: BMAD主协调器
|
||||
**专长**: 工作流协调、代理转换、多代理任务管理
|
||||
|
||||
**用法**: `/bmad-orchestrator [命令] [参数]`
|
||||
|
||||
**功能**:
|
||||
- 动态转换为任意专门代理
|
||||
- 协调复杂多代理工作流
|
||||
- 管理代理间的上下文传递
|
||||
- 提供工作流指导和建议
|
||||
|
||||
## 🔄 与现有系统集成
|
||||
|
||||
### 现有系统 vs BMAD方法论
|
||||
|
||||
| 特性 | Requirements-Pilot | BMAD方法论 |
|
||||
|------|-------------------|-----------|
|
||||
| **执行时间** | 30分钟 | 1-2小时 |
|
||||
| **适用场景** | 快速功能开发 | 企业级项目 |
|
||||
| **覆盖范围** | 技术实现 | 商业+技术全流程 |
|
||||
| **质量门控** | 90%技术质量 | 多维度质量验证 |
|
||||
| **代理数量** | 4个技术代理 | 7个全角色代理 |
|
||||
|
||||
### 使用场景建议
|
||||
|
||||
#### 🚅 快速开发(推荐现有系统)
|
||||
```bash
|
||||
# 简单功能快速实现
|
||||
/requirements-pilot "添加用户登录功能"
|
||||
/requirements-pilot "实现数据导出API"
|
||||
```
|
||||
|
||||
#### 🏢 企业级项目(推荐BMAD)
|
||||
```bash
|
||||
# 复杂系统完整流程
|
||||
/bmad-pilot "构建企业级ERP系统,集成财务、人事、项目管理模块"
|
||||
/bmad-pilot "设计多租户SaaS平台,支持自定义配置和第三方集成"
|
||||
```
|
||||
|
||||
#### 🔄 混合模式(规划+实现)
|
||||
```bash
|
||||
# 先用BMAD做规划(在 PRD/架构确认门停留)
|
||||
/bmad-pilot "电商平台架构设计"
|
||||
|
||||
# 再用现有系统快速实现
|
||||
/requirements-pilot "基于架构规格实现用户服务模块"
|
||||
/requirements-pilot "基于架构规格实现订单服务模块"
|
||||
```
|
||||
|
||||
## 🎯 典型工作流示例
|
||||
|
||||
### 示例1: 企业级认证系统
|
||||
```bash
|
||||
# 完整BMAD流程
|
||||
/bmad-pilot "企业级JWT认证系统,支持RBAC权限控制、LDAP集成、审计日志、高可用部署"
|
||||
|
||||
# 预期输出:
|
||||
# 1. PO: 详细用户故事和验收标准
|
||||
# 2. Architect: 完整系统架构和技术选择
|
||||
# 3. SM: 开发任务分解和冲刺计划
|
||||
# 4. Dev: 生产就绪代码实现
|
||||
# 5. QA: 测试策略与用例并执行(可选)
|
||||
```
|
||||
|
||||
### 示例2: API网关开发
|
||||
```bash
|
||||
# 技术焦点流程(跳过SM,架构后直接进入开发)
|
||||
/bmad-pilot "高性能API网关,支持限流、熔断、监控、服务发现" --direct-dev
|
||||
|
||||
# 执行流程:
|
||||
# 1. Architect: 系统架构设计
|
||||
# 2. Dev: 代码实现
|
||||
# 3. QA: 性能测试和质量验证
|
||||
```
|
||||
|
||||
### 示例3: 产品市场分析
|
||||
```bash
|
||||
# 业务分析流程(使用独立代理)
|
||||
/bmad-po "云原生数据库市场机会分析的产品需求假设与范围界定"
|
||||
/bmad-analyst "云原生数据库市场机会分析"
|
||||
|
||||
# 执行流程:
|
||||
# 1. PO: 产品需求定义
|
||||
# 2. Analyst: 市场研究和竞争分析
|
||||
```
|
||||
|
||||
## 📊 质量保证体系
|
||||
|
||||
### BMAD质量标准
|
||||
- **需求完整性**: 90+ 分需求清晰度评分
|
||||
- **商业对齐**: 明确的价值主张和市场定位
|
||||
- **架构完善**: 全面的系统设计和技术选择
|
||||
- **实现就绪**: 可执行的开发规格和质量标准
|
||||
|
||||
### 集成现有质量门控
|
||||
- 保持90%技术质量阈值
|
||||
- 增加商业价值验证维度
|
||||
- 多代理交叉验证机制
|
||||
- 自动化质量反馈循环
|
||||
|
||||
## 🔧 高级用法和最佳实践
|
||||
|
||||
### 1. 渐进式复杂度管理
|
||||
```bash
|
||||
# MVP阶段
|
||||
/bmad-workflow "用户管理系统MVP版本" --phase=development
|
||||
|
||||
# 功能增强阶段
|
||||
/bmad-analyst "用户反馈分析和功能增强建议"
|
||||
/requirements-pilot "基于反馈实现增强功能"
|
||||
|
||||
# 企业级增强
|
||||
/bmad-workflow "企业级安全增强和合规支持" --agents=architect,dev,qa
|
||||
```
|
||||
|
||||
### 2. 跨项目知识管理
|
||||
```bash
|
||||
# 项目文档化
|
||||
/bmad-orchestrator "将当前项目架构文档化,便于后续项目参考"
|
||||
|
||||
# 最佳实践提取
|
||||
/bmad-architect "基于项目经验总结微服务架构最佳实践"
|
||||
```
|
||||
|
||||
### 3. 团队协作优化
|
||||
```bash
|
||||
# 团队能力评估
|
||||
/bmad-analyst "评估团队技术栈和能力匹配度"
|
||||
|
||||
# 开发计划调整
|
||||
/bmad-po "根据团队能力调整功能优先级和实现计划"
|
||||
```
|
||||
|
||||
## 🚦 故障排除
|
||||
|
||||
### 常见问题
|
||||
|
||||
**Q: BMAD工作流执行时间较长,如何优化?**
|
||||
A:
|
||||
- 简单功能使用 `/requirements-pilot`
|
||||
- 复杂项目使用分阶段执行 `--phase=planning`
|
||||
- 使用自定义代理序列减少不必要的步骤
|
||||
|
||||
**Q: 如何在BMAD和现有系统间选择?**
|
||||
A:
|
||||
- 项目复杂度 < 中等:使用 `/requirements-pilot`
|
||||
- 项目复杂度 ≥ 高:使用 `/bmad-workflow`
|
||||
- 需要商业分析:必须使用BMAD
|
||||
- 纯技术实现:可选择任一系统
|
||||
|
||||
**Q: 代理输出质量不符合预期怎么办?**
|
||||
A:
|
||||
- 提供更详细的项目描述
|
||||
- 使用分阶段执行,逐步细化
|
||||
- 结合独立代理使用进行专项优化
|
||||
|
||||
## 🎉 开始你的BMAD之旅
|
||||
|
||||
### 第一次使用
|
||||
```bash
|
||||
# 体验完整BMAD工作流
|
||||
/bmad-workflow "构建一个简单的博客系统,支持文章发布、评论、用户管理"
|
||||
```
|
||||
|
||||
### 学习不同代理角色
|
||||
```bash
|
||||
# 产品思维
|
||||
/bmad-po "分析博客系统的用户需求和使用场景"
|
||||
|
||||
# 商业思维
|
||||
/bmad-analyst "个人博客vs企业CMS市场定位分析"
|
||||
|
||||
# 技术思维
|
||||
/bmad-architect "可扩展博客系统架构设计"
|
||||
```
|
||||
|
||||
## 📚 进阶学习资源
|
||||
|
||||
- [BMAD-METHOD原理](https://github.com/bmadcode/BMAD-METHOD)
|
||||
- [Claude Code文档](https://docs.anthropic.com/en/docs/claude-code)
|
||||
- [敏捷开发最佳实践](https://agilemanifesto.org/)
|
||||
|
||||
---
|
||||
|
||||
**BMAD方法论 + Claude Code = 从理念到代码的完整AI开发工作流** 🚀
|
||||
|
||||
开始使用BMAD方法论,体验专业化AI代理团队带来的开发效率和质量提升!
|
||||
1157
CHANGELOG.md
Normal file
1157
CHANGELOG.md
Normal file
File diff suppressed because it is too large
Load Diff
New file: LICENSE (661 lines) — full contents below:
|
||||
GNU AFFERO GENERAL PUBLIC LICENSE
|
||||
Version 3, 19 November 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU Affero General Public License is a free, copyleft license for
|
||||
software and other kinds of works, specifically designed to ensure
|
||||
cooperation with the community in the case of network server software.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
our General Public Licenses are intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
Developers that use our General Public Licenses protect your rights
|
||||
with two steps: (1) assert copyright on the software, and (2) offer
|
||||
you this License which gives you legal permission to copy, distribute
|
||||
and/or modify the software.
|
||||
|
||||
A secondary benefit of defending all users' freedom is that
|
||||
improvements made in alternate versions of the program, if they
|
||||
receive widespread use, become available for other developers to
|
||||
incorporate. Many developers of free software are heartened and
|
||||
encouraged by the resulting cooperation. However, in the case of
|
||||
software used on network servers, this result may fail to come about.
|
||||
The GNU General Public License permits making a modified version and
|
||||
letting the public access it on a server without ever releasing its
|
||||
source code to the public.
|
||||
|
||||
The GNU Affero General Public License is designed specifically to
|
||||
ensure that, in such cases, the modified source code becomes available
|
||||
to the community. It requires the operator of a network server to
|
||||
provide the source code of the modified version running there to the
|
||||
users of that server. Therefore, public use of a modified version, on
|
||||
a publicly accessible server, gives the public access to the source
|
||||
code of the modified version.
|
||||
|
||||
An older license, called the Affero General Public License and
|
||||
published by Affero, was designed to accomplish similar goals. This is
|
||||
a different license, not a version of the Affero GPL, but Affero has
|
||||
released a new version of the Affero GPL which permits relicensing under
|
||||
this license.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU Affero General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Remote Network Interaction; Use with the GNU General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, if you modify the
|
||||
Program, your modified version must prominently offer all users
|
||||
interacting with it remotely through a computer network (if your version
|
||||
supports such interaction) an opportunity to receive the Corresponding
|
||||
Source of your version by providing access to the Corresponding Source
|
||||
from a network server at no charge, through some standard or customary
|
||||
means of facilitating copying of software. This Corresponding Source
|
||||
shall include the Corresponding Source for any work covered by version 3
|
||||
of the GNU General Public License that is incorporated pursuant to the
|
||||
following paragraph.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the work with which it is combined will remain governed by version
|
||||
3 of the GNU General Public License.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU Affero General Public License from time to time. Such new versions
|
||||
will be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU Affero General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU Affero General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU Affero General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If your software can interact with users remotely through a computer
|
||||
network, you should also make sure that it provides a way for users to
|
||||
get its source. For example, if your program is a web application, its
|
||||
interface could display a "Source" link that leads users to an archive
|
||||
of the code. There are many ways you could offer source, and different
|
||||
solutions will be better for different programs; see section 13 for the
|
||||
specific requirements.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU AGPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
||||
90
Makefile
90
Makefile
@@ -1,35 +1,46 @@
|
||||
# Claude Code Multi-Agent Workflow System Makefile
|
||||
# Quick deployment for BMAD and Requirements workflows
|
||||
|
||||
.PHONY: help install deploy-bmad deploy-requirements deploy-all clean test
|
||||
.PHONY: help install deploy-bmad deploy-requirements deploy-essentials deploy-advanced deploy-all deploy-commands deploy-agents clean test changelog
|
||||
|
||||
# Default target
|
||||
help:
|
||||
@echo "Claude Code Multi-Agent Workflow - Quick Deployment"
|
||||
@echo ""
|
||||
@echo "Recommended installation: npx github:cexll/myclaude"
|
||||
@echo ""
|
||||
@echo "Usage: make [target]"
|
||||
@echo ""
|
||||
@echo "Targets:"
|
||||
@echo " install - Install all configurations to Claude Code"
|
||||
@echo " deploy-bmad - Deploy BMAD workflow (bmad-pilot)"
|
||||
@echo " deploy-requirements - Deploy Requirements workflow (requirements-pilot)"
|
||||
@echo " deploy-commands - Deploy all slash commands"
|
||||
@echo " deploy-agents - Deploy all agent configurations"
|
||||
@echo " deploy-all - Deploy everything (commands + agents)"
|
||||
@echo " test-bmad - Test BMAD workflow with sample"
|
||||
@echo " test-requirements - Test Requirements workflow with sample"
|
||||
@echo " clean - Clean generated artifacts"
|
||||
@echo " help - Show this help message"
|
||||
@echo " install - LEGACY: install all configurations (prefer npx github:cexll/myclaude)"
|
||||
@echo " deploy-bmad - Deploy BMAD workflow (bmad-pilot)"
|
||||
@echo " deploy-requirements - Deploy Requirements workflow (requirements-pilot)"
|
||||
@echo " deploy-essentials - Deploy Development Essentials workflow"
|
||||
@echo " deploy-advanced - Deploy Advanced AI Agents"
|
||||
@echo " deploy-commands - Deploy all slash commands"
|
||||
@echo " deploy-agents - Deploy all agent configurations"
|
||||
@echo " deploy-all - Deploy everything (commands + agents)"
|
||||
@echo " test-bmad - Test BMAD workflow with sample"
|
||||
@echo " test-requirements - Test Requirements workflow with sample"
|
||||
@echo " changelog - Update CHANGELOG.md using git-cliff"
|
||||
@echo " clean - Clean generated artifacts"
|
||||
@echo " help - Show this help message"
|
||||
|
||||
# Configuration paths
|
||||
CLAUDE_CONFIG_DIR = ~/.claude
|
||||
COMMANDS_DIR = commands
|
||||
AGENTS_DIR = agents
|
||||
OUTPUT_STYLES_DIR = output-styles
|
||||
SPECS_DIR = .claude/specs
|
||||
|
||||
# Workflow directories
|
||||
BMAD_DIR = agents/bmad
|
||||
REQUIREMENTS_DIR = agents/requirements
|
||||
ESSENTIALS_DIR = agents/development-essentials
|
||||
ADVANCED_DIR = advanced-ai-agents
|
||||
OUTPUT_STYLES_DIR = output-styles
|
||||
|
||||
# Install all configurations
|
||||
install: deploy-all
|
||||
@echo "⚠️ LEGACY PATH: make install will be removed in future versions."
|
||||
@echo " Prefer: npx github:cexll/myclaude"
|
||||
@echo "✅ Installation complete!"
|
||||
|
||||
# Deploy BMAD workflow
|
||||
@@ -38,8 +49,8 @@ deploy-bmad:
|
||||
@mkdir -p $(CLAUDE_CONFIG_DIR)/commands
|
||||
@mkdir -p $(CLAUDE_CONFIG_DIR)/agents
|
||||
@mkdir -p $(CLAUDE_CONFIG_DIR)/output-styles
|
||||
@cp $(COMMANDS_DIR)/bmad-pilot.md $(CLAUDE_CONFIG_DIR)/commands/
|
||||
@cp $(AGENTS_DIR)/bmad-*.md $(CLAUDE_CONFIG_DIR)/agents/
|
||||
@cp $(BMAD_DIR)/commands/bmad-pilot.md $(CLAUDE_CONFIG_DIR)/commands/
|
||||
@cp $(BMAD_DIR)/agents/*.md $(CLAUDE_CONFIG_DIR)/agents/
|
||||
@cp $(OUTPUT_STYLES_DIR)/bmad.md $(CLAUDE_CONFIG_DIR)/output-styles/ 2>/dev/null || true
|
||||
@echo "✅ BMAD workflow deployed successfully!"
|
||||
@echo " Usage: /bmad-pilot \"your feature description\""
|
||||
@@ -49,16 +60,35 @@ deploy-requirements:
|
||||
@echo "🚀 Deploying Requirements workflow..."
|
||||
@mkdir -p $(CLAUDE_CONFIG_DIR)/commands
|
||||
@mkdir -p $(CLAUDE_CONFIG_DIR)/agents
|
||||
@cp $(COMMANDS_DIR)/requirements-pilot.md $(CLAUDE_CONFIG_DIR)/commands/
|
||||
@cp $(AGENTS_DIR)/requirements-*.md $(CLAUDE_CONFIG_DIR)/agents/
|
||||
@cp $(REQUIREMENTS_DIR)/commands/requirements-pilot.md $(CLAUDE_CONFIG_DIR)/commands/
|
||||
@cp $(REQUIREMENTS_DIR)/agents/*.md $(CLAUDE_CONFIG_DIR)/agents/
|
||||
@echo "✅ Requirements workflow deployed successfully!"
|
||||
@echo " Usage: /requirements-pilot \"your feature description\""
|
||||
|
||||
# Deploy Development Essentials workflow
|
||||
deploy-essentials:
|
||||
@echo "🚀 Deploying Development Essentials workflow..."
|
||||
@mkdir -p $(CLAUDE_CONFIG_DIR)/commands
|
||||
@mkdir -p $(CLAUDE_CONFIG_DIR)/agents
|
||||
@cp $(ESSENTIALS_DIR)/commands/*.md $(CLAUDE_CONFIG_DIR)/commands/
|
||||
@cp $(ESSENTIALS_DIR)/agents/*.md $(CLAUDE_CONFIG_DIR)/agents/
|
||||
@echo "✅ Development Essentials deployed successfully!"
|
||||
@echo " Available commands: /ask, /code, /debug, /test, /review, /optimize, /bugfix, /refactor, /docs, /think"
|
||||
|
||||
# Deploy Advanced AI Agents
|
||||
deploy-advanced:
|
||||
@echo "🚀 Deploying Advanced AI Agents..."
|
||||
@mkdir -p $(CLAUDE_CONFIG_DIR)/agents
|
||||
@cp $(ADVANCED_DIR)/agents/*.md $(CLAUDE_CONFIG_DIR)/agents/
|
||||
@echo "✅ Advanced AI Agents deployed successfully!"
|
||||
|
||||
# Deploy all commands
|
||||
deploy-commands:
|
||||
@echo "📦 Deploying all slash commands..."
|
||||
@mkdir -p $(CLAUDE_CONFIG_DIR)/commands
|
||||
@cp $(COMMANDS_DIR)/*.md $(CLAUDE_CONFIG_DIR)/commands/
|
||||
@cp $(BMAD_DIR)/commands/*.md $(CLAUDE_CONFIG_DIR)/commands/
|
||||
@cp $(REQUIREMENTS_DIR)/commands/*.md $(CLAUDE_CONFIG_DIR)/commands/
|
||||
@cp $(ESSENTIALS_DIR)/commands/*.md $(CLAUDE_CONFIG_DIR)/commands/
|
||||
@echo "✅ All commands deployed!"
|
||||
@echo " Available commands:"
|
||||
@echo " - /bmad-pilot (Full agile workflow)"
|
||||
@@ -70,7 +100,10 @@ deploy-commands:
|
||||
deploy-agents:
|
||||
@echo "🤖 Deploying all agents..."
|
||||
@mkdir -p $(CLAUDE_CONFIG_DIR)/agents
|
||||
@cp $(AGENTS_DIR)/*.md $(CLAUDE_CONFIG_DIR)/agents/
|
||||
@cp $(BMAD_DIR)/agents/*.md $(CLAUDE_CONFIG_DIR)/agents/
|
||||
@cp $(REQUIREMENTS_DIR)/agents/*.md $(CLAUDE_CONFIG_DIR)/agents/
|
||||
@cp $(ESSENTIALS_DIR)/agents/*.md $(CLAUDE_CONFIG_DIR)/agents/
|
||||
@cp $(ADVANCED_DIR)/agents/*.md $(CLAUDE_CONFIG_DIR)/agents/
|
||||
@echo "✅ All agents deployed!"
|
||||
|
||||
# Deploy everything
|
||||
@@ -105,9 +138,24 @@ clean:
|
||||
# Quick deployment shortcuts
|
||||
bmad: deploy-bmad
|
||||
requirements: deploy-requirements
|
||||
essentials: deploy-essentials
|
||||
advanced: deploy-advanced
|
||||
all: deploy-all
|
||||
|
||||
# Version info
|
||||
version:
|
||||
@echo "Claude Code Multi-Agent Workflow System v3.1"
|
||||
@echo "BMAD + Requirements-Driven Development"
|
||||
@echo "BMAD + Requirements-Driven Development"
|
||||
|
||||
# Update CHANGELOG.md using git-cliff
|
||||
changelog:
|
||||
@echo "📝 Updating CHANGELOG.md with git-cliff..."
|
||||
@if ! command -v git-cliff > /dev/null 2>&1; then \
|
||||
echo "❌ git-cliff not found. Installing via Homebrew..."; \
|
||||
brew install git-cliff; \
|
||||
fi
|
||||
@git-cliff -o CHANGELOG.md
|
||||
@echo "✅ CHANGELOG.md updated successfully!"
|
||||
@echo ""
|
||||
@echo "Preview the changes:"
|
||||
@echo " git diff CHANGELOG.md"
|
||||
|
||||
@@ -1,95 +1,18 @@
|
||||
# Claude Code Plugin System
|
||||
# Plugin System
|
||||
|
||||
本项目已支持Claude Code插件系统,可以将命令和代理打包成可安装的插件包。
|
||||
Claude Code plugins for this repo are defined in `.claude-plugin/marketplace.json`.
|
||||
|
||||
## 插件配置
|
||||
## Install
|
||||
|
||||
插件配置文件位于 `.claude-plugin/marketplace.json`,定义了所有可用的插件包。
|
||||
|
||||
## 可用插件
|
||||
|
||||
### 1. Requirements-Driven Development
|
||||
- **描述**: 需求驱动的开发工作流,包含90%质量门控
|
||||
- **命令**: `/requirements-pilot`
|
||||
- **代理**: requirements-generate, requirements-code, requirements-testing, requirements-review
|
||||
|
||||
### 2. BMAD Agile Workflow
|
||||
- **描述**: 完整的BMAD敏捷工作流(产品负责人→架构师→SM→开发→QA)
|
||||
- **命令**: `/bmad-pilot`
|
||||
- **代理**: bmad-po, bmad-architect, bmad-sm, bmad-dev, bmad-qa, bmad-orchestrator
|
||||
|
||||
### 3. Development Essentials
|
||||
- **描述**: 核心开发命令套件
|
||||
- **命令**: `/code`, `/debug`, `/test`, `/optimize`, `/review`, `/bugfix`, `/refactor`, `/docs`, `/ask`, `/think`
|
||||
- **代理**: code, bugfix, bugfix-verify, code-optimize, debug, develop
|
||||
|
||||
### 4. Advanced AI Agents
|
||||
- **描述**: 高级AI代理,集成GPT-5进行深度分析
|
||||
- **代理**: gpt5
|
||||
|
||||
## 使用插件命令
|
||||
|
||||
### 列出所有可用插件
|
||||
```bash
|
||||
/plugin marketplace add cexll/myclaude
|
||||
/plugin list
|
||||
```
|
||||
|
||||
### 查看插件详情
|
||||
```bash
|
||||
/plugin info <plugin-name>
|
||||
```
|
||||
例如:`/plugin info requirements-driven-development`
|
||||
## Available Plugins
|
||||
|
||||
### 安装插件
|
||||
```bash
|
||||
/plugin install <plugin-name>
|
||||
```
|
||||
例如:`/plugin install bmad-agile-workflow`
|
||||
|
||||
### 移除插件
|
||||
```bash
|
||||
/plugin remove <plugin-name>
|
||||
```
|
||||
|
||||
## 创建自定义插件
|
||||
|
||||
要创建自己的插件:
|
||||
|
||||
1. 在 `.claude-plugin/marketplace.json` 中添加新的插件定义
|
||||
2. 指定插件包含的命令和代理文件路径
|
||||
3. 设置适当的元数据(版本、作者、关键词等)
|
||||
|
||||
示例插件结构:
|
||||
```json
|
||||
{
|
||||
"name": "my-custom-plugin",
|
||||
"source": "./",
|
||||
"description": "自定义插件描述",
|
||||
"version": "1.0.0",
|
||||
"commands": [
|
||||
"./commands/my-command.md"
|
||||
],
|
||||
"agents": [
|
||||
"./agents/my-agent.md"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## 分享插件
|
||||
|
||||
要分享插件给其他项目:
|
||||
1. 复制整个 `.claude-plugin` 目录到目标项目
|
||||
2. 确保相关的命令和代理文件存在
|
||||
3. 在新项目中使用 `/plugin` 命令管理插件
|
||||
|
||||
## 注意事项
|
||||
|
||||
- 插件系统遵循Claude Code的插件规范
|
||||
- 所有命令和代理文件必须是有效的Markdown格式
|
||||
- 插件配置支持版本管理和依赖关系
|
||||
- 插件可以包含多个命令、代理和输出样式
|
||||
|
||||
## 相关文档
|
||||
|
||||
- [Claude Code插件文档](https://docs.claude.com/en/docs/claude-code/plugins)
|
||||
- [示例插件仓库](https://github.com/wshobson/agents)
|
||||
- `bmad` - BMAD workflow (`./agents/bmad`)
|
||||
- `requirements` - requirements-driven workflow (`./agents/requirements`)
|
||||
- `dev-kit` - development essentials (`./agents/development-essentials`)
|
||||
- `omo` - orchestration skill (`./skills/omo`)
|
||||
- `sparv` - SPARV workflow (`./skills/sparv`)
|
||||
|
||||
288
README-zh.md
288
README-zh.md
@@ -1,288 +0,0 @@
|
||||
# Claude Code 多智能体工作流系统
|
||||
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
[](https://claude.ai/code)
|
||||
[](https://github.com/)
|
||||
[](https://docs.claude.com/en/docs/claude-code/plugins)
|
||||
|
||||
> 企业级敏捷开发工作流自动化与多智能体编排
|
||||
|
||||
[English](README.md)
|
||||
|
||||
## 🚀 BMAD 方法论:敏捷开发自动化
|
||||
|
||||
**BMAD (Business-Minded Agile Development)** 将您的开发流程转换为全自动化的敏捷工作流,配备角色化 AI 智能体和质量门控。
|
||||
|
||||
### 一条命令,完整工作流
|
||||
|
||||
```bash
|
||||
/bmad-pilot "构建电商结账系统,集成支付功能"
|
||||
# 自动化:产品 → 架构 → 冲刺 → 开发 → 审查 → 测试
|
||||
```
|
||||
|
||||
## 🎯 BMAD 工作流架构
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
PO[产品负责人] -->|PRD 90+| Architect[架构师]
|
||||
Architect -->|设计 90+| SM[Scrum Master]
|
||||
SM -->|冲刺计划| Dev[开发者]
|
||||
Dev -->|代码| Review[审查]
|
||||
Review -->|Pass/Fail| QA[测试]
|
||||
QA -->|测试| Done[完成]
|
||||
```
|
||||
|
||||
### 核心特性
|
||||
|
||||
- **🤖 6个专业智能体**:PO、Architect、SM、Dev、Review、QA
|
||||
- **📊 质量门控**:90% 阈值自动优化
|
||||
- **✅ 确认节点**:关键阶段用户确认
|
||||
- **📁 持久化产物**:所有文档保存至 `./.claude/specs/`
|
||||
- **🔄 迭代优化**:自动改进直至质量达标
|
||||
|
||||
## 📋 BMAD 智能体与角色
|
||||
|
||||
| 智能体 | 角色 | 质量门控 | 输出 |
|
||||
|--------|------|----------|------|
|
||||
| **bmad-po** (Sarah) | 产品需求收集 | 90/100 PRD 评分 | `01-product-requirements.md` |
|
||||
| **bmad-architect** (Winston) | 技术设计与架构 | 90/100 设计评分 | `02-system-architecture.md` |
|
||||
| **bmad-sm** (Mike) | 冲刺计划与任务分解 | 用户确认 | `03-sprint-plan.md` |
|
||||
| **bmad-dev** (Alex) | 功能实现 | 代码完成 | 实现文件 |
|
||||
| **bmad-review** | 独立代码审查 | Pass/Risk/Fail | `04-dev-reviewed.md` |
|
||||
| **bmad-qa** (Emma) | 测试与质量保证 | 测试执行 | `05-qa-report.md` |
|
||||
|
||||
## 🚀 快速开始
|
||||
|
||||
### 安装方法
|
||||
|
||||
#### 方法1:插件系统(推荐)🎯
|
||||
|
||||
```bash
|
||||
/plugin github.com/cexll/myclaude
|
||||
```
|
||||
|
||||
#### 方法2:传统安装
|
||||
|
||||
```bash
|
||||
# 克隆仓库
|
||||
git clone https://github.com/your-repo/claude-code-workflows.git
|
||||
cd claude-code-workflows
|
||||
|
||||
# 使用 make 安装所有配置
|
||||
make install
|
||||
|
||||
# 或部署特定工作流
|
||||
make deploy-bmad # 仅部署 BMAD 工作流
|
||||
make deploy-requirements # 仅部署 Requirements 工作流
|
||||
make deploy-all # 部署所有命令和智能体
|
||||
```
|
||||
|
||||
### 基本 BMAD 工作流
|
||||
|
||||
```bash
|
||||
# 完整敏捷工作流(所有阶段)
|
||||
/bmad-pilot "用户认证系统,支持 OAuth2 和多因素认证"
|
||||
|
||||
# 快速原型(跳过测试)
|
||||
/bmad-pilot "管理后台" --skip-tests
|
||||
|
||||
# 直接开发(跳过冲刺计划)
|
||||
/bmad-pilot "修复登录问题" --direct-dev
|
||||
|
||||
# 跳过仓库扫描(使用现有上下文)
|
||||
/bmad-pilot "添加功能" --skip-scan
|
||||
```
|
||||
|
||||
### 工作流产物
|
||||
|
||||
每次 BMAD 运行创建结构化文档:
|
||||
|
||||
```
|
||||
.claude/specs/user-authentication/
|
||||
├── 00-repository-context.md # 仓库分析
|
||||
├── 01-product-requirements.md # PRD 及业务目标
|
||||
├── 02-system-architecture.md # 技术设计
|
||||
├── 03-sprint-plan.md # 冲刺任务
|
||||
├── 04-dev-reviewed.md # 代码审查报告(v3.1 新增)
|
||||
└── 05-qa-report.md # 测试结果
|
||||
```
|
||||
|
||||
## 🎨 BMAD 输出样式
|
||||
|
||||
BMAD 工作流使用专门的输出样式:
|
||||
- 创建阶段隔离的上下文
|
||||
- 管理智能体交接
|
||||
- 跟踪质量评分
|
||||
- 处理确认门控
|
||||
- 支持 Codex CLI 集成
|
||||
|
||||
## ⚡ v3.2 插件系统
|
||||
|
||||
### 🔌 原生插件支持(新增)
|
||||
本项目现已包含原生 Claude Code 插件支持,提供4个即装即用的插件包:
|
||||
|
||||
#### 可用插件
|
||||
|
||||
| 插件 | 描述 | 命令 | 智能体 |
|
||||
|------|------|------|--------|
|
||||
| **bmad-agile-workflow** | 完整 BMAD 方法论及角色化智能体 | `/bmad-pilot` | bmad-po, bmad-architect, bmad-sm, bmad-dev, bmad-qa |
|
||||
| **requirements-driven-development** | 精简需求工作流 | `/requirements-pilot` | requirements-generate, requirements-code, requirements-review |
|
||||
| **development-essentials** | 核心开发命令 | `/code`, `/debug`, `/test`, `/optimize` | code, bugfix, debug, develop |
|
||||
| **advanced-ai-agents** | GPT-5 深度分析集成 | - | gpt5 |
|
||||
|
||||
#### 使用插件
|
||||
|
||||
```bash
|
||||
# 列出所有可用插件
|
||||
/plugin list
|
||||
|
||||
# 获取插件详细信息
|
||||
/plugin info bmad-agile-workflow
|
||||
|
||||
# 安装插件以激活其命令和智能体
|
||||
/plugin install requirements-driven-development
|
||||
|
||||
# 移除已安装的插件
|
||||
/plugin remove development-essentials
|
||||
```
|
||||
|
||||
#### 插件配置
|
||||
插件定义在 `.claude-plugin/marketplace.json`,遵循 Claude Code 插件规范。每个插件包含:
|
||||
- 命令(斜杠命令)
|
||||
- 智能体(专业 AI 智能体)
|
||||
- 元数据(版本、作者、关键词)
|
||||
- 类别分类
|
||||
|
||||
## ⚡ v3.1 特性
|
||||
|
||||
### 独立代码审查智能体
|
||||
- **bmad-review**:Dev 和 QA 之间的自动审查
|
||||
- **双版本支持**:
|
||||
- 标准版:Claude Code 原生审查
|
||||
- 增强版:通过 Codex CLI 调用 GPT-5
|
||||
- **三级状态**:Pass / Pass with Risk / Fail
|
||||
|
||||
### 增强工作流
|
||||
- Dev → Review → QA 质量链
|
||||
- 自动更新冲刺计划
|
||||
- 针对性 QA 测试建议
|
||||
|
||||
## 📊 质量评分系统
|
||||
|
||||
### PRD 质量(100分)
|
||||
- 业务价值:30
|
||||
- 功能需求:25
|
||||
- 用户体验:20
|
||||
- 技术约束:15
|
||||
- 范围与优先级:10
|
||||
|
||||
### 架构质量(100分)
|
||||
- 设计质量:30
|
||||
- 技术选型:25
|
||||
- 可扩展性:20
|
||||
- 安全性:15
|
||||
- 可行性:10
|
||||
|
||||
### 审查状态
|
||||
- **Pass**:无问题,进入 QA
|
||||
- **Pass with Risk**:非关键问题
|
||||
- **Fail**:必须返回 Dev
|
||||
|
||||
## 🔧 高级用法
|
||||
|
||||
### 仓库上下文
|
||||
BMAD 自动扫描仓库了解:
|
||||
- 技术栈
|
||||
- 项目结构
|
||||
- 现有模式
|
||||
- 依赖关系
|
||||
- 编码规范
|
||||
|
||||
### 交互式优化
|
||||
每个阶段支持迭代改进:
|
||||
```
|
||||
PO: "这是 PRD(评分:75/100)"
|
||||
用户: "添加移动端支持和离线模式"
|
||||
PO: "更新的 PRD(评分:92/100)✅"
|
||||
```
|
||||
|
||||
### 确认门控
|
||||
关键阶段需要明确确认:
|
||||
```
|
||||
架构师: "技术设计完成(评分:93/100)"
|
||||
系统: "准备继续?(yes/no)"
|
||||
用户: yes
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🏭 Requirements-Driven 工作流
|
||||
|
||||
适用于简单项目的轻量级替代方案:
|
||||
|
||||
```bash
|
||||
/requirements-pilot "实现 JWT 认证"
|
||||
# 自动化:需求 → 代码 → 审查 → 测试
|
||||
```
|
||||
|
||||
### 特性
|
||||
- 90% 质量门控
|
||||
- 自动优化循环
|
||||
- 实现导向规格
|
||||
- 实用主义优先
|
||||
|
||||
## 🛠️ 其他命令
|
||||
|
||||
### 开发命令
|
||||
- `/ask` - 技术咨询
|
||||
- `/code` - 直接实现
|
||||
- `/debug` - 系统化调试
|
||||
- `/test` - 测试策略
|
||||
- `/review` - 代码验证
|
||||
- `/optimize` - 性能优化
|
||||
- `/bugfix` - 错误解决
|
||||
- `/refactor` - 代码改进
|
||||
- `/docs` - 文档生成
|
||||
- `/think` - 高级分析
|
||||
|
||||
### 手动工作流示例
|
||||
```bash
|
||||
/ask "实时消息的设计模式"
|
||||
/code "实现 WebSocket 服务器"
|
||||
/test "创建集成测试"
|
||||
/review "验证安全性"
|
||||
```
|
||||
|
||||
## 📄 许可证
|
||||
|
||||
MIT 许可证 - 查看 [LICENSE](LICENSE) 文件
|
||||
|
||||
## 🙋 支持
|
||||
|
||||
- **文档**:查看 `/commands/` 和 `/agents/` 目录
|
||||
- **插件指南**:查看 [PLUGIN_README.md](PLUGIN_README.md) 了解插件系统详情
|
||||
- **问题**:GitHub issues 用于报告 bug 和功能请求
|
||||
- **Makefile 帮助**:运行 `make help` 查看所有部署选项
|
||||
- **Claude Code 文档**:[插件系统](https://docs.claude.com/en/docs/claude-code/plugins)
|
||||
|
||||
### 可用的 Make 命令
|
||||
|
||||
```bash
|
||||
make install # 安装所有配置到 Claude Code
|
||||
make deploy-bmad # 仅部署 BMAD 工作流
|
||||
make deploy-requirements # 仅部署 Requirements 工作流
|
||||
make deploy-commands # 部署所有斜杠命令
|
||||
make deploy-agents # 部署所有智能体配置
|
||||
make test-bmad # 测试 BMAD 工作流示例
|
||||
make test-requirements # 测试 Requirements 工作流示例
|
||||
make clean # 清理生成的文件
|
||||
make help # 显示所有可用命令
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
**使用 BMAD 转型您的开发** - 一条命令,完整敏捷工作流,质量保证。
|
||||
|
||||
*通过 `/plugin install bmad-agile-workflow` 安装或使用传统安装方法。*
|
||||
|
||||
*让专业的 AI 智能体处理专业工作。*
|
||||
374
README.md
374
README.md
@@ -1,289 +1,151 @@
|
||||
[中文](README_CN.md) [English](README.md)
|
||||
|
||||
# Claude Code Multi-Agent Workflow System
|
||||
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
[](https://smithery.ai/skills?ns=cexll&utm_source=github&utm_medium=badge)
|
||||
[](https://www.gnu.org/licenses/agpl-3.0)
|
||||
[](https://claude.ai/code)
|
||||
[](https://github.com/)
|
||||
[](https://docs.claude.com/en/docs/claude-code/plugins)
|
||||
[](https://github.com/cexll/myclaude)
|
||||
|
||||
> Enterprise-grade agile development workflow automation with multi-agent orchestration
|
||||
> AI-powered development automation with multi-backend execution (Codex/Claude/Gemini/OpenCode)
|
||||
|
||||
[中文](README-zh.md)
|
||||
|
||||
## 🚀 BMAD Methodology: Agile Development Automation
|
||||
|
||||
**BMAD (Business-Minded Agile Development)** transforms your development process into a fully automated agile workflow with role-based AI agents and quality gates.
|
||||
|
||||
### One Command, Complete Workflow
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
/bmad-pilot "Build e-commerce checkout system with payment integration"
|
||||
# Automated: Product → Architecture → Sprint → Dev → Review → QA
|
||||
npx github:cexll/myclaude
|
||||
```
|
||||
|
||||
## 🎯 BMAD Workflow Architecture
|
||||
## Modules Overview
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
PO[Product Owner] -->|PRD 90+| Architect
|
||||
Architect -->|Design 90+| SM[Scrum Master]
|
||||
SM -->|Sprint Plan| Dev
|
||||
Dev -->|Code| Review
|
||||
Review -->|Pass/Fail| QA
|
||||
QA -->|Tests| Done
|
||||
```
|
||||
| Module | Description | Documentation |
|
||||
|--------|-------------|---------------|
|
||||
| [do](skills/do/README.md) | **Recommended** - 7-phase feature development with codeagent orchestration | `/do` command |
|
||||
| [omo](skills/omo/README.md) | Multi-agent orchestration with intelligent routing | `/omo` command |
|
||||
| [bmad](agents/bmad/README.md) | BMAD agile workflow with 6 specialized agents | `/bmad-pilot` command |
|
||||
| [requirements](agents/requirements/README.md) | Lightweight requirements-to-code pipeline | `/requirements-pilot` command |
|
||||
| [essentials](agents/development-essentials/README.md) | Core development commands and utilities | `/code`, `/debug`, etc. |
|
||||
| [sparv](skills/sparv/README.md) | SPARV workflow (Specify→Plan→Act→Review→Vault) | `/sparv` command |
|
||||
| course | Course development (combines dev + product-requirements + test-cases) | Composite module |
|
||||
|
||||
### Key Features
|
||||
|
||||
- **🤖 6 Specialized Agents**: PO, Architect, SM, Dev, Review, QA
|
||||
- **📊 Quality Gates**: 90% thresholds with automatic optimization
|
||||
- **✅ Approval Points**: User confirmation at critical phases
|
||||
- **📁 Persistent Artifacts**: All documents saved to `./.claude/specs/`
|
||||
- **🔄 Iterative Refinement**: Automatic improvement until quality met
|
||||
|
||||
## 📋 BMAD Agents & Roles
|
||||
|
||||
| Agent | Role | Quality Gate | Output |
|
||||
|-------|------|--------------|--------|
|
||||
| **bmad-po** (Sarah) | Product requirements gathering | 90/100 PRD score | `01-product-requirements.md` |
|
||||
| **bmad-architect** (Winston) | Technical design & architecture | 90/100 design score | `02-system-architecture.md` |
|
||||
| **bmad-sm** (Mike) | Sprint planning & task breakdown | User approval | `03-sprint-plan.md` |
|
||||
| **bmad-dev** (Alex) | Feature implementation | Code completion | Implementation files |
|
||||
| **bmad-review** | Independent code review | Pass/Risk/Fail | `04-dev-reviewed.md` |
|
||||
| **bmad-qa** (Emma) | Testing & quality assurance | Test execution | `05-qa-report.md` |
|
||||
|
||||
## 🚀 Quick Start
|
||||
|
||||
### Installation Methods
|
||||
|
||||
#### Method 1: Plugin System (Recommended) 🎯
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
# List available plugins
|
||||
/plugin github.com/cexll/myclaude
|
||||
# Interactive installer (recommended)
|
||||
npx github:cexll/myclaude
|
||||
|
||||
# List installable items (modules / skills / wrapper)
|
||||
npx github:cexll/myclaude --list
|
||||
|
||||
# Detect installed modules and update from GitHub
|
||||
npx github:cexll/myclaude --update
|
||||
|
||||
# Custom install directory / overwrite
|
||||
npx github:cexll/myclaude --install-dir ~/.claude --force
|
||||
```
|
||||
|
||||
#### Method 2: Traditional Installation
|
||||
`--update` detects already installed modules in the target install dir (defaults to `~/.claude`, via `installed_modules.json` when present) and updates them from GitHub (latest release) by overwriting the module files.
|
||||
|
||||
### Module Configuration
|
||||
|
||||
Edit `config.json` to enable/disable modules:
|
||||
|
||||
```json
|
||||
{
|
||||
"modules": {
|
||||
"bmad": { "enabled": false },
|
||||
"requirements": { "enabled": false },
|
||||
"essentials": { "enabled": false },
|
||||
"omo": { "enabled": false },
|
||||
"sparv": { "enabled": false },
|
||||
"do": { "enabled": true },
|
||||
"course": { "enabled": false }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Workflow Selection Guide
|
||||
|
||||
| Scenario | Recommended |
|
||||
|----------|-------------|
|
||||
| Feature development (default) | `/do` |
|
||||
| Bug investigation + fix | `/omo` |
|
||||
| Large enterprise project | `/bmad-pilot` |
|
||||
| Quick prototype | `/requirements-pilot` |
|
||||
| Simple task | `/code`, `/debug` |
|
||||
|
||||
## Core Architecture
|
||||
|
||||
| Role | Agent | Responsibility |
|
||||
|------|-------|----------------|
|
||||
| **Orchestrator** | Claude Code | Planning, context gathering, verification |
|
||||
| **Executor** | codeagent-wrapper | Code editing, test execution (Codex/Claude/Gemini/OpenCode) |
|
||||
|
||||
## Backend CLI Requirements
|
||||
|
||||
| Backend | Required Features |
|
||||
|---------|-------------------|
|
||||
| Codex | `codex e`, `--json`, `-C`, `resume` |
|
||||
| Claude | `--output-format stream-json`, `-r` |
|
||||
| Gemini | `-o stream-json`, `-y`, `-r` |
|
||||
|
||||
## Directory Structure After Installation
|
||||
|
||||
```
|
||||
~/.claude/
|
||||
├── bin/codeagent-wrapper
|
||||
├── CLAUDE.md
|
||||
├── commands/
|
||||
├── agents/
|
||||
├── skills/
|
||||
└── config.json
|
||||
```
|
||||
|
||||
## Documentation
|
||||
|
||||
- [codeagent-wrapper](codeagent-wrapper/README.md)
|
||||
- [Plugin System](PLUGIN_README.md)
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
**Codex wrapper not found:**
|
||||
```bash
|
||||
# Clone the repository
|
||||
git clone https://github.com/your-repo/claude-code-workflows.git
|
||||
cd claude-code-workflows
|
||||
|
||||
# Install everything with make
|
||||
make install
|
||||
|
||||
# Or deploy specific workflows
|
||||
make deploy-bmad # Deploy BMAD workflow only
|
||||
make deploy-requirements # Deploy Requirements workflow only
|
||||
make deploy-all # Deploy all commands and agents
|
||||
# Select: codeagent-wrapper
|
||||
npx github:cexll/myclaude
|
||||
```
|
||||
|
||||
### Basic BMAD Workflow
|
||||
|
||||
**Module not loading:**
|
||||
```bash
|
||||
# Full agile workflow with all phases
|
||||
/bmad-pilot "User authentication system with OAuth2 and MFA"
|
||||
|
||||
# Skip testing for quick prototypes
|
||||
/bmad-pilot "Admin dashboard" --skip-tests
|
||||
|
||||
# Direct development (skip sprint planning)
|
||||
/bmad-pilot "Bug fix for login issue" --direct-dev
|
||||
|
||||
# Skip repository scanning (use existing context)
|
||||
/bmad-pilot "Add feature" --skip-scan
|
||||
cat ~/.claude/installed_modules.json
|
||||
npx github:cexll/myclaude --force
|
||||
```
|
||||
|
||||
### Workflow Artifacts
|
||||
|
||||
Each BMAD run creates structured documentation:
|
||||
|
||||
```
|
||||
.claude/specs/user-authentication/
|
||||
├── 00-repository-context.md # Repository analysis
|
||||
├── 01-product-requirements.md # PRD with business goals
|
||||
├── 02-system-architecture.md # Technical design
|
||||
├── 03-sprint-plan.md # Sprint tasks
|
||||
├── 04-dev-reviewed.md # Code review report (NEW v3.1)
|
||||
└── 05-qa-report.md # Test results
|
||||
```
|
||||
|
||||
## 🎨 BMAD Output Style
|
||||
|
||||
The BMAD workflow uses a specialized output style that:
|
||||
- Creates phase-separated contexts
|
||||
- Manages agent handoffs
|
||||
- Tracks quality scores
|
||||
- Handles approval gates
|
||||
- Supports Codex CLI integration
|
||||
|
||||
## ⚡ v3.2 Plugin System
|
||||
|
||||
### 🔌 Native Plugin Support (NEW)
|
||||
This project now includes native Claude Code plugin support with 4 ready-to-use plugin packages:
|
||||
|
||||
#### Available Plugins
|
||||
|
||||
| Plugin | Description | Commands | Agents |
|
||||
|--------|------------|----------|--------|
|
||||
| **bmad-agile-workflow** | Full BMAD methodology with role-based agents | `/bmad-pilot` | bmad-po, bmad-architect, bmad-sm, bmad-dev, bmad-qa |
|
||||
| **requirements-driven-development** | Streamlined requirements workflow | `/requirements-pilot` | requirements-generate, requirements-code, requirements-review |
|
||||
| **development-essentials** | Core development commands | `/code`, `/debug`, `/test`, `/optimize` | code, bugfix, debug, develop |
|
||||
| **advanced-ai-agents** | GPT-5 deep analysis integration | - | gpt5 |
|
||||
|
||||
#### Using Plugins
|
||||
|
||||
**Backend CLI errors:**
|
||||
```bash
|
||||
# List all available plugins
|
||||
/plugin list
|
||||
|
||||
# Get detailed information about a plugin
|
||||
/plugin info bmad-agile-workflow
|
||||
|
||||
# Install a plugin to activate its commands and agents
|
||||
/plugin install requirements-driven-development
|
||||
|
||||
# Remove an installed plugin
|
||||
/plugin remove development-essentials
|
||||
which codex && codex --version
|
||||
which claude && claude --version
|
||||
which gemini && gemini --version
|
||||
```
|
||||
|
||||
#### Plugin Configuration
|
||||
Plugins are defined in `.claude-plugin/marketplace.json` following the Claude Code plugin specification. Each plugin includes:
|
||||
- Commands (slash commands)
|
||||
- Agents (specialized AI agents)
|
||||
- Metadata (version, author, keywords)
|
||||
- Category classification
|
||||
## FAQ
|
||||
|
||||
## ⚡ v3.1 Features
|
||||
| Issue | Solution |
|
||||
|-------|----------|
|
||||
| "Unknown event format" | Logging display issue, can be ignored |
|
||||
| Gemini can't read .gitignore files | Remove from .gitignore or use different backend |
|
||||
| Codex permission denied | Set `approval_policy = "never"` in ~/.codex/config.yaml |
|
||||
|
||||
### Independent Code Review Agent
|
||||
- **bmad-review**: Automated review between Dev and QA
|
||||
- **Dual Version Support**:
|
||||
- Standard: Native Claude Code review
|
||||
- Enhanced: GPT-5 via Codex CLI
|
||||
- **Three-tier Status**: Pass / Pass with Risk / Fail
|
||||
See [GitHub Issues](https://github.com/cexll/myclaude/issues) for more.
|
||||
|
||||
### Enhanced Workflow
|
||||
- Dev → Review → QA quality chain
|
||||
- Automatic Sprint plan updates
|
||||
- Targeted QA test recommendations
|
||||
## License
|
||||
|
||||
## 📊 Quality Scoring Systems
|
||||
AGPL-3.0 - see [LICENSE](LICENSE)
|
||||
|
||||
### PRD Quality (100 points)
|
||||
- Business Value: 30
|
||||
- Functional Requirements: 25
|
||||
- User Experience: 20
|
||||
- Technical Constraints: 15
|
||||
- Scope & Priorities: 10
|
||||
### Commercial Licensing
|
||||
|
||||
### Architecture Quality (100 points)
|
||||
- Design Quality: 30
|
||||
- Technology Selection: 25
|
||||
- Scalability: 20
|
||||
- Security: 15
|
||||
- Feasibility: 10
|
||||
For commercial use without AGPL obligations, contact: evanxian9@gmail.com
|
||||
|
||||
### Review Status
|
||||
- **Pass**: No issues, proceed to QA
|
||||
- **Pass with Risk**: Non-critical issues
|
||||
- **Fail**: Must return to Dev
|
||||
## Support
|
||||
|
||||
## 🔧 Advanced Usage
|
||||
|
||||
### Repository Context
|
||||
BMAD automatically scans your repository to understand:
|
||||
- Technology stack
|
||||
- Project structure
|
||||
- Existing patterns
|
||||
- Dependencies
|
||||
- Conventions
|
||||
|
||||
### Interactive Refinement
|
||||
Each phase supports iterative improvement:
|
||||
```
|
||||
PO: "Here's the PRD (Score: 75/100)"
|
||||
User: "Add mobile support and offline mode"
|
||||
PO: "Updated PRD (Score: 92/100) ✅"
|
||||
```
|
||||
|
||||
### Approval Gates
|
||||
Critical phases require explicit confirmation:
|
||||
```
|
||||
Architect: "Technical design complete (Score: 93/100)"
|
||||
System: "Ready to proceed? (yes/no)"
|
||||
User: yes
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🏭 Requirements-Driven Workflow
|
||||
|
||||
An alternative lightweight workflow for simpler projects:
|
||||
|
||||
```bash
|
||||
/requirements-pilot "Implement JWT authentication"
|
||||
# Automated: Requirements → Code → Review → Test
|
||||
```
|
||||
|
||||
### Features
|
||||
- 90% quality gates
|
||||
- Automatic optimization loops
|
||||
- Implementation-focused specs
|
||||
- Pragmatic over architectural
|
||||
|
||||
## 🛠️ Other Commands
|
||||
|
||||
### Development Commands
|
||||
- `/ask` - Technical consultation
|
||||
- `/code` - Direct implementation
|
||||
- `/debug` - Systematic debugging
|
||||
- `/test` - Testing strategies
|
||||
- `/review` - Code validation
|
||||
- `/optimize` - Performance tuning
|
||||
- `/bugfix` - Bug resolution
|
||||
- `/refactor` - Code improvement
|
||||
- `/docs` - Documentation
|
||||
- `/think` - Advanced analysis
|
||||
|
||||
### Manual Workflow Example
|
||||
```bash
|
||||
/ask "Design patterns for real-time messaging"
|
||||
/code "Implement WebSocket server"
|
||||
/test "Create integration tests"
|
||||
/review "Validate security"
|
||||
```
|
||||
|
||||
## 📄 License
|
||||
|
||||
AGPL-3.0 License - see [LICENSE](LICENSE) file
|
||||
|
||||
## 🙋 Support
|
||||
|
||||
- **Documentation**: Check `/commands/` and `/agents/` directories
|
||||
- **Plugin Guide**: See [PLUGIN_README.md](PLUGIN_README.md) for plugin system details
|
||||
- **Issues**: GitHub issues for bugs and features
|
||||
- **Makefile Help**: Run `make help` for all deployment options
|
||||
- **Claude Code Docs**: [Plugin System](https://docs.claude.com/en/docs/claude-code/plugins)
|
||||
|
||||
### Available Make Commands
|
||||
|
||||
```bash
|
||||
make install # Install everything to Claude Code
|
||||
make deploy-bmad # Deploy BMAD workflow only
|
||||
make deploy-requirements # Deploy Requirements workflow only
|
||||
make deploy-commands # Deploy all slash commands
|
||||
make deploy-agents # Deploy all agent configurations
|
||||
make test-bmad # Test BMAD workflow sample
|
||||
make test-requirements # Test Requirements workflow sample
|
||||
make clean # Clean generated artifacts
|
||||
make help # Show all available commands
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
**Transform your development with BMAD** - One command, complete agile workflow, quality assured.
|
||||
|
||||
*Install with `/plugin install bmad-agile-workflow` or use traditional installation methods.*
|
||||
|
||||
*Let specialized AI agents handle specialized work.*
|
||||
- [GitHub Issues](https://github.com/cexll/myclaude/issues)
|
||||
|
||||
256
README_CN.md
Normal file
256
README_CN.md
Normal file
@@ -0,0 +1,256 @@
|
||||
# Claude Code 多智能体工作流系统
|
||||
|
||||
[](https://www.gnu.org/licenses/agpl-3.0)
|
||||
[](https://claude.ai/code)
|
||||
[](https://github.com/cexll/myclaude)
|
||||
|
||||
> AI 驱动的开发自动化 - 多后端执行架构 (Codex/Claude/Gemini/OpenCode)
|
||||
|
||||
## 快速开始
|
||||
|
||||
```bash
|
||||
npx github:cexll/myclaude
|
||||
```
|
||||
|
||||
## 模块概览
|
||||
|
||||
| 模块 | 描述 | 文档 |
|
||||
|------|------|------|
|
||||
| [do](skills/do/README.md) | **推荐** - 7 阶段功能开发 + codeagent 编排 | `/do` 命令 |
|
||||
| [omo](skills/omo/README.md) | 多智能体编排 + 智能路由 | `/omo` 命令 |
|
||||
| [bmad](agents/bmad/README.md) | BMAD 敏捷工作流 + 6 个专业智能体 | `/bmad-pilot` 命令 |
|
||||
| [requirements](agents/requirements/README.md) | 轻量级需求到代码流水线 | `/requirements-pilot` 命令 |
|
||||
| [essentials](agents/development-essentials/README.md) | 核心开发命令和工具 | `/code`, `/debug` 等 |
|
||||
| [sparv](skills/sparv/README.md) | SPARV 工作流 (Specify→Plan→Act→Review→Vault) | `/sparv` 命令 |
|
||||
| course | 课程开发(组合 dev + product-requirements + test-cases) | 组合模块 |
|
||||
|
||||
## 核心架构
|
||||
|
||||
| 角色 | 智能体 | 职责 |
|
||||
|------|-------|------|
|
||||
| **编排者** | Claude Code | 规划、上下文收集、验证 |
|
||||
| **执行者** | codeagent-wrapper | 代码编辑、测试执行(Codex/Claude/Gemini/OpenCode 后端)|
|
||||
|
||||
## 工作流详解
|
||||
|
||||
### do 工作流(推荐)
|
||||
|
||||
7 阶段功能开发,通过 codeagent-wrapper 编排多个智能体。**大多数功能开发任务的首选工作流。**
|
||||
|
||||
```bash
|
||||
/do "添加用户登录功能"
|
||||
```
|
||||
|
||||
**7 阶段:**
|
||||
| 阶段 | 名称 | 目标 |
|
||||
|------|------|------|
|
||||
| 1 | Discovery | 理解需求 |
|
||||
| 2 | Exploration | 映射代码库模式 |
|
||||
| 3 | Clarification | 解决歧义(**强制**)|
|
||||
| 4 | Architecture | 设计实现方案 |
|
||||
| 5 | Implementation | 构建功能(**需审批**)|
|
||||
| 6 | Review | 捕获缺陷 |
|
||||
| 7 | Summary | 记录结果 |
|
||||
|
||||
**智能体:**
|
||||
- `code-explorer` - 代码追踪、架构映射
|
||||
- `code-architect` - 设计方案、文件规划
|
||||
- `code-reviewer` - 代码审查、简化建议
|
||||
- `develop` - 实现代码、运行测试
|
||||
|
||||
---
|
||||
|
||||
### OmO 多智能体编排器
|
||||
|
||||
基于风险信号智能路由任务到专业智能体。
|
||||
|
||||
```bash
|
||||
/omo "分析并修复这个认证 bug"
|
||||
```
|
||||
|
||||
**智能体层级:**
|
||||
| 智能体 | 角色 | 后端 |
|
||||
|-------|------|------|
|
||||
| `oracle` | 技术顾问 | Claude |
|
||||
| `librarian` | 外部研究 | Claude |
|
||||
| `explore` | 代码库搜索 | OpenCode |
|
||||
| `develop` | 代码实现 | Codex |
|
||||
| `frontend-ui-ux-engineer` | UI/UX 专家 | Gemini |
|
||||
| `document-writer` | 文档撰写 | Gemini |
|
||||
|
||||
**常用配方:**
|
||||
- 解释代码:`explore`
|
||||
- 位置已知的小修复:直接 `develop`
|
||||
- Bug 修复(位置未知):`explore → develop`
|
||||
- 跨模块重构:`explore → oracle → develop`
|
||||
|
||||
---
|
||||
|
||||
### SPARV 工作流
|
||||
|
||||
极简 5 阶段工作流:Specify → Plan → Act → Review → Vault。
|
||||
|
||||
```bash
|
||||
/sparv "实现订单导出功能"
|
||||
```
|
||||
|
||||
**核心规则:**
|
||||
- **10 分规格门**:得分 0-10,必须 >=9 才能进入 Plan
|
||||
- **2 动作保存**:每 2 次工具调用写入 journal.md
|
||||
- **3 失败协议**:连续 3 次失败后停止并上报
|
||||
- **EHRB**:高风险操作需明确确认
|
||||
|
||||
**评分维度(各 0-2 分):**
|
||||
1. Value - 为什么做,可验证的收益
|
||||
2. Scope - MVP + 不在范围内的内容
|
||||
3. Acceptance - 可测试的验收标准
|
||||
4. Boundaries - 错误/性能/兼容/安全边界
|
||||
5. Risk - EHRB/依赖/未知 + 处理方式
|
||||
|
||||
---
|
||||
|
||||
### BMAD 敏捷工作流
|
||||
|
||||
完整企业敏捷方法论 + 6 个专业智能体。
|
||||
|
||||
```bash
|
||||
/bmad-pilot "构建电商结账系统"
|
||||
```
|
||||
|
||||
**智能体角色:**
|
||||
| 智能体 | 职责 |
|
||||
|-------|------|
|
||||
| Product Owner | 需求与用户故事 |
|
||||
| Architect | 系统设计与技术决策 |
|
||||
| Scrum Master | Sprint 规划与任务分解 |
|
||||
| Developer | 实现 |
|
||||
| Code Reviewer | 质量保证 |
|
||||
| QA Engineer | 测试与验证 |
|
||||
|
||||
**审批门:**
|
||||
- PRD 完成后(90+ 分)需用户审批
|
||||
- 架构完成后(90+ 分)需用户审批
|
||||
|
||||
---
|
||||
|
||||
### 需求驱动工作流
|
||||
|
||||
轻量级需求到代码流水线。
|
||||
|
||||
```bash
|
||||
/requirements-pilot "实现 API 限流"
|
||||
```
|
||||
|
||||
**100 分质量评分:**
|
||||
- 功能清晰度:30 分
|
||||
- 技术具体性:25 分
|
||||
- 实现完整性:25 分
|
||||
- 业务上下文:20 分
|
||||
|
||||
---
|
||||
|
||||
### 开发基础命令
|
||||
|
||||
日常编码任务的直接命令。
|
||||
|
||||
| 命令 | 用途 |
|
||||
|------|------|
|
||||
| `/code` | 实现功能 |
|
||||
| `/debug` | 调试问题 |
|
||||
| `/test` | 编写测试 |
|
||||
| `/review` | 代码审查 |
|
||||
| `/optimize` | 性能优化 |
|
||||
| `/refactor` | 代码重构 |
|
||||
| `/docs` | 编写文档 |
|
||||
|
||||
---
|
||||
|
||||
## 安装
|
||||
|
||||
```bash
|
||||
# 交互式安装器(推荐)
|
||||
npx github:cexll/myclaude
|
||||
|
||||
# 列出可安装项(module:* / skill:* / codeagent-wrapper)
|
||||
npx github:cexll/myclaude --list
|
||||
|
||||
# 检测已安装 modules 并从 GitHub 更新
|
||||
npx github:cexll/myclaude --update
|
||||
|
||||
# 指定安装目录 / 强制覆盖
|
||||
npx github:cexll/myclaude --install-dir ~/.claude --force
|
||||
```
|
||||
|
||||
`--update` 会在目标安装目录(默认 `~/.claude`,优先读取 `installed_modules.json`)检测已安装 modules,并从 GitHub 拉取最新发布版本覆盖更新。
|
||||
|
||||
### 模块配置
|
||||
|
||||
编辑 `config.json` 启用/禁用模块:
|
||||
|
||||
```json
|
||||
{
|
||||
"modules": {
|
||||
"bmad": { "enabled": false },
|
||||
"requirements": { "enabled": false },
|
||||
"essentials": { "enabled": false },
|
||||
"omo": { "enabled": false },
|
||||
"sparv": { "enabled": false },
|
||||
"do": { "enabled": true },
|
||||
"course": { "enabled": false }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 工作流选择指南
|
||||
|
||||
| 场景 | 推荐 |
|
||||
|------|------|
|
||||
| 功能开发(默认) | `/do` |
|
||||
| Bug 调查 + 修复 | `/omo` |
|
||||
| 大型企业项目 | `/bmad-pilot` |
|
||||
| 快速原型 | `/requirements-pilot` |
|
||||
| 简单任务 | `/code`, `/debug` |
|
||||
|
||||
## 后端 CLI 要求
|
||||
|
||||
| 后端 | 必需功能 |
|
||||
|------|----------|
|
||||
| Codex | `codex e`, `--json`, `-C`, `resume` |
|
||||
| Claude | `--output-format stream-json`, `-r` |
|
||||
| Gemini | `-o stream-json`, `-y`, `-r` |
|
||||
|
||||
## 故障排查
|
||||
|
||||
**Codex wrapper 未找到:**
|
||||
```bash
|
||||
# 选择:codeagent-wrapper
|
||||
npx github:cexll/myclaude
|
||||
```
|
||||
|
||||
**模块未加载:**
|
||||
```bash
|
||||
cat ~/.claude/installed_modules.json
|
||||
npx github:cexll/myclaude --force
|
||||
```
|
||||
|
||||
## FAQ
|
||||
|
||||
| 问题 | 解决方案 |
|
||||
|------|----------|
|
||||
| "Unknown event format" | 日志显示问题,可忽略 |
|
||||
| Gemini 无法读取 .gitignore 文件 | 从 .gitignore 移除或使用其他后端 |
|
||||
| Codex 权限拒绝 | 在 ~/.codex/config.yaml 设置 `approval_policy = "never"` |
|
||||
|
||||
更多问题请访问 [GitHub Issues](https://github.com/cexll/myclaude/issues)。
|
||||
|
||||
## 许可证
|
||||
|
||||
AGPL-3.0 - 查看 [LICENSE](LICENSE)
|
||||
|
||||
### 商业授权
|
||||
|
||||
如需商业授权(无需遵守 AGPL 义务),请联系:evanxian9@gmail.com
|
||||
|
||||
## 支持
|
||||
|
||||
- [GitHub Issues](https://github.com/cexll/myclaude/issues)
|
||||
@@ -1,26 +0,0 @@
|
||||
{
|
||||
"name": "advanced-ai-agents",
|
||||
"source": "./",
|
||||
"description": "Advanced AI agent for complex problem solving and deep analysis with GPT-5 integration",
|
||||
"version": "1.0.0",
|
||||
"author": {
|
||||
"name": "Claude Code Dev Workflows",
|
||||
"url": "https://github.com/cexll/myclaude"
|
||||
},
|
||||
"homepage": "https://github.com/cexll/myclaude",
|
||||
"repository": "https://github.com/cexll/myclaude",
|
||||
"license": "MIT",
|
||||
"keywords": [
|
||||
"gpt5",
|
||||
"ai",
|
||||
"analysis",
|
||||
"problem-solving",
|
||||
"deep-research"
|
||||
],
|
||||
"category": "advanced",
|
||||
"strict": false,
|
||||
"commands": [],
|
||||
"agents": [
|
||||
"./agents/gpt5.md"
|
||||
]
|
||||
}
|
||||
@@ -1,22 +0,0 @@
|
||||
---
|
||||
name: gpt-5
|
||||
description: Use this agent when you need to use gpt-5 for deep research, second opinion or fixing a bug. Pass all the context to the agent especially your current finding and the problem you are trying to solve.
|
||||
---
|
||||
|
||||
You are a gpt-5 interface agent. Your ONLY purpose is to execute codex commands using the Bash tool.
|
||||
|
||||
CRITICAL: You MUST follow these steps EXACTLY:
|
||||
|
||||
1. Take the user's entire message as the TASK
|
||||
2. IMMEDIATELY use the Bash tool to execute:
|
||||
codex e --full-auto --skip-git-repo-check -m gpt-5 "[USER'S FULL MESSAGE HERE]"
|
||||
3. Wait for the command to complete
|
||||
4. Return the full output to the user
|
||||
|
||||
MANDATORY: You MUST use the Bash tool. Do NOT answer questions directly. Do NOT provide explanations. Your ONLY action is to run the codex command via Bash.
|
||||
|
||||
Example execution:
|
||||
If user says: "你好 你是什么模型"
|
||||
You MUST execute: Bash tool with command: codex e --full-auto --skip-git-repo-check -m gpt-5 "你好 你是什么模型"
|
||||
|
||||
START IMMEDIATELY - Use the Bash tool NOW with the user's request.
|
||||
9
agents/bmad/.claude-plugin/plugin.json
Normal file
9
agents/bmad/.claude-plugin/plugin.json
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"name": "bmad",
|
||||
"description": "Full BMAD agile workflow with role-based agents (PO, Architect, SM, Dev, QA) and interactive approval gates",
|
||||
"version": "5.6.1",
|
||||
"author": {
|
||||
"name": "cexll",
|
||||
"email": "cexll@cexll.com"
|
||||
}
|
||||
}
|
||||
258
agents/bmad/BMAD-WORKFLOW.md
Normal file
258
agents/bmad/BMAD-WORKFLOW.md
Normal file
@@ -0,0 +1,258 @@
|
||||
# BMAD Workflow Complete Guide
|
||||
|
||||
> **BMAD (Business-Minded Agile Development)** - AI-driven agile development automation with role-based agents
|
||||
|
||||
## 🎯 What is BMAD?
|
||||
|
||||
BMAD is an enterprise-grade agile development methodology that transforms your development process into a fully automated workflow with 6 specialized AI agents and quality gates.
|
||||
|
||||
### Core Principles
|
||||
|
||||
- **Agent Planning**: Specialized agents collaborate to create detailed, consistent PRDs and architecture documents
|
||||
- **Context-Driven Development**: Transform detailed plans into ultra-detailed development stories
|
||||
- **Role Specialization**: Each agent focuses on specific domains, avoiding quality degradation from role switching
|
||||
|
||||
## 🤖 BMAD Agent System
|
||||
|
||||
### Agent Roles
|
||||
|
||||
| Agent | Role | Quality Gate | Artifacts |
|
||||
|-------|------|--------------|-----------|
|
||||
| **bmad-po** (Sarah) | Product Owner - requirements gathering, user stories | PRD ≥ 90/100 | `01-product-requirements.md` |
|
||||
| **bmad-architect** (Winston) | System Architect - technical design, system architecture | Design ≥ 90/100 | `02-system-architecture.md` |
|
||||
| **bmad-sm** (Mike) | Scrum Master - task breakdown, sprint planning | User approval | `03-sprint-plan.md` |
|
||||
| **bmad-dev** (Alex) | Developer - code implementation, technical docs | Code completion | Implementation files |
|
||||
| **bmad-review** | Code Reviewer - independent review between Dev and QA | Pass/Risk/Fail | `04-dev-reviewed.md` |
|
||||
| **bmad-qa** (Emma) | QA Engineer - testing strategy, quality assurance | Test execution | `05-qa-report.md` |
|
||||
|
||||
## 🚀 Quick Start
|
||||
|
||||
### Command Overview
|
||||
|
||||
```bash
|
||||
# Full BMAD workflow
|
||||
/bmad-pilot "Build e-commerce checkout system with payment integration"
|
||||
|
||||
# Workflow: PO → Architect → SM → Dev → Review → QA
|
||||
```
|
||||
|
||||
### Command Options
|
||||
|
||||
```bash
|
||||
# Skip testing phase
|
||||
/bmad-pilot "Admin dashboard" --skip-tests
|
||||
|
||||
# Skip sprint planning (architecture → dev directly)
|
||||
/bmad-pilot "API gateway implementation" --direct-dev
|
||||
|
||||
# Skip repository scan (not recommended)
|
||||
/bmad-pilot "Add feature" --skip-scan
|
||||
```
|
||||
|
||||
### Individual Agent Usage
|
||||
|
||||
```bash
|
||||
# Product requirements analysis only
|
||||
/bmad-po "Enterprise CRM system requirements"
|
||||
|
||||
# Technical architecture design only
|
||||
/bmad-architect "High-concurrency distributed system design"
|
||||
|
||||
# Orchestrator (can transform into any agent)
|
||||
/bmad-orchestrator "Coordinate multi-agent complex project"
|
||||
```
|
||||
|
||||
## 📋 Workflow Phases
|
||||
|
||||
### Phase 0: Repository Scan (Automatic)
|
||||
- **Agent**: `bmad-orchestrator`
|
||||
- **Output**: `00-repository-context.md`
|
||||
- **Content**: Project type, tech stack, code organization, conventions, integration points
|
||||
|
||||
### Phase 1: Product Requirements (PO)
|
||||
- **Agent**: `bmad-po` (Sarah - Product Owner)
|
||||
- **Quality Gate**: PRD score ≥ 90/100
|
||||
- **Output**: `01-product-requirements.md`
|
||||
- **Process**:
|
||||
1. PO generates initial PRD
|
||||
2. System calculates quality score (100-point scale)
|
||||
3. If < 90: User provides feedback → PO revises → Recalculate
|
||||
4. If ≥ 90: User confirms → Save artifact → Next phase
|
||||
|
||||
### Phase 2: System Architecture (Architect)
|
||||
- **Agent**: `bmad-architect` (Winston - System Architect)
|
||||
- **Quality Gate**: Design score ≥ 90/100
|
||||
- **Output**: `02-system-architecture.md`
|
||||
- **Process**:
|
||||
1. Architect reads PRD + repo context
|
||||
2. Generates technical design document
|
||||
3. System calculates design quality score
|
||||
4. If < 90: User provides feedback → Architect revises
|
||||
5. If ≥ 90: User confirms → Save artifact → Next phase
|
||||
|
||||
### Phase 3: Sprint Planning (SM)
|
||||
- **Agent**: `bmad-sm` (Mike - Scrum Master)
|
||||
- **Quality Gate**: User approval
|
||||
- **Output**: `03-sprint-plan.md`
|
||||
- **Process**:
|
||||
1. SM reads PRD + Architecture
|
||||
2. Breaks down tasks with story points
|
||||
3. User reviews sprint plan
|
||||
4. User confirms → Save artifact → Next phase
|
||||
- **Skip**: Use `--direct-dev` to skip this phase
|
||||
|
||||
### Phase 4: Development (Dev)
|
||||
- **Agent**: `bmad-dev` (Alex - Developer)
|
||||
- **Quality Gate**: Code completion
|
||||
- **Output**: Implementation files
|
||||
- **Process**:
|
||||
1. Dev reads all previous artifacts
|
||||
2. Implements features following sprint plan
|
||||
3. Creates or modifies code files
|
||||
4. Completes implementation → Next phase
|
||||
|
||||
### Phase 5: Code Review (Review)
|
||||
- **Agent**: `bmad-review` (Independent Reviewer)
|
||||
- **Quality Gate**: Pass / Pass with Risk / Fail
|
||||
- **Output**: `04-dev-reviewed.md`
|
||||
- **Process**:
|
||||
1. Review reads implementation + all specs
|
||||
2. Performs comprehensive code review
|
||||
3. Generates review report with status:
|
||||
- **Pass**: No issues, proceed to QA
|
||||
- **Pass with Risk**: Non-critical issues noted
|
||||
- **Fail**: Critical issues, return to Dev
|
||||
4. Updates sprint plan with review findings
|
||||
|
||||
**Enhanced Review (Optional)**:
|
||||
- Use GPT-5 via Codex CLI for deeper analysis
|
||||
- Set via `BMAD_REVIEW_MODE=enhanced` environment variable
|
||||
|
||||
### Phase 6: Quality Assurance (QA)
|
||||
- **Agent**: `bmad-qa` (Emma - QA Engineer)
|
||||
- **Quality Gate**: Test execution
|
||||
- **Output**: `05-qa-report.md`
|
||||
- **Process**:
|
||||
1. QA reads implementation + review + all specs
|
||||
2. Creates targeted test strategy
|
||||
3. Executes tests
|
||||
4. Generates QA report
|
||||
5. Workflow complete
|
||||
- **Skip**: Use `--skip-tests` to skip this phase
|
||||
|
||||
## 📊 Quality Scoring System
|
||||
|
||||
### PRD Quality (100 points)
|
||||
- **Business Value** (30): Clear value proposition, user benefits
|
||||
- **Functional Requirements** (25): Complete, unambiguous requirements
|
||||
- **User Experience** (20): User flows, interaction patterns
|
||||
- **Technical Constraints** (15): Performance, security, scalability
|
||||
- **Scope & Priorities** (10): Clear boundaries, must-have vs nice-to-have
|
||||
|
||||
### Architecture Quality (100 points)
|
||||
- **Design Quality** (30): Modularity, maintainability, clarity
|
||||
- **Technology Selection** (25): Appropriate tech stack, justification
|
||||
- **Scalability** (20): Growth handling, performance considerations
|
||||
- **Security** (15): Authentication, authorization, data protection
|
||||
- **Feasibility** (10): Realistic implementation, resource alignment
|
||||
|
||||
### Review Status (3 levels)
|
||||
- **Pass**: No critical issues, code meets standards
|
||||
- **Pass with Risk**: Non-critical issues, recommendations included
|
||||
- **Fail**: Critical issues, requires Dev iteration
|
||||
|
||||
## 📁 Workflow Artifacts
|
||||
|
||||
All documents are saved to `.claude/specs/{feature-name}/`:
|
||||
|
||||
```
|
||||
.claude/specs/e-commerce-checkout/
|
||||
├── 00-repository-context.md # Repo analysis (auto)
|
||||
├── 01-product-requirements.md # PRD (PO, score ≥ 90)
|
||||
├── 02-system-architecture.md # Design (Architect, score ≥ 90)
|
||||
├── 03-sprint-plan.md # Sprint plan (SM, user approved)
|
||||
├── 04-dev-reviewed.md # Code review (Review, Pass/Risk/Fail)
|
||||
└── 05-qa-report.md # Test report (QA, tests executed)
|
||||
```
|
||||
|
||||
Feature name is generated from the project description (kebab-case: lowercase, spaces/punctuation → `-`).
|
||||
|
||||
## 🔧 Advanced Usage
|
||||
|
||||
### Approval Gates
|
||||
|
||||
Critical phases require explicit user confirmation:
|
||||
|
||||
```
|
||||
Architect: "Technical design complete (Score: 93/100)"
|
||||
System: "Ready to proceed to sprint planning? (yes/no)"
|
||||
User: yes
|
||||
```
|
||||
|
||||
### Iterative Refinement
|
||||
|
||||
Each phase supports feedback loops:
|
||||
|
||||
```
|
||||
PO: "Here's the PRD (Score: 75/100)"
|
||||
User: "Add mobile support and offline mode"
|
||||
PO: "Updated PRD (Score: 92/100) ✅"
|
||||
```
|
||||
|
||||
### Repository Context
|
||||
|
||||
BMAD automatically scans your repository to understand:
|
||||
- Technology stack (languages, frameworks, libraries)
|
||||
- Project structure (directories, modules, patterns)
|
||||
- Existing conventions (naming, formatting, architecture)
|
||||
- Dependencies (package managers, external services)
|
||||
- Integration points (APIs, databases, third-party services)
|
||||
|
||||
### Workflow Variations
|
||||
|
||||
**Fast Prototyping** - Skip non-essential phases:
|
||||
```bash
|
||||
/bmad-pilot "Quick admin UI" --skip-tests --direct-dev
|
||||
# Workflow: PO → Architect → Dev
|
||||
```
|
||||
|
||||
**Architecture-First** - Focus on design:
|
||||
```bash
|
||||
/bmad-architect "Microservices architecture for e-commerce"
|
||||
# Only runs Architect agent
|
||||
```
|
||||
|
||||
**Full Rigor** - All phases with maximum quality:
|
||||
```bash
|
||||
/bmad-pilot "Enterprise payment gateway with PCI compliance"
|
||||
# Workflow: Scan → PO → Architect → SM → Dev → Review → QA
|
||||
```
|
||||
|
||||
## 🎨 Output Style
|
||||
|
||||
BMAD workflow uses a specialized output style that:
|
||||
- Creates phase-separated contexts
|
||||
- Manages agent handoffs with clear boundaries
|
||||
- Tracks quality scores across phases
|
||||
- Handles approval gates with user prompts
|
||||
- Supports Codex CLI integration for enhanced reviews
|
||||
|
||||
## 📚 Related Documentation
|
||||
|
||||
- **[Quick Start Guide](QUICK-START.md)** - Get started in 5 minutes
|
||||
- **[Plugin System](PLUGIN-SYSTEM.md)** - Installation and configuration
|
||||
- **[Development Commands](DEVELOPMENT-COMMANDS.md)** - Alternative workflows
|
||||
- **[Requirements Workflow](REQUIREMENTS-WORKFLOW.md)** - Lightweight alternative
|
||||
|
||||
## 💡 Best Practices
|
||||
|
||||
1. **Don't skip repository scan** - Helps agents understand your project context
|
||||
2. **Provide detailed descriptions** - Better input → better output
|
||||
3. **Engage with agents** - Provide feedback during quality gates
|
||||
4. **Review artifacts** - Check generated documents before confirming
|
||||
5. **Use appropriate workflows** - Full BMAD for complex features, lightweight for simple tasks
|
||||
6. **Keep artifacts** - They serve as project documentation and context for future work
|
||||
|
||||
---
|
||||
|
||||
**Transform your development with BMAD** - One command, complete agile workflow, quality assured.
|
||||
109
agents/bmad/README.md
Normal file
109
agents/bmad/README.md
Normal file
@@ -0,0 +1,109 @@
|
||||
# bmad - BMAD Agile Workflow
|
||||
|
||||
Full enterprise agile methodology with 6 specialized agents, UltraThink analysis, and repository-aware development.
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
python install.py --module bmad
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
/bmad-pilot <PROJECT_DESCRIPTION> [OPTIONS]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
| Option | Description |
|
||||
|--------|-------------|
|
||||
| `--skip-tests` | Skip QA testing phase |
|
||||
| `--direct-dev` | Skip SM planning, go directly to development |
|
||||
| `--skip-scan` | Skip initial repository scanning |
|
||||
|
||||
## Workflow Phases
|
||||
|
||||
| Phase | Agent | Deliverable | Description |
|
||||
|-------|-------|-------------|-------------|
|
||||
| 0 | Orchestrator | `00-repo-scan.md` | Repository scanning with UltraThink analysis |
|
||||
| 1 | Product Owner (PO) | `01-product-requirements.md` | PRD with 90+ quality score required |
|
||||
| 2 | Architect | `02-system-architecture.md` | Technical design with 90+ score required |
|
||||
| 3 | Scrum Master (SM) | `03-sprint-plan.md` | Sprint backlog with stories and estimates |
|
||||
| 4 | Developer | Implementation code | Multi-sprint implementation |
|
||||
| 4.5 | Reviewer | `04-dev-reviewed.md` | Code review (Pass/Pass with Risk/Fail) |
|
||||
| 5 | QA Engineer | Test suite | Comprehensive testing and validation |
|
||||
|
||||
## Agents
|
||||
|
||||
| Agent | Role |
|
||||
|-------|------|
|
||||
| `bmad-orchestrator` | Repository scanning, workflow coordination |
|
||||
| `bmad-po` | Requirements gathering, PRD creation |
|
||||
| `bmad-architect` | System design, technology decisions |
|
||||
| `bmad-sm` | Sprint planning, task breakdown |
|
||||
| `bmad-dev` | Code implementation |
|
||||
| `bmad-review` | Code review, quality assessment |
|
||||
| `bmad-qa` | Testing, validation |
|
||||
|
||||
## Approval Gates
|
||||
|
||||
Two mandatory stop points require explicit user approval:
|
||||
|
||||
1. **After PRD** (Phase 1 → 2): User must approve requirements before architecture
|
||||
2. **After Architecture** (Phase 2 → 3): User must approve design before implementation
|
||||
|
||||
## Output Structure
|
||||
|
||||
```
|
||||
.claude/specs/{feature_name}/
|
||||
├── 00-repo-scan.md
|
||||
├── 01-product-requirements.md
|
||||
├── 02-system-architecture.md
|
||||
├── 03-sprint-plan.md
|
||||
└── 04-dev-reviewed.md
|
||||
```
|
||||
|
||||
## UltraThink Methodology
|
||||
|
||||
Applied throughout the workflow for deep analysis:
|
||||
|
||||
1. **Hypothesis Generation** - Form hypotheses about the problem
|
||||
2. **Evidence Collection** - Gather evidence from codebase
|
||||
3. **Pattern Recognition** - Identify recurring patterns
|
||||
4. **Synthesis** - Create comprehensive understanding
|
||||
5. **Validation** - Cross-check findings
|
||||
|
||||
## Interactive Confirmation Flow
|
||||
|
||||
PO and Architect phases use iterative refinement:
|
||||
|
||||
1. Agent produces initial draft + quality score
|
||||
2. Orchestrator presents to user with clarification questions
|
||||
3. User provides responses
|
||||
4. Agent refines until quality >= 90
|
||||
5. User confirms to save deliverable
|
||||
|
||||
## When to Use
|
||||
|
||||
- Large multi-sprint features
|
||||
- Enterprise projects requiring documentation
|
||||
- Team coordination scenarios
|
||||
- Projects needing formal approval gates
|
||||
|
||||
## Directory Structure
|
||||
|
||||
```
|
||||
agents/bmad/
|
||||
├── README.md
|
||||
├── commands/
|
||||
│ └── bmad-pilot.md
|
||||
└── agents/
|
||||
├── bmad-orchestrator.md
|
||||
├── bmad-po.md
|
||||
├── bmad-architect.md
|
||||
├── bmad-sm.md
|
||||
├── bmad-dev.md
|
||||
├── bmad-review.md
|
||||
└── bmad-qa.md
|
||||
```
|
||||
@@ -427,6 +427,10 @@ Generate architecture document at `./.claude/specs/{feature_name}/02-system-arch
|
||||
|
||||
## Important Behaviors
|
||||
|
||||
### Language Rules:
|
||||
- **Language Matching**: Output language matches user input (Chinese input → Chinese doc, English input → English doc). When language is ambiguous, default to Chinese.
|
||||
- **Technical Terms**: Keep technical terms (API, REST, GraphQL, JWT, RBAC, etc.) in English; translate explanatory text only.
|
||||
|
||||
### DO:
|
||||
- Start by reviewing and referencing the PRD
|
||||
- Present initial architecture based on requirements
|
||||
@@ -419,6 +419,10 @@ logger.info('User created', {
|
||||
|
||||
## Important Implementation Rules
|
||||
|
||||
### Language Rules:
|
||||
- **Language Matching**: Output language matches user input (Chinese input → Chinese doc, English input → English doc). When language is ambiguous, default to Chinese.
|
||||
- **Technical Terms**: Keep technical terms (API, CRUD, JWT, SQL, etc.) in English; translate explanatory text only.
|
||||
|
||||
### DO:
|
||||
- Follow architecture specifications exactly
|
||||
- Implement all acceptance criteria from PRD
|
||||
@@ -22,6 +22,10 @@ You are the BMAD Orchestrator. Your core focus is repository analysis, workflow
|
||||
- Consistency: ensure conventions and patterns discovered in scan are preserved downstream
|
||||
- Explicit handoffs: clearly document assumptions, risks, and integration points for other agents
|
||||
|
||||
### Language Rules:
|
||||
- **Language Matching**: Output language matches user input (Chinese input → Chinese doc, English input → English doc). When language is ambiguous, default to Chinese.
|
||||
- **Technical Terms**: Keep technical terms (API, PRD, Sprint, etc.) in English; translate explanatory text only.
|
||||
|
||||
## UltraThink Repository Scan
|
||||
|
||||
When asked to analyze the repository, follow this structure and return a clear, actionable summary.
|
||||
@@ -313,6 +313,10 @@ Generate PRD at `./.claude/specs/{feature_name}/01-product-requirements.md`:
|
||||
|
||||
## Important Behaviors
|
||||
|
||||
### Language Rules:
|
||||
- **Language Matching**: Output language matches user input (Chinese input → Chinese doc, English input → English doc). When language is ambiguous, default to Chinese.
|
||||
- **Technical Terms**: Keep technical terms (API, Sprint, PRD, KPI, MVP, etc.) in English; translate explanatory text only.
|
||||
|
||||
### DO:
|
||||
- Start immediately with greeting and initial understanding
|
||||
- Show quality scores transparently
|
||||
@@ -478,6 +478,10 @@ module.exports = {
|
||||
|
||||
## Important Testing Rules
|
||||
|
||||
### Language Rules:
|
||||
- **Language Matching**: Output language matches user input (Chinese input → Chinese doc, English input → English doc). When language is ambiguous, default to Chinese.
|
||||
- **Technical Terms**: Keep technical terms (API, E2E, CI/CD, Mock, etc.) in English; translate explanatory text only.
|
||||
|
||||
### DO:
|
||||
- Test all acceptance criteria from PRD
|
||||
- Cover happy path, edge cases, and error scenarios
|
||||
@@ -45,3 +45,7 @@ You are an independent code review agent responsible for conducting reviews betw
|
||||
- Focus on actionable findings
|
||||
- Provide specific QA guidance
|
||||
- Use clear, parseable output format
|
||||
|
||||
### Language Rules:
|
||||
- **Language Matching**: Output language matches user input (Chinese input → Chinese doc, English input → English doc). When language is ambiguous, default to Chinese.
|
||||
- **Technical Terms**: Keep technical terms (API, PRD, Sprint, etc.) in English; translate explanatory text only.
|
||||
@@ -351,6 +351,10 @@ So that [benefit]
|
||||
|
||||
## Important Behaviors
|
||||
|
||||
### Language Rules:
|
||||
- **Language Matching**: Output language matches user input (Chinese input → Chinese doc, English input → English doc). When language is ambiguous, default to Chinese.
|
||||
- **Technical Terms**: Keep technical terms (Sprint, Epic, Story, Backlog, Velocity, etc.) in English; translate explanatory text only.
|
||||
|
||||
### DO:
|
||||
- Read both PRD and Architecture documents thoroughly
|
||||
- Create comprehensive task breakdown
|
||||
9
agents/development-essentials/.claude-plugin/plugin.json
Normal file
9
agents/development-essentials/.claude-plugin/plugin.json
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"name": "essentials",
|
||||
"description": "Essential development commands for coding, debugging, testing, optimization, and documentation",
|
||||
"version": "5.6.1",
|
||||
"author": {
|
||||
"name": "cexll",
|
||||
"email": "cexll@cexll.com"
|
||||
}
|
||||
}
|
||||
321
agents/development-essentials/DEVELOPMENT-COMMANDS.md
Normal file
321
agents/development-essentials/DEVELOPMENT-COMMANDS.md
Normal file
@@ -0,0 +1,321 @@
|
||||
# Development Commands Reference
|
||||
|
||||
> Direct slash commands for daily coding tasks without workflow overhead
|
||||
|
||||
## 🎯 Overview
|
||||
|
||||
Development Essentials provides focused slash commands for common development tasks. Use these when you need direct implementation without the full workflow structure.
|
||||
|
||||
## 📋 Available Commands
|
||||
|
||||
### `/code` - Direct Implementation
|
||||
|
||||
Implement features, add functionality, or write code directly.
|
||||
|
||||
**Usage**:
|
||||
```bash
|
||||
/code "Add input validation for email fields"
|
||||
/code "Implement pagination for user list API"
|
||||
/code "Create database migration for orders table"
|
||||
```
|
||||
|
||||
**Agent**: `code`
|
||||
|
||||
**Best for**:
|
||||
- Clear, well-defined tasks
|
||||
- Quick implementations
|
||||
- Following existing patterns
|
||||
- Adding straightforward features
|
||||
|
||||
### `/debug` - Systematic Debugging
|
||||
|
||||
Analyze and fix bugs with structured debugging approach.
|
||||
|
||||
**Usage**:
|
||||
```bash
|
||||
/debug "Login fails with 500 error on invalid credentials"
|
||||
/debug "Memory leak in background worker process"
|
||||
/debug "Race condition in order processing"
|
||||
```
|
||||
|
||||
**Agent**: `debug`
|
||||
|
||||
**Approach**:
|
||||
1. Reproduce the issue
|
||||
2. Analyze root cause
|
||||
3. Propose solution
|
||||
4. Implement fix
|
||||
5. Verify resolution
|
||||
|
||||
### `/test` - Testing Strategy
|
||||
|
||||
Create tests, improve test coverage, or test existing code.
|
||||
|
||||
**Usage**:
|
||||
```bash
|
||||
/test "Add unit tests for authentication service"
|
||||
/test "Create integration tests for payment flow"
|
||||
/test "Test edge cases for date parser"
|
||||
```
|
||||
|
||||
**Agent**: `develop` (testing mode)
|
||||
|
||||
**Covers**:
|
||||
- Unit tests
|
||||
- Integration tests
|
||||
- Edge cases
|
||||
- Error scenarios
|
||||
- Test data setup
|
||||
|
||||
### `/optimize` - Performance Tuning
|
||||
|
||||
Improve performance, reduce resource usage, or optimize algorithms.
|
||||
|
||||
**Usage**:
|
||||
```bash
|
||||
/optimize "Reduce database queries in dashboard endpoint"
|
||||
/optimize "Speed up report generation process"
|
||||
/optimize "Improve memory usage in data processing pipeline"
|
||||
```
|
||||
|
||||
**Agent**: `develop` (optimization mode)
|
||||
|
||||
**Focus areas**:
|
||||
- Algorithm efficiency
|
||||
- Database query optimization
|
||||
- Caching strategies
|
||||
- Resource utilization
|
||||
- Load time reduction
|
||||
|
||||
### `/bugfix` - Bug Resolution
|
||||
|
||||
Fix specific bugs with focused approach.
|
||||
|
||||
**Usage**:
|
||||
```bash
|
||||
/bugfix "Users can't reset password with special characters"
|
||||
/bugfix "Session expires too quickly on mobile"
|
||||
/bugfix "File upload fails for large files"
|
||||
```
|
||||
|
||||
**Agent**: `bugfix`
|
||||
|
||||
**Process**:
|
||||
1. Understand the bug
|
||||
2. Locate problematic code
|
||||
3. Implement fix
|
||||
4. Add regression tests
|
||||
5. Verify fix
|
||||
|
||||
### `/refactor` - Code Improvement
|
||||
|
||||
Improve code structure, readability, or maintainability without changing behavior.
|
||||
|
||||
**Usage**:
|
||||
```bash
|
||||
/refactor "Extract user validation logic into separate module"
|
||||
/refactor "Simplify nested conditionals in order processing"
|
||||
/refactor "Remove code duplication in API handlers"
|
||||
```
|
||||
|
||||
**Agent**: `develop` (refactor mode)
|
||||
|
||||
**Goals**:
|
||||
- Improve readability
|
||||
- Reduce complexity
|
||||
- Eliminate duplication
|
||||
- Enhance maintainability
|
||||
- Follow best practices
|
||||
|
||||
### `/review` - Code Validation
|
||||
|
||||
Review code for quality, security, and best practices.
|
||||
|
||||
**Usage**:
|
||||
```bash
|
||||
/review "Check authentication implementation for security issues"
|
||||
/review "Validate API error handling patterns"
|
||||
/review "Assess database schema design"
|
||||
```
|
||||
|
||||
**Agent**: Independent reviewer
|
||||
|
||||
**Review criteria**:
|
||||
- Code quality
|
||||
- Security vulnerabilities
|
||||
- Performance issues
|
||||
- Best practices compliance
|
||||
- Maintainability
|
||||
|
||||
### `/ask` - Technical Consultation
|
||||
|
||||
Get technical advice, design patterns, or implementation guidance.
|
||||
|
||||
**Usage**:
|
||||
```bash
|
||||
/ask "Best approach for real-time notifications in React"
|
||||
/ask "How to handle database migrations in production"
|
||||
/ask "Design pattern for plugin system"
|
||||
```
|
||||
|
||||
**Agent**: Technical consultant
|
||||
|
||||
**Provides**:
|
||||
- Architecture guidance
|
||||
- Technology recommendations
|
||||
- Design patterns
|
||||
- Best practices
|
||||
- Trade-off analysis
|
||||
|
||||
### `/docs` - Documentation
|
||||
|
||||
Generate or improve documentation.
|
||||
|
||||
**Usage**:
|
||||
```bash
|
||||
/docs "Create API documentation for user endpoints"
|
||||
/docs "Add JSDoc comments to utility functions"
|
||||
/docs "Write README for authentication module"
|
||||
```
|
||||
|
||||
**Agent**: Documentation writer
|
||||
|
||||
**Creates**:
|
||||
- Code comments
|
||||
- API documentation
|
||||
- README files
|
||||
- Usage examples
|
||||
- Architecture docs
|
||||
|
||||
### `/think` - Advanced Analysis
|
||||
|
||||
Deep reasoning and analysis for complex problems.
|
||||
|
||||
**Usage**:
|
||||
```bash
|
||||
/think "Analyze scalability bottlenecks in current architecture"
|
||||
/think "Evaluate different approaches for data synchronization"
|
||||
/think "Design migration strategy from monolith to microservices"
|
||||
```
|
||||
|
||||
**Agent**: `gpt5` (deep reasoning)
|
||||
|
||||
**Best for**:
|
||||
- Complex architectural decisions
|
||||
- Multi-faceted problems
|
||||
- Trade-off analysis
|
||||
- Strategic planning
|
||||
- System design
|
||||
|
||||
## 🔄 Command Workflows
|
||||
|
||||
### Simple Feature Development
|
||||
|
||||
```bash
|
||||
# 1. Ask for guidance
|
||||
/ask "Best way to implement rate limiting in Express"
|
||||
|
||||
# 2. Implement the feature
|
||||
/code "Add rate limiting middleware to API routes"
|
||||
|
||||
# 3. Add tests
|
||||
/test "Create tests for rate limiting behavior"
|
||||
|
||||
# 4. Review implementation
|
||||
/review "Validate rate limiting implementation"
|
||||
```
|
||||
|
||||
### Bug Investigation and Fix
|
||||
|
||||
```bash
|
||||
# 1. Debug the issue
|
||||
/debug "API returns 500 on concurrent requests"
|
||||
|
||||
# 2. Fix the bug
|
||||
/bugfix "Add mutex lock to prevent race condition"
|
||||
|
||||
# 3. Add regression tests
|
||||
/test "Test concurrent request handling"
|
||||
```
|
||||
|
||||
### Code Quality Improvement
|
||||
|
||||
```bash
|
||||
# 1. Review current code
|
||||
/review "Analyze user service for improvements"
|
||||
|
||||
# 2. Refactor based on findings
|
||||
/refactor "Simplify user validation logic"
|
||||
|
||||
# 3. Optimize performance
|
||||
/optimize "Cache frequently accessed user data"
|
||||
|
||||
# 4. Update documentation
|
||||
/docs "Document user service API"
|
||||
```
|
||||
|
||||
## 🎯 When to Use What
|
||||
|
||||
### Use Direct Commands When:
|
||||
- Task is clear and well-defined
|
||||
- No complex planning needed
|
||||
- Fast iteration is priority
|
||||
- Working within existing patterns
|
||||
|
||||
### Use Requirements Workflow When:
|
||||
- Feature has unclear requirements
|
||||
- Need documented specifications
|
||||
- Multiple implementation approaches possible
|
||||
- Quality gates desired
|
||||
|
||||
### Use BMAD Workflow When:
|
||||
- Complex business requirements
|
||||
- Architecture design needed
|
||||
- Sprint planning required
|
||||
- Multiple stakeholders involved
|
||||
|
||||
## 💡 Best Practices
|
||||
|
||||
1. **Be Specific**: Provide clear, detailed descriptions
|
||||
- ❌ `/code "fix the bug"`
|
||||
- ✅ `/code "Fix null pointer exception in user login when email is missing"`
|
||||
|
||||
2. **One Task Per Command**: Keep commands focused
|
||||
- ❌ `/code "Add feature X, fix bug Y, refactor module Z"`
|
||||
- ✅ `/code "Add email validation to registration form"`
|
||||
|
||||
3. **Provide Context**: Include relevant details
|
||||
- ✅ `/debug "Login API returns 401 after password change, only on Safari"`
|
||||
|
||||
4. **Use Appropriate Command**: Match command to task type
|
||||
- Use `/bugfix` for bugs, not `/code`
|
||||
- Use `/refactor` for restructuring, not `/optimize`
|
||||
- Use `/think` for complex analysis, not `/ask`
|
||||
|
||||
5. **Chain Commands**: Break complex tasks into steps
|
||||
```bash
|
||||
/ask "How to implement OAuth2"
|
||||
/code "Implement OAuth2 authorization flow"
|
||||
/test "Add OAuth2 integration tests"
|
||||
/review "Validate OAuth2 security"
|
||||
/docs "Document OAuth2 setup process"
|
||||
```
|
||||
|
||||
## 🔌 Agent Configuration
|
||||
|
||||
All commands use specialized agents configured in:
|
||||
- `agents/development-essentials/agents/`
|
||||
- Agent prompt templates
|
||||
- Tool access permissions
|
||||
- Output formatting
|
||||
|
||||
## 📚 Related Documentation
|
||||
|
||||
- **[BMAD Workflow](BMAD-WORKFLOW.md)** - Full agile methodology
|
||||
- **[Requirements Workflow](REQUIREMENTS-WORKFLOW.md)** - Lightweight workflow
|
||||
- **[Quick Start Guide](QUICK-START.md)** - Get started quickly
|
||||
- **[Plugin System](PLUGIN-SYSTEM.md)** - Installation and configuration
|
||||
|
||||
---
|
||||
|
||||
**Development Essentials** - Direct commands for productive coding without workflow overhead.
|
||||
253
agents/development-essentials/README.md
Normal file
253
agents/development-essentials/README.md
Normal file
@@ -0,0 +1,253 @@
|
||||
# Development Essentials - Core Development Commands
|
||||
|
||||
核心开发命令套件,提供日常开发所需的所有基础命令。无需工作流开销,直接执行开发任务。
|
||||
|
||||
## 📋 命令列表
|
||||
|
||||
### 1. `/ask` - 技术咨询
|
||||
**用途**: 架构问题咨询和技术决策指导
|
||||
**适用场景**: 需要架构建议、技术选型、系统设计方案时
|
||||
|
||||
**特点**:
|
||||
- 四位架构顾问协同:系统设计师、技术策略师、可扩展性顾问、风险分析师
|
||||
- 遵循 KISS、YAGNI、SOLID 原则
|
||||
- 提供架构分析、设计建议、技术指导和实施策略
|
||||
- **不生成代码**,专注于架构咨询
|
||||
|
||||
**使用示例**:
|
||||
```bash
|
||||
/ask "如何设计一个支持百万并发的消息队列系统?"
|
||||
/ask "微服务架构中应该如何处理分布式事务?"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 2. `/code` - 功能实现
|
||||
**用途**: 直接实现新功能或特性
|
||||
**适用场景**: 需要快速开发新功能时
|
||||
|
||||
**特点**:
|
||||
- 四位开发专家协同:架构师、实现工程师、集成专家、代码审查员
|
||||
- 渐进式开发,每步验证
|
||||
- 包含完整的实现计划、代码实现、集成指南和测试策略
|
||||
- 生成可运行的高质量代码
|
||||
|
||||
**使用示例**:
|
||||
```bash
|
||||
/code "实现JWT认证中间件"
|
||||
/code "添加用户头像上传功能"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 3. `/debug` - 系统调试
|
||||
**用途**: 使用 UltraThink 方法系统性调试问题
|
||||
**适用场景**: 遇到复杂bug或系统性问题时
|
||||
|
||||
**特点**:
|
||||
- 四位专家协同:架构师、研究员、编码员、测试员
|
||||
- UltraThink 反思阶段:综合所有洞察形成解决方案
|
||||
- 生成5-7个假设,逐步缩减到1-2个最可能的原因
|
||||
- 在实施修复前要求用户确认诊断结果
|
||||
- 证据驱动的系统性问题分析
|
||||
|
||||
**使用示例**:
|
||||
```bash
|
||||
/debug "API响应时间突然增加10倍"
|
||||
/debug "生产环境内存泄漏问题"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 4. `/test` - 测试策略
|
||||
**用途**: 设计和实现全面的测试策略
|
||||
**适用场景**: 需要为组件或功能编写测试时
|
||||
|
||||
**特点**:
|
||||
- 四位测试专家:测试架构师、单元测试专家、集成测试工程师、质量验证员
|
||||
- 测试金字塔策略(单元/集成/端到端比例)
|
||||
- 提供测试覆盖率分析和优先级建议
|
||||
- 包含 CI/CD 集成计划
|
||||
|
||||
**使用示例**:
|
||||
```bash
|
||||
/test "用户认证模块"
|
||||
/test "支付处理流程"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 5. `/optimize` - 性能优化
|
||||
**用途**: 识别和优化性能瓶颈
|
||||
**适用场景**: 系统存在性能问题或需要提升性能时
|
||||
|
||||
**特点**:
|
||||
- 四位优化专家:性能分析师、算法工程师、资源管理员、可扩展性架构师
|
||||
- 建立性能基线和量化指标
|
||||
- 优化算法复杂度、内存使用、I/O操作
|
||||
- 设计水平扩展和并发处理方案
|
||||
|
||||
**使用示例**:
|
||||
```bash
|
||||
/optimize "数据库查询性能"
|
||||
/optimize "API响应时间优化到200ms以内"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 6. `/review` - 代码审查
|
||||
**用途**: 全方位代码质量审查
|
||||
**适用场景**: 需要审查代码质量、安全性和架构设计时
|
||||
|
||||
**特点**:
|
||||
- 四位审查专家:质量审计员、安全分析师、性能审查员、架构评估员
|
||||
- 多维度审查:可读性、安全性、性能、架构设计
|
||||
- 提供优先级分类的改进建议
|
||||
- 包含具体代码示例和重构建议
|
||||
|
||||
**使用示例**:
|
||||
```bash
|
||||
/review "src/auth/middleware.ts"
|
||||
/review "支付模块代码"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 7. `/bugfix` - Bug修复
|
||||
**用途**: 快速定位和修复Bug
|
||||
**适用场景**: 需要修复已知Bug时
|
||||
|
||||
**特点**:
|
||||
- 专注于快速修复
|
||||
- 包含验证流程
|
||||
- 确保修复不引入新问题
|
||||
|
||||
**使用示例**:
|
||||
```bash
|
||||
/bugfix "登录失败后session未清理"
|
||||
/bugfix "订单状态更新不及时"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 8. `/refactor` - 代码重构
|
||||
**用途**: 改进代码结构和可维护性
|
||||
**适用场景**: 代码质量下降或需要优化代码结构时
|
||||
|
||||
**特点**:
|
||||
- 保持功能不变
|
||||
- 提升代码质量和可维护性
|
||||
- 遵循设计模式和最佳实践
|
||||
|
||||
**使用示例**:
|
||||
```bash
|
||||
/refactor "将用户管理模块拆分为独立服务"
|
||||
/refactor "优化支付流程代码结构"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 9. `/docs` - 文档生成
|
||||
**用途**: 生成项目文档和API文档
|
||||
**适用场景**: 需要为代码或API生成文档时
|
||||
|
||||
**特点**:
|
||||
- 自动分析代码结构
|
||||
- 生成清晰的文档
|
||||
- 包含使用示例
|
||||
|
||||
**使用示例**:
|
||||
```bash
|
||||
/docs "API接口文档"
|
||||
/docs "为认证模块生成开发者文档"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 10. `/think` - 深度分析
|
||||
**用途**: 对复杂问题进行深度思考和分析
|
||||
**适用场景**: 需要全面分析复杂技术问题时
|
||||
|
||||
**特点**:
|
||||
- 系统性思考框架
|
||||
- 多角度问题分析
|
||||
- 提供深入见解
|
||||
|
||||
**使用示例**:
|
||||
```bash
|
||||
/think "如何设计一个高可用的分布式系统?"
|
||||
/think "微服务拆分的最佳实践是什么?"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 11. `/enhance-prompt` - 提示词增强 🆕
|
||||
**用途**: 优化和增强用户提供的指令
|
||||
**适用场景**: 需要改进模糊或不清晰的指令时
|
||||
|
||||
**特点**:
|
||||
- 自动分析指令上下文
|
||||
- 消除歧义,提高清晰度
|
||||
- 修正错误并提高具体性
|
||||
- 立即返回增强后的提示词
|
||||
- 保留代码块等特殊格式
|
||||
|
||||
**输出格式**:
|
||||
```
|
||||
### Here is an enhanced version of the original instruction that is more specific and clear:
|
||||
<enhanced-prompt>增强后的提示词</enhanced-prompt>
|
||||
```
|
||||
|
||||
**使用示例**:
|
||||
```bash
|
||||
/enhance-prompt "帮我做一个登录功能"
|
||||
/enhance-prompt "优化一下这个API"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🎯 命令选择指南
|
||||
|
||||
| 需求场景 | 推荐命令 | 说明 |
|
||||
|---------|---------|------|
|
||||
| 需要架构建议 | `/ask` | 不生成代码,专注咨询 |
|
||||
| 实现新功能 | `/code` | 完整的功能实现流程 |
|
||||
| 调试复杂问题 | `/debug` | UltraThink系统性调试 |
|
||||
| 编写测试 | `/test` | 全面的测试策略 |
|
||||
| 性能优化 | `/optimize` | 性能瓶颈分析和优化 |
|
||||
| 代码审查 | `/review` | 多维度质量审查 |
|
||||
| 修复Bug | `/bugfix` | 快速定位和修复 |
|
||||
| 重构代码 | `/refactor` | 提升代码质量 |
|
||||
| 生成文档 | `/docs` | API和开发者文档 |
|
||||
| 深度思考 | `/think` | 复杂问题分析 |
|
||||
| 优化指令 | `/enhance-prompt` | 提示词增强 |
|
||||
|
||||
## 🔧 代理列表
|
||||
|
||||
Development Essentials 模块包含以下专用代理:
|
||||
|
||||
- `code` - 代码实现代理
|
||||
- `bugfix` - Bug修复代理
|
||||
- `bugfix-verify` - Bug验证代理
|
||||
- `code-optimize` - 代码优化代理
|
||||
- `debug` - 调试分析代理
|
||||
- `develop` - 通用开发代理
|
||||
|
||||
## 📖 使用原则
|
||||
|
||||
1. **直接执行**: 无需工作流开销,直接运行命令
|
||||
2. **专注单一任务**: 每个命令聚焦特定开发任务
|
||||
3. **质量优先**: 所有命令都包含质量验证环节
|
||||
4. **实用主义**: KISS/YAGNI/DRY 原则贯穿始终
|
||||
5. **上下文感知**: 自动理解项目结构和编码规范
|
||||
|
||||
## 🔗 相关文档
|
||||
|
||||
- [主文档](../README.md) - 项目总览
|
||||
- [BMAD工作流](../agents/bmad/BMAD-WORKFLOW.md) - 完整敏捷流程
|
||||
- [Requirements工作流](../agents/requirements/REQUIREMENTS-WORKFLOW.md) - 轻量级开发流程
|
||||
- [插件系统](../PLUGIN_README.md) - 插件安装和管理
|
||||
|
||||
---
|
||||
|
||||
**提示**: 这些命令可以单独使用,也可以组合使用。例如:`/code` → `/test` → `/review` → `/optimize` 构成一个完整的开发周期。
|
||||
9
agents/development-essentials/commands/enhance-prompt.md
Normal file
9
agents/development-essentials/commands/enhance-prompt.md
Normal file
@@ -0,0 +1,9 @@
|
||||
`/enhance-prompt <task info>`
|
||||
|
||||
Here is an instruction that I'd like to give you, but it needs to be improved. Rewrite and enhance this instruction to make it clearer, more specific, less ambiguous, and correct any mistakes. Do not use any tools: reply immediately with your answer, even if you're not sure. Consider the context of our conversation history when enhancing the prompt. If there is code in triple backticks (```) consider whether it is a code sample and should remain unchanged. Reply with the following format:
|
||||
|
||||
### BEGIN RESPONSE
|
||||
|
||||
<enhanced-prompt>enhanced prompt goes here</enhanced-prompt>
|
||||
|
||||
### END RESPONSE
|
||||
9
agents/requirements/.claude-plugin/plugin.json
Normal file
9
agents/requirements/.claude-plugin/plugin.json
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"name": "requirements",
|
||||
"description": "Requirements-driven development workflow with quality gates for practical feature implementation",
|
||||
"version": "5.6.1",
|
||||
"author": {
|
||||
"name": "cexll",
|
||||
"email": "cexll@cexll.com"
|
||||
}
|
||||
}
|
||||
90
agents/requirements/README.md
Normal file
90
agents/requirements/README.md
Normal file
@@ -0,0 +1,90 @@
|
||||
# requirements - Requirements-Driven Workflow
|
||||
|
||||
Lightweight requirements-to-code pipeline with interactive quality gates.
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
python install.py --module requirements
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
/requirements-pilot <FEATURE_DESCRIPTION> [OPTIONS]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
| Option | Description |
|
||||
|--------|-------------|
|
||||
| `--skip-tests` | Skip testing phase entirely |
|
||||
| `--skip-scan` | Skip initial repository scanning |
|
||||
|
||||
## Workflow Phases
|
||||
|
||||
| Phase | Description | Output |
|
||||
|-------|-------------|--------|
|
||||
| 0 | Repository scanning | `00-repository-context.md` |
|
||||
| 1 | Requirements confirmation | `requirements-confirm.md` (90+ score required) |
|
||||
| 2 | Implementation | Code + `requirements-spec.md` |
|
||||
|
||||
## Quality Scoring (100-point system)
|
||||
|
||||
| Category | Points | Focus |
|
||||
|----------|--------|-------|
|
||||
| Functional Clarity | 30 | Input/output specs, success criteria |
|
||||
| Technical Specificity | 25 | Integration points, constraints |
|
||||
| Implementation Completeness | 25 | Edge cases, error handling |
|
||||
| Business Context | 20 | User value, priority |
|
||||
|
||||
## Sub-Agents
|
||||
|
||||
| Agent | Role |
|
||||
|-------|------|
|
||||
| `requirements-generate` | Create technical specifications |
|
||||
| `requirements-code` | Implement functionality |
|
||||
| `requirements-review` | Code quality evaluation |
|
||||
| `requirements-testing` | Test case creation |
|
||||
|
||||
## Approval Gate
|
||||
|
||||
One mandatory stop point after Phase 1:
|
||||
- Requirements must achieve 90+ quality score
|
||||
- User must explicitly approve before implementation begins
|
||||
|
||||
## Testing Decision
|
||||
|
||||
After code review passes (≥90%):
|
||||
- `--skip-tests`: Complete without testing
|
||||
- No option: Interactive prompt with smart recommendations based on task complexity
|
||||
|
||||
## Output Structure
|
||||
|
||||
```
|
||||
.claude/specs/{feature_name}/
|
||||
├── 00-repository-context.md
|
||||
├── requirements-confirm.md
|
||||
└── requirements-spec.md
|
||||
```
|
||||
|
||||
## When to Use
|
||||
|
||||
- Quick prototypes
|
||||
- Well-defined features
|
||||
- Smaller scope tasks
|
||||
- When full BMAD workflow is overkill
|
||||
|
||||
## Directory Structure
|
||||
|
||||
```
|
||||
agents/requirements/
|
||||
├── README.md
|
||||
├── commands/
|
||||
│ └── requirements-pilot.md
|
||||
└── agents/
|
||||
├── requirements-generate.md
|
||||
├── requirements-code.md
|
||||
├── requirements-review.md
|
||||
└── requirements-testing.md
|
||||
```
|
||||
259
agents/requirements/REQUIREMENTS-WORKFLOW.md
Normal file
259
agents/requirements/REQUIREMENTS-WORKFLOW.md
Normal file
@@ -0,0 +1,259 @@
|
||||
# Requirements-Driven Workflow Guide
|
||||
|
||||
> Lightweight alternative to BMAD for rapid prototyping and simple feature development
|
||||
|
||||
## 🎯 What is Requirements Workflow?
|
||||
|
||||
A streamlined 4-phase workflow that focuses on getting from requirements to working code quickly:
|
||||
|
||||
**Requirements → Implementation → Review → Testing**
|
||||
|
||||
Perfect for:
|
||||
- Quick prototypes
|
||||
- Small features
|
||||
- Bug fixes with clear scope
|
||||
- Projects without complex architecture needs
|
||||
|
||||
## 🚀 Quick Start
|
||||
|
||||
### Basic Command
|
||||
|
||||
```bash
|
||||
/requirements-pilot "Implement JWT authentication with refresh tokens"
|
||||
|
||||
# Automated workflow:
|
||||
# 1. Requirements generation (90/100 quality gate)
|
||||
# 2. Code implementation
|
||||
# 3. Code review
|
||||
# 4. Testing strategy
|
||||
```
|
||||
|
||||
### When to Use
|
||||
|
||||
**Use Requirements Workflow** when:
|
||||
- Feature scope is clear and simple
|
||||
- No complex architecture design needed
|
||||
- Fast iteration is priority
|
||||
- You want minimal workflow overhead
|
||||
|
||||
**Use BMAD Workflow** when:
|
||||
- Complex business requirements
|
||||
- Multiple systems integration
|
||||
- Architecture design is critical
|
||||
- Need detailed sprint planning
|
||||
|
||||
## 📋 Workflow Phases
|
||||
|
||||
### Phase 1: Requirements Generation
|
||||
- **Agent**: `requirements-generate`
|
||||
- **Quality Gate**: Requirements score ≥ 90/100
|
||||
- **Output**: Functional requirements document
|
||||
- **Focus**:
|
||||
- Clear functional requirements
|
||||
- Acceptance criteria
|
||||
- Technical constraints
|
||||
- Implementation notes
|
||||
|
||||
**Quality Criteria (100 points)**:
|
||||
- Clarity (30): Unambiguous, well-defined
|
||||
- Completeness (25): All aspects covered
|
||||
- Testability (20): Clear verification points
|
||||
- Technical Feasibility (15): Realistic implementation
|
||||
- Scope Definition (10): Clear boundaries
|
||||
|
||||
### Phase 2: Code Implementation
|
||||
- **Agent**: `requirements-code`
|
||||
- **Quality Gate**: Code completion
|
||||
- **Output**: Implementation files
|
||||
- **Process**:
|
||||
1. Read requirements + repository context
|
||||
2. Implement features following requirements
|
||||
3. Create or modify code files
|
||||
4. Follow existing code conventions
|
||||
|
||||
### Phase 3: Code Review
|
||||
- **Agent**: `requirements-review`
|
||||
- **Quality Gate**: Pass / Pass with Risk / Fail
|
||||
- **Output**: Review report
|
||||
- **Focus**:
|
||||
- Code quality
|
||||
- Requirements alignment
|
||||
- Security concerns
|
||||
- Performance issues
|
||||
- Best practices compliance
|
||||
|
||||
**Review Status**:
|
||||
- **Pass**: Meets standards, ready for testing
|
||||
- **Pass with Risk**: Minor issues noted
|
||||
- **Fail**: Requires implementation revision
|
||||
|
||||
### Phase 4: Testing Strategy
|
||||
- **Agent**: `requirements-testing`
|
||||
- **Quality Gate**: Test execution
|
||||
- **Output**: Test report
|
||||
- **Process**:
|
||||
1. Create test strategy from requirements
|
||||
2. Generate test cases
|
||||
3. Execute tests (unit, integration)
|
||||
4. Report results
|
||||
|
||||
## 📁 Workflow Artifacts
|
||||
|
||||
Generated in `.claude/requirements/{feature-name}/`:
|
||||
|
||||
```
|
||||
.claude/requirements/jwt-authentication/
|
||||
├── 01-requirements.md # Functional requirements (score ≥ 90)
|
||||
├── 02-implementation.md # Implementation summary
|
||||
├── 03-review.md # Code review report
|
||||
└── 04-testing.md # Test strategy and results
|
||||
```
|
||||
|
||||
## 🔧 Command Options
|
||||
|
||||
```bash
|
||||
# Standard workflow
|
||||
/requirements-pilot "Add API rate limiting"
|
||||
|
||||
# With specific technology
|
||||
/requirements-pilot "Redis caching layer with TTL management"
|
||||
|
||||
# Bug fix with requirements
|
||||
/requirements-pilot "Fix login session timeout issue"
|
||||
```
|
||||
|
||||
## 📊 Quality Scoring
|
||||
|
||||
### Requirements Score (100 points)
|
||||
|
||||
| Category | Points | Description |
|
||||
|----------|--------|-------------|
|
||||
| Clarity | 30 | Unambiguous, well-defined requirements |
|
||||
| Completeness | 25 | All functional aspects covered |
|
||||
| Testability | 20 | Clear acceptance criteria |
|
||||
| Technical Feasibility | 15 | Realistic implementation plan |
|
||||
| Scope Definition | 10 | Clear feature boundaries |
|
||||
|
||||
**Threshold**: ≥ 90 points to proceed
|
||||
|
||||
### Automatic Optimization
|
||||
|
||||
If initial score < 90:
|
||||
1. User provides feedback
|
||||
2. Agent revises requirements
|
||||
3. System recalculates score
|
||||
4. Repeat until ≥ 90
|
||||
5. User confirms → Save → Next phase
|
||||
|
||||
## 🎯 Comparison: Requirements vs BMAD
|
||||
|
||||
| Aspect | Requirements Workflow | BMAD Workflow |
|
||||
|--------|----------------------|---------------|
|
||||
| **Phases** | 4 (Requirements → Code → Review → Test) | 6 (PO → Arch → SM → Dev → Review → QA) |
|
||||
| **Duration** | Fast (hours) | Thorough (days) |
|
||||
| **Documentation** | Minimal | Comprehensive |
|
||||
| **Quality Gates** | 1 (Requirements ≥ 90) | 2 (PRD ≥ 90, Design ≥ 90) |
|
||||
| **Approval Points** | 1 (user approval after requirements confirmation) | Multiple (after PRD, Architecture, Sprint Plan) |
|
||||
| **Best For** | Simple features, prototypes | Complex features, enterprise projects |
|
||||
| **Artifacts** | 4 documents | 6 documents |
|
||||
| **Planning** | Direct implementation | Sprint planning included |
|
||||
| **Architecture** | Implicit in requirements | Explicit design phase |
|
||||
|
||||
## 💡 Usage Examples
|
||||
|
||||
### Example 1: API Feature
|
||||
|
||||
```bash
|
||||
/requirements-pilot "REST API endpoint for user profile updates with validation"
|
||||
|
||||
# Generated requirements include:
|
||||
# - Endpoint specification (PUT /api/users/:id/profile)
|
||||
# - Request/response schemas
|
||||
# - Validation rules
|
||||
# - Error handling
|
||||
# - Authentication requirements
|
||||
|
||||
# Implementation follows directly
|
||||
# Review checks API best practices
|
||||
# Testing includes endpoint testing
|
||||
```
|
||||
|
||||
### Example 2: Database Schema
|
||||
|
||||
```bash
|
||||
/requirements-pilot "Add audit logging table for user actions"
|
||||
|
||||
# Generated requirements include:
|
||||
# - Table schema definition
|
||||
# - Indexing strategy
|
||||
# - Retention policy
|
||||
# - Query patterns
|
||||
|
||||
# Implementation creates migration
|
||||
# Review checks schema design
|
||||
# Testing verifies logging behavior
|
||||
```
|
||||
|
||||
### Example 3: Bug Fix
|
||||
|
||||
```bash
|
||||
/requirements-pilot "Fix race condition in order processing queue"
|
||||
|
||||
# Generated requirements include:
|
||||
# - Problem description
|
||||
# - Root cause analysis
|
||||
# - Solution approach
|
||||
# - Verification steps
|
||||
|
||||
# Implementation applies fix
|
||||
# Review checks concurrency handling
|
||||
# Testing includes stress tests
|
||||
```
|
||||
|
||||
## 🔄 Iterative Refinement
|
||||
|
||||
Each phase supports feedback:
|
||||
|
||||
```
|
||||
Agent: "Requirements complete (Score: 85/100)"
|
||||
User: "Add error handling for network failures"
|
||||
Agent: "Updated requirements (Score: 93/100) ✅"
|
||||
```
|
||||
|
||||
## 🚀 Advanced Usage
|
||||
|
||||
### Combining with Individual Commands
|
||||
|
||||
```bash
|
||||
# Generate requirements only
|
||||
/requirements-generate "OAuth2 integration requirements"
|
||||
|
||||
# Just code implementation (requires existing requirements)
|
||||
/requirements-code "Implement based on requirements.md"
|
||||
|
||||
# Standalone review
|
||||
/requirements-review "Review current implementation"
|
||||
```
|
||||
|
||||
### Integration with BMAD
|
||||
|
||||
Use Requirements Workflow for sub-tasks within BMAD sprints:
|
||||
|
||||
```bash
|
||||
# BMAD creates sprint plan
|
||||
/bmad-pilot "E-commerce platform"
|
||||
|
||||
# Use Requirements for individual sprint tasks
|
||||
/requirements-pilot "Shopping cart session management"
|
||||
/requirements-pilot "Payment webhook handling"
|
||||
```
|
||||
|
||||
## 📚 Related Documentation
|
||||
|
||||
- **[BMAD Workflow](BMAD-WORKFLOW.md)** - Full agile methodology
|
||||
- **[Development Commands](DEVELOPMENT-COMMANDS.md)** - Direct coding commands
|
||||
- **[Quick Start Guide](QUICK-START.md)** - Get started quickly
|
||||
|
||||
---
|
||||
|
||||
**Requirements-Driven Development** - From requirements to working code in hours, not days.
|
||||
@@ -104,6 +104,10 @@ You adhere to core software engineering principles like KISS (Keep It Simple, St
|
||||
|
||||
## Implementation Constraints
|
||||
|
||||
### Language Rules
|
||||
- **Language Matching**: Output language matches user input (Chinese input → Chinese doc, English input → English doc). When language is ambiguous, default to Chinese.
|
||||
- **Technical Terms**: Keep technical terms (API, SQL, CRUD, etc.) in English; translate explanatory text only.
|
||||
|
||||
### MUST Requirements
|
||||
- **Working Solution**: Code must fully implement the specified functionality
|
||||
- **Integration Compatibility**: Must work seamlessly with existing codebase
|
||||
@@ -88,6 +88,10 @@ Each phase should be independently deployable and testable.
|
||||
|
||||
## Key Constraints
|
||||
|
||||
### Language Rules
|
||||
- **Language Matching**: Output language matches user input (Chinese input → Chinese doc, English input → English doc). When language is ambiguous, default to Chinese.
|
||||
- **Technical Terms**: Keep technical terms (API, SQL, CRUD, etc.) in English; translate explanatory text only.
|
||||
|
||||
### MUST Requirements
|
||||
- **Direct Implementability**: Every item must be directly translatable to code
|
||||
- **Specific Technical Details**: Include exact file paths, function names, table schemas
|
||||
@@ -176,6 +176,10 @@ You adhere to core software engineering principles like KISS (Keep It Simple, St
|
||||
|
||||
## Key Constraints
|
||||
|
||||
### Language Rules
|
||||
- **Language Matching**: Output language matches user input (Chinese input → Chinese doc, English input → English doc). When language is ambiguous, default to Chinese.
|
||||
- **Technical Terms**: Keep technical terms (API, E2E, CI/CD, etc.) in English; translate explanatory text only.
|
||||
|
||||
### MUST Requirements
|
||||
- **Functional Verification**: Verify all specified functionality works
|
||||
- **Integration Testing**: Ensure seamless integration with existing code
|
||||
@@ -199,6 +199,10 @@ func TestAPIEndpoint(t *testing.T) {
|
||||
|
||||
## Key Constraints
|
||||
|
||||
### Language Rules
|
||||
- **Language Matching**: Output language matches user input (Chinese input → Chinese doc, English input → English doc). When language is ambiguous, default to Chinese.
|
||||
- **Technical Terms**: Keep technical terms (API, E2E, CI/CD, Mock, etc.) in English; translate explanatory text only.
|
||||
|
||||
### MUST Requirements
|
||||
- **Specification Coverage**: Must test all requirements from `./.claude/specs/{feature_name}/requirements-spec.md`
|
||||
- **Critical Path Testing**: Must test all critical business functionality
|
||||
1122
bin/cli.js
Executable file
1122
bin/cli.js
Executable file
File diff suppressed because it is too large
Load Diff
@@ -1,37 +0,0 @@
|
||||
{
|
||||
"name": "bmad-agile-workflow",
|
||||
"source": "./",
|
||||
"description": "Full BMAD agile workflow with role-based agents (PO, Architect, SM, Dev, QA) and interactive approval gates",
|
||||
"version": "1.0.0",
|
||||
"author": {
|
||||
"name": "Claude Code Dev Workflows",
|
||||
"url": "https://github.com/cexll/myclaude"
|
||||
},
|
||||
"homepage": "https://github.com/cexll/myclaude",
|
||||
"repository": "https://github.com/cexll/myclaude",
|
||||
"license": "MIT",
|
||||
"keywords": [
|
||||
"bmad",
|
||||
"agile",
|
||||
"scrum",
|
||||
"product-owner",
|
||||
"architect",
|
||||
"developer",
|
||||
"qa",
|
||||
"workflow-orchestration"
|
||||
],
|
||||
"category": "workflows",
|
||||
"strict": false,
|
||||
"commands": [
|
||||
"./commands/bmad-pilot.md"
|
||||
],
|
||||
"agents": [
|
||||
"./agents/bmad-po.md",
|
||||
"./agents/bmad-architect.md",
|
||||
"./agents/bmad-sm.md",
|
||||
"./agents/bmad-dev.md",
|
||||
"./agents/bmad-qa.md",
|
||||
"./agents/bmad-orchestrator.md",
|
||||
"./agents/bmad-review.md"
|
||||
]
|
||||
}
|
||||
72
cliff.toml
Normal file
72
cliff.toml
Normal file
@@ -0,0 +1,72 @@
|
||||
# git-cliff configuration file
|
||||
# https://git-cliff.org/docs/configuration
|
||||
|
||||
[changelog]
|
||||
# changelog header
|
||||
header = """
|
||||
# Changelog
|
||||
|
||||
All notable changes to this project will be documented in this file.
|
||||
"""
|
||||
# template for the changelog body
|
||||
body = """
|
||||
{% if version %}
|
||||
## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
|
||||
{% else %}
|
||||
## Unreleased
|
||||
{% endif %}
|
||||
{% for group, commits in commits | group_by(attribute="group") %}
|
||||
### {{ group }}
|
||||
|
||||
{% for commit in commits %}
|
||||
- {{ commit.message | split(pat="\n") | first }}
|
||||
{% endfor -%}
|
||||
{% endfor -%}
|
||||
"""
|
||||
# remove the leading and trailing whitespace from the template
|
||||
trim = true
|
||||
# changelog footer
|
||||
footer = """
|
||||
<!-- generated by git-cliff -->
|
||||
"""
|
||||
|
||||
[git]
|
||||
# parse the commits based on https://www.conventionalcommits.org
|
||||
conventional_commits = true
|
||||
# filter out the commits that are not conventional
|
||||
filter_unconventional = false
|
||||
# process each line of a commit as an individual commit
|
||||
split_commits = false
|
||||
# regex for preprocessing the commit messages
|
||||
commit_preprocessors = [
|
||||
{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](https://github.com/cexll/myclaude/issues/${2}))" },
|
||||
]
|
||||
# regex for parsing and grouping commits
|
||||
commit_parsers = [
|
||||
{ message = "^feat", group = "🚀 Features" },
|
||||
{ message = "^fix", group = "🐛 Bug Fixes" },
|
||||
{ message = "^doc", group = "📚 Documentation" },
|
||||
{ message = "^perf", group = "⚡ Performance" },
|
||||
{ message = "^refactor", group = "🚜 Refactor" },
|
||||
{ message = "^style", group = "🎨 Styling" },
|
||||
{ message = "^test", group = "🧪 Testing" },
|
||||
{ message = "^chore\\(release\\):", skip = true },
|
||||
{ message = "^chore", group = "⚙️ Miscellaneous Tasks" },
|
||||
{ body = ".*security", group = "🛡️ Security" },
|
||||
{ message = "^revert", group = "◀️ Revert" },
|
||||
{ message = ".*", group = "💼 Other" },
|
||||
]
|
||||
# protect breaking changes from being skipped due to matching a skipping commit_parser
|
||||
protect_breaking_commits = false
|
||||
# filter out the commits that are not matched by commit parsers
|
||||
filter_commits = false
|
||||
# glob pattern for matching git tags
|
||||
tag_pattern = "v[0-9]*"
|
||||
# regex for skipping tags
|
||||
skip_tags = "v0.1.0-beta.1"
|
||||
# regex for ignoring tags
|
||||
ignore_tags = ""
|
||||
# sort the tags topologically
|
||||
topo_order = false
|
||||
# sort the commits inside sections by oldest/newest order
|
||||
sort_commits = "newest"
|
||||
47
codeagent-wrapper/.github/workflows/ci.yml
vendored
Normal file
47
codeagent-wrapper/.github/workflows/ci.yml
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main, master]
|
||||
pull_request:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
go-version: ["1.21", "1.22"]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
fetch-tags: true
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
cache: true
|
||||
- name: Test
|
||||
run: make test
|
||||
- name: Build
|
||||
run: make build
|
||||
- name: Verify version
|
||||
run: ./codeagent-wrapper --version
|
||||
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
fetch-tags: true
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: "1.22"
|
||||
cache: true
|
||||
- name: Lint
|
||||
run: make lint
|
||||
|
||||
23
codeagent-wrapper/.gitignore
vendored
Normal file
23
codeagent-wrapper/.gitignore
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
# Build artifacts
|
||||
bin/
|
||||
codeagent
|
||||
codeagent.exe
|
||||
/codeagent-wrapper
|
||||
/codeagent-wrapper.exe
|
||||
*.test
|
||||
|
||||
# Coverage reports
|
||||
coverage.out
|
||||
coverage*.out
|
||||
cover.out
|
||||
cover_*.out
|
||||
coverage.html
|
||||
|
||||
# Logs
|
||||
*.log
|
||||
|
||||
# Temp files
|
||||
*.tmp
|
||||
*.swp
|
||||
*~
|
||||
.DS_Store
|
||||
37
codeagent-wrapper/Makefile
Normal file
37
codeagent-wrapper/Makefile
Normal file
@@ -0,0 +1,37 @@
|
||||
GO ?= go
|
||||
VERSION := $(shell git describe --tags --always --dirty 2>/dev/null || echo dev)
|
||||
LDFLAGS := -ldflags "-X codeagent-wrapper/internal/app.version=$(VERSION)"
|
||||
|
||||
TOOLS_BIN := $(CURDIR)/bin
|
||||
TOOLCHAIN ?= go1.22.0
|
||||
GOLANGCI_LINT_VERSION := v1.56.2
|
||||
STATICCHECK_VERSION := v0.4.7
|
||||
|
||||
GOLANGCI_LINT := $(TOOLS_BIN)/golangci-lint
|
||||
STATICCHECK := $(TOOLS_BIN)/staticcheck
|
||||
|
||||
.PHONY: build test lint clean install
|
||||
|
||||
build:
|
||||
$(GO) build $(LDFLAGS) -o codeagent-wrapper ./cmd/codeagent-wrapper
|
||||
|
||||
test:
|
||||
$(GO) test ./...
|
||||
|
||||
$(GOLANGCI_LINT):
|
||||
@mkdir -p $(TOOLS_BIN)
|
||||
GOTOOLCHAIN=$(TOOLCHAIN) GOBIN=$(TOOLS_BIN) $(GO) install github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)
|
||||
|
||||
$(STATICCHECK):
|
||||
@mkdir -p $(TOOLS_BIN)
|
||||
GOTOOLCHAIN=$(TOOLCHAIN) GOBIN=$(TOOLS_BIN) $(GO) install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION)
|
||||
|
||||
lint: $(GOLANGCI_LINT) $(STATICCHECK)
|
||||
GOTOOLCHAIN=$(TOOLCHAIN) $(GOLANGCI_LINT) run ./...
|
||||
GOTOOLCHAIN=$(TOOLCHAIN) $(STATICCHECK) ./...
|
||||
|
||||
clean:
|
||||
@python3 -c 'import glob, os; paths=["codeagent","codeagent.exe","codeagent-wrapper","codeagent-wrapper.exe","coverage.out","cover.out","coverage.html"]; paths += glob.glob("coverage*.out") + glob.glob("cover_*.out") + glob.glob("*.test"); [os.remove(p) for p in paths if os.path.exists(p)]'
|
||||
|
||||
install:
|
||||
$(GO) install $(LDFLAGS) ./cmd/codeagent-wrapper
|
||||
157
codeagent-wrapper/README.md
Normal file
157
codeagent-wrapper/README.md
Normal file
@@ -0,0 +1,157 @@
|
||||
# codeagent-wrapper
|
||||
|
||||
`codeagent-wrapper` 是一个用 Go 编写的“多后端 AI 代码代理”命令行包装器:用统一的 CLI 入口封装不同的 AI 工具后端(Codex / Claude / Gemini / Opencode),并提供一致的参数、配置与会话恢复体验。
|
||||
|
||||
入口:`cmd/codeagent/main.go`(生成二进制名:`codeagent`)和 `cmd/codeagent-wrapper/main.go`(生成二进制名:`codeagent-wrapper`)。两者行为一致。
|
||||
|
||||
## 功能特性
|
||||
|
||||
- 多后端支持:`codex` / `claude` / `gemini` / `opencode`
|
||||
- 统一命令行:`codeagent [flags] <task>` / `codeagent resume <session_id> <task> [workdir]`
|
||||
- 自动 stdin:遇到换行/特殊字符/超长任务自动走 stdin,避免 shell quoting 地狱;也可显式使用 `-`
|
||||
- 配置合并:支持配置文件与 `CODEAGENT_*` 环境变量(viper)
|
||||
- Agent 预设:从 `~/.codeagent/models.json` 读取 backend/model/prompt 等预设
|
||||
- 并行执行:`--parallel` 从 stdin 读取多任务配置,支持依赖拓扑并发执行
|
||||
- 日志清理:`codeagent cleanup` 清理旧日志(日志写入系统临时目录)
|
||||
|
||||
## 安装
|
||||
|
||||
要求:Go 1.21+。
|
||||
|
||||
在仓库根目录执行:
|
||||
|
||||
```bash
|
||||
go install ./cmd/codeagent
|
||||
go install ./cmd/codeagent-wrapper
|
||||
```
|
||||
|
||||
安装后确认:
|
||||
|
||||
```bash
|
||||
codeagent version
|
||||
codeagent-wrapper version
|
||||
```
|
||||
|
||||
## 使用示例
|
||||
|
||||
最简单用法(默认后端:`codex`):
|
||||
|
||||
```bash
|
||||
codeagent "分析 internal/app/cli.go 的入口逻辑,给出改进建议"
|
||||
```
|
||||
|
||||
指定后端:
|
||||
|
||||
```bash
|
||||
codeagent --backend claude "解释 internal/executor/parallel_config.go 的并行配置格式"
|
||||
```
|
||||
|
||||
指定工作目录(第 2 个位置参数):
|
||||
|
||||
```bash
|
||||
codeagent "在当前 repo 下搜索潜在数据竞争" .
|
||||
```
|
||||
|
||||
显式从 stdin 读取 task(使用 `-`):
|
||||
|
||||
```bash
|
||||
cat task.txt | codeagent -
|
||||
```
|
||||
|
||||
恢复会话:
|
||||
|
||||
```bash
|
||||
codeagent resume <session_id> "继续上次任务"
|
||||
```
|
||||
|
||||
并行模式(从 stdin 读取任务配置;禁止位置参数):
|
||||
|
||||
```bash
|
||||
codeagent --parallel <<'EOF'
|
||||
---TASK---
|
||||
id: t1
|
||||
workdir: .
|
||||
backend: codex
|
||||
---CONTENT---
|
||||
列出本项目的主要模块以及它们的职责。
|
||||
---TASK---
|
||||
id: t2
|
||||
dependencies: t1
|
||||
backend: claude
|
||||
---CONTENT---
|
||||
基于 t1 的结论,提出重构风险点与建议。
|
||||
EOF
|
||||
```
|
||||
|
||||
## 配置说明
|
||||
|
||||
### 配置文件
|
||||
|
||||
默认查找路径(当 `--config` 为空时):
|
||||
|
||||
- `$HOME/.codeagent/config.(yaml|yml|json|toml|...)`
|
||||
|
||||
示例(YAML):
|
||||
|
||||
```yaml
|
||||
backend: codex
|
||||
model: gpt-4.1
|
||||
skip-permissions: false
|
||||
```
|
||||
|
||||
也可以通过 `--config /path/to/config.yaml` 显式指定。
|
||||
|
||||
### 环境变量(`CODEAGENT_*`)
|
||||
|
||||
通过 viper 读取并自动映射 `-` 为 `_`,常用项:
|
||||
|
||||
- `CODEAGENT_BACKEND`(`codex|claude|gemini|opencode`)
|
||||
- `CODEAGENT_MODEL`
|
||||
- `CODEAGENT_AGENT`
|
||||
- `CODEAGENT_PROMPT_FILE`
|
||||
- `CODEAGENT_REASONING_EFFORT`
|
||||
- `CODEAGENT_SKIP_PERMISSIONS`
|
||||
- `CODEAGENT_FULL_OUTPUT`(并行模式 legacy 输出)
|
||||
- `CODEAGENT_MAX_PARALLEL_WORKERS`(0 表示不限制,上限 100)
|
||||
|
||||
### Agent 预设(`~/.codeagent/models.json`)
|
||||
|
||||
可在 `~/.codeagent/models.json` 定义 agent → backend/model/prompt 等映射,用 `--agent <name>` 选择:
|
||||
|
||||
```json
|
||||
{
|
||||
"default_backend": "opencode",
|
||||
"default_model": "opencode/grok-code",
|
||||
"agents": {
|
||||
"develop": {
|
||||
"backend": "codex",
|
||||
"model": "gpt-4.1",
|
||||
"prompt_file": "~/.codeagent/prompts/develop.md",
|
||||
"description": "Code development"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 支持的后端
|
||||
|
||||
该项目本身不内置模型能力,依赖你本机安装并可在 `PATH` 中找到对应 CLI:
|
||||
|
||||
- `codex`:执行 `codex e ...`(默认会添加 `--dangerously-bypass-approvals-and-sandbox`;如需关闭请设置 `CODEX_BYPASS_SANDBOX=false`)
|
||||
- `claude`:执行 `claude -p ... --output-format stream-json`(默认会跳过权限提示;如需开启请设置 `CODEAGENT_SKIP_PERMISSIONS=false`)
|
||||
- `gemini`:执行 `gemini ... -o stream-json`(可从 `~/.gemini/.env` 加载环境变量)
|
||||
- `opencode`:执行 `opencode run --format json`
|
||||
|
||||
## 开发
|
||||
|
||||
```bash
|
||||
make build
|
||||
make test
|
||||
make lint
|
||||
make clean
|
||||
```
|
||||
|
||||
## 故障排查
|
||||
|
||||
- macOS 下如果看到临时目录相关的 `permission denied`(例如临时可执行文件无法在 `/var/folders/.../T` 执行),可设置一个可执行的临时目录:`CODEAGENT_TMPDIR=$HOME/.codeagent/tmp`。
|
||||
- `claude` 后端的 `base_url/api_key`(来自 `~/.codeagent/models.json`)会注入到子进程环境变量:`ANTHROPIC_BASE_URL` / `ANTHROPIC_API_KEY`。若 `base_url` 指向本地代理(如 `localhost:23001`),请确认代理进程在运行。
|
||||
447
codeagent-wrapper/USER_GUIDE.md
Normal file
447
codeagent-wrapper/USER_GUIDE.md
Normal file
@@ -0,0 +1,447 @@
|
||||
# Codeagent-Wrapper User Guide
|
||||
|
||||
Multi-backend AI code execution wrapper supporting Codex, Claude, and Gemini.
|
||||
|
||||
## Overview
|
||||
|
||||
`codeagent-wrapper` is a Go-based CLI tool that provides a unified interface to multiple AI coding backends. It handles:
|
||||
- Multi-backend execution (Codex, Claude, Gemini)
|
||||
- JSON stream parsing and output formatting
|
||||
- Session management and resumption
|
||||
- Parallel task execution with dependency resolution
|
||||
- Timeout handling and signal forwarding
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
# Recommended: run the installer and select "codeagent-wrapper"
|
||||
npx github:cexll/myclaude
|
||||
|
||||
# Manual build (optional; requires repo checkout)
|
||||
cd codeagent-wrapper
|
||||
go build -o ~/.claude/bin/codeagent-wrapper
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Basic Usage
|
||||
|
||||
```bash
|
||||
# Simple task (default: codex backend)
|
||||
codeagent-wrapper "explain @src/main.go"
|
||||
|
||||
# With backend selection
|
||||
codeagent-wrapper --backend claude "refactor @utils.ts"
|
||||
|
||||
# With HEREDOC (recommended for complex tasks)
|
||||
codeagent-wrapper --backend gemini - <<'EOF'
|
||||
Implement user authentication:
|
||||
- JWT tokens
|
||||
- Password hashing with bcrypt
|
||||
- Session management
|
||||
EOF
|
||||
```
|
||||
|
||||
### Backend Selection
|
||||
|
||||
| Backend | Command | Best For |
|
||||
|---------|---------|----------|
|
||||
| **Codex** | `--backend codex` | General code tasks (default) |
|
||||
| **Claude** | `--backend claude` | Complex reasoning, architecture |
|
||||
| **Gemini** | `--backend gemini` | Fast iteration, prototyping |
|
||||
|
||||
## Core Features
|
||||
|
||||
### 1. Multi-Backend Support
|
||||
|
||||
```bash
|
||||
# Codex (default)
|
||||
codeagent-wrapper "add logging to @app.js"
|
||||
|
||||
# Claude for architecture decisions
|
||||
codeagent-wrapper --backend claude - <<'EOF'
|
||||
Design a microservices architecture for e-commerce:
|
||||
- Service boundaries
|
||||
- Communication patterns
|
||||
- Data consistency strategy
|
||||
EOF
|
||||
|
||||
# Gemini for quick prototypes
|
||||
codeagent-wrapper --backend gemini "create React component for user profile"
|
||||
```
|
||||
|
||||
### 2. File References with @ Syntax
|
||||
|
||||
```bash
|
||||
# Single file
|
||||
codeagent-wrapper "optimize @src/utils.ts"
|
||||
|
||||
# Multiple files
|
||||
codeagent-wrapper "refactor @src/auth.ts and @src/middleware.ts"
|
||||
|
||||
# Entire directory
|
||||
codeagent-wrapper "analyze @src for security issues"
|
||||
```
|
||||
|
||||
### 3. Session Management
|
||||
|
||||
```bash
|
||||
# First task
|
||||
codeagent-wrapper "add validation to user form"
|
||||
# Output includes: SESSION_ID: 019a7247-ac9d-71f3-89e2-a823dbd8fd14
|
||||
|
||||
# Resume session
|
||||
codeagent-wrapper resume 019a7247-ac9d-71f3-89e2-a823dbd8fd14 - <<'EOF'
|
||||
Now add error messages for each validation rule
|
||||
EOF
|
||||
```
|
||||
|
||||
### 4. Parallel Execution
|
||||
|
||||
Execute multiple tasks concurrently with dependency management:
|
||||
|
||||
```bash
|
||||
# Default: summary output (context-efficient, recommended)
|
||||
codeagent-wrapper --parallel <<'EOF'
|
||||
---TASK---
|
||||
id: backend_1701234567
|
||||
workdir: /project/backend
|
||||
---CONTENT---
|
||||
implement /api/users endpoints with CRUD operations
|
||||
|
||||
---TASK---
|
||||
id: frontend_1701234568
|
||||
workdir: /project/frontend
|
||||
---CONTENT---
|
||||
build Users page consuming /api/users
|
||||
|
||||
---TASK---
|
||||
id: tests_1701234569
|
||||
workdir: /project/tests
|
||||
dependencies: backend_1701234567, frontend_1701234568
|
||||
---CONTENT---
|
||||
add integration tests for user management flow
|
||||
EOF
|
||||
|
||||
# Full output mode (for debugging, includes complete task messages)
|
||||
codeagent-wrapper --parallel --full-output <<'EOF'
|
||||
...
|
||||
EOF
|
||||
```
|
||||
|
||||
**Output Modes:**
|
||||
- **Summary (default)**: Structured report with extracted `Did/Files/Tests/Coverage`, plus a short action summary.
|
||||
- **Full (`--full-output`)**: Complete task messages included. Use only for debugging.
|
||||
|
||||
**Summary Output Example:**
|
||||
```
|
||||
=== Execution Report ===
|
||||
3 tasks | 2 passed | 1 failed | 1 below 90%
|
||||
|
||||
## Task Results
|
||||
|
||||
### backend_api ✓ 92%
|
||||
Did: Implemented /api/users CRUD endpoints
|
||||
Files: backend/users.go, backend/router.go
|
||||
Tests: 12 passed
|
||||
Log: /tmp/codeagent-xxx.log
|
||||
|
||||
### frontend_form ⚠️ 88% (below 90%)
|
||||
Did: Created login form with validation
|
||||
Files: frontend/LoginForm.tsx
|
||||
Tests: 8 passed
|
||||
Gap: lines not covered: frontend/LoginForm.tsx:42-47
|
||||
Log: /tmp/codeagent-yyy.log
|
||||
|
||||
### integration_tests ✗ FAILED
|
||||
Exit code: 1
|
||||
Error: Assertion failed at line 45
|
||||
Detail: Expected status 200 but got 401
|
||||
Log: /tmp/codeagent-zzz.log
|
||||
|
||||
## Summary
|
||||
- 2/3 completed successfully
|
||||
- Fix: integration_tests (Assertion failed at line 45)
|
||||
- Coverage: frontend_form
|
||||
```
|
||||
|
||||
**Parallel Task Format:**
|
||||
- `---TASK---` - Starts task block
|
||||
- `id: <unique_id>` - Required, use `<feature>_<timestamp>` format
|
||||
- `workdir: <path>` - Optional, defaults to current directory
|
||||
- `dependencies: <id1>, <id2>` - Optional, comma-separated task IDs
|
||||
- `---CONTENT---` - Separates metadata from task content
|
||||
|
||||
**Features:**
|
||||
- Automatic topological sorting
|
||||
- Unlimited concurrency for independent tasks
|
||||
- Error isolation (failures don't stop other tasks)
|
||||
- Dependency blocking (skip if parent fails)
|
||||
|
||||
### 5. Working Directory
|
||||
|
||||
```bash
|
||||
# Execute in specific directory
|
||||
codeagent-wrapper "run tests" /path/to/project
|
||||
|
||||
# With backend selection
|
||||
codeagent-wrapper --backend claude "analyze code" /project/backend
|
||||
|
||||
# With HEREDOC
|
||||
codeagent-wrapper - /path/to/project <<'EOF'
|
||||
refactor database layer
|
||||
EOF
|
||||
```
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
### Timeout Control
|
||||
|
||||
```bash
|
||||
# Set custom timeout (1 hour = 3600000ms)
|
||||
CODEX_TIMEOUT=3600000 codeagent-wrapper "long running task"
|
||||
|
||||
# Default timeout: 7200000ms (2 hours)
|
||||
```
|
||||
|
||||
**Timeout behavior:**
|
||||
- Sends SIGTERM to backend process
|
||||
- Waits 5 seconds
|
||||
- Sends SIGKILL if process doesn't exit
|
||||
- Returns exit code 124 (consistent with GNU timeout)
|
||||
|
||||
### Complex Multi-line Tasks
|
||||
|
||||
Use HEREDOC to avoid shell escaping issues:
|
||||
|
||||
```bash
|
||||
codeagent-wrapper - <<'EOF'
|
||||
Refactor authentication system:
|
||||
|
||||
Current issues:
|
||||
- Password stored as plain text
|
||||
- No rate limiting on login
|
||||
- Sessions don't expire
|
||||
|
||||
Requirements:
|
||||
1. Hash passwords with bcrypt
|
||||
2. Add rate limiting (5 attempts/15min)
|
||||
3. Session expiry after 24h
|
||||
4. Add refresh token mechanism
|
||||
|
||||
Files to modify:
|
||||
- @src/auth/login.ts
|
||||
- @src/middleware/rateLimit.ts
|
||||
- @config/session.ts
|
||||
EOF
|
||||
```
|
||||
|
||||
### Backend-Specific Features
|
||||
|
||||
**Codex:**
|
||||
```bash
|
||||
# Best for code editing and refactoring
|
||||
codeagent-wrapper --backend codex - <<'EOF'
|
||||
extract duplicate code in @src into reusable helpers
|
||||
EOF
|
||||
```
|
||||
|
||||
**Claude:**
|
||||
```bash
|
||||
# Best for complex reasoning
|
||||
codeagent-wrapper --backend claude - <<'EOF'
|
||||
review @src/payment/processor.ts for:
|
||||
- Race conditions
|
||||
- Edge cases
|
||||
- Security vulnerabilities
|
||||
EOF
|
||||
```
|
||||
|
||||
**Gemini:**
|
||||
```bash
|
||||
# Best for fast iteration
|
||||
codeagent-wrapper --backend gemini "add TypeScript types to @api.js"
|
||||
```
|
||||
|
||||
## Output Format
|
||||
|
||||
Standard output includes parsed agent messages and session ID:
|
||||
|
||||
```
|
||||
Agent response text here...
|
||||
Implementation details...
|
||||
|
||||
---
|
||||
SESSION_ID: 019a7247-ac9d-71f3-89e2-a823dbd8fd14
|
||||
```
|
||||
|
||||
Error output (stderr):
|
||||
```
|
||||
ERROR: Error message details
|
||||
```
|
||||
|
||||
Parallel execution output:
|
||||
```
|
||||
=== Parallel Execution Summary ===
|
||||
Total: 3 | Success: 2 | Failed: 1
|
||||
|
||||
--- Task: backend_1701234567 ---
|
||||
Status: SUCCESS
|
||||
Session: 019a7247-ac9d-71f3-89e2-a823dbd8fd14
|
||||
|
||||
Implementation complete...
|
||||
|
||||
--- Task: frontend_1701234568 ---
|
||||
Status: SUCCESS
|
||||
Session: 019a7248-ac9d-71f3-89e2-a823dbd8fd14
|
||||
|
||||
UI components created...
|
||||
|
||||
--- Task: tests_1701234569 ---
|
||||
Status: FAILED (exit code 1)
|
||||
Error: dependency backend_1701234567 failed
|
||||
```
|
||||
|
||||
## Exit Codes
|
||||
|
||||
| Code | Meaning |
|
||||
|------|---------|
|
||||
| 0 | Success |
|
||||
| 1 | General error (missing args, no output) |
|
||||
| 124 | Timeout |
|
||||
| 127 | Backend command not found |
|
||||
| 130 | Interrupted (Ctrl+C) |
|
||||
| * | Passthrough from backend process |
|
||||
|
||||
## Environment Variables
|
||||
|
||||
| Variable | Default | Description |
|
||||
|----------|---------|-------------|
|
||||
| `CODEX_TIMEOUT` | 7200000 | Timeout in milliseconds |
|
||||
| `CODEX_BYPASS_SANDBOX` | true | Bypass Codex sandbox/approval. Set `false` to disable |
|
||||
| `CODEAGENT_SKIP_PERMISSIONS` | true | Skip Claude permission prompts. Set `false` to disable |
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
**Backend not found:**
|
||||
```bash
|
||||
# Ensure backend CLI is installed
|
||||
which codex
|
||||
which claude
|
||||
which gemini
|
||||
|
||||
# Check PATH
|
||||
echo $PATH
|
||||
```
|
||||
|
||||
**Timeout too short:**
|
||||
```bash
|
||||
# Increase timeout to 4 hours
|
||||
CODEX_TIMEOUT=14400000 codeagent-wrapper "complex task"
|
||||
```
|
||||
|
||||
**Session ID not found:**
|
||||
```bash
|
||||
# List recent sessions (backend-specific)
|
||||
codex history
|
||||
|
||||
# Ensure session ID is copied correctly
|
||||
codeagent-wrapper resume <session_id> "continue task"
|
||||
```
|
||||
|
||||
**Parallel tasks not running:**
|
||||
```bash
|
||||
# Check task format
|
||||
# Ensure ---TASK--- and ---CONTENT--- delimiters are correct
|
||||
# Verify task IDs are unique
|
||||
# Check dependencies reference existing task IDs
|
||||
```
|
||||
|
||||
## Integration with Claude Code
|
||||
|
||||
Use via the `codeagent` skill:
|
||||
|
||||
```bash
|
||||
# In Claude Code conversation
|
||||
User: Use codeagent to implement authentication
|
||||
|
||||
# Claude will execute:
|
||||
codeagent-wrapper --backend codex - <<'EOF'
|
||||
implement JWT authentication in @src/auth
|
||||
EOF
|
||||
```
|
||||
|
||||
## Performance Tips
|
||||
|
||||
1. **Use parallel execution** for independent tasks
|
||||
2. **Choose the right backend** for the task type
|
||||
3. **Keep working directory specific** to reduce context
|
||||
4. **Resume sessions** for multi-step workflows
|
||||
5. **Use @ syntax** to minimize file content in prompts
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **HEREDOC for complex tasks** - Avoid shell escaping nightmares
|
||||
2. **Descriptive task IDs** - Use `<feature>_<timestamp>` format
|
||||
3. **Absolute paths** - Avoid relative path confusion
|
||||
4. **Session resumption** - Continue conversations with context
|
||||
5. **Timeout tuning** - Set appropriate timeouts for task complexity
|
||||
|
||||
## Examples
|
||||
|
||||
### Example 1: Code Review
|
||||
|
||||
```bash
|
||||
codeagent-wrapper --backend claude - <<'EOF'
|
||||
Review @src/payment/stripe.ts for:
|
||||
1. Security issues (API key handling, input validation)
|
||||
2. Error handling (network failures, API errors)
|
||||
3. Edge cases (duplicate charges, partial refunds)
|
||||
4. Code quality (naming, structure, comments)
|
||||
EOF
|
||||
```
|
||||
|
||||
### Example 2: Refactoring
|
||||
|
||||
```bash
|
||||
codeagent-wrapper --backend codex - <<'EOF'
|
||||
Refactor @src/utils:
|
||||
- Extract duplicate code into helpers
|
||||
- Add TypeScript types
|
||||
- Improve function naming
|
||||
- Add JSDoc comments
|
||||
EOF
|
||||
```
|
||||
|
||||
### Example 3: Full-Stack Feature
|
||||
|
||||
```bash
|
||||
codeagent-wrapper --parallel <<'EOF'
|
||||
---TASK---
|
||||
id: api_1701234567
|
||||
workdir: /project/backend
|
||||
---CONTENT---
|
||||
implement /api/notifications endpoints with WebSocket support
|
||||
|
||||
---TASK---
|
||||
id: ui_1701234568
|
||||
workdir: /project/frontend
|
||||
dependencies: api_1701234567
|
||||
---CONTENT---
|
||||
build Notifications component with real-time updates
|
||||
|
||||
---TASK---
|
||||
id: tests_1701234569
|
||||
workdir: /project
|
||||
dependencies: api_1701234567, ui_1701234568
|
||||
---CONTENT---
|
||||
add E2E tests for notification flow
|
||||
EOF
|
||||
```
|
||||
|
||||
## Further Reading
|
||||
|
||||
- [Codex CLI Documentation](https://codex.docs)
|
||||
- [Claude CLI Documentation](https://claude.ai/docs)
|
||||
- [Gemini CLI Documentation](https://ai.google.dev/docs)
|
||||
- [Architecture Overview](./architecture.md)
|
||||
7
codeagent-wrapper/cmd/codeagent-wrapper/main.go
Normal file
7
codeagent-wrapper/cmd/codeagent-wrapper/main.go
Normal file
@@ -0,0 +1,7 @@
|
||||
package main
|
||||
|
||||
import app "codeagent-wrapper/internal/app"
|
||||
|
||||
func main() {
|
||||
app.Run()
|
||||
}
|
||||
43
codeagent-wrapper/go.mod
Normal file
43
codeagent-wrapper/go.mod
Normal file
@@ -0,0 +1,43 @@
|
||||
module codeagent-wrapper
|
||||
|
||||
go 1.21
|
||||
|
||||
require (
|
||||
github.com/goccy/go-json v0.10.5
|
||||
github.com/rs/zerolog v1.34.0
|
||||
github.com/shirou/gopsutil/v3 v3.24.5
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/spf13/viper v1.19.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.19 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||
github.com/sagikazarmark/locafero v0.4.0 // indirect
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
|
||||
github.com/shoenig/go-m1cpu v0.1.6 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/afero v1.11.0 // indirect
|
||||
github.com/spf13/cast v1.6.0 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
go.uber.org/atomic v1.9.0 // indirect
|
||||
go.uber.org/multierr v1.9.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
|
||||
golang.org/x/sys v0.20.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
117
codeagent-wrapper/go.sum
Normal file
117
codeagent-wrapper/go.sum
Normal file
@@ -0,0 +1,117 @@
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
|
||||
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
|
||||
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
||||
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
|
||||
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
|
||||
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
|
||||
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
|
||||
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
|
||||
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
|
||||
github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI=
|
||||
github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk=
|
||||
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
|
||||
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
|
||||
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
|
||||
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
|
||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
||||
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
|
||||
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
|
||||
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
|
||||
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
|
||||
github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
|
||||
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
||||
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
|
||||
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
|
||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
|
||||
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
|
||||
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
150
codeagent-wrapper/internal/app/agent_validation_test.go
Normal file
150
codeagent-wrapper/internal/app/agent_validation_test.go
Normal file
@@ -0,0 +1,150 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
config "codeagent-wrapper/internal/config"
|
||||
executor "codeagent-wrapper/internal/executor"
|
||||
)
|
||||
|
||||
func TestValidateAgentName(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
wantErr bool
|
||||
}{
|
||||
{name: "simple", input: "develop", wantErr: false},
|
||||
{name: "upper", input: "ABC", wantErr: false},
|
||||
{name: "digits", input: "a1", wantErr: false},
|
||||
{name: "dash underscore", input: "a-b_c", wantErr: false},
|
||||
{name: "empty", input: "", wantErr: true},
|
||||
{name: "space", input: "a b", wantErr: true},
|
||||
{name: "slash", input: "a/b", wantErr: true},
|
||||
{name: "dotdot", input: "../evil", wantErr: true},
|
||||
{name: "unicode", input: "中文", wantErr: true},
|
||||
{name: "symbol", input: "a$b", wantErr: true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := config.ValidateAgentName(tt.input)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Fatalf("validateAgentName(%q) err=%v, wantErr=%v", tt.input, err, tt.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseArgs_InvalidAgentNameRejected(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
os.Args = []string{"codeagent-wrapper", "--agent", "../evil", "task"}
|
||||
if _, err := parseArgs(); err == nil {
|
||||
t.Fatalf("expected parseArgs to reject invalid agent name")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseParallelConfig_InvalidAgentNameRejected(t *testing.T) {
|
||||
input := `---TASK---
|
||||
id: task-1
|
||||
agent: ../evil
|
||||
---CONTENT---
|
||||
do something`
|
||||
if _, err := parseParallelConfig([]byte(input)); err == nil {
|
||||
t.Fatalf("expected parseParallelConfig to reject invalid agent name")
|
||||
}
|
||||
}
|
||||
|
||||
// TestParseParallelConfig_ResolvesAgentPromptFile verifies that a parallel
// task referencing an agent preset from ~/.codeagent/models.json inherits
// that preset's prompt_file value, left unexpanded (tilde intact).
func TestParseParallelConfig_ResolvesAgentPromptFile(t *testing.T) {
	// Redirect HOME/USERPROFILE to a temp dir so the fixture models.json
	// written below is the one the config loader finds on any platform.
	home := t.TempDir()
	t.Setenv("HOME", home)
	t.Setenv("USERPROFILE", home)
	// The models config is cached; reset before the test so the fixture is
	// loaded, and again on cleanup so later tests are unaffected.
	t.Cleanup(config.ResetModelsConfigCacheForTest)
	config.ResetModelsConfigCacheForTest()

	configDir := filepath.Join(home, ".codeagent")
	if err := os.MkdirAll(configDir, 0o755); err != nil {
		t.Fatalf("MkdirAll: %v", err)
	}
	// Fixture: a single agent preset carrying a prompt_file entry.
	if err := os.WriteFile(filepath.Join(configDir, "models.json"), []byte(`{
  "default_backend": "codex",
  "default_model": "gpt-test",
  "agents": {
    "custom-agent": {
      "backend": "codex",
      "model": "gpt-test",
      "prompt_file": "~/.claude/prompt.md"
    }
  }
}`), 0o644); err != nil {
		t.Fatalf("WriteFile: %v", err)
	}

	input := `---TASK---
id: task-1
agent: custom-agent
---CONTENT---
do something`
	cfg, err := parseParallelConfig([]byte(input))
	if err != nil {
		t.Fatalf("parseParallelConfig() unexpected error: %v", err)
	}
	if len(cfg.Tasks) != 1 {
		t.Fatalf("expected 1 task, got %d", len(cfg.Tasks))
	}
	// The tilde must NOT be expanded at parse time; expansion happens later
	// when the prompt file is actually read.
	if got := cfg.Tasks[0].PromptFile; got != "~/.claude/prompt.md" {
		t.Fatalf("PromptFile = %q, want %q", got, "~/.claude/prompt.md")
	}
}
|
||||
|
||||
// TestDefaultRunCodexTaskFn_AppliesAgentPromptFile verifies that when a task
// spec carries a PromptFile, the file's contents are wrapped in
// <agent-prompt> tags and prepended to the task text sent on stdin.
func TestDefaultRunCodexTaskFn_AppliesAgentPromptFile(t *testing.T) {
	defer resetTestHooks()

	// Point the home dir at a temp dir so ~/.claude/prompt.md resolves there.
	home := t.TempDir()
	t.Setenv("HOME", home)
	t.Setenv("USERPROFILE", home)

	claudeDir := filepath.Join(home, ".claude")
	if err := os.MkdirAll(claudeDir, 0o755); err != nil {
		t.Fatalf("MkdirAll: %v", err)
	}
	// Prompt file body is "P\n"; trailing newline is trimmed when read.
	if err := os.WriteFile(filepath.Join(claudeDir, "prompt.md"), []byte("P\n"), 0o644); err != nil {
		t.Fatalf("WriteFile: %v", err)
	}

	// Fake backend process emitting one completed agent_message event.
	fake := newFakeCmd(fakeCmdConfig{
		StdoutPlan: []fakeStdoutEvent{
			{Data: `{"type":"item.completed","item":{"type":"agent_message","text":"ok"}}` + "\n"},
		},
		WaitDelay: 2 * time.Millisecond,
	})

	// Inject the fake command runner and a stub backend whose args are just
	// the target argument; resetTestHooks (deferred above) restores both.
	_ = executor.SetNewCommandRunner(func(ctx context.Context, name string, args ...string) executor.CommandRunner { return fake })
	_ = executor.SetSelectBackendFn(func(name string) (Backend, error) {
		return testBackend{
			name:    name,
			command: "fake-cmd",
			argsFn: func(cfg *Config, targetArg string) []string {
				return []string{targetArg}
			},
		}, nil
	})

	res := defaultRunCodexTaskFn(TaskSpec{
		ID:         "t",
		Task:       "do",
		Backend:    "codex",
		PromptFile: "~/.claude/prompt.md",
	}, 5)
	if res.ExitCode != 0 {
		t.Fatalf("unexpected result: %+v", res)
	}

	// Stdin must contain the wrapped prompt followed by the raw task.
	want := "<agent-prompt>\nP\n</agent-prompt>\n\ndo"
	if got := fake.StdinContents(); got != want {
		t.Fatalf("stdin mismatch:\n got=%q\nwant=%q", got, want)
	}
}
|
||||
279
codeagent-wrapper/internal/app/app.go
Normal file
279
codeagent-wrapper/internal/app/app.go
Normal file
@@ -0,0 +1,279 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// version is the build version string; "dev" unless overridden at link time.
var version = "dev"

const (
	defaultWorkdir        = "."
	defaultTimeout        = 7200 // seconds (2 hours)
	defaultCoverageTarget = 90.0
	codexLogLineLimit     = 1000
	// Characters in stdin input that require special handling when quoting.
	stdinSpecialChars  = "\n\\\"'`$"
	stderrCaptureLimit = 4 * 1024
	defaultBackendName = "codex"
	defaultCodexCommand = "codex"

	// stdout close reasons
	stdoutCloseReasonWait  = "wait-done"
	stdoutCloseReasonDrain = "drain-timeout"
	stdoutCloseReasonCtx   = "context-cancel"
	stdoutDrainTimeout     = 500 * time.Millisecond
)

// Test hooks for dependency injection. Tests reassign these and restore them
// afterwards (see resetTestHooks); production code uses the defaults.
var (
	stdinReader  io.Reader = os.Stdin
	isTerminalFn           = defaultIsTerminal
	codexCommand           = defaultCodexCommand
	// cleanupHook, when non-nil, runs during runCleanupHook after log flush.
	cleanupHook func()
	// startupCleanupAsync selects goroutine vs. synchronous startup cleanup.
	startupCleanupAsync = true

	buildCodexArgsFn   = buildCodexArgs
	selectBackendFn    = selectBackend
	cleanupLogsFn      = cleanupOldLogs
	defaultBuildArgsFn = buildCodexArgs
	runTaskFn          = runCodexTask
	exitFn             = os.Exit
)
|
||||
|
||||
func runStartupCleanup() {
|
||||
if cleanupLogsFn == nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
logWarn(fmt.Sprintf("cleanupOldLogs panic: %v", r))
|
||||
}
|
||||
}()
|
||||
if _, err := cleanupLogsFn(); err != nil {
|
||||
logWarn(fmt.Sprintf("cleanupOldLogs error: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
// scheduleStartupCleanup triggers old-log cleanup at startup. When
// startupCleanupAsync is true the work runs in a background goroutine so it
// never delays the command; otherwise it runs synchronously via
// runStartupCleanup (deterministic path used in tests).
func scheduleStartupCleanup() {
	if !startupCleanupAsync {
		runStartupCleanup()
		return
	}
	if cleanupLogsFn == nil {
		return
	}
	// Capture the hook before launching the goroutine so a concurrent
	// reassignment (e.g. by a test) cannot race with the call below.
	fn := cleanupLogsFn
	go func() {
		// Best-effort: recover panics and log; never crash the process.
		defer func() {
			if r := recover(); r != nil {
				logWarn(fmt.Sprintf("cleanupOldLogs panic: %v", r))
			}
		}()
		if _, err := fn(); err != nil {
			logWarn(fmt.Sprintf("cleanupOldLogs error: %v", err))
		}
	}()
}
|
||||
|
||||
// runCleanupMode runs log cleanup once and prints a human-readable report to
// stdout. It returns a process exit code: 0 on success, 1 when cleanup is
// unconfigured or fails outright.
func runCleanupMode() int {
	if cleanupLogsFn == nil {
		fmt.Fprintln(os.Stderr, "Cleanup failed: log cleanup function not configured")
		return 1
	}

	stats, err := cleanupLogsFn()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Cleanup failed: %v\n", err)
		return 1
	}

	fmt.Println("Cleanup completed")
	fmt.Printf("Files scanned: %d\n", stats.Scanned)
	fmt.Printf("Files deleted: %d\n", stats.Deleted)
	if len(stats.DeletedFiles) > 0 {
		for _, f := range stats.DeletedFiles {
			fmt.Printf("  - %s\n", f)
		}
	}
	fmt.Printf("Files kept: %d\n", stats.Kept)
	if len(stats.KeptFiles) > 0 {
		for _, f := range stats.KeptFiles {
			fmt.Printf("  - %s\n", f)
		}
	}
	// Per-file deletion errors are reported but do not fail the command.
	if stats.Errors > 0 {
		fmt.Printf("Deletion errors: %d\n", stats.Errors)
	}
	return 0
}
|
||||
|
||||
// readAgentPromptFile reads an agent prompt file after expanding a leading
// tilde and validating that the path lies under ~/.claude or
// ~/.codeagent/agents. With allowOutsideClaudeDir=false, paths outside the
// allowed dirs (including via symlinks) are rejected with an error; with
// allowOutsideClaudeDir=true they are merely logged as warnings. Returns the
// file contents with trailing CR/LF trimmed; an empty/blank path yields
// ("", nil).
func readAgentPromptFile(path string, allowOutsideClaudeDir bool) (string, error) {
	raw := strings.TrimSpace(path)
	if raw == "" {
		return "", nil
	}

	// Expand "~", "~/..." and "~\..." (Windows) against the user home dir.
	expanded := raw
	if raw == "~" || strings.HasPrefix(raw, "~/") || strings.HasPrefix(raw, "~\\") {
		home, err := os.UserHomeDir()
		if err != nil {
			return "", err
		}
		if raw == "~" {
			expanded = home
		} else {
			expanded = home + raw[1:]
		}
	}

	// Canonicalize lexically before any containment check.
	absPath, err := filepath.Abs(expanded)
	if err != nil {
		return "", err
	}
	absPath = filepath.Clean(absPath)

	home, err := os.UserHomeDir()
	if err != nil {
		// Without a home dir the allowed-dir check cannot be performed.
		// Strict mode fails; permissive mode proceeds with a warning.
		if !allowOutsideClaudeDir {
			return "", err
		}
		logWarn(fmt.Sprintf("Failed to resolve home directory for prompt file validation: %v; proceeding without restriction", err))
	} else {
		allowedDirs := []string{
			filepath.Clean(filepath.Join(home, ".claude")),
			filepath.Clean(filepath.Join(home, ".codeagent", "agents")),
		}
		for i := range allowedDirs {
			allowedAbs, err := filepath.Abs(allowedDirs[i])
			if err == nil {
				allowedDirs[i] = filepath.Clean(allowedAbs)
			}
		}

		// isWithinDir reports whether path is dir itself or lexically below
		// it (i.e. the relative path does not start with "..").
		isWithinDir := func(path, dir string) bool {
			rel, err := filepath.Rel(dir, path)
			if err != nil {
				return false
			}
			rel = filepath.Clean(rel)
			if rel == "." {
				return true
			}
			if rel == ".." {
				return false
			}
			prefix := ".." + string(os.PathSeparator)
			return !strings.HasPrefix(rel, prefix)
		}

		if !allowOutsideClaudeDir {
			// Strict mode: the lexical path must sit under an allowed dir.
			withinAllowed := false
			for _, dir := range allowedDirs {
				if isWithinDir(absPath, dir) {
					withinAllowed = true
					break
				}
			}
			if !withinAllowed {
				logWarn(fmt.Sprintf("Refusing to read prompt file outside allowed dirs (%s): %s", strings.Join(allowedDirs, ", "), absPath))
				return "", fmt.Errorf("prompt file must be under ~/.claude or ~/.codeagent/agents")
			}

			// Second pass: resolve symlinks on both the target and the
			// allowed dirs so a link inside ~/.claude cannot escape. If
			// symlink resolution fails (e.g. file does not exist yet), the
			// lexical check above stands alone.
			resolvedPath, errPath := filepath.EvalSymlinks(absPath)
			if errPath == nil {
				resolvedPath = filepath.Clean(resolvedPath)
				resolvedAllowed := make([]string, 0, len(allowedDirs))
				for _, dir := range allowedDirs {
					resolvedBase, errBase := filepath.EvalSymlinks(dir)
					if errBase != nil {
						continue
					}
					resolvedAllowed = append(resolvedAllowed, filepath.Clean(resolvedBase))
				}
				if len(resolvedAllowed) > 0 {
					withinResolved := false
					for _, dir := range resolvedAllowed {
						if isWithinDir(resolvedPath, dir) {
							withinResolved = true
							break
						}
					}
					if !withinResolved {
						logWarn(fmt.Sprintf("Refusing to read prompt file outside allowed dirs (%s) (resolved): %s", strings.Join(resolvedAllowed, ", "), resolvedPath))
						return "", fmt.Errorf("prompt file must be under ~/.claude or ~/.codeagent/agents")
					}
				}
			}
		} else {
			// Permissive mode: only warn when outside the allowed dirs.
			withinAllowed := false
			for _, dir := range allowedDirs {
				if isWithinDir(absPath, dir) {
					withinAllowed = true
					break
				}
			}
			if !withinAllowed {
				logWarn(fmt.Sprintf("Reading prompt file outside allowed dirs (%s): %s", strings.Join(allowedDirs, ", "), absPath))
			}
		}
	}

	data, err := os.ReadFile(absPath)
	if err != nil {
		return "", err
	}
	return strings.TrimRight(string(data), "\r\n"), nil
}
|
||||
|
||||
// wrapTaskWithAgentPrompt prefixes task with the agent prompt wrapped in
// <agent-prompt> tags, separated from the task text by a blank line.
func wrapTaskWithAgentPrompt(prompt string, task string) string {
	var b strings.Builder
	b.WriteString("<agent-prompt>\n")
	b.WriteString(prompt)
	b.WriteString("\n</agent-prompt>\n\n")
	b.WriteString(task)
	return b.String()
}
|
||||
|
||||
func runCleanupHook() {
|
||||
if logger := activeLogger(); logger != nil {
|
||||
logger.Flush()
|
||||
}
|
||||
if cleanupHook != nil {
|
||||
cleanupHook()
|
||||
}
|
||||
}
|
||||
|
||||
// printHelp writes the top-level usage text to stdout. The wrapper binary's
// current name is substituted everywhere via the %[1]s verb so a renamed
// install shows accurate examples.
func printHelp() {
	name := currentWrapperName()
	help := fmt.Sprintf(`%[1]s - Go wrapper for AI CLI backends

Usage:
  %[1]s "task" [workdir]
  %[1]s --backend claude "task" [workdir]
  %[1]s --prompt-file /path/to/prompt.md "task" [workdir]
  %[1]s - [workdir]                Read task from stdin
  %[1]s resume <session_id> "task" [workdir]
  %[1]s resume <session_id> - [workdir]
  %[1]s --parallel                 Run tasks in parallel (config from stdin)
  %[1]s --parallel --full-output   Run tasks in parallel with full output (legacy)
  %[1]s --version
  %[1]s --help

Parallel mode examples:
  %[1]s --parallel < tasks.txt
  echo '...' | %[1]s --parallel
  %[1]s --parallel --full-output < tasks.txt
  %[1]s --parallel <<'EOF'

Environment Variables:
  CODEX_TIMEOUT            Timeout in milliseconds (default: 7200000)
  CODEAGENT_ASCII_MODE     Use ASCII symbols instead of Unicode (PASS/WARN/FAIL)

Exit Codes:
  0    Success
  1    General error (missing args, no output)
  124  Timeout
  127  backend command not found
  130  Interrupted (Ctrl+C)
  *    Passthrough from backend process`, name)
	fmt.Println(help)
}
|
||||
9
codeagent-wrapper/internal/app/backend.go
Normal file
9
codeagent-wrapper/internal/app/backend.go
Normal file
@@ -0,0 +1,9 @@
|
||||
package wrapper
|
||||
|
||||
import backend "codeagent-wrapper/internal/backend"
|
||||
|
||||
// Type aliases re-exporting internal/backend types so the rest of this
// package can refer to them by their short names.
type Backend = backend.Backend
type CodexBackend = backend.CodexBackend
type ClaudeBackend = backend.ClaudeBackend
type GeminiBackend = backend.GeminiBackend
type OpencodeBackend = backend.OpencodeBackend
|
||||
7
codeagent-wrapper/internal/app/backend_init.go
Normal file
7
codeagent-wrapper/internal/app/backend_init.go
Normal file
@@ -0,0 +1,7 @@
|
||||
package wrapper
|
||||
|
||||
import backend "codeagent-wrapper/internal/backend"
|
||||
|
||||
// init wires this package's logging functions into the backend package so
// backend implementations emit warnings and errors through the shared logger.
func init() {
	backend.SetLogFuncs(logWarn, logError)
}
|
||||
5
codeagent-wrapper/internal/app/backend_registry.go
Normal file
5
codeagent-wrapper/internal/app/backend_registry.go
Normal file
@@ -0,0 +1,5 @@
|
||||
package wrapper
|
||||
|
||||
import backend "codeagent-wrapper/internal/backend"
|
||||
|
||||
// selectBackend resolves a backend by name via the backend package registry.
func selectBackend(name string) (Backend, error) {
	return backend.Select(name)
}
|
||||
116
codeagent-wrapper/internal/app/bench_test.go
Normal file
116
codeagent-wrapper/internal/app/bench_test.go
Normal file
@@ -0,0 +1,116 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
config "codeagent-wrapper/internal/config"
|
||||
)
|
||||
|
||||
// Package-level sinks that benchmark loops write into so the compiler cannot
// dead-code-eliminate the measured work.
var (
	benchCmdSink      any
	benchConfigSink   *Config
	benchMessageSink  string
	benchThreadIDSink string
)
|
||||
|
||||
// BenchmarkStartup_NewRootCommand measures CLI startup overhead (command+flags construction).
func BenchmarkStartup_NewRootCommand(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		// Store into the sink so construction is not optimized away.
		benchCmdSink = newRootCommand()
	}
}
|
||||
|
||||
// BenchmarkConfigParse_ParseArgs measures config parsing from argv/env (steady-state).
func BenchmarkConfigParse_ParseArgs(b *testing.B) {
	// Redirect the home dir so the fixture models.json below is loaded.
	home := b.TempDir()
	b.Setenv("HOME", home)
	b.Setenv("USERPROFILE", home)

	configDir := filepath.Join(home, ".codeagent")
	if err := os.MkdirAll(configDir, 0o755); err != nil {
		b.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(configDir, "models.json"), []byte(`{
  "agents": {
    "develop": { "backend": "codex", "model": "gpt-test" }
  }
}`), 0o644); err != nil {
		b.Fatal(err)
	}

	// Reset the models-config cache so the fixture is picked up, and again
	// on cleanup so other tests are unaffected.
	config.ResetModelsConfigCacheForTest()
	b.Cleanup(config.ResetModelsConfigCacheForTest)

	origArgs := os.Args
	os.Args = []string{"codeagent-wrapper", "--agent", "develop", "task"}
	b.Cleanup(func() { os.Args = origArgs })

	// Warm up once (and fail fast on misconfiguration) before timing.
	if _, err := parseArgs(); err != nil {
		b.Fatalf("warmup parseArgs() error: %v", err)
	}

	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cfg, err := parseArgs()
		if err != nil {
			b.Fatalf("parseArgs() error: %v", err)
		}
		benchConfigSink = cfg
	}
}
|
||||
|
||||
// BenchmarkJSONParse_ParseJSONStreamInternal measures line-delimited JSON stream parsing.
func BenchmarkJSONParse_ParseJSONStreamInternal(b *testing.B) {
	// A minimal three-event stream: thread start, one agent message, thread end.
	stream := []byte(
		`{"type":"thread.started","thread_id":"t"}` + "\n" +
			`{"type":"item.completed","item":{"type":"agent_message","text":"hello"}}` + "\n" +
			`{"type":"thread.completed","thread_id":"t"}` + "\n",
	)
	b.SetBytes(int64(len(stream)))

	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		message, threadID := parseJSONStreamInternal(bytes.NewReader(stream), nil, nil, nil, nil)
		// Sink the results so parsing is not optimized away.
		benchMessageSink = message
		benchThreadIDSink = threadID
	}
}
|
||||
|
||||
// BenchmarkLoggerWrite measures sequential log write throughput.
func BenchmarkLoggerWrite(b *testing.B) {
	logger, err := NewLogger()
	if err != nil {
		b.Fatal(err)
	}
	defer logger.Close()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		logger.Info("benchmark log message")
	}
	// Flush outside the timed region so only Info calls are measured.
	b.StopTimer()
	logger.Flush()
}
|
||||
|
||||
// BenchmarkLoggerConcurrentWrite measures log write throughput under
// concurrent writers (one goroutine per GOMAXPROCS via RunParallel).
func BenchmarkLoggerConcurrentWrite(b *testing.B) {
	logger, err := NewLogger()
	if err != nil {
		b.Fatal(err)
	}
	defer logger.Close()

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			logger.Info("concurrent benchmark log message")
		}
	})
	// Flush outside the timed region so only Info calls are measured.
	b.StopTimer()
	logger.Flush()
}
|
||||
669
codeagent-wrapper/internal/app/cli.go
Normal file
669
codeagent-wrapper/internal/app/cli.go
Normal file
@@ -0,0 +1,669 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
config "codeagent-wrapper/internal/config"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// exitError carries a desired process exit code through cobra's RunE error
// path; run() unwraps it with errors.As to produce the final exit status.
type exitError struct {
	code int // process exit code to propagate
}

// Error reports the wrapped exit code, e.g. "exit 2".
func (e exitError) Error() string {
	return fmt.Sprintf("exit %d", e.code)
}
|
||||
|
||||
// cliOptions holds all root-command flag values; addRootFlags binds each
// field to its corresponding flag.
type cliOptions struct {
	Backend         string // --backend: codex, claude, gemini, opencode
	Model           string // --model: model override
	ReasoningEffort string // --reasoning-effort (backend-specific)
	Agent           string // --agent: preset name from ~/.codeagent/models.json
	PromptFile      string // --prompt-file: path to a prompt file
	SkipPermissions bool   // --skip-permissions / --dangerously-skip-permissions

	Parallel   bool // --parallel: run tasks in parallel (config from stdin)
	FullOutput bool // --full-output: include full task output (legacy)

	Cleanup    bool   // --cleanup: clean old logs and exit
	Version    bool   // --version/-v: print version and exit
	ConfigFile string // --config: explicit config file path
}
|
||||
|
||||
// Main delegates to Run; retained as an additional exported entrypoint.
func Main() {
	Run()
}
|
||||
|
||||
// Run is the program entrypoint for cmd/codeagent/main.go. It executes the
// root command and terminates the process through exitFn (os.Exit by
// default; replaceable in tests).
func Run() {
	exitFn(run())
}
|
||||
|
||||
func run() int {
|
||||
cmd := newRootCommand()
|
||||
cmd.SetArgs(os.Args[1:])
|
||||
if err := cmd.Execute(); err != nil {
|
||||
var ee exitError
|
||||
if errors.As(err, &ee) {
|
||||
return ee.code
|
||||
}
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// newRootCommand constructs the root cobra command with all flags and the
// version/cleanup subcommands attached. The returned command dispatches to
// version printing, cleanup mode, parallel mode, or single-task mode.
func newRootCommand() *cobra.Command {
	name := currentWrapperName()
	opts := &cliOptions{}

	cmd := &cobra.Command{
		Use:           fmt.Sprintf("%s [flags] <task>|resume <session_id> <task> [workdir]", name),
		Short:         "Go wrapper for AI CLI backends",
		SilenceErrors: true,
		SilenceUsage:  true,
		Args:          cobra.ArbitraryArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			// --version and --cleanup short-circuit before logger setup.
			if opts.Version {
				fmt.Printf("%s version %s\n", name, version)
				return nil
			}
			if opts.Cleanup {
				code := runCleanupMode()
				if code == 0 {
					return nil
				}
				return exitError{code: code}
			}

			// Everything else runs inside logger setup/teardown.
			exitCode := runWithLoggerAndCleanup(func() int {
				v, err := config.NewViper(opts.ConfigFile)
				if err != nil {
					logError(err.Error())
					return 1
				}

				if opts.Parallel {
					return runParallelMode(cmd, args, opts, v, name)
				}

				logInfo("Script started")

				cfg, err := buildSingleConfig(cmd, args, os.Args[1:], opts, v)
				if err != nil {
					logError(err.Error())
					return 1
				}
				logInfo(fmt.Sprintf("Parsed args: mode=%s, task_len=%d, backend=%s", cfg.Mode, len(cfg.Task), cfg.Backend))
				return runSingleMode(cfg, name)
			})

			// Non-zero exit codes travel back to run() as exitError.
			if exitCode == 0 {
				return nil
			}
			return exitError{code: exitCode}
		},
	}
	cmd.CompletionOptions.DisableDefaultCmd = true

	addRootFlags(cmd.Flags(), opts)
	cmd.AddCommand(newVersionCommand(name), newCleanupCommand())

	return cmd
}
|
||||
|
||||
// addRootFlags registers every root-level CLI flag on fs, binding each
// flag to the corresponding field of opts.
func addRootFlags(fs *pflag.FlagSet, opts *cliOptions) {
	fs.StringVar(&opts.ConfigFile, "config", "", "Config file path (default: $HOME/.codeagent/config.*)")
	fs.BoolVarP(&opts.Version, "version", "v", false, "Print version and exit")
	fs.BoolVar(&opts.Cleanup, "cleanup", false, "Clean up old logs and exit")

	// Parallel-mode flags.
	fs.BoolVar(&opts.Parallel, "parallel", false, "Run tasks in parallel (config from stdin)")
	fs.BoolVar(&opts.FullOutput, "full-output", false, "Parallel mode: include full task output (legacy)")

	// Backend selection and prompt configuration.
	fs.StringVar(&opts.Backend, "backend", defaultBackendName, "Backend to use (codex, claude, gemini, opencode)")
	fs.StringVar(&opts.Model, "model", "", "Model override")
	fs.StringVar(&opts.ReasoningEffort, "reasoning-effort", "", "Reasoning effort (backend-specific)")
	fs.StringVar(&opts.Agent, "agent", "", "Agent preset name (from ~/.codeagent/models.json)")
	fs.StringVar(&opts.PromptFile, "prompt-file", "", "Prompt file path")

	// Both spellings write the same field, so either flag enables it.
	fs.BoolVar(&opts.SkipPermissions, "skip-permissions", false, "Skip permissions prompts (also via CODEAGENT_SKIP_PERMISSIONS)")
	fs.BoolVar(&opts.SkipPermissions, "dangerously-skip-permissions", false, "Alias for --skip-permissions")
}
|
||||
|
||||
func newVersionCommand(name string) *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Print version and exit",
|
||||
SilenceErrors: true,
|
||||
SilenceUsage: true,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf("%s version %s\n", name, version)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newCleanupCommand() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "cleanup",
|
||||
Short: "Clean up old logs and exit",
|
||||
SilenceErrors: true,
|
||||
SilenceUsage: true,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
code := runCleanupMode()
|
||||
if code == 0 {
|
||||
return nil
|
||||
}
|
||||
return exitError{code: code}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// runWithLoggerAndCleanup runs fn inside the logger lifecycle: it
// initializes the global logger, schedules startup cleanup of stale
// logs, and on return flushes and closes the logger. When fn exits
// non-zero, the most recent error entries are echoed to stderr before
// the log file is removed. The named result exitCode exists so the
// deferred closure can observe fn's return value.
func runWithLoggerAndCleanup(fn func() int) (exitCode int) {
	ensureExecutableTempDir()
	logger, err := NewLogger()
	if err != nil {
		fmt.Fprintf(os.Stderr, "ERROR: failed to initialize logger: %v\n", err)
		return 1
	}
	setLogger(logger)

	defer func() {
		// Re-read the active logger: it may have been swapped or cleared.
		logger := activeLogger()
		if logger != nil {
			logger.Flush()
		}
		if err := closeLogger(); err != nil {
			fmt.Fprintf(os.Stderr, "ERROR: failed to close logger: %v\n", err)
		}
		if logger == nil {
			return
		}

		// On failure, surface the tail of the log before deleting it.
		if exitCode != 0 {
			if entries := logger.ExtractRecentErrors(10); len(entries) > 0 {
				fmt.Fprintln(os.Stderr, "\n=== Recent Errors ===")
				for _, entry := range entries {
					fmt.Fprintln(os.Stderr, entry)
				}
				fmt.Fprintf(os.Stderr, "Log file: %s (deleted)\n", logger.Path())
			}
		}
		// Best-effort removal; the log has already served its purpose.
		_ = logger.RemoveLogFile()
	}()
	// Registered after the logger teardown, so it runs first (LIFO).
	defer runCleanupHook()

	// Clean up stale logs from previous runs.
	scheduleStartupCleanup()

	return fn()
}
|
||||
|
||||
// parseArgs parses os.Args with a throwaway cobra command carrying the
// same flag set as the root command, then resolves the effective
// single-task Config. Standalone helper kept alongside the cobra RunE
// path (e.g. for tests and legacy callers).
func parseArgs() (*Config, error) {
	opts := &cliOptions{}
	cmd := &cobra.Command{SilenceErrors: true, SilenceUsage: true, Args: cobra.ArbitraryArgs}
	addRootFlags(cmd.Flags(), opts)

	rawArgv := os.Args[1:]
	if err := cmd.ParseFlags(rawArgv); err != nil {
		return nil, err
	}
	args := cmd.Flags().Args()

	v, err := config.NewViper(opts.ConfigFile)
	if err != nil {
		return nil, err
	}

	return buildSingleConfig(cmd, args, rawArgv, opts, v)
}
|
||||
|
||||
// buildSingleConfig resolves the effective single-task Config from, in
// decreasing precedence: explicit CLI flags, config-file values (viper),
// and the --agent preset. rawArgv is the raw argument vector; it breaks
// ties when --agent and a conflicting flag are both present (the flag
// given last on the command line wins). args are the positional
// arguments after flag parsing: either "<task> [workdir]" or
// "resume <session_id> <task> [workdir]".
func buildSingleConfig(cmd *cobra.Command, args []string, rawArgv []string, opts *cliOptions, v *viper.Viper) (*Config, error) {
	backendName := defaultBackendName
	model := ""
	reasoningEffort := ""
	agentName := ""
	promptFile := ""
	promptFileExplicit := false
	yolo := false

	// Agent name: flag takes precedence over the config file; both
	// sources are validated the same way.
	if cmd.Flags().Changed("agent") {
		agentName = strings.TrimSpace(opts.Agent)
		if agentName == "" {
			return nil, fmt.Errorf("--agent flag requires a value")
		}
		if err := config.ValidateAgentName(agentName); err != nil {
			return nil, fmt.Errorf("--agent flag invalid value: %w", err)
		}
	} else {
		agentName = strings.TrimSpace(v.GetString("agent"))
		if agentName != "" {
			if err := config.ValidateAgentName(agentName); err != nil {
				return nil, fmt.Errorf("--agent flag invalid value: %w", err)
			}
		}
	}

	// Resolve the agent preset once; its values act as fallbacks below.
	var resolvedBackend, resolvedModel, resolvedPromptFile, resolvedReasoning string
	if agentName != "" {
		var resolvedYolo bool
		var err error
		resolvedBackend, resolvedModel, resolvedPromptFile, resolvedReasoning, _, _, resolvedYolo, err = config.ResolveAgentConfig(agentName)
		if err != nil {
			return nil, fmt.Errorf("failed to resolve agent %q: %w", agentName, err)
		}
		yolo = resolvedYolo
	}

	// Prompt file: flag > config file > agent preset. Explicit sources
	// are flagged so a missing file can be treated as a hard error.
	if cmd.Flags().Changed("prompt-file") {
		promptFile = strings.TrimSpace(opts.PromptFile)
		if promptFile == "" {
			return nil, fmt.Errorf("--prompt-file flag requires a value")
		}
		promptFileExplicit = true
	} else if val := strings.TrimSpace(v.GetString("prompt-file")); val != "" {
		promptFile = val
		promptFileExplicit = true
	} else {
		promptFile = resolvedPromptFile
	}

	agentFlagChanged := cmd.Flags().Changed("agent")
	backendFlagChanged := cmd.Flags().Changed("backend")
	if backendFlagChanged {
		backendName = strings.TrimSpace(opts.Backend)
		if backendName == "" {
			return nil, fmt.Errorf("--backend flag requires a value")
		}
	}

	// Backend precedence: when both --agent and --backend appear, the
	// later one on the command line wins; otherwise agent preset, then
	// config file, then the built-in default.
	switch {
	case agentFlagChanged && backendFlagChanged && lastFlagIndex(rawArgv, "agent") > lastFlagIndex(rawArgv, "backend"):
		backendName = resolvedBackend
	case !backendFlagChanged && agentName != "":
		backendName = resolvedBackend
	case !backendFlagChanged:
		if val := strings.TrimSpace(v.GetString("backend")); val != "" {
			backendName = val
		}
	}

	modelFlagChanged := cmd.Flags().Changed("model")
	if modelFlagChanged {
		model = strings.TrimSpace(opts.Model)
		if model == "" {
			return nil, fmt.Errorf("--model flag requires a value")
		}
	}

	// Model precedence mirrors the backend rules above.
	switch {
	case agentFlagChanged && modelFlagChanged && lastFlagIndex(rawArgv, "agent") > lastFlagIndex(rawArgv, "model"):
		model = strings.TrimSpace(resolvedModel)
	case !modelFlagChanged && agentName != "":
		model = strings.TrimSpace(resolvedModel)
	case !modelFlagChanged:
		model = strings.TrimSpace(v.GetString("model"))
	}

	// Reasoning effort: flag > config file > agent preset.
	if cmd.Flags().Changed("reasoning-effort") {
		reasoningEffort = strings.TrimSpace(opts.ReasoningEffort)
		if reasoningEffort == "" {
			return nil, fmt.Errorf("--reasoning-effort flag requires a value")
		}
	} else if val := strings.TrimSpace(v.GetString("reasoning-effort")); val != "" {
		reasoningEffort = val
	} else if agentName != "" {
		reasoningEffort = strings.TrimSpace(resolvedReasoning)
	}

	// Either skip-permissions spelling counts as an explicit choice.
	skipChanged := cmd.Flags().Changed("skip-permissions") || cmd.Flags().Changed("dangerously-skip-permissions")
	skipPermissions := false
	if skipChanged {
		skipPermissions = opts.SkipPermissions
	} else {
		skipPermissions = v.GetBool("skip-permissions")
	}

	if len(args) == 0 {
		return nil, fmt.Errorf("task required")
	}

	cfg := &Config{
		WorkDir:            defaultWorkdir,
		Backend:            backendName,
		Agent:              agentName,
		PromptFile:         promptFile,
		PromptFileExplicit: promptFileExplicit,
		SkipPermissions:    skipPermissions,
		Yolo:               yolo,
		Model:              model,
		ReasoningEffort:    reasoningEffort,
		MaxParallelWorkers: config.ResolveMaxParallelWorkers(),
	}

	// Positional arguments: "resume <session_id> <task> [workdir]" or
	// "<task> [workdir]". A task of "-" requests explicit stdin input;
	// "-" is never a valid workdir.
	if args[0] == "resume" {
		if len(args) < 3 {
			return nil, fmt.Errorf("resume mode requires: resume <session_id> <task>")
		}
		cfg.Mode = "resume"
		cfg.SessionID = strings.TrimSpace(args[1])
		if cfg.SessionID == "" {
			return nil, fmt.Errorf("resume mode requires non-empty session_id")
		}
		cfg.Task = args[2]
		cfg.ExplicitStdin = (args[2] == "-")
		if len(args) > 3 {
			if args[3] == "-" {
				return nil, fmt.Errorf("invalid workdir: '-' is not a valid directory path")
			}
			cfg.WorkDir = args[3]
		}
	} else {
		cfg.Mode = "new"
		cfg.Task = args[0]
		cfg.ExplicitStdin = (args[0] == "-")
		if len(args) > 1 {
			if args[1] == "-" {
				return nil, fmt.Errorf("invalid workdir: '-' is not a valid directory path")
			}
			cfg.WorkDir = args[1]
		}
	}

	return cfg, nil
}
|
||||
|
||||
// lastFlagIndex reports the index of the final occurrence of --name in
// argv, matching either the bare form "--name" or the assigned form
// "--name=value". It returns -1 when argv is empty, name is blank, or
// the flag never appears.
func lastFlagIndex(argv []string, name string) int {
	trimmed := strings.TrimSpace(name)
	if trimmed == "" || len(argv) == 0 {
		return -1
	}

	exact := "--" + trimmed
	assigned := exact + "="
	// Scan from the tail so the first hit is the last occurrence.
	for i := len(argv) - 1; i >= 0; i-- {
		if argv[i] == exact || strings.HasPrefix(argv[i], assigned) {
			return i
		}
	}
	return -1
}
|
||||
|
||||
// runParallelMode executes multiple tasks concurrently. The task graph
// is read from stdin (positional arguments are rejected); only the
// --backend, --model, --full-output and --skip-permissions flags apply
// here, providing per-task defaults. Tasks are layered by dependency
// (topological sort) and run with the configured worker limit; the
// aggregated report is printed to stdout. Returns the process exit
// code: the last failing task's code, or 0 when all tasks succeed.
func runParallelMode(cmd *cobra.Command, args []string, opts *cliOptions, v *viper.Viper, name string) int {
	if len(args) > 0 {
		fmt.Fprintln(os.Stderr, "ERROR: --parallel reads its task configuration from stdin; no positional arguments are allowed.")
		fmt.Fprintln(os.Stderr, "Usage examples:")
		fmt.Fprintf(os.Stderr, " %s --parallel < tasks.txt\n", name)
		fmt.Fprintf(os.Stderr, " echo '...' | %s --parallel\n", name)
		fmt.Fprintf(os.Stderr, " %s --parallel <<'EOF'\n", name)
		fmt.Fprintf(os.Stderr, " %s --parallel --full-output <<'EOF' # include full task output\n", name)
		return 1
	}

	// Single-task-only flags are not meaningful in parallel mode.
	if cmd.Flags().Changed("agent") || cmd.Flags().Changed("prompt-file") || cmd.Flags().Changed("reasoning-effort") {
		fmt.Fprintln(os.Stderr, "ERROR: --parallel reads its task configuration from stdin; only --backend, --model, --full-output and --skip-permissions are allowed.")
		return 1
	}

	// Backend default: flag > config file > built-in default.
	backendName := defaultBackendName
	if cmd.Flags().Changed("backend") {
		backendName = strings.TrimSpace(opts.Backend)
		if backendName == "" {
			fmt.Fprintln(os.Stderr, "ERROR: --backend flag requires a value")
			return 1
		}
	} else if val := strings.TrimSpace(v.GetString("backend")); val != "" {
		backendName = val
	}

	// Model default: flag > config file.
	model := ""
	if cmd.Flags().Changed("model") {
		model = strings.TrimSpace(opts.Model)
		if model == "" {
			fmt.Fprintln(os.Stderr, "ERROR: --model flag requires a value")
			return 1
		}
	} else {
		model = strings.TrimSpace(v.GetString("model"))
	}

	// Full-output: flag wins; otherwise fall back to the config file.
	fullOutput := opts.FullOutput
	if !cmd.Flags().Changed("full-output") && v.IsSet("full-output") {
		fullOutput = v.GetBool("full-output")
	}

	// Either skip-permissions spelling counts as an explicit choice.
	skipChanged := cmd.Flags().Changed("skip-permissions") || cmd.Flags().Changed("dangerously-skip-permissions")
	skipPermissions := false
	if skipChanged {
		skipPermissions = opts.SkipPermissions
	} else {
		skipPermissions = v.GetBool("skip-permissions")
	}

	// Validate the backend early; Name() canonicalizes the name.
	backend, err := selectBackendFn(backendName)
	if err != nil {
		fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
		return 1
	}
	backendName = backend.Name()

	data, err := io.ReadAll(stdinReader)
	if err != nil {
		fmt.Fprintf(os.Stderr, "ERROR: failed to read stdin: %v\n", err)
		return 1
	}

	cfg, err := parseParallelConfig(data)
	if err != nil {
		fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
		return 1
	}

	// Apply CLI-level defaults to tasks that did not set their own.
	cfg.GlobalBackend = backendName
	model = strings.TrimSpace(model)
	for i := range cfg.Tasks {
		if strings.TrimSpace(cfg.Tasks[i].Backend) == "" {
			cfg.Tasks[i].Backend = backendName
		}
		if strings.TrimSpace(cfg.Tasks[i].Model) == "" && model != "" {
			cfg.Tasks[i].Model = model
		}
		cfg.Tasks[i].SkipPermissions = cfg.Tasks[i].SkipPermissions || skipPermissions
	}

	// Layer tasks by dependency, then run each layer concurrently.
	timeoutSec := resolveTimeout()
	layers, err := topologicalSort(cfg.Tasks)
	if err != nil {
		fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
		return 1
	}

	results := executeConcurrent(layers, timeoutSec)

	// Enrich each result with metrics mined from its output text.
	for i := range results {
		results[i].CoverageTarget = defaultCoverageTarget
		if results[i].Message == "" {
			continue
		}

		lines := strings.Split(results[i].Message, "\n")
		results[i].Coverage = extractCoverageFromLines(lines)
		results[i].CoverageNum = extractCoverageNum(results[i].Coverage)
		results[i].FilesChanged = extractFilesChangedFromLines(lines)
		results[i].TestsPassed, results[i].TestsFailed = extractTestResultsFromLines(lines)
		results[i].KeyOutput = extractKeyOutputFromLines(lines, 150)
	}

	fmt.Println(generateFinalOutputWithMode(results, !fullOutput))

	// Exit with the last non-zero task exit code, if any.
	exitCode := 0
	for _, res := range results {
		if res.ExitCode != 0 {
			exitCode = res.ExitCode
		}
	}
	return exitCode
}
|
||||
|
||||
// runSingleMode executes one task against the selected backend and
// prints its output (plus a SESSION_ID trailer when the backend reports
// one). It resolves the backend, wires the legacy codexCommand /
// buildCodexArgsFn globals (unless a test injected replacements),
// gathers the task text from argv or stdin, optionally wraps it with an
// agent prompt, and chooses between argv and stdin delivery. Returns
// the process exit code.
func runSingleMode(cfg *Config, name string) int {
	backend, err := selectBackendFn(cfg.Backend)
	if err != nil {
		logError(err.Error())
		return 1
	}
	cfg.Backend = backend.Name()

	// Detect test-injected overrides so we do not clobber them below.
	cmdInjected := codexCommand != defaultCodexCommand
	argsInjected := buildCodexArgsFn != nil && reflect.ValueOf(buildCodexArgsFn).Pointer() != reflect.ValueOf(defaultBuildArgsFn).Pointer()

	if backend.Name() != defaultBackendName || !cmdInjected {
		codexCommand = backend.Command()
	}
	if backend.Name() != defaultBackendName || !argsInjected {
		buildCodexArgsFn = backend.BuildArgs
	}
	logInfo(fmt.Sprintf("Selected backend: %s", backend.Name()))

	timeoutSec := resolveTimeout()
	logInfo(fmt.Sprintf("Timeout: %ds", timeoutSec))
	cfg.Timeout = timeoutSec

	var taskText string
	var piped bool

	// Task text source: explicit "-" forces stdin; otherwise piped
	// stdin (when present) overrides the positional task argument.
	if cfg.ExplicitStdin {
		logInfo("Explicit stdin mode: reading task from stdin")
		data, err := io.ReadAll(stdinReader)
		if err != nil {
			logError("Failed to read stdin: " + err.Error())
			return 1
		}
		taskText = string(data)
		if taskText == "" {
			logError("Explicit stdin mode requires task input from stdin")
			return 1
		}
		piped = !isTerminal()
	} else {
		pipedTask, err := readPipedTask()
		if err != nil {
			logError("Failed to read piped stdin: " + err.Error())
			return 1
		}
		piped = pipedTask != ""
		if piped {
			taskText = pipedTask
		} else {
			taskText = cfg.Task
		}
	}

	// Prepend the agent prompt when one is configured.
	if strings.TrimSpace(cfg.PromptFile) != "" {
		prompt, err := readAgentPromptFile(cfg.PromptFile, cfg.PromptFileExplicit)
		if err != nil {
			logError("Failed to read prompt file: " + err.Error())
			return 1
		}
		taskText = wrapTaskWithAgentPrompt(prompt, taskText)
	}

	// Shell-hostile or oversized task text is delivered via stdin
	// (targetArg "-") rather than as a command-line argument.
	useStdin := cfg.ExplicitStdin || shouldUseStdin(taskText, piped)

	targetArg := taskText
	if useStdin {
		targetArg = "-"
	}
	codexArgs := buildCodexArgsFn(cfg, targetArg)

	logger := activeLogger()
	if logger == nil {
		fmt.Fprintln(os.Stderr, "ERROR: logger is not initialized")
		return 1
	}

	// Operator-facing banner on stderr (stdout is reserved for output).
	fmt.Fprintf(os.Stderr, "[%s]\n", name)
	fmt.Fprintf(os.Stderr, " Backend: %s\n", cfg.Backend)
	fmt.Fprintf(os.Stderr, " Command: %s %s\n", codexCommand, strings.Join(codexArgs, " "))
	fmt.Fprintf(os.Stderr, " PID: %d\n", os.Getpid())
	fmt.Fprintf(os.Stderr, " Log: %s\n", logger.Path())

	// Record why stdin delivery was chosen, for debuggability.
	if useStdin {
		var reasons []string
		if piped {
			reasons = append(reasons, "piped input")
		}
		if cfg.ExplicitStdin {
			reasons = append(reasons, "explicit \"-\"")
		}
		if strings.Contains(taskText, "\n") {
			reasons = append(reasons, "newline")
		}
		if strings.Contains(taskText, "\\") {
			reasons = append(reasons, "backslash")
		}
		if strings.Contains(taskText, "\"") {
			reasons = append(reasons, "double-quote")
		}
		if strings.Contains(taskText, "'") {
			reasons = append(reasons, "single-quote")
		}
		if strings.Contains(taskText, "`") {
			reasons = append(reasons, "backtick")
		}
		if strings.Contains(taskText, "$") {
			reasons = append(reasons, "dollar")
		}
		if len(taskText) > 800 {
			reasons = append(reasons, "length>800")
		}
		if len(reasons) > 0 {
			logWarn(fmt.Sprintf("Using stdin mode for task due to: %s", strings.Join(reasons, ", ")))
		}
	}

	logInfo(fmt.Sprintf("%s running...", cfg.Backend))

	taskSpec := TaskSpec{
		Task:            taskText,
		WorkDir:         cfg.WorkDir,
		Mode:            cfg.Mode,
		SessionID:       cfg.SessionID,
		Backend:         cfg.Backend,
		Model:           cfg.Model,
		ReasoningEffort: cfg.ReasoningEffort,
		Agent:           cfg.Agent,
		SkipPermissions: cfg.SkipPermissions,
		UseStdin:        useStdin,
	}

	result := runTaskFn(taskSpec, false, cfg.Timeout)

	if result.ExitCode != 0 {
		return result.ExitCode
	}

	// Validate that we got a meaningful output message
	if strings.TrimSpace(result.Message) == "" {
		logError(fmt.Sprintf("no output message: backend=%s returned empty result.Message with exit_code=0", cfg.Backend))
		return 1
	}

	fmt.Println(result.Message)
	if result.SessionID != "" {
		fmt.Printf("\n---\nSESSION_ID: %s\n", result.SessionID)
	}

	return 0
}
|
||||
445
codeagent-wrapper/internal/app/concurrent_stress_test.go
Normal file
445
codeagent-wrapper/internal/app/concurrent_stress_test.go
Normal file
@@ -0,0 +1,445 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/goccy/go-json"
|
||||
)
|
||||
|
||||
// stripTimestampPrefix normalizes a raw log line for assertions: a JSON
// object line yields its non-empty "message" field, a "[timestamp] msg"
// line loses its bracketed prefix, and anything else comes back merely
// whitespace-trimmed.
func stripTimestampPrefix(line string) string {
	trimmed := strings.TrimSpace(line)

	// JSON-formatted entry: prefer the embedded message field.
	if strings.HasPrefix(trimmed, "{") {
		var evt struct {
			Message string `json:"message"`
		}
		if json.Unmarshal([]byte(trimmed), &evt) == nil && evt.Message != "" {
			return evt.Message
		}
	}

	// "[...] msg" entry: drop everything through the first "] ".
	if strings.HasPrefix(trimmed, "[") {
		if _, rest, found := strings.Cut(trimmed, "] "); found {
			return rest
		}
	}

	return trimmed
}
|
||||
|
||||
// TestConcurrentStressLogger is a high-concurrency stress test: many
// goroutines hammer one logger and the test checks throughput, an
// acceptable persistence ratio, and the written line format.
func TestConcurrentStressLogger(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping stress test in short mode")
	}

	logger, err := NewLoggerWithSuffix("stress")
	if err != nil {
		t.Fatal(err)
	}
	defer logger.Close()

	t.Logf("Log file: %s", logger.Path())

	const (
		numGoroutines  = 100  // number of concurrent writer goroutines
		logsPerRoutine = 1000 // log lines written by each goroutine
		totalExpected  = numGoroutines * logsPerRoutine
	)

	var wg sync.WaitGroup
	start := time.Now()

	// Spawn the concurrent writers.
	for i := 0; i < numGoroutines; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for j := 0; j < logsPerRoutine; j++ {
				logger.Info(fmt.Sprintf("goroutine-%d-msg-%d", id, j))
			}
		}(i)
	}

	wg.Wait()
	logger.Flush()
	elapsed := time.Since(start)

	// Read the log file back to see what was persisted.
	data, err := os.ReadFile(logger.Path())
	if err != nil {
		t.Fatalf("failed to read log file: %v", err)
	}

	lines := strings.Split(strings.TrimSpace(string(data)), "\n")
	actualCount := len(lines)

	t.Logf("Concurrent stress test results:")
	t.Logf(" Goroutines: %d", numGoroutines)
	t.Logf(" Logs per goroutine: %d", logsPerRoutine)
	t.Logf(" Total expected: %d", totalExpected)
	t.Logf(" Total actual: %d", actualCount)
	t.Logf(" Duration: %v", elapsed)
	t.Logf(" Throughput: %.2f logs/sec", float64(totalExpected)/elapsed.Seconds())

	// The logger is lossy under pressure; require at least 10% to survive.
	if actualCount < totalExpected/10 {
		t.Errorf("too many logs lost: got %d, want at least %d (10%% of %d)",
			actualCount, totalExpected/10, totalExpected)
	}
	t.Logf("Successfully wrote %d/%d logs (%.1f%%)",
		actualCount, totalExpected, float64(actualCount)/float64(totalExpected)*100)

	// Validate the line format (plain message, no level prefix).
	formatRE := regexp.MustCompile(`^goroutine-\d+-msg-\d+$`)
	for i, line := range lines[:min(10, len(lines))] {
		msg := stripTimestampPrefix(line)
		if !formatRE.MatchString(msg) {
			t.Errorf("line %d has invalid format: %s", i, line)
		}
	}
}
|
||||
|
||||
// TestConcurrentBurstLogger simulates bursty traffic: repeated waves of
// goroutines separated by short pauses, checking the persistence ratio.
func TestConcurrentBurstLogger(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping burst test in short mode")
	}

	logger, err := NewLoggerWithSuffix("burst")
	if err != nil {
		t.Fatal(err)
	}
	defer logger.Close()

	t.Logf("Log file: %s", logger.Path())

	const (
		numBursts          = 10
		goroutinesPerBurst = 50
		logsPerGoroutine   = 100
	)

	totalLogs := 0
	start := time.Now()

	// Simulate bursts of traffic.
	for burst := 0; burst < numBursts; burst++ {
		var wg sync.WaitGroup
		for i := 0; i < goroutinesPerBurst; i++ {
			wg.Add(1)
			totalLogs += logsPerGoroutine
			go func(b, g int) {
				defer wg.Done()
				for j := 0; j < logsPerGoroutine; j++ {
					logger.Info(fmt.Sprintf("burst-%d-goroutine-%d-msg-%d", b, g, j))
				}
			}(burst, i)
		}
		wg.Wait()
		time.Sleep(10 * time.Millisecond) // gap between bursts
	}

	logger.Flush()
	elapsed := time.Since(start)

	// Verify what reached the file.
	data, err := os.ReadFile(logger.Path())
	if err != nil {
		t.Fatalf("failed to read log file: %v", err)
	}

	lines := strings.Split(strings.TrimSpace(string(data)), "\n")
	actualCount := len(lines)

	t.Logf("Burst test results:")
	t.Logf(" Total bursts: %d", numBursts)
	t.Logf(" Goroutines per burst: %d", goroutinesPerBurst)
	t.Logf(" Expected logs: %d", totalLogs)
	t.Logf(" Actual logs: %d", actualCount)
	t.Logf(" Duration: %v", elapsed)
	t.Logf(" Throughput: %.2f logs/sec", float64(totalLogs)/elapsed.Seconds())

	// Lossy logger: require at least 10% persisted.
	if actualCount < totalLogs/10 {
		t.Errorf("too many logs lost: got %d, want at least %d (10%% of %d)", actualCount, totalLogs/10, totalLogs)
	}
	t.Logf("Successfully wrote %d/%d logs (%.1f%%)",
		actualCount, totalLogs, float64(actualCount)/float64(totalLogs)*100)
}
|
||||
|
||||
// TestLoggerChannelCapacity pushes more messages than the logger's
// channel capacity (1000) to exercise the non-blocking overflow path.
func TestLoggerChannelCapacity(t *testing.T) {
	logger, err := NewLoggerWithSuffix("capacity")
	if err != nil {
		t.Fatal(err)
	}
	defer logger.Close()

	const rapidLogs = 2000 // exceeds the channel capacity (1000)

	start := time.Now()
	for i := 0; i < rapidLogs; i++ {
		logger.Info(fmt.Sprintf("rapid-log-%d", i))
	}
	sendDuration := time.Since(start)

	logger.Flush()
	flushDuration := time.Since(start) - sendDuration

	t.Logf("Channel capacity test:")
	t.Logf(" Logs sent: %d", rapidLogs)
	t.Logf(" Send duration: %v", sendDuration)
	t.Logf(" Flush duration: %v", flushDuration)

	// Non-blocking mode may drop messages; a reasonable share must still land.
	data, err := os.ReadFile(logger.Path())
	if err != nil {
		t.Fatal(err)
	}
	lines := strings.Split(strings.TrimSpace(string(data)), "\n")
	actualCount := len(lines)

	if actualCount < rapidLogs/10 {
		t.Errorf("too many logs lost: got %d, want at least %d (10%% of %d)", actualCount, rapidLogs/10, rapidLogs)
	}
	t.Logf("Logs persisted: %d/%d (%.1f%%)", actualCount, rapidLogs, float64(actualCount)/float64(rapidLogs)*100)
}
|
||||
|
||||
// TestLoggerMemoryUsage writes many long messages and checks the
// resulting file size and write throughput.
func TestLoggerMemoryUsage(t *testing.T) {
	logger, err := NewLoggerWithSuffix("memory")
	if err != nil {
		t.Fatal(err)
	}
	defer logger.Close()

	const numLogs = 20000
	longMessage := strings.Repeat("x", 500) // 500-byte payload per message

	start := time.Now()
	for i := 0; i < numLogs; i++ {
		logger.Info(fmt.Sprintf("log-%d-%s", i, longMessage))
	}
	logger.Flush()
	elapsed := time.Since(start)

	// Check the on-disk size.
	info, err := os.Stat(logger.Path())
	if err != nil {
		t.Fatal(err)
	}

	expectedTotalSize := int64(numLogs * 500) // theoretical minimum total bytes
	expectedMinSize := expectedTotalSize / 10 // tolerate up to 90% loss
	actualSize := info.Size()

	t.Logf("Memory/disk usage test:")
	t.Logf(" Logs written: %d", numLogs)
	t.Logf(" Message size: 500 bytes")
	t.Logf(" File size: %.2f MB", float64(actualSize)/1024/1024)
	t.Logf(" Duration: %v", elapsed)
	t.Logf(" Write speed: %.2f MB/s", float64(actualSize)/1024/1024/elapsed.Seconds())
	t.Logf(" Persistence ratio: %.1f%%", float64(actualSize)/float64(expectedTotalSize)*100)

	if actualSize < expectedMinSize {
		t.Errorf("file size too small: got %d bytes, expected at least %d", actualSize, expectedMinSize)
	}
}
|
||||
|
||||
// TestLoggerFlushTimeout verifies that Flush returns within a bounded
// time rather than blocking indefinitely.
func TestLoggerFlushTimeout(t *testing.T) {
	logger, err := NewLoggerWithSuffix("flush")
	if err != nil {
		t.Fatal(err)
	}
	defer logger.Close()

	// Write some logs.
	for i := 0; i < 100; i++ {
		logger.Info(fmt.Sprintf("test-log-%d", i))
	}

	// Flush should complete within a reasonable time.
	start := time.Now()
	logger.Flush()
	duration := time.Since(start)

	t.Logf("Flush duration: %v", duration)

	if duration > 6*time.Second {
		t.Errorf("Flush took too long: %v (expected < 6s)", duration)
	}
}
|
||||
|
||||
// TestLoggerOrderPreservation checks that, within each goroutine, log
// lines are persisted in the order written (strictly increasing
// sequence numbers) and that none are lost.
func TestLoggerOrderPreservation(t *testing.T) {
	logger, err := NewLoggerWithSuffix("order")
	if err != nil {
		t.Fatal(err)
	}
	defer logger.Close()

	const numGoroutines = 10
	const logsPerRoutine = 100

	var wg sync.WaitGroup
	for i := 0; i < numGoroutines; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for j := 0; j < logsPerRoutine; j++ {
				logger.Info(fmt.Sprintf("G%d-SEQ%04d", id, j))
			}
		}(i)
	}

	wg.Wait()
	logger.Flush()

	// Read back and group sequence numbers per goroutine.
	data, err := os.ReadFile(logger.Path())
	if err != nil {
		t.Fatal(err)
	}

	scanner := bufio.NewScanner(strings.NewReader(string(data)))
	sequences := make(map[int][]int) // goroutine ID -> sequence numbers

	for scanner.Scan() {
		line := stripTimestampPrefix(scanner.Text())
		var gid, seq int
		// Parse format: G0-SEQ0001 (without INFO: prefix)
		_, err := fmt.Sscanf(line, "G%d-SEQ%04d", &gid, &seq)
		if err != nil {
			t.Errorf("invalid log format: %s (error: %v)", line, err)
			continue
		}
		sequences[gid] = append(sequences[gid], seq)
	}

	// Each goroutine's sequence numbers must be strictly increasing.
	for gid, seqs := range sequences {
		for i := 0; i < len(seqs)-1; i++ {
			if seqs[i] >= seqs[i+1] {
				t.Errorf("Goroutine %d: out of order at index %d: %d >= %d",
					gid, i, seqs[i], seqs[i+1])
			}
		}
		if len(seqs) != logsPerRoutine {
			t.Errorf("Goroutine %d: missing logs, got %d, want %d",
				gid, len(seqs), logsPerRoutine)
		}
	}

	t.Logf("Order preservation test: all %d goroutines maintained sequence order", len(sequences))
}
|
||||
|
||||
// TestConcurrentWorkerPoolLimit stubs runCodexTaskFn to verify that the
// concurrent executor never runs more tasks than the worker limit, that
// a context is propagated to every task, and that the executor emits
// its planning/start log lines.
func TestConcurrentWorkerPoolLimit(t *testing.T) {
	orig := runCodexTaskFn
	defer func() { runCodexTaskFn = orig }()

	logger, err := NewLoggerWithSuffix("pool-limit")
	if err != nil {
		t.Fatal(err)
	}
	setLogger(logger)
	t.Cleanup(func() {
		_ = closeLogger()
		_ = logger.RemoveLogFile()
	})

	var active int64
	var maxSeen int64
	runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
		if task.Context == nil {
			t.Fatalf("context not propagated for task %s", task.ID)
		}
		cur := atomic.AddInt64(&active, 1)
		// CAS loop records the highest concurrency level observed.
		for {
			prev := atomic.LoadInt64(&maxSeen)
			if cur <= prev || atomic.CompareAndSwapInt64(&maxSeen, prev, cur) {
				break
			}
		}
		// Simulate work; bail out early on cancellation.
		select {
		case <-task.Context.Done():
			atomic.AddInt64(&active, -1)
			return TaskResult{TaskID: task.ID, ExitCode: 130, Error: "context cancelled"}
		case <-time.After(30 * time.Millisecond):
		}
		atomic.AddInt64(&active, -1)
		return TaskResult{TaskID: task.ID}
	}

	// One layer of five tasks, limited to two concurrent workers.
	layers := [][]TaskSpec{{{ID: "t1"}, {ID: "t2"}, {ID: "t3"}, {ID: "t4"}, {ID: "t5"}}}
	results := executeConcurrentWithContext(context.Background(), layers, 5, 2)

	if len(results) != 5 {
		t.Fatalf("unexpected result count: got %d", len(results))
	}
	if maxSeen > 2 {
		t.Fatalf("worker pool exceeded limit: saw %d active workers", maxSeen)
	}

	logger.Flush()
	data, err := os.ReadFile(logger.Path())
	if err != nil {
		t.Fatalf("failed to read log file: %v", err)
	}
	content := string(data)
	if !strings.Contains(content, "worker_limit=2") {
		t.Fatalf("concurrency planning log missing, content: %s", content)
	}
	if !strings.Contains(content, "parallel: start") {
		t.Fatalf("concurrency start logs missing, content: %s", content)
	}
}
|
||||
|
||||
// TestConcurrentCancellationPropagation cancels the parent context
// mid-run and asserts that at least one task observes the cancellation
// (reports a non-zero exit code).
func TestConcurrentCancellationPropagation(t *testing.T) {
	orig := runCodexTaskFn
	defer func() { runCodexTaskFn = orig }()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
		if task.Context == nil {
			t.Fatalf("context not propagated for task %s", task.ID)
		}
		// Either finish after 200ms or report cancellation, whichever first.
		select {
		case <-task.Context.Done():
			return TaskResult{TaskID: task.ID, ExitCode: 130, Error: "context cancelled"}
		case <-time.After(200 * time.Millisecond):
			return TaskResult{TaskID: task.ID}
		}
	}

	layers := [][]TaskSpec{{{ID: "a"}, {ID: "b"}, {ID: "c"}}}
	// Cancel while the layer is still running.
	go func() {
		time.Sleep(50 * time.Millisecond)
		cancel()
	}()

	results := executeConcurrentWithContext(ctx, layers, 1, 2)
	if len(results) != 3 {
		t.Fatalf("unexpected result count: got %d", len(results))
	}

	cancelled := 0
	for _, res := range results {
		if res.ExitCode != 0 {
			cancelled++
		}
	}

	if cancelled == 0 {
		t.Fatalf("expected cancellation to propagate, got results: %+v", results)
	}
}
|
||||
7
codeagent-wrapper/internal/app/config_alias.go
Normal file
7
codeagent-wrapper/internal/app/config_alias.go
Normal file
@@ -0,0 +1,7 @@
|
||||
package wrapper
|
||||
|
||||
import config "codeagent-wrapper/internal/config"
|
||||
|
||||
// Keep the existing Config name throughout the codebase, but source the
|
||||
// implementation from internal/config.
|
||||
type Config = config.Config
|
||||
54
codeagent-wrapper/internal/app/executor_alias.go
Normal file
54
codeagent-wrapper/internal/app/executor_alias.go
Normal file
@@ -0,0 +1,54 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
backend "codeagent-wrapper/internal/backend"
|
||||
config "codeagent-wrapper/internal/config"
|
||||
executor "codeagent-wrapper/internal/executor"
|
||||
)
|
||||
|
||||
// defaultRunCodexTaskFn is the default implementation of runCodexTaskFn (exposed for test reset).
|
||||
func defaultRunCodexTaskFn(task TaskSpec, timeout int) TaskResult {
|
||||
return executor.DefaultRunCodexTaskFn(task, timeout)
|
||||
}
|
||||
|
||||
var runCodexTaskFn = defaultRunCodexTaskFn
|
||||
|
||||
func topologicalSort(tasks []TaskSpec) ([][]TaskSpec, error) {
|
||||
return executor.TopologicalSort(tasks)
|
||||
}
|
||||
|
||||
func executeConcurrent(layers [][]TaskSpec, timeout int) []TaskResult {
|
||||
maxWorkers := config.ResolveMaxParallelWorkers()
|
||||
return executeConcurrentWithContext(context.Background(), layers, timeout, maxWorkers)
|
||||
}
|
||||
|
||||
func executeConcurrentWithContext(parentCtx context.Context, layers [][]TaskSpec, timeout int, maxWorkers int) []TaskResult {
|
||||
return executor.ExecuteConcurrentWithContext(parentCtx, layers, timeout, maxWorkers, runCodexTaskFn)
|
||||
}
|
||||
|
||||
func generateFinalOutput(results []TaskResult) string {
|
||||
return executor.GenerateFinalOutput(results)
|
||||
}
|
||||
|
||||
func generateFinalOutputWithMode(results []TaskResult, summaryOnly bool) string {
|
||||
return executor.GenerateFinalOutputWithMode(results, summaryOnly)
|
||||
}
|
||||
|
||||
func buildCodexArgs(cfg *Config, targetArg string) []string {
|
||||
return backend.BuildCodexArgs(cfg, targetArg)
|
||||
}
|
||||
|
||||
func runCodexTask(taskSpec TaskSpec, silent bool, timeoutSec int) TaskResult {
|
||||
return runCodexTaskWithContext(context.Background(), taskSpec, nil, nil, false, silent, timeoutSec)
|
||||
}
|
||||
|
||||
func runCodexProcess(parentCtx context.Context, codexArgs []string, taskText string, useStdin bool, timeoutSec int) (message, threadID string, exitCode int) {
|
||||
res := runCodexTaskWithContext(parentCtx, TaskSpec{Task: taskText, WorkDir: defaultWorkdir, Mode: "new", UseStdin: useStdin}, nil, codexArgs, true, false, timeoutSec)
|
||||
return res.Message, res.SessionID, res.ExitCode
|
||||
}
|
||||
|
||||
func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backend Backend, customArgs []string, useCustomArgs bool, silent bool, timeoutSec int) TaskResult {
|
||||
return executor.RunCodexTaskWithContext(parentCtx, taskSpec, backend, codexCommand, buildCodexArgsFn, customArgs, useCustomArgs, silent, timeoutSec)
|
||||
}
|
||||
1132
codeagent-wrapper/internal/app/executor_concurrent_test.go
Normal file
1132
codeagent-wrapper/internal/app/executor_concurrent_test.go
Normal file
File diff suppressed because it is too large
Load Diff
26
codeagent-wrapper/internal/app/logger.go
Normal file
26
codeagent-wrapper/internal/app/logger.go
Normal file
@@ -0,0 +1,26 @@
|
||||
package wrapper
|
||||
|
||||
import ilogger "codeagent-wrapper/internal/logger"
|
||||
|
||||
type Logger = ilogger.Logger
|
||||
type CleanupStats = ilogger.CleanupStats
|
||||
|
||||
func NewLogger() (*Logger, error) { return ilogger.NewLogger() }
|
||||
|
||||
func NewLoggerWithSuffix(suffix string) (*Logger, error) { return ilogger.NewLoggerWithSuffix(suffix) }
|
||||
|
||||
func setLogger(l *Logger) { ilogger.SetLogger(l) }
|
||||
|
||||
func closeLogger() error { return ilogger.CloseLogger() }
|
||||
|
||||
func activeLogger() *Logger { return ilogger.ActiveLogger() }
|
||||
|
||||
func logInfo(msg string) { ilogger.LogInfo(msg) }
|
||||
|
||||
func logWarn(msg string) { ilogger.LogWarn(msg) }
|
||||
|
||||
func logError(msg string) { ilogger.LogError(msg) }
|
||||
|
||||
func cleanupOldLogs() (CleanupStats, error) { return ilogger.CleanupOldLogs() }
|
||||
|
||||
func sanitizeLogSuffix(raw string) string { return ilogger.SanitizeLogSuffix(raw) }
|
||||
950
codeagent-wrapper/internal/app/main_integration_test.go
Normal file
950
codeagent-wrapper/internal/app/main_integration_test.go
Normal file
@@ -0,0 +1,950 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"codeagent-wrapper/internal/logger"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
type integrationSummary struct {
|
||||
Total int `json:"total"`
|
||||
Success int `json:"success"`
|
||||
Failed int `json:"failed"`
|
||||
}
|
||||
|
||||
type integrationOutput struct {
|
||||
Results []TaskResult `json:"results"`
|
||||
Summary integrationSummary `json:"summary"`
|
||||
}
|
||||
|
||||
func captureStdout(t *testing.T, fn func()) string {
|
||||
t.Helper()
|
||||
old := os.Stdout
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
fn()
|
||||
|
||||
w.Close()
|
||||
os.Stdout = old
|
||||
|
||||
var buf bytes.Buffer
|
||||
if _, err := io.Copy(&buf, r); err != nil {
|
||||
t.Fatalf("io.Copy() error = %v", err)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func parseIntegrationOutput(t *testing.T, out string) integrationOutput {
|
||||
t.Helper()
|
||||
var payload integrationOutput
|
||||
|
||||
lines := strings.Split(out, "\n")
|
||||
var currentTask *TaskResult
|
||||
inTaskResults := false
|
||||
|
||||
for _, line := range lines {
|
||||
line = strings.TrimSpace(line)
|
||||
|
||||
// Parse new format header: "X tasks | Y passed | Z failed"
|
||||
if strings.Contains(line, "tasks |") && strings.Contains(line, "passed |") {
|
||||
parts := strings.Split(line, "|")
|
||||
for _, p := range parts {
|
||||
p = strings.TrimSpace(p)
|
||||
if strings.HasSuffix(p, "tasks") {
|
||||
if _, err := fmt.Sscanf(p, "%d tasks", &payload.Summary.Total); err != nil {
|
||||
t.Fatalf("failed to parse total tasks from %q: %v", p, err)
|
||||
}
|
||||
} else if strings.HasSuffix(p, "passed") {
|
||||
if _, err := fmt.Sscanf(p, "%d passed", &payload.Summary.Success); err != nil {
|
||||
t.Fatalf("failed to parse passed tasks from %q: %v", p, err)
|
||||
}
|
||||
} else if strings.HasSuffix(p, "failed") {
|
||||
if _, err := fmt.Sscanf(p, "%d failed", &payload.Summary.Failed); err != nil {
|
||||
t.Fatalf("failed to parse failed tasks from %q: %v", p, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if strings.HasPrefix(line, "Total:") {
|
||||
// Legacy format: "Total: X | Success: Y | Failed: Z"
|
||||
parts := strings.Split(line, "|")
|
||||
for _, p := range parts {
|
||||
p = strings.TrimSpace(p)
|
||||
if strings.HasPrefix(p, "Total:") {
|
||||
if _, err := fmt.Sscanf(p, "Total: %d", &payload.Summary.Total); err != nil {
|
||||
t.Fatalf("failed to parse total tasks from %q: %v", p, err)
|
||||
}
|
||||
} else if strings.HasPrefix(p, "Success:") {
|
||||
if _, err := fmt.Sscanf(p, "Success: %d", &payload.Summary.Success); err != nil {
|
||||
t.Fatalf("failed to parse passed tasks from %q: %v", p, err)
|
||||
}
|
||||
} else if strings.HasPrefix(p, "Failed:") {
|
||||
if _, err := fmt.Sscanf(p, "Failed: %d", &payload.Summary.Failed); err != nil {
|
||||
t.Fatalf("failed to parse failed tasks from %q: %v", p, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if line == "## Task Results" {
|
||||
inTaskResults = true
|
||||
} else if line == "## Summary" {
|
||||
// End of task results section
|
||||
if currentTask != nil {
|
||||
payload.Results = append(payload.Results, *currentTask)
|
||||
currentTask = nil
|
||||
}
|
||||
inTaskResults = false
|
||||
} else if inTaskResults && strings.HasPrefix(line, "### ") {
|
||||
// New task: ### task-id ✓ 92% or ### task-id PASS 92% (ASCII mode)
|
||||
if currentTask != nil {
|
||||
payload.Results = append(payload.Results, *currentTask)
|
||||
}
|
||||
currentTask = &TaskResult{}
|
||||
|
||||
taskLine := strings.TrimPrefix(line, "### ")
|
||||
parseMarker := func(marker string, exitCode int) bool {
|
||||
needle := " " + marker
|
||||
if !strings.Contains(taskLine, needle) {
|
||||
return false
|
||||
}
|
||||
parts := strings.Split(taskLine, needle)
|
||||
currentTask.TaskID = strings.TrimSpace(parts[0])
|
||||
currentTask.ExitCode = exitCode
|
||||
if exitCode == 0 && len(parts) > 1 {
|
||||
coveragePart := strings.TrimSpace(parts[1])
|
||||
if strings.HasSuffix(coveragePart, "%") {
|
||||
currentTask.Coverage = coveragePart
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
switch {
|
||||
case parseMarker("✓", 0), parseMarker("PASS", 0):
|
||||
// ok
|
||||
case parseMarker("⚠️", 0), parseMarker("WARN", 0):
|
||||
// warning
|
||||
case parseMarker("✗", 1), parseMarker("FAIL", 1):
|
||||
// fail
|
||||
default:
|
||||
currentTask.TaskID = taskLine
|
||||
}
|
||||
} else if currentTask != nil && inTaskResults {
|
||||
// Parse task details
|
||||
if strings.HasPrefix(line, "Exit code:") {
|
||||
if _, err := fmt.Sscanf(line, "Exit code: %d", ¤tTask.ExitCode); err != nil {
|
||||
t.Fatalf("failed to parse exit code from %q: %v", line, err)
|
||||
}
|
||||
} else if strings.HasPrefix(line, "Error:") {
|
||||
currentTask.Error = strings.TrimPrefix(line, "Error: ")
|
||||
} else if strings.HasPrefix(line, "Log:") {
|
||||
currentTask.LogPath = strings.TrimSpace(strings.TrimPrefix(line, "Log:"))
|
||||
} else if strings.HasPrefix(line, "Did:") {
|
||||
currentTask.KeyOutput = strings.TrimSpace(strings.TrimPrefix(line, "Did:"))
|
||||
} else if strings.HasPrefix(line, "Detail:") {
|
||||
// Error detail for failed tasks
|
||||
if currentTask.Message == "" {
|
||||
currentTask.Message = strings.TrimSpace(strings.TrimPrefix(line, "Detail:"))
|
||||
}
|
||||
}
|
||||
} else if strings.HasPrefix(line, "--- Task:") {
|
||||
// Legacy full output format
|
||||
if currentTask != nil {
|
||||
payload.Results = append(payload.Results, *currentTask)
|
||||
}
|
||||
currentTask = &TaskResult{}
|
||||
currentTask.TaskID = strings.TrimSuffix(strings.TrimPrefix(line, "--- Task: "), " ---")
|
||||
} else if currentTask != nil && !inTaskResults {
|
||||
// Legacy format parsing
|
||||
if strings.HasPrefix(line, "Status: SUCCESS") {
|
||||
currentTask.ExitCode = 0
|
||||
} else if strings.HasPrefix(line, "Status: FAILED") {
|
||||
if strings.Contains(line, "exit code") {
|
||||
if _, err := fmt.Sscanf(line, "Status: FAILED (exit code %d)", ¤tTask.ExitCode); err != nil {
|
||||
t.Fatalf("failed to parse exit code from %q: %v", line, err)
|
||||
}
|
||||
} else {
|
||||
currentTask.ExitCode = 1
|
||||
}
|
||||
} else if strings.HasPrefix(line, "Error:") {
|
||||
currentTask.Error = strings.TrimPrefix(line, "Error: ")
|
||||
} else if strings.HasPrefix(line, "Session:") {
|
||||
currentTask.SessionID = strings.TrimPrefix(line, "Session: ")
|
||||
} else if strings.HasPrefix(line, "Log:") {
|
||||
currentTask.LogPath = strings.TrimSpace(strings.TrimPrefix(line, "Log:"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle last task
|
||||
if currentTask != nil {
|
||||
payload.Results = append(payload.Results, *currentTask)
|
||||
}
|
||||
|
||||
return payload
|
||||
}
|
||||
|
||||
func findResultByID(t *testing.T, payload integrationOutput, id string) TaskResult {
|
||||
t.Helper()
|
||||
for _, res := range payload.Results {
|
||||
if res.TaskID == id {
|
||||
return res
|
||||
}
|
||||
}
|
||||
t.Fatalf("result for task %s not found", id)
|
||||
return TaskResult{}
|
||||
}
|
||||
|
||||
func setTempDirEnv(t *testing.T, dir string) string {
|
||||
t.Helper()
|
||||
resolved := dir
|
||||
if eval, err := filepath.EvalSymlinks(dir); err == nil {
|
||||
resolved = eval
|
||||
}
|
||||
t.Setenv("TMPDIR", resolved)
|
||||
t.Setenv("TEMP", resolved)
|
||||
t.Setenv("TMP", resolved)
|
||||
return resolved
|
||||
}
|
||||
|
||||
func createTempLog(t *testing.T, dir, name string) string {
|
||||
t.Helper()
|
||||
path := filepath.Join(dir, name)
|
||||
if err := os.WriteFile(path, []byte("test"), 0o644); err != nil {
|
||||
t.Fatalf("failed to create temp log %s: %v", path, err)
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
func stubProcessRunning(t *testing.T, fn func(int) bool) {
|
||||
t.Helper()
|
||||
t.Cleanup(logger.SetProcessRunningCheck(fn))
|
||||
}
|
||||
|
||||
func stubProcessStartTime(t *testing.T, fn func(int) time.Time) {
|
||||
t.Helper()
|
||||
t.Cleanup(logger.SetProcessStartTimeFn(fn))
|
||||
}
|
||||
|
||||
func TestRunParallelEndToEnd_OrderAndConcurrency(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
origRun := runCodexTaskFn
|
||||
t.Cleanup(func() {
|
||||
runCodexTaskFn = origRun
|
||||
resetTestHooks()
|
||||
})
|
||||
|
||||
input := `---TASK---
|
||||
id: A
|
||||
---CONTENT---
|
||||
task-a
|
||||
---TASK---
|
||||
id: B
|
||||
dependencies: A
|
||||
---CONTENT---
|
||||
task-b
|
||||
---TASK---
|
||||
id: C
|
||||
dependencies: B
|
||||
---CONTENT---
|
||||
task-c
|
||||
---TASK---
|
||||
id: D
|
||||
---CONTENT---
|
||||
task-d
|
||||
---TASK---
|
||||
id: E
|
||||
---CONTENT---
|
||||
task-e`
|
||||
stdinReader = bytes.NewReader([]byte(input))
|
||||
os.Args = []string{"codeagent-wrapper", "--parallel"}
|
||||
|
||||
var mu sync.Mutex
|
||||
starts := make(map[string]time.Time)
|
||||
ends := make(map[string]time.Time)
|
||||
var running int64
|
||||
var maxParallel int64
|
||||
|
||||
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
|
||||
start := time.Now()
|
||||
mu.Lock()
|
||||
starts[task.ID] = start
|
||||
mu.Unlock()
|
||||
|
||||
cur := atomic.AddInt64(&running, 1)
|
||||
for {
|
||||
prev := atomic.LoadInt64(&maxParallel)
|
||||
if cur <= prev {
|
||||
break
|
||||
}
|
||||
if atomic.CompareAndSwapInt64(&maxParallel, prev, cur) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(40 * time.Millisecond)
|
||||
|
||||
mu.Lock()
|
||||
ends[task.ID] = time.Now()
|
||||
mu.Unlock()
|
||||
|
||||
atomic.AddInt64(&running, -1)
|
||||
return TaskResult{TaskID: task.ID, ExitCode: 0, Message: task.Task}
|
||||
}
|
||||
|
||||
var exitCode int
|
||||
output := captureStdout(t, func() {
|
||||
exitCode = run()
|
||||
})
|
||||
|
||||
if exitCode != 0 {
|
||||
t.Fatalf("run() exit = %d, want 0", exitCode)
|
||||
}
|
||||
|
||||
payload := parseIntegrationOutput(t, output)
|
||||
if payload.Summary.Failed != 0 || payload.Summary.Total != 5 || payload.Summary.Success != 5 {
|
||||
t.Fatalf("unexpected summary: %+v", payload.Summary)
|
||||
}
|
||||
|
||||
aEnd := ends["A"]
|
||||
bStart := starts["B"]
|
||||
cStart := starts["C"]
|
||||
bEnd := ends["B"]
|
||||
if aEnd.IsZero() || bStart.IsZero() || bEnd.IsZero() || cStart.IsZero() {
|
||||
t.Fatalf("missing timestamps, starts=%v ends=%v", starts, ends)
|
||||
}
|
||||
if !aEnd.Before(bStart) && !aEnd.Equal(bStart) {
|
||||
t.Fatalf("B should start after A ends: A_end=%v B_start=%v", aEnd, bStart)
|
||||
}
|
||||
if !bEnd.Before(cStart) && !bEnd.Equal(cStart) {
|
||||
t.Fatalf("C should start after B ends: B_end=%v C_start=%v", bEnd, cStart)
|
||||
}
|
||||
|
||||
dStart := starts["D"]
|
||||
eStart := starts["E"]
|
||||
if dStart.IsZero() || eStart.IsZero() {
|
||||
t.Fatalf("missing D/E start times: %v", starts)
|
||||
}
|
||||
delta := dStart.Sub(eStart)
|
||||
if delta < 0 {
|
||||
delta = -delta
|
||||
}
|
||||
if delta > 25*time.Millisecond {
|
||||
t.Fatalf("D and E should run in parallel, delta=%v", delta)
|
||||
}
|
||||
if maxParallel < 2 {
|
||||
t.Fatalf("expected at least 2 concurrent tasks, got %d", maxParallel)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunParallelCycleDetectionStopsExecution(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
origRun := runCodexTaskFn
|
||||
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
|
||||
t.Fatalf("task %s should not execute on cycle", task.ID)
|
||||
return TaskResult{}
|
||||
}
|
||||
t.Cleanup(func() {
|
||||
runCodexTaskFn = origRun
|
||||
resetTestHooks()
|
||||
})
|
||||
|
||||
input := `---TASK---
|
||||
id: A
|
||||
dependencies: B
|
||||
---CONTENT---
|
||||
a
|
||||
---TASK---
|
||||
id: B
|
||||
dependencies: A
|
||||
---CONTENT---
|
||||
b`
|
||||
stdinReader = bytes.NewReader([]byte(input))
|
||||
os.Args = []string{"codeagent-wrapper", "--parallel"}
|
||||
|
||||
exitCode := 0
|
||||
output := captureStdout(t, func() {
|
||||
exitCode = run()
|
||||
})
|
||||
|
||||
if exitCode == 0 {
|
||||
t.Fatalf("cycle should cause non-zero exit, got %d", exitCode)
|
||||
}
|
||||
if strings.TrimSpace(output) != "" {
|
||||
t.Fatalf("expected no JSON output on cycle, got %q", output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunParallelOutputsIncludeLogPaths(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
origRun := runCodexTaskFn
|
||||
t.Cleanup(func() {
|
||||
runCodexTaskFn = origRun
|
||||
resetTestHooks()
|
||||
})
|
||||
|
||||
tempDir := t.TempDir()
|
||||
logPathFor := func(id string) string {
|
||||
return filepath.Join(tempDir, fmt.Sprintf("%s.log", id))
|
||||
}
|
||||
|
||||
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
|
||||
res := TaskResult{
|
||||
TaskID: task.ID,
|
||||
Message: fmt.Sprintf("result-%s", task.ID),
|
||||
SessionID: fmt.Sprintf("session-%s", task.ID),
|
||||
LogPath: logPathFor(task.ID),
|
||||
}
|
||||
if task.ID == "beta" {
|
||||
res.ExitCode = 9
|
||||
res.Error = "boom"
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
input := `---TASK---
|
||||
id: alpha
|
||||
---CONTENT---
|
||||
task-alpha
|
||||
---TASK---
|
||||
id: beta
|
||||
---CONTENT---
|
||||
task-beta`
|
||||
stdinReader = bytes.NewReader([]byte(input))
|
||||
os.Args = []string{"codeagent-wrapper", "--parallel"}
|
||||
|
||||
var exitCode int
|
||||
output := captureStdout(t, func() {
|
||||
exitCode = run()
|
||||
})
|
||||
|
||||
if exitCode != 9 {
|
||||
t.Fatalf("parallel run exit=%d, want 9", exitCode)
|
||||
}
|
||||
|
||||
payload := parseIntegrationOutput(t, output)
|
||||
alpha := findResultByID(t, payload, "alpha")
|
||||
beta := findResultByID(t, payload, "beta")
|
||||
|
||||
if alpha.LogPath != logPathFor("alpha") {
|
||||
t.Fatalf("alpha log path = %q, want %q", alpha.LogPath, logPathFor("alpha"))
|
||||
}
|
||||
if beta.LogPath != logPathFor("beta") {
|
||||
t.Fatalf("beta log path = %q, want %q", beta.LogPath, logPathFor("beta"))
|
||||
}
|
||||
|
||||
for _, id := range []string{"alpha", "beta"} {
|
||||
// Summary mode shows log paths in table format, not "Log: xxx"
|
||||
logPath := logPathFor(id)
|
||||
if !strings.Contains(output, logPath) {
|
||||
t.Fatalf("parallel output missing log path %q for %s:\n%s", logPath, id, output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunParallelStartupLogsPrinted(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
|
||||
tempDir := setTempDirEnv(t, t.TempDir())
|
||||
input := `---TASK---
|
||||
id: a
|
||||
---CONTENT---
|
||||
fail
|
||||
---TASK---
|
||||
id: b
|
||||
---CONTENT---
|
||||
ok-b
|
||||
---TASK---
|
||||
id: c
|
||||
dependencies: a
|
||||
---CONTENT---
|
||||
should-skip
|
||||
---TASK---
|
||||
id: d
|
||||
---CONTENT---
|
||||
ok-d`
|
||||
stdinReader = bytes.NewReader([]byte(input))
|
||||
os.Args = []string{"codeagent-wrapper", "--parallel"}
|
||||
|
||||
expectedLog := filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", os.Getpid()))
|
||||
|
||||
origRun := runCodexTaskFn
|
||||
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
|
||||
path := expectedLog
|
||||
if logger := activeLogger(); logger != nil && logger.Path() != "" {
|
||||
path = logger.Path()
|
||||
}
|
||||
if task.ID == "a" {
|
||||
return TaskResult{TaskID: task.ID, ExitCode: 3, Error: "boom", LogPath: path}
|
||||
}
|
||||
return TaskResult{TaskID: task.ID, ExitCode: 0, Message: task.Task, LogPath: path}
|
||||
}
|
||||
t.Cleanup(func() { runCodexTaskFn = origRun })
|
||||
|
||||
var exitCode int
|
||||
var stdoutOut string
|
||||
stderrOut := captureStderr(t, func() {
|
||||
stdoutOut = captureStdout(t, func() {
|
||||
exitCode = run()
|
||||
})
|
||||
})
|
||||
|
||||
if exitCode == 0 {
|
||||
t.Fatalf("expected non-zero exit due to task failure, got %d", exitCode)
|
||||
}
|
||||
if stdoutOut == "" {
|
||||
t.Fatalf("expected parallel summary on stdout")
|
||||
}
|
||||
|
||||
lines := strings.Split(strings.TrimSpace(stderrOut), "\n")
|
||||
var bannerSeen bool
|
||||
var taskLines []string
|
||||
for _, raw := range lines {
|
||||
line := strings.TrimSpace(raw)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
if line == "=== Starting Parallel Execution ===" {
|
||||
if bannerSeen {
|
||||
t.Fatalf("banner printed multiple times:\n%s", stderrOut)
|
||||
}
|
||||
bannerSeen = true
|
||||
continue
|
||||
}
|
||||
taskLines = append(taskLines, line)
|
||||
}
|
||||
|
||||
if !bannerSeen {
|
||||
t.Fatalf("expected startup banner in stderr, got:\n%s", stderrOut)
|
||||
}
|
||||
|
||||
// After parallel log isolation fix, each task has its own log file
|
||||
expectedLines := map[string]struct{}{
|
||||
fmt.Sprintf("Task a: Log: %s", filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d-a.log", os.Getpid()))): {},
|
||||
fmt.Sprintf("Task b: Log: %s", filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d-b.log", os.Getpid()))): {},
|
||||
fmt.Sprintf("Task d: Log: %s", filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d-d.log", os.Getpid()))): {},
|
||||
}
|
||||
|
||||
if len(taskLines) != len(expectedLines) {
|
||||
t.Fatalf("startup log lines mismatch, got %d lines:\n%s", len(taskLines), stderrOut)
|
||||
}
|
||||
|
||||
for _, line := range taskLines {
|
||||
if _, ok := expectedLines[line]; !ok {
|
||||
t.Fatalf("unexpected startup line %q\nstderr:\n%s", line, stderrOut)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunNonParallelOutputsIncludeLogPathsIntegration(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
|
||||
tempDir := setTempDirEnv(t, t.TempDir())
|
||||
os.Args = []string{"codeagent-wrapper", "integration-log-check"}
|
||||
stdinReader = strings.NewReader("")
|
||||
isTerminalFn = func() bool { return true }
|
||||
codexCommand = createFakeCodexScript(t, "integration-session", "done")
|
||||
buildCodexArgsFn = func(cfg *Config, targetArg string) []string { return []string{} }
|
||||
|
||||
var exitCode int
|
||||
stderr := captureStderr(t, func() {
|
||||
_ = captureStdout(t, func() {
|
||||
exitCode = run()
|
||||
})
|
||||
})
|
||||
|
||||
if exitCode != 0 {
|
||||
t.Fatalf("run() exit=%d, want 0", exitCode)
|
||||
}
|
||||
expectedLog := filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", os.Getpid()))
|
||||
wantLine := fmt.Sprintf("Log: %s", expectedLog)
|
||||
if !strings.Contains(stderr, wantLine) {
|
||||
t.Fatalf("stderr missing %q, got: %q", wantLine, stderr)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunParallelPartialFailureBlocksDependents(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
origRun := runCodexTaskFn
|
||||
t.Cleanup(func() {
|
||||
runCodexTaskFn = origRun
|
||||
resetTestHooks()
|
||||
})
|
||||
|
||||
tempDir := t.TempDir()
|
||||
logPathFor := func(id string) string {
|
||||
return filepath.Join(tempDir, fmt.Sprintf("%s.log", id))
|
||||
}
|
||||
|
||||
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
|
||||
path := logPathFor(task.ID)
|
||||
if task.ID == "A" {
|
||||
return TaskResult{TaskID: "A", ExitCode: 2, Error: "boom", LogPath: path}
|
||||
}
|
||||
return TaskResult{TaskID: task.ID, ExitCode: 0, Message: task.Task, LogPath: path}
|
||||
}
|
||||
|
||||
input := `---TASK---
|
||||
id: A
|
||||
---CONTENT---
|
||||
fail
|
||||
---TASK---
|
||||
id: B
|
||||
dependencies: A
|
||||
---CONTENT---
|
||||
blocked
|
||||
---TASK---
|
||||
id: D
|
||||
---CONTENT---
|
||||
ok-d
|
||||
---TASK---
|
||||
id: E
|
||||
---CONTENT---
|
||||
ok-e`
|
||||
stdinReader = bytes.NewReader([]byte(input))
|
||||
os.Args = []string{"codeagent-wrapper", "--parallel"}
|
||||
|
||||
var exitCode int
|
||||
output := captureStdout(t, func() {
|
||||
exitCode = run()
|
||||
})
|
||||
|
||||
payload := parseIntegrationOutput(t, output)
|
||||
if exitCode == 0 {
|
||||
t.Fatalf("expected non-zero exit when a task fails, got %d", exitCode)
|
||||
}
|
||||
|
||||
resA := findResultByID(t, payload, "A")
|
||||
resB := findResultByID(t, payload, "B")
|
||||
resD := findResultByID(t, payload, "D")
|
||||
resE := findResultByID(t, payload, "E")
|
||||
|
||||
if resA.ExitCode == 0 {
|
||||
t.Fatalf("task A should fail, got %+v", resA)
|
||||
}
|
||||
if resB.ExitCode == 0 || !strings.Contains(resB.Error, "dependencies") {
|
||||
t.Fatalf("task B should be skipped due to dependency failure, got %+v", resB)
|
||||
}
|
||||
if resD.ExitCode != 0 || resE.ExitCode != 0 {
|
||||
t.Fatalf("independent tasks should run successfully, D=%+v E=%+v", resD, resE)
|
||||
}
|
||||
if payload.Summary.Failed != 2 || payload.Summary.Total != 4 {
|
||||
t.Fatalf("unexpected summary after partial failure: %+v", payload.Summary)
|
||||
}
|
||||
if resA.LogPath != logPathFor("A") {
|
||||
t.Fatalf("task A log path = %q, want %q", resA.LogPath, logPathFor("A"))
|
||||
}
|
||||
if resB.LogPath != "" {
|
||||
t.Fatalf("task B should not report a log path when skipped, got %q", resB.LogPath)
|
||||
}
|
||||
if resD.LogPath != logPathFor("D") || resE.LogPath != logPathFor("E") {
|
||||
t.Fatalf("expected log paths for D/E, got D=%q E=%q", resD.LogPath, resE.LogPath)
|
||||
}
|
||||
// Summary mode shows log paths in table, verify they appear in output
|
||||
for _, id := range []string{"A", "D", "E"} {
|
||||
logPath := logPathFor(id)
|
||||
if !strings.Contains(output, logPath) {
|
||||
t.Fatalf("task %s log path %q not found in output:\n%s", id, logPath, output)
|
||||
}
|
||||
}
|
||||
// Task B was skipped, should have "-" or empty log path in table
|
||||
if resB.LogPath != "" {
|
||||
t.Fatalf("skipped task B should have empty log path, got %q", resB.LogPath)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunParallelTimeoutPropagation(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
origRun := runCodexTaskFn
|
||||
t.Cleanup(func() {
|
||||
runCodexTaskFn = origRun
|
||||
resetTestHooks()
|
||||
})
|
||||
|
||||
var receivedTimeout int
|
||||
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
|
||||
receivedTimeout = timeout
|
||||
return TaskResult{TaskID: task.ID, ExitCode: 124, Error: "timeout"}
|
||||
}
|
||||
|
||||
t.Setenv("CODEX_TIMEOUT", "1")
|
||||
input := `---TASK---
|
||||
id: T
|
||||
---CONTENT---
|
||||
slow`
|
||||
stdinReader = bytes.NewReader([]byte(input))
|
||||
os.Args = []string{"codeagent-wrapper", "--parallel"}
|
||||
|
||||
exitCode := 0
|
||||
output := captureStdout(t, func() {
|
||||
exitCode = run()
|
||||
})
|
||||
|
||||
payload := parseIntegrationOutput(t, output)
|
||||
if receivedTimeout != 1 {
|
||||
t.Fatalf("expected timeout 1s to propagate, got %d", receivedTimeout)
|
||||
}
|
||||
if exitCode != 124 {
|
||||
t.Fatalf("expected timeout exit code 124, got %d", exitCode)
|
||||
}
|
||||
if payload.Summary.Failed != 1 || payload.Summary.Total != 1 {
|
||||
t.Fatalf("unexpected summary for timeout case: %+v", payload.Summary)
|
||||
}
|
||||
res := findResultByID(t, payload, "T")
|
||||
if res.Error == "" || res.ExitCode != 124 {
|
||||
t.Fatalf("timeout result not propagated, got %+v", res)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConcurrentSpeedupBenchmark(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
origRun := runCodexTaskFn
|
||||
t.Cleanup(func() {
|
||||
runCodexTaskFn = origRun
|
||||
resetTestHooks()
|
||||
})
|
||||
|
||||
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
return TaskResult{TaskID: task.ID}
|
||||
}
|
||||
|
||||
tasks := make([]TaskSpec, 10)
|
||||
for i := range tasks {
|
||||
tasks[i] = TaskSpec{ID: fmt.Sprintf("task-%d", i)}
|
||||
}
|
||||
layers := [][]TaskSpec{tasks}
|
||||
|
||||
serialStart := time.Now()
|
||||
_ = executeConcurrentWithContext(nil, layers, 5, 1)
|
||||
serialElapsed := time.Since(serialStart)
|
||||
|
||||
concurrentStart := time.Now()
|
||||
_ = executeConcurrentWithContext(nil, layers, 5, 0)
|
||||
concurrentElapsed := time.Since(concurrentStart)
|
||||
|
||||
ratio := float64(concurrentElapsed) / float64(serialElapsed)
|
||||
t.Logf("speedup ratio (concurrent/serial)=%.3f", ratio)
|
||||
if concurrentElapsed >= serialElapsed/2 {
|
||||
t.Fatalf("expected concurrent time <50%% of serial, serial=%v concurrent=%v", serialElapsed, concurrentElapsed)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunStartupCleanupRemovesOrphansEndToEnd(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
|
||||
tempDir := setTempDirEnv(t, t.TempDir())
|
||||
|
||||
orphanA := createTempLog(t, tempDir, "codeagent-wrapper-5001.log")
|
||||
orphanB := createTempLog(t, tempDir, "codeagent-wrapper-5002-extra.log")
|
||||
orphanC := createTempLog(t, tempDir, "codeagent-wrapper-5003-suffix.log")
|
||||
runningPID := 81234
|
||||
runningLog := createTempLog(t, tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", runningPID))
|
||||
unrelated := createTempLog(t, tempDir, "wrapper.log")
|
||||
|
||||
stubProcessRunning(t, func(pid int) bool {
|
||||
return pid == runningPID || pid == os.Getpid()
|
||||
})
|
||||
stubProcessStartTime(t, func(pid int) time.Time {
|
||||
if pid == runningPID || pid == os.Getpid() {
|
||||
return time.Now().Add(-1 * time.Hour)
|
||||
}
|
||||
return time.Time{}
|
||||
})
|
||||
|
||||
codexCommand = createFakeCodexScript(t, "tid-startup", "ok")
|
||||
stdinReader = strings.NewReader("")
|
||||
isTerminalFn = func() bool { return true }
|
||||
os.Args = []string{"codeagent-wrapper", "task"}
|
||||
|
||||
if exit := run(); exit != 0 {
|
||||
t.Fatalf("run() exit=%d, want 0", exit)
|
||||
}
|
||||
|
||||
for _, orphan := range []string{orphanA, orphanB, orphanC} {
|
||||
if _, err := os.Stat(orphan); !os.IsNotExist(err) {
|
||||
t.Fatalf("expected orphan %s to be removed, err=%v", orphan, err)
|
||||
}
|
||||
}
|
||||
if _, err := os.Stat(runningLog); err != nil {
|
||||
t.Fatalf("expected running log to remain, err=%v", err)
|
||||
}
|
||||
if _, err := os.Stat(unrelated); err != nil {
|
||||
t.Fatalf("expected unrelated file to remain, err=%v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunStartupCleanupConcurrentWrappers(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
|
||||
tempDir := setTempDirEnv(t, t.TempDir())
|
||||
|
||||
const totalLogs = 40
|
||||
for i := 0; i < totalLogs; i++ {
|
||||
createTempLog(t, tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", 9000+i))
|
||||
}
|
||||
|
||||
stubProcessRunning(t, func(pid int) bool {
|
||||
return false
|
||||
})
|
||||
stubProcessStartTime(t, func(int) time.Time { return time.Time{} })
|
||||
|
||||
var wg sync.WaitGroup
|
||||
const instances = 5
|
||||
start := make(chan struct{})
|
||||
|
||||
for i := 0; i < instances; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
<-start
|
||||
runStartupCleanup()
|
||||
}()
|
||||
}
|
||||
|
||||
close(start)
|
||||
wg.Wait()
|
||||
|
||||
matches, err := filepath.Glob(filepath.Join(tempDir, "codeagent-wrapper-*.log"))
|
||||
if err != nil {
|
||||
t.Fatalf("glob error: %v", err)
|
||||
}
|
||||
if len(matches) != 0 {
|
||||
t.Fatalf("expected all orphan logs to be removed, remaining=%v", matches)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunCleanupFlagEndToEnd_Success(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
|
||||
tempDir := setTempDirEnv(t, t.TempDir())
|
||||
|
||||
basePID := os.Getpid()
|
||||
stalePID1 := basePID + 10000
|
||||
stalePID2 := basePID + 11000
|
||||
keeperPID := basePID + 12000
|
||||
|
||||
staleA := createTempLog(t, tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", stalePID1))
|
||||
staleB := createTempLog(t, tempDir, fmt.Sprintf("codeagent-wrapper-%d-extra.log", stalePID2))
|
||||
keeper := createTempLog(t, tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", keeperPID))
|
||||
|
||||
stubProcessRunning(t, func(pid int) bool {
|
||||
return pid == keeperPID || pid == basePID
|
||||
})
|
||||
stubProcessStartTime(t, func(pid int) time.Time {
|
||||
if pid == keeperPID || pid == basePID {
|
||||
return time.Now().Add(-1 * time.Hour)
|
||||
}
|
||||
return time.Time{}
|
||||
})
|
||||
|
||||
os.Args = []string{"codeagent-wrapper", "--cleanup"}
|
||||
|
||||
var exitCode int
|
||||
output := captureStdout(t, func() {
|
||||
exitCode = run()
|
||||
})
|
||||
|
||||
if exitCode != 0 {
|
||||
t.Fatalf("cleanup exit = %d, want 0", exitCode)
|
||||
}
|
||||
|
||||
// Check that output contains expected counts and file names
|
||||
if !strings.Contains(output, "Cleanup completed") {
|
||||
t.Fatalf("missing 'Cleanup completed' in output: %q", output)
|
||||
}
|
||||
if !strings.Contains(output, "Files scanned: 3") {
|
||||
t.Fatalf("missing 'Files scanned: 3' in output: %q", output)
|
||||
}
|
||||
if !strings.Contains(output, "Files deleted: 2") {
|
||||
t.Fatalf("missing 'Files deleted: 2' in output: %q", output)
|
||||
}
|
||||
if !strings.Contains(output, "Files kept: 1") {
|
||||
t.Fatalf("missing 'Files kept: 1' in output: %q", output)
|
||||
}
|
||||
if !strings.Contains(output, fmt.Sprintf("codeagent-wrapper-%d.log", stalePID1)) || !strings.Contains(output, fmt.Sprintf("codeagent-wrapper-%d-extra.log", stalePID2)) {
|
||||
t.Fatalf("missing deleted file names in output: %q", output)
|
||||
}
|
||||
if !strings.Contains(output, fmt.Sprintf("codeagent-wrapper-%d.log", keeperPID)) {
|
||||
t.Fatalf("missing kept file names in output: %q", output)
|
||||
}
|
||||
|
||||
for _, path := range []string{staleA, staleB} {
|
||||
if _, err := os.Stat(path); !os.IsNotExist(err) {
|
||||
t.Fatalf("expected %s to be removed, err=%v", path, err)
|
||||
}
|
||||
}
|
||||
if _, err := os.Stat(keeper); err != nil {
|
||||
t.Fatalf("expected kept log to remain, err=%v", err)
|
||||
}
|
||||
|
||||
currentLog := filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", os.Getpid()))
|
||||
if _, err := os.Stat(currentLog); err == nil {
|
||||
t.Fatalf("cleanup mode should not create new log file %s", currentLog)
|
||||
} else if !os.IsNotExist(err) {
|
||||
t.Fatalf("stat(%s) unexpected error: %v", currentLog, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunCleanupFlagEndToEnd_FailureDoesNotAffectStartup(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
|
||||
tempDir := setTempDirEnv(t, t.TempDir())
|
||||
|
||||
calls := 0
|
||||
cleanupLogsFn = func() (CleanupStats, error) {
|
||||
calls++
|
||||
return CleanupStats{Scanned: 1}, fmt.Errorf("permission denied")
|
||||
}
|
||||
|
||||
os.Args = []string{"codeagent-wrapper", "--cleanup"}
|
||||
|
||||
var exitCode int
|
||||
errOutput := captureStderr(t, func() {
|
||||
exitCode = run()
|
||||
})
|
||||
|
||||
if exitCode != 1 {
|
||||
t.Fatalf("cleanup failure exit = %d, want 1", exitCode)
|
||||
}
|
||||
if !strings.Contains(errOutput, "Cleanup failed") || !strings.Contains(errOutput, "permission denied") {
|
||||
t.Fatalf("cleanup stderr = %q, want failure message", errOutput)
|
||||
}
|
||||
if calls != 1 {
|
||||
t.Fatalf("cleanup called %d times, want 1", calls)
|
||||
}
|
||||
|
||||
currentLog := filepath.Join(tempDir, fmt.Sprintf("codeagent-wrapper-%d.log", os.Getpid()))
|
||||
if _, err := os.Stat(currentLog); err == nil {
|
||||
t.Fatalf("cleanup failure should not create new log file %s", currentLog)
|
||||
} else if !os.IsNotExist(err) {
|
||||
t.Fatalf("stat(%s) unexpected error: %v", currentLog, err)
|
||||
}
|
||||
|
||||
cleanupLogsFn = func() (CleanupStats, error) {
|
||||
return CleanupStats{}, nil
|
||||
}
|
||||
codexCommand = createFakeCodexScript(t, "tid-cleanup-e2e", "ok")
|
||||
stdinReader = strings.NewReader("")
|
||||
isTerminalFn = func() bool { return true }
|
||||
os.Args = []string{"codeagent-wrapper", "post-cleanup task"}
|
||||
|
||||
var normalExit int
|
||||
normalOutput := captureStdout(t, func() {
|
||||
normalExit = run()
|
||||
})
|
||||
|
||||
if normalExit != 0 {
|
||||
t.Fatalf("normal run exit = %d, want 0", normalExit)
|
||||
}
|
||||
if !strings.Contains(normalOutput, "ok") {
|
||||
t.Fatalf("normal run output = %q, want codex output", normalOutput)
|
||||
}
|
||||
}
|
||||
4805
codeagent-wrapper/internal/app/main_test.go
Normal file
4805
codeagent-wrapper/internal/app/main_test.go
Normal file
File diff suppressed because it is too large
Load Diff
46
codeagent-wrapper/internal/app/os_paths_test.go
Normal file
46
codeagent-wrapper/internal/app/os_paths_test.go
Normal file
@@ -0,0 +1,46 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseArgs_Workdir_OSPaths(t *testing.T) {
|
||||
oldArgv := os.Args
|
||||
t.Cleanup(func() { os.Args = oldArgv })
|
||||
|
||||
workdirs := []struct {
|
||||
name string
|
||||
path string
|
||||
}{
|
||||
{name: "windows drive forward slashes", path: "D:/repo/path"},
|
||||
{name: "windows drive backslashes", path: `C:\repo\path`},
|
||||
{name: "windows UNC", path: `\\server\share\repo`},
|
||||
{name: "unix absolute", path: "/home/user/repo"},
|
||||
{name: "relative", path: "./relative/repo"},
|
||||
}
|
||||
|
||||
for _, wd := range workdirs {
|
||||
t.Run("new mode: "+wd.name, func(t *testing.T) {
|
||||
os.Args = []string{"codeagent-wrapper", "task", wd.path}
|
||||
cfg, err := parseArgs()
|
||||
if err != nil {
|
||||
t.Fatalf("parseArgs() error: %v", err)
|
||||
}
|
||||
if cfg.Mode != "new" || cfg.Task != "task" || cfg.WorkDir != wd.path {
|
||||
t.Fatalf("cfg mismatch: got mode=%q task=%q workdir=%q, want mode=%q task=%q workdir=%q", cfg.Mode, cfg.Task, cfg.WorkDir, "new", "task", wd.path)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("resume mode: "+wd.name, func(t *testing.T) {
|
||||
os.Args = []string{"codeagent-wrapper", "resume", "sid-1", "task", wd.path}
|
||||
cfg, err := parseArgs()
|
||||
if err != nil {
|
||||
t.Fatalf("parseArgs() error: %v", err)
|
||||
}
|
||||
if cfg.Mode != "resume" || cfg.SessionID != "sid-1" || cfg.Task != "task" || cfg.WorkDir != wd.path {
|
||||
t.Fatalf("cfg mismatch: got mode=%q sid=%q task=%q workdir=%q, want mode=%q sid=%q task=%q workdir=%q", cfg.Mode, cfg.SessionID, cfg.Task, cfg.WorkDir, "resume", "sid-1", "task", wd.path)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
9
codeagent-wrapper/internal/app/parallel_config.go
Normal file
9
codeagent-wrapper/internal/app/parallel_config.go
Normal file
@@ -0,0 +1,9 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
executor "codeagent-wrapper/internal/executor"
|
||||
)
|
||||
|
||||
func parseParallelConfig(data []byte) (*ParallelConfig, error) {
|
||||
return executor.ParseParallelConfig(data)
|
||||
}
|
||||
34
codeagent-wrapper/internal/app/parser.go
Normal file
34
codeagent-wrapper/internal/app/parser.go
Normal file
@@ -0,0 +1,34 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
|
||||
parser "codeagent-wrapper/internal/parser"
|
||||
|
||||
"github.com/goccy/go-json"
|
||||
)
|
||||
|
||||
func parseJSONStream(r io.Reader) (message, threadID string) {
|
||||
return parseJSONStreamWithLog(r, logWarn, logInfo)
|
||||
}
|
||||
|
||||
func parseJSONStreamWithWarn(r io.Reader, warnFn func(string)) (message, threadID string) {
|
||||
return parseJSONStreamWithLog(r, warnFn, logInfo)
|
||||
}
|
||||
|
||||
func parseJSONStreamWithLog(r io.Reader, warnFn func(string), infoFn func(string)) (message, threadID string) {
|
||||
return parseJSONStreamInternal(r, warnFn, infoFn, nil, nil)
|
||||
}
|
||||
|
||||
func parseJSONStreamInternal(r io.Reader, warnFn func(string), infoFn func(string), onMessage func(), onComplete func()) (message, threadID string) {
|
||||
return parser.ParseJSONStreamInternal(r, warnFn, infoFn, onMessage, onComplete)
|
||||
}
|
||||
|
||||
func hasKey(m map[string]json.RawMessage, key string) bool { return parser.HasKey(m, key) }
|
||||
|
||||
func discardInvalidJSON(decoder *json.Decoder, reader *bufio.Reader) (*bufio.Reader, error) {
|
||||
return parser.DiscardInvalidJSON(decoder, reader)
|
||||
}
|
||||
|
||||
func normalizeText(text interface{}) string { return parser.NormalizeText(text) }
|
||||
119
codeagent-wrapper/internal/app/stdin_mode_test.go
Normal file
119
codeagent-wrapper/internal/app/stdin_mode_test.go
Normal file
@@ -0,0 +1,119 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRunSingleMode_UseStdin_TargetArgAndTaskText(t *testing.T) {
|
||||
defer resetTestHooks()
|
||||
|
||||
setTempDirEnv(t, t.TempDir())
|
||||
logger, err := NewLogger()
|
||||
if err != nil {
|
||||
t.Fatalf("NewLogger(): %v", err)
|
||||
}
|
||||
setLogger(logger)
|
||||
t.Cleanup(func() { _ = closeLogger() })
|
||||
|
||||
type testCase struct {
|
||||
name string
|
||||
cfgTask string
|
||||
explicit bool
|
||||
stdinData string
|
||||
isTerminal bool
|
||||
|
||||
wantUseStdin bool
|
||||
wantTarget string
|
||||
wantTaskText string
|
||||
}
|
||||
|
||||
longTask := strings.Repeat("a", 801)
|
||||
|
||||
tests := []testCase{
|
||||
{
|
||||
name: "piped input forces stdin mode",
|
||||
cfgTask: "cli-task",
|
||||
stdinData: "piped task text",
|
||||
isTerminal: false,
|
||||
wantUseStdin: true,
|
||||
wantTarget: "-",
|
||||
wantTaskText: "piped task text",
|
||||
},
|
||||
{
|
||||
name: "explicit dash forces stdin mode",
|
||||
cfgTask: "-",
|
||||
explicit: true,
|
||||
stdinData: "explicit task text",
|
||||
isTerminal: true,
|
||||
wantUseStdin: true,
|
||||
wantTarget: "-",
|
||||
wantTaskText: "explicit task text",
|
||||
},
|
||||
{
|
||||
name: "special char backslash forces stdin mode",
|
||||
cfgTask: `C:\repo\file.go`,
|
||||
isTerminal: true,
|
||||
wantUseStdin: true,
|
||||
wantTarget: "-",
|
||||
wantTaskText: `C:\repo\file.go`,
|
||||
},
|
||||
{
|
||||
name: "length>800 forces stdin mode",
|
||||
cfgTask: longTask,
|
||||
isTerminal: true,
|
||||
wantUseStdin: true,
|
||||
wantTarget: "-",
|
||||
wantTaskText: longTask,
|
||||
},
|
||||
{
|
||||
name: "simple task uses argv target",
|
||||
cfgTask: "analyze code",
|
||||
isTerminal: true,
|
||||
wantUseStdin: false,
|
||||
wantTarget: "analyze code",
|
||||
wantTaskText: "analyze code",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var gotTarget string
|
||||
buildCodexArgsFn = func(cfg *Config, targetArg string) []string {
|
||||
gotTarget = targetArg
|
||||
return []string{targetArg}
|
||||
}
|
||||
|
||||
var gotTask TaskSpec
|
||||
runTaskFn = func(task TaskSpec, silent bool, timeout int) TaskResult {
|
||||
gotTask = task
|
||||
return TaskResult{ExitCode: 0, Message: "ok"}
|
||||
}
|
||||
|
||||
stdinReader = strings.NewReader(tt.stdinData)
|
||||
isTerminalFn = func() bool { return tt.isTerminal }
|
||||
|
||||
cfg := &Config{
|
||||
Mode: "new",
|
||||
Task: tt.cfgTask,
|
||||
WorkDir: defaultWorkdir,
|
||||
Backend: defaultBackendName,
|
||||
ExplicitStdin: tt.explicit,
|
||||
}
|
||||
|
||||
if code := runSingleMode(cfg, "codeagent-wrapper"); code != 0 {
|
||||
t.Fatalf("runSingleMode() = %d, want 0", code)
|
||||
}
|
||||
|
||||
if gotTarget != tt.wantTarget {
|
||||
t.Fatalf("targetArg = %q, want %q", gotTarget, tt.wantTarget)
|
||||
}
|
||||
if gotTask.UseStdin != tt.wantUseStdin {
|
||||
t.Fatalf("taskSpec.UseStdin = %v, want %v", gotTask.UseStdin, tt.wantUseStdin)
|
||||
}
|
||||
if gotTask.Task != tt.wantTaskText {
|
||||
t.Fatalf("taskSpec.Task = %q, want %q", gotTask.Task, tt.wantTaskText)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
8
codeagent-wrapper/internal/app/task_types.go
Normal file
8
codeagent-wrapper/internal/app/task_types.go
Normal file
@@ -0,0 +1,8 @@
|
||||
package wrapper
|
||||
|
||||
import executor "codeagent-wrapper/internal/executor"
|
||||
|
||||
// Type aliases to keep existing names in the wrapper package.
|
||||
type ParallelConfig = executor.ParallelConfig
|
||||
type TaskSpec = executor.TaskSpec
|
||||
type TaskResult = executor.TaskResult
|
||||
30
codeagent-wrapper/internal/app/terminal_test.go
Normal file
30
codeagent-wrapper/internal/app/terminal_test.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDefaultIsTerminalCoverage(t *testing.T) {
|
||||
oldStdin := os.Stdin
|
||||
t.Cleanup(func() { os.Stdin = oldStdin })
|
||||
|
||||
f, err := os.CreateTemp(t.TempDir(), "stdin-*")
|
||||
if err != nil {
|
||||
t.Fatalf("os.CreateTemp() error = %v", err)
|
||||
}
|
||||
defer os.Remove(f.Name())
|
||||
|
||||
os.Stdin = f
|
||||
if got := defaultIsTerminal(); got {
|
||||
t.Fatalf("defaultIsTerminal() = %v, want false for regular file", got)
|
||||
}
|
||||
|
||||
if err := f.Close(); err != nil {
|
||||
t.Fatalf("Close() error = %v", err)
|
||||
}
|
||||
os.Stdin = f
|
||||
if got := defaultIsTerminal(); !got {
|
||||
t.Fatalf("defaultIsTerminal() = %v, want true when Stat fails", got)
|
||||
}
|
||||
}
|
||||
134
codeagent-wrapper/internal/app/tmpdir.go
Normal file
134
codeagent-wrapper/internal/app/tmpdir.go
Normal file
@@ -0,0 +1,134 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const tmpDirEnvOverrideKey = "CODEAGENT_TMPDIR"
|
||||
|
||||
var tmpDirExecutableCheckFn = canExecuteInDir
|
||||
|
||||
func ensureExecutableTempDir() {
|
||||
// Windows doesn't execute scripts via shebang, and os.TempDir semantics differ.
|
||||
if runtime.GOOS == "windows" {
|
||||
return
|
||||
}
|
||||
|
||||
if override := strings.TrimSpace(os.Getenv(tmpDirEnvOverrideKey)); override != "" {
|
||||
if resolved, err := resolvePathWithTilde(override); err == nil {
|
||||
if err := os.MkdirAll(resolved, 0o700); err == nil {
|
||||
if ok, _ := tmpDirExecutableCheckFn(resolved); ok {
|
||||
setTempEnv(resolved)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
// Invalid override should not block execution; fall back to default behavior.
|
||||
}
|
||||
|
||||
current := currentTempDirFromEnv()
|
||||
if current == "" {
|
||||
current = "/tmp"
|
||||
}
|
||||
|
||||
ok, _ := tmpDirExecutableCheckFn(current)
|
||||
if ok {
|
||||
return
|
||||
}
|
||||
|
||||
fallback := defaultFallbackTempDir()
|
||||
if fallback == "" {
|
||||
return
|
||||
}
|
||||
if err := os.MkdirAll(fallback, 0o700); err != nil {
|
||||
return
|
||||
}
|
||||
if ok, _ := tmpDirExecutableCheckFn(fallback); !ok {
|
||||
return
|
||||
}
|
||||
|
||||
setTempEnv(fallback)
|
||||
fmt.Fprintf(os.Stderr, "INFO: temp dir is not executable; set TMPDIR=%s\n", fallback)
|
||||
}
|
||||
|
||||
func setTempEnv(dir string) {
|
||||
_ = os.Setenv("TMPDIR", dir)
|
||||
_ = os.Setenv("TMP", dir)
|
||||
_ = os.Setenv("TEMP", dir)
|
||||
}
|
||||
|
||||
func defaultFallbackTempDir() string {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil || strings.TrimSpace(home) == "" {
|
||||
return ""
|
||||
}
|
||||
return filepath.Clean(filepath.Join(home, ".codeagent", "tmp"))
|
||||
}
|
||||
|
||||
// currentTempDirFromEnv returns the first non-blank value among TMPDIR,
// TMP and TEMP (trimmed), or "" when none is set.
func currentTempDirFromEnv() string {
	for _, key := range []string{"TMPDIR", "TMP", "TEMP"} {
		if val := strings.TrimSpace(os.Getenv(key)); val != "" {
			return val
		}
	}
	return ""
}
|
||||
|
||||
func resolvePathWithTilde(p string) (string, error) {
|
||||
p = strings.TrimSpace(p)
|
||||
if p == "" {
|
||||
return "", errors.New("empty path")
|
||||
}
|
||||
|
||||
if p == "~" || strings.HasPrefix(p, "~/") || strings.HasPrefix(p, "~\\") {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil || strings.TrimSpace(home) == "" {
|
||||
if err == nil {
|
||||
err = errors.New("empty home directory")
|
||||
}
|
||||
return "", fmt.Errorf("resolve ~: %w", err)
|
||||
}
|
||||
if p == "~" {
|
||||
return home, nil
|
||||
}
|
||||
return filepath.Clean(home + p[1:]), nil
|
||||
}
|
||||
|
||||
return filepath.Clean(p), nil
|
||||
}
|
||||
|
||||
// canExecuteInDir probes whether shell scripts can be executed from dir by
// writing a tiny "#!/bin/sh" script there, chmod-ing it 0700, and running
// it. Returns (false, err) on any failure, including a noexec mount.
func canExecuteInDir(dir string) (bool, error) {
	dir = strings.TrimSpace(dir)
	if dir == "" {
		return false, errors.New("empty dir")
	}

	probe, err := os.CreateTemp(dir, "codeagent-tmp-exec-*")
	if err != nil {
		return false, err
	}
	scriptPath := probe.Name()
	defer func() { _ = os.Remove(scriptPath) }() // always clean up the probe script

	_, writeErr := probe.WriteString("#!/bin/sh\nexit 0\n")
	closeErr := probe.Close()
	if writeErr != nil {
		return false, writeErr
	}
	if closeErr != nil {
		return false, closeErr
	}
	if err := os.Chmod(scriptPath, 0o700); err != nil {
		return false, err
	}

	// The actual test: can the kernel exec a script from this directory?
	if err := exec.Command(scriptPath).Run(); err != nil {
		return false, err
	}
	return true, nil
}
|
||||
96
codeagent-wrapper/internal/app/tmpdir_test.go
Normal file
96
codeagent-wrapper/internal/app/tmpdir_test.go
Normal file
@@ -0,0 +1,96 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestEnsureExecutableTempDir_Override(t *testing.T) {
|
||||
restore := captureTempEnv()
|
||||
t.Cleanup(restore)
|
||||
|
||||
t.Setenv("HOME", t.TempDir())
|
||||
t.Setenv("USERPROFILE", os.Getenv("HOME"))
|
||||
|
||||
orig := tmpDirExecutableCheckFn
|
||||
tmpDirExecutableCheckFn = func(string) (bool, error) { return true, nil }
|
||||
t.Cleanup(func() { tmpDirExecutableCheckFn = orig })
|
||||
|
||||
override := filepath.Join(t.TempDir(), "mytmp")
|
||||
t.Setenv(tmpDirEnvOverrideKey, override)
|
||||
|
||||
ensureExecutableTempDir()
|
||||
|
||||
if got := os.Getenv("TMPDIR"); got != override {
|
||||
t.Fatalf("TMPDIR=%q, want %q", got, override)
|
||||
}
|
||||
if got := os.Getenv("TMP"); got != override {
|
||||
t.Fatalf("TMP=%q, want %q", got, override)
|
||||
}
|
||||
if got := os.Getenv("TEMP"); got != override {
|
||||
t.Fatalf("TEMP=%q, want %q", got, override)
|
||||
}
|
||||
if st, err := os.Stat(override); err != nil || !st.IsDir() {
|
||||
t.Fatalf("override dir not created: stat=%v err=%v", st, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnsureExecutableTempDir_FallbackWhenCurrentNotExecutable(t *testing.T) {
|
||||
restore := captureTempEnv()
|
||||
t.Cleanup(restore)
|
||||
|
||||
home := t.TempDir()
|
||||
t.Setenv("HOME", home)
|
||||
t.Setenv("USERPROFILE", home)
|
||||
|
||||
cur := filepath.Join(t.TempDir(), "cur-tmp")
|
||||
if err := os.MkdirAll(cur, 0o700); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Setenv("TMPDIR", cur)
|
||||
|
||||
fallback := filepath.Join(home, ".codeagent", "tmp")
|
||||
|
||||
orig := tmpDirExecutableCheckFn
|
||||
tmpDirExecutableCheckFn = func(dir string) (bool, error) {
|
||||
if filepath.Clean(dir) == filepath.Clean(cur) {
|
||||
return false, nil
|
||||
}
|
||||
if filepath.Clean(dir) == filepath.Clean(fallback) {
|
||||
return true, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
t.Cleanup(func() { tmpDirExecutableCheckFn = orig })
|
||||
|
||||
ensureExecutableTempDir()
|
||||
|
||||
if got := os.Getenv("TMPDIR"); filepath.Clean(got) != filepath.Clean(fallback) {
|
||||
t.Fatalf("TMPDIR=%q, want %q", got, fallback)
|
||||
}
|
||||
if st, err := os.Stat(fallback); err != nil || !st.IsDir() {
|
||||
t.Fatalf("fallback dir not created: stat=%v err=%v", st, err)
|
||||
}
|
||||
}
|
||||
|
||||
func captureTempEnv() func() {
|
||||
type entry struct {
|
||||
set bool
|
||||
val string
|
||||
}
|
||||
snapshot := make(map[string]entry, 3)
|
||||
for _, k := range []string{"TMPDIR", "TMP", "TEMP"} {
|
||||
v, ok := os.LookupEnv(k)
|
||||
snapshot[k] = entry{set: ok, val: v}
|
||||
}
|
||||
return func() {
|
||||
for k, e := range snapshot {
|
||||
if !e.set {
|
||||
_ = os.Unsetenv(k)
|
||||
continue
|
||||
}
|
||||
_ = os.Setenv(k, e.val)
|
||||
}
|
||||
}
|
||||
}
|
||||
523
codeagent-wrapper/internal/app/utils.go
Normal file
523
codeagent-wrapper/internal/app/utils.go
Normal file
@@ -0,0 +1,523 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
utils "codeagent-wrapper/internal/utils"
|
||||
)
|
||||
|
||||
func resolveTimeout() int {
|
||||
raw := os.Getenv("CODEX_TIMEOUT")
|
||||
if raw == "" {
|
||||
return defaultTimeout
|
||||
}
|
||||
|
||||
parsed, err := strconv.Atoi(raw)
|
||||
if err != nil || parsed <= 0 {
|
||||
logWarn(fmt.Sprintf("Invalid CODEX_TIMEOUT '%s', falling back to %ds", raw, defaultTimeout))
|
||||
return defaultTimeout
|
||||
}
|
||||
|
||||
if parsed > 10000 {
|
||||
return parsed / 1000
|
||||
}
|
||||
return parsed
|
||||
}
|
||||
|
||||
func readPipedTask() (string, error) {
|
||||
if isTerminal() {
|
||||
logInfo("Stdin is tty, skipping pipe read")
|
||||
return "", nil
|
||||
}
|
||||
logInfo("Reading from stdin pipe...")
|
||||
data, err := io.ReadAll(stdinReader)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("read stdin: %w", err)
|
||||
}
|
||||
if len(data) == 0 {
|
||||
logInfo("Stdin pipe returned empty data")
|
||||
return "", nil
|
||||
}
|
||||
logInfo(fmt.Sprintf("Read %d bytes from stdin pipe", len(data)))
|
||||
return string(data), nil
|
||||
}
|
||||
|
||||
func shouldUseStdin(taskText string, piped bool) bool {
|
||||
if piped {
|
||||
return true
|
||||
}
|
||||
if len(taskText) > 800 {
|
||||
return true
|
||||
}
|
||||
return strings.ContainsAny(taskText, stdinSpecialChars)
|
||||
}
|
||||
|
||||
// defaultIsTerminal reports whether os.Stdin is a character device (a tty).
// When Stat fails it conservatively answers true, so the wrapper will not
// try to read a task from a broken stdin.
func defaultIsTerminal() bool {
	info, err := os.Stdin.Stat()
	if err != nil {
		return true
	}
	return info.Mode()&os.ModeCharDevice != 0
}
|
||||
|
||||
func isTerminal() bool {
|
||||
return isTerminalFn()
|
||||
}
|
||||
|
||||
func getEnv(key, defaultValue string) string {
|
||||
if val := os.Getenv(key); val != "" {
|
||||
return val
|
||||
}
|
||||
return defaultValue
|
||||
}
|
||||
|
||||
type logWriter struct {
|
||||
prefix string
|
||||
maxLen int
|
||||
buf bytes.Buffer
|
||||
dropped bool
|
||||
}
|
||||
|
||||
func newLogWriter(prefix string, maxLen int) *logWriter {
|
||||
if maxLen <= 0 {
|
||||
maxLen = codexLogLineLimit
|
||||
}
|
||||
return &logWriter{prefix: prefix, maxLen: maxLen}
|
||||
}
|
||||
|
||||
func (lw *logWriter) Write(p []byte) (int, error) {
|
||||
if lw == nil {
|
||||
return len(p), nil
|
||||
}
|
||||
total := len(p)
|
||||
for len(p) > 0 {
|
||||
if idx := bytes.IndexByte(p, '\n'); idx >= 0 {
|
||||
lw.writeLimited(p[:idx])
|
||||
lw.logLine(true)
|
||||
p = p[idx+1:]
|
||||
continue
|
||||
}
|
||||
lw.writeLimited(p)
|
||||
break
|
||||
}
|
||||
return total, nil
|
||||
}
|
||||
|
||||
func (lw *logWriter) Flush() {
|
||||
if lw == nil || lw.buf.Len() == 0 {
|
||||
return
|
||||
}
|
||||
lw.logLine(false)
|
||||
}
|
||||
|
||||
func (lw *logWriter) logLine(force bool) {
|
||||
if lw == nil {
|
||||
return
|
||||
}
|
||||
line := lw.buf.String()
|
||||
dropped := lw.dropped
|
||||
lw.dropped = false
|
||||
lw.buf.Reset()
|
||||
if line == "" && !force {
|
||||
return
|
||||
}
|
||||
if lw.maxLen > 0 {
|
||||
if dropped {
|
||||
if lw.maxLen > 3 {
|
||||
line = line[:min(len(line), lw.maxLen-3)] + "..."
|
||||
} else {
|
||||
line = line[:min(len(line), lw.maxLen)]
|
||||
}
|
||||
} else if len(line) > lw.maxLen {
|
||||
cutoff := lw.maxLen
|
||||
if cutoff > 3 {
|
||||
line = line[:cutoff-3] + "..."
|
||||
} else {
|
||||
line = line[:cutoff]
|
||||
}
|
||||
}
|
||||
}
|
||||
logInfo(lw.prefix + line)
|
||||
}
|
||||
|
||||
func (lw *logWriter) writeLimited(p []byte) {
|
||||
if lw == nil || len(p) == 0 {
|
||||
return
|
||||
}
|
||||
if lw.maxLen <= 0 {
|
||||
lw.buf.Write(p)
|
||||
return
|
||||
}
|
||||
|
||||
remaining := lw.maxLen - lw.buf.Len()
|
||||
if remaining <= 0 {
|
||||
lw.dropped = true
|
||||
return
|
||||
}
|
||||
if len(p) <= remaining {
|
||||
lw.buf.Write(p)
|
||||
return
|
||||
}
|
||||
lw.buf.Write(p[:remaining])
|
||||
lw.dropped = true
|
||||
}
|
||||
|
||||
// tailBuffer is an io.Writer that retains only the last `limit` bytes ever
// written to it. A limit <= 0 disables retention entirely (writes are
// accepted and discarded).
type tailBuffer struct {
	limit int    // maximum number of bytes to retain
	data  []byte // the retained tail, at most limit bytes
}

// Write appends p, discarding the oldest bytes so that at most b.limit bytes
// are kept. It always succeeds and reports len(p).
//
// Fix vs. the previous version: overflow used to be handled with
// append(b.data[overflow:], p...), which advances the slice's offset into
// the backing array on every overflowing write; the buffer therefore crept
// through its storage and forced a reallocation each time the capacity was
// exhausted. The retained suffix is now shifted to the front with copy, so
// the backing array is reused indefinitely. Retained content is unchanged.
func (b *tailBuffer) Write(p []byte) (int, error) {
	if b.limit <= 0 {
		return len(p), nil
	}

	// A write at least as large as the limit replaces the tail wholesale.
	if len(p) >= b.limit {
		b.data = append(b.data[:0], p[len(p)-b.limit:]...)
		return len(p), nil
	}

	total := len(b.data) + len(p)
	if total <= b.limit {
		b.data = append(b.data, p...)
		return len(p), nil
	}

	// Drop the oldest `overflow` bytes: slide the keepers to the front of
	// the backing array, then append the new data after them.
	overflow := total - b.limit
	kept := copy(b.data, b.data[overflow:])
	b.data = append(b.data[:kept], p...)
	return len(p), nil
}

// String returns the retained tail as a string.
func (b *tailBuffer) String() string {
	return string(b.data)
}
|
||||
|
||||
func truncate(s string, maxLen int) string {
|
||||
return utils.Truncate(s, maxLen)
|
||||
}
|
||||
|
||||
// safeTruncate safely truncates string to maxLen, avoiding panic and UTF-8 corruption.
|
||||
func safeTruncate(s string, maxLen int) string {
|
||||
return utils.SafeTruncate(s, maxLen)
|
||||
}
|
||||
|
||||
// sanitizeOutput removes ANSI escape sequences and control characters.
|
||||
func sanitizeOutput(s string) string {
|
||||
return utils.SanitizeOutput(s)
|
||||
}
|
||||
|
||||
func min(a, b int) int {
|
||||
return utils.Min(a, b)
|
||||
}
|
||||
|
||||
// hello returns the canonical smoke-test greeting.
func hello() string {
	return "hello world"
}

// greet returns a greeting addressed to name.
func greet(name string) string {
	return "hello " + name
}

// farewell returns a goodbye addressed to name.
func farewell(name string) string {
	return "goodbye " + name
}
|
||||
|
||||
// extractCoverageFromLines scans pre-split output lines for a plausible
// coverage percentage (0-100, formatted "NN%" or "NN.N%") appearing on a
// line that also mentions a coverage-related keyword. A single bare "NN%"
// line is accepted directly. Returns "" when nothing matches.
func extractCoverageFromLines(lines []string) string {
	if len(lines) == 0 {
		return ""
	}

	// Ignore trailing blank lines when deciding if the input is one line.
	last := len(lines)
	for last > 0 && strings.TrimSpace(lines[last-1]) == "" {
		last--
	}

	// Special case: a single line that is just a percentage.
	if last == 1 {
		candidate := strings.TrimSpace(lines[0])
		if strings.HasSuffix(candidate, "%") {
			if v, err := strconv.ParseFloat(strings.TrimSuffix(candidate, "%"), 64); err == nil && v >= 0 && v <= 100 {
				return candidate
			}
		}
	}

	keywords := []string{"file", "stmt", "branch", "line", "coverage", "total"}

	for _, line := range lines[:last] {
		lower := strings.ToLower(line)

		// Require a coverage-ish keyword (matched as a token prefix) on the line.
		keywordFound := false
		for _, token := range strings.FieldsFunc(lower, func(r rune) bool { return r < 'a' || r > 'z' }) {
			for _, kw := range keywords {
				if strings.HasPrefix(token, kw) {
					keywordFound = true
					break
				}
			}
			if keywordFound {
				break
			}
		}
		if !keywordFound || !strings.Contains(line, "%") {
			continue
		}

		// Pull out the number immediately preceding each '%'.
		for i := 1; i < len(line); i++ {
			if line[i] != '%' {
				continue
			}
			j := i - 1
			for j >= 0 && (line[j] == '.' || (line[j] >= '0' && line[j] <= '9')) {
				j--
			}
			if j >= i-1 {
				continue // no digits directly before this '%'
			}
			numStr := line[j+1 : i]
			// Accept only values in a sane percentage range.
			if v, err := strconv.ParseFloat(numStr, 64); err == nil && v >= 0 && v <= 100 {
				return numStr + "%"
			}
		}
	}

	return ""
}
|
||||
|
||||
// extractCoverage extracts coverage percentage from task output
|
||||
// Supports common formats: "Coverage: 92%", "92% coverage", "coverage 92%", "TOTAL 92%"
|
||||
func extractCoverage(message string) string {
|
||||
if message == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
return extractCoverageFromLines(strings.Split(message, "\n"))
|
||||
}
|
||||
|
||||
// extractCoverageNum converts a "NN%" / "NN.N%" coverage string to its
// numeric value for comparison; malformed or empty input yields 0.
func extractCoverageNum(coverage string) float64 {
	if coverage == "" {
		return 0
	}
	v, err := strconv.ParseFloat(strings.TrimSuffix(coverage, "%"), 64)
	if err != nil {
		return 0
	}
	return v
}
|
||||
|
||||
// extractFilesChangedFromLines collects up to 10 distinct file paths from
// pre-split output lines. Lines with an explicit action prefix
// ("Modified:", "Created:", ...) contribute their path directly; otherwise
// any whitespace-separated token ending in a known source extension counts.
// Surrounding quotes/brackets and a leading "@" are stripped from paths.
func extractFilesChangedFromLines(lines []string) []string {
	if len(lines) == 0 {
		return nil
	}

	var files []string
	seen := make(map[string]bool)
	prefixes := []string{"Modified:", "Created:", "Updated:", "Edited:", "Wrote:", "Changed:"}
	exts := []string{".ts", ".tsx", ".js", ".jsx", ".go", ".py", ".rs", ".java", ".vue", ".css", ".scss", ".md", ".json", ".yaml", ".yml", ".toml"}

	// record adds a cleaned, non-empty, unseen path to the result.
	record := func(path string) {
		if path != "" && !seen[path] {
			files = append(files, path)
			seen[path] = true
		}
	}
	// clean strips decoration characters and a leading "@" from a candidate.
	clean := func(s string) string {
		s = strings.Trim(s, "`\"'()[],:")
		return strings.TrimPrefix(s, "@")
	}

	for _, line := range lines {
		line = strings.TrimSpace(line)

		// Pattern 1: "Modified: path/to/file.ts" style action prefixes.
		prefixed := false
		for _, prefix := range prefixes {
			if strings.HasPrefix(line, prefix) {
				record(clean(strings.TrimSpace(strings.TrimPrefix(line, prefix))))
				prefixed = true
				break
			}
		}
		if prefixed {
			continue
		}

		// Pattern 2: free-standing tokens ending in a source extension.
		for _, token := range strings.Fields(line) {
			token = clean(token)
			for _, ext := range exts {
				if strings.HasSuffix(token, ext) && !seen[token] {
					record(token)
					break
				}
			}
		}
	}

	// Cap the list to avoid bloating summaries.
	if len(files) > 10 {
		files = files[:10]
	}
	return files
}
|
||||
|
||||
// extractFilesChanged extracts list of changed files from task output
|
||||
// Looks for common patterns like "Modified: file.ts", "Created: file.ts", file paths in output
|
||||
func extractFilesChanged(message string) []string {
|
||||
if message == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
return extractFilesChangedFromLines(strings.Split(message, "\n"))
|
||||
}
|
||||
|
||||
// extractTestResultsFromLines extracts test results from pre-split lines.
|
||||
func extractTestResultsFromLines(lines []string) (passed, failed int) {
|
||||
if len(lines) == 0 {
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
// Common patterns:
|
||||
// pytest: "12 passed, 2 failed"
|
||||
// jest: "Tests: 2 failed, 12 passed"
|
||||
// go: "ok ... 12 tests"
|
||||
|
||||
for _, line := range lines {
|
||||
line = strings.ToLower(line)
|
||||
|
||||
// Look for test result lines
|
||||
if !strings.Contains(line, "pass") && !strings.Contains(line, "fail") && !strings.Contains(line, "test") {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract numbers near "passed" or "pass"
|
||||
if idx := strings.Index(line, "pass"); idx != -1 {
|
||||
// Look for number before "pass"
|
||||
num := extractNumberBefore(line, idx)
|
||||
if num > 0 {
|
||||
passed = num
|
||||
}
|
||||
}
|
||||
|
||||
// Extract numbers near "failed" or "fail"
|
||||
if idx := strings.Index(line, "fail"); idx != -1 {
|
||||
num := extractNumberBefore(line, idx)
|
||||
if num > 0 {
|
||||
failed = num
|
||||
}
|
||||
}
|
||||
|
||||
// go test style: "ok ... 12 tests"
|
||||
if passed == 0 {
|
||||
if idx := strings.Index(line, "test"); idx != -1 {
|
||||
num := extractNumberBefore(line, idx)
|
||||
if num > 0 {
|
||||
passed = num
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we found both, stop
|
||||
if passed > 0 && failed > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return passed, failed
|
||||
}
|
||||
|
||||
// extractTestResults extracts test pass/fail counts from task output
|
||||
func extractTestResults(message string) (passed, failed int) {
|
||||
if message == "" {
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
return extractTestResultsFromLines(strings.Split(message, "\n"))
|
||||
}
|
||||
|
||||
// extractNumberBefore extracts a decimal number that appears immediately
// before index idx in s, skipping any spaces, colons or commas that sit
// between the number and idx. It returns 0 when no number is found.
func extractNumberBefore(s string, idx int) int {
	if idx <= 0 {
		return 0
	}

	// Skip separator characters walking backwards from idx.
	end := idx - 1
	for end >= 0 {
		c := s[end]
		if c != ' ' && c != ':' && c != ',' {
			break
		}
		end--
	}
	if end < 0 {
		return 0
	}

	// Collect the contiguous run of ASCII digits ending at end.
	start := end
	for start >= 0 && '0' <= s[start] && s[start] <= '9' {
		start--
	}
	start++
	if start > end {
		// No digits directly before the separators.
		return 0
	}

	n, err := strconv.Atoi(s[start : end+1])
	if err != nil {
		return 0
	}
	return n
}
|
||||
|
||||
// extractKeyOutputFromLines extracts key output from pre-split lines.
|
||||
func extractKeyOutputFromLines(lines []string, maxLen int) string {
|
||||
if len(lines) == 0 || maxLen <= 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Priority 1: Look for explicit summary lines
|
||||
for _, line := range lines {
|
||||
line = strings.TrimSpace(line)
|
||||
lower := strings.ToLower(line)
|
||||
if strings.HasPrefix(lower, "summary:") || strings.HasPrefix(lower, "completed:") ||
|
||||
strings.HasPrefix(lower, "implemented:") || strings.HasPrefix(lower, "added:") ||
|
||||
strings.HasPrefix(lower, "created:") || strings.HasPrefix(lower, "fixed:") {
|
||||
content := line
|
||||
for _, prefix := range []string{"Summary:", "Completed:", "Implemented:", "Added:", "Created:", "Fixed:",
|
||||
"summary:", "completed:", "implemented:", "added:", "created:", "fixed:"} {
|
||||
content = strings.TrimPrefix(content, prefix)
|
||||
}
|
||||
content = strings.TrimSpace(content)
|
||||
if len(content) > 0 {
|
||||
return safeTruncate(content, maxLen)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Priority 2: First meaningful line (skip noise)
|
||||
for _, line := range lines {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" || strings.HasPrefix(line, "```") || strings.HasPrefix(line, "---") ||
|
||||
strings.HasPrefix(line, "#") || strings.HasPrefix(line, "//") {
|
||||
continue
|
||||
}
|
||||
// Skip very short lines (likely headers or markers)
|
||||
if len(line) < 20 {
|
||||
continue
|
||||
}
|
||||
return safeTruncate(line, maxLen)
|
||||
}
|
||||
|
||||
// Fallback: truncate entire message
|
||||
clean := strings.TrimSpace(strings.Join(lines, "\n"))
|
||||
return safeTruncate(clean, maxLen)
|
||||
}
|
||||
143
codeagent-wrapper/internal/app/utils_test.go
Normal file
143
codeagent-wrapper/internal/app/utils_test.go
Normal file
@@ -0,0 +1,143 @@
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestExtractCoverage(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
in string
|
||||
want string
|
||||
}{
|
||||
{"bare int", "92%", "92%"},
|
||||
{"bare float", "92.5%", "92.5%"},
|
||||
{"coverage prefix", "coverage: 92%", "92%"},
|
||||
{"total prefix", "TOTAL 92%", "92%"},
|
||||
{"all files", "All files 92%", "92%"},
|
||||
{"empty", "", ""},
|
||||
{"no number", "coverage: N/A", ""},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := extractCoverage(tt.in); got != tt.want {
|
||||
t.Fatalf("extractCoverage(%q) = %q, want %q", tt.in, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractTestResults(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
in string
|
||||
wantPassed int
|
||||
wantFailed int
|
||||
}{
|
||||
{"pytest one line", "12 passed, 2 failed", 12, 2},
|
||||
{"pytest split lines", "12 passed\n2 failed", 12, 2},
|
||||
{"jest format", "Tests: 2 failed, 12 passed, 14 total", 12, 2},
|
||||
{"go test style count", "ok\texample.com/foo\t0.12s\t12 tests", 12, 0},
|
||||
{"zero counts", "0 passed, 0 failed", 0, 0},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
passed, failed := extractTestResults(tt.in)
|
||||
if passed != tt.wantPassed || failed != tt.wantFailed {
|
||||
t.Fatalf("extractTestResults(%q) = (%d, %d), want (%d, %d)", tt.in, passed, failed, tt.wantPassed, tt.wantFailed)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractFilesChanged(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
in string
|
||||
want []string
|
||||
}{
|
||||
{"root file", "Modified: main.go\n", []string{"main.go"}},
|
||||
{"path file", "Created: codeagent-wrapper/utils.go\n", []string{"codeagent-wrapper/utils.go"}},
|
||||
{"at prefix", "Updated: @codeagent-wrapper/main.go\n", []string{"codeagent-wrapper/main.go"}},
|
||||
{"token scan", "Files: @main.go, @codeagent-wrapper/utils.go\n", []string{"main.go", "codeagent-wrapper/utils.go"}},
|
||||
{"space path", "Modified: dir/with space/file.go\n", []string{"dir/with space/file.go"}},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := extractFilesChanged(tt.in); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Fatalf("extractFilesChanged(%q) = %#v, want %#v", tt.in, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("limits to first 10", func(t *testing.T) {
|
||||
var b strings.Builder
|
||||
for i := 0; i < 12; i++ {
|
||||
fmt.Fprintf(&b, "Modified: file%d.go\n", i)
|
||||
}
|
||||
got := extractFilesChanged(b.String())
|
||||
if len(got) != 10 {
|
||||
t.Fatalf("len(files)=%d, want 10: %#v", len(got), got)
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
want := fmt.Sprintf("file%d.go", i)
|
||||
if got[i] != want {
|
||||
t.Fatalf("files[%d]=%q, want %q", i, got[i], want)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestSafeTruncate(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
in string
|
||||
maxLen int
|
||||
want string
|
||||
}{
|
||||
{"empty", "", 4, ""},
|
||||
{"zero maxLen", "hello", 0, ""},
|
||||
{"one rune", "你好", 1, "你"},
|
||||
{"two runes no truncate", "你好", 2, "你好"},
|
||||
{"three runes no truncate", "你好", 3, "你好"},
|
||||
{"two runes truncates long", "你好世界", 2, "你"},
|
||||
{"three runes truncates long", "你好世界", 3, "你"},
|
||||
{"four with ellipsis", "你好世界啊", 4, "你..."},
|
||||
{"emoji", "🙂🙂🙂🙂🙂", 4, "🙂..."},
|
||||
{"no truncate", "你好世界", 4, "你好世界"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := safeTruncate(tt.in, tt.maxLen); got != tt.want {
|
||||
t.Fatalf("safeTruncate(%q, %d) = %q, want %q", tt.in, tt.maxLen, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSanitizeOutput(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
in string
|
||||
want string
|
||||
}{
|
||||
{"ansi", "\x1b[31mred\x1b[0m", "red"},
|
||||
{"control chars", "a\x07b\r\nc\t", "ab\nc\t"},
|
||||
{"normal", "hello\nworld\t!", "hello\nworld\t!"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := sanitizeOutput(tt.in); got != tt.want {
|
||||
t.Fatalf("sanitizeOutput(%q) = %q, want %q", tt.in, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
9
codeagent-wrapper/internal/app/wrapper_name.go
Normal file
9
codeagent-wrapper/internal/app/wrapper_name.go
Normal file
@@ -0,0 +1,9 @@
|
||||
package wrapper
|
||||
|
||||
import ilogger "codeagent-wrapper/internal/logger"
|
||||
|
||||
// wrapperName is the canonical wrapper name re-exported from the logger package.
const wrapperName = ilogger.WrapperName

// currentWrapperName reports the wrapper name currently in effect,
// delegating to the logger package.
func currentWrapperName() string { return ilogger.CurrentWrapperName() }

// primaryLogPrefix reports the primary log prefix, delegating to the
// logger package.
func primaryLogPrefix() string { return ilogger.PrimaryLogPrefix() }
|
||||
33
codeagent-wrapper/internal/backend/backend.go
Normal file
33
codeagent-wrapper/internal/backend/backend.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package backend
|
||||
|
||||
import config "codeagent-wrapper/internal/config"
|
||||
|
||||
// Backend defines the contract for invoking different AI CLI backends.
// Each backend is responsible for supplying the executable command and
// building the argument list based on the wrapper config.
type Backend interface {
	// Name returns the backend identifier (e.g. "claude", "codex").
	Name() string
	// BuildArgs builds the CLI argument list for cfg; targetArg is the
	// final task argument appended to the command line.
	BuildArgs(cfg *config.Config, targetArg string) []string
	// Command returns the executable name to invoke.
	Command() string
	// Env returns backend-specific environment overrides derived from the
	// optional base URL and API key; implementations may return nil.
	Env(baseURL, apiKey string) map[string]string
}
|
||||
|
||||
// Optional logging hooks; they default to no-ops so backend code can
// always call them without nil checks.
var (
	logWarnFn  = func(string) {}
	logErrorFn = func(string) {}
)

// SetLogFuncs configures optional logging hooks used by some backends.
// Callers can safely pass nil to disable the corresponding hook.
func SetLogFuncs(warnFn, errorFn func(string)) {
	noop := func(string) {}
	if warnFn == nil {
		warnFn = noop
	}
	if errorFn == nil {
		errorFn = noop
	}
	logWarnFn = warnFn
	logErrorFn = errorFn
}
|
||||
322
codeagent-wrapper/internal/backend/backend_test.go
Normal file
322
codeagent-wrapper/internal/backend/backend_test.go
Normal file
@@ -0,0 +1,322 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
config "codeagent-wrapper/internal/config"
|
||||
)
|
||||
|
||||
func TestClaudeBuildArgs_ModesAndPermissions(t *testing.T) {
|
||||
backend := ClaudeBackend{}
|
||||
|
||||
t.Run("new mode omits skip-permissions when env disabled", func(t *testing.T) {
|
||||
t.Setenv("CODEAGENT_SKIP_PERMISSIONS", "false")
|
||||
cfg := &config.Config{Mode: "new", WorkDir: "/repo"}
|
||||
got := backend.BuildArgs(cfg, "todo")
|
||||
want := []string{"-p", "--setting-sources", "", "--output-format", "stream-json", "--verbose", "todo"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("new mode includes skip-permissions by default", func(t *testing.T) {
|
||||
cfg := &config.Config{Mode: "new", SkipPermissions: false}
|
||||
got := backend.BuildArgs(cfg, "-")
|
||||
want := []string{"-p", "--dangerously-skip-permissions", "--setting-sources", "", "--output-format", "stream-json", "--verbose", "-"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("resume mode includes session id", func(t *testing.T) {
|
||||
t.Setenv("CODEAGENT_SKIP_PERMISSIONS", "false")
|
||||
cfg := &config.Config{Mode: "resume", SessionID: "sid-123", WorkDir: "/ignored"}
|
||||
got := backend.BuildArgs(cfg, "resume-task")
|
||||
want := []string{"-p", "--setting-sources", "", "-r", "sid-123", "--output-format", "stream-json", "--verbose", "resume-task"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("resume mode without session still returns base flags", func(t *testing.T) {
|
||||
t.Setenv("CODEAGENT_SKIP_PERMISSIONS", "false")
|
||||
cfg := &config.Config{Mode: "resume", WorkDir: "/ignored"}
|
||||
got := backend.BuildArgs(cfg, "follow-up")
|
||||
want := []string{"-p", "--setting-sources", "", "--output-format", "stream-json", "--verbose", "follow-up"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("resume mode can opt-in skip permissions", func(t *testing.T) {
|
||||
cfg := &config.Config{Mode: "resume", SessionID: "sid-123", SkipPermissions: true}
|
||||
got := backend.BuildArgs(cfg, "resume-task")
|
||||
want := []string{"-p", "--dangerously-skip-permissions", "--setting-sources", "", "-r", "sid-123", "--output-format", "stream-json", "--verbose", "resume-task"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("nil config returns nil", func(t *testing.T) {
|
||||
if backend.BuildArgs(nil, "ignored") != nil {
|
||||
t.Fatalf("nil config should return nil args")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestBackendBuildArgs_Model(t *testing.T) {
|
||||
t.Run("claude includes --model when set", func(t *testing.T) {
|
||||
t.Setenv("CODEAGENT_SKIP_PERMISSIONS", "false")
|
||||
backend := ClaudeBackend{}
|
||||
cfg := &config.Config{Mode: "new", Model: "opus"}
|
||||
got := backend.BuildArgs(cfg, "todo")
|
||||
want := []string{"-p", "--setting-sources", "", "--model", "opus", "--output-format", "stream-json", "--verbose", "todo"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("gemini includes -m when set", func(t *testing.T) {
|
||||
backend := GeminiBackend{}
|
||||
cfg := &config.Config{Mode: "new", Model: "gemini-3-pro-preview"}
|
||||
got := backend.BuildArgs(cfg, "task")
|
||||
want := []string{"-o", "stream-json", "-y", "-m", "gemini-3-pro-preview", "task"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("codex includes --model when set", func(t *testing.T) {
|
||||
const key = "CODEX_BYPASS_SANDBOX"
|
||||
t.Setenv(key, "false")
|
||||
|
||||
backend := CodexBackend{}
|
||||
cfg := &config.Config{Mode: "new", WorkDir: "/tmp", Model: "o3"}
|
||||
got := backend.BuildArgs(cfg, "task")
|
||||
want := []string{"e", "--model", "o3", "--skip-git-repo-check", "-C", "/tmp", "--json", "task"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestClaudeBuildArgs_GeminiAndCodexModes(t *testing.T) {
|
||||
t.Run("gemini new mode defaults workdir", func(t *testing.T) {
|
||||
backend := GeminiBackend{}
|
||||
cfg := &config.Config{Mode: "new", WorkDir: "/workspace"}
|
||||
got := backend.BuildArgs(cfg, "task")
|
||||
want := []string{"-o", "stream-json", "-y", "task"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("gemini resume mode uses session id", func(t *testing.T) {
|
||||
backend := GeminiBackend{}
|
||||
cfg := &config.Config{Mode: "resume", SessionID: "sid-999"}
|
||||
got := backend.BuildArgs(cfg, "resume")
|
||||
want := []string{"-o", "stream-json", "-y", "-r", "sid-999", "resume"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("gemini resume mode without session omits identifier", func(t *testing.T) {
|
||||
backend := GeminiBackend{}
|
||||
cfg := &config.Config{Mode: "resume"}
|
||||
got := backend.BuildArgs(cfg, "resume")
|
||||
want := []string{"-o", "stream-json", "-y", "resume"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("gemini nil config returns nil", func(t *testing.T) {
|
||||
backend := GeminiBackend{}
|
||||
if backend.BuildArgs(nil, "ignored") != nil {
|
||||
t.Fatalf("nil config should return nil args")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("gemini stdin mode uses -p flag", func(t *testing.T) {
|
||||
backend := GeminiBackend{}
|
||||
cfg := &config.Config{Mode: "new"}
|
||||
got := backend.BuildArgs(cfg, "-")
|
||||
want := []string{"-o", "stream-json", "-y", "-p", "-"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("codex build args omits bypass flag by default", func(t *testing.T) {
|
||||
const key = "CODEX_BYPASS_SANDBOX"
|
||||
t.Setenv(key, "false")
|
||||
|
||||
backend := CodexBackend{}
|
||||
cfg := &config.Config{Mode: "new", WorkDir: "/tmp"}
|
||||
got := backend.BuildArgs(cfg, "task")
|
||||
want := []string{"e", "--skip-git-repo-check", "-C", "/tmp", "--json", "task"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("codex build args includes bypass flag when enabled", func(t *testing.T) {
|
||||
const key = "CODEX_BYPASS_SANDBOX"
|
||||
t.Setenv(key, "true")
|
||||
|
||||
backend := CodexBackend{}
|
||||
cfg := &config.Config{Mode: "new", WorkDir: "/tmp"}
|
||||
got := backend.BuildArgs(cfg, "task")
|
||||
want := []string{"e", "--dangerously-bypass-approvals-and-sandbox", "--skip-git-repo-check", "-C", "/tmp", "--json", "task"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestClaudeBuildArgs_BackendMetadata(t *testing.T) {
|
||||
tests := []struct {
|
||||
backend Backend
|
||||
name string
|
||||
command string
|
||||
}{
|
||||
{backend: CodexBackend{}, name: "codex", command: "codex"},
|
||||
{backend: ClaudeBackend{}, name: "claude", command: "claude"},
|
||||
{backend: GeminiBackend{}, name: "gemini", command: "gemini"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
if got := tt.backend.Name(); got != tt.name {
|
||||
t.Fatalf("Name() = %s, want %s", got, tt.name)
|
||||
}
|
||||
if got := tt.backend.Command(); got != tt.command {
|
||||
t.Fatalf("Command() = %s, want %s", got, tt.command)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadMinimalEnvSettings(t *testing.T) {
|
||||
home := t.TempDir()
|
||||
t.Setenv("HOME", home)
|
||||
t.Setenv("USERPROFILE", home)
|
||||
|
||||
t.Run("missing file returns empty", func(t *testing.T) {
|
||||
if got := LoadMinimalEnvSettings(); len(got) != 0 {
|
||||
t.Fatalf("got %v, want empty", got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("valid env returns string map", func(t *testing.T) {
|
||||
dir := filepath.Join(home, ".claude")
|
||||
if err := os.MkdirAll(dir, 0o755); err != nil {
|
||||
t.Fatalf("MkdirAll: %v", err)
|
||||
}
|
||||
path := filepath.Join(dir, "settings.json")
|
||||
data := []byte(`{"env":{"ANTHROPIC_API_KEY":"secret","FOO":"bar"}}`)
|
||||
if err := os.WriteFile(path, data, 0o600); err != nil {
|
||||
t.Fatalf("WriteFile: %v", err)
|
||||
}
|
||||
|
||||
got := LoadMinimalEnvSettings()
|
||||
if got["ANTHROPIC_API_KEY"] != "secret" || got["FOO"] != "bar" {
|
||||
t.Fatalf("got %v, want keys present", got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("non-string values are ignored", func(t *testing.T) {
|
||||
dir := filepath.Join(home, ".claude")
|
||||
path := filepath.Join(dir, "settings.json")
|
||||
data := []byte(`{"env":{"GOOD":"ok","BAD":123,"ALSO_BAD":true}}`)
|
||||
if err := os.WriteFile(path, data, 0o600); err != nil {
|
||||
t.Fatalf("WriteFile: %v", err)
|
||||
}
|
||||
|
||||
got := LoadMinimalEnvSettings()
|
||||
if got["GOOD"] != "ok" {
|
||||
t.Fatalf("got %v, want GOOD=ok", got)
|
||||
}
|
||||
if _, ok := got["BAD"]; ok {
|
||||
t.Fatalf("got %v, want BAD omitted", got)
|
||||
}
|
||||
if _, ok := got["ALSO_BAD"]; ok {
|
||||
t.Fatalf("got %v, want ALSO_BAD omitted", got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("oversized file returns empty", func(t *testing.T) {
|
||||
dir := filepath.Join(home, ".claude")
|
||||
path := filepath.Join(dir, "settings.json")
|
||||
data := bytes.Repeat([]byte("a"), MaxClaudeSettingsBytes+1)
|
||||
if err := os.WriteFile(path, data, 0o600); err != nil {
|
||||
t.Fatalf("WriteFile: %v", err)
|
||||
}
|
||||
if got := LoadMinimalEnvSettings(); len(got) != 0 {
|
||||
t.Fatalf("got %v, want empty", got)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestOpencodeBackend_BuildArgs(t *testing.T) {
|
||||
backend := OpencodeBackend{}
|
||||
|
||||
t.Run("basic", func(t *testing.T) {
|
||||
cfg := &config.Config{Mode: "new"}
|
||||
got := backend.BuildArgs(cfg, "hello")
|
||||
want := []string{"run", "--format", "json", "hello"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("with model", func(t *testing.T) {
|
||||
cfg := &config.Config{Mode: "new", Model: "opencode/grok-code"}
|
||||
got := backend.BuildArgs(cfg, "task")
|
||||
want := []string{"run", "-m", "opencode/grok-code", "--format", "json", "task"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("resume mode", func(t *testing.T) {
|
||||
cfg := &config.Config{Mode: "resume", SessionID: "ses_123", Model: "opencode/grok-code"}
|
||||
got := backend.BuildArgs(cfg, "follow-up")
|
||||
want := []string{"run", "-m", "opencode/grok-code", "-s", "ses_123", "--format", "json", "follow-up"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("resume without session", func(t *testing.T) {
|
||||
cfg := &config.Config{Mode: "resume"}
|
||||
got := backend.BuildArgs(cfg, "task")
|
||||
want := []string{"run", "--format", "json", "task"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("stdin mode omits dash", func(t *testing.T) {
|
||||
cfg := &config.Config{Mode: "new"}
|
||||
got := backend.BuildArgs(cfg, "-")
|
||||
want := []string{"run", "--format", "json"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestOpencodeBackend_Interface(t *testing.T) {
|
||||
backend := OpencodeBackend{}
|
||||
|
||||
if backend.Name() != "opencode" {
|
||||
t.Errorf("Name() = %q, want %q", backend.Name(), "opencode")
|
||||
}
|
||||
if backend.Command() != "opencode" {
|
||||
t.Errorf("Command() = %q, want %q", backend.Command(), "opencode")
|
||||
}
|
||||
}
|
||||
140
codeagent-wrapper/internal/backend/claude.go
Normal file
140
codeagent-wrapper/internal/backend/claude.go
Normal file
@@ -0,0 +1,140 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
config "codeagent-wrapper/internal/config"
|
||||
|
||||
"github.com/goccy/go-json"
|
||||
)
|
||||
|
||||
// ClaudeBackend invokes the Claude Code CLI.
type ClaudeBackend struct{}

// Name returns the backend identifier.
func (ClaudeBackend) Name() string { return "claude" }

// Command returns the executable to invoke.
func (ClaudeBackend) Command() string { return "claude" }

// Env maps the optional base URL and API key onto the environment
// variables understood by the Claude CLI. It returns nil when neither
// value is provided (after trimming whitespace).
func (ClaudeBackend) Env(baseURL, apiKey string) map[string]string {
	url := strings.TrimSpace(baseURL)
	key := strings.TrimSpace(apiKey)

	env := make(map[string]string, 2)
	if url != "" {
		env["ANTHROPIC_BASE_URL"] = url
	}
	if key != "" {
		// Claude Code CLI uses ANTHROPIC_API_KEY for API-key based auth.
		env["ANTHROPIC_API_KEY"] = key
	}
	if len(env) == 0 {
		return nil
	}
	return env
}
|
||||
// BuildArgs builds the Claude CLI argument list for cfg and targetArg by
// delegating to buildClaudeArgs.
func (ClaudeBackend) BuildArgs(cfg *config.Config, targetArg string) []string {
	return buildClaudeArgs(cfg, targetArg)
}
|
||||
|
||||
// MaxClaudeSettingsBytes caps how large ~/.claude/settings.json may be
// before LoadMinimalClaudeSettings ignores it.
const MaxClaudeSettingsBytes = 1 << 20 // 1MB

// MinimalClaudeSettings is the safe minimal subset extracted from
// ~/.claude/settings.json by LoadMinimalClaudeSettings.
type MinimalClaudeSettings struct {
	Env   map[string]string // string-valued entries of the "env" object
	Model string            // string-valued "model" entry, whitespace-trimmed
}
|
||||
|
||||
// LoadMinimalClaudeSettings 从 ~/.claude/settings.json 只提取安全的最小子集:
|
||||
// - env: 只接受字符串类型的值
|
||||
// - model: 只接受字符串类型的值
|
||||
// 文件缺失/解析失败/超限都返回空。
|
||||
func LoadMinimalClaudeSettings() MinimalClaudeSettings {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil || home == "" {
|
||||
return MinimalClaudeSettings{}
|
||||
}
|
||||
|
||||
claudeDir := filepath.Clean(filepath.Join(home, ".claude"))
|
||||
settingPath := filepath.Clean(filepath.Join(claudeDir, "settings.json"))
|
||||
rel, err := filepath.Rel(claudeDir, settingPath)
|
||||
if err != nil || rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
|
||||
return MinimalClaudeSettings{}
|
||||
}
|
||||
|
||||
info, err := os.Stat(settingPath)
|
||||
if err != nil || info.Size() > MaxClaudeSettingsBytes {
|
||||
return MinimalClaudeSettings{}
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(settingPath) // #nosec G304 -- path is fixed under user home and validated to stay within claudeDir
|
||||
if err != nil {
|
||||
return MinimalClaudeSettings{}
|
||||
}
|
||||
|
||||
var cfg struct {
|
||||
Env map[string]any `json:"env"`
|
||||
Model any `json:"model"`
|
||||
}
|
||||
if err := json.Unmarshal(data, &cfg); err != nil {
|
||||
return MinimalClaudeSettings{}
|
||||
}
|
||||
|
||||
out := MinimalClaudeSettings{}
|
||||
|
||||
if model, ok := cfg.Model.(string); ok {
|
||||
out.Model = strings.TrimSpace(model)
|
||||
}
|
||||
|
||||
if len(cfg.Env) == 0 {
|
||||
return out
|
||||
}
|
||||
|
||||
env := make(map[string]string, len(cfg.Env))
|
||||
for k, v := range cfg.Env {
|
||||
s, ok := v.(string)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
env[k] = s
|
||||
}
|
||||
if len(env) == 0 {
|
||||
return out
|
||||
}
|
||||
out.Env = env
|
||||
return out
|
||||
}
|
||||
|
||||
func LoadMinimalEnvSettings() map[string]string {
|
||||
settings := LoadMinimalClaudeSettings()
|
||||
if len(settings.Env) == 0 {
|
||||
return nil
|
||||
}
|
||||
return settings.Env
|
||||
}
|
||||
|
||||
func buildClaudeArgs(cfg *config.Config, targetArg string) []string {
|
||||
if cfg == nil {
|
||||
return nil
|
||||
}
|
||||
args := []string{"-p"}
|
||||
// Default to skip permissions unless CODEAGENT_SKIP_PERMISSIONS=false
|
||||
if cfg.SkipPermissions || cfg.Yolo || config.EnvFlagDefaultTrue("CODEAGENT_SKIP_PERMISSIONS") {
|
||||
args = append(args, "--dangerously-skip-permissions")
|
||||
}
|
||||
|
||||
// Prevent infinite recursion: disable all setting sources (user, project, local)
|
||||
// This ensures a clean execution environment without CLAUDE.md or skills that would trigger codeagent
|
||||
args = append(args, "--setting-sources", "")
|
||||
|
||||
if model := strings.TrimSpace(cfg.Model); model != "" {
|
||||
args = append(args, "--model", model)
|
||||
}
|
||||
|
||||
if cfg.Mode == "resume" {
|
||||
if cfg.SessionID != "" {
|
||||
// Claude CLI uses -r <session_id> for resume.
|
||||
args = append(args, "-r", cfg.SessionID)
|
||||
}
|
||||
}
|
||||
|
||||
args = append(args, "--output-format", "stream-json", "--verbose", targetArg)
|
||||
|
||||
return args
|
||||
}
|
||||
79
codeagent-wrapper/internal/backend/codex.go
Normal file
79
codeagent-wrapper/internal/backend/codex.go
Normal file
@@ -0,0 +1,79 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
config "codeagent-wrapper/internal/config"
|
||||
)
|
||||
|
||||
// CodexBackend invokes the Codex CLI.
type CodexBackend struct{}

// Name returns the backend identifier.
func (CodexBackend) Name() string { return "codex" }

// Command returns the executable to invoke.
func (CodexBackend) Command() string { return "codex" }

// Env maps the optional base URL and API key onto the OpenAI-style
// environment variables used by the Codex CLI. It returns nil when
// neither value is provided (after trimming whitespace).
func (CodexBackend) Env(baseURL, apiKey string) map[string]string {
	url := strings.TrimSpace(baseURL)
	key := strings.TrimSpace(apiKey)

	env := make(map[string]string, 2)
	if url != "" {
		env["OPENAI_BASE_URL"] = url
	}
	if key != "" {
		env["OPENAI_API_KEY"] = key
	}
	if len(env) == 0 {
		return nil
	}
	return env
}
|
||||
// BuildArgs builds the Codex CLI argument list for cfg and targetArg by
// delegating to BuildCodexArgs.
func (CodexBackend) BuildArgs(cfg *config.Config, targetArg string) []string {
	return BuildCodexArgs(cfg, targetArg)
}
|
||||
|
||||
func BuildCodexArgs(cfg *config.Config, targetArg string) []string {
|
||||
if cfg == nil {
|
||||
panic("buildCodexArgs: nil config")
|
||||
}
|
||||
|
||||
var resumeSessionID string
|
||||
isResume := cfg.Mode == "resume"
|
||||
if isResume {
|
||||
resumeSessionID = strings.TrimSpace(cfg.SessionID)
|
||||
if resumeSessionID == "" {
|
||||
logErrorFn("invalid config: resume mode requires non-empty session_id")
|
||||
isResume = false
|
||||
}
|
||||
}
|
||||
|
||||
args := []string{"e"}
|
||||
|
||||
// Default to bypass sandbox unless CODEX_BYPASS_SANDBOX=false
|
||||
if cfg.Yolo || config.EnvFlagDefaultTrue("CODEX_BYPASS_SANDBOX") {
|
||||
logWarnFn("YOLO mode or CODEX_BYPASS_SANDBOX enabled: running without approval/sandbox protection")
|
||||
args = append(args, "--dangerously-bypass-approvals-and-sandbox")
|
||||
}
|
||||
|
||||
if model := strings.TrimSpace(cfg.Model); model != "" {
|
||||
args = append(args, "--model", model)
|
||||
}
|
||||
|
||||
if reasoningEffort := strings.TrimSpace(cfg.ReasoningEffort); reasoningEffort != "" {
|
||||
args = append(args, "-c", "model_reasoning_effort="+reasoningEffort)
|
||||
}
|
||||
|
||||
args = append(args, "--skip-git-repo-check")
|
||||
|
||||
if isResume {
|
||||
return append(args,
|
||||
"--json",
|
||||
"resume",
|
||||
resumeSessionID,
|
||||
targetArg,
|
||||
)
|
||||
}
|
||||
|
||||
return append(args,
|
||||
"-C", cfg.WorkDir,
|
||||
"--json",
|
||||
targetArg,
|
||||
)
|
||||
}
|
||||
54
codeagent-wrapper/internal/backend/codex_paths_test.go
Normal file
54
codeagent-wrapper/internal/backend/codex_paths_test.go
Normal file
@@ -0,0 +1,54 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
config "codeagent-wrapper/internal/config"
|
||||
)
|
||||
|
||||
func TestBuildCodexArgs_Workdir_OSPaths(t *testing.T) {
|
||||
t.Setenv("CODEX_BYPASS_SANDBOX", "false")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
workdir string
|
||||
}{
|
||||
{name: "windows drive forward slashes", workdir: "D:/repo/path"},
|
||||
{name: "windows drive backslashes", workdir: `C:\repo\path`},
|
||||
{name: "windows UNC", workdir: `\\server\share\repo`},
|
||||
{name: "unix absolute", workdir: "/home/user/repo"},
|
||||
{name: "relative", workdir: "./relative/repo"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &config.Config{Mode: "new", WorkDir: tt.workdir}
|
||||
got := BuildCodexArgs(cfg, "task")
|
||||
want := []string{"e", "--skip-git-repo-check", "-C", tt.workdir, "--json", "task"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("BuildCodexArgs() = %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("new mode stdin target uses dash", func(t *testing.T) {
|
||||
cfg := &config.Config{Mode: "new", WorkDir: `C:\repo\path`}
|
||||
got := BuildCodexArgs(cfg, "-")
|
||||
want := []string{"e", "--skip-git-repo-check", "-C", `C:\repo\path`, "--json", "-"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("BuildCodexArgs() = %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuildCodexArgs_ResumeMode_OmitsWorkdir(t *testing.T) {
|
||||
t.Setenv("CODEX_BYPASS_SANDBOX", "false")
|
||||
|
||||
cfg := &config.Config{Mode: "resume", SessionID: "sid-123", WorkDir: `C:\repo\path`}
|
||||
got := BuildCodexArgs(cfg, "-")
|
||||
want := []string{"e", "--skip-git-repo-check", "--json", "resume", "sid-123", "-"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("BuildCodexArgs() = %v, want %v", got, want)
|
||||
}
|
||||
}
|
||||
110
codeagent-wrapper/internal/backend/gemini.go
Normal file
110
codeagent-wrapper/internal/backend/gemini.go
Normal file
@@ -0,0 +1,110 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
config "codeagent-wrapper/internal/config"
|
||||
)
|
||||
|
||||
// GeminiBackend invokes the Gemini CLI.
type GeminiBackend struct{}

// Name returns the backend identifier.
func (GeminiBackend) Name() string { return "gemini" }

// Command returns the executable to invoke.
func (GeminiBackend) Command() string { return "gemini" }

// Env maps the optional base URL and API key onto the environment
// variables understood by the Gemini CLI. It returns nil when neither
// value is provided (after trimming whitespace).
func (GeminiBackend) Env(baseURL, apiKey string) map[string]string {
	url := strings.TrimSpace(baseURL)
	key := strings.TrimSpace(apiKey)

	env := make(map[string]string, 2)
	if url != "" {
		env["GOOGLE_GEMINI_BASE_URL"] = url
	}
	if key != "" {
		env["GEMINI_API_KEY"] = key
	}
	if len(env) == 0 {
		return nil
	}
	return env
}
|
||||
// BuildArgs delegates to buildGeminiArgs to construct the CLI argument list
// for cfg and targetArg.
func (GeminiBackend) BuildArgs(cfg *config.Config, targetArg string) []string {
	return buildGeminiArgs(cfg, targetArg)
}
|
||||
|
||||
// LoadGeminiEnv loads environment variables from ~/.gemini/.env
|
||||
// Supports GEMINI_API_KEY, GEMINI_MODEL, GOOGLE_GEMINI_BASE_URL
|
||||
// Also sets GEMINI_API_KEY_AUTH_MECHANISM=bearer for third-party API compatibility
|
||||
func LoadGeminiEnv() map[string]string {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil || home == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
envDir := filepath.Clean(filepath.Join(home, ".gemini"))
|
||||
envPath := filepath.Clean(filepath.Join(envDir, ".env"))
|
||||
rel, err := filepath.Rel(envDir, envPath)
|
||||
if err != nil || rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
|
||||
return nil
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(envPath) // #nosec G304 -- path is fixed under user home and validated to stay within envDir
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
env := make(map[string]string)
|
||||
for _, line := range strings.Split(string(data), "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" || strings.HasPrefix(line, "#") {
|
||||
continue
|
||||
}
|
||||
idx := strings.IndexByte(line, '=')
|
||||
if idx <= 0 {
|
||||
continue
|
||||
}
|
||||
key := strings.TrimSpace(line[:idx])
|
||||
value := strings.TrimSpace(line[idx+1:])
|
||||
if key != "" && value != "" {
|
||||
env[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
// Set bearer auth mechanism for third-party API compatibility
|
||||
if _, ok := env["GEMINI_API_KEY"]; ok {
|
||||
if _, hasAuth := env["GEMINI_API_KEY_AUTH_MECHANISM"]; !hasAuth {
|
||||
env["GEMINI_API_KEY_AUTH_MECHANISM"] = "bearer"
|
||||
}
|
||||
}
|
||||
|
||||
if len(env) == 0 {
|
||||
return nil
|
||||
}
|
||||
return env
|
||||
}
|
||||
|
||||
func buildGeminiArgs(cfg *config.Config, targetArg string) []string {
|
||||
if cfg == nil {
|
||||
return nil
|
||||
}
|
||||
args := []string{"-o", "stream-json", "-y"}
|
||||
|
||||
if model := strings.TrimSpace(cfg.Model); model != "" {
|
||||
args = append(args, "-m", model)
|
||||
}
|
||||
|
||||
if cfg.Mode == "resume" {
|
||||
if cfg.SessionID != "" {
|
||||
args = append(args, "-r", cfg.SessionID)
|
||||
}
|
||||
}
|
||||
|
||||
// Use positional argument instead of deprecated -p flag.
|
||||
// For stdin mode ("-"), use -p to read from stdin.
|
||||
if targetArg == "-" {
|
||||
args = append(args, "-p", targetArg)
|
||||
} else {
|
||||
args = append(args, targetArg)
|
||||
}
|
||||
|
||||
return args
|
||||
}
|
||||
29
codeagent-wrapper/internal/backend/opencode.go
Normal file
29
codeagent-wrapper/internal/backend/opencode.go
Normal file
@@ -0,0 +1,29 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
config "codeagent-wrapper/internal/config"
|
||||
)
|
||||
|
||||
type OpencodeBackend struct{}
|
||||
|
||||
func (OpencodeBackend) Name() string { return "opencode" }
|
||||
func (OpencodeBackend) Command() string { return "opencode" }
|
||||
func (OpencodeBackend) Env(baseURL, apiKey string) map[string]string { return nil }
|
||||
func (OpencodeBackend) BuildArgs(cfg *config.Config, targetArg string) []string {
|
||||
args := []string{"run"}
|
||||
if cfg != nil {
|
||||
if model := strings.TrimSpace(cfg.Model); model != "" {
|
||||
args = append(args, "-m", model)
|
||||
}
|
||||
if cfg.Mode == "resume" && cfg.SessionID != "" {
|
||||
args = append(args, "-s", cfg.SessionID)
|
||||
}
|
||||
}
|
||||
args = append(args, "--format", "json")
|
||||
if targetArg != "-" {
|
||||
args = append(args, targetArg)
|
||||
}
|
||||
return args
|
||||
}
|
||||
29
codeagent-wrapper/internal/backend/registry.go
Normal file
29
codeagent-wrapper/internal/backend/registry.go
Normal file
@@ -0,0 +1,29 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var registry = map[string]Backend{
|
||||
"codex": CodexBackend{},
|
||||
"claude": ClaudeBackend{},
|
||||
"gemini": GeminiBackend{},
|
||||
"opencode": OpencodeBackend{},
|
||||
}
|
||||
|
||||
// Registry exposes the available backends. Intended for internal inspection/tests.
|
||||
func Registry() map[string]Backend {
|
||||
return registry
|
||||
}
|
||||
|
||||
func Select(name string) (Backend, error) {
|
||||
key := strings.ToLower(strings.TrimSpace(name))
|
||||
if key == "" {
|
||||
key = "codex"
|
||||
}
|
||||
if backend, ok := registry[key]; ok {
|
||||
return backend, nil
|
||||
}
|
||||
return nil, fmt.Errorf("unsupported backend %q", name)
|
||||
}
|
||||
259
codeagent-wrapper/internal/config/agent.go
Normal file
259
codeagent-wrapper/internal/config/agent.go
Normal file
@@ -0,0 +1,259 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/goccy/go-json"
|
||||
)
|
||||
|
||||
// BackendConfig holds per-backend connection overrides from models.json.
type BackendConfig struct {
	BaseURL string `json:"base_url,omitempty"`
	APIKey  string `json:"api_key,omitempty"`
}

// AgentModelConfig describes how a single named agent runs: which backend and
// model to use, an optional prompt file, and optional per-agent overrides for
// reasoning effort, yolo mode, and connection settings.
type AgentModelConfig struct {
	Backend     string `json:"backend"`
	Model       string `json:"model"`
	PromptFile  string `json:"prompt_file,omitempty"`
	Description string `json:"description,omitempty"`
	Yolo        bool   `json:"yolo,omitempty"`
	Reasoning   string `json:"reasoning,omitempty"`
	BaseURL     string `json:"base_url,omitempty"`
	APIKey      string `json:"api_key,omitempty"`
}

// ModelsConfig is the top-level shape of ~/.codeagent/models.json.
type ModelsConfig struct {
	DefaultBackend string                      `json:"default_backend"`
	DefaultModel   string                      `json:"default_model"`
	Agents         map[string]AgentModelConfig `json:"agents"`
	Backends       map[string]BackendConfig    `json:"backends,omitempty"`
}
|
||||
|
||||
// defaultModelsConfig is the zero-value fallback configuration.
var defaultModelsConfig = ModelsConfig{}

// modelsConfigTildePath is the user-facing (unexpanded) config location.
const modelsConfigTildePath = "~/.codeagent/models.json"

// modelsConfigExample is embedded in error messages to show users a working
// models.json skeleton.
const modelsConfigExample = `{
  "default_backend": "codex",
  "default_model": "gpt-4.1",
  "backends": {
    "codex": { "api_key": "..." },
    "claude": { "api_key": "..." }
  },
  "agents": {
    "develop": {
      "backend": "codex",
      "model": "gpt-4.1",
      "prompt_file": "~/.codeagent/prompts/develop.md",
      "reasoning": "high",
      "yolo": true
    }
  }
}`

// Cached models.json load state; populated once by modelsConfig and reset
// only by ResetModelsConfigCacheForTest.
var (
	modelsConfigOnce   sync.Once
	modelsConfigCached *ModelsConfig
	modelsConfigErr    error
)
|
||||
|
||||
func modelsConfig() (*ModelsConfig, error) {
|
||||
modelsConfigOnce.Do(func() {
|
||||
modelsConfigCached, modelsConfigErr = loadModelsConfig()
|
||||
})
|
||||
return modelsConfigCached, modelsConfigErr
|
||||
}
|
||||
|
||||
func modelsConfigPath() (string, error) {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil || strings.TrimSpace(home) == "" {
|
||||
return "", fmt.Errorf("failed to resolve user home directory: %w", err)
|
||||
}
|
||||
|
||||
configDir := filepath.Clean(filepath.Join(home, ".codeagent"))
|
||||
configPath := filepath.Clean(filepath.Join(configDir, "models.json"))
|
||||
rel, err := filepath.Rel(configDir, configPath)
|
||||
if err != nil || rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
|
||||
return "", fmt.Errorf("refusing to read models config outside %s: %s", configDir, configPath)
|
||||
}
|
||||
return configPath, nil
|
||||
}
|
||||
|
||||
func modelsConfigHint(configPath string) string {
|
||||
configPath = strings.TrimSpace(configPath)
|
||||
if configPath == "" {
|
||||
return fmt.Sprintf("Create %s with e.g.:\n%s", modelsConfigTildePath, modelsConfigExample)
|
||||
}
|
||||
return fmt.Sprintf("Create %s (resolved to %s) with e.g.:\n%s", modelsConfigTildePath, configPath, modelsConfigExample)
|
||||
}
|
||||
|
||||
func loadModelsConfig() (*ModelsConfig, error) {
|
||||
configPath, err := modelsConfigPath()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%w\n\n%s", err, modelsConfigHint(""))
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(configPath) // #nosec G304 -- path is fixed under user home and validated to stay within configDir
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, fmt.Errorf("models config not found: %s\n\n%s", configPath, modelsConfigHint(configPath))
|
||||
}
|
||||
return nil, fmt.Errorf("failed to read models config %s: %w\n\n%s", configPath, err, modelsConfigHint(configPath))
|
||||
}
|
||||
|
||||
var cfg ModelsConfig
|
||||
if err := json.Unmarshal(data, &cfg); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse models config %s: %w\n\n%s", configPath, err, modelsConfigHint(configPath))
|
||||
}
|
||||
|
||||
cfg.DefaultBackend = strings.TrimSpace(cfg.DefaultBackend)
|
||||
cfg.DefaultModel = strings.TrimSpace(cfg.DefaultModel)
|
||||
|
||||
// Normalize backend keys so lookups can be case-insensitive.
|
||||
if len(cfg.Backends) > 0 {
|
||||
normalized := make(map[string]BackendConfig, len(cfg.Backends))
|
||||
for k, v := range cfg.Backends {
|
||||
key := strings.ToLower(strings.TrimSpace(k))
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
normalized[key] = v
|
||||
}
|
||||
if len(normalized) > 0 {
|
||||
cfg.Backends = normalized
|
||||
} else {
|
||||
cfg.Backends = nil
|
||||
}
|
||||
}
|
||||
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
func LoadDynamicAgent(name string) (AgentModelConfig, bool) {
|
||||
if err := ValidateAgentName(name); err != nil {
|
||||
return AgentModelConfig{}, false
|
||||
}
|
||||
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil || strings.TrimSpace(home) == "" {
|
||||
return AgentModelConfig{}, false
|
||||
}
|
||||
|
||||
absPath := filepath.Join(home, ".codeagent", "agents", name+".md")
|
||||
info, err := os.Stat(absPath)
|
||||
if err != nil || info.IsDir() {
|
||||
return AgentModelConfig{}, false
|
||||
}
|
||||
|
||||
return AgentModelConfig{PromptFile: "~/.codeagent/agents/" + name + ".md"}, true
|
||||
}
|
||||
|
||||
func ResolveBackendConfig(backendName string) (baseURL, apiKey string) {
|
||||
cfg, err := modelsConfig()
|
||||
if err != nil || cfg == nil {
|
||||
return "", ""
|
||||
}
|
||||
resolved := resolveBackendConfig(cfg, backendName)
|
||||
return strings.TrimSpace(resolved.BaseURL), strings.TrimSpace(resolved.APIKey)
|
||||
}
|
||||
|
||||
func resolveBackendConfig(cfg *ModelsConfig, backendName string) BackendConfig {
|
||||
if cfg == nil || len(cfg.Backends) == 0 {
|
||||
return BackendConfig{}
|
||||
}
|
||||
key := strings.ToLower(strings.TrimSpace(backendName))
|
||||
if key == "" {
|
||||
key = strings.ToLower(strings.TrimSpace(cfg.DefaultBackend))
|
||||
}
|
||||
if key == "" {
|
||||
return BackendConfig{}
|
||||
}
|
||||
if backend, ok := cfg.Backends[key]; ok {
|
||||
return backend
|
||||
}
|
||||
return BackendConfig{}
|
||||
}
|
||||
|
||||
func resolveAgentConfig(agentName string) (backend, model, promptFile, reasoning, baseURL, apiKey string, yolo bool, err error) {
|
||||
if err := ValidateAgentName(agentName); err != nil {
|
||||
return "", "", "", "", "", "", false, err
|
||||
}
|
||||
|
||||
cfg, err := modelsConfig()
|
||||
if err != nil {
|
||||
return "", "", "", "", "", "", false, err
|
||||
}
|
||||
if cfg == nil {
|
||||
return "", "", "", "", "", "", false, fmt.Errorf("models config is nil\n\n%s", modelsConfigHint(""))
|
||||
}
|
||||
|
||||
if agent, ok := cfg.Agents[agentName]; ok {
|
||||
backend = strings.TrimSpace(agent.Backend)
|
||||
if backend == "" {
|
||||
backend = strings.TrimSpace(cfg.DefaultBackend)
|
||||
if backend == "" {
|
||||
configPath, pathErr := modelsConfigPath()
|
||||
if pathErr != nil {
|
||||
return "", "", "", "", "", "", false, fmt.Errorf("agent %q has empty backend and default_backend is not set\n\n%s", agentName, modelsConfigHint(""))
|
||||
}
|
||||
return "", "", "", "", "", "", false, fmt.Errorf("agent %q has empty backend and default_backend is not set\n\n%s", agentName, modelsConfigHint(configPath))
|
||||
}
|
||||
}
|
||||
backendCfg := resolveBackendConfig(cfg, backend)
|
||||
|
||||
baseURL = strings.TrimSpace(agent.BaseURL)
|
||||
if baseURL == "" {
|
||||
baseURL = strings.TrimSpace(backendCfg.BaseURL)
|
||||
}
|
||||
apiKey = strings.TrimSpace(agent.APIKey)
|
||||
if apiKey == "" {
|
||||
apiKey = strings.TrimSpace(backendCfg.APIKey)
|
||||
}
|
||||
|
||||
model = strings.TrimSpace(agent.Model)
|
||||
if model == "" {
|
||||
configPath, pathErr := modelsConfigPath()
|
||||
if pathErr != nil {
|
||||
return "", "", "", "", "", "", false, fmt.Errorf("agent %q has empty model; set agents.%s.model in %s\n\n%s", agentName, agentName, modelsConfigTildePath, modelsConfigHint(""))
|
||||
}
|
||||
return "", "", "", "", "", "", false, fmt.Errorf("agent %q has empty model; set agents.%s.model in %s\n\n%s", agentName, agentName, modelsConfigTildePath, modelsConfigHint(configPath))
|
||||
}
|
||||
return backend, model, agent.PromptFile, agent.Reasoning, baseURL, apiKey, agent.Yolo, nil
|
||||
}
|
||||
|
||||
if dynamic, ok := LoadDynamicAgent(agentName); ok {
|
||||
backend = strings.TrimSpace(cfg.DefaultBackend)
|
||||
model = strings.TrimSpace(cfg.DefaultModel)
|
||||
configPath, pathErr := modelsConfigPath()
|
||||
if backend == "" || model == "" {
|
||||
if pathErr != nil {
|
||||
return "", "", "", "", "", "", false, fmt.Errorf("dynamic agent %q requires default_backend and default_model to be set in %s\n\n%s", agentName, modelsConfigTildePath, modelsConfigHint(""))
|
||||
}
|
||||
return "", "", "", "", "", "", false, fmt.Errorf("dynamic agent %q requires default_backend and default_model to be set in %s\n\n%s", agentName, modelsConfigTildePath, modelsConfigHint(configPath))
|
||||
}
|
||||
backendCfg := resolveBackendConfig(cfg, backend)
|
||||
baseURL = strings.TrimSpace(backendCfg.BaseURL)
|
||||
apiKey = strings.TrimSpace(backendCfg.APIKey)
|
||||
return backend, model, dynamic.PromptFile, "", baseURL, apiKey, false, nil
|
||||
}
|
||||
|
||||
configPath, pathErr := modelsConfigPath()
|
||||
if pathErr != nil {
|
||||
return "", "", "", "", "", "", false, fmt.Errorf("agent %q not found in %s\n\n%s", agentName, modelsConfigTildePath, modelsConfigHint(""))
|
||||
}
|
||||
return "", "", "", "", "", "", false, fmt.Errorf("agent %q not found in %s\n\n%s", agentName, modelsConfigTildePath, modelsConfigHint(configPath))
|
||||
}
|
||||
|
||||
func ResolveAgentConfig(agentName string) (backend, model, promptFile, reasoning, baseURL, apiKey string, yolo bool, err error) {
|
||||
return resolveAgentConfig(agentName)
|
||||
}
|
||||
|
||||
func ResetModelsConfigCacheForTest() {
|
||||
modelsConfigCached = nil
|
||||
modelsConfigErr = nil
|
||||
modelsConfigOnce = sync.Once{}
|
||||
}
|
||||
262
codeagent-wrapper/internal/config/agent_config_test.go
Normal file
262
codeagent-wrapper/internal/config/agent_config_test.go
Normal file
@@ -0,0 +1,262 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestResolveAgentConfig_NoConfig_ReturnsHelpfulError verifies that when no
// models.json exists, the resolution error mentions the tilde path, the
// resolved absolute path, and the example config snippet.
func TestResolveAgentConfig_NoConfig_ReturnsHelpfulError(t *testing.T) {
	// Point HOME/USERPROFILE at an empty temp dir so no config is found.
	home := t.TempDir()
	t.Setenv("HOME", home)
	t.Setenv("USERPROFILE", home)
	t.Cleanup(ResetModelsConfigCacheForTest)
	ResetModelsConfigCacheForTest()

	_, _, _, _, _, _, _, err := ResolveAgentConfig("develop")
	if err == nil {
		t.Fatalf("expected error, got nil")
	}
	msg := err.Error()
	if !strings.Contains(msg, modelsConfigTildePath) {
		t.Fatalf("error should mention %s, got: %s", modelsConfigTildePath, msg)
	}
	if !strings.Contains(msg, filepath.Join(home, ".codeagent", "models.json")) {
		t.Fatalf("error should mention resolved config path, got: %s", msg)
	}
	// The example JSON embedded in the hint contains an "agents" section.
	if !strings.Contains(msg, "\"agents\"") {
		t.Fatalf("error should include example config, got: %s", msg)
	}
}
|
||||
|
||||
func TestLoadModelsConfig_NoFile(t *testing.T) {
|
||||
home := t.TempDir()
|
||||
t.Setenv("HOME", home)
|
||||
t.Setenv("USERPROFILE", home)
|
||||
t.Cleanup(ResetModelsConfigCacheForTest)
|
||||
ResetModelsConfigCacheForTest()
|
||||
|
||||
_, err := loadModelsConfig()
|
||||
if err == nil {
|
||||
t.Fatalf("expected error, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
// TestLoadModelsConfig_WithFile exercises a full models.json: defaults,
// case-insensitive backend keys, a custom agent, and resolution of
// backend-level versus agent-level base_url/api_key overrides.
func TestLoadModelsConfig_WithFile(t *testing.T) {
	// Create temp dir and config file
	tmpDir := t.TempDir()
	configDir := filepath.Join(tmpDir, ".codeagent")
	if err := os.MkdirAll(configDir, 0755); err != nil {
		t.Fatal(err)
	}

	// Note the mixed-case "Claude" key: loading should normalize it to
	// lowercase so the ResolveBackendConfig("claude") lookup below succeeds.
	configContent := `{
  "default_backend": "claude",
  "default_model": "claude-opus-4",
  "backends": {
    "Claude": {
      "base_url": "https://backend.example",
      "api_key": "backend-key"
    },
    "codex": {
      "base_url": "https://openai.example",
      "api_key": "openai-key"
    }
  },
  "agents": {
    "custom-agent": {
      "backend": "codex",
      "model": "gpt-4o",
      "description": "Custom agent",
      "base_url": "https://agent.example",
      "api_key": "agent-key"
    }
  }
}`
	configPath := filepath.Join(configDir, "models.json")
	if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
		t.Fatal(err)
	}

	t.Setenv("HOME", tmpDir)
	t.Setenv("USERPROFILE", tmpDir)
	t.Cleanup(ResetModelsConfigCacheForTest)
	ResetModelsConfigCacheForTest()

	cfg, err := loadModelsConfig()
	if err != nil {
		t.Fatalf("loadModelsConfig: %v", err)
	}

	if cfg.DefaultBackend != "claude" {
		t.Errorf("DefaultBackend = %q, want %q", cfg.DefaultBackend, "claude")
	}
	if cfg.DefaultModel != "claude-opus-4" {
		t.Errorf("DefaultModel = %q, want %q", cfg.DefaultModel, "claude-opus-4")
	}

	// Check custom agent
	if agent, ok := cfg.Agents["custom-agent"]; !ok {
		t.Error("custom-agent not found")
	} else {
		if agent.Backend != "codex" {
			t.Errorf("custom-agent.Backend = %q, want %q", agent.Backend, "codex")
		}
		if agent.Model != "gpt-4o" {
			t.Errorf("custom-agent.Model = %q, want %q", agent.Model, "gpt-4o")
		}
	}

	// Agents are purely config-driven; nothing should appear implicitly.
	if _, ok := cfg.Agents["oracle"]; ok {
		t.Error("oracle should not be present without explicit config")
	}

	// Backend-level lookup must hit the normalized (lowercased) "claude" entry.
	baseURL, apiKey := ResolveBackendConfig("claude")
	if baseURL != "https://backend.example" {
		t.Errorf("ResolveBackendConfig(baseURL) = %q, want %q", baseURL, "https://backend.example")
	}
	if apiKey != "backend-key" {
		t.Errorf("ResolveBackendConfig(apiKey) = %q, want %q", apiKey, "backend-key")
	}

	// Agent-level base_url/api_key should override the backend-level values.
	backend, model, _, _, agentBaseURL, agentAPIKey, _, err := ResolveAgentConfig("custom-agent")
	if err != nil {
		t.Fatalf("ResolveAgentConfig(custom-agent): %v", err)
	}
	if backend != "codex" {
		t.Errorf("ResolveAgentConfig(backend) = %q, want %q", backend, "codex")
	}
	if model != "gpt-4o" {
		t.Errorf("ResolveAgentConfig(model) = %q, want %q", model, "gpt-4o")
	}
	if agentBaseURL != "https://agent.example" {
		t.Errorf("ResolveAgentConfig(baseURL) = %q, want %q", agentBaseURL, "https://agent.example")
	}
	if agentAPIKey != "agent-key" {
		t.Errorf("ResolveAgentConfig(apiKey) = %q, want %q", agentAPIKey, "agent-key")
	}
}
|
||||
|
||||
// TestResolveAgentConfig_DynamicAgent verifies that an agent defined only by
// a prompt file under ~/.codeagent/agents/ falls back to default_backend and
// default_model, and that the prompt path is reported in tilde form.
func TestResolveAgentConfig_DynamicAgent(t *testing.T) {
	home := t.TempDir()
	t.Setenv("HOME", home)
	t.Setenv("USERPROFILE", home)
	t.Cleanup(ResetModelsConfigCacheForTest)
	ResetModelsConfigCacheForTest()

	// The prompt file alone defines the dynamic agent "sarsh".
	agentDir := filepath.Join(home, ".codeagent", "agents")
	if err := os.MkdirAll(agentDir, 0o755); err != nil {
		t.Fatalf("MkdirAll: %v", err)
	}
	if err := os.WriteFile(filepath.Join(agentDir, "sarsh.md"), []byte("prompt\n"), 0o644); err != nil {
		t.Fatalf("WriteFile: %v", err)
	}

	// models.json supplies only the defaults the dynamic agent relies on.
	configDir := filepath.Join(home, ".codeagent")
	if err := os.MkdirAll(configDir, 0o755); err != nil {
		t.Fatalf("MkdirAll: %v", err)
	}
	if err := os.WriteFile(filepath.Join(configDir, "models.json"), []byte(`{
  "default_backend": "codex",
  "default_model": "gpt-test"
}`), 0o644); err != nil {
		t.Fatalf("WriteFile: %v", err)
	}

	backend, model, promptFile, _, _, _, _, err := ResolveAgentConfig("sarsh")
	if err != nil {
		t.Fatalf("ResolveAgentConfig(sarsh): %v", err)
	}
	if backend != "codex" {
		t.Errorf("backend = %q, want %q", backend, "codex")
	}
	if model != "gpt-test" {
		t.Errorf("model = %q, want %q", model, "gpt-test")
	}
	if promptFile != "~/.codeagent/agents/sarsh.md" {
		t.Errorf("promptFile = %q, want %q", promptFile, "~/.codeagent/agents/sarsh.md")
	}
}
|
||||
|
||||
func TestLoadModelsConfig_InvalidJSON(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
configDir := filepath.Join(tmpDir, ".codeagent")
|
||||
if err := os.MkdirAll(configDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Write invalid JSON
|
||||
configPath := filepath.Join(configDir, "models.json")
|
||||
if err := os.WriteFile(configPath, []byte("invalid json {"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Setenv("HOME", tmpDir)
|
||||
t.Setenv("USERPROFILE", tmpDir)
|
||||
t.Cleanup(ResetModelsConfigCacheForTest)
|
||||
ResetModelsConfigCacheForTest()
|
||||
|
||||
_, err := loadModelsConfig()
|
||||
if err == nil {
|
||||
t.Fatalf("expected error, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
// TestResolveAgentConfig_UnknownAgent_ReturnsError verifies that asking for
// an agent absent from models.json (and with no prompt file) fails with an
// error naming the requested agent.
func TestResolveAgentConfig_UnknownAgent_ReturnsError(t *testing.T) {
	home := t.TempDir()
	t.Setenv("HOME", home)
	t.Setenv("USERPROFILE", home)
	t.Cleanup(ResetModelsConfigCacheForTest)
	ResetModelsConfigCacheForTest()

	configDir := filepath.Join(home, ".codeagent")
	if err := os.MkdirAll(configDir, 0o755); err != nil {
		t.Fatalf("MkdirAll: %v", err)
	}
	// A valid config that only knows about "develop".
	if err := os.WriteFile(filepath.Join(configDir, "models.json"), []byte(`{
  "default_backend": "codex",
  "default_model": "gpt-test",
  "agents": {
    "develop": { "backend": "codex", "model": "gpt-test" }
  }
}`), 0o644); err != nil {
		t.Fatalf("WriteFile: %v", err)
	}

	_, _, _, _, _, _, _, err := ResolveAgentConfig("unknown-agent")
	if err == nil {
		t.Fatalf("expected error, got nil")
	}
	if !strings.Contains(err.Error(), "unknown-agent") {
		t.Fatalf("error should mention agent name, got: %s", err.Error())
	}
}
|
||||
|
||||
// TestResolveAgentConfig_EmptyModel_ReturnsError verifies that an agent whose
// model is whitespace-only is rejected with an "empty model" error.
func TestResolveAgentConfig_EmptyModel_ReturnsError(t *testing.T) {
	home := t.TempDir()
	t.Setenv("HOME", home)
	t.Setenv("USERPROFILE", home)
	t.Cleanup(ResetModelsConfigCacheForTest)
	ResetModelsConfigCacheForTest()

	configDir := filepath.Join(home, ".codeagent")
	if err := os.MkdirAll(configDir, 0o755); err != nil {
		t.Fatalf("MkdirAll: %v", err)
	}
	// "model" is a single space: TrimSpace should reduce it to empty and
	// trigger the empty-model validation path.
	if err := os.WriteFile(filepath.Join(configDir, "models.json"), []byte(`{
  "agents": {
    "bad-agent": { "backend": "codex", "model": " " }
  }
}`), 0o644); err != nil {
		t.Fatalf("WriteFile: %v", err)
	}

	_, _, _, _, _, _, _, err := ResolveAgentConfig("bad-agent")
	if err == nil {
		t.Fatalf("expected error, got nil")
	}
	if !strings.Contains(strings.ToLower(err.Error()), "empty model") {
		t.Fatalf("error should mention empty model, got: %s", err.Error())
	}
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user