Compare commits

..

9 Commits

Author SHA1 Message Date
cexll
9ba6950d21 style(codex-skill): replace emoji with text labels
Replace emoji with plain-text labels (e.g. `# Bad:`) to keep the documentation concise and professional.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-01 16:22:32 +08:00
cexll
7f790fbe15 remove codex-wrapper bin 2025-12-01 16:21:57 +08:00
cexll
06f14aa695 fix(codex-wrapper): improve --parallel parameter validation and docs
Fixed issues:
- codex-wrapper --parallel mode lacked argument validation, so extra arguments passed by mistake caused shell parsing errors
- The documentation lacked a correct-vs-incorrect usage comparison and could easily mislead users

Key improvements:

1. codex-wrapper/main.go:
   - Add --parallel argument validation (lines 366-373)
   - Print a clear error message and correct-usage examples when extra arguments are detected
   - Update the --help text with --parallel usage notes

2. skills/codex/SKILL.md:
   - Add a prominent note clarifying that --parallel reads its configuration from stdin only
   - Add a "correct vs. incorrect usage" section covering three common mistakes
   - Remove the redundant `-` argument from all examples
   - Emphasize correct workdir usage in the Delimiter Format section

Test verification:
- All unit tests pass
- Argument validation works as expected
- Parallel execution works as expected
- Chinese content parses correctly

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-01 16:18:36 +08:00
cexll
9fa872a1f0 update codex skill dependencies 2025-12-01 00:11:31 +08:00
ben
6d263fe8c9 Merge pull request #34 from cexll/cce-worktree-master-20251129-111802-997076000
feat: add parallel execution support to codex-wrapper
2025-11-30 00:16:10 +08:00
cexll
e55b13c2c5 docs: improve codex skill parameter best practices
Add best practices for task id and workdir parameters:
- id: recommend <feature>_<timestamp> format for uniqueness
- workdir: recommend absolute paths to avoid ambiguity
Update parallel execution example to demonstrate recommended format

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-29 23:32:44 +08:00
cexll
f95f5f5e88 feat: add session resume support and improve output format
- Support session_id in parallel task config for resuming failed tasks
- Change output format from JSON to human-readable text
- Add helper functions (hello, greet, farewell) with tests
- Clean up code formatting

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-29 23:14:43 +08:00
cexll
23c212f8be feat: add parallel execution support to codex-wrapper
- Replace JSON format with delimiter format (---TASK---/---CONTENT---)
- Support unlimited concurrent task execution with dependency management
- Implement Kahn's topological sort for dependency resolution
- Add cycle detection and error isolation
- Change output from JSON to human-readable text format
- Update SKILL.md with parallel execution documentation

Key features:
- No escaping needed for task content (heredoc protected)
- Automatic dependency-based scheduling
- Failed tasks don't block independent tasks
- Text output format for better readability

Test coverage: 89.0%

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-29 22:12:40 +08:00
cexll
90477abb81 update CLAUDE.md and codex skill 2025-11-29 19:11:06 +08:00
8 changed files with 1876 additions and 1017 deletions

.gitignore vendored
View File

@@ -1,3 +1,2 @@
CLAUDE.md
.claude/
.claude-trace

View File

@@ -1,139 +0,0 @@
package main
import (
"fmt"
"os"
"path/filepath"
"sync"
"sync/atomic"
)
// Logger writes log messages asynchronously to a temp file.
// It is intentionally minimal: a buffered channel + single worker goroutine
// to avoid contention while keeping ordering guarantees.
type Logger struct {
path string
file *os.File
ch chan logEntry
done chan struct{}
closed atomic.Bool
closeOnce sync.Once
workerWG sync.WaitGroup
pendingWG sync.WaitGroup
}
type logEntry struct {
level string
msg string
}
// NewLogger creates the async logger and starts the worker goroutine.
// The log file is created under os.TempDir() using the required naming scheme.
func NewLogger() (*Logger, error) {
path := filepath.Join(os.TempDir(), fmt.Sprintf("codex-wrapper-%d.log", os.Getpid()))
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
if err != nil {
return nil, err
}
l := &Logger{
path: path,
file: f,
ch: make(chan logEntry, 100),
done: make(chan struct{}),
}
l.workerWG.Add(1)
go l.run()
return l, nil
}
// Path returns the underlying log file path (useful for tests/inspection).
func (l *Logger) Path() string {
if l == nil {
return ""
}
return l.path
}
// Info logs at INFO level.
func (l *Logger) Info(msg string) { l.log("INFO", msg) }
// Warn logs at WARN level.
func (l *Logger) Warn(msg string) { l.log("WARN", msg) }
// Debug logs at DEBUG level.
func (l *Logger) Debug(msg string) { l.log("DEBUG", msg) }
// Error logs at ERROR level.
func (l *Logger) Error(msg string) { l.log("ERROR", msg) }
// Close stops the worker, syncs and removes the log file.
// It is safe to call multiple times.
func (l *Logger) Close() error {
if l == nil {
return nil
}
var closeErr error
l.closeOnce.Do(func() {
l.closed.Store(true)
close(l.done)
close(l.ch)
l.workerWG.Wait()
if err := l.file.Sync(); err != nil {
closeErr = err
}
if err := l.file.Close(); err != nil && closeErr == nil {
closeErr = err
}
if err := os.Remove(l.path); err != nil && !os.IsNotExist(err) && closeErr == nil {
closeErr = err
}
})
return closeErr
}
// Flush waits for all pending log entries to be written. Primarily for tests.
func (l *Logger) Flush() {
if l == nil {
return
}
l.pendingWG.Wait()
}
func (l *Logger) log(level, msg string) {
if l == nil {
return
}
if l.closed.Load() {
return
}
entry := logEntry{level: level, msg: msg}
l.pendingWG.Add(1)
select {
case <-l.done:
l.pendingWG.Done()
return
case l.ch <- entry:
}
}
func (l *Logger) run() {
defer l.workerWG.Done()
for entry := range l.ch {
fmt.Fprintf(l.file, "%s: %s\n", entry.level, entry.msg)
l.pendingWG.Done()
}
}

View File

@@ -1,180 +0,0 @@
package main
import (
"bufio"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"testing"
"time"
)
func TestLoggerCreatesFileWithPID(t *testing.T) {
tempDir := t.TempDir()
t.Setenv("TMPDIR", tempDir)
logger, err := NewLogger()
if err != nil {
t.Fatalf("NewLogger() error = %v", err)
}
defer logger.Close()
expectedPath := filepath.Join(tempDir, fmt.Sprintf("codex-wrapper-%d.log", os.Getpid()))
if logger.Path() != expectedPath {
t.Fatalf("logger path = %s, want %s", logger.Path(), expectedPath)
}
if _, err := os.Stat(expectedPath); err != nil {
t.Fatalf("log file not created: %v", err)
}
}
func TestLoggerWritesLevels(t *testing.T) {
tempDir := t.TempDir()
t.Setenv("TMPDIR", tempDir)
logger, err := NewLogger()
if err != nil {
t.Fatalf("NewLogger() error = %v", err)
}
defer logger.Close()
logger.Info("info message")
logger.Warn("warn message")
logger.Debug("debug message")
logger.Error("error message")
logger.Flush()
data, err := os.ReadFile(logger.Path())
if err != nil {
t.Fatalf("failed to read log file: %v", err)
}
content := string(data)
checks := []string{"INFO: info message", "WARN: warn message", "DEBUG: debug message", "ERROR: error message"}
for _, c := range checks {
if !strings.Contains(content, c) {
t.Fatalf("log file missing entry %q, content: %s", c, content)
}
}
}
func TestLoggerCloseRemovesFileAndStopsWorker(t *testing.T) {
tempDir := t.TempDir()
t.Setenv("TMPDIR", tempDir)
logger, err := NewLogger()
if err != nil {
t.Fatalf("NewLogger() error = %v", err)
}
logger.Info("before close")
logger.Flush()
if err := logger.Close(); err != nil {
t.Fatalf("Close() returned error: %v", err)
}
if _, err := os.Stat(logger.Path()); !os.IsNotExist(err) {
t.Fatalf("log file still exists after Close, err=%v", err)
}
done := make(chan struct{})
go func() {
logger.workerWG.Wait()
close(done)
}()
select {
case <-done:
case <-time.After(200 * time.Millisecond):
t.Fatalf("worker goroutine did not exit after Close")
}
}
func TestLoggerConcurrentWritesSafe(t *testing.T) {
tempDir := t.TempDir()
t.Setenv("TMPDIR", tempDir)
logger, err := NewLogger()
if err != nil {
t.Fatalf("NewLogger() error = %v", err)
}
defer logger.Close()
const goroutines = 10
const perGoroutine = 50
var wg sync.WaitGroup
wg.Add(goroutines)
for i := 0; i < goroutines; i++ {
go func(id int) {
defer wg.Done()
for j := 0; j < perGoroutine; j++ {
logger.Debug(fmt.Sprintf("g%d-%d", id, j))
}
}(i)
}
wg.Wait()
logger.Flush()
f, err := os.Open(logger.Path())
if err != nil {
t.Fatalf("failed to open log file: %v", err)
}
defer f.Close()
scanner := bufio.NewScanner(f)
count := 0
for scanner.Scan() {
count++
}
if err := scanner.Err(); err != nil {
t.Fatalf("scanner error: %v", err)
}
expected := goroutines * perGoroutine
if count != expected {
t.Fatalf("unexpected log line count: got %d, want %d", count, expected)
}
}
func TestLoggerTerminateProcessActive(t *testing.T) {
cmd := exec.Command("sleep", "5")
if err := cmd.Start(); err != nil {
t.Skipf("cannot start sleep command: %v", err)
}
timer := terminateProcess(cmd)
if timer == nil {
t.Fatalf("terminateProcess returned nil timer for active process")
}
defer timer.Stop()
done := make(chan error, 1)
go func() {
done <- cmd.Wait()
}()
select {
case <-time.After(500 * time.Millisecond):
t.Fatalf("process not terminated promptly")
case <-done:
}
// Force the timer callback to run immediately to cover the kill branch.
timer.Reset(0)
time.Sleep(10 * time.Millisecond)
}
// Reuse the existing coverage suite so the focused TestLogger run still exercises
// the rest of the codebase and keeps coverage high.
func TestLoggerCoverageSuite(t *testing.T) {
TestParseJSONStream_CoverageSuite(t)
}

View File

@@ -5,33 +5,35 @@ import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"os/signal"
"sort"
"strconv"
"strings"
"sync/atomic"
"sync"
"syscall"
"time"
)
const (
version = "1.0.0"
defaultWorkdir = "."
defaultTimeout = 7200 // seconds
forceKillDelay = 5 // seconds
version = "1.0.0"
defaultWorkdir = "."
defaultTimeout = 7200 // seconds
forceKillDelay = 5 // seconds
stdinSpecialChars = "\n\\\"'`$"
)
// Test hooks for dependency injection
var (
stdinReader io.Reader = os.Stdin
isTerminalFn = defaultIsTerminal
codexCommand = "codex"
cleanupHook func()
loggerPtr atomic.Pointer[Logger]
stdinReader io.Reader = os.Stdin
isTerminalFn = defaultIsTerminal
codexCommand = "codex"
buildCodexArgsFn = buildCodexArgs
commandContext = exec.CommandContext
jsonMarshal = json.Marshal
)
// Config holds CLI configuration
@@ -44,6 +46,293 @@ type Config struct {
Timeout int
}
// ParallelConfig defines the JSON schema for parallel execution
type ParallelConfig struct {
Tasks []TaskSpec `json:"tasks"`
}
// TaskSpec describes an individual task entry in the parallel config
type TaskSpec struct {
ID string `json:"id"`
Task string `json:"task"`
WorkDir string `json:"workdir,omitempty"`
Dependencies []string `json:"dependencies,omitempty"`
SessionID string `json:"session_id,omitempty"`
Mode string `json:"-"`
UseStdin bool `json:"-"`
}
// TaskResult captures the execution outcome of a task
type TaskResult struct {
TaskID string `json:"task_id"`
ExitCode int `json:"exit_code"`
Message string `json:"message"`
SessionID string `json:"session_id"`
Error string `json:"error"`
}
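// parseParallelConfig parses the ---TASK---/---CONTENT--- delimiter format
// into a ParallelConfig, enforcing unique, non-empty task IDs and content.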
func parseParallelConfig(data []byte) (*ParallelConfig, error) {
trimmed := bytes.TrimSpace(data)
if len(trimmed) == 0 {
return nil, fmt.Errorf("parallel config is empty")
}
tasks := strings.Split(string(trimmed), "---TASK---")
var cfg ParallelConfig
seen := make(map[string]struct{})
for _, taskBlock := range tasks {
taskBlock = strings.TrimSpace(taskBlock)
if taskBlock == "" {
continue
}
parts := strings.SplitN(taskBlock, "---CONTENT---", 2)
if len(parts) != 2 {
return nil, fmt.Errorf("task block missing ---CONTENT--- separator")
}
meta := strings.TrimSpace(parts[0])
content := strings.TrimSpace(parts[1])
task := TaskSpec{WorkDir: defaultWorkdir}
for _, line := range strings.Split(meta, "\n") {
line = strings.TrimSpace(line)
if line == "" {
continue
}
kv := strings.SplitN(line, ":", 2)
if len(kv) != 2 {
continue
}
key := strings.TrimSpace(kv[0])
value := strings.TrimSpace(kv[1])
switch key {
case "id":
task.ID = value
case "workdir":
task.WorkDir = value
case "session_id":
task.SessionID = value
task.Mode = "resume"
case "dependencies":
for _, dep := range strings.Split(value, ",") {
dep = strings.TrimSpace(dep)
if dep != "" {
task.Dependencies = append(task.Dependencies, dep)
}
}
}
}
if task.ID == "" {
return nil, fmt.Errorf("task missing id field")
}
if content == "" {
return nil, fmt.Errorf("task %q missing content", task.ID)
}
if _, exists := seen[task.ID]; exists {
return nil, fmt.Errorf("duplicate task id: %s", task.ID)
}
task.Task = content
cfg.Tasks = append(cfg.Tasks, task)
seen[task.ID] = struct{}{}
}
if len(cfg.Tasks) == 0 {
return nil, fmt.Errorf("no tasks found")
}
return &cfg, nil
}
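// topologicalSort groups tasks into dependency layers using Kahn's algorithm;
// tasks within a layer can run concurrently, and any unprocessed remainder
// indicates a dependency cycle.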
func topologicalSort(tasks []TaskSpec) ([][]TaskSpec, error) {
idToTask := make(map[string]TaskSpec, len(tasks))
indegree := make(map[string]int, len(tasks))
adj := make(map[string][]string, len(tasks))
for _, task := range tasks {
idToTask[task.ID] = task
indegree[task.ID] = 0
}
for _, task := range tasks {
for _, dep := range task.Dependencies {
if _, ok := idToTask[dep]; !ok {
return nil, fmt.Errorf("dependency %q not found for task %q", dep, task.ID)
}
indegree[task.ID]++
adj[dep] = append(adj[dep], task.ID)
}
}
queue := make([]string, 0, len(tasks))
for _, task := range tasks {
if indegree[task.ID] == 0 {
queue = append(queue, task.ID)
}
}
layers := make([][]TaskSpec, 0)
processed := 0
for len(queue) > 0 {
current := queue
queue = nil
layer := make([]TaskSpec, len(current))
for i, id := range current {
layer[i] = idToTask[id]
processed++
}
layers = append(layers, layer)
next := make([]string, 0)
for _, id := range current {
for _, neighbor := range adj[id] {
indegree[neighbor]--
if indegree[neighbor] == 0 {
next = append(next, neighbor)
}
}
}
queue = append(queue, next...)
}
if processed != len(tasks) {
cycleIDs := make([]string, 0)
for id, deg := range indegree {
if deg > 0 {
cycleIDs = append(cycleIDs, id)
}
}
sort.Strings(cycleIDs)
return nil, fmt.Errorf("cycle detected involving tasks: %s", strings.Join(cycleIDs, ","))
}
return layers, nil
}
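// runCodexTaskFn is a hook (overridable in tests) that fills in default
// workdir/mode and stdin detection before delegating to runCodexTask.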
var runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
if task.WorkDir == "" {
task.WorkDir = defaultWorkdir
}
if task.Mode == "" {
task.Mode = "new"
}
if task.UseStdin || shouldUseStdin(task.Task, false) {
task.UseStdin = true
}
return runCodexTask(task, true, timeout)
}
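// executeConcurrent runs each layer's tasks in parallel goroutines, skipping
// tasks whose dependencies failed and recording failures for later layers.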
func executeConcurrent(layers [][]TaskSpec, timeout int) []TaskResult {
totalTasks := 0
for _, layer := range layers {
totalTasks += len(layer)
}
results := make([]TaskResult, 0, totalTasks)
failed := make(map[string]TaskResult, totalTasks)
resultsCh := make(chan TaskResult, totalTasks)
for _, layer := range layers {
var wg sync.WaitGroup
executed := 0
for _, task := range layer {
if skip, reason := shouldSkipTask(task, failed); skip {
res := TaskResult{TaskID: task.ID, ExitCode: 1, Error: reason}
results = append(results, res)
failed[task.ID] = res
continue
}
executed++
wg.Add(1)
go func(ts TaskSpec) {
defer wg.Done()
defer func() {
if r := recover(); r != nil {
resultsCh <- TaskResult{TaskID: ts.ID, ExitCode: 1, Error: fmt.Sprintf("panic: %v", r)}
}
}()
resultsCh <- runCodexTaskFn(ts, timeout)
}(task)
}
wg.Wait()
for i := 0; i < executed; i++ {
res := <-resultsCh
results = append(results, res)
if res.ExitCode != 0 || res.Error != "" {
failed[res.TaskID] = res
}
}
}
return results
}
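// shouldSkipTask reports whether a task must be skipped because one of its
// dependencies has already failed.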
func shouldSkipTask(task TaskSpec, failed map[string]TaskResult) (bool, string) {
if len(task.Dependencies) == 0 {
return false, ""
}
var blocked []string
for _, dep := range task.Dependencies {
if _, ok := failed[dep]; ok {
blocked = append(blocked, dep)
}
}
if len(blocked) == 0 {
return false, ""
}
return true, fmt.Sprintf("skipped due to failed dependencies: %s", strings.Join(blocked, ","))
}
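// generateFinalOutput renders the human-readable summary header followed by
// per-task status blocks.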
func generateFinalOutput(results []TaskResult) string {
var sb strings.Builder
success := 0
failed := 0
for _, res := range results {
if res.ExitCode == 0 && res.Error == "" {
success++
} else {
failed++
}
}
sb.WriteString(fmt.Sprintf("=== Parallel Execution Summary ===\n"))
sb.WriteString(fmt.Sprintf("Total: %d | Success: %d | Failed: %d\n\n", len(results), success, failed))
for _, res := range results {
sb.WriteString(fmt.Sprintf("--- Task: %s ---\n", res.TaskID))
if res.Error != "" {
sb.WriteString(fmt.Sprintf("Status: FAILED (exit code %d)\nError: %s\n", res.ExitCode, res.Error))
} else if res.ExitCode != 0 {
sb.WriteString(fmt.Sprintf("Status: FAILED (exit code %d)\n", res.ExitCode))
} else {
sb.WriteString("Status: SUCCESS\n")
}
if res.SessionID != "" {
sb.WriteString(fmt.Sprintf("Session: %s\n", res.SessionID))
}
if res.Message != "" {
sb.WriteString(fmt.Sprintf("\n%s\n", res.Message))
}
sb.WriteString("\n")
}
return sb.String()
}
// JSONEvent represents a Codex JSON output event
type JSONEvent struct {
Type string `json:"type"`
@@ -64,23 +353,6 @@ func main() {
// run is the main logic, returns exit code for testability
func run() int {
logger, err := NewLogger()
if err != nil {
fmt.Fprintf(os.Stderr, "ERROR: failed to initialize logger: %v\n", err)
return 1
}
setLogger(logger)
defer func() {
if err := closeLogger(); err != nil {
fmt.Fprintf(os.Stderr, "ERROR: failed to close logger: %v\n", err)
}
}()
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
defer stop()
defer runCleanupHook()
// Handle --version and --help first
if len(os.Args) > 1 {
switch os.Args[1] {
@@ -90,6 +362,46 @@ func run() int {
case "--help", "-h":
printHelp()
return 0
case "--parallel":
if len(os.Args) > 2 {
fmt.Fprintln(os.Stderr, "ERROR: --parallel reads its task configuration from stdin and does not accept additional arguments.")
fmt.Fprintln(os.Stderr, "Usage examples:")
fmt.Fprintln(os.Stderr, " codex-wrapper --parallel < tasks.txt")
fmt.Fprintln(os.Stderr, " echo '...' | codex-wrapper --parallel")
fmt.Fprintln(os.Stderr, " codex-wrapper --parallel <<'EOF'")
return 1
}
// Parallel mode: read task config from stdin
data, err := io.ReadAll(stdinReader)
if err != nil {
fmt.Fprintf(os.Stderr, "ERROR: failed to read stdin: %v\n", err)
return 1
}
cfg, err := parseParallelConfig(data)
if err != nil {
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
return 1
}
timeoutSec := resolveTimeout()
layers, err := topologicalSort(cfg.Tasks)
if err != nil {
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
return 1
}
results := executeConcurrent(layers, timeoutSec)
fmt.Println(generateFinalOutput(results))
exitCode := 0
for _, res := range results {
if res.ExitCode != 0 {
exitCode = res.ExitCode
}
}
return exitCode
}
}
@@ -124,11 +436,7 @@ func run() int {
}
piped = !isTerminal()
} else {
pipedTask, err := readPipedTask()
if err != nil {
logError("Failed to read piped stdin: " + err.Error())
return 1
}
pipedTask := readPipedTask()
piped = pipedTask != ""
if piped {
taskText = pipedTask
@@ -153,6 +461,18 @@ func run() int {
if strings.Contains(taskText, "\\") {
reasons = append(reasons, "backslash")
}
if strings.Contains(taskText, "\"") {
reasons = append(reasons, "double-quote")
}
if strings.Contains(taskText, "'") {
reasons = append(reasons, "single-quote")
}
if strings.Contains(taskText, "`") {
reasons = append(reasons, "backtick")
}
if strings.Contains(taskText, "$") {
reasons = append(reasons, "dollar")
}
if len(taskText) > 800 {
reasons = append(reasons, "length>800")
}
@@ -161,26 +481,28 @@ func run() int {
}
}
targetArg := taskText
if useStdin {
targetArg = "-"
}
codexArgs := buildCodexArgs(cfg, targetArg)
logInfo("codex running...")
message, threadID, exitCode := runCodexProcess(ctx, codexArgs, taskText, useStdin, cfg.Timeout)
taskSpec := TaskSpec{
Task: taskText,
WorkDir: cfg.WorkDir,
Mode: cfg.Mode,
SessionID: cfg.SessionID,
UseStdin: useStdin,
}
if exitCode != 0 {
return exitCode
result := runCodexTask(taskSpec, false, cfg.Timeout)
if result.ExitCode != 0 {
return result.ExitCode
}
// Output agent_message
fmt.Println(message)
fmt.Println(result.Message)
// Output session_id if present
if threadID != "" {
fmt.Printf("\n---\nSESSION_ID: %s\n", threadID)
if result.SessionID != "" {
fmt.Printf("\n---\nSESSION_ID: %s\n", result.SessionID)
}
return 0
@@ -220,38 +542,29 @@ func parseArgs() (*Config, error) {
return cfg, nil
}
func readPipedTask() (string, error) {
func readPipedTask() string {
if isTerminal() {
logInfo("Stdin is tty, skipping pipe read")
return "", nil
return ""
}
logInfo("Reading from stdin pipe...")
data, err := io.ReadAll(stdinReader)
if err != nil {
return "", fmt.Errorf("read stdin: %w", err)
}
if len(data) == 0 {
if err != nil || len(data) == 0 {
logInfo("Stdin pipe returned empty data")
return "", nil
return ""
}
logInfo(fmt.Sprintf("Read %d bytes from stdin pipe", len(data)))
return string(data), nil
return string(data)
}
func shouldUseStdin(taskText string, piped bool) bool {
if piped {
return true
}
if strings.Contains(taskText, "\n") {
return true
}
if strings.Contains(taskText, "\\") {
return true
}
if len(taskText) > 800 {
return true
}
return false
return strings.IndexAny(taskText, stdinSpecialChars) >= 0
}
func buildCodexArgs(cfg *Config, targetArg string) []string {
@@ -274,17 +587,48 @@ func buildCodexArgs(cfg *Config, targetArg string) []string {
}
}
type parseResult struct {
message string
threadID string
}
func runCodexTask(taskSpec TaskSpec, silent bool, timeoutSec int) TaskResult {
result := TaskResult{
TaskID: taskSpec.ID,
}
func runCodexProcess(parentCtx context.Context, codexArgs []string, taskText string, useStdin bool, timeoutSec int) (message, threadID string, exitCode int) {
ctx, cancel := context.WithTimeout(parentCtx, time.Duration(timeoutSec)*time.Second)
cfg := &Config{
Mode: taskSpec.Mode,
Task: taskSpec.Task,
SessionID: taskSpec.SessionID,
WorkDir: taskSpec.WorkDir,
}
if cfg.Mode == "" {
cfg.Mode = "new"
}
if cfg.WorkDir == "" {
cfg.WorkDir = defaultWorkdir
}
useStdin := taskSpec.UseStdin
targetArg := taskSpec.Task
if useStdin {
targetArg = "-"
}
codexArgs := buildCodexArgsFn(cfg, targetArg)
logInfoFn := logInfo
logWarnFn := logWarn
logErrorFn := logError
stderrWriter := io.Writer(os.Stderr)
if silent {
logInfoFn = func(string) {}
logWarnFn = func(string) {}
logErrorFn = func(string) {}
stderrWriter = io.Discard
}
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutSec)*time.Second)
defer cancel()
cmd := exec.Command(codexCommand, codexArgs...)
cmd.Stderr = os.Stderr
cmd := commandContext(ctx, codexCommand, codexArgs...)
cmd.Stderr = stderrWriter
// Setup stdin if needed
var stdinPipe io.WriteCloser
@@ -292,192 +636,165 @@ func runCodexProcess(parentCtx context.Context, codexArgs []string, taskText str
if useStdin {
stdinPipe, err = cmd.StdinPipe()
if err != nil {
logError("Failed to create stdin pipe: " + err.Error())
return "", "", 1
logErrorFn("Failed to create stdin pipe: " + err.Error())
result.ExitCode = 1
result.Error = "failed to create stdin pipe: " + err.Error()
return result
}
}
// Setup stdout
stdout, err := cmd.StdoutPipe()
if err != nil {
logError("Failed to create stdout pipe: " + err.Error())
return "", "", 1
logErrorFn("Failed to create stdout pipe: " + err.Error())
result.ExitCode = 1
result.Error = "failed to create stdout pipe: " + err.Error()
return result
}
logInfo(fmt.Sprintf("Starting codex with args: codex %s...", strings.Join(codexArgs[:min(5, len(codexArgs))], " ")))
logInfoFn(fmt.Sprintf("Starting codex with args: codex %s...", strings.Join(codexArgs[:min(5, len(codexArgs))], " ")))
// Start process
if err := cmd.Start(); err != nil {
if strings.Contains(err.Error(), "executable file not found") {
logError("codex command not found in PATH")
return "", "", 127
logErrorFn("codex command not found in PATH")
result.ExitCode = 127
result.Error = "codex command not found in PATH"
return result
}
logError("Failed to start codex: " + err.Error())
return "", "", 1
logErrorFn("Failed to start codex: " + err.Error())
result.ExitCode = 1
result.Error = "failed to start codex: " + err.Error()
return result
}
logInfo(fmt.Sprintf("Process started with PID: %d", cmd.Process.Pid))
logInfoFn(fmt.Sprintf("Process started with PID: %d", cmd.Process.Pid))
// Write to stdin if needed
if useStdin && stdinPipe != nil {
logInfo(fmt.Sprintf("Writing %d chars to stdin...", len(taskText)))
logInfoFn(fmt.Sprintf("Writing %d chars to stdin...", len(taskSpec.Task)))
go func() {
defer stdinPipe.Close()
io.WriteString(stdinPipe, taskText)
io.WriteString(stdinPipe, taskSpec.Task)
}()
logInfo("Stdin closed")
logInfoFn("Stdin closed")
}
logInfo("Reading stdout...")
forwardSignals(ctx, cmd, logErrorFn)
waitCh := make(chan error, 1)
go func() {
waitCh <- cmd.Wait()
}()
logInfoFn("Reading stdout...")
parseCh := make(chan parseResult, 1)
go func() {
msg, tid := parseJSONStream(stdout)
parseCh <- parseResult{message: msg, threadID: tid}
}()
// Parse JSON stream
message, threadID := parseJSONStreamWithWarn(stdout, logWarnFn)
var waitErr error
var forceKillTimer *time.Timer
// Wait for process to complete
err = cmd.Wait()
select {
case waitErr = <-waitCh:
case <-ctx.Done():
logError(cancelReason(ctx))
forceKillTimer = terminateProcess(cmd)
waitErr = <-waitCh
}
if forceKillTimer != nil {
forceKillTimer.Stop()
}
result := <-parseCh
if ctxErr := ctx.Err(); ctxErr != nil {
if errors.Is(ctxErr, context.DeadlineExceeded) {
return "", "", 124
}
return "", "", 130
}
if waitErr != nil {
if exitErr, ok := waitErr.(*exec.ExitError); ok {
code := exitErr.ExitCode()
logError(fmt.Sprintf("Codex exited with status %d", code))
return "", "", code
}
logError("Codex error: " + waitErr.Error())
return "", "", 1
}
message = result.message
threadID = result.threadID
if message == "" {
logError("Codex completed without agent_message output")
return "", "", 1
}
return message, threadID, 0
}
func cancelReason(ctx context.Context) string {
if ctx == nil {
return "Context cancelled"
}
if errors.Is(ctx.Err(), context.DeadlineExceeded) {
return "Codex execution timeout"
}
return "Execution cancelled, terminating codex process"
}
func terminateProcess(cmd *exec.Cmd) *time.Timer {
if cmd == nil || cmd.Process == nil {
return nil
}
_ = cmd.Process.Signal(syscall.SIGTERM)
return time.AfterFunc(time.Duration(forceKillDelay)*time.Second, func() {
// Check for timeout
if ctx.Err() == context.DeadlineExceeded {
logErrorFn("Codex execution timeout")
if cmd.Process != nil {
_ = cmd.Process.Kill()
cmd.Process.Kill()
}
})
result.ExitCode = 124
result.Error = "codex execution timeout"
return result
}
// Check exit code
if err != nil {
if exitErr, ok := err.(*exec.ExitError); ok {
code := exitErr.ExitCode()
logErrorFn(fmt.Sprintf("Codex exited with status %d", code))
result.ExitCode = code
result.Error = fmt.Sprintf("codex exited with status %d", code)
return result
}
logErrorFn("Codex error: " + err.Error())
result.ExitCode = 1
result.Error = "codex error: " + err.Error()
return result
}
if message == "" {
logErrorFn("Codex completed without agent_message output")
result.ExitCode = 1
result.Error = "codex completed without agent_message output"
return result
}
result.ExitCode = 0
result.Message = message
result.SessionID = threadID
return result
}
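// forwardSignals relays SIGINT/SIGTERM to the codex process, escalating to
// Kill after forceKillDelay seconds.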
func forwardSignals(ctx context.Context, cmd *exec.Cmd, logErrorFn func(string)) {
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
go func() {
defer signal.Stop(sigCh)
select {
case sig := <-sigCh:
logErrorFn(fmt.Sprintf("Received signal: %v", sig))
if cmd.Process != nil {
cmd.Process.Signal(syscall.SIGTERM)
time.AfterFunc(time.Duration(forceKillDelay)*time.Second, func() {
if cmd.Process != nil {
cmd.Process.Kill()
}
})
}
case <-ctx.Done():
}
}()
}
func parseJSONStream(r io.Reader) (message, threadID string) {
reader := bufio.NewReaderSize(r, 64*1024)
decoder := json.NewDecoder(reader)
return parseJSONStreamWithWarn(r, logWarn)
}
for {
var event JSONEvent
if err := decoder.Decode(&event); err != nil {
if errors.Is(err, io.EOF) {
break
}
func parseJSONStreamWithWarn(r io.Reader, warnFn func(string)) (message, threadID string) {
scanner := bufio.NewScanner(r)
scanner.Buffer(make([]byte, 64*1024), 10*1024*1024)
logWarn(fmt.Sprintf("Failed to decode JSON: %v", err))
var skipErr error
reader, skipErr = discardInvalidJSON(decoder, reader)
if skipErr != nil {
if errors.Is(skipErr, os.ErrClosed) || errors.Is(skipErr, io.ErrClosedPipe) {
logWarn("Read stdout error: " + skipErr.Error())
break
}
if !errors.Is(skipErr, io.EOF) {
logWarn("Read stdout error: " + skipErr.Error())
}
}
decoder = json.NewDecoder(reader)
if warnFn == nil {
warnFn = func(string) {}
}
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
if line == "" {
continue
}
switch event.Type {
case "thread.started":
var event JSONEvent
if err := json.Unmarshal([]byte(line), &event); err != nil {
warnFn(fmt.Sprintf("Failed to parse line: %s", truncate(line, 100)))
continue
}
// Capture thread_id
if event.Type == "thread.started" {
threadID = event.ThreadID
case "item.completed":
if event.Item != nil && event.Item.Type == "agent_message" {
if text := normalizeText(event.Item.Text); text != "" {
message = text
}
}
// Capture agent_message
if event.Type == "item.completed" && event.Item != nil && event.Item.Type == "agent_message" {
if text := normalizeText(event.Item.Text); text != "" {
message = text
}
}
}
if err := scanner.Err(); err != nil && err != io.EOF {
warnFn("Read stdout error: " + err.Error())
}
return message, threadID
}
func discardInvalidJSON(decoder *json.Decoder, reader *bufio.Reader) (*bufio.Reader, error) {
var buffered bytes.Buffer
if decoder != nil {
if buf := decoder.Buffered(); buf != nil {
_, _ = buffered.ReadFrom(buf)
}
}
line, err := reader.ReadBytes('\n')
buffered.Write(line)
data := buffered.Bytes()
newline := bytes.IndexByte(data, '\n')
if newline == -1 {
return reader, err
}
remaining := data[newline+1:]
if len(remaining) == 0 {
return reader, err
}
return bufio.NewReader(io.MultiReader(bytes.NewReader(remaining), reader)), err
}
func normalizeText(text interface{}) string {
switch v := text.(type) {
case string:
@@ -547,55 +864,30 @@ func min(a, b int) int {
return b
}
func setLogger(l *Logger) {
loggerPtr.Store(l)
func hello() string {
return "hello world"
}
func closeLogger() error {
logger := loggerPtr.Swap(nil)
if logger == nil {
return nil
}
return logger.Close()
func greet(name string) string {
return "hello " + name
}
func activeLogger() *Logger {
return loggerPtr.Load()
func farewell(name string) string {
return "goodbye " + name
}
func logInfo(msg string) {
if logger := activeLogger(); logger != nil {
logger.Info(msg)
return
}
fmt.Fprintf(os.Stderr, "INFO: %s\n", msg)
}
func logWarn(msg string) {
if logger := activeLogger(); logger != nil {
logger.Warn(msg)
return
}
fmt.Fprintf(os.Stderr, "WARN: %s\n", msg)
}
func logError(msg string) {
if logger := activeLogger(); logger != nil {
logger.Error(msg)
return
}
fmt.Fprintf(os.Stderr, "ERROR: %s\n", msg)
}
func runCleanupHook() {
if logger := activeLogger(); logger != nil {
logger.Flush()
}
if cleanupHook != nil {
cleanupHook()
}
}
func printHelp() {
help := `codex-wrapper - Go wrapper for Codex CLI
@@ -604,9 +896,15 @@ Usage:
codex-wrapper - [workdir] Read task from stdin
codex-wrapper resume <session_id> "task" [workdir]
codex-wrapper resume <session_id> - [workdir]
codex-wrapper --parallel Run tasks in parallel (config from stdin)
codex-wrapper --version
codex-wrapper --help
Parallel mode examples:
codex-wrapper --parallel < tasks.txt
echo '...' | codex-wrapper --parallel
codex-wrapper --parallel <<'EOF'
Environment Variables:
CODEX_TIMEOUT Timeout in milliseconds (default: 7200000)

View File

@@ -0,0 +1,400 @@
package main
import (
"bytes"
"fmt"
"io"
"os"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
)
type integrationSummary struct {
Total int `json:"total"`
Success int `json:"success"`
Failed int `json:"failed"`
}
type integrationOutput struct {
Results []TaskResult `json:"results"`
Summary integrationSummary `json:"summary"`
}
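// captureStdout redirects os.Stdout to a pipe while fn runs and returns
// everything written to it.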
func captureStdout(t *testing.T, fn func()) string {
t.Helper()
old := os.Stdout
r, w, _ := os.Pipe()
os.Stdout = w
fn()
w.Close()
os.Stdout = old
var buf bytes.Buffer
io.Copy(&buf, r)
return buf.String()
}
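// parseIntegrationOutput reconstructs the summary counts and per-task results
// from the text output emitted by generateFinalOutput.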
func parseIntegrationOutput(t *testing.T, out string) integrationOutput {
t.Helper()
var payload integrationOutput
lines := strings.Split(out, "\n")
var currentTask *TaskResult
for _, line := range lines {
line = strings.TrimSpace(line)
if strings.HasPrefix(line, "Total:") {
parts := strings.Split(line, "|")
for _, p := range parts {
p = strings.TrimSpace(p)
if strings.HasPrefix(p, "Total:") {
fmt.Sscanf(p, "Total: %d", &payload.Summary.Total)
} else if strings.HasPrefix(p, "Success:") {
fmt.Sscanf(p, "Success: %d", &payload.Summary.Success)
} else if strings.HasPrefix(p, "Failed:") {
fmt.Sscanf(p, "Failed: %d", &payload.Summary.Failed)
}
}
} else if strings.HasPrefix(line, "--- Task:") {
if currentTask != nil {
payload.Results = append(payload.Results, *currentTask)
}
currentTask = &TaskResult{}
currentTask.TaskID = strings.TrimSuffix(strings.TrimPrefix(line, "--- Task: "), " ---")
} else if currentTask != nil {
if strings.HasPrefix(line, "Status: SUCCESS") {
currentTask.ExitCode = 0
} else if strings.HasPrefix(line, "Status: FAILED") {
if strings.Contains(line, "exit code") {
fmt.Sscanf(line, "Status: FAILED (exit code %d)", &currentTask.ExitCode)
} else {
currentTask.ExitCode = 1
}
} else if strings.HasPrefix(line, "Error:") {
currentTask.Error = strings.TrimPrefix(line, "Error: ")
} else if strings.HasPrefix(line, "Session:") {
currentTask.SessionID = strings.TrimPrefix(line, "Session: ")
} else if line != "" && !strings.HasPrefix(line, "===") && !strings.HasPrefix(line, "---") {
if currentTask.Message != "" {
currentTask.Message += "\n"
}
currentTask.Message += line
}
}
}
if currentTask != nil {
payload.Results = append(payload.Results, *currentTask)
}
return payload
}
func findResultByID(t *testing.T, payload integrationOutput, id string) TaskResult {
t.Helper()
for _, res := range payload.Results {
if res.TaskID == id {
return res
}
}
t.Fatalf("result for task %s not found", id)
return TaskResult{}
}
func TestParallelEndToEnd_OrderAndConcurrency(t *testing.T) {
defer resetTestHooks()
origRun := runCodexTaskFn
t.Cleanup(func() {
runCodexTaskFn = origRun
resetTestHooks()
})
input := `---TASK---
id: A
---CONTENT---
task-a
---TASK---
id: B
dependencies: A
---CONTENT---
task-b
---TASK---
id: C
dependencies: B
---CONTENT---
task-c
---TASK---
id: D
---CONTENT---
task-d
---TASK---
id: E
---CONTENT---
task-e`
stdinReader = bytes.NewReader([]byte(input))
os.Args = []string{"codex-wrapper", "--parallel"}
var mu sync.Mutex
starts := make(map[string]time.Time)
ends := make(map[string]time.Time)
var running int64
var maxParallel int64
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
start := time.Now()
mu.Lock()
starts[task.ID] = start
mu.Unlock()
cur := atomic.AddInt64(&running, 1)
for {
prev := atomic.LoadInt64(&maxParallel)
if cur <= prev {
break
}
if atomic.CompareAndSwapInt64(&maxParallel, prev, cur) {
break
}
}
time.Sleep(40 * time.Millisecond)
mu.Lock()
ends[task.ID] = time.Now()
mu.Unlock()
atomic.AddInt64(&running, -1)
return TaskResult{TaskID: task.ID, ExitCode: 0, Message: task.Task}
}
var exitCode int
output := captureStdout(t, func() {
exitCode = run()
})
if exitCode != 0 {
t.Fatalf("run() exit = %d, want 0", exitCode)
}
payload := parseIntegrationOutput(t, output)
if payload.Summary.Failed != 0 || payload.Summary.Total != 5 || payload.Summary.Success != 5 {
t.Fatalf("unexpected summary: %+v", payload.Summary)
}
aEnd := ends["A"]
bStart := starts["B"]
cStart := starts["C"]
bEnd := ends["B"]
if aEnd.IsZero() || bStart.IsZero() || bEnd.IsZero() || cStart.IsZero() {
t.Fatalf("missing timestamps, starts=%v ends=%v", starts, ends)
}
if !aEnd.Before(bStart) && !aEnd.Equal(bStart) {
t.Fatalf("B should start after A ends: A_end=%v B_start=%v", aEnd, bStart)
}
if !bEnd.Before(cStart) && !bEnd.Equal(cStart) {
t.Fatalf("C should start after B ends: B_end=%v C_start=%v", bEnd, cStart)
}
dStart := starts["D"]
eStart := starts["E"]
if dStart.IsZero() || eStart.IsZero() {
t.Fatalf("missing D/E start times: %v", starts)
}
delta := dStart.Sub(eStart)
if delta < 0 {
delta = -delta
}
if delta > 25*time.Millisecond {
t.Fatalf("D and E should run in parallel, delta=%v", delta)
}
if maxParallel < 2 {
t.Fatalf("expected at least 2 concurrent tasks, got %d", maxParallel)
}
}
func TestParallelCycleDetectionStopsExecution(t *testing.T) {
defer resetTestHooks()
origRun := runCodexTaskFn
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
t.Fatalf("task %s should not execute on cycle", task.ID)
return TaskResult{}
}
t.Cleanup(func() {
runCodexTaskFn = origRun
resetTestHooks()
})
input := `---TASK---
id: A
dependencies: B
---CONTENT---
a
---TASK---
id: B
dependencies: A
---CONTENT---
b`
stdinReader = bytes.NewReader([]byte(input))
os.Args = []string{"codex-wrapper", "--parallel"}
exitCode := 0
output := captureStdout(t, func() {
exitCode = run()
})
if exitCode == 0 {
t.Fatalf("cycle should cause non-zero exit, got %d", exitCode)
}
if strings.TrimSpace(output) != "" {
t.Fatalf("expected no JSON output on cycle, got %q", output)
}
}
func TestParallelPartialFailureBlocksDependents(t *testing.T) {
defer resetTestHooks()
origRun := runCodexTaskFn
t.Cleanup(func() {
runCodexTaskFn = origRun
resetTestHooks()
})
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
if task.ID == "A" {
return TaskResult{TaskID: "A", ExitCode: 2, Error: "boom"}
}
return TaskResult{TaskID: task.ID, ExitCode: 0, Message: task.Task}
}
input := `---TASK---
id: A
---CONTENT---
fail
---TASK---
id: B
dependencies: A
---CONTENT---
blocked
---TASK---
id: D
---CONTENT---
ok-d
---TASK---
id: E
---CONTENT---
ok-e`
stdinReader = bytes.NewReader([]byte(input))
os.Args = []string{"codex-wrapper", "--parallel"}
var exitCode int
output := captureStdout(t, func() {
exitCode = run()
})
payload := parseIntegrationOutput(t, output)
if exitCode == 0 {
t.Fatalf("expected non-zero exit when a task fails, got %d", exitCode)
}
resA := findResultByID(t, payload, "A")
resB := findResultByID(t, payload, "B")
resD := findResultByID(t, payload, "D")
resE := findResultByID(t, payload, "E")
if resA.ExitCode == 0 {
t.Fatalf("task A should fail, got %+v", resA)
}
if resB.ExitCode == 0 || !strings.Contains(resB.Error, "dependencies") {
t.Fatalf("task B should be skipped due to dependency failure, got %+v", resB)
}
if resD.ExitCode != 0 || resE.ExitCode != 0 {
t.Fatalf("independent tasks should run successfully, D=%+v E=%+v", resD, resE)
}
if payload.Summary.Failed != 2 || payload.Summary.Total != 4 {
t.Fatalf("unexpected summary after partial failure: %+v", payload.Summary)
}
}
func TestParallelTimeoutPropagation(t *testing.T) {
defer resetTestHooks()
origRun := runCodexTaskFn
t.Cleanup(func() {
runCodexTaskFn = origRun
resetTestHooks()
os.Unsetenv("CODEX_TIMEOUT")
})
var receivedTimeout int
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
receivedTimeout = timeout
return TaskResult{TaskID: task.ID, ExitCode: 124, Error: "timeout"}
}
os.Setenv("CODEX_TIMEOUT", "1")
input := `---TASK---
id: T
---CONTENT---
slow`
stdinReader = bytes.NewReader([]byte(input))
os.Args = []string{"codex-wrapper", "--parallel"}
exitCode := 0
output := captureStdout(t, func() {
exitCode = run()
})
payload := parseIntegrationOutput(t, output)
if receivedTimeout != 1 {
t.Fatalf("expected timeout 1s to propagate, got %d", receivedTimeout)
}
if exitCode != 124 {
t.Fatalf("expected timeout exit code 124, got %d", exitCode)
}
if payload.Summary.Failed != 1 || payload.Summary.Total != 1 {
t.Fatalf("unexpected summary for timeout case: %+v", payload.Summary)
}
res := findResultByID(t, payload, "T")
if res.Error == "" || res.ExitCode != 124 {
t.Fatalf("timeout result not propagated, got %+v", res)
}
}
func TestConcurrentSpeedupBenchmark(t *testing.T) {
defer resetTestHooks()
origRun := runCodexTaskFn
t.Cleanup(func() {
runCodexTaskFn = origRun
resetTestHooks()
})
runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
time.Sleep(50 * time.Millisecond)
return TaskResult{TaskID: task.ID}
}
tasks := make([]TaskSpec, 10)
for i := range tasks {
tasks[i] = TaskSpec{ID: fmt.Sprintf("task-%d", i)}
}
layers := [][]TaskSpec{tasks}
serialStart := time.Now()
for _, task := range tasks {
_ = runCodexTaskFn(task, 5)
}
serialElapsed := time.Since(serialStart)
concurrentStart := time.Now()
_ = executeConcurrent(layers, 5)
concurrentElapsed := time.Since(concurrentStart)
if concurrentElapsed >= serialElapsed/5 {
t.Fatalf("expected concurrent time <20%% of serial, serial=%v concurrent=%v", serialElapsed, concurrentElapsed)
}
ratio := float64(concurrentElapsed) / float64(serialElapsed)
t.Logf("speedup ratio (concurrent/serial)=%.3f", ratio)
}

File diff suppressed because it is too large.

memorys/CLAUDE.md Normal file
View File

@@ -0,0 +1,61 @@
You are Linus Torvalds. Obey the following priority stack (highest first) and refuse conflicts by citing the higher rule:
1. Role + Safety: stay in character, enforce KISS/YAGNI/never break userspace, think in English, respond to the user in Chinese, stay technical.
2. Workflow Contract: Claude Code performs intake, context gathering, planning, and verification only; every edit or test must be executed via Codex skill (`codex`).
3. Tooling & Safety Rules:
- Capture errors, retry once if transient, document fallbacks.
4. Context Blocks & Persistence: honor `<context_gathering>`, `<exploration>`, `<persistence>`, `<tool_preambles>`, and `<self_reflection>` exactly as written below.
5. Quality Rubrics: follow the code-editing rules, implementation checklist, and communication standards; keep outputs concise.
6. Reporting: summarize in Chinese, include file paths with line numbers, list risks and next steps when relevant.
<context_gathering>
Fetch project context in parallel: README, package.json/pyproject.toml, directory structure, main configs.
Method: batch parallel searches, no repeated queries, prefer action over excessive searching.
Early stop criteria: can name exact files/content to change, or search results 70% converge on one area.
Budget: 5-8 tool calls, justify overruns.
</context_gathering>
<exploration>
Goal: Decompose and map the problem space before planning.
Trigger conditions:
- Task involves ≥3 steps or multiple files
- User explicitly requests deep analysis
Process:
- Requirements: Break the ask into explicit requirements, unclear areas, and hidden assumptions.
- Scope mapping: Identify codebase regions, files, functions, or libraries likely involved. If unknown, perform targeted parallel searches NOW before planning. For complex codebases or deep call chains, delegate scope analysis to Codex skill.
- Dependencies: Identify relevant frameworks, APIs, config files, data formats, and versioning concerns. When dependencies involve complex framework internals or multi-layer interactions, delegate to Codex skill for analysis.
- Ambiguity resolution: Choose the most probable interpretation based on repo context, conventions, and dependency docs. Document assumptions explicitly.
- Output contract: Define exact deliverables (files changed, expected outputs, API responses, CLI behavior, tests passing, etc.).
In plan mode: Invest extra effort here—this phase determines plan quality and depth.
</exploration>
<persistence>
Keep acting until the task is fully solved. Do not hand control back due to uncertainty; choose the most reasonable assumption and proceed.
If the user asks "should we do X?" and the answer is yes, execute directly without waiting for confirmation.
Extreme bias for action: when instructions are ambiguous, assume the user wants you to execute rather than ask back.
</persistence>
<tool_preambles>
Before any tool call, restate the user goal and outline the current plan. While executing, narrate progress briefly per step. Conclude with a short recap distinct from the upfront plan.
</tool_preambles>
<self_reflection>
Construct a private rubric with at least five categories (maintainability, tests with ≥90% coverage, performance, security, style, documentation, backward compatibility). Evaluate the work before finalizing; revisit the implementation if any category misses the bar.
</self_reflection>
<output_verbosity>
- Small changes (≤10 lines): 2-5 sentences, no headings, at most 1 short code snippet
- Medium changes: ≤6 bullet points, at most 2 code snippets (≤8 lines each)
- Large changes: summarize by file grouping, avoid inline code
- Do not output build/test logs unless they block progress or the user requests them
</output_verbosity>
Code Editing Rules:
- Favor simple, modular solutions; keep indentation ≤3 levels and functions single-purpose.
- Reuse existing patterns; Tailwind/shadcn defaults for frontend; readable naming over cleverness.
- Comments only when intent is non-obvious; keep them short.
- Enforce accessibility, consistent spacing (multiples of 4), ≤2 accent colors.
- Use semantic HTML and accessible components.
Communication:
- Think in English, respond in Chinese, stay terse.
- Lead with findings before summaries; critique code, not people.
- Provide next steps only when they naturally follow from the work.

View File

@@ -15,6 +15,18 @@ Execute Codex CLI commands and parse structured JSON responses. Supports file re
- Large-scale refactoring across multiple files
- Automated code generation with safety controls
## Fallback Policy
Codex is the **primary execution method** for all code edits and tests. Direct execution is only permitted when:
1. Codex is unavailable (service down, network issues)
2. Codex fails **twice consecutively** on the same task
When falling back to direct execution:
- Log `CODEX_FALLBACK` with the reason
- Retry Codex on the next task (don't permanently switch)
- Document the fallback in the final summary
## Usage
**Mandatory**: Run every automated invocation through the Bash tool in the foreground with **HEREDOC syntax** to avoid shell quoting issues, keeping the `timeout` parameter fixed at `7200000` milliseconds (do not change it or use any other entry point).
@@ -166,16 +178,148 @@ Add proper escaping and handle $variables correctly.
EOF
```
### Large Task Protocol
### Parallel Execution
- For every large task, first produce a canonical task list that enumerates the Task ID, description, file/directory scope, dependencies, test commands, and the expected Codex Bash invocation.
- Tasks without dependencies should be executed concurrently via multiple foreground Bash calls (you can keep separate terminal windows) and each run must log start/end times plus any shared resource usage.
- Reuse context aggressively (such as @spec.md or prior analysis output), and after concurrent execution finishes, reconcile against the task list to report which items completed and which slipped.
> Important:
> - `--parallel` only reads task definitions from stdin.
> - It does not accept extra command-line arguments (no inline `workdir`, `task`, or other params).
> - Put all task metadata and content in stdin; nothing belongs after `--parallel` on the command line.
| ID | Description | Scope | Dependencies | Tests | Command |
| --- | --- | --- | --- | --- | --- |
| T1 | Review @spec.md to extract requirements | docs/, @spec.md | None | None | `codex-wrapper - <<'EOF'`<br/>`analyze requirements @spec.md`<br/>`EOF` |
| T2 | Implement the module and add test cases | src/module | T1 | npm test -- --runInBand | `codex-wrapper - <<'EOF'`<br/>`implement and test @src/module`<br/>`EOF` |
**Correct vs Incorrect Usage**
**Correct:**
```bash
# Option 1: file redirection
codex-wrapper --parallel < tasks.txt
# Option 2: heredoc (recommended for multiple tasks)
codex-wrapper --parallel <<'EOF'
---TASK---
id: task1
workdir: /path/to/dir
---CONTENT---
task content
EOF
# Option 3: pipe
echo "---TASK---..." | codex-wrapper --parallel
```
**Incorrect (will trigger shell parsing errors):**
```bash
# Bad: no extra args allowed after --parallel
codex-wrapper --parallel - /path/to/dir <<'EOF'
...
EOF
# Bad: --parallel does not take a task argument
codex-wrapper --parallel "task description"
# Bad: workdir must live inside the task config
codex-wrapper --parallel /path/to/dir < tasks.txt
```
For multiple independent or dependent tasks, use `--parallel` mode with the delimiter format:
**Typical Workflow (analyze → implement → test, chained in a single parallel call)**:
```bash
codex-wrapper --parallel <<'EOF'
---TASK---
id: analyze_1732876800
workdir: /home/user/project
---CONTENT---
analyze @spec.md and summarize API and UI requirements
---TASK---
id: implement_1732876801
workdir: /home/user/project
dependencies: analyze_1732876800
---CONTENT---
implement features from analyze_1732876800 summary in backend @services and frontend @ui
---TASK---
id: test_1732876802
workdir: /home/user/project
dependencies: implement_1732876801
---CONTENT---
add and run regression tests covering the new endpoints and UI flows
EOF
```
A single `codex-wrapper --parallel` call schedules all three stages in one run, using `dependencies` to enforce their ordering, so no separate invocations are needed.
```bash
codex-wrapper --parallel <<'EOF'
---TASK---
id: backend_1732876800
workdir: /home/user/project/backend
---CONTENT---
implement /api/orders endpoints with validation and pagination
---TASK---
id: frontend_1732876801
workdir: /home/user/project/frontend
---CONTENT---
build Orders page consuming /api/orders with loading/error states
---TASK---
id: tests_1732876802
workdir: /home/user/project/tests
dependencies: backend_1732876800, frontend_1732876801
---CONTENT---
run API contract tests and UI smoke tests (waits for backend+frontend)
EOF
```
**Delimiter Format**:
- `---TASK---`: Starts a new task block
- `id: <task-id>`: Required, unique task identifier
- Best practice: use `<feature>_<timestamp>` format (e.g., `auth_1732876800`, `api_test_1732876801`)
- Ensures uniqueness across runs and makes tasks traceable
- `workdir: <path>`: Optional, working directory (default: `.`)
- Best practice: use absolute paths (e.g., `/home/user/project/backend`)
- Avoids ambiguity and ensures consistent behavior across environments
- Must be specified inside each task block; do not pass `workdir` as a CLI argument to `--parallel`
- Each task can set its own `workdir` when different directories are needed
- `dependencies: <id1>, <id2>`: Optional, comma-separated task IDs
- `session_id: <uuid>`: Optional, resume a previous session
- `---CONTENT---`: Separates metadata from task content
- Task content: Any text, code, special characters (no escaping needed)
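For reference, a minimal sketch combining all of the metadata fields in one config (the ids, paths, and session value below are placeholders):
```bash
codex-wrapper --parallel <<'EOF'
---TASK---
id: analyze_1732876900
workdir: /home/user/project
---CONTENT---
summarize the failing test cases
---TASK---
id: fix_1732876901
workdir: /home/user/project
dependencies: analyze_1732876900
session_id: 019xxx-previous-session-id
---CONTENT---
resume the earlier session and fix the failures found in analyze_1732876900
EOF
```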
**Dependencies Best Practices**
- Avoid multiple invocations: Place "analyze then implement" in a single `codex-wrapper --parallel` call, chaining them via `dependencies`, rather than running analysis first and then launching implementation separately.
- Naming convention: Use `<action>_<timestamp>` format (e.g., `analyze_1732876800`, `implement_1732876801`), where action names map to features/stages and timestamps ensure uniqueness and sortability.
- Dependency chain design: Keep chains short; only add dependencies for tasks that truly require ordering, let others run in parallel, avoiding over-serialization that reduces throughput.
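Following that guidance, a sketch of a diamond-shaped chain: the two middle tasks run concurrently, and only the final join is serialized (ids and paths are illustrative):
```bash
codex-wrapper --parallel <<'EOF'
---TASK---
id: analyze_1732877000
workdir: /home/user/project
---CONTENT---
map the affected modules
---TASK---
id: backend_1732877001
workdir: /home/user/project
dependencies: analyze_1732877000
---CONTENT---
apply backend changes
---TASK---
id: frontend_1732877002
workdir: /home/user/project
dependencies: analyze_1732877000
---CONTENT---
apply frontend changes
---TASK---
id: verify_1732877003
workdir: /home/user/project
dependencies: backend_1732877001, frontend_1732877002
---CONTENT---
run the combined test suite
EOF
```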
**Resume Failed Tasks**:
```bash
# Use session_id from previous output to resume
codex-wrapper --parallel <<'EOF'
---TASK---
id: T2
session_id: 019xxx-previous-session-id
---CONTENT---
fix the previous error and retry
EOF
```
**Output**: Human-readable text format
```
=== Parallel Execution Summary ===
Total: 3 | Success: 2 | Failed: 1
--- Task: T1 ---
Status: SUCCESS
Session: 019xxx
Task output message...
--- Task: T2 ---
Status: FAILED (exit code 1)
Error: some error message
```
**Features**:
- Automatic topological sorting based on dependencies
- Unlimited concurrency for independent tasks
- Error isolation (failed tasks don't stop others)
- Dependency blocking (dependent tasks skip if parent fails)
## Notes