mirror of
https://github.com/cexll/myclaude.git
synced 2026-02-10 03:14:32 +08:00
Compare commits
4 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
8db49f198e | ||
|
|
97dfa907d9 | ||
|
|
5853539cab | ||
|
|
81fa6843d9 |
29
.github/workflows/release.yml
vendored
29
.github/workflows/release.yml
vendored
@@ -91,6 +91,33 @@ jobs:
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Generate Release Notes
|
||||
id: release_notes
|
||||
run: |
|
||||
# Get previous tag
|
||||
PREVIOUS_TAG=$(git tag --sort=-version:refname | grep -v "^${{ github.ref_name }}$" | head -n 1)
|
||||
|
||||
if [ -z "$PREVIOUS_TAG" ]; then
|
||||
echo "No previous tag found, using all commits"
|
||||
COMMITS=$(git log --pretty=format:"- %s (%h)" --no-merges)
|
||||
else
|
||||
echo "Generating notes from $PREVIOUS_TAG to ${{ github.ref_name }}"
|
||||
COMMITS=$(git log ${PREVIOUS_TAG}..${{ github.ref_name }} --pretty=format:"- %s (%h)" --no-merges)
|
||||
fi
|
||||
|
||||
# Create release notes
|
||||
cat > release_notes.md <<EOF
|
||||
## What's Changed
|
||||
|
||||
${COMMITS}
|
||||
|
||||
**Full Changelog**: https://github.com/${{ github.repository }}/compare/${PREVIOUS_TAG}...${{ github.ref_name }}
|
||||
EOF
|
||||
|
||||
cat release_notes.md
|
||||
|
||||
- name: Download all artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
@@ -108,6 +135,6 @@ jobs:
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
files: release/*
|
||||
generate_release_notes: true
|
||||
body_path: release_notes.md
|
||||
draft: false
|
||||
prerelease: false
|
||||
|
||||
@@ -29,6 +29,7 @@ type cliOptions struct {
|
||||
ReasoningEffort string
|
||||
Agent string
|
||||
PromptFile string
|
||||
Skills string
|
||||
SkipPermissions bool
|
||||
Worktree bool
|
||||
|
||||
@@ -134,6 +135,7 @@ func addRootFlags(fs *pflag.FlagSet, opts *cliOptions) {
|
||||
fs.StringVar(&opts.ReasoningEffort, "reasoning-effort", "", "Reasoning effort (backend-specific)")
|
||||
fs.StringVar(&opts.Agent, "agent", "", "Agent preset name (from ~/.codeagent/models.json)")
|
||||
fs.StringVar(&opts.PromptFile, "prompt-file", "", "Prompt file path")
|
||||
fs.StringVar(&opts.Skills, "skills", "", "Comma-separated skill names for spec injection")
|
||||
|
||||
fs.BoolVar(&opts.SkipPermissions, "skip-permissions", false, "Skip permissions prompts (also via CODEAGENT_SKIP_PERMISSIONS)")
|
||||
fs.BoolVar(&opts.SkipPermissions, "dangerously-skip-permissions", false, "Alias for --skip-permissions")
|
||||
@@ -339,6 +341,16 @@ func buildSingleConfig(cmd *cobra.Command, args []string, rawArgv []string, opts
|
||||
return nil, fmt.Errorf("task required")
|
||||
}
|
||||
|
||||
var skills []string
|
||||
if cmd.Flags().Changed("skills") {
|
||||
for _, s := range strings.Split(opts.Skills, ",") {
|
||||
s = strings.TrimSpace(s)
|
||||
if s != "" {
|
||||
skills = append(skills, s)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cfg := &Config{
|
||||
WorkDir: defaultWorkdir,
|
||||
Backend: backendName,
|
||||
@@ -352,6 +364,7 @@ func buildSingleConfig(cmd *cobra.Command, args []string, rawArgv []string, opts
|
||||
MaxParallelWorkers: config.ResolveMaxParallelWorkers(),
|
||||
AllowedTools: resolvedAllowedTools,
|
||||
DisallowedTools: resolvedDisallowedTools,
|
||||
Skills: skills,
|
||||
Worktree: opts.Worktree,
|
||||
}
|
||||
|
||||
@@ -418,7 +431,7 @@ func runParallelMode(cmd *cobra.Command, args []string, opts *cliOptions, v *vip
|
||||
return 1
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed("agent") || cmd.Flags().Changed("prompt-file") || cmd.Flags().Changed("reasoning-effort") {
|
||||
if cmd.Flags().Changed("agent") || cmd.Flags().Changed("prompt-file") || cmd.Flags().Changed("reasoning-effort") || cmd.Flags().Changed("skills") {
|
||||
fmt.Fprintln(os.Stderr, "ERROR: --parallel reads its task configuration from stdin; only --backend, --model, --full-output and --skip-permissions are allowed.")
|
||||
return 1
|
||||
}
|
||||
@@ -585,6 +598,17 @@ func runSingleMode(cfg *Config, name string) int {
|
||||
taskText = wrapTaskWithAgentPrompt(prompt, taskText)
|
||||
}
|
||||
|
||||
// Resolve skills: explicit > auto-detect from workdir
|
||||
skills := cfg.Skills
|
||||
if len(skills) == 0 {
|
||||
skills = detectProjectSkills(cfg.WorkDir)
|
||||
}
|
||||
if len(skills) > 0 {
|
||||
if content := resolveSkillContent(skills, 0); content != "" {
|
||||
taskText = taskText + "\n\n# Domain Best Practices\n\n" + content
|
||||
}
|
||||
}
|
||||
|
||||
useStdin := cfg.ExplicitStdin || shouldUseStdin(taskText, piped)
|
||||
|
||||
targetArg := taskText
|
||||
|
||||
@@ -52,3 +52,11 @@ func runCodexProcess(parentCtx context.Context, codexArgs []string, taskText str
|
||||
func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backend Backend, customArgs []string, useCustomArgs bool, silent bool, timeoutSec int) TaskResult {
|
||||
return executor.RunCodexTaskWithContext(parentCtx, taskSpec, backend, codexCommand, buildCodexArgsFn, customArgs, useCustomArgs, silent, timeoutSec)
|
||||
}
|
||||
|
||||
func detectProjectSkills(workDir string) []string {
|
||||
return executor.DetectProjectSkills(workDir)
|
||||
}
|
||||
|
||||
func resolveSkillContent(skills []string, maxBudget int) string {
|
||||
return executor.ResolveSkillContent(skills, maxBudget)
|
||||
}
|
||||
|
||||
@@ -26,6 +26,7 @@ type Config struct {
|
||||
MaxParallelWorkers int
|
||||
AllowedTools []string
|
||||
DisallowedTools []string
|
||||
Skills []string
|
||||
Worktree bool // Execute in a new git worktree
|
||||
}
|
||||
|
||||
|
||||
@@ -337,6 +337,16 @@ func DefaultRunCodexTaskFn(task TaskSpec, timeout int) TaskResult {
|
||||
}
|
||||
task.Task = WrapTaskWithAgentPrompt(prompt, task.Task)
|
||||
}
|
||||
// Resolve skills: explicit > auto-detect from workdir
|
||||
skills := task.Skills
|
||||
if len(skills) == 0 {
|
||||
skills = DetectProjectSkills(task.WorkDir)
|
||||
}
|
||||
if len(skills) > 0 {
|
||||
if content := ResolveSkillContent(skills, 0); content != "" {
|
||||
task.Task = task.Task + "\n\n# Domain Best Practices\n\n" + content
|
||||
}
|
||||
}
|
||||
if task.UseStdin || ShouldUseStdin(task.Task, false) {
|
||||
task.UseStdin = true
|
||||
}
|
||||
@@ -941,8 +951,13 @@ func RunCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
|
||||
cfg.WorkDir = defaultWorkdir
|
||||
}
|
||||
|
||||
// Handle worktree mode: create a new git worktree and update cfg.WorkDir
|
||||
if taskSpec.Worktree {
|
||||
// Handle worktree mode: check DO_WORKTREE_DIR env var first, then create if needed
|
||||
if worktreeDir := os.Getenv("DO_WORKTREE_DIR"); worktreeDir != "" {
|
||||
// Use existing worktree from /do setup
|
||||
cfg.WorkDir = worktreeDir
|
||||
logInfo(fmt.Sprintf("Using existing worktree from DO_WORKTREE_DIR: %s", worktreeDir))
|
||||
} else if taskSpec.Worktree {
|
||||
// Create new worktree (backward compatibility for standalone --worktree usage)
|
||||
paths, err := createWorktreeFn(cfg.WorkDir)
|
||||
if err != nil {
|
||||
result.ExitCode = 1
|
||||
|
||||
@@ -88,6 +88,13 @@ func ParseParallelConfig(data []byte) (*ParallelConfig, error) {
|
||||
task.Dependencies = append(task.Dependencies, dep)
|
||||
}
|
||||
}
|
||||
case "skills":
|
||||
for _, s := range strings.Split(value, ",") {
|
||||
s = strings.TrimSpace(s)
|
||||
if s != "" {
|
||||
task.Skills = append(task.Skills, s)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -99,17 +106,17 @@ func ParseParallelConfig(data []byte) (*ParallelConfig, error) {
|
||||
if strings.TrimSpace(task.Agent) == "" {
|
||||
return nil, fmt.Errorf("task block #%d has empty agent field", taskIndex)
|
||||
}
|
||||
if err := config.ValidateAgentName(task.Agent); err != nil {
|
||||
return nil, fmt.Errorf("task block #%d invalid agent name: %w", taskIndex, err)
|
||||
}
|
||||
backend, model, promptFile, reasoning, _, _, _, allowedTools, disallowedTools, err := config.ResolveAgentConfig(task.Agent)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("task block #%d failed to resolve agent %q: %w", taskIndex, task.Agent, err)
|
||||
}
|
||||
if task.Backend == "" {
|
||||
task.Backend = backend
|
||||
}
|
||||
if task.Model == "" {
|
||||
if err := config.ValidateAgentName(task.Agent); err != nil {
|
||||
return nil, fmt.Errorf("task block #%d invalid agent name: %w", taskIndex, err)
|
||||
}
|
||||
backend, model, promptFile, reasoning, _, _, _, allowedTools, disallowedTools, err := config.ResolveAgentConfig(task.Agent)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("task block #%d failed to resolve agent %q: %w", taskIndex, task.Agent, err)
|
||||
}
|
||||
if task.Backend == "" {
|
||||
task.Backend = backend
|
||||
}
|
||||
if task.Model == "" {
|
||||
task.Model = model
|
||||
}
|
||||
if task.ReasoningEffort == "" {
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@@ -128,3 +129,116 @@ func ReadAgentPromptFile(path string, allowOutsideClaudeDir bool) (string, error
|
||||
func WrapTaskWithAgentPrompt(prompt string, task string) string {
|
||||
return "<agent-prompt>\n" + prompt + "\n</agent-prompt>\n\n" + task
|
||||
}
|
||||
|
||||
// techSkillMap maps file-existence fingerprints to skill names.
|
||||
var techSkillMap = []struct {
|
||||
Files []string // any of these files → this tech
|
||||
Skills []string
|
||||
}{
|
||||
{Files: []string{"go.mod", "go.sum"}, Skills: []string{"golang-base-practices"}},
|
||||
{Files: []string{"Cargo.toml"}, Skills: []string{"rust-best-practices"}},
|
||||
{Files: []string{"pyproject.toml", "setup.py", "requirements.txt", "Pipfile"}, Skills: []string{"python-best-practices"}},
|
||||
{Files: []string{"package.json"}, Skills: []string{"vercel-react-best-practices", "frontend-design"}},
|
||||
{Files: []string{"vue.config.js", "vite.config.ts", "nuxt.config.ts"}, Skills: []string{"vue-web-app"}},
|
||||
}
|
||||
|
||||
// DetectProjectSkills scans workDir for tech-stack fingerprints and returns
|
||||
// skill names that are both detected and installed at ~/.claude/skills/{name}/SKILL.md.
|
||||
func DetectProjectSkills(workDir string) []string {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
var detected []string
|
||||
seen := make(map[string]bool)
|
||||
for _, entry := range techSkillMap {
|
||||
for _, f := range entry.Files {
|
||||
if _, err := os.Stat(filepath.Join(workDir, f)); err == nil {
|
||||
for _, skill := range entry.Skills {
|
||||
if seen[skill] {
|
||||
continue
|
||||
}
|
||||
skillPath := filepath.Join(home, ".claude", "skills", skill, "SKILL.md")
|
||||
if _, err := os.Stat(skillPath); err == nil {
|
||||
detected = append(detected, skill)
|
||||
seen[skill] = true
|
||||
}
|
||||
}
|
||||
break // one matching file is enough for this entry
|
||||
}
|
||||
}
|
||||
}
|
||||
return detected
|
||||
}
|
||||
|
||||
const defaultSkillBudget = 16000 // chars, ~4K tokens
|
||||
|
||||
// validSkillName ensures skill names contain only safe characters to prevent path traversal
|
||||
var validSkillName = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`)
|
||||
|
||||
// ResolveSkillContent reads SKILL.md files for the given skill names,
|
||||
// strips YAML frontmatter, wraps each in <skill> tags, and enforces a
|
||||
// character budget to prevent context bloat.
|
||||
func ResolveSkillContent(skills []string, maxBudget int) string {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
if maxBudget <= 0 {
|
||||
maxBudget = defaultSkillBudget
|
||||
}
|
||||
var sections []string
|
||||
remaining := maxBudget
|
||||
for _, name := range skills {
|
||||
name = strings.TrimSpace(name)
|
||||
if name == "" {
|
||||
continue
|
||||
}
|
||||
if !validSkillName.MatchString(name) {
|
||||
logWarn(fmt.Sprintf("skill %q: invalid name (must contain only [a-zA-Z0-9_-]), skipping", name))
|
||||
continue
|
||||
}
|
||||
path := filepath.Join(home, ".claude", "skills", name, "SKILL.md")
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil || len(data) == 0 {
|
||||
logWarn(fmt.Sprintf("skill %q: SKILL.md not found or empty, skipping", name))
|
||||
continue
|
||||
}
|
||||
body := stripYAMLFrontmatter(strings.TrimSpace(string(data)))
|
||||
tagOverhead := len("<skill name=\"\">") + len(name) + len("\n") + len("\n</skill>")
|
||||
bodyBudget := remaining - tagOverhead
|
||||
if bodyBudget <= 0 {
|
||||
logWarn(fmt.Sprintf("skill %q: skipped, insufficient budget for tags", name))
|
||||
break
|
||||
}
|
||||
if len(body) > bodyBudget {
|
||||
logWarn(fmt.Sprintf("skill %q: truncated from %d to %d chars (budget)", name, len(body), bodyBudget))
|
||||
body = body[:bodyBudget]
|
||||
}
|
||||
remaining -= len(body) + tagOverhead
|
||||
sections = append(sections, "<skill name=\""+name+"\">\n"+body+"\n</skill>")
|
||||
if remaining <= 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(sections) == 0 {
|
||||
return ""
|
||||
}
|
||||
return strings.Join(sections, "\n\n")
|
||||
}
|
||||
|
||||
func stripYAMLFrontmatter(s string) string {
|
||||
s = strings.ReplaceAll(s, "\r\n", "\n")
|
||||
if !strings.HasPrefix(s, "---") {
|
||||
return s
|
||||
}
|
||||
idx := strings.Index(s[3:], "\n---")
|
||||
if idx < 0 {
|
||||
return s
|
||||
}
|
||||
result := s[3+idx+4:]
|
||||
if len(result) > 0 && result[0] == '\n' {
|
||||
result = result[1:]
|
||||
}
|
||||
return strings.TrimSpace(result)
|
||||
}
|
||||
|
||||
343
codeagent-wrapper/internal/executor/skills_test.go
Normal file
343
codeagent-wrapper/internal/executor/skills_test.go
Normal file
@@ -0,0 +1,343 @@
|
||||
package executor
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// setTestHome overrides the home directory for both Unix (HOME) and Windows (USERPROFILE).
|
||||
func setTestHome(t *testing.T, home string) {
|
||||
t.Helper()
|
||||
t.Setenv("HOME", home)
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Setenv("USERPROFILE", home)
|
||||
}
|
||||
}
|
||||
|
||||
// --- helper: create a temp skill dir with SKILL.md ---
|
||||
|
||||
func createTempSkill(t *testing.T, name, content string) string {
|
||||
t.Helper()
|
||||
home := t.TempDir()
|
||||
skillDir := filepath.Join(home, ".claude", "skills", name)
|
||||
if err := os.MkdirAll(skillDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(skillDir, "SKILL.md"), []byte(content), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return home
|
||||
}
|
||||
|
||||
// --- ParseParallelConfig skills parsing tests ---
|
||||
|
||||
func TestParseParallelConfig_SkillsField(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
taskIdx int
|
||||
expectedSkills []string
|
||||
}{
|
||||
{
|
||||
name: "single skill",
|
||||
input: `---TASK---
|
||||
id: t1
|
||||
workdir: .
|
||||
skills: golang-base-practices
|
||||
---CONTENT---
|
||||
Do something.
|
||||
`,
|
||||
taskIdx: 0,
|
||||
expectedSkills: []string{"golang-base-practices"},
|
||||
},
|
||||
{
|
||||
name: "multiple comma-separated skills",
|
||||
input: `---TASK---
|
||||
id: t1
|
||||
workdir: .
|
||||
skills: golang-base-practices, vercel-react-best-practices
|
||||
---CONTENT---
|
||||
Do something.
|
||||
`,
|
||||
taskIdx: 0,
|
||||
expectedSkills: []string{"golang-base-practices", "vercel-react-best-practices"},
|
||||
},
|
||||
{
|
||||
name: "no skills field",
|
||||
input: `---TASK---
|
||||
id: t1
|
||||
workdir: .
|
||||
---CONTENT---
|
||||
Do something.
|
||||
`,
|
||||
taskIdx: 0,
|
||||
expectedSkills: nil,
|
||||
},
|
||||
{
|
||||
name: "empty skills value",
|
||||
input: `---TASK---
|
||||
id: t1
|
||||
workdir: .
|
||||
skills:
|
||||
---CONTENT---
|
||||
Do something.
|
||||
`,
|
||||
taskIdx: 0,
|
||||
expectedSkills: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg, err := ParseParallelConfig([]byte(tt.input))
|
||||
if err != nil {
|
||||
t.Fatalf("ParseParallelConfig error: %v", err)
|
||||
}
|
||||
got := cfg.Tasks[tt.taskIdx].Skills
|
||||
if len(got) != len(tt.expectedSkills) {
|
||||
t.Fatalf("skills: got %v, want %v", got, tt.expectedSkills)
|
||||
}
|
||||
for i := range got {
|
||||
if got[i] != tt.expectedSkills[i] {
|
||||
t.Errorf("skills[%d]: got %q, want %q", i, got[i], tt.expectedSkills[i])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// --- stripYAMLFrontmatter tests ---
|
||||
|
||||
func TestStripYAMLFrontmatter(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "with frontmatter",
|
||||
input: "---\nname: test\ndescription: foo\n---\n\n# Body\nContent here.",
|
||||
expected: "# Body\nContent here.",
|
||||
},
|
||||
{
|
||||
name: "no frontmatter",
|
||||
input: "# Just a body\nNo frontmatter.",
|
||||
expected: "# Just a body\nNo frontmatter.",
|
||||
},
|
||||
{
|
||||
name: "empty",
|
||||
input: "",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "only frontmatter",
|
||||
input: "---\nname: test\n---",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "frontmatter with allowed-tools",
|
||||
input: "---\nname: do\nallowed-tools: [\"Bash\"]\n---\n\n# Skill content",
|
||||
expected: "# Skill content",
|
||||
},
|
||||
{
|
||||
name: "CRLF line endings",
|
||||
input: "---\r\nname: test\r\n---\r\n\r\n# Body\r\nContent.",
|
||||
expected: "# Body\nContent.",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := stripYAMLFrontmatter(tt.input)
|
||||
if got != tt.expected {
|
||||
t.Errorf("got %q, want %q", got, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// --- DetectProjectSkills tests ---
|
||||
|
||||
func TestDetectProjectSkills_GoProject(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
os.WriteFile(filepath.Join(tmpDir, "go.mod"), []byte("module test"), 0644)
|
||||
|
||||
skills := DetectProjectSkills(tmpDir)
|
||||
// Result depends on whether golang-base-practices is installed locally
|
||||
t.Logf("detected skills for Go project: %v", skills)
|
||||
}
|
||||
|
||||
func TestDetectProjectSkills_NoFingerprints(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
skills := DetectProjectSkills(tmpDir)
|
||||
if len(skills) != 0 {
|
||||
t.Errorf("expected no skills for empty dir, got %v", skills)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectProjectSkills_FullStack(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
os.WriteFile(filepath.Join(tmpDir, "go.mod"), []byte("module test"), 0644)
|
||||
os.WriteFile(filepath.Join(tmpDir, "package.json"), []byte(`{"name":"test"}`), 0644)
|
||||
|
||||
skills := DetectProjectSkills(tmpDir)
|
||||
t.Logf("detected skills for fullstack project: %v", skills)
|
||||
seen := make(map[string]bool)
|
||||
for _, s := range skills {
|
||||
if seen[s] {
|
||||
t.Errorf("duplicate skill detected: %s", s)
|
||||
}
|
||||
seen[s] = true
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectProjectSkills_NonexistentDir(t *testing.T) {
|
||||
skills := DetectProjectSkills("/nonexistent/path/xyz")
|
||||
if len(skills) != 0 {
|
||||
t.Errorf("expected no skills for nonexistent dir, got %v", skills)
|
||||
}
|
||||
}
|
||||
|
||||
// --- ResolveSkillContent tests (CI-friendly with temp dirs) ---
|
||||
|
||||
func TestResolveSkillContent_ValidSkill(t *testing.T) {
|
||||
home := createTempSkill(t, "test-skill", "---\nname: test\n---\n\n# Test Skill\nBest practices here.")
|
||||
setTestHome(t, home)
|
||||
|
||||
result := ResolveSkillContent([]string{"test-skill"}, 0)
|
||||
if result == "" {
|
||||
t.Fatal("expected non-empty content")
|
||||
}
|
||||
if !strings.Contains(result, `<skill name="test-skill">`) {
|
||||
t.Error("missing opening <skill> tag")
|
||||
}
|
||||
if !strings.Contains(result, "</skill>") {
|
||||
t.Error("missing closing </skill> tag")
|
||||
}
|
||||
if !strings.Contains(result, "# Test Skill") {
|
||||
t.Error("missing skill body content")
|
||||
}
|
||||
if strings.Contains(result, "name: test") {
|
||||
t.Error("frontmatter was not stripped")
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveSkillContent_NonexistentSkill(t *testing.T) {
|
||||
home := t.TempDir()
|
||||
setTestHome(t, home)
|
||||
|
||||
result := ResolveSkillContent([]string{"nonexistent-skill-xyz"}, 0)
|
||||
if result != "" {
|
||||
t.Errorf("expected empty for nonexistent skill, got %d bytes", len(result))
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveSkillContent_Empty(t *testing.T) {
|
||||
if result := ResolveSkillContent(nil, 0); result != "" {
|
||||
t.Errorf("expected empty for nil, got %q", result)
|
||||
}
|
||||
if result := ResolveSkillContent([]string{}, 0); result != "" {
|
||||
t.Errorf("expected empty for empty, got %q", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveSkillContent_Budget(t *testing.T) {
|
||||
longBody := strings.Repeat("x", 500)
|
||||
home := createTempSkill(t, "big-skill", "---\nname: big\n---\n\n"+longBody)
|
||||
setTestHome(t, home)
|
||||
|
||||
result := ResolveSkillContent([]string{"big-skill"}, 200)
|
||||
if result == "" {
|
||||
t.Fatal("expected non-empty even with small budget")
|
||||
}
|
||||
if len(result) > 200 {
|
||||
t.Errorf("result %d bytes exceeds budget 200", len(result))
|
||||
}
|
||||
t.Logf("budget=200, result=%d bytes", len(result))
|
||||
}
|
||||
|
||||
func TestResolveSkillContent_MultipleSkills(t *testing.T) {
|
||||
home := t.TempDir()
|
||||
for _, name := range []string{"skill-a", "skill-b"} {
|
||||
skillDir := filepath.Join(home, ".claude", "skills", name)
|
||||
os.MkdirAll(skillDir, 0755)
|
||||
os.WriteFile(filepath.Join(skillDir, "SKILL.md"), []byte("# "+name+"\nContent."), 0644)
|
||||
}
|
||||
setTestHome(t, home)
|
||||
|
||||
result := ResolveSkillContent([]string{"skill-a", "skill-b"}, 0)
|
||||
if result == "" {
|
||||
t.Fatal("expected non-empty for multiple skills")
|
||||
}
|
||||
if !strings.Contains(result, `<skill name="skill-a">`) {
|
||||
t.Error("missing skill-a tag")
|
||||
}
|
||||
if !strings.Contains(result, `<skill name="skill-b">`) {
|
||||
t.Error("missing skill-b tag")
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveSkillContent_PathTraversal(t *testing.T) {
|
||||
home := t.TempDir()
|
||||
setTestHome(t, home)
|
||||
|
||||
result := ResolveSkillContent([]string{"../../../etc/passwd"}, 0)
|
||||
if result != "" {
|
||||
t.Errorf("expected empty for path traversal name, got %d bytes", len(result))
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveSkillContent_InvalidNames(t *testing.T) {
|
||||
home := t.TempDir()
|
||||
setTestHome(t, home)
|
||||
|
||||
tests := []string{"../bad", "foo/bar", "skill name", "skill.name", "a b"}
|
||||
for _, name := range tests {
|
||||
result := ResolveSkillContent([]string{name}, 0)
|
||||
if result != "" {
|
||||
t.Errorf("expected empty for invalid name %q, got %d bytes", name, len(result))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveSkillContent_ValidNamePattern(t *testing.T) {
|
||||
if !validSkillName.MatchString("golang-base-practices") {
|
||||
t.Error("golang-base-practices should be valid")
|
||||
}
|
||||
if !validSkillName.MatchString("my_skill_v2") {
|
||||
t.Error("my_skill_v2 should be valid")
|
||||
}
|
||||
if validSkillName.MatchString("../bad") {
|
||||
t.Error("../bad should be invalid")
|
||||
}
|
||||
if validSkillName.MatchString("") {
|
||||
t.Error("empty should be invalid")
|
||||
}
|
||||
}
|
||||
|
||||
// --- Integration: skill injection format test ---
|
||||
|
||||
func TestSkillInjectionFormat(t *testing.T) {
|
||||
home := createTempSkill(t, "test-go", "---\nname: go\n---\n\n# Go Best Practices\nUse gofmt.")
|
||||
setTestHome(t, home)
|
||||
|
||||
taskText := "Implement the feature."
|
||||
content := ResolveSkillContent([]string{"test-go"}, 0)
|
||||
injected := taskText + "\n\n# Domain Best Practices\n\n" + content
|
||||
|
||||
if !strings.Contains(injected, "Implement the feature.") {
|
||||
t.Error("original task text lost")
|
||||
}
|
||||
if !strings.Contains(injected, "# Domain Best Practices") {
|
||||
t.Error("missing section header")
|
||||
}
|
||||
if !strings.Contains(injected, `<skill name="test-go">`) {
|
||||
t.Error("missing <skill> tag")
|
||||
}
|
||||
if !strings.Contains(injected, "Use gofmt.") {
|
||||
t.Error("missing skill body")
|
||||
}
|
||||
}
|
||||
@@ -24,6 +24,7 @@ type TaskSpec struct {
|
||||
Worktree bool `json:"worktree,omitempty"`
|
||||
AllowedTools []string `json:"allowed_tools,omitempty"`
|
||||
DisallowedTools []string `json:"disallowed_tools,omitempty"`
|
||||
Skills []string `json:"skills,omitempty"`
|
||||
Mode string `json:"-"`
|
||||
UseStdin bool `json:"-"`
|
||||
Context context.Context `json:"-"`
|
||||
|
||||
@@ -19,7 +19,7 @@ func TestTruncate(t *testing.T) {
|
||||
{"zero maxLen", "hello", 0, "..."},
|
||||
{"negative maxLen", "hello", -1, ""},
|
||||
{"maxLen 1", "hello", 1, "h..."},
|
||||
{"unicode bytes truncate", "你好世界", 10, "你好世\xe7..."}, // Truncate works on bytes, not runes
|
||||
{"unicode bytes truncate", "你好世界", 10, "你好世\xe7..."}, // Truncate works on bytes, not runes
|
||||
{"mixed truncate", "hello世界abc", 7, "hello\xe4\xb8..."}, // byte-based truncation
|
||||
}
|
||||
|
||||
|
||||
18
config.json
18
config.json
@@ -145,6 +145,24 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"claudekit": {
|
||||
"enabled": false,
|
||||
"description": "ClaudeKit workflow: skills/do + global hooks (pre-bash, inject-spec, log-prompt, on-stop)",
|
||||
"operations": [
|
||||
{
|
||||
"type": "copy_dir",
|
||||
"source": "skills/do",
|
||||
"target": "skills/do",
|
||||
"description": "Install do skill with 5-phase workflow"
|
||||
},
|
||||
{
|
||||
"type": "copy_dir",
|
||||
"source": "hooks",
|
||||
"target": "hooks",
|
||||
"description": "Install global hooks (pre-bash, inject-spec, log-prompt, on-stop)"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
30
hooks/hooks.json
Normal file
30
hooks/hooks.json
Normal file
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"description": "ClaudeKit global hooks: dangerous command blocker, spec injection, prompt logging, session review",
|
||||
"hooks": {
|
||||
"PreToolUse": [
|
||||
{
|
||||
"matcher": "Bash",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "python3 ${CLAUDE_PLUGIN_ROOT}/pre-bash.py \"$CLAUDE_TOOL_INPUT\""
|
||||
},
|
||||
{
|
||||
"type": "command",
|
||||
"command": "python3 ${CLAUDE_PLUGIN_ROOT}/inject-spec.py"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"UserPromptSubmit": [
|
||||
{
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "python3 ${CLAUDE_PLUGIN_ROOT}/log-prompt.py"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
13
hooks/inject-spec.py
Normal file
13
hooks/inject-spec.py
Normal file
@@ -0,0 +1,13 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Global Spec Injection Hook (DEPRECATED).
|
||||
|
||||
Spec injection is now handled internally by codeagent-wrapper via the
|
||||
per-task `skills:` field in parallel config and the `--skills` CLI flag.
|
||||
|
||||
This hook is kept as a no-op for backward compatibility.
|
||||
"""
|
||||
|
||||
import sys
|
||||
|
||||
sys.exit(0)
|
||||
55
hooks/log-prompt.py
Normal file
55
hooks/log-prompt.py
Normal file
@@ -0,0 +1,55 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Log Prompt Hook - Record user prompts to session-specific log files.
|
||||
Used for review on Stop.
|
||||
Uses session-isolated logs to handle concurrency.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def get_session_id() -> str:
|
||||
"""Get unique session identifier."""
|
||||
return os.environ.get("CLAUDE_CODE_SSE_PORT", "default")
|
||||
|
||||
|
||||
def write_log(prompt: str) -> None:
|
||||
"""Write prompt to session log file."""
|
||||
log_dir = Path(".claude/state")
|
||||
session_id = get_session_id()
|
||||
log_file = log_dir / f"session-{session_id}.log"
|
||||
|
||||
log_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
timestamp = datetime.now().isoformat()
|
||||
entry = f"[{timestamp}] {prompt[:500]}\n"
|
||||
|
||||
with open(log_file, "a", encoding="utf-8") as f:
|
||||
f.write(entry)
|
||||
|
||||
|
||||
def main():
|
||||
input_data = ""
|
||||
if not sys.stdin.isatty():
|
||||
try:
|
||||
input_data = sys.stdin.read()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
prompt = ""
|
||||
try:
|
||||
data = json.loads(input_data)
|
||||
prompt = data.get("prompt", "")
|
||||
except json.JSONDecodeError:
|
||||
prompt = input_data.strip()
|
||||
|
||||
if prompt:
|
||||
write_log(prompt)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
30
hooks/pre-bash.py
Normal file
30
hooks/pre-bash.py
Normal file
@@ -0,0 +1,30 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Pre-Bash Hook - Block dangerous commands before execution.
|
||||
"""
|
||||
|
||||
import sys
|
||||
|
||||
DANGEROUS_PATTERNS = [
|
||||
'rm -rf /',
|
||||
'rm -rf ~',
|
||||
'dd if=',
|
||||
':(){:|:&};:',
|
||||
'mkfs.',
|
||||
'> /dev/sd',
|
||||
]
|
||||
|
||||
|
||||
def main():
|
||||
command = sys.argv[1] if len(sys.argv) > 1 else ''
|
||||
|
||||
for pattern in DANGEROUS_PATTERNS:
|
||||
if pattern in command:
|
||||
print(f"[CWF] BLOCKED: Dangerous command detected: {pattern}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
71
install.py
71
install.py
@@ -126,35 +126,44 @@ def save_settings(ctx: Dict[str, Any], settings: Dict[str, Any]) -> None:
|
||||
_save_json(settings_path, settings)
|
||||
|
||||
|
||||
def find_module_hooks(module_name: str, cfg: Dict[str, Any], ctx: Dict[str, Any]) -> Optional[tuple]:
|
||||
"""Find hooks.json for a module if it exists.
|
||||
def find_module_hooks(module_name: str, cfg: Dict[str, Any], ctx: Dict[str, Any]) -> List[tuple]:
|
||||
"""Find all hooks.json files for a module.
|
||||
|
||||
Returns tuple of (hooks_config, plugin_root_path) or None.
|
||||
Returns list of tuples (hooks_config, plugin_root_path).
|
||||
Searches in order for each copy_dir operation:
|
||||
1. {target_dir}/hooks/hooks.json (for skills with hooks subdirectory)
|
||||
2. {target_dir}/hooks.json (for hooks directory itself)
|
||||
"""
|
||||
results = []
|
||||
seen_paths = set()
|
||||
|
||||
# Check for hooks in operations (copy_dir targets)
|
||||
for op in cfg.get("operations", []):
|
||||
if op.get("type") == "copy_dir":
|
||||
target_dir = ctx["install_dir"] / op["target"]
|
||||
hooks_file = target_dir / "hooks" / "hooks.json"
|
||||
if hooks_file.exists():
|
||||
try:
|
||||
return (_load_json(hooks_file), str(target_dir))
|
||||
except (ValueError, FileNotFoundError):
|
||||
pass
|
||||
|
||||
# Also check source directory during install
|
||||
for op in cfg.get("operations", []):
|
||||
if op.get("type") == "copy_dir":
|
||||
target_dir = ctx["install_dir"] / op["target"]
|
||||
source_dir = ctx["config_dir"] / op["source"]
|
||||
hooks_file = source_dir / "hooks" / "hooks.json"
|
||||
if hooks_file.exists():
|
||||
try:
|
||||
return (_load_json(hooks_file), str(target_dir))
|
||||
except (ValueError, FileNotFoundError):
|
||||
pass
|
||||
|
||||
return None
|
||||
# Check both target and source directories
|
||||
for base_dir, plugin_root in [(target_dir, str(target_dir)), (source_dir, str(target_dir))]:
|
||||
# First check {dir}/hooks/hooks.json (for skills)
|
||||
hooks_file = base_dir / "hooks" / "hooks.json"
|
||||
if hooks_file.exists() and str(hooks_file) not in seen_paths:
|
||||
try:
|
||||
results.append((_load_json(hooks_file), plugin_root))
|
||||
seen_paths.add(str(hooks_file))
|
||||
except (ValueError, FileNotFoundError):
|
||||
pass
|
||||
|
||||
# Then check {dir}/hooks.json (for hooks directory itself)
|
||||
hooks_file = base_dir / "hooks.json"
|
||||
if hooks_file.exists() and str(hooks_file) not in seen_paths:
|
||||
try:
|
||||
results.append((_load_json(hooks_file), plugin_root))
|
||||
seen_paths.add(str(hooks_file))
|
||||
except (ValueError, FileNotFoundError):
|
||||
pass
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def _create_hook_marker(module_name: str) -> str:
|
||||
@@ -799,16 +808,16 @@ def execute_module(name: str, cfg: Dict[str, Any], ctx: Dict[str, Any]) -> Dict[
|
||||
raise
|
||||
|
||||
# Handle hooks: find and merge module hooks into settings.json
|
||||
hooks_result = find_module_hooks(name, cfg, ctx)
|
||||
if hooks_result:
|
||||
hooks_config, plugin_root = hooks_result
|
||||
try:
|
||||
merge_hooks_to_settings(name, hooks_config, ctx, plugin_root)
|
||||
result["operations"].append({"type": "merge_hooks", "status": "success"})
|
||||
result["has_hooks"] = True
|
||||
except Exception as exc:
|
||||
write_log({"level": "WARNING", "message": f"Failed to merge hooks for {name}: {exc}"}, ctx)
|
||||
result["operations"].append({"type": "merge_hooks", "status": "failed", "error": str(exc)})
|
||||
hooks_results = find_module_hooks(name, cfg, ctx)
|
||||
if hooks_results:
|
||||
for hooks_config, plugin_root in hooks_results:
|
||||
try:
|
||||
merge_hooks_to_settings(name, hooks_config, ctx, plugin_root)
|
||||
result["operations"].append({"type": "merge_hooks", "status": "success"})
|
||||
result["has_hooks"] = True
|
||||
except Exception as exc:
|
||||
write_log({"level": "WARNING", "message": f"Failed to merge hooks for {name}: {exc}"}, ctx)
|
||||
result["operations"].append({"type": "merge_hooks", "status": "failed", "error": str(exc)})
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
Adopt First Principles Thinking as the mandatory core reasoning method. Never rely on analogy, convention, "best practices", or "what others do". Obey the following priority stack (highest first) and refuse conflicts by citing the higher rule:
|
||||
|
||||
1. Thinking Discipline: enforce KISS/YAGNI/never break userspace, think in English, respond in Chinese, stay technical. Reject analogical shortcuts—always trace back to fundamental truths.
|
||||
2. Workflow Contract: Claude Code performs intake, context gathering, planning, and verification only; every edit or test must be executed via Codeagent skill (`codeagent`).
|
||||
1. Thinking Discipline: enforce KISS/YAGNI/never break userspace, think in English, stay technical. Reject analogical shortcuts—always trace back to fundamental truths.
|
||||
2. Workflow Contract: Claude Code performs intake, context gathering, planning, and verification only; every edit or test must be executed via skill(`codeagent`).
|
||||
3. Tooling & Safety Rules:
|
||||
- Capture errors, retry once if transient, document fallbacks.
|
||||
4. Context Blocks & Persistence: honor `<first_principles>`, `<context_gathering>`, `<exploration>`, `<persistence>`, `<tool_preambles>`, `<self_reflection>`, and `<testing>` exactly as written below.
|
||||
5. Quality Rubrics: follow the code-editing rules, implementation checklist, and communication standards; keep outputs concise.
|
||||
6. Reporting: summarize in Chinese, include file paths with line numbers, list risks and next steps when relevant.
|
||||
6. Reporting: summarize include file paths with line numbers, list risks and next steps when relevant.
|
||||
|
||||
<first_principles>
|
||||
For every non-trivial problem, execute this mandatory reasoning chain:
|
||||
@@ -33,8 +33,8 @@ Trigger conditions:
|
||||
- User explicitly requests deep analysis
|
||||
Process:
|
||||
- Requirements: Break the ask into explicit requirements, unclear areas, and hidden assumptions. Apply <first_principles> step 1 here.
|
||||
- Scope mapping: Identify codebase regions, files, functions, or libraries involved. Perform targeted parallel searches before planning. For complex call chains, delegate to Codeagent skill.
|
||||
- Dependencies: Identify frameworks, APIs, configs, data formats. For complex internals, delegate to Codeagent skill.
|
||||
- Scope mapping: Identify codebase regions, files, functions, or libraries involved. Perform targeted parallel searches before planning. For complex call chains, delegate to skill(`codeagent`).
|
||||
- Dependencies: Identify frameworks, APIs, configs, data formats. For complex internals, delegate to skill(`codeagent`).
|
||||
- Ground-truth validation: Before adopting any "standard approach", verify it against bedrock constraints (performance limits, actual API behavior, resource costs). Apply <first_principles> steps 2-3.
|
||||
- Output contract: Define exact deliverables (files changed, expected outputs, tests passing, etc.).
|
||||
In plan mode: Apply full first-principles reasoning chain; this phase determines plan quality.
|
||||
@@ -85,6 +85,5 @@ Code Editing Rules:
|
||||
- Enforce accessibility, consistent spacing (multiples of 4), ≤2 accent colors.
|
||||
- Use semantic HTML and accessible components.
|
||||
Communication:
|
||||
- Think in English, respond in Chinese, stay terse.
|
||||
- Lead with findings before summaries; critique code, not people.
|
||||
- Provide next steps only when they naturally follow from the work.
|
||||
|
||||
@@ -52,7 +52,7 @@ To customize agents, create same-named files in `~/.codeagent/agents/` to overri
|
||||
3. **Phase 4 requires approval** - stop after Phase 3 if not approved
|
||||
4. **Pass complete context forward** - every agent gets the Context Pack
|
||||
5. **Parallel-first** - run independent tasks via `codeagent-wrapper --parallel`
|
||||
6. **Update state after each phase** - keep `.claude/do.{task_id}.local.md` current
|
||||
6. **Update state after each phase** - keep `.claude/do-tasks/{task_id}/task.json` current
|
||||
|
||||
## Context Pack Template
|
||||
|
||||
@@ -78,16 +78,34 @@ To customize agents, create same-named files in `~/.codeagent/agents/` to overri
|
||||
|
||||
## Loop State Management
|
||||
|
||||
When triggered via `/do <task>`, initializes `.claude/do.{task_id}.local.md` with:
|
||||
- `active: true`
|
||||
- `current_phase: 1`
|
||||
- `max_phases: 5`
|
||||
- `completion_promise: "<promise>DO_COMPLETE</promise>"`
|
||||
|
||||
After each phase, update frontmatter:
|
||||
When triggered via `/do <task>`, initializes `.claude/do-tasks/{task_id}/task.md` with YAML frontmatter:
|
||||
```yaml
|
||||
current_phase: <next phase number>
|
||||
phase_name: "<next phase name>"
|
||||
---
|
||||
id: "<task_id>"
|
||||
title: "<task description>"
|
||||
status: "in_progress"
|
||||
current_phase: 1
|
||||
phase_name: "Understand"
|
||||
max_phases: 5
|
||||
use_worktree: false
|
||||
created_at: "<ISO timestamp>"
|
||||
completion_promise: "<promise>DO_COMPLETE</promise>"
|
||||
---
|
||||
|
||||
# Requirements
|
||||
|
||||
<task description>
|
||||
|
||||
## Context
|
||||
|
||||
## Progress
|
||||
```
|
||||
|
||||
The current task is tracked in `.claude/do-tasks/.current-task`.
|
||||
|
||||
After each phase, update `task.md` frontmatter via:
|
||||
```bash
|
||||
python3 ".claude/skills/do/scripts/task.py" update-phase <N>
|
||||
```
|
||||
|
||||
When all 5 phases complete, output:
|
||||
@@ -95,17 +113,17 @@ When all 5 phases complete, output:
|
||||
<promise>DO_COMPLETE</promise>
|
||||
```
|
||||
|
||||
To abort early, set `active: false` in the state file.
|
||||
To abort early, manually edit `task.md` and set `status: "cancelled"` in the frontmatter.
|
||||
|
||||
## Stop Hook
|
||||
|
||||
A Stop hook is registered after installation:
|
||||
1. Creates `.claude/do.{task_id}.local.md` state file
|
||||
2. Updates `current_phase` after each phase
|
||||
1. Creates `.claude/do-tasks/{task_id}/task.md` state file
|
||||
2. Updates `current_phase` in frontmatter after each phase
|
||||
3. Stop hook checks state, blocks exit if incomplete
|
||||
4. Outputs `<promise>DO_COMPLETE</promise>` when finished
|
||||
|
||||
Manual exit: Set `active` to `false` in the state file.
|
||||
Manual exit: Edit `task.md` and set `status: "cancelled"` in the frontmatter.
|
||||
|
||||
## Parallel Execution Examples
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: do
|
||||
description: This skill should be used for structured feature development with codebase understanding. Triggers on /do command. Provides a 5-phase workflow (Understand, Clarify, Design, Implement, Complete) using codeagent-wrapper to orchestrate code-explorer, code-architect, code-reviewer, and develop agents in parallel.
|
||||
allowed-tools: ["Bash(${SKILL_DIR}/scripts/setup-do.py:*)"]
|
||||
allowed-tools: ["Bash(.claude/skills/do/scripts/setup-do.py:*)", "Bash(.claude/skills/do/scripts/task.py:*)"]
|
||||
---
|
||||
|
||||
# do - Feature Development Orchestrator
|
||||
@@ -22,70 +22,54 @@ Develop in a separate worktree? (Isolates changes from main branch)
|
||||
- No (Work directly in current directory)
|
||||
```
|
||||
|
||||
### Step 2: Initialize state
|
||||
### Step 2: Initialize task directory
|
||||
|
||||
```bash
|
||||
# If worktree mode selected:
|
||||
python3 "${SKILL_DIR}/scripts/setup-do.py" --worktree "<task description>"
|
||||
python3 ".claude/skills/do/scripts/setup-do.py" --worktree "<task description>"
|
||||
|
||||
# If no worktree:
|
||||
python3 "${SKILL_DIR}/scripts/setup-do.py" "<task description>"
|
||||
python3 ".claude/skills/do/scripts/setup-do.py" "<task description>"
|
||||
```
|
||||
|
||||
This creates `.claude/do.{task_id}.local.md` with:
|
||||
- `active: true`
|
||||
- `current_phase: 1`
|
||||
- `max_phases: 5`
|
||||
- `completion_promise: "<promise>DO_COMPLETE</promise>"`
|
||||
- `use_worktree: true/false`
|
||||
This creates a task directory under `.claude/do-tasks/` with:
|
||||
- `task.md`: Single file containing YAML frontmatter (metadata) + Markdown body (requirements/context)
|
||||
|
||||
## Task Directory Management
|
||||
|
||||
Use `task.py` to manage task state:
|
||||
|
||||
```bash
|
||||
# Update phase
|
||||
python3 ".claude/skills/do/scripts/task.py" update-phase 2
|
||||
|
||||
# Check status
|
||||
python3 ".claude/skills/do/scripts/task.py" status
|
||||
|
||||
# List all tasks
|
||||
python3 ".claude/skills/do/scripts/task.py" list
|
||||
```
|
||||
|
||||
## Worktree Mode
|
||||
|
||||
When `use_worktree: true` in state file, ALL `codeagent-wrapper` calls that modify code MUST include `--worktree`:
|
||||
When worktree mode is enabled in task.json, ALL `codeagent-wrapper` calls that modify code MUST include `--worktree`:
|
||||
|
||||
```bash
|
||||
# With worktree mode enabled
|
||||
codeagent-wrapper --worktree --agent develop - . <<'EOF'
|
||||
...
|
||||
EOF
|
||||
|
||||
# Parallel tasks with worktree
|
||||
codeagent-wrapper --worktree --parallel <<'EOF'
|
||||
---TASK---
|
||||
id: task1
|
||||
agent: develop
|
||||
workdir: .
|
||||
---CONTENT---
|
||||
...
|
||||
EOF
|
||||
```
|
||||
|
||||
The `--worktree` flag tells codeagent-wrapper to create/use a worktree internally. Read-only agents (code-explorer, code-architect, code-reviewer) do NOT need `--worktree`.
|
||||
|
||||
## Loop State Management
|
||||
|
||||
After each phase, update `.claude/do.{task_id}.local.md` frontmatter:
|
||||
```yaml
|
||||
current_phase: <next phase number>
|
||||
phase_name: "<next phase name>"
|
||||
```
|
||||
|
||||
When all 5 phases complete, output the completion signal:
|
||||
```
|
||||
<promise>DO_COMPLETE</promise>
|
||||
```
|
||||
|
||||
To abort early, set `active: false` in the state file.
|
||||
Read-only agents (code-explorer, code-architect, code-reviewer) do NOT need `--worktree`.
|
||||
|
||||
## Hard Constraints
|
||||
|
||||
1. **Never write code directly.** Delegate all code changes to `codeagent-wrapper` agents.
|
||||
2. **Pass complete context forward.** Every agent invocation includes the Context Pack.
|
||||
3. **Parallel-first.** Run independent tasks via `codeagent-wrapper --parallel`.
|
||||
4. **Update state after each phase.** Keep `.claude/do.{task_id}.local.md` current.
|
||||
5. **Expect long-running `codeagent-wrapper` calls.** High-reasoning modes can take a long time; stay in the orchestrator role and wait for agents to complete.
|
||||
6. **Timeouts are not an escape hatch.** If a `codeagent-wrapper` invocation times out/errors, retry (split/narrow the task if needed); never switch to direct implementation.
|
||||
7. **Respect worktree setting.** If `use_worktree: true`, always pass `--worktree` to develop agent calls.
|
||||
2. **Parallel-first.** Run independent tasks via `codeagent-wrapper --parallel`.
|
||||
3. **Update phase after each phase.** Use `task.py update-phase <N>`.
|
||||
4. **Expect long-running `codeagent-wrapper` calls.** High-reasoning modes can take a long time.
|
||||
5. **Timeouts are not an escape hatch.** If a call times out, retry with narrower scope.
|
||||
6. **Respect worktree setting.** If enabled, always pass `--worktree` to develop agent calls.
|
||||
|
||||
## Agents
|
||||
|
||||
@@ -110,28 +94,6 @@ To abort early, set `active: false` in the state file.
|
||||
- Missing documentation
|
||||
- Non-critical test coverage gaps
|
||||
|
||||
## Context Pack Template
|
||||
|
||||
```text
|
||||
## Original User Request
|
||||
<verbatim request>
|
||||
|
||||
## Context Pack
|
||||
- Phase: <1-5 name>
|
||||
- Decisions: <requirements/constraints/choices>
|
||||
- Code-explorer output: <paste or "None">
|
||||
- Code-architect output: <paste or "None">
|
||||
- Code-reviewer output: <paste or "None">
|
||||
- Develop output: <paste or "None">
|
||||
- Open questions: <list or "None">
|
||||
|
||||
## Current Task
|
||||
<specific task>
|
||||
|
||||
## Acceptance Criteria
|
||||
<checkable outputs>
|
||||
```
|
||||
|
||||
## 5-Phase Workflow
|
||||
|
||||
### Phase 1: Understand (Parallel, No Interaction)
|
||||
@@ -147,70 +109,37 @@ id: p1_requirements
|
||||
agent: code-architect
|
||||
workdir: .
|
||||
---CONTENT---
|
||||
## Original User Request
|
||||
/do <request>
|
||||
|
||||
## Context Pack
|
||||
- Code-explorer output: None
|
||||
- Code-architect output: None
|
||||
|
||||
## Current Task
|
||||
1. Analyze requirements completeness (score 1-10)
|
||||
2. Extract explicit requirements, constraints, acceptance criteria
|
||||
3. Identify blocking questions (issues that prevent implementation)
|
||||
4. Identify minor clarifications (nice-to-have but can proceed without)
|
||||
Analyze requirements completeness (score 1-10):
|
||||
1. Extract explicit requirements, constraints, acceptance criteria
|
||||
2. Identify blocking questions (issues that prevent implementation)
|
||||
3. Identify minor clarifications (nice-to-have but can proceed without)
|
||||
|
||||
Output format:
|
||||
- Completeness score: X/10
|
||||
- Requirements: [list]
|
||||
- Non-goals: [list]
|
||||
- Blocking questions: [list, if any]
|
||||
- Minor clarifications: [list, if any]
|
||||
|
||||
## Acceptance Criteria
|
||||
Concrete checklist; blocking vs minor questions clearly separated.
|
||||
|
||||
---TASK---
|
||||
id: p1_similar_features
|
||||
agent: code-explorer
|
||||
workdir: .
|
||||
---CONTENT---
|
||||
## Original User Request
|
||||
/do <request>
|
||||
|
||||
## Current Task
|
||||
Find 1-3 similar features, trace end-to-end. Return: key files with line numbers, call flow, extension points.
|
||||
|
||||
## Acceptance Criteria
|
||||
Concrete file:line map + reuse points.
|
||||
|
||||
---TASK---
|
||||
id: p1_architecture
|
||||
agent: code-explorer
|
||||
workdir: .
|
||||
---CONTENT---
|
||||
## Original User Request
|
||||
/do <request>
|
||||
|
||||
## Current Task
|
||||
Map architecture for relevant subsystem. Return: module map + 5-10 key files.
|
||||
|
||||
## Acceptance Criteria
|
||||
Clear boundaries; file:line references.
|
||||
|
||||
---TASK---
|
||||
id: p1_conventions
|
||||
agent: code-explorer
|
||||
workdir: .
|
||||
---CONTENT---
|
||||
## Original User Request
|
||||
/do <request>
|
||||
|
||||
## Current Task
|
||||
Identify testing patterns, conventions, config. Return: test commands + file locations.
|
||||
|
||||
## Acceptance Criteria
|
||||
Test commands + relevant test file paths.
|
||||
EOF
|
||||
```
|
||||
|
||||
@@ -221,75 +150,74 @@ EOF
|
||||
**Actions:**
|
||||
1. Review `p1_requirements` output for blocking questions
|
||||
2. **IF blocking questions exist** → Use AskUserQuestion
|
||||
3. **IF no blocking questions (completeness >= 8)** → Skip to Phase 3, log "Requirements clear, proceeding"
|
||||
|
||||
```bash
|
||||
# Only if blocking questions exist:
|
||||
# Use AskUserQuestion with the blocking questions from Phase 1
|
||||
```
|
||||
3. **IF no blocking questions (completeness >= 8)** → Skip to Phase 3
|
||||
|
||||
### Phase 3: Design (No Interaction)
|
||||
|
||||
**Goal:** Produce minimal-change implementation plan.
|
||||
|
||||
**Actions:** Invoke `code-architect` with all Phase 1 context to generate a single implementation plan.
|
||||
|
||||
```bash
|
||||
codeagent-wrapper --agent code-architect - . <<'EOF'
|
||||
## Original User Request
|
||||
/do <request>
|
||||
|
||||
## Context Pack
|
||||
- Code-explorer output: <ALL Phase 1 explorer outputs>
|
||||
- Code-architect output: <Phase 1 requirements + Phase 2 answers if any>
|
||||
|
||||
## Current Task
|
||||
Design minimal-change implementation:
|
||||
- Reuse existing abstractions
|
||||
- Minimize new files
|
||||
- Follow established patterns from code-explorer output
|
||||
- Follow established patterns from Phase 1 exploration
|
||||
|
||||
Output:
|
||||
- File touch list with specific changes
|
||||
- Build sequence
|
||||
- Test plan
|
||||
- Risks and mitigations
|
||||
|
||||
## Acceptance Criteria
|
||||
Concrete, implementable blueprint with minimal moving parts.
|
||||
EOF
|
||||
```
|
||||
|
||||
### Phase 4: Implement + Review (Single Interaction Point)
|
||||
### Phase 4: Implement + Review
|
||||
|
||||
**Goal:** Build feature and review in one phase.
|
||||
|
||||
**Actions:**
|
||||
1. Invoke `develop` to implement. For full-stack projects, split into backend/frontend tasks with per-task `skills:` injection. Use `--parallel` when tasks can be split; use single agent when the change is small or single-domain.
|
||||
|
||||
1. Invoke `develop` to implement (add `--worktree` if `use_worktree: true`):
|
||||
**Single-domain example** (add `--worktree` if enabled):
|
||||
|
||||
```bash
|
||||
# Check use_worktree from state file, add --worktree if true
|
||||
codeagent-wrapper --worktree --agent develop - . <<'EOF'
|
||||
## Original User Request
|
||||
/do <request>
|
||||
|
||||
## Context Pack
|
||||
- Code-explorer output: <ALL Phase 1 outputs>
|
||||
- Code-architect output: <Phase 3 blueprint>
|
||||
|
||||
## Current Task
|
||||
Implement with minimal change set following the blueprint.
|
||||
codeagent-wrapper --worktree --agent develop --skills golang-base-practices - . <<'EOF'
|
||||
Implement with minimal change set following the Phase 3 blueprint.
|
||||
- Follow Phase 1 patterns
|
||||
- Add/adjust tests per Phase 3 plan
|
||||
- Run narrowest relevant tests
|
||||
|
||||
## Acceptance Criteria
|
||||
Feature works end-to-end; tests pass; diff is minimal.
|
||||
EOF
|
||||
```
|
||||
|
||||
2. Run parallel reviews (no --worktree needed, read-only):
|
||||
**Full-stack parallel example** (adapt task IDs, skills, and content based on Phase 3 design):
|
||||
|
||||
```bash
|
||||
codeagent-wrapper --worktree --parallel <<'EOF'
|
||||
---TASK---
|
||||
id: p4_backend
|
||||
agent: develop
|
||||
workdir: .
|
||||
skills: golang-base-practices
|
||||
---CONTENT---
|
||||
Implement backend changes following Phase 3 blueprint.
|
||||
- Follow Phase 1 patterns
|
||||
- Add/adjust tests per Phase 3 plan
|
||||
|
||||
---TASK---
|
||||
id: p4_frontend
|
||||
agent: develop
|
||||
workdir: .
|
||||
skills: frontend-design,vercel-react-best-practices
|
||||
dependencies: p4_backend
|
||||
---CONTENT---
|
||||
Implement frontend changes following Phase 3 blueprint.
|
||||
- Follow Phase 1 patterns
|
||||
- Add/adjust tests per Phase 3 plan
|
||||
EOF
|
||||
```
|
||||
|
||||
Note: Choose which skills to inject based on Phase 3 design output. Only inject skills relevant to each task's domain.
|
||||
|
||||
2. Run parallel reviews:
|
||||
|
||||
```bash
|
||||
codeagent-wrapper --parallel <<'EOF'
|
||||
@@ -298,71 +226,35 @@ id: p4_correctness
|
||||
agent: code-reviewer
|
||||
workdir: .
|
||||
---CONTENT---
|
||||
## Original User Request
|
||||
/do <request>
|
||||
|
||||
## Context Pack
|
||||
- Code-architect output: <Phase 3 blueprint>
|
||||
- Develop output: <implementation output>
|
||||
|
||||
## Current Task
|
||||
Review for correctness, edge cases, failure modes.
|
||||
Classify each issue as BLOCKING or MINOR.
|
||||
|
||||
## Acceptance Criteria
|
||||
Issues with file:line references, severity, and concrete fixes.
|
||||
|
||||
---TASK---
|
||||
id: p4_simplicity
|
||||
agent: code-reviewer
|
||||
workdir: .
|
||||
---CONTENT---
|
||||
## Original User Request
|
||||
/do <request>
|
||||
|
||||
## Context Pack
|
||||
- Code-architect output: <Phase 3 blueprint>
|
||||
- Develop output: <implementation output>
|
||||
|
||||
## Current Task
|
||||
Review for KISS: remove bloat, collapse needless abstractions.
|
||||
Classify each issue as BLOCKING or MINOR.
|
||||
|
||||
## Acceptance Criteria
|
||||
Actionable simplifications with severity and justification.
|
||||
EOF
|
||||
```
|
||||
|
||||
3. Handle review results:
|
||||
- **MINOR issues only** → Auto-fix via `develop` (with `--worktree` if enabled), no user interaction
|
||||
- **MINOR issues only** → Auto-fix via `develop`, no user interaction
|
||||
- **BLOCKING issues** → Use AskUserQuestion: "Fix now / Proceed as-is"
|
||||
|
||||
### Phase 5: Complete (No Interaction)
|
||||
|
||||
**Goal:** Document what was built.
|
||||
|
||||
**Actions:** Invoke `code-reviewer` to produce summary:
|
||||
|
||||
```bash
|
||||
codeagent-wrapper --agent code-reviewer - . <<'EOF'
|
||||
## Original User Request
|
||||
/do <request>
|
||||
|
||||
## Context Pack
|
||||
- Code-architect output: <Phase 3 blueprint>
|
||||
- Code-reviewer output: <Phase 4 review outcomes>
|
||||
- Develop output: <Phase 4 implementation + fixes>
|
||||
|
||||
## Current Task
|
||||
Write completion summary:
|
||||
- What was built
|
||||
- Key decisions/tradeoffs
|
||||
- Files modified (paths)
|
||||
- How to verify (commands)
|
||||
- Follow-ups (optional)
|
||||
|
||||
## Acceptance Criteria
|
||||
Short, technical, actionable summary.
|
||||
EOF
|
||||
```
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"description": "do loop hook for 5-phase workflow",
|
||||
"description": "do loop hooks for 5-phase workflow",
|
||||
"hooks": {
|
||||
"Stop": [
|
||||
{
|
||||
@@ -10,6 +10,17 @@
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"SubagentStop": [
|
||||
{
|
||||
"matcher": "code-reviewer",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "python3 ${CLAUDE_PLUGIN_ROOT}/hooks/verify-loop.py"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,10 +1,20 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Stop hook for do skill workflow.
|
||||
|
||||
Checks if the do loop is complete before allowing exit.
|
||||
Uses the new task directory structure under .claude/do-tasks/.
|
||||
"""
|
||||
|
||||
import glob
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
DIR_TASKS = ".claude/do-tasks"
|
||||
FILE_CURRENT_TASK = ".current-task"
|
||||
FILE_TASK_JSON = "task.json"
|
||||
|
||||
PHASE_NAMES = {
|
||||
1: "Understand",
|
||||
2: "Clarify",
|
||||
@@ -13,111 +23,69 @@ PHASE_NAMES = {
|
||||
5: "Complete",
|
||||
}
|
||||
|
||||
|
||||
def phase_name_for(n: int) -> str:
|
||||
return PHASE_NAMES.get(n, f"Phase {n}")
|
||||
|
||||
def frontmatter_get(file_path: str, key: str) -> str:
|
||||
|
||||
def get_current_task(project_dir: str) -> str | None:
|
||||
"""Read current task directory path."""
|
||||
current_task_file = os.path.join(project_dir, DIR_TASKS, FILE_CURRENT_TASK)
|
||||
if not os.path.exists(current_task_file):
|
||||
return None
|
||||
try:
|
||||
with open(file_path, "r", encoding="utf-8") as f:
|
||||
lines = f.readlines()
|
||||
with open(current_task_file, "r", encoding="utf-8") as f:
|
||||
content = f.read().strip()
|
||||
return content if content else None
|
||||
except Exception:
|
||||
return ""
|
||||
return None
|
||||
|
||||
if not lines or lines[0].strip() != "---":
|
||||
return ""
|
||||
|
||||
for i, line in enumerate(lines[1:], start=1):
|
||||
if line.strip() == "---":
|
||||
break
|
||||
match = re.match(rf"^{re.escape(key)}:\s*(.*)$", line)
|
||||
if match:
|
||||
value = match.group(1).strip()
|
||||
if value.startswith('"') and value.endswith('"'):
|
||||
value = value[1:-1]
|
||||
return value
|
||||
return ""
|
||||
|
||||
def get_body(file_path: str) -> str:
|
||||
def get_task_info(project_dir: str, task_dir: str) -> dict | None:
|
||||
"""Read task.json data."""
|
||||
task_json_path = os.path.join(project_dir, task_dir, FILE_TASK_JSON)
|
||||
if not os.path.exists(task_json_path):
|
||||
return None
|
||||
try:
|
||||
with open(file_path, "r", encoding="utf-8") as f:
|
||||
content = f.read()
|
||||
with open(task_json_path, "r", encoding="utf-8") as f:
|
||||
return json.load(f)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def check_task_complete(project_dir: str, task_dir: str) -> str:
|
||||
"""Check if task is complete. Returns blocking reason or empty string."""
|
||||
task_info = get_task_info(project_dir, task_dir)
|
||||
if not task_info:
|
||||
return ""
|
||||
|
||||
parts = content.split("---", 2)
|
||||
if len(parts) >= 3:
|
||||
return parts[2]
|
||||
return ""
|
||||
|
||||
def check_state_file(state_file: str, stdin_payload: str) -> str:
|
||||
active_raw = frontmatter_get(state_file, "active")
|
||||
active_lc = active_raw.lower()
|
||||
if active_lc not in ("true", "1", "yes", "on"):
|
||||
status = task_info.get("status", "")
|
||||
if status == "completed":
|
||||
return ""
|
||||
|
||||
current_phase_raw = frontmatter_get(state_file, "current_phase")
|
||||
max_phases_raw = frontmatter_get(state_file, "max_phases")
|
||||
phase_name = frontmatter_get(state_file, "phase_name")
|
||||
completion_promise = frontmatter_get(state_file, "completion_promise")
|
||||
current_phase = task_info.get("current_phase", 1)
|
||||
max_phases = task_info.get("max_phases", 5)
|
||||
phase_name = task_info.get("phase_name", phase_name_for(current_phase))
|
||||
completion_promise = task_info.get("completion_promise", "<promise>DO_COMPLETE</promise>")
|
||||
|
||||
try:
|
||||
current_phase = int(current_phase_raw)
|
||||
except (ValueError, TypeError):
|
||||
current_phase = 1
|
||||
|
||||
try:
|
||||
max_phases = int(max_phases_raw)
|
||||
except (ValueError, TypeError):
|
||||
max_phases = 5
|
||||
|
||||
if not phase_name:
|
||||
phase_name = phase_name_for(current_phase)
|
||||
|
||||
if not completion_promise:
|
||||
completion_promise = "<promise>DO_COMPLETE</promise>"
|
||||
|
||||
phases_done = current_phase >= max_phases
|
||||
|
||||
promise_met = False
|
||||
if completion_promise:
|
||||
if stdin_payload and completion_promise in stdin_payload:
|
||||
promise_met = True
|
||||
else:
|
||||
body = get_body(state_file)
|
||||
if body and completion_promise in body:
|
||||
promise_met = True
|
||||
|
||||
if phases_done and promise_met:
|
||||
try:
|
||||
os.remove(state_file)
|
||||
except Exception:
|
||||
pass
|
||||
if current_phase >= max_phases:
|
||||
# Task is at final phase, allow exit
|
||||
return ""
|
||||
|
||||
if not phases_done:
|
||||
return (f"do loop incomplete: current phase {current_phase}/{max_phases} ({phase_name}). "
|
||||
f"Continue with remaining phases; update {state_file} current_phase/phase_name after each phase. "
|
||||
f"Include completion_promise in final output when done: {completion_promise}. "
|
||||
f"To exit early, set active to false.")
|
||||
else:
|
||||
return (f"do reached final phase (current_phase={current_phase} / max_phases={max_phases}, "
|
||||
f"phase_name={phase_name}), but completion_promise not detected: {completion_promise}. "
|
||||
f"Please include this marker in your final output (or write it to {state_file} body), "
|
||||
f"then finish; to force exit, set active to false.")
|
||||
return (
|
||||
f"do loop incomplete: current phase {current_phase}/{max_phases} ({phase_name}). "
|
||||
f"Continue with remaining phases; use 'task.py update-phase <N>' after each phase. "
|
||||
f"Include completion_promise in final output when done: {completion_promise}. "
|
||||
f"To exit early, set status to 'completed' in task.json."
|
||||
)
|
||||
|
||||
|
||||
def main():
|
||||
project_dir = os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd())
|
||||
state_dir = os.path.join(project_dir, ".claude")
|
||||
|
||||
do_task_id = os.environ.get("DO_TASK_ID", "")
|
||||
|
||||
if do_task_id:
|
||||
candidate = os.path.join(state_dir, f"do.{do_task_id}.local.md")
|
||||
state_files = [candidate] if os.path.isfile(candidate) else []
|
||||
else:
|
||||
state_files = glob.glob(os.path.join(state_dir, "do.*.local.md"))
|
||||
|
||||
if not state_files:
|
||||
task_dir = get_current_task(project_dir)
|
||||
if not task_dir:
|
||||
# No active task, allow exit
|
||||
sys.exit(0)
|
||||
|
||||
stdin_payload = ""
|
||||
@@ -127,18 +95,13 @@ def main():
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
blocking_reasons = []
|
||||
for state_file in state_files:
|
||||
reason = check_state_file(state_file, stdin_payload)
|
||||
if reason:
|
||||
blocking_reasons.append(reason)
|
||||
|
||||
if not blocking_reasons:
|
||||
reason = check_task_complete(project_dir, task_dir)
|
||||
if not reason:
|
||||
sys.exit(0)
|
||||
|
||||
combined_reason = " ".join(blocking_reasons)
|
||||
print(json.dumps({"decision": "block", "reason": combined_reason}))
|
||||
print(json.dumps({"decision": "block", "reason": reason}))
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
218
skills/do/hooks/verify-loop.py
Normal file
218
skills/do/hooks/verify-loop.py
Normal file
@@ -0,0 +1,218 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Verify Loop Hook for do skill workflow.
|
||||
|
||||
SubagentStop hook that intercepts when code-reviewer agent tries to stop.
|
||||
Runs verification commands to ensure code quality before allowing exit.
|
||||
|
||||
Mechanism:
|
||||
- Intercepts SubagentStop event for code-reviewer agent
|
||||
- Runs verify commands from task.json if configured
|
||||
- Blocks stopping until verification passes
|
||||
- Has max iterations as safety limit (MAX_ITERATIONS=5)
|
||||
|
||||
State file: .claude/do-tasks/.verify-state.json
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
# Configuration
|
||||
MAX_ITERATIONS = 5
|
||||
STATE_TIMEOUT_MINUTES = 30
|
||||
DIR_TASKS = ".claude/do-tasks"
|
||||
FILE_CURRENT_TASK = ".current-task"
|
||||
FILE_TASK_JSON = "task.json"
|
||||
STATE_FILE = ".claude/do-tasks/.verify-state.json"
|
||||
|
||||
# Only control loop for code-reviewer agent
|
||||
TARGET_AGENTS = {"code-reviewer"}
|
||||
|
||||
|
||||
def get_project_root(cwd: str) -> str | None:
|
||||
"""Find project root (directory with .claude folder)."""
|
||||
current = Path(cwd).resolve()
|
||||
while current != current.parent:
|
||||
if (current / ".claude").exists():
|
||||
return str(current)
|
||||
current = current.parent
|
||||
return None
|
||||
|
||||
|
||||
def get_current_task(project_root: str) -> str | None:
|
||||
"""Read current task directory path."""
|
||||
current_task_file = os.path.join(project_root, DIR_TASKS, FILE_CURRENT_TASK)
|
||||
if not os.path.exists(current_task_file):
|
||||
return None
|
||||
try:
|
||||
with open(current_task_file, "r", encoding="utf-8") as f:
|
||||
content = f.read().strip()
|
||||
return content if content else None
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def get_task_info(project_root: str, task_dir: str) -> dict | None:
    """Load and return the task's task.json, or None if absent/invalid."""
    path = os.path.join(project_root, task_dir, FILE_TASK_JSON)
    if not os.path.exists(path):
        return None
    try:
        with open(path, "r", encoding="utf-8") as fh:
            return json.load(fh)
    except Exception:
        return None
|
||||
|
||||
|
||||
def get_verify_commands(task_info: dict) -> list[str]:
    """Return the task's verify commands, always as a list.

    Guards against malformed task.json: a missing key, an explicit null, or
    any non-list value yields [] — the old code returned the raw value, so a
    string would have been iterated command-by-character by the caller.
    """
    commands = task_info.get("verify_commands")
    return commands if isinstance(commands, list) else []
|
||||
|
||||
|
||||
def run_verify_commands(project_root: str, commands: list[str]) -> tuple[bool, str]:
    """Execute each shell command in *project_root*, stopping at the first failure.

    Returns (True, success message) when every command exits 0; otherwise
    (False, diagnostic). A failing command's stderr (falling back to stdout)
    is included, truncated to 500 characters. Each command gets a 120 s
    timeout.
    """
    for command in commands:
        try:
            proc = subprocess.run(
                command,
                shell=True,
                cwd=project_root,
                capture_output=True,
                timeout=120,
            )
        except subprocess.TimeoutExpired:
            return False, f"Command timed out: {command}"
        except Exception as exc:
            return False, f"Command error: {command} - {str(exc)}"
        if proc.returncode == 0:
            continue
        detail = (
            proc.stderr.decode("utf-8", errors="replace")
            or proc.stdout.decode("utf-8", errors="replace")
        )
        if len(detail) > 500:
            detail = detail[:500] + "..."
        return False, f"Command failed: {command}\n{detail}"
    return True, "All verify commands passed"
|
||||
|
||||
|
||||
def load_state(project_root: str) -> dict:
    """Load the verify-loop state file; fall back to a fresh state on any error."""
    fresh = {"task": None, "iteration": 0, "started_at": None}
    path = os.path.join(project_root, STATE_FILE)
    if not os.path.exists(path):
        return fresh
    try:
        with open(path, "r", encoding="utf-8") as fh:
            return json.load(fh)
    except Exception:
        return fresh
|
||||
|
||||
|
||||
def save_state(project_root: str, state: dict) -> None:
    """Persist the verify-loop state; failures are deliberately ignored
    (a hook must never crash over bookkeeping)."""
    path = os.path.join(project_root, STATE_FILE)
    try:
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, "w", encoding="utf-8") as fh:
            json.dump(state, fh, indent=2, ensure_ascii=False)
    except Exception:
        pass
|
||||
|
||||
|
||||
def main():
    """SubagentStop hook entry point.

    Reads the hook payload from stdin and decides whether the target agent
    may stop: "allow" when verification passes or the iteration cap is hit,
    "block" otherwise. Every path exits with status 0 — silently when the
    event does not apply, with a JSON decision on stdout when it does.
    """

    def emit(decision: str, reason: str) -> None:
        # Hook protocol: one JSON object on stdout.
        print(json.dumps({"decision": decision, "reason": reason}, ensure_ascii=False))

    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError:
        sys.exit(0)

    if payload.get("hook_event_name", "") != "SubagentStop":
        sys.exit(0)

    agent = payload.get("subagent_type", "")
    agent_output = payload.get("agent_output", "")  # read but currently unused
    cwd = payload.get("cwd", os.getcwd())

    if agent not in TARGET_AGENTS:
        sys.exit(0)

    project_root = get_project_root(cwd)
    if not project_root:
        sys.exit(0)

    task_dir = get_current_task(project_root)
    if not task_dir:
        sys.exit(0)

    task_info = get_task_info(project_root, task_dir)
    if not task_info:
        sys.exit(0)

    verify_commands = get_verify_commands(task_info)
    if not verify_commands:
        # Nothing configured to verify; let the agent stop.
        sys.exit(0)

    state = load_state(project_root)

    # Start a fresh loop when the task changed or the state is stale/corrupt.
    stale = state.get("task") != task_dir
    if not stale and state.get("started_at"):
        try:
            age = datetime.now() - datetime.fromisoformat(state["started_at"])
            stale = age.total_seconds() > STATE_TIMEOUT_MINUTES * 60
        except (ValueError, TypeError):
            stale = True

    if stale:
        state = {
            "task": task_dir,
            "iteration": 0,
            "started_at": datetime.now().isoformat(),
        }

    state["iteration"] = state.get("iteration", 0) + 1
    iteration = state["iteration"]
    save_state(project_root, state)

    # Safety valve against an infinite block/retry loop.
    if iteration >= MAX_ITERATIONS:
        state["iteration"] = 0
        save_state(project_root, state)
        emit("allow", f"Max iterations ({MAX_ITERATIONS}) reached. Stopping to prevent infinite loop.")
        sys.exit(0)

    passed, message = run_verify_commands(project_root, verify_commands)
    if passed:
        state["iteration"] = 0
        save_state(project_root, state)
        emit("allow", "All verify commands passed. Review phase complete.")
    else:
        emit(
            "block",
            f"Iteration {iteration}/{MAX_ITERATIONS}. Verification failed:\n{message}\n\nPlease fix the issues and try again.",
        )
    sys.exit(0)


if __name__ == "__main__":
    main()
|
||||
149
skills/do/scripts/get-context.py
Normal file
149
skills/do/scripts/get-context.py
Normal file
@@ -0,0 +1,149 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Get context for current task.
|
||||
|
||||
Reads the current task's jsonl files and returns context for specified agent.
|
||||
Used by inject-context hook to build agent prompts.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
DIR_TASKS = ".claude/do-tasks"
|
||||
FILE_CURRENT_TASK = ".current-task"
|
||||
FILE_TASK_JSON = "task.json"
|
||||
|
||||
|
||||
def get_project_root() -> str:
    """Project root: $CLAUDE_PROJECT_DIR when set, otherwise the working directory."""
    root = os.environ.get("CLAUDE_PROJECT_DIR")
    if root is None:
        root = os.getcwd()
    return root
|
||||
|
||||
|
||||
def get_current_task(project_root: str) -> str | None:
    """Read the `.current-task` pointer file; None when missing, empty, or unreadable."""
    pointer = os.path.join(project_root, DIR_TASKS, FILE_CURRENT_TASK)
    if not os.path.exists(pointer):
        return None
    try:
        with open(pointer, "r", encoding="utf-8") as fh:
            value = fh.read().strip()
            return value if value else None
    except Exception:
        return None
|
||||
|
||||
|
||||
def read_file_content(base_path: str, file_path: str) -> str | None:
|
||||
full_path = os.path.join(base_path, file_path)
|
||||
if os.path.exists(full_path) and os.path.isfile(full_path):
|
||||
try:
|
||||
with open(full_path, "r", encoding="utf-8") as f:
|
||||
return f.read()
|
||||
except Exception:
|
||||
return None
|
||||
return None
|
||||
|
||||
|
||||
def read_jsonl_entries(base_path: str, jsonl_path: str) -> list[tuple[str, str]]:
    """Resolve a jsonl manifest into (file_path, content) pairs.

    Each non-blank line is a JSON object whose "file" (falling back to
    "path") key names a file relative to *base_path*. Lines that are not
    valid JSON, lack both keys, or point at unreadable/empty files are
    skipped silently.
    """
    manifest = os.path.join(base_path, jsonl_path)
    if not os.path.exists(manifest):
        return []

    entries: list[tuple[str, str]] = []
    try:
        with open(manifest, "r", encoding="utf-8") as fh:
            for raw in fh:
                raw = raw.strip()
                if not raw:
                    continue
                try:
                    record = json.loads(raw)
                except json.JSONDecodeError:
                    continue
                rel = record.get("file") or record.get("path")
                if not rel:
                    continue
                text = read_file_content(base_path, rel)
                if text:
                    entries.append((rel, text))
    except Exception:
        pass
    return entries
|
||||
|
||||
|
||||
def get_agent_context(project_root: str, task_dir: str, agent_type: str) -> str:
    """Build the prompt context for *agent_type*.

    Concatenates every file listed in <task_dir>/<agent_type>.jsonl, then
    appends <task_dir>/prd.md when present, each under a `=== path ===`
    header, joined by blank lines.
    """
    sections: list[str] = []

    manifest = os.path.join(task_dir, f"{agent_type}.jsonl")
    for rel, text in read_jsonl_entries(project_root, manifest):
        sections.append(f"=== {rel} ===\n{text}")

    prd = read_file_content(project_root, os.path.join(task_dir, "prd.md"))
    if prd:
        sections.append(f"=== {task_dir}/prd.md (Requirements) ===\n{prd}")

    return "\n\n".join(sections)
|
||||
|
||||
|
||||
def get_task_info(project_root: str, task_dir: str) -> dict | None:
    """Load the task's task.json; None when it is missing or unparsable."""
    path = os.path.join(project_root, task_dir, FILE_TASK_JSON)
    if not os.path.exists(path):
        return None
    try:
        with open(path, "r", encoding="utf-8") as fh:
            return json.load(fh)
    except Exception:
        return None
|
||||
|
||||
|
||||
def main():
    """CLI entry point.

    With no positional argument, prints current task info; with an agent
    name, prints that agent's assembled context. `--json` switches output to
    a single JSON object. Exits 1 when no task is active.
    """
    import argparse

    parser = argparse.ArgumentParser(description="Get context for current task")
    parser.add_argument(
        "agent",
        nargs="?",
        choices=["implement", "check", "debug"],
        help="Agent type (optional, returns task info if not specified)",
    )
    parser.add_argument("--json", action="store_true", help="Output as JSON")
    args = parser.parse_args()

    root = get_project_root()
    task_dir = get_current_task(root)

    if not task_dir:
        if args.json:
            print(json.dumps({"error": "No active task"}))
        else:
            print("No active task.", file=sys.stderr)
        sys.exit(1)

    task_info = get_task_info(root, task_dir)

    # No agent requested: just report which task is active.
    if not args.agent:
        if args.json:
            print(json.dumps({"task_dir": task_dir, "task_info": task_info}))
        else:
            print(f"Task: {task_dir}")
            if task_info:
                print(f"Title: {task_info.get('title', 'N/A')}")
                print(f"Phase: {task_info.get('current_phase', '?')}/{task_info.get('max_phases', 5)}")
        sys.exit(0)

    context = get_agent_context(root, task_dir, args.agent)

    if args.json:
        print(json.dumps({
            "task_dir": task_dir,
            "agent": args.agent,
            "context": context,
            "task_info": task_info,
        }))
    else:
        print(context)


if __name__ == "__main__":
    main()
|
||||
@@ -1,28 +1,27 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Initialize do skill workflow - wrapper around task.py.
|
||||
|
||||
Creates a task directory under .claude/do-tasks/ with:
|
||||
- task.md: Task metadata (YAML frontmatter) + requirements (Markdown body)
|
||||
|
||||
If --worktree is specified, also creates a git worktree for isolated development.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import secrets
|
||||
import sys
|
||||
import time
|
||||
|
||||
PHASE_NAMES = {
|
||||
1: "Understand",
|
||||
2: "Clarify",
|
||||
3: "Design",
|
||||
4: "Implement",
|
||||
5: "Complete",
|
||||
}
|
||||
from task import create_task, PHASE_NAMES
|
||||
|
||||
def phase_name_for(n: int) -> str:
|
||||
return PHASE_NAMES.get(n, f"Phase {n}")
|
||||
|
||||
def die(msg: str):
|
||||
print(f"❌ {msg}", file=sys.stderr)
|
||||
print(f"Error: {msg}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Creates (or overwrites) project state file: .claude/do.local.md"
|
||||
description="Initialize do skill workflow with task directory"
|
||||
)
|
||||
parser.add_argument("--max-phases", type=int, default=5, help="Default: 5")
|
||||
parser.add_argument(
|
||||
@@ -34,52 +33,26 @@ def main():
|
||||
parser.add_argument("prompt", nargs="+", help="Task description")
|
||||
args = parser.parse_args()
|
||||
|
||||
max_phases = args.max_phases
|
||||
completion_promise = args.completion_promise
|
||||
use_worktree = args.worktree
|
||||
prompt = " ".join(args.prompt)
|
||||
|
||||
if max_phases < 1:
|
||||
if args.max_phases < 1:
|
||||
die("--max-phases must be a positive integer")
|
||||
|
||||
project_dir = os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd())
|
||||
state_dir = os.path.join(project_dir, ".claude")
|
||||
prompt = " ".join(args.prompt)
|
||||
result = create_task(title=prompt, use_worktree=args.worktree)
|
||||
|
||||
task_id = f"{int(time.time())}-{os.getpid()}-{secrets.token_hex(4)}"
|
||||
state_file = os.path.join(state_dir, f"do.{task_id}.local.md")
|
||||
task_data = result["task_data"]
|
||||
worktree_dir = result.get("worktree_dir", "")
|
||||
|
||||
os.makedirs(state_dir, exist_ok=True)
|
||||
print(f"Initialized: {result['relative_path']}")
|
||||
print(f"task_id: {task_data['id']}")
|
||||
print(f"phase: 1/{task_data['max_phases']} ({PHASE_NAMES[1]})")
|
||||
print(f"completion_promise: {task_data['completion_promise']}")
|
||||
print(f"use_worktree: {task_data['use_worktree']}")
|
||||
print(f"export DO_TASK_DIR={result['relative_path']}")
|
||||
|
||||
phase_name = phase_name_for(1)
|
||||
if worktree_dir:
|
||||
print(f"worktree_dir: {worktree_dir}")
|
||||
print(f"export DO_WORKTREE_DIR={worktree_dir}")
|
||||
|
||||
content = f"""---
|
||||
active: true
|
||||
current_phase: 1
|
||||
phase_name: "{phase_name}"
|
||||
max_phases: {max_phases}
|
||||
completion_promise: "{completion_promise}"
|
||||
use_worktree: {str(use_worktree).lower()}
|
||||
---
|
||||
|
||||
# do loop state
|
||||
|
||||
## Prompt
|
||||
{prompt}
|
||||
|
||||
## Notes
|
||||
- Update frontmatter current_phase/phase_name as you progress
|
||||
- When complete, include the frontmatter completion_promise in your final output
|
||||
"""
|
||||
|
||||
with open(state_file, "w", encoding="utf-8") as f:
|
||||
f.write(content)
|
||||
|
||||
print(f"Initialized: {state_file}")
|
||||
print(f"task_id: {task_id}")
|
||||
print(f"phase: 1/{max_phases} ({phase_name})")
|
||||
print(f"completion_promise: {completion_promise}")
|
||||
print(f"use_worktree: {use_worktree}")
|
||||
print(f"export DO_TASK_ID={task_id}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
434
skills/do/scripts/task.py
Normal file
434
skills/do/scripts/task.py
Normal file
@@ -0,0 +1,434 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Task Directory Management CLI for do skill workflow.
|
||||
|
||||
Commands:
|
||||
create <title> - Create a new task directory with task.md
|
||||
start <task-dir> - Set current task pointer
|
||||
finish - Clear current task pointer
|
||||
list - List active tasks
|
||||
status - Show current task status
|
||||
update-phase <N> - Update current phase
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import random
|
||||
import re
|
||||
import string
|
||||
import subprocess
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
# Directory constants
|
||||
DIR_TASKS = ".claude/do-tasks"
|
||||
FILE_CURRENT_TASK = ".current-task"
|
||||
FILE_TASK_MD = "task.md"
|
||||
|
||||
PHASE_NAMES = {
|
||||
1: "Understand",
|
||||
2: "Clarify",
|
||||
3: "Design",
|
||||
4: "Implement",
|
||||
5: "Complete",
|
||||
}
|
||||
|
||||
|
||||
def get_project_root() -> str:
    """Project root: $CLAUDE_PROJECT_DIR when set, else the current working directory."""
    env_root = os.environ.get("CLAUDE_PROJECT_DIR")
    return env_root if env_root is not None else os.getcwd()
|
||||
|
||||
|
||||
def get_tasks_dir(project_root: str) -> str:
    """Return the task storage directory: <project_root>/.claude/do-tasks."""
    tasks_dir = os.path.join(project_root, DIR_TASKS)
    return tasks_dir
|
||||
|
||||
|
||||
def get_current_task_file(project_root: str) -> str:
    """Return the path of the `.current-task` pointer file."""
    pointer = os.path.join(project_root, DIR_TASKS, FILE_CURRENT_TASK)
    return pointer
|
||||
|
||||
|
||||
def generate_task_id() -> str:
    """Generate a short task ID: MMDD-xxxx with four random [a-z0-9] characters."""
    alphabet = string.ascii_lowercase + string.digits
    suffix = "".join(random.choice(alphabet) for _ in range(4))
    return f"{datetime.now():%m%d}-{suffix}"
|
||||
|
||||
|
||||
def read_task_md(task_md_path: str) -> dict | None:
|
||||
"""Read task.md and parse YAML frontmatter + body."""
|
||||
if not os.path.exists(task_md_path):
|
||||
return None
|
||||
|
||||
try:
|
||||
with open(task_md_path, "r", encoding="utf-8") as f:
|
||||
content = f.read()
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
# Parse YAML frontmatter
|
||||
match = re.match(r'^---\n(.*?)\n---\n(.*)$', content, re.DOTALL)
|
||||
if not match:
|
||||
return None
|
||||
|
||||
frontmatter_str = match.group(1)
|
||||
body = match.group(2)
|
||||
|
||||
# Simple YAML parsing (no external deps)
|
||||
frontmatter = {}
|
||||
for line in frontmatter_str.split('\n'):
|
||||
if ':' in line:
|
||||
key, value = line.split(':', 1)
|
||||
key = key.strip()
|
||||
value = value.strip()
|
||||
# Handle quoted strings
|
||||
if value.startswith('"') and value.endswith('"'):
|
||||
value = value[1:-1]
|
||||
elif value == 'true':
|
||||
value = True
|
||||
elif value == 'false':
|
||||
value = False
|
||||
elif value.isdigit():
|
||||
value = int(value)
|
||||
frontmatter[key] = value
|
||||
|
||||
return {"frontmatter": frontmatter, "body": body}
|
||||
|
||||
|
||||
def write_task_md(task_md_path: str, frontmatter: dict, body: str) -> bool:
    """Serialize frontmatter (minimal YAML subset) plus body to task.md.

    Serialization rules: bools become true/false, ints are bare, strings are
    always double-quoted, any other type is rendered via str(). Returns True
    on success, False on any failure.

    Note: the previous version had a dead special case quoting strings
    containing '<', '>' or ':' — the final else already quoted every string
    identically, so the branch is removed without changing output.
    """
    try:
        lines = ["---"]
        for key, value in frontmatter.items():
            if isinstance(value, bool):
                # Must be checked before int: bool is an int subclass.
                lines.append(f"{key}: {str(value).lower()}")
            elif isinstance(value, int):
                lines.append(f"{key}: {value}")
            elif isinstance(value, str):
                lines.append(f'{key}: "{value}"')
            else:
                lines.append(f"{key}: {value}")
        lines.append("---")
        lines.append("")
        lines.append(body)

        with open(task_md_path, "w", encoding="utf-8") as fh:
            fh.write('\n'.join(lines))
        return True
    except Exception:
        return False
|
||||
|
||||
|
||||
def create_worktree(project_root: str, task_id: str) -> str:
    """Create a git worktree with a new branch `do/<task_id>` for the task.

    Returns the worktree directory (<git_root>/.worktrees/do-<task_id>).
    Raises RuntimeError when *project_root* is not inside a git repository
    or the worktree creation fails.
    """
    toplevel = subprocess.run(
        ["git", "-C", project_root, "rev-parse", "--show-toplevel"],
        capture_output=True,
        text=True,
    )
    if toplevel.returncode != 0:
        raise RuntimeError(f"Not a git repository: {project_root}")
    git_root = toplevel.stdout.strip()

    worktree_dir = os.path.join(git_root, ".worktrees", f"do-{task_id}")
    branch_name = f"do/{task_id}"

    added = subprocess.run(
        ["git", "-C", git_root, "worktree", "add", "-b", branch_name, worktree_dir],
        capture_output=True,
        text=True,
    )
    if added.returncode != 0:
        raise RuntimeError(f"Failed to create worktree: {added.stderr}")

    return worktree_dir
|
||||
|
||||
|
||||
def create_task(title: str, use_worktree: bool = False) -> dict:
    """Create a new task directory containing task.md and make it current.

    Returns a dict with "task_dir", "relative_path", "task_data" (the
    frontmatter), and "worktree_dir". A failed worktree creation degrades
    gracefully: a warning goes to stderr and the task proceeds with
    use_worktree=False.
    """
    project_root = get_project_root()
    tasks_dir = get_tasks_dir(project_root)
    os.makedirs(tasks_dir, exist_ok=True)

    task_id = generate_task_id()
    task_dir = os.path.join(tasks_dir, task_id)
    os.makedirs(task_dir, exist_ok=True)

    # Optional isolated git worktree for this task.
    worktree_dir = ""
    if use_worktree:
        try:
            worktree_dir = create_worktree(project_root, task_id)
        except RuntimeError as exc:
            print(f"Warning: {exc}", file=sys.stderr)
            use_worktree = False

    frontmatter = {
        "id": task_id,
        "title": title,
        "status": "in_progress",
        "current_phase": 1,
        "phase_name": PHASE_NAMES[1],
        "max_phases": 5,
        "use_worktree": use_worktree,
        "worktree_dir": worktree_dir,
        "created_at": datetime.now().isoformat(),
        "completion_promise": "<promise>DO_COMPLETE</promise>",
    }

    body = f"""# Requirements

{title}

## Context

## Progress
"""

    write_task_md(os.path.join(task_dir, FILE_TASK_MD), frontmatter, body)

    # Point the .current-task file at the new task.
    relative_task_dir = os.path.relpath(task_dir, project_root)
    with open(get_current_task_file(project_root), "w", encoding="utf-8") as fh:
        fh.write(relative_task_dir)

    return {
        "task_dir": task_dir,
        "relative_path": relative_task_dir,
        "task_data": frontmatter,
        "worktree_dir": worktree_dir,
    }
|
||||
|
||||
|
||||
def get_current_task(project_root: str) -> str | None:
    """Return the relative path stored in `.current-task`, or None.

    None covers a missing pointer file, an empty one, or a read error.
    """
    pointer = get_current_task_file(project_root)
    if not os.path.exists(pointer):
        return None
    try:
        with open(pointer, "r", encoding="utf-8") as fh:
            value = fh.read().strip()
    except Exception:
        return None
    return value or None
|
||||
|
||||
|
||||
def start_task(task_dir: str) -> bool:
    """Make *task_dir* the current task.

    Accepts an absolute path, a bare task ID, or a path already relative to
    the project root (starting with .claude/do-tasks). Returns False (with a
    message on stderr) when the directory does not exist.
    """
    project_root = get_project_root()

    if os.path.isabs(task_dir):
        full_path = task_dir
        relative_path = os.path.relpath(task_dir, project_root)
    elif task_dir.startswith(DIR_TASKS):
        full_path = os.path.join(project_root, task_dir)
        relative_path = task_dir
    else:
        # Bare task ID: resolve under the tasks directory.
        full_path = os.path.join(get_tasks_dir(project_root), task_dir)
        relative_path = os.path.join(DIR_TASKS, task_dir)

    if not os.path.exists(full_path):
        print(f"Error: Task directory not found: {full_path}", file=sys.stderr)
        return False

    pointer = get_current_task_file(project_root)
    os.makedirs(os.path.dirname(pointer), exist_ok=True)
    with open(pointer, "w", encoding="utf-8") as fh:
        fh.write(relative_path)
    return True
|
||||
|
||||
|
||||
def finish_task() -> bool:
    """Delete the `.current-task` pointer if it exists. Always returns True."""
    pointer = get_current_task_file(get_project_root())
    if os.path.exists(pointer):
        os.remove(pointer)
    return True
|
||||
|
||||
|
||||
def list_tasks() -> list[dict]:
    """Collect metadata for every task directory, sorted by name descending.

    Each entry is the parsed task.md frontmatter (or a stub with
    status="unknown" when parsing fails) augmented with "path" (relative to
    the project root) and "is_current". Directories lacking a task.md are
    skipped.
    """
    project_root = get_project_root()
    tasks_dir = get_tasks_dir(project_root)
    if not os.path.exists(tasks_dir):
        return []

    current = get_current_task(project_root)
    tasks: list[dict] = []

    for name in sorted(os.listdir(tasks_dir), reverse=True):
        entry_path = os.path.join(tasks_dir, name)
        if not os.path.isdir(entry_path):
            continue
        task_md = os.path.join(entry_path, FILE_TASK_MD)
        if not os.path.exists(task_md):
            continue

        parsed = read_task_md(task_md)
        if parsed:
            data = parsed["frontmatter"]
        else:
            data = {"id": name, "title": name, "status": "unknown"}

        rel = os.path.join(DIR_TASKS, name)
        data["path"] = rel
        data["is_current"] = current == rel
        tasks.append(data)

    return tasks
|
||||
|
||||
|
||||
def get_status() -> dict | None:
    """Return the current task's frontmatter plus its "path", or None.

    None when there is no active task or its task.md cannot be parsed.
    """
    project_root = get_project_root()
    current = get_current_task(project_root)
    if not current:
        return None

    parsed = read_task_md(os.path.join(project_root, current, FILE_TASK_MD))
    if not parsed:
        return None

    data = parsed["frontmatter"]
    data["path"] = current
    return data
|
||||
|
||||
|
||||
def update_phase(phase: int) -> bool:
    """Update current_phase/phase_name in the active task's task.md.

    Returns False (with a message on stderr) when no task is active, the
    task.md is missing/invalid, or the rewrite fails.
    """
    project_root = get_project_root()
    current = get_current_task(project_root)
    if not current:
        print("Error: No active task.", file=sys.stderr)
        return False

    task_md = os.path.join(project_root, current, FILE_TASK_MD)
    parsed = read_task_md(task_md)
    if not parsed:
        print("Error: task.md not found or invalid.", file=sys.stderr)
        return False

    meta = parsed["frontmatter"]
    meta["current_phase"] = phase
    meta["phase_name"] = PHASE_NAMES.get(phase, f"Phase {phase}")

    if not write_task_md(task_md, meta, parsed["body"]):
        print("Error: Failed to write task.md.", file=sys.stderr)
        return False

    return True
|
||||
|
||||
|
||||
def main():
    """CLI dispatcher for the task-management commands.

    Subcommands: create, start, finish, list, status, update-phase. Exits 1
    on command failure or when no subcommand is given.
    """
    parser = argparse.ArgumentParser(
        description="Task directory management for do skill workflow"
    )
    sub = parser.add_subparsers(dest="command", help="Available commands")

    p_create = sub.add_parser("create", help="Create a new task")
    p_create.add_argument("title", nargs="+", help="Task title")
    p_create.add_argument("--worktree", action="store_true", help="Enable worktree mode")

    p_start = sub.add_parser("start", help="Set current task")
    p_start.add_argument("task_dir", help="Task directory path")

    sub.add_parser("finish", help="Clear current task")
    sub.add_parser("list", help="List all tasks")
    sub.add_parser("status", help="Show current task status")

    p_phase = sub.add_parser("update-phase", help="Update current phase")
    p_phase.add_argument("phase", type=int, help="Phase number (1-5)")

    args = parser.parse_args()

    if args.command == "create":
        result = create_task(" ".join(args.title), args.worktree)
        data = result["task_data"]
        print(f"Created task: {result['relative_path']}")
        print(f"Task ID: {data['id']}")
        print(f"Phase: 1/{data['max_phases']} (Understand)")
        print(f"Worktree: {data['use_worktree']}")

    elif args.command == "start":
        if not start_task(args.task_dir):
            sys.exit(1)
        print(f"Started task: {args.task_dir}")

    elif args.command == "finish":
        if not finish_task():
            sys.exit(1)
        print("Task finished, current task cleared.")

    elif args.command == "list":
        tasks = list_tasks()
        if not tasks:
            print("No tasks found.")
        for task in tasks:
            marker = "* " if task.get("is_current") else " "
            phase = task.get("current_phase", "?")
            max_phase = task.get("max_phases", 5)
            status = task.get("status", "unknown")
            print(f"{marker}{task['id']} [{status}] phase {phase}/{max_phase}")
            print(f" {task.get('title', 'No title')}")

    elif args.command == "status":
        status = get_status()
        if not status:
            print("No active task.")
        else:
            print(f"Task: {status['id']}")
            print(f"Title: {status.get('title', 'No title')}")
            print(f"Status: {status.get('status', 'unknown')}")
            print(f"Phase: {status.get('current_phase', '?')}/{status.get('max_phases', 5)}")
            print(f"Worktree: {status.get('use_worktree', False)}")
            print(f"Path: {status['path']}")

    elif args.command == "update-phase":
        if not update_phase(args.phase):
            sys.exit(1)
        name = PHASE_NAMES.get(args.phase, f"Phase {args.phase}")
        print(f"Updated to phase {args.phase} ({name})")

    else:
        parser.print_help()
        sys.exit(1)


if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user