Mirror of https://github.com/cexll/myclaude.git (synced 2026-02-14 03:31:58 +08:00)
feat(skills): add per-task skill spec auto-detection and injection
Replace the external inject-spec.py hook with built-in, zero-config skill detection in codeagent-wrapper. The system auto-detects the project type from fingerprint files (go.mod, package.json, etc.), maps it to installed skills, and injects SKILL.md content directly into sub-agent prompts.

Key changes:
- Add DetectProjectSkills/ResolveSkillContent in executor/prompt.go
- Add Skills field to TaskSpec with parallel config parsing
- Add --skills CLI flag for explicit override
- Update /do SKILL.md Phase 4 with per-task skill examples
- Remove on-stop.py global hook (not needed)
- Replace inject-spec.py with a no-op (detection is now internal)
- Add 20 unit tests covering detection, resolution, budget, and security

Security: path traversal protection via the validSkillName regex, a 16K-char budget with tag overhead accounting, and CRLF normalization.

Generated with SWE-Agent.ai
Co-Authored-By: SWE-Agent.ai <noreply@swe-agent.ai>
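As a quick illustration of the new injection points (a sketch only; the `--skills` flag and the per-task `skills:` field are the ones exercised in the /do SKILL.md changes below, and the skill names are taken from those examples):

```bash
# Explicit override on a single call via the new --skills flag
codeagent-wrapper --agent develop --skills golang-base-practices - . <<'EOF'
...
EOF

# Per-task injection inside a --parallel spec
codeagent-wrapper --parallel <<'EOF'
---TASK---
id: backend
agent: develop
workdir: .
skills: golang-base-practices
---CONTENT---
...
EOF
```

With neither given, skills are auto-detected from fingerprint files such as go.mod or package.json.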
@@ -52,7 +52,7 @@ To customize agents, create same-named files in `~/.codeagent/agents/` to overri
 3. **Phase 4 requires approval** - stop after Phase 3 if not approved
 4. **Pass complete context forward** - every agent gets the Context Pack
 5. **Parallel-first** - run independent tasks via `codeagent-wrapper --parallel`
-6. **Update state after each phase** - keep `.claude/do.{task_id}.local.md` current
+6. **Update state after each phase** - keep `.claude/do-tasks/{task_id}/task.json` current
 
 ## Context Pack Template
 
@@ -78,16 +78,34 @@ To customize agents, create same-named files in `~/.codeagent/agents/` to overri
 
 ## Loop State Management
 
-When triggered via `/do <task>`, initializes `.claude/do.{task_id}.local.md` with:
-- `active: true`
-- `current_phase: 1`
-- `max_phases: 5`
-- `completion_promise: "<promise>DO_COMPLETE</promise>"`
-
-After each phase, update frontmatter:
+When triggered via `/do <task>`, initializes `.claude/do-tasks/{task_id}/task.md` with YAML frontmatter:
 ```yaml
-current_phase: <next phase number>
-phase_name: "<next phase name>"
+---
+id: "<task_id>"
+title: "<task description>"
+status: "in_progress"
+current_phase: 1
+phase_name: "Understand"
+max_phases: 5
+use_worktree: false
+created_at: "<ISO timestamp>"
+completion_promise: "<promise>DO_COMPLETE</promise>"
+---
+
+# Requirements
+
+<task description>
+
+## Context
+
+## Progress
 ```
+
+The current task is tracked in `.claude/do-tasks/.current-task`.
+
+After each phase, update `task.md` frontmatter via:
+```bash
+python3 ".claude/skills/do/scripts/task.py" update-phase <N>
+```
 
 When all 5 phases complete, output:
@@ -95,17 +113,17 @@
 <promise>DO_COMPLETE</promise>
 ```
 
-To abort early, set `active: false` in the state file.
+To abort early, manually edit `task.md` and set `status: "cancelled"` in the frontmatter.
 
 ## Stop Hook
 
 A Stop hook is registered after installation:
-1. Creates `.claude/do.{task_id}.local.md` state file
-2. Updates `current_phase` after each phase
+1. Creates `.claude/do-tasks/{task_id}/task.md` state file
+2. Updates `current_phase` in frontmatter after each phase
 3. Stop hook checks state, blocks exit if incomplete
 4. Outputs `<promise>DO_COMPLETE</promise>` when finished
 
-Manual exit: Set `active` to `false` in the state file.
+Manual exit: Edit `task.md` and set `status: "cancelled"` in the frontmatter.
 
 ## Parallel Execution Examples
 
@@ -1,7 +1,7 @@
 ---
 name: do
 description: This skill should be used for structured feature development with codebase understanding. Triggers on /do command. Provides a 5-phase workflow (Understand, Clarify, Design, Implement, Complete) using codeagent-wrapper to orchestrate code-explorer, code-architect, code-reviewer, and develop agents in parallel.
-allowed-tools: ["Bash(${SKILL_DIR}/scripts/setup-do.py:*)"]
+allowed-tools: ["Bash(.claude/skills/do/scripts/setup-do.py:*)", "Bash(.claude/skills/do/scripts/task.py:*)"]
 ---
 
 # do - Feature Development Orchestrator
@@ -22,75 +22,54 @@ Develop in a separate worktree? (Isolates changes from main branch)
 - No (Work directly in current directory)
 ```
 
-### Step 2: Initialize state
+### Step 2: Initialize task directory
 
 ```bash
 # If worktree mode selected:
-python3 "${SKILL_DIR}/scripts/setup-do.py" --worktree "<task description>"
+python3 ".claude/skills/do/scripts/setup-do.py" --worktree "<task description>"
 
 # If no worktree:
-python3 "${SKILL_DIR}/scripts/setup-do.py" "<task description>"
+python3 ".claude/skills/do/scripts/setup-do.py" "<task description>"
 ```
 
-This creates `.claude/do.{task_id}.local.md` with:
-- `active: true`
-- `current_phase: 1`
-- `max_phases: 5`
-- `completion_promise: "<promise>DO_COMPLETE</promise>"`
-- `use_worktree: true/false`
+This creates a task directory under `.claude/do-tasks/` with:
+- `task.md`: Single file containing YAML frontmatter (metadata) + Markdown body (requirements/context)
+
+## Task Directory Management
+
+Use `task.py` to manage task state:
+
+```bash
+# Update phase
+python3 ".claude/skills/do/scripts/task.py" update-phase 2
+
+# Check status
+python3 ".claude/skills/do/scripts/task.py" status
+
+# List all tasks
+python3 ".claude/skills/do/scripts/task.py" list
+```
 
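For orientation, `status` prints the current task's frontmatter fields one per line; a run might look like this (the values shown are illustrative, the field names come from task.py below):

```bash
$ python3 ".claude/skills/do/scripts/task.py" status
Task: 0214-a3k9
Title: add csv export to report page
Status: in_progress
Phase: 2/5
Worktree: False
Path: .claude/do-tasks/0214-a3k9
```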
 ## Worktree Mode
 
-When `use_worktree: true` in state file:
-- The worktree is created once during initialization (setup-do.py)
-- The worktree path is stored in `worktree_dir` frontmatter field
-- Environment variable `DO_WORKTREE_DIR` is exported for codeagent-wrapper to use
-- ALL `codeagent-wrapper` calls that modify code MUST include `--worktree` flag
-- codeagent-wrapper detects `DO_WORKTREE_DIR` and reuses the existing worktree instead of creating new ones
+When worktree mode is enabled in task.json, ALL `codeagent-wrapper` calls that modify code MUST include `--worktree`:
 
 ```bash
 # With worktree mode enabled - codeagent-wrapper will use DO_WORKTREE_DIR automatically
 codeagent-wrapper --worktree --agent develop - . <<'EOF'
 ...
 EOF
 
 # Parallel tasks with worktree
 codeagent-wrapper --worktree --parallel <<'EOF'
 ---TASK---
 id: task1
 agent: develop
 workdir: .
 ---CONTENT---
 ...
 EOF
 ```
 
-The `--worktree` flag tells codeagent-wrapper to use worktree mode. When `DO_WORKTREE_DIR` is set, it reuses that directory; otherwise it creates a new worktree (backward compatibility). Read-only agents (code-explorer, code-architect, code-reviewer) do NOT need `--worktree`.
-
-## Loop State Management
-
-After each phase, update `.claude/do.{task_id}.local.md` frontmatter:
-```yaml
-current_phase: <next phase number>
-phase_name: "<next phase name>"
-```
-
-When all 5 phases complete, output the completion signal:
-```
-<promise>DO_COMPLETE</promise>
-```
-
-To abort early, set `active: false` in the state file.
+Read-only agents (code-explorer, code-architect, code-reviewer) do NOT need `--worktree`.
 
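A hedged sketch of how the orchestrator can honor the worktree setting without hard-coding it, assuming the `task.py status` output format shown earlier (the grep pattern is an assumption tied to that format):

```bash
# Pass --worktree only when the current task was created with worktree mode
if python3 ".claude/skills/do/scripts/task.py" status | grep -q "Worktree: True"; then
  WT="--worktree"
else
  WT=""
fi
codeagent-wrapper $WT --agent develop - . <<'EOF'
...
EOF
```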
 ## Hard Constraints
 
 1. **Never write code directly.** Delegate all code changes to `codeagent-wrapper` agents.
-2. **Pass complete context forward.** Every agent invocation includes the Context Pack.
-3. **Parallel-first.** Run independent tasks via `codeagent-wrapper --parallel`.
-4. **Update state after each phase.** Keep `.claude/do.{task_id}.local.md` current.
-5. **Expect long-running `codeagent-wrapper` calls.** High-reasoning modes can take a long time; stay in the orchestrator role and wait for agents to complete.
-6. **Timeouts are not an escape hatch.** If a `codeagent-wrapper` invocation times out/errors, retry (split/narrow the task if needed); never switch to direct implementation.
-7. **Respect worktree setting.** If `use_worktree: true`, always pass `--worktree` to develop agent calls.
+2. **Parallel-first.** Run independent tasks via `codeagent-wrapper --parallel`.
+3. **Update phase after each phase.** Use `task.py update-phase <N>`.
+4. **Expect long-running `codeagent-wrapper` calls.** High-reasoning modes can take a long time.
+5. **Timeouts are not an escape hatch.** If a call times out, retry with narrower scope.
+6. **Respect worktree setting.** If enabled, always pass `--worktree` to develop agent calls.
 
 ## Agents
 
@@ -115,28 +94,6 @@ To abort early, set `active: false` in the state file.
 - Missing documentation
 - Non-critical test coverage gaps
 
-## Context Pack Template
-
-```text
-## Original User Request
-<verbatim request>
-
-## Context Pack
-- Phase: <1-5 name>
-- Decisions: <requirements/constraints/choices>
-- Code-explorer output: <paste or "None">
-- Code-architect output: <paste or "None">
-- Code-reviewer output: <paste or "None">
-- Develop output: <paste or "None">
-- Open questions: <list or "None">
-
-## Current Task
-<specific task>
-
-## Acceptance Criteria
-<checkable outputs>
-```
-
 ## 5-Phase Workflow
 
 ### Phase 1: Understand (Parallel, No Interaction)
@@ -152,70 +109,37 @@ id: p1_requirements
|
||||
agent: code-architect
|
||||
workdir: .
|
||||
---CONTENT---
|
||||
## Original User Request
|
||||
/do <request>
|
||||
|
||||
## Context Pack
|
||||
- Code-explorer output: None
|
||||
- Code-architect output: None
|
||||
|
||||
 ## Current Task
-1. Analyze requirements completeness (score 1-10)
-2. Extract explicit requirements, constraints, acceptance criteria
-3. Identify blocking questions (issues that prevent implementation)
-4. Identify minor clarifications (nice-to-have but can proceed without)
+Analyze requirements completeness (score 1-10):
+1. Extract explicit requirements, constraints, acceptance criteria
+2. Identify blocking questions (issues that prevent implementation)
+3. Identify minor clarifications (nice-to-have but can proceed without)
|
||||
Output format:
|
||||
- Completeness score: X/10
|
||||
- Requirements: [list]
|
||||
- Non-goals: [list]
|
||||
- Blocking questions: [list, if any]
|
||||
- Minor clarifications: [list, if any]
|
||||
|
||||
## Acceptance Criteria
|
||||
Concrete checklist; blocking vs minor questions clearly separated.
|
||||
|
||||
---TASK---
|
||||
id: p1_similar_features
|
||||
agent: code-explorer
|
||||
workdir: .
|
||||
---CONTENT---
|
||||
## Original User Request
|
||||
/do <request>
|
||||
|
||||
## Current Task
|
||||
Find 1-3 similar features, trace end-to-end. Return: key files with line numbers, call flow, extension points.
|
||||
|
||||
## Acceptance Criteria
|
||||
Concrete file:line map + reuse points.
|
||||
|
||||
---TASK---
|
||||
id: p1_architecture
|
||||
agent: code-explorer
|
||||
workdir: .
|
||||
---CONTENT---
|
||||
## Original User Request
|
||||
/do <request>
|
||||
|
||||
## Current Task
|
||||
Map architecture for relevant subsystem. Return: module map + 5-10 key files.
|
||||
|
||||
## Acceptance Criteria
|
||||
Clear boundaries; file:line references.
|
||||
|
||||
---TASK---
|
||||
id: p1_conventions
|
||||
agent: code-explorer
|
||||
workdir: .
|
||||
---CONTENT---
|
||||
## Original User Request
|
||||
/do <request>
|
||||
|
||||
## Current Task
|
||||
Identify testing patterns, conventions, config. Return: test commands + file locations.
|
||||
|
||||
## Acceptance Criteria
|
||||
Test commands + relevant test file paths.
|
||||
EOF
|
||||
```
|
||||
|
||||
@@ -226,75 +150,74 @@ EOF
|
||||
 **Actions:**
 1. Review `p1_requirements` output for blocking questions
 2. **IF blocking questions exist** → Use AskUserQuestion
-3. **IF no blocking questions (completeness >= 8)** → Skip to Phase 3, log "Requirements clear, proceeding"
-
-```bash
-# Only if blocking questions exist:
-# Use AskUserQuestion with the blocking questions from Phase 1
-```
+3. **IF no blocking questions (completeness >= 8)** → Skip to Phase 3
|
||||
### Phase 3: Design (No Interaction)
|
||||
|
||||
**Goal:** Produce minimal-change implementation plan.
|
||||
|
||||
**Actions:** Invoke `code-architect` with all Phase 1 context to generate a single implementation plan.
|
||||
|
||||
```bash
|
||||
codeagent-wrapper --agent code-architect - . <<'EOF'
|
||||
## Original User Request
|
||||
/do <request>
|
||||
|
||||
## Context Pack
|
||||
- Code-explorer output: <ALL Phase 1 explorer outputs>
|
||||
- Code-architect output: <Phase 1 requirements + Phase 2 answers if any>
|
||||
|
||||
## Current Task
|
||||
Design minimal-change implementation:
|
||||
- Reuse existing abstractions
|
||||
- Minimize new files
|
||||
-- Follow established patterns from code-explorer output
+- Follow established patterns from Phase 1 exploration
|
||||
Output:
|
||||
- File touch list with specific changes
|
||||
- Build sequence
|
||||
- Test plan
|
||||
- Risks and mitigations
|
||||
|
||||
## Acceptance Criteria
|
||||
Concrete, implementable blueprint with minimal moving parts.
|
||||
EOF
|
||||
```
|
||||
|
||||
-### Phase 4: Implement + Review (Single Interaction Point)
+### Phase 4: Implement + Review
 
 **Goal:** Build feature and review in one phase.
 
 **Actions:**
+1. Invoke `develop` to implement. For full-stack projects, split into backend/frontend tasks with per-task `skills:` injection. Use `--parallel` when tasks can be split; use single agent when the change is small or single-domain.
 
-1. Invoke `develop` to implement (add `--worktree` if `use_worktree: true`):
+**Single-domain example** (add `--worktree` if enabled):
 
 ```bash
-# Check use_worktree from state file, add --worktree if true
-codeagent-wrapper --worktree --agent develop - . <<'EOF'
-## Original User Request
-/do <request>
-
-## Context Pack
-- Code-explorer output: <ALL Phase 1 outputs>
-- Code-architect output: <Phase 3 blueprint>
-
-## Current Task
-Implement with minimal change set following the blueprint.
+codeagent-wrapper --worktree --agent develop --skills golang-base-practices - . <<'EOF'
+Implement with minimal change set following the Phase 3 blueprint.
 - Follow Phase 1 patterns
 - Add/adjust tests per Phase 3 plan
 - Run narrowest relevant tests
 
 ## Acceptance Criteria
 Feature works end-to-end; tests pass; diff is minimal.
 EOF
 ```
 
-2. Run parallel reviews (no --worktree needed, read-only):
+**Full-stack parallel example** (adapt task IDs, skills, and content based on Phase 3 design):
+
+```bash
+codeagent-wrapper --worktree --parallel <<'EOF'
+---TASK---
+id: p4_backend
+agent: develop
+workdir: .
+skills: golang-base-practices
+---CONTENT---
+Implement backend changes following Phase 3 blueprint.
+- Follow Phase 1 patterns
+- Add/adjust tests per Phase 3 plan
+
+---TASK---
+id: p4_frontend
+agent: develop
+workdir: .
+skills: frontend-design,vercel-react-best-practices
+dependencies: p4_backend
+---CONTENT---
+Implement frontend changes following Phase 3 blueprint.
+- Follow Phase 1 patterns
+- Add/adjust tests per Phase 3 plan
+EOF
+```
+
+Note: Choose which skills to inject based on Phase 3 design output. Only inject skills relevant to each task's domain.
+
+2. Run parallel reviews:
 
```bash
|
||||
codeagent-wrapper --parallel <<'EOF'
|
||||
@@ -303,71 +226,35 @@ id: p4_correctness
|
||||
agent: code-reviewer
|
||||
workdir: .
|
||||
---CONTENT---
|
||||
## Original User Request
|
||||
/do <request>
|
||||
|
||||
## Context Pack
|
||||
- Code-architect output: <Phase 3 blueprint>
|
||||
- Develop output: <implementation output>
|
||||
|
||||
## Current Task
|
||||
Review for correctness, edge cases, failure modes.
|
||||
Classify each issue as BLOCKING or MINOR.
|
||||
|
||||
## Acceptance Criteria
|
||||
Issues with file:line references, severity, and concrete fixes.
|
||||
|
||||
---TASK---
|
||||
id: p4_simplicity
|
||||
agent: code-reviewer
|
||||
workdir: .
|
||||
---CONTENT---
|
||||
## Original User Request
|
||||
/do <request>
|
||||
|
||||
## Context Pack
|
||||
- Code-architect output: <Phase 3 blueprint>
|
||||
- Develop output: <implementation output>
|
||||
|
||||
## Current Task
|
||||
Review for KISS: remove bloat, collapse needless abstractions.
|
||||
Classify each issue as BLOCKING or MINOR.
|
||||
|
||||
## Acceptance Criteria
|
||||
Actionable simplifications with severity and justification.
|
||||
EOF
|
||||
```
|
||||
|
||||
 3. Handle review results:
-- **MINOR issues only** → Auto-fix via `develop` (with `--worktree` if enabled), no user interaction
+- **MINOR issues only** → Auto-fix via `develop`, no user interaction
 - **BLOCKING issues** → Use AskUserQuestion: "Fix now / Proceed as-is"
|
||||
### Phase 5: Complete (No Interaction)
|
||||
|
||||
**Goal:** Document what was built.
|
||||
|
||||
**Actions:** Invoke `code-reviewer` to produce summary:
|
||||
|
||||
```bash
|
||||
codeagent-wrapper --agent code-reviewer - . <<'EOF'
|
||||
## Original User Request
|
||||
/do <request>
|
||||
|
||||
## Context Pack
|
||||
- Code-architect output: <Phase 3 blueprint>
|
||||
- Code-reviewer output: <Phase 4 review outcomes>
|
||||
- Develop output: <Phase 4 implementation + fixes>
|
||||
|
||||
## Current Task
|
||||
Write completion summary:
|
||||
- What was built
|
||||
- Key decisions/tradeoffs
|
||||
- Files modified (paths)
|
||||
- How to verify (commands)
|
||||
- Follow-ups (optional)
|
||||
|
||||
## Acceptance Criteria
|
||||
Short, technical, actionable summary.
|
||||
EOF
|
||||
```
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"description": "do loop hook for 5-phase workflow",
|
||||
"description": "do loop hooks for 5-phase workflow",
|
||||
"hooks": {
|
||||
"Stop": [
|
||||
{
|
||||
@@ -10,6 +10,17 @@
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"SubagentStop": [
|
||||
{
|
||||
"matcher": "code-reviewer",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "python3 ${CLAUDE_PLUGIN_ROOT}/hooks/verify-loop.py"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,10 +1,20 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Stop hook for do skill workflow.
|
||||
|
||||
Checks if the do loop is complete before allowing exit.
|
||||
Uses the new task directory structure under .claude/do-tasks/.
|
||||
"""
|
||||
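# Hook I/O, for reference: the hook reads the Stop-event payload from stdin and, when the
# current task is unfinished, prints {"decision": "block", "reason": "..."} to stdout;
# otherwise it exits 0 with no output so the session is allowed to stop.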
|
||||
import glob
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
DIR_TASKS = ".claude/do-tasks"
|
||||
FILE_CURRENT_TASK = ".current-task"
|
||||
FILE_TASK_JSON = "task.json"
|
||||
|
||||
PHASE_NAMES = {
|
||||
1: "Understand",
|
||||
2: "Clarify",
|
||||
@@ -13,98 +23,69 @@ PHASE_NAMES = {
|
||||
5: "Complete",
|
||||
}
|
||||
|
||||
|
||||
def phase_name_for(n: int) -> str:
|
||||
return PHASE_NAMES.get(n, f"Phase {n}")
|
||||
|
||||
def frontmatter_get(file_path: str, key: str) -> str:
|
||||
|
||||
def get_current_task(project_dir: str) -> str | None:
|
||||
"""Read current task directory path."""
|
||||
current_task_file = os.path.join(project_dir, DIR_TASKS, FILE_CURRENT_TASK)
|
||||
if not os.path.exists(current_task_file):
|
||||
return None
|
||||
try:
|
||||
with open(file_path, "r", encoding="utf-8") as f:
|
||||
lines = f.readlines()
|
||||
with open(current_task_file, "r", encoding="utf-8") as f:
|
||||
content = f.read().strip()
|
||||
return content if content else None
|
||||
except Exception:
|
||||
return ""
|
||||
return None
|
||||
|
||||
if not lines or lines[0].strip() != "---":
|
||||
return ""
|
||||
|
||||
for i, line in enumerate(lines[1:], start=1):
|
||||
if line.strip() == "---":
|
||||
break
|
||||
match = re.match(rf"^{re.escape(key)}:\s*(.*)$", line)
|
||||
if match:
|
||||
value = match.group(1).strip()
|
||||
if value.startswith('"') and value.endswith('"'):
|
||||
value = value[1:-1]
|
||||
return value
|
||||
return ""
|
||||
|
||||
def get_body(file_path: str) -> str:
|
||||
def get_task_info(project_dir: str, task_dir: str) -> dict | None:
|
||||
"""Read task.json data."""
|
||||
task_json_path = os.path.join(project_dir, task_dir, FILE_TASK_JSON)
|
||||
if not os.path.exists(task_json_path):
|
||||
return None
|
||||
try:
|
||||
with open(file_path, "r", encoding="utf-8") as f:
|
||||
content = f.read()
|
||||
with open(task_json_path, "r", encoding="utf-8") as f:
|
||||
return json.load(f)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def check_task_complete(project_dir: str, task_dir: str) -> str:
|
||||
"""Check if task is complete. Returns blocking reason or empty string."""
|
||||
task_info = get_task_info(project_dir, task_dir)
|
||||
if not task_info:
|
||||
return ""
|
||||
|
||||
parts = content.split("---", 2)
|
||||
if len(parts) >= 3:
|
||||
return parts[2]
|
||||
return ""
|
||||
|
||||
def check_state_file(state_file: str, stdin_payload: str) -> str:
|
||||
active_raw = frontmatter_get(state_file, "active")
|
||||
active_lc = active_raw.lower()
|
||||
if active_lc not in ("true", "1", "yes", "on"):
|
||||
status = task_info.get("status", "")
|
||||
if status == "completed":
|
||||
return ""
|
||||
|
||||
current_phase_raw = frontmatter_get(state_file, "current_phase")
|
||||
max_phases_raw = frontmatter_get(state_file, "max_phases")
|
||||
phase_name = frontmatter_get(state_file, "phase_name")
|
||||
completion_promise = frontmatter_get(state_file, "completion_promise")
|
||||
current_phase = task_info.get("current_phase", 1)
|
||||
max_phases = task_info.get("max_phases", 5)
|
||||
phase_name = task_info.get("phase_name", phase_name_for(current_phase))
|
||||
completion_promise = task_info.get("completion_promise", "<promise>DO_COMPLETE</promise>")
|
||||
|
||||
try:
|
||||
current_phase = int(current_phase_raw)
|
||||
except (ValueError, TypeError):
|
||||
current_phase = 1
|
||||
|
||||
try:
|
||||
max_phases = int(max_phases_raw)
|
||||
except (ValueError, TypeError):
|
||||
max_phases = 5
|
||||
|
||||
if not phase_name:
|
||||
phase_name = phase_name_for(current_phase)
|
||||
|
||||
if not completion_promise:
|
||||
completion_promise = "<promise>DO_COMPLETE</promise>"
|
||||
|
||||
phases_done = current_phase >= max_phases
|
||||
|
||||
if phases_done:
|
||||
# All phases are complete: clean up the state file and allow exit
# The promise check is an optional confirmation and does not block exit
|
||||
try:
|
||||
os.remove(state_file)
|
||||
except Exception:
|
||||
pass
|
||||
if current_phase >= max_phases:
|
||||
# Task is at final phase, allow exit
|
||||
return ""
|
||||
|
||||
return (f"do loop incomplete: current phase {current_phase}/{max_phases} ({phase_name}). "
|
||||
f"Continue with remaining phases; update {state_file} current_phase/phase_name after each phase. "
|
||||
f"Include completion_promise in final output when done: {completion_promise}. "
|
||||
f"To exit early, set active to false.")
|
||||
return (
|
||||
f"do loop incomplete: current phase {current_phase}/{max_phases} ({phase_name}). "
|
||||
f"Continue with remaining phases; use 'task.py update-phase <N>' after each phase. "
|
||||
f"Include completion_promise in final output when done: {completion_promise}. "
|
||||
f"To exit early, set status to 'completed' in task.json."
|
||||
)
|
||||
|
||||
|
||||
def main():
|
||||
project_dir = os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd())
|
||||
state_dir = os.path.join(project_dir, ".claude")
|
||||
|
||||
do_task_id = os.environ.get("DO_TASK_ID", "")
|
||||
|
||||
if do_task_id:
|
||||
candidate = os.path.join(state_dir, f"do.{do_task_id}.local.md")
|
||||
state_files = [candidate] if os.path.isfile(candidate) else []
|
||||
else:
|
||||
state_files = glob.glob(os.path.join(state_dir, "do.*.local.md"))
|
||||
|
||||
if not state_files:
|
||||
task_dir = get_current_task(project_dir)
|
||||
if not task_dir:
|
||||
# No active task, allow exit
|
||||
sys.exit(0)
|
||||
|
||||
stdin_payload = ""
|
||||
@@ -114,18 +95,13 @@ def main():
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
blocking_reasons = []
|
||||
for state_file in state_files:
|
||||
reason = check_state_file(state_file, stdin_payload)
|
||||
if reason:
|
||||
blocking_reasons.append(reason)
|
||||
|
||||
if not blocking_reasons:
|
||||
reason = check_task_complete(project_dir, task_dir)
|
||||
if not reason:
|
||||
sys.exit(0)
|
||||
|
||||
combined_reason = " ".join(blocking_reasons)
|
||||
print(json.dumps({"decision": "block", "reason": combined_reason}))
|
||||
print(json.dumps({"decision": "block", "reason": reason}))
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
skills/do/hooks/verify-loop.py (new file, 218 lines)
@@ -0,0 +1,218 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Verify Loop Hook for do skill workflow.
|
||||
|
||||
SubagentStop hook that intercepts when code-reviewer agent tries to stop.
|
||||
Runs verification commands to ensure code quality before allowing exit.
|
||||
|
||||
Mechanism:
|
||||
- Intercepts SubagentStop event for code-reviewer agent
|
||||
- Runs verify commands from task.json if configured
|
||||
- Blocks stopping until verification passes
|
||||
- Has max iterations as safety limit (MAX_ITERATIONS=5)
|
||||
|
||||
State file: .claude/do-tasks/.verify-state.json
|
||||
"""
|
||||
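# Illustrative configuration (this commit's scripts do not write it automatically):
# a task.json that enables the loop might contain
#     {"verify_commands": ["go build ./...", "go test ./..."]}
# Any listed command that exits non-zero blocks the code-reviewer agent from stopping.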
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
# Configuration
|
||||
MAX_ITERATIONS = 5
|
||||
STATE_TIMEOUT_MINUTES = 30
|
||||
DIR_TASKS = ".claude/do-tasks"
|
||||
FILE_CURRENT_TASK = ".current-task"
|
||||
FILE_TASK_JSON = "task.json"
|
||||
STATE_FILE = ".claude/do-tasks/.verify-state.json"
|
||||
|
||||
# Only control loop for code-reviewer agent
|
||||
TARGET_AGENTS = {"code-reviewer"}
|
||||
|
||||
|
||||
def get_project_root(cwd: str) -> str | None:
|
||||
"""Find project root (directory with .claude folder)."""
|
||||
current = Path(cwd).resolve()
|
||||
while current != current.parent:
|
||||
if (current / ".claude").exists():
|
||||
return str(current)
|
||||
current = current.parent
|
||||
return None
|
||||
|
||||
|
||||
def get_current_task(project_root: str) -> str | None:
|
||||
"""Read current task directory path."""
|
||||
current_task_file = os.path.join(project_root, DIR_TASKS, FILE_CURRENT_TASK)
|
||||
if not os.path.exists(current_task_file):
|
||||
return None
|
||||
try:
|
||||
with open(current_task_file, "r", encoding="utf-8") as f:
|
||||
content = f.read().strip()
|
||||
return content if content else None
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def get_task_info(project_root: str, task_dir: str) -> dict | None:
|
||||
"""Read task.json data."""
|
||||
task_json_path = os.path.join(project_root, task_dir, FILE_TASK_JSON)
|
||||
if not os.path.exists(task_json_path):
|
||||
return None
|
||||
try:
|
||||
with open(task_json_path, "r", encoding="utf-8") as f:
|
||||
return json.load(f)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def get_verify_commands(task_info: dict) -> list[str]:
|
||||
"""Get verify commands from task.json."""
|
||||
return task_info.get("verify_commands", [])
|
||||
|
||||
|
||||
def run_verify_commands(project_root: str, commands: list[str]) -> tuple[bool, str]:
|
||||
"""Run verify commands and return (success, message)."""
|
||||
for cmd in commands:
|
||||
try:
|
||||
result = subprocess.run(
|
||||
cmd,
|
||||
shell=True,
|
||||
cwd=project_root,
|
||||
capture_output=True,
|
||||
timeout=120,
|
||||
)
|
||||
if result.returncode != 0:
|
||||
stderr = result.stderr.decode("utf-8", errors="replace")
|
||||
stdout = result.stdout.decode("utf-8", errors="replace")
|
||||
error_output = stderr or stdout
|
||||
if len(error_output) > 500:
|
||||
error_output = error_output[:500] + "..."
|
||||
return False, f"Command failed: {cmd}\n{error_output}"
|
||||
except subprocess.TimeoutExpired:
|
||||
return False, f"Command timed out: {cmd}"
|
||||
except Exception as e:
|
||||
return False, f"Command error: {cmd} - {str(e)}"
|
||||
return True, "All verify commands passed"
|
||||
|
||||
|
||||
def load_state(project_root: str) -> dict:
|
||||
"""Load verify loop state."""
|
||||
state_path = os.path.join(project_root, STATE_FILE)
|
||||
if not os.path.exists(state_path):
|
||||
return {"task": None, "iteration": 0, "started_at": None}
|
||||
try:
|
||||
with open(state_path, "r", encoding="utf-8") as f:
|
||||
return json.load(f)
|
||||
except Exception:
|
||||
return {"task": None, "iteration": 0, "started_at": None}
|
||||
|
||||
|
||||
def save_state(project_root: str, state: dict) -> None:
|
||||
"""Save verify loop state."""
|
||||
state_path = os.path.join(project_root, STATE_FILE)
|
||||
try:
|
||||
os.makedirs(os.path.dirname(state_path), exist_ok=True)
|
||||
with open(state_path, "w", encoding="utf-8") as f:
|
||||
json.dump(state, f, indent=2, ensure_ascii=False)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
def main():
|
||||
try:
|
||||
input_data = json.load(sys.stdin)
|
||||
except json.JSONDecodeError:
|
||||
sys.exit(0)
|
||||
|
||||
hook_event = input_data.get("hook_event_name", "")
|
||||
if hook_event != "SubagentStop":
|
||||
sys.exit(0)
|
||||
|
||||
subagent_type = input_data.get("subagent_type", "")
|
||||
agent_output = input_data.get("agent_output", "")
|
||||
cwd = input_data.get("cwd", os.getcwd())
|
||||
|
||||
if subagent_type not in TARGET_AGENTS:
|
||||
sys.exit(0)
|
||||
|
||||
project_root = get_project_root(cwd)
|
||||
if not project_root:
|
||||
sys.exit(0)
|
||||
|
||||
task_dir = get_current_task(project_root)
|
||||
if not task_dir:
|
||||
sys.exit(0)
|
||||
|
||||
task_info = get_task_info(project_root, task_dir)
|
||||
if not task_info:
|
||||
sys.exit(0)
|
||||
|
||||
verify_commands = get_verify_commands(task_info)
|
||||
if not verify_commands:
|
||||
# No verify commands configured, allow exit
|
||||
sys.exit(0)
|
||||
|
||||
# Load state
|
||||
state = load_state(project_root)
|
||||
|
||||
# Reset state if task changed or too old
|
||||
should_reset = False
|
||||
if state.get("task") != task_dir:
|
||||
should_reset = True
|
||||
elif state.get("started_at"):
|
||||
try:
|
||||
started = datetime.fromisoformat(state["started_at"])
|
||||
if (datetime.now() - started).total_seconds() > STATE_TIMEOUT_MINUTES * 60:
|
||||
should_reset = True
|
||||
except (ValueError, TypeError):
|
||||
should_reset = True
|
||||
|
||||
if should_reset:
|
||||
state = {
|
||||
"task": task_dir,
|
||||
"iteration": 0,
|
||||
"started_at": datetime.now().isoformat(),
|
||||
}
|
||||
|
||||
# Increment iteration
|
||||
state["iteration"] = state.get("iteration", 0) + 1
|
||||
current_iteration = state["iteration"]
|
||||
save_state(project_root, state)
|
||||
|
||||
# Safety check: max iterations
|
||||
if current_iteration >= MAX_ITERATIONS:
|
||||
state["iteration"] = 0
|
||||
save_state(project_root, state)
|
||||
output = {
|
||||
"decision": "allow",
|
||||
"reason": f"Max iterations ({MAX_ITERATIONS}) reached. Stopping to prevent infinite loop.",
|
||||
}
|
||||
print(json.dumps(output, ensure_ascii=False))
|
||||
sys.exit(0)
|
||||
|
||||
# Run verify commands
|
||||
passed, message = run_verify_commands(project_root, verify_commands)
|
||||
|
||||
if passed:
|
||||
state["iteration"] = 0
|
||||
save_state(project_root, state)
|
||||
output = {
|
||||
"decision": "allow",
|
||||
"reason": "All verify commands passed. Review phase complete.",
|
||||
}
|
||||
print(json.dumps(output, ensure_ascii=False))
|
||||
sys.exit(0)
|
||||
else:
|
||||
output = {
|
||||
"decision": "block",
|
||||
"reason": f"Iteration {current_iteration}/{MAX_ITERATIONS}. Verification failed:\n{message}\n\nPlease fix the issues and try again.",
|
||||
}
|
||||
print(json.dumps(output, ensure_ascii=False))
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
skills/do/scripts/get-context.py (new file, 149 lines)
@@ -0,0 +1,149 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Get context for current task.
|
||||
|
||||
Reads the current task's jsonl files and returns context for specified agent.
|
||||
Used by inject-context hook to build agent prompts.
|
||||
"""
|
||||
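# Input shape, for reference: each line of <agent>.jsonl is a JSON object that names a
# file to pull into the prompt, e.g. {"file": "docs/design.md"} or {"path": "notes/api.md"}
# (both example paths are illustrative, not files shipped with this skill).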
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
DIR_TASKS = ".claude/do-tasks"
|
||||
FILE_CURRENT_TASK = ".current-task"
|
||||
FILE_TASK_JSON = "task.json"
|
||||
|
||||
|
||||
def get_project_root() -> str:
|
||||
return os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd())
|
||||
|
||||
|
||||
def get_current_task(project_root: str) -> str | None:
|
||||
current_task_file = os.path.join(project_root, DIR_TASKS, FILE_CURRENT_TASK)
|
||||
if not os.path.exists(current_task_file):
|
||||
return None
|
||||
try:
|
||||
with open(current_task_file, "r", encoding="utf-8") as f:
|
||||
content = f.read().strip()
|
||||
return content if content else None
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def read_file_content(base_path: str, file_path: str) -> str | None:
|
||||
full_path = os.path.join(base_path, file_path)
|
||||
if os.path.exists(full_path) and os.path.isfile(full_path):
|
||||
try:
|
||||
with open(full_path, "r", encoding="utf-8") as f:
|
||||
return f.read()
|
||||
except Exception:
|
||||
return None
|
||||
return None
|
||||
|
||||
|
||||
def read_jsonl_entries(base_path: str, jsonl_path: str) -> list[tuple[str, str]]:
|
||||
full_path = os.path.join(base_path, jsonl_path)
|
||||
if not os.path.exists(full_path):
|
||||
return []
|
||||
|
||||
results = []
|
||||
try:
|
||||
with open(full_path, "r", encoding="utf-8") as f:
|
||||
for line in f:
|
||||
line = line.strip()
|
||||
if not line:
|
||||
continue
|
||||
try:
|
||||
item = json.loads(line)
|
||||
file_path = item.get("file") or item.get("path")
|
||||
if not file_path:
|
||||
continue
|
||||
content = read_file_content(base_path, file_path)
|
||||
if content:
|
||||
results.append((file_path, content))
|
||||
except json.JSONDecodeError:
|
||||
continue
|
||||
except Exception:
|
||||
pass
|
||||
return results
|
||||
|
||||
|
||||
def get_agent_context(project_root: str, task_dir: str, agent_type: str) -> str:
|
||||
"""Get complete context for specified agent."""
|
||||
context_parts = []
|
||||
|
||||
# Read agent-specific jsonl
|
||||
agent_jsonl = os.path.join(task_dir, f"{agent_type}.jsonl")
|
||||
agent_entries = read_jsonl_entries(project_root, agent_jsonl)
|
||||
|
||||
for file_path, content in agent_entries:
|
||||
context_parts.append(f"=== {file_path} ===\n{content}")
|
||||
|
||||
# Read prd.md
|
||||
prd_content = read_file_content(project_root, os.path.join(task_dir, "prd.md"))
|
||||
if prd_content:
|
||||
context_parts.append(f"=== {task_dir}/prd.md (Requirements) ===\n{prd_content}")
|
||||
|
||||
return "\n\n".join(context_parts)
|
||||
|
||||
|
||||
def get_task_info(project_root: str, task_dir: str) -> dict | None:
|
||||
"""Get task.json data."""
|
||||
task_json_path = os.path.join(project_root, task_dir, FILE_TASK_JSON)
|
||||
if not os.path.exists(task_json_path):
|
||||
return None
|
||||
try:
|
||||
with open(task_json_path, "r", encoding="utf-8") as f:
|
||||
return json.load(f)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def main():
|
||||
import argparse
|
||||
|
||||
parser = argparse.ArgumentParser(description="Get context for current task")
|
||||
parser.add_argument("agent", nargs="?", choices=["implement", "check", "debug"],
|
||||
help="Agent type (optional, returns task info if not specified)")
|
||||
parser.add_argument("--json", action="store_true", help="Output as JSON")
|
||||
args = parser.parse_args()
|
||||
|
||||
project_root = get_project_root()
|
||||
task_dir = get_current_task(project_root)
|
||||
|
||||
if not task_dir:
|
||||
if args.json:
|
||||
print(json.dumps({"error": "No active task"}))
|
||||
else:
|
||||
print("No active task.", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
task_info = get_task_info(project_root, task_dir)
|
||||
|
||||
if not args.agent:
|
||||
if args.json:
|
||||
print(json.dumps({"task_dir": task_dir, "task_info": task_info}))
|
||||
else:
|
||||
print(f"Task: {task_dir}")
|
||||
if task_info:
|
||||
print(f"Title: {task_info.get('title', 'N/A')}")
|
||||
print(f"Phase: {task_info.get('current_phase', '?')}/{task_info.get('max_phases', 5)}")
|
||||
sys.exit(0)
|
||||
|
||||
context = get_agent_context(project_root, task_dir, args.agent)
|
||||
|
||||
if args.json:
|
||||
print(json.dumps({
|
||||
"task_dir": task_dir,
|
||||
"agent": args.agent,
|
||||
"context": context,
|
||||
"task_info": task_info,
|
||||
}))
|
||||
else:
|
||||
print(context)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,57 +1,27 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Initialize do skill workflow - wrapper around task.py.
|
||||
|
||||
Creates a task directory under .claude/do-tasks/ with:
|
||||
- task.md: Task metadata (YAML frontmatter) + requirements (Markdown body)
|
||||
|
||||
If --worktree is specified, also creates a git worktree for isolated development.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import secrets
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
PHASE_NAMES = {
|
||||
1: "Understand",
|
||||
2: "Clarify",
|
||||
3: "Design",
|
||||
4: "Implement",
|
||||
5: "Complete",
|
||||
}
|
||||
from task import create_task, PHASE_NAMES
|
||||
|
||||
def phase_name_for(n: int) -> str:
|
||||
return PHASE_NAMES.get(n, f"Phase {n}")
|
||||
|
||||
def die(msg: str):
|
||||
print(f"❌ {msg}", file=sys.stderr)
|
||||
print(f"Error: {msg}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def create_worktree(project_dir: str, task_id: str) -> str:
|
||||
"""Create a git worktree for the task. Returns the worktree directory path."""
|
||||
# Get git root
|
||||
result = subprocess.run(
|
||||
["git", "-C", project_dir, "rev-parse", "--show-toplevel"],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
if result.returncode != 0:
|
||||
die(f"Not a git repository: {project_dir}")
|
||||
git_root = result.stdout.strip()
|
||||
|
||||
# Calculate paths
|
||||
worktree_dir = os.path.join(git_root, ".worktrees", f"do-{task_id}")
|
||||
branch_name = f"do/{task_id}"
|
||||
|
||||
# Create worktree with new branch
|
||||
result = subprocess.run(
|
||||
["git", "-C", git_root, "worktree", "add", "-b", branch_name, worktree_dir],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
if result.returncode != 0:
|
||||
die(f"Failed to create worktree: {result.stderr}")
|
||||
|
||||
return worktree_dir
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Creates (or overwrites) project state file: .claude/do.local.md"
|
||||
description="Initialize do skill workflow with task directory"
|
||||
)
|
||||
parser.add_argument("--max-phases", type=int, default=5, help="Default: 5")
|
||||
parser.add_argument(
|
||||
@@ -63,61 +33,26 @@ def main():
|
||||
parser.add_argument("prompt", nargs="+", help="Task description")
|
||||
args = parser.parse_args()
|
||||
|
||||
max_phases = args.max_phases
|
||||
completion_promise = args.completion_promise
|
||||
use_worktree = args.worktree
|
||||
prompt = " ".join(args.prompt)
|
||||
|
||||
if max_phases < 1:
|
||||
if args.max_phases < 1:
|
||||
die("--max-phases must be a positive integer")
|
||||
|
||||
project_dir = os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd())
|
||||
state_dir = os.path.join(project_dir, ".claude")
|
||||
prompt = " ".join(args.prompt)
|
||||
result = create_task(title=prompt, use_worktree=args.worktree)
|
||||
|
||||
task_id = f"{int(time.time())}-{os.getpid()}-{secrets.token_hex(4)}"
|
||||
state_file = os.path.join(state_dir, f"do.{task_id}.local.md")
|
||||
task_data = result["task_data"]
|
||||
worktree_dir = result.get("worktree_dir", "")
|
||||
|
||||
os.makedirs(state_dir, exist_ok=True)
|
||||
print(f"Initialized: {result['relative_path']}")
|
||||
print(f"task_id: {task_data['id']}")
|
||||
print(f"phase: 1/{task_data['max_phases']} ({PHASE_NAMES[1]})")
|
||||
print(f"completion_promise: {task_data['completion_promise']}")
|
||||
print(f"use_worktree: {task_data['use_worktree']}")
|
||||
print(f"export DO_TASK_DIR={result['relative_path']}")
|
||||
|
||||
# Create worktree if requested (before writing state file)
|
||||
worktree_dir = ""
|
||||
if use_worktree:
|
||||
worktree_dir = create_worktree(project_dir, task_id)
|
||||
|
||||
phase_name = phase_name_for(1)
|
||||
|
||||
content = f"""---
|
||||
active: true
|
||||
current_phase: 1
|
||||
phase_name: "{phase_name}"
|
||||
max_phases: {max_phases}
|
||||
completion_promise: "{completion_promise}"
|
||||
use_worktree: {str(use_worktree).lower()}
|
||||
worktree_dir: "{worktree_dir}"
|
||||
---
|
||||
|
||||
# do loop state
|
||||
|
||||
## Prompt
|
||||
{prompt}
|
||||
|
||||
## Notes
|
||||
- Update frontmatter current_phase/phase_name as you progress
|
||||
- When complete, include the frontmatter completion_promise in your final output
|
||||
"""
|
||||
|
||||
with open(state_file, "w", encoding="utf-8") as f:
|
||||
f.write(content)
|
||||
|
||||
print(f"Initialized: {state_file}")
|
||||
print(f"task_id: {task_id}")
|
||||
print(f"phase: 1/{max_phases} ({phase_name})")
|
||||
print(f"completion_promise: {completion_promise}")
|
||||
print(f"use_worktree: {use_worktree}")
|
||||
print(f"export DO_TASK_ID={task_id}")
|
||||
if worktree_dir:
|
||||
print(f"worktree_dir: {worktree_dir}")
|
||||
print(f"export DO_WORKTREE_DIR={worktree_dir}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
skills/do/scripts/task.py (new file, 434 lines)
@@ -0,0 +1,434 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Task Directory Management CLI for do skill workflow.
|
||||
|
||||
Commands:
|
||||
create <title> - Create a new task directory with task.md
|
||||
start <task-dir> - Set current task pointer
|
||||
finish - Clear current task pointer
|
||||
list - List active tasks
|
||||
status - Show current task status
|
||||
update-phase <N> - Update current phase
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import random
|
||||
import re
|
||||
import string
|
||||
import subprocess
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
# Directory constants
|
||||
DIR_TASKS = ".claude/do-tasks"
|
||||
FILE_CURRENT_TASK = ".current-task"
|
||||
FILE_TASK_MD = "task.md"
|
||||
|
||||
PHASE_NAMES = {
|
||||
1: "Understand",
|
||||
2: "Clarify",
|
||||
3: "Design",
|
||||
4: "Implement",
|
||||
5: "Complete",
|
||||
}
|
||||
|
||||
|
||||
def get_project_root() -> str:
|
||||
"""Get project root from env or cwd."""
|
||||
return os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd())
|
||||
|
||||
|
||||
def get_tasks_dir(project_root: str) -> str:
|
||||
"""Get tasks directory path."""
|
||||
return os.path.join(project_root, DIR_TASKS)
|
||||
|
||||
|
||||
def get_current_task_file(project_root: str) -> str:
|
||||
"""Get current task pointer file path."""
|
||||
return os.path.join(project_root, DIR_TASKS, FILE_CURRENT_TASK)
|
||||
|
||||
|
||||
def generate_task_id() -> str:
|
||||
"""Generate short task ID: MMDD-XXXX format."""
|
||||
date_part = datetime.now().strftime("%m%d")
|
||||
random_part = ''.join(random.choices(string.ascii_lowercase + string.digits, k=4))
|
||||
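# Produces IDs like "0214-a3k9" (illustrative) - MMDD plus four random lowercase/digit characters.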
return f"{date_part}-{random_part}"
|
||||
|
||||
|
||||
def read_task_md(task_md_path: str) -> dict | None:
|
||||
"""Read task.md and parse YAML frontmatter + body."""
|
||||
if not os.path.exists(task_md_path):
|
||||
return None
|
||||
|
||||
try:
|
||||
with open(task_md_path, "r", encoding="utf-8") as f:
|
||||
content = f.read()
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
# Parse YAML frontmatter
|
||||
match = re.match(r'^---\n(.*?)\n---\n(.*)$', content, re.DOTALL)
|
||||
if not match:
|
||||
return None
|
||||
|
||||
frontmatter_str = match.group(1)
|
||||
body = match.group(2)
|
||||
|
||||
# Simple YAML parsing (no external deps)
|
||||
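# Note: this minimal parser handles flat `key: value` pairs only (quoted strings,
# booleans, integers); nested YAML mappings or lists are not supported.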
frontmatter = {}
|
||||
for line in frontmatter_str.split('\n'):
|
||||
if ':' in line:
|
||||
key, value = line.split(':', 1)
|
||||
key = key.strip()
|
||||
value = value.strip()
|
||||
# Handle quoted strings
|
||||
if value.startswith('"') and value.endswith('"'):
|
||||
value = value[1:-1]
|
||||
elif value == 'true':
|
||||
value = True
|
||||
elif value == 'false':
|
||||
value = False
|
||||
elif value.isdigit():
|
||||
value = int(value)
|
||||
frontmatter[key] = value
|
||||
|
||||
return {"frontmatter": frontmatter, "body": body}
|
||||
|
||||
|
||||
def write_task_md(task_md_path: str, frontmatter: dict, body: str) -> bool:
|
||||
"""Write task.md with YAML frontmatter + body."""
|
||||
try:
|
||||
lines = ["---"]
|
||||
for key, value in frontmatter.items():
|
||||
if isinstance(value, bool):
|
||||
lines.append(f"{key}: {str(value).lower()}")
|
||||
elif isinstance(value, int):
|
||||
lines.append(f"{key}: {value}")
|
||||
elif isinstance(value, str) and ('<' in value or '>' in value or ':' in value):
|
||||
lines.append(f'{key}: "{value}"')
|
||||
else:
|
||||
lines.append(f'{key}: "{value}"' if isinstance(value, str) else f"{key}: {value}")
|
||||
lines.append("---")
|
||||
lines.append("")
|
||||
lines.append(body)
|
||||
|
||||
with open(task_md_path, "w", encoding="utf-8") as f:
|
||||
f.write('\n'.join(lines))
|
||||
return True
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
def create_worktree(project_root: str, task_id: str) -> str:
|
||||
"""Create a git worktree for the task. Returns the worktree directory path."""
|
||||
# Get git root
|
||||
result = subprocess.run(
|
||||
["git", "-C", project_root, "rev-parse", "--show-toplevel"],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
if result.returncode != 0:
|
||||
raise RuntimeError(f"Not a git repository: {project_root}")
|
||||
git_root = result.stdout.strip()
|
||||
|
||||
# Calculate paths
|
||||
worktree_dir = os.path.join(git_root, ".worktrees", f"do-{task_id}")
|
||||
branch_name = f"do/{task_id}"
|
||||
|
||||
# Create worktree with new branch
|
||||
result = subprocess.run(
|
||||
["git", "-C", git_root, "worktree", "add", "-b", branch_name, worktree_dir],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
if result.returncode != 0:
|
||||
raise RuntimeError(f"Failed to create worktree: {result.stderr}")
|
||||
|
||||
return worktree_dir
|
||||
|
||||
|
||||
def create_task(title: str, use_worktree: bool = False) -> dict:
|
||||
"""Create a new task directory with task.md."""
|
||||
project_root = get_project_root()
|
||||
tasks_dir = get_tasks_dir(project_root)
|
||||
os.makedirs(tasks_dir, exist_ok=True)
|
||||
|
||||
task_id = generate_task_id()
|
||||
task_dir = os.path.join(tasks_dir, task_id)
|
||||
|
||||
os.makedirs(task_dir, exist_ok=True)
|
||||
|
||||
# Create worktree if requested
|
||||
worktree_dir = ""
|
||||
if use_worktree:
|
||||
try:
|
||||
worktree_dir = create_worktree(project_root, task_id)
|
||||
except RuntimeError as e:
|
||||
print(f"Warning: {e}", file=sys.stderr)
|
||||
use_worktree = False
|
||||
|
||||
frontmatter = {
|
||||
"id": task_id,
|
||||
"title": title,
|
||||
"status": "in_progress",
|
||||
"current_phase": 1,
|
||||
"phase_name": PHASE_NAMES[1],
|
||||
"max_phases": 5,
|
||||
"use_worktree": use_worktree,
|
||||
"worktree_dir": worktree_dir,
|
||||
"created_at": datetime.now().isoformat(),
|
||||
"completion_promise": "<promise>DO_COMPLETE</promise>",
|
||||
}
|
||||
|
||||
body = f"""# Requirements
|
||||
|
||||
{title}
|
||||
|
||||
## Context
|
||||
|
||||
## Progress
|
||||
"""
|
||||
|
||||
task_md_path = os.path.join(task_dir, FILE_TASK_MD)
|
||||
write_task_md(task_md_path, frontmatter, body)
|
||||
|
||||
current_task_file = get_current_task_file(project_root)
|
||||
relative_task_dir = os.path.relpath(task_dir, project_root)
|
||||
with open(current_task_file, "w", encoding="utf-8") as f:
|
||||
f.write(relative_task_dir)
|
||||
|
||||
return {
|
||||
"task_dir": task_dir,
|
||||
"relative_path": relative_task_dir,
|
||||
"task_data": frontmatter,
|
||||
"worktree_dir": worktree_dir,
|
||||
}
|
||||
|
||||
|
||||
def get_current_task(project_root: str) -> str | None:
|
||||
"""Read current task directory path."""
|
||||
current_task_file = get_current_task_file(project_root)
|
||||
if not os.path.exists(current_task_file):
|
||||
return None
|
||||
|
||||
try:
|
||||
with open(current_task_file, "r", encoding="utf-8") as f:
|
||||
content = f.read().strip()
|
||||
return content if content else None
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def start_task(task_dir: str) -> bool:
|
||||
"""Set current task pointer."""
|
||||
project_root = get_project_root()
|
||||
tasks_dir = get_tasks_dir(project_root)
|
||||
|
||||
if os.path.isabs(task_dir):
|
||||
full_path = task_dir
|
||||
relative_path = os.path.relpath(task_dir, project_root)
|
||||
else:
|
||||
if not task_dir.startswith(DIR_TASKS):
|
||||
full_path = os.path.join(tasks_dir, task_dir)
|
||||
relative_path = os.path.join(DIR_TASKS, task_dir)
|
||||
else:
|
||||
full_path = os.path.join(project_root, task_dir)
|
||||
relative_path = task_dir
|
||||
|
||||
if not os.path.exists(full_path):
|
||||
print(f"Error: Task directory not found: {full_path}", file=sys.stderr)
|
||||
return False
|
||||
|
||||
current_task_file = get_current_task_file(project_root)
|
||||
os.makedirs(os.path.dirname(current_task_file), exist_ok=True)
|
||||
|
||||
with open(current_task_file, "w", encoding="utf-8") as f:
|
||||
f.write(relative_path)
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def finish_task() -> bool:
|
||||
"""Clear current task pointer."""
|
||||
project_root = get_project_root()
|
||||
current_task_file = get_current_task_file(project_root)
|
||||
|
||||
if os.path.exists(current_task_file):
|
||||
os.remove(current_task_file)
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def list_tasks() -> list[dict]:
|
||||
"""List all task directories."""
|
||||
project_root = get_project_root()
|
||||
tasks_dir = get_tasks_dir(project_root)
|
||||
|
||||
if not os.path.exists(tasks_dir):
|
||||
return []
|
||||
|
||||
tasks = []
|
||||
current_task = get_current_task(project_root)
|
||||
|
||||
for entry in sorted(os.listdir(tasks_dir), reverse=True):
|
||||
entry_path = os.path.join(tasks_dir, entry)
|
||||
if not os.path.isdir(entry_path):
|
||||
continue
|
||||
|
||||
task_md_path = os.path.join(entry_path, FILE_TASK_MD)
|
||||
if not os.path.exists(task_md_path):
|
||||
continue
|
||||
|
||||
parsed = read_task_md(task_md_path)
|
||||
if parsed:
|
||||
task_data = parsed["frontmatter"]
|
||||
else:
|
||||
task_data = {"id": entry, "title": entry, "status": "unknown"}
|
||||
|
||||
relative_path = os.path.join(DIR_TASKS, entry)
|
||||
task_data["path"] = relative_path
|
||||
task_data["is_current"] = current_task == relative_path
|
||||
tasks.append(task_data)
|
||||
|
||||
return tasks
|
||||
|
||||
|
||||
def get_status() -> dict | None:
|
||||
"""Get current task status."""
|
||||
project_root = get_project_root()
|
||||
current_task = get_current_task(project_root)
|
||||
|
||||
if not current_task:
|
||||
return None
|
||||
|
||||
task_dir = os.path.join(project_root, current_task)
|
||||
task_md_path = os.path.join(task_dir, FILE_TASK_MD)
|
||||
|
||||
parsed = read_task_md(task_md_path)
|
||||
if not parsed:
|
||||
return None
|
||||
|
||||
task_data = parsed["frontmatter"]
|
||||
task_data["path"] = current_task
|
||||
return task_data
|
||||
|
||||
|
||||
def update_phase(phase: int) -> bool:
|
||||
"""Update current task phase."""
|
||||
project_root = get_project_root()
|
||||
current_task = get_current_task(project_root)
|
||||
|
||||
if not current_task:
|
||||
print("Error: No active task.", file=sys.stderr)
|
||||
return False
|
||||
|
||||
task_dir = os.path.join(project_root, current_task)
|
||||
task_md_path = os.path.join(task_dir, FILE_TASK_MD)
|
||||
|
||||
parsed = read_task_md(task_md_path)
|
||||
if not parsed:
|
||||
print("Error: task.md not found or invalid.", file=sys.stderr)
|
||||
return False
|
||||
|
||||
frontmatter = parsed["frontmatter"]
|
||||
frontmatter["current_phase"] = phase
|
||||
frontmatter["phase_name"] = PHASE_NAMES.get(phase, f"Phase {phase}")
|
||||
|
||||
if not write_task_md(task_md_path, frontmatter, parsed["body"]):
|
||||
print("Error: Failed to write task.md.", file=sys.stderr)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Task directory management for do skill workflow"
|
||||
)
|
||||
subparsers = parser.add_subparsers(dest="command", help="Available commands")
|
||||
|
||||
# create command
|
||||
create_parser = subparsers.add_parser("create", help="Create a new task")
|
||||
create_parser.add_argument("title", nargs="+", help="Task title")
|
||||
create_parser.add_argument("--worktree", action="store_true", help="Enable worktree mode")
|
||||
|
||||
# start command
|
||||
start_parser = subparsers.add_parser("start", help="Set current task")
|
||||
start_parser.add_argument("task_dir", help="Task directory path")
|
||||
|
||||
# finish command
|
||||
subparsers.add_parser("finish", help="Clear current task")
|
||||
|
||||
# list command
|
||||
subparsers.add_parser("list", help="List all tasks")
|
||||
|
||||
# status command
|
||||
subparsers.add_parser("status", help="Show current task status")
|
||||
|
||||
# update-phase command
|
||||
phase_parser = subparsers.add_parser("update-phase", help="Update current phase")
|
||||
phase_parser.add_argument("phase", type=int, help="Phase number (1-5)")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.command == "create":
|
||||
title = " ".join(args.title)
|
||||
result = create_task(title, args.worktree)
|
||||
print(f"Created task: {result['relative_path']}")
|
||||
print(f"Task ID: {result['task_data']['id']}")
|
||||
print(f"Phase: 1/{result['task_data']['max_phases']} (Understand)")
|
||||
print(f"Worktree: {result['task_data']['use_worktree']}")
|
||||
|
||||
elif args.command == "start":
|
||||
if start_task(args.task_dir):
|
||||
print(f"Started task: {args.task_dir}")
|
||||
else:
|
||||
sys.exit(1)
|
||||
|
||||
elif args.command == "finish":
|
||||
if finish_task():
|
||||
print("Task finished, current task cleared.")
|
||||
else:
|
||||
sys.exit(1)
|
||||
|
||||
elif args.command == "list":
|
||||
tasks = list_tasks()
|
||||
if not tasks:
|
||||
print("No tasks found.")
|
||||
else:
|
||||
for task in tasks:
|
||||
marker = "* " if task.get("is_current") else " "
|
||||
phase = task.get("current_phase", "?")
|
||||
max_phase = task.get("max_phases", 5)
|
||||
status = task.get("status", "unknown")
|
||||
print(f"{marker}{task['id']} [{status}] phase {phase}/{max_phase}")
|
||||
print(f" {task.get('title', 'No title')}")
|
||||
|
||||
elif args.command == "status":
|
||||
status = get_status()
|
||||
if not status:
|
||||
print("No active task.")
|
||||
else:
|
||||
print(f"Task: {status['id']}")
|
||||
print(f"Title: {status.get('title', 'No title')}")
|
||||
print(f"Status: {status.get('status', 'unknown')}")
|
||||
print(f"Phase: {status.get('current_phase', '?')}/{status.get('max_phases', 5)}")
|
||||
print(f"Worktree: {status.get('use_worktree', False)}")
|
||||
print(f"Path: {status['path']}")
|
||||
|
||||
elif args.command == "update-phase":
|
||||
if update_phase(args.phase):
|
||||
phase_name = PHASE_NAMES.get(args.phase, f"Phase {args.phase}")
|
||||
print(f"Updated to phase {args.phase} ({phase_name})")
|
||||
else:
|
||||
sys.exit(1)
|
||||
|
||||
else:
|
||||
parser.print_help()
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||