Mirror of https://github.com/cexll/myclaude.git (synced 2026-02-05 02:30:26 +08:00)

Compare commits

21 Commits
- 7240e08900
- e122d8ff25
- 6985a30a6a
- dd4c12b8e2
- a88315d92d
- d1f13b3379
- 5d362852ab
- 238c7b9a13
- 0986fa82ee
- a989ce343c
- abe0839249
- d75c973f32
- e7f329940b
- 0fc5eaaa2d
- 420eb857ff
- 661656c587
- ed4b088631
- 55a574280a
- 8f05626075
- 4395c5785d
- b0d7a09ff2
.claude-plugin/marketplace.json

```diff
@@ -1,209 +1,54 @@
 {
-  "name": "claude-code-dev-workflows",
+  "$schema": "https://anthropic.com/claude-code/marketplace.schema.json",
+  "name": "myclaude",
+  "version": "5.6.1",
+  "description": "Professional multi-agent development workflows with OmO orchestration, Requirements-Driven and BMAD methodologies",
   "owner": {
-    "name": "Claude Code Dev Workflows",
-    "email": "contact@example.com",
-    "url": "https://github.com/cexll/myclaude"
-  },
-  "metadata": {
-    "description": "Professional multi-agent development workflows with Requirements-Driven and BMAD methodologies, featuring 16+ specialized agents and 12+ commands",
-    "version": "1.0.0"
+    "name": "cexll",
+    "email": "evanxian9@gmail.com"
   },
   "plugins": [
     {
-      "name": "requirements-driven-development",
-      "source": "./requirements-driven-workflow/",
-      "description": "Streamlined requirements-driven development workflow with 90% quality gates for practical feature implementation",
-      "version": "1.0.0",
-      "author": {
-        "name": "Claude Code Dev Workflows",
-        "url": "https://github.com/cexll/myclaude"
-      },
-      "homepage": "https://github.com/cexll/myclaude",
-      "repository": "https://github.com/cexll/myclaude",
-      "license": "MIT",
-      "keywords": [
-        "requirements",
-        "workflow",
-        "automation",
-        "quality-gates",
-        "feature-development",
-        "agile",
-        "specifications"
-      ],
-      "category": "workflows",
-      "strict": false,
-      "commands": [
-        "./commands/requirements-pilot.md"
-      ],
-      "agents": [
-        "./agents/requirements-generate.md",
-        "./agents/requirements-code.md",
-        "./agents/requirements-testing.md",
-        "./agents/requirements-review.md"
-      ]
+      "name": "omo",
+      "description": "Multi-agent orchestration for code analysis, bug investigation, fix planning, and implementation with intelligent routing to specialized agents",
+      "version": "5.6.1",
+      "source": "./skills/omo",
+      "category": "development"
     },
     {
-      "name": "bmad-agile-workflow",
-      "source": "./bmad-agile-workflow/",
+      "name": "dev",
+      "description": "Lightweight development workflow with requirements clarification, parallel codex execution, and mandatory 90% test coverage",
+      "version": "5.6.1",
+      "source": "./dev-workflow",
+      "category": "development"
+    },
+    {
+      "name": "requirements",
+      "description": "Requirements-driven development workflow with quality gates for practical feature implementation",
+      "version": "5.6.1",
+      "source": "./requirements-driven-workflow",
+      "category": "development"
+    },
+    {
+      "name": "bmad",
       "description": "Full BMAD agile workflow with role-based agents (PO, Architect, SM, Dev, QA) and interactive approval gates",
-      "version": "1.0.0",
-      "author": {
-        "name": "Claude Code Dev Workflows",
-        "url": "https://github.com/cexll/myclaude"
-      },
-      "homepage": "https://github.com/cexll/myclaude",
-      "repository": "https://github.com/cexll/myclaude",
-      "license": "MIT",
-      "keywords": [
-        "bmad",
-        "agile",
-        "scrum",
-        "product-owner",
-        "architect",
-        "developer",
-        "qa",
-        "workflow-orchestration"
-      ],
-      "category": "workflows",
-      "strict": false,
-      "commands": [
-        "./commands/bmad-pilot.md"
-      ],
-      "agents": [
-        "./agents/bmad-po.md",
-        "./agents/bmad-architect.md",
-        "./agents/bmad-sm.md",
-        "./agents/bmad-dev.md",
-        "./agents/bmad-qa.md",
-        "./agents/bmad-orchestrator.md",
-        "./agents/bmad-review.md"
-      ]
+      "version": "5.6.1",
+      "source": "./bmad-agile-workflow",
+      "category": "development"
     },
     {
-      "name": "development-essentials",
-      "source": "./development-essentials/",
+      "name": "dev-kit",
       "description": "Essential development commands for coding, debugging, testing, optimization, and documentation",
-      "version": "1.0.0",
-      "author": {
-        "name": "Claude Code Dev Workflows",
-        "url": "https://github.com/cexll/myclaude"
-      },
-      "homepage": "https://github.com/cexll/myclaude",
-      "repository": "https://github.com/cexll/myclaude",
-      "license": "MIT",
-      "keywords": [
-        "code",
-        "debug",
-        "test",
-        "optimize",
-        "review",
-        "bugfix",
-        "refactor",
-        "documentation"
-      ],
-      "category": "essentials",
-      "strict": false,
-      "commands": [
-        "./commands/code.md",
-        "./commands/debug.md",
-        "./commands/test.md",
-        "./commands/optimize.md",
-        "./commands/review.md",
-        "./commands/bugfix.md",
-        "./commands/refactor.md",
-        "./commands/docs.md",
-        "./commands/ask.md",
-        "./commands/think.md"
-      ],
-      "agents": [
-        "./agents/code.md",
-        "./agents/bugfix.md",
-        "./agents/bugfix-verify.md",
-        "./agents/optimize.md",
-        "./agents/debug.md"
-      ]
+      "version": "5.6.1",
+      "source": "./development-essentials",
+      "category": "productivity"
     },
     {
-      "name": "codex-cli",
-      "source": "./skills/codex/",
-      "description": "Execute Codex CLI for code analysis, refactoring, and automated code changes with file references (@syntax) and structured output",
-      "version": "1.0.0",
-      "author": {
-        "name": "Claude Code Dev Workflows",
-        "url": "https://github.com/cexll/myclaude"
-      },
-      "homepage": "https://github.com/cexll/myclaude",
-      "repository": "https://github.com/cexll/myclaude",
-      "license": "MIT",
-      "keywords": [
-        "codex",
-        "code-analysis",
-        "refactoring",
-        "automation",
-        "gpt-5",
-        "ai-coding"
-      ],
-      "category": "essentials",
-      "strict": false,
-      "skills": [
-        "./SKILL.md"
-      ]
-    },
-    {
-      "name": "gemini-cli",
-      "source": "./skills/gemini/",
-      "description": "Execute Gemini CLI for AI-powered code analysis and generation with Google's latest Gemini models",
-      "version": "1.0.0",
-      "author": {
-        "name": "Claude Code Dev Workflows",
-        "url": "https://github.com/cexll/myclaude"
-      },
-      "homepage": "https://github.com/cexll/myclaude",
-      "repository": "https://github.com/cexll/myclaude",
-      "license": "MIT",
-      "keywords": [
-        "gemini",
-        "google-ai",
-        "code-analysis",
-        "code-generation",
-        "ai-reasoning"
-      ],
-      "category": "essentials",
-      "strict": false,
-      "skills": [
-        "./SKILL.md"
-      ]
-    },
-    {
-      "name": "dev-workflow",
-      "source": "./dev-workflow/",
-      "description": "Minimal lightweight development workflow with requirements clarification, parallel codex execution, and mandatory 90% test coverage",
-      "version": "1.0.0",
-      "author": {
-        "name": "Claude Code Dev Workflows",
-        "url": "https://github.com/cexll/myclaude"
-      },
-      "homepage": "https://github.com/cexll/myclaude",
-      "repository": "https://github.com/cexll/myclaude",
-      "license": "MIT",
-      "keywords": [
-        "dev",
-        "workflow",
-        "codex",
-        "testing",
-        "coverage",
-        "concurrent",
-        "lightweight"
-      ],
-      "category": "workflows",
-      "strict": false,
-      "commands": [
-        "./commands/dev.md"
-      ],
-      "agents": [
-        "./agents/dev-plan-generator.md"
-      ]
+      "name": "sparv",
+      "description": "Minimal SPARV workflow (Specify→Plan→Act→Review→Vault) with 10-point spec gate, unified journal, 2-action saves, 3-failure protocol, and EHRB risk detection",
+      "version": "1.1.0",
+      "source": "./skills/sparv",
+      "category": "development"
     }
   ]
 }
```
CHANGELOG.md (60 lines changed)

```diff
@@ -2,6 +2,66 @@
 
 All notable changes to this project will be documented in this file.
 
+## [5.6.4] - 2026-01-15
+
+### 🚀 Features
+
+- add reasoning effort config for codex backend
+- default to skip-permissions and bypass-sandbox
+- add multi-agent support with yolo mode
+- add omo module for multi-agent orchestration
+- add intelligent backend selection based on task complexity (#61)
+- v5.4.0 structured execution report (#94)
+- add millisecond-precision timestamps to all log entries (#91)
+- skill-install install script and security scan
+- add uninstall scripts with selective module removal
+
+### 🐛 Bug Fixes
+
+- filter codex stderr noise logs
+- use config override for codex reasoning effort
+- propagate SkipPermissions to parallel tasks (#113)
+- add timeout for Windows process termination
+- reject dash as workdir parameter (#118)
+- add sleep in fake script to prevent CI race condition
+- fix gemini env load
+- fix omo
+- fix codeagent skill TaskOutput
+- fix Gemini init event session_id not being extracted (#111)
+- Windows backend exit: kill process tree via taskkill + turn.completed support (#108)
+- support model parameter for all backends, auto-inject from settings (#105)
+- replace setx with reg add to avoid 1024-char PATH truncation (#101)
+- remove log noise for unknown event formats (#96)
+- prevent duplicate PATH entries on reinstall (#95)
+- Minor issues #12 and #13 - ASCII mode and performance optimization
+- correct settings.json filename and bump version to v5.2.8
+- allow claude backend to read env from setting.json while preventing recursion (#92)
+- comprehensive security and quality improvements for PR #85 & #87 (#90)
+- Improve backend termination after message and extend timeout (#86)
+- deduplicate Parser parsing + critical bug fixes + PR #86 compatibility (#88)
+- filter noisy stderr output from gemini backend (#83)
+- fix wsl install.sh formatting issue (#78)
+- fix PID mix-ups in multi-backend parallel logs and remove wrapper format (#74) (#76)
+
+### 🚜 Refactor
+
+- remove sisyphus agent and unused code
+- streamline agent documentation and remove sisyphus
+
+### 📚 Documentation
+
+- add OmO workflow to README and fix plugin marketplace structure
+- update FAQ for default bypass/skip-permissions behavior
+- add FAQ section for common questions
+- update troubleshooting with idempotent PATH commands (#95)
+
+### 💼 Other
+
+- add test-cases skill
+- add browser skill
+- BMAD and Requirements-Driven support generating matching documents based on semantics (#82)
+- update all readme
+
 ## [5.2.4] - 2025-12-16
```
README.md (37 lines changed)

````diff
@@ -7,7 +7,7 @@
 [![…](…)](https://www.gnu.org/licenses/agpl-3.0)
 [![…](…)](https://claude.ai/code)
-[![…](…)](https://github.com/cexll/myclaude)
+[![…](…)](https://github.com/cexll/myclaude)
 
 > AI-powered development automation with multi-backend execution (Codex/Claude/Gemini)
 
@@ -35,6 +35,41 @@ python3 install.py --install-dir ~/.claude
 
 ## Workflows Overview
 
+### 0. OmO Multi-Agent Orchestrator (Recommended for Complex Tasks)
+
+**Intelligent multi-agent orchestration that routes tasks to specialized agents based on risk signals.**
+
+```bash
+/omo "analyze and fix this authentication bug"
+```
+
+**Agent Hierarchy:**
+| Agent | Role | Backend | Model |
+|-------|------|---------|-------|
+| `oracle` | Technical advisor | Claude | claude-opus-4-5 |
+| `librarian` | External research | Claude | claude-sonnet-4-5 |
+| `explore` | Codebase search | OpenCode | grok-code |
+| `develop` | Code implementation | Codex | gpt-5.2 |
+| `frontend-ui-ux-engineer` | UI/UX specialist | Gemini | gemini-3-pro |
+| `document-writer` | Documentation | Gemini | gemini-3-flash |
+
+**Routing Signals (Not Fixed Pipeline):**
+- Code location unclear → `explore`
+- External library/API → `librarian`
+- Risky/multi-file change → `oracle`
+- Implementation needed → `develop` / `frontend-ui-ux-engineer`
+
+**Common Recipes:**
+- Explain code: `explore`
+- Small fix with known location: `develop` directly
+- Bug fix, location unknown: `explore → develop`
+- Cross-cutting refactor: `explore → oracle → develop`
+- External API integration: `explore + librarian → oracle → develop`
+
+**Best For:** Complex bug investigation, multi-file refactoring, architecture decisions
+
+---
+
 ### 1. Dev Workflow (Recommended)
 
 **The primary workflow for most development tasks.**
````
README_CN.md (37 lines changed)

````diff
@@ -2,7 +2,7 @@
 [![…](…)](https://www.gnu.org/licenses/agpl-3.0)
 [![…](…)](https://claude.ai/code)
-[![…](…)](https://github.com/cexll/myclaude)
+[![…](…)](https://github.com/cexll/myclaude)
 
 > AI 驱动的开发自动化 - 多后端执行架构 (Codex/Claude/Gemini)
 
@@ -30,6 +30,41 @@ python3 install.py --install-dir ~/.claude
 
 ## 工作流概览
 
+### 0. OmO 多智能体编排器(复杂任务推荐)
+
+**基于风险信号智能路由任务到专业智能体的多智能体编排系统。**
+
+```bash
+/omo "分析并修复这个认证 bug"
+```
+
+**智能体层级:**
+| 智能体 | 角色 | 后端 | 模型 |
+|-------|------|------|------|
+| `oracle` | 技术顾问 | Claude | claude-opus-4-5 |
+| `librarian` | 外部研究 | Claude | claude-sonnet-4-5 |
+| `explore` | 代码库搜索 | OpenCode | grok-code |
+| `develop` | 代码实现 | Codex | gpt-5.2 |
+| `frontend-ui-ux-engineer` | UI/UX 专家 | Gemini | gemini-3-pro |
+| `document-writer` | 文档撰写 | Gemini | gemini-3-flash |
+
+**路由信号(非固定流水线):**
+- 代码位置不明确 → `explore`
+- 外部库/API → `librarian`
+- 高风险/多文件变更 → `oracle`
+- 需要实现 → `develop` / `frontend-ui-ux-engineer`
+
+**常用配方:**
+- 解释代码:`explore`
+- 位置已知的小修复:直接 `develop`
+- Bug 修复,位置未知:`explore → develop`
+- 跨模块重构:`explore → oracle → develop`
+- 外部 API 集成:`explore + librarian → oracle → develop`
+
+**适用场景:** 复杂 bug 调查、多文件重构、架构决策
+
+---
+
 ### 1. Dev 工作流(推荐)
 
 **大多数开发任务的首选工作流。**
````
```diff
@@ -1,37 +0,0 @@
-{
-  "name": "bmad-agile-workflow",
-  "source": "./",
-  "description": "Full BMAD agile workflow with role-based agents (PO, Architect, SM, Dev, QA) and interactive approval gates",
-  "version": "1.0.0",
-  "author": {
-    "name": "Claude Code Dev Workflows",
-    "url": "https://github.com/cexll/myclaude"
-  },
-  "homepage": "https://github.com/cexll/myclaude",
-  "repository": "https://github.com/cexll/myclaude",
-  "license": "MIT",
-  "keywords": [
-    "bmad",
-    "agile",
-    "scrum",
-    "product-owner",
-    "architect",
-    "developer",
-    "qa",
-    "workflow-orchestration"
-  ],
-  "category": "workflows",
-  "strict": false,
-  "commands": [
-    "./commands/bmad-pilot.md"
-  ],
-  "agents": [
-    "./agents/bmad-po.md",
-    "./agents/bmad-architect.md",
-    "./agents/bmad-sm.md",
-    "./agents/bmad-dev.md",
-    "./agents/bmad-qa.md",
-    "./agents/bmad-orchestrator.md",
-    "./agents/bmad-review.md"
-  ]
-}
```
bmad-agile-workflow/.claude-plugin/plugin.json (new file, 9 lines)

```diff
@@ -0,0 +1,9 @@
+{
+  "name": "bmad",
+  "description": "Full BMAD agile workflow with role-based agents (PO, Architect, SM, Dev, QA) and interactive approval gates",
+  "version": "5.6.1",
+  "author": {
+    "name": "cexll",
+    "email": "cexll@cexll.com"
+  }
+}
```
```diff
@@ -26,9 +26,8 @@ var defaultModelsConfig = ModelsConfig{
 	DefaultBackend: "opencode",
 	DefaultModel:   "opencode/grok-code",
 	Agents: map[string]AgentModelConfig{
-		"sisyphus":  {Backend: "claude", Model: "claude-sonnet-4-20250514", PromptFile: "~/.claude/skills/omo/references/sisyphus.md", Description: "Primary orchestrator"},
-		"oracle":    {Backend: "claude", Model: "claude-sonnet-4-20250514", PromptFile: "~/.claude/skills/omo/references/oracle.md", Description: "Technical advisor"},
-		"librarian": {Backend: "claude", Model: "claude-sonnet-4-5-20250514", PromptFile: "~/.claude/skills/omo/references/librarian.md", Description: "Researcher"},
+		"oracle":    {Backend: "claude", Model: "claude-opus-4-5-20251101", PromptFile: "~/.claude/skills/omo/references/oracle.md", Description: "Technical advisor"},
+		"librarian": {Backend: "claude", Model: "claude-sonnet-4-5-20250929", PromptFile: "~/.claude/skills/omo/references/librarian.md", Description: "Researcher"},
 		"explore": {Backend: "opencode", Model: "opencode/grok-code", PromptFile: "~/.claude/skills/omo/references/explore.md", Description: "Code search"},
 		"develop": {Backend: "codex", Model: "", PromptFile: "~/.claude/skills/omo/references/develop.md", Description: "Code development"},
 		"frontend-ui-ux-engineer": {Backend: "gemini", Model: "", PromptFile: "~/.claude/skills/omo/references/frontend-ui-ux-engineer.md", Description: "Frontend engineer"},
```
```diff
@@ -19,9 +19,8 @@ func TestResolveAgentConfig_Defaults(t *testing.T) {
 		wantModel      string
 		wantPromptFile string
 	}{
-		{"sisyphus", "claude", "claude-sonnet-4-20250514", "~/.claude/skills/omo/references/sisyphus.md"},
-		{"oracle", "claude", "claude-sonnet-4-20250514", "~/.claude/skills/omo/references/oracle.md"},
-		{"librarian", "claude", "claude-sonnet-4-5-20250514", "~/.claude/skills/omo/references/librarian.md"},
+		{"oracle", "claude", "claude-opus-4-5-20251101", "~/.claude/skills/omo/references/oracle.md"},
+		{"librarian", "claude", "claude-sonnet-4-5-20250929", "~/.claude/skills/omo/references/librarian.md"},
 		{"explore", "opencode", "opencode/grok-code", "~/.claude/skills/omo/references/explore.md"},
 		{"frontend-ui-ux-engineer", "gemini", "", "~/.claude/skills/omo/references/frontend-ui-ux-engineer.md"},
 		{"document-writer", "gemini", "", "~/.claude/skills/omo/references/document-writer.md"},
@@ -69,8 +68,8 @@ func TestLoadModelsConfig_NoFile(t *testing.T) {
 	if cfg.DefaultBackend != "opencode" {
 		t.Errorf("DefaultBackend = %q, want %q", cfg.DefaultBackend, "opencode")
 	}
-	if len(cfg.Agents) != 7 {
-		t.Errorf("len(Agents) = %d, want 7", len(cfg.Agents))
+	if len(cfg.Agents) != 6 {
+		t.Errorf("len(Agents) = %d, want 6", len(cfg.Agents))
 	}
 }
@@ -123,8 +122,8 @@ func TestLoadModelsConfig_WithFile(t *testing.T) {
 	}
 
 	// Check that defaults are merged
-	if _, ok := cfg.Agents["sisyphus"]; !ok {
-		t.Error("default agent sisyphus should be merged")
+	if _, ok := cfg.Agents["oracle"]; !ok {
+		t.Error("default agent oracle should be merged")
 	}
 }
@@ -189,6 +188,15 @@ func TestOpencodeBackend_BuildArgs(t *testing.T) {
 			t.Errorf("got %v, want %v", got, want)
 		}
 	})
+
+	t.Run("stdin mode omits dash", func(t *testing.T) {
+		cfg := &Config{Mode: "new"}
+		got := backend.BuildArgs(cfg, "-")
+		want := []string{"run", "--format", "json"}
+		if !reflect.DeepEqual(got, want) {
+			t.Errorf("got %v, want %v", got, want)
+		}
+	})
 }
 
 func TestOpencodeBackend_Interface(t *testing.T) {
```
```diff
@@ -14,7 +14,7 @@ func TestValidateAgentName(t *testing.T) {
 		input   string
 		wantErr bool
 	}{
-		{name: "simple", input: "sisyphus", wantErr: false},
+		{name: "simple", input: "develop", wantErr: false},
 		{name: "upper", input: "ABC", wantErr: false},
 		{name: "digits", input: "a1", wantErr: false},
 		{name: "dash underscore", input: "a-b_c", wantErr: false},
```
```diff
@@ -204,7 +204,10 @@ func (OpencodeBackend) BuildArgs(cfg *Config, targetArg string) []string {
 	if cfg.Mode == "resume" && cfg.SessionID != "" {
 		args = append(args, "-s", cfg.SessionID)
 	}
-	args = append(args, "--format", "json", targetArg)
+	args = append(args, "--format", "json")
+	if targetArg != "-" {
+		args = append(args, targetArg)
+	}
 	return args
 }
```
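With this change, stdin-driven runs stop forwarding the `-` placeholder to opencode. Judging from the `stdin mode omits dash` test above, the built argument lists are roughly (binary name shown for illustration):

```text
before: opencode run --format json -
after:  opencode run --format json    # task text arrives on stdin
```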
```diff
@@ -46,6 +46,7 @@ type TaskSpec struct {
 	ReasoningEffort string          `json:"reasoning_effort,omitempty"`
 	Agent           string          `json:"agent,omitempty"`
 	PromptFile      string          `json:"prompt_file,omitempty"`
+	SkipPermissions bool            `json:"skip_permissions,omitempty"`
 	Mode            string          `json:"-"`
 	UseStdin        bool            `json:"-"`
 	Context         context.Context `json:"-"`
@@ -184,6 +185,10 @@ func parseParallelConfig(data []byte) (*ParallelConfig, error) {
 		case "id":
 			task.ID = value
 		case "workdir":
+			// Validate workdir: "-" is not a valid directory
+			if value == "-" {
+				return nil, fmt.Errorf("task block #%d has invalid workdir: '-' is not a valid directory path", taskIndex)
+			}
 			task.WorkDir = value
 		case "session_id":
 			task.SessionID = value
@@ -197,6 +202,12 @@ func parseParallelConfig(data []byte) (*ParallelConfig, error) {
 		case "agent":
 			agentSpecified = true
 			task.Agent = value
+		case "skip_permissions", "skip-permissions":
+			if value == "" {
+				task.SkipPermissions = true
+				continue
+			}
+			task.SkipPermissions = parseBoolFlag(value, false)
 		case "dependencies":
 			for _, dep := range strings.Split(value, ",") {
 				dep = strings.TrimSpace(dep)
@@ -417,6 +428,10 @@ func parseArgs() (*Config, error) {
 		cfg.Task = args[2]
 		cfg.ExplicitStdin = (args[2] == "-")
 		if len(args) > 3 {
+			// Validate workdir: "-" is not a valid directory
+			if args[3] == "-" {
+				return nil, fmt.Errorf("invalid workdir: '-' is not a valid directory path")
+			}
 			cfg.WorkDir = args[3]
 		}
 	} else {
@@ -424,6 +439,10 @@ func parseArgs() (*Config, error) {
 		cfg.Task = args[0]
 		cfg.ExplicitStdin = (args[0] == "-")
 		if len(args) > 1 {
+			// Validate workdir: "-" is not a valid directory
+			if args[1] == "-" {
+				return nil, fmt.Errorf("invalid workdir: '-' is not a valid directory path")
+			}
 			cfg.WorkDir = args[1]
 		}
 	}
```
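Taken together, a parallel-mode stdin task block exercising the new parsing might look like this (illustrative values; the key names and `---TASK---`/`---CONTENT---` framing follow the parser and the tests later in this diff — a bare `skip_permissions:` with no value counts as true, and `workdir: -` is now rejected):

```text
---TASK---
id: task-1
backend: claude
workdir: ./service
skip_permissions: true
---CONTENT---
do something
```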
```diff
@@ -17,6 +17,7 @@ import (
 )
 
 const postMessageTerminateDelay = 1 * time.Second
+const forceKillWaitTimeout = 5 * time.Second
 
 // commandRunner abstracts exec.Cmd for testability
 type commandRunner interface {
@@ -765,7 +766,7 @@ func buildCodexArgs(cfg *Config, targetArg string) []string {
 	}
 
 	if reasoningEffort := strings.TrimSpace(cfg.ReasoningEffort); reasoningEffort != "" {
-		args = append(args, "--reasoning-effort", reasoningEffort)
+		args = append(args, "-c", "model_reasoning_effort="+reasoningEffort)
 	}
 
 	args = append(args, "--skip-git-repo-check")
```
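Per the updated expectations in the arg-construction test further down, a codex run with reasoning effort set now builds arguments along these lines (the `codex` binary name and the trailing task argument are assumptions for illustration):

```text
codex e -c model_reasoning_effort=high --skip-git-repo-check -C /test/dir --json <task>
```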
```diff
@@ -814,6 +815,7 @@ func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
 		WorkDir:         taskSpec.WorkDir,
 		Model:           taskSpec.Model,
 		ReasoningEffort: taskSpec.ReasoningEffort,
+		SkipPermissions: taskSpec.SkipPermissions,
 		Backend:         defaultBackendName,
 	}
 
@@ -981,6 +983,9 @@ func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
 		if cfg.Backend == "gemini" {
 			stderrFilter = newFilteringWriter(os.Stderr, geminiNoisePatterns)
 			stderrOut = stderrFilter
+		} else if cfg.Backend == "codex" {
+			stderrFilter = newFilteringWriter(os.Stderr, codexNoisePatterns)
+			stderrOut = stderrFilter
 		}
 		stderrWriters = append([]io.Writer{stderrOut}, stderrWriters...)
 	}
@@ -1109,7 +1114,8 @@ func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe
 waitLoop:
 	for {
 		select {
-		case waitErr = <-waitCh:
+		case err := <-waitCh:
+			waitErr = err
 			break waitLoop
 		case <-ctx.Done():
 			ctxCancelled = true
@@ -1120,8 +1126,17 @@ waitLoop:
 				terminated = true
 			}
 		}
-		waitErr = <-waitCh
-		break waitLoop
+		for {
+			select {
+			case err := <-waitCh:
+				waitErr = err
+				break waitLoop
+			case <-time.After(forceKillWaitTimeout):
+				if proc := cmd.Process(); proc != nil {
+					_ = proc.Kill()
+				}
+			}
+		}
 	case <-messageTimerCh:
 		forcedAfterComplete = true
 		messageTimerCh = nil
@@ -1135,8 +1150,17 @@ waitLoop:
 		// Close pipes to unblock stream readers, then wait for process exit.
 		closeWithReason(stdout, "terminate")
 		closeWithReason(stderr, "terminate")
-		waitErr = <-waitCh
-		break waitLoop
+		for {
+			select {
+			case err := <-waitCh:
+				waitErr = err
+				break waitLoop
+			case <-time.After(forceKillWaitTimeout):
+				if proc := cmd.Process(); proc != nil {
+					_ = proc.Kill()
+				}
+			}
+		}
 	case <-completeSeen:
 		completeSeenObserved = true
 		if messageTimer != nil {
```
```diff
@@ -625,6 +625,27 @@ func TestExecutorRunCodexTaskWithContext(t *testing.T) {
 		}
 	})
 
+	t.Run("claudeSkipPermissionsPropagatesFromTaskSpec", func(t *testing.T) {
+		t.Setenv("CODEAGENT_SKIP_PERMISSIONS", "false")
+		var gotArgs []string
+		newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
+			gotArgs = append([]string(nil), args...)
+			return &execFakeRunner{
+				stdout:  newReasonReadCloser(`{"type":"item.completed","item":{"type":"agent_message","text":"ok"}}`),
+				process: &execFakeProcess{pid: 15},
+			}
+		}
+
+		_ = closeLogger()
+		res := runCodexTaskWithContext(context.Background(), TaskSpec{ID: "task-skip", Task: "payload", WorkDir: ".", SkipPermissions: true}, ClaudeBackend{}, nil, false, false, 1)
+		if res.ExitCode != 0 || res.Error != "" {
+			t.Fatalf("unexpected result: %+v", res)
+		}
+		if !slices.Contains(gotArgs, "--dangerously-skip-permissions") {
+			t.Fatalf("expected --dangerously-skip-permissions in args, got %v", gotArgs)
+		}
+	})
+
 	t.Run("missingMessage", func(t *testing.T) {
 		newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
 			return &execFakeRunner{
```
```diff
@@ -18,6 +18,12 @@ var geminiNoisePatterns = []string{
 	"YOLO mode is enabled",
 }
 
+// codexNoisePatterns contains stderr patterns to filter for codex backend
+var codexNoisePatterns = []string{
+	"ERROR codex_core::codex: needs_follow_up:",
+	"ERROR codex_core::skills::loader:",
+}
+
 // filteringWriter wraps an io.Writer and filters out lines matching patterns
 type filteringWriter struct {
 	w io.Writer
```
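The `filteringWriter` type itself is only partially visible in this hunk. As a rough sketch of the mechanism, assuming line-oriented buffering (a minimal stand-in, not the repository's implementation):

```go
package main

import (
	"bytes"
	"io"
	"os"
	"strings"
)

// lineFilterWriter buffers written bytes, splits them into complete lines,
// and forwards only the lines that contain none of the noise patterns.
type lineFilterWriter struct {
	w        io.Writer
	patterns []string
	buf      bytes.Buffer
}

func (f *lineFilterWriter) Write(p []byte) (int, error) {
	f.buf.Write(p)
	for {
		line, err := f.buf.ReadString('\n')
		if err != nil {
			// No complete line yet; keep the partial tail buffered.
			f.buf.WriteString(line)
			return len(p), nil
		}
		noisy := false
		for _, pat := range f.patterns {
			if strings.Contains(line, pat) {
				noisy = true
				break
			}
		}
		if !noisy {
			if _, werr := f.w.Write([]byte(line)); werr != nil {
				return len(p), werr
			}
		}
	}
}

func main() {
	fw := &lineFilterWriter{w: os.Stdout, patterns: []string{"ERROR codex_core::skills::loader:"}}
	io.WriteString(fw, "real output\nERROR codex_core::skills::loader: noise\nmore output\n")
	// Only "real output" and "more output" reach stdout.
}
```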
```diff
@@ -1,7 +1,6 @@
 package main
 
 import (
-	"encoding/json"
 	"fmt"
 	"io"
 	"os"
@@ -15,7 +14,7 @@ import (
 )
 
 const (
-	version               = "5.5.0"
+	version               = "5.6.4"
 	defaultWorkdir        = "."
 	defaultTimeout        = 7200 // seconds (2 hours)
 	defaultCoverageTarget = 90.0
@@ -32,8 +31,6 @@ const (
 	stdoutDrainTimeout = 100 * time.Millisecond
 )
 
-var useASCIIMode = os.Getenv("CODEAGENT_ASCII_MODE") == "true"
-
 // Test hooks for dependency injection
 var (
 	stdinReader io.Reader = os.Stdin
@@ -45,7 +42,6 @@ var (
 	buildCodexArgsFn = buildCodexArgs
 	selectBackendFn  = selectBackend
 	commandContext   = exec.CommandContext
-	jsonMarshal      = json.Marshal
 	cleanupLogsFn    = cleanupOldLogs
 	signalNotifyFn   = signal.Notify
 	signalStopFn     = signal.Stop
```
```diff
@@ -181,6 +177,7 @@ func run() (exitCode int) {
 		backendName := defaultBackendName
 		model := ""
 		fullOutput := false
+		skipPermissions := envFlagEnabled("CODEAGENT_SKIP_PERMISSIONS")
 		var extras []string
 
 		for i := 0; i < len(args); i++ {
@@ -218,13 +215,19 @@ func run() (exitCode int) {
 					return 1
 				}
 				model = value
+			case arg == "--skip-permissions", arg == "--dangerously-skip-permissions":
+				skipPermissions = true
+			case strings.HasPrefix(arg, "--skip-permissions="):
+				skipPermissions = parseBoolFlag(strings.TrimPrefix(arg, "--skip-permissions="), skipPermissions)
+			case strings.HasPrefix(arg, "--dangerously-skip-permissions="):
+				skipPermissions = parseBoolFlag(strings.TrimPrefix(arg, "--dangerously-skip-permissions="), skipPermissions)
 			default:
 				extras = append(extras, arg)
 			}
 		}
 
 		if len(extras) > 0 {
-			fmt.Fprintln(os.Stderr, "ERROR: --parallel reads its task configuration from stdin; only --backend, --model and --full-output are allowed.")
+			fmt.Fprintln(os.Stderr, "ERROR: --parallel reads its task configuration from stdin; only --backend, --model, --full-output and --skip-permissions are allowed.")
 			fmt.Fprintln(os.Stderr, "Usage examples:")
 			fmt.Fprintf(os.Stderr, "  %s --parallel < tasks.txt\n", name)
 			fmt.Fprintf(os.Stderr, "  echo '...' | %s --parallel\n", name)
@@ -261,6 +264,7 @@ func run() (exitCode int) {
 			if strings.TrimSpace(cfg.Tasks[i].Model) == "" && model != "" {
 				cfg.Tasks[i].Model = model
 			}
+			cfg.Tasks[i].SkipPermissions = cfg.Tasks[i].SkipPermissions || skipPermissions
 		}
 
 		timeoutSec := resolveTimeout()
@@ -440,6 +444,7 @@ func run() (exitCode int) {
 		SessionID:       cfg.SessionID,
 		Model:           cfg.Model,
 		ReasoningEffort: cfg.ReasoningEffort,
+		SkipPermissions: cfg.SkipPermissions,
 		UseStdin:        useStdin,
 	}
```
```diff
@@ -169,32 +169,6 @@ func parseIntegrationOutput(t *testing.T, out string) integrationOutput {
 	return payload
 }
 
-func extractTaskBlock(t *testing.T, output, taskID string) string {
-	t.Helper()
-	header := fmt.Sprintf("--- Task: %s ---", taskID)
-	lines := strings.Split(output, "\n")
-	var block []string
-	collecting := false
-	for _, raw := range lines {
-		trimmed := strings.TrimSpace(raw)
-		if !collecting {
-			if trimmed == header {
-				collecting = true
-				block = append(block, trimmed)
-			}
-			continue
-		}
-		if strings.HasPrefix(trimmed, "--- Task: ") && trimmed != header {
-			break
-		}
-		block = append(block, trimmed)
-	}
-	if len(block) == 0 {
-		t.Fatalf("task block %s not found in output:\n%s", taskID, output)
-	}
-	return strings.Join(block, "\n")
-}
-
 func findResultByID(t *testing.T, payload integrationOutput, id string) TaskResult {
 	t.Helper()
 	for _, res := range payload.Results {
```
```diff
@@ -36,7 +36,6 @@ func resetTestHooks() {
 	newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner {
 		return &realCmd{cmd: commandContext(ctx, name, args...)}
 	}
-	jsonMarshal = json.Marshal
 	forceKillDelay.Store(5)
 	closeLogger()
 	executablePathFn = os.Executable
@@ -1095,6 +1094,11 @@ func TestBackendParseArgs_NewMode(t *testing.T) {
 			args: []string{"codeagent-wrapper", "-", "/some/dir"},
 			want: &Config{Mode: "new", Task: "-", WorkDir: "/some/dir", ExplicitStdin: true, Backend: defaultBackendName},
 		},
+		{
+			name:    "stdin with dash workdir rejected",
+			args:    []string{"codeagent-wrapper", "-", "-"},
+			wantErr: true,
+		},
 		{name: "no args", args: []string{"codeagent-wrapper"}, wantErr: true},
 	}
```
```diff
@@ -1156,6 +1160,7 @@ func TestBackendParseArgs_ResumeMode(t *testing.T) {
 		{name: "resume missing task", args: []string{"codeagent-wrapper", "resume", "session-123"}, wantErr: true},
 		{name: "resume empty session_id", args: []string{"codeagent-wrapper", "resume", "", "task"}, wantErr: true},
 		{name: "resume whitespace session_id", args: []string{"codeagent-wrapper", "resume", "   ", "task"}, wantErr: true},
+		{name: "resume with dash workdir rejected", args: []string{"codeagent-wrapper", "resume", "session-123", "task", "-"}, wantErr: true},
 	}
 
 	for _, tt := range tests {
@@ -1410,7 +1415,7 @@ func TestBackendParseArgs_PromptFileFlag(t *testing.T) {
 func TestBackendParseArgs_PromptFileOverridesAgent(t *testing.T) {
 	defer resetTestHooks()
 
-	os.Args = []string{"codeagent-wrapper", "--prompt-file", "/tmp/custom.md", "--agent", "sisyphus", "task"}
+	os.Args = []string{"codeagent-wrapper", "--prompt-file", "/tmp/custom.md", "--agent", "develop", "task"}
 	cfg, err := parseArgs()
 	if err != nil {
 		t.Fatalf("parseArgs() unexpected error: %v", err)
@@ -1419,7 +1424,7 @@ func TestBackendParseArgs_PromptFileOverridesAgent(t *testing.T) {
 		t.Fatalf("PromptFile = %q, want %q", cfg.PromptFile, "/tmp/custom.md")
 	}
 
-	os.Args = []string{"codeagent-wrapper", "--agent", "sisyphus", "--prompt-file", "/tmp/custom.md", "task"}
+	os.Args = []string{"codeagent-wrapper", "--agent", "develop", "--prompt-file", "/tmp/custom.md", "task"}
 	cfg, err = parseArgs()
 	if err != nil {
 		t.Fatalf("parseArgs() unexpected error: %v", err)
@@ -1582,6 +1587,26 @@ do something`
 	}
 }
 
+func TestParallelParseConfig_SkipPermissions(t *testing.T) {
+	input := `---TASK---
+id: task-1
+skip_permissions: true
+---CONTENT---
+do something`
+
+	cfg, err := parseParallelConfig([]byte(input))
+	if err != nil {
+		t.Fatalf("parseParallelConfig() unexpected error: %v", err)
+	}
+	if len(cfg.Tasks) != 1 {
+		t.Fatalf("expected 1 task, got %d", len(cfg.Tasks))
+	}
+	task := cfg.Tasks[0]
+	if !task.SkipPermissions {
+		t.Fatalf("SkipPermissions = %v, want true", task.SkipPermissions)
+	}
+}
+
 func TestParallelParseConfig_EmptySessionID(t *testing.T) {
 	input := `---TASK---
 id: task-1
@@ -1945,7 +1970,7 @@ func TestRunBuildCodexArgs_NewMode_WithReasoningEffort(t *testing.T) {
 	args := buildCodexArgs(cfg, "my task")
 	expected := []string{
 		"e",
-		"--reasoning-effort", "high",
+		"-c", "model_reasoning_effort=high",
 		"--skip-git-repo-check",
 		"-C", "/test/dir",
 		"--json",
@@ -1985,13 +2010,13 @@ func TestRunCodexTaskWithContext_CodexReasoningEffort(t *testing.T) {
 
 	found := false
 	for i := 0; i+1 < len(gotArgs); i++ {
-		if gotArgs[i] == "--reasoning-effort" && gotArgs[i+1] == "high" {
+		if gotArgs[i] == "-c" && gotArgs[i+1] == "model_reasoning_effort=high" {
 			found = true
 			break
 		}
 	}
 	if !found {
-		t.Fatalf("expected --reasoning-effort high in args, got %v", gotArgs)
+		t.Fatalf("expected -c model_reasoning_effort=high in args, got %v", gotArgs)
 	}
 }
@@ -3711,7 +3736,7 @@ func TestVersionFlag(t *testing.T) {
 		}
 	})
 
-	want := "codeagent-wrapper version 5.5.0\n"
+	want := "codeagent-wrapper version 5.6.4\n"
 
 	if output != want {
 		t.Fatalf("output = %q, want %q", output, want)
@@ -3727,7 +3752,7 @@ func TestVersionShortFlag(t *testing.T) {
 		}
 	})
 
-	want := "codeagent-wrapper version 5.5.0\n"
+	want := "codeagent-wrapper version 5.6.4\n"
 
 	if output != want {
 		t.Fatalf("output = %q, want %q", output, want)
@@ -3743,7 +3768,7 @@ func TestVersionLegacyAlias(t *testing.T) {
 		}
 	})
 
-	want := "codex-wrapper version 5.5.0\n"
+	want := "codex-wrapper version 5.6.4\n"
 
 	if output != want {
 		t.Fatalf("output = %q, want %q", output, want)
@@ -4009,6 +4034,30 @@ do two`)
 		}
 	})
 
+	t.Run("parallelSkipPermissions", func(t *testing.T) {
+		defer resetTestHooks()
+		cleanupHook = func() {}
+		cleanupLogsFn = func() (CleanupStats, error) { return CleanupStats{}, nil }
+		t.Setenv("CODEAGENT_SKIP_PERMISSIONS", "false")
+
+		runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult {
+			if !task.SkipPermissions {
+				return TaskResult{TaskID: task.ID, ExitCode: 1, Error: "SkipPermissions not propagated"}
+			}
+			return TaskResult{TaskID: task.ID, ExitCode: 0, Message: "ok"}
+		}
+
+		stdinReader = strings.NewReader(`---TASK---
+id: only
+backend: claude
+---CONTENT---
+do one`)
+		os.Args = []string{"codeagent-wrapper", "--parallel", "--skip-permissions"}
+		if code := run(); code != 0 {
+			t.Fatalf("run exit = %d, want 0", code)
+		}
+	})
+
 	t.Run("parallelErrors", func(t *testing.T) {
 		defer resetTestHooks()
 		cleanupLogsFn = func() (CleanupStats, error) { return CleanupStats{}, nil }
```
```diff
@@ -59,14 +59,6 @@
 	jsonLinePreviewBytes = 256
 )
 
-type codexHeader struct {
-	Type     string `json:"type"`
-	ThreadID string `json:"thread_id,omitempty"`
-	Item     *struct {
-		Type string `json:"type"`
-	} `json:"item,omitempty"`
-}
-
 // UnifiedEvent combines all backend event formats into a single structure
 // to avoid multiple JSON unmarshal operations per event
 type UnifiedEvent struct {
```
```diff
@@ -9,6 +9,7 @@ import (
 	"os/exec"
 	"path/filepath"
+	"strconv"
 	"strings"
 )
 
 // sendTermSignal on Windows directly kills the process.
@@ -31,6 +32,56 @@ func sendTermSignal(proc processHandle) error {
 		if err := cmd.Run(); err == nil {
 			return nil
 		}
+		if err := killProcessTree(pid); err == nil {
+			return nil
+		}
 	}
 	return proc.Kill()
 }
 
+func killProcessTree(pid int) error {
+	if pid <= 0 {
+		return nil
+	}
+
+	wmic := "wmic"
+	if root := os.Getenv("SystemRoot"); root != "" {
+		wmic = filepath.Join(root, "System32", "wbem", "WMIC.exe")
+	}
+
+	queryChildren := "(ParentProcessId=" + strconv.Itoa(pid) + ")"
+	listCmd := exec.Command(wmic, "process", "where", queryChildren, "get", "ProcessId", "/VALUE")
+	listCmd.Stderr = io.Discard
+	out, err := listCmd.Output()
+	if err == nil {
+		for _, childPID := range parseWMICPIDs(out) {
+			_ = killProcessTree(childPID)
+		}
+	}
+
+	querySelf := "(ProcessId=" + strconv.Itoa(pid) + ")"
+	termCmd := exec.Command(wmic, "process", "where", querySelf, "call", "terminate")
+	termCmd.Stdout = io.Discard
+	termCmd.Stderr = io.Discard
+	if termErr := termCmd.Run(); termErr != nil && err == nil {
+		err = termErr
+	}
+	return err
+}
+
+func parseWMICPIDs(out []byte) []int {
+	const prefix = "ProcessId="
+	var pids []int
+	for _, line := range strings.Split(string(out), "\n") {
+		line = strings.TrimSpace(line)
+		if !strings.HasPrefix(line, prefix) {
+			continue
+		}
+		n, err := strconv.Atoi(strings.TrimSpace(strings.TrimPrefix(line, prefix)))
+		if err != nil || n <= 0 {
+			continue
+		}
+		pids = append(pids, n)
+	}
+	return pids
+}
```
````diff
@@ -273,30 +273,6 @@ func farewell(name string) string {
 	return "goodbye " + name
 }
 
-// extractMessageSummary extracts a brief summary from task output
-// Returns first meaningful line or truncated content up to maxLen chars
-func extractMessageSummary(message string, maxLen int) string {
-	if message == "" || maxLen <= 0 {
-		return ""
-	}
-
-	// Try to find a meaningful summary line
-	lines := strings.Split(message, "\n")
-	for _, line := range lines {
-		line = strings.TrimSpace(line)
-		// Skip empty lines and common noise
-		if line == "" || strings.HasPrefix(line, "```") || strings.HasPrefix(line, "---") {
-			continue
-		}
-		// Found a meaningful line
-		return safeTruncate(line, maxLen)
-	}
-
-	// Fallback: truncate entire message
-	clean := strings.TrimSpace(message)
-	return safeTruncate(clean, maxLen)
-}
-
 // extractCoverageFromLines extracts coverage from pre-split lines.
 func extractCoverageFromLines(lines []string) string {
 	if len(lines) == 0 {
@@ -592,15 +568,6 @@ func extractKeyOutputFromLines(lines []string, maxLen int) string {
 	return safeTruncate(clean, maxLen)
 }
 
-// extractKeyOutput extracts a brief summary of what the task accomplished
-// Looks for summary lines, first meaningful sentence, or truncates message
-func extractKeyOutput(message string, maxLen int) string {
-	if message == "" || maxLen <= 0 {
-		return ""
-	}
-	return extractKeyOutputFromLines(strings.Split(message, "\n"), maxLen)
-}
-
 // extractCoverageGap extracts what's missing from coverage reports
 // Looks for uncovered lines, branches, or functions
 func extractCoverageGap(message string) string {
````
config.json (14 lines changed)

```diff
@@ -93,7 +93,7 @@
 			]
 		},
 		"essentials": {
-			"enabled": true,
+			"enabled": false,
 			"description": "Core development commands and utilities",
 			"operations": [
 				{
@@ -156,6 +156,18 @@
 					"description": "Install develop agent prompt"
 				}
 			]
-		}
+		},
+		"sparv": {
+			"enabled": false,
+			"description": "SPARV workflow (Specify→Plan→Act→Review→Vault) with 10-point gate",
+			"operations": [
+				{
+					"type": "copy_dir",
+					"source": "skills/sparv",
+					"target": "skills/sparv",
+					"description": "Install sparv skill with all scripts and hooks"
+				}
+			]
+		}
 	}
 }
```
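With `essentials` and the new `sparv` module both defaulting to disabled, they have to be selected explicitly at install time, for example (paths and module names as used elsewhere in this compare):

```bash
python3 install.py --install-dir ~/.claude --module sparv,essentials
```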
dev-workflow/.claude-plugin/plugin.json (new file, 9 lines)

```diff
@@ -0,0 +1,9 @@
+{
+  "name": "dev",
+  "description": "Lightweight development workflow with requirements clarification, parallel codex execution, and mandatory 90% test coverage",
+  "version": "5.6.1",
+  "author": {
+    "name": "cexll",
+    "email": "cexll@cexll.com"
+  }
+}
```
```diff
@@ -1,44 +0,0 @@
-{
-  "name": "development-essentials",
-  "source": "./",
-  "description": "Essential development commands for coding, debugging, testing, optimization, and documentation",
-  "version": "1.0.0",
-  "author": {
-    "name": "Claude Code Dev Workflows",
-    "url": "https://github.com/cexll/myclaude"
-  },
-  "homepage": "https://github.com/cexll/myclaude",
-  "repository": "https://github.com/cexll/myclaude",
-  "license": "MIT",
-  "keywords": [
-    "code",
-    "debug",
-    "test",
-    "optimize",
-    "review",
-    "bugfix",
-    "refactor",
-    "documentation"
-  ],
-  "category": "essentials",
-  "strict": false,
-  "commands": [
-    "./commands/code.md",
-    "./commands/debug.md",
-    "./commands/test.md",
-    "./commands/optimize.md",
-    "./commands/review.md",
-    "./commands/bugfix.md",
-    "./commands/refactor.md",
-    "./commands/docs.md",
-    "./commands/ask.md",
-    "./commands/think.md"
-  ],
-  "agents": [
-    "./agents/code.md",
-    "./agents/bugfix.md",
-    "./agents/bugfix-verify.md",
-    "./agents/optimize.md",
-    "./agents/debug.md"
-  ]
-}
```
development-essentials/.claude-plugin/plugin.json (new file, 9 lines)

```diff
@@ -0,0 +1,9 @@
+{
+  "name": "essentials",
+  "description": "Essential development commands for coding, debugging, testing, optimization, and documentation",
+  "version": "5.6.1",
+  "author": {
+    "name": "cexll",
+    "email": "cexll@cexll.com"
+  }
+}
```
398
install.py
398
install.py
@@ -46,7 +46,7 @@ def parse_args(argv: Optional[Iterable[str]] = None) -> argparse.Namespace:
|
||||
)
|
||||
parser.add_argument(
|
||||
"--module",
|
||||
help="Comma-separated modules to install, or 'all' for all enabled",
|
||||
help="Comma-separated modules to install/uninstall, or 'all'",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--config",
|
||||
@@ -58,6 +58,16 @@ def parse_args(argv: Optional[Iterable[str]] = None) -> argparse.Namespace:
|
||||
action="store_true",
|
||||
help="List available modules and exit",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--status",
|
||||
action="store_true",
|
||||
help="Show installation status of all modules",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--uninstall",
|
||||
action="store_true",
|
||||
help="Uninstall specified modules",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--force",
|
||||
action="store_true",
|
||||
@@ -166,22 +176,93 @@ def resolve_paths(config: Dict[str, Any], args: argparse.Namespace) -> Dict[str,
|
||||
|
||||
def list_modules(config: Dict[str, Any]) -> None:
|
||||
print("Available Modules:")
|
||||
print(f"{'Name':<15} {'Default':<8} Description")
|
||||
print("-" * 60)
|
||||
for name, cfg in config.get("modules", {}).items():
|
||||
print(f"{'#':<3} {'Name':<15} {'Default':<8} Description")
|
||||
print("-" * 65)
|
||||
for idx, (name, cfg) in enumerate(config.get("modules", {}).items(), 1):
|
||||
default = "✓" if cfg.get("enabled", False) else "✗"
|
||||
desc = cfg.get("description", "")
|
||||
print(f"{name:<15} {default:<8} {desc}")
|
||||
print(f"{idx:<3} {name:<15} {default:<8} {desc}")
|
||||
print("\n✓ = installed by default when no --module specified")
|
||||
|
||||
|
||||
def load_installed_status(ctx: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Load installed modules status from status file."""
|
||||
status_path = Path(ctx["status_file"])
|
||||
if status_path.exists():
|
||||
try:
|
||||
return _load_json(status_path)
|
||||
except (ValueError, FileNotFoundError):
|
||||
return {"modules": {}}
|
||||
return {"modules": {}}
|
||||
|
||||
|
||||
def check_module_installed(name: str, cfg: Dict[str, Any], ctx: Dict[str, Any]) -> bool:
|
||||
"""Check if a module is installed by verifying its files exist."""
|
||||
install_dir = ctx["install_dir"]
|
||||
|
||||
for op in cfg.get("operations", []):
|
||||
op_type = op.get("type")
|
||||
if op_type in ("copy_dir", "copy_file"):
|
||||
target = (install_dir / op["target"]).expanduser().resolve()
|
||||
if target.exists():
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def get_installed_modules(config: Dict[str, Any], ctx: Dict[str, Any]) -> Dict[str, bool]:
|
||||
"""Get installation status of all modules by checking files."""
|
||||
result = {}
|
||||
modules = config.get("modules", {})
|
||||
|
||||
# First check status file
|
||||
status = load_installed_status(ctx)
|
||||
status_modules = status.get("modules", {})
|
||||
|
||||
for name, cfg in modules.items():
|
||||
# Check both status file and filesystem
|
||||
in_status = name in status_modules
|
||||
files_exist = check_module_installed(name, cfg, ctx)
|
||||
result[name] = in_status or files_exist
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def list_modules_with_status(config: Dict[str, Any], ctx: Dict[str, Any]) -> None:
|
||||
"""List modules with installation status."""
|
||||
installed_status = get_installed_modules(config, ctx)
|
||||
status_data = load_installed_status(ctx)
|
||||
status_modules = status_data.get("modules", {})
|
||||
|
||||
print("\n" + "=" * 70)
|
||||
print("Module Status")
|
||||
print("=" * 70)
|
||||
print(f"{'#':<3} {'Name':<15} {'Status':<15} {'Installed At':<20} Description")
|
||||
print("-" * 70)
|
||||
|
||||
for idx, (name, cfg) in enumerate(config.get("modules", {}).items(), 1):
|
||||
desc = cfg.get("description", "")[:25]
|
||||
if installed_status.get(name, False):
|
||||
status = "✅ Installed"
|
||||
installed_at = status_modules.get(name, {}).get("installed_at", "")[:16]
|
||||
else:
|
||||
status = "⬚ Not installed"
|
||||
installed_at = ""
|
||||
print(f"{idx:<3} {name:<15} {status:<15} {installed_at:<20} {desc}")
|
||||
|
||||
total = len(config.get("modules", {}))
|
||||
installed_count = sum(1 for v in installed_status.values() if v)
|
||||
print(f"\nTotal: {installed_count}/{total} modules installed")
|
||||
print(f"Install dir: {ctx['install_dir']}")
|
||||
|
||||
|
||||
def select_modules(config: Dict[str, Any], module_arg: Optional[str]) -> Dict[str, Any]:
|
||||
modules = config.get("modules", {})
|
||||
if not module_arg:
|
||||
return {k: v for k, v in modules.items() if v.get("enabled", False)}
|
||||
# No --module specified: show interactive selection
|
||||
return interactive_select_modules(config)
|
||||
|
||||
if module_arg.strip().lower() == "all":
|
||||
return {k: v for k, v in modules.items() if v.get("enabled", False)}
|
||||
return dict(modules.items())
|
||||
|
||||
selected: Dict[str, Any] = {}
|
||||
for name in (part.strip() for part in module_arg.split(",")):
|
||||
@@ -193,6 +274,256 @@ def select_modules(config: Dict[str, Any], module_arg: Optional[str]) -> Dict[st
|
||||
return selected
|
||||
|
||||
|
||||
def interactive_select_modules(config: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Interactive module selection when no --module is specified."""
|
||||
modules = config.get("modules", {})
|
||||
module_names = list(modules.keys())
|
||||
|
||||
print("\n" + "=" * 65)
|
||||
print("Welcome to Claude Plugin Installer")
|
||||
print("=" * 65)
|
||||
print("\nNo modules specified. Please select modules to install:\n")
|
||||
|
||||
list_modules(config)
|
||||
|
||||
print("\nEnter module numbers or names (comma-separated), or:")
|
||||
print(" 'all' - Install all modules")
|
||||
print(" 'q' - Quit without installing")
|
||||
print()
|
||||
|
||||
while True:
|
||||
try:
|
||||
user_input = input("Select modules: ").strip()
|
||||
except (EOFError, KeyboardInterrupt):
|
||||
print("\nInstallation cancelled.")
|
||||
sys.exit(0)
|
||||
|
||||
if not user_input:
|
||||
print("No input. Please enter module numbers, names, 'all', or 'q'.")
|
||||
continue
|
||||
|
||||
if user_input.lower() == "q":
|
||||
print("Installation cancelled.")
|
||||
sys.exit(0)
|
||||
|
||||
if user_input.lower() == "all":
|
||||
print(f"\nSelected all {len(modules)} modules.")
|
||||
return dict(modules.items())
|
||||
|
||||
# Parse selection
|
||||
selected: Dict[str, Any] = {}
|
||||
parts = [p.strip() for p in user_input.replace(" ", ",").split(",") if p.strip()]
|
||||
|
||||
try:
|
||||
for part in parts:
|
||||
# Try as number first
|
||||
if part.isdigit():
|
||||
idx = int(part) - 1
|
||||
if 0 <= idx < len(module_names):
|
||||
name = module_names[idx]
|
||||
selected[name] = modules[name]
|
||||
else:
|
||||
print(f"Invalid number: {part}. Valid range: 1-{len(module_names)}")
|
||||
selected = {}
|
||||
break
|
||||
# Try as name
|
||||
elif part in modules:
|
||||
selected[part] = modules[part]
|
||||
else:
|
||||
print(f"Module not found: '{part}'")
|
||||
selected = {}
|
||||
break
|
||||
|
||||
if selected:
|
||||
names = ", ".join(selected.keys())
|
||||
print(f"\nSelected {len(selected)} module(s): {names}")
|
||||
return selected
|
||||
|
||||
except ValueError:
|
||||
print("Invalid input. Please try again.")
|
||||
continue
|
||||
|
||||
|
||||
def uninstall_module(name: str, cfg: Dict[str, Any], ctx: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Uninstall a module by removing its files."""
|
||||
result: Dict[str, Any] = {
|
||||
"module": name,
|
||||
"status": "success",
|
||||
"uninstalled_at": datetime.now().isoformat(),
|
||||
}
|
||||
|
||||
install_dir = ctx["install_dir"]
|
||||
removed_paths = []
|
||||
|
||||
for op in cfg.get("operations", []):
|
||||
op_type = op.get("type")
|
||||
try:
|
||||
if op_type in ("copy_dir", "copy_file"):
|
||||
target = (install_dir / op["target"]).expanduser().resolve()
|
||||
if target.exists():
|
||||
if target.is_dir():
|
||||
shutil.rmtree(target)
|
||||
else:
|
||||
target.unlink()
|
||||
removed_paths.append(str(target))
|
||||
write_log({"level": "INFO", "message": f"Removed: {target}"}, ctx)
|
||||
# merge_dir and merge_json are harder to uninstall cleanly, skip
|
||||
except Exception as exc:
|
||||
write_log({"level": "WARNING", "message": f"Failed to remove {op.get('target', 'unknown')}: {exc}"}, ctx)
|
||||
|
||||
result["removed_paths"] = removed_paths
|
||||
return result
|
||||
|
||||
|
||||
def update_status_after_uninstall(uninstalled_modules: List[str], ctx: Dict[str, Any]) -> None:
|
||||
"""Remove uninstalled modules from status file."""
|
||||
status = load_installed_status(ctx)
|
||||
modules = status.get("modules", {})
|
||||
|
||||
for name in uninstalled_modules:
|
||||
if name in modules:
|
||||
del modules[name]
|
||||
|
||||
status["modules"] = modules
|
||||
status["updated_at"] = datetime.now().isoformat()
|
||||
|
||||
status_path = Path(ctx["status_file"])
|
||||
with status_path.open("w", encoding="utf-8") as fh:
|
||||
json.dump(status, fh, indent=2, ensure_ascii=False)
|
||||
|
||||
|
||||
def interactive_manage(config: Dict[str, Any], ctx: Dict[str, Any]) -> int:
    """Interactive module management menu."""
    while True:
        installed_status = get_installed_modules(config, ctx)
        modules = config.get("modules", {})
        module_names = list(modules.keys())

        print("\n" + "=" * 70)
        print("Claude Plugin Manager")
        print("=" * 70)
        print(f"{'#':<3} {'Name':<15} {'Status':<15} Description")
        print("-" * 70)

        for idx, (name, cfg) in enumerate(modules.items(), 1):
            desc = cfg.get("description", "")[:30]
            if installed_status.get(name, False):
                status = "✅ Installed"
            else:
                status = "⬚ Not installed"
            print(f"{idx:<3} {name:<15} {status:<15} {desc}")

        total = len(modules)
        installed_count = sum(1 for v in installed_status.values() if v)
        print(f"\nInstalled: {installed_count}/{total} | Dir: {ctx['install_dir']}")

        print("\nCommands:")
        print("  i <num/name>  - Install module(s)")
        print("  u <num/name>  - Uninstall module(s)")
        print("  q             - Quit")
        print()

        try:
            user_input = input("Enter command: ").strip()
        except (EOFError, KeyboardInterrupt):
            print("\nExiting.")
            return 0

        if not user_input:
            continue

        if user_input.lower() == "q":
            print("Goodbye!")
            return 0

        parts = user_input.split(maxsplit=1)
        cmd = parts[0].lower()
        args = parts[1] if len(parts) > 1 else ""

        if cmd == "i":
            # Install
            selected = _parse_module_selection(args, modules, module_names)
            if selected:
                # Filter out already installed
                to_install = {k: v for k, v in selected.items() if not installed_status.get(k, False)}
                if not to_install:
                    print("All selected modules are already installed.")
                    continue
                print(f"\nInstalling: {', '.join(to_install.keys())}")
                results = []
                for name, cfg in to_install.items():
                    try:
                        results.append(execute_module(name, cfg, ctx))
                        print(f"  ✓ {name} installed")
                    except Exception as exc:
                        print(f"  ✗ {name} failed: {exc}")
                # Update status
                current_status = load_installed_status(ctx)
                for r in results:
                    if r.get("status") == "success":
                        current_status.setdefault("modules", {})[r["module"]] = r
                current_status["updated_at"] = datetime.now().isoformat()
                with Path(ctx["status_file"]).open("w", encoding="utf-8") as fh:
                    json.dump(current_status, fh, indent=2, ensure_ascii=False)

        elif cmd == "u":
            # Uninstall
            selected = _parse_module_selection(args, modules, module_names)
            if selected:
                # Filter to only installed ones
                to_uninstall = {k: v for k, v in selected.items() if installed_status.get(k, False)}
                if not to_uninstall:
                    print("None of the selected modules are installed.")
                    continue
                print(f"\nUninstalling: {', '.join(to_uninstall.keys())}")
                confirm = input("Confirm? (y/N): ").strip().lower()
                if confirm != "y":
                    print("Cancelled.")
                    continue
                for name, cfg in to_uninstall.items():
                    try:
                        uninstall_module(name, cfg, ctx)
                        print(f"  ✓ {name} uninstalled")
                    except Exception as exc:
                        print(f"  ✗ {name} failed: {exc}")
                update_status_after_uninstall(list(to_uninstall.keys()), ctx)

        else:
            print(f"Unknown command: {cmd}. Use 'i', 'u', or 'q'.")


def _parse_module_selection(
    args: str, modules: Dict[str, Any], module_names: List[str]
) -> Dict[str, Any]:
    """Parse module selection from user input."""
    if not args:
        print("Please specify module number(s) or name(s).")
        return {}

    if args.lower() == "all":
        return dict(modules.items())

    selected: Dict[str, Any] = {}
    parts = [p.strip() for p in args.replace(",", " ").split() if p.strip()]

    for part in parts:
        if part.isdigit():
            idx = int(part) - 1
            if 0 <= idx < len(module_names):
                name = module_names[idx]
                selected[name] = modules[name]
            else:
                print(f"Invalid number: {part}")
                return {}
        elif part in modules:
            selected[part] = modules[part]
        else:
            print(f"Module not found: '{part}'")
            return {}

    return selected


def ensure_install_dir(path: Path) -> None:
    path = Path(path)
    if path.exists() and not path.is_dir():
@@ -529,10 +860,54 @@ def main(argv: Optional[Iterable[str]] = None) -> int:

    ctx = resolve_paths(config, args)

    # Handle --list-modules
    if getattr(args, "list_modules", False):
        list_modules(config)
        return 0

    # Handle --status
    if getattr(args, "status", False):
        list_modules_with_status(config, ctx)
        return 0

    # Handle --uninstall
    if getattr(args, "uninstall", False):
        if not args.module:
            print("Error: --uninstall requires --module to specify which modules to uninstall")
            return 1
        modules = config.get("modules", {})
        installed = load_installed_status(ctx)
        installed_modules = installed.get("modules", {})

        selected = select_modules(config, args.module)
        to_uninstall = {k: v for k, v in selected.items() if k in installed_modules}

        if not to_uninstall:
            print("None of the specified modules are installed.")
            return 0

        print(f"Uninstalling {len(to_uninstall)} module(s): {', '.join(to_uninstall.keys())}")
        for name, cfg in to_uninstall.items():
            try:
                uninstall_module(name, cfg, ctx)
                print(f"  ✓ {name} uninstalled")
            except Exception as exc:
                print(f"  ✗ {name} failed: {exc}", file=sys.stderr)

        update_status_after_uninstall(list(to_uninstall.keys()), ctx)
        print("\n✓ Uninstall complete")
        return 0

    # No --module specified: enter interactive management mode
    if not args.module:
        try:
            ensure_install_dir(ctx["install_dir"])
        except Exception as exc:
            print(f"Failed to prepare install dir: {exc}", file=sys.stderr)
            return 1
        return interactive_manage(config, ctx)

    # Install specified modules
    modules = select_modules(config, args.module)

    try:
@@ -568,7 +943,14 @@ def main(argv: Optional[Iterable[str]] = None) -> int:
            )
            break

    write_status(results, ctx)
    # Merge with existing status
    current_status = load_installed_status(ctx)
    for r in results:
        if r.get("status") == "success":
            current_status.setdefault("modules", {})[r["module"]] = r
    current_status["updated_at"] = datetime.now().isoformat()
    with Path(ctx["status_file"]).open("w", encoding="utf-8") as fh:
        json.dump(current_status, fh, indent=2, ensure_ascii=False)

    # Summary
    success = sum(1 for r in results if r.get("status") == "success")

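Since `main()` falls through to `interactive_manage` when no `--module` is given, the manager is reached with a bare invocation (a sketch; the flags below are the ones handled in `main()` above, and the `install.py` name follows its usage elsewhere in this repo):

```bash
python3 install.py                            # no --module: opens the interactive manager
python3 install.py --status                   # list modules with install status
python3 install.py --uninstall --module omo   # non-interactive uninstall
```
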
@@ -1,33 +0,0 @@
{
  "name": "requirements-driven-development",
  "source": "./",
  "description": "Streamlined requirements-driven development workflow with 90% quality gates for practical feature implementation",
  "version": "1.0.0",
  "author": {
    "name": "Claude Code Dev Workflows",
    "url": "https://github.com/cexll/myclaude"
  },
  "homepage": "https://github.com/cexll/myclaude",
  "repository": "https://github.com/cexll/myclaude",
  "license": "MIT",
  "keywords": [
    "requirements",
    "workflow",
    "automation",
    "quality-gates",
    "feature-development",
    "agile",
    "specifications"
  ],
  "category": "workflows",
  "strict": false,
  "commands": [
    "./commands/requirements-pilot.md"
  ],
  "agents": [
    "./agents/requirements-generate.md",
    "./agents/requirements-code.md",
    "./agents/requirements-testing.md",
    "./agents/requirements-review.md"
  ]
}
9 requirements-driven-workflow/.claude-plugin/plugin.json Normal file
@@ -0,0 +1,9 @@
{
  "name": "requirements",
  "description": "Requirements-driven development workflow with quality gates for practical feature implementation",
  "version": "5.6.1",
  "author": {
    "name": "cexll",
    "email": "cexll@cexll.com"
  }
}
214 skills/dev/SKILL.md Normal file
@@ -0,0 +1,214 @@
---
name: dev
description: Extreme lightweight end-to-end development workflow with requirements clarification, intelligent backend selection, parallel codeagent execution, and mandatory 90% test coverage
---

You are the /dev Workflow Orchestrator, an expert development workflow manager specializing in orchestrating minimal, efficient end-to-end development processes with parallel task execution and rigorous test coverage validation.

---

## CRITICAL CONSTRAINTS (NEVER VIOLATE)

These rules have HIGHEST PRIORITY and override all other instructions:

1. **NEVER use Edit, Write, or MultiEdit tools directly** - ALL code changes MUST go through codeagent-wrapper
2. **MUST use AskUserQuestion in Step 0** - Backend selection MUST be the FIRST action (before requirement clarification)
3. **MUST use AskUserQuestion in Step 1** - Do NOT skip requirement clarification
4. **MUST use TodoWrite after Step 1** - Create the task tracking list before any analysis
5. **MUST use codeagent-wrapper for Step 2 analysis** - Do NOT use Read/Glob/Grep directly for deep analysis
6. **MUST wait for user confirmation in Step 3** - Do NOT proceed to Step 4 without explicit approval
7. **MUST invoke codeagent-wrapper --parallel for Step 4 execution** - Use the Bash tool, NOT Edit/Write or the Task tool

**Violation of any constraint above invalidates the entire workflow. Stop and restart if violated.**

---

**Core Responsibilities**
- Orchestrate a streamlined 7-step development workflow (Step 0 plus Steps 1-6):
  0. Backend selection (user constrained)
  1. Requirement clarification through targeted questioning
  2. Technical analysis using codeagent-wrapper
  3. Development documentation generation
  4. Parallel development execution (backend routing per task type)
  5. Coverage validation (≥90% requirement)
  6. Completion summary

**Workflow Execution**
- **Step 0: Backend Selection [MANDATORY - FIRST ACTION]**
  - MUST use the AskUserQuestion tool as the FIRST action, with multiSelect enabled
  - Ask which backends are allowed for this /dev run
  - Options (user can select multiple):
    - `codex` - Stable, high quality, best cost-performance (default for most tasks)
    - `claude` - Fast, lightweight (for quick fixes and config changes)
    - `gemini` - UI/UX specialist (for frontend styling and components)
  - Store the selected backends as the `allowed_backends` set for routing in Step 4
  - Special rule: if the user selects ONLY `codex`, then ALL subsequent tasks (including UI/quick-fix) MUST use `codex` (no exceptions)

- **Step 1: Requirement Clarification [MANDATORY - DO NOT SKIP]**
  - MUST use the AskUserQuestion tool
  - Focus questions on functional boundaries, inputs/outputs, constraints, testing, and required unit-test coverage levels
  - Iterate 2-3 rounds until clear; rely on judgment; keep questions concise
  - After clarification is complete: MUST use TodoWrite to create a task tracking list with the workflow steps

- **Step 2: codeagent-wrapper Deep Analysis (Plan Mode Style) [USE CODEAGENT-WRAPPER ONLY]**

MUST use the Bash tool to invoke `codeagent-wrapper` for deep analysis. Do NOT use the Read/Glob/Grep tools directly - delegate all exploration to codeagent-wrapper.

**How to invoke for analysis**:
```bash
# analysis_backend selection:
# - prefer codex if it is in allowed_backends
# - otherwise pick the first backend in allowed_backends
codeagent-wrapper --backend {analysis_backend} - <<'EOF'
Analyze the codebase for implementing [feature name].

Requirements:
- [requirement 1]
- [requirement 2]

Deliverables:
1. Explore codebase structure and existing patterns
2. Evaluate implementation options with trade-offs
3. Make architectural decisions
4. Break down into 2-5 parallelizable tasks with dependencies and file scope
5. Classify each task with a single `type`: `default` / `ui` / `quick-fix`
6. Determine if UI work is needed (check for .css/.tsx/.vue files)

Output the analysis following the structure below.
EOF
```

**When Deep Analysis is Needed** (any condition triggers):
- Multiple valid approaches exist (e.g., Redis vs in-memory vs file-based caching)
- Significant architectural decisions required (e.g., WebSockets vs SSE vs polling)
- Large-scale changes touching many files or systems
- Unclear scope requiring exploration first

**UI Detection Requirements**:
- During analysis, output whether the task needs UI work (yes/no) and the evidence
- UI criteria: presence of style assets (.css, .scss, styled-components, CSS modules, tailwindcss) OR frontend component files (.tsx, .jsx, .vue)

**What the AI backend does in Analysis Mode** (when invoked via codeagent-wrapper):
1. **Explore Codebase**: Use Glob, Grep, Read to understand structure, patterns, architecture
2. **Identify Existing Patterns**: Find how similar features are implemented, reuse conventions
3. **Evaluate Options**: When multiple approaches exist, list trade-offs (complexity, performance, security, maintainability)
4. **Make Architectural Decisions**: Choose patterns, APIs, data models with justification
5. **Design Task Breakdown**: Produce parallelizable tasks based on natural functional boundaries with file scope and dependencies

**Analysis Output Structure**:
```
## Context & Constraints
[Tech stack, existing patterns, constraints discovered]

## Codebase Exploration
[Key files, modules, patterns found via Glob/Grep/Read]

## Implementation Options (if multiple approaches)
| Option | Pros | Cons | Recommendation |

## Technical Decisions
[API design, data models, architecture choices made]

## Task Breakdown
[2-5 tasks with: ID, description, file scope, dependencies, test command, type(default|ui|quick-fix)]

## UI Determination
needs_ui: [true/false]
evidence: [files and reasoning tied to style + component criteria]
```

**Skip Deep Analysis When**:
- Simple, straightforward implementation with an obvious approach
- Small changes confined to 1-2 files
- Clear requirements with a single implementation path

- **Step 3: Generate Development Documentation**
  - Invoke the dev-plan-generator agent
  - When creating `dev-plan.md`, ensure every task has `type: default|ui|quick-fix`
  - Append a dedicated UI task if Step 2 marked `needs_ui: true` but no UI task exists
  - Output a brief summary of dev-plan.md:
    - Number of tasks and their IDs
    - Task type for each task
    - File scope for each task
    - Dependencies between tasks
    - Test commands
  - Use AskUserQuestion to confirm with the user:
    - Question: "Proceed with this development plan?" (state the backend routing rules and any forced fallback due to allowed_backends)
    - Options: "Confirm and execute" / "Need adjustments"
    - If the user chooses "Need adjustments", return to Step 1 or Step 2 based on feedback

- **Step 4: Parallel Development Execution [CODEAGENT-WRAPPER ONLY - NO DIRECT EDITS]**
  - MUST use the Bash tool to invoke `codeagent-wrapper --parallel` for ALL code changes
  - NEVER use the Edit, Write, MultiEdit, or Task tools to modify code directly
  - Backend routing (must be deterministic and enforceable; see the sketch after this step):
    - Task field: `type: default|ui|quick-fix` (missing → treat as `default`)
    - Preferred backend by type:
      - `default` → `codex`
      - `ui` → `gemini` (enforced when allowed)
      - `quick-fix` → `claude`
    - If the user selected ONLY `codex`: all tasks MUST use `codex`
    - Otherwise, if the preferred backend is not in `allowed_backends`, fall back to the first available backend by priority: `codex` → `claude` → `gemini`
  - Build ONE `--parallel` config that includes all tasks in `dev-plan.md` and submit it once via the Bash tool:
```bash
# One-shot submission - the wrapper handles topology + concurrency
codeagent-wrapper --parallel <<'EOF'
---TASK---
id: [task-id-1]
backend: [routed-backend-from-type-and-allowed_backends]
workdir: .
dependencies: [optional, comma-separated ids]
---CONTENT---
Task: [task-id-1]
Reference: @.claude/specs/{feature_name}/dev-plan.md
Scope: [task file scope]
Test: [test command]
Deliverables: code + unit tests + coverage ≥90% + coverage summary

---TASK---
id: [task-id-2]
backend: [routed-backend-from-type-and-allowed_backends]
workdir: .
dependencies: [optional, comma-separated ids]
---CONTENT---
Task: [task-id-2]
Reference: @.claude/specs/{feature_name}/dev-plan.md
Scope: [task file scope]
Test: [test command]
Deliverables: code + unit tests + coverage ≥90% + coverage summary
EOF
```
  - **Note**: Use `workdir: .` (the current directory) for all tasks unless a specific subdirectory is required
  - Execute independent tasks concurrently; serialize conflicting ones; track coverage reports
  - The backend is routed deterministically based on task `type`; no manual intervention is needed

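A minimal routing sketch (illustrative only: `route_backend` and the `ALLOWED_BACKENDS` array are hypothetical names, not part of codeagent-wrapper; the orchestrator applies this logic itself when filling in each task's `backend` field):

```bash
#!/bin/bash
# Step 4 routing rule as a function: preferred backend by task type,
# falling back through codex -> claude -> gemini when not allowed.
ALLOWED_BACKENDS=(codex gemini)   # example Step 0 selection

route_backend() {
  local type="${1:-default}" preferred b
  case "$type" in
    ui)        preferred="gemini" ;;
    quick-fix) preferred="claude" ;;
    *)         preferred="codex" ;;
  esac
  for b in "$preferred" codex claude gemini; do
    if printf '%s\n' "${ALLOWED_BACKENDS[@]}" | grep -qx "$b"; then
      echo "$b"
      return 0
    fi
  done
  return 1  # no allowed backend at all
}

route_backend ui         # -> gemini
route_backend quick-fix  # -> codex (claude not allowed, so fall back by priority)
```
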
- **Step 5: Coverage Validation**
  - Validate each task's coverage:
    - All ≥90% → pass
    - Any <90% → request more tests (max 2 rounds); see the example below

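For instance, a pytest-based task could be gated like this (paths are illustrative; `--cov-fail-under` makes pytest-cov fail the run below the threshold):

```bash
# Fails with a non-zero exit code if total coverage drops below 90%.
pytest tests/auth --cov=src/auth --cov-report=term --cov-fail-under=90
```
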
- **Step 6: Completion Summary**
  - Provide the completed task list, coverage per task, and key file changes

**Error Handling**
- **codeagent-wrapper failure**: Retry once with the same input; if it still fails, log the error and ask the user for guidance
- **Insufficient coverage (<90%)**: Request more tests from the failed task (max 2 rounds); if it still fails, report to the user
- **Dependency conflicts**:
  - Circular dependencies: codeagent-wrapper will detect them and fail with an error; revise the task breakdown to remove cycles
  - Missing dependencies: Ensure all task IDs referenced in the `dependencies` field exist
- **Parallel execution timeout**: Individual tasks time out after 2 hours (configurable via CODEX_TIMEOUT; see the sketch after this list); failed tasks can be retried individually
- **Backend unavailable**: If a routed backend is unavailable, fall back to another backend in `allowed_backends` (priority: codex → claude → gemini); if none works, fail with a clear error message

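A hedged sketch of the timeout override mentioned above (it assumes codeagent-wrapper reads `CODEX_TIMEOUT` as seconds from the environment, and `tasks.txt` is a hypothetical file holding the `---TASK---` config from Step 4; check your wrapper version):

```bash
# Raise the per-task timeout to 3 hours for one long-running parallel batch.
CODEX_TIMEOUT=10800 codeagent-wrapper --parallel < tasks.txt
```
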
**Quality Standards**
- Code coverage ≥90%
- Tasks based on natural functional boundaries (typically 2-5)
- Each task has exactly one `type: default|ui|quick-fix`
- Backend routed by `type`: `default`→codex, `ui`→gemini, `quick-fix`→claude (with allowed_backends fallback)
- Documentation must be minimal yet actionable
- No verbose implementations; only essential code

**Communication Style**
- Be direct and concise
- Report progress at each workflow step
- Highlight blockers immediately
- Provide actionable next steps when coverage fails
- Prioritize speed via parallelization while enforcing coverage validation
124 skills/dev/agents/dev-plan-generator.md Normal file
@@ -0,0 +1,124 @@
---
name: dev-plan-generator
description: Use this agent when you need to generate a structured development plan document (`dev-plan.md`) that breaks down a feature into concrete implementation tasks with testing requirements and acceptance criteria. This agent should be called after requirements analysis and before actual implementation begins.\n\n<example>\nContext: User is orchestrating a feature development workflow and needs to create a development plan after codeagent analysis is complete.\nuser: "Create a development plan for the user authentication feature based on the requirements and analysis"\nassistant: "I'll use the Task tool to launch the dev-plan-generator agent to create the structured development plan document."\n<commentary>\nThe user needs a dev-plan.md document generated from requirements and analysis. Use the dev-plan-generator agent to create the structured task breakdown.\n</commentary>\n</example>\n\n<example>\nContext: Orchestrator has completed requirements gathering and codeagent analysis for a new feature and needs to generate the development plan before moving to implementation.\nuser: "We've completed the analysis for the payment integration feature. Generate the development plan."\nassistant: "I'm going to use the Task tool to launch the dev-plan-generator agent to create the dev-plan.md document with task breakdown and testing requirements."\n<commentary>\nThis is the step in the workflow where the development plan document needs to be generated. Use the dev-plan-generator agent to create the structured plan.\n</commentary>\n</example>\n\n<example>\nContext: User is working through a requirements-driven workflow and has just approved the technical specifications.\nuser: "The specs look good. Let's move forward with creating the implementation plan."\nassistant: "I'll use the Task tool to launch the dev-plan-generator agent to generate the dev-plan.md document with the task breakdown."\n<commentary>\nAfter spec approval, the next step is generating the development plan. Use the dev-plan-generator agent to create the structured document.\n</commentary>\n</example>
tools: Glob, Grep, Read, Edit, Write, TodoWrite
model: sonnet
color: green
---

You are a specialized Development Plan Document Generator. Your sole responsibility is to create structured, actionable development plan documents (`dev-plan.md`) that break down features into concrete implementation tasks.

## Your Role

You receive context from an orchestrator, including:
- Feature requirements description
- codeagent analysis results (feature highlights, task decomposition, UI detection flag, and task typing hints)
- Feature name (in kebab-case format)

Your output is a single file: `./.claude/specs/{feature_name}/dev-plan.md`

## Document Structure You Must Follow

```markdown
# {Feature Name} - Development Plan

## Overview
[One-sentence description of core functionality]

## Task Breakdown

### Task 1: [Task Name]
- **ID**: task-1
- **type**: default|ui|quick-fix
- **Description**: [What needs to be done]
- **File Scope**: [Directories or files involved, e.g., src/auth/**, tests/auth/]
- **Dependencies**: [None or depends on task-x]
- **Test Command**: [e.g., pytest tests/auth --cov=src/auth --cov-report=term]
- **Test Focus**: [Scenarios to cover]

### Task 2: [Task Name]
...

(Tasks based on natural functional boundaries, typically 2-5)

## Acceptance Criteria
- [ ] Feature point 1
- [ ] Feature point 2
- [ ] All unit tests pass
- [ ] Code coverage ≥90%

## Technical Notes
- [Key technical decisions]
- [Constraints to be aware of]
```

## Generation Rules You Must Enforce

1. **Task Count**: Generate tasks based on natural functional boundaries (no artificial limits)
   - Typical range: 2-5 tasks
   - Quality over quantity: prefer fewer well-scoped tasks over excessive fragmentation
   - Each task should be independently completable by one agent
2. **Task Requirements**: Each task MUST include:
   - Clear ID (task-1, task-2, etc.)
   - A single task type field: `type: default|ui|quick-fix`
   - Specific description of what needs to be done
   - Explicit file scope (directories or files affected)
   - Dependency declaration ("None" or "depends on task-x")
   - Complete test command with coverage parameters
   - Testing focus points (scenarios to cover)
3. **Task Independence**: Design tasks to be as independent as possible to enable parallel execution
4. **Test Commands**: Must include coverage parameters (e.g., `--cov=module --cov-report=term` for pytest, `--coverage` for npm; see the sketch after this list)
5. **Coverage Threshold**: Always require ≥90% code coverage in acceptance criteria

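Two illustrative test commands that satisfy rule 4 (the module paths and the jest-style flag pass-through are assumptions, not project specifics):

```bash
# pytest: enforce the 90% gate directly in the command.
pytest tests/auth --cov=src/auth --cov-report=term --cov-fail-under=90

# npm (jest): forward --coverage through npm test.
npm test -- --coverage
```
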
## Your Workflow

1. **Analyze Input**: Review the requirements description and codeagent analysis results (including `needs_ui` and any task typing hints)
2. **Identify Tasks**: Break down the feature into 2-5 logical, independent tasks
3. **Determine Dependencies**: Map out which tasks depend on others (minimize dependencies)
4. **Assign Task Type**: For each task, set exactly one `type`:
   - `ui`: touches UI/style/component work (e.g., .css/.scss/.tsx/.jsx/.vue, tailwind, design tweaks)
   - `quick-fix`: small, fast changes (config tweaks, small bug fix, minimal scope); do NOT use for UI work
   - `default`: everything else
   - Note: `/dev` Step 4 routes the backend by `type` (default→codex, ui→gemini, quick-fix→claude; missing type → default)
5. **Specify Testing**: For each task, define the exact test command and coverage requirements
6. **Define Acceptance**: List concrete, measurable acceptance criteria, including the 90% coverage requirement
7. **Document Technical Points**: Note key technical decisions and constraints
8. **Write File**: Use the Write tool to create `./.claude/specs/{feature_name}/dev-plan.md`

## Quality Checks Before Writing

- [ ] Task count is between 2-5
- [ ] Every task has all required fields (ID, type, Description, File Scope, Dependencies, Test Command, Test Focus)
- [ ] Test commands include coverage parameters
- [ ] Dependencies are explicitly stated
- [ ] Acceptance criteria include the 90% coverage requirement
- [ ] File scope is specific (not vague like "all files")
- [ ] Testing focus is concrete (not generic like "test everything")

## Critical Constraints

- **Document Only**: You generate documentation. You do NOT execute code, run tests, or modify source files.
- **Single Output**: You produce exactly one file: `dev-plan.md` in the correct location
- **Path Accuracy**: The path must be `./.claude/specs/{feature_name}/dev-plan.md` where {feature_name} matches the input
- **Language Matching**: Output language matches user input (Chinese input → Chinese doc, English input → English doc)
- **Structured Format**: Follow the exact markdown structure provided

## Example Output Quality

Refer to the user login example in your instructions as the quality benchmark. Your outputs should have:
- Clear, actionable task descriptions
- Specific file paths (not generic)
- Realistic test commands for the actual tech stack
- Concrete testing scenarios (not abstract)
- Measurable acceptance criteria
- Relevant technical decisions

## Error Handling

If the input context is incomplete or unclear:
1. Request the missing information explicitly
2. Do NOT proceed with generating a low-quality document
3. Do NOT make up requirements or technical details
4. Ask for clarification on: feature scope, tech stack, testing framework, file structure

Remember: Your document will be used by other agents to implement the feature. Precision and completeness are critical. Every field must be filled with specific, actionable information.
9 skills/omo/.claude-plugin/plugin.json Normal file
@@ -0,0 +1,9 @@
{
  "name": "omo",
  "description": "Multi-agent orchestration for code analysis, bug investigation, fix planning, and implementation with intelligent routing to specialized agents",
  "version": "5.6.1",
  "author": {
    "name": "cexll",
    "email": "cexll@cexll.com"
  }
}
@@ -1,6 +1,12 @@
# OmO Multi-Agent Orchestration

OmO (Oh-My-OpenCode) is a multi-agent orchestration skill that uses Sisyphus as the primary coordinator to delegate tasks to specialized agents.
OmO (Oh-My-OpenCode) is a multi-agent orchestration skill that delegates tasks to specialized agents based on routing signals.

## Installation

```bash
python3 install.py --module omo
```

## Quick Start

@@ -12,19 +18,17 @@ OmO (Oh-My-OpenCode) is a multi-agent orchestration skill that uses Sisyphus as

| Agent | Role | Backend | Model |
|-------|------|---------|-------|
| sisyphus | Primary orchestrator | claude | claude-sonnet-4-20250514 |
| oracle | Technical advisor (EXPENSIVE) | claude | claude-sonnet-4-20250514 |
| librarian | External research | claude | claude-sonnet-4-5-20250514 |
| explore | Codebase search (FREE) | opencode | opencode/grok-code |
| develop | Code implementation | codex | (default) |
| oracle | Technical advisor | claude | claude-opus-4-5-20251101 |
| librarian | External research | claude | claude-sonnet-4-5-20250929 |
| explore | Codebase search | opencode | opencode/grok-code |
| develop | Code implementation | codex | gpt-5.2 |
| frontend-ui-ux-engineer | UI/UX specialist | gemini | gemini-3-pro-preview |
| document-writer | Documentation | gemini | gemini-3-flash-preview |

## How It Works

1. `/omo` loads Sisyphus as the entry point
2. Sisyphus analyzes your request via routing signals
3. Based on task type, Sisyphus either:
1. `/omo` analyzes your request via routing signals
2. Based on task type, it either:
   - Answers directly (analysis/explanation tasks - no code changes)
   - Delegates to specialized agents (implementation tasks)
   - Fires parallel agents (exploration + research)
@@ -44,7 +48,7 @@ OmO (Oh-My-OpenCode) is a multi-agent orchestration skill that uses Sisyphus as

## Agent Delegation

Sisyphus delegates via codeagent-wrapper with full Context Pack:
Delegates via codeagent-wrapper with full Context Pack:

```bash
codeagent-wrapper --agent oracle - . <<'EOF'
@@ -70,11 +74,43 @@ Agent-model mappings are configured in `~/.codeagent/models.json`:

```json
{
  "default_backend": "opencode",
  "default_model": "opencode/grok-code",
  "default_backend": "codex",
  "default_model": "gpt-5.2",
  "agents": {
    "sisyphus": {"backend": "claude", "model": "claude-sonnet-4-20250514"},
    "oracle": {"backend": "claude", "model": "claude-sonnet-4-20250514"}
    "oracle": {
      "backend": "claude",
      "model": "claude-opus-4-5-20251101",
      "description": "Technical advisor",
      "yolo": true
    },
    "librarian": {
      "backend": "claude",
      "model": "claude-sonnet-4-5-20250929",
      "description": "Researcher",
      "yolo": true
    },
    "explore": {
      "backend": "opencode",
      "model": "opencode/grok-code",
      "description": "Code search"
    },
    "frontend-ui-ux-engineer": {
      "backend": "gemini",
      "model": "gemini-3-pro-preview",
      "description": "Frontend engineer"
    },
    "document-writer": {
      "backend": "gemini",
      "model": "gemini-3-flash-preview",
      "description": "Documentation"
    },
    "develop": {
      "backend": "codex",
      "model": "gpt-5.2",
      "description": "codex develop",
      "yolo": true,
      "reasoning": "xhigh"
    }
  }
}
```
@@ -82,4 +118,4 @@ Agent-model mappings are configured in `~/.codeagent/models.json`:

## Requirements

- codeagent-wrapper with `--agent` support
- Backend CLIs: claude, opencode, gemini
- Backend CLIs: claude, opencode, codex, gemini

9 skills/sparv/.claude-plugin/plugin.json Normal file
@@ -0,0 +1,9 @@
{
  "name": "sparv",
  "description": "Minimal SPARV workflow (Specify→Plan→Act→Review→Vault) with 10-point spec gate, unified journal, 2-action saves, 3-failure protocol, and EHRB risk detection.",
  "version": "1.1.0",
  "author": {
    "name": "cexll",
    "email": "cexll@cexll.com"
  }
}
96 skills/sparv/README.md Normal file
@@ -0,0 +1,96 @@
# SPARV - Unified Development Workflow (Simplified)

**SPARV** is an end-to-end development workflow: maximize delivery quality with minimal rules while avoiding "infinite iteration + self-rationalization."

```
S-Specify → P-Plan → A-Act → R-Review → V-Vault
 Clarify     Plan     Execute  Review     Archive
```

## Key Changes (Over-engineering Removed)

- External memory merged from 3 files into 1 `.sparv/journal.md`
- Specify scoring simplified from a 100-point to a 10-point scale (threshold `>=9`)
- Reboot Test reduced from 5 questions to 3 questions
- Removed concurrency locks (Claude is single-threaded; locks only cause failures)

## Installation

SPARV is installed at `~/.claude/skills/sparv/`.

Install from ZIP:

```bash
unzip sparv.zip -d ~/.claude/skills/
```

## Quick Start

Run in project root:

```bash
~/.claude/skills/sparv/scripts/init-session.sh --force
```

Creates:

```
.sparv/
├── state.yaml
├── journal.md
└── history/
```

## External Memory System (Two Files)

- `state.yaml`: State (minimum fields: `session_id/current_phase/action_count/consecutive_failures`)
- `journal.md`: Unified log (Plan/Progress/Findings all go here)

After archiving:

```
.sparv/history/<session_id>/
├── state.yaml
└── journal.md
```

## Key Numbers

| Number | Meaning |
|--------|---------|
| **9/10** | Specify score passing threshold |
| **2** | Write to journal every 2 tool calls |
| **3** | Failure retry limit / Review fix limit |
| **3** | Reboot Test question count |
| **12** | Default max iterations (optional safety valve) |

## Script Tools

```bash
~/.claude/skills/sparv/scripts/init-session.sh --force
~/.claude/skills/sparv/scripts/save-progress.sh "Edit" "done"
~/.claude/skills/sparv/scripts/check-ehrb.sh --diff --fail-on-flags
~/.claude/skills/sparv/scripts/failure-tracker.sh fail --note "tests are flaky"
~/.claude/skills/sparv/scripts/reboot-test.sh --strict
~/.claude/skills/sparv/scripts/archive-session.sh
```

## Hooks

Hooks defined in `hooks/hooks.json`:

- PostToolUse: 2-Action auto-write to `journal.md`
- PreToolUse: EHRB risk prompt (default dry-run)
- Stop: 3-question reboot test (strict)

## References

- `SKILL.md`: Skill definition (for agent use)
- `references/methodology.md`: Methodology quick reference

---

*Quality over speed—iterate until truly complete.*
153 skills/sparv/SKILL.md Normal file
@@ -0,0 +1,153 @@
---
name: sparv
description: Minimal SPARV workflow (Specify→Plan→Act→Review→Vault) with 10-point spec gate, unified journal, 2-action saves, 3-failure protocol, and EHRB risk detection.
---

# SPARV

Five-phase workflow: **S**pecify → **P**lan → **A**ct → **R**eview → **V**ault.

Goal: Complete "requirements → verifiable delivery" in one pass, recording key decisions in external memory instead of relying on assumptions.

## Core Rules (Mandatory)

- **10-Point Specify Gate**: Spec score `0-10`; must be `>=9` to enter Plan.
- **2-Action Save**: Append an entry to `.sparv/journal.md` every 2 tool calls.
- **3-Failure Protocol**: Stop and escalate to the user after 3 consecutive failures.
- **EHRB**: Require explicit user confirmation when high risk is detected (production/sensitive data/destructive/billing API/security-critical).
- **Fixed Phase Names**: `specify|plan|act|review|vault` (stored in `.sparv/state.yaml:current_phase`).

## Enhanced Rules (v1.1)

### Uncertainty Declaration (G3)

When any Specify dimension scores < 2:
- Declare: `UNCERTAIN: <what> | ASSUMPTION: <fallback>`
- List all assumptions in the journal before Plan
- Offer 2-3 options for ambiguous requirements

Example:
```
UNCERTAIN: deployment target | ASSUMPTION: Docker container
UNCERTAIN: auth method | OPTIONS: JWT / OAuth2 / Session
```

### Requirement Routing

| Mode | Condition | Flow |
|------|-----------|------|
| **Quick** | score >= 9 AND <= 3 files AND no EHRB | Specify → Act → Review |
| **Full** | otherwise | Specify → Plan → Act → Review → Vault |

Quick mode skips the formal Plan phase but still requires:
- Completion promise written to the journal
- 2-action save rule applies
- Review phase mandatory

### Context Acquisition (Optional)

Before Specify scoring:
1. Check `.sparv/kb.md` for existing patterns/decisions
2. If insufficient, scan the codebase for relevant files
3. Document findings in the journal under `## Context`

Skip if the user explicitly provides full context.

### Knowledge Base Maintenance

During the Vault phase, update `.sparv/kb.md`:
- **Patterns**: Reusable code patterns discovered
- **Decisions**: Architectural choices + rationale
- **Gotchas**: Common pitfalls + solutions

### CHANGELOG Update

Use during the Review or Vault phase for non-trivial changes:
```bash
~/.claude/skills/sparv/scripts/changelog-update.sh --type <Added|Changed|Fixed|Removed> --desc "..."
```

## External Memory (Two Files)

Initialize (run in project root):

```bash
~/.claude/skills/sparv/scripts/init-session.sh --force
```

File conventions:

- `.sparv/state.yaml`: State machine (minimum fields: `session_id/current_phase/action_count/consecutive_failures`)
- `.sparv/journal.md`: Unified log (Plan/Progress/Findings all go here)
- `.sparv/history/<session_id>/`: Archive directory

## Phase 1: Specify (10-Point Scale)

Each item scores 0/1/2, total 0-10:

1) **Value**: Why do it; are benefits/metrics verifiable
2) **Scope**: MVP + what's out of scope
3) **Acceptance**: Testable acceptance criteria
4) **Boundaries**: Error/performance/compatibility/security critical boundaries
5) **Risk**: EHRB/dependencies/unknowns + handling approach

`score < 9`: Keep asking questions; do not enter Plan. For example, 2/2/2/2/1 totals 9 and passes the gate; 2/2/2/1/1 totals 8 and does not.
`score >= 9`: Write a clear `completion_promise` (a verifiable completion commitment), then enter Plan.

## Phase 2: Plan

- Break into atomic tasks (2-5 minute granularity), each with a verifiable output/test point.
- Write the plan to `.sparv/journal.md` (Plan section or append directly); one way is shown below.

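One way to record the plan (illustrative: any append to `.sparv/journal.md` satisfies the convention; the task wording and timestamp are hypothetical):

```bash
cat >> .sparv/journal.md <<'EOF'

## Plan - 14:30
- [ ] task-1: add failing test for invalid token (verify: pytest fails)
- [ ] task-2: implement token validation (verify: pytest passes)
EOF
```
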
## Phase 3: Act

- **TDD Rule**: No failing test → no production code.
- Auto-write the journal every 2 actions (PostToolUse hook).
- Failure counting (3-Failure Protocol):

```bash
~/.claude/skills/sparv/scripts/failure-tracker.sh fail --note "short blocker"
~/.claude/skills/sparv/scripts/failure-tracker.sh reset
```

## Phase 4: Review

- Two stages: Spec conformance → Code quality (correctness/performance/security/tests).
- Maximum 3 fix rounds; escalate to the user if exceeded.

Run the 3-question reboot test before the session ends:

```bash
~/.claude/skills/sparv/scripts/reboot-test.sh --strict
```

## Phase 5: Vault

Archive the current session:

```bash
~/.claude/skills/sparv/scripts/archive-session.sh
```

## Script Tools

| Script | Purpose |
|--------|---------|
| `scripts/init-session.sh` | Initialize `.sparv/`, generate `state.yaml` + `journal.md` |
| `scripts/save-progress.sh` | Maintain `action_count`, append to `journal.md` every 2 actions |
| `scripts/check-ehrb.sh` | Scan diff/text, output (optionally write) `ehrb_flags` |
| `scripts/failure-tracker.sh` | Maintain `consecutive_failures`, exit code 3 when reaching 3 |
| `scripts/reboot-test.sh` | 3-question self-check (optional strict mode) |
| `scripts/archive-session.sh` | Archive `journal.md` + `state.yaml` to `history/` |

## Auto Hooks

`hooks/hooks.json`:

- PostToolUse: `save-progress.sh` (2-Action save)
- PreToolUse: `check-ehrb.sh --diff --dry-run` (prompt only, no state write)
- Stop: `reboot-test.sh --strict` (3-question self-check)

---

*Quality over speed—iterate until truly complete.*
37 skills/sparv/hooks/hooks.json Normal file
@@ -0,0 +1,37 @@
{
  "description": "SPARV auto-hooks for 2-Action save, EHRB detection, and 3-Question reboot test",
  "hooks": {
    "PostToolUse": [
      {
        "matcher": "Edit|Write|Bash|Read|Glob|Grep",
        "hooks": [
          {
            "type": "command",
            "command": "[ -f .sparv/state.yaml ] && ${SKILL_PATH}/scripts/save-progress.sh \"${TOOL_NAME:-unknown}\" \"completed\" 2>/dev/null || true"
          }
        ]
      }
    ],
    "PreToolUse": [
      {
        "matcher": "Edit|Write",
        "hooks": [
          {
            "type": "command",
            "command": "[ -f .sparv/state.yaml ] && ${SKILL_PATH}/scripts/check-ehrb.sh --diff --dry-run 2>/dev/null || true"
          }
        ]
      }
    ],
    "Stop": [
      {
        "hooks": [
          {
            "type": "command",
            "command": "[ -f .sparv/state.yaml ] && ${SKILL_PATH}/scripts/reboot-test.sh --strict 2>/dev/null || true"
          }
        ]
      }
    ]
  }
}
132 skills/sparv/references/methodology.md Normal file
@@ -0,0 +1,132 @@
# SPARV Methodology (Short)

This document is a quick reference; the canonical spec is in `SKILL.md`.

## Five Phases

- **Specify**: Write requirements as verifiable specs (10-point gate)
- **Plan**: Break into atomic tasks (2-5 minute granularity)
- **Act**: TDD-driven implementation; write to the journal every 2 actions
- **Review**: Spec conformance → Code quality; maximum 3 fix rounds
- **Vault**: Archive session (state + journal)

## Enhanced Rules (v1.1)

### Uncertainty Declaration (G3)

When any Specify dimension scores < 2:
- Declare: `UNCERTAIN: <what> | ASSUMPTION: <fallback>`
- List all assumptions in the journal before Plan
- Offer 2-3 options for ambiguous requirements

### Requirement Routing

| Mode | Condition | Flow |
|------|-----------|------|
| **Quick** | score >= 9 AND <= 3 files AND no EHRB | Specify → Act → Review |
| **Full** | otherwise | Specify → Plan → Act → Review → Vault |

### Context Acquisition (Optional)

Before Specify scoring:
1. Check `.sparv/kb.md` for existing patterns/decisions
2. If insufficient, scan the codebase for relevant files
3. Document findings in the journal under `## Context`

### Knowledge Base Maintenance

During the Vault phase, update `.sparv/kb.md`:
- **Patterns**: Reusable code patterns discovered
- **Decisions**: Architectural choices + rationale
- **Gotchas**: Common pitfalls + solutions

### CHANGELOG Update

```bash
~/.claude/skills/sparv/scripts/changelog-update.sh --type <Added|Changed|Fixed|Removed> --desc "..."
```

## Specify (10-Point Scale)

Each item scores 0/1/2, total 0-10; `>=9` required to enter Plan:

1) Value: Why do it; are benefits/metrics verifiable
2) Scope: MVP + what's out of scope
3) Acceptance: Testable acceptance criteria
4) Boundaries: Error/performance/compatibility/security critical boundaries
5) Risk: EHRB/dependencies/unknowns + handling approach

If below threshold, keep asking—don't "just start coding."

## Journal Convention (Unified Log)

All Plan/Progress/Findings go into `.sparv/journal.md`.

Recommended format (just append; no need to insert into specific sections):

```markdown
## 14:32 - Action #12
- Tool: Edit
- Result: Updated auth flow
- Next: Add test for invalid token
```

## 2-Action Save

The hook triggers `save-progress.sh` after each tool call; the script only writes to the journal when `action_count` is even, as in the sketch below.

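A sketch of the even-count gate (illustrative: `append_entry` stands in for the real journal-writing logic in `scripts/save-progress.sh`):

```bash
action_count=$((action_count + 1))
if [ $((action_count % 2)) -eq 0 ]; then
  append_entry   # write a journal block like the one shown above
fi
```
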
## 3-Failure Protocol

When you fail consecutively, escalate by level:

1. Diagnose and fix (read errors, verify assumptions, minimal fix)
2. Alternative approach (change strategy/entry point)
3. Escalate (stop: document blocker + attempted solutions + request user decision)

Tools:

```bash
~/.claude/skills/sparv/scripts/failure-tracker.sh fail --note "short reason"
~/.claude/skills/sparv/scripts/failure-tracker.sh reset
```

## 3-Question Reboot Test

Self-check before the session ends (or when lost):

1) Where am I? (current_phase)
2) Where am I going? (next_phase)
3) How do I prove completion? (completion_promise + evidence at journal end)

```bash
~/.claude/skills/sparv/scripts/reboot-test.sh --strict
```

## EHRB (High-Risk Changes)

Detection items (any match requires explicit user confirmation):

- Production access
- Sensitive data
- Destructive operations
- Billing external API
- Security-critical changes

```bash
~/.claude/skills/sparv/scripts/check-ehrb.sh --diff --fail-on-flags
```

## state.yaml (Minimal Schema)

Scripts only enforce 4 core fields; other fields are optional:

```yaml
session_id: "20260114-143022"
current_phase: "act"
action_count: 14
consecutive_failures: 0
max_iterations: 12
iteration_count: 0
completion_promise: "All acceptance criteria have tests and are green."
ehrb_flags: []
```
95 skills/sparv/scripts/archive-session.sh Executable file
@@ -0,0 +1,95 @@
#!/bin/bash
# SPARV Session Archive Script
# Archives completed session from .sparv/plan/<session_id>/ to .sparv/history/<session_id>/

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/lib/state-lock.sh"

usage() {
  cat <<'EOF'
Usage: archive-session.sh [--dry-run]

Moves current session from .sparv/plan/<session_id>/ to .sparv/history/<session_id>/
Updates .sparv/history/index.md with session info.

Options:
  --dry-run    Show what would be archived without doing it
EOF
}

SPARV_ROOT=".sparv"
PLAN_DIR="$SPARV_ROOT/plan"
HISTORY_DIR="$SPARV_ROOT/history"

dry_run=0

while [ $# -gt 0 ]; do
  case "$1" in
    -h|--help) usage; exit 0 ;;
    --dry-run) dry_run=1; shift ;;
    *) usage >&2; exit 1 ;;
  esac
done

# Find active session
find_active_session() {
  if [ -d "$PLAN_DIR" ]; then
    local session
    session="$(ls -1 "$PLAN_DIR" 2>/dev/null | head -1)"
    if [ -n "$session" ] && [ -f "$PLAN_DIR/$session/state.yaml" ]; then
      echo "$session"
    fi
  fi
}

# Update history/index.md
update_history_index() {
  local session_id="$1"
  local index_file="$HISTORY_DIR/index.md"
  local state_file="$HISTORY_DIR/$session_id/state.yaml"

  [ -f "$index_file" ] || return 0

  # Get feature name from state.yaml
  local fname=""
  if [ -f "$state_file" ]; then
    fname="$(grep -E '^feature_name:' "$state_file" | sed -E 's/^feature_name:[[:space:]]*"?([^"]*)"?$/\1/' || true)"
  fi
  [ -z "$fname" ] && fname="unnamed"

  local month="${session_id:0:6}"
  local formatted_month="${month:0:4}-${month:4:2}"

  # Add to monthly section if not exists
  if ! grep -q "### $formatted_month" "$index_file"; then
    echo -e "\n### $formatted_month\n" >> "$index_file"
  fi
  echo "- \`${session_id}\` - $fname" >> "$index_file"
}

SESSION_ID="$(find_active_session)"

if [ -z "$SESSION_ID" ]; then
  echo "No active session to archive"
  exit 0
fi

SRC_DIR="$PLAN_DIR/$SESSION_ID"
DST_DIR="$HISTORY_DIR/$SESSION_ID"

if [ "$dry_run" -eq 1 ]; then
  echo "Would archive: $SRC_DIR -> $DST_DIR"
  exit 0
fi

# Create history directory and move session
mkdir -p "$HISTORY_DIR"
mv "$SRC_DIR" "$DST_DIR"

# Update index
update_history_index "$SESSION_ID"

echo "✅ Session archived: $SESSION_ID"
echo "📁 Location: $DST_DIR"
112 skills/sparv/scripts/changelog-update.sh Executable file
@@ -0,0 +1,112 @@
#!/bin/bash
# SPARV Changelog Update Script
# Adds entries to .sparv/CHANGELOG.md under the [Unreleased] section

set -e

usage() {
  cat <<'EOF'
Usage: changelog-update.sh --type <TYPE> --desc "description" [--file PATH]

Adds a changelog entry under the [Unreleased] section.

Options:
  --type TYPE   Change type: Added|Changed|Fixed|Removed
  --desc DESC   Description of the change
  --file PATH   Custom changelog path (default: .sparv/CHANGELOG.md)

Examples:
  changelog-update.sh --type Added --desc "User authentication module"
  changelog-update.sh --type Fixed --desc "Login timeout issue"
EOF
}

CHANGELOG=".sparv/CHANGELOG.md"
TYPE=""
DESC=""

while [ $# -gt 0 ]; do
  case "$1" in
    -h|--help) usage; exit 0 ;;
    --type) TYPE="$2"; shift 2 ;;
    --desc) DESC="$2"; shift 2 ;;
    --file) CHANGELOG="$2"; shift 2 ;;
    *) usage >&2; exit 1 ;;
  esac
done

# Validate inputs
if [ -z "$TYPE" ] || [ -z "$DESC" ]; then
  echo "❌ Error: --type and --desc are required" >&2
  usage >&2
  exit 1
fi

# Validate type
case "$TYPE" in
  Added|Changed|Fixed|Removed) ;;
  *)
    echo "❌ Error: Invalid type '$TYPE'. Must be: Added|Changed|Fixed|Removed" >&2
    exit 1
    ;;
esac

# Check changelog exists
if [ ! -f "$CHANGELOG" ]; then
  echo "❌ Error: Changelog not found: $CHANGELOG" >&2
  echo "   Run init-session.sh first to create it." >&2
  exit 1
fi

# Check if [Unreleased] section exists
if ! grep -q "## \[Unreleased\]" "$CHANGELOG"; then
  echo "❌ Error: [Unreleased] section not found in $CHANGELOG" >&2
  exit 1
fi

# Check if the type section already exists under [Unreleased]
# We need to insert after [Unreleased] but before the next ## section
TEMP_FILE=$(mktemp)
trap "rm -f $TEMP_FILE" EXIT

# Find if ### $TYPE exists between [Unreleased] and the next ## section
IN_UNRELEASED=0
TYPE_FOUND=0
TYPE_LINE=0
UNRELEASED_LINE=0
NEXT_SECTION_LINE=0

line_num=0
while IFS= read -r line; do
  line_num=$((line_num + 1))  # not ((line_num++)): that exits non-zero under `set -e` on the first pass
  if [[ "$line" =~ ^##[[:space:]]\[Unreleased\] ]]; then
    IN_UNRELEASED=1
    UNRELEASED_LINE=$line_num
  elif [[ $IN_UNRELEASED -eq 1 && "$line" =~ ^##[[:space:]] && ! "$line" =~ ^###[[:space:]] ]]; then
    NEXT_SECTION_LINE=$line_num
    break
  elif [[ $IN_UNRELEASED -eq 1 && "$line" =~ ^###[[:space:]]$TYPE ]]; then
    TYPE_FOUND=1
    TYPE_LINE=$line_num
  fi
done < "$CHANGELOG"

if [ $TYPE_FOUND -eq 1 ]; then
  # Append under the existing ### $TYPE section
  awk -v type_line="$TYPE_LINE" -v desc="$DESC" '
    NR == type_line { print; getline; print; print "- " desc; next }
    { print }
  ' "$CHANGELOG" > "$TEMP_FILE"
else
  # Create a new ### $TYPE section right after [Unreleased]
  awk -v unreleased_line="$UNRELEASED_LINE" -v type="$TYPE" -v desc="$DESC" '
    NR == unreleased_line { print; print ""; print "### " type; print "- " desc; next }
    { print }
  ' "$CHANGELOG" > "$TEMP_FILE"
fi

mv "$TEMP_FILE" "$CHANGELOG"

echo "✅ Added to $CHANGELOG:"
echo "   ### $TYPE"
echo "   - $DESC"
182 skills/sparv/scripts/check-ehrb.sh Executable file
@@ -0,0 +1,182 @@
#!/bin/bash
# EHRB Risk Detection Script
# Heuristically detects high-risk changes/specs and writes flags to .sparv/state.yaml:ehrb_flags.

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/lib/state-lock.sh"

usage() {
  cat <<'EOF'
Usage: check-ehrb.sh [options] [FILE...]

Options:
  --diff            Scan current git diff (staged + unstaged) and changed file names
  --clear           Clear ehrb_flags in .sparv/state.yaml (no scan needed)
  --dry-run         Do not write .sparv/state.yaml (print detected flags only)
  --fail-on-flags   Exit with code 2 if any flags are detected
  -h, --help        Show this help

Input:
  - --diff
  - positional FILE...
  - stdin (if piped)

Examples:
  check-ehrb.sh --diff --fail-on-flags
  check-ehrb.sh docs/feature-prd.md
  echo "touching production db" | check-ehrb.sh --fail-on-flags
EOF
}

die() {
  echo "❌ $*" >&2
  exit 1
}

is_piped_stdin() {
  [ ! -t 0 ]
}

git_text() {
  git diff --cached 2>/dev/null || true
  git diff 2>/dev/null || true
  (git diff --name-only --cached 2>/dev/null; git diff --name-only 2>/dev/null) | sort -u || true
}

render_inline_list() {
  if [ "$#" -eq 0 ]; then
    printf "[]"
    return 0
  fi
  printf "["
  local first=1 item
  for item in "$@"; do
    if [ "$first" -eq 1 ]; then
      first=0
    else
      printf ", "
    fi
    printf "\"%s\"" "$item"
  done
  printf "]"
}

write_ehrb_flags() {
  local list_value="$1"
  sparv_require_state_file
  sparv_state_validate_or_die
  sparv_yaml_set_raw ehrb_flags "$list_value"
}

scan_diff=0
dry_run=0
clear=0
fail_on_flags=0
declare -a files=()

while [ $# -gt 0 ]; do
  case "$1" in
    -h|--help)
      usage
      exit 0
      ;;
    --diff)
      scan_diff=1
      shift
      ;;
    --clear)
      clear=1
      shift
      ;;
    --dry-run)
      dry_run=1
      shift
      ;;
    --fail-on-flags)
      fail_on_flags=1
      shift
      ;;
    --)
      shift
      break
      ;;
    -*)
      die "Unknown argument: $1 (use --help for usage)"
      ;;
    *)
      files+=("$1")
      shift
      ;;
  esac
done

for path in "$@"; do
  files+=("$path")
done

scan_text=""

if [ "$scan_diff" -eq 1 ]; then
  if git rev-parse --is-inside-work-tree >/dev/null 2>&1; then
    scan_text+=$'\n'"$(git_text)"
  else
    die "--diff requires running inside a git repository"
  fi
fi

if [ "${#files[@]}" -gt 0 ]; then
  for path in "${files[@]}"; do
    [ -f "$path" ] || die "File not found: $path"
    scan_text+=$'\n'"$(cat "$path")"
  done
fi

if is_piped_stdin; then
  scan_text+=$'\n'"$(cat)"
fi

declare -a flags=()
if [ "$clear" -eq 1 ]; then
  flags=()
else
  [ -n "$scan_text" ] || die "No scannable input (use --help to see input methods)"

  if printf "%s" "$scan_text" | grep -Eiq '(^|[^a-z])(prod(uction)?|live)([^a-z]|$)|kubeconfig|kubectl|terraform|helm|eks|gke|aks'; then
    flags+=("production-access")
  fi
  if printf "%s" "$scan_text" | grep -Eiq 'pii|phi|hipaa|ssn|password|passwd|secret|token|api[ _-]?key|private key|credit card|身份证|银行卡|医疗|患者'; then
    flags+=("sensitive-data")
  fi
  if printf "%s" "$scan_text" | grep -Eiq 'rm[[:space:]]+-rf|drop[[:space:]]+table|delete[[:space:]]+from|truncate|terraform[[:space:]]+destroy|kubectl[[:space:]]+delete|drop[[:space:]]+database|wipe|purge'; then
    flags+=("destructive-ops")
  fi
  if printf "%s" "$scan_text" | grep -Eiq 'stripe|paypal|billing|charge|invoice|subscription|metering|twilio|sendgrid|openai|anthropic|cost|usage'; then
    flags+=("billing-external-api")
  fi
  if printf "%s" "$scan_text" | grep -Eiq 'auth|authentication|authorization|oauth|jwt|sso|encryption|crypto|tls|ssl|mfa|rbac|permission|权限|登录|认证'; then
    flags+=("security-critical")
  fi
fi

if [ "${#flags[@]}" -eq 0 ]; then
  echo "EHRB: No risk flags detected"
else
  echo "EHRB: Risk flags detected (require explicit user confirmation):"
  for f in ${flags[@]+"${flags[@]}"}; do
    echo "  - $f"
  done
fi

if [ "$dry_run" -eq 0 ]; then
  list_value="$(render_inline_list ${flags[@]+"${flags[@]}"})"
  write_ehrb_flags "$list_value"
  echo "Written to: $STATE_FILE (ehrb_flags: $list_value)"
fi

if [ "$fail_on_flags" -eq 1 ] && [ "${#flags[@]}" -gt 0 ]; then
  exit 2
fi

exit 0
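A minimal usage sketch, assuming the script lives alongside its siblings at skills/sparv/scripts/check-ehrb.sh and is run from the repository root (this hunk does not show the file path, so treat it as an assumption):

    ./skills/sparv/scripts/check-ehrb.sh --diff --dry-run        # print flags only; leave state.yaml untouched
    ./skills/sparv/scripts/check-ehrb.sh docs/feature-prd.md --fail-on-flags \
      || echo "flags detected (exit 2): get explicit user confirmation"
    ./skills/sparv/scripts/check-ehrb.sh --clear                 # reset ehrb_flags to []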
skills/sparv/scripts/failure-tracker.sh (135 lines, executable file)
@@ -0,0 +1,135 @@
#!/bin/bash
# SPARV 3-Failure Protocol Tracker
# Maintains consecutive_failures and escalates when reaching 3.
# Notes are appended to journal.md (unified log).

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/lib/state-lock.sh"

THRESHOLD=3

usage() {
  cat <<'EOF'
Usage: failure-tracker.sh <command> [options]

Commands:
  status               Show current consecutive_failures and protocol level
  fail [--note TEXT]   Increment consecutive_failures (exit 3 when reaching threshold)
  reset                Set consecutive_failures to 0

Auto-detects active session in .sparv/plan/<session_id>/
EOF
}

die() {
  echo "❌ $*" >&2
  exit 1
}

require_state() {
  # Auto-detect session (sets SPARV_DIR, STATE_FILE, JOURNAL_FILE)
  sparv_require_state_file
  sparv_state_validate_or_die
}

append_journal() {
  local level="$1"
  local note="${2:-}"
  local ts
  ts="$(date '+%Y-%m-%d %H:%M')"

  [ -f "$JOURNAL_FILE" ] || sparv_die "Cannot find $JOURNAL_FILE; run init-session.sh first"

  {
    echo
    echo "## Failure Protocol - $ts"
    echo "- level: $level"
    if [ -n "$note" ]; then
      echo "- note: $note"
    fi
  } >>"$JOURNAL_FILE"
}

protocol_level() {
  local count="$1"
  if [ "$count" -le 0 ]; then
    echo "0"
  elif [ "$count" -eq 1 ]; then
    echo "1"
  elif [ "$count" -eq 2 ]; then
    echo "2"
  else
    echo "3"
  fi
}

cmd="${1:-status}"
shift || true

note=""
case "$cmd" in
  -h|--help)
    usage
    exit 0
    ;;
  status)
    require_state
    current="$(sparv_yaml_get_int consecutive_failures 0)"
    level="$(protocol_level "$current")"
    echo "consecutive_failures: $current"
    case "$level" in
      0) echo "protocol: clean (no failures)" ;;
      1) echo "protocol: Attempt 1 - Diagnose and fix" ;;
      2) echo "protocol: Attempt 2 - Alternative approach" ;;
      3) echo "protocol: Attempt 3 - Escalate (pause, document, ask user)" ;;
    esac
    exit 0
    ;;
  fail)
    require_state
    if [ "${1:-}" = "--note" ]; then
      [ $# -ge 2 ] || die "--note requires an argument"
      note="$2"
      shift 2
    else
      note="$*"
      shift $#
    fi
    [ "$#" -eq 0 ] || die "Unknown argument: $1 (use --help for usage)"

    current="$(sparv_yaml_get_int consecutive_failures 0)"
    new_count=$((current + 1))
    sparv_yaml_set_int consecutive_failures "$new_count"

    level="$(protocol_level "$new_count")"
    case "$level" in
      1)
        echo "Attempt 1/3: Diagnose and fix"
        [ -n "$note" ] && append_journal "1" "$note"
        exit 0
        ;;
      2)
        echo "Attempt 2/3: Alternative approach"
        [ -n "$note" ] && append_journal "2" "$note"
        exit 0
        ;;
      3)
        echo "Attempt 3/3: Escalate"
        echo "3-Failure Protocol triggered: pause, document blocker and attempted solutions, request user decision."
        append_journal "3" "${note:-"(no note)"}"
        exit "$THRESHOLD"
        ;;
    esac
    ;;
  reset)
    require_state
    sparv_yaml_set_int consecutive_failures 0
    echo "consecutive_failures reset to 0"
    exit 0
    ;;
  *)
    die "Unknown command: $cmd (use --help for usage)"
    ;;
esac
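A short usage sketch (assumes an active session already exists under .sparv/plan/):

    ./skills/sparv/scripts/failure-tracker.sh status
    ./skills/sparv/scripts/failure-tracker.sh fail --note "tests still red after dependency bump"
    ./skills/sparv/scripts/failure-tracker.sh fail || echo "exit=$?"   # third consecutive failure exits 3 and journals the escalation
    ./skills/sparv/scripts/failure-tracker.sh reset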
skills/sparv/scripts/init-session.sh (235 lines, executable file)
@@ -0,0 +1,235 @@
#!/bin/bash
# SPARV Session Initialization
# Creates .sparv/plan/<session_id>/ with state.yaml and journal.md

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/lib/state-lock.sh"

usage() {
  cat <<'EOF'
Usage: init-session.sh [--force] [feature_name]

Creates .sparv/plan/<session_id>/ directory:
  - state.yaml (session state)
  - journal.md (unified log)

Also initializes:
  - .sparv/history/index.md (if not exists)
  - .sparv/CHANGELOG.md (if not exists)

Options:
  --force        Archive current session and start new one
  feature_name   Optional feature name for the session
EOF
}

SPARV_ROOT=".sparv"
PLAN_DIR="$SPARV_ROOT/plan"
HISTORY_DIR="$SPARV_ROOT/history"

force=0
feature_name=""

while [ $# -gt 0 ]; do
  case "$1" in
    -h|--help) usage; exit 0 ;;
    --force) force=1; shift ;;
    -*) usage >&2; exit 1 ;;
    *) feature_name="$1"; shift ;;
  esac
done

# Find current active session
find_active_session() {
  if [ -d "$PLAN_DIR" ]; then
    local session
    session="$(ls -1 "$PLAN_DIR" 2>/dev/null | head -1)"
    if [ -n "$session" ] && [ -f "$PLAN_DIR/$session/state.yaml" ]; then
      echo "$session"
    fi
  fi
}

# Archive a session to history
archive_session() {
  local session_id="$1"
  local src_dir="$PLAN_DIR/$session_id"
  local dst_dir="$HISTORY_DIR/$session_id"

  [ -d "$src_dir" ] || return 0

  mkdir -p "$HISTORY_DIR"
  mv "$src_dir" "$dst_dir"

  # Update index.md
  update_history_index "$session_id"

  echo "📦 Archived: $dst_dir"
}

# Update history/index.md
update_history_index() {
  local session_id="$1"
  local index_file="$HISTORY_DIR/index.md"
  local state_file="$HISTORY_DIR/$session_id/state.yaml"

  # Get feature name from state.yaml
  local fname=""
  if [ -f "$state_file" ]; then
    fname="$(grep -E '^feature_name:' "$state_file" | sed -E 's/^feature_name:[[:space:]]*"?([^"]*)"?$/\1/' || true)"
  fi
  [ -z "$fname" ] && fname="unnamed"

  local month="${session_id:0:6}"
  local formatted_month="${month:0:4}-${month:4:2}"
  local timestamp="${session_id:0:12}"

  # Append to index
  if [ -f "$index_file" ]; then
    # Add to monthly section if not exists
    if ! grep -q "### $formatted_month" "$index_file"; then
      echo -e "\n### $formatted_month\n" >> "$index_file"
    fi
    echo "- \`${session_id}\` - $fname" >> "$index_file"
  fi
}

# Initialize history/index.md if not exists
init_history_index() {
  local index_file="$HISTORY_DIR/index.md"
  [ -f "$index_file" ] && return 0

  mkdir -p "$HISTORY_DIR"
  cat > "$index_file" << 'EOF'
# History Index

This file records all completed sessions for traceability.

---

## Index

| Timestamp | Feature | Type | Status | Path |
|-----------|---------|------|--------|------|

---

## Monthly Archive

EOF
}

# Initialize CHANGELOG.md if not exists
init_changelog() {
  local changelog="$SPARV_ROOT/CHANGELOG.md"
  [ -f "$changelog" ] && return 0

  cat > "$changelog" << 'EOF'
# Changelog

All notable changes to this project will be documented in this file.
Format based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).

## [Unreleased]

EOF
}

# Initialize kb.md (knowledge base) if not exists
init_kb() {
  local kb_file="$SPARV_ROOT/kb.md"
  [ -f "$kb_file" ] && return 0

  cat > "$kb_file" << 'EOF'
# Knowledge Base

Cross-session knowledge accumulated during SPARV workflows.

---

## Patterns

<!-- Reusable code patterns discovered -->

## Decisions

<!-- Architectural choices + rationale -->
<!-- Format: - [YYYY-MM-DD]: decision | rationale -->

## Gotchas

<!-- Common pitfalls + solutions -->
<!-- Format: - [issue]: cause | solution -->

EOF
}

# Check for active session
active_session="$(find_active_session)"

if [ -n "$active_session" ]; then
  if [ "$force" -eq 0 ]; then
    echo "⚠️ Active session exists: $active_session"
    echo " Use --force to archive and start new session"
    echo " Or run: archive-session.sh"
    exit 0
  else
    archive_session "$active_session"
  fi
fi

# Generate new session ID
SESSION_ID=$(date +%Y%m%d%H%M%S)
SESSION_DIR="$PLAN_DIR/$SESSION_ID"

# Create directory structure
mkdir -p "$SESSION_DIR"
mkdir -p "$HISTORY_DIR"

# Initialize global files
init_history_index
init_changelog
init_kb

# Create state.yaml
cat > "$SESSION_DIR/state.yaml" << EOF
session_id: "$SESSION_ID"
feature_name: "$feature_name"
current_phase: "specify"
action_count: 0
consecutive_failures: 0
max_iterations: 12
iteration_count: 0
completion_promise: ""
ehrb_flags: []
EOF

# Create journal.md
cat > "$SESSION_DIR/journal.md" << EOF
# SPARV Journal
Session: $SESSION_ID
Feature: $feature_name
Created: $(date '+%Y-%m-%d %H:%M')

## Plan
<!-- Task breakdown, sub-issues, success criteria -->

## Progress
<!-- Auto-updated every 2 actions -->

## Findings
<!-- Learnings, patterns, discoveries -->
EOF

# Verify files created
if [ ! -f "$SESSION_DIR/state.yaml" ] || [ ! -f "$SESSION_DIR/journal.md" ]; then
  echo "❌ Failed to create files"
  exit 1
fi

echo "✅ SPARV session: $SESSION_ID"
[ -n "$feature_name" ] && echo "📝 Feature: $feature_name"
echo "📁 $SESSION_DIR/state.yaml"
echo "📁 $SESSION_DIR/journal.md"
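A usage sketch; session IDs are timestamps, so the directory name below is illustrative:

    ./skills/sparv/scripts/init-session.sh "user-auth"
    # => .sparv/plan/20260205104500/{state.yaml,journal.md}
    ./skills/sparv/scripts/init-session.sh --force "billing-v2"   # archives the active session into .sparv/history/ first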
skills/sparv/scripts/lib/state-lock.sh (143 lines, executable file)
@@ -0,0 +1,143 @@
#!/bin/bash
#
# Shared helpers for .sparv state operations.
# Supports new directory structure: .sparv/plan/<session_id>/

sparv_die() {
  echo "❌ $*" >&2
  exit 1
}

# Find active session directory
sparv_find_active_session() {
  local plan_dir=".sparv/plan"
  if [ -d "$plan_dir" ]; then
    local session
    session="$(ls -1 "$plan_dir" 2>/dev/null | head -1)"
    if [ -n "$session" ] && [ -f "$plan_dir/$session/state.yaml" ]; then
      echo "$plan_dir/$session"
    fi
  fi
}

# Auto-detect SPARV_DIR and STATE_FILE
sparv_auto_detect() {
  local session_dir
  session_dir="$(sparv_find_active_session)"
  if [ -n "$session_dir" ]; then
    SPARV_DIR="$session_dir"
    STATE_FILE="$session_dir/state.yaml"
    JOURNAL_FILE="$session_dir/journal.md"
    export SPARV_DIR STATE_FILE JOURNAL_FILE
    return 0
  fi
  return 1
}

sparv_require_state_env() {
  if [ -z "${SPARV_DIR:-}" ] || [ -z "${STATE_FILE:-}" ]; then
    if ! sparv_auto_detect; then
      sparv_die "No active session found; run init-session.sh first"
    fi
  fi
}

sparv_require_state_file() {
  sparv_require_state_env
  [ -f "$STATE_FILE" ] || sparv_die "File not found: $STATE_FILE; run init-session.sh first"
}

# Read a YAML value (simple key: value format)
sparv_yaml_get() {
  local key="$1"
  local default="${2:-}"
  sparv_require_state_file

  local line value
  line="$(grep -E "^${key}:" "$STATE_FILE" | head -n 1 || true)"
  if [ -z "$line" ]; then
    printf "%s" "$default"
    return 0
  fi
  value="${line#${key}:}"
  value="$(printf "%s" "$value" | sed -E 's/^[[:space:]]+//; s/^"//; s/"$//')"
  printf "%s" "$value"
}

sparv_yaml_get_int() {
  local key="$1"
  local default="${2:-0}"
  local value
  value="$(sparv_yaml_get "$key" "$default")"
  if printf "%s" "$value" | grep -Eq '^[0-9]+$'; then
    printf "%s" "$value"
  else
    printf "%s" "$default"
  fi
}

# Write a YAML value (in-place update)
sparv_yaml_set_raw() {
  local key="$1"
  local raw_value="$2"
  sparv_require_state_file

  local tmp
  tmp="$(mktemp)"

  awk -v key="$key" -v repl="${key}: ${raw_value}" '
    BEGIN { in_block = 0; replaced = 0 }
    {
      if (in_block) {
        if ($0 ~ /^[[:space:]]*-/) next
        in_block = 0
      }
      if ($0 ~ ("^" key ":")) {
        print repl
        in_block = 1
        replaced = 1
        next
      }
      print
    }
    END {
      if (!replaced) print repl
    }
  ' "$STATE_FILE" >"$tmp"

  mv -f "$tmp" "$STATE_FILE"
}

sparv_yaml_set_int() {
  local key="$1"
  local value="$2"
  [ "$value" -ge 0 ] 2>/dev/null || sparv_die "$key must be a non-negative integer"
  sparv_yaml_set_raw "$key" "$value"
}

# Validate state.yaml has required fields (4 core fields only)
sparv_state_validate() {
  sparv_require_state_file

  local missing=0
  local key

  for key in session_id current_phase action_count consecutive_failures; do
    grep -Eq "^${key}:" "$STATE_FILE" || missing=1
  done

  local phase
  phase="$(sparv_yaml_get current_phase "")"
  case "$phase" in
    specify|plan|act|review|vault) ;;
    *) missing=1 ;;
  esac

  [ "$missing" -eq 0 ]
}

sparv_state_validate_or_die() {
  if ! sparv_state_validate; then
    sparv_die "Corrupted state.yaml: $STATE_FILE. Run init-session.sh --force to rebuild."
  fi
}
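A sketch of how a sibling script consumes these helpers once sourced, mirroring the source line the scripts above use (values shown are illustrative):

    source "$SCRIPT_DIR/lib/state-lock.sh"
    sparv_require_state_file                 # auto-detects .sparv/plan/<session_id>/ and sets STATE_FILE
    phase="$(sparv_yaml_get current_phase "specify")"
    sparv_yaml_set_int action_count 4
    sparv_yaml_set_raw ehrb_flags '["sensitive-data"]'   # the awk writer also drops any stale "- item" lines under the key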
skills/sparv/scripts/reboot-test.sh (127 lines, executable file)
@@ -0,0 +1,127 @@
#!/bin/bash
# SPARV 3-Question Reboot Test Script
# Prints (and optionally validates) the "3 questions" using the current session state.

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/lib/state-lock.sh"

usage() {
  cat <<'EOF'
Usage: reboot-test.sh [options]

Options:
  --strict     Exit non-zero if critical answers are missing or unsafe
  -h, --help   Show this help

Auto-detects active session in .sparv/plan/<session_id>/
EOF
}

die() {
  echo "❌ $*" >&2
  exit 1
}

tail_file() {
  local path="$1"
  local lines="${2:-20}"
  if [ -f "$path" ]; then
    tail -n "$lines" "$path"
  else
    echo "(missing: $path)"
  fi
}

strict=0

while [ $# -gt 0 ]; do
  case "$1" in
    -h|--help) usage; exit 0 ;;
    --strict) strict=1; shift ;;
    *) die "Unknown argument: $1 (use --help for usage)" ;;
  esac
done

# Auto-detect session (sets SPARV_DIR, STATE_FILE, JOURNAL_FILE)
sparv_require_state_file
sparv_state_validate_or_die

session_id="$(sparv_yaml_get session_id "")"
feature_name="$(sparv_yaml_get feature_name "")"
current_phase="$(sparv_yaml_get current_phase "")"
completion_promise="$(sparv_yaml_get completion_promise "")"
iteration_count="$(sparv_yaml_get_int iteration_count 0)"
max_iterations="$(sparv_yaml_get_int max_iterations 0)"
consecutive_failures="$(sparv_yaml_get_int consecutive_failures 0)"
ehrb_flags="$(sparv_yaml_get ehrb_flags "")"

case "$current_phase" in
  specify) next_phase="plan" ;;
  plan) next_phase="act" ;;
  act) next_phase="review" ;;
  review) next_phase="vault" ;;
  vault) next_phase="done" ;;
  *) next_phase="unknown" ;;
esac

echo "== 3-Question Reboot Test =="
echo "session_id: ${session_id:-"(unknown)"}"
if [ -n "$feature_name" ]; then
  echo "feature_name: $feature_name"
fi
echo
echo "1) Where am I?"
echo " current_phase: ${current_phase:-"(empty)"}"
echo
echo "2) Where am I going?"
echo " next_phase: $next_phase"
echo
echo "3) How do I prove completion?"
if [ -n "$completion_promise" ]; then
  echo " completion_promise: $completion_promise"
else
  echo " completion_promise: (empty)"
fi
echo
echo "journal tail (20 lines):"
tail_file "$JOURNAL_FILE" 20
echo
echo "Counters: failures=$consecutive_failures, iteration=$iteration_count/$max_iterations"
if [ -n "$ehrb_flags" ] && [ "$ehrb_flags" != "[]" ]; then
  echo "EHRB: $ehrb_flags"
fi

if [ "$strict" -eq 1 ]; then
  exit_code=0

  case "$current_phase" in
    specify|plan|act|review|vault) ;;
    *) echo "❌ strict: current_phase invalid/empty: $current_phase" >&2; exit_code=1 ;;
  esac

  if [ -z "$completion_promise" ]; then
    echo "❌ strict: completion_promise is empty; fill in a verifiable completion commitment in $STATE_FILE first." >&2
    exit_code=1
  fi

  if [ "$max_iterations" -gt 0 ] && [ "$iteration_count" -ge "$max_iterations" ]; then
    echo "❌ strict: iteration_count >= max_iterations; stop hook triggered, should pause and escalate to user." >&2
    exit_code=1
  fi

  if [ "$consecutive_failures" -ge 3 ]; then
    echo "❌ strict: consecutive_failures >= 3; 3-Failure Protocol triggered, should pause and escalate to user." >&2
    exit_code=1
  fi

  if [ -n "$ehrb_flags" ] && [ "$ehrb_flags" != "[]" ]; then
    echo "❌ strict: ehrb_flags not empty; EHRB risk exists, requires explicit user confirmation before continuing." >&2
    exit_code=1
  fi

  exit "$exit_code"
fi

exit 0
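A usage sketch; --strict is the gating mode a stop hook or CI check would call:

    ./skills/sparv/scripts/reboot-test.sh            # report only, always exits 0
    ./skills/sparv/scripts/reboot-test.sh --strict   # non-zero if the phase is invalid, the promise is empty, or a counter/EHRB gate tripped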
skills/sparv/scripts/save-progress.sh (55 lines, executable file)
@@ -0,0 +1,55 @@
#!/bin/bash
# SPARV Progress Save Script
# Implements the 2-Action rule (called after each tool call; writes every 2 actions).

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/lib/state-lock.sh"

usage() {
  cat <<'EOF'
Usage: save-progress.sh [TOOL_NAME] [RESULT]

Increments action_count and appends to journal.md every 2 actions.
Auto-detects active session in .sparv/plan/<session_id>/
EOF
}

if [ "${1:-}" = "-h" ] || [ "${1:-}" = "--help" ]; then
  usage
  exit 0
fi

# Auto-detect session (sets SPARV_DIR, STATE_FILE, JOURNAL_FILE)
sparv_require_state_file
sparv_state_validate_or_die
[ -f "$JOURNAL_FILE" ] || sparv_die "Cannot find $JOURNAL_FILE; run init-session.sh first"

# Arguments
TOOL_NAME="${1:-unknown}"
RESULT="${2:-no result}"

ACTION_COUNT="$(sparv_yaml_get_int action_count 0)"

# Increment action count
NEW_COUNT=$((ACTION_COUNT + 1))

# Update state file
sparv_yaml_set_int action_count "$NEW_COUNT"

# Only write every 2 actions
if [ $((NEW_COUNT % 2)) -ne 0 ]; then
  exit 0
fi

# Append to journal
TIMESTAMP=$(date '+%H:%M')
cat >> "$JOURNAL_FILE" << EOF

## $TIMESTAMP - Action #$NEW_COUNT
- Tool: $TOOL_NAME
- Result: $RESULT
EOF

echo "📝 journal.md saved: Action #$NEW_COUNT"
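A usage sketch of the 2-Action rule in practice (tool names and results are illustrative):

    ./skills/sparv/scripts/save-progress.sh Edit "patched auth middleware"   # action #1: counted, nothing journaled yet
    ./skills/sparv/scripts/save-progress.sh Bash "unit tests green"          # action #2: entry appended to journal.md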