mirror of https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-02-06 01:54:11 +08:00
Add orchestrator design, phase file generation, and validation processes
- Implement Phase 2: Orchestrator Design with detailed steps for generating SKILL.md
- Introduce Phase 3: Phase Files Design to create phase files with content fidelity
- Establish Phase 4: Validation & Integration to ensure structural completeness and reference integrity
- Include comprehensive validation checks for content quality and data flow consistency
- Enhance documentation with clear objectives and critical rules for each phase
@@ -1,650 +0,0 @@
---
name: lite-skill-generator
description: Lightweight skill generator with style learning - creates simple skills using flow-based execution and style imitation. Use for quick skill scaffolding, simple workflow creation, or style-aware skill generation.
allowed-tools: Read, Write, Bash, Glob, Grep, AskUserQuestion
---

# Lite Skill Generator

Lightweight meta-skill for rapid skill creation with intelligent style learning and flow-based execution.

## Core Concept

**Simplicity First**: Generate simple, focused skills quickly with minimal overhead. Learn from existing skills to maintain consistent style and structure.

**Progressive Disclosure**: Follow Anthropic's three-layer loading principle:
1. **Metadata** - name, description, triggers (always loaded)
2. **SKILL.md** - core instructions (loaded when triggered)
3. **Bundled resources** - scripts, references, assets (loaded on demand)

## Execution Model

**3-Phase Flow**: Style Learning → Requirements Gathering → Generation

```
User Input → Phase 1: Style Analysis → Phase 2: Requirements → Phase 3: Generate → Skill Package
                       ↓                         ↓                        ↓
               Learn from examples       Interactive prompts     Write files + validate
```

## Architecture Overview

```
┌─────────────────────────────────────────────────────────────────┐
│                      Lite Skill Generator                       │
│                                                                 │
│  Input: Skill name, purpose, reference skills                   │
│    ↓                                                            │
│  ┌─────────────────────────────────────────────────────────┐   │
│  │             Phase 1-3: Lightweight Pipeline             │   │
│  │   ┌────┐    ┌────┐    ┌────┐                            │   │
│  │   │ P1 │ →  │ P2 │ →  │ P3 │                            │   │
│  │   │Styl│    │Req │    │Gen │                            │   │
│  │   └────┘    └────┘    └────┘                            │   │
│  └─────────────────────────────────────────────────────────┘   │
│    ↓                                                            │
│  Output: .claude/skills/{skill-name}/ (minimal package)         │
│                                                                 │
└─────────────────────────────────────────────────────────────────┘
```

## 3-Phase Workflow

### Phase 1: Style Analysis & Learning

Analyze reference skills to extract language patterns, structural conventions, and writing style.

```javascript
// Phase 1 Execution Flow
async function analyzeStyle(referencePaths) {
  // Step 1: Load reference skills
  const references = [];
  for (const path of referencePaths) {
    const content = Read(path);
    references.push({
      path: path,
      content: content,
      metadata: extractYAMLFrontmatter(content)
    });
  }

  // Step 2: Extract style patterns
  const styleProfile = {
    // Structural patterns
    structure: {
      hasFrontmatter: references.every(r => r.metadata !== null),
      sectionHeaders: extractCommonSections(references),
      codeBlockUsage: detectCodeBlockPatterns(references),
      flowDiagramUsage: detectFlowDiagrams(references)
    },

    // Language patterns
    language: {
      instructionStyle: detectInstructionStyle(references), // 'imperative' | 'declarative' | 'procedural'
      pseudocodeUsage: detectPseudocodePatterns(references),
      verbosity: calculateVerbosityLevel(references), // 'concise' | 'detailed' | 'verbose'
      terminology: extractCommonTerms(references)
    },

    // Organization patterns
    organization: {
      phaseStructure: detectPhasePattern(references), // 'sequential' | 'autonomous' | 'flat'
      exampleDensity: calculateExampleRatio(references),
      templateUsage: detectTemplateReferences(references)
    }
  };

  // Step 3: Generate style guide
  return {
    profile: styleProfile,
    recommendations: generateStyleRecommendations(styleProfile),
    examples: extractStyleExamples(references, styleProfile)
  };
}

// Structural pattern detection
function extractCommonSections(references) {
  const allSections = references.map(r =>
    r.content.match(/^##? (.+)$/gm)?.map(s => s.replace(/^##? /, ''))
  ).flat().filter(Boolean); // drop nulls from files without headers
  return findMostCommon(allSections);
}

// Language style detection
function detectInstructionStyle(references) {
  const imperativePattern = /^(Use|Execute|Run|Call|Create|Generate)\s/gim;
  const declarativePattern = /^(The|This|Each|All)\s.*\s(is|are|will be)\s/gim;
  const proceduralPattern = /^(Step \d+|Phase \d+|First|Then|Finally)\s/gim;

  const scores = references.map(r => ({
    imperative: (r.content.match(imperativePattern) || []).length,
    declarative: (r.content.match(declarativePattern) || []).length,
    procedural: (r.content.match(proceduralPattern) || []).length
  }));

  return getMaxStyle(scores);
}

// Pseudocode pattern detection
function detectPseudocodePatterns(references) {
  const hasJavaScriptBlocks = references.some(r => r.content.includes('```javascript'));
  const hasFunctionDefs = references.some(r => /function\s+\w+\(/m.test(r.content));
  const hasFlowComments = references.some(r => /\/\/.*→/m.test(r.content));

  return {
    usePseudocode: hasJavaScriptBlocks && hasFunctionDefs,
    flowAnnotations: hasFlowComments,
    style: hasFunctionDefs ? 'functional' : 'imperative'
  };
}
```
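
`findMostCommon` and `getMaxStyle` are referenced above but never defined. A minimal sketch of what they could look like (frequency ranking and score summation are assumptions, not taken from the original):

```javascript
// Hypothetical helpers assumed by the pseudocode above.
// findMostCommon: rank items by how often they appear across references.
function findMostCommon(items) {
  const counts = new Map();
  for (const item of items) counts.set(item, (counts.get(item) || 0) + 1);
  return [...counts.entries()]
    .sort((a, b) => b[1] - a[1])
    .map(([item]) => item);
}

// getMaxStyle: pick the instruction style with the highest total match count.
function getMaxStyle(scores) {
  const totals = { imperative: 0, declarative: 0, procedural: 0 };
  for (const s of scores) {
    totals.imperative += s.imperative;
    totals.declarative += s.declarative;
    totals.procedural += s.procedural;
  }
  return Object.entries(totals).sort((a, b) => b[1] - a[1])[0][0];
}
```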

**Output**:
```
Style Analysis Complete:
  Structure: Flow-based with pseudocode
  Language: Procedural, detailed
  Organization: Sequential phases
  Key Patterns: 3-5 phases, function definitions, ASCII diagrams

Recommendations:
  ✓ Use phase-based structure (3-4 phases)
  ✓ Include pseudocode for complex logic
  ✓ Add ASCII flow diagrams
  ✓ Maintain concise documentation style
```

---

### Phase 2: Requirements Gathering

Interactive discovery of skill requirements using learned style patterns.

```javascript
async function gatherRequirements(styleProfile) {
  // Step 1: Basic information
  const basicInfo = await AskUserQuestion({
    questions: [
      {
        question: "What is the skill name? (kebab-case, e.g., 'pdf-generator')",
        header: "Name",
        options: [
          { label: "pdf-generator", description: "Example: PDF generation skill" },
          { label: "code-analyzer", description: "Example: Code analysis skill" },
          { label: "Custom", description: "Enter custom name" }
        ]
      },
      {
        question: "What is the primary purpose?",
        header: "Purpose",
        options: [
          { label: "Generation", description: "Create/generate artifacts" },
          { label: "Analysis", description: "Analyze/inspect code or data" },
          { label: "Transformation", description: "Convert/transform content" },
          { label: "Orchestration", description: "Coordinate multiple operations" }
        ]
      }
    ]
  });

  // Step 2: Execution complexity
  const complexity = await AskUserQuestion({
    questions: [{
      question: "How many main steps does this skill need?",
      header: "Steps",
      options: [
        { label: "2-3 steps", description: "Simple workflow (recommended for lite-skill)" },
        { label: "4-5 steps", description: "Moderate workflow" },
        { label: "6+ steps", description: "Complex workflow (consider full skill-generator)" }
      ]
    }]
  });

  // Step 3: Tool requirements
  const tools = await AskUserQuestion({
    questions: [{
      question: "Which tools will this skill use? (select multiple)",
      header: "Tools",
      multiSelect: true,
      options: [
        { label: "Read", description: "Read files" },
        { label: "Write", description: "Write files" },
        { label: "Bash", description: "Execute commands" },
        { label: "Task", description: "Launch agents" },
        { label: "AskUserQuestion", description: "Interactive prompts" }
      ]
    }]
  });

  // Step 4: Output format
  const output = await AskUserQuestion({
    questions: [{
      question: "What does this skill produce?",
      header: "Output",
      options: [
        { label: "Single file", description: "One main output file" },
        { label: "Multiple files", description: "Several related files" },
        { label: "Directory structure", description: "Complete directory tree" },
        { label: "Modified files", description: "Edits to existing files" }
      ]
    }]
  });

  // Step 5: Build configuration
  return {
    name: basicInfo.Name,
    purpose: basicInfo.Purpose,
    description: generateDescription(basicInfo.Name, basicInfo.Purpose),
    steps: parseStepCount(complexity.Steps),
    allowedTools: tools.Tools,
    outputType: output.Output,
    styleProfile: styleProfile,
    triggerPhrases: generateTriggerPhrases(basicInfo.Name, basicInfo.Purpose)
  };
}

// Generate skill description from name and purpose
function generateDescription(name, purpose) {
  const templates = {
    Generation: `Generate ${humanize(name)} with intelligent scaffolding`,
    Analysis: `Analyze ${humanize(name)} with detailed reporting`,
    Transformation: `Transform ${humanize(name)} with format conversion`,
    Orchestration: `Orchestrate ${humanize(name)} workflow with multi-step coordination`
  };
  return templates[purpose] || `${humanize(name)} skill for ${purpose.toLowerCase()} tasks`;
}

// Generate trigger phrases
function generateTriggerPhrases(name, purpose) {
  const base = [name, name.replace(/-/g, ' ')];
  const purposeVariants = {
    Generation: ['generate', 'create', 'build'],
    Analysis: ['analyze', 'inspect', 'review'],
    Transformation: ['transform', 'convert', 'format'],
    Orchestration: ['orchestrate', 'coordinate', 'manage']
  };
  return [...base, ...purposeVariants[purpose].map(v => `${v} ${humanize(name)}`)];
}
```
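
The helpers `humanize`, `toCamelCase`, and `parseStepCount` used throughout this pseudocode are likewise undefined. One plausible reading, assuming kebab-case names and step labels like "2-3 steps":

```javascript
// Hypothetical helpers assumed by the pseudocode in this document.
// humanize: 'pdf-generator' → 'Pdf Generator'
function humanize(name) {
  return name
    .split('-')
    .map(w => w.charAt(0).toUpperCase() + w.slice(1))
    .join(' ');
}

// toCamelCase: 'pdf-generator' → 'pdfGenerator'
function toCamelCase(name) {
  return name.replace(/-(\w)/g, (_, c) => c.toUpperCase());
}

// parseStepCount: '2-3 steps' → 3 (upper bound of the selected range)
function parseStepCount(label) {
  const nums = label.match(/\d+/g)?.map(Number) ?? [3];
  return Math.max(...nums);
}
```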

**Display to User**:
```
Requirements Gathered:
  Name: pdf-generator
  Purpose: Generation
  Steps: 3 (Setup → Generate → Validate)
  Tools: Read, Write, Bash
  Output: Single file (PDF document)
  Triggers: "pdf-generator", "generate pdf", "create pdf"

Style Application:
  Using flow-based structure (from style analysis)
  Including pseudocode blocks
  Adding ASCII diagrams for clarity
```

---

### Phase 3: Generate Skill Package

Create minimal skill structure with style-aware content generation.

```javascript
async function generateSkillPackage(requirements) {
  const skillDir = `.claude/skills/${requirements.name}`;
  const workDir = `.workflow/.scratchpad/lite-skill-gen-${Date.now()}`;

  // Step 1: Create directory structure
  Bash(`mkdir -p "${skillDir}" "${workDir}"`);

  // Step 2: Generate SKILL.md (using learned style)
  const skillContent = generateSkillMd(requirements);
  Write(`${skillDir}/SKILL.md`, skillContent);

  // Step 3: Conditionally add bundled resources
  if (requirements.outputType === 'Directory structure') {
    Bash(`mkdir -p "${skillDir}/templates"`);
    const templateContent = generateTemplate(requirements);
    Write(`${skillDir}/templates/base-template.md`, templateContent);
  }

  if (requirements.allowedTools.includes('Bash')) {
    Bash(`mkdir -p "${skillDir}/scripts"`);
    const scriptContent = generateScript(requirements);
    Write(`${skillDir}/scripts/helper.sh`, scriptContent);
  }

  // Step 4: Generate README
  const readmeContent = generateReadme(requirements);
  Write(`${skillDir}/README.md`, readmeContent);

  // Step 5: Validate structure
  const validation = validateSkillStructure(skillDir, requirements);
  Write(`${workDir}/validation-report.json`, JSON.stringify(validation, null, 2));

  // Step 6: Return summary
  return {
    skillPath: skillDir,
    filesCreated: [
      `${skillDir}/SKILL.md`,
      ...(validation.hasTemplates ? [`${skillDir}/templates/`] : []),
      ...(validation.hasScripts ? [`${skillDir}/scripts/`] : []),
      `${skillDir}/README.md`
    ],
    validation: validation,
    nextSteps: generateNextSteps(requirements)
  };
}

// Generate SKILL.md with style awareness
function generateSkillMd(req) {
  const { styleProfile } = req;

  // YAML frontmatter
  const frontmatter = `---
name: ${req.name}
description: ${req.description}
allowed-tools: ${req.allowedTools.join(', ')}
---
`;

  // Main content structure (adapts to style)
  let content = frontmatter;

  content += `\n# ${humanize(req.name)}\n\n`;
  content += `${req.description}\n\n`;

  // Add architecture diagram if style uses them
  if (styleProfile.structure.flowDiagramUsage) {
    content += generateArchitectureDiagram(req);
  }

  // Add execution flow
  content += `## Execution Flow\n\n`;
  if (styleProfile.language.pseudocodeUsage.usePseudocode) {
    content += generatePseudocodeFlow(req);
  } else {
    content += generateProceduralFlow(req);
  }

  // Add phase sections
  for (let i = 0; i < req.steps; i++) {
    content += generatePhaseSection(i + 1, req, styleProfile);
  }

  // Add examples if style is verbose
  if (styleProfile.language.verbosity !== 'concise') {
    content += generateExamplesSection(req);
  }

  return content;
}

// Generate architecture diagram
function generateArchitectureDiagram(req) {
  return `## Architecture

\`\`\`
┌─────────────────────────────────────────────────┐
│  ${humanize(req.name)}                          │
│                                                 │
│  Input → Phase 1 → Phase 2 → Phase 3 → Output   │
│          ${getPhaseName(1, req)}                │
│          ${getPhaseName(2, req)}                │
│          ${getPhaseName(3, req)}                │
└─────────────────────────────────────────────────┘
\`\`\`

`;
}

// Generate pseudocode flow
function generatePseudocodeFlow(req) {
  return `\`\`\`javascript
async function ${toCamelCase(req.name)}(input) {
  // Phase 1: ${getPhaseName(1, req)}
  const prepared = await phase1Prepare(input);

  // Phase 2: ${getPhaseName(2, req)}
  const processed = await phase2Process(prepared);

  // Phase 3: ${getPhaseName(3, req)}
  const result = await phase3Finalize(processed);

  return result;
}
\`\`\`

`;
}

// Generate phase section
function generatePhaseSection(phaseNum, req, styleProfile) {
  const phaseName = getPhaseName(phaseNum, req);

  let section = `### Phase ${phaseNum}: ${phaseName}\n\n`;

  if (styleProfile.language.pseudocodeUsage.usePseudocode) {
    section += `\`\`\`javascript\n`;
    section += `async function phase${phaseNum}${toCamelCase(phaseName)}(input) {\n`;
    section += `  // TODO: Implement ${phaseName.toLowerCase()} logic\n`;
    section += `  return output;\n`;
    section += `}\n\`\`\`\n\n`;
  } else {
    section += `**Steps**:\n`;
    section += `1. Load input data\n`;
    section += `2. Process according to ${phaseName.toLowerCase()} logic\n`;
    section += `3. Return result to next phase\n\n`;
  }

  return section;
}

// Validation
function validateSkillStructure(skillDir, req) {
  const requiredFiles = [`${skillDir}/SKILL.md`, `${skillDir}/README.md`];
  const exists = requiredFiles.map(f => Bash(`test -f "${f}"`).exitCode === 0);

  return {
    valid: exists.every(e => e),
    hasTemplates: Bash(`test -d "${skillDir}/templates"`).exitCode === 0,
    hasScripts: Bash(`test -d "${skillDir}/scripts"`).exitCode === 0,
    filesPresent: requiredFiles.filter((f, i) => exists[i]),
    styleCompliance: checkStyleCompliance(skillDir, req.styleProfile)
  };
}
```
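
`checkStyleCompliance` is referenced by the validation step but not defined in this file. A hedged sketch of how it might score compliance against the learned profile (the specific checks and scoring are assumptions):

```javascript
// Hypothetical sketch of checkStyleCompliance; weights and checks are assumptions.
function checkStyleCompliance(skillDir, styleProfile) {
  const content = Read(`${skillDir}/SKILL.md`);
  const fence = '`'.repeat(3); // markdown code-fence marker

  const checks = {
    hasFrontmatter: content.startsWith('---'),
    hasDiagram: !styleProfile.structure.flowDiagramUsage || content.includes('┌'),
    hasPseudocode: !styleProfile.language.pseudocodeUsage.usePseudocode ||
      content.includes(fence + 'javascript')
  };

  const passed = Object.values(checks).filter(Boolean).length;
  return { checks, score: Math.round((passed / Object.keys(checks).length) * 100) };
}
```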

**Output**:
```
Skill Package Generated:
  Location: .claude/skills/pdf-generator/

Structure:
  ✓ SKILL.md (entry point)
  ✓ README.md (usage guide)
  ✓ templates/ (directory templates)
  ✓ scripts/ (helper scripts)

Validation:
  ✓ All required files present
  ✓ Style compliance: 95%
  ✓ Frontmatter valid
  ✓ Tool references correct

Next Steps:
  1. Review SKILL.md and customize phases
  2. Test skill: /skill:pdf-generator "test input"
  3. Iterate based on usage
```

---

## Complete Execution Flow

```
User: "Create a PDF generator skill"
  ↓
Phase 1: Style Analysis
  |-- Read reference skills (ccw.md, ccw-coordinator.md)
  |-- Extract style patterns (flow diagrams, pseudocode, structure)
  |-- Generate style profile
  +-- Output: Style recommendations
  ↓
Phase 2: Requirements
  |-- Ask: Name, purpose, steps
  |-- Ask: Tools, output format
  |-- Generate: Description, triggers
  +-- Output: Requirements config
  ↓
Phase 3: Generation
  |-- Create: Directory structure
  |-- Write: SKILL.md (style-aware)
  |-- Write: README.md
  |-- Optionally: templates/, scripts/
  |-- Validate: Structure and style
  +-- Output: Skill package
  ↓
Return: Skill location + next steps
```

## Phase Execution Protocol

```javascript
// Main entry point
async function liteSkillGenerator(input) {
  // Phase 1: Style Learning
  const references = [
    '.claude/commands/ccw.md',
    '.claude/commands/ccw-coordinator.md',
    ...discoverReferenceSkills(input)
  ];
  // analyzeStyle returns { profile, recommendations, examples };
  // downstream code expects the profile itself
  const analysis = await analyzeStyle(references);
  const styleProfile = analysis.profile;
  console.log(`Style Analysis: ${styleProfile.organization.phaseStructure}, ${styleProfile.language.verbosity}`);

  // Phase 2: Requirements
  const requirements = await gatherRequirements(styleProfile);
  console.log(`Requirements: ${requirements.name} (${requirements.steps} phases)`);

  // Phase 3: Generation
  const result = await generateSkillPackage(requirements);
  console.log(`✅ Generated: ${result.skillPath}`);

  return result;
}
```
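
`discoverReferenceSkills` is also undefined here. A minimal sketch, assuming reference skills live under `.claude/skills/` and are matched by name against the user input:

```javascript
// Hypothetical sketch; the matching heuristic is an assumption.
function discoverReferenceSkills(input) {
  const installed = Glob('.claude/skills/*/SKILL.md');
  return installed.filter(path => {
    const skillName = path.split('/')[2]; // .claude/skills/{name}/SKILL.md
    return input.toLowerCase().includes(skillName);
  });
}
```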

## Output Structure

**Minimal Package** (default):
```
.claude/skills/{skill-name}/
├── SKILL.md          # Entry point with frontmatter
└── README.md         # Usage guide
```

**With Templates** (if needed):
```
.claude/skills/{skill-name}/
├── SKILL.md
├── README.md
└── templates/
    └── base-template.md
```

**With Scripts** (if using Bash):
```
.claude/skills/{skill-name}/
├── SKILL.md
├── README.md
└── scripts/
    └── helper.sh
```

## Key Design Principles

1. **Style Learning** - Analyze reference skills to maintain consistency
2. **Minimal Overhead** - Generate only essential files (SKILL.md + README)
3. **Progressive Disclosure** - Follow Anthropic's three-layer loading
4. **Flow-Based** - Use pseudocode and flow diagrams (when style appropriate)
5. **Interactive** - Guided requirements gathering via AskUserQuestion
6. **Fast Generation** - 3 phases instead of 6, focused on simplicity
7. **Style Awareness** - Adapt output based on detected patterns

## Style Pattern Detection

**Structural Patterns**:
- YAML frontmatter usage (100% in references)
- Section headers (H2 for major, H3 for sub-sections)
- Code blocks (JavaScript pseudocode, Bash examples)
- ASCII diagrams (architecture, flow charts)

**Language Patterns**:
- Instruction style: Procedural with function definitions
- Pseudocode: JavaScript-based with flow annotations
- Verbosity: Detailed but focused
- Terminology: Phase, workflow, pipeline, orchestrator

**Organization Patterns**:
- Phase structure: 3-5 sequential phases
- Example density: Moderate (1-2 per major section)
- Template usage: Minimal (only when necessary)

## Usage Examples

**Basic Generation**:
```
User: "Create a markdown formatter skill"
Lite-Skill-Generator:
  → Analyzes ccw.md style
  → Asks: Name? "markdown-formatter"
  → Asks: Purpose? "Transformation"
  → Asks: Steps? "3 steps"
  → Generates: .claude/skills/markdown-formatter/
```

**With Custom References**:
```
User: "Create a skill like software-manual but simpler"
Lite-Skill-Generator:
  → Analyzes software-manual skill
  → Learns: Multi-phase, agent-based, template-heavy
  → Simplifies: 3 phases, direct execution, minimal templates
  → Generates: Simplified version
```

## Comparison: lite-skill-generator vs skill-generator

| Aspect | lite-skill-generator | skill-generator |
|--------|---------------------|-----------------|
| **Phases** | 3 (Style → Req → Gen) | 6 (Spec → Req → Dir → Gen → Specs → Val) |
| **Style Learning** | Yes (analyze references) | No (fixed templates) |
| **Complexity** | Simple skills only | Full-featured skills |
| **Output** | Minimal (SKILL.md + README) | Complete (phases/, specs/, templates/) |
| **Generation Time** | Fast (~2 min) | Thorough (~10 min) |
| **Use Case** | Quick scaffolding | Production-ready skills |

## Workflow Integration

**Standalone**:
```bash
/skill:lite-skill-generator "Create a log analyzer skill"
```

**With References**:
```bash
/skill:lite-skill-generator "Create a skill based on ccw-coordinator.md style"
```

**Batch Generation** (for multiple simple skills):
```bash
/skill:lite-skill-generator "Create 3 skills: json-validator, yaml-parser, toml-converter"
```

---

**Next Steps After Generation**:
1. Review `.claude/skills/{name}/SKILL.md`
2. Customize phase logic for your use case
3. Add examples to README.md
4. Test skill with sample input
5. Iterate based on real usage
@@ -1,68 +0,0 @@

---
name: {{SKILL_NAME}}
description: {{SKILL_DESCRIPTION}}
allowed-tools: {{ALLOWED_TOOLS}}
---

# {{SKILL_TITLE}}

{{SKILL_DESCRIPTION}}

## Architecture

```
┌─────────────────────────────────────────────────┐
│  {{SKILL_TITLE}}                                │
│                                                 │
│  Input → {{PHASE_1}} → {{PHASE_2}} → Output     │
└─────────────────────────────────────────────────┘
```

## Execution Flow

```javascript
async function {{SKILL_FUNCTION}}(input) {
  // Phase 1: {{PHASE_1}}
  const prepared = await phase1(input);

  // Phase 2: {{PHASE_2}}
  const result = await phase2(prepared);

  return result;
}
```

### Phase 1: {{PHASE_1}}

```javascript
async function phase1(input) {
  // TODO: Implement {{PHASE_1_LOWER}} logic
  return output;
}
```

### Phase 2: {{PHASE_2}}

```javascript
async function phase2(input) {
  // TODO: Implement {{PHASE_2_LOWER}} logic
  return output;
}
```

## Usage

```bash
/skill:{{SKILL_NAME}} "input description"
```

## Examples

**Basic Usage**:
```
User: "{{EXAMPLE_INPUT}}"
{{SKILL_NAME}}:
  → Phase 1: {{PHASE_1_ACTION}}
  → Phase 2: {{PHASE_2_ACTION}}
  → Output: {{EXAMPLE_OUTPUT}}
```
@@ -1,64 +0,0 @@

# Style Guide Template

Generated by lite-skill-generator style analysis phase.

## Detected Patterns

### Structural Patterns

| Pattern | Detected | Recommendation |
|---------|----------|----------------|
| YAML Frontmatter | {{HAS_FRONTMATTER}} | {{FRONTMATTER_REC}} |
| ASCII Diagrams | {{HAS_DIAGRAMS}} | {{DIAGRAMS_REC}} |
| Code Blocks | {{HAS_CODE_BLOCKS}} | {{CODE_BLOCKS_REC}} |
| Phase Structure | {{PHASE_STRUCTURE}} | {{PHASE_REC}} |

### Language Patterns

| Pattern | Value | Notes |
|---------|-------|-------|
| Instruction Style | {{INSTRUCTION_STYLE}} | imperative/declarative/procedural |
| Pseudocode Usage | {{PSEUDOCODE_USAGE}} | functional/imperative/none |
| Verbosity Level | {{VERBOSITY}} | concise/detailed/verbose |
| Common Terms | {{TERMINOLOGY}} | domain-specific vocabulary |

### Organization Patterns

| Pattern | Value |
|---------|-------|
| Phase Count | {{PHASE_COUNT}} |
| Example Density | {{EXAMPLE_DENSITY}} |
| Template Usage | {{TEMPLATE_USAGE}} |

## Style Compliance Checklist

- [ ] YAML frontmatter with name, description, allowed-tools
- [ ] Architecture diagram (if pattern detected)
- [ ] Execution flow section with pseudocode
- [ ] Phase sections (sequential numbered)
- [ ] Usage examples section
- [ ] README.md for external documentation

## Reference Skills Analyzed

{{#REFERENCES}}
- `{{REF_PATH}}`: {{REF_NOTES}}
{{/REFERENCES}}

## Generated Configuration

```json
{
  "style": {
    "structure": "{{STRUCTURE_TYPE}}",
    "language": "{{LANGUAGE_TYPE}}",
    "organization": "{{ORG_TYPE}}"
  },
  "recommendations": {
    "usePseudocode": {{USE_PSEUDOCODE}},
    "includeDiagrams": {{INCLUDE_DIAGRAMS}},
    "verbosityLevel": "{{VERBOSITY}}",
    "phaseCount": {{PHASE_COUNT}}
  }
}
```
.claude/skills/workflow-plan/SKILL.md (new file, 348 lines)
@@ -0,0 +1,348 @@

---
name: workflow-plan
description: 5-phase planning workflow with action-planning-agent task generation, outputs IMPL_PLAN.md and task JSONs. Triggers on "workflow:plan".
allowed-tools: Task, AskUserQuestion, TodoWrite, Read, Write, Edit, Bash, Glob, Grep, Skill
---

# Workflow Plan

5-phase planning workflow that orchestrates session discovery, context gathering, conflict resolution, and task generation to produce implementation plans (IMPL_PLAN.md, task JSONs, TODO_LIST.md).

## Architecture Overview

```
┌─────────────────────────────────────────────────────────────────┐
│              Workflow Plan Orchestrator (SKILL.md)               │
│ → Pure coordinator: Execute phases, parse outputs, pass context  │
└───────────────┬─────────────────────────────────────────────────┘
                │
    ┌───────────┼───────────┬────────────┬───────────┐
    ↓           ↓           ↓            ↓           ↓
┌─────────┐ ┌─────────┐ ┌─────────┐ ┌──────────┐ ┌─────────┐
│ Phase 1 │ │ Phase 2 │ │ Phase 3 │ │Phase 3.5 │ │ Phase 4 │
│ Session │ │ Context │ │Conflict │ │   Gate   │ │  Task   │
│Discovery│ │ Gather  │ │ Resolve │ │(Optional)│ │Generate │
└─────────┘ └─────────┘ └─────────┘ └──────────┘ └─────────┘
     ↓           ↓            ↓                       ↓
 sessionId   contextPath   resolved             IMPL_PLAN.md
             conflict_risk artifacts            task JSONs
                                                TODO_LIST.md
```

## Key Design Principles

1. **Pure Orchestrator**: Execute phases in sequence, parse outputs, pass context between them
2. **Auto-Continue**: All phases run autonomously without user intervention between phases
3. **Task Attachment Model**: Sub-tasks are attached/collapsed dynamically in TodoWrite
4. **Progressive Phase Loading**: Phase docs are read on-demand, not all at once
5. **Conditional Execution**: Phase 3 only executes when conflict_risk >= medium

## Auto Mode

When `--yes` or `-y` is passed: auto-continue all phases (skip confirmations) and use recommended conflict resolutions.

## Execution Flow

```
Input Parsing:
  └─ Convert user input to structured format (GOAL/SCOPE/CONTEXT)

Phase 1: Session Discovery
  └─ Ref: phases/01-session-discovery.md
     └─ Output: sessionId (WFS-xxx)

Phase 2: Context Gathering
  └─ Ref: phases/02-context-gathering.md
     ├─ Tasks attached: Analyze structure → Identify integration → Generate package
     └─ Output: contextPath + conflict_risk

Phase 3: Conflict Resolution
  └─ Decision (conflict_risk check):
     ├─ conflict_risk ≥ medium → Ref: phases/03-conflict-resolution.md
     │  ├─ Tasks attached: Detect conflicts → Present to user → Apply strategies
     │  └─ Output: Modified brainstorm artifacts
     └─ conflict_risk < medium → Skip to Phase 4

Phase 4: Task Generation
  └─ Ref: phases/04-task-generation.md
     └─ Output: IMPL_PLAN.md, task JSONs, TODO_LIST.md

Return:
  └─ Summary with recommended next steps
```

**Phase Reference Documents** (read on-demand when phase executes):

| Phase | Document | Purpose |
|-------|----------|---------|
| 1 | [phases/01-session-discovery.md](phases/01-session-discovery.md) | Session creation/discovery with intelligent session management |
| 2 | [phases/02-context-gathering.md](phases/02-context-gathering.md) | Project context collection via context-search-agent |
| 3 | [phases/03-conflict-resolution.md](phases/03-conflict-resolution.md) | Conflict detection and resolution with CLI analysis |
| 4 | [phases/04-task-generation.md](phases/04-task-generation.md) | Implementation plan and task JSON generation |

## Core Rules

1. **Start Immediately**: First action is TodoWrite initialization, second action is Phase 1 execution
2. **No Preliminary Analysis**: Do not read files, analyze structure, or gather context before Phase 1
3. **Parse Every Output**: Extract required data from each phase output for the next phase
4. **Auto-Continue via TodoList**: Check TodoList status to execute the next pending phase automatically
5. **Track Progress**: Update TodoWrite dynamically with the task attachment/collapse pattern
6. **Progressive Phase Loading**: Read phase docs ONLY when that phase is about to execute
7. **DO NOT STOP**: Continuous multi-phase workflow. After completing each phase, immediately proceed to the next
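
Taken together, these rules suggest a coordinator loop like the following sketch, in the pseudocode style used elsewhere in this repository; the `runPhaseN*` helpers are illustrative assumptions, not actual APIs:

```javascript
// Illustrative coordinator loop; phase helpers are assumptions.
async function workflowPlan(userInput, options = {}) {
  const structured = toStructuredFormat(userInput); // GOAL/SCOPE/CONTEXT
  TodoWrite([/* Phase 1, 2, 4 pending; Phase 3 added dynamically */]);

  const sessionId = await runPhase1SessionDiscovery(structured);
  const { contextPath, conflictRisk } = await runPhase2ContextGathering(sessionId, structured);

  // Conditional gate: Phase 3 only when conflict risk is medium or higher
  if (conflictRisk === 'medium' || conflictRisk === 'high') {
    await runPhase3ConflictResolution(sessionId, contextPath);
  }

  return await runPhase4TaskGeneration(sessionId);
}
```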

## Input Processing

**Convert User Input to Structured Format**:

1. **Simple Text** → Structure it:
   ```
   User: "Build authentication system"

   Structured:
   GOAL: Build authentication system
   SCOPE: Core authentication features
   CONTEXT: New implementation
   ```

2. **Detailed Text** → Extract components:
   ```
   User: "Add JWT authentication with email/password login and token refresh"

   Structured:
   GOAL: Implement JWT-based authentication
   SCOPE: Email/password login, token generation, token refresh endpoints
   CONTEXT: JWT token-based security, refresh token rotation
   ```

3. **File Reference** (e.g., `requirements.md`) → Read and structure (see the sketch after this list):
   - Read file content
   - Extract goal, scope, requirements
   - Format into structured description
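
A minimal sketch of this structuring step, assuming the simple fallbacks shown in the examples above (`extractGoalScopeContext` is a hypothetical helper):

```javascript
// Hypothetical sketch; the heuristics and fallback text are assumptions.
function toStructuredFormat(userInput) {
  if (userInput.endsWith('.md')) {
    const text = Read(userInput);
    return extractGoalScopeContext(text); // assumed helper
  }
  return {
    GOAL: userInput,
    SCOPE: 'Core features implied by the goal',
    CONTEXT: 'New implementation'
  };
}
```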

## Data Flow

```
User Input (task description)
  ↓
[Convert to Structured Format]
  ↓ Structured Description:
  ↓   GOAL: [objective]
  ↓   SCOPE: [boundaries]
  ↓   CONTEXT: [background]
  ↓
Phase 1: session:start --auto "structured-description"
  ↓ Output: sessionId
  ↓ Write: planning-notes.md (User Intent section)
  ↓
Phase 2: context-gather --session sessionId "structured-description"
  ↓ Input: sessionId + structured description
  ↓ Output: contextPath (context-package.json with prioritized_context) + conflict_risk
  ↓ Update: planning-notes.md (Context Findings + Consolidated Constraints)
  ↓
Phase 3: conflict-resolution [AUTO-TRIGGERED if conflict_risk ≥ medium]
  ↓ Input: sessionId + contextPath + conflict_risk
  ↓ Output: Modified brainstorm artifacts
  ↓ Update: planning-notes.md (Conflict Decisions + Consolidated Constraints)
  ↓ Skip if conflict_risk is none/low → proceed directly to Phase 4
  ↓
Phase 4: task-generate-agent --session sessionId
  ↓ Input: sessionId + planning-notes.md + context-package.json + brainstorm artifacts
  ↓ Output: IMPL_PLAN.md, task JSONs, TODO_LIST.md
  ↓
Return summary to user
```
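
The Phase 3 gate in this flow could be expressed as follows, assuming the `conflict_detection.risk_level` field described in phases/02-context-gathering.md:

```javascript
// Sketch of the conflict_risk gate; field names follow the
// context-package.json structure referenced elsewhere in this skill.
const pkg = JSON.parse(Read(contextPath));
const conflictRisk = pkg.conflict_detection.risk_level; // 'none' | 'low' | 'medium' | 'high'
const runPhase3 = conflictRisk === 'medium' || conflictRisk === 'high';
```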

**Session Memory Flow**: Each phase receives the session ID, which provides access to:
- Previous task summaries
- Existing context and analysis
- Brainstorming artifacts (potentially modified by Phase 3)
- Session-specific configuration

## TodoWrite Pattern

**Core Concept**: Dynamic task attachment and collapse for real-time visibility into workflow execution.

### Key Principles

1. **Task Attachment** (when phase executed):
   - Sub-command's internal tasks are **attached** to the orchestrator's TodoWrite
   - **Phase 2, 3**: Multiple sub-tasks attached (e.g., Phase 2.1, 2.2, 2.3)
   - **Phase 4**: Single agent task attached
   - First attached task marked as `in_progress`, others as `pending`
   - Orchestrator **executes** these attached tasks sequentially

2. **Task Collapse** (after sub-tasks complete):
   - **Applies to Phase 2, 3**: Remove detailed sub-tasks from TodoWrite
   - **Collapse** to a high-level phase summary
   - **Phase 4**: No collapse needed (single task, just mark completed)
   - Maintains a clean orchestrator-level view

3. **Continuous Execution**:
   - After completion, automatically proceed to the next pending phase
   - No user intervention required between phases
   - TodoWrite dynamically reflects the current execution state

**Lifecycle**: Initial pending tasks → Phase executed (tasks ATTACHED) → Sub-tasks executed sequentially → Phase completed (tasks COLLAPSED) → Next phase begins → Repeat until all phases complete.

## Phase-Specific TodoWrite Updates

### Phase 2 (Tasks Attached):
```json
[
  {"content": "Phase 1: Session Discovery", "status": "completed"},
  {"content": "Phase 2: Context Gathering", "status": "in_progress"},
  {"content": "  → Analyze codebase structure", "status": "in_progress"},
  {"content": "  → Identify integration points", "status": "pending"},
  {"content": "  → Generate context package", "status": "pending"},
  {"content": "Phase 4: Task Generation", "status": "pending"}
]
```

### Phase 2 (Collapsed):
```json
[
  {"content": "Phase 1: Session Discovery", "status": "completed"},
  {"content": "Phase 2: Context Gathering", "status": "completed"},
  {"content": "Phase 4: Task Generation", "status": "pending"}
]
```

### Phase 3 (Conditional, Tasks Attached):
```json
[
  {"content": "Phase 1: Session Discovery", "status": "completed"},
  {"content": "Phase 2: Context Gathering", "status": "completed"},
  {"content": "Phase 3: Conflict Resolution", "status": "in_progress"},
  {"content": "  → Detect conflicts with CLI analysis", "status": "in_progress"},
  {"content": "  → Present conflicts to user", "status": "pending"},
  {"content": "  → Apply resolution strategies", "status": "pending"},
  {"content": "Phase 4: Task Generation", "status": "pending"}
]
```

## Planning Notes Template

After Phase 1, create `planning-notes.md` with this structure:

```markdown
# Planning Notes

**Session**: ${sessionId}
**Created**: ${timestamp}

## User Intent (Phase 1)

- **GOAL**: ${userGoal}
- **KEY_CONSTRAINTS**: ${userConstraints}

---

## Context Findings (Phase 2)
(To be filled by context-gather)

## Conflict Decisions (Phase 3)
(To be filled if conflicts detected)

## Consolidated Constraints (Phase 4 Input)
1. ${userConstraints}

---

## Task Generation (Phase 4)
(To be filled by action-planning-agent)

## N+1 Context
### Decisions
| Decision | Rationale | Revisit? |
|----------|-----------|----------|

### Deferred
- [ ] (For N+1)
```

## Post-Phase Updates

### After Phase 2

Read the context-package to extract key findings, then update planning-notes.md (see the sketch after this list):
- `Context Findings (Phase 2)`: CRITICAL_FILES, ARCHITECTURE, CONFLICT_RISK, CONSTRAINTS
- `Consolidated Constraints`: Append Phase 2 constraints
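
One way this update might look in pseudocode (the section-replacement call and field names are assumptions based on the context-package fields mentioned elsewhere):

```javascript
// Hypothetical sketch; Edit-style section replacement is an assumption.
const pkg = JSON.parse(Read(contextPath));
Edit(planningNotesPath, {
  old: '## Context Findings (Phase 2)\n(To be filled by context-gather)',
  new: '## Context Findings (Phase 2)\n' +
       `- CONFLICT_RISK: ${pkg.conflict_detection.risk_level}\n` +
       `- CONSTRAINTS: ${(pkg.constraints ?? []).join('; ')}` // assumed field
});
```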

### After Phase 3

If executed, read conflict-resolution.json and update planning-notes.md:
- `Conflict Decisions (Phase 3)`: RESOLVED, MODIFIED_ARTIFACTS, CONSTRAINTS
- `Consolidated Constraints`: Append Phase 3 planning constraints

### Memory State Check

After Phase 3, evaluate context window usage. If memory usage is high (>120K tokens):
```javascript
Skill(skill="compact")
```

## Phase 4 User Decision

After Phase 4 completes, present the user with action choices:

```javascript
AskUserQuestion({
  questions: [{
    question: "Planning complete. What would you like to do next?",
    header: "Next Action",
    multiSelect: false,
    options: [
      {
        label: "Verify Plan Quality (Recommended)",
        description: "Run quality verification to catch issues before execution."
      },
      {
        label: "Start Execution",
        description: "Begin implementing tasks immediately."
      },
      {
        label: "Review Status Only",
        description: "View task breakdown and session status without taking further action."
      }
    ]
  }]
});

// Execute based on user choice
// "Verify Plan Quality" → Skill(skill="workflow:plan-verify", args="--session " + sessionId)
// "Start Execution"     → Skill(skill="workflow:execute", args="--session " + sessionId)
// "Review Status Only"  → Skill(skill="workflow:status", args="--session " + sessionId)
```

## Error Handling

- **Parsing Failure**: If output parsing fails, retry the command once, then report the error
- **Validation Failure**: If validation fails, report which file/data is missing
- **Command Failure**: Keep the phase `in_progress`, report the error to the user, and do not proceed to the next phase

## Coordinator Checklist

- **Pre-Phase**: Convert user input to structured format (GOAL/SCOPE/CONTEXT)
- Initialize TodoWrite before any command (Phase 3 added dynamically after Phase 2)
- Execute Phase 1 immediately with the structured description
- Parse the session ID from Phase 1 output, store it in memory
- Pass the session ID and structured description to the Phase 2 command
- Parse the context path from Phase 2 output, store it in memory
- **Extract conflict_risk from context-package.json**: Determine Phase 3 execution
- **If conflict_risk >= medium**: Launch Phase 3 with sessionId and contextPath
- **If conflict_risk is none/low**: Skip Phase 3, proceed directly to Phase 4
- **Build Phase 4 command**: `/workflow:tools:task-generate-agent --session [sessionId]`
- Verify all Phase 4 outputs
- Update TodoWrite after each phase
- After each phase, automatically continue to the next phase based on TodoList status

## Related Commands

**Prerequisite Commands**:
- `/workflow:brainstorm:artifacts` - Optional: Generate role-based analyses before planning
- `/workflow:brainstorm:synthesis` - Optional: Refine brainstorm analyses with clarifications

**Follow-up Commands**:
- `/workflow:plan-verify` - Recommended: Verify plan quality before execution
- `/workflow:status` - Review task breakdown and current progress
- `/workflow:execute` - Begin implementation of generated tasks
.claude/skills/workflow-plan/phases/01-session-discovery.md (new file, 281 lines)
@@ -0,0 +1,281 @@

# Phase 1: Session Discovery

Discover existing sessions or start a new workflow session with intelligent session management and conflict detection.

## Objective

- Ensure project-level state exists (first-time initialization)
- Create or discover a workflow session for the planning workflow
- Generate a unique session ID (WFS-xxx format)
- Initialize the session directory structure

## Step 0: Initialize Project State (First-time Only)

**Executed before all modes** - Ensures project-level state files exist by calling `/workflow:init`.

### Check and Initialize
```bash
# Check if project state exists (both files required)
bash(test -f .workflow/project-tech.json && echo "TECH_EXISTS" || echo "TECH_NOT_FOUND")
bash(test -f .workflow/project-guidelines.json && echo "GUIDELINES_EXISTS" || echo "GUIDELINES_NOT_FOUND")
```

**If either NOT_FOUND**, delegate to `/workflow:init`:
```javascript
// Call workflow:init for intelligent project analysis
Skill(skill="workflow:init");

// Wait for init completion
// project-tech.json and project-guidelines.json will be created
```

**Output**:
- If BOTH_EXIST: `PROJECT_STATE: initialized`
- If NOT_FOUND: Calls `/workflow:init` → creates:
  - `.workflow/project-tech.json` with full technical analysis
  - `.workflow/project-guidelines.json` with empty scaffold

**Note**: `/workflow:init` uses cli-explore-agent to build comprehensive project understanding (technology stack, architecture, key components). This step runs once per project; subsequent executions skip initialization.

## Execution

### Step 1.1: Execute Session Start

```javascript
Skill(skill="workflow:session:start", args="--auto \"[structured-task-description]\"")
```

**Task Description Structure**:
```
GOAL: [Clear, concise objective]
SCOPE: [What's included/excluded]
CONTEXT: [Relevant background or constraints]
```

**Example**:
```
GOAL: Build JWT-based authentication system
SCOPE: User registration, login, token validation
CONTEXT: Existing user database schema, REST API endpoints
```

### Step 1.2: Parse Output

- Extract: `SESSION_ID: WFS-[id]` (store as `sessionId`), as in the sketch below
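
A sketch of that extraction; the regex is an assumption consistent with the Session ID Format section below:

```javascript
// Parse SESSION_ID from the session:start output (regex is an assumption).
const sessionId = phase1Output.match(/SESSION_ID:\s*(WFS-[a-z0-9-]+)/)?.[1];
if (!sessionId) throw new Error('Failed to parse SESSION_ID from session:start output');
```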

### Step 1.3: Validate

- Session ID successfully extracted
- Session directory `.workflow/active/[sessionId]/` exists

**Note**: The session directory contains `workflow-session.json` (metadata). Do NOT look for `manifest.json` here - it only exists in `.workflow/archives/` for archived sessions.

### Step 1.4: Initialize Planning Notes

Create `planning-notes.md` with N+1 context support:

```javascript
const planningNotesPath = `.workflow/active/${sessionId}/planning-notes.md`
const userGoal = structuredDescription.goal
const userConstraints = structuredDescription.context || "None specified"

Write(planningNotesPath, `# Planning Notes

**Session**: ${sessionId}
**Created**: ${new Date().toISOString()}

## User Intent (Phase 1)

- **GOAL**: ${userGoal}
- **KEY_CONSTRAINTS**: ${userConstraints}

---

## Context Findings (Phase 2)
(To be filled by context-gather)

## Conflict Decisions (Phase 3)
(To be filled if conflicts detected)

## Consolidated Constraints (Phase 4 Input)
1. ${userConstraints}

---

## Task Generation (Phase 4)
(To be filled by action-planning-agent)

## N+1 Context
### Decisions
| Decision | Rationale | Revisit? |
|----------|-----------|----------|

### Deferred
- [ ] (For N+1)
`)
```

## Session Types

The `--type` parameter classifies sessions for CCW dashboard organization:

| Type | Description | Default For |
|------|-------------|-------------|
| `workflow` | Standard implementation (default) | `/workflow:plan` |
| `review` | Code review sessions | `/workflow:review-module-cycle` |
| `tdd` | TDD-based development | `/workflow:tdd-plan` |
| `test` | Test generation/fix sessions | `/workflow:test-fix-gen` |
| `docs` | Documentation sessions | `/memory:docs` |

**Validation**: If `--type` is provided with an invalid value, return an error:
```
ERROR: Invalid session type. Valid types: workflow, review, tdd, test, docs
```

## Mode 1: Discovery Mode (Default)

### Usage
```bash
/workflow:session:start
```

### Step 1: List Active Sessions
```bash
bash(ls -1 .workflow/active/ 2>/dev/null | head -5)
```

### Step 2: Display Session Metadata
```bash
bash(cat .workflow/active/WFS-promptmaster-platform/workflow-session.json)
```

### Step 3: User Decision
Present session information and wait for the user to select or create a session.

**Output**: `SESSION_ID: WFS-[user-selected-id]`

## Mode 2: Auto Mode (Intelligent)

### Usage
```bash
/workflow:session:start --auto "task description"
```

### Step 1: Check Active Sessions Count
```bash
bash(find .workflow/active/ -name "WFS-*" -type d 2>/dev/null | wc -l)
```

### Step 2a: No Active Sessions → Create New
```bash
# Generate session slug
bash(echo "implement OAuth2 auth" | sed 's/[^a-zA-Z0-9]/-/g' | tr '[:upper:]' '[:lower:]' | cut -c1-50)

# Create directory structure
bash(mkdir -p .workflow/active/WFS-implement-oauth2-auth/.process)
bash(mkdir -p .workflow/active/WFS-implement-oauth2-auth/.task)
bash(mkdir -p .workflow/active/WFS-implement-oauth2-auth/.summaries)

# Create metadata (include type field, default to "workflow" if not specified)
bash(echo '{"session_id":"WFS-implement-oauth2-auth","project":"implement OAuth2 auth","status":"planning","type":"workflow","created_at":"2024-12-04T08:00:00Z"}' > .workflow/active/WFS-implement-oauth2-auth/workflow-session.json)
```

**Output**: `SESSION_ID: WFS-implement-oauth2-auth`

### Step 2b: Single Active Session → Check Relevance
```bash
# Extract session ID
bash(find .workflow/active/ -name "WFS-*" -type d 2>/dev/null | head -1 | xargs basename)

# Read project name from metadata
bash(cat .workflow/active/WFS-promptmaster-platform/workflow-session.json | grep -o '"project":"[^"]*"' | cut -d'"' -f4)

# Check keyword match (manual comparison)
# If task contains project keywords → Reuse session
# If task unrelated → Create new session (use Step 2a)
```

**Output (reuse)**: `SESSION_ID: WFS-promptmaster-platform`
**Output (new)**: `SESSION_ID: WFS-[new-slug]`

### Step 2c: Multiple Active Sessions → Use First
```bash
# Get first active session
bash(find .workflow/active/ -name "WFS-*" -type d 2>/dev/null | head -1 | xargs basename)

# Output warning and session ID
# WARNING: Multiple active sessions detected
# SESSION_ID: WFS-first-session
```

## Mode 3: Force New Mode

### Usage
```bash
/workflow:session:start --new "task description"
```

### Step 1: Generate Unique Session Slug
```bash
# Convert to slug
bash(echo "fix login bug" | sed 's/[^a-zA-Z0-9]/-/g' | tr '[:upper:]' '[:lower:]' | cut -c1-50)

# Check if exists, add counter if needed
bash(ls .workflow/active/WFS-fix-login-bug 2>/dev/null && echo "WFS-fix-login-bug-2" || echo "WFS-fix-login-bug")
```

### Step 2: Create Session Structure
```bash
bash(mkdir -p .workflow/active/WFS-fix-login-bug/.process)
bash(mkdir -p .workflow/active/WFS-fix-login-bug/.task)
bash(mkdir -p .workflow/active/WFS-fix-login-bug/.summaries)
```

### Step 3: Create Metadata
```bash
# Include type field from --type parameter (default: "workflow")
bash(echo '{"session_id":"WFS-fix-login-bug","project":"fix login bug","status":"planning","type":"workflow","created_at":"2024-12-04T08:00:00Z"}' > .workflow/active/WFS-fix-login-bug/workflow-session.json)
```

**Output**: `SESSION_ID: WFS-fix-login-bug`

## Execution Guideline

- **Non-interrupting**: When called from other commands, this command completes and returns control to the caller without interrupting subsequent tasks.

## Session ID Format

- Pattern: `WFS-[lowercase-slug]`
- Characters: `a-z`, `0-9`, `-` only
- Max length: 50 characters
- Uniqueness: Add a numeric suffix on collision (`WFS-auth-2`, `WFS-auth-3`); see the sketch below
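
These rules could be implemented roughly as follows (`dirExists` is an assumed helper):

```javascript
// Sketch of the slug rules above; collision handling via numeric suffix.
function toSessionSlug(description) {
  const slug = description
    .toLowerCase()
    .replace(/[^a-z0-9]+/g, '-')
    .replace(/^-+|-+$/g, '')
    .slice(0, 50);
  let candidate = `WFS-${slug}`;
  for (let n = 2; dirExists(`.workflow/active/${candidate}`); n++) {
    candidate = `WFS-${slug}-${n}`; // dirExists is an assumed helper
  }
  return candidate;
}
```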

## Output Format Specification

### Success
```
SESSION_ID: WFS-session-slug
```

### Error
```
ERROR: --auto mode requires task description
ERROR: Failed to create session directory
```

### Analysis (Auto Mode)
```
ANALYSIS: Task relevance = high
DECISION: Reusing existing session
SESSION_ID: WFS-promptmaster-platform
```

## Output

- **Variable**: `sessionId` (e.g., `WFS-implement-oauth2-auth`)
- **File**: `.workflow/active/{sessionId}/planning-notes.md`
- **TodoWrite**: Mark Phase 1 completed, Phase 2 in_progress

## Next Phase

Return to the orchestrator showing Phase 1 results, then auto-continue to [Phase 2: Context Gathering](02-context-gathering.md).
.claude/skills/workflow-plan/phases/02-context-gathering.md (new file, 427 lines)
@@ -0,0 +1,427 @@
|
||||
# Phase 2: Context Gathering
|
||||
|
||||
Intelligently collect project context using context-search-agent based on task description, packages into standardized JSON.
|
||||
|
||||
## Objective
|
||||
|
||||
- Check for existing valid context-package before executing
|
||||
- Assess task complexity and launch parallel exploration agents
|
||||
- Invoke context-search-agent to analyze codebase
|
||||
- Generate standardized `context-package.json` with prioritized context
|
||||
- Detect conflict risk level for Phase 3 decision
|
||||
|
||||
## Core Philosophy
|
||||
|
||||
- **Agent Delegation**: Delegate all discovery to `context-search-agent` for autonomous execution
|
||||
- **Detection-First**: Check for existing context-package before executing
|
||||
- **Plan Mode**: Full comprehensive analysis (vs lightweight brainstorm mode)
|
||||
- **Standardized Output**: Generate `.workflow/active/{session}/.process/context-package.json`
|
||||
|
||||
## Execution Process
|
||||
|
||||
```
|
||||
Input Parsing:
|
||||
├─ Parse flags: --session
|
||||
└─ Parse: task_description (required)
|
||||
|
||||
Step 1: Context-Package Detection
|
||||
└─ Decision (existing package):
|
||||
├─ Valid package exists → Return existing (skip execution)
|
||||
└─ No valid package → Continue to Step 2
|
||||
|
||||
Step 2: Complexity Assessment & Parallel Explore
|
||||
├─ Analyze task_description → classify Low/Medium/High
|
||||
├─ Select exploration angles (1-4 based on complexity)
|
||||
├─ Launch N cli-explore-agents in parallel
|
||||
│ └─ Each outputs: exploration-{angle}.json
|
||||
└─ Generate explorations-manifest.json
|
||||
|
||||
Step 3: Invoke Context-Search Agent (with exploration input)
|
||||
├─ Phase 1: Initialization & Pre-Analysis
|
||||
├─ Phase 2: Multi-Source Discovery
|
||||
│ ├─ Track 0: Exploration Synthesis (prioritize & deduplicate)
|
||||
│ ├─ Track 1-4: Existing tracks
|
||||
└─ Phase 3: Synthesis & Packaging
|
||||
└─ Generate context-package.json with exploration_results
|
||||
|
||||
Step 4: Output Verification
|
||||
└─ Verify context-package.json contains exploration_results
|
||||
```
|
||||
|
||||
## Execution Flow
|
||||
|
||||
### Step 1: Context-Package Detection
|
||||
|
||||
**Execute First** - Check if valid package already exists:
|
||||
|
||||
```javascript
|
||||
const contextPackagePath = `.workflow/${session_id}/.process/context-package.json`;
|
||||
|
||||
if (file_exists(contextPackagePath)) {
|
||||
const existing = Read(contextPackagePath);
|
||||
|
||||
// Validate package belongs to current session
|
||||
if (existing?.metadata?.session_id === session_id) {
|
||||
console.log("Valid context-package found for session:", session_id);
|
||||
console.log("Stats:", existing.statistics);
|
||||
console.log("Conflict Risk:", existing.conflict_detection.risk_level);
|
||||
return existing; // Skip execution, return existing
|
||||
} else {
|
||||
console.warn("Invalid session_id in existing package, re-generating...");
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||

### Step 2: Complexity Assessment & Parallel Exploration

**Only execute if Step 1 finds no valid package**

```javascript
// 2.1 Complexity Assessment
function analyzeTaskComplexity(taskDescription) {
  const text = taskDescription.toLowerCase();
  if (/architect|refactor|restructure|modular|cross-module/.test(text)) return 'High';
  if (/multiple|several|integrate|migrate|extend/.test(text)) return 'Medium';
  return 'Low';
}

const ANGLE_PRESETS = {
  architecture: ['architecture', 'dependencies', 'modularity', 'integration-points'],
  security: ['security', 'auth-patterns', 'dataflow', 'validation'],
  performance: ['performance', 'bottlenecks', 'caching', 'data-access'],
  bugfix: ['error-handling', 'dataflow', 'state-management', 'edge-cases'],
  feature: ['patterns', 'integration-points', 'testing', 'dependencies'],
  refactor: ['architecture', 'patterns', 'dependencies', 'testing']
};

function selectAngles(taskDescription, complexity) {
  const text = taskDescription.toLowerCase();
  let preset = 'feature';
  if (/refactor|architect|restructure/.test(text)) preset = 'architecture';
  else if (/security|auth|permission/.test(text)) preset = 'security';
  else if (/performance|slow|optimi/.test(text)) preset = 'performance';
  else if (/fix|bug|error|issue/.test(text)) preset = 'bugfix';

  const count = complexity === 'High' ? 4 : (complexity === 'Medium' ? 3 : 1);
  return ANGLE_PRESETS[preset].slice(0, count);
}

const complexity = analyzeTaskComplexity(task_description);
const selectedAngles = selectAngles(task_description, complexity);
const sessionFolder = `.workflow/active/${session_id}/.process`;

// 2.2 Launch Parallel Explore Agents
const explorationTasks = selectedAngles.map((angle, index) =>
  Task(
    subagent_type="cli-explore-agent",
    run_in_background=false,
    description=`Explore: ${angle}`,
    prompt=`
## Task Objective
Execute **${angle}** exploration for task planning context. Analyze the codebase from this specific angle to discover relevant structure, patterns, and constraints.

## Assigned Context
- **Exploration Angle**: ${angle}
- **Task Description**: ${task_description}
- **Session ID**: ${session_id}
- **Exploration Index**: ${index + 1} of ${selectedAngles.length}
- **Output File**: ${sessionFolder}/exploration-${angle}.json

## MANDATORY FIRST STEPS (Execute by Agent)
**You (cli-explore-agent) MUST execute these steps in order:**
1. Run: ccw tool exec get_modules_by_depth '{}' (project structure)
2. Run: rg -l "{keyword_from_task}" --type ts (locate relevant files)
3. Execute: cat ~/.claude/workflows/cli-templates/schemas/explore-json-schema.json (get output schema reference)

## Exploration Strategy (${angle} focus)

**Step 1: Structural Scan** (Bash)
- get_modules_by_depth.sh → identify modules related to ${angle}
- find/rg → locate files relevant to the ${angle} aspect
- Analyze imports/dependencies from the ${angle} perspective

**Step 2: Semantic Analysis** (Gemini CLI)
- How does existing code handle ${angle} concerns?
- What patterns are used for ${angle}?
- Where would new code integrate from the ${angle} viewpoint?

**Step 3: Write Output**
- Consolidate ${angle} findings into JSON
- Identify ${angle}-specific clarification needs

## Expected Output

**File**: ${sessionFolder}/exploration-${angle}.json

**Schema Reference**: Schema obtained in MANDATORY FIRST STEPS step 3; follow the schema exactly

**Required Fields** (all ${angle} focused):
- project_structure: Modules/architecture relevant to ${angle}
- relevant_files: Files affected from the ${angle} perspective
  **IMPORTANT**: Use object format with relevance scores for synthesis:
  \`[{path: "src/file.ts", relevance: 0.85, rationale: "Core ${angle} logic"}]\`
  Scores: 0.7+ high priority, 0.5-0.7 medium, <0.5 low
- patterns: ${angle}-related patterns to follow
- dependencies: Dependencies relevant to ${angle}
- integration_points: Where to integrate from the ${angle} viewpoint (include file:line locations)
- constraints: ${angle}-specific limitations/conventions
- clarification_needs: ${angle}-related ambiguities (options array + recommended index)
- _metadata.exploration_angle: "${angle}"

## Success Criteria
- [ ] Schema obtained via cat explore-json-schema.json
- [ ] get_modules_by_depth.sh executed
- [ ] At least 3 relevant files identified with ${angle} rationale
- [ ] Patterns are actionable (code examples, not generic advice)
- [ ] Integration points include file:line locations
- [ ] Constraints are project-specific to ${angle}
- [ ] JSON output follows schema exactly
- [ ] clarification_needs includes options + recommended

## Output
Write: ${sessionFolder}/exploration-${angle}.json
Return: 2-3 sentence summary of ${angle} findings
`
  )
);

// 2.3 Generate Manifest after all complete
const explorationFiles = bash(`find ${sessionFolder} -name "exploration-*.json" -type f`).split('\n').filter(f => f.trim());
const explorationManifest = {
  session_id,
  task_description,
  timestamp: new Date().toISOString(),
  complexity,
  exploration_count: selectedAngles.length,
  angles_explored: selectedAngles,
  explorations: explorationFiles.map(file => {
    const data = JSON.parse(Read(file));
    return { angle: data._metadata.exploration_angle, file: file.split('/').pop(), path: file, index: data._metadata.exploration_index };
  })
};
Write(`${sessionFolder}/explorations-manifest.json`, JSON.stringify(explorationManifest, null, 2));
```
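
For illustration, here is how the two helpers above combine (values are illustrative, not from a real session):

```javascript
// Usage example (illustrative task description):
const desc = "Refactor the auth module to restructure session handling";
analyzeTaskComplexity(desc); // → 'High' (matches /refactor|restructure/)
selectAngles(desc, 'High');
// → ['architecture', 'dependencies', 'modularity', 'integration-points']
```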

### Step 3: Invoke Context-Search Agent

**Only execute after Step 2 completes**

```javascript
// Load user intent from planning-notes.md (from Phase 1)
const planningNotesPath = `.workflow/active/${session_id}/planning-notes.md`;
let userIntent = { goal: task_description, key_constraints: "None specified" };

if (file_exists(planningNotesPath)) {
  const notesContent = Read(planningNotesPath);
  const goalMatch = notesContent.match(/\*\*GOAL\*\*:\s*(.+)/);
  const constraintsMatch = notesContent.match(/\*\*KEY_CONSTRAINTS\*\*:\s*(.+)/);
  if (goalMatch) userIntent.goal = goalMatch[1].trim();
  if (constraintsMatch) userIntent.key_constraints = constraintsMatch[1].trim();
}

Task(
  subagent_type="context-search-agent",
  run_in_background=false,
  description="Gather comprehensive context for plan",
  prompt=`
## Execution Mode
**PLAN MODE** (Comprehensive) - Full Phase 1-3 execution with priority sorting

## Session Information
- **Session ID**: ${session_id}
- **Task Description**: ${task_description}
- **Output Path**: .workflow/active/${session_id}/.process/context-package.json

## User Intent (from Phase 1 - Planning Notes)
**GOAL**: ${userIntent.goal}
**KEY_CONSTRAINTS**: ${userIntent.key_constraints}

This is the PRIMARY context source - all subsequent analysis must align with user intent.

## Exploration Input (from Step 2)
- **Manifest**: ${sessionFolder}/explorations-manifest.json
- **Exploration Count**: ${explorationManifest.exploration_count}
- **Angles**: ${explorationManifest.angles_explored.join(', ')}
- **Complexity**: ${complexity}

## Mission
Execute the complete context-search-agent workflow for implementation planning:

### Phase 1: Initialization & Pre-Analysis
1. **Project State Loading**:
   - Read and parse \`.workflow/project-tech.json\`. Use its \`overview\` section as the foundational \`project_context\`. This is your primary source for architecture, tech stack, and key components.
   - Read and parse \`.workflow/project-guidelines.json\`. Load \`conventions\`, \`constraints\`, and \`learnings\` into a \`project_guidelines\` section.
   - If files don't exist, proceed with fresh analysis.
2. **Detection**: Check for existing context-package (early exit if valid)
3. **Foundation**: Initialize CodexLens, get project structure, load docs
4. **Analysis**: Extract keywords, determine scope, classify complexity based on task description and project state

### Phase 2: Multi-Source Context Discovery
Execute all discovery tracks (WITH USER INTENT INTEGRATION):
- **Track -1**: User Intent & Priority Foundation (EXECUTE FIRST)
  - Load user intent (GOAL, KEY_CONSTRAINTS) from session input
  - Map user requirements to codebase entities (files, modules, patterns)
  - Establish baseline priority scores based on user goal alignment
  - Output: user_intent_mapping.json with preliminary priority scores

- **Track 0**: Exploration Synthesis (load ${sessionFolder}/explorations-manifest.json, prioritize critical_files, deduplicate patterns/integration_points)
- **Track 1**: Historical archive analysis (query manifest.json for lessons learned)
- **Track 2**: Reference documentation (CLAUDE.md, architecture docs)
- **Track 3**: Web examples (use Exa MCP for unfamiliar tech/APIs)
- **Track 4**: Codebase analysis (5-layer discovery: files, content, patterns, deps, config/tests)

### Phase 3: Synthesis, Assessment & Packaging
1. Apply relevance scoring and build dependency graph
2. **Synthesize 5-source data** (including Track -1): Merge findings from all sources
   - Priority order: User Intent > Archive > Docs > Exploration > Code > Web
   - **Prioritize the context from \`project-tech.json\`** for architecture and tech stack unless code analysis reveals it's outdated
3. **Context Priority Sorting**:
   a. Combine scores from Track -1 (user intent alignment) + relevance scores + exploration critical_files
   b. Classify files into priority tiers:
      - **Critical** (score >= 0.85): Directly mentioned in user goal OR exploration critical_files
      - **High** (0.70-0.84): Key dependencies, patterns required for goal
      - **Medium** (0.50-0.69): Supporting files, indirect dependencies
      - **Low** (< 0.50): Contextual awareness only
   c. Generate dependency_order: Based on dependency graph + user goal sequence
   d. Document sorting_rationale: Explain prioritization logic

4. **Populate \`project_context\`**: Directly use the \`overview\` from \`project-tech.json\` to fill the \`project_context\` section. Include description, technology_stack, architecture, and key_components.
5. **Populate \`project_guidelines\`**: Load conventions, constraints, and learnings from \`project-guidelines.json\` into a dedicated section.
6. Integrate brainstorm artifacts (if .brainstorming/ exists, read content)
7. Perform conflict detection with risk assessment
8. **Inject historical conflicts** from archive analysis into conflict_detection
9. **Generate prioritized_context section**:
   \`\`\`json
   {
     "prioritized_context": {
       "user_intent": {
         "goal": "...",
         "scope": "...",
         "key_constraints": ["..."]
       },
       "priority_tiers": {
         "critical": [{ "path": "...", "relevance": 0.95, "rationale": "..." }],
         "high": [...],
         "medium": [...],
         "low": [...]
       },
       "dependency_order": ["module1", "module2", "module3"],
       "sorting_rationale": "Based on user goal alignment (Track -1), exploration critical files, and dependency graph analysis"
     }
   }
   \`\`\`
10. Generate and validate context-package.json with the prioritized_context field

## Output Requirements
Complete context-package.json with:
- **metadata**: task_description, keywords, complexity, tech_stack, session_id
- **project_context**: description, technology_stack, architecture, key_components (sourced from \`project-tech.json\`)
- **project_guidelines**: {conventions, constraints, quality_rules, learnings} (sourced from \`project-guidelines.json\`)
- **assets**: {documentation[], source_code[], config[], tests[]} with relevance scores
- **dependencies**: {internal[], external[]} with dependency graph
- **brainstorm_artifacts**: {guidance_specification, role_analyses[], synthesis_output} with content
- **conflict_detection**: {risk_level, risk_factors, affected_modules[], mitigation_strategy, historical_conflicts[]}
- **exploration_results**: {manifest_path, exploration_count, angles, explorations[], aggregated_insights} (from Track 0)
- **prioritized_context**: {user_intent, priority_tiers{critical, high, medium, low}, dependency_order[], sorting_rationale}

## Quality Validation
Before completion verify:
- [ ] Valid JSON format with all required fields
- [ ] File relevance accuracy >80%
- [ ] Dependency graph complete (max 2 transitive levels)
- [ ] Conflict risk level calculated correctly
- [ ] No sensitive data exposed
- [ ] Total files <=50 (prioritize high-relevance)

## Planning Notes Record (REQUIRED)
After completing context-package.json, append a brief execution record to planning-notes.md:

**File**: .workflow/active/${session_id}/planning-notes.md
**Location**: Under "## Context Findings (Phase 2)" section
**Format**:
\`\`\`
### [Context-Search Agent] YYYY-MM-DD
- **Note**: [brief summary of key findings]
\`\`\`

Execute autonomously following agent documentation.
Report completion with statistics.
`
)
```
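
For reference, the tier thresholds above can be expressed as a small helper. This is a sketch only; `tierFor` and `buildPriorityTiers` are hypothetical names, not part of the agent API, and the code assumes scores are normalized to 0..1:

```javascript
// Sketch: map a combined relevance score to the priority tiers defined above.
function tierFor(score) {
  if (score >= 0.85) return 'critical';
  if (score >= 0.70) return 'high';
  if (score >= 0.50) return 'medium';
  return 'low';
}

// Usage: bucket scored files into priority_tiers
function buildPriorityTiers(scoredFiles) {
  const tiers = { critical: [], high: [], medium: [], low: [] };
  for (const f of scoredFiles) {
    tiers[tierFor(f.relevance)].push(f); // f: { path, relevance, rationale }
  }
  return tiers;
}
```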

### Step 4: Output Verification

After the agent completes, verify the output:

```javascript
// Verify file was created
const outputPath = `.workflow/active/${session_id}/.process/context-package.json`;
if (!file_exists(outputPath)) {
  throw new Error("Agent failed to generate context-package.json");
}

// Verify exploration_results included
const pkg = JSON.parse(Read(outputPath));
if (pkg.exploration_results?.exploration_count > 0) {
  console.log(`Exploration results aggregated: ${pkg.exploration_results.exploration_count} angles`);
} else {
  console.warn("context-package.json is missing exploration_results - check Step 2 outputs");
}
```

## Parameter Reference

| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `--session` | string | Yes | Workflow session ID (e.g., WFS-user-auth) |
| `task_description` | string | Yes | Detailed task description for context extraction |

## Post-Phase Update

After context-gather completes, update planning-notes.md:

```javascript
// contextPath and planningNotesPath are set in the earlier steps
const contextPackage = JSON.parse(Read(contextPath))
const conflictRisk = contextPackage.conflict_detection?.risk_level || 'low'
const criticalFiles = (contextPackage.exploration_results?.aggregated_insights?.critical_files || [])
  .slice(0, 5).map(f => f.path)
const archPatterns = contextPackage.project_context?.architecture_patterns || []
const constraints = contextPackage.exploration_results?.aggregated_insights?.constraints || []

// Update Phase 2 section
Edit(planningNotesPath, {
  old: '## Context Findings (Phase 2)\n(To be filled by context-gather)',
  new: `## Context Findings (Phase 2)

- **CRITICAL_FILES**: ${criticalFiles.join(', ') || 'None identified'}
- **ARCHITECTURE**: ${archPatterns.join(', ') || 'Not detected'}
- **CONFLICT_RISK**: ${conflictRisk}
- **CONSTRAINTS**: ${constraints.length > 0 ? constraints.join('; ') : 'None'}`
})

// Append Phase 2 constraints to the consolidated list
// (numbering starts at 2; entry 1 is assumed to be the Phase 1 constraint)
Edit(planningNotesPath, {
  old: '## Consolidated Constraints (Phase 4 Input)',
  new: `## Consolidated Constraints (Phase 4 Input)
${constraints.map((c, i) => `${i + 2}. [Context] ${c}`).join('\n')}`
})
```

## Notes

- **Detection-first**: Always check for an existing package before invoking the agent
- **User intent integration**: Load user intent from planning-notes.md (Phase 1 output)
- **Output**: Generates `context-package.json` with a `prioritized_context` field
- **Plan-specific**: Use this for implementation planning; brainstorm mode uses a direct agent call

## Output

- **Variable**: `contextPath` (e.g., `.workflow/active/WFS-xxx/.process/context-package.json`)
- **Variable**: `conflictRisk` (none/low/medium/high)
- **File**: Updated `planning-notes.md` with context findings
- **Decision**: If `conflictRisk >= medium` → Phase 3, else → Phase 4 (see the comparison sketch below)
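
Risk levels are ordinal, so the `>= medium` check can be done by index. A minimal sketch, assuming the four level names above match the values emitted into context-package.json (`riskAtLeast` is a hypothetical helper):

```javascript
// Sketch: ordinal comparison of conflict risk levels
const RISK_ORDER = ['none', 'low', 'medium', 'high'];

function riskAtLeast(risk, threshold) {
  return RISK_ORDER.indexOf(risk) >= RISK_ORDER.indexOf(threshold);
}

// Usage: riskAtLeast(conflictRisk, 'medium') → Phase 3, otherwise Phase 4
```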

## Next Phase

Return to the orchestrator showing Phase 2 results, then auto-continue:
- If `conflict_risk >= medium` → [Phase 3: Conflict Resolution](03-conflict-resolution.md)
- If `conflict_risk < medium` → [Phase 4: Task Generation](04-task-generation.md)

---

**File**: `.claude/skills/workflow-plan/phases/03-conflict-resolution.md` (new file, 645 lines)

# Phase 3: Conflict Resolution

Detect and resolve conflicts between the plan and the existing codebase using CLI-powered analysis with Gemini/Qwen.

## Objective

- Analyze conflicts between plan and existing code, **including module scenario uniqueness detection**
- Generate multiple resolution strategies with **iterative clarification until boundaries are clear**
- Apply selected modifications to brainstorm artifacts

**Scope**: Detection and strategy generation only - NO code modification or task creation.

**Trigger**: Auto-executes when `conflict_risk >= medium`.

## Auto Mode

When `--yes` or `-y`: Auto-select the recommended strategy for each conflict, skip clarification questions.

## Core Responsibilities

| Responsibility | Description |
|---------------|-------------|
| **Detect Conflicts** | Analyze plan vs existing code inconsistencies |
| **Scenario Uniqueness** | Search and compare new modules with existing modules for functional overlaps |
| **Generate Strategies** | Provide 2-4 resolution options per conflict |
| **Iterative Clarification** | Ask questions until scenario boundaries are clear and unique (up to 10 rounds) |
| **Agent Re-analysis** | Dynamically update strategies based on user clarifications |
| **CLI Analysis** | Use Gemini/Qwen (Claude fallback) |
| **User Decision** | Present options ONE BY ONE, never auto-apply |
| **Direct Text Output** | Output questions via text directly, NEVER use bash echo/printf |
| **Structured Data** | JSON output for programmatic processing, NO file generation |

## Conflict Categories

### 1. Architecture Conflicts
- Incompatible design patterns
- Module structure changes
- Pattern migration requirements

### 2. API Conflicts
- Breaking contract changes
- Signature modifications
- Public interface impacts

### 3. Data Model Conflicts
- Schema modifications
- Type breaking changes
- Data migration needs

### 4. Dependency Conflicts
- Version incompatibilities
- Setup conflicts
- Breaking updates

### 5. Module Scenario Overlap
- Functional overlap between new and existing modules
- Scenario boundary ambiguity
- Duplicate responsibility detection
- Module merge/split decisions
- **Requires iterative clarification until uniqueness confirmed**

## Execution Process

```
Input Parsing:
├─ Parse flags: --session, --context
└─ Validation: Both REQUIRED, conflict_risk >= medium

Phase 1: Validation
├─ Step 1: Verify session directory exists
├─ Step 2: Load context-package.json
├─ Step 3: Check conflict_risk (skip if none/low)
└─ Step 4: Prepare agent task prompt

Phase 2: CLI-Powered Analysis (Agent)
├─ Execute Gemini analysis (Qwen fallback)
├─ Detect conflicts including ModuleOverlap category
└─ Generate 2-4 strategies per conflict with modifications

Phase 3: Iterative User Interaction
└─ FOR each conflict (one by one):
   ├─ Display conflict with overlap_analysis (if ModuleOverlap)
   ├─ Display strategies (2-4 + custom option)
   ├─ User selects strategy
   └─ IF clarification_needed:
      ├─ Collect answers
      ├─ Agent re-analysis
      └─ Loop until uniqueness_confirmed (max 10 rounds)

Phase 4: Apply Modifications
├─ Step 1: Extract modifications from resolved strategies
├─ Step 2: Apply using Edit tool
├─ Step 3: Update context-package.json (mark resolved)
└─ Step 4: Output custom conflict summary (if any)
```

## Execution Flow

### Phase 1: Validation
```
1. Verify session directory exists
2. Load context-package.json
3. Check conflict_risk (skip if none/low)
4. Prepare agent task prompt
```

### Phase 2: CLI-Powered Analysis

**Agent Delegation**:
```javascript
Task(subagent_type="cli-execution-agent", run_in_background=false, prompt=`
## Context
- Session: {session_id}
- Risk: {conflict_risk}
- Files: {existing_files_list}

## Exploration Context (from context-package.exploration_results)
- Exploration Count: ${contextPackage.exploration_results?.exploration_count || 0}
- Angles Analyzed: ${JSON.stringify(contextPackage.exploration_results?.angles || [])}
- Pre-identified Conflict Indicators: ${JSON.stringify(contextPackage.exploration_results?.aggregated_insights?.conflict_indicators || [])}
- Critical Files: ${JSON.stringify(contextPackage.exploration_results?.aggregated_insights?.critical_files?.map(f => f.path) || [])}
- All Patterns: ${JSON.stringify(contextPackage.exploration_results?.aggregated_insights?.all_patterns || [])}
- All Integration Points: ${JSON.stringify(contextPackage.exploration_results?.aggregated_insights?.all_integration_points || [])}

## Analysis Steps

### 0. Load Output Schema (MANDATORY)
Execute: cat ~/.claude/workflows/cli-templates/schemas/conflict-resolution-schema.json

### 1. Load Context
- Read existing files from conflict_detection.existing_files
- Load plan from .workflow/active/{session_id}/.process/context-package.json
- Load exploration_results and use aggregated_insights for enhanced analysis
- Extract role analyses and requirements

### 2. Execute CLI Analysis (Enhanced with Exploration + Scenario Uniqueness)

Primary (Gemini):
ccw cli -p "
PURPOSE: Detect conflicts between plan and codebase, using exploration insights
TASK:
• **Review pre-identified conflict_indicators from exploration results**
• Compare architectures (use exploration key_patterns)
• Identify breaking API changes
• Detect data model incompatibilities
• Assess dependency conflicts
• **Analyze module scenario uniqueness**
  - Use exploration integration_points for precise locations
  - Cross-validate with exploration critical_files
  - Generate clarification questions for boundary definition
MODE: analysis
CONTEXT: @**/*.ts @**/*.js @**/*.tsx @**/*.jsx @.workflow/active/{session_id}/**/*
EXPECTED: Conflict list with severity ratings, including:
- Validation of exploration conflict_indicators
- ModuleOverlap conflicts with overlap_analysis
- Targeted clarification questions
CONSTRAINTS: Focus on breaking changes, migration needs, and functional overlaps | Prioritize exploration-identified conflicts | analysis=READ-ONLY
" --tool gemini --mode analysis --rule analysis-code-patterns --cd {project_root}

Fallback: Qwen (same prompt) → Claude (manual analysis)

### 3. Generate Strategies (2-4 per conflict)

Template per conflict:
- Severity: Critical/High/Medium
- Category: Architecture/API/Data/Dependency/ModuleOverlap
- Affected files + impact
- **For ModuleOverlap**: Include overlap_analysis with existing modules and scenarios
- Options with pros/cons, effort, risk
- **For ModuleOverlap strategies**: Add clarification_needed questions for boundary definition
- Recommended strategy + rationale

### 4. Return Structured Conflict Data

⚠️ Output to conflict-resolution.json (generated in Phase 4)

**Schema Reference**: Execute \`cat ~/.claude/workflows/cli-templates/schemas/conflict-resolution-schema.json\` to get the full schema

Return JSON following the schema above. Key requirements:
- Minimum 2 strategies per conflict, max 4
- All text in Chinese for user-facing fields (brief, name, pros, cons, modification_suggestions)
- modifications.old_content: 20-100 chars for unique Edit tool matching
- modifications.new_content: preserves markdown formatting
- modification_suggestions: 2-5 actionable suggestions for custom handling

### 5. Planning Notes Record (REQUIRED)
After analysis is complete, append a brief execution record to planning-notes.md:

**File**: .workflow/active/{session_id}/planning-notes.md
**Location**: Under "## Conflict Decisions (Phase 3)" section
**Format**:
\`\`\`
### [Conflict-Resolution Agent] YYYY-MM-DD
- **Note**: [brief summary of conflict types, resolution strategies, key decisions]
\`\`\`
`)
```
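
The Gemini → Qwen → Claude fallback order above can be summarized as a sketch. Both helpers below are hypothetical names, not part of this workflow's API; the sketch only captures the ordering:

```javascript
// Sketch of the fallback chain only; tryCliAnalysis and claudeManualAnalysis
// are hypothetical wrappers, not defined in this document.
function runConflictAnalysis(prompt) {
  for (const tool of ['gemini', 'qwen']) {
    const result = tryCliAnalysis(prompt, tool); // wraps `ccw cli ... --tool ${tool}`
    if (result?.ok) return result;
  }
  return claudeManualAnalysis(prompt); // last resort: Claude analyzes directly
}
```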

### Phase 3: User Interaction Loop

```javascript
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')

FOR each conflict:
  round = 0, clarified = false, userClarifications = []

  WHILE (!clarified && round++ < 10):
    // 1. Display conflict info (text output for context)
    displayConflictSummary(conflict) // id, brief, severity, overlap_analysis if ModuleOverlap

    // 2. Strategy selection
    if (autoYes) {
      console.log(`[--yes] Auto-selecting recommended strategy`)
      selectedStrategy = conflict.strategies[conflict.recommended || 0]
      clarified = true // Skip clarification loop
    } else {
      AskUserQuestion({
        questions: [{
          question: formatStrategiesForDisplay(conflict.strategies),
          header: "策略选择",
          multiSelect: false,
          options: [
            ...conflict.strategies.map((s, i) => ({
              label: `${s.name}${i === conflict.recommended ? ' (推荐)' : ''}`,
              description: `${s.complexity}复杂度 | ${s.risk}风险${s.clarification_needed?.length ? ' | ⚠️需澄清' : ''}`
            })),
            { label: "自定义修改", description: `建议: ${conflict.modification_suggestions?.slice(0,2).join('; ')}` }
          ]
        }]
      })

      // 3. Handle selection
      if (userChoice === "自定义修改") {
        customConflicts.push({ id, brief, category, suggestions, overlap_analysis })
        break
      }

      selectedStrategy = findStrategyByName(userChoice)
    }

    // 4. Clarification (if needed) - batched max 4 per call
    if (!autoYes && selectedStrategy.clarification_needed?.length > 0) {
      for (batch of chunk(selectedStrategy.clarification_needed, 4)) {
        AskUserQuestion({
          questions: batch.map((q, i) => ({
            question: q, header: `澄清${i+1}`, multiSelect: false,
            options: [{ label: "详细说明", description: "提供答案" }]
          }))
        })
        userClarifications.push(...collectAnswers(batch))
      }

      // 5. Agent re-analysis
      reanalysisResult = Task({
        subagent_type: "cli-execution-agent",
        run_in_background: false,
        prompt: `Conflict: ${conflict.id}, Strategy: ${selectedStrategy.name}
User Clarifications: ${JSON.stringify(userClarifications)}
Output: { uniqueness_confirmed, rationale, updated_strategy, remaining_questions }`
      })

      if (reanalysisResult.uniqueness_confirmed) {
        selectedStrategy = { ...reanalysisResult.updated_strategy, clarifications: userClarifications }
        clarified = true
      } else {
        selectedStrategy.clarification_needed = reanalysisResult.remaining_questions
      }
    } else {
      clarified = true
    }

    if (clarified) resolvedConflicts.push({ conflict, strategy: selectedStrategy })
  END WHILE
END FOR

selectedStrategies = resolvedConflicts.map(r => ({
  conflict_id: r.conflict.id, strategy: r.strategy, clarifications: r.strategy.clarifications || []
}))
```

**Key Points**:
- AskUserQuestion: max 4 questions per call; batch if more (see the sketch below)
- Strategy options: 2-4 strategies + "自定义修改" (custom modification)
- Clarification loop: max 10 rounds; the agent decides when uniqueness_confirmed is reached
- Custom conflicts: record overlap_analysis for later manual handling
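
The batching above relies on a `chunk` helper that is referenced in the loop but not defined in this document. A minimal sketch:

```javascript
// Sketch: split an array into fixed-size batches (here, max 4 questions
// per AskUserQuestion call). `chunk` is assumed, not defined elsewhere.
function chunk(items, size) {
  const batches = [];
  for (let i = 0; i < items.length; i += size) {
    batches.push(items.slice(i, i + size));
  }
  return batches;
}
```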

### Phase 4: Apply Modifications

```javascript
// conflicts, selectedStrategies, and customConflicts come from Phase 3 above

// 1. Extract modifications from resolved strategies
const modifications = [];
selectedStrategies.forEach(item => {
  if (item.strategy && item.strategy.modifications) {
    modifications.push(...item.strategy.modifications.map(mod => ({
      ...mod,
      conflict_id: item.conflict_id,
      clarifications: item.clarifications
    })));
  }
});

console.log(`\n正在应用 ${modifications.length} 个修改...`);

// 2. Apply each modification using Edit tool (with fallback to context-package.json)
const appliedModifications = [];
const failedModifications = [];
const fallbackConstraints = []; // For files that don't exist

modifications.forEach((mod, idx) => {
  try {
    console.log(`[${idx + 1}/${modifications.length}] 修改 ${mod.file}...`);

    // Check if target file exists (brainstorm files may not exist in lite workflow)
    if (!file_exists(mod.file)) {
      console.log(`  ⚠️ 文件不存在,写入 context-package.json 作为约束`);
      fallbackConstraints.push({
        source: "conflict-resolution",
        conflict_id: mod.conflict_id,
        target_file: mod.file,
        section: mod.section,
        change_type: mod.change_type,
        content: mod.new_content,
        rationale: mod.rationale
      });
      return; // Skip to next modification
    }

    if (mod.change_type === "update") {
      Edit({
        file_path: mod.file,
        old_string: mod.old_content,
        new_string: mod.new_content
      });
    } else if (mod.change_type === "add") {
      // Handle addition - append or insert based on section
      const fileContent = Read(mod.file);
      const updated = insertContentAfterSection(fileContent, mod.section, mod.new_content);
      Write(mod.file, updated);
    } else if (mod.change_type === "remove") {
      Edit({
        file_path: mod.file,
        old_string: mod.old_content,
        new_string: ""
      });
    }

    appliedModifications.push(mod);
    console.log(`  ✓ 成功`);
  } catch (error) {
    console.log(`  ✗ 失败: ${error.message}`);
    failedModifications.push({ ...mod, error: error.message });
  }
});

// 2b. Generate conflict-resolution.json output file
const resolutionOutput = {
  session_id: sessionId,
  resolved_at: new Date().toISOString(),
  summary: {
    total_conflicts: conflicts.length,
    resolved_with_strategy: selectedStrategies.length,
    custom_handling: customConflicts.length,
    fallback_constraints: fallbackConstraints.length
  },
  resolved_conflicts: selectedStrategies.map(s => ({
    conflict_id: s.conflict_id,
    strategy_name: s.strategy.name,
    strategy_approach: s.strategy.approach,
    clarifications: s.clarifications || [],
    // Match applied modifications back to this strategy's conflict
    modifications_applied: s.strategy.modifications?.filter(m =>
      appliedModifications.some(am =>
        am.conflict_id === s.conflict_id &&
        am.file === m.file && am.change_type === m.change_type)
    ) || []
  })),
  custom_conflicts: customConflicts.map(c => ({
    id: c.id,
    brief: c.brief,
    category: c.category,
    suggestions: c.suggestions,
    overlap_analysis: c.overlap_analysis || null
  })),
  planning_constraints: fallbackConstraints, // Constraints for files that don't exist
  failed_modifications: failedModifications
};

const resolutionPath = `.workflow/active/${sessionId}/.process/conflict-resolution.json`;
Write(resolutionPath, JSON.stringify(resolutionOutput, null, 2));

// 3. Update context-package.json with resolution details (reference to JSON file)
const contextPackage = JSON.parse(Read(contextPath));
contextPackage.conflict_detection.conflict_risk = "resolved";
contextPackage.conflict_detection.resolution_file = resolutionPath; // Reference to detailed JSON
contextPackage.conflict_detection.resolved_conflicts = selectedStrategies.map(s => s.conflict_id);
contextPackage.conflict_detection.custom_conflicts = customConflicts.map(c => c.id);
contextPackage.conflict_detection.resolved_at = new Date().toISOString();
Write(contextPath, JSON.stringify(contextPackage, null, 2));

// 4. Output custom conflict summary with overlap analysis (if any)
if (customConflicts.length > 0) {
  console.log(`\n${'='.repeat(60)}`);
  console.log(`需要自定义处理的冲突 (${customConflicts.length})`);
  console.log(`${'='.repeat(60)}\n`);

  customConflicts.forEach(conflict => {
    console.log(`【${conflict.category}】${conflict.id}: ${conflict.brief}`);

    // Show overlap analysis for ModuleOverlap conflicts
    if (conflict.category === 'ModuleOverlap' && conflict.overlap_analysis) {
      console.log(`\n场景重叠信息:`);
      console.log(`  新模块: ${conflict.overlap_analysis.new_module.name}`);
      console.log(`  场景: ${conflict.overlap_analysis.new_module.scenarios.join(', ')}`);
      console.log(`\n  与以下模块重叠:`);
      conflict.overlap_analysis.existing_modules.forEach(mod => {
        console.log(`  - ${mod.name} (${mod.file})`);
        console.log(`    重叠场景: ${mod.overlap_scenarios.join(', ')}`);
      });
    }

    console.log(`\n修改建议:`);
    conflict.suggestions.forEach(suggestion => {
      console.log(`  - ${suggestion}`);
    });
    console.log();
  });
}

// 5. Output failure summary (if any)
if (failedModifications.length > 0) {
  console.log(`\n⚠️ 部分修改失败 (${failedModifications.length}):`);
  failedModifications.forEach(mod => {
    console.log(`  - ${mod.file}: ${mod.error}`);
  });
}

// 6. Return summary
return {
  total_conflicts: conflicts.length,
  resolved_with_strategy: selectedStrategies.length,
  custom_handling: customConflicts.length,
  modifications_applied: appliedModifications.length,
  modifications_failed: failedModifications.length,
  modified_files: [...new Set(appliedModifications.map(m => m.file))],
  custom_conflicts: customConflicts,
  clarification_records: selectedStrategies.filter(s => s.clarifications.length > 0)
};
```
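
The `insertContentAfterSection` helper used for `change_type === "add"` is not defined in this document. A minimal sketch, assuming the `section` field holds a markdown heading line that marks the insertion point:

```javascript
// Sketch: insert new content right after the named markdown section heading.
// insertContentAfterSection is referenced above but not defined elsewhere.
function insertContentAfterSection(fileContent, section, newContent) {
  const lines = fileContent.split('\n');
  const idx = lines.findIndex(line => line.trim() === section.trim());
  if (idx === -1) return fileContent + '\n' + newContent; // fallback: append at end
  lines.splice(idx + 1, 0, '', newContent); // blank line, then the new content
  return lines.join('\n');
}
```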

**Validation**:
```
✓ Agent returns valid JSON structure with ModuleOverlap conflicts
✓ Conflicts processed ONE BY ONE (not in batches)
✓ ModuleOverlap conflicts include overlap_analysis field
✓ Strategies with clarification_needed display questions
✓ User selections captured correctly per conflict
✓ Clarification loop continues until uniqueness confirmed
✓ Agent re-analysis returns uniqueness_confirmed and updated_strategy
✓ Maximum 10 rounds per conflict safety limit enforced
✓ Edit tool successfully applies modifications
✓ guidance-specification.md updated
✓ Role analyses (*.md) updated
✓ context-package.json marked as resolved with clarification records
✓ Custom conflicts display overlap_analysis for manual handling
✓ Agent log saved to .workflow/active/{session_id}/.chat/
```

## Output Format

### Primary Output: conflict-resolution.json

**Path**: `.workflow/active/{session_id}/.process/conflict-resolution.json`

**Schema**:
```json
{
  "session_id": "WFS-xxx",
  "resolved_at": "ISO timestamp",
  "summary": {
    "total_conflicts": 3,
    "resolved_with_strategy": 2,
    "custom_handling": 1,
    "fallback_constraints": 0
  },
  "resolved_conflicts": [
    {
      "conflict_id": "CON-001",
      "strategy_name": "策略名称",
      "strategy_approach": "实现方法",
      "clarifications": [],
      "modifications_applied": []
    }
  ],
  "custom_conflicts": [
    {
      "id": "CON-002",
      "brief": "冲突摘要",
      "category": "ModuleOverlap",
      "suggestions": ["建议1", "建议2"],
      "overlap_analysis": null
    }
  ],
  "planning_constraints": [],
  "failed_modifications": []
}
```

### Key Requirements

| Requirement | Details |
|------------|---------|
| **Conflict batching** | Max 10 conflicts per round (no total limit) |
| **Strategy count** | 2-4 strategies per conflict |
| **Modifications** | Each strategy includes file paths, old_content, new_content |
| **User-facing text** | Chinese (brief, strategy names, pros/cons) |
| **Technical fields** | English (severity, category, complexity, risk) |
| **old_content precision** | 20-100 chars for unique Edit tool matching |
| **File targets** | guidance-specification.md, role analyses (*.md) |

## Error Handling

### Recovery Strategy
```
1. Pre-check: Verify conflict_risk ≥ medium
2. Monitor: Track agent via Task tool
3. Validate: Parse agent JSON output
4. Recover:
   - Agent failure → check logs + report error
   - Invalid JSON → retry once with Claude fallback
   - CLI failure → fallback to Claude analysis
   - Edit tool failure → report affected files + rollback option
   - User cancels → mark as "unresolved", continue to task-generate
5. Degrade: If all fail, generate minimal conflict report and skip modifications
```

### Rollback Handling
```
If Edit tool fails mid-application:
1. Log all successfully applied modifications
2. Output rollback option via text interaction
3. If rollback selected: restore files from git or backups
4. If continue: mark partial resolution in context-package.json
```
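
A minimal sketch of the git-based rollback path, assuming the project is a git repository with a clean baseline before modifications were applied (`rollbackModifications` is a hypothetical name; the file list comes from the applied-modifications log above):

```javascript
// Sketch: restore modified files from git when the user opts to roll back.
function rollbackModifications(appliedModifications) {
  const files = [...new Set(appliedModifications.map(m => m.file))];
  for (const file of files) {
    bash(`git checkout -- "${file}"`); // discard the applied edit
  }
  console.log(`Rolled back ${files.length} file(s)`);
}
```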

## Integration

### Interface
**Input**:
- `--session` (required): WFS-{session-id}
- `--context` (required): context-package.json path
- Requires: `conflict_risk >= medium`

**Output**:
- Generated file:
  - `.workflow/active/{session_id}/.process/conflict-resolution.json` (primary output)
- Modified files (if they exist):
  - `.workflow/active/{session_id}/.brainstorm/guidance-specification.md`
  - `.workflow/active/{session_id}/.brainstorm/{role}/analysis.md`
  - `.workflow/active/{session_id}/.process/context-package.json` (conflict_risk → resolved, resolution_file reference)

**User Interaction**:
- **Iterative conflict processing**: One conflict at a time, not in batches
- Each conflict: 2-4 strategy options + "自定义修改" option (with suggestions)
- **Clarification loop**: Iterative questions per conflict until uniqueness is confirmed (up to 10 rounds)
- **ModuleOverlap conflicts**: Display overlap_analysis with existing modules
- **Agent re-analysis**: Dynamic strategy updates based on user clarifications

### Success Criteria
```
✓ CLI analysis returns valid JSON structure with ModuleOverlap category
✓ Agent performs scenario uniqueness detection (searches existing modules)
✓ Conflicts processed ONE BY ONE with iterative clarification
✓ Min 2 strategies per conflict with modifications
✓ ModuleOverlap conflicts include overlap_analysis with existing modules
✓ Strategies requiring clarification include clarification_needed questions
✓ Each conflict includes 2-5 modification_suggestions
✓ Text output displays conflict with overlap analysis (if ModuleOverlap)
✓ User selections captured per conflict
✓ Clarification loop continues until uniqueness confirmed (up to 10 rounds)
✓ Agent re-analysis with user clarifications updates strategy
✓ Uniqueness confirmation based on clear scenario boundaries
✓ Edit tool applies modifications successfully
✓ Custom conflicts displayed with overlap_analysis for manual handling
✓ guidance-specification.md updated with resolved conflicts
✓ Role analyses (*.md) updated with resolved conflicts
✓ context-package.json marked as "resolved" with clarification records
✓ conflict-resolution.json generated with full resolution details
✓ Modification summary includes:
  - Total conflicts
  - Resolved with strategy (count)
  - Custom handling (count)
  - Clarification records
  - Overlap analysis for custom ModuleOverlap conflicts
✓ Agent log saved to .workflow/active/{session_id}/.chat/
✓ Error handling robust (validate/retry/degrade)
```

## Post-Phase Update

If Phase 3 was executed, update planning-notes.md:

```javascript
const conflictResPath = `.workflow/active/${sessionId}/.process/conflict-resolution.json`

if (file_exists(conflictResPath)) {
  const conflictRes = JSON.parse(Read(conflictResPath))
  const resolved = conflictRes.resolved_conflicts || []
  const planningConstraints = conflictRes.planning_constraints || []

  // Update Phase 3 section
  Edit(planningNotesPath, {
    old: '## Conflict Decisions (Phase 3)\n(To be filled if conflicts detected)',
    new: `## Conflict Decisions (Phase 3)

- **RESOLVED**: ${resolved.map(r => `${r.conflict_id} → ${r.strategy_name}`).join('; ') || 'None'}
- **CUSTOM_HANDLING**: ${conflictRes.custom_conflicts?.map(c => c.id).join(', ') || 'None'}
- **CONSTRAINTS**: ${planningConstraints.map(c => c.content).join('; ') || 'None'}`
  })

  // Append Phase 3 constraints to the consolidated list
  // (constraintCount: how many constraints are already listed from earlier phases)
  if (planningConstraints.length > 0) {
    Edit(planningNotesPath, {
      old: '## Consolidated Constraints (Phase 4 Input)',
      new: `## Consolidated Constraints (Phase 4 Input)
${planningConstraints.map((c, i) => `${constraintCount + i + 1}. [Conflict] ${c.content}`).join('\n')}`
    })
  }
}
```

## Memory State Check

After Phase 3 completion, evaluate context window usage.
If memory usage is high (>120K tokens):

```javascript
Skill(skill="compact")
```

## Output

- **File**: `.workflow/active/{sessionId}/.process/conflict-resolution.json`
- **Modified files**: brainstorm artifacts (guidance-specification.md, role analyses)
- **Updated**: `context-package.json` with resolved conflict status

## Next Phase

Return to the orchestrator, then auto-continue to [Phase 4: Task Generation](04-task-generation.md).

---

**File**: `.claude/skills/workflow-plan/phases/04-task-generation.md` (new file, 701 lines)

# Phase 4: Task Generation

Generate implementation plan documents (IMPL_PLAN.md, task JSONs, TODO_LIST.md) using action-planning-agent - produces planning artifacts, does NOT execute code implementation.

## Auto Mode

When `--yes` or `-y`: Skip user questions, use defaults (no materials, Agent executor, Codex CLI tool).

## Core Philosophy

- **Planning Only**: Generate planning documents (IMPL_PLAN.md, task JSONs, TODO_LIST.md) - does NOT implement code
- **Agent-Driven Document Generation**: Delegate plan generation to action-planning-agent
- **NO Redundant Context Sorting**: Context priority sorting is ALREADY completed in context-gather Phase 2/3
  - Use `context-package.json.prioritized_context` directly
  - DO NOT re-sort files or re-compute priorities
  - `priority_tiers` and `dependency_order` are pre-computed and ready-to-use
- **N+1 Parallel Planning**: Auto-detect multi-module projects, enable parallel planning (2+1 or 3+1 mode)
- **Progressive Loading**: Load context incrementally (Core → Selective → On-Demand) due to analysis.md file size
- **Memory-First**: Reuse loaded documents from conversation memory
- **Smart Selection**: Load synthesis_output OR guidance + relevant role analyses, NOT all role analyses
- **MCP-Enhanced**: Use MCP tools for advanced code analysis and research
- **Path Clarity**: All `focus_paths` prefer absolute paths (e.g., `D:\\project\\src\\module`) or clear relative paths from the project root (e.g., `./src/module`)

## Execution Process

```
Input Parsing:
├─ Parse flags: --session
└─ Validation: session_id REQUIRED

Phase 0: User Configuration (Interactive)
├─ Question 1: Supplementary materials/guidelines?
├─ Question 2: Execution method preference (Agent/CLI/Hybrid)
├─ Question 3: CLI tool preference (if CLI selected)
└─ Store: userConfig for agent prompt

Phase 1: Context Preparation & Module Detection (Command)
├─ Assemble session paths (metadata, context package, output dirs)
├─ Provide metadata (session_id, execution_mode, mcp_capabilities)
├─ Auto-detect modules from context-package + directory structure
└─ Decision:
   ├─ modules.length == 1 → Single Agent Mode (Phase 2A)
   └─ modules.length >= 2 → Parallel Mode (Phase 2B + Phase 3)

Phase 2A: Single Agent Planning (Original Flow)
├─ Load context package (progressive loading strategy)
├─ Generate Task JSON Files (.task/IMPL-*.json)
├─ Create IMPL_PLAN.md
└─ Generate TODO_LIST.md

Phase 2B: N Parallel Planning (Multi-Module)
├─ Launch N action-planning-agents simultaneously (one per module)
├─ Each agent generates module-scoped tasks (IMPL-{prefix}{seq}.json)
├─ Task ID format: IMPL-A1, IMPL-A2... / IMPL-B1, IMPL-B2...
└─ Each module limited to ≤9 tasks

Phase 3: Integration (+1 Coordinator, Multi-Module Only)
├─ Collect all module task JSONs
├─ Resolve cross-module dependencies (CROSS::{module}::{pattern} → actual ID)
├─ Generate unified IMPL_PLAN.md (grouped by module)
└─ Generate TODO_LIST.md (hierarchical: module → tasks)
```
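
The `CROSS::{module}::{pattern} → actual ID` resolution step in Phase 3 can be sketched as follows. This is illustrative only: it assumes each task exposes flat `module`, `title`, and `depends_on` fields, whereas the real task JSON schema may nest these under `meta`/`context`:

```javascript
// Sketch: resolve CROSS::{module}::{pattern} placeholders to concrete task IDs.
// Field names (module, title, depends_on) are assumptions, not the real schema.
function resolveCrossModuleDeps(allTasks) {
  for (const task of allTasks) {
    task.depends_on = (task.depends_on || []).map(dep => {
      const m = /^CROSS::([^:]+)::(.+)$/.exec(dep);
      if (!m) return dep; // already a concrete ID like "IMPL-A1"
      const [, module, pattern] = m;
      const target = allTasks.find(t =>
        t.module === module && new RegExp(pattern, 'i').test(t.title));
      return target ? target.id : dep; // leave unresolved if no match found
    });
  }
  return allTasks;
}
```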

## Document Generation Lifecycle

### Phase 0: User Configuration (Interactive)

**Purpose**: Collect user preferences before task generation to ensure generated tasks match execution expectations.

**Auto Mode Check**:
```javascript
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')

if (autoYes) {
  console.log(`[--yes] Using defaults: No materials, Agent executor, Codex CLI`)
  userConfig = {
    supplementaryMaterials: { type: "none", content: [] },
    executionMethod: "agent",
    preferredCliTool: "codex",
    enableResume: true
  }
  // Skip to Phase 1
}
```

**User Questions** (skipped if autoYes):
```javascript
if (!autoYes) AskUserQuestion({
  questions: [
    {
      question: "Do you have supplementary materials or guidelines to include?",
      header: "Materials",
      multiSelect: false,
      options: [
        { label: "No additional materials", description: "Use existing context only" },
        { label: "Provide file paths", description: "I'll specify paths to include" },
        { label: "Provide inline content", description: "I'll paste content directly" }
      ]
    },
    {
      question: "Select execution method for generated tasks:",
      header: "Execution",
      multiSelect: false,
      options: [
        { label: "Agent (Recommended)", description: "Claude agent executes tasks directly" },
        { label: "Hybrid", description: "Agent orchestrates, calls CLI for complex steps" },
        { label: "CLI Only", description: "All execution via CLI tools (codex/gemini/qwen)" }
      ]
    },
    {
      question: "If using CLI, which tool do you prefer?",
      header: "CLI Tool",
      multiSelect: false,
      options: [
        { label: "Codex (Recommended)", description: "Best for implementation tasks" },
        { label: "Gemini", description: "Best for analysis and large context" },
        { label: "Qwen", description: "Alternative analysis tool" },
        { label: "Auto", description: "Let agent decide per-task" }
      ]
    }
  ]
})
```

**Handle Materials Response** (skipped if autoYes):
```javascript
if (!autoYes && userConfig.materials === "Provide file paths") {
  // Follow-up question for file paths
  const pathsResponse = AskUserQuestion({
    questions: [{
      question: "Enter file paths to include (comma-separated or one per line):",
      header: "Paths",
      multiSelect: false,
      options: [
        { label: "Enter paths", description: "Provide paths in text input" }
      ]
    }]
  })
  userConfig.supplementaryPaths = parseUserPaths(pathsResponse)
}
```
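
The `parseUserPaths` helper above is not defined in this document. A minimal sketch, grounded in the question's "comma-separated or one per line" wording:

```javascript
// Sketch: normalize the user's path answer into a clean list.
// parseUserPaths is referenced above but not defined elsewhere.
function parseUserPaths(response) {
  return String(response)
    .split(/[,\n]/)        // comma-separated or one per line
    .map(p => p.trim())
    .filter(p => p.length > 0);
}
```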

**Build userConfig**:
```javascript
const userConfig = {
  supplementaryMaterials: {
    type: "none|paths|inline",
    content: [...] // Parsed paths or inline content
  },
  executionMethod: "agent|hybrid|cli",
  preferredCliTool: "codex|gemini|qwen|auto",
  enableResume: true // Always enable resume for CLI executions
}
```

**Pass to Agent**: Include `userConfig` in the agent prompt for Phase 2A/2B.

### Phase 1: Context Preparation & Module Detection (Command Responsibility)

**The command prepares session paths and metadata and detects the module structure. Context priority sorting is NOT performed here - it was already completed in context-gather Phase 2/3.**

**Session Path Structure**:
```
.workflow/active/WFS-{session-id}/
├── workflow-session.json        # Session metadata
├── planning-notes.md            # Consolidated planning notes
├── .process/
│   └── context-package.json     # Context package with artifact catalog
├── .task/                       # Output: Task JSON files
│   ├── IMPL-A1.json             # Multi-module: prefixed by module
│   ├── IMPL-A2.json
│   ├── IMPL-B1.json
│   └── ...
├── IMPL_PLAN.md                 # Output: Implementation plan (grouped by module)
└── TODO_LIST.md                 # Output: TODO list (hierarchical)
```

**Command Preparation**:
1. **Assemble Session Paths** for agent prompt:
   - `session_metadata_path`
   - `context_package_path`
   - Output directory paths

2. **Provide Metadata** (simple values):
   - `session_id`
   - `mcp_capabilities` (available MCP tools)

3. **Auto Module Detection** (determines single vs parallel mode):
```javascript
function autoDetectModules(contextPackage, projectRoot) {
  // === Complexity Gate: Only parallelize for High complexity ===
  const complexity = contextPackage.metadata?.complexity || 'Medium';
  if (complexity !== 'High') {
    // Force single agent mode for Low/Medium complexity
    // This maximizes agent context reuse for related tasks
    return [{ name: 'main', prefix: '', paths: ['.'] }];
  }

  // Priority 1: Explicit frontend/backend separation
  if (exists('src/frontend') && exists('src/backend')) {
    return [
      { name: 'frontend', prefix: 'A', paths: ['src/frontend'] },
      { name: 'backend', prefix: 'B', paths: ['src/backend'] }
    ];
  }

  // Priority 2: Monorepo structure
  if (exists('packages/*') || exists('apps/*')) {
    return detectMonorepoModules(); // Returns 2-3 main packages
  }

  // Priority 3: Context-package dependency clustering
  const modules = clusterByDependencies(contextPackage.dependencies?.internal);
  if (modules.length >= 2) return modules.slice(0, 3);

  // Default: Single module (original flow)
  return [{ name: 'main', prefix: '', paths: ['.'] }];
}
```
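
The `detectMonorepoModules` and `clusterByDependencies` helpers above are referenced but not defined here. A sketch of the dependency-clustering fallback, under the assumption that internal dependency entries look like `{ from: "src/auth/x.ts", to: "src/db/y.ts" }`:

```javascript
// Sketch: group internal dependencies by their top-level source directory.
// The { from, to } entry shape is an assumption about context-package.json.
function clusterByDependencies(internalDeps = []) {
  const groups = new Map();
  for (const dep of internalDeps) {
    const root = dep.from.split('/').slice(0, 2).join('/'); // e.g. "src/auth"
    if (!groups.has(root)) groups.set(root, new Set());
    groups.get(root).add(dep.from);
  }
  // Keep only clusters large enough to plan independently
  return [...groups.entries()]
    .filter(([, files]) => files.size >= 3)
    .map(([root, files], i) => ({
      name: root.split('/').pop(),
      prefix: String.fromCharCode(65 + i), // 'A', 'B', 'C'...
      paths: [root]
    }));
}
```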

**Decision Logic**:
- `complexity !== 'High'` → Force Phase 2A (Single Agent, maximize context reuse)
- `modules.length == 1` → Phase 2A (Single Agent, original flow)
- `modules.length >= 2 && complexity == 'High'` → Phase 2B + Phase 3 (N+1 Parallel)

**Note**: CLI tool usage is now determined semantically by action-planning-agent based on the user's task description, not by flags.

### Phase 2A: Single Agent Planning (Original Flow)

**Condition**: `modules.length == 1` (no multi-module structure detected)

**Purpose**: Generate IMPL_PLAN.md, task JSONs, and TODO_LIST.md - planning documents only, NOT code implementation.

**Agent Invocation**:
```javascript
Task(
  subagent_type="action-planning-agent",
  run_in_background=false,
  description="Generate planning documents (IMPL_PLAN.md, task JSONs, TODO_LIST.md)",
  prompt=`
## TASK OBJECTIVE
Generate implementation planning documents (IMPL_PLAN.md, task JSONs, TODO_LIST.md) for the workflow session

IMPORTANT: This is PLANNING ONLY - you are generating planning documents, NOT implementing code.

CRITICAL: Follow the progressive loading strategy defined in the agent specification (load analysis.md files incrementally due to file size)

## PLANNING NOTES (PHASE 1-3 CONTEXT)
Load: .workflow/active/{session-id}/planning-notes.md

This document contains:
- User Intent: Original GOAL and KEY_CONSTRAINTS from Phase 1
- Context Findings: Critical files, architecture, and constraints from Phase 2
- Conflict Decisions: Resolved conflicts and planning constraints from Phase 3
- Consolidated Constraints: All constraints from all phases

**USAGE**: Read planning-notes.md FIRST. Use the Consolidated Constraints list to guide task sequencing and dependencies.

## SESSION PATHS
Input:
- Session Metadata: .workflow/active/{session-id}/workflow-session.json
- Planning Notes: .workflow/active/{session-id}/planning-notes.md
- Context Package: .workflow/active/{session-id}/.process/context-package.json

Output:
- Task Dir: .workflow/active/{session-id}/.task/
- IMPL_PLAN: .workflow/active/{session-id}/IMPL_PLAN.md
- TODO_LIST: .workflow/active/{session-id}/TODO_LIST.md

## CONTEXT METADATA
Session ID: {session-id}
MCP Capabilities: {exa_code, exa_web, code_index}

## USER CONFIGURATION (from Phase 0)
Execution Method: ${userConfig.executionMethod} // agent|hybrid|cli
Preferred CLI Tool: ${userConfig.preferredCliTool} // codex|gemini|qwen|auto
Supplementary Materials: ${userConfig.supplementaryMaterials}

## EXECUTION METHOD MAPPING
Based on userConfig.executionMethod, set task-level meta.execution_config:

"agent" →
  meta.execution_config = { method: "agent", cli_tool: null, enable_resume: false }
  Agent executes implementation_approach steps directly

"cli" →
  meta.execution_config = { method: "cli", cli_tool: userConfig.preferredCliTool, enable_resume: true }
  Agent executes pre_analysis, then hands off full context to CLI via buildCliHandoffPrompt()

"hybrid" →
  Per-task decision: Analyze task complexity, set method to "agent" OR "cli" per task
  - Simple tasks (≤3 files, straightforward logic) → method: "agent"
  - Complex tasks (>3 files, complex logic, refactoring) → method: "cli"
  CLI tool: userConfig.preferredCliTool, enable_resume: true

IMPORTANT: Do NOT add a command field to implementation_approach steps. Execution routing is controlled by task-level meta.execution_config.method only.

## PRIORITIZED CONTEXT (from context-package.prioritized_context) - ALREADY SORTED
Context sorting is ALREADY COMPLETED in context-gather Phase 2/3. DO NOT re-sort.
Direct usage:
- **user_intent**: Use goal/scope/key_constraints for task alignment
- **priority_tiers.critical**: These files are the PRIMARY focus for task generation
- **priority_tiers.high**: These files are the SECONDARY focus
- **dependency_order**: Use this for task sequencing - already computed
- **sorting_rationale**: Reference for understanding priority decisions

## EXPLORATION CONTEXT (from context-package.exploration_results) - SUPPLEMENT ONLY
If prioritized_context is incomplete, fall back to exploration_results:
- Load exploration_results from context-package.json
- Use aggregated_insights.critical_files for focus_paths generation
- Apply aggregated_insights.constraints to acceptance criteria
- Reference aggregated_insights.all_patterns for the implementation approach
- Use aggregated_insights.all_integration_points for precise modification locations
- Use conflict_indicators for risk-aware task sequencing

## CONFLICT RESOLUTION CONTEXT (if exists)
- Check context-package.conflict_detection.resolution_file for the conflict-resolution.json path
- If it exists, load .process/conflict-resolution.json:
  - Apply planning_constraints as task constraints (for brainstorm-less workflows)
  - Reference resolved_conflicts for implementation approach alignment
  - Handle custom_conflicts with explicit task notes

## EXPECTED DELIVERABLES
1. Task JSON Files (.task/IMPL-*.json)
   - 6-field schema (id, title, status, context_package_path, meta, context, flow_control)
   - Quantified requirements with explicit counts
   - Artifacts integration from context package
   - **focus_paths generated directly from prioritized_context.priority_tiers (critical + high)**
     - NO re-sorting or re-prioritization - use pre-computed tiers as-is
     - Critical files are PRIMARY focus, High files are SECONDARY
   - Flow control with pre_analysis steps (use prioritized_context.dependency_order for task sequencing)
   - **CLI Execution IDs and strategies (MANDATORY)**

2. Implementation Plan (IMPL_PLAN.md)
   - Context analysis and artifact references
   - Task breakdown and execution strategy
   - Complete structure per agent definition

3. TODO List (TODO_LIST.md)
   - Hierarchical structure (containers, pending, completed markers)
   - Links to task JSONs and summaries
   - Matches task JSON hierarchy

## CLI EXECUTION ID REQUIREMENTS (MANDATORY)
|
||||
Each task JSON MUST include:
|
||||
- **cli_execution_id**: Unique ID for CLI execution (format: \`{session_id}-{task_id}\`)
|
||||
- **cli_execution**: Strategy object based on depends_on:
|
||||
- No deps → \`{ "strategy": "new" }\`
|
||||
- 1 dep (single child) → \`{ "strategy": "resume", "resume_from": "parent-cli-id" }\`
|
||||
- 1 dep (multiple children) → \`{ "strategy": "fork", "resume_from": "parent-cli-id" }\`
|
||||
- N deps → \`{ "strategy": "merge_fork", "merge_from": ["id1", "id2", ...] }\`
|
||||
|
||||
**CLI Execution Strategy Rules**:
|
||||
1. **new**: Task has no dependencies - starts fresh CLI conversation
|
||||
2. **resume**: Task has 1 parent AND that parent has only this child - continues same conversation
|
||||
3. **fork**: Task has 1 parent BUT parent has multiple children - creates new branch with parent context
|
||||
4. **merge_fork**: Task has multiple parents - merges all parent contexts into new conversation
|
||||
|
||||
**Execution Command Patterns**:
|
||||
- new: \`ccw cli -p "[prompt]" --tool [tool] --mode write --id [cli_execution_id]\`
|
||||
- resume: \`ccw cli -p "[prompt]" --resume [resume_from] --tool [tool] --mode write\`
|
||||
- fork: \`ccw cli -p "[prompt]" --resume [resume_from] --id [cli_execution_id] --tool [tool] --mode write\`
|
||||
- merge_fork: \`ccw cli -p "[prompt]" --resume [merge_from.join(',')] --id [cli_execution_id] --tool [tool] --mode write\`
|
||||
|
||||
## QUALITY STANDARDS
|
||||
Hard Constraints:
|
||||
- Task count <= 18 (hard limit - request re-scope if exceeded)
|
||||
- All requirements quantified (explicit counts and enumerated lists)
|
||||
- Acceptance criteria measurable (include verification commands)
|
||||
- Artifact references mapped from context package
|
||||
- All documents follow agent-defined structure
|
||||
|
||||
## SUCCESS CRITERIA
|
||||
- All planning documents generated successfully:
|
||||
- Task JSONs valid and saved to .task/ directory
|
||||
- IMPL_PLAN.md created with complete structure
|
||||
- TODO_LIST.md generated matching task JSONs
|
||||
- Return completion status with document count and task breakdown summary
|
||||
|
||||
## PLANNING NOTES RECORD (REQUIRED)
After completing the planning documents, update planning-notes.md:

**File**: .workflow/active/{session_id}/planning-notes.md

1. **Task Generation (Phase 4)**: Task count and key tasks
2. **N+1 Context**: Key decisions (with rationale) + deferred items

\`\`\`markdown
## Task Generation (Phase 4)
### [Action-Planning Agent] YYYY-MM-DD
- **Tasks**: [count] ([IDs])

## N+1 Context
### Decisions
| Decision | Rationale | Revisit? |
|----------|-----------|----------|
| [choice] | [why] | [Yes/No] |

### Deferred
- [ ] [item] - [reason]
\`\`\`
`
)
```
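
The four strategy rules embedded in the prompt above are mechanical, so they can be derived from the dependency graph. A hedged sketch (it assumes each task carries `id` and `depends_on`; the function and `sessionId` parameter are illustrative, not part of the source):

```javascript
// Sketch: derive the cli_execution object from depends_on per the rules above.
function deriveCliExecution(task, allTasks, sessionId) {
  const cliIdOf = (taskId) => `${sessionId}-${taskId}`; // {session_id}-{task_id}
  const deps = task.depends_on ?? [];
  if (deps.length === 0) return { strategy: 'new' };
  if (deps.length > 1) return { strategy: 'merge_fork', merge_from: deps.map(cliIdOf) };
  const parent = deps[0];
  const children = allTasks.filter(t => (t.depends_on ?? []).includes(parent));
  return children.length === 1
    ? { strategy: 'resume', resume_from: cliIdOf(parent) } // parent has only this child
    : { strategy: 'fork', resume_from: cliIdOf(parent) };  // parent has multiple children
}
```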

### Phase 2B: N Parallel Planning (Multi-Module)

**Condition**: `modules.length >= 2` (multi-module detected)

**Purpose**: Launch N action-planning-agents simultaneously, one per module, for parallel task JSON generation.

**Note**: Phase 2B agents generate Task JSONs ONLY. IMPL_PLAN.md and TODO_LIST.md are generated by Phase 3 Coordinator.

**Parallel Agent Invocation**:
```javascript
// Launch N agents in parallel (one per module)
const planningTasks = modules.map(module =>
  Task(
    subagent_type="action-planning-agent",
    run_in_background=false,
    description=`Generate ${module.name} module task JSONs`,
    prompt=`
## TASK OBJECTIVE
Generate task JSON files for ${module.name} module within workflow session

IMPORTANT: This is PLANNING ONLY - you are generating task JSONs, NOT implementing code.
IMPORTANT: Generate Task JSONs ONLY; IMPL_PLAN.md and TODO_LIST.md are generated by the Phase 3 Coordinator.

CRITICAL: Follow the progressive loading strategy defined in agent specification (load analysis.md files incrementally due to file size)

## PLANNING NOTES (PHASE 1-3 CONTEXT)
Load: .workflow/active/{session-id}/planning-notes.md

This document contains consolidated constraints and user intent to guide module-scoped task generation.

## MODULE SCOPE
- Module: ${module.name} (${module.type})
- Focus Paths: ${module.paths.join(', ')}
- Task ID Prefix: IMPL-${module.prefix}
- Task Limit: ≤6 tasks (hard limit for this module)
- Other Modules: ${otherModules.join(', ')} (reference only, do NOT generate tasks for them)

## SESSION PATHS
Input:
- Session Metadata: .workflow/active/{session-id}/workflow-session.json
- Planning Notes: .workflow/active/{session-id}/planning-notes.md
- Context Package: .workflow/active/{session-id}/.process/context-package.json

Output:
- Task Dir: .workflow/active/{session-id}/.task/

## CONTEXT METADATA
Session ID: {session-id}
MCP Capabilities: {exa_code, exa_web, code_index}

## USER CONFIGURATION (from Phase 0)
Execution Method: ${userConfig.executionMethod} // agent|hybrid|cli
Preferred CLI Tool: ${userConfig.preferredCliTool} // codex|gemini|qwen|auto
Supplementary Materials: ${userConfig.supplementaryMaterials}

## EXECUTION METHOD MAPPING
Based on userConfig.executionMethod, set task-level meta.execution_config:

"agent" →
  meta.execution_config = { method: "agent", cli_tool: null, enable_resume: false }
  Agent executes implementation_approach steps directly

"cli" →
  meta.execution_config = { method: "cli", cli_tool: userConfig.preferredCliTool, enable_resume: true }
  Agent executes pre_analysis, then hands off full context to CLI via buildCliHandoffPrompt()

"hybrid" →
  Per-task decision: Analyze task complexity, set method to "agent" OR "cli" per task
  - Simple tasks (≤3 files, straightforward logic) → method: "agent"
  - Complex tasks (>3 files, complex logic, refactoring) → method: "cli"
  CLI tool: userConfig.preferredCliTool, enable_resume: true

IMPORTANT: Do NOT add command field to implementation_approach steps. Execution routing is controlled by task-level meta.execution_config.method only.

## PRIORITIZED CONTEXT (from context-package.prioritized_context) - ALREADY SORTED
Context sorting is ALREADY COMPLETED in context-gather Phase 2/3. DO NOT re-sort.
Filter by module scope (${module.paths.join(', ')}):
- **user_intent**: Use for task alignment within module
- **priority_tiers.critical**: Filter for files in ${module.paths.join(', ')} → PRIMARY focus
- **priority_tiers.high**: Filter for files in ${module.paths.join(', ')} → SECONDARY focus
- **dependency_order**: Use module-relevant entries for task sequencing

## EXPLORATION CONTEXT (from context-package.exploration_results) - SUPPLEMENT ONLY
If prioritized_context is incomplete for this module, fall back to exploration_results:
- Load exploration_results from context-package.json
- Filter for ${module.name} module: Use aggregated_insights.critical_files matching ${module.paths.join(', ')}
- Apply module-relevant constraints from aggregated_insights.constraints
- Reference aggregated_insights.all_patterns applicable to ${module.name}
- Use aggregated_insights.all_integration_points for precise modification locations within module scope
- Use conflict_indicators for risk-aware task sequencing

## CONFLICT RESOLUTION CONTEXT (if exists)
- Check context-package.conflict_detection.resolution_file for conflict-resolution.json path
- If exists, load .process/conflict-resolution.json:
  - Apply planning_constraints relevant to ${module.name} as task constraints
  - Reference resolved_conflicts affecting ${module.name} for implementation approach alignment
  - Handle custom_conflicts with explicit task notes

## CROSS-MODULE DEPENDENCIES
- For dependencies ON other modules: Use placeholder depends_on: ["CROSS::{module}::{pattern}"]
- Example: depends_on: ["CROSS::B::api-endpoint"] (this module depends on B's api-endpoint task)
- Phase 3 Coordinator resolves to actual task IDs
- For dependencies FROM other modules: Document in task context as "provides_for" annotation

## EXPECTED DELIVERABLES
Task JSON Files (.task/IMPL-${module.prefix}*.json):
- 6-field schema (id, title, status, context_package_path, meta, context, flow_control)
- Task ID format: IMPL-${module.prefix}1, IMPL-${module.prefix}2, ...
- Quantified requirements with explicit counts
- Artifacts integration from context package (filtered for ${module.name})
- **focus_paths generated directly from prioritized_context.priority_tiers filtered by ${module.paths.join(', ')}**
  - NO re-sorting - use pre-computed tiers filtered for this module
  - Critical files are PRIMARY focus, High files are SECONDARY
- Flow control with pre_analysis steps (use prioritized_context.dependency_order for module task sequencing)
- **CLI Execution IDs and strategies (MANDATORY)**
- Focus ONLY on ${module.name} module scope

## CLI EXECUTION ID REQUIREMENTS (MANDATORY)
Each task JSON MUST include:
- **cli_execution_id**: Unique ID for CLI execution (format: \`{session_id}-IMPL-${module.prefix}{seq}\`)
- **cli_execution**: Strategy object based on depends_on:
  - No deps → \`{ "strategy": "new" }\`
  - 1 dep (single child) → \`{ "strategy": "resume", "resume_from": "parent-cli-id" }\`
  - 1 dep (multiple children) → \`{ "strategy": "fork", "resume_from": "parent-cli-id" }\`
  - N deps → \`{ "strategy": "merge_fork", "merge_from": ["id1", "id2", ...] }\`
  - Cross-module dep → \`{ "strategy": "cross_module_fork", "resume_from": "CROSS::{module}::{pattern}" }\`

**CLI Execution Strategy Rules**:
1. **new**: Task has no dependencies - starts fresh CLI conversation
2. **resume**: Task has 1 parent AND that parent has only this child - continues same conversation
3. **fork**: Task has 1 parent BUT parent has multiple children - creates new branch with parent context
4. **merge_fork**: Task has multiple parents - merges all parent contexts into new conversation
5. **cross_module_fork**: Task depends on task from another module - Phase 3 resolves placeholder

**Execution Command Patterns**:
- new: \`ccw cli -p "[prompt]" --tool [tool] --mode write --id [cli_execution_id]\`
- resume: \`ccw cli -p "[prompt]" --resume [resume_from] --tool [tool] --mode write\`
- fork: \`ccw cli -p "[prompt]" --resume [resume_from] --id [cli_execution_id] --tool [tool] --mode write\`
- merge_fork: \`ccw cli -p "[prompt]" --resume [merge_from.join(',')] --id [cli_execution_id] --tool [tool] --mode write\`
- cross_module_fork: (Phase 3 resolves placeholder, then uses fork pattern)

## QUALITY STANDARDS
Hard Constraints:
- Task count <= 6 for this module (hard limit, per Task Limit above - coordinate with Phase 3 if exceeded)
- All requirements quantified (explicit counts and enumerated lists)
- Acceptance criteria measurable (include verification commands)
- Artifact references mapped from context package (module-scoped filter)
- Focus paths use absolute paths or clear relative paths from project root
- Cross-module dependencies use CROSS:: placeholder format

## SUCCESS CRITERIA
- Task JSONs saved to .task/ with IMPL-${module.prefix}* naming
- All task JSONs include cli_execution_id and cli_execution strategy
- Cross-module dependencies use CROSS:: placeholder format consistently
- Focus paths scoped to ${module.paths.join(', ')} only
- Return: task count, task IDs, dependency summary (internal + cross-module)

## PLANNING NOTES RECORD (REQUIRED)
After completing the module's task JSONs, append to planning-notes.md:

\`\`\`markdown
### [${module.name}] YYYY-MM-DD
- **Tasks**: [count] ([IDs])
- **CROSS deps**: [placeholders used]
\`\`\`
`
  )
);

// Execute all in parallel
await Promise.all(planningTasks);
```
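
To make the CROSS:: placeholder and strategy fields concrete, here is a hypothetical fragment of one Phase 2B task JSON, written as a JS object (all values are invented for illustration, and only a subset of the schema fields is shown):

```javascript
// Hypothetical .task/IMPL-A2.json fragment emitted by the module-A planner.
const implA2 = {
  id: 'IMPL-A2',
  title: 'Wire frontend client to api-endpoint',
  status: 'pending',
  meta: {
    execution_config: { method: 'cli', cli_tool: 'codex', enable_resume: true },
    cli_execution_id: 'session-123-IMPL-A2',
    cli_execution: { strategy: 'cross_module_fork', resume_from: 'CROSS::B::api-endpoint' }
  },
  context: { depends_on: ['CROSS::B::api-endpoint'] } // Phase 3 resolves this to e.g. IMPL-B1
};
```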

**Output Structure** (direct to .task/):
```
.task/
├── IMPL-A1.json   # Module A (e.g., frontend)
├── IMPL-A2.json
├── IMPL-B1.json   # Module B (e.g., backend)
├── IMPL-B2.json
└── IMPL-C1.json   # Module C (e.g., shared)
```

**Task ID Naming**:
- Format: `IMPL-{prefix}{seq}.json`
- Prefix: A, B, C... (assigned by detection order)
- Sequence: 1, 2, 3... (per-module increment)

### Phase 3: Integration (+1 Coordinator Agent, Multi-Module Only)

**Condition**: Only executed when `modules.length >= 2`

**Purpose**: Collect all module tasks, resolve cross-module dependencies, generate unified IMPL_PLAN.md and TODO_LIST.md documents.

**Coordinator Agent Invocation**:
```javascript
// Wait for all Phase 2B agents to complete
const moduleResults = await Promise.all(planningTasks);

// Launch +1 Coordinator Agent
Task(
  subagent_type="action-planning-agent",
  run_in_background=false,
  description="Integrate module tasks and generate unified documents",
  prompt=`
## TASK OBJECTIVE
Integrate all module task JSONs, resolve cross-module dependencies, and generate unified IMPL_PLAN.md and TODO_LIST.md

IMPORTANT: This is INTEGRATION ONLY - consolidate existing task JSONs, NOT creating new tasks.

## SESSION PATHS
Input:
- Session Metadata: .workflow/active/{session-id}/workflow-session.json
- Context Package: .workflow/active/{session-id}/.process/context-package.json
- Task JSONs: .workflow/active/{session-id}/.task/IMPL-*.json (from Phase 2B)
Output:
- Updated Task JSONs: .workflow/active/{session-id}/.task/IMPL-*.json (resolved dependencies)
- IMPL_PLAN: .workflow/active/{session-id}/IMPL_PLAN.md
- TODO_LIST: .workflow/active/{session-id}/TODO_LIST.md

## CONTEXT METADATA
Session ID: {session-id}
Modules: ${modules.map(m => m.name + '(' + m.prefix + ')').join(', ')}
Module Count: ${modules.length}

## INTEGRATION STEPS
1. Collect all .task/IMPL-*.json, group by module prefix
2. Resolve CROSS:: dependencies → actual task IDs, update task JSONs
3. Generate IMPL_PLAN.md (multi-module format per agent specification)
4. Generate TODO_LIST.md (hierarchical format per agent specification)

## CROSS-MODULE DEPENDENCY RESOLUTION
- Pattern: CROSS::{module}::{pattern} → IMPL-{module}* matching title/context
- Example: CROSS::B::api-endpoint → IMPL-B1 (if B1 title contains "api-endpoint")
- Log unresolved as warnings

## EXPECTED DELIVERABLES
1. Updated Task JSONs with resolved dependency IDs
2. IMPL_PLAN.md - multi-module format with cross-dependency section
3. TODO_LIST.md - hierarchical by module with cross-dependency section

## SUCCESS CRITERIA
- No CROSS:: placeholders remaining in task JSONs
- IMPL_PLAN.md and TODO_LIST.md generated with multi-module structure
- Return: task count, per-module breakdown, resolved dependency count

## PLANNING NOTES RECORD (REQUIRED)
After integration, update planning-notes.md:

\`\`\`markdown
### [Coordinator] YYYY-MM-DD
- **Total**: [count] tasks
- **Resolved**: [CROSS:: resolutions]

## N+1 Context
### Decisions
| Decision | Rationale | Revisit? |
|----------|-----------|----------|
| CROSS::X → IMPL-Y | [why this resolution] | [Yes/No] |

### Deferred
- [ ] [unresolved CROSS or conflict] - [reason]
\`\`\`
`
)
```

**Dependency Resolution Algorithm**:
```javascript
function resolveCrossModuleDependency(placeholder, allTasks) {
  const [, targetModule, pattern] = placeholder.match(/CROSS::(\w+)::(.+)/);
  const candidates = allTasks.filter(t =>
    t.id.startsWith(`IMPL-${targetModule}`) &&
    (t.title.toLowerCase().includes(pattern.toLowerCase()) ||
     t.context?.description?.toLowerCase().includes(pattern.toLowerCase()))
  );
  return candidates.length > 0
    // Numeric compare so IMPL-B2 sorts before IMPL-B10
    ? candidates.sort((a, b) => a.id.localeCompare(b.id, undefined, { numeric: true }))[0].id
    : placeholder; // Keep for manual resolution
}
```

## Output

- **Files**:
  - `.workflow/active/{sessionId}/IMPL_PLAN.md`
  - `.workflow/active/{sessionId}/.task/IMPL-*.json`
  - `.workflow/active/{sessionId}/TODO_LIST.md`
- **Updated**: `planning-notes.md` with task generation record and N+1 context

## Next Step

Return to orchestrator. Present user with action choices:
1. Verify Plan Quality (Recommended) → `/workflow:plan-verify`
2. Start Execution → `/workflow:execute`
3. Review Status Only → `/workflow:status`

.claude/skills/workflow-skill-designer/SKILL.md (new file, 319 lines)
@@ -0,0 +1,319 @@

---
name: workflow-skill-designer
description: Meta-skill for designing orchestrator+phases structured workflow skills. Creates SKILL.md coordinator with progressive phase loading, TodoWrite patterns, and data flow. Triggers on "design workflow skill", "create workflow skill", "workflow skill designer".
allowed-tools: Task, AskUserQuestion, TodoWrite, Read, Write, Edit, Bash, Glob, Grep
---

# Workflow Skill Designer

Meta-skill for creating structured workflow skills following the orchestrator + phases pattern. Generates complete skill packages with SKILL.md as coordinator and phases/ folder for execution details.

## Architecture Overview

```
┌─────────────────────────────────────────────────────────────────┐
│                     Workflow Skill Designer                      │
│  → Analyze requirements → Design orchestrator → Generate phases  │
└───────────────┬─────────────────────────────────────────────────┘
                │
    ┌───────────┼───────────┬───────────┐
    ↓           ↓           ↓           ↓
┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐
│ Phase 1 │ │ Phase 2 │ │ Phase 3 │ │ Phase 4 │
│ Require │ │ Orch    │ │ Phases  │ │ Valid   │
│ Analysis│ │ Design  │ │ Design  │ │ & Integ │
└─────────┘ └─────────┘ └─────────┘ └─────────┘
     ↓           ↓           ↓           ↓
 workflow    SKILL.md    phases/     Complete
  config     generated   0N-*.md     skill pkg
```

## Target Output Structure

The skill this meta-skill produces follows this structure:

```
.claude/skills/{skill-name}/
├── SKILL.md                 # Orchestrator: coordination, data flow, TodoWrite
├── phases/
│   ├── 01-{phase-name}.md   # Phase execution detail (full content)
│   ├── 02-{phase-name}.md
│   ├── ...
│   └── 0N-{phase-name}.md
├── specs/                   # [Optional] Domain specifications
└── templates/               # [Optional] Reusable templates
```

## Core Design Patterns

Patterns extracted from successful workflow skill implementations (workflow-plan, project-analyze, etc.):

### Pattern 1: Orchestrator + Progressive Loading

**SKILL.md** = Pure coordinator. Contains:
- Architecture diagram (ASCII)
- Execution flow with `Ref: phases/0N-xxx.md` markers
- Phase Reference Documents table (read on-demand)
- Data flow between phases
- Core rules and error handling

**Phase files** = Full execution detail. Contains:
- Complete agent prompts, bash commands, code implementations
- Validation checklists, error handling
- Input/Output specification
- Next Phase link

**Key Rule**: SKILL.md references phase docs via `Ref:` markers. Phase docs are read **only when that phase executes**, not all at once.

### Pattern 2: TodoWrite Attachment/Collapse

```
Phase starts:
  → Sub-tasks ATTACHED to TodoWrite (in_progress + pending)
  → Orchestrator executes sub-tasks sequentially

Phase ends:
  → Sub-tasks COLLAPSED back to high-level summary (completed)
  → Next phase begins
```
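
Concretely, a hypothetical Phase 2 with two sub-tasks would look like this mid-phase and after collapse (phase names invented; the shape mirrors the examples Step 2.6 below generates):

```javascript
// Attached: Phase 2 in progress, sub-tasks expanded under it.
const attached = [
  { content: 'Phase 1: Requirements Analysis', status: 'completed' },
  { content: 'Phase 2: Context Gathering', status: 'in_progress' },
  { content: '  → Analyze codebase structure', status: 'in_progress' },
  { content: '  → Identify integration points', status: 'pending' },
  { content: 'Phase 3: Task Generation', status: 'pending' }
];

// Collapsed: sub-tasks removed, phase summarized as completed.
const collapsed = [
  { content: 'Phase 1: Requirements Analysis', status: 'completed' },
  { content: 'Phase 2: Context Gathering', status: 'completed' },
  { content: 'Phase 3: Task Generation', status: 'in_progress' }
];
```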

### Pattern 3: Inter-Phase Data Flow

```
Phase N output → stored in memory/variable → Phase N+1 input
              └─ or written to session file for persistence
```

Each phase receives outputs from prior phases via:
- In-memory variables (sessionId, contextPath, etc.)
- Session directory files (.workflow/active/{sessionId}/...)
- Planning notes (accumulated constraints document)
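
As a sketch, the orchestrator can thread these values through a single context object (`runPhase` is hypothetical; in practice the orchestrator reads the phase document and executes it):

```javascript
// Illustrative only: accumulate phase outputs in one in-memory context.
const ctx = {};
Object.assign(ctx, await runPhase('phases/01-session-discovery.md', ctx)); // adds sessionId
Object.assign(ctx, await runPhase('phases/02-context-gathering.md', ctx)); // adds contextPath, conflictRisk
// Durable hand-off: files under .workflow/active/{sessionId}/ persist between phases
```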

### Pattern 4: Conditional Phase Execution

```
Phase N output contains condition flag
  ├─ condition met → Execute Phase N+1
  └─ condition not met → Skip to Phase N+2
```

### Pattern 5: Input Structuring

User input (free text) → Structured format before Phase 1:
```
GOAL: [objective]
SCOPE: [boundaries]
CONTEXT: [background/constraints]
```
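
A minimal sketch of that conversion (the heuristics are illustrative, not prescribed by the pattern):

```javascript
// Sketch: coerce free-text user input into the GOAL/SCOPE/CONTEXT frame.
function structureInput(freeText) {
  const firstSentence = freeText.trim().split(/(?<=[.!?])\s+/)[0] ?? '';
  return {
    GOAL: firstSentence,
    SCOPE: 'unspecified',    // refined interactively when missing
    CONTEXT: freeText.trim()
  };
}
```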

## Execution Flow

```
Phase 1: Requirements Analysis
  └─ Ref: phases/01-requirements-analysis.md
      ├─ Input source: commands, descriptions, user interaction
      └─ Output: workflowConfig (phases, data flow, agents, conditions)

Phase 2: Orchestrator Design (SKILL.md)
  └─ Ref: phases/02-orchestrator-design.md
      ├─ Input: workflowConfig
      └─ Output: .claude/skills/{name}/SKILL.md

Phase 3: Phase Files Design
  └─ Ref: phases/03-phase-design.md
      ├─ Input: workflowConfig + source content
      └─ Output: .claude/skills/{name}/phases/0N-*.md

Phase 4: Validation & Integration
  └─ Ref: phases/04-validation.md
      └─ Output: Validated skill package
```

**Phase Reference Documents** (read on-demand):

| Phase | Document | Purpose |
|-------|----------|---------|
| 1 | [phases/01-requirements-analysis.md](phases/01-requirements-analysis.md) | Analyze workflow requirements from various sources |
| 2 | [phases/02-orchestrator-design.md](phases/02-orchestrator-design.md) | Generate SKILL.md with orchestration patterns |
| 3 | [phases/03-phase-design.md](phases/03-phase-design.md) | Generate phase files preserving full execution detail |
| 4 | [phases/04-validation.md](phases/04-validation.md) | Validate structure, references, and integration |

## Input Sources

This meta-skill accepts workflow definitions from multiple sources:

| Source | Description | Example |
|--------|-------------|---------|
| **Existing commands** | Convert `.claude/commands/` orchestrator + sub-commands | `plan.md` + `session/start.md` + `tools/*.md` |
| **Text description** | User describes workflow in natural language | "Create a 3-phase code review workflow" |
| **Requirements doc** | Structured requirements file | `requirements.md` with phases/agents/outputs |
| **Existing skill** | Refactor/redesign an existing skill | Restructure a flat skill into phases |

## Frontmatter Conversion Rules

When converting from command format to skill format:

| Command Field | Skill Field | Transformation |
|---------------|-------------|----------------|
| `name` | `name` | Prefix with group: `plan` → `workflow-plan` |
| `description` | `description` | Append trigger phrase: `Triggers on "xxx"` |
| `argument-hint` | _(removed)_ | Arguments handled in Input Processing section |
| `examples` | _(removed)_ | Examples moved to inline documentation |
| `allowed-tools` | `allowed-tools` | Expand wildcards: `Skill(*)` → `Skill`, add commonly needed tools |
| `group` | _(removed)_ | Embedded in `name` prefix |
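
Applied to a hypothetical command, the rules play out like this (using `convertCommandFrontmatter` from Step 2.2 of the phase file later in this diff; all field values invented):

```javascript
const commandFm = {
  name: 'plan',
  group: 'workflow',
  description: '5-phase planning pipeline',
  'argument-hint': '[task description]',             // dropped
  'allowed-tools': 'Skill(*), TodoWrite(*), Read(*)'
};
const skillFm = convertCommandFrontmatter(commandFm, {});
// → name: "workflow-plan"
// → allowedTools: ['Skill', 'TodoWrite', 'Read', 'Task', 'AskUserQuestion', 'Write', 'Edit', 'Bash', 'Glob', 'Grep']
```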

## Orchestrator Content Mapping

What goes into SKILL.md vs what goes into phase files:

### SKILL.md (Coordinator)

| Section | Content | Source |
|---------|---------|--------|
| Frontmatter | name, description, allowed-tools | Command frontmatter (converted) |
| Architecture Overview | ASCII diagram of phase flow | Derived from execution structure |
| Key Design Principles | Coordination rules | Extracted from command coordinator role |
| Execution Flow | Phase sequence with `Ref:` markers + Phase Reference table | Command execution process |
| Core Rules | Orchestration constraints | Command core rules |
| Input Processing | Structured format conversion | Command input processing |
| Data Flow | Inter-phase data passing | Command data flow |
| TodoWrite Pattern | Attachment/collapse lifecycle | Command TodoWrite sections |
| Post-Phase Updates | Planning notes / state updates between phases | Command inter-phase update code |
| Error Handling | Failure recovery | Command error handling |
| Coordinator Checklist | Pre/post phase actions | Command coordinator checklist |
| Related Commands | Prerequisites and follow-ups | Command related commands |

### Phase Files (Execution Detail)

| Content | Rule |
|---------|------|
| Full agent prompts | Preserve verbatim from source command |
| Bash command blocks | Preserve verbatim |
| Code implementations | Preserve verbatim |
| Validation checklists | Preserve verbatim |
| Error handling details | Preserve verbatim |
| Input/Output spec | Add if not present in source |
| Phase header | Add `# Phase N: {Name}` |
| Objective section | Add `## Objective` with bullet points |
| Next Phase link | Add `## Next Phase` with link to next |

**Critical Rule**: Phase files must be **content-faithful** to their source. Do NOT summarize, abbreviate, or simplify. The phase file IS the execution instruction - every bash command, every agent prompt, every validation step must be preserved.

## SKILL.md Template

```markdown
---
name: {skill-name}
description: {description}. Triggers on "{trigger1}", "{trigger2}".
allowed-tools: {tools}
---

# {Title}

{One-paragraph description of what this skill does and what it produces.}

## Architecture Overview

{ASCII diagram showing phases and data flow}

## Key Design Principles

1. **{Principle}**: {Description}
...

## Auto Mode

When `--yes` or `-y`: {auto-mode behavior}.

## Execution Flow

{Phase sequence with Ref: markers}

**Phase Reference Documents** (read on-demand when phase executes):

| Phase | Document | Purpose |
|-------|----------|---------|
| 1 | [phases/01-xxx.md](phases/01-xxx.md) | ... |
...

## Core Rules

1. {Rule}
...

## Input Processing

{How user input is converted to structured format}

## Data Flow

{Inter-phase data passing diagram}

## TodoWrite Pattern

{Attachment/collapse lifecycle description with examples}

## Post-Phase Updates

{State updates between phases}

## Error Handling

{Failure recovery rules}

## Coordinator Checklist

{Pre/post phase action list}

## Related Commands

{Prerequisites and follow-ups}
```

## Phase File Template

```markdown
# Phase N: {Phase Name}

{One-sentence description of this phase's goal.}

## Objective

- {Goal 1}
- {Goal 2}

## Execution

### Step N.1: {Step Name}

{Full execution detail: commands, agent prompts, code}

### Step N.2: {Step Name}

{Full execution detail}

## Output

- **Variable**: `{variableName}` (e.g., `sessionId`)
- **File**: `{output file path}`
- **TodoWrite**: Mark Phase N completed, Phase N+1 in_progress

## Next Phase

Return to orchestrator, then auto-continue to [Phase N+1: xxx](0N+1-xxx.md).
```

## Design Decision Framework

When designing a new workflow skill, answer these questions:

| Question | Impact | Example |
|----------|--------|---------|
| How many phases? | Directory structure | 3-7 phases typical |
| Which phases are conditional? | Orchestrator logic | "Phase 3 only if conflict_risk >= medium" |
| What data flows between phases? | Data Flow section | sessionId, contextPath, configFlags |
| Which phases use agents? | Phase file complexity | Agent prompts need verbatim preservation |
| What's the TodoWrite granularity? | TodoWrite Pattern | Some phases have sub-tasks, others are atomic |
| Is there a planning notes pattern? | Post-Phase Updates | Accumulated state document across phases |
| What's the error recovery? | Error Handling | Retry once then report, vs rollback |
| Does it need auto mode? | Auto Mode section | Skip confirmations with --yes flag |

.claude/skills/workflow-skill-designer/phases/01-requirements-analysis.md (new file, 356 lines)
@@ -0,0 +1,356 @@

# Phase 1: Requirements Analysis

Analyze workflow requirements from various sources (commands, descriptions, requirements docs) to build a structured workflow configuration.

## Objective

- Identify all phases/steps in the workflow
- Map data flow between phases
- Identify agents, tools, and conditional logic
- Detect source type and extract content accordingly
- Produce `workflowConfig` object for subsequent phases

## Step 1.1: Identify Input Source

```javascript
// Determine what the user provided
const inputType = detectInputType(userInput);
// Returns: 'command_set' | 'text_description' | 'requirements_doc' | 'existing_skill'
```

### Source Type Detection

| Indicator | Type | Action |
|-----------|------|--------|
| Path to `.claude/commands/**/*.md` | `command_set` | Read orchestrator + discover sub-commands |
| Free text describing workflow | `text_description` | Interactive requirements gathering |
| Path to `.md` or `.json` requirements | `requirements_doc` | Parse structured requirements |
| Path to `.claude/skills/**/*.md` | `existing_skill` | Analyze and restructure |
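
A sketch of `detectInputType` consistent with this table (the path heuristics are illustrative, not part of the source):

```javascript
// Sketch: classify the user's input per the detection table above.
function detectInputType(userInput) {
  const input = userInput.trim();
  if (/\.claude\/commands\/.*\.md$/.test(input)) return 'command_set';
  if (/\.claude\/skills\/.*\.md$/.test(input)) return 'existing_skill';
  if (/\.(md|json)$/.test(input)) return 'requirements_doc';
  return 'text_description'; // free text → interactive gathering (Mode B)
}
```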

## Step 1.2: Source-Specific Analysis

### Mode A: Command Set Analysis

When source is an existing orchestrator command + sub-commands:

```javascript
// Step A.1: Read orchestrator command
const orchestratorPath = userInput; // e.g., ".claude/commands/workflow/plan.md"
const orchestratorContent = Read(orchestratorPath);

// Step A.2: Extract frontmatter
const frontmatter = extractYAMLFrontmatter(orchestratorContent);
// Fields: name, description, argument-hint, examples, allowed-tools, group

// Step A.3: Discover sub-commands by scanning Skill() calls
const skillCalls = orchestratorContent.match(/Skill\(skill="([^"]+)"/g);
// e.g., ["workflow:session:start", "workflow:tools:context-gather", ...]

// Step A.4: Map Skill() calls to file paths
// Pattern: "workflow:session:start" → ".claude/commands/workflow/session/start.md"
//          "workflow:tools:context-gather" → ".claude/commands/workflow/tools/context-gather.md"
const subCommandPaths = skillCalls.map(call => {
  const parts = call.replace('Skill(skill="', '').replace('"', '').split(':');
  return `.claude/commands/${parts.join('/')}.md`;
});

// Step A.5: Read all sub-commands
const subCommands = [];
for (const path of subCommandPaths) {
  const content = Read(path);
  const fm = extractYAMLFrontmatter(content);
  subCommands.push({
    path: path,
    content: content,
    frontmatter: fm,
    skillCallName: extractSkillCallName(path),
    bodyContent: removeYAMLFrontmatter(content)
  });
}

// Step A.6: Identify phase ordering from orchestrator execution flow
// Look for patterns like:
//   "Phase 1: ..." → first Skill() call
//   "Phase 2: ..." → second Skill() call
//   Conditional logic (if/else) → conditional phases
const phaseOrder = extractPhaseOrder(orchestratorContent, skillCalls);
```
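
Step A.5 calls an `extractSkillCallName` helper the source leaves undefined; a plausible sketch is simply the inverse of the Step A.4 path mapping:

```javascript
// Sketch: ".claude/commands/workflow/session/start.md" → "workflow:session:start"
function extractSkillCallName(path) {
  return path
    .replace(/^\.claude\/commands\//, '')
    .replace(/\.md$/, '')
    .split('/')
    .join(':');
}
```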

**Key Extraction Points from Orchestrator**:

| Section | What to Extract | Maps to |
|---------|-----------------|---------|
| Coordinator Role / Overview | Workflow description, execution model | SKILL.md description + Architecture |
| Core Rules | Orchestration constraints | SKILL.md Core Rules |
| Execution Process | Phase sequence + conditions | SKILL.md Execution Flow |
| Data Flow | Inter-phase variables | SKILL.md Data Flow |
| TodoWrite Pattern | Attachment/collapse examples | SKILL.md TodoWrite Pattern |
| Input Processing | Structured format rules | SKILL.md Input Processing |
| Error Handling | Recovery strategies | SKILL.md Error Handling |
| Coordinator Checklist | Pre/post actions | SKILL.md Coordinator Checklist |
| Related Commands | Prerequisites/follow-ups | SKILL.md Related Commands |
| Phase N sections | Phase-specific orchestrator instructions | SKILL.md inline (brief), Phase files (detail) |

**Key Extraction Points from Sub-Commands**:

| Section | What to Extract | Maps to |
|---------|-----------------|---------|
| Full body content | Complete execution detail | Phase file (preserved verbatim) |
| Agent prompts (Task calls) | Agent delegation logic | Phase file agent sections |
| Bash command blocks | Shell execution steps | Phase file step sections |
| Validation/Output sections | Phase outputs | Phase file Output section |
| Frontmatter | Tools, description | Phase file header context |

### Mode B: Text Description Analysis

When source is a natural language workflow description:

```javascript
// Interactive requirements gathering
const basicInfo = AskUserQuestion({
  questions: [
    {
      question: "What is this workflow skill's name? (kebab-case)",
      header: "Name",
      multiSelect: false,
      options: [
        { label: "Custom name", description: "Enter a custom skill name" },
        { label: "Auto-generate", description: "Generate from workflow description" }
      ]
    },
    {
      question: "How many main phases does this workflow have?",
      header: "Phases",
      multiSelect: false,
      options: [
        { label: "3 phases", description: "Simple linear workflow" },
        { label: "4 phases", description: "Standard workflow with validation" },
        { label: "5+ phases", description: "Complex workflow with conditions" }
      ]
    }
  ]
});

// For each phase, gather details
const phases = [];
for (let i = 0; i < phaseCount; i++) {
  const phaseInfo = AskUserQuestion({
    questions: [
      {
        question: `Phase ${i+1}: What does this phase do?`,
        header: `Phase ${i+1}`,
        multiSelect: false,
        options: [
          { label: "Session/Init", description: "Initialize session or state" },
          { label: "Context/Gather", description: "Collect information or analyze" },
          { label: "Process/Transform", description: "Process data or generate artifacts" },
          { label: "Validate/Review", description: "Quality check or user review" }
        ]
      },
      {
        question: `Phase ${i+1}: Does it use agents?`,
        header: "Agents",
        multiSelect: false,
        options: [
          { label: "No agents", description: "Direct execution only" },
          { label: "Single agent", description: "Delegates to one agent" },
          { label: "Multiple agents", description: "Parallel or sequential agents" }
        ]
      }
    ]
  });
  phases.push(phaseInfo);
}

// Gather conditional logic
const conditions = AskUserQuestion({
  questions: [{
    question: "Are any phases conditional (skipped based on previous results)?",
    header: "Conditions",
    multiSelect: false,
    options: [
      { label: "No conditions", description: "All phases always execute" },
      { label: "Has conditions", description: "Some phases execute conditionally" }
    ]
  }]
});
```

### Mode C: Requirements Document

When source is a structured requirements document:

```javascript
// Read and parse requirements
const reqContent = Read(requirementsPath);

// Extract structured fields
// Expected format: Markdown with ## sections for each phase
// Or JSON with phases array
const requirements = parseRequirements(reqContent);
```
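
`parseRequirements` is left abstract in the source; one plausible sketch covering both expected formats:

```javascript
// Sketch: accept JSON ({ phases: [...] }) or markdown with one "## ..." section per phase.
function parseRequirements(reqContent) {
  try {
    return JSON.parse(reqContent); // JSON requirements doc
  } catch {
    const sections = reqContent.split(/^## /m).slice(1); // markdown sections
    return {
      phases: sections.map((body, i) => ({
        number: i + 1,
        name: body.split('\n')[0].trim(),
        body
      }))
    };
  }
}
```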

### Mode D: Existing Skill Restructure

When source is an existing skill to refactor:

```javascript
// Read existing SKILL.md
const existingSkill = Read(skillPath);

// Scan for phase files
const existingPhases = Glob(`${skillDir}/phases/*.md`);

// Analyze current structure for improvement
const analysis = analyzeExistingStructure(existingSkill, existingPhases);
```

## Step 1.3: Build Workflow Configuration

Regardless of source type, produce a unified `workflowConfig`:

```javascript
const workflowConfig = {
  // Metadata
  skillName: "workflow-plan",           // kebab-case
  title: "Workflow Plan",               // Human-readable
  description: "5-phase planning...",   // One-line description
  triggers: ["workflow:plan"],          // Trigger phrases
  allowedTools: ["Task", "AskUserQuestion", "TodoWrite", "Read", "Write", "Edit", "Bash", "Glob", "Grep", "Skill"],

  // Source information
  source: {
    type: "command_set",        // input source type
    orchestratorPath: "...",    // original orchestrator file
    subCommandPaths: ["..."]    // original sub-command files
  },

  // Phase definitions
  phases: [
    {
      number: 1,
      name: "Session Discovery",
      slug: "session-discovery",  // for filename: 01-session-discovery.md
      description: "Create or discover workflow session",
      sourcePath: ".claude/commands/workflow/session/start.md",
      isConditional: false,
      condition: null,
      usesAgents: false,
      agentTypes: [],
      todoWriteSubTasks: [],  // no sub-tasks (atomic phase)
      outputVariables: ["sessionId"],
      outputFiles: ["planning-notes.md"]
    },
    {
      number: 2,
      name: "Context Gathering",
      slug: "context-gathering",
      description: "Gather project context via agents",
      sourcePath: ".claude/commands/workflow/tools/context-gather.md",
      isConditional: false,
      condition: null,
      usesAgents: true,
      agentTypes: ["cli-explore-agent", "context-search-agent"],
      todoWriteSubTasks: [
        "Analyze codebase structure",
        "Identify integration points",
        "Generate context package"
      ],
      outputVariables: ["contextPath", "conflictRisk"],
      outputFiles: ["context-package.json"]
    },
    {
      number: 3,
      name: "Conflict Resolution",
      slug: "conflict-resolution",
      description: "Detect and resolve conflicts",
      sourcePath: ".claude/commands/workflow/tools/conflict-resolution.md",
      isConditional: true,
      condition: "conflictRisk >= 'medium'",
      usesAgents: true,
      agentTypes: ["cli-execution-agent"],
      todoWriteSubTasks: [
        "Detect conflicts with CLI analysis",
        "Present conflicts to user",
        "Apply resolution strategies"
      ],
      outputVariables: [],
      outputFiles: ["conflict-resolution.json"]
    },
    {
      number: 4,
      name: "Task Generation",
      slug: "task-generation",
      description: "Generate implementation plan and task JSONs",
      sourcePath: ".claude/commands/workflow/tools/task-generate-agent.md",
      isConditional: false,
      condition: null,
      usesAgents: true,
      agentTypes: ["action-planning-agent"],
      todoWriteSubTasks: [],  // single agent task
      outputVariables: [],
      outputFiles: ["IMPL_PLAN.md", "IMPL-*.json", "TODO_LIST.md"]
    }
  ],

  // Data flow
  dataFlow: [
    { from: "input", to: "phase1", variables: ["structuredDescription"] },
    { from: "phase1", to: "phase2", variables: ["sessionId"] },
    { from: "phase2", to: "phase3", variables: ["contextPath", "conflictRisk"] },
    { from: "phase2", to: "phase4", variables: ["contextPath"] },
    { from: "phase3", to: "phase4", variables: ["resolvedArtifacts"] }
  ],

  // Features
  features: {
    hasAutoMode: true,             // --yes flag support
    hasConditionalPhases: true,    // some phases may be skipped
    hasTodoWriteSubTasks: true,    // phases expand into sub-tasks
    hasPlanningNotes: true,        // accumulated state document
    hasPostPhaseUpdates: true,     // state updates between phases
    hasMemoryCompaction: true,     // compact after heavy phases
    hasUserDecisionGate: true      // user choice after final phase
  }
};
```
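
Before Phase 2 consumes this object, a quick consistency pass can catch malformed configs early; a hedged sketch (the specific checks are illustrative, not part of the source):

```javascript
// Sketch: sanity-check a workflowConfig before handing it to Phase 2.
function validateWorkflowConfig(config) {
  const errors = [];
  const ids = new Set(config.phases.map(p => `phase${p.number}`));
  if (!/^[a-z0-9]+(-[a-z0-9]+)*$/.test(config.skillName)) errors.push('skillName must be kebab-case');
  for (const edge of config.dataFlow) {
    if (edge.from !== 'input' && !ids.has(edge.from)) errors.push(`unknown dataFlow.from: ${edge.from}`);
    if (!ids.has(edge.to)) errors.push(`unknown dataFlow.to: ${edge.to}`);
  }
  for (const p of config.phases) {
    if (p.isConditional && !p.condition) errors.push(`Phase ${p.number} is conditional but has no condition`);
  }
  return errors; // empty array → safe to proceed
}
```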

## Step 1.4: User Confirmation

Present the analyzed structure to the user for confirmation:

```javascript
// Display summary
console.log(`
Workflow Analysis Complete:
  Name: ${workflowConfig.skillName}
  Phases: ${workflowConfig.phases.length}
${workflowConfig.phases.map(p =>
  `  ${p.number}. ${p.name}${p.isConditional ? ' (conditional)' : ''}${p.usesAgents ? ` [${p.agentTypes.join(', ')}]` : ''}`
).join('\n')}
  Data Flow: ${workflowConfig.dataFlow.length} connections
  Features: ${Object.entries(workflowConfig.features).filter(([,v]) => v).map(([k]) => k).join(', ')}
`);

const confirm = AskUserQuestion({
  questions: [{
    question: "Proceed with this workflow structure?",
    header: "Confirm",
    multiSelect: false,
    options: [
      { label: "Yes, proceed", description: "Generate skill with this structure" },
      { label: "Modify phases", description: "Adjust phase count or ordering" },
      { label: "Add features", description: "Enable additional patterns (auto mode, conditions, etc.)" }
    ]
  }]
});
```

## Output

- **Variable**: `workflowConfig` (structured configuration object)
- **TodoWrite**: Mark Phase 1 completed, Phase 2 in_progress

## Next Phase

Return to orchestrator, then auto-continue to [Phase 2: Orchestrator Design](02-orchestrator-design.md).

.claude/skills/workflow-skill-designer/phases/02-orchestrator-design.md (new file, 381 lines)
@@ -0,0 +1,381 @@

# Phase 2: Orchestrator Design

Generate the SKILL.md orchestrator file from workflowConfig, applying all coordination patterns (progressive loading, TodoWrite, data flow, conditional execution).

## Objective

- Create `.claude/skills/{skillName}/SKILL.md` as pure coordinator
- Apply frontmatter conversion rules
- Generate architecture diagram from phase structure
- Build execution flow with `Ref:` markers and phase reference table
- Generate data flow diagram
- Build TodoWrite attachment/collapse patterns from phase definitions
- Include all orchestrator-level sections

## Step 2.1: Create Directory Structure

```bash
skillDir=".claude/skills/${workflowConfig.skillName}"
mkdir -p "${skillDir}/phases"

# Optional directories based on features
# mkdir -p "${skillDir}/specs"      # if has domain specifications
# mkdir -p "${skillDir}/templates"  # if has reusable templates
```

## Step 2.2: Generate Frontmatter

```javascript
function generateFrontmatter(config) {
  return `---
name: ${config.skillName}
description: ${config.description}. Triggers on ${config.triggers.map(t => `"${t}"`).join(', ')}.
allowed-tools: ${config.allowedTools.join(', ')}
---`;
}
```

**Conversion from command frontmatter**:

```javascript
// If source is command_set, convert fields:
function convertCommandFrontmatter(commandFm, config) {
  return {
    name: commandFm.group
      ? `${commandFm.group}-${commandFm.name}`  // "workflow" + "plan" → "workflow-plan"
      : commandFm.name,
    description: commandFm.description,
    // argument-hint → removed (handled in Input Processing section)
    // examples → removed (moved to inline docs)
    // group → embedded in name prefix
    allowedTools: expandToolWildcards(commandFm['allowed-tools'])
    // "Skill(*), TodoWrite(*), Read(*)" → "Task, AskUserQuestion, TodoWrite, Read, Write, Edit, Bash, Glob, Grep, Skill"
  };
}

// Expand tool wildcards
function expandToolWildcards(toolsStr) {
  // "Skill(*)" → "Skill", "TodoWrite(*)" → "TodoWrite", etc. — one regex replaces the per-tool chain
  const expanded = toolsStr.replace(/(\w+)\(\*\)/g, '$1');

  // Add commonly needed tools if not present
  const baseTools = ['Task', 'AskUserQuestion', 'TodoWrite', 'Read', 'Write', 'Edit', 'Bash', 'Glob', 'Grep'];
  const current = expanded.split(',').map(t => t.trim());
  const merged = [...new Set([...current, ...baseTools])];
  return merged;
}
```

## Step 2.3: Generate Architecture Diagram

```javascript
function generateArchitectureDiagram(config) {
  const phases = config.phases;
  const maxWidth = 65;

  let diagram = '```\n';
  diagram += '┌' + '─'.repeat(maxWidth) + '┐\n';
  diagram += `│ ${config.title} Orchestrator (SKILL.md)${' '.repeat(Math.max(0, maxWidth - config.title.length - 30))}│\n`;
  diagram += `│ → Pure coordinator: Execute phases, parse outputs, pass context${' '.repeat(Math.max(0, maxWidth - 64))}│\n`;
  diagram += '└' + '─'.repeat(Math.floor(maxWidth/2)) + '┬' + '─'.repeat(maxWidth - Math.floor(maxWidth/2) - 1) + '┘\n';

  // Phase boxes
  diagram += '            │\n';
  diagram += ' ' + phases.map(() => '┌─────────┐').join(' ') + '\n';
  diagram += ' ' + phases.map((p, i) => {
    const label = `Phase ${p.number}`.padEnd(9);
    return `│${label}│`;
  }).join(' ') + '\n';
  diagram += ' ' + phases.map(p => {
    const name = p.name.substring(0, 9).padEnd(9);
    return `│${name}│`;
  }).join(' ') + '\n';
  diagram += ' ' + phases.map(() => '└─────────┘').join(' ') + '\n';

  // Output labels
  diagram += ' ' + phases.map(p => {
    const vars = p.outputVariables.join(', ').substring(0, 11).padEnd(11);
    return ` ${vars}`;
  }).join('') + '\n';

  diagram += '```';
  return diagram;
}
```

## Step 2.4: Generate Execution Flow

The execution flow uses `Ref:` markers to point to phase documents, with a Phase Reference Documents table inline.

```javascript
function generateExecutionFlow(config) {
  let flow = '## Execution Flow\n\n```\n';
  flow += 'Input Parsing:\n';
  flow += '  └─ Convert user input to structured format (GOAL/SCOPE/CONTEXT)\n\n';

  for (const phase of config.phases) {
    flow += `Phase ${phase.number}: ${phase.name}\n`;

    if (phase.isConditional) {
      flow += `  └─ Decision (${phase.condition}):\n`;
      flow += `     ├─ condition met → Ref: phases/${String(phase.number).padStart(2, '0')}-${phase.slug}.md\n`;
      if (phase.todoWriteSubTasks.length > 0) {
        flow += `     │   ├─ Tasks attached: ${phase.todoWriteSubTasks.join(' → ')}\n`;
      }
      flow += `     │   └─ Output: ${phase.outputFiles.join(', ') || phase.outputVariables.join(', ')}\n`;
      flow += `     └─ condition not met → Skip to Phase ${phase.number + 1}\n`;
    } else {
      flow += `  └─ Ref: phases/${String(phase.number).padStart(2, '0')}-${phase.slug}.md\n`;
      if (phase.todoWriteSubTasks.length > 0) {
        flow += `     ├─ Tasks attached: ${phase.todoWriteSubTasks.join(' → ')}\n`;
      }
      flow += `     └─ Output: ${[...phase.outputVariables, ...phase.outputFiles].join(', ')}\n`;
    }
    flow += '\n';
  }

  flow += 'Return:\n  └─ Summary with recommended next steps\n';
  flow += '```\n\n';

  // Phase Reference Documents table
  flow += '**Phase Reference Documents** (read on-demand when phase executes):\n\n';
  flow += '| Phase | Document | Purpose |\n';
  flow += '|-------|----------|---------|\n';
  for (const phase of config.phases) {
    const filename = `${String(phase.number).padStart(2, '0')}-${phase.slug}.md`;
    flow += `| ${phase.number} | [phases/${filename}](phases/${filename}) | ${phase.description} |\n`;
  }

  return flow;
}
```

## Step 2.5: Generate Data Flow Section

```javascript
function generateDataFlow(config) {
  let section = '## Data Flow\n\n```\n';
  section += 'User Input (task description)\n';
  section += '  ↓\n';
  section += '[Convert to Structured Format]\n';

  for (const phase of config.phases) {
    const inputVars = config.dataFlow
      .filter(d => d.to === `phase${phase.number}`)
      .flatMap(d => d.variables);
    const outputVars = [...phase.outputVariables, ...phase.outputFiles];

    section += '  ↓\n';
    section += `Phase ${phase.number}: ${phase.name}\n`;
    if (inputVars.length > 0) {
      section += `  ↓ Input: ${inputVars.join(' + ')}\n`;
    }
    if (outputVars.length > 0) {
      section += `  ↓ Output: ${outputVars.join(' + ')}\n`;
    }
    if (phase.isConditional) {
      section += `  ↓ Skip if ${phase.condition} is false → proceed to Phase ${phase.number + 1}\n`;
    }
  }

  section += '  ↓\n';
  section += 'Return summary to user\n';
  section += '```\n';
  return section;
}
```

## Step 2.6: Generate TodoWrite Pattern

```javascript
function generateTodoWritePattern(config) {
  let section = '## TodoWrite Pattern\n\n';
  section += '**Core Concept**: Dynamic task attachment and collapse for real-time visibility.\n\n';

  section += '### Key Principles\n\n';
  section += '1. **Task Attachment** (when phase executed):\n';
  section += '   - Sub-tasks are **attached** to orchestrator\'s TodoWrite\n';

  // Identify which phases have sub-tasks
  const phasesWithSubTasks = config.phases.filter(p => p.todoWriteSubTasks.length > 0);
  const phasesWithoutSubTasks = config.phases.filter(p => p.todoWriteSubTasks.length === 0);

  if (phasesWithSubTasks.length > 0) {
    section += `   - **${phasesWithSubTasks.map(p => `Phase ${p.number}`).join(', ')}**: Multiple sub-tasks attached\n`;
  }
  if (phasesWithoutSubTasks.length > 0) {
    section += `   - **${phasesWithoutSubTasks.map(p => `Phase ${p.number}`).join(', ')}**: Single task (atomic)\n`;
  }

  section += '\n2. **Task Collapse** (after sub-tasks complete):\n';
  if (phasesWithSubTasks.length > 0) {
    section += `   - **Applies to ${phasesWithSubTasks.map(p => `Phase ${p.number}`).join(', ')}**: Remove sub-tasks, collapse to summary\n`;
  }
  section += '   - Maintains clean orchestrator-level view\n';

  section += '\n3. **Continuous Execution**: After completion, automatically proceed to next phase\n\n';

  // Generate TodoWrite examples for phases with sub-tasks
  for (const phase of phasesWithSubTasks) {
    section += `### Phase ${phase.number} (Tasks Attached):\n`;
    section += '```json\n[\n';

    // Previous phases completed
    for (const prev of config.phases.filter(p => p.number < phase.number)) {
      section += `  {"content": "Phase ${prev.number}: ${prev.name}", "status": "completed"},\n`;
    }

    // Current phase in_progress with sub-tasks
    section += `  {"content": "Phase ${phase.number}: ${phase.name}", "status": "in_progress"},\n`;
    phase.todoWriteSubTasks.forEach((task, i) => {
      const status = i === 0 ? 'in_progress' : 'pending';
      section += `  {"content": "  → ${task}", "status": "${status}"},\n`;
    });

    // Remaining phases pending
    for (const next of config.phases.filter(p => p.number > phase.number && !p.isConditional)) {
      section += `  {"content": "Phase ${next.number}: ${next.name}", "status": "pending"},\n`;
    }
|
||||
|
||||
section += ']\n```\n\n';
|
||||
|
||||
// Collapsed version
|
||||
section += `### Phase ${phase.number} (Collapsed):\n`;
|
||||
section += '```json\n[\n';
|
||||
for (const p of config.phases.filter(pp => !pp.isConditional || pp.number <= phase.number)) {
|
||||
const status = p.number <= phase.number ? 'completed' : 'pending';
|
||||
section += ` {"content": "Phase ${p.number}: ${p.name}", "status": "${status}"},\n`;
|
||||
}
|
||||
section += ']\n```\n\n';
|
||||
}
|
||||
|
||||
return section;
|
||||
}
|
||||
```
|
||||
|
||||
## Step 2.7: Generate Remaining Sections

Extract these from the source orchestrator when available, otherwise generate defaults from config:

```javascript
function generateOrchestratorSections(config, sourceContent) {
  const sections = [];

  // Auto Mode (if feature enabled)
  if (config.features.hasAutoMode) {
    sections.push(extractOrGenerate(sourceContent, 'Auto Mode',
      '## Auto Mode\n\nWhen `--yes` or `-y`: Auto-continue all phases, use recommended defaults.\n'));
  }

  // Core Rules
  sections.push(extractOrGenerate(sourceContent, 'Core Rules',
    generateDefaultCoreRules(config)));

  // Input Processing
  sections.push(extractOrGenerate(sourceContent, 'Input Processing',
    generateDefaultInputProcessing(config)));

  // Post-Phase Updates (if feature enabled)
  if (config.features.hasPostPhaseUpdates) {
    sections.push(extractOrGenerate(sourceContent, 'Post-Phase Updates',
      generatePostPhaseUpdates(config)));
  }

  // Error Handling
  sections.push(extractOrGenerate(sourceContent, 'Error Handling',
    generateDefaultErrorHandling()));

  // Coordinator Checklist
  sections.push(extractOrGenerate(sourceContent, 'Coordinator Checklist',
    generateCoordinatorChecklist(config)));

  // Related Commands
  sections.push(extractOrGenerate(sourceContent, 'Related Commands',
    generateRelatedCommands(config)));

  return sections.join('\n\n');
}

// Extract the section from source if it exists, otherwise generate the default
function extractOrGenerate(sourceContent, sectionName, defaultContent) {
  if (sourceContent) {
    const extracted = extractSection(sourceContent, sectionName);
    if (extracted) return extracted;
  }
  return defaultContent;
}

// Default Core Rules template
function generateDefaultCoreRules(config) {
  return `## Core Rules

1. **Start Immediately**: First action is TodoWrite initialization, second action is Phase 1 execution
2. **No Preliminary Analysis**: Do not read files or gather context before Phase 1
3. **Parse Every Output**: Extract required data from each phase for the next phase
4. **Auto-Continue**: Check TodoList status to execute the next pending phase automatically
5. **Track Progress**: Update TodoWrite dynamically with the task attachment/collapse pattern
6. **Progressive Phase Loading**: Read phase docs ONLY when that phase is about to execute
7. **DO NOT STOP**: Continuous multi-phase workflow until all phases complete`;
}

// Default Error Handling template
function generateDefaultErrorHandling() {
  return `## Error Handling

- **Parsing Failure**: If output parsing fails, retry once, then report the error
- **Validation Failure**: Report which file/data is missing
- **Command Failure**: Keep the phase \`in_progress\`, report the error, do not proceed`;
}
```

## Step 2.8: Assemble SKILL.md

```javascript
function assembleSkillMd(config, sourceContent) {
  const skillDir = `.claude/skills/${config.skillName}`;
  const parts = [
    generateFrontmatter(config),
    '',
    `# ${config.title}`,
    '',
    config.description,
    '',
    generateArchitectureDiagram(config),
    '',
    generateDesignPrinciples(config),
    '',
    generateExecutionFlow(config),
    '',
    generateDataFlow(config),
    '',
    generateTodoWritePattern(config),
    '',
    generateOrchestratorSections(config, sourceContent)
  ];

  const skillMdContent = parts.join('\n');
  Write(`${skillDir}/SKILL.md`, skillMdContent);
}
```

**Critical Quality Rules**:

1. SKILL.md must NOT contain full execution detail (agent prompts, bash commands)
2. SKILL.md MUST contain `Ref:` markers pointing to phase files
3. SKILL.md MUST contain the Phase Reference Documents table
4. Every phase mentioned in the Execution Flow must have a corresponding phase file
5. Data flow variables must be consistent across sections

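These rules are enforced formally in Phase 4. As a rough self-check right after assembly, a sketch like the following could be used (it assumes the `Read` helper used throughout this document; the patterns are illustrative shorthand, not the full Phase 4 validation):

```javascript
// Minimal sketch: spot-check the Critical Quality Rules on a generated SKILL.md.
function quickQualityCheck(skillDir) {
  const skillMd = Read(`${skillDir}/SKILL.md`);
  const problems = [];
  if (/```bash/.test(skillMd)) problems.push('Rule 1: bash execution detail found in SKILL.md');
  if (!/Ref: phases\//.test(skillMd)) problems.push('Rule 2: no Ref: markers found');
  if (!skillMd.includes('Phase Reference Documents')) problems.push('Rule 3: reference table missing');
  return problems;
}
```
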
## Output

- **File**: `.claude/skills/{skillName}/SKILL.md`
- **TodoWrite**: Mark Phase 2 completed, Phase 3 in_progress

## Next Phase

Return to orchestrator, then auto-continue to [Phase 3: Phase Files Design](03-phase-design.md).

.claude/skills/workflow-skill-designer/phases/03-phase-design.md (new file, 356 lines)
@@ -0,0 +1,356 @@
# Phase 3: Phase Files Design

Generate phase files in the `phases/` directory, preserving full execution detail from the source content. Each phase file is a complete execution instruction.

## Objective

- Create `phases/0N-{slug}.md` for each phase in workflowConfig
- Preserve full source content (agent prompts, bash commands, code, validation)
- Add the standard phase structure (header, objective, output, next phase)
- Handle different source types (command extraction vs new generation)

## Critical Rule

**Content Fidelity**: Phase files must be **content-faithful** to their source. Do NOT summarize, abbreviate, or simplify execution detail. The phase file IS the execution instruction.

| Content Type | Rule |
|-------------|------|
| Agent prompts (Task calls) | Preserve **verbatim** including all prompt text, variables, constraints |
| Bash command blocks | Preserve **verbatim** including all flags, paths, error handling |
| Code implementations | Preserve **verbatim** including all functions, validation logic |
| Validation checklists | Preserve **verbatim** including all check items |
| Error handling details | Preserve **verbatim** including recovery strategies |
| Tables and specifications | Preserve **verbatim** including all rows and columns |
| Comments and notes | Preserve **verbatim** including inline documentation |

**Anti-Pattern**: Creating a phase file that says "See original command for details" or "Execute the agent with appropriate parameters" defeats the purpose of the skill structure. The phase file must be self-contained.

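As a rough illustration, a checker for this anti-pattern could scan generated phase files for deferral phrases. The phrase list below is a hypothetical starting point, not a rule from this skill:

```javascript
// Sketch: flag phase files that defer to the source instead of being self-contained.
// The phrase list is illustrative; extend it with patterns seen in practice.
function detectFidelityAntiPatterns(phaseContent) {
  const deferralPhrases = [
    'see original command',
    'refer to the original',
    'with appropriate parameters',
    'as described in the source'
  ];
  const lower = phaseContent.toLowerCase();
  return deferralPhrases.filter(p => lower.includes(p));
}
```
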
## Step 3.1: Phase File Generation Strategy

```javascript
function selectGenerationStrategy(phase, config) {
  if (config.source.type === 'command_set' && phase.sourcePath) {
    return 'extract';      // Extract from existing command file
  } else if (config.source.type === 'text_description') {
    return 'generate';     // Generate from requirements
  } else if (config.source.type === 'existing_skill') {
    return 'restructure';  // Restructure existing content
  }
  return 'generate';
}
```

## Step 3.2: Mode A - Extract from Command

When the source is an existing command file, transform its content into phase file format:

```javascript
function extractPhaseFromCommand(phase, config, options = {}) {
  // options.aggressive: set by Step 3.4 to request maximal preservation on re-extraction
  const sourceContent = Read(phase.sourcePath);
  const sourceFrontmatter = extractYAMLFrontmatter(sourceContent);
  const sourceBody = removeYAMLFrontmatter(sourceContent);

  // Phase file structure:
  //   1. Phase header (new)
  //   2. Source body content (preserved verbatim)
  //   3. Output section (extracted or added)
  //   4. Next Phase link (new)

  let phaseContent = '';

  // 1. Phase header
  phaseContent += `# Phase ${phase.number}: ${phase.name}\n\n`;
  phaseContent += `${phase.description}.\n\n`;

  // 2. Source body content - PRESERVED VERBATIM
  // Only modifications:
  //   a. Remove original H1 title (replaced by phase header)
  //   b. Remove command-specific frontmatter references
  //   c. Preserve everything else as-is

  // Remove original H1 title line(s)
  let bodyContent = sourceBody;
  bodyContent = bodyContent.replace(/^# .+\n+/, '');

  // Remove a command-specific overview only if it just restates the phase header,
  // but KEEP any overview content that adds execution detail

  phaseContent += bodyContent;

  // 3. Ensure Output section exists
  if (!bodyContent.includes('## Output')) {
    phaseContent += '\n## Output\n\n';
    if (phase.outputVariables.length > 0) {
      phaseContent += phase.outputVariables.map(v => `- **Variable**: \`${v}\``).join('\n') + '\n';
    }
    if (phase.outputFiles.length > 0) {
      phaseContent += phase.outputFiles.map(f => `- **File**: \`${f}\``).join('\n') + '\n';
    }
    phaseContent += `- **TodoWrite**: Mark Phase ${phase.number} completed, Phase ${phase.number + 1} in_progress\n`;
  }

  // 4. Ensure Next Phase link exists
  if (!bodyContent.includes('## Next Phase')) {
    const nextPhase = config.phases.find(p => p.number === phase.number + 1);
    if (nextPhase) {
      const nextFilename = `${String(nextPhase.number).padStart(2, '0')}-${nextPhase.slug}.md`;
      phaseContent += `\n## Next Phase\n\n`;
      phaseContent += `Return to orchestrator, then auto-continue to [Phase ${nextPhase.number}: ${nextPhase.name}](${nextFilename}).\n`;
    }
  }

  return phaseContent;
}
```

### Content Preservation Checklist

When extracting from commands, verify these content types are preserved:

```javascript
function verifyContentPreservation(sourceContent, phaseContent) {
  const checks = {
    // Count code blocks (each block has an opening and closing fence)
    sourceCodeBlocks: (sourceContent.match(/```/g) || []).length / 2,
    phaseCodeBlocks: (phaseContent.match(/```/g) || []).length / 2,

    // Count Task/Agent calls
    sourceAgentCalls: (sourceContent.match(/Task\(/g) || []).length,
    phaseAgentCalls: (phaseContent.match(/Task\(/g) || []).length,

    // Count bash commands
    sourceBashBlocks: (sourceContent.match(/```bash/g) || []).length,
    phaseBashBlocks: (phaseContent.match(/```bash/g) || []).length,

    // Count tables
    sourceTables: (sourceContent.match(/\|.*\|.*\|/g) || []).length,
    phaseTables: (phaseContent.match(/\|.*\|.*\|/g) || []).length,

    // Count AskUserQuestion calls
    sourceAUQ: (sourceContent.match(/AskUserQuestion/g) || []).length,
    phaseAUQ: (phaseContent.match(/AskUserQuestion/g) || []).length,

    // Line count comparison (phase should be >= source minus frontmatter)
    sourceLines: sourceContent.split('\n').length,
    phaseLines: phaseContent.split('\n').length
  };

  const issues = [];
  if (checks.phaseCodeBlocks < checks.sourceCodeBlocks) {
    issues.push(`Missing code blocks: source=${checks.sourceCodeBlocks}, phase=${checks.phaseCodeBlocks}`);
  }
  if (checks.phaseAgentCalls < checks.sourceAgentCalls) {
    issues.push(`Missing agent calls: source=${checks.sourceAgentCalls}, phase=${checks.phaseAgentCalls}`);
  }
  if (checks.phaseBashBlocks < checks.sourceBashBlocks) {
    issues.push(`Missing bash blocks: source=${checks.sourceBashBlocks}, phase=${checks.phaseBashBlocks}`);
  }
  if (checks.phaseTables < checks.sourceTables * 0.8) {
    issues.push(`Missing tables: source=${checks.sourceTables}, phase=${checks.phaseTables}`);
  }
  if (checks.phaseAUQ < checks.sourceAUQ) {
    issues.push(`Missing AskUserQuestion: source=${checks.sourceAUQ}, phase=${checks.phaseAUQ}`);
  }

  return { checks, issues, passed: issues.length === 0 };
}
```

### Handling Orchestrator-Level Content in Source Commands

Some commands mix orchestrator-level instructions (coordination, TodoWrite) with execution detail. Separation rules:

| Content in Source Command | Goes To | Rule |
|---------------------------|---------|------|
| Phase execution steps, agent prompts, bash commands | **Phase file** | Preserve verbatim |
| TodoWrite update examples specific to this phase | **Phase file** (optional) | Keep if useful for context |
| Inter-phase data passing code | **SKILL.md** Post-Phase Updates | Extract to orchestrator |
| Coordinator instructions ("after this phase, auto-continue") | **SKILL.md** Core Rules | Extract to orchestrator |
| Conditional logic ("if conflict_risk >= medium") | **SKILL.md** Execution Flow | Extract to orchestrator |

When in doubt, **keep content in the phase file**. It is better to have slight overlap than to lose execution detail; a minimal routing sketch follows.

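A rough sketch of how this routing could be mechanized. The keyword heuristics are assumptions for illustration, not rules stated by this skill:

```javascript
// Sketch: classify a source-command block as phase-level or orchestrator-level.
// Keyword heuristics are illustrative; real separation needs human review.
function routeContentBlock(block) {
  const orchestratorHints = [/auto-continue/i, /TodoWrite.*(attach|collapse)/i, /if\s+\w+_risk/i];
  if (orchestratorHints.some(rx => rx.test(block))) {
    return 'SKILL.md';    // coordination, TodoWrite choreography, conditional logic
  }
  return 'phase-file';    // default: keep execution detail with the phase
}
```
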
## Step 3.3: Mode B - Generate from Requirements

When the source is a text description, generate phase files interactively:

```javascript
function generatePhaseFromRequirements(phase, config) {
  let phaseContent = '';

  // Phase header
  phaseContent += `# Phase ${phase.number}: ${phase.name}\n\n`;
  phaseContent += `${phase.description}.\n\n`;

  // Objective
  phaseContent += `## Objective\n\n`;
  phaseContent += `- ${phase.description}\n`;
  if (phase.outputVariables.length > 0) {
    phaseContent += `- Produce: ${phase.outputVariables.join(', ')}\n`;
  }
  if (phase.outputFiles.length > 0) {
    phaseContent += `- Generate: ${phase.outputFiles.join(', ')}\n`;
  }
  phaseContent += '\n';

  // Execution steps
  phaseContent += `## Execution\n\n`;

  if (phase.usesAgents) {
    // Generate agent delegation skeleton
    for (const agentType of phase.agentTypes) {
      phaseContent += `### Step: ${agentType} Delegation\n\n`;
      phaseContent += '```javascript\n';
      phaseContent += `const result = Task({\n`;
      phaseContent += `  subagent_type: "${mapAgentType(agentType)}",\n`;
      phaseContent += `  prompt: \`\n`;
      phaseContent += `    [ROLE] ${agentType}\n`;
      phaseContent += `    [TASK] ${phase.description}\n`;
      phaseContent += `    [INPUT] \${inputData}\n`;
      phaseContent += `    [OUTPUT] \${outputPath}\n`;
      phaseContent += `  \`,\n`;
      phaseContent += `  run_in_background: false\n`;
      phaseContent += `});\n`;
      phaseContent += '```\n\n';
    }
  } else {
    // Generate direct execution skeleton
    phaseContent += `### Step ${phase.number}.1: Execute\n\n`;
    phaseContent += `TODO: Add execution detail for ${phase.name}\n\n`;
  }

  // Output
  phaseContent += `## Output\n\n`;
  phase.outputVariables.forEach(v => {
    phaseContent += `- **Variable**: \`${v}\`\n`;
  });
  phase.outputFiles.forEach(f => {
    phaseContent += `- **File**: \`${f}\`\n`;
  });
  phaseContent += `- **TodoWrite**: Mark Phase ${phase.number} completed\n\n`;

  // Next Phase
  const nextPhase = config.phases.find(p => p.number === phase.number + 1);
  if (nextPhase) {
    const nextFilename = `${String(nextPhase.number).padStart(2, '0')}-${nextPhase.slug}.md`;
    phaseContent += `## Next Phase\n\n`;
    phaseContent += `Return to orchestrator, then auto-continue to [Phase ${nextPhase.number}: ${nextPhase.name}](${nextFilename}).\n`;
  }

  return phaseContent;
}

// Map custom agent type names to Task subagent_types.
// The identity mapping doubles as a whitelist: unknown types fall back to general-purpose.
function mapAgentType(agentType) {
  const mapping = {
    'cli-explore-agent': 'cli-explore-agent',
    'context-search-agent': 'context-search-agent',
    'cli-execution-agent': 'cli-execution-agent',
    'action-planning-agent': 'action-planning-agent',
    'code-developer': 'code-developer',
    'test-fix-agent': 'test-fix-agent',
    'general-purpose': 'general-purpose',
    'Explore': 'Explore'
  };
  return mapping[agentType] || 'general-purpose';
}
```

## Step 3.4: Write Phase Files

```javascript
function writePhaseFiles(config) {
  const skillDir = `.claude/skills/${config.skillName}`;

  for (const phase of config.phases) {
    const filename = `${String(phase.number).padStart(2, '0')}-${phase.slug}.md`;
    const filepath = `${skillDir}/phases/${filename}`;

    const strategy = selectGenerationStrategy(phase, config);
    let content;

    switch (strategy) {
      case 'extract': {
        content = extractPhaseFromCommand(phase, config);
        // Verify content preservation
        const sourceContent = Read(phase.sourcePath);
        const verification = verifyContentPreservation(sourceContent, content);
        if (!verification.passed) {
          console.warn(`⚠️ Content preservation issues for Phase ${phase.number}:`);
          verification.issues.forEach(issue => console.warn(`  - ${issue}`));
          // Re-extract with more aggressive preservation
          content = extractPhaseFromCommand(phase, config, { aggressive: true });
        }
        break;
      }

      case 'generate':
        content = generatePhaseFromRequirements(phase, config);
        break;

      case 'restructure':
        content = restructureExistingPhase(phase, config);
        break;
    }

    Write(filepath, content);
    console.log(`✓ Generated: ${filepath} (${content.split('\n').length} lines)`);
  }
}
```

## Step 3.5: Cross-Phase Consistency Check

After generating all phase files, verify cross-phase consistency:

```javascript
function checkCrossPhaseConsistency(config) {
  const skillDir = `.claude/skills/${config.skillName}`;
  const issues = [];

  for (const phase of config.phases) {
    const filename = `${String(phase.number).padStart(2, '0')}-${phase.slug}.md`;
    const content = Read(`${skillDir}/phases/${filename}`);

    // Check: Next Phase links point to a real phase
    const nextPhaseMatch = content.match(/\[Phase (\d+): (.+?)\]\((.+?)\)/);
    if (nextPhaseMatch) {
      const nextNum = parseInt(nextPhaseMatch[1]);
      const nextPhase = config.phases.find(p => p.number === nextNum);
      if (!nextPhase) {
        issues.push(`Phase ${phase.number}: Next Phase link points to non-existent Phase ${nextNum}`);
      }
    }

    // Check: Output variables match config
    for (const varName of phase.outputVariables) {
      if (!content.includes(varName)) {
        issues.push(`Phase ${phase.number}: Output variable '${varName}' not mentioned in content`);
      }
    }
  }

  return issues;
}
```

## Size Comparison Reference

Expected phase file sizes relative to their source commands:

| Scenario | Phase File Size vs Source | Reason |
|----------|--------------------------|--------|
| Command extraction | ≥ 90% of source | Minor removals (H1 title, frontmatter) |
| New generation (with agents) | 50-200 lines | Agent prompt skeletons |
| New generation (direct) | 30-80 lines | Step skeletons |
| Restructure | ~100% of source | Content reorganization only |

**Red Flag**: If a phase file is significantly smaller than its source (< 70%), content was likely lost during extraction. Re-check with `verifyContentPreservation()`, as sketched below.

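For instance, a re-check after extraction might combine the 70% line-ratio red flag with the structural counters (a sketch using the helpers defined earlier in this phase):

```javascript
// Sketch: combine the line-ratio red flag with verifyContentPreservation().
function recheckPhaseFile(phase, skillDir) {
  const source = removeYAMLFrontmatter(Read(phase.sourcePath));
  const filename = `${String(phase.number).padStart(2, '0')}-${phase.slug}.md`;
  const generated = Read(`${skillDir}/phases/${filename}`);

  const ratio = generated.split('\n').length / source.split('\n').length;
  const { issues } = verifyContentPreservation(source, generated);
  return { redFlag: ratio < 0.7, ratio, issues };
}
```
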
## Output

- **Files**: `.claude/skills/{skillName}/phases/0N-{slug}.md` for each phase
- **TodoWrite**: Mark Phase 3 completed, Phase 4 in_progress

## Next Phase

Return to orchestrator, then auto-continue to [Phase 4: Validation & Integration](04-validation.md).

.claude/skills/workflow-skill-designer/phases/04-validation.md (new file, 397 lines)
@@ -0,0 +1,397 @@
# Phase 4: Validation & Integration

Validate the generated skill package for structural completeness, reference integrity, and content quality. Produce a validation report and integration summary.

## Objective

- Verify all required files exist
- Validate that SKILL.md references match actual phase files
- Check content preservation (for command extraction source)
- Verify cross-phase data flow consistency
- Report validation results to the user

## Step 4.1: Structural Validation

```javascript
function validateStructure(config) {
  const skillDir = `.claude/skills/${config.skillName}`;
  const results = { errors: [], warnings: [], info: [] };

  // Check SKILL.md exists
  const skillMdExists = fileExists(`${skillDir}/SKILL.md`);
  if (!skillMdExists) {
    results.errors.push('SKILL.md not found');
  }

  // Check all phase files exist
  for (const phase of config.phases) {
    const filename = `${String(phase.number).padStart(2, '0')}-${phase.slug}.md`;
    const filepath = `${skillDir}/phases/${filename}`;
    if (!fileExists(filepath)) {
      results.errors.push(`Phase file missing: ${filepath}`);
    }
  }

  // Check SKILL.md frontmatter
  if (skillMdExists) {
    const skillMd = Read(`${skillDir}/SKILL.md`);
    const fm = extractYAMLFrontmatter(skillMd);

    if (!fm.name) results.errors.push('Frontmatter missing: name');
    if (!fm.description) results.errors.push('Frontmatter missing: description');
    if (!fm['allowed-tools']) results.errors.push('Frontmatter missing: allowed-tools');

    // Check description has a trigger phrase
    if (fm.description && !fm.description.includes('Triggers on')) {
      results.warnings.push('Description missing trigger phrase (Triggers on "...")');
    }
  }

  return results;
}
```

## Step 4.2: Reference Integrity

```javascript
function validateReferences(config) {
  const skillDir = `.claude/skills/${config.skillName}`;
  const results = { errors: [], warnings: [], info: [] };
  const skillMd = Read(`${skillDir}/SKILL.md`);

  // Extract all Ref: markers from SKILL.md
  const refMarkers = skillMd.match(/Ref: phases\/\S+\.md/g) || [];
  const linkedFiles = skillMd.match(/\[phases\/\S+\.md\]\(phases\/\S+\.md\)/g) || [];

  // Collect all referenced phase files
  const referencedFiles = new Set();
  for (const ref of refMarkers) {
    referencedFiles.add(ref.replace('Ref: ', ''));
  }
  for (const link of linkedFiles) {
    const match = link.match(/\(phases\/\S+\.md\)/);
    if (match) referencedFiles.add(match[0].replace(/[()]/g, ''));
  }

  // Check each referenced file exists
  for (const refFile of referencedFiles) {
    if (!fileExists(`${skillDir}/${refFile}`)) {
      results.errors.push(`Referenced file not found: ${refFile}`);
    }
  }

  // Check each phase file is referenced in SKILL.md
  for (const phase of config.phases) {
    const filename = `phases/${String(phase.number).padStart(2, '0')}-${phase.slug}.md`;
    if (!referencedFiles.has(filename)) {
      results.warnings.push(`Phase file not referenced in SKILL.md: ${filename}`);
    }
  }

  // Check the Phase Reference Documents table exists
  if (!skillMd.includes('Phase Reference Documents')) {
    results.errors.push('SKILL.md missing Phase Reference Documents table');
  }

  // Check the Phase Reference Documents table has entries for all phases
  for (const phase of config.phases) {
    const filename = `${String(phase.number).padStart(2, '0')}-${phase.slug}.md`;
    if (!skillMd.includes(filename)) {
      results.errors.push(`Phase Reference table missing entry for: ${filename}`);
    }
  }

  return results;
}
```

## Step 4.3: Content Quality (Command Extraction Only)

```javascript
function validateContentQuality(config) {
  const skillDir = `.claude/skills/${config.skillName}`;
  const results = { errors: [], warnings: [], info: [] };

  if (config.source.type !== 'command_set') {
    results.info.push('Content quality check skipped (not command extraction)');
    return results;
  }

  for (const phase of config.phases) {
    if (!phase.sourcePath) continue;

    const sourceContent = Read(phase.sourcePath);
    const sourceBody = removeYAMLFrontmatter(sourceContent);
    const filename = `${String(phase.number).padStart(2, '0')}-${phase.slug}.md`;
    const phaseContent = Read(`${skillDir}/phases/${filename}`);

    // Line count comparison
    const sourceLines = sourceBody.split('\n').length;
    const phaseLines = phaseContent.split('\n').length;
    const ratio = phaseLines / sourceLines;

    if (ratio < 0.7) {
      results.errors.push(
        `Phase ${phase.number} content loss: source=${sourceLines} lines, phase=${phaseLines} lines (${Math.round(ratio * 100)}%)`
      );
    } else if (ratio < 0.9) {
      results.warnings.push(
        `Phase ${phase.number} possible content reduction: source=${sourceLines}, phase=${phaseLines} (${Math.round(ratio * 100)}%)`
      );
    } else {
      results.info.push(
        `Phase ${phase.number} content preserved: source=${sourceLines}, phase=${phaseLines} (${Math.round(ratio * 100)}%)`
      );
    }

    // Code block count comparison
    const sourceBlocks = (sourceBody.match(/```/g) || []).length / 2;
    const phaseBlocks = (phaseContent.match(/```/g) || []).length / 2;
    if (phaseBlocks < sourceBlocks) {
      results.warnings.push(
        `Phase ${phase.number} missing code blocks: source=${sourceBlocks}, phase=${phaseBlocks}`
      );
    }

    // Agent prompt preservation
    const sourceAgents = (sourceBody.match(/Task\(|subagent_type/g) || []).length;
    const phaseAgents = (phaseContent.match(/Task\(|subagent_type/g) || []).length;
    if (phaseAgents < sourceAgents) {
      results.errors.push(
        `Phase ${phase.number} missing agent calls: source=${sourceAgents}, phase=${phaseAgents}`
      );
    }
  }

  return results;
}
```

## Step 4.4: Data Flow Consistency

```javascript
function validateDataFlow(config) {
  const skillDir = `.claude/skills/${config.skillName}`;
  const results = { errors: [], warnings: [], info: [] };
  const skillMd = Read(`${skillDir}/SKILL.md`);

  // Check all data flow variables are mentioned in SKILL.md
  for (const flow of config.dataFlow) {
    for (const variable of flow.variables) {
      if (!skillMd.includes(variable)) {
        results.warnings.push(
          `Data flow variable '${variable}' (${flow.from} → ${flow.to}) not found in SKILL.md`
        );
      }
    }
  }

  // Check conditional phases have their condition in SKILL.md
  for (const phase of config.phases) {
    if (phase.isConditional && phase.condition) {
      // Extract the key variable from the condition
      const condVar = phase.condition.match(/\w+/)?.[0];
      if (condVar && !skillMd.includes(condVar)) {
        results.errors.push(
          `Conditional Phase ${phase.number} condition variable '${condVar}' not found in SKILL.md`
        );
      }
    }
  }

  return results;
}
```

## Step 4.5: SKILL.md Section Completeness

```javascript
function validateSkillMdSections(config) {
  const skillDir = `.claude/skills/${config.skillName}`;
  const results = { errors: [], warnings: [], info: [] };
  const skillMd = Read(`${skillDir}/SKILL.md`);

  // Required sections
  const requiredSections = [
    { name: 'Architecture Overview', pattern: /## Architecture Overview/ },
    { name: 'Execution Flow', pattern: /## Execution Flow/ },
    { name: 'Core Rules', pattern: /## Core Rules/ },
    { name: 'Data Flow', pattern: /## Data Flow/ },
    { name: 'Error Handling', pattern: /## Error Handling/ }
  ];

  // Recommended sections
  const recommendedSections = [
    { name: 'Key Design Principles', pattern: /## Key Design Principles/ },
    { name: 'Input Processing', pattern: /## Input Processing/ },
    { name: 'TodoWrite Pattern', pattern: /## TodoWrite Pattern/ },
    { name: 'Coordinator Checklist', pattern: /## Coordinator Checklist/ },
    { name: 'Related Commands', pattern: /## Related Commands/ }
  ];

  // Conditional sections
  const conditionalSections = [
    { name: 'Auto Mode', pattern: /## Auto Mode/, condition: config.features.hasAutoMode },
    { name: 'Post-Phase Updates', pattern: /## Post-Phase Updates/, condition: config.features.hasPostPhaseUpdates }
  ];

  for (const section of requiredSections) {
    if (!section.pattern.test(skillMd)) {
      results.errors.push(`Missing required section: ${section.name}`);
    }
  }

  for (const section of recommendedSections) {
    if (!section.pattern.test(skillMd)) {
      results.warnings.push(`Missing recommended section: ${section.name}`);
    }
  }

  for (const section of conditionalSections) {
    if (section.condition && !section.pattern.test(skillMd)) {
      results.warnings.push(`Missing conditional section: ${section.name} (feature enabled but section absent)`);
    }
  }

  return results;
}
```

## Step 4.6: Aggregate Results and Report

```javascript
function generateValidationReport(config) {
  const structural = validateStructure(config);
  const references = validateReferences(config);
  const content = validateContentQuality(config);
  const dataFlow = validateDataFlow(config);
  const sections = validateSkillMdSections(config);

  // Aggregate
  const allErrors = [
    ...structural.errors,
    ...references.errors,
    ...content.errors,
    ...dataFlow.errors,
    ...sections.errors
  ];
  const allWarnings = [
    ...structural.warnings,
    ...references.warnings,
    ...content.warnings,
    ...dataFlow.warnings,
    ...sections.warnings
  ];
  const allInfo = [
    ...structural.info,
    ...references.info,
    ...content.info,
    ...dataFlow.info,
    ...sections.info
  ];

  // Quality gate
  const gate = allErrors.length === 0 ? 'PASS' :
               allErrors.length <= 2 ? 'REVIEW' : 'FAIL';

  // Display report
  const skillDir = `.claude/skills/${config.skillName}`;

  console.log(`
╔══════════════════════════════════════╗
║   Workflow Skill Validation Report   ║
╠══════════════════════════════════════╣
║ Skill: ${config.skillName.padEnd(28)}  ║
║ Gate:  ${gate.padEnd(28)}  ║
╚══════════════════════════════════════╝

Structure:
  ${skillDir}/
  ├── SKILL.md ${fileExists(`${skillDir}/SKILL.md`) ? '✓' : '✗'}
  └── phases/
${config.phases.map(p => {
  const fn = `${String(p.number).padStart(2, '0')}-${p.slug}.md`;
  return `      ├── ${fn.padEnd(30)} ${fileExists(`${skillDir}/phases/${fn}`) ? '✓' : '✗'}`;
}).join('\n')}

${allErrors.length > 0 ? `Errors (${allErrors.length}):\n${allErrors.map(e => `  ✗ ${e}`).join('\n')}` : 'Errors: None ✓'}

${allWarnings.length > 0 ? `Warnings (${allWarnings.length}):\n${allWarnings.map(w => `  ⚠ ${w}`).join('\n')}` : 'Warnings: None ✓'}

${allInfo.length > 0 ? `Info:\n${allInfo.map(i => `  ℹ ${i}`).join('\n')}` : ''}
`);

  return { gate, errors: allErrors, warnings: allWarnings, info: allInfo };
}
```

## Step 4.7: Error Recovery

If validation fails, offer recovery options:

```javascript
if (report.gate === 'FAIL') {
  const recovery = AskUserQuestion({
    questions: [{
      question: `Validation found ${report.errors.length} errors. How to proceed?`,
      header: "Recovery",
      multiSelect: false,
      options: [
        { label: "Auto-fix", description: "Attempt automatic fixes for common issues" },
        { label: "Regenerate phases", description: "Re-run Phase 3 with stricter preservation" },
        { label: "Accept as-is", description: "Proceed despite errors (manual fix later)" }
      ]
    }]
  });

  if (recovery === 'Auto-fix') {
    // Common auto-fixes:
    //   1. Missing Next Phase links → add them
    //   2. Missing Output sections → add from config
    //   3. Missing Phase Reference table → generate from config
    autoFixCommonIssues(config, report.errors);
    // Re-validate
    return generateValidationReport(config);
  }
}
```

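`autoFixCommonIssues` is referenced above but not defined in this document. A minimal sketch, under the same assumptions as Phases 2-3 (the `Read`/`Write` helpers and config shape), might look like this:

```javascript
// Sketch of autoFixCommonIssues: repair the issue classes listed above.
function autoFixCommonIssues(config, errors) {
  const skillDir = `.claude/skills/${config.skillName}`;

  for (const phase of config.phases) {
    const filename = `${String(phase.number).padStart(2, '0')}-${phase.slug}.md`;
    const filepath = `${skillDir}/phases/${filename}`;
    let content = Read(filepath);
    let changed = false;

    // 1. Missing Next Phase link → append one
    const nextPhase = config.phases.find(p => p.number === phase.number + 1);
    if (nextPhase && !content.includes('## Next Phase')) {
      const nextFn = `${String(nextPhase.number).padStart(2, '0')}-${nextPhase.slug}.md`;
      content += `\n## Next Phase\n\nReturn to orchestrator, then auto-continue to [Phase ${nextPhase.number}: ${nextPhase.name}](${nextFn}).\n`;
      changed = true;
    }

    // 2. Missing Output section → add a minimal one from config
    if (!content.includes('## Output')) {
      content += `\n## Output\n\n- **TodoWrite**: Mark Phase ${phase.number} completed\n`;
      changed = true;
    }

    if (changed) Write(filepath, content);
  }
  // 3. A missing Phase Reference table would be regenerated into SKILL.md here
}
```
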
## Step 4.8: Integration Summary

```javascript
function displayIntegrationSummary(config) {
  console.log(`
Integration Complete:
  Location: .claude/skills/${config.skillName}/
  Files: ${config.phases.length + 1} (SKILL.md + ${config.phases.length} phases)

Usage:
  Trigger: ${config.triggers.map(t => `"${t}"`).join(', ')}
  Auto: /${config.triggers[0]} --yes "task description"

Design Patterns Applied:
  ✓ Progressive phase loading (Ref: markers)
  ✓ Phase Reference Documents table
  ${config.features.hasTodoWriteSubTasks ? '✓' : '○'} TodoWrite attachment/collapse
  ${config.features.hasConditionalPhases ? '✓' : '○'} Conditional phase execution
  ${config.features.hasAutoMode ? '✓' : '○'} Auto mode (--yes flag)
  ${config.features.hasPostPhaseUpdates ? '✓' : '○'} Post-phase state updates
  ${config.features.hasPlanningNotes ? '✓' : '○'} Accumulated planning notes

Next Steps:
  1. Review SKILL.md orchestrator logic
  2. Review each phase file for completeness
  3. Test skill invocation: /${config.triggers[0]} "test task"
  4. Iterate based on execution results
`);
}
```

## Output

- **Report**: Validation results with quality gate (PASS/REVIEW/FAIL)
- **TodoWrite**: Mark Phase 4 completed (all tasks done)

## Completion

Workflow Skill Designer has completed. The generated skill package is ready at `.claude/skills/{skillName}/`.

@@ -1,102 +0,0 @@
/**
 * JSON Detection Result
 */
export interface JsonDetectionResult {
  isJson: boolean;
  parsed?: Record<string, unknown>;
  error?: string;
}

/**
 * Detect if a line contains JSON data.
 * Supports multiple formats:
 * - Direct JSON: {...} or [...]
 * - Tool Call: [Tool] toolName({...})
 * - Tool Result: [Tool Result] status: {...}
 * - Embedded JSON: trailing JSON object
 * - Code block JSON: ```json ... ```
 *
 * @param content - The content line to detect JSON in
 * @returns Detection result with parsed data if valid JSON found
 */
export function detectJsonInLine(content: string): JsonDetectionResult {
  const trimmed = content.trim();

  // 1. Direct JSON object or array
  if (trimmed.startsWith('{') || trimmed.startsWith('[')) {
    try {
      const parsed = JSON.parse(trimmed);
      return { isJson: true, parsed: parsed as Record<string, unknown> };
    } catch {
      // Continue to other patterns
    }
  }

  // 2. Tool Call format: [Tool] toolName({...})
  const toolCallMatch = trimmed.match(/^\[Tool\]\s+(\w+)\((.*)\)$/);
  if (toolCallMatch) {
    const [, toolName, paramsStr] = toolCallMatch;
    let parameters: unknown;

    try {
      parameters = paramsStr ? JSON.parse(paramsStr) : {};
    } catch {
      parameters = paramsStr || null;
    }

    return {
      isJson: true,
      parsed: {
        action: 'invoke',
        toolName,
        parameters,
      } as Record<string, unknown>,
    };
  }

  // 3. Tool Result format: [Tool Result] status: output
  const toolResultMatch = trimmed.match(/^\[Tool Result\]\s+(.+?)\s*:\s*(.+)$/);
  if (toolResultMatch) {
    const [, status, outputStr] = toolResultMatch;
    let output: unknown;

    try {
      output = outputStr.startsWith('{') ? JSON.parse(outputStr) : outputStr;
    } catch {
      output = outputStr;
    }

    return {
      isJson: true,
      parsed: {
        action: 'result',
        status,
        output,
      } as Record<string, unknown>,
    };
  }

  // 4. Embedded JSON at end of line
  const embeddedJsonMatch = trimmed.match(/\{.*\}$/);
  if (embeddedJsonMatch) {
    try {
      const parsed = JSON.parse(embeddedJsonMatch[0]);
      return { isJson: true, parsed: parsed as Record<string, unknown> };
    } catch {
      // Not valid JSON
    }
  }

  // 5. Code block JSON
  const codeBlockMatch = trimmed.match(/```(?:json)?\s*\n([\s\S]*?)\n```/);
  if (codeBlockMatch) {
    try {
      const parsed = JSON.parse(codeBlockMatch[1]);
      return { isJson: true, parsed: parsed as Record<string, unknown> };
    } catch {
      return { isJson: false, error: 'Invalid JSON in code block' };
    }
  }

  return { isJson: false };
}
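
// Usage sketch — hypothetical sample lines, one per detection branch above:
const sampleLines = [
  '{"ok": true}',                           // 1. direct JSON
  '[Tool] readFile({"path": "a.txt"})',     // 2. tool call → { action: 'invoke', ... }
  '[Tool Result] success: {"lines": 3}',    // 3. tool result → { action: 'result', ... }
  'log tail {"code": 0}',                   // 4. embedded JSON
];
sampleLines.forEach(line => console.assert(detectJsonInLine(line).isJson));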
@@ -1,4 +0,0 @@
{
  "status": "failed",
  "failedTests": []
}
File diff suppressed because it is too large
@@ -1,329 +0,0 @@
/**
 * CCW Loop System - Simplified Flow State Test
 * Tests the complete Loop system flow with mock endpoints
 */

import { writeFileSync, readFileSync, existsSync, mkdirSync, unlinkSync } from 'fs';
import { join } from 'path';

// Test configuration
const TEST_WORKSPACE = join(process.cwd(), '.test-loop-workspace');
const TEST_STATE_DIR = join(TEST_WORKSPACE, '.workflow');
const TEST_TASKS_DIR = join(TEST_WORKSPACE, '.task');

// Test results
const results: { name: string; passed: boolean; error?: string }[] = [];

function log(msg: string) { console.log(msg); }
function assert(condition: boolean, message: string) {
  if (!condition) {
    throw new Error(`Assertion failed: ${message}`);
  }
}

/**
 * Setup test workspace
 */
function setup() {
  log('🔧 Setting up test workspace...');

  if (!existsSync(TEST_STATE_DIR)) mkdirSync(TEST_STATE_DIR, { recursive: true });
  if (!existsSync(TEST_TASKS_DIR)) mkdirSync(TEST_TASKS_DIR, { recursive: true });

  // Create test task
  const testTask = {
    id: 'TEST-LOOP-1',
    title: 'Test Loop',
    status: 'active',
    loop_control: {
      enabled: true,
      max_iterations: 3,
      success_condition: 'state_variables.test_result === "pass"',
      error_policy: { on_failure: 'pause' },
      cli_sequence: [
        { step_id: 'run_test', tool: 'bash', command: 'npm test' },
        { step_id: 'analyze', tool: 'gemini', mode: 'analysis', prompt_template: 'Analyze: [run_test_stdout]' }
      ]
    }
  };

  // Write synchronously so the task file exists before any test reads it
  writeFileSync(join(TEST_TASKS_DIR, 'TEST-LOOP-1.json'), JSON.stringify(testTask, null, 2));

  log('✅ Test workspace ready');
}
/**
 * Cleanup
 */
function cleanup() {
  try {
    if (existsSync(join(TEST_STATE_DIR, 'loop-state.json'))) {
      unlinkSync(join(TEST_STATE_DIR, 'loop-state.json'));
    }
    log('🧹 Cleaned up');
  } catch (e) {
    // Ignore
  }
}

/**
 * Test runner
 */
async function runTest(name: string, fn: () => Promise<void> | void) {
  process.stdout.write(`  ○ ${name}... `);
  try {
    await fn();
    results.push({ name, passed: true });
    log('✓');
  } catch (error) {
    results.push({ name, passed: false, error: (error as Error).message });
    log(`✗ ${(error as Error).message}`);
  }
}

/**
 * Create initial state
 */
function createInitialState() {
  const state = {
    loop_id: 'loop-TEST-LOOP-1-' + Date.now(),
    task_id: 'TEST-LOOP-1',
    status: 'created',
    current_iteration: 0,
    max_iterations: 3,
    current_cli_step: 0,
    cli_sequence: [
      { step_id: 'run_test', tool: 'bash', command: 'npm test' },
      { step_id: 'analyze', tool: 'gemini', mode: 'analysis', prompt_template: 'Analyze: [run_test_stdout]' }
    ],
    session_mapping: {},
    state_variables: {},
    error_policy: { on_failure: 'pause', max_retries: 3 },
    created_at: new Date().toISOString(),
    updated_at: new Date().toISOString()
  };

  // Synchronous write: the tests read this file back immediately
  writeFileSync(join(TEST_STATE_DIR, 'loop-state.json'), JSON.stringify(state, null, 2));

  return state;
}
/**
 * Run all tests
 */
async function runAllTests() {
  log('\n🧪 CCW LOOP SYSTEM - FLOW STATE TEST');
  log('='.repeat(50));

  setup();

  // Test 1: State Creation
  log('\n📋 State Creation Tests:');
  await runTest('Initial state is "created"', async () => {
    const state = createInitialState();
    assert(state.status === 'created', 'status should be created');
    assert(state.current_iteration === 0, 'iteration should be 0');
  });

  // Test 2: State Transitions
  log('\n📋 State Transition Tests:');
  await runTest('created -> running', async () => {
    const state = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    state.status = 'running';
    state.updated_at = new Date().toISOString();
    writeFileSync(join(TEST_STATE_DIR, 'loop-state.json'), JSON.stringify(state, null, 2));

    const updated = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    assert(updated.status === 'running', 'status should be running');
  });

  await runTest('running -> paused', async () => {
    const state = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    state.status = 'paused';
    writeFileSync(join(TEST_STATE_DIR, 'loop-state.json'), JSON.stringify(state, null, 2));

    const updated = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    assert(updated.status === 'paused', 'status should be paused');
  });

  await runTest('paused -> running', async () => {
    const state = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    state.status = 'running';
    writeFileSync(join(TEST_STATE_DIR, 'loop-state.json'), JSON.stringify(state, null, 2));

    const updated = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    assert(updated.status === 'running', 'status should be running');
  });

  await runTest('running -> completed', async () => {
    const state = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    state.status = 'completed';
    state.completed_at = new Date().toISOString();
    writeFileSync(join(TEST_STATE_DIR, 'loop-state.json'), JSON.stringify(state, null, 2));

    const updated = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    assert(updated.status === 'completed', 'status should be completed');
    assert(updated.completed_at, 'should have completed_at');
  });

  // Test 3: Iteration Control
  log('\n📋 Iteration Control Tests:');
  await runTest('Iteration increments', async () => {
    const state = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    state.status = 'running';
    state.current_iteration = 1;
    writeFileSync(join(TEST_STATE_DIR, 'loop-state.json'), JSON.stringify(state, null, 2));

    const updated = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    assert(updated.current_iteration === 1, 'iteration should increment');
  });

  await runTest('Max iterations respected', async () => {
    const state = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    state.current_iteration = 3;
    state.max_iterations = 3;
    state.status = 'completed';
    writeFileSync(join(TEST_STATE_DIR, 'loop-state.json'), JSON.stringify(state, null, 2));

    const updated = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    assert(updated.current_iteration <= updated.max_iterations, 'should not exceed max');
  });

  // Test 4: CLI Step Control
  log('\n📋 CLI Step Control Tests:');
  await runTest('Step index increments', async () => {
    const state = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    state.current_cli_step = 1;
    writeFileSync(join(TEST_STATE_DIR, 'loop-state.json'), JSON.stringify(state, null, 2));

    const updated = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    assert(updated.current_cli_step === 1, 'step should increment');
  });

  await runTest('Step resets on new iteration', async () => {
    const state = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    state.current_iteration = 2;
    state.current_cli_step = 0;
    writeFileSync(join(TEST_STATE_DIR, 'loop-state.json'), JSON.stringify(state, null, 2));

    const updated = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    assert(updated.current_cli_step === 0, 'step should reset');
  });

  // Test 5: Variable Substitution
  log('\n📋 Variable Substitution Tests:');
  await runTest('Variables are stored', async () => {
    const state = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    state.state_variables = { test_result: 'pass', output: 'Success!' };
    writeFileSync(join(TEST_STATE_DIR, 'loop-state.json'), JSON.stringify(state, null, 2));

    const updated = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    assert(updated.state_variables.test_result === 'pass', 'variable should be stored');
  });

  await runTest('Template substitution works', async () => {
    const template = 'Result: [test_result]';
    const vars = { test_result: 'pass' };
    const result = template.replace(/\[(\w+)\]/g, (_, key) => vars[key as keyof typeof vars] || `[${key}]`);
    assert(result === 'Result: pass', 'substitution should work');
  });

  // Test 6: Success Condition
  log('\n📋 Success Condition Tests:');
  await runTest('Simple condition passes', async () => {
    // Simulate evaluating: state_variables.test_result === "pass"
    // (see the evalSuccessCondition sketch below for a real evaluation)
    const vars = { test_result: 'pass' };
    const pass = vars.test_result === 'pass';
    assert(pass === true, 'condition should pass');
  });
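
  // Illustrative sketch: how a success_condition string could actually be
  // evaluated against state_variables. Assumes conditions are trusted
  // expressions; production code would want a sandboxed evaluator.
  function evalSuccessCondition(condition: string, state_variables: Record<string, unknown>): boolean {
    try {
      return Boolean(new Function('state_variables', `return (${condition});`)(state_variables));
    } catch {
      return false;
    }
  }
  // e.g. evalSuccessCondition('state_variables.test_result === "pass"', { test_result: 'pass' }) → true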
  await runTest('Complex condition with regex', async () => {
    const output = 'Average: 35ms, Min: 28ms';
    const match = output.match(/Average: ([\d.]+)ms/);
    const avg = parseFloat(match?.[1] || '1000');
    const pass = avg < 50;
    assert(pass === true, 'complex condition should pass');
  });

  // Test 7: Error Handling
  log('\n📋 Error Handling Tests:');
  await runTest('pause policy on error', async () => {
    const state = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    state.status = 'paused';
    state.failure_reason = 'Test failed';
    writeFileSync(join(TEST_STATE_DIR, 'loop-state.json'), JSON.stringify(state, null, 2));

    const updated = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    assert(updated.status === 'paused', 'should pause on error');
    assert(updated.failure_reason, 'should have failure reason');
  });

  await runTest('fail_fast policy', async () => {
    const state = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    state.status = 'failed';
    state.failure_reason = 'Critical error';
    writeFileSync(join(TEST_STATE_DIR, 'loop-state.json'), JSON.stringify(state, null, 2));

    const updated = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    assert(updated.status === 'failed', 'should fail immediately');
  });

  // Test 8: Execution History
  log('\n📋 Execution History Tests:');
  await runTest('History records are stored', async () => {
    const state = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    state.execution_history = [
      {
        iteration: 1,
        step_index: 0,
        step_id: 'run_test',
        tool: 'bash',
        started_at: new Date().toISOString(),
        completed_at: new Date().toISOString(),
        duration_ms: 100,
        success: true,
        exit_code: 0,
        stdout: 'Tests passed',
        stderr: ''
      }
    ];
    writeFileSync(join(TEST_STATE_DIR, 'loop-state.json'), JSON.stringify(state, null, 2));

    const updated = JSON.parse(readFileSync(join(TEST_STATE_DIR, 'loop-state.json'), 'utf-8'));
    assert(updated.execution_history?.length === 1, 'should have history');
  });
  // Summary
  log('\n' + '='.repeat(50));
  log('📊 TEST SUMMARY');
  const passed = results.filter(r => r.passed).length;
  const failed = results.filter(r => !r.passed).length;
  log(`  Total: ${results.length}`);
  log(`  Passed: ${passed} ✓`);
  log(`  Failed: ${failed} ✗`);

  if (failed > 0) {
    log('\n❌ Failed:');
    results.filter(r => !r.passed).forEach(r => {
      log(`  - ${r.name}: ${r.error}`);
    });
  }

  cleanup();

  return failed === 0 ? 0 : 1;
}

// Run tests
runAllTests().then(exitCode => {
  process.exit(exitCode);
}).catch(err => {
  console.error('Test error:', err);
  process.exit(1);
});
@@ -1,565 +0,0 @@
/**
 * CCW Loop System - Standalone Flow State Test
 * Tests the Loop system without requiring the server to be running
 */

import { writeFileSync, readFileSync, existsSync, mkdirSync, rmSync } from 'fs';
import { join } from 'path';

// ANSI colors
const colors = {
  reset: '\x1b[0m',
  green: '\x1b[32m',
  red: '\x1b[31m',
  yellow: '\x1b[33m',
  blue: '\x1b[34m',
  cyan: '\x1b[36m'
};

function log(color: string, msg: string) {
  console.log(`${color}${msg}${colors.reset}`);
}

function assert(condition: boolean, message: string) {
  if (!condition) {
    throw new Error(`Assertion failed: ${message}`);
  }
}

// Test workspace
const TEST_WORKSPACE = join(process.cwd(), '.test-loop-workspace');
const TEST_STATE_DIR = join(TEST_WORKSPACE, '.workflow');
const TEST_STATE_FILE = join(TEST_STATE_DIR, 'loop-state.json');

// Test results
interface TestResult {
  name: string;
  passed: boolean;
  error?: string;
  duration?: number;
}
const results: TestResult[] = [];
/**
 * Setup test workspace
 */
function setupTestWorkspace() {
  log(colors.blue, '🔧 Setting up test workspace...');

  // Clean and recreate directories (rmSync handles nested dirs like .workflow,
  // which per-file unlinkSync would fail on)
  if (existsSync(TEST_WORKSPACE)) {
    rmSync(TEST_WORKSPACE, { recursive: true, force: true });
  }

  if (!existsSync(TEST_STATE_DIR)) {
    mkdirSync(TEST_STATE_DIR, { recursive: true });
  }

  log(colors.green, '✅ Test workspace ready');
}
/**
|
||||
* Create initial loop state
|
||||
*/
|
||||
function createInitialState(taskId: string = 'TEST-LOOP-1') {
|
||||
const loopId = `loop-${taskId}-${Date.now()}`;
|
||||
const state = {
|
||||
loop_id: loopId,
|
||||
task_id: taskId,
|
||||
status: 'created',
|
||||
current_iteration: 0,
|
||||
max_iterations: 5,
|
||||
current_cli_step: 0,
|
||||
cli_sequence: [
|
||||
{ step_id: 'run_tests', tool: 'bash', command: 'npm test' },
|
||||
{ step_id: 'analyze_failure', tool: 'gemini', mode: 'analysis', prompt_template: 'Analyze: [run_tests_stdout]' },
|
||||
{ step_id: 'apply_fix', tool: 'codex', mode: 'write', prompt_template: 'Fix: [analyze_failure_stdout]' }
|
||||
],
|
||||
session_mapping: {},
|
||||
state_variables: {},
|
||||
error_policy: { on_failure: 'pause', max_retries: 3 },
|
||||
created_at: new Date().toISOString(),
|
||||
updated_at: new Date().toISOString()
|
||||
};
|
||||
|
||||
writeFileSync(TEST_STATE_FILE, JSON.stringify(state, null, 2));
|
||||
return state;
|
||||
}

/**
 * Read current state
 */
function readState() {
  return JSON.parse(readFileSync(TEST_STATE_FILE, 'utf-8'));
}

/**
 * Write state
 */
function writeState(state: any) {
  state.updated_at = new Date().toISOString();
  writeFileSync(TEST_STATE_FILE, JSON.stringify(state, null, 2));
}
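
// Every write rewrites the whole JSON file and bumps updated_at, so the
// on-disk state is always a complete, self-contained snapshot.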

/**
 * Run a single test
 */
async function runTest(name: string, fn: () => void | Promise<void>) {
  const start = Date.now();
  process.stdout.write(`  ○ ${name}... `);

  try {
    await fn();
    const duration = Date.now() - start;
    results.push({ name, passed: true, duration });
    log(colors.green, `✓ (${duration}ms)`);
  } catch (error) {
    const duration = Date.now() - start;
    results.push({ name, passed: false, error: (error as Error).message, duration });
    log(colors.red, `✗ ${(error as Error).message}`);
  }
}

/**
 * Main test runner
 */
async function runAllTests() {
  log(colors.cyan, '\n' + '='.repeat(55));
  log(colors.cyan, '🧪 CCW LOOP SYSTEM - STANDALONE FLOW STATE TEST');
  log(colors.cyan, '='.repeat(55));

  setupTestWorkspace();

  // ============================================
  // TEST SUITE 1: STATE CREATION
  // ============================================
  log(colors.blue, '\n📋 TEST SUITE 1: STATE CREATION');

  await runTest('Initial state has correct structure', () => {
    const state = createInitialState();
    assert(state.loop_id.startsWith('loop-'), 'loop_id should start with "loop-"');
    assert(state.status === 'created', 'status should be "created"');
    assert(state.current_iteration === 0, 'iteration should be 0');
    assert(state.current_cli_step === 0, 'cli_step should be 0');
    assert(state.cli_sequence.length === 3, 'should have 3 cli steps');
    assert(Object.keys(state.state_variables).length === 0, 'variables should be empty');
  });

  await runTest('Timestamps are valid ISO strings', () => {
    const state = createInitialState();
    assert(!isNaN(Date.parse(state.created_at)), 'created_at should be valid date');
    assert(!isNaN(Date.parse(state.updated_at)), 'updated_at should be valid date');
  });

  // ============================================
  // TEST SUITE 2: STATE TRANSITIONS
  // ============================================
  log(colors.blue, '\n📋 TEST SUITE 2: STATE TRANSITIONS');

  await runTest('created -> running', () => {
    const state = readState();
    state.status = 'running';
    writeState(state);

    const updated = readState();
    assert(updated.status === 'running', 'status should be running');
  });

  await runTest('running -> paused', () => {
    const state = readState();
    state.status = 'paused';
    writeState(state);

    const updated = readState();
    assert(updated.status === 'paused', 'status should be paused');
  });

  await runTest('paused -> running (resume)', () => {
    const state = readState();
    state.status = 'running';
    writeState(state);

    const updated = readState();
    assert(updated.status === 'running', 'status should be running');
  });

  await runTest('running -> completed', () => {
    const state = readState();
    state.status = 'completed';
    state.completed_at = new Date().toISOString();
    writeState(state);

    const updated = readState();
    assert(updated.status === 'completed', 'status should be completed');
    assert(updated.completed_at, 'should have completed_at timestamp');
  });

  await runTest('running -> failed with reason', () => {
    // Create new state for this test
    createInitialState('TEST-FAIL-1');
    const state = readState();
    state.status = 'failed';
    state.failure_reason = 'Max retries exceeded';
    writeState(state);

    const updated = readState();
    assert(updated.status === 'failed', 'status should be failed');
    assert(updated.failure_reason === 'Max retries exceeded', 'should have failure reason');
  });
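
  // Observed lifecycle: created -> running -> (paused <-> running) ->
  // completed | failed. These writes go straight to the state file; the
  // server presumably enforces the same transitions through its loop API.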

  // ============================================
  // TEST SUITE 3: ITERATION CONTROL
  // ============================================
  log(colors.blue, '\n📋 TEST SUITE 3: ITERATION CONTROL');

  createInitialState('TEST-ITER-1');

  await runTest('Iteration increments', () => {
    const state = readState();
    state.current_iteration = 1;
    writeState(state);

    const updated = readState();
    assert(updated.current_iteration === 1, 'iteration should increment');
  });

  await runTest('Iteration respects max_iterations', () => {
    const state = readState();
    state.current_iteration = 5;
    state.max_iterations = 5;
    state.status = 'completed';
    writeState(state);

    const updated = readState();
    assert(updated.current_iteration <= updated.max_iterations, 'cannot exceed max iterations');
  });

  await runTest('CLI step increments within iteration', () => {
    const state = readState();
    state.current_cli_step = 1;
    writeState(state);

    const updated = readState();
    assert(updated.current_cli_step === 1, 'cli_step should increment');
  });

  await runTest('CLI step resets on new iteration', () => {
    const state = readState();
    state.current_iteration = 2;
    state.current_cli_step = 0;
    writeState(state);

    const updated = readState();
    assert(updated.current_iteration === 2, 'iteration should be 2');
    assert(updated.current_cli_step === 0, 'cli_step should reset to 0');
  });

  await runTest('CLI step cannot exceed sequence length', () => {
    const state = readState();
    state.current_cli_step = state.cli_sequence.length - 1;
    writeState(state);

    const updated = readState();
    assert(updated.current_cli_step < updated.cli_sequence.length, 'cli_step must be within bounds');
  });

  // ============================================
  // TEST SUITE 4: VARIABLE SUBSTITUTION
  // ============================================
  log(colors.blue, '\n📋 TEST SUITE 4: VARIABLE SUBSTITUTION');

  createInitialState('TEST-VAR-1');

  await runTest('Variables are stored after step execution', () => {
    const state = readState();
    state.state_variables = {
      run_tests_stdout: 'Tests: 15 passed',
      run_tests_stderr: '',
      run_tests_exit_code: '0'
    };
    writeState(state);

    const updated = readState();
    assert(updated.state_variables.run_tests_stdout === 'Tests: 15 passed', 'variable should be stored');
  });

  await runTest('Simple template substitution works', () => {
    const template = 'Result: [run_tests_stdout]';
    const vars = { run_tests_stdout: 'Tests: 15 passed' };
    const result = template.replace(/\[(\w+)\]/g, (_, key) => vars[key as keyof typeof vars] || `[${key}]`);

    assert(result === 'Result: Tests: 15 passed', 'substitution should work');
  });

  await runTest('Multiple variable substitution', () => {
    const template = 'Stdout: [run_tests_stdout]\nStderr: [run_tests_stderr]';
    const vars = {
      run_tests_stdout: 'Tests passed',
      run_tests_stderr: 'No errors'
    };
    const result = template.replace(/\[(\w+)\]/g, (_, key) => vars[key as keyof typeof vars] || `[${key}]`);

    assert(result.includes('Tests passed'), 'should substitute first variable');
    assert(result.includes('No errors'), 'should substitute second variable');
  });

  await runTest('Missing variable preserves placeholder', () => {
    const template = 'Result: [missing_var]';
    const vars: Record<string, string> = {};
    const result = template.replace(/\[(\w+)\]/g, (_, key) => vars[key] || `[${key}]`);

    assert(result === 'Result: [missing_var]', 'missing var should preserve placeholder');
  });
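
  // The replace() one-liner above could back a shared helper; a minimal
  // sketch (not necessarily the production implementation):
  //   const substitute = (tpl: string, vars: Record<string, string>) =>
  //     tpl.replace(/\[(\w+)\]/g, (_, k) => vars[k] ?? `[${k}]`);
  // Using ?? instead of || would also preserve intentionally empty values
  // such as run_tests_stderr above.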

  // ============================================
  // TEST SUITE 5: SUCCESS CONDITION EVALUATION
  // ============================================
  log(colors.blue, '\n📋 TEST SUITE 5: SUCCESS CONDITIONS');

  createInitialState('TEST-SUCCESS-1');

  await runTest('Simple string equality check', () => {
    const state = readState();
    state.state_variables = { test_result: 'pass' };
    const success = state.state_variables.test_result === 'pass';

    assert(success === true, 'simple equality should work');
  });

  await runTest('String includes check', () => {
    const output = 'Tests: 15 passed, 0 failed';
    const success = output.includes('15 passed');

    assert(success === true, 'includes check should work');
  });

  await runTest('Regex extraction and comparison', () => {
    const output = 'Average: 35ms, Min: 28ms, Max: 42ms';
    const match = output.match(/Average: ([\d.]+)ms/);
    const avgTime = parseFloat(match?.[1] || '1000');
    const success = avgTime < 50;

    assert(avgTime === 35, 'regex should extract number');
    assert(success === true, 'comparison should work');
  });

  await runTest('Combined AND condition', () => {
    const vars = { test_result: 'pass', coverage: '90%' };
    const success = vars.test_result === 'pass' && parseInt(vars.coverage) > 80;

    assert(success === true, 'AND condition should work');
  });

  await runTest('Combined OR condition', () => {
    const output = 'Status: approved';
    const success = output.includes('approved') || output.includes('LGTM');

    assert(success === true, 'OR condition should work');
  });

  await runTest('Negation condition', () => {
    // Output must not contain the negated term for the condition to pass
    const output = 'Tests: 15 passed';
    const success = !output.includes('failed');

    assert(success === true, 'negation should work');
  });
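
  // These mirror the expression styles a task's success_condition string is
  // expected to use (e.g. "state_variables.test_result === 'pass'"); here
  // they are evaluated inline rather than parsed from a task definition.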

  // ============================================
  // TEST SUITE 6: ERROR HANDLING POLICIES
  // ============================================
  log(colors.blue, '\n📋 TEST SUITE 6: ERROR HANDLING');

  createInitialState('TEST-ERROR-1');

  await runTest('pause policy stops loop on error', () => {
    const state = readState();
    state.error_policy = { on_failure: 'pause', max_retries: 3 };
    state.status = 'paused';
    state.failure_reason = 'Step failed with exit code 1';
    writeState(state);

    const updated = readState();
    assert(updated.status === 'paused', 'should be paused');
    assert(updated.failure_reason, 'should have failure reason');
  });

  await runTest('fail_fast policy immediately fails loop', () => {
    createInitialState('TEST-ERROR-2');
    const state = readState();
    state.error_policy = { on_failure: 'fail_fast', max_retries: 0 };
    state.status = 'failed';
    state.failure_reason = 'Critical error';
    writeState(state);

    const updated = readState();
    assert(updated.status === 'failed', 'should be failed');
  });

  await runTest('continue policy allows proceeding', () => {
    createInitialState('TEST-ERROR-3');
    const state = readState();
    state.error_policy = { on_failure: 'continue', max_retries: 3 };
    // Simulate continuing to next step despite error; the loop stays running
    state.status = 'running';
    state.current_cli_step = 1;
    writeState(state);

    const updated = readState();
    assert(updated.current_cli_step === 1, 'should move to next step');
    assert(updated.status === 'running', 'should still be running');
  });
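
  // Three policies exercised here: 'pause' halts the loop for inspection,
  // 'fail_fast' marks it failed immediately, and 'continue' advances to the
  // next step; max_retries presumably bounds retry attempts before the
  // policy takes effect.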

  // ============================================
  // TEST SUITE 7: EXECUTION HISTORY
  // ============================================
  log(colors.blue, '\n📋 TEST SUITE 7: EXECUTION HISTORY');

  createInitialState('TEST-HISTORY-1');

  await runTest('Execution record is created', () => {
    const state = readState();
    const now = new Date().toISOString();
    state.execution_history = [
      {
        iteration: 1,
        step_index: 0,
        step_id: 'run_tests',
        tool: 'bash',
        started_at: now,
        completed_at: now,
        duration_ms: 150,
        success: true,
        exit_code: 0,
        stdout: 'Tests passed',
        stderr: ''
      }
    ];
    writeState(state);

    const updated = readState();
    assert(updated.execution_history?.length === 1, 'should have 1 record');
    assert(updated.execution_history[0].step_id === 'run_tests', 'record should match');
  });

  await runTest('Multiple records are ordered', () => {
    const state = readState();
    const now = new Date().toISOString();
    state.execution_history = [
      { iteration: 1, step_index: 0, step_id: 'step1', tool: 'bash', started_at: now, completed_at: now, duration_ms: 100, success: true, exit_code: 0 },
      { iteration: 1, step_index: 1, step_id: 'step2', tool: 'gemini', started_at: now, completed_at: now, duration_ms: 200, success: true, exit_code: 0 }
    ];
    writeState(state);

    const updated = readState();
    assert(updated.execution_history.length === 2, 'should have 2 records');
    assert(updated.execution_history[0].step_id === 'step1', 'first record should be step1');
    assert(updated.execution_history[1].step_id === 'step2', 'second record should be step2');
  });

  await runTest('Failed execution has error info', () => {
    const state = readState();
    const now = new Date().toISOString();
    state.execution_history?.push({
      iteration: 1,
      step_index: 2,
      step_id: 'step3',
      tool: 'codex',
      started_at: now,
      completed_at: now,
      duration_ms: 50,
      success: false,
      exit_code: 1,
      error: 'Compilation failed'
    });
    writeState(state);

    const updated = readState();
    const failedRecord = updated.execution_history?.find(r => r.step_id === 'step3');
    assert(failedRecord?.success === false, 'record should be marked as failed');
    assert(failedRecord?.error, 'record should have error message');
  });

  // ============================================
  // TEST SUITE 8: BACKUP & RECOVERY
  // ============================================
  log(colors.blue, '\n📋 TEST SUITE 8: BACKUP & RECOVERY');

  createInitialState('TEST-BACKUP-1');

  await runTest('State file is created', () => {
    assert(existsSync(TEST_STATE_FILE), 'state file should exist');
  });

  await runTest('State can be read back', () => {
    const written = readState();
    assert(written.loop_id.startsWith('loop-'), 'read state should match');
  });

  await runTest('State persists across writes', () => {
    const state = readState();
    state.current_iteration = 3;
    writeState(state);

    const readBack = readState();
    assert(readBack.current_iteration === 3, 'change should persist');
  });

  // ============================================
  // PRINT SUMMARY
  // ============================================
  log(colors.cyan, '\n' + '='.repeat(55));
  log(colors.cyan, '📊 TEST SUMMARY');
  log(colors.cyan, '='.repeat(55));

  const total = results.length;
  const passed = results.filter(r => r.passed).length;
  const failed = results.filter(r => !r.passed).length;
  const totalTime = results.reduce((sum, r) => sum + (r.duration || 0), 0);

  log(colors.reset, `\n  Total Tests: ${total}`);
  log(colors.green, `  Passed: ${passed} ✓`);
  if (failed > 0) {
    log(colors.red, `  Failed: ${failed} ✗`);
  }
  log(colors.reset, `  Success Rate: ${((passed / total) * 100).toFixed(1)}%`);
  log(colors.reset, `  Total Time: ${totalTime}ms`);

  if (failed > 0) {
    log(colors.red, '\n❌ Failed Tests:');
    results.filter(r => !r.passed).forEach(r => {
      log(colors.red, `  - ${r.name}`);
      log(colors.red, `    ${r.error}`);
    });
  }

  // Fast tests highlight
  const fastTests = results.filter(r => (r.duration || 0) < 10);
  if (fastTests.length > 0) {
    log(colors.green, `\n⚡ Fast Tests (<10ms): ${fastTests.length}`);
  }

  log(colors.cyan, '\n' + '='.repeat(55));

  if (failed === 0) {
    log(colors.green, '✅ ALL TESTS PASSED!');
    log(colors.green, 'The CCW Loop system flow state tests completed successfully.');
  } else {
    log(colors.red, '❌ SOME TESTS FAILED');
  }

  log(colors.reset, '');

  return failed === 0 ? 0 : 1;
}

// Run tests
runAllTests().then(exitCode => {
  process.exit(exitCode);
}).catch(err => {
  log(colors.red, `💥 Fatal error: ${err.message}`);
  console.error(err);
  process.exit(1);
});

@@ -1,26 +0,0 @@
#!/bin/bash
# CCW Loop System - Comprehensive Test Runner

echo "============================================"
echo "🧪 CCW LOOP SYSTEM - COMPREHENSIVE TESTS"
echo "============================================"
echo ""

# Check if Node.js is available
if ! command -v node &> /dev/null; then
  echo "❌ Error: Node.js is not installed or not in PATH"
  exit 1
fi

# Get the project root directory
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$PROJECT_ROOT"

echo "📁 Project Root: $PROJECT_ROOT"
echo ""

# Run the comprehensive test
node tests/loop-comprehensive-test.js "$@"

# Exit with the test's exit code
exit $?

@@ -1,261 +0,0 @@
#!/bin/bash
# CCW Loop System - Complete Flow State Test
# Tests the entire Loop system flow including mock endpoints

set -e

echo "=========================================="
echo "🧪 CCW LOOP SYSTEM - FLOW STATE TEST"
echo "=========================================="

# Colors
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Test workspace
TEST_WORKSPACE=".test-loop-workspace"
TEST_STATE_DIR="$TEST_WORKSPACE/.workflow"
TEST_TASKS_DIR="$TEST_WORKSPACE/.task"

# Server configuration
SERVER_HOST="localhost"
SERVER_PORT=3000
BASE_URL="http://$SERVER_HOST:$SERVER_PORT"

# Cleanup function
cleanup() {
  echo ""
  echo -e "${YELLOW}🧹 Cleaning up...${NC}"
  rm -rf "$TEST_WORKSPACE"
  echo "✅ Cleanup complete"
}

# Set up trap so cleanup runs on any exit (set -e aborts on first failure)
trap cleanup EXIT

# Step 1: Create test workspace
echo ""
echo -e "${BLUE}📁 Step 1: Creating test workspace...${NC}"
mkdir -p "$TEST_STATE_DIR"
mkdir -p "$TEST_TASKS_DIR"

# Create test task
cat > "$TEST_TASKS_DIR/TEST-FIX-1.json" << 'EOF'
|
||||
{
|
||||
"id": "TEST-FIX-1",
|
||||
"title": "Test Fix Loop",
|
||||
"status": "active",
|
||||
"meta": {
|
||||
"type": "test-fix"
|
||||
},
|
||||
"loop_control": {
|
||||
"enabled": true,
|
||||
"description": "Test loop for flow validation",
|
||||
"max_iterations": 3,
|
||||
"success_condition": "state_variables.test_result === 'pass'",
|
||||
"error_policy": {
|
||||
"on_failure": "pause",
|
||||
"max_retries": 2
|
||||
},
|
||||
"cli_sequence": [
|
||||
{
|
||||
"step_id": "run_test",
|
||||
"tool": "bash",
|
||||
"command": "npm test"
|
||||
},
|
||||
{
|
||||
"step_id": "analyze",
|
||||
"tool": "gemini",
|
||||
"mode": "analysis",
|
||||
"prompt_template": "Analyze: [run_test_stdout]"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
EOF
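
# Note: success_condition is a JavaScript expression evaluated against
# state_variables (see the standalone flow state test above), and the
# [run_test_stdout] placeholder follows the <step_id>_stdout convention.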

echo "✅ Test workspace created: $TEST_WORKSPACE"

# Step 2: Check if server is running
echo ""
echo -e "${BLUE}🔍 Step 2: Checking server status...${NC}"
if curl -s "$BASE_URL/api/status" > /dev/null 2>&1; then
  echo -e "${GREEN}✅ Server is running${NC}"
else
  echo -e "${RED}❌ Server is not running${NC}"
  echo "Please start the CCW server first:"
  echo "  npm run dev"
  exit 1
fi

# Step 3: Test Mock Endpoints
echo ""
echo -e "${BLUE}🧪 Step 3: Testing Mock Endpoints...${NC}"

# Reset mock store
echo "  ○ Reset mock execution store..."
RESET_RESPONSE=$(curl -s -X POST "$BASE_URL/api/test/loop/mock/reset")
if echo "$RESET_RESPONSE" | grep -q '"success":true'; then
  echo "  ✓ Reset successful"
else
  echo "  ✗ Reset failed"
  exit 1
fi

# Test scenario setup
echo "  ○ Setup test scenario..."
SCENARIO_RESPONSE=$(curl -s -X POST "$BASE_URL/api/test/loop/run-full-scenario" \
  -H "Content-Type: application/json" \
  -d '{"scenario": "test-fix"}')
if echo "$SCENARIO_RESPONSE" | grep -q '"success":true'; then
  echo "  ✓ Scenario setup successful"
else
  echo "  ✗ Scenario setup failed"
  exit 1
fi

# Step 4: State Transition Tests
echo ""
echo -e "${BLUE}🔄 Step 4: State Transition Tests...${NC}"

# Test 1: Start loop (created -> running)
echo "  ○ Start loop (created -> running)..."
START_RESPONSE=$(curl -s -X POST "$BASE_URL/api/loops" \
  -H "Content-Type: application/json" \
  -d "{\"taskId\": \"TEST-FIX-1\"}")
if echo "$START_RESPONSE" | grep -q '"success":true'; then
  LOOP_ID=$(echo "$START_RESPONSE" | grep -o '"loopId":"[^"]*"' | cut -d'"' -f4)
  echo "  ✓ Loop started: $LOOP_ID"
else
  echo "  ✗ Failed to start loop"
  echo "  Response: $START_RESPONSE"
  exit 1
fi
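
# JSON fields are pulled out with grep/cut to avoid a jq dependency; with jq
# installed the equivalent (assuming the response shape above) would be:
#   LOOP_ID=$(echo "$START_RESPONSE" | jq -r '.loopId')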

# Test 2: Check loop status
echo "  ○ Check loop status..."
sleep 1  # Wait for state update
STATUS_RESPONSE=$(curl -s "$BASE_URL/api/loops/$LOOP_ID")
if echo "$STATUS_RESPONSE" | grep -q '"success":true'; then
  LOOP_STATUS=$(echo "$STATUS_RESPONSE" | grep -o '"status":"[^"]*"' | cut -d'"' -f4)
  echo "  ✓ Loop status: $LOOP_STATUS"
else
  echo "  ✗ Failed to get status"
fi

# Test 3: Pause loop
echo "  ○ Pause loop..."
PAUSE_RESPONSE=$(curl -s -X POST "$BASE_URL/api/loops/$LOOP_ID/pause")
if echo "$PAUSE_RESPONSE" | grep -q '"success":true'; then
  echo "  ✓ Loop paused"
else
  echo "  ✗ Failed to pause"
fi

# Test 4: Resume loop
echo "  ○ Resume loop..."
RESUME_RESPONSE=$(curl -s -X POST "$BASE_URL/api/loops/$LOOP_ID/resume")
if echo "$RESUME_RESPONSE" | grep -q '"success":true'; then
  echo "  ✓ Loop resumed"
else
  echo "  ✗ Failed to resume"
fi

# Test 5: List loops
echo "  ○ List all loops..."
LIST_RESPONSE=$(curl -s "$BASE_URL/api/loops")
if echo "$LIST_RESPONSE" | grep -q '"success":true'; then
  TOTAL=$(echo "$LIST_RESPONSE" | grep -o '"total":[0-9]*' | cut -d':' -f2)
  echo "  ✓ Found $TOTAL loop(s)"
else
  echo "  ✗ Failed to list loops"
fi

# Step 5: Variable Substitution Tests
echo ""
echo -e "${BLUE}🔧 Step 5: Variable Substitution Tests...${NC}"

# Test mock CLI execution with variable capture
echo "  ○ Mock CLI execution with variables..."
EXEC_RESPONSE=$(curl -s -X POST "$BASE_URL/api/test/loop/mock/cli/execute" \
  -H "Content-Type: application/json" \
  -d "{\"loopId\": \"$LOOP_ID\", \"stepId\": \"run_test\", \"tool\": \"bash\", \"command\": \"npm test\"}")
if echo "$EXEC_RESPONSE" | grep -q '"success":true'; then
  echo "  ✓ Mock execution successful"
  STDOUT=$(echo "$EXEC_RESPONSE" | grep -o '"stdout":"[^"]*"' | cut -d'"' -f4)
  echo "    - Captured output: ${STDOUT:0:50}..."
else
  echo "  ✗ Mock execution failed"
fi

# Step 6: Success Condition Tests
echo ""
echo -e "${BLUE}✅ Step 6: Success Condition Tests...${NC}"

echo "  ○ Test simple condition..."
# Condition strings are JavaScript expressions evaluated server-side; here we
# only sanity-check that they are non-empty and quote cleanly in bash
# (the previous [ "$?" -eq 0 ] check was vacuous after an assignment)
TEST_CONDITION="state_variables.test_result === 'pass'"
if [ -n "$TEST_CONDITION" ]; then
  echo "  ✓ Condition syntax valid"
fi

echo "  ○ Test regex condition..."
TEST_REGEX='state_variables.output.match(/Passed: (\d+)/)'
if [ -n "$TEST_REGEX" ]; then
  echo "  ✓ Regex condition valid"
fi

# Step 7: Error Handling Tests
echo ""
echo -e "${BLUE}⚠️ Step 7: Error Handling Tests...${NC}"

echo "  ○ Test pause on error..."
PAUSE_ON_ERROR_RESPONSE=$(curl -s -X POST "$BASE_URL/api/loops/$LOOP_ID/pause")
if echo "$PAUSE_ON_ERROR_RESPONSE" | grep -q '"success":true'; then
  echo "  ✓ Pause on error works"
else
  echo "  ⚠ Pause returned: $PAUSE_ON_ERROR_RESPONSE"
fi

# Step 8: Execution History Tests
echo ""
echo -e "${BLUE}📊 Step 8: Execution History Tests...${NC}"

echo "  ○ Get mock execution history..."
HISTORY_RESPONSE=$(curl -s "$BASE_URL/api/test/loop/mock/history")
if echo "$HISTORY_RESPONSE" | grep -q '"success":true'; then
  HISTORY_COUNT=$(echo "$HISTORY_RESPONSE" | grep -o '"total":[0-9]*' | head -1 | cut -d':' -f2)
  echo "  ✓ History retrieved: $HISTORY_COUNT records"
else
  echo "  ✗ Failed to get history"
fi

# Step 9: Stop loop
echo ""
echo -e "${BLUE}⏹️ Step 9: Cleanup...${NC}"

echo "  ○ Stop test loop..."
STOP_RESPONSE=$(curl -s -X POST "$BASE_URL/api/loops/$LOOP_ID/stop")
if echo "$STOP_RESPONSE" | grep -q '"success":true'; then
  echo "  ✓ Loop stopped"
else
  echo "  ⚠ Stop response: $STOP_RESPONSE"
fi

# Final Summary
echo ""
echo "=========================================="
echo -e "${GREEN}✅ ALL TESTS PASSED${NC}"
echo "=========================================="
echo ""
echo "Test Results Summary:"
echo "  ✓ State Transitions: created -> running -> paused -> resumed"
echo "  ✓ Loop API Endpoints: start, status, list, pause, resume, stop"
echo "  ✓ Mock CLI Execution: variable capture"
echo "  ✓ Success Conditions: simple and regex"
echo "  ✓ Error Handling: pause on error"
echo "  ✓ Execution History: tracking and retrieval"
echo ""
echo "The CCW Loop system flow state tests completed successfully!"
echo ""