From 3b92bfae8ca882c6b0554e96da8c0cedc92803dc Mon Sep 17 00:00:00 2001 From: catlog22 Date: Fri, 27 Feb 2026 17:25:52 +0800 Subject: [PATCH] feat: add Discuss and Explore subagents for dynamic critique and code exploration - Implement Discuss Subagent for multi-perspective critique with dynamic perspectives. - Create Explore Subagent for shared codebase exploration with centralized caching. - Add tests for CcwToolsMcpCard component to ensure enabled tools are preserved on config save. - Introduce SessionPreviewPanel component for previewing and selecting sessions for Memory V2 extraction. - Develop CommandCreateDialog component for creating/importing commands with import and CLI generate modes. --- .../protocols/analysis-protocol.md | 2 + .../cli-templates/protocols/write-protocol.md | 2 + .claude/skills/command-generator/SKILL.md | 190 ++++++ .../phases/01-parameter-validation.md | 174 ++++++ .../phases/02-target-path-resolution.md | 171 ++++++ .../phases/03-template-loading.md | 123 ++++ .../phases/04-content-formatting.md | 184 ++++++ .../phases/05-file-generation.md | 185 ++++++ .../specs/command-design-spec.md | 160 ++++++ .../command-generator/templates/command-md.md | 75 +++ .claude/skills/team-coordinate/SKILL.md | 442 ++++++++++++++ .../coordinator/commands/analyze-task.md | 175 ++++++ .../roles/coordinator/commands/dispatch.md | 85 +++ .../roles/coordinator/commands/monitor.md | 274 +++++++++ .../team-coordinate/roles/coordinator/role.md | 233 ++++++++ .../team-coordinate/specs/role-template.md | 432 ++++++++++++++ .../subagents/discuss-subagent.md | 133 +++++ .../subagents/explore-subagent.md | 120 ++++ .claude/skills/team-lifecycle-v4/SKILL.md | 11 +- .../components/hook/HookQuickTemplates.tsx | 45 +- .../components/mcp/CcwToolsMcpCard.test.tsx | 102 ++++ .../src/components/mcp/CcwToolsMcpCard.tsx | 16 +- .../components/memory/SessionPreviewPanel.tsx | 332 +++++++++++ .../src/components/memory/V2PipelineTab.tsx | 166 +++--- 
.../components/shared/CommandCreateDialog.tsx | 408 +++++++++++++ ccw/frontend/src/hooks/useMemoryV2.ts | 38 +- ccw/frontend/src/lib/api.ts | 106 ++++ ccw/frontend/src/locales/en/cli-hooks.json | 27 +- ccw/frontend/src/locales/en/commands.json | 43 +- ccw/frontend/src/locales/en/mcp-manager.json | 4 + ccw/frontend/src/locales/en/memory.json | 22 + ccw/frontend/src/locales/zh/cli-hooks.json | 27 +- ccw/frontend/src/locales/zh/commands.json | 43 +- ccw/frontend/src/locales/zh/mcp-manager.json | 4 + ccw/frontend/src/locales/zh/memory.json | 22 + ccw/src/cli.ts | 5 + ccw/src/commands/memory.ts | 456 ++++++++++++++- ccw/src/commands/team.ts | 28 +- ccw/src/core/memory-extraction-pipeline.ts | 540 +++++++++++++++++- ccw/src/core/routes/commands-routes.ts | 517 ++++++++++++++++- ccw/src/core/routes/core-memory-routes.ts | 232 ++++++++ ccw/src/tools/cli-history-store.ts | 90 ++- ccw/src/tools/native-session-discovery.ts | 134 ++++- ccw/src/tools/team-msg.ts | 2 +- ccw/tests/native-session-discovery.test.ts | 56 +- 45 files changed, 6508 insertions(+), 128 deletions(-) create mode 100644 .claude/skills/command-generator/SKILL.md create mode 100644 .claude/skills/command-generator/phases/01-parameter-validation.md create mode 100644 .claude/skills/command-generator/phases/02-target-path-resolution.md create mode 100644 .claude/skills/command-generator/phases/03-template-loading.md create mode 100644 .claude/skills/command-generator/phases/04-content-formatting.md create mode 100644 .claude/skills/command-generator/phases/05-file-generation.md create mode 100644 .claude/skills/command-generator/specs/command-design-spec.md create mode 100644 .claude/skills/command-generator/templates/command-md.md create mode 100644 .claude/skills/team-coordinate/SKILL.md create mode 100644 .claude/skills/team-coordinate/roles/coordinator/commands/analyze-task.md create mode 100644 .claude/skills/team-coordinate/roles/coordinator/commands/dispatch.md create mode 100644 
.claude/skills/team-coordinate/roles/coordinator/commands/monitor.md create mode 100644 .claude/skills/team-coordinate/roles/coordinator/role.md create mode 100644 .claude/skills/team-coordinate/specs/role-template.md create mode 100644 .claude/skills/team-coordinate/subagents/discuss-subagent.md create mode 100644 .claude/skills/team-coordinate/subagents/explore-subagent.md create mode 100644 ccw/frontend/src/components/mcp/CcwToolsMcpCard.test.tsx create mode 100644 ccw/frontend/src/components/memory/SessionPreviewPanel.tsx create mode 100644 ccw/frontend/src/components/shared/CommandCreateDialog.tsx diff --git a/.ccw/workflows/cli-templates/protocols/analysis-protocol.md b/.ccw/workflows/cli-templates/protocols/analysis-protocol.md index e9eac825..50f037ec 100644 --- a/.ccw/workflows/cli-templates/protocols/analysis-protocol.md +++ b/.ccw/workflows/cli-templates/protocols/analysis-protocol.md @@ -30,6 +30,7 @@ RULES: [templates | additional constraints] ## Execution Flow +0. **Load Project Specs** - MANDATORY first step: run `ccw spec load` to retrieve project specifications and constraints before any analysis. Adapt analysis scope and standards based on loaded specs 1. **Parse** all 6 fields (PURPOSE, TASK, MODE, CONTEXT, EXPECTED, RULES) 2. **Read** and analyze CONTEXT files thoroughly 3. 
**Identify** patterns, issues, and dependencies @@ -40,6 +41,7 @@ RULES: [templates | additional constraints] ## Core Requirements **ALWAYS**: +- Run `ccw spec load` FIRST to obtain project specifications before starting any work - Analyze ALL CONTEXT files completely - Apply RULES (templates + constraints) exactly - Provide code evidence with `file:line` references diff --git a/.ccw/workflows/cli-templates/protocols/write-protocol.md b/.ccw/workflows/cli-templates/protocols/write-protocol.md index c14da608..3ac609d5 100644 --- a/.ccw/workflows/cli-templates/protocols/write-protocol.md +++ b/.ccw/workflows/cli-templates/protocols/write-protocol.md @@ -24,6 +24,7 @@ RULES: [templates | additional constraints] ## Execution Flow ### MODE: write +0. **Load Project Specs** - MANDATORY first step: run `ccw spec load` to retrieve project specifications and constraints before any implementation. Apply loaded specs to guide coding standards, architecture decisions, and quality gates 1. **Parse** all 6 fields (PURPOSE, TASK, MODE, CONTEXT, EXPECTED, RULES) 2. **Read** CONTEXT files, find 3+ similar patterns 3. **Plan** implementation following RULES @@ -34,6 +35,7 @@ RULES: [templates | additional constraints] ## Core Requirements **ALWAYS**: +- Run `ccw spec load` FIRST to obtain project specifications before starting any work - Study CONTEXT files - find 3+ similar patterns before implementing - Apply RULES exactly - Test continuously (auto mode) diff --git a/.claude/skills/command-generator/SKILL.md b/.claude/skills/command-generator/SKILL.md new file mode 100644 index 00000000..c9e068d1 --- /dev/null +++ b/.claude/skills/command-generator/SKILL.md @@ -0,0 +1,190 @@ +--- +name: command-generator +description: Command file generator - 5 phase workflow for creating Claude Code command files with YAML frontmatter. Generates .md command files for project or user scope. Triggers on "create command", "new command", "command generator". 
+allowed-tools: Read, Write, Edit, Bash, Glob +--- + +# Command Generator + +CLI-based command file generator producing Claude Code command .md files through a structured 5-phase workflow. Supports both project-level (`.claude/commands/`) and user-level (`~/.claude/commands/`) command locations. + +## Architecture Overview + +``` ++-----------------------------------------------------------+ +| Command Generator | +| | +| Input: skillName, description, location, [group], [hint] | +| | | +| +-------------------------------------------------+ | +| | Phase 1-5: Sequential Pipeline | | +| | | | +| | [P1] --> [P2] --> [P3] --> [P4] --> [P5] | | +| | Param Target Template Content File | | +| | Valid Path Loading Format Gen | | +| +-------------------------------------------------+ | +| | | +| Output: {scope}/.claude/commands/{group}/{name}.md | +| | ++-----------------------------------------------------------+ +``` + +## Key Design Principles + +1. **Single Responsibility**: Generates one command file per invocation +2. **Scope Awareness**: Supports project and user-level command locations +3. **Template-Driven**: Uses consistent template for all generated commands +4. **Validation First**: Validates all required parameters before file operations +5. 
**Non-Destructive**: Warns if command file already exists + +--- + +## Execution Flow + +``` +Phase 1: Parameter Validation + - Ref: phases/01-parameter-validation.md + - Validate: skillName (required), description (required), location (required) + - Optional: group, argumentHint + - Output: validated params object + +Phase 2: Target Path Resolution + - Ref: phases/02-target-path-resolution.md + - Resolve: location -> target commands directory + - Support: project (.claude/commands/) vs user (~/.claude/commands/) + - Handle: group subdirectory if provided + - Output: targetPath string + +Phase 3: Template Loading + - Ref: phases/03-template-loading.md + - Load: templates/command-md.md + - Template contains YAML frontmatter with placeholders + - Output: templateContent string + +Phase 4: Content Formatting + - Ref: phases/04-content-formatting.md + - Substitute: {{name}}, {{description}}, {{group}}, {{argumentHint}} + - Handle: optional fields (group, argumentHint) + - Output: formattedContent string + +Phase 5: File Generation + - Ref: phases/05-file-generation.md + - Check: file existence (warn if exists) + - Write: formatted content to target path + - Output: success confirmation with file path +``` + +## Usage Examples + +### Basic Command (Project Scope) +```javascript +Skill(skill="command-generator", args={ + skillName: "deploy", + description: "Deploy application to production environment", + location: "project" +}) +// Output: .claude/commands/deploy.md +``` + +### Grouped Command with Argument Hint +```javascript +Skill(skill="command-generator", args={ + skillName: "create", + description: "Create new issue from GitHub URL or text", + location: "project", + group: "issue", + argumentHint: "[-y|--yes] [--priority 1-5]" +}) +// Output: .claude/commands/issue/create.md +``` + +### User-Level Command +```javascript +Skill(skill="command-generator", args={ + skillName: "global-status", + description: "Show global Claude Code status", + location: "user" +}) +// 
Output: ~/.claude/commands/global-status.md +``` + +--- + +## Reference Documents by Phase + +### Phase 1: Parameter Validation +| Document | Purpose | When to Use | +|----------|---------|-------------| +| [phases/01-parameter-validation.md](phases/01-parameter-validation.md) | Validate required parameters | Phase 1 execution | + +### Phase 2: Target Path Resolution +| Document | Purpose | When to Use | +|----------|---------|-------------| +| [phases/02-target-path-resolution.md](phases/02-target-path-resolution.md) | Resolve target directory | Phase 2 execution | + +### Phase 3: Template Loading +| Document | Purpose | When to Use | +|----------|---------|-------------| +| [phases/03-template-loading.md](phases/03-template-loading.md) | Load command template | Phase 3 execution | +| [templates/command-md.md](templates/command-md.md) | Command file template | Template reference | + +### Phase 4: Content Formatting +| Document | Purpose | When to Use | +|----------|---------|-------------| +| [phases/04-content-formatting.md](phases/04-content-formatting.md) | Format content with params | Phase 4 execution | + +### Phase 5: File Generation +| Document | Purpose | When to Use | +|----------|---------|-------------| +| [phases/05-file-generation.md](phases/05-file-generation.md) | Write final file | Phase 5 execution | + +### Design Specifications +| Document | Purpose | When to Use | +|----------|---------|-------------| +| [specs/command-design-spec.md](specs/command-design-spec.md) | Command design guidelines | Understanding best practices | + +--- + +## Output Structure + +### Generated Command File + +```markdown +--- +name: {skillName} +description: {description} +{group} {argumentHint} +--- + +# {skillName} Command + +## Overview +{Auto-generated placeholder for command overview} + +## Usage +{Auto-generated placeholder for usage examples} + +## Execution Flow +{Auto-generated placeholder for execution steps} +``` + +--- + +## Error Handling + +| Error | 
Stage | Action | +|-------|-------|--------| +| Missing skillName | Phase 1 | Error: "skillName is required" | +| Missing description | Phase 1 | Error: "description is required" | +| Missing location | Phase 1 | Error: "location is required (project or user)" | +| Invalid location | Phase 2 | Error: "location must be 'project' or 'user'" | +| Template not found | Phase 3 | Error: "Command template not found" | +| File exists | Phase 5 | Warning: "Command file already exists, will overwrite" | +| Write failure | Phase 5 | Error: "Failed to write command file" | + +--- + +## Related Skills + +- **skill-generator**: Create complete skills with phases, templates, and specs +- **flow-coordinator**: Orchestrate multi-step command workflows diff --git a/.claude/skills/command-generator/phases/01-parameter-validation.md b/.claude/skills/command-generator/phases/01-parameter-validation.md new file mode 100644 index 00000000..7c22ccfa --- /dev/null +++ b/.claude/skills/command-generator/phases/01-parameter-validation.md @@ -0,0 +1,174 @@ +# Phase 1: Parameter Validation + +Validate all required parameters for command generation. 
+ +## Objective + +Ensure all required parameters are provided before proceeding with command generation: +- **skillName**: Command identifier (required) +- **description**: Command description (required) +- **location**: Target scope - "project" or "user" (required) +- **group**: Optional grouping subdirectory +- **argumentHint**: Optional argument hint string + +## Input + +Parameters received from skill invocation: +- `skillName`: string (required) +- `description`: string (required) +- `location`: "project" | "user" (required) +- `group`: string (optional) +- `argumentHint`: string (optional) + +## Validation Rules + +### Required Parameters + +```javascript +const requiredParams = { + skillName: { + type: 'string', + minLength: 1, + pattern: /^[a-z][a-z0-9-]*$/, // lowercase, alphanumeric, hyphens + error: 'skillName must be lowercase alphanumeric with hyphens, starting with a letter' + }, + description: { + type: 'string', + minLength: 10, + error: 'description must be at least 10 characters' + }, + location: { + type: 'string', + enum: ['project', 'user'], + error: 'location must be "project" or "user"' + } +}; +``` + +### Optional Parameters + +```javascript +const optionalParams = { + group: { + type: 'string', + pattern: /^[a-z][a-z0-9-]*$/, + default: null, + error: 'group must be lowercase alphanumeric with hyphens' + }, + argumentHint: { + type: 'string', + default: '', + error: 'argumentHint must be a string' + } +}; +``` + +## Execution Steps + +### Step 1: Extract Parameters + +```javascript +// Extract from skill args +const params = { + skillName: args.skillName, + description: args.description, + location: args.location, + group: args.group || null, + argumentHint: args.argumentHint || '' +}; +``` + +### Step 2: Validate Required Parameters + +```javascript +function validateRequired(params, rules) { + const errors = []; + + for (const [key, rule] of Object.entries(rules)) { + const value = params[key]; + + // Check existence + if (value === 
undefined || value === null || value === '') { + errors.push(`${key} is required`); + continue; + } + + // Check type + if (typeof value !== rule.type) { + errors.push(`${key} must be a ${rule.type}`); + continue; + } + + // Check minLength + if (rule.minLength && value.length < rule.minLength) { + errors.push(`${key} must be at least ${rule.minLength} characters`); + } + + // Check pattern + if (rule.pattern && !rule.pattern.test(value)) { + errors.push(rule.error); + } + + // Check enum + if (rule.enum && !rule.enum.includes(value)) { + errors.push(`${key} must be one of: ${rule.enum.join(', ')}`); + } + } + + return errors; +} + +const requiredErrors = validateRequired(params, requiredParams); +if (requiredErrors.length > 0) { + throw new Error(`Validation failed:\n${requiredErrors.join('\n')}`); +} +``` + +### Step 3: Validate Optional Parameters + +```javascript +function validateOptional(params, rules) { + const warnings = []; + + for (const [key, rule] of Object.entries(rules)) { + const value = params[key]; + + if (value !== null && value !== undefined && value !== '') { + if (rule.pattern && !rule.pattern.test(value)) { + warnings.push(`${key}: ${rule.error}`); + } + } + } + + return warnings; +} + +const optionalWarnings = validateOptional(params, optionalParams); +// Log warnings but continue +``` + +### Step 4: Normalize Parameters + +```javascript +const validatedParams = { + skillName: params.skillName.trim().toLowerCase(), + description: params.description.trim(), + location: params.location.trim().toLowerCase(), + group: params.group ? params.group.trim().toLowerCase() : null, + argumentHint: params.argumentHint ? params.argumentHint.trim() : '' +}; +``` + +## Output + +```javascript +{ + status: 'validated', + params: validatedParams, + warnings: optionalWarnings +} +``` + +## Next Phase + +Proceed to [Phase 2: Target Path Resolution](02-target-path-resolution.md) with `validatedParams`. 
diff --git a/.claude/skills/command-generator/phases/02-target-path-resolution.md b/.claude/skills/command-generator/phases/02-target-path-resolution.md new file mode 100644 index 00000000..259660e5 --- /dev/null +++ b/.claude/skills/command-generator/phases/02-target-path-resolution.md @@ -0,0 +1,171 @@ +# Phase 2: Target Path Resolution + +Resolve the target commands directory based on location parameter. + +## Objective + +Determine the correct target path for the command file based on: +- **location**: "project" or "user" scope +- **group**: Optional subdirectory for command organization +- **skillName**: Command filename (with .md extension) + +## Input + +From Phase 1 validation: +```javascript +{ + skillName: string, // e.g., "create" + description: string, + location: "project" | "user", + group: string | null, // e.g., "issue" + argumentHint: string +} +``` + +## Path Resolution Rules + +### Location Mapping + +```javascript +const locationMap = { + project: '.claude/commands', + user: '~/.claude/commands' // Expands to user home directory +}; +``` + +### Path Construction + +```javascript +function resolveTargetPath(params) { + const baseDir = locationMap[params.location]; + + if (!baseDir) { + throw new Error(`Invalid location: ${params.location}. Must be "project" or "user".`); + } + + // Expand ~ to user home if present + const expandedBase = baseDir.startsWith('~') + ? 
path.join(os.homedir(), baseDir.slice(1)) + : baseDir; + + // Build full path + let targetPath; + if (params.group) { + // Grouped command: .claude/commands/{group}/{skillName}.md + targetPath = path.join(expandedBase, params.group, `${params.skillName}.md`); + } else { + // Top-level command: .claude/commands/{skillName}.md + targetPath = path.join(expandedBase, `${params.skillName}.md`); + } + + return targetPath; +} +``` + +## Execution Steps + +### Step 1: Get Base Directory + +```javascript +const location = validatedParams.location; +const baseDir = locationMap[location]; + +if (!baseDir) { + throw new Error(`Invalid location: ${location}. Must be "project" or "user".`); +} +``` + +### Step 2: Expand User Path (if applicable) + +```javascript +const os = require('os'); +const path = require('path'); + +let expandedBase = baseDir; +if (baseDir.startsWith('~')) { + expandedBase = path.join(os.homedir(), baseDir.slice(1)); +} +``` + +### Step 3: Construct Full Path + +```javascript +let targetPath; +let targetDir; + +if (validatedParams.group) { + // Command with group subdirectory + targetDir = path.join(expandedBase, validatedParams.group); + targetPath = path.join(targetDir, `${validatedParams.skillName}.md`); +} else { + // Top-level command + targetDir = expandedBase; + targetPath = path.join(targetDir, `${validatedParams.skillName}.md`); +} +``` + +### Step 4: Ensure Target Directory Exists + +```javascript +// Check and create directory if needed +Bash(`mkdir -p "${targetDir}"`); +``` + +### Step 5: Check File Existence + +```javascript +const fileExists = Bash(`test -f "${targetPath}" && echo "EXISTS" || echo "NOT_FOUND"`); + +if (fileExists.includes('EXISTS')) { + console.warn(`Warning: Command file already exists at ${targetPath}. 
Will overwrite.`); +} +``` + +## Output + +```javascript +{ + status: 'resolved', + targetPath: targetPath, // Full path to command file + targetDir: targetDir, // Directory containing command + fileName: `${skillName}.md`, + fileExists: fileExists.includes('EXISTS'), + params: validatedParams // Pass through to next phase +} +``` + +## Path Examples + +### Project Scope (No Group) +``` +location: "project" +skillName: "deploy" +-> .claude/commands/deploy.md +``` + +### Project Scope (With Group) +``` +location: "project" +skillName: "create" +group: "issue" +-> .claude/commands/issue/create.md +``` + +### User Scope (No Group) +``` +location: "user" +skillName: "global-status" +-> ~/.claude/commands/global-status.md +``` + +### User Scope (With Group) +``` +location: "user" +skillName: "sync" +group: "session" +-> ~/.claude/commands/session/sync.md +``` + +## Next Phase + +Proceed to [Phase 3: Template Loading](03-template-loading.md) with `targetPath` and `params`. diff --git a/.claude/skills/command-generator/phases/03-template-loading.md b/.claude/skills/command-generator/phases/03-template-loading.md new file mode 100644 index 00000000..dd2fb43c --- /dev/null +++ b/.claude/skills/command-generator/phases/03-template-loading.md @@ -0,0 +1,123 @@ +# Phase 3: Template Loading + +Load the command template file for content generation. + +## Objective + +Load the command template from the skill's templates directory. 
The template provides: +- YAML frontmatter structure +- Placeholder variables for substitution +- Standard command file sections + +## Input + +From Phase 2: +```javascript +{ + targetPath: string, + targetDir: string, + fileName: string, + fileExists: boolean, + params: { + skillName: string, + description: string, + location: string, + group: string | null, + argumentHint: string + } +} +``` + +## Template Location + +``` +.claude/skills/command-generator/templates/command-md.md +``` + +## Execution Steps + +### Step 1: Locate Template File + +```javascript +// Template is located in the skill's templates directory +const skillDir = '.claude/skills/command-generator'; +const templatePath = `${skillDir}/templates/command-md.md`; +``` + +### Step 2: Read Template Content + +```javascript +const templateContent = Read(templatePath); + +if (!templateContent) { + throw new Error(`Command template not found at ${templatePath}`); +} +``` + +### Step 3: Validate Template Structure + +```javascript +// Verify template contains expected placeholders +const requiredPlaceholders = ['{{name}}', '{{description}}']; +const optionalPlaceholders = ['{{group}}', '{{argumentHint}}']; + +for (const placeholder of requiredPlaceholders) { + if (!templateContent.includes(placeholder)) { + throw new Error(`Template missing required placeholder: ${placeholder}`); + } +} +``` + +### Step 4: Store Template for Next Phase + +```javascript +const template = { + content: templateContent, + requiredPlaceholders: requiredPlaceholders, + optionalPlaceholders: optionalPlaceholders +}; +``` + +## Template Format Reference + +The template should follow this structure: + +```markdown +--- +name: {{name}} +description: {{description}} +{{#if group}}group: {{group}}{{/if}} +{{#if argumentHint}}argument-hint: {{argumentHint}}{{/if}} +--- + +# {{name}} Command + +[Template content with placeholders] +``` + +## Output + +```javascript +{ + status: 'loaded', + template: { + content: templateContent, + 
requiredPlaceholders: requiredPlaceholders, + optionalPlaceholders: optionalPlaceholders + }, + targetPath: targetPath, + params: params +} +``` + +## Error Handling + +| Error | Action | +|-------|--------| +| Template file not found | Throw error with path | +| Missing required placeholder | Throw error with missing placeholder name | +| Empty template | Throw error | + +## Next Phase + +Proceed to [Phase 4: Content Formatting](04-content-formatting.md) with `template`, `targetPath`, and `params`. diff --git a/.claude/skills/command-generator/phases/04-content-formatting.md b/.claude/skills/command-generator/phases/04-content-formatting.md new file mode 100644 index 00000000..1f5a4bb3 --- /dev/null +++ b/.claude/skills/command-generator/phases/04-content-formatting.md @@ -0,0 +1,184 @@ +# Phase 4: Content Formatting + +Format template content by substituting placeholders with parameter values. + +## Objective + +Replace all placeholder variables in the template with validated parameter values: +- `{{name}}` -> skillName +- `{{description}}` -> description +- `{{group}}` -> group (if provided) +- `{{argumentHint}}` -> argumentHint (if provided) + +## Input + +From Phase 3: +```javascript +{ + template: { + content: string, + requiredPlaceholders: string[], + optionalPlaceholders: string[] + }, + targetPath: string, + params: { + skillName: string, + description: string, + location: string, + group: string | null, + argumentHint: string + } +} +``` + +## Placeholder Mapping + +```javascript +const placeholderMap = { + '{{name}}': params.skillName, + '{{description}}': params.description, + '{{group}}': params.group || '', + '{{argumentHint}}': params.argumentHint || '' +}; +``` + +## Execution Steps + +### Step 1: Initialize Content + +```javascript +let formattedContent = template.content; +``` + +### Step 2: Substitute Required Placeholders + +```javascript +// These must always be replaced +formattedContent = formattedContent.replace(/\{\{name\}\}/g, 
params.skillName); +formattedContent = formattedContent.replace(/\{\{description\}\}/g, params.description); +``` + +### Step 3: Handle Optional Placeholders + +```javascript +// Group placeholder +if (params.group) { + formattedContent = formattedContent.replace(/\{\{group\}\}/g, params.group); +} else { + // Remove group line if not provided + formattedContent = formattedContent.replace(/^group: \{\{group\}\}\n?/gm, ''); + formattedContent = formattedContent.replace(/\{\{group\}\}/g, ''); +} + +// Argument hint placeholder +if (params.argumentHint) { + formattedContent = formattedContent.replace(/\{\{argumentHint\}\}/g, params.argumentHint); +} else { + // Remove argument-hint line if not provided + formattedContent = formattedContent.replace(/^argument-hint: \{\{argumentHint\}\}\n?/gm, ''); + formattedContent = formattedContent.replace(/\{\{argumentHint\}\}/g, ''); +} +``` + +### Step 4: Handle Conditional Sections + +```javascript +// Remove empty frontmatter lines (caused by missing optional fields) +formattedContent = formattedContent.replace(/\n{3,}/g, '\n\n'); + +// Handle {{#if group}} style conditionals +if (formattedContent.includes('{{#if')) { + // Process group conditional + if (params.group) { + formattedContent = formattedContent.replace(/\{\{#if group\}\}([\s\S]*?)\{\{\/if\}\}/g, '$1'); + } else { + formattedContent = formattedContent.replace(/\{\{#if group\}\}[\s\S]*?\{\{\/if\}\}/g, ''); + } + + // Process argumentHint conditional + if (params.argumentHint) { + formattedContent = formattedContent.replace(/\{\{#if argumentHint\}\}([\s\S]*?)\{\{\/if\}\}/g, '$1'); + } else { + formattedContent = formattedContent.replace(/\{\{#if argumentHint\}\}[\s\S]*?\{\{\/if\}\}/g, ''); + } +} +``` + +### Step 5: Validate Final Content + +```javascript +// Ensure no unresolved placeholders remain +const unresolvedPlaceholders = formattedContent.match(/\{\{[^}]+\}\}/g); +if (unresolvedPlaceholders) { + console.warn(`Warning: Unresolved placeholders found: 
${unresolvedPlaceholders.join(', ')}`); +} + +// Ensure frontmatter is valid +const frontmatterMatch = formattedContent.match(/^---\n([\s\S]*?)\n---/); +if (!frontmatterMatch) { + throw new Error('Generated content has invalid frontmatter structure'); +} +``` + +### Step 6: Generate Summary + +```javascript +const summary = { + name: params.skillName, + description: params.description.substring(0, 50) + (params.description.length > 50 ? '...' : ''), + location: params.location, + group: params.group, + hasArgumentHint: !!params.argumentHint +}; +``` + +## Output + +```javascript +{ + status: 'formatted', + content: formattedContent, + targetPath: targetPath, + summary: summary +} +``` + +## Content Example + +### Input Template +```markdown +--- +name: {{name}} +description: {{description}} +{{#if group}}group: {{group}}{{/if}} +{{#if argumentHint}}argument-hint: {{argumentHint}}{{/if}} +--- + +# {{name}} Command +``` + +### Output (with all fields) +```markdown +--- +name: create +description: Create structured issue from GitHub URL or text description +group: issue +argument-hint: [-y|--yes] [--priority 1-5] +--- + +# create Command +``` + +### Output (minimal fields) +```markdown +--- +name: deploy +description: Deploy application to production environment +--- + +# deploy Command +``` + +## Next Phase + +Proceed to [Phase 5: File Generation](05-file-generation.md) with `content` and `targetPath`. diff --git a/.claude/skills/command-generator/phases/05-file-generation.md b/.claude/skills/command-generator/phases/05-file-generation.md new file mode 100644 index 00000000..77f11544 --- /dev/null +++ b/.claude/skills/command-generator/phases/05-file-generation.md @@ -0,0 +1,185 @@ +# Phase 5: File Generation + +Write the formatted content to the target command file. + +## Objective + +Generate the final command file by: +1. Checking for existing file (warn if present) +2. Writing formatted content to target path +3. 
Confirming successful generation + +## Input + +From Phase 4: +```javascript +{ + status: 'formatted', + content: string, + targetPath: string, + summary: { + name: string, + description: string, + location: string, + group: string | null, + hasArgumentHint: boolean + } +} +``` + +## Execution Steps + +### Step 1: Pre-Write Check + +```javascript +// Check if file already exists +const fileExists = Bash(`test -f "${targetPath}" && echo "EXISTS" || echo "NOT_FOUND"`); + +if (fileExists.includes('EXISTS')) { + console.warn(` +WARNING: Command file already exists at: ${targetPath} +The file will be overwritten with new content. + `); +} +``` + +### Step 2: Ensure Directory Exists + +```javascript +// Get directory from target path +const targetDir = path.dirname(targetPath); + +// Create directory if it doesn't exist +Bash(`mkdir -p "${targetDir}"`); +``` + +### Step 3: Write File + +```javascript +// Write the formatted content +Write(targetPath, content); +``` + +### Step 4: Verify Write + +```javascript +// Confirm file was created +const verifyExists = Bash(`test -f "${targetPath}" && echo "SUCCESS" || echo "FAILED"`); + +if (!verifyExists.includes('SUCCESS')) { + throw new Error(`Failed to create command file at ${targetPath}`); +} + +// Verify content was written +const writtenContent = Read(targetPath); +if (!writtenContent || writtenContent.length === 0) { + throw new Error(`Command file created but appears to be empty`); +} +``` + +### Step 5: Generate Success Report + +```javascript +const report = { + status: 'completed', + file: { + path: targetPath, + name: summary.name, + location: summary.location, + group: summary.group, + size: writtenContent.length, + created: new Date().toISOString() + }, + command: { + name: summary.name, + description: summary.description, + hasArgumentHint: summary.hasArgumentHint + }, + nextSteps: [ + `Edit ${targetPath} to add implementation details`, + 'Add usage examples and execution flow', + 'Test the command with Claude 
Code' + ] +}; +``` + +## Output + +### Success Output + +```javascript +{ + status: 'completed', + file: { + path: '.claude/commands/issue/create.md', + name: 'create', + location: 'project', + group: 'issue', + size: 1234, + created: '2026-02-27T12:00:00.000Z' + }, + command: { + name: 'create', + description: 'Create structured issue from GitHub URL...', + hasArgumentHint: true + }, + nextSteps: [ + 'Edit .claude/commands/issue/create.md to add implementation details', + 'Add usage examples and execution flow', + 'Test the command with Claude Code' + ] +} +``` + +### Console Output + +``` +Command generated successfully! + +File: .claude/commands/issue/create.md +Name: create +Description: Create structured issue from GitHub URL... +Location: project +Group: issue + +Next Steps: +1. Edit .claude/commands/issue/create.md to add implementation details +2. Add usage examples and execution flow +3. Test the command with Claude Code +``` + +## Error Handling + +| Error | Action | +|-------|--------| +| Directory creation failed | Throw error with directory path | +| File write failed | Throw error with target path | +| Empty file detected | Throw error and attempt cleanup | +| Permission denied | Throw error with permission hint | + +## Cleanup on Failure + +```javascript +// If any step fails, attempt to clean up partial artifacts +function cleanup(targetPath) { + try { + Bash(`rm -f "${targetPath}"`); + } catch (e) { + // Ignore cleanup errors + } +} +``` + +## Completion + +The command file has been successfully generated. The skill execution is complete. 
 + +### Usage Example + +```bash +# Use the generated command +/issue:create https://github.com/owner/repo/issues/123 + +# Or with a text description +/issue:create "Login fails with special chars" +``` diff --git a/.claude/skills/command-generator/specs/command-design-spec.md b/.claude/skills/command-generator/specs/command-design-spec.md new file mode 100644 index 00000000..48875ca2 --- /dev/null +++ b/.claude/skills/command-generator/specs/command-design-spec.md @@ -0,0 +1,160 @@ +# Command Design Specification + +Guidelines and best practices for designing Claude Code command files. + +## Command File Structure + +### YAML Frontmatter + +Every command file must start with YAML frontmatter containing: + +```yaml +--- +name: command-name # Required: Command identifier (lowercase, hyphens) +description: Description # Required: Brief description of command purpose +argument-hint: "[args]" # Optional: Argument format hint +allowed-tools: Tool1, Tool2 # Optional: Restricted tool set +examples: # Optional: Usage examples + - /command:example1 + - /command:example2 --flag +--- +``` + +### Frontmatter Fields + +| Field | Required | Description | +|-------|----------|-------------| +| `name` | Yes | Command identifier, lowercase with hyphens | +| `description` | Yes | Brief description, appears in command listings | +| `argument-hint` | No | Usage hint for arguments (shown in help) | +| `allowed-tools` | No | Restrict available tools for this command | +| `examples` | No | Array of usage examples | + +## Naming Conventions + +### Command Names + +- Use lowercase letters only +- Separate words with hyphens (`create-issue`, not `createIssue`) +- Keep names short but descriptive (2-3 words max) +- Use verbs for actions (`deploy`, `create`, `analyze`) + +### Group Names + +- Groups organize related commands +- Use singular nouns (`issue`, `session`, `workflow`) +- Common groups: `issue`, `workflow`, `session`, `memory`, `cli` + +### Path Examples + +``` 
+.claude/commands/deploy.md # Top-level command +.claude/commands/issue/create.md # Grouped command +.claude/commands/workflow/init.md # Grouped command +``` + +## Content Sections + +### Required Sections + +1. **Overview**: Brief description of command purpose +2. **Usage**: Command syntax and examples +3. **Execution Flow**: High-level process diagram + +### Recommended Sections + +4. **Implementation**: Code examples for each phase +5. **Error Handling**: Error cases and recovery +6. **Related Commands**: Links to related functionality + +## Best Practices + +### 1. Clear Purpose + +Each command should do one thing well: + +``` +Good: /issue:create - Create a new issue +Bad: /issue:manage - Create, update, delete issues (too broad) +``` + +### 2. Consistent Structure + +Follow the same pattern across all commands in a group: + +```markdown +# All issue commands should have: +- Overview +- Usage with examples +- Phase-based implementation +- Error handling table +``` + +### 3. Progressive Detail + +Start simple, add detail in phases: + +``` +Phase 1: Quick overview +Phase 2: Implementation details +Phase 3: Edge cases and errors +``` + +### 4. 
Reusable Patterns + +Use consistent patterns for common operations: + +```javascript +// Input parsing pattern +const args = parseArguments($ARGUMENTS); +const flags = parseFlags($ARGUMENTS); + +// Validation pattern +if (!args.required) { + throw new Error('Required argument missing'); +} +``` + +## Scope Guidelines + +### Project Commands (`.claude/commands/`) + +- Project-specific workflows +- Team conventions +- Integration with project tools + +### User Commands (`~/.claude/commands/`) + +- Personal productivity tools +- Cross-project utilities +- Global configuration + +## Error Messages + +### Good Error Messages + +``` +Error: GitHub issue URL required +Usage: /issue:create +Example: /issue:create https://github.com/owner/repo/issues/123 +``` + +### Bad Error Messages + +``` +Error: Invalid input +``` + +## Testing Commands + +After creating a command, test: + +1. **Basic invocation**: Does it run without arguments? +2. **Argument parsing**: Does it handle valid arguments? +3. **Error cases**: Does it show helpful errors for invalid input? +4. **Help text**: Is the usage clear? 
+ +## Related Documentation + +- [SKILL-DESIGN-SPEC.md](../_shared/SKILL-DESIGN-SPEC.md) - Full skill design specification +- [../skill-generator/SKILL.md](../skill-generator/SKILL.md) - Meta-skill for creating skills diff --git a/.claude/skills/command-generator/templates/command-md.md b/.claude/skills/command-generator/templates/command-md.md new file mode 100644 index 00000000..d3004430 --- /dev/null +++ b/.claude/skills/command-generator/templates/command-md.md @@ -0,0 +1,75 @@ +--- +name: {{name}} +description: {{description}} +{{#if argumentHint}}argument-hint: {{argumentHint}} +{{/if}}--- + +# {{name}} Command + +## Overview + +[Describe the command purpose and what it does] + +## Usage + +```bash +/{{#if group}}{{group}}:{{/if}}{{name}} [arguments] +``` + +**Examples**: +```bash +# Example 1: Basic usage +/{{#if group}}{{group}}:{{/if}}{{name}} + +# Example 2: With arguments +/{{#if group}}{{group}}:{{/if}}{{name}} --option value +``` + +## Execution Flow + +``` +Phase 1: Input Parsing + - Parse arguments and flags + - Validate input parameters + +Phase 2: Core Processing + - Execute main logic + - Handle edge cases + +Phase 3: Output Generation + - Format results + - Display to user +``` + +## Implementation + +### Phase 1: Input Parsing + +```javascript +// Parse command arguments +const args = parseArguments($ARGUMENTS); +``` + +### Phase 2: Core Processing + +```javascript +// TODO: Implement core logic +``` + +### Phase 3: Output Generation + +```javascript +// TODO: Format and display output +``` + +## Error Handling + +| Error | Action | +|-------|--------| +| Invalid input | Show usage and error message | +| Processing failure | Log error and suggest recovery | + +## Related Commands + +- [Related command 1] +- [Related command 2] diff --git a/.claude/skills/team-coordinate/SKILL.md b/.claude/skills/team-coordinate/SKILL.md new file mode 100644 index 00000000..fb2d2b4e --- /dev/null +++ b/.claude/skills/team-coordinate/SKILL.md @@ -0,0 +1,442 @@ 
+--- +name: team-coordinate +description: Universal team coordination skill with dynamic role generation. Only coordinator is built-in -- all worker roles are generated at runtime based on task analysis. Beat/cadence model for orchestration. Triggers on "team coordinate". +allowed-tools: TeamCreate(*), TeamDelete(*), SendMessage(*), TaskCreate(*), TaskUpdate(*), TaskList(*), TaskGet(*), Task(*), AskUserQuestion(*), Read(*), Write(*), Edit(*), Bash(*), Glob(*), Grep(*) +--- + +# Team Coordinate + +Universal team coordination skill: analyze task -> generate roles -> dispatch -> execute -> deliver. Only the **coordinator** is built-in. All worker roles are **dynamically generated** based on task analysis. + +## Architecture + +``` ++---------------------------------------------------+ +| Skill(skill="team-coordinate") | +| args="task description" | +| args="--role=coordinator" | +| args="--role= --session=" | ++-------------------+-------------------------------+ + | Role Router + +---- --role present? ----+ + | NO | YES + v v + Orchestration Mode Role Dispatch + (auto -> coordinator) (route to role file) + | | + coordinator +-------+-------+ + (built-in) | --role=coordinator? + | | + YES | | NO + v | v + built-in | Dynamic Role + role.md | /roles/.md + + Subagents (callable by any role, not team members): + [discuss-subagent] - multi-perspective critique (dynamic perspectives) + [explore-subagent] - codebase exploration with cache +``` + +## Role Router + +### Input Parsing + +Parse `$ARGUMENTS` to extract `--role` and `--session`. If no `--role` -> Orchestration Mode (auto route to coordinator). + +### Role Registry + +Only coordinator is statically registered. All other roles are dynamic, stored in `team-session.json#roles`. 
+ +| Role | File | Type | +|------|------|------| +| coordinator | [roles/coordinator/role.md](roles/coordinator/role.md) | built-in orchestrator | +| (dynamic) | `/roles/.md` | runtime-generated worker | + +> **COMPACT PROTECTION**: Role files are execution documents. After context compression, role instructions become summaries only -- **MUST immediately `Read` the role.md to reload before continuing**. Never execute any Phase based on summaries. + +### Subagent Registry + +| Subagent | Spec | Callable By | Purpose | +|----------|------|-------------|---------| +| discuss | [subagents/discuss-subagent.md](subagents/discuss-subagent.md) | any role | Multi-perspective critique (dynamic perspectives) | +| explore | [subagents/explore-subagent.md](subagents/explore-subagent.md) | any role | Codebase exploration with cache | + +### Dispatch + +1. Extract `--role` and `--session` from arguments +2. If no `--role` -> route to coordinator (Orchestration Mode) +3. If `--role=coordinator` -> Read built-in `roles/coordinator/role.md` -> Execute its phases +4. If `--role=` -> Read `/roles/.md` -> Execute its phases +5. If session path not provided -> auto-discover from `.workflow/.team/TC-*/team-session.json` + +### Orchestration Mode + +When invoked without `--role`, coordinator auto-starts. User just provides task description. 
+ +**Invocation**: `Skill(skill="team-coordinate", args="task description")` + +**Lifecycle**: +``` +User provides task description + -> coordinator Phase 1: task analysis (detect capabilities, build dependency graph) + -> coordinator Phase 2: generate roles + initialize session + -> coordinator Phase 3: create task chain from dependency graph + -> coordinator Phase 4: spawn first batch workers (background) -> STOP + -> Worker executes -> SendMessage callback -> coordinator advances next step + -> Loop until pipeline complete -> Phase 5 report +``` + +**User Commands** (wake paused coordinator): + +| Command | Action | +|---------|--------| +| `check` / `status` | Output execution status graph, no advancement | +| `resume` / `continue` | Check worker states, advance next step | + +--- + +## Shared Infrastructure + +The following templates apply to all worker roles. Each generated role.md only needs to define **Phase 2-4** role-specific logic. + +### Worker Phase 1: Task Discovery (all workers shared) + +Each worker on startup executes the same task discovery flow: + +1. Call `TaskList()` to get all tasks +2. Filter: subject matches this role's prefix + owner is this role + status is pending + blockedBy is empty +3. No tasks -> idle wait +4. Has tasks -> `TaskGet` for details -> `TaskUpdate` mark in_progress + +**Resume Artifact Check** (prevent duplicate output after resume): +- Check if this task's output artifacts already exist +- Artifacts complete -> skip to Phase 5 report completion +- Artifacts incomplete or missing -> normal Phase 2-4 execution + +### Worker Phase 5: Report + Fast-Advance (all workers shared) + +Task completion with optional fast-advance to skip coordinator round-trip: + +1. **Message Bus**: Call `mcp__ccw-tools__team_msg` to log message + - Params: operation="log", team=, from=, to="coordinator", type=, summary="[] ", ref= + - **CLI fallback**: When MCP unavailable -> `ccw team log --team --from --to coordinator --type --summary "[] ..." 
--json` +2. **TaskUpdate**: Mark task completed +3. **Fast-Advance Check**: + - Call `TaskList()`, find pending tasks whose blockedBy are ALL completed + - If exactly 1 ready task AND its owner matches a simple successor pattern -> **spawn it directly** (skip coordinator) + - Otherwise -> **SendMessage** to coordinator for orchestration +4. **Loop**: Back to Phase 1 to check for next task + +**Fast-Advance Rules**: + +| Condition | Action | +|-----------|--------| +| Same-prefix successor (Inner Loop role) | Do not spawn, main agent inner loop (Phase 5-L) | +| 1 ready task, simple linear successor, different prefix | Spawn directly via Task(run_in_background: true) | +| Multiple ready tasks (parallel window) | SendMessage to coordinator (needs orchestration) | +| No ready tasks + others running | SendMessage to coordinator (status update) | +| No ready tasks + nothing running | SendMessage to coordinator (pipeline may be complete) | + +**Fast-advance failure recovery**: If a fast-advanced task fails, the coordinator detects it as an orphaned in_progress task on next `resume`/`check` and resets it to pending for re-spawn. Self-healing. See [monitor.md](roles/coordinator/commands/monitor.md). + +### Worker Inner Loop (roles with multiple same-prefix serial tasks) + +When a role has **2+ serial same-prefix tasks**, it loops internally instead of spawning new agents: + +**Inner Loop flow**: + +``` +Phase 1: Discover task (first time) + | + +- Found task -> Phase 2-3: Load context + Execute work + | | + | v + | Phase 4: Validation (+ optional Inline Discuss) + | | + | v + | Phase 5-L: Loop Completion + | | + | +- TaskUpdate completed + | +- team_msg log + | +- Accumulate summary to context_accumulator + | | + | +- More same-prefix tasks? + | | +- YES -> back to Phase 1 (inner loop) + | | +- NO -> Phase 5-F: Final Report + | | + | +- Interrupt conditions? 
+ | +- consensus_blocked HIGH -> SendMessage -> STOP + | +- Errors >= 3 -> SendMessage -> STOP + | + +- Phase 5-F: Final Report + +- SendMessage (all task summaries) + +- STOP +``` + +**Phase 5-L vs Phase 5-F**: + +| Step | Phase 5-L (looping) | Phase 5-F (final) | +|------|---------------------|-------------------| +| TaskUpdate completed | YES | YES | +| team_msg log | YES | YES | +| Accumulate summary | YES | - | +| SendMessage to coordinator | NO | YES (all tasks summary) | +| Fast-Advance to next prefix | - | YES (check cross-prefix successors) | + +### Inline Discuss Protocol (optional for any role) + +After completing primary output, roles may call the discuss subagent inline. Unlike v4's fixed perspective definitions, team-coordinate uses **dynamic perspectives** specified by the coordinator when generating each role. + +``` +Task({ + subagent_type: "cli-discuss-agent", + run_in_background: false, + description: "Discuss ", + prompt: +}) +``` + +**Consensus handling**: + +| Verdict | Severity | Role Action | +|---------|----------|-------------| +| consensus_reached | - | Include action items in report, proceed to Phase 5 | +| consensus_blocked | HIGH | SendMessage with structured format. Do NOT self-revise. | +| consensus_blocked | MEDIUM | SendMessage with warning. Proceed normally. | +| consensus_blocked | LOW | Treat as consensus_reached with notes. | + +### Shared Explore Utility + +Any role needing codebase context calls the explore subagent: + +``` +Task({ + subagent_type: "cli-explore-agent", + run_in_background: false, + description: "Explore ", + prompt: +}) +``` + +**Cache**: Results stored in `explorations/` with `cache-index.json`. Before exploring, always check cache first. + +### Wisdom Accumulation (all roles) + +Cross-task knowledge accumulation. Coordinator creates `wisdom/` directory at session init. 
+ +**Directory**: +``` +/wisdom/ ++-- learnings.md # Patterns and insights ++-- decisions.md # Design and strategy decisions ++-- issues.md # Known risks and issues +``` + +**Worker load** (Phase 2): Extract `Session: ` from task description, read wisdom files. +**Worker contribute** (Phase 4/5): Write discoveries to corresponding wisdom files. + +### Role Isolation Rules + +| Allowed | Prohibited | +|---------|-----------| +| Process own prefix tasks | Process other role's prefix tasks | +| SendMessage to coordinator | Directly communicate with other workers | +| Use tools appropriate to responsibility | Create tasks for other roles | +| Call discuss/explore subagents | Modify resources outside own scope | +| Fast-advance simple successors | Spawn parallel worker batches | +| Report capability_gap to coordinator | Attempt work outside scope | + +Coordinator additionally prohibited: directly write/modify deliverable artifacts, call implementation subagents directly, directly execute analysis/test/review. + +--- + +## Cadence Control + +**Beat model**: Event-driven, each beat = coordinator wake -> process -> spawn -> STOP. + +``` +Beat Cycle (single beat) +====================================================================== + Event Coordinator Workers +---------------------------------------------------------------------- + callback/resume --> +- handleCallback -+ + | mark completed | + | check pipeline | + +- handleSpawnNext -+ + | find ready tasks | + | spawn workers ---+--> [Worker A] Phase 1-5 + | (parallel OK) --+--> [Worker B] Phase 1-5 + +- STOP (idle) -----+ | + | + callback <-----------------------------------------+ + (next beat) SendMessage + TaskUpdate(completed) +====================================================================== + + Fast-Advance (skips coordinator for simple linear successors) +====================================================================== + [Worker A] Phase 5 complete + +- 1 ready task? simple successor? 
--> spawn Worker B directly + +- complex case? --> SendMessage to coordinator +====================================================================== +``` + +**Pipelines are dynamic**: Unlike v4's predefined pipeline beat views (spec-only, impl-only, etc.), team-coordinate pipelines are generated per-task from the dependency graph. The beat model is the same -- only the pipeline shape varies. + +--- + +## Coordinator Spawn Template + +### Standard Worker (single-task role) + +``` +Task({ + subagent_type: "general-purpose", + description: "Spawn worker", + team_name: , + name: "", + run_in_background: true, + prompt: `You are team "" . + +## Primary Instruction +All your work MUST be executed by calling Skill to get role definition: +Skill(skill="team-coordinate", args="--role= --session=") + +Current requirement: +Session: + +## Role Guidelines +- Only process -* tasks, do not execute other role work +- All output prefixed with [] tag +- Only communicate with coordinator +- Do not use TaskCreate to create tasks for other roles +- Before each SendMessage, call mcp__ccw-tools__team_msg to log +- After task completion, check for fast-advance opportunity (see SKILL.md Phase 5) + +## Workflow +1. Call Skill -> get role definition and execution logic +2. Follow role.md 5-Phase flow +3. team_msg + SendMessage results to coordinator +4. TaskUpdate completed -> check next task or fast-advance` +}) +``` + +### Inner Loop Worker (multi-task role) + +``` +Task({ + subagent_type: "general-purpose", + description: "Spawn worker (inner loop)", + team_name: , + name: "", + run_in_background: true, + prompt: `You are team "" . + +## Primary Instruction +All your work MUST be executed by calling Skill to get role definition: +Skill(skill="team-coordinate", args="--role= --session=") + +Current requirement: +Session: + +## Inner Loop Mode +You will handle ALL -* tasks in this session, not just the first one. +After completing each task, loop back to find the next -* task. 
+Only SendMessage to coordinator when: +- All -* tasks are done +- A consensus_blocked HIGH occurs +- Errors accumulate (>= 3) + +## Role Guidelines +- Only process -* tasks, do not execute other role work +- All output prefixed with [] tag +- Only communicate with coordinator +- Do not use TaskCreate to create tasks for other roles +- Before each SendMessage, call mcp__ccw-tools__team_msg to log +- Use subagent calls for heavy work, retain summaries in context` +}) +``` + +--- + +## Session Directory + +``` +.workflow/.team/TC--/ ++-- team-session.json # Session state + dynamic role registry ++-- task-analysis.json # Phase 1 output: capabilities, dependency graph ++-- roles/ # Dynamic role definitions (generated Phase 2) +| +-- .md +| +-- .md ++-- artifacts/ # All MD deliverables from workers +| +-- .md ++-- shared-memory.json # Cross-role state store ++-- wisdom/ # Cross-task knowledge +| +-- learnings.md +| +-- decisions.md +| +-- issues.md ++-- explorations/ # Shared explore cache +| +-- cache-index.json +| +-- explore-.json ++-- discussions/ # Inline discuss records +| +-- .md ++-- .msg/ # Team message bus logs +``` + +### team-session.json Schema + +```json +{ + "session_id": "TC--", + "task_description": "", + "status": "active | paused | completed", + "team_name": "", + "roles": [ + { + "name": "", + "prefix": "", + "responsibility_type": "", + "inner_loop": false, + "role_file": "roles/.md" + } + ], + "pipeline": { + "dependency_graph": {}, + "tasks_total": 0, + "tasks_completed": 0 + }, + "active_workers": [], + "completed_tasks": [], + "created_at": "" +} +``` + +--- + +## Session Resume + +Coordinator supports `--resume` / `--continue` for interrupted sessions: + +1. Scan `.workflow/.team/TC-*/team-session.json` for active/paused sessions +2. Multiple matches -> AskUserQuestion for selection +3. Audit TaskList -> reconcile session state <-> task status +4. Reset in_progress -> pending (interrupted tasks) +5. 
Rebuild team and spawn needed workers only +6. Create missing tasks with correct blockedBy +7. Kick first executable task -> Phase 4 coordination loop + +--- + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Unknown --role value | Check if `/roles/.md` exists; error with message if not | +| Missing --role arg | Orchestration Mode -> coordinator | +| Dynamic role file not found | Error with expected path, coordinator may need to regenerate | +| Built-in role file not found | Error with expected path | +| Command file not found | Fallback to inline execution | +| Discuss subagent fails | Role proceeds without discuss, logs warning | +| Explore cache corrupt | Clear cache, re-explore | +| Fast-advance spawns wrong task | Coordinator reconciles on next callback | +| Session path not provided | Auto-discover from `.workflow/.team/TC-*/team-session.json` | +| capability_gap reported | Coordinator generates new role via handleAdapt | diff --git a/.claude/skills/team-coordinate/roles/coordinator/commands/analyze-task.md b/.claude/skills/team-coordinate/roles/coordinator/commands/analyze-task.md new file mode 100644 index 00000000..40dc696a --- /dev/null +++ b/.claude/skills/team-coordinate/roles/coordinator/commands/analyze-task.md @@ -0,0 +1,175 @@ +# Command: analyze-task + +## Purpose + +Parse user task description -> detect required capabilities -> build dependency graph -> design dynamic roles. This replaces v4's static mode selection with intelligent task decomposition. 
+ +## Phase 2: Context Loading + +| Input | Source | Required | +|-------|--------|----------| +| Task description | User input from Phase 1 | Yes | +| Clarification answers | AskUserQuestion results (if any) | No | +| Session folder | From coordinator Phase 2 | Yes | + +## Phase 3: Task Analysis + +### Step 1: Signal Detection + +Scan task description for capability keywords: + +| Signal | Keywords | Capability | Prefix | Responsibility Type | +|--------|----------|------------|--------|---------------------| +| Research | investigate, explore, compare, survey, find, research, discover, benchmark, study | researcher | RESEARCH | orchestration | +| Writing | write, draft, document, article, report, blog, describe, explain, summarize, content | writer | DRAFT | code-gen (docs) | +| Coding | implement, build, code, fix, refactor, develop, create app, program, migrate, port | developer | IMPL | code-gen (code) | +| Design | design, architect, plan, structure, blueprint, model, schema, wireframe, layout | designer | DESIGN | orchestration | +| Analysis | analyze, review, audit, assess, evaluate, inspect, examine, diagnose, profile | analyst | ANALYSIS | read-only | +| Testing | test, verify, validate, QA, quality, check, assert, coverage, regression | tester | TEST | validation | +| Planning | plan, breakdown, organize, schedule, decompose, roadmap, strategy, prioritize | planner | PLAN | orchestration | + +**Multi-match**: A task may trigger multiple capabilities. E.g., "research and write a technical article" triggers both `researcher` and `writer`. + +**No match**: If no keywords match, default to a single `general` capability with `TASK` prefix. 
+ +### Step 2: Artifact Inference + +Each capability produces default output artifacts: + +| Capability | Default Artifact | Format | +|------------|-----------------|--------| +| researcher | Research findings | `/artifacts/research-findings.md` | +| writer | Written document(s) | `/artifacts/.md` | +| developer | Code implementation | Source files + `/artifacts/implementation-summary.md` | +| designer | Design document | `/artifacts/design-spec.md` | +| analyst | Analysis report | `/artifacts/analysis-report.md` | +| tester | Test results | `/artifacts/test-report.md` | +| planner | Execution plan | `/artifacts/execution-plan.md` | + +### Step 3: Dependency Graph Construction + +Build a DAG of work streams using these inference rules: + +| Pattern | Shape | Example | +|---------|-------|---------| +| Knowledge -> Creation | research blockedBy nothing, creation blockedBy research | RESEARCH-001 -> DRAFT-001 | +| Design -> Build | design first, build after | DESIGN-001 -> IMPL-001 | +| Build -> Validate | build first, test/review after | IMPL-001 -> TEST-001 + ANALYSIS-001 | +| Plan -> Execute | plan first, execute after | PLAN-001 -> IMPL-001 | +| Independent parallel | no dependency between them | DRAFT-001 || IMPL-001 | +| Analysis -> Revise | analysis finds issues, revise artifact | ANALYSIS-001 -> DRAFT-002 | + +**Graph construction algorithm**: + +1. Group capabilities by natural ordering: knowledge-gathering -> design/planning -> creation -> validation +2. Within same tier: capabilities are parallel unless task description implies sequence +3. Between tiers: downstream blockedBy upstream +4. 
Single-capability tasks: one node, no dependencies + +**Natural ordering tiers**: + +| Tier | Capabilities | Description | +|------|-------------|-------------| +| 0 | researcher, planner | Knowledge gathering / planning | +| 1 | designer | Design (requires context from tier 0 if present) | +| 2 | writer, developer | Creation (requires design/plan if present) | +| 3 | analyst, tester | Validation (requires artifacts to validate) | + +### Step 4: Complexity Scoring + +| Factor | Weight | Condition | +|--------|--------|-----------| +| Capability count | +1 each | Number of distinct capabilities | +| Cross-domain factor | +2 | Capabilities span 3+ tiers | +| Parallel tracks | +1 each | Independent parallel work streams | +| Serial depth | +1 per level | Longest dependency chain length | + +| Total Score | Complexity | Role Limit | +|-------------|------------|------------| +| 1-3 | Low | 1-2 roles | +| 4-6 | Medium | 2-3 roles | +| 7+ | High | 3-5 roles | + +### Step 5: Role Minimization + +Apply merging rules to reduce role count: + +| Rule | Condition | Action | +|------|-----------|--------| +| Absorb trivial | Capability has exactly 1 task AND no explore needed | Merge into nearest related role | +| Merge overlap | Two capabilities share >50% keywords from task description | Combine into single role | +| Coordinator inline | Planner capability with 1 task, no explore | Coordinator handles inline, no separate role | +| Cap at 5 | More than 5 roles after initial assignment | Merge lowest-priority pairs (priority: developer > researcher > writer > designer > analyst > planner > tester -- see merge priority below) | + +**Merge priority** (when two must merge, keep the higher-priority one as the role name): + +1. developer (code-gen is hardest to merge) +2. researcher (context-gathering is foundational) +3. writer (document generation has specific patterns) +4. designer (design has specific outputs) +5. analyst (analysis can be absorbed by reviewer pattern) +6. 
planner (can be absorbed by coordinator) +7. tester (can be absorbed by developer or analyst) + +## Phase 4: Output + +Write `/task-analysis.json`: + +```json +{ + "task_description": "", + "capabilities": [ + { + "name": "researcher", + "prefix": "RESEARCH", + "responsibility_type": "orchestration", + "tasks": [ + { "id": "RESEARCH-001", "description": "..." } + ], + "artifacts": ["research-findings.md"] + } + ], + "dependency_graph": { + "RESEARCH-001": [], + "DRAFT-001": ["RESEARCH-001"], + "ANALYSIS-001": ["DRAFT-001"] + }, + "roles": [ + { + "name": "researcher", + "prefix": "RESEARCH", + "responsibility_type": "orchestration", + "task_count": 1, + "inner_loop": false + }, + { + "name": "writer", + "prefix": "DRAFT", + "responsibility_type": "code-gen (docs)", + "task_count": 1, + "inner_loop": false + } + ], + "complexity": { + "capability_count": 2, + "cross_domain_factor": false, + "parallel_tracks": 0, + "serial_depth": 2, + "total_score": 3, + "level": "low" + }, + "artifacts": [ + { "name": "research-findings.md", "producer": "researcher", "path": "artifacts/research-findings.md" }, + { "name": "article-draft.md", "producer": "writer", "path": "artifacts/article-draft.md" } + ] +} +``` + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| No capabilities detected | Default to single `general` role with TASK prefix | +| Circular dependency in graph | Break cycle at lowest-tier edge, warn | +| Task description too vague | Return minimal analysis, coordinator will AskUserQuestion | +| All capabilities merge into one | Valid -- single-role execution, no team overhead | diff --git a/.claude/skills/team-coordinate/roles/coordinator/commands/dispatch.md b/.claude/skills/team-coordinate/roles/coordinator/commands/dispatch.md new file mode 100644 index 00000000..0fc4948f --- /dev/null +++ b/.claude/skills/team-coordinate/roles/coordinator/commands/dispatch.md @@ -0,0 +1,85 @@ +# Command: dispatch + +## Purpose + +Create task chains from 
dynamic dependency graphs. Unlike v4's static mode-to-pipeline mapping, team-coordinate builds pipelines from the task-analysis.json produced by Phase 1. + +## Phase 2: Context Loading + +| Input | Source | Required | +|-------|--------|----------| +| Task analysis | `/task-analysis.json` | Yes | +| Session file | `/team-session.json` | Yes | +| Role registry | `team-session.json#roles` | Yes | +| Scope | User requirements description | Yes | + +## Phase 3: Task Chain Creation + +### Workflow + +1. **Read dependency graph** from `task-analysis.json#dependency_graph` +2. **Topological sort** tasks to determine creation order +3. **Validate** all task owners exist in role registry +4. **For each task** (in topological order): + +``` +TaskCreate({ + subject: "-", + owner: "", + description: "\nSession: \nScope: \nInnerLoop: ", + blockedBy: [], + status: "pending" +}) +``` + +5. **Update team-session.json** with pipeline and tasks_total +6. **Validate** created chain + +### Task Description Template + +Every task description includes session path and inner loop flag: + +``` + +Session: +Scope: +InnerLoop: +``` + +### InnerLoop Flag Rules + +| Condition | InnerLoop | +|-----------|-----------| +| Role has 2+ serial same-prefix tasks | true | +| Role has 1 task | false | +| Tasks are parallel (no dependency between them) | false | + +### Dependency Validation + +| Check | Criteria | +|-------|----------| +| No orphan tasks | Every task is reachable from at least one root | +| No circular deps | Topological sort succeeds without cycle | +| All owners valid | Every task owner exists in team-session.json#roles | +| All blockedBy valid | Every blockedBy references an existing task subject | +| Session reference | Every task description contains `Session: ` | + +## Phase 4: Validation + +| Check | Criteria | +|-------|----------| +| Task count | Matches dependency_graph node count | +| Dependencies | Every blockedBy references an existing task subject | +| Owner assignment | 
Each task owner is in role registry | +| Session reference | Every task description contains `Session:` | +| Pipeline integrity | No disconnected subgraphs (warn if found) | + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Circular dependency detected | Report cycle, halt task creation | +| Owner not in role registry | Error, coordinator must fix roles first | +| TaskCreate fails | Log error, report to coordinator | +| Duplicate task subject | Skip creation, log warning | +| Empty dependency graph | Error, task analysis may have failed | diff --git a/.claude/skills/team-coordinate/roles/coordinator/commands/monitor.md b/.claude/skills/team-coordinate/roles/coordinator/commands/monitor.md new file mode 100644 index 00000000..9b45cf05 --- /dev/null +++ b/.claude/skills/team-coordinate/roles/coordinator/commands/monitor.md @@ -0,0 +1,274 @@ +# Command: monitor + +## Purpose + +Event-driven pipeline coordination with Spawn-and-Stop pattern. Adapted from v4 for dynamic roles -- role names are read from `team-session.json#roles` instead of hardcoded. Includes `handleAdapt` for mid-pipeline capability gap handling. + +## Constants + +| Constant | Value | Description | +|----------|-------|-------------| +| SPAWN_MODE | background | All workers spawned via `Task(run_in_background: true)` | +| ONE_STEP_PER_INVOCATION | true | Coordinator does one operation then STOPS | +| FAST_ADVANCE_AWARE | true | Workers may skip coordinator for simple linear successors | + +## Phase 2: Context Loading + +| Input | Source | Required | +|-------|--------|----------| +| Session file | `/team-session.json` | Yes | +| Task list | `TaskList()` | Yes | +| Active workers | session.active_workers[] | Yes | +| Role registry | session.roles[] | Yes | + +**Dynamic role resolution**: Known worker roles are loaded from `session.roles[].name` rather than a static list. This is the key difference from v4. 
+ +## Phase 3: Handler Routing + +### Wake-up Source Detection + +Parse `$ARGUMENTS` to determine handler: + +| Priority | Condition | Handler | +|----------|-----------|---------| +| 1 | Message contains `[]` from session roles | handleCallback | +| 2 | Contains "capability_gap" | handleAdapt | +| 3 | Contains "check" or "status" | handleCheck | +| 4 | Contains "resume", "continue", or "next" | handleResume | +| 5 | None of the above (initial spawn after dispatch) | handleSpawnNext | + +--- + +### Handler: handleCallback + +Worker completed a task. Verify completion, update state, auto-advance. + +``` +Receive callback from [] + +- Find matching active worker by role (from session.roles) + +- Is this a progress update (not final)? (Inner Loop intermediate task completion) + | +- YES -> Update session state, do NOT remove from active_workers -> STOP + +- Task status = completed? + | +- YES -> remove from active_workers -> update session + | | +- -> handleSpawnNext + | +- NO -> progress message, do not advance -> STOP + +- No matching worker found + +- Scan all active workers for completed tasks + +- Found completed -> process each -> handleSpawnNext + +- None completed -> STOP +``` + +**Fast-advance note**: A worker may have already spawned its successor via fast-advance. When processing a callback: +1. Check if the expected next task is already `in_progress` (fast-advanced) +2. If yes -> skip spawning that task, update active_workers to include the fast-advanced worker +3. If no -> normal handleSpawnNext + +--- + +### Handler: handleCheck + +Read-only status report. No pipeline advancement. 
+ +**Output format**: + +``` +[coordinator] Pipeline Status +[coordinator] Progress: / (%) + +[coordinator] Execution Graph: + + + done=completed >>>=running o=pending .=not created + +[coordinator] Active Workers: + > () - running [inner-loop: N/M tasks done] + +[coordinator] Ready to spawn: +[coordinator] Commands: 'resume' to advance | 'check' to refresh +``` + +**Icon mapping**: completed=done, in_progress=>>>, pending=o, not created=. + +**Graph rendering**: Read dependency_graph from task-analysis.json, render each node with status icon. Show parallel branches side-by-side. + +Then STOP. + +--- + +### Handler: handleResume + +Check active worker completion, process results, advance pipeline. + +``` +Load active_workers from session + +- No active workers -> handleSpawnNext + +- Has active workers -> check each: + +- status = completed -> mark done, log + +- status = in_progress -> still running, log + +- other status -> worker failure -> reset to pending + After processing: + +- Some completed -> handleSpawnNext + +- All still running -> report status -> STOP + +- All failed -> handleSpawnNext (retry) +``` + +--- + +### Handler: handleSpawnNext + +Find all ready tasks, spawn workers in background, update session, STOP. + +``` +Collect task states from TaskList() + +- completedSubjects: status = completed + +- inProgressSubjects: status = in_progress + +- readySubjects: pending + all blockedBy in completedSubjects + +Ready tasks found? + +- NONE + work in progress -> report waiting -> STOP + +- NONE + nothing in progress -> PIPELINE_COMPLETE -> Phase 5 + +- HAS ready tasks -> for each: + +- Is task owner an Inner Loop role AND that role already has an active_worker? 
+ | +- YES -> SKIP spawn (existing worker will pick it up via inner loop) + | +- NO -> normal spawn below + +- TaskUpdate -> in_progress + +- team_msg log -> task_unblocked + +- Spawn worker (see spawn tool call below) + +- Add to session.active_workers + Update session file -> output summary -> STOP +``` + +**Spawn worker tool call** (one per ready task): + +``` +Task({ + subagent_type: "general-purpose", + description: "Spawn worker for ", + team_name: , + name: "", + run_in_background: true, + prompt: "" +}) +``` + +--- + +### Handler: handleAdapt + +Handle mid-pipeline capability gap discovery. A worker reports `capability_gap` when it encounters work outside its scope. + +``` +Parse capability_gap message: + +- Extract: gap_description, requesting_role, suggested_capability + +- Validate gap is genuine: + +- Check existing roles in session.roles -> does any role cover this? + | +- YES -> redirect: SendMessage to that role's owner -> STOP + | +- NO -> genuine gap, proceed to role generation + +- Generate new role: + 1. Read specs/role-template.md + 2. Fill template with capability details from gap description + 3. Write new role file to /roles/.md + 4. Add to session.roles[] + +- Create new task(s): + TaskCreate({ + subject: "-001", + owner: "", + description: "\nSession: \nInnerLoop: false", + blockedBy: [], + status: "pending" + }) + +- Update team-session.json: add role, increment tasks_total + +- Spawn new worker -> STOP +``` + +--- + +### Worker Failure Handling + +When a worker has unexpected status (not completed, not in_progress): + +1. Reset task -> pending via TaskUpdate +2. Log via team_msg (type: error) +3. 
Report to user: task reset, will retry on next resume + +### Fast-Advance Failure Recovery + +When coordinator detects a fast-advanced task has failed (task in_progress but no callback and worker gone): + +``` +handleCallback / handleResume detects: + +- Task is in_progress (was fast-advanced by predecessor) + +- No active_worker entry for this task + +- Original fast-advancing worker has already completed and exited + +- Resolution: + 1. TaskUpdate -> reset task to pending + 2. Remove stale active_worker entry (if any) + 3. Log via team_msg (type: error, summary: "Fast-advanced task failed, resetting for retry") + 4. -> handleSpawnNext (will re-spawn the task normally) +``` + +**Detection in handleResume**: + +``` +For each in_progress task in TaskList(): + +- Has matching active_worker? -> normal, skip + +- No matching active_worker? -> orphaned (likely fast-advance failure) + +- Check creation time: if > 5 minutes with no progress callback + +- Reset to pending -> handleSpawnNext +``` + +**Prevention**: Fast-advance failures are self-healing. The coordinator reconciles orphaned tasks on every `resume`/`check` cycle. 
+ +### Consensus-Blocked Handling + +When a worker reports `consensus_blocked` in its callback: + +``` +handleCallback receives message with consensus_blocked flag + +- Extract: divergence_severity, blocked_round, action_recommendation + +- Route by severity: + | + +- severity = HIGH + | +- Create REVISION task: + | +- Same role, same doc type, incremented suffix (e.g., DRAFT-001-R1) + | +- Description includes: divergence details + action items from discuss + | +- blockedBy: none (immediate execution) + | +- Max 1 revision per task (DRAFT-001 -> DRAFT-001-R1, no R2) + | +- If already revised once -> PAUSE, escalate to user + | +- Update session: mark task as "revised", log revision chain + | + +- severity = MEDIUM + | +- Proceed with warning: include divergence in next task's context + | +- Log action items to wisdom/issues.md + | +- Normal handleSpawnNext + | + +- severity = LOW + +- Proceed normally: treat as consensus_reached with notes + +- Normal handleSpawnNext +``` + +## Phase 4: Validation + +| Check | Criteria | +|-------|----------| +| Session state consistent | active_workers matches TaskList in_progress tasks | +| No orphaned tasks | Every in_progress task has an active_worker entry | +| Dynamic roles valid | All task owners exist in session.roles | +| Completion detection | readySubjects=0 + inProgressSubjects=0 -> PIPELINE_COMPLETE | +| Fast-advance tracking | Detect tasks already in_progress via fast-advance, sync to active_workers | +| Fast-advance orphan check | in_progress tasks without active_worker entry -> reset to pending | + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Session file not found | Error, suggest re-initialization | +| Worker callback from unknown role | Log info, scan for other completions | +| All workers still running on resume | Report status, suggest check later | +| Pipeline stall (no ready, no running) | Check for missing tasks, report to user | +| Fast-advance conflict | Coordinator 
reconciles, no duplicate spawns | +| Fast-advance task orphaned | Reset to pending, re-spawn via handleSpawnNext | +| Dynamic role file not found | Error, coordinator must regenerate from task-analysis | +| capability_gap from completed role | Validate gap, generate role if genuine | +| consensus_blocked HIGH | Create revision task (max 1) or pause for user | +| consensus_blocked MEDIUM | Proceed with warning, log to wisdom/issues.md | diff --git a/.claude/skills/team-coordinate/roles/coordinator/role.md b/.claude/skills/team-coordinate/roles/coordinator/role.md new file mode 100644 index 00000000..60b71e42 --- /dev/null +++ b/.claude/skills/team-coordinate/roles/coordinator/role.md @@ -0,0 +1,233 @@ +# Coordinator Role + +Orchestrate the team-coordinate workflow: task analysis, dynamic role generation, task dispatching, progress monitoring, session state. The sole built-in role -- all worker roles are generated at runtime. + +## Identity + +- **Name**: `coordinator` | **Tag**: `[coordinator]` +- **Responsibility**: Analyze task -> Generate roles -> Create team -> Dispatch tasks -> Monitor progress -> Report results + +## Boundaries + +### MUST +- Analyze user task to detect capabilities and build dependency graph +- Dynamically generate worker roles from specs/role-template.md +- Create team and spawn worker subagents in background +- Dispatch tasks with proper dependency chains from task-analysis.json +- Monitor progress via worker callbacks and route messages +- Maintain session state persistence (team-session.json) +- Handle capability_gap reports (generate new roles mid-pipeline) +- Handle consensus_blocked HIGH verdicts (create revision tasks or pause) +- Detect fast-advance orphans on resume/check and reset to pending + +### MUST NOT +- Execute task work directly (delegate to workers) +- Modify task output artifacts (workers own their deliverables) +- Call implementation subagents (code-developer, etc.) 
directly +- Skip dependency validation when creating task chains +- Generate more than 5 worker roles (merge if exceeded) +- Override consensus_blocked HIGH without user confirmation + +> **Core principle**: coordinator is the orchestrator, not the executor. All actual work is delegated to dynamically generated worker roles. + +--- + +## Entry Router + +When coordinator is invoked, first detect the invocation type: + +| Detection | Condition | Handler | +|-----------|-----------|---------| +| Worker callback | Message contains `[role-name]` from session roles | -> handleCallback | +| Status check | Arguments contain "check" or "status" | -> handleCheck | +| Manual resume | Arguments contain "resume" or "continue" | -> handleResume | +| Capability gap | Message contains "capability_gap" | -> handleAdapt | +| New session | None of above | -> Phase 0 | + +For callback/check/resume/adapt: load `commands/monitor.md` and execute the appropriate handler, then STOP. + +--- + +## Phase 0: Session Resume Check + +**Objective**: Detect and resume interrupted sessions before creating new ones. + +**Workflow**: +1. Scan `.workflow/.team/TC-*/team-session.json` for sessions with status "active" or "paused" +2. No sessions found -> proceed to Phase 1 +3. Single session found -> resume it (-> Session Reconciliation) +4. Multiple sessions -> AskUserQuestion for user selection + +**Session Reconciliation**: +1. Audit TaskList -> get real status of all tasks +2. Reconcile: session.completed_tasks <-> TaskList status (bidirectional sync) +3. Reset any in_progress tasks -> pending (they were interrupted) +4. Detect fast-advance orphans (in_progress without recent activity) -> reset to pending +5. Determine remaining pipeline from reconciled state +6. Rebuild team if disbanded (TeamCreate + spawn needed workers only) +7. Create missing tasks with correct blockedBy dependencies +8. Verify dependency chain integrity +9. Update session file with reconciled state +10. 
Kick first executable task's worker -> Phase 4 + +--- + +## Phase 1: Task Analysis + +**Objective**: Parse user task, detect capabilities, build dependency graph, design roles. + +**Workflow**: + +1. **Parse user task description** + +2. **Clarify if ambiguous** via AskUserQuestion: + - What is the scope? (specific files, module, project-wide) + - What deliverables are expected? (documents, code, analysis reports) + - Any constraints? (timeline, technology, style) + +3. **Delegate to `commands/analyze-task.md`**: + - Signal detection: scan keywords -> infer capabilities + - Artifact inference: each capability -> default output type (.md) + - Dependency graph: build DAG of work streams + - Complexity scoring: count capabilities, cross-domain factor, parallel tracks + - Role minimization: merge overlapping, absorb trivial, cap at 5 + +4. **Output**: Write `/task-analysis.json` + +**Success**: Task analyzed, capabilities detected, dependency graph built, roles designed. + +--- + +## Phase 2: Generate Roles + Initialize Session + +**Objective**: Create session, generate dynamic role files, initialize shared infrastructure. + +**Workflow**: + +1. **Generate session ID**: `TC--` (slug from first 3 meaningful words of task) + +2. **Create session folder structure**: + ``` + .workflow/.team// + +-- roles/ + +-- artifacts/ + +-- wisdom/ + +-- explorations/ + +-- discussions/ + +-- .msg/ + ``` + +3. **Call TeamCreate** with team name derived from session ID + +4. **Read `specs/role-template.md`** + `task-analysis.json` + +5. **For each role in task-analysis.json#roles**: + - Fill role template with: + - role_name, prefix, responsibility_type from analysis + - Phase 2-4 content from responsibility type reference sections in template + - inner_loop flag from analysis (true if role has 2+ serial tasks) + - Task-specific instructions from task description + - Write generated role file to `/roles/.md` + +6. **Register roles** in team-session.json#roles + +7. 
**Initialize shared infrastructure**: + - `wisdom/learnings.md`, `wisdom/decisions.md`, `wisdom/issues.md` (empty with headers) + - `explorations/cache-index.json` (`{ "entries": [] }`) + - `shared-memory.json` (`{}`) + - `discussions/` (empty directory) + +8. **Write team-session.json** with: session_id, task_description, status="active", roles, pipeline (empty), active_workers=[], created_at + +**Success**: Session created, role files generated, shared infrastructure initialized. + +--- + +## Phase 3: Create Task Chain + +**Objective**: Dispatch tasks based on dependency graph with proper dependencies. + +Delegate to `commands/dispatch.md` which creates the full task chain: +1. Reads dependency_graph from task-analysis.json +2. Topologically sorts tasks +3. Creates tasks via TaskCreate with correct blockedBy +4. Assigns owner based on role mapping from task-analysis.json +5. Includes `Session: ` in every task description +6. Sets InnerLoop flag for multi-task roles +7. Updates team-session.json with pipeline and tasks_total + +**Success**: All tasks created with correct dependency chains, session updated. + +--- + +## Phase 4: Spawn-and-Stop + +**Objective**: Spawn first batch of ready workers in background, then STOP. + +**Design**: Spawn-and-Stop + Callback pattern, with worker fast-advance. +- Spawn workers with `Task(run_in_background: true)` -> immediately return +- Worker completes -> may fast-advance to next task OR SendMessage callback -> auto-advance +- User can use "check" / "resume" to manually advance +- Coordinator does one operation per invocation, then STOPS + +**Workflow**: +1. Load `commands/monitor.md` +2. Find tasks with: status=pending, blockedBy all resolved, owner assigned +3. For each ready task -> spawn worker (see SKILL.md Coordinator Spawn Template) + - Use Standard Worker template for single-task roles + - Use Inner Loop Worker template for multi-task roles +4. Output status summary with execution graph +5. 
STOP + +**Pipeline advancement** driven by three wake sources: +- Worker callback (automatic) -> Entry Router -> handleCallback +- User "check" -> handleCheck (status only) +- User "resume" -> handleResume (advance) + +--- + +## Phase 5: Report + Next Steps + +**Objective**: Completion report and follow-up options. + +**Workflow**: +1. Load session state -> count completed tasks, duration +2. List all deliverables with output paths in `/artifacts/` +3. Include discussion summaries (if inline discuss was used) +4. Summarize wisdom accumulated during execution +5. Update session status -> "completed" +6. Offer next steps: exit / view artifacts / extend with additional tasks + +**Output format**: + +``` +[coordinator] ============================================ +[coordinator] TASK COMPLETE +[coordinator] +[coordinator] Deliverables: +[coordinator] - () +[coordinator] - () +[coordinator] +[coordinator] Pipeline: / tasks +[coordinator] Roles: +[coordinator] Duration: +[coordinator] +[coordinator] Session: +[coordinator] ============================================ +``` + +--- + +## Error Handling + +| Error | Resolution | +|-------|------------| +| Task timeout | Log, mark failed, ask user to retry or skip | +| Worker crash | Respawn worker, reassign task | +| Dependency cycle | Detect in task analysis, report to user, halt | +| Task description too vague | AskUserQuestion for clarification | +| Session corruption | Attempt recovery, fallback to manual reconciliation | +| Role generation fails | Fall back to single general-purpose role | +| capability_gap reported | handleAdapt: generate new role, create tasks, spawn | +| All capabilities merge to one | Valid: single-role execution, reduced overhead | +| No capabilities detected | Default to single general role with TASK prefix | diff --git a/.claude/skills/team-coordinate/specs/role-template.md b/.claude/skills/team-coordinate/specs/role-template.md new file mode 100644 index 00000000..6b7fb7be --- /dev/null +++ 
b/.claude/skills/team-coordinate/specs/role-template.md @@ -0,0 +1,432 @@ +# Dynamic Role Template + +Template used by coordinator to generate worker role.md files at runtime. Each generated role is written to `/roles/.md`. + +## Template + +```markdown +# Role: + + + +## Identity + +- **Name**: `` | **Tag**: `[]` +- **Task Prefix**: `-*` +- **Responsibility**: + +- **Mode**: Inner Loop (handle all `-*` tasks in single agent) + + +## Boundaries + +### MUST +- Only process `-*` prefixed tasks +- All output (SendMessage, team_msg, logs) must carry `[]` identifier +- Only communicate with coordinator via SendMessage +- Work strictly within responsibility scope +- Use fast-advance for simple linear successors (see SKILL.md Phase 5) +- Produce MD artifacts in `/artifacts/` + +- Use subagent for heavy work (do not execute CLI/generation in main agent context) +- Maintain context_accumulator across tasks within the inner loop +- Loop through all `-*` tasks before reporting to coordinator + + +### MUST NOT +- Execute work outside this role's responsibility scope +- Communicate directly with other worker roles (must go through coordinator) +- Create tasks for other roles (TaskCreate is coordinator-exclusive) +- Modify files or resources outside this role's scope +- Omit `[]` identifier in any output +- Fast-advance when multiple tasks are ready or at checkpoint boundaries + +- Execute heavy work (CLI calls, large document generation) in main agent (delegate to subagent) +- SendMessage to coordinator mid-loop (unless consensus_blocked HIGH or error count >= 3) + + +## Toolbox + +| Tool | Purpose | +|------|---------| + + +## Message Types + +| Type | Direction | Description | +|------|-----------|-------------| +| `_complete` | -> coordinator | Task completed with artifact path | +| `_error` | -> coordinator | Error encountered | +| `capability_gap` | -> coordinator | Work outside role scope discovered | + +## Message Bus + +Before every SendMessage, log via 
`mcp__ccw-tools__team_msg`: + +``` +mcp__ccw-tools__team_msg({ + operation: "log", + team: , + from: "", + to: "coordinator", + type: , + summary: "[] complete: ", + ref: +}) +``` + +**CLI fallback** (when MCP unavailable): + +``` +Bash("ccw team log --team --from --to coordinator --type --summary \"[] complete\" --ref --json") +``` + +--- + +## Execution (5-Phase) + +### Phase 1: Task Discovery + +> See SKILL.md Shared Infrastructure -> Worker Phase 1: Task Discovery + +Standard task discovery flow: TaskList -> filter by prefix `-*` + owner match + pending + unblocked -> TaskGet -> TaskUpdate in_progress. + +### Phase 2: + + + +### Phase 3: + + + +### Phase 4: + + + + +### Phase 4b: Inline Discuss (optional) + +After primary work, optionally call discuss subagent: + +``` +Task({ + subagent_type: "cli-discuss-agent", + run_in_background: false, + description: "Discuss ", + prompt: "## Multi-Perspective Critique: + See subagents/discuss-subagent.md for prompt template. + Perspectives: " +}) +``` + +| Verdict | Severity | Action | +|---------|----------|--------| +| consensus_reached | - | Include action items in report, proceed to Phase 5 | +| consensus_blocked | HIGH | Phase 5 SendMessage includes structured consensus_blocked format. Do NOT self-revise. | +| consensus_blocked | MEDIUM | Phase 5 SendMessage includes warning. Proceed normally. | +| consensus_blocked | LOW | Treat as consensus_reached with notes. | + + + +### Phase 5-L: Loop Completion (Inner Loop) + +When more same-prefix tasks remain: + +1. **TaskUpdate**: Mark current task completed +2. **team_msg**: Log task completion +3. **Accumulate summary**: + ``` + context_accumulator.append({ + task: "", + artifact: "", + key_decisions: , + discuss_verdict: , + summary: + }) + ``` +4. **Interrupt check**: + - consensus_blocked HIGH -> SendMessage -> STOP + - Error count >= 3 -> SendMessage -> STOP +5. **Loop**: Back to Phase 1 + +**Does NOT**: SendMessage to coordinator, Fast-Advance spawn. 
+ +### Phase 5-F: Final Report (Inner Loop) + +When all same-prefix tasks are done: + +1. **TaskUpdate**: Mark last task completed +2. **team_msg**: Log completion +3. **Summary report**: All tasks summary + discuss results + artifact paths +4. **Fast-Advance check**: Check cross-prefix successors +5. **SendMessage** or **spawn successor** + +> See SKILL.md Shared Infrastructure -> Worker Phase 5: Report + Fast-Advance + + +### Phase 5: Report + Fast-Advance + +> See SKILL.md Shared Infrastructure -> Worker Phase 5: Report + Fast-Advance + +Standard report flow: team_msg log -> SendMessage with `[]` prefix -> TaskUpdate completed -> Fast-Advance Check -> Loop to Phase 1 for next task. + + +--- + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| No -* tasks available | Idle, wait for coordinator assignment | +| Context file not found | Notify coordinator, request location | +| Subagent fails | Retry once with fallback; still fails -> log error, continue next task | +| Fast-advance spawn fails | Fall back to SendMessage to coordinator | + +| Cumulative 3 task failures | SendMessage to coordinator, STOP inner loop | +| Agent crash mid-loop | Coordinator detects orphan on resume -> re-spawn -> resume from interrupted task | + +| Work outside scope discovered | SendMessage capability_gap to coordinator | +| Critical issue beyond scope | SendMessage fix_required to coordinator | +``` + +--- + +## Phase 2-4 Content by Responsibility Type + +Reference sections for coordinator to fill when generating roles. Select the matching section based on `responsibility_type`. + +### orchestration + +**Phase 2: Context Assessment** + +``` +| Input | Source | Required | +|-------|--------|----------| +| Task description | From TaskGet | Yes | +| Shared memory | /shared-memory.json | No | +| Prior artifacts | /artifacts/ | No | +| Wisdom | /wisdom/ | No | + +Loading steps: +1. Extract session path from task description +2. 
Read shared-memory.json for cross-role context +3. Read prior artifacts (if any exist from upstream tasks) +4. Load wisdom files for accumulated knowledge +5. Optionally call explore subagent for codebase context +``` + +**Phase 3: Subagent Execution** + +``` +Delegate to appropriate subagent based on task: + +Task({ + subagent_type: "general-purpose", + run_in_background: false, + description: " for ", + prompt: "## Task + - + - Session: + ## Context + + ## Expected Output + Write artifact to: /artifacts/.md + Return JSON summary: { artifact_path, summary, key_decisions[], warnings[] }" +}) +``` + +**Phase 4: Result Aggregation** + +``` +1. Verify subagent output artifact exists +2. Read artifact, validate structure/completeness +3. Update shared-memory.json with key findings +4. Write insights to wisdom/ files +``` + +### code-gen (docs) + +**Phase 2: Load Prior Context** + +``` +| Input | Source | Required | +|-------|--------|----------| +| Task description | From TaskGet | Yes | +| Prior artifacts | /artifacts/ from upstream tasks | Conditional | +| Shared memory | /shared-memory.json | No | +| Wisdom | /wisdom/ | No | + +Loading steps: +1. Extract session path from task description +2. Read upstream artifacts (e.g., research findings for a writer) +3. Read shared-memory.json for cross-role context +4. Load wisdom for accumulated decisions +``` + +**Phase 3: Document Generation** + +``` +Task({ + subagent_type: "universal-executor", + run_in_background: false, + description: "Generate for ", + prompt: "## Task + - Generate: + - Session: + ## Prior Context + + ## Instructions + + ## Expected Output + Write document to: /artifacts/.md + Return JSON: { artifact_path, summary, key_decisions[], sections_generated[], warnings[] }" +}) +``` + +**Phase 4: Structure Validation** + +``` +1. Verify document artifact exists +2. Check document has expected sections +3. Validate no placeholder text remains +4. 
Update shared-memory.json with document metadata +``` + +### code-gen (code) + +**Phase 2: Load Plan/Specs** + +``` +| Input | Source | Required | +|-------|--------|----------| +| Task description | From TaskGet | Yes | +| Plan/design artifacts | /artifacts/ | Conditional | +| Shared memory | /shared-memory.json | No | +| Wisdom | /wisdom/ | No | + +Loading steps: +1. Extract session path from task description +2. Read plan/design artifacts from upstream +3. Load shared-memory.json for implementation context +4. Load wisdom for conventions and patterns +``` + +**Phase 3: Code Implementation** + +``` +Task({ + subagent_type: "code-developer", + run_in_background: false, + description: "Implement ", + prompt: "## Task + - + - Session: + ## Plan/Design Context + + ## Instructions + + ## Expected Output + Implement code changes. + Write summary to: /artifacts/implementation-summary.md + Return JSON: { artifact_path, summary, files_changed[], key_decisions[], warnings[] }" +}) +``` + +**Phase 4: Syntax Validation** + +``` +1. Run syntax check (tsc --noEmit or equivalent) +2. Verify all planned files exist +3. Check no broken imports +4. If validation fails -> attempt auto-fix (max 2 attempts) +5. Write implementation summary to artifacts/ +``` + +### read-only + +**Phase 2: Target Loading** + +``` +| Input | Source | Required | +|-------|--------|----------| +| Task description | From TaskGet | Yes | +| Target artifacts/files | From task description or upstream | Yes | +| Shared memory | /shared-memory.json | No | + +Loading steps: +1. Extract session path and target files from task description +2. Read target artifacts or source files for analysis +3. 
Load shared-memory.json for context +``` + +**Phase 3: Multi-Dimension Analysis** + +``` +Task({ + subagent_type: "general-purpose", + run_in_background: false, + description: "Analyze for ", + prompt: "## Task + - Analyze: + - Dimensions: + - Session: + ## Target Content + + ## Expected Output + Write report to: /artifacts/analysis-report.md + Return JSON: { artifact_path, summary, findings[], severity_counts: {critical, high, medium, low} }" +}) +``` + +**Phase 4: Severity Classification** + +``` +1. Verify analysis report exists +2. Classify findings by severity (Critical/High/Medium/Low) +3. Update shared-memory.json with key findings +4. Write issues to wisdom/issues.md +``` + +### validation + +**Phase 2: Environment Detection** + +``` +| Input | Source | Required | +|-------|--------|----------| +| Task description | From TaskGet | Yes | +| Implementation artifacts | Upstream code changes | Yes | + +Loading steps: +1. Detect test framework from project files +2. Get changed files from implementation +3. Identify test command and coverage tool +``` + +**Phase 3: Test-Fix Cycle** + +``` +Task({ + subagent_type: "test-fix-agent", + run_in_background: false, + description: "Test-fix for ", + prompt: "## Task + - Run tests and fix failures + - Session: + - Max iterations: 5 + ## Changed Files + + ## Expected Output + Write report to: /artifacts/test-report.md + Return JSON: { artifact_path, pass_rate, coverage, iterations_used, remaining_failures[] }" +}) +``` + +**Phase 4: Result Analysis** + +``` +1. Check pass rate >= 95% +2. Check coverage meets threshold +3. Generate test report with pass/fail counts +4. 
Update shared-memory.json with test results +``` diff --git a/.claude/skills/team-coordinate/subagents/discuss-subagent.md b/.claude/skills/team-coordinate/subagents/discuss-subagent.md new file mode 100644 index 00000000..fff00818 --- /dev/null +++ b/.claude/skills/team-coordinate/subagents/discuss-subagent.md @@ -0,0 +1,133 @@ +# Discuss Subagent + +Lightweight multi-perspective critique engine. Called inline by any role needing peer review. Perspectives are dynamic -- specified by the calling role, not pre-defined. + +## Design + +Unlike team-lifecycle-v4's fixed perspective definitions (product, technical, quality, risk, coverage), team-coordinate uses **dynamic perspectives** passed in the prompt. The calling role decides what viewpoints matter for its artifact. + +## Invocation + +Called by roles after artifact creation: + +``` +Task({ + subagent_type: "cli-discuss-agent", + run_in_background: false, + description: "Discuss ", + prompt: `## Multi-Perspective Critique: + +### Input +- Artifact: +- Round: +- Session: + +### Perspectives + + +Example: +| Perspective | CLI Tool | Role | Focus Areas | +|-------------|----------|------|-------------| +| Feasibility | gemini | Engineer | Implementation complexity, technical risks, resource needs | +| Clarity | codex | Editor | Readability, logical flow, completeness of explanation | +| Accuracy | gemini | Domain Expert | Factual correctness, source reliability, claim verification | + +### Execution Steps +1. Read artifact from +2. For each perspective, launch CLI analysis in background: + Bash(command="ccw cli -p 'PURPOSE: Analyze from perspective for + TASK: + MODE: analysis + CONTEXT: Artifact content below + EXPECTED: JSON with strengths[], weaknesses[], suggestions[], rating (1-5) + CONSTRAINTS: Output valid JSON only + + Artifact: + ' --tool --mode analysis", run_in_background=true) +3. Wait for all CLI results +4. 
Divergence detection: + - High severity: any rating <= 2, critical issue identified + - Medium severity: rating spread (max - min) >= 3, or single perspective rated <= 2 with others >= 3 + - Low severity: minor suggestions only, all ratings >= 3 +5. Consensus determination: + - No high-severity divergences AND average rating >= 3.0 -> consensus_reached + - Otherwise -> consensus_blocked +6. Synthesize: + - Convergent themes (agreed by 2+ perspectives) + - Divergent views (conflicting assessments) + - Action items from suggestions +7. Write discussion record to: /discussions/-discussion.md + +### Discussion Record Format +# Discussion Record: + +**Artifact**: +**Perspectives**: +**Consensus**: reached / blocked +**Average Rating**: /5 + +## Convergent Themes +- + +## Divergent Views +- **** (): + +## Action Items +1. + +## Ratings +| Perspective | Rating | +|-------------|--------| +| | /5 | + +### Return Value + +**When consensus_reached**: +Return a summary string with: +- Verdict: consensus_reached +- Average rating +- Key action items (top 3) +- Discussion record path + +**When consensus_blocked**: +Return a structured summary with: +- Verdict: consensus_blocked +- Severity: HIGH | MEDIUM | LOW +- Average rating +- Divergence summary: top 3 divergent points with perspective attribution +- Action items: prioritized list of required changes +- Recommendation: revise | proceed-with-caution | escalate +- Discussion record path + +### Error Handling +- Single CLI fails -> fallback to direct Claude analysis for that perspective +- All CLI fail -> generate basic discussion from direct artifact reading +- Artifact not found -> return error immediately` +}) +``` + +## Integration with Calling Role + +The calling role is responsible for: + +1. **Before calling**: Complete primary artifact output +2. **Calling**: Invoke discuss subagent with appropriate dynamic perspectives +3. 
**After calling**: + +| Verdict | Severity | Role Action | +|---------|----------|-------------| +| consensus_reached | - | Include action items in Phase 5 report, proceed normally | +| consensus_blocked | HIGH | Include divergence details in Phase 5 SendMessage. Do NOT self-revise -- coordinator decides. | +| consensus_blocked | MEDIUM | Include warning in Phase 5 SendMessage. Proceed normally. | +| consensus_blocked | LOW | Treat as consensus_reached with notes. Proceed normally. | + +**SendMessage format for consensus_blocked (HIGH or MEDIUM)**: + +``` +[] complete. Discuss : consensus_blocked (severity=) +Divergences: +Action items: +Recommendation: +Artifact: +Discussion: +``` diff --git a/.claude/skills/team-coordinate/subagents/explore-subagent.md b/.claude/skills/team-coordinate/subagents/explore-subagent.md new file mode 100644 index 00000000..e934f7ed --- /dev/null +++ b/.claude/skills/team-coordinate/subagents/explore-subagent.md @@ -0,0 +1,120 @@ +# Explore Subagent + +Shared codebase exploration utility with centralized caching. Callable by any role needing code context. + +## Invocation + +``` +Task({ + subagent_type: "cli-explore-agent", + run_in_background: false, + description: "Explore ", + prompt: `Explore codebase for: + +Focus angle: +Keywords: +Session folder: + +## Cache Check +1. Read /explorations/cache-index.json (if exists) +2. Look for entry with matching angle +3. If found AND file exists -> read cached result, return summary +4. If not found -> proceed to exploration + +## Exploration + + +## Output +Write JSON to: /explorations/explore-.json +Update cache-index.json with new entry + +## Output Schema +{ + "angle": "", + "query": "", + "relevant_files": [ + { "path": "...", "rationale": "...", "role": "...", "discovery_source": "...", "key_symbols": [] } + ], + "patterns": [], + "dependencies": [], + "external_refs": [], + "_metadata": { "created_by": "", "timestamp": "...", "cache_key": "..." 
} +} + +Return summary: file count, pattern count, top 5 files, output path` +}) +``` + +## Cache Mechanism + +### Cache Index Schema + +`/explorations/cache-index.json`: + +```json +{ + "entries": [ + { + "angle": "architecture", + "keywords": ["auth", "middleware"], + "file": "explore-architecture.json", + "created_by": "analyst", + "created_at": "2026-02-27T10:00:00Z", + "file_count": 15 + } + ] +} +``` + +### Cache Lookup Rules + +| Condition | Action | +|-----------|--------| +| Exact angle match exists | Return cached result | +| No match | Execute exploration, cache result | +| Cache file missing but index has entry | Remove stale entry, re-explore | + +### Cache Invalidation + +Cache is session-scoped. No explicit invalidation needed -- each session starts fresh. If a role suspects stale data, it can pass `force_refresh: true` in the prompt to bypass cache. + +## Angle Focus Guide + +| Angle | Focus Points | Typical Caller | +|-------|-------------|----------------| +| architecture | Layer boundaries, design patterns, component responsibilities, ADRs | any | +| dependencies | Import chains, external libraries, circular dependencies, shared utilities | any | +| modularity | Module interfaces, separation of concerns, extraction opportunities | any | +| integration-points | API endpoints, data flow between modules, event systems | any | +| security | Auth/authz logic, input validation, sensitive data handling, middleware | any | +| dataflow | Data transformations, state propagation, validation points | any | +| performance | Bottlenecks, N+1 queries, blocking operations, algorithm complexity | any | +| error-handling | Try-catch blocks, error propagation, recovery strategies, logging | any | +| patterns | Code conventions, design patterns, naming conventions, best practices | any | +| testing | Test files, coverage gaps, test patterns, mocking strategies | any | +| general | Broad semantic search for topic-related code | any | + +## Exploration Strategies + 
+### Low Complexity (direct search) + +For simple queries, use ACE semantic search: + +``` +mcp__ace-tool__search_context(project_root_path="", query="") +``` + +ACE failure fallback: `rg -l '' --type ts` + +### Medium/High Complexity (multi-angle) + +For complex queries, call cli-explore-agent per angle. The calling role determines complexity and selects angles. + +## Search Tool Priority + +| Tool | Priority | Use Case | +|------|----------|----------| +| mcp__ace-tool__search_context | P0 | Semantic search | +| Grep / Glob | P1 | Pattern matching | +| cli-explore-agent | Deep | Multi-angle exploration | +| WebSearch | P3 | External docs | diff --git a/.claude/skills/team-lifecycle-v4/SKILL.md b/.claude/skills/team-lifecycle-v4/SKILL.md index 5d7c424f..df609a1c 100644 --- a/.claude/skills/team-lifecycle-v4/SKILL.md +++ b/.claude/skills/team-lifecycle-v4/SKILL.md @@ -136,8 +136,9 @@ Each worker on startup executes the same task discovery flow: Task completion with optional fast-advance to skip coordinator round-trip: 1. **Message Bus**: Call `mcp__ccw-tools__team_msg` to log message - - Params: operation="log", team=, from=, to="coordinator", type=, summary="[] ", ref= - - **CLI fallback**: When MCP unavailable -> `ccw team log --team --from --to coordinator --type --summary "[] ..." --json` + - Params: operation="log", team=****, from=, to="coordinator", type=, summary="[] ", ref= + - **`team` must be session ID** (e.g., `TLS-my-project-2026-02-27`), NOT team name. Extract from task description `Session:` field → take folder name. + - **CLI fallback**: `ccw team log --team --from --to coordinator --type --summary "[] ..." --json` 2. **TaskUpdate**: Mark task completed 3. 
**Fast-Advance Check**: - Call `TaskList()`, find pending tasks whose blockedBy are ALL completed @@ -533,13 +534,13 @@ Session: - All output prefixed with [] tag - Only communicate with coordinator - Do not use TaskCreate to create tasks for other roles -- Before each SendMessage, call mcp__ccw-tools__team_msg to log +- Before each SendMessage, call mcp__ccw-tools__team_msg to log (team= from Session field, NOT team name) - After task completion, check for fast-advance opportunity (see SKILL.md Phase 5) ## Workflow 1. Call Skill -> get role definition and execution logic 2. Follow role.md 5-Phase flow -3. team_msg + SendMessage results to coordinator +3. team_msg(team=) + SendMessage results to coordinator 4. TaskUpdate completed -> check next task or fast-advance` }) ``` @@ -575,7 +576,7 @@ Only SendMessage to coordinator when: - All output prefixed with [] tag - Only communicate with coordinator - Do not use TaskCreate to create tasks for other roles -- Before each SendMessage, call mcp__ccw-tools__team_msg to log +- Before each SendMessage, call mcp__ccw-tools__team_msg to log (team= from Session field, NOT team name) - Use subagent calls for heavy work, retain summaries in context` }) ``` diff --git a/ccw/frontend/src/components/hook/HookQuickTemplates.tsx b/ccw/frontend/src/components/hook/HookQuickTemplates.tsx index ccbe76d9..73259974 100644 --- a/ccw/frontend/src/components/hook/HookQuickTemplates.tsx +++ b/ccw/frontend/src/components/hook/HookQuickTemplates.tsx @@ -19,6 +19,7 @@ import { GitBranch, Send, FileBarChart, + Settings, } from 'lucide-react'; import { Card } from '@/components/ui/Card'; import { Button } from '@/components/ui/Button'; @@ -31,7 +32,7 @@ import type { HookTriggerType } from './HookCard'; /** * Template category type */ -export type TemplateCategory = 'notification' | 'indexing' | 'automation'; +export type TemplateCategory = 'notification' | 'indexing' | 'automation' | 'utility'; /** * Hook template definition @@ -226,6 +227,34 @@ 
export const HOOK_TEMPLATES: readonly HookTemplate[] = [ '-e', 'const cp=require("child_process");const payload=JSON.stringify({type:"MEMORY_V2_STATUS_UPDATED",project:process.env.CLAUDE_PROJECT_DIR||process.cwd(),timestamp:Date.now()});cp.spawnSync("curl",["-s","-X","POST","-H","Content-Type: application/json","-d",payload,"http://localhost:3456/api/hook"],{stdio:"inherit",shell:true})' ] + }, + // --- Memory Operations --- + { + id: 'memory-auto-compress', + name: 'Auto Memory Compress', + description: 'Automatically compress memory when entries exceed threshold', + category: 'automation', + trigger: 'Stop', + command: 'ccw', + args: ['memory', 'consolidate', '--threshold', '50'] + }, + { + id: 'memory-preview-extract', + name: 'Memory Preview & Extract', + description: 'Preview extraction queue and extract eligible sessions', + category: 'automation', + trigger: 'SessionStart', + command: 'ccw', + args: ['memory', 'preview', '--include-native'] + }, + { + id: 'memory-status-check', + name: 'Memory Status Check', + description: 'Check memory extraction and consolidation status', + category: 'utility', + trigger: 'SessionStart', + command: 'ccw', + args: ['memory', 'status'] } ] as const; @@ -234,7 +263,8 @@ export const HOOK_TEMPLATES: readonly HookTemplate[] = [ const CATEGORY_ICONS: Record = { notification: { icon: Bell, color: 'text-blue-500', bg: 'bg-blue-500/10' }, indexing: { icon: Database, color: 'text-purple-500', bg: 'bg-purple-500/10' }, - automation: { icon: Wrench, color: 'text-orange-500', bg: 'bg-orange-500/10' } + automation: { icon: Wrench, color: 'text-orange-500', bg: 'bg-orange-500/10' }, + utility: { icon: Settings, color: 'text-gray-500', bg: 'bg-gray-500/10' } }; // ========== Template Icons ========== @@ -258,7 +288,8 @@ function getCategoryName(category: TemplateCategory, formatMessage: ReturnType = { notification: formatMessage({ id: 'cliHooks.templates.categories.notification' }), indexing: formatMessage({ id: 
'cliHooks.templates.categories.indexing' }), - automation: formatMessage({ id: 'cliHooks.templates.categories.automation' }) + automation: formatMessage({ id: 'cliHooks.templates.categories.automation' }), + utility: formatMessage({ id: 'cliHooks.templates.categories.utility' }) }; return names[category]; } @@ -352,7 +383,9 @@ export function HookQuickTemplates({

- {formatMessage({ id: `cliHooks.templates.templates.${template.id}.name` })} + {formatMessage( + { id: `cliHooks.templates.templates.${template.id}.name`, defaultMessage: template.name } + )}

@@ -394,7 +427,9 @@ export function HookQuickTemplates({ {/* Description */}

- {formatMessage({ id: `cliHooks.templates.templates.${template.id}.description` })} + {formatMessage( + { id: `cliHooks.templates.templates.${template.id}.description`, defaultMessage: template.description } + )}

); diff --git a/ccw/frontend/src/components/mcp/CcwToolsMcpCard.test.tsx b/ccw/frontend/src/components/mcp/CcwToolsMcpCard.test.tsx new file mode 100644 index 00000000..6902455a --- /dev/null +++ b/ccw/frontend/src/components/mcp/CcwToolsMcpCard.test.tsx @@ -0,0 +1,102 @@ +// ======================================== +// CcwToolsMcpCard Component Tests +// ======================================== + +import { describe, it, expect, beforeEach, vi } from 'vitest'; +import { render, screen, waitFor } from '@/test/i18n'; +import userEvent from '@testing-library/user-event'; + +import { CcwToolsMcpCard } from './CcwToolsMcpCard'; +import { updateCcwConfig, updateCcwConfigForCodex } from '@/lib/api'; + +vi.mock('@/lib/api', () => ({ + installCcwMcp: vi.fn(), + uninstallCcwMcp: vi.fn(), + updateCcwConfig: vi.fn(), + installCcwMcpToCodex: vi.fn(), + uninstallCcwMcpFromCodex: vi.fn(), + updateCcwConfigForCodex: vi.fn(), +})); + +vi.mock('@/hooks/useNotifications', () => ({ + useNotifications: () => ({ + success: vi.fn(), + error: vi.fn(), + }), +})); + +describe('CcwToolsMcpCard', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + it('preserves enabledTools when saving config (Codex)', async () => { + const updateCodexMock = vi.mocked(updateCcwConfigForCodex); + updateCodexMock.mockResolvedValue({ + isInstalled: true, + enabledTools: [], + installedScopes: ['global'], + }); + + render( + , + { locale: 'en' } + ); + + const user = userEvent.setup(); + await user.click(screen.getByText(/CCW MCP Server|mcp\.ccw\.title/i)); + await user.click( + screen.getByRole('button', { name: /Save Configuration|mcp\.ccw\.actions\.saveConfig/i }) + ); + + await waitFor(() => { + expect(updateCodexMock).toHaveBeenCalledWith( + expect.objectContaining({ + enabledTools: ['write_file', 'read_many_files'], + }) + ); + }); + }); + + it('preserves enabledTools when saving config (Claude)', async () => { + const updateClaudeMock = vi.mocked(updateCcwConfig); + 
updateClaudeMock.mockResolvedValue({ + isInstalled: true, + enabledTools: [], + installedScopes: ['global'], + }); + + render( + , + { locale: 'en' } + ); + + const user = userEvent.setup(); + await user.click(screen.getByText(/CCW MCP Server|mcp\.ccw\.title/i)); + await user.click( + screen.getByRole('button', { name: /Save Configuration|mcp\.ccw\.actions\.saveConfig/i }) + ); + + await waitFor(() => { + expect(updateClaudeMock).toHaveBeenCalledWith( + expect.objectContaining({ + enabledTools: ['write_file', 'smart_search'], + }) + ); + }); + }); +}); + diff --git a/ccw/frontend/src/components/mcp/CcwToolsMcpCard.tsx b/ccw/frontend/src/components/mcp/CcwToolsMcpCard.tsx index 467cb83e..f0d7dd34 100644 --- a/ccw/frontend/src/components/mcp/CcwToolsMcpCard.tsx +++ b/ccw/frontend/src/components/mcp/CcwToolsMcpCard.tsx @@ -37,7 +37,7 @@ import { uninstallCcwMcpFromCodex, updateCcwConfigForCodex, } from '@/lib/api'; -import { mcpServersKeys } from '@/hooks'; +import { mcpServersKeys, useNotifications } from '@/hooks'; import { useQueryClient } from '@tanstack/react-query'; import { cn } from '@/lib/utils'; import { useWorkflowStore, selectProjectPath } from '@/stores/workflowStore'; @@ -128,6 +128,7 @@ export function CcwToolsMcpCard({ }: CcwToolsMcpCardProps) { const { formatMessage } = useIntl(); const queryClient = useQueryClient(); + const { success: notifySuccess, error: notifyError } = useNotifications(); const currentProjectPath = useWorkflowStore(selectProjectPath); // Local state for config inputs @@ -179,9 +180,19 @@ export function CcwToolsMcpCard({ onSuccess: () => { if (isCodex) { queryClient.invalidateQueries({ queryKey: ['codexMcpServers'] }); + queryClient.invalidateQueries({ queryKey: ['ccwMcpConfigCodex'] }); } else { queryClient.invalidateQueries({ queryKey: mcpServersKeys.all }); + queryClient.invalidateQueries({ queryKey: ['ccwMcpConfig'] }); } + notifySuccess(formatMessage({ id: 'mcp.ccw.feedback.saveSuccess' })); + }, + onError: (error) => { + 
console.error('Failed to update CCW config:', error); + notifyError( + formatMessage({ id: 'mcp.ccw.feedback.saveError' }), + error instanceof Error ? error.message : String(error) + ); }, }); @@ -201,6 +212,9 @@ export function CcwToolsMcpCard({ const handleConfigSave = () => { updateConfigMutation.mutate({ + // Preserve current tool selection; otherwise updateCcwConfig* falls back to defaults + // and can unintentionally overwrite user-chosen enabled tools. + enabledTools, projectRoot: projectRootInput || undefined, allowedDirs: allowedDirsInput || undefined, enableSandbox: enableSandboxInput, diff --git a/ccw/frontend/src/components/memory/SessionPreviewPanel.tsx b/ccw/frontend/src/components/memory/SessionPreviewPanel.tsx new file mode 100644 index 00000000..56e9cdc6 --- /dev/null +++ b/ccw/frontend/src/components/memory/SessionPreviewPanel.tsx @@ -0,0 +1,332 @@ +// ======================================== +// SessionPreviewPanel Component +// ======================================== +// Preview and select sessions for Memory V2 extraction + +import { useState, useMemo } from 'react'; +import { useIntl } from 'react-intl'; +import { formatDistanceToNow } from 'date-fns'; +import { Search, Eye, Loader2, CheckCircle2, XCircle, Clock } from 'lucide-react'; +import { Button } from '@/components/ui/Button'; +import { Badge } from '@/components/ui/Badge'; +import { Input } from '@/components/ui/Input'; +import { Checkbox } from '@/components/ui/Checkbox'; +import { + usePreviewSessions, + useTriggerSelectiveExtraction, +} from '@/hooks/useMemoryV2'; +import { cn } from '@/lib/utils'; + +interface SessionPreviewPanelProps { + onClose?: () => void; + onExtractComplete?: () => void; +} + +// Helper function to format bytes +function formatBytes(bytes: number): string { + if (bytes === 0) return '0 B'; + const k = 1024; + const sizes = ['B', 'KB', 'MB', 'GB']; + const i = Math.floor(Math.log(bytes) / Math.log(k)); + return `${parseFloat((bytes / Math.pow(k, 
i)).toFixed(1))} ${sizes[i]}`; +} + +// Helper function to format timestamp +function formatTimestamp(timestamp: number): string { + try { + const date = new Date(timestamp); + return formatDistanceToNow(date, { addSuffix: true }); + } catch { + return '-'; + } +} + +export function SessionPreviewPanel({ onClose, onExtractComplete }: SessionPreviewPanelProps) { + const intl = useIntl(); + const [searchQuery, setSearchQuery] = useState(''); + const [selectedIds, setSelectedIds] = useState>(new Set()); + const [includeNative, setIncludeNative] = useState(false); + + const { data, isLoading, refetch } = usePreviewSessions(includeNative); + const triggerExtraction = useTriggerSelectiveExtraction(); + + // Filter sessions based on search query + const filteredSessions = useMemo(() => { + if (!data?.sessions) return []; + if (!searchQuery.trim()) return data.sessions; + + const query = searchQuery.toLowerCase(); + return data.sessions.filter( + (session) => + session.sessionId.toLowerCase().includes(query) || + session.tool.toLowerCase().includes(query) || + session.source.toLowerCase().includes(query) + ); + }, [data?.sessions, searchQuery]); + + // Get ready sessions (eligible and not extracted) + const readySessions = useMemo(() => { + return filteredSessions.filter((s) => s.eligible && !s.extracted); + }, [filteredSessions]); + + // Toggle session selection + const toggleSelection = (sessionId: string) => { + setSelectedIds((prev) => { + const next = new Set(prev); + if (next.has(sessionId)) { + next.delete(sessionId); + } else { + next.add(sessionId); + } + return next; + }); + }; + + // Select all ready sessions + const selectAll = () => { + setSelectedIds(new Set(readySessions.map((s) => s.sessionId))); + }; + + // Clear selection + const selectNone = () => { + setSelectedIds(new Set()); + }; + + // Trigger extraction for selected sessions + const handleExtract = async () => { + if (selectedIds.size === 0) return; + + triggerExtraction.mutate( + { + sessionIds: 
Array.from(selectedIds), + includeNative, + }, + { + onSuccess: () => { + setSelectedIds(new Set()); + onExtractComplete?.(); + }, + } + ); + }; + + return ( +
+ {/* Header */} +
+

+ + {intl.formatMessage({ id: 'memory.v2.preview.title', defaultMessage: 'Extraction Queue Preview' })} +

+
+ + +
+
+ + {/* Summary Bar */} + {data?.summary && ( +
+
+
{data.summary.total}
+
+ {intl.formatMessage({ id: 'memory.v2.preview.total', defaultMessage: 'Total' })} +
+
+
+
{data.summary.eligible}
+
+ {intl.formatMessage({ id: 'memory.v2.preview.eligible', defaultMessage: 'Eligible' })} +
+
+
+
{data.summary.alreadyExtracted}
+
+ {intl.formatMessage({ id: 'memory.v2.preview.extracted', defaultMessage: 'Already Extracted' })} +
+
+
+
{data.summary.readyForExtraction}
+
+ {intl.formatMessage({ id: 'memory.v2.preview.ready', defaultMessage: 'Ready' })} +
+
+
+ )} + + {/* Search and Actions */} +
+
+ + setSearchQuery(e.target.value)} + className="pl-9" + /> +
+ + +
+ + {/* Session Table */} +
+ {isLoading ? ( +
+ +
+ ) : filteredSessions.length === 0 ? ( +
+ {intl.formatMessage({ id: 'memory.v2.preview.noSessions', defaultMessage: 'No sessions found' })} +
+ ) : ( + + + + + + + + + + + + + + + {filteredSessions.map((session) => { + const isReady = session.eligible && !session.extracted; + const isSelected = selectedIds.has(session.sessionId); + const isDisabled = !isReady; + + return ( + + + + + + + + + + + ); + })} + +
SourceSession IDToolTimestampSizeTurnsStatus
+ toggleSelection(session.sessionId)} + /> + + + {session.source === 'ccw' + ? intl.formatMessage({ id: 'memory.v2.preview.sourceCcw', defaultMessage: 'CCW' }) + : intl.formatMessage({ id: 'memory.v2.preview.sourceNative', defaultMessage: 'Native' })} + + + {session.sessionId} + + {session.tool || '-'} + + {formatTimestamp(session.timestamp)} + + {formatBytes(session.bytes)} + + {session.turns} + + {session.extracted ? ( + + + {intl.formatMessage({ id: 'memory.v2.preview.extracted', defaultMessage: 'Extracted' })} + + ) : session.eligible ? ( + + + {intl.formatMessage({ id: 'memory.v2.preview.ready', defaultMessage: 'Ready' })} + + ) : ( + + + {intl.formatMessage({ id: 'memory.v2.preview.ineligible', defaultMessage: 'Ineligible' })} + + )} +
+ )} +
+ + {/* Footer Actions */} +
+
+ {selectedIds.size > 0 ? ( + intl.formatMessage( + { id: 'memory.v2.preview.selected', defaultMessage: '{count} sessions selected' }, + { count: selectedIds.size } + ) + ) : ( + intl.formatMessage({ id: 'memory.v2.preview.selectHint', defaultMessage: 'Select sessions to extract' }) + )} +
+
+ {onClose && ( + + )} + +
+
+
+ ); +} + +export default SessionPreviewPanel; diff --git a/ccw/frontend/src/components/memory/V2PipelineTab.tsx b/ccw/frontend/src/components/memory/V2PipelineTab.tsx index 06f70ad7..1316446e 100644 --- a/ccw/frontend/src/components/memory/V2PipelineTab.tsx +++ b/ccw/frontend/src/components/memory/V2PipelineTab.tsx @@ -28,6 +28,7 @@ import { Card } from '@/components/ui/Card'; import { Button } from '@/components/ui/Button'; import { Badge } from '@/components/ui/Badge'; import { Dialog, DialogContent, DialogHeader, DialogTitle } from '@/components/ui/Dialog'; +import { SessionPreviewPanel } from '@/components/memory/SessionPreviewPanel'; import 'highlight.js/styles/github-dark.css'; import { useExtractionStatus, @@ -84,6 +85,7 @@ function ExtractionCard() { const { data: status, isLoading, refetch } = useExtractionStatus(); const trigger = useTriggerExtraction(); const [maxSessions, setMaxSessions] = useState(10); + const [showPreview, setShowPreview] = useState(false); const handleTrigger = () => { trigger.mutate(maxSessions); @@ -94,83 +96,107 @@ function ExtractionCard() { const lastRunText = formatRelativeTime(status?.lastRun); return ( - -
-
-

- - Phase 1: {intl.formatMessage({ id: 'memory.v2.extraction.title', defaultMessage: 'Extraction' })} -

-

- {intl.formatMessage({ id: 'memory.v2.extraction.description', defaultMessage: 'Extract structured memories from CLI sessions' })} -

- {lastRunText && ( -

- {intl.formatMessage({ id: 'memory.v2.extraction.lastRun', defaultMessage: 'Last run' })}: {lastRunText} + <> + +

+
+

+ + Phase 1: {intl.formatMessage({ id: 'memory.v2.extraction.title', defaultMessage: 'Extraction' })} +

+

+ {intl.formatMessage({ id: 'memory.v2.extraction.description', defaultMessage: 'Extract structured memories from CLI sessions' })}

+ {lastRunText && ( +

+ {intl.formatMessage({ id: 'memory.v2.extraction.lastRun', defaultMessage: 'Last run' })}: {lastRunText} +

+ )} +
+ {status && ( +
+
{status.total_stage1}
+
+ {intl.formatMessage({ id: 'memory.v2.extraction.extracted', defaultMessage: 'Extracted' })} +
+
)}
- {status && ( -
-
{status.total_stage1}
-
- {intl.formatMessage({ id: 'memory.v2.extraction.extracted', defaultMessage: 'Extracted' })} + +
+ setMaxSessions(Math.max(1, parseInt(e.target.value) || 10))} + className="w-20 px-2 py-1 text-sm border rounded bg-background" + min={1} + max={64} + /> + sessions max +
+ +
+ + + +
+ + {status?.jobs && status.jobs.length > 0 && ( +
+
+ {intl.formatMessage({ id: 'memory.v2.extraction.recentJobs', defaultMessage: 'Recent Jobs' })} +
+
+ {status.jobs.slice(0, 5).map((job) => ( +
+ {job.job_key} + +
+ ))}
)} -
+ -
- setMaxSessions(Math.max(1, parseInt(e.target.value) || 10))} - className="w-20 px-2 py-1 text-sm border rounded bg-background" - min={1} - max={64} - /> - sessions max -
- -
- - -
- - {status?.jobs && status.jobs.length > 0 && ( -
-
- {intl.formatMessage({ id: 'memory.v2.extraction.recentJobs', defaultMessage: 'Recent Jobs' })} -
-
- {status.jobs.slice(0, 5).map((job) => ( -
- {job.job_key} - -
- ))} -
-
- )} - + {/* Preview Queue Dialog */} + + + setShowPreview(false)} + onExtractComplete={() => { + setShowPreview(false); + refetch(); + }} + /> + + + ); } diff --git a/ccw/frontend/src/components/shared/CommandCreateDialog.tsx b/ccw/frontend/src/components/shared/CommandCreateDialog.tsx new file mode 100644 index 00000000..38a3688d --- /dev/null +++ b/ccw/frontend/src/components/shared/CommandCreateDialog.tsx @@ -0,0 +1,408 @@ +// ======================================== +// Command Create Dialog Component +// ======================================== +// Modal dialog for creating/importing commands with two modes: +// - Import: import existing command file +// - CLI Generate: AI-generated command from description + +import { useState, useCallback } from 'react'; +import { useIntl } from 'react-intl'; +import { + Folder, + User, + FileCode, + Sparkles, + CheckCircle, + XCircle, + Loader2, + Info, +} from 'lucide-react'; +import { + Dialog, + DialogContent, + DialogHeader, + DialogFooter, + DialogTitle, + DialogDescription, +} from '@/components/ui/Dialog'; +import { Button } from '@/components/ui/Button'; +import { Input } from '@/components/ui/Input'; +import { Textarea } from '@/components/ui/Textarea'; +import { Label } from '@/components/ui/Label'; +import { validateCommandImport, createCommand } from '@/lib/api'; +import { useWorkflowStore, selectProjectPath } from '@/stores/workflowStore'; +import { cn } from '@/lib/utils'; + +export interface CommandCreateDialogProps { + open: boolean; + onOpenChange: (open: boolean) => void; + onCreated: () => void; + cliType?: 'claude' | 'codex'; +} + +type CreateMode = 'import' | 'cli-generate'; +type CommandLocation = 'project' | 'user'; + +interface ValidationResult { + valid: boolean; + errors?: string[]; + commandInfo?: { name: string; description: string; usage?: string }; +} + +export function CommandCreateDialog({ open, onOpenChange, onCreated, cliType = 'claude' }: CommandCreateDialogProps) { + const { 
formatMessage } = useIntl(); + const projectPath = useWorkflowStore(selectProjectPath); + + const [mode, setMode] = useState('import'); + const [location, setLocation] = useState('project'); + + // Import mode state + const [sourcePath, setSourcePath] = useState(''); + const [customName, setCustomName] = useState(''); + const [validationResult, setValidationResult] = useState(null); + const [isValidating, setIsValidating] = useState(false); + + // CLI Generate mode state + const [commandName, setCommandName] = useState(''); + const [description, setDescription] = useState(''); + + const [isCreating, setIsCreating] = useState(false); + + const resetState = useCallback(() => { + setMode('import'); + setLocation('project'); + setSourcePath(''); + setCustomName(''); + setValidationResult(null); + setIsValidating(false); + setCommandName(''); + setDescription(''); + setIsCreating(false); + }, []); + + const handleOpenChange = useCallback((open: boolean) => { + if (!open) { + resetState(); + } + onOpenChange(open); + }, [onOpenChange, resetState]); + + const handleValidate = useCallback(async () => { + if (!sourcePath.trim()) return; + + setIsValidating(true); + setValidationResult(null); + + try { + const result = await validateCommandImport(sourcePath.trim()); + setValidationResult(result); + } catch (err) { + setValidationResult({ + valid: false, + errors: [err instanceof Error ? err.message : String(err)], + }); + } finally { + setIsValidating(false); + } + }, [sourcePath]); + + const handleCreate = useCallback(async () => { + if (mode === 'import') { + if (!sourcePath.trim()) return; + if (!validationResult?.valid) return; + } else { + if (!commandName.trim()) return; + if (!description.trim()) return; + } + + setIsCreating(true); + + try { + await createCommand({ + mode, + location, + sourcePath: mode === 'import' ? sourcePath.trim() : undefined, + commandName: mode === 'import' ? 
(customName.trim() || undefined) : commandName.trim(), + description: mode === 'cli-generate' ? description.trim() : undefined, + generationType: mode === 'cli-generate' ? 'description' : undefined, + projectPath, + cliType, + }); + + handleOpenChange(false); + onCreated(); + } catch (err) { + console.error('Failed to create command:', err); + if (mode === 'import') { + setValidationResult({ + valid: false, + errors: [err instanceof Error ? err.message : formatMessage({ id: 'commands.create.createError' })], + }); + } + } finally { + setIsCreating(false); + } + }, [mode, location, sourcePath, customName, commandName, description, validationResult, projectPath, handleOpenChange, onCreated, formatMessage]); + + const canCreate = mode === 'import' + ? sourcePath.trim() && validationResult?.valid && !isCreating + : commandName.trim() && description.trim() && !isCreating; + + return ( + + + + {formatMessage({ id: 'commands.create.title' })} + + {formatMessage({ id: 'commands.description' })} + + + +
+ {/* Location Selection */} +
+ +
+ + +
+
+ + {/* Mode Selection */} +
+ +
+ + +
+
+ + {/* Import Mode Content */} + {mode === 'import' && ( +
+
+ + { + setSourcePath(e.target.value); + setValidationResult(null); + }} + placeholder={formatMessage({ id: 'commands.create.sourcePathPlaceholder' })} + className="font-mono text-sm" + /> +

{formatMessage({ id: 'commands.create.sourcePathHint' })}

+
+ +
+ + setCustomName(e.target.value)} + placeholder={formatMessage({ id: 'commands.create.customNamePlaceholder' })} + /> +
+ + {/* Validation Result */} + {isValidating && ( +
+ + {formatMessage({ id: 'commands.create.validating' })} +
+ )} + {validationResult && !isValidating && ( + validationResult.valid ? ( +
+
+ + {formatMessage({ id: 'commands.create.validCommand' })} +
+ {validationResult.commandInfo && ( +
+
+ {formatMessage({ id: 'commands.card.name' })}: + {validationResult.commandInfo.name} +
+ {validationResult.commandInfo.description && ( +
+ {formatMessage({ id: 'commands.card.description' })}: + {validationResult.commandInfo.description} +
+ )} + {validationResult.commandInfo.usage && ( +
+ {formatMessage({ id: 'commands.card.usage' })}: + {validationResult.commandInfo.usage} +
+ )} +
+ )} +
+ ) : ( +
+
+ + {formatMessage({ id: 'commands.create.invalidCommand' })} +
+ {validationResult.errors && ( +
    + {validationResult.errors.map((error, i) => ( +
  • {error}
  • + ))} +
+ )} +
+ ) + )} +
+ )} + + {/* CLI Generate Mode Content */} + {mode === 'cli-generate' && ( +
+
+ + setCommandName(e.target.value)} + placeholder={formatMessage({ id: 'commands.create.commandNamePlaceholder' })} + /> +

{formatMessage({ id: 'commands.create.commandNameHint' })}

+
+ +
+ +