Enhance project management workflow by introducing a dual-file system for project guidelines and tech analysis

- Updated workflow initialization to create `.workflow/project-tech.json` and `.workflow/project-guidelines.json` for comprehensive project understanding.
- Added mandatory context-reading steps to various commands to ensure compliance with user-defined constraints and the technology stack.
- Implemented a new `/workflow:session:solidify` command to capture session learnings and crystallize them into permanent project guidelines.
- Introduced a `detail` action in issue management to retrieve task details without altering status.
- Enhanced documentation across multiple workflow commands to reflect changes in project structure and guidelines.
Author: catlog22
Date: 2025-12-28 12:47:39 +08:00
Commit: 4c6b28030f (parent 2c42cefa5a)
12 changed files with 576 additions and 146 deletions

View File

@@ -1,6 +1,6 @@
 ---
 name: execute
-description: Execute queue with codex using DAG-based parallel orchestration (delegates task lookup to executors)
+description: Execute queue with codex using DAG-based parallel orchestration (read-only task fetch)
 argument-hint: "[--parallel <n>] [--executor codex|gemini|agent]"
 allowed-tools: TodoWrite(*), Bash(*), Read(*), AskUserQuestion(*)
 ---
@@ -9,13 +9,13 @@ allowed-tools: TodoWrite(*), Bash(*), Read(*), AskUserQuestion(*)
 ## Overview
-Minimal orchestrator that dispatches task IDs to executors. **Does NOT read task details** - delegates all task lookup to the executor via `ccw issue next <item_id>`.
+Minimal orchestrator that dispatches task IDs to executors. Uses read-only `detail` command for parallel-safe task fetching.
 **Design Principles:**
-- **DAG-driven**: Uses `ccw issue queue dag` to get parallel execution plan
-- **ID-only dispatch**: Only passes `item_id` to executors
-- **Executor responsibility**: Codex/Agent fetches task details via `ccw issue next <item_id>`
-- **Parallel execution**: Launches multiple executors concurrently based on DAG batches
+- `queue dag` → returns parallel batches with task IDs
+- `detail <id>` → READ-ONLY task fetch (no status modification)
+- `done <id>` → update completion status
+- No race conditions: status changes only via `done`
 ## Usage
@@ -37,20 +37,18 @@ Minimal orchestrator that dispatches task IDs to executors. **Does NOT read task
 ```
 Phase 1: Get DAG
-└─ ccw issue queue dag → { parallel_batches, nodes, ready_count }
+└─ ccw issue queue dag → { parallel_batches: [["T-1","T-2","T-3"], ...] }
-Phase 2: Dispatch Batches
+Phase 2: Dispatch Parallel Batch
-├─ For each batch in parallel_batches:
-│ ├─ Launch N executors (up to --parallel limit)
-│ ├─ Each executor receives: item_id only
-│ ├─ Executor calls: ccw issue next <item_id>
+├─ For each ID in batch (parallel):
+│ ├─ Executor calls: ccw issue detail <id> (READ-ONLY)
 │ ├─ Executor gets full task definition
 │ ├─ Executor implements + tests + commits
-│ └─ Executor calls: ccw issue done <item_id>
-└─ Wait for batch completion before next batch
-Phase 3: Summary
-└─ ccw issue queue dag → updated status
+│ └─ Executor calls: ccw issue done <id>
+└─ Wait for batch completion
+Phase 3: Next Batch
+└─ ccw issue queue dag → check for newly-ready tasks
 ```
 ## Implementation
@@ -74,13 +72,12 @@ console.log(`
 - Total: ${dag.total}
 - Ready: ${dag.ready_count}
 - Completed: ${dag.completed_count}
-- Batches: ${dag.parallel_batches.length}
-- Max parallel: ${dag._summary.can_parallel}
+- Parallel in batch 1: ${dag.parallel_batches[0]?.length || 0}
 `);
 // Dry run mode
 if (flags.dryRun) {
-  console.log('### Parallel Batches (would execute):\n');
+  console.log('### Parallel Batches:\n');
   dag.parallel_batches.forEach((batch, i) => {
     console.log(`Batch ${i + 1}: ${batch.join(', ')}`);
   });
@@ -88,71 +85,69 @@ if (flags.dryRun) {
 }
 ```
-### Phase 2: Dispatch Batches
+### Phase 2: Dispatch Parallel Batch
 ```javascript
 const parallelLimit = flags.parallel || 3;
 const executor = flags.executor || 'codex';
-// Initialize TodoWrite for tracking
-const allTasks = dag.parallel_batches.flat();
+// Process first batch (all can run in parallel)
+const batch = dag.parallel_batches[0] || [];
+// Initialize TodoWrite
 TodoWrite({
-  todos: allTasks.map(id => ({
+  todos: batch.map(id => ({
     content: `Execute ${id}`,
     status: 'pending',
     activeForm: `Executing ${id}`
   }))
 });
-// Process each batch
-for (const [batchIndex, batch] of dag.parallel_batches.entries()) {
-  console.log(`\n### Batch ${batchIndex + 1}/${dag.parallel_batches.length}`);
-  console.log(`Tasks: ${batch.join(', ')}`);
-  // Dispatch batch with parallelism limit
-  const chunks = [];
-  for (let i = 0; i < batch.length; i += parallelLimit) {
-    chunks.push(batch.slice(i, i + parallelLimit));
-  }
-  for (const chunk of chunks) {
-    // Launch executors in parallel
-    const executions = chunk.map(itemId => {
-      updateTodo(itemId, 'in_progress');
-      return dispatchExecutor(itemId, executor);
-    });
-    await Promise.all(executions);
-    chunk.forEach(id => updateTodo(id, 'completed'));
-  }
-  // Refresh DAG for next batch (dependencies may now be satisfied)
-  const refreshedDag = JSON.parse(Bash(`ccw issue queue dag`).trim());
-  if (refreshedDag.ready_count === 0) break;
-}
+// Dispatch all in parallel (up to limit)
+const chunks = [];
+for (let i = 0; i < batch.length; i += parallelLimit) {
+  chunks.push(batch.slice(i, i + parallelLimit));
+}
+for (const chunk of chunks) {
+  console.log(`\n### Executing: ${chunk.join(', ')}`);
+  // Launch all in parallel
+  const executions = chunk.map(itemId => {
+    updateTodo(itemId, 'in_progress');
+    return dispatchExecutor(itemId, executor);
+  });
+  await Promise.all(executions);
+  chunk.forEach(id => updateTodo(id, 'completed'));
+}
 ```
-### Executor Dispatch (Minimal Prompt)
+### Executor Dispatch
 ```javascript
 function dispatchExecutor(itemId, executorType) {
-  // Minimal prompt - executor fetches its own task
+  // Executor fetches task via READ-ONLY detail command
+  // Then reports completion via done command
   const prompt = `
 ## Execute Task ${itemId}
-### Step 1: Fetch Task
+### Step 1: Get Task (read-only)
 \`\`\`bash
-ccw issue next ${itemId}
+ccw issue detail ${itemId}
 \`\`\`
 ### Step 2: Execute
-Follow the task definition returned by the command above.
-The JSON includes: implementation steps, test commands, acceptance criteria, commit spec.
+Follow the task definition returned above:
+- task.implementation: Implementation steps
+- task.test: Test commands
+- task.acceptance: Acceptance criteria
+- task.commit: Commit specification
-### Step 3: Report
+### Step 3: Report Completion
 When done:
 \`\`\`bash
-ccw issue done ${itemId} --result '{"summary": "..."}'
+ccw issue done ${itemId} --result '{"summary": "...", "files_modified": [...]}'
 \`\`\`
 If failed:
@@ -182,32 +177,51 @@ ccw issue done ${itemId} --fail --reason "..."
 }
 ```
-### Phase 3: Summary
+### Phase 3: Check Next Batch
 ```javascript
-// Get final status
-const finalDag = JSON.parse(Bash(`ccw issue queue dag`).trim());
+// Refresh DAG after batch completes
+const refreshedDag = JSON.parse(Bash(`ccw issue queue dag`).trim());
 console.log(`
-## Execution Complete
-- Completed: ${finalDag.completed_count}/${finalDag.total}
-- Remaining: ${finalDag.ready_count}
-### Task Status
-${finalDag.nodes.map(n => {
-  const icon = n.status === 'completed' ? '✓' :
-               n.status === 'failed' ? '✗' :
-               n.status === 'executing' ? '⟳' : '○';
-  return `${icon} ${n.id} [${n.issue_id}:${n.task_id}] - ${n.status}`;
-}).join('\n')}
+## Batch Complete
+- Completed: ${refreshedDag.completed_count}/${refreshedDag.total}
+- Next ready: ${refreshedDag.ready_count}
 `);
-if (finalDag.ready_count > 0) {
-  console.log('\nRun `/issue:execute` again for remaining tasks.');
+if (refreshedDag.ready_count > 0) {
+  console.log('Run `/issue:execute` again for next batch.');
 }
 ```
+## Parallel Execution Model
+```
+┌─────────────────────────────────────────────────────────┐
+│ Orchestrator                                            │
+├─────────────────────────────────────────────────────────┤
+│ 1. ccw issue queue dag                                  │
+│    → { parallel_batches: [["T-1","T-2","T-3"], ["T-4"]] │
+│                                                         │
+│ 2. Dispatch batch 1 (parallel):                         │
+│  ┌────────────────┐ ┌────────────────┐ ┌────────────┐   │
+│  │ Executor 1     │ │ Executor 2     │ │ Executor 3 │   │
+│  │ detail T-1     │ │ detail T-2     │ │ detail T-3 │   │
+│  │ [work]         │ │ [work]         │ │ [work]     │   │
+│  │ done T-1       │ │ done T-2       │ │ done T-3   │   │
+│  └────────────────┘ └────────────────┘ └────────────┘   │
+│                                                         │
+│ 3. ccw issue queue dag (refresh)                        │
+│    → T-4 now ready (dependencies T-1,T-2 completed)     │
+└─────────────────────────────────────────────────────────┘
+```
+**Why this works for parallel:**
+- `detail <id>` is READ-ONLY → no race conditions
+- `done <id>` updates only its own task status
+- `queue dag` recalculates ready tasks after each batch
 ## CLI Endpoint Contract
 ### `ccw issue queue dag`
@@ -219,25 +233,24 @@ Returns dependency graph with parallel batches:
"ready_count": 3, "ready_count": 3,
"completed_count": 2, "completed_count": 2,
"nodes": [{ "id": "T-1", "status": "pending", "ready": true, ... }], "nodes": [{ "id": "T-1", "status": "pending", "ready": true, ... }],
"edges": [{ "from": "T-1", "to": "T-2" }], "parallel_batches": [["T-1", "T-2", "T-3"], ["T-4", "T-5"]]
"parallel_batches": [["T-1", "T-3"], ["T-2"]],
"_summary": { "can_parallel": 2, "batches_needed": 2 }
} }
``` ```
-### `ccw issue next <item_id>`
-Returns full task definition for the specified item:
+### `ccw issue detail <item_id>`
+Returns full task definition (READ-ONLY):
 ```json
 {
   "item_id": "T-1",
   "issue_id": "GH-123",
-  "task": { "id": "T1", "title": "...", "implementation": [...], ... },
+  "status": "pending",
+  "task": { "id": "T1", "implementation": [...], "test": {...}, ... },
   "context": { "relevant_files": [...] }
 }
 ```
 ### `ccw issue done <item_id>`
-Marks task completed/failed and updates queue state.
+Marks task completed/failed, updates queue state, checks for queue completion.
 ## Error Handling
@@ -245,28 +258,13 @@ Marks task completed/failed and updates queue state.
 |-------|------------|
 | No queue | Run /issue:queue first |
 | No ready tasks | Dependencies blocked, check DAG |
-| Executor timeout | Marked as executing, can resume |
+| Executor timeout | Task not marked done, can retry |
 | Task failure | Use `ccw issue retry` to reset |
-## Troubleshooting
-### Check DAG Status
-```bash
-ccw issue queue dag | jq '.parallel_batches'
-```
-### Resume Interrupted Execution
-Executors in `executing` status will be resumed automatically when calling `ccw issue next <item_id>`.
-### Retry Failed Tasks
-```bash
-ccw issue retry   # Reset all failed to pending
-/issue:execute    # Re-execute
-```
 ## Related Commands
 - `/issue:plan` - Plan issues with solutions
 - `/issue:queue` - Form execution queue
 - `ccw issue queue dag` - View dependency graph
+- `ccw issue detail <id>` - View task details
 - `ccw issue retry` - Reset failed tasks
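
Taken together, the `dag` → `detail` → `done` contract above implies a very small executor-side loop. A minimal sketch, assuming `Bash()` returns the command's stdout as a string (as used throughout this document); `runTask` is a hypothetical placeholder for the implement/test/commit step, and shell quoting is simplified:

```javascript
// Executor-side view of one task, under the endpoint contract above.
async function executeOne(itemId) {
  // READ-ONLY fetch - safe to issue from many executors at once
  const detail = JSON.parse(Bash(`ccw issue detail ${itemId}`).trim());
  try {
    await runTask(detail.task, detail.context);
    // Only `done` mutates queue state, so parallel executors cannot race
    Bash(`ccw issue done ${itemId} --result '{"summary": "completed"}'`);
  } catch (err) {
    Bash(`ccw issue done ${itemId} --fail --reason "${err.message}"`);
  }
}
```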

View File

@@ -193,10 +193,17 @@ ${issueList}
 **Project Root**: ${process.cwd()}
+### Project Context (MANDATORY - Read Both Files First)
+1. Read: .workflow/project-tech.json (technology stack, architecture, key components)
+2. Read: .workflow/project-guidelines.json (user-defined constraints and conventions)
+**CRITICAL**: All solution tasks MUST comply with constraints in project-guidelines.json
 ### Steps
 1. Fetch: \`ccw issue status <id> --json\`
-2. Explore (ACE) → Plan solution
-3. Register & bind: \`ccw issue bind <id> --solution <file>\`
+2. Load project context (project-tech.json + project-guidelines.json)
+3. Explore (ACE) → Plan solution (respecting guidelines)
+4. Register & bind: \`ccw issue bind <id> --solution <file>\`
 ### Generate Files
 \`.workflow/issues/solutions/{issue-id}.jsonl\` - Solution with tasks (schema: cat .claude/workflows/cli-templates/schemas/solution-schema.json)

View File

@@ -10,7 +10,11 @@ examples:
 # Workflow Init Command (/workflow:init)
 ## Overview
-Initialize `.workflow/project.json` with comprehensive project understanding by delegating analysis to **cli-explore-agent**.
+Initialize `.workflow/project-tech.json` and `.workflow/project-guidelines.json` with comprehensive project understanding by delegating analysis to **cli-explore-agent**.
+**Dual File System**:
+- `project-tech.json`: Auto-generated technical analysis (stack, architecture, components)
+- `project-guidelines.json`: User-maintained rules and constraints (created as scaffold)
 **Note**: This command may be called by other workflow commands. Upon completion, return immediately to continue the calling workflow without interrupting the task flow.
@@ -27,7 +31,7 @@ Input Parsing:
 └─ Parse --regenerate flag → regenerate = true | false
 Decision:
-├─ EXISTS + no --regenerate → Exit: "Already initialized"
+├─ BOTH_EXIST + no --regenerate → Exit: "Already initialized"
 ├─ EXISTS + --regenerate → Backup existing → Continue analysis
 └─ NOT_FOUND → Continue analysis
@@ -37,11 +41,14 @@ Analysis Flow:
 │  ├─ Structural scan (get_modules_by_depth.sh, find, wc)
 │  ├─ Semantic analysis (Gemini CLI)
 │  ├─ Synthesis and merge
-│  └─ Write .workflow/project.json
+│  └─ Write .workflow/project-tech.json
+├─ Create guidelines scaffold (if not exists)
+│  └─ Write .workflow/project-guidelines.json (empty structure)
 └─ Display summary
 Output:
-└─ .workflow/project.json (+ .backup if regenerate)
+├─ .workflow/project-tech.json (+ .backup if regenerate)
+└─ .workflow/project-guidelines.json (scaffold if new)
 ```
 ## Implementation
@@ -56,13 +63,18 @@ const regenerate = $ARGUMENTS.includes('--regenerate')
 **Check existing state**:
 ```bash
-bash(test -f .workflow/project.json && echo "EXISTS" || echo "NOT_FOUND")
+bash(test -f .workflow/project-tech.json && echo "TECH_EXISTS" || echo "TECH_NOT_FOUND")
+bash(test -f .workflow/project-guidelines.json && echo "GUIDELINES_EXISTS" || echo "GUIDELINES_NOT_FOUND")
 ```
-**If EXISTS and no --regenerate**: Exit early
+**If BOTH_EXIST and no --regenerate**: Exit early
 ```
-Project already initialized at .workflow/project.json
-Use /workflow:init --regenerate to rebuild
+Project already initialized:
+- Tech analysis: .workflow/project-tech.json
+- Guidelines: .workflow/project-guidelines.json
+Use /workflow:init --regenerate to rebuild tech analysis
+Use /workflow:session:solidify to add guidelines
 Use /workflow:status --project to view state
 ```
@@ -78,7 +90,7 @@ bash(mkdir -p .workflow)
 **For --regenerate**: Backup and preserve existing data
 ```bash
-bash(cp .workflow/project.json .workflow/project.json.backup)
+bash(cp .workflow/project-tech.json .workflow/project-tech.json.backup)
 ```
**Delegate analysis to agent**: **Delegate analysis to agent**:
@@ -89,20 +101,17 @@ Task(
 run_in_background=false,
 description="Deep project analysis",
 prompt=`
-Analyze project for workflow initialization and generate .workflow/project.json.
+Analyze project for workflow initialization and generate .workflow/project-tech.json.
 ## MANDATORY FIRST STEPS
-1. Execute: cat ~/.claude/workflows/cli-templates/schemas/project-json-schema.json (get schema reference)
+1. Execute: cat ~/.claude/workflows/cli-templates/schemas/project-tech-schema.json (get schema reference)
 2. Execute: ccw tool exec get_modules_by_depth '{}' (get project structure)
 ## Task
-Generate complete project.json with:
-- project_name: ${projectName}
-- initialized_at: current ISO timestamp
-- overview: {description, technology_stack, architecture, key_components}
-- features: ${regenerate ? 'preserve from backup' : '[] (empty)'}
-- development_index: ${regenerate ? 'preserve from backup' : '{feature: [], enhancement: [], bugfix: [], refactor: [], docs: []}'}
-- statistics: ${regenerate ? 'preserve from backup' : '{total_features: 0, total_sessions: 0, last_updated}'}
+Generate complete project-tech.json with:
+- project_metadata: {name: ${projectName}, root_path: ${projectRoot}, initialized_at, updated_at}
+- technology_analysis: {description, languages, frameworks, build_tools, test_frameworks, architecture, key_components, dependencies}
+- development_status: ${regenerate ? 'preserve from backup' : '{completed_features: [], development_index: {feature: [], enhancement: [], bugfix: [], refactor: [], docs: []}, statistics: {total_features: 0, total_sessions: 0, last_updated}}'}
 - _metadata: {initialized_by: "cli-explore-agent", analysis_timestamp, analysis_mode}
 ## Analysis Requirements
@@ -123,8 +132,8 @@ Generate complete project.json with:
 1. Structural scan: get_modules_by_depth.sh, find, wc -l
 2. Semantic analysis: Gemini for patterns/architecture
 3. Synthesis: Merge findings
-4. ${regenerate ? 'Merge with preserved features/development_index/statistics from .workflow/project.json.backup' : ''}
-5. Write JSON: Write('.workflow/project.json', jsonContent)
+4. ${regenerate ? 'Merge with preserved development_status from .workflow/project-tech.json.backup' : ''}
+5. Write JSON: Write('.workflow/project-tech.json', jsonContent)
 6. Report: Return brief completion summary
 Project root: ${projectRoot}
@@ -132,29 +141,66 @@ Project root: ${projectRoot}
 )
 ```
+### Step 3.5: Create Guidelines Scaffold (if not exists)
+```javascript
+// Only create if not exists (never overwrite user guidelines)
+if (!file_exists('.workflow/project-guidelines.json')) {
+  const guidelinesScaffold = {
+    conventions: {
+      coding_style: [],
+      naming_patterns: [],
+      file_structure: [],
+      documentation: []
+    },
+    constraints: {
+      architecture: [],
+      tech_stack: [],
+      performance: [],
+      security: []
+    },
+    quality_rules: [],
+    learnings: [],
+    _metadata: {
+      created_at: new Date().toISOString(),
+      version: "1.0.0"
+    }
+  };
+  Write('.workflow/project-guidelines.json', JSON.stringify(guidelinesScaffold, null, 2));
+}
+```
 ### Step 4: Display Summary
 ```javascript
-const projectJson = JSON.parse(Read('.workflow/project.json'));
+const projectTech = JSON.parse(Read('.workflow/project-tech.json'));
+const guidelinesExists = file_exists('.workflow/project-guidelines.json');
 console.log(`
 ✓ Project initialized successfully
 ## Project Overview
-Name: ${projectJson.project_name}
-Description: ${projectJson.overview.description}
+Name: ${projectTech.project_metadata.name}
+Description: ${projectTech.technology_analysis.description}
 ### Technology Stack
-Languages: ${projectJson.overview.technology_stack.languages.map(l => l.name).join(', ')}
-Frameworks: ${projectJson.overview.technology_stack.frameworks.join(', ')}
+Languages: ${projectTech.technology_analysis.languages.map(l => l.name).join(', ')}
+Frameworks: ${projectTech.technology_analysis.frameworks.join(', ')}
 ### Architecture
-Style: ${projectJson.overview.architecture.style}
-Components: ${projectJson.overview.key_components.length} core modules
+Style: ${projectTech.technology_analysis.architecture.style}
+Components: ${projectTech.technology_analysis.key_components.length} core modules
 ---
-Project state: .workflow/project.json
-${regenerate ? 'Backup: .workflow/project.json.backup' : ''}
+Files created:
+- Tech analysis: .workflow/project-tech.json
+- Guidelines: .workflow/project-guidelines.json ${guidelinesExists ? '(scaffold)' : ''}
+${regenerate ? '- Backup: .workflow/project-tech.json.backup' : ''}
+Next steps:
+- Use /workflow:session:solidify to add project guidelines
+- Use /workflow:plan to start planning
 `);
 ```
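
Put together, the agent prompt above implies a file shape like the following. A minimal sketch of `project-tech.json` with illustrative values only; the authoritative definition is `project-tech-schema.json`:

```json
{
  "project_metadata": {
    "name": "example-app",
    "root_path": "/path/to/example-app",
    "initialized_at": "2025-12-28T00:00:00Z",
    "updated_at": "2025-12-28T00:00:00Z"
  },
  "technology_analysis": {
    "description": "Example CLI workflow tool",
    "languages": [{ "name": "TypeScript" }],
    "frameworks": [],
    "build_tools": ["npm"],
    "test_frameworks": [],
    "architecture": { "style": "modular CLI" },
    "key_components": [],
    "dependencies": []
  },
  "development_status": {
    "completed_features": [],
    "development_index": { "feature": [], "enhancement": [], "bugfix": [], "refactor": [], "docs": [] },
    "statistics": { "total_features": 0, "total_sessions": 0, "last_updated": "2025-12-28T00:00:00Z" }
  },
  "_metadata": { "initialized_by": "cli-explore-agent", "analysis_timestamp": "2025-12-28T00:00:00Z", "analysis_mode": "full" }
}
```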

View File

@@ -181,6 +181,8 @@ Execute **${angle}** diagnosis for bug root cause analysis. Analyze codebase fro
 1. Run: ccw tool exec get_modules_by_depth '{}' (project structure)
 2. Run: rg -l "{error_keyword_from_bug}" --type ts (locate relevant files)
 3. Execute: cat ~/.claude/workflows/cli-templates/schemas/diagnosis-json-schema.json (get output schema reference)
+4. Read: .workflow/project-tech.json (technology stack and architecture context)
+5. Read: .workflow/project-guidelines.json (user-defined constraints and conventions)
 ## Diagnosis Strategy (${angle} focus)
@@ -409,6 +411,12 @@ Generate fix plan and write fix-plan.json.
 ## Output Schema Reference
 Execute: cat ~/.claude/workflows/cli-templates/schemas/fix-plan-json-schema.json (get schema reference before generating plan)
+## Project Context (MANDATORY - Read Both Files)
+1. Read: .workflow/project-tech.json (technology stack, architecture, key components)
+2. Read: .workflow/project-guidelines.json (user-defined constraints and conventions)
+**CRITICAL**: All fix tasks MUST comply with constraints in project-guidelines.json
 ## Bug Description
 ${bug_description}

View File

@@ -184,6 +184,8 @@ Execute **${angle}** exploration for task planning context. Analyze codebase fro
 1. Run: ccw tool exec get_modules_by_depth '{}' (project structure)
 2. Run: rg -l "{keyword_from_task}" --type ts (locate relevant files)
 3. Execute: cat ~/.claude/workflows/cli-templates/schemas/explore-json-schema.json (get output schema reference)
+4. Read: .workflow/project-tech.json (technology stack and architecture context)
+5. Read: .workflow/project-guidelines.json (user-defined constraints and conventions)
 ## Exploration Strategy (${angle} focus)
@@ -416,6 +418,12 @@ Generate implementation plan and write plan.json.
 ## Output Schema Reference
 Execute: cat ~/.claude/workflows/cli-templates/schemas/plan-json-schema.json (get schema reference before generating plan)
+## Project Context (MANDATORY - Read Both Files)
+1. Read: .workflow/project-tech.json (technology stack, architecture, key components)
+2. Read: .workflow/project-guidelines.json (user-defined constraints and conventions)
+**CRITICAL**: All generated tasks MUST comply with constraints in project-guidelines.json
 ## Task Description
 ${task_description}

View File

@@ -409,6 +409,8 @@ Task(
 2. Get target files: Read resolved_files from review-state.json
 3. Validate file access: bash(ls -la ${targetFiles.join(' ')})
 4. Execute: cat ~/.claude/workflows/cli-templates/schemas/review-dimension-results-schema.json (get output schema reference)
+5. Read: .workflow/project-tech.json (technology stack and architecture context)
+6. Read: .workflow/project-guidelines.json (user-defined constraints and conventions to validate against)
 ## Review Context
 - Review Type: module (independent)
@@ -511,6 +513,8 @@ Task(
 3. Identify related code: bash(grep -r "import.*${basename(file)}" ${projectDir}/src --include="*.ts")
 4. Read test files: bash(find ${projectDir}/tests -name "*${basename(file, '.ts')}*" -type f)
 5. Execute: cat ~/.claude/workflows/cli-templates/schemas/review-deep-dive-results-schema.json (get output schema reference)
+6. Read: .workflow/project-tech.json (technology stack and architecture context)
+7. Read: .workflow/project-guidelines.json (user-defined constraints for remediation compliance)
 ## CLI Configuration
 - Tool Priority: gemini → qwen → codex

View File

@@ -420,6 +420,8 @@ Task(
 3. Get changed files: bash(cd ${workflowDir} && git log --since="${sessionCreatedAt}" --name-only --pretty=format: | sort -u)
 4. Read review state: ${reviewStateJsonPath}
 5. Execute: cat ~/.claude/workflows/cli-templates/schemas/review-dimension-results-schema.json (get output schema reference)
+6. Read: .workflow/project-tech.json (technology stack and architecture context)
+7. Read: .workflow/project-guidelines.json (user-defined constraints and conventions to validate against)
 ## Session Context
 - Session ID: ${sessionId}
@@ -522,6 +524,8 @@ Task(
 3. Identify related code: bash(grep -r "import.*${basename(file)}" ${workflowDir}/src --include="*.ts")
 4. Read test files: bash(find ${workflowDir}/tests -name "*${basename(file, '.ts')}*" -type f)
 5. Execute: cat ~/.claude/workflows/cli-templates/schemas/review-deep-dive-results-schema.json (get output schema reference)
+6. Read: .workflow/project-tech.json (technology stack and architecture context)
+7. Read: .workflow/project-guidelines.json (user-defined constraints for remediation compliance)
 ## CLI Configuration
 - Tool Priority: gemini → qwen → codex

View File

@@ -139,7 +139,7 @@ After bash validation, the model takes control to:
ccw cli -p " ccw cli -p "
PURPOSE: Security audit of completed implementation PURPOSE: Security audit of completed implementation
TASK: Review code for security vulnerabilities, insecure patterns, auth/authz issues TASK: Review code for security vulnerabilities, insecure patterns, auth/authz issues
CONTEXT: @.summaries/IMPL-*.md,../.. @../../CLAUDE.md CONTEXT: @.summaries/IMPL-*.md,../.. @../../project-tech.json @../../project-guidelines.json
EXPECTED: Security findings report with severity levels EXPECTED: Security findings report with severity levels
RULES: Focus on OWASP Top 10, authentication, authorization, data validation, injection risks RULES: Focus on OWASP Top 10, authentication, authorization, data validation, injection risks
" --tool gemini --mode write --cd .workflow/active/${sessionId} " --tool gemini --mode write --cd .workflow/active/${sessionId}
@@ -151,7 +151,7 @@ After bash validation, the model takes control to:
ccw cli -p " ccw cli -p "
PURPOSE: Architecture compliance review PURPOSE: Architecture compliance review
TASK: Evaluate adherence to architectural patterns, identify technical debt, review design decisions TASK: Evaluate adherence to architectural patterns, identify technical debt, review design decisions
CONTEXT: @.summaries/IMPL-*.md,../.. @../../CLAUDE.md CONTEXT: @.summaries/IMPL-*.md,../.. @../../project-tech.json @../../project-guidelines.json
EXPECTED: Architecture assessment with recommendations EXPECTED: Architecture assessment with recommendations
RULES: Check for patterns, separation of concerns, modularity, scalability RULES: Check for patterns, separation of concerns, modularity, scalability
" --tool qwen --mode write --cd .workflow/active/${sessionId} " --tool qwen --mode write --cd .workflow/active/${sessionId}
@@ -163,7 +163,7 @@ After bash validation, the model takes control to:
ccw cli -p " ccw cli -p "
PURPOSE: Code quality and best practices review PURPOSE: Code quality and best practices review
TASK: Assess code readability, maintainability, adherence to best practices TASK: Assess code readability, maintainability, adherence to best practices
CONTEXT: @.summaries/IMPL-*.md,../.. @../../CLAUDE.md CONTEXT: @.summaries/IMPL-*.md,../.. @../../project-tech.json @../../project-guidelines.json
EXPECTED: Quality assessment with improvement suggestions EXPECTED: Quality assessment with improvement suggestions
RULES: Check for code smells, duplication, complexity, naming conventions RULES: Check for code smells, duplication, complexity, naming conventions
" --tool gemini --mode write --cd .workflow/active/${sessionId} " --tool gemini --mode write --cd .workflow/active/${sessionId}
@@ -185,7 +185,7 @@ After bash validation, the model takes control to:
ccw cli -p " ccw cli -p "
PURPOSE: Verify all requirements and acceptance criteria are met PURPOSE: Verify all requirements and acceptance criteria are met
TASK: Cross-check implementation summaries against original requirements TASK: Cross-check implementation summaries against original requirements
CONTEXT: @.task/IMPL-*.json,.summaries/IMPL-*.md,../.. @../../CLAUDE.md CONTEXT: @.task/IMPL-*.json,.summaries/IMPL-*.md,../.. @../../project-tech.json @../../project-guidelines.json
EXPECTED: EXPECTED:
- Requirements coverage matrix - Requirements coverage matrix
- Acceptance criteria verification - Acceptance criteria verification

View File

@@ -0,0 +1,299 @@
---
name: solidify
description: Crystallize session learnings and user-defined constraints into permanent project guidelines
argument-hint: "[--type <convention|constraint|learning>] [--category <category>] \"rule or insight\""
examples:
- /workflow:session:solidify "Use functional components for all React code" --type convention
- /workflow:session:solidify "No direct DB access from controllers" --type constraint --category architecture
- /workflow:session:solidify "Cache invalidation requires event sourcing" --type learning --category architecture
- /workflow:session:solidify --interactive
---
# Session Solidify Command (/workflow:session:solidify)
## Overview
Crystallizes ephemeral session context (insights, decisions, constraints) into permanent project guidelines stored in `.workflow/project-guidelines.json`. This ensures valuable learnings persist across sessions and inform future planning.
## Use Cases
1. **During Session**: Capture important decisions as they're made
2. **After Session**: Reflect on lessons learned before archiving
3. **Proactive**: Add team conventions or architectural rules
## Parameters
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `rule` | string | ✅ (unless --interactive) | The rule, convention, or insight to solidify |
| `--type` | enum | ❌ | Type: `convention`, `constraint`, `learning` (default: auto-detect) |
| `--category` | string | ❌ | Category for organization (see categories below) |
| `--interactive` | flag | ❌ | Launch guided wizard for adding rules |
### Type Categories
**convention** → Coding style preferences (goes to `conventions` section)
- Subcategories: `coding_style`, `naming_patterns`, `file_structure`, `documentation`
**constraint** → Hard rules that must not be violated (goes to `constraints` section)
- Subcategories: `architecture`, `tech_stack`, `performance`, `security`
**learning** → Session-specific insights (goes to `learnings` array)
- Subcategories: `architecture`, `performance`, `security`, `testing`, `process`, `other`
## Execution Process
```
Input Parsing:
├─ Parse: rule text (required unless --interactive)
├─ Parse: --type (convention|constraint|learning)
├─ Parse: --category (subcategory)
└─ Parse: --interactive (flag)
Step 1: Ensure Guidelines File Exists
└─ If not exists → Create with empty structure
Step 2: Auto-detect Type (if not specified)
└─ Analyze rule text for keywords
Step 3: Validate and Format Entry
└─ Build entry object based on type
Step 4: Update Guidelines File
└─ Add entry to appropriate section
Step 5: Display Confirmation
└─ Show what was added and where
```
## Implementation
### Step 1: Ensure Guidelines File Exists
```bash
bash(test -f .workflow/project-guidelines.json && echo "EXISTS" || echo "NOT_FOUND")
```
**If NOT_FOUND**, create scaffold:
```javascript
const scaffold = {
  conventions: {
    coding_style: [],
    naming_patterns: [],
    file_structure: [],
    documentation: []
  },
  constraints: {
    architecture: [],
    tech_stack: [],
    performance: [],
    security: []
  },
  quality_rules: [],
  learnings: [],
  _metadata: {
    created_at: new Date().toISOString(),
    version: "1.0.0"
  }
};
Write('.workflow/project-guidelines.json', JSON.stringify(scaffold, null, 2));
```
### Step 2: Auto-detect Type (if not specified)
```javascript
function detectType(ruleText) {
  const text = ruleText.toLowerCase();
  // Constraint indicators
  if (/\b(no|never|must not|forbidden|prohibited|always must)\b/.test(text)) {
    return 'constraint';
  }
  // Learning indicators
  if (/\b(learned|discovered|realized|found that|turns out)\b/.test(text)) {
    return 'learning';
  }
  // Default to convention
  return 'convention';
}
function detectCategory(ruleText, type) {
  const text = ruleText.toLowerCase();
  if (type === 'constraint' || type === 'learning') {
    if (/\b(architecture|layer|module|dependency|circular)\b/.test(text)) return 'architecture';
    if (/\b(security|auth|permission|sanitize|xss|sql)\b/.test(text)) return 'security';
    if (/\b(performance|cache|lazy|async|sync|slow)\b/.test(text)) return 'performance';
    if (/\b(test|coverage|mock|stub)\b/.test(text)) return 'testing';
  }
  if (type === 'convention') {
    if (/\b(name|naming|prefix|suffix|camel|pascal)\b/.test(text)) return 'naming_patterns';
    if (/\b(file|folder|directory|structure|organize)\b/.test(text)) return 'file_structure';
    if (/\b(doc|comment|jsdoc|readme)\b/.test(text)) return 'documentation';
    return 'coding_style';
  }
  return type === 'constraint' ? 'tech_stack' : 'other';
}
```
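Tracing these detectors on the front-matter examples shows why an explicit `--category` still matters (behavior follows the regexes exactly as written above):
```javascript
detectType("Use functional components for all React code");
// → 'convention' (no constraint/learning keywords match)
detectType("No direct DB access from controllers");
// → 'constraint' (\bno\b matches)
detectCategory("No direct DB access from controllers", "constraint");
// → 'tech_stack' - none of the architecture/security/performance/testing
//   keywords appear, so the fallback applies; hence the example passes
//   --category architecture explicitly
```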
### Step 3: Build Entry
```javascript
function buildEntry(rule, type, category, sessionId) {
  if (type === 'learning') {
    return {
      date: new Date().toISOString().split('T')[0],
      session_id: sessionId || null,
      insight: rule,
      category: category,
      context: null
    };
  }
  // For conventions and constraints, just return the rule string
  return rule;
}
```
### Step 4: Update Guidelines File
```javascript
const guidelines = JSON.parse(Read('.workflow/project-guidelines.json'));
if (type === 'convention') {
  if (!guidelines.conventions[category]) {
    guidelines.conventions[category] = [];
  }
  if (!guidelines.conventions[category].includes(rule)) {
    guidelines.conventions[category].push(rule);
  }
} else if (type === 'constraint') {
  if (!guidelines.constraints[category]) {
    guidelines.constraints[category] = [];
  }
  if (!guidelines.constraints[category].includes(rule)) {
    guidelines.constraints[category].push(rule);
  }
} else if (type === 'learning') {
  guidelines.learnings.push(buildEntry(rule, type, category, sessionId));
}
guidelines._metadata.updated_at = new Date().toISOString();
guidelines._metadata.last_solidified_by = sessionId;
Write('.workflow/project-guidelines.json', JSON.stringify(guidelines, null, 2));
```
### Step 5: Display Confirmation
```
✓ Guideline solidified
Type: ${type}
Category: ${category}
Rule: "${rule}"
Location: .workflow/project-guidelines.json → ${type}s.${category}
Total ${type}s in ${category}: ${count}
```
## Interactive Mode
When `--interactive` flag is provided:
```javascript
AskUserQuestion({
  questions: [
    {
      question: "What type of guideline are you adding?",
      header: "Type",
      multiSelect: false,
      options: [
        { label: "Convention", description: "Coding style preference (e.g., use functional components)" },
        { label: "Constraint", description: "Hard rule that must not be violated (e.g., no direct DB access)" },
        { label: "Learning", description: "Insight from this session (e.g., cache invalidation needs events)" }
      ]
    }
  ]
});
// Follow-up based on type selection...
```
## Examples
### Add a Convention
```bash
/workflow:session:solidify "Use async/await instead of callbacks" --type convention --category coding_style
```
Result in `project-guidelines.json`:
```json
{
  "conventions": {
    "coding_style": ["Use async/await instead of callbacks"]
  }
}
```
### Add an Architectural Constraint
```bash
/workflow:session:solidify "No direct DB access from controllers" --type constraint --category architecture
```
Result:
```json
{
  "constraints": {
    "architecture": ["No direct DB access from controllers"]
  }
}
```
### Capture a Session Learning
```bash
/workflow:session:solidify "Cache invalidation requires event sourcing for consistency" --type learning
```
Result:
```json
{
  "learnings": [
    {
      "date": "2024-12-28",
      "session_id": "WFS-auth-feature",
      "insight": "Cache invalidation requires event sourcing for consistency",
      "category": "architecture"
    }
  ]
}
```
## Integration with Planning
The `project-guidelines.json` is consumed by:
1. **`/workflow:tools:context-gather`**: Loads guidelines into context-package.json
2. **`/workflow:plan`**: Passes guidelines to task generation agent
3. **`task-generate-agent`**: Includes guidelines as "CRITICAL CONSTRAINTS" in system prompt
This ensures all future planning respects solidified rules without users needing to re-state them.
## Error Handling
- **Duplicate Rule**: Warn and skip if exact rule already exists
- **Invalid Category**: Suggest valid categories for the type
- **File Corruption**: Backup existing file before modification
## Related Commands
- `/workflow:session:start` - Start a session (may prompt for solidify at end)
- `/workflow:session:complete` - Complete session (prompts for learnings to solidify)
- `/workflow:init` - Creates project-guidelines.json scaffold if missing

View File

@@ -38,26 +38,29 @@ ERROR: Invalid session type. Valid types: workflow, review, tdd, test, docs
 ## Step 0: Initialize Project State (First-time Only)
-**Executed before all modes** - Ensures project-level state file exists by calling `/workflow:init`.
+**Executed before all modes** - Ensures project-level state files exist by calling `/workflow:init`.
 ### Check and Initialize
 ```bash
-# Check if project state exists
-bash(test -f .workflow/project.json && echo "EXISTS" || echo "NOT_FOUND")
+# Check if project state exists (both files required)
+bash(test -f .workflow/project-tech.json && echo "TECH_EXISTS" || echo "TECH_NOT_FOUND")
+bash(test -f .workflow/project-guidelines.json && echo "GUIDELINES_EXISTS" || echo "GUIDELINES_NOT_FOUND")
 ```
-**If NOT_FOUND**, delegate to `/workflow:init`:
+**If either NOT_FOUND**, delegate to `/workflow:init`:
 ```javascript
 // Call workflow:init for intelligent project analysis
 SlashCommand({command: "/workflow:init"});
 // Wait for init completion
-// project.json will be created with comprehensive project overview
+// project-tech.json and project-guidelines.json will be created
 ```
 **Output**:
-- If EXISTS: `PROJECT_STATE: initialized`
-- If NOT_FOUND: Calls `/workflow:init` → creates `.workflow/project.json` with full project analysis
+- If BOTH_EXIST: `PROJECT_STATE: initialized`
+- If NOT_FOUND: Calls `/workflow:init` → creates:
+  - `.workflow/project-tech.json` with full technical analysis
+  - `.workflow/project-guidelines.json` with empty scaffold
 **Note**: `/workflow:init` uses cli-explore-agent to build comprehensive project understanding (technology stack, architecture, key components). This step runs once per project. Subsequent executions skip initialization.

View File

@@ -236,7 +236,10 @@ Task(
 Execute complete context-search-agent workflow for implementation planning:
 ### Phase 1: Initialization & Pre-Analysis
-1. **Project State Loading**: Read and parse `.workflow/project.json`. Use its `overview` section as the foundational `project_context`. This is your primary source for architecture, tech stack, and key components. If file doesn't exist, proceed with fresh analysis.
+1. **Project State Loading**:
+   - Read and parse `.workflow/project-tech.json`. Use its `technology_analysis` section as the foundational `project_context`. This is your primary source for architecture, tech stack, and key components.
+   - Read and parse `.workflow/project-guidelines.json`. Load `conventions`, `constraints`, and `learnings` into a `project_guidelines` section.
+   - If files don't exist, proceed with fresh analysis.
 2. **Detection**: Check for existing context-package (early exit if valid)
 3. **Foundation**: Initialize CodexLens, get project structure, load docs
 4. **Analysis**: Extract keywords, determine scope, classify complexity based on task description and project state
@@ -251,17 +254,19 @@ Execute all discovery tracks:
 ### Phase 3: Synthesis, Assessment & Packaging
 1. Apply relevance scoring and build dependency graph
-2. **Synthesize 4-source data**: Merge findings from all sources (archive > docs > code > web). **Prioritize the context from `project.json`** for architecture and tech stack unless code analysis reveals it's outdated.
-3. **Populate `project_context`**: Directly use the `overview` from `project.json` to fill the `project_context` section of the output `context-package.json`. Include description, technology_stack, architecture, and key_components.
-4. Integrate brainstorm artifacts (if .brainstorming/ exists, read content)
-5. Perform conflict detection with risk assessment
-6. **Inject historical conflicts** from archive analysis into conflict_detection
-7. Generate and validate context-package.json
+2. **Synthesize 4-source data**: Merge findings from all sources (archive > docs > code > web). **Prioritize the context from `project-tech.json`** for architecture and tech stack unless code analysis reveals it's outdated.
+3. **Populate `project_context`**: Directly use the `technology_analysis` from `project-tech.json` to fill the `project_context` section. Include description, technology_stack, architecture, and key_components.
+4. **Populate `project_guidelines`**: Load conventions, constraints, and learnings from `project-guidelines.json` into a dedicated section.
+5. Integrate brainstorm artifacts (if .brainstorming/ exists, read content)
+6. Perform conflict detection with risk assessment
+7. **Inject historical conflicts** from archive analysis into conflict_detection
+8. Generate and validate context-package.json
 ## Output Requirements
 Complete context-package.json with:
 - **metadata**: task_description, keywords, complexity, tech_stack, session_id
-- **project_context**: description, technology_stack, architecture, key_components (sourced from `project.json` overview)
+- **project_context**: description, technology_stack, architecture, key_components (sourced from `project-tech.json`)
+- **project_guidelines**: {conventions, constraints, quality_rules, learnings} (sourced from `project-guidelines.json`)
 - **assets**: {documentation[], source_code[], config[], tests[]} with relevance scores
 - **dependencies**: {internal[], external[]} with dependency graph
 - **brainstorm_artifacts**: {guidance_specification, role_analyses[], synthesis_output} with content
@@ -314,7 +319,8 @@ Refer to `context-search-agent.md` Phase 3.7 for complete `context-package.json`
 **Key Sections**:
 - **metadata**: Session info, keywords, complexity, tech stack
-- **project_context**: Architecture patterns, conventions, tech stack (populated from `project.json` overview)
+- **project_context**: Architecture patterns, conventions, tech stack (populated from `project-tech.json`)
+- **project_guidelines**: Conventions, constraints, quality rules, learnings (populated from `project-guidelines.json`)
 - **assets**: Categorized files with relevance scores (documentation, source_code, config, tests)
 - **dependencies**: Internal and external dependency graphs
 - **brainstorm_artifacts**: Brainstorm documents with full content (if exists)
@@ -429,6 +435,7 @@ if (historicalConflicts.length > 0 && currentRisk === "low") {
 ## Notes
 - **Detection-first**: Always check for existing package before invoking agent
-- **Project.json integration**: Agent reads `.workflow/project.json` as primary source for project context, avoiding redundant analysis
+- **Dual project file integration**: Agent reads both `.workflow/project-tech.json` (tech analysis) and `.workflow/project-guidelines.json` (user constraints) as primary sources
+- **Guidelines injection**: Project guidelines are included in context-package to ensure task generation respects user-defined constraints
 - **No redundancy**: This command is a thin orchestrator, all logic in agent
 - **Plan-specific**: Use this for implementation planning; brainstorm mode uses direct agent call
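
For reference, the injected `project_guidelines` section of `context-package.json` would mirror the guidelines file itself. A sketch with entries reused from the solidify examples (exact schema per `context-search-agent.md`):

```json
{
  "project_guidelines": {
    "conventions": { "coding_style": ["Use async/await instead of callbacks"] },
    "constraints": { "architecture": ["No direct DB access from controllers"] },
    "quality_rules": [],
    "learnings": [
      { "date": "2024-12-28", "insight": "Cache invalidation requires event sourcing for consistency", "category": "architecture" }
    ]
  }
}
```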

View File

@@ -1192,6 +1192,48 @@ async function nextAction(itemId: string | undefined, options: IssueOptions): Pr
 }, null, 2));
 }
+/**
+ * detail - Get task details by item_id (READ-ONLY, does NOT change status)
+ * Used for parallel execution: orchestrator gets dag, then dispatches with detail <id>
+ */
+async function detailAction(itemId: string | undefined, options: IssueOptions): Promise<void> {
+  if (!itemId) {
+    console.log(JSON.stringify({ status: 'error', message: 'item_id is required' }));
+    return;
+  }
+  const queue = readActiveQueue();
+  const queueItem = queue.tasks.find(t => t.item_id === itemId);
+  if (!queueItem) {
+    console.log(JSON.stringify({ status: 'error', message: `Task ${itemId} not found` }));
+    return;
+  }
+  // Load task definition from solution
+  const solution = findSolution(queueItem.issue_id, queueItem.solution_id);
+  const taskDef = solution?.tasks.find(t => t.id === queueItem.task_id);
+  if (!taskDef) {
+    console.log(JSON.stringify({ status: 'error', message: 'Task definition not found in solution' }));
+    return;
+  }
+  // Return full task info (READ-ONLY - no status update)
+  console.log(JSON.stringify({
+    item_id: queueItem.item_id,
+    issue_id: queueItem.issue_id,
+    solution_id: queueItem.solution_id,
+    status: queueItem.status,
+    task: taskDef,
+    context: solution?.exploration_context || {},
+    execution_hints: {
+      executor: queueItem.assigned_executor,
+      estimated_minutes: taskDef.estimated_minutes || 30
+    }
+  }, null, 2));
+}
 /**
  * done - Mark task completed or failed
  */
@@ -1333,6 +1375,9 @@ export async function issueCommand(
     case 'next':
       await nextAction(argsArray[0], options);
       break;
+    case 'detail':
+      await detailAction(argsArray[0], options);
+      break;
     case 'done':
       await doneAction(argsArray[0], options);
       break;
@@ -1370,7 +1415,8 @@ export async function issueCommand(
 console.log(chalk.gray('  retry [issue-id]         Retry failed tasks'));
 console.log();
 console.log(chalk.bold('Execution Endpoints:'));
-console.log(chalk.gray('  next [item-id]           Get task by ID or next ready task (JSON)'));
+console.log(chalk.gray('  next [item-id]           Get & mark task executing (JSON)'));
+console.log(chalk.gray('  detail <item-id>         Get task details (READ-ONLY, for parallel)'));
 console.log(chalk.gray('  done <item-id>           Mark task completed'));
 console.log(chalk.gray('  done <item-id> --fail    Mark task failed'));
 console.log();