mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-02-05 01:50:27 +08:00
Fix session management location inference and ccw command usage
This commit addresses multiple issues in session management and command documentation: Session Management Fixes: - Add auto-inference of location from type parameter in session.ts - When --type lite-plan/lite-fix is specified, automatically set location accordingly - Preserve explicit --location parameter when provided - Update session-manager.ts to support type-based location inference - Fix metadata filename selection (session-metadata.json vs workflow-session.json) Command Documentation Fixes: - Add missing --mode analysis parameter (3 locations): * commands/memory/docs.md * commands/workflow/lite-execute.md (2 instances) - Add missing --mode write parameter (4 locations): * commands/workflow/tools/task-generate-agent.md - Remove non-existent subcommands (3 locations): * commands/workflow/session/complete.md (manifest, project) - Update session command syntax to use simplified format: * Changed from 'ccw session manifest read' to 'test -f' checks * Changed from 'ccw session project read' to 'test -f' checks Documentation Updates: - Update lite-plan.md and lite-fix.md to use --type parameter - Update session/start.md to document lite-plan and lite-fix types - Sync all fixes to skills/command-guide/reference directory (84 files) All ccw command usage across the codebase is now consistent and correct. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -34,10 +34,10 @@ You are a code execution specialist focused on implementing high-quality, produc
|
||||
- **context-package.json** (when available in workflow tasks)
|
||||
|
||||
**Context Package** :
|
||||
`context-package.json` provides artifact paths - read using `ccw session`:
|
||||
`context-package.json` provides artifact paths - read using Read tool or ccw session:
|
||||
```bash
|
||||
# Get context package content from session
|
||||
ccw session read ${SESSION_ID} --type context
|
||||
# Get context package content from session using Read tool
|
||||
Read(.workflow/active/${SESSION_ID}/.process/context-package.json)
|
||||
# Returns parsed JSON with brainstorm_artifacts, focus_paths, etc.
|
||||
```
|
||||
|
||||
|
||||
@@ -109,7 +109,7 @@ This agent processes **simplified inline [FLOW_CONTROL]** format from brainstorm
|
||||
|
||||
3. **load_session_metadata**
|
||||
- Action: Load session metadata
|
||||
- Command: ccw session read WFS-{session} --type session
|
||||
- Command: Read(.workflow/active/WFS-{session}/workflow-session.json)
|
||||
- Output: session_metadata
|
||||
```
|
||||
|
||||
@@ -155,7 +155,7 @@ When called, you receive:
|
||||
- **User Context**: Specific requirements, constraints, and expectations from user discussion
|
||||
- **Output Location**: Directory path for generated analysis files
|
||||
- **Role Hint** (optional): Suggested role or role selection guidance
|
||||
- **context-package.json** (CCW Workflow): Artifact paths catalog - use `ccw session read {session} --type context` to get context package
|
||||
- **context-package.json** (CCW Workflow): Artifact paths catalog - use Read tool to get context package from `.workflow/active/{session}/.process/context-package.json`
|
||||
- **ASSIGNED_ROLE** (optional): Specific role assignment
|
||||
- **ANALYSIS_DIMENSIONS** (optional): Role-specific analysis dimensions
|
||||
|
||||
|
||||
@@ -83,7 +83,7 @@ When task JSON contains implementation_approach array:
|
||||
- L1 (Unit): `*.test.*`, `*.spec.*` in `__tests__/`, `tests/unit/`
|
||||
- L2 (Integration): `tests/integration/`, `*.integration.test.*`
|
||||
- L3 (E2E): `tests/e2e/`, `*.e2e.test.*`, `cypress/`, `playwright/`
|
||||
- **context-package.json** (CCW Workflow): Use `ccw session read {session} --type context` to get context package with artifact paths
|
||||
- **context-package.json** (CCW Workflow): Use Read tool to get context package from `.workflow/active/{session}/.process/context-package.json`
|
||||
- Identify test commands from project configuration
|
||||
|
||||
```bash
|
||||
|
||||
@@ -74,7 +74,7 @@ SlashCommand(command="/workflow:session:start --type docs --new \"{project_name}
|
||||
|
||||
```bash
|
||||
# Update workflow-session.json with docs-specific fields
|
||||
ccw session update {sessionId} --type session --content '{"target_path":"{target_path}","project_root":"{project_root}","project_name":"{project_name}","mode":"full","tool":"gemini","cli_execute":false}'
|
||||
ccw session {sessionId} write workflow-session.json '{"target_path":"{target_path}","project_root":"{project_root}","project_name":"{project_name}","mode":"full","tool":"gemini","cli_execute":false}'
|
||||
```
|
||||
|
||||
### Phase 2: Analyze Structure
|
||||
@@ -136,7 +136,7 @@ bash(if [ -d .workflow/docs/\${project_name} ]; then find .workflow/docs/\${proj
|
||||
|
||||
```bash
|
||||
# Count existing docs from doc-planning-data.json
|
||||
ccw session read WFS-docs-{timestamp} --type process --filename doc-planning-data.json --raw | jq '.existing_docs.file_list | length'
|
||||
ccw session WFS-docs-{timestamp} read .process/doc-planning-data.json --raw | jq '.existing_docs.file_list | length'
|
||||
# Or read entire process file and parse
|
||||
```
|
||||
|
||||
@@ -191,10 +191,10 @@ Large Projects (single dir >10 docs):
|
||||
|
||||
```bash
|
||||
# 1. Get top-level directories from doc-planning-data.json
|
||||
ccw session read WFS-docs-{timestamp} --type process --filename doc-planning-data.json --raw | jq -r '.top_level_dirs[]'
|
||||
ccw session WFS-docs-{timestamp} read .process/doc-planning-data.json --raw | jq -r '.top_level_dirs[]'
|
||||
|
||||
# 2. Get mode from workflow-session.json
|
||||
ccw session read WFS-docs-{timestamp} --type session --raw | jq -r '.mode // "full"'
|
||||
ccw session WFS-docs-{timestamp} read workflow-session.json --raw | jq -r '.mode // "full"'
|
||||
|
||||
# 3. Check for HTTP API
|
||||
bash(grep -r "router\.|@Get\|@Post" src/ 2>/dev/null && echo "API_FOUND" || echo "NO_API")
|
||||
@@ -223,7 +223,7 @@ bash(grep -r "router\.|@Get\|@Post" src/ 2>/dev/null && echo "API_FOUND" || echo
|
||||
|
||||
**Task ID Calculation**:
|
||||
```bash
|
||||
group_count=$(ccw session read WFS-docs-{timestamp} --type process --filename doc-planning-data.json --raw | jq '.groups.count')
|
||||
group_count=$(ccw session WFS-docs-{timestamp} read .process/doc-planning-data.json --raw | jq '.groups.count')
|
||||
readme_id=$((group_count + 1)) # Next ID after groups
|
||||
arch_id=$((group_count + 2))
|
||||
api_id=$((group_count + 3))
|
||||
@@ -239,7 +239,7 @@ api_id=$((group_count + 3))
|
||||
| **CLI** | true | implementation_approach | write | --mode write | Execute CLI commands, validate output |
|
||||
|
||||
**Command Patterns**:
|
||||
- Gemini/Qwen: `ccw cli exec "..." --tool gemini --cd dir`
|
||||
- Gemini/Qwen: `ccw cli exec "..." --tool gemini --mode analysis --cd dir`
|
||||
- CLI Mode: `ccw cli exec "..." --tool gemini --mode write --cd dir`
|
||||
- Codex: `ccw cli exec "..." --tool codex --mode write --cd dir`
|
||||
|
||||
@@ -286,8 +286,8 @@ api_id=$((group_count + 3))
|
||||
"step": "load_precomputed_data",
|
||||
"action": "Load Phase 2 analysis and extract group directories",
|
||||
"commands": [
|
||||
"ccw session read ${session_id} --type process --filename doc-planning-data.json",
|
||||
"ccw session read ${session_id} --type process --filename doc-planning-data.json --raw | jq '.groups.assignments[] | select(.group_id == \"${group_number}\") | .directories'"
|
||||
"ccw session ${session_id} read .process/doc-planning-data.json",
|
||||
"ccw session ${session_id} read .process/doc-planning-data.json --raw | jq '.groups.assignments[] | select(.group_id == \"${group_number}\") | .directories'"
|
||||
],
|
||||
"output_to": "phase2_context",
|
||||
"note": "Single JSON file contains all Phase 2 analysis results"
|
||||
|
||||
@@ -149,7 +149,7 @@ Parse user input (supports: number "1", full ID "WFS-auth-system", or partial "a
|
||||
|
||||
#### Step 1.3: Load Session Metadata
|
||||
```bash
|
||||
ccw session read ${sessionId} --type session
|
||||
ccw session ${sessionId} read workflow-session.json
|
||||
```
|
||||
|
||||
**Output**: Store session metadata in memory
|
||||
|
||||
@@ -591,12 +591,12 @@ ccw cli exec "[Verify plan acceptance criteria at ${plan.json}]" --tool codex --
|
||||
const reviewId = `${sessionId}-review`
|
||||
|
||||
// First review pass with fixed ID
|
||||
const reviewResult = Bash(`ccw cli exec "[Review prompt]" --tool gemini --id ${reviewId}`)
|
||||
const reviewResult = Bash(`ccw cli exec "[Review prompt]" --tool gemini --mode analysis --id ${reviewId}`)
|
||||
|
||||
// If issues found, continue review dialog with fixed ID chain
|
||||
if (hasUnresolvedIssues(reviewResult)) {
|
||||
// Resume with follow-up questions
|
||||
Bash(`ccw cli exec "Clarify the security concerns you mentioned" --resume ${reviewId} --tool gemini --id ${reviewId}-followup`)
|
||||
Bash(`ccw cli exec "Clarify the security concerns you mentioned" --resume ${reviewId} --tool gemini --mode analysis --id ${reviewId}-followup`)
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
@@ -72,31 +72,62 @@ Phase 5: Dispatch
|
||||
### Phase 1: Intelligent Multi-Angle Diagnosis
|
||||
|
||||
**Session Setup** (MANDATORY - follow exactly):
|
||||
|
||||
**Option 1: Using CLI Command** (Recommended for simplicity):
|
||||
```bash
|
||||
# Generate session ID
|
||||
bug_slug=$(echo "${bug_description}" | tr '[:upper:]' '[:lower:]' | tr -cs '[:alnum:]' '-' | cut -c1-40)
|
||||
date_str=$(date -u '+%Y-%m-%d')
|
||||
session_id="${bug_slug}-${date_str}"
|
||||
|
||||
# Initialize lite-fix session (location auto-inferred from type)
|
||||
ccw session init "${session_id}" \
|
||||
--type lite-fix \
|
||||
--content "{\"description\":\"${bug_description}\",\"severity\":\"${severity}\"}"
|
||||
|
||||
|
||||
|
||||
# Get session folder
|
||||
session_folder=".workflow/.lite-fix/${session_id}"
|
||||
echo "Session initialized: ${session_id} at ${session_folder}"
|
||||
```
|
||||
|
||||
**Option 2: Using session_manager Tool** (For programmatic access):
|
||||
```javascript
|
||||
// Helper: Get UTC+8 (China Standard Time) ISO string
|
||||
const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()
|
||||
|
||||
const bugSlug = bug_description.toLowerCase().replace(/[^a-z0-9]+/g, '-').substring(0, 40)
|
||||
const dateStr = getUtc8ISOString().substring(0, 10) // Format: 2025-11-29
|
||||
const dateStr = getUtc8ISOString().substring(0, 10) // Format: 2025-12-17
|
||||
|
||||
const sessionId = `${bugSlug}-${dateStr}` // e.g., "user-avatar-upload-fails-2025-12-17"
|
||||
|
||||
const sessionId = `${bugSlug}-${dateStr}` // e.g., "user-avatar-upload-fails-2025-11-29"
|
||||
|
||||
// Initialize session via session_manager tool
|
||||
const initResult = await ccw_tool_exec('session_manager', {
|
||||
operation: 'init',
|
||||
session_id: sessionId,
|
||||
location: 'lite-fix',
|
||||
metadata: {
|
||||
description: bug_description,
|
||||
severity: severity, // Set after severity assessment
|
||||
created_at: getUtc8ISOString()
|
||||
}
|
||||
})
|
||||
|
||||
const sessionFolder = initResult.result.path
|
||||
console.log(`Session initialized: ${sessionId} at ${sessionFolder}`)
|
||||
```
|
||||
|
||||
**Session File Structure**:
|
||||
- `session-metadata.json` - Session metadata (created at init, contains description, severity, status)
|
||||
- `fix-plan.json` - Actual fix planning content (created later in Phase 3, contains fix tasks, diagnosis results)
|
||||
|
||||
**Metadata Field Usage**:
|
||||
- `description`: Displayed in dashboard session list (replaces session ID as title)
|
||||
- `severity`: Used for fix planning strategy selection (Low/Medium → Direct Claude, High/Critical → Agent)
|
||||
- `created_at`: Displayed in dashboard timeline
|
||||
- `status`: Updated through workflow (diagnosing → fixing → completed)
|
||||
- Custom fields: Any additional fields in metadata are saved and accessible programmatically
|
||||
|
||||
**Accessing Session Data**:
|
||||
```bash
|
||||
# Read session metadata
|
||||
ccw session ${session_id} read session-metadata.json
|
||||
|
||||
# Read fix plan content (after Phase 3 completion)
|
||||
ccw session ${session_id} read fix-plan.json
|
||||
```
|
||||
|
||||
**Diagnosis Decision Logic**:
|
||||
```javascript
|
||||
const hotfixMode = $ARGUMENTS.includes('--hotfix') || $ARGUMENTS.includes('-h')
|
||||
|
||||
@@ -72,31 +72,59 @@ Phase 5: Dispatch
|
||||
### Phase 1: Intelligent Multi-Angle Exploration
|
||||
|
||||
**Session Setup** (MANDATORY - follow exactly):
|
||||
|
||||
**Option 1: Using CLI Command** (Recommended for simplicity):
|
||||
```bash
|
||||
# Generate session ID
|
||||
task_slug=$(echo "${task_description}" | tr '[:upper:]' '[:lower:]' | tr -cs '[:alnum:]' '-' | cut -c1-40)
|
||||
date_str=$(date -u '+%Y-%m-%d')
|
||||
session_id="${task_slug}-${date_str}"
|
||||
|
||||
# Initialize lite-plan session (location auto-inferred from type)
|
||||
ccw session init "${session_id}" \
|
||||
--type lite-plan \
|
||||
--content "{\"description\":\"${task_description}\",\"complexity\":\"${complexity}\"}"
|
||||
|
||||
# Get session folder
|
||||
session_folder=".workflow/.lite-plan/${session_id}"
|
||||
echo "Session initialized: ${session_id} at ${session_folder}"
|
||||
```
|
||||
|
||||
**Option 2: Using session_manager Tool** (For programmatic access):
|
||||
```javascript
|
||||
// Helper: Get UTC+8 (China Standard Time) ISO string
|
||||
const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()
|
||||
|
||||
const taskSlug = task_description.toLowerCase().replace(/[^a-z0-9]+/g, '-').substring(0, 40)
|
||||
const dateStr = getUtc8ISOString().substring(0, 10) // Format: 2025-11-29
|
||||
const dateStr = getUtc8ISOString().substring(0, 10) // Format: 2025-12-17
|
||||
|
||||
const sessionId = `${taskSlug}-${dateStr}` // e.g., "implement-jwt-refresh-2025-12-17"
|
||||
|
||||
const sessionId = `${taskSlug}-${dateStr}` // e.g., "implement-jwt-refresh-2025-11-29"
|
||||
|
||||
// Initialize session via session_manager tool
|
||||
const initResult = await ccw_tool_exec('session_manager', {
|
||||
operation: 'init',
|
||||
session_id: sessionId,
|
||||
location: 'lite-plan',
|
||||
metadata: {
|
||||
description: task_description,
|
||||
complexity: complexity, // Set after complexity assessment
|
||||
created_at: getUtc8ISOString()
|
||||
}
|
||||
})
|
||||
|
||||
const sessionFolder = initResult.result.path
|
||||
console.log(`Session initialized: ${sessionId} at ${sessionFolder}`)
|
||||
```
|
||||
|
||||
**Session File Structure**:
|
||||
- `session-metadata.json` - Session metadata (created at init, contains description, complexity, status)
|
||||
- `plan.json` - Actual planning content (created later in Phase 3, contains tasks, steps, dependencies)
|
||||
|
||||
**Metadata Field Usage**:
|
||||
- `description`: Displayed in dashboard session list (replaces session ID as title)
|
||||
- `complexity`: Used for planning strategy selection (Low → Direct Claude, Medium/High → Agent)
|
||||
- `created_at`: Displayed in dashboard timeline
|
||||
- Custom fields: Any additional fields in metadata are saved and accessible programmatically
|
||||
|
||||
**Accessing Session Data**:
|
||||
```bash
|
||||
# Read session metadata
|
||||
ccw session ${session_id} read session-metadata.json
|
||||
|
||||
# Read plan content (after Phase 3 completion)
|
||||
ccw session ${session_id} read plan.json
|
||||
```
|
||||
|
||||
**Exploration Decision Logic**:
|
||||
```javascript
|
||||
needsExploration = (
|
||||
|
||||
@@ -112,14 +112,18 @@ After bash validation, the model takes control to:
|
||||
|
||||
1. **Load Context**: Read completed task summaries and changed files
|
||||
```bash
|
||||
# Load implementation summaries
|
||||
ccw session read ${sessionId} --type summary --raw
|
||||
# Load implementation summaries (iterate through .summaries/ directory)
|
||||
for summary in .workflow/active/${sessionId}/.summaries/*.md; do
|
||||
cat "$summary"
|
||||
done
|
||||
|
||||
# Load test results (if available)
|
||||
ccw session read ${sessionId} --type summary --filename "TEST-FIX-*.md" --raw 2>/dev/null
|
||||
for test_summary in .workflow/active/${sessionId}/.summaries/TEST-FIX-*.md 2>/dev/null; do
|
||||
cat "$test_summary"
|
||||
done
|
||||
|
||||
# Get session created_at for git log filter
|
||||
created_at=$(ccw session read ${sessionId} --type session --raw | jq -r .created_at)
|
||||
created_at=$(ccw session ${sessionId} read workflow-session.json | jq -r .created_at)
|
||||
git log --since="$created_at" --name-only --pretty=format: | sort -u
|
||||
```
|
||||
|
||||
@@ -170,11 +174,13 @@ After bash validation, the model takes control to:
|
||||
- Verify all requirements and acceptance criteria met:
|
||||
```bash
|
||||
# Load task requirements and acceptance criteria
|
||||
ccw session read ${sessionId} --type task --raw | jq -r '
|
||||
"Task: " + .id + "\n" +
|
||||
"Requirements: " + (.context.requirements | join(", ")) + "\n" +
|
||||
"Acceptance: " + (.context.acceptance | join(", "))
|
||||
'
|
||||
for task_file in .workflow/active/${sessionId}/.task/*.json; do
|
||||
cat "$task_file" | jq -r '
|
||||
"Task: " + .id + "\n" +
|
||||
"Requirements: " + (.context.requirements | join(", ")) + "\n" +
|
||||
"Acceptance: " + (.context.acceptance | join(", "))
|
||||
'
|
||||
done
|
||||
|
||||
# Check implementation summaries against requirements
|
||||
ccw cli exec "
|
||||
|
||||
@@ -34,7 +34,7 @@ ccw session list --location active
|
||||
#### Step 1.2: Check for Existing Archiving Marker (Resume Detection)
|
||||
```bash
|
||||
# Check if session is already being archived (marker file exists)
|
||||
ccw session read WFS-session-name --type process --filename .archiving 2>/dev/null && echo "RESUMING" || echo "NEW"
|
||||
ccw session WFS-session-name read .process/.archiving 2>/dev/null && echo "RESUMING" || echo "NEW"
|
||||
```
|
||||
|
||||
**If RESUMING**:
|
||||
@@ -47,7 +47,7 @@ ccw session read WFS-session-name --type process --filename .archiving 2>/dev/nu
|
||||
#### Step 1.3: Create Archiving Marker
|
||||
```bash
|
||||
# Mark session as "archiving in progress"
|
||||
ccw session write WFS-session-name --type process --filename .archiving --content ''
|
||||
ccw session WFS-session-name write .process/.archiving ''
|
||||
```
|
||||
**Purpose**:
|
||||
- Prevents concurrent operations on this session
|
||||
@@ -171,8 +171,8 @@ ccw session archive WFS-session-name
|
||||
|
||||
#### Step 3.2: Update Manifest
|
||||
```bash
|
||||
# Read current manifest using ccw (or create empty array if not exists)
|
||||
ccw session read manifest --type manifest --raw 2>/dev/null || echo "[]"
|
||||
# Check if manifest exists
|
||||
test -f .workflow/archives/manifest.json && echo "EXISTS" || echo "NOT_FOUND"
|
||||
```
|
||||
|
||||
**JSON Update Logic**:
|
||||
@@ -221,8 +221,8 @@ rm .workflow/archives/WFS-session-name/.process/.archiving 2>/dev/null || true
|
||||
|
||||
#### Step 4.1: Check Project State Exists
|
||||
```bash
|
||||
# Check project state using ccw
|
||||
ccw session read project --type project 2>/dev/null && echo "EXISTS" || echo "SKIP"
|
||||
# Check if project.json exists
|
||||
test -f .workflow/project.json && echo "EXISTS" || echo "SKIP"
|
||||
```
|
||||
|
||||
**If SKIP**: Output warning and skip Phase 4
|
||||
@@ -249,11 +249,6 @@ const featureId = title.toLowerCase().replace(/[^a-z0-9]+/g, '-').substring(0, 5
|
||||
|
||||
#### Step 4.3: Update project.json
|
||||
|
||||
```bash
|
||||
# Read current project state using ccw
|
||||
ccw session read project --type project --raw
|
||||
```
|
||||
|
||||
**JSON Update Logic**:
|
||||
```javascript
|
||||
// Read existing project.json (created by /workflow:init)
|
||||
|
||||
@@ -30,7 +30,7 @@ ccw session stats WFS-session
|
||||
|
||||
### Step 3: Read Session Metadata
|
||||
```bash
|
||||
ccw session read WFS-session --type session
|
||||
ccw session WFS-session read workflow-session.json
|
||||
# Returns: session_id, status, project, created_at, etc.
|
||||
```
|
||||
|
||||
@@ -39,8 +39,8 @@ ccw session read WFS-session --type session
|
||||
### Basic Operations
|
||||
- **List all sessions**: `ccw session list`
|
||||
- **List active only**: `ccw session list --location active`
|
||||
- **Read session data**: `ccw session read WFS-xxx --type session`
|
||||
- **Get task stats**: `ccw session stats WFS-xxx`
|
||||
- **Read session data**: `ccw session WFS-xxx read workflow-session.json`
|
||||
- **Get task stats**: `ccw session WFS-xxx stats`
|
||||
|
||||
## Simple Output Format
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ ccw session list --location active
|
||||
|
||||
### Step 2: Check Session Status
|
||||
```bash
|
||||
ccw session read WFS-session --type session
|
||||
ccw session WFS-session read workflow-session.json
|
||||
# Check .status field in response
|
||||
```
|
||||
|
||||
@@ -35,17 +35,15 @@ ccw session list --location active
|
||||
|
||||
### Step 4: Update Session Status to Active
|
||||
```bash
|
||||
ccw session status WFS-session active
|
||||
# Or with full update:
|
||||
ccw session update WFS-session --type session --content '{"status":"active","resumed_at":"2025-12-10T08:00:00Z"}'
|
||||
ccw session WFS-session status active
|
||||
```
|
||||
|
||||
## Simple Commands
|
||||
|
||||
### Basic Operations
|
||||
- **List sessions**: `ccw session list --location active`
|
||||
- **Check status**: `ccw session read WFS-xxx --type session`
|
||||
- **Update status**: `ccw session status WFS-xxx active`
|
||||
- **Check status**: `ccw session WFS-xxx read workflow-session.json`
|
||||
- **Update status**: `ccw session WFS-xxx status active`
|
||||
|
||||
### Resume Result
|
||||
```
|
||||
|
||||
@@ -30,10 +30,17 @@ The `--type` parameter classifies sessions for CCW dashboard organization:
|
||||
| `tdd` | TDD-based development | `/workflow:tdd-plan` |
|
||||
| `test` | Test generation/fix sessions | `/workflow:test-fix-gen` |
|
||||
| `docs` | Documentation sessions | `/memory:docs` |
|
||||
| `lite-plan` | Lightweight planning workflow | `/workflow:lite-plan` |
|
||||
| `lite-fix` | Lightweight bug fix workflow | `/workflow:lite-fix` |
|
||||
|
||||
**Special Behavior for `lite-plan` and `lite-fix`**:
|
||||
- These types automatically infer the storage location (`.workflow/.lite-plan/` or `.workflow/.lite-fix/`)
|
||||
- No need to specify `--location` parameter when using these types
|
||||
- Alternative: Use `--location lite-plan` or `--location lite-fix` directly
|
||||
|
||||
**Validation**: If `--type` is provided with invalid value, return error:
|
||||
```
|
||||
ERROR: Invalid session type. Valid types: workflow, review, tdd, test, docs
|
||||
ERROR: Invalid session type. Valid types: workflow, review, tdd, test, docs, lite-plan, lite-fix
|
||||
```
|
||||
|
||||
## Step 0: Initialize Project State (First-time Only)
|
||||
@@ -75,7 +82,7 @@ ccw session list --location active
|
||||
|
||||
### Step 2: Display Session Metadata
|
||||
```bash
|
||||
ccw session read WFS-promptmaster-platform --type session
|
||||
ccw session WFS-promptmaster-platform read workflow-session.json
|
||||
```
|
||||
|
||||
### Step 4: User Decision
|
||||
@@ -102,7 +109,7 @@ ccw session list --location active
|
||||
# Pattern: WFS-{lowercase-slug-from-description}
|
||||
|
||||
# Create session with ccw (creates directories + metadata atomically)
|
||||
ccw session init WFS-implement-oauth2-auth --type workflow --content '{"project":"implement OAuth2 auth","status":"planning"}'
|
||||
ccw session init WFS-implement-oauth2-auth --type workflow
|
||||
```
|
||||
|
||||
**Output**: `SESSION_ID: WFS-implement-oauth2-auth`
|
||||
@@ -113,7 +120,7 @@ ccw session init WFS-implement-oauth2-auth --type workflow --content '{"project"
|
||||
ccw session list --location active
|
||||
|
||||
# Read session metadata for relevance check
|
||||
ccw session read WFS-promptmaster-platform --type session
|
||||
ccw session WFS-promptmaster-platform read workflow-session.json
|
||||
|
||||
# If task contains project keywords → Reuse session
|
||||
# If task unrelated → Create new session (use Step 2a)
|
||||
@@ -149,10 +156,41 @@ ccw session list --location active
|
||||
|
||||
### Step 2: Create Session Structure
|
||||
```bash
|
||||
# Single command creates directories (.process, .task, .summaries) + metadata
|
||||
ccw session init WFS-fix-login-bug --type workflow --content '{"project":"fix login bug","status":"planning"}'
|
||||
# Basic init - creates directories + default metadata
|
||||
ccw session init WFS-fix-login-bug --type workflow
|
||||
|
||||
# Advanced init - with custom metadata
|
||||
ccw session init WFS-oauth-implementation --type workflow --content '{"description":"OAuth2 authentication system","priority":"high","complexity":"medium"}'
|
||||
```
|
||||
|
||||
**Default Metadata** (auto-generated):
|
||||
```json
|
||||
{
|
||||
"session_id": "WFS-fix-login-bug",
|
||||
"type": "workflow",
|
||||
"status": "planning",
|
||||
"created_at": "2025-12-17T..."
|
||||
}
|
||||
```
|
||||
|
||||
**Custom Metadata** (merged with defaults):
|
||||
```json
|
||||
{
|
||||
"session_id": "WFS-oauth-implementation",
|
||||
"type": "workflow",
|
||||
"status": "planning",
|
||||
"created_at": "2025-12-17T...",
|
||||
"description": "OAuth2 authentication system",
|
||||
"priority": "high",
|
||||
"complexity": "medium"
|
||||
}
|
||||
```
|
||||
|
||||
**Field Usage**:
|
||||
- `description`: Displayed in dashboard (replaces session_id as title)
|
||||
- `status`: Can override default "planning" (e.g., "active", "implementing")
|
||||
- Custom fields: Any additional fields are saved and accessible programmatically
|
||||
|
||||
**Output**: `SESSION_ID: WFS-fix-login-bug`
|
||||
|
||||
## Execution Guideline
|
||||
|
||||
@@ -77,18 +77,32 @@ find .workflow/active/ -name "WFS-*" -type d | head -1 | sed 's/.*\///'
|
||||
|
||||
```bash
|
||||
# Load all task JSONs
|
||||
ccw session read {sessionId} --type task
|
||||
for task_file in .workflow/active/{sessionId}/.task/*.json; do
|
||||
cat "$task_file"
|
||||
done
|
||||
|
||||
# Extract task IDs
|
||||
ccw session read {sessionId} --type task --raw | jq -r '.id'
|
||||
for task_file in .workflow/active/{sessionId}/.task/*.json; do
|
||||
cat "$task_file" | jq -r '.id'
|
||||
done
|
||||
|
||||
# Check dependencies - read tasks and filter for IMPL/REFACTOR
|
||||
ccw session read {sessionId} --type task --task-id "IMPL-*" --raw | jq -r '.context.depends_on[]?'
|
||||
ccw session read {sessionId} --type task --task-id "REFACTOR-*" --raw | jq -r '.context.depends_on[]?'
|
||||
for task_file in .workflow/active/{sessionId}/.task/IMPL-*.json; do
|
||||
cat "$task_file" | jq -r '.context.depends_on[]?'
|
||||
done
|
||||
|
||||
for task_file in .workflow/active/{sessionId}/.task/REFACTOR-*.json; do
|
||||
cat "$task_file" | jq -r '.context.depends_on[]?'
|
||||
done
|
||||
|
||||
# Check meta fields
|
||||
ccw session read {sessionId} --type task --raw | jq -r '.meta.tdd_phase'
|
||||
ccw session read {sessionId} --type task --raw | jq -r '.meta.agent'
|
||||
for task_file in .workflow/active/{sessionId}/.task/*.json; do
|
||||
cat "$task_file" | jq -r '.meta.tdd_phase'
|
||||
done
|
||||
|
||||
for task_file in .workflow/active/{sessionId}/.task/*.json; do
|
||||
cat "$task_file" | jq -r '.meta.agent'
|
||||
done
|
||||
```
|
||||
|
||||
**Validation**:
|
||||
|
||||
@@ -307,10 +307,10 @@ Each task JSON MUST include:
|
||||
4. **merge_fork**: Task has multiple parents - merges all parent contexts into new conversation
|
||||
|
||||
**Execution Command Patterns**:
|
||||
- new: `ccw cli exec "[prompt]" --tool [tool] --id [cli_execution_id]`
|
||||
- resume: `ccw cli exec "[prompt]" --resume [resume_from] --tool [tool]`
|
||||
- fork: `ccw cli exec "[prompt]" --resume [resume_from] --id [cli_execution_id] --tool [tool]`
|
||||
- merge_fork: `ccw cli exec "[prompt]" --resume [merge_from.join(',')] --id [cli_execution_id] --tool [tool]`
|
||||
- new: `ccw cli exec "[prompt]" --tool [tool] --mode write --id [cli_execution_id]`
|
||||
- resume: `ccw cli exec "[prompt]" --resume [resume_from] --tool [tool] --mode write`
|
||||
- fork: `ccw cli exec "[prompt]" --resume [resume_from] --id [cli_execution_id] --tool [tool] --mode write`
|
||||
- merge_fork: `ccw cli exec "[prompt]" --resume [merge_from.join(',')] --id [cli_execution_id] --tool [tool] --mode write`
|
||||
|
||||
## QUALITY STANDARDS
|
||||
Hard Constraints:
|
||||
|
||||
@@ -203,7 +203,13 @@ Generate individual `.task/IMPL-*.json` files with the following structure:
|
||||
"id": "IMPL-N",
|
||||
"title": "Descriptive task name",
|
||||
"status": "pending|active|completed|blocked",
|
||||
"context_package_path": ".workflow/active/WFS-{session}/.process/context-package.json"
|
||||
"context_package_path": ".workflow/active/WFS-{session}/.process/context-package.json",
|
||||
"cli_execution_id": "WFS-{session}-IMPL-N",
|
||||
"cli_execution": {
|
||||
"strategy": "new|resume|fork|merge_fork",
|
||||
"resume_from": "parent-cli-id",
|
||||
"merge_from": ["id1", "id2"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
@@ -216,6 +222,50 @@ Generate individual `.task/IMPL-*.json` files with the following structure:
|
||||
- `title`: Descriptive task name summarizing the work
|
||||
- `status`: Task state - `pending` (not started), `active` (in progress), `completed` (done), `blocked` (waiting on dependencies)
|
||||
- `context_package_path`: Path to smart context package containing project structure, dependencies, and brainstorming artifacts catalog
|
||||
- `cli_execution_id`: Unique CLI conversation ID (format: `{session_id}-{task_id}`)
|
||||
- `cli_execution`: CLI execution strategy based on task dependencies
|
||||
- `strategy`: Execution pattern (`new`, `resume`, `fork`, `merge_fork`)
|
||||
- `resume_from`: Parent task's cli_execution_id (for resume/fork)
|
||||
- `merge_from`: Array of parent cli_execution_ids (for merge_fork)
|
||||
|
||||
**CLI Execution Strategy Rules** (MANDATORY - apply to all tasks):
|
||||
|
||||
| Dependency Pattern | Strategy | CLI Command Pattern |
|
||||
|--------------------|----------|---------------------|
|
||||
| No `depends_on` | `new` | `--id {cli_execution_id}` |
|
||||
| 1 parent, parent has 1 child | `resume` | `--resume {resume_from}` |
|
||||
| 1 parent, parent has N children | `fork` | `--resume {resume_from} --id {cli_execution_id}` |
|
||||
| N parents | `merge_fork` | `--resume {merge_from.join(',')} --id {cli_execution_id}` |
|
||||
|
||||
**Strategy Selection Algorithm**:
|
||||
```javascript
|
||||
function computeCliStrategy(task, allTasks) {
|
||||
const deps = task.context?.depends_on || []
|
||||
const childCount = allTasks.filter(t =>
|
||||
t.context?.depends_on?.includes(task.id)
|
||||
).length
|
||||
|
||||
if (deps.length === 0) {
|
||||
return { strategy: "new" }
|
||||
} else if (deps.length === 1) {
|
||||
const parentTask = allTasks.find(t => t.id === deps[0])
|
||||
const parentChildCount = allTasks.filter(t =>
|
||||
t.context?.depends_on?.includes(deps[0])
|
||||
).length
|
||||
|
||||
if (parentChildCount === 1) {
|
||||
return { strategy: "resume", resume_from: parentTask.cli_execution_id }
|
||||
} else {
|
||||
return { strategy: "fork", resume_from: parentTask.cli_execution_id }
|
||||
}
|
||||
} else {
|
||||
const mergeFrom = deps.map(depId =>
|
||||
allTasks.find(t => t.id === depId).cli_execution_id
|
||||
)
|
||||
return { strategy: "merge_fork", merge_from: mergeFrom }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Meta Object
|
||||
|
||||
@@ -225,7 +275,13 @@ Generate individual `.task/IMPL-*.json` files with the following structure:
|
||||
"type": "feature|bugfix|refactor|test-gen|test-fix|docs",
|
||||
"agent": "@code-developer|@action-planning-agent|@test-fix-agent|@universal-executor",
|
||||
"execution_group": "parallel-abc123|null",
|
||||
"module": "frontend|backend|shared|null"
|
||||
"module": "frontend|backend|shared|null",
|
||||
"execution_config": {
|
||||
"method": "agent|hybrid|cli",
|
||||
"cli_tool": "codex|gemini|qwen|auto",
|
||||
"enable_resume": true,
|
||||
"previous_cli_id": "string|null"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -235,6 +291,11 @@ Generate individual `.task/IMPL-*.json` files with the following structure:
|
||||
- `agent`: Assigned agent for execution
|
||||
- `execution_group`: Parallelization group ID (tasks with same ID can run concurrently) or `null` for sequential tasks
|
||||
- `module`: Module identifier for multi-module projects (e.g., `frontend`, `backend`, `shared`) or `null` for single-module
|
||||
- `execution_config`: CLI execution settings (from userConfig in task-generate-agent)
|
||||
- `method`: Execution method - `agent` (direct), `hybrid` (agent + CLI), `cli` (CLI only)
|
||||
- `cli_tool`: Preferred CLI tool - `codex`, `gemini`, `qwen`, or `auto`
|
||||
- `enable_resume`: Whether to use `--resume` for CLI continuity (default: true)
|
||||
- `previous_cli_id`: Previous task's CLI execution ID for resume (populated at runtime)
|
||||
|
||||
**Test Task Extensions** (for type="test-gen" or type="test-fix"):
|
||||
|
||||
@@ -409,14 +470,14 @@ Generate individual `.task/IMPL-*.json` files with the following structure:
|
||||
// Pattern: Gemini CLI deep analysis
|
||||
{
|
||||
"step": "gemini_analyze_[aspect]",
|
||||
"command": "ccw cli exec 'PURPOSE: [goal]\\nTASK: [tasks]\\nMODE: analysis\\nCONTEXT: @[paths]\\nEXPECTED: [output]\\nRULES: $(cat [template]) | [constraints] | analysis=READ-ONLY' --tool gemini --cd [path]",
|
||||
"command": "ccw cli exec 'PURPOSE: [goal]\\nTASK: [tasks]\\nMODE: analysis\\nCONTEXT: @[paths]\\nEXPECTED: [output]\\nRULES: $(cat [template]) | [constraints] | analysis=READ-ONLY' --tool gemini --mode analysis --cd [path]",
|
||||
"output_to": "analysis_result"
|
||||
},
|
||||
|
||||
// Pattern: Qwen CLI analysis (fallback/alternative)
|
||||
{
|
||||
"step": "qwen_analyze_[aspect]",
|
||||
"command": "ccw cli exec '[similar to gemini pattern]' --tool qwen --cd [path]",
|
||||
"command": "ccw cli exec '[similar to gemini pattern]' --tool qwen --mode analysis --cd [path]",
|
||||
"output_to": "analysis_result"
|
||||
},
|
||||
|
||||
@@ -457,7 +518,7 @@ The examples above demonstrate **patterns**, not fixed requirements. Agent MUST:
|
||||
4. **Command Composition Patterns**:
|
||||
- **Single command**: `bash([simple_search])`
|
||||
- **Multiple commands**: `["bash([cmd1])", "bash([cmd2])"]`
|
||||
- **CLI analysis**: `ccw cli exec '[prompt]' --tool gemini --cd [path]`
|
||||
- **CLI analysis**: `ccw cli exec '[prompt]' --tool gemini --mode analysis --cd [path]`
|
||||
- **MCP integration**: `mcp__[tool]__[function]([params])`
|
||||
|
||||
**Key Principle**: Examples show **structure patterns**, not specific implementations. Agent must create task-appropriate steps dynamically.
|
||||
@@ -479,11 +540,12 @@ The `implementation_approach` supports **two execution modes** based on the pres
|
||||
- Specified command executes the step directly
|
||||
- Leverages specialized CLI tools (codex/gemini/qwen) for complex reasoning
|
||||
- **Use for**: Large-scale features, complex refactoring, or when user explicitly requests CLI tool usage
|
||||
- **Required fields**: Same as default mode **PLUS** `command`
|
||||
- **Command patterns**:
|
||||
- `ccw cli exec '[prompt]' --tool codex --mode auto --cd [path]`
|
||||
- `ccw cli exec '[task]' --tool codex --mode auto` (multi-step with context)
|
||||
- **Required fields**: Same as default mode **PLUS** `command`, `resume_from` (optional)
|
||||
- **Command patterns** (with resume support):
|
||||
- `ccw cli exec '[prompt]' --tool codex --mode write --cd [path]`
|
||||
- `ccw cli exec '[prompt]' --resume ${previousCliId} --tool codex --mode write` (resume from previous)
|
||||
- `ccw cli exec '[prompt]' --tool gemini --mode write --cd [path]` (write mode)
|
||||
- **Resume mechanism**: When step depends on previous CLI execution, include `--resume` with previous execution ID
|
||||
|
||||
**Semantic CLI Tool Selection**:
|
||||
|
||||
@@ -559,11 +621,26 @@ Agent determines CLI tool usage per-step based on user semantics and task nature
|
||||
"step": 3,
|
||||
"title": "Execute implementation using CLI tool",
|
||||
"description": "Use Codex/Gemini for complex autonomous execution",
|
||||
"command": "ccw cli exec '[prompt]' --tool codex --mode auto --cd [path]",
|
||||
"command": "ccw cli exec '[prompt]' --tool codex --mode write --cd [path]",
|
||||
"modification_points": ["[Same as default mode]"],
|
||||
"logic_flow": ["[Same as default mode]"],
|
||||
"depends_on": [1, 2],
|
||||
"output": "cli_implementation"
|
||||
"output": "cli_implementation",
|
||||
"cli_output_id": "step3_cli_id" // Store execution ID for resume
|
||||
},
|
||||
|
||||
// === CLI MODE with Resume: Continue from previous CLI execution ===
|
||||
{
|
||||
"step": 4,
|
||||
"title": "Continue implementation with context",
|
||||
"description": "Resume from previous step with accumulated context",
|
||||
"command": "ccw cli exec '[continuation prompt]' --resume ${step3_cli_id} --tool codex --mode write",
|
||||
"resume_from": "step3_cli_id", // Reference previous step's CLI ID
|
||||
"modification_points": ["[Continue from step 3]"],
|
||||
"logic_flow": ["[Build on previous output]"],
|
||||
"depends_on": [3],
|
||||
"output": "continued_implementation",
|
||||
"cli_output_id": "step4_cli_id"
|
||||
}
|
||||
]
|
||||
```
|
||||
@@ -759,6 +836,8 @@ Use `analysis_results.complexity` or task count to determine structure:
|
||||
- Use provided context package: Extract all information from structured context
|
||||
- Respect memory-first rule: Use provided content (already loaded from memory/file)
|
||||
- Follow 6-field schema: All task JSONs must have id, title, status, context_package_path, meta, context, flow_control
|
||||
- **Assign CLI execution IDs**: Every task MUST have `cli_execution_id` (format: `{session_id}-{task_id}`)
|
||||
- **Compute CLI execution strategy**: Based on `depends_on`, set `cli_execution.strategy` (new/resume/fork/merge_fork)
|
||||
- Map artifacts: Use artifacts_inventory to populate task.context.artifacts array
|
||||
- Add MCP integration: Include MCP tool steps in flow_control.pre_analysis when capabilities available
|
||||
- Validate task count: Maximum 12 tasks hard limit, request re-scope if exceeded
|
||||
|
||||
@@ -134,7 +134,7 @@ RULES: $(cat {selected_template}) | {constraints}
|
||||
```
|
||||
analyze|plan → gemini (qwen fallback) + mode=analysis
|
||||
execute (simple|medium) → gemini (qwen fallback) + mode=write
|
||||
execute (complex) → codex + mode=auto
|
||||
execute (complex) → codex + mode=write
|
||||
discuss → multi (gemini + codex parallel)
|
||||
```
|
||||
|
||||
@@ -155,7 +155,7 @@ MODE: analysis
|
||||
CONTEXT: @**/*
|
||||
EXPECTED: {output}
|
||||
RULES: $(cat ~/.claude/workflows/cli-templates/prompts/analysis/pattern.txt)
|
||||
" --tool gemini --cd {dir}
|
||||
" --tool gemini --mode analysis --cd {dir}
|
||||
|
||||
# Qwen fallback: Replace '--tool gemini' with '--tool qwen'
|
||||
```
|
||||
@@ -165,14 +165,14 @@ RULES: $(cat ~/.claude/workflows/cli-templates/prompts/analysis/pattern.txt)
|
||||
ccw cli exec "..." --tool gemini --mode write --cd {dir}
|
||||
```
|
||||
|
||||
**Codex (Auto)**:
|
||||
**Codex (Write)**:
|
||||
```bash
|
||||
ccw cli exec "..." --tool codex --mode auto --cd {dir}
|
||||
ccw cli exec "..." --tool codex --mode write --cd {dir}
|
||||
```
|
||||
|
||||
**Cross-Directory** (Gemini/Qwen):
|
||||
```bash
|
||||
ccw cli exec "CONTEXT: @**/* @../shared/**/*" --tool gemini --cd src/auth --includeDirs ../shared
|
||||
ccw cli exec "CONTEXT: @**/* @../shared/**/*" --tool gemini --mode analysis --cd src/auth --includeDirs ../shared
|
||||
```
|
||||
|
||||
**Directory Scope**:
|
||||
|
||||
@@ -85,7 +85,7 @@ MODE: analysis
|
||||
CONTEXT: @**/*
|
||||
EXPECTED: {from prompt}
|
||||
RULES: {from prompt, if template specified} | analysis=READ-ONLY
|
||||
" --tool gemini --cd {dir}
|
||||
" --tool gemini --mode analysis --cd {dir}
|
||||
```
|
||||
|
||||
**Fallback Chain**: Gemini → Qwen → Codex → Bash-only
|
||||
|
||||
@@ -1,140 +1,117 @@
|
||||
---
|
||||
name: cli-lite-planning-agent
|
||||
description: |
|
||||
Specialized agent for executing CLI planning tools (Gemini/Qwen) to generate detailed implementation plans. Used by lite-plan workflow for Medium/High complexity tasks.
|
||||
Generic planning agent for lite-plan and lite-fix workflows. Generates structured plan JSON based on provided schema reference.
|
||||
|
||||
Core capabilities:
|
||||
- Task decomposition (1-10 tasks with IDs: T1, T2...)
|
||||
- Dependency analysis (depends_on references)
|
||||
- Flow control (parallel/sequential phases)
|
||||
- Multi-angle exploration context integration
|
||||
- Schema-driven output (plan-json-schema or fix-plan-json-schema)
|
||||
- Task decomposition with dependency analysis
|
||||
- CLI execution ID assignment for fork/merge strategies
|
||||
- Multi-angle context integration (explorations or diagnoses)
|
||||
color: cyan
|
||||
---
|
||||
|
||||
You are a specialized execution agent that bridges CLI planning tools (Gemini/Qwen) with lite-plan workflow. You execute CLI commands for task breakdown, parse structured results, and generate planObject for downstream execution.
|
||||
You are a generic planning agent that generates structured plan JSON for lite workflows. Output format is determined by the schema reference provided in the prompt. You execute CLI planning tools (Gemini/Qwen), parse results, and generate planObject conforming to the specified schema.
|
||||
|
||||
## Output Schema
|
||||
|
||||
**Reference**: `~/.claude/workflows/cli-templates/schemas/plan-json-schema.json`
|
||||
|
||||
**planObject Structure**:
|
||||
```javascript
|
||||
{
|
||||
summary: string, // 2-3 sentence overview
|
||||
approach: string, // High-level strategy
|
||||
tasks: [TaskObject], // 1-10 structured tasks
|
||||
flow_control: { // Execution phases
|
||||
execution_order: [{ phase, tasks, type }],
|
||||
exit_conditions: { success, failure }
|
||||
},
|
||||
focus_paths: string[], // Affected files (aggregated)
|
||||
estimated_time: string,
|
||||
recommended_execution: "Agent" | "Codex",
|
||||
complexity: "Low" | "Medium" | "High",
|
||||
_metadata: { timestamp, source, planning_mode, exploration_angles, duration_seconds }
|
||||
}
|
||||
```
|
||||
|
||||
**TaskObject Structure**:
|
||||
```javascript
|
||||
{
|
||||
id: string, // T1, T2, T3...
|
||||
title: string, // Action verb + target
|
||||
file: string, // Target file path
|
||||
action: string, // Create|Update|Implement|Refactor|Add|Delete|Configure|Test|Fix
|
||||
description: string, // What to implement (1-2 sentences)
|
||||
modification_points: [{ // Precise changes (optional)
|
||||
file: string,
|
||||
target: string, // function:lineRange
|
||||
change: string
|
||||
}],
|
||||
implementation: string[], // 2-7 actionable steps
|
||||
reference: { // Pattern guidance (optional)
|
||||
pattern: string,
|
||||
files: string[],
|
||||
examples: string
|
||||
},
|
||||
acceptance: string[], // 1-4 quantified criteria
|
||||
depends_on: string[] // Task IDs: ["T1", "T2"]
|
||||
}
|
||||
```
|
||||
|
||||
## Input Context
|
||||
|
||||
```javascript
|
||||
{
|
||||
task_description: string,
|
||||
explorationsContext: { [angle]: ExplorationResult } | null,
|
||||
explorationAngles: string[],
|
||||
// Required
|
||||
task_description: string, // Task or bug description
|
||||
schema_path: string, // Schema reference path (plan-json-schema or fix-plan-json-schema)
|
||||
session: { id, folder, artifacts },
|
||||
|
||||
// Context (one of these based on workflow)
|
||||
explorationsContext: { [angle]: ExplorationResult } | null, // From lite-plan
|
||||
diagnosesContext: { [angle]: DiagnosisResult } | null, // From lite-fix
|
||||
contextAngles: string[], // Exploration or diagnosis angles
|
||||
|
||||
// Optional
|
||||
clarificationContext: { [question]: answer } | null,
|
||||
complexity: "Low" | "Medium" | "High",
|
||||
cli_config: { tool, template, timeout, fallback },
|
||||
session: { id, folder, artifacts }
|
||||
complexity: "Low" | "Medium" | "High", // For lite-plan
|
||||
severity: "Low" | "Medium" | "High" | "Critical", // For lite-fix
|
||||
cli_config: { tool, template, timeout, fallback }
|
||||
}
|
||||
```
|
||||
|
||||
## Schema-Driven Output
|
||||
|
||||
**CRITICAL**: Read the schema reference first to determine output structure:
|
||||
- `plan-json-schema.json` → Implementation plan with `approach`, `complexity`
|
||||
- `fix-plan-json-schema.json` → Fix plan with `root_cause`, `severity`, `risk_level`
|
||||
|
||||
```javascript
|
||||
// Step 1: Always read schema first
|
||||
const schema = Bash(`cat ${schema_path}`)
|
||||
|
||||
// Step 2: Generate plan conforming to schema
|
||||
const planObject = generatePlanFromSchema(schema, context)
|
||||
```
|
||||
|
||||
## Execution Flow
|
||||
|
||||
```
|
||||
Phase 1: CLI Execution
|
||||
├─ Aggregate multi-angle exploration findings
|
||||
Phase 1: Schema & Context Loading
|
||||
├─ Read schema reference (plan-json-schema or fix-plan-json-schema)
|
||||
├─ Aggregate multi-angle context (explorations or diagnoses)
|
||||
└─ Determine output structure from schema
|
||||
|
||||
Phase 2: CLI Execution
|
||||
├─ Construct CLI command with planning template
|
||||
├─ Execute Gemini (fallback: Qwen → degraded mode)
|
||||
└─ Timeout: 60 minutes
|
||||
|
||||
Phase 2: Parsing & Enhancement
|
||||
├─ Parse CLI output sections (Summary, Approach, Tasks, Flow Control)
|
||||
Phase 3: Parsing & Enhancement
|
||||
├─ Parse CLI output sections
|
||||
├─ Validate and enhance task objects
|
||||
└─ Infer missing fields from exploration context
|
||||
└─ Infer missing fields from context
|
||||
|
||||
Phase 3: planObject Generation
|
||||
├─ Build planObject from parsed results
|
||||
├─ Generate flow_control from depends_on if not provided
|
||||
├─ Aggregate focus_paths from all tasks
|
||||
└─ Return to orchestrator (lite-plan)
|
||||
Phase 4: planObject Generation
|
||||
├─ Build planObject conforming to schema
|
||||
├─ Assign CLI execution IDs and strategies
|
||||
├─ Generate flow_control from depends_on
|
||||
└─ Return to orchestrator
|
||||
```
|
||||
|
||||
## CLI Command Template
|
||||
|
||||
```bash
|
||||
ccw cli exec "
|
||||
PURPOSE: Generate implementation plan for {complexity} task
|
||||
PURPOSE: Generate plan for {task_description}
|
||||
TASK:
|
||||
• Analyze: {task_description}
|
||||
• Break down into 1-10 tasks with: id, title, file, action, description, modification_points, implementation, reference, acceptance, depends_on
|
||||
• Identify parallel vs sequential execution phases
|
||||
• Analyze task/bug description and context
|
||||
• Break down into tasks following schema structure
|
||||
• Identify dependencies and execution phases
|
||||
MODE: analysis
|
||||
CONTEXT: @**/* | Memory: {exploration_summary}
|
||||
CONTEXT: @**/* | Memory: {context_summary}
|
||||
EXPECTED:
|
||||
## Implementation Summary
|
||||
## Summary
|
||||
[overview]
|
||||
|
||||
## High-Level Approach
|
||||
[strategy]
|
||||
|
||||
## Task Breakdown
|
||||
### T1: [Title]
|
||||
**File**: [path]
|
||||
### T1: [Title] (or FIX1 for fix-plan)
|
||||
**Scope**: [module/feature path]
|
||||
**Action**: [type]
|
||||
**Description**: [what]
|
||||
**Modification Points**: - [file]: [target] - [change]
|
||||
**Implementation**: 1. [step]
|
||||
**Reference**: - Pattern: [name] - Files: [paths] - Examples: [guidance]
|
||||
**Acceptance**: - [quantified criterion]
|
||||
**Acceptance/Verification**: - [quantified criterion]
|
||||
**Depends On**: []
|
||||
|
||||
## Flow Control
|
||||
**Execution Order**: - Phase parallel-1: [T1, T2] (independent)
|
||||
**Exit Conditions**: - Success: [condition] - Failure: [condition]
|
||||
|
||||
## Time Estimate
|
||||
**Total**: [time]
|
||||
|
||||
RULES: $(cat ~/.claude/workflows/cli-templates/prompts/planning/02-breakdown-task-steps.txt) |
|
||||
- Acceptance must be quantified (counts, method names, metrics)
|
||||
- Dependencies use task IDs (T1, T2)
|
||||
- Follow schema structure from {schema_path}
|
||||
- Acceptance/verification must be quantified
|
||||
- Dependencies use task IDs
|
||||
- analysis=READ-ONLY
|
||||
" --tool {cli_tool} --cd {project_root}
|
||||
" --tool {cli_tool} --mode analysis --cd {project_root}
|
||||
```
|
||||
|
||||
## Core Functions
|
||||
@@ -279,6 +256,51 @@ function inferFile(task, ctx) {
|
||||
}
|
||||
```
|
||||
|
||||
### CLI Execution ID Assignment (MANDATORY)
|
||||
|
||||
```javascript
|
||||
function assignCliExecutionIds(tasks, sessionId) {
|
||||
const taskMap = new Map(tasks.map(t => [t.id, t]))
|
||||
const childCount = new Map()
|
||||
|
||||
// Count children for each task
|
||||
tasks.forEach(task => {
|
||||
(task.depends_on || []).forEach(depId => {
|
||||
childCount.set(depId, (childCount.get(depId) || 0) + 1)
|
||||
})
|
||||
})
|
||||
|
||||
tasks.forEach(task => {
|
||||
task.cli_execution_id = `${sessionId}-${task.id}`
|
||||
const deps = task.depends_on || []
|
||||
|
||||
if (deps.length === 0) {
|
||||
task.cli_execution = { strategy: "new" }
|
||||
} else if (deps.length === 1) {
|
||||
const parent = taskMap.get(deps[0])
|
||||
const parentChildCount = childCount.get(deps[0]) || 0
|
||||
task.cli_execution = parentChildCount === 1
|
||||
? { strategy: "resume", resume_from: parent.cli_execution_id }
|
||||
: { strategy: "fork", resume_from: parent.cli_execution_id }
|
||||
} else {
|
||||
task.cli_execution = {
|
||||
strategy: "merge_fork",
|
||||
merge_from: deps.map(depId => taskMap.get(depId).cli_execution_id)
|
||||
}
|
||||
}
|
||||
})
|
||||
return tasks
|
||||
}
|
||||
```
|
||||
|
||||
**Strategy Rules**:
|
||||
| depends_on | Parent Children | Strategy | CLI Command |
|
||||
|------------|-----------------|----------|-------------|
|
||||
| [] | - | `new` | `--id {cli_execution_id}` |
|
||||
| [T1] | 1 | `resume` | `--resume {resume_from}` |
|
||||
| [T1] | >1 | `fork` | `--resume {resume_from} --id {cli_execution_id}` |
|
||||
| [T1,T2] | - | `merge_fork` | `--resume {ids.join(',')} --id {cli_execution_id}` |
|
||||
|
||||
### Flow Control Inference
|
||||
|
||||
```javascript
|
||||
@@ -303,21 +325,44 @@ function inferFlowControl(tasks) {
|
||||
### planObject Generation
|
||||
|
||||
```javascript
|
||||
function generatePlanObject(parsed, enrichedContext, input) {
|
||||
function generatePlanObject(parsed, enrichedContext, input, schemaType) {
|
||||
const tasks = validateAndEnhanceTasks(parsed.raw_tasks, enrichedContext)
|
||||
assignCliExecutionIds(tasks, input.session.id) // MANDATORY: Assign CLI execution IDs
|
||||
const flow_control = parsed.flow_control?.execution_order?.length > 0 ? parsed.flow_control : inferFlowControl(tasks)
|
||||
const focus_paths = [...new Set(tasks.flatMap(t => [t.file, ...t.modification_points.map(m => m.file)]).filter(Boolean))]
|
||||
const focus_paths = [...new Set(tasks.flatMap(t => [t.file || t.scope, ...t.modification_points.map(m => m.file)]).filter(Boolean))]
|
||||
|
||||
return {
|
||||
summary: parsed.summary || `Implementation plan for: ${input.task_description.slice(0, 100)}`,
|
||||
approach: parsed.approach || "Step-by-step implementation",
|
||||
// Base fields (common to both schemas)
|
||||
const base = {
|
||||
summary: parsed.summary || `Plan for: ${input.task_description.slice(0, 100)}`,
|
||||
tasks,
|
||||
flow_control,
|
||||
focus_paths,
|
||||
estimated_time: parsed.time_estimate || `${tasks.length * 30} minutes`,
|
||||
recommended_execution: input.complexity === "Low" ? "Agent" : "Codex",
|
||||
complexity: input.complexity,
|
||||
_metadata: { timestamp: new Date().toISOString(), source: "cli-lite-planning-agent", planning_mode: "agent-based", exploration_angles: input.explorationAngles || [], duration_seconds: Math.round((Date.now() - startTime) / 1000) }
|
||||
recommended_execution: (input.complexity === "Low" || input.severity === "Low") ? "Agent" : "Codex",
|
||||
_metadata: {
|
||||
timestamp: new Date().toISOString(),
|
||||
source: "cli-lite-planning-agent",
|
||||
planning_mode: "agent-based",
|
||||
context_angles: input.contextAngles || [],
|
||||
duration_seconds: Math.round((Date.now() - startTime) / 1000)
|
||||
}
|
||||
}
|
||||
|
||||
// Schema-specific fields
|
||||
if (schemaType === 'fix-plan') {
|
||||
return {
|
||||
...base,
|
||||
root_cause: parsed.root_cause || "Root cause from diagnosis",
|
||||
strategy: parsed.strategy || "comprehensive_fix",
|
||||
severity: input.severity || "Medium",
|
||||
risk_level: parsed.risk_level || "medium"
|
||||
}
|
||||
} else {
|
||||
return {
|
||||
...base,
|
||||
approach: parsed.approach || "Step-by-step implementation",
|
||||
complexity: input.complexity || "Medium"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -383,9 +428,12 @@ function validateTask(task) {
|
||||
## Key Reminders
|
||||
|
||||
**ALWAYS**:
|
||||
- Generate task IDs (T1, T2, T3...)
|
||||
- **Read schema first** to determine output structure
|
||||
- Generate task IDs (T1/T2 for plan, FIX1/FIX2 for fix-plan)
|
||||
- Include depends_on (even if empty [])
|
||||
- Quantify acceptance criteria
|
||||
- **Assign cli_execution_id** (`{sessionId}-{taskId}`)
|
||||
- **Compute cli_execution strategy** based on depends_on
|
||||
- Quantify acceptance/verification criteria
|
||||
- Generate flow_control from dependencies
|
||||
- Handle CLI errors with fallback chain
|
||||
|
||||
@@ -394,3 +442,5 @@ function validateTask(task) {
|
||||
- Use vague acceptance criteria
|
||||
- Create circular dependencies
|
||||
- Skip task validation
|
||||
- **Skip CLI execution ID assignment**
|
||||
- **Ignore schema structure**
|
||||
|
||||
@@ -134,7 +134,7 @@ RULES: $(cat ~/.claude/workflows/cli-templates/prompts/{template}) |
|
||||
- Consider previous iteration failures
|
||||
- Validate fix doesn't introduce new vulnerabilities
|
||||
- analysis=READ-ONLY
|
||||
" --tool {cli_tool} --cd {project_root} --timeout {timeout_value}
|
||||
" --tool {cli_tool} --mode analysis --cd {project_root} --timeout {timeout_value}
|
||||
```
|
||||
|
||||
**Layer-Specific Guidance Injection**:
|
||||
@@ -529,7 +529,7 @@ See: `.process/iteration-{iteration}-cli-output.txt`
|
||||
```bash
|
||||
ccw cli exec "PURPOSE: Analyze integration test failure...
|
||||
TASK: Examine component interactions, data flow, interface contracts...
|
||||
RULES: Analyze full call stack and data flow across components" --tool gemini
|
||||
RULES: Analyze full call stack and data flow across components" --tool gemini --mode analysis
|
||||
```
|
||||
3. **Parse Output**: Extract RCA, 修复建议, 验证建议 sections
|
||||
4. **Generate Task JSON** (IMPL-fix-1.json):
|
||||
|
||||
@@ -34,10 +34,10 @@ You are a code execution specialist focused on implementing high-quality, produc
|
||||
- **context-package.json** (when available in workflow tasks)
|
||||
|
||||
**Context Package** :
|
||||
`context-package.json` provides artifact paths - read using `ccw session`:
|
||||
`context-package.json` provides artifact paths - read using Read tool or ccw session:
|
||||
```bash
|
||||
# Get context package content from session
|
||||
ccw session read ${SESSION_ID} --type context
|
||||
# Get context package content from session using Read tool
|
||||
Read(.workflow/active/${SESSION_ID}/.process/context-package.json)
|
||||
# Returns parsed JSON with brainstorm_artifacts, focus_paths, etc.
|
||||
```
|
||||
|
||||
@@ -123,7 +123,7 @@ When task JSON contains `flow_control.implementation_approach` array:
|
||||
|
||||
**CLI Command Execution (CLI Execute Mode)**:
|
||||
When step contains `command` field with Codex CLI, execute via CCW CLI. For Codex resume:
|
||||
- First task (`depends_on: []`): `ccw cli exec "..." --tool codex --mode auto --cd [path]`
|
||||
- First task (`depends_on: []`): `ccw cli exec "..." --tool codex --mode write --cd [path]`
|
||||
- Subsequent tasks (has `depends_on`): Use CCW CLI with resume context to maintain session
|
||||
|
||||
**Test-Driven Development**:
|
||||
|
||||
@@ -109,7 +109,7 @@ This agent processes **simplified inline [FLOW_CONTROL]** format from brainstorm
|
||||
|
||||
3. **load_session_metadata**
|
||||
- Action: Load session metadata
|
||||
- Command: ccw session read WFS-{session} --type session
|
||||
- Command: Read(.workflow/active/WFS-{session}/workflow-session.json)
|
||||
- Output: session_metadata
|
||||
```
|
||||
|
||||
@@ -155,7 +155,7 @@ When called, you receive:
|
||||
- **User Context**: Specific requirements, constraints, and expectations from user discussion
|
||||
- **Output Location**: Directory path for generated analysis files
|
||||
- **Role Hint** (optional): Suggested role or role selection guidance
|
||||
- **context-package.json** (CCW Workflow): Artifact paths catalog - use `ccw session read {session} --type context` to get context package
|
||||
- **context-package.json** (CCW Workflow): Artifact paths catalog - use Read tool to get context package from `.workflow/active/{session}/.process/context-package.json`
|
||||
- **ASSIGNED_ROLE** (optional): Specific role assignment
|
||||
- **ANALYSIS_DIMENSIONS** (optional): Role-specific analysis dimensions
|
||||
|
||||
|
||||
@@ -216,7 +216,7 @@ Before completion, verify:
|
||||
{
|
||||
"step": "analyze_module_structure",
|
||||
"action": "Deep analysis of module structure and API",
|
||||
"command": "ccw cli exec \"PURPOSE: Document module comprehensively\nTASK: Extract module purpose, architecture, public API, dependencies\nMODE: analysis\nCONTEXT: @**/* System: [system_context]\nEXPECTED: Complete module analysis for documentation\nRULES: $(cat ~/.claude/workflows/cli-templates/prompts/documentation/module-documentation.txt)\" --tool gemini --cd src/auth",
|
||||
"command": "ccw cli exec \"PURPOSE: Document module comprehensively\nTASK: Extract module purpose, architecture, public API, dependencies\nMODE: analysis\nCONTEXT: @**/* System: [system_context]\nEXPECTED: Complete module analysis for documentation\nRULES: $(cat ~/.claude/workflows/cli-templates/prompts/documentation/module-documentation.txt)\" --tool gemini --mode analysis --cd src/auth",
|
||||
"output_to": "module_analysis",
|
||||
"on_error": "fail"
|
||||
}
|
||||
|
||||
@@ -46,7 +46,7 @@ You are a test context discovery specialist focused on gathering test coverage i
|
||||
- `find` - Test file discovery
|
||||
- `Grep` - Framework detection
|
||||
|
||||
**Priority**: CodexLens MCP > ripgrep > find > grep
|
||||
**Priority**: Code-Index MCP > ripgrep > find > grep
|
||||
|
||||
### 3. Framework & Convention Analysis
|
||||
**Tools**:
|
||||
|
||||
@@ -83,7 +83,7 @@ When task JSON contains implementation_approach array:
|
||||
- L1 (Unit): `*.test.*`, `*.spec.*` in `__tests__/`, `tests/unit/`
|
||||
- L2 (Integration): `tests/integration/`, `*.integration.test.*`
|
||||
- L3 (E2E): `tests/e2e/`, `*.e2e.test.*`, `cypress/`, `playwright/`
|
||||
- **context-package.json** (CCW Workflow): Use `ccw session read {session} --type context` to get context package with artifact paths
|
||||
- **context-package.json** (CCW Workflow): Use Read tool to get context package from `.workflow/active/{session}/.process/context-package.json`
|
||||
- Identify test commands from project configuration
|
||||
|
||||
```bash
|
||||
|
||||
@@ -74,7 +74,7 @@ SlashCommand(command="/workflow:session:start --type docs --new \"{project_name}
|
||||
|
||||
```bash
|
||||
# Update workflow-session.json with docs-specific fields
|
||||
ccw session update {sessionId} --type session --content '{"target_path":"{target_path}","project_root":"{project_root}","project_name":"{project_name}","mode":"full","tool":"gemini","cli_execute":false}'
|
||||
ccw session {sessionId} write workflow-session.json '{"target_path":"{target_path}","project_root":"{project_root}","project_name":"{project_name}","mode":"full","tool":"gemini","cli_execute":false}'
|
||||
```
|
||||
|
||||
### Phase 2: Analyze Structure
|
||||
@@ -136,7 +136,7 @@ bash(if [ -d .workflow/docs/\${project_name} ]; then find .workflow/docs/\${proj
|
||||
|
||||
```bash
|
||||
# Count existing docs from doc-planning-data.json
|
||||
ccw session read WFS-docs-{timestamp} --type process --filename doc-planning-data.json --raw | jq '.existing_docs.file_list | length'
|
||||
ccw session WFS-docs-{timestamp} read .process/doc-planning-data.json --raw | jq '.existing_docs.file_list | length'
|
||||
# Or read entire process file and parse
|
||||
```
|
||||
|
||||
@@ -191,10 +191,10 @@ Large Projects (single dir >10 docs):
|
||||
|
||||
```bash
|
||||
# 1. Get top-level directories from doc-planning-data.json
|
||||
ccw session read WFS-docs-{timestamp} --type process --filename doc-planning-data.json --raw | jq -r '.top_level_dirs[]'
|
||||
ccw session WFS-docs-{timestamp} read .process/doc-planning-data.json --raw | jq -r '.top_level_dirs[]'
|
||||
|
||||
# 2. Get mode from workflow-session.json
|
||||
ccw session read WFS-docs-{timestamp} --type session --raw | jq -r '.mode // "full"'
|
||||
ccw session WFS-docs-{timestamp} read workflow-session.json --raw | jq -r '.mode // "full"'
|
||||
|
||||
# 3. Check for HTTP API
|
||||
bash(grep -r "router\.|@Get\|@Post" src/ 2>/dev/null && echo "API_FOUND" || echo "NO_API")
|
||||
@@ -223,7 +223,7 @@ bash(grep -r "router\.|@Get\|@Post" src/ 2>/dev/null && echo "API_FOUND" || echo
|
||||
|
||||
**Task ID Calculation**:
|
||||
```bash
|
||||
group_count=$(ccw session read WFS-docs-{timestamp} --type process --filename doc-planning-data.json --raw | jq '.groups.count')
|
||||
group_count=$(ccw session WFS-docs-{timestamp} read .process/doc-planning-data.json --raw | jq '.groups.count')
|
||||
readme_id=$((group_count + 1)) # Next ID after groups
|
||||
arch_id=$((group_count + 2))
|
||||
api_id=$((group_count + 3))
|
||||
@@ -239,9 +239,9 @@ api_id=$((group_count + 3))
|
||||
| **CLI** | true | implementation_approach | write | --mode write | Execute CLI commands, validate output |
|
||||
|
||||
**Command Patterns**:
|
||||
- Gemini/Qwen: `ccw cli exec "..." --tool gemini --cd dir`
|
||||
- Gemini/Qwen: `ccw cli exec "..." --tool gemini --mode analysis --cd dir`
|
||||
- CLI Mode: `ccw cli exec "..." --tool gemini --mode write --cd dir`
|
||||
- Codex: `ccw cli exec "..." --tool codex --mode auto --cd dir`
|
||||
- Codex: `ccw cli exec "..." --tool codex --mode write --cd dir`
|
||||
|
||||
**Generation Process**:
|
||||
1. Read configuration values (tool, cli_execute, mode) from workflow-session.json
|
||||
@@ -286,8 +286,8 @@ api_id=$((group_count + 3))
|
||||
"step": "load_precomputed_data",
|
||||
"action": "Load Phase 2 analysis and extract group directories",
|
||||
"commands": [
|
||||
"ccw session read ${session_id} --type process --filename doc-planning-data.json",
|
||||
"ccw session read ${session_id} --type process --filename doc-planning-data.json --raw | jq '.groups.assignments[] | select(.group_id == \"${group_number}\") | .directories'"
|
||||
"ccw session ${session_id} read .process/doc-planning-data.json",
|
||||
"ccw session ${session_id} read .process/doc-planning-data.json --raw | jq '.groups.assignments[] | select(.group_id == \"${group_number}\") | .directories'"
|
||||
],
|
||||
"output_to": "phase2_context",
|
||||
"note": "Single JSON file contains all Phase 2 analysis results"
|
||||
@@ -364,7 +364,7 @@ api_id=$((group_count + 3))
|
||||
},
|
||||
{
|
||||
"step": "analyze_project",
|
||||
"command": "bash(gemini \"PURPOSE: Analyze project structure\\nTASK: Extract overview from modules\\nMODE: analysis\\nCONTEXT: [all_module_docs]\\nEXPECTED: Project outline\")",
|
||||
"command": "bash(ccw cli exec \"PURPOSE: Analyze project structure\\nTASK: Extract overview from modules\\nMODE: analysis\\nCONTEXT: [all_module_docs]\\nEXPECTED: Project outline\" --tool gemini --mode analysis)",
|
||||
"output_to": "project_outline"
|
||||
}
|
||||
],
|
||||
@@ -404,7 +404,7 @@ api_id=$((group_count + 3))
|
||||
"pre_analysis": [
|
||||
{"step": "load_existing_docs", "command": "bash(cat .workflow/docs/${project_name}/{ARCHITECTURE,EXAMPLES}.md 2>/dev/null || echo 'No existing docs')", "output_to": "existing_arch_examples"},
|
||||
{"step": "load_all_docs", "command": "bash(cat .workflow/docs/${project_name}/README.md && find .workflow/docs/${project_name} -type f -name '*.md' ! -path '*/README.md' ! -path '*/ARCHITECTURE.md' ! -path '*/EXAMPLES.md' ! -path '*/api/*' | xargs cat)", "output_to": "all_docs"},
|
||||
{"step": "analyze_architecture", "command": "bash(gemini \"PURPOSE: Analyze system architecture\\nTASK: Synthesize architectural overview and examples\\nMODE: analysis\\nCONTEXT: [all_docs]\\nEXPECTED: Architecture + Examples outline\")", "output_to": "arch_examples_outline"}
|
||||
{"step": "analyze_architecture", "command": "bash(ccw cli exec \"PURPOSE: Analyze system architecture\\nTASK: Synthesize architectural overview and examples\\nMODE: analysis\\nCONTEXT: [all_docs]\\nEXPECTED: Architecture + Examples outline\" --tool gemini --mode analysis)", "output_to": "arch_examples_outline"}
|
||||
],
|
||||
"implementation_approach": [
|
||||
{
|
||||
@@ -441,7 +441,7 @@ api_id=$((group_count + 3))
|
||||
"pre_analysis": [
|
||||
{"step": "discover_api", "command": "bash(rg 'router\\.| @(Get|Post)' -g '*.{ts,js}')", "output_to": "endpoint_discovery"},
|
||||
{"step": "load_existing_api", "command": "bash(cat .workflow/docs/${project_name}/api/README.md 2>/dev/null || echo 'No existing API docs')", "output_to": "existing_api_docs"},
|
||||
{"step": "analyze_api", "command": "bash(gemini \"PURPOSE: Document HTTP API\\nTASK: Analyze endpoints\\nMODE: analysis\\nCONTEXT: @src/api/**/* [endpoint_discovery]\\nEXPECTED: API outline\")", "output_to": "api_outline"}
|
||||
{"step": "analyze_api", "command": "bash(ccw cli exec \"PURPOSE: Document HTTP API\\nTASK: Analyze endpoints\\nMODE: analysis\\nCONTEXT: @src/api/**/* [endpoint_discovery]\\nEXPECTED: API outline\" --tool gemini --mode analysis)", "output_to": "api_outline"}
|
||||
],
|
||||
"implementation_approach": [
|
||||
{
|
||||
|
||||
@@ -147,7 +147,7 @@ RULES:
|
||||
- Identify key architecture patterns and technical constraints
|
||||
- Extract integration points and development standards
|
||||
- Output concise, structured format
|
||||
" --tool ${tool}
|
||||
" --tool ${tool} --mode analysis
|
||||
\`\`\`
|
||||
|
||||
### Step 4: Generate Core Content Package
|
||||
|
||||
@@ -198,7 +198,7 @@ Objectives:
|
||||
CONTEXT: @IMPL_PLAN.md @workflow-session.json
|
||||
EXPECTED: Structured lessons and conflicts in JSON format
|
||||
RULES: Template reference from skill-aggregation.txt
|
||||
" --tool gemini --cd .workflow/.archives/{session_id}
|
||||
" --tool gemini --mode analysis --cd .workflow/.archives/{session_id}
|
||||
|
||||
3.5. **Generate SKILL.md Description** (CRITICAL for auto-loading):
|
||||
|
||||
@@ -345,7 +345,7 @@ Objectives:
|
||||
CONTEXT: [Provide aggregated JSON data]
|
||||
EXPECTED: Final aggregated structure for SKILL documents
|
||||
RULES: Template reference from skill-aggregation.txt
|
||||
" --tool gemini
|
||||
" --tool gemini --mode analysis
|
||||
|
||||
3. Read templates for formatting (same 4 templates as single mode)
|
||||
|
||||
|
||||
@@ -149,7 +149,7 @@ Parse user input (supports: number "1", full ID "WFS-auth-system", or partial "a
|
||||
|
||||
#### Step 1.3: Load Session Metadata
|
||||
```bash
|
||||
ccw session read ${sessionId} --type session
|
||||
ccw session ${sessionId} read workflow-session.json
|
||||
```
|
||||
|
||||
**Output**: Store session metadata in memory
|
||||
|
||||
@@ -473,10 +473,10 @@ Detailed plan: ${executionContext.session.artifacts.plan}`)
|
||||
return prompt
|
||||
}
|
||||
|
||||
ccw cli exec "${buildCLIPrompt(batch)}" --tool codex --mode auto
|
||||
ccw cli exec "${buildCLIPrompt(batch)}" --tool codex --mode write
|
||||
```
|
||||
|
||||
**Execution with tracking**:
|
||||
**Execution with fixed IDs** (predictable ID pattern):
|
||||
```javascript
|
||||
// Launch CLI in foreground (NOT background)
|
||||
// Timeout based on complexity: Low=40min, Medium=60min, High=100min
|
||||
@@ -486,15 +486,48 @@ const timeoutByComplexity = {
|
||||
"High": 6000000 // 100 minutes
|
||||
}
|
||||
|
||||
// Generate fixed execution ID: ${sessionId}-${groupId}
|
||||
// This enables predictable ID lookup without relying on resume context chains
|
||||
const sessionId = executionContext?.session?.id || 'standalone'
|
||||
const fixedExecutionId = `${sessionId}-${batch.groupId}` // e.g., "implement-auth-2025-12-13-P1"
|
||||
|
||||
// Check if resuming from previous failed execution
|
||||
const previousCliId = batch.resumeFromCliId || null
|
||||
|
||||
// Build command with fixed ID (and optional resume for continuation)
|
||||
const cli_command = previousCliId
|
||||
? `ccw cli exec "${buildCLIPrompt(batch)}" --tool codex --mode write --id ${fixedExecutionId} --resume ${previousCliId}`
|
||||
: `ccw cli exec "${buildCLIPrompt(batch)}" --tool codex --mode write --id ${fixedExecutionId}`
|
||||
|
||||
bash_result = Bash(
|
||||
command=cli_command,
|
||||
timeout=timeoutByComplexity[planObject.complexity] || 3600000
|
||||
)
|
||||
|
||||
// Execution ID is now predictable: ${fixedExecutionId}
|
||||
// Can also extract from output: "ID: implement-auth-2025-12-13-P1"
|
||||
const cliExecutionId = fixedExecutionId
|
||||
|
||||
// Update TodoWrite when execution completes
|
||||
```
|
||||
|
||||
**Result Collection**: After completion, analyze output and collect result following `executionResult` structure
|
||||
**Resume on Failure** (with fixed ID):
|
||||
```javascript
|
||||
// If execution failed or timed out, offer resume option
|
||||
if (bash_result.status === 'failed' || bash_result.status === 'timeout') {
|
||||
console.log(`
|
||||
⚠️ Execution incomplete. Resume available:
|
||||
Fixed ID: ${fixedExecutionId}
|
||||
Lookup: ccw cli detail ${fixedExecutionId}
|
||||
Resume: ccw cli exec "Continue tasks" --resume ${fixedExecutionId} --tool codex --mode write --id ${fixedExecutionId}-retry
|
||||
`)
|
||||
|
||||
// Store for potential retry in same session
|
||||
batch.resumeFromCliId = fixedExecutionId
|
||||
}
|
||||
```
|
||||
|
||||
**Result Collection**: After completion, analyze output and collect result following `executionResult` structure (include `cliExecutionId` for resume capability)
|
||||
|
||||
### Step 4: Progress Tracking
|
||||
|
||||
@@ -541,15 +574,30 @@ RULES: $(cat ~/.claude/workflows/cli-templates/prompts/analysis/02-review-code-q
|
||||
# - Report findings directly
|
||||
|
||||
# Method 2: Gemini Review (recommended)
|
||||
ccw cli exec "[Shared Prompt Template with artifacts]" --tool gemini
|
||||
ccw cli exec "[Shared Prompt Template with artifacts]" --tool gemini --mode analysis
|
||||
# CONTEXT includes: @**/* @${plan.json} [@${exploration.json}]
|
||||
|
||||
# Method 3: Qwen Review (alternative)
|
||||
ccw cli exec "[Shared Prompt Template with artifacts]" --tool qwen
|
||||
ccw cli exec "[Shared Prompt Template with artifacts]" --tool qwen --mode analysis
|
||||
# Same prompt as Gemini, different execution engine
|
||||
|
||||
# Method 4: Codex Review (autonomous)
|
||||
ccw cli exec "[Verify plan acceptance criteria at ${plan.json}]" --tool codex --mode auto
|
||||
ccw cli exec "[Verify plan acceptance criteria at ${plan.json}]" --tool codex --mode write
|
||||
```
|
||||
|
||||
**Multi-Round Review with Fixed IDs**:
|
||||
```javascript
|
||||
// Generate fixed review ID
|
||||
const reviewId = `${sessionId}-review`
|
||||
|
||||
// First review pass with fixed ID
|
||||
const reviewResult = Bash(`ccw cli exec "[Review prompt]" --tool gemini --mode analysis --id ${reviewId}`)
|
||||
|
||||
// If issues found, continue review dialog with fixed ID chain
|
||||
if (hasUnresolvedIssues(reviewResult)) {
|
||||
// Resume with follow-up questions
|
||||
Bash(`ccw cli exec "Clarify the security concerns you mentioned" --resume ${reviewId} --tool gemini --mode analysis --id ${reviewId}-followup`)
|
||||
}
|
||||
```
|
||||
|
||||
**Implementation Note**: Replace `[Shared Prompt Template with artifacts]` placeholder with actual template content, substituting:
|
||||
@@ -623,8 +671,10 @@ console.log(`✓ Development index: [${category}] ${entry.title}`)
|
||||
| Empty file | File exists but no content | Error: "File is empty: {path}. Provide task description." |
|
||||
| Invalid Enhanced Task JSON | JSON missing required fields | Warning: "Missing required fields. Treating as plain text." |
|
||||
| Malformed JSON | JSON parsing fails | Treat as plain text (expected for non-JSON files) |
|
||||
| Execution failure | Agent/Codex crashes | Display error, save partial progress, suggest retry |
|
||||
| Execution failure | Agent/Codex crashes | Display error, use fixed ID `${sessionId}-${groupId}` for resume: `ccw cli exec "Continue" --resume <fixed-id> --id <fixed-id>-retry` |
|
||||
| Execution timeout | CLI exceeded timeout | Use fixed ID for resume with extended timeout |
|
||||
| Codex unavailable | Codex not installed | Show installation instructions, offer Agent execution |
|
||||
| Fixed ID not found | Custom ID lookup failed | Check `ccw cli history`, verify date directories |
|
||||
|
||||
## Data Structures
|
||||
|
||||
@@ -679,8 +729,20 @@ Collected after each execution call completes:
|
||||
tasksSummary: string, // Brief description of tasks handled
|
||||
completionSummary: string, // What was completed
|
||||
keyOutputs: string, // Files created/modified, key changes
|
||||
notes: string // Important context for next execution
|
||||
notes: string, // Important context for next execution
|
||||
fixedCliId: string | null // Fixed CLI execution ID (e.g., "implement-auth-2025-12-13-P1")
|
||||
}
|
||||
```
|
||||
|
||||
Appended to `previousExecutionResults` array for context continuity in multi-execution scenarios.
|
||||
|
||||
**Fixed ID Pattern**: `${sessionId}-${groupId}` enables predictable lookup without auto-generated timestamps.
|
||||
|
||||
**Resume Usage**: If `status` is "partial" or "failed", use `fixedCliId` to resume:
|
||||
```bash
|
||||
# Lookup previous execution
|
||||
ccw cli detail ${fixedCliId}
|
||||
|
||||
# Resume with new fixed ID for retry
|
||||
ccw cli exec "Continue from where we left off" --resume ${fixedCliId} --tool codex --mode write --id ${fixedCliId}-retry
|
||||
```
|
||||
|
||||
@@ -72,17 +72,60 @@ Phase 5: Dispatch
|
||||
### Phase 1: Intelligent Multi-Angle Diagnosis
|
||||
|
||||
**Session Setup** (MANDATORY - follow exactly):
|
||||
|
||||
**Option 1: Using CLI Command** (Recommended for simplicity):
|
||||
```bash
|
||||
# Generate session ID
|
||||
bug_slug=$(echo "${bug_description}" | tr '[:upper:]' '[:lower:]' | tr -cs '[:alnum:]' '-' | cut -c1-40)
|
||||
date_str=$(date -u '+%Y-%m-%d')
|
||||
session_id="${bug_slug}-${date_str}"
|
||||
|
||||
# Initialize lite-fix session (location auto-inferred from type)
|
||||
ccw session init "${session_id}" \
|
||||
--type lite-fix \
|
||||
--content "{\"description\":\"${bug_description}\",\"severity\":\"${severity}\"}"
|
||||
|
||||
|
||||
|
||||
# Get session folder
|
||||
session_folder=".workflow/.lite-fix/${session_id}"
|
||||
echo "Session initialized: ${session_id} at ${session_folder}"
|
||||
```
|
||||
|
||||
**Option 2: Using session_manager Tool** (For programmatic access):
|
||||
```javascript
|
||||
// Helper: Get UTC+8 (China Standard Time) ISO string
|
||||
const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()
|
||||
|
||||
const bugSlug = bug_description.toLowerCase().replace(/[^a-z0-9]+/g, '-').substring(0, 40)
|
||||
const dateStr = getUtc8ISOString().substring(0, 10) // Format: 2025-11-29
|
||||
const dateStr = getUtc8ISOString().substring(0, 10) // Format: 2025-12-17
|
||||
|
||||
const sessionId = `${bugSlug}-${dateStr}` // e.g., "user-avatar-upload-fails-2025-11-29"
|
||||
const sessionFolder = `.workflow/.lite-fix/${sessionId}`
|
||||
const sessionId = `${bugSlug}-${dateStr}` // e.g., "user-avatar-upload-fails-2025-12-17"
|
||||
|
||||
bash(`mkdir -p ${sessionFolder} && test -d ${sessionFolder} && echo "SUCCESS: ${sessionFolder}" || echo "FAILED: ${sessionFolder}"`)
|
||||
|
||||
|
||||
const sessionFolder = initResult.result.path
|
||||
console.log(`Session initialized: ${sessionId} at ${sessionFolder}`)
|
||||
```
|
||||
|
||||
**Session File Structure**:
|
||||
- `session-metadata.json` - Session metadata (created at init, contains description, severity, status)
|
||||
- `fix-plan.json` - Actual fix planning content (created later in Phase 3, contains fix tasks, diagnosis results)
|
||||
|
||||
**Metadata Field Usage**:
|
||||
- `description`: Displayed in dashboard session list (replaces session ID as title)
|
||||
- `severity`: Used for fix planning strategy selection (Low/Medium → Direct Claude, High/Critical → Agent)
|
||||
- `created_at`: Displayed in dashboard timeline
|
||||
- `status`: Updated through workflow (diagnosing → fixing → completed)
|
||||
- Custom fields: Any additional fields in metadata are saved and accessible programmatically
|
||||
|
||||
**Accessing Session Data**:
|
||||
```bash
|
||||
# Read session metadata
|
||||
ccw session ${session_id} read session-metadata.json
|
||||
|
||||
# Read fix plan content (after Phase 3 completion)
|
||||
ccw session ${session_id} read fix-plan.json
|
||||
```
|
||||
|
||||
**Diagnosis Decision Logic**:
|
||||
|
||||
@@ -72,17 +72,57 @@ Phase 5: Dispatch
|
||||
### Phase 1: Intelligent Multi-Angle Exploration
|
||||
|
||||
**Session Setup** (MANDATORY - follow exactly):
|
||||
|
||||
**Option 1: Using CLI Command** (Recommended for simplicity):
|
||||
```bash
|
||||
# Generate session ID
|
||||
task_slug=$(echo "${task_description}" | tr '[:upper:]' '[:lower:]' | tr -cs '[:alnum:]' '-' | cut -c1-40)
|
||||
date_str=$(date -u '+%Y-%m-%d')
|
||||
session_id="${task_slug}-${date_str}"
|
||||
|
||||
# Initialize lite-plan session (location auto-inferred from type)
|
||||
ccw session init "${session_id}" \
|
||||
--type lite-plan \
|
||||
--content "{\"description\":\"${task_description}\",\"complexity\":\"${complexity}\"}"
|
||||
|
||||
# Get session folder
|
||||
session_folder=".workflow/.lite-plan/${session_id}"
|
||||
echo "Session initialized: ${session_id} at ${session_folder}"
|
||||
```
|
||||
|
||||
**Option 2: Using session_manager Tool** (For programmatic access):
|
||||
```javascript
|
||||
// Helper: Get UTC+8 (China Standard Time) ISO string
|
||||
const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()
|
||||
|
||||
const taskSlug = task_description.toLowerCase().replace(/[^a-z0-9]+/g, '-').substring(0, 40)
|
||||
const dateStr = getUtc8ISOString().substring(0, 10) // Format: 2025-11-29
|
||||
const dateStr = getUtc8ISOString().substring(0, 10) // Format: 2025-12-17
|
||||
|
||||
const sessionId = `${taskSlug}-${dateStr}` // e.g., "implement-jwt-refresh-2025-11-29"
|
||||
const sessionFolder = `.workflow/.lite-plan/${sessionId}`
|
||||
const sessionId = `${taskSlug}-${dateStr}` // e.g., "implement-jwt-refresh-2025-12-17"
|
||||
|
||||
bash(`mkdir -p ${sessionFolder} && test -d ${sessionFolder} && echo "SUCCESS: ${sessionFolder}" || echo "FAILED: ${sessionFolder}"`)
|
||||
|
||||
|
||||
const sessionFolder = initResult.result.path
|
||||
console.log(`Session initialized: ${sessionId} at ${sessionFolder}`)
|
||||
```
|
||||
|
||||
**Session File Structure**:
|
||||
- `session-metadata.json` - Session metadata (created at init, contains description, complexity, status)
|
||||
- `plan.json` - Actual planning content (created later in Phase 3, contains tasks, steps, dependencies)
|
||||
|
||||
**Metadata Field Usage**:
|
||||
- `description`: Displayed in dashboard session list (replaces session ID as title)
|
||||
- `complexity`: Used for planning strategy selection (Low → Direct Claude, Medium/High → Agent)
|
||||
- `created_at`: Displayed in dashboard timeline
|
||||
- Custom fields: Any additional fields in metadata are saved and accessible programmatically
|
||||
|
||||
**Accessing Session Data**:
|
||||
```bash
|
||||
# Read session metadata
|
||||
ccw session ${session_id} read session-metadata.json
|
||||
|
||||
# Read plan content (after Phase 3 completion)
|
||||
ccw session ${session_id} read plan.json
|
||||
```
|
||||
|
||||
**Exploration Decision Logic**:
|
||||
|
||||
@@ -112,14 +112,18 @@ After bash validation, the model takes control to:
|
||||
|
||||
1. **Load Context**: Read completed task summaries and changed files
|
||||
```bash
|
||||
# Load implementation summaries
|
||||
ccw session read ${sessionId} --type summary --raw
|
||||
# Load implementation summaries (iterate through .summaries/ directory)
|
||||
for summary in .workflow/active/${sessionId}/.summaries/*.md; do
|
||||
  [ -f "$summary" ] && cat "$summary"
|
||||
done
|
||||
|
||||
# Load test results (if available)
|
||||
ccw session read ${sessionId} --type summary --filename "TEST-FIX-*.md" --raw 2>/dev/null
|
||||
for test_summary in .workflow/active/${sessionId}/.summaries/TEST-FIX-*.md; do
|
||||
  [ -f "$test_summary" ] && cat "$test_summary"
|
||||
done
|
||||
|
||||
# Get session created_at for git log filter
|
||||
created_at=$(ccw session read ${sessionId} --type session --raw | jq -r .created_at)
|
||||
created_at=$(ccw session ${sessionId} read workflow-session.json --raw | jq -r .created_at)
|
||||
git log --since="$created_at" --name-only --pretty=format: | sort -u
|
||||
```
|
||||
|
||||
@@ -170,11 +174,13 @@ After bash validation, the model takes control to:
|
||||
- Verify all requirements and acceptance criteria met:
|
||||
```bash
|
||||
# Load task requirements and acceptance criteria
|
||||
ccw session read ${sessionId} --type task --raw | jq -r '
|
||||
"Task: " + .id + "\n" +
|
||||
"Requirements: " + (.context.requirements | join(", ")) + "\n" +
|
||||
"Acceptance: " + (.context.acceptance | join(", "))
|
||||
'
|
||||
for task_file in .workflow/active/${sessionId}/.task/*.json; do
|
||||
cat "$task_file" | jq -r '
|
||||
"Task: " + .id + "\n" +
|
||||
"Requirements: " + (.context.requirements | join(", ")) + "\n" +
|
||||
"Acceptance: " + (.context.acceptance | join(", "))
|
||||
'
|
||||
done
|
||||
|
||||
# Check implementation summaries against requirements
|
||||
ccw cli exec "
|
||||
|
||||
@@ -34,7 +34,7 @@ ccw session list --location active
|
||||
#### Step 1.2: Check for Existing Archiving Marker (Resume Detection)
|
||||
```bash
|
||||
# Check if session is already being archived (marker file exists)
|
||||
ccw session read WFS-session-name --type process --filename .archiving 2>/dev/null && echo "RESUMING" || echo "NEW"
|
||||
ccw session WFS-session-name read .process/.archiving 2>/dev/null && echo "RESUMING" || echo "NEW"
|
||||
```
|
||||
|
||||
**If RESUMING**:
|
||||
@@ -47,7 +47,7 @@ ccw session read WFS-session-name --type process --filename .archiving 2>/dev/nu
|
||||
#### Step 1.3: Create Archiving Marker
|
||||
```bash
|
||||
# Mark session as "archiving in progress"
|
||||
ccw session write WFS-session-name --type process --filename .archiving --content ''
|
||||
ccw session WFS-session-name write .process/.archiving ''
|
||||
```
|
||||
**Purpose**:
|
||||
- Prevents concurrent operations on this session
|
||||
@@ -171,8 +171,8 @@ ccw session archive WFS-session-name
|
||||
|
||||
#### Step 3.2: Update Manifest
|
||||
```bash
|
||||
# Read current manifest using ccw (or create empty array if not exists)
|
||||
ccw session read manifest --type manifest --raw 2>/dev/null || echo "[]"
|
||||
# Check if manifest exists
|
||||
test -f .workflow/archives/manifest.json && echo "EXISTS" || echo "NOT_FOUND"
|
||||
```
|
||||
|
||||
**JSON Update Logic**:
|
||||
@@ -221,8 +221,8 @@ rm .workflow/archives/WFS-session-name/.process/.archiving 2>/dev/null || true
|
||||
|
||||
#### Step 4.1: Check Project State Exists
|
||||
```bash
|
||||
# Check project state using ccw
|
||||
ccw session read project --type project 2>/dev/null && echo "EXISTS" || echo "SKIP"
|
||||
# Check if project.json exists
|
||||
test -f .workflow/project.json && echo "EXISTS" || echo "SKIP"
|
||||
```
|
||||
|
||||
**If SKIP**: Output warning and skip Phase 4
|
||||
@@ -249,11 +249,6 @@ const featureId = title.toLowerCase().replace(/[^a-z0-9]+/g, '-').substring(0, 5
|
||||
|
||||
#### Step 4.3: Update project.json
|
||||
|
||||
```bash
|
||||
# Read current project state using ccw
|
||||
ccw session read project --type project --raw
|
||||
```
|
||||
|
||||
**JSON Update Logic**:
|
||||
```javascript
|
||||
// Read existing project.json (created by /workflow:init)
|
||||
|
||||
@@ -30,7 +30,7 @@ ccw session stats WFS-session
|
||||
|
||||
### Step 3: Read Session Metadata
|
||||
```bash
|
||||
ccw session read WFS-session --type session
|
||||
ccw session WFS-session read workflow-session.json
|
||||
# Returns: session_id, status, project, created_at, etc.
|
||||
```
|
||||
|
||||
@@ -39,8 +39,8 @@ ccw session read WFS-session --type session
|
||||
### Basic Operations
|
||||
- **List all sessions**: `ccw session list`
|
||||
- **List active only**: `ccw session list --location active`
|
||||
- **Read session data**: `ccw session read WFS-xxx --type session`
|
||||
- **Get task stats**: `ccw session stats WFS-xxx`
|
||||
- **Read session data**: `ccw session WFS-xxx read workflow-session.json`
|
||||
- **Get task stats**: `ccw session WFS-xxx stats`
|
||||
|
||||
## Simple Output Format
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ ccw session list --location active
|
||||
|
||||
### Step 2: Check Session Status
|
||||
```bash
|
||||
ccw session read WFS-session --type session
|
||||
ccw session WFS-session read workflow-session.json
|
||||
# Check .status field in response
|
||||
```
|
||||
|
||||
@@ -35,17 +35,15 @@ ccw session list --location active
|
||||
|
||||
### Step 4: Update Session Status to Active
|
||||
```bash
|
||||
ccw session status WFS-session active
|
||||
# Or with full update:
|
||||
ccw session update WFS-session --type session --content '{"status":"active","resumed_at":"2025-12-10T08:00:00Z"}'
|
||||
ccw session WFS-session status active
|
||||
```
|
||||
|
||||
## Simple Commands
|
||||
|
||||
### Basic Operations
|
||||
- **List sessions**: `ccw session list --location active`
|
||||
- **Check status**: `ccw session read WFS-xxx --type session`
|
||||
- **Update status**: `ccw session status WFS-xxx active`
|
||||
- **Check status**: `ccw session WFS-xxx read workflow-session.json`
|
||||
- **Update status**: `ccw session WFS-xxx status active`
|
||||
|
||||
### Resume Result
|
||||
```
|
||||
|
||||
@@ -30,10 +30,17 @@ The `--type` parameter classifies sessions for CCW dashboard organization:
|
||||
| `tdd` | TDD-based development | `/workflow:tdd-plan` |
|
||||
| `test` | Test generation/fix sessions | `/workflow:test-fix-gen` |
|
||||
| `docs` | Documentation sessions | `/memory:docs` |
|
||||
| `lite-plan` | Lightweight planning workflow | `/workflow:lite-plan` |
|
||||
| `lite-fix` | Lightweight bug fix workflow | `/workflow:lite-fix` |
|
||||
|
||||
**Special Behavior for `lite-plan` and `lite-fix`**:
|
||||
- These types automatically infer the storage location (`.workflow/.lite-plan/` or `.workflow/.lite-fix/`)
|
||||
- No need to specify `--location` parameter when using these types
|
||||
- Alternative: Use `--location lite-plan` or `--location lite-fix` directly
|
||||
|
||||
**Validation**: If `--type` is provided with invalid value, return error:
|
||||
```
|
||||
ERROR: Invalid session type. Valid types: workflow, review, tdd, test, docs
|
||||
ERROR: Invalid session type. Valid types: workflow, review, tdd, test, docs, lite-plan, lite-fix
|
||||
```
|
||||
|
||||
## Step 0: Initialize Project State (First-time Only)
|
||||
@@ -75,7 +82,7 @@ ccw session list --location active
|
||||
|
||||
### Step 2: Display Session Metadata
|
||||
```bash
|
||||
ccw session read WFS-promptmaster-platform --type session
|
||||
ccw session WFS-promptmaster-platform read workflow-session.json
|
||||
```
|
||||
|
||||
### Step 4: User Decision
|
||||
@@ -102,7 +109,7 @@ ccw session list --location active
|
||||
# Pattern: WFS-{lowercase-slug-from-description}
|
||||
|
||||
# Create session with ccw (creates directories + metadata atomically)
|
||||
ccw session init WFS-implement-oauth2-auth --type workflow --content '{"project":"implement OAuth2 auth","status":"planning"}'
|
||||
ccw session init WFS-implement-oauth2-auth --type workflow
|
||||
```
|
||||
|
||||
**Output**: `SESSION_ID: WFS-implement-oauth2-auth`
|
||||
@@ -113,7 +120,7 @@ ccw session init WFS-implement-oauth2-auth --type workflow --content '{"project"
|
||||
ccw session list --location active
|
||||
|
||||
# Read session metadata for relevance check
|
||||
ccw session read WFS-promptmaster-platform --type session
|
||||
ccw session WFS-promptmaster-platform read workflow-session.json
|
||||
|
||||
# If task contains project keywords → Reuse session
|
||||
# If task unrelated → Create new session (use Step 2a)
|
||||
@@ -149,10 +156,41 @@ ccw session list --location active
|
||||
|
||||
### Step 2: Create Session Structure
|
||||
```bash
|
||||
# Single command creates directories (.process, .task, .summaries) + metadata
|
||||
ccw session init WFS-fix-login-bug --type workflow --content '{"project":"fix login bug","status":"planning"}'
|
||||
# Basic init - creates directories + default metadata
|
||||
ccw session init WFS-fix-login-bug --type workflow
|
||||
|
||||
# Advanced init - with custom metadata
|
||||
ccw session init WFS-oauth-implementation --type workflow --content '{"description":"OAuth2 authentication system","priority":"high","complexity":"medium"}'
|
||||
```
|
||||
|
||||
**Default Metadata** (auto-generated):
|
||||
```json
|
||||
{
|
||||
"session_id": "WFS-fix-login-bug",
|
||||
"type": "workflow",
|
||||
"status": "planning",
|
||||
"created_at": "2025-12-17T..."
|
||||
}
|
||||
```
|
||||
|
||||
**Custom Metadata** (merged with defaults):
|
||||
```json
|
||||
{
|
||||
"session_id": "WFS-oauth-implementation",
|
||||
"type": "workflow",
|
||||
"status": "planning",
|
||||
"created_at": "2025-12-17T...",
|
||||
"description": "OAuth2 authentication system",
|
||||
"priority": "high",
|
||||
"complexity": "medium"
|
||||
}
|
||||
```
|
||||
|
||||
**Field Usage**:
|
||||
- `description`: Displayed in dashboard (replaces session_id as title)
|
||||
- `status`: Can override default "planning" (e.g., "active", "implementing")
|
||||
- Custom fields: Any additional fields are saved and accessible programmatically
|
||||
|
||||
**Output**: `SESSION_ID: WFS-fix-login-bug`
|
||||
|
||||
## Execution Guideline
|
||||
|
||||
@@ -77,18 +77,32 @@ find .workflow/active/ -name "WFS-*" -type d | head -1 | sed 's/.*\///'
|
||||
|
||||
```bash
|
||||
# Load all task JSONs
|
||||
ccw session read {sessionId} --type task
|
||||
for task_file in .workflow/active/{sessionId}/.task/*.json; do
|
||||
cat "$task_file"
|
||||
done
|
||||
|
||||
# Extract task IDs
|
||||
ccw session read {sessionId} --type task --raw | jq -r '.id'
|
||||
for task_file in .workflow/active/{sessionId}/.task/*.json; do
|
||||
cat "$task_file" | jq -r '.id'
|
||||
done
|
||||
|
||||
# Check dependencies - read tasks and filter for IMPL/REFACTOR
|
||||
ccw session read {sessionId} --type task --task-id "IMPL-*" --raw | jq -r '.context.depends_on[]?'
|
||||
ccw session read {sessionId} --type task --task-id "REFACTOR-*" --raw | jq -r '.context.depends_on[]?'
|
||||
for task_file in .workflow/active/{sessionId}/.task/IMPL-*.json; do
|
||||
cat "$task_file" | jq -r '.context.depends_on[]?'
|
||||
done
|
||||
|
||||
for task_file in .workflow/active/{sessionId}/.task/REFACTOR-*.json; do
|
||||
cat "$task_file" | jq -r '.context.depends_on[]?'
|
||||
done
|
||||
|
||||
# Check meta fields
|
||||
ccw session read {sessionId} --type task --raw | jq -r '.meta.tdd_phase'
|
||||
ccw session read {sessionId} --type task --raw | jq -r '.meta.agent'
|
||||
for task_file in .workflow/active/{sessionId}/.task/*.json; do
|
||||
cat "$task_file" | jq -r '.meta.tdd_phase'
|
||||
done
|
||||
|
||||
for task_file in .workflow/active/{sessionId}/.task/*.json; do
|
||||
cat "$task_file" | jq -r '.meta.agent'
|
||||
done
|
||||
```
|
||||
|
||||
**Validation**:
|
||||
@@ -139,7 +153,7 @@ EXPECTED:
|
||||
- Red-Green-Refactor cycle validation
|
||||
- Best practices adherence assessment
|
||||
RULES: Focus on TDD best practices and workflow adherence. Be specific about violations and improvements.
|
||||
" --tool gemini --cd project-root > .workflow/active/{sessionId}/TDD_COMPLIANCE_REPORT.md
|
||||
" --tool gemini --mode analysis --cd project-root > .workflow/active/{sessionId}/TDD_COMPLIANCE_REPORT.md
|
||||
```
|
||||
|
||||
**Output**: TDD_COMPLIANCE_REPORT.md
|
||||
|
||||
@@ -152,7 +152,7 @@ Task(subagent_type="cli-execution-agent", prompt=`
|
||||
- ModuleOverlap conflicts with overlap_analysis
|
||||
- Targeted clarification questions
|
||||
RULES: $(cat ~/.claude/workflows/cli-templates/prompts/analysis/02-analyze-code-patterns.txt) | Focus on breaking changes, migration needs, and functional overlaps | Prioritize exploration-identified conflicts | analysis=READ-ONLY
|
||||
" --tool gemini --cd {project_root}
|
||||
" --tool gemini --mode analysis --cd {project_root}
|
||||
|
||||
Fallback: Qwen (same prompt) → Claude (manual analysis)
|
||||
|
||||
|
||||
@@ -28,6 +28,12 @@ Input Parsing:
|
||||
├─ Parse flags: --session
|
||||
└─ Validation: session_id REQUIRED
|
||||
|
||||
Phase 0: User Configuration (Interactive)
|
||||
├─ Question 1: Supplementary materials/guidelines?
|
||||
├─ Question 2: Execution method preference (Agent/CLI/Hybrid)
|
||||
├─ Question 3: CLI tool preference (if CLI selected)
|
||||
└─ Store: userConfig for agent prompt
|
||||
|
||||
Phase 1: Context Preparation & Module Detection (Command)
|
||||
├─ Assemble session paths (metadata, context package, output dirs)
|
||||
├─ Provide metadata (session_id, execution_mode, mcp_capabilities)
|
||||
@@ -57,6 +63,82 @@ Phase 3: Integration (+1 Coordinator, Multi-Module Only)
|
||||
|
||||
## Document Generation Lifecycle
|
||||
|
||||
### Phase 0: User Configuration (Interactive)
|
||||
|
||||
**Purpose**: Collect user preferences before task generation to ensure generated tasks match execution expectations.
|
||||
|
||||
**User Questions**:
|
||||
```javascript
|
||||
AskUserQuestion({
|
||||
questions: [
|
||||
{
|
||||
question: "Do you have supplementary materials or guidelines to include?",
|
||||
header: "Materials",
|
||||
multiSelect: false,
|
||||
options: [
|
||||
{ label: "No additional materials", description: "Use existing context only" },
|
||||
{ label: "Provide file paths", description: "I'll specify paths to include" },
|
||||
{ label: "Provide inline content", description: "I'll paste content directly" }
|
||||
]
|
||||
},
|
||||
{
|
||||
question: "Select execution method for generated tasks:",
|
||||
header: "Execution",
|
||||
multiSelect: false,
|
||||
options: [
|
||||
{ label: "Agent (Recommended)", description: "Claude agent executes tasks directly" },
|
||||
{ label: "Hybrid", description: "Agent orchestrates, calls CLI for complex steps" },
|
||||
{ label: "CLI Only", description: "All execution via CLI tools (codex/gemini/qwen)" }
|
||||
]
|
||||
},
|
||||
{
|
||||
question: "If using CLI, which tool do you prefer?",
|
||||
header: "CLI Tool",
|
||||
multiSelect: false,
|
||||
options: [
|
||||
{ label: "Codex (Recommended)", description: "Best for implementation tasks" },
|
||||
{ label: "Gemini", description: "Best for analysis and large context" },
|
||||
{ label: "Qwen", description: "Alternative analysis tool" },
|
||||
{ label: "Auto", description: "Let agent decide per-task" }
|
||||
]
|
||||
}
|
||||
]
|
||||
})
|
||||
```
|
||||
|
||||
**Handle Materials Response**:
|
||||
```javascript
|
||||
if (userConfig.materials === "Provide file paths") {
|
||||
// Follow-up question for file paths
|
||||
const pathsResponse = AskUserQuestion({
|
||||
questions: [{
|
||||
question: "Enter file paths to include (comma-separated or one per line):",
|
||||
header: "Paths",
|
||||
multiSelect: false,
|
||||
options: [
|
||||
{ label: "Enter paths", description: "Provide paths in text input" }
|
||||
]
|
||||
}]
|
||||
})
|
||||
userConfig.supplementaryPaths = parseUserPaths(pathsResponse)
|
||||
}
|
||||
```
|
||||
|
||||
**Build userConfig**:
|
||||
```javascript
|
||||
const userConfig = {
|
||||
supplementaryMaterials: {
|
||||
type: "none|paths|inline",
|
||||
content: [...], // Parsed paths or inline content
|
||||
},
|
||||
executionMethod: "agent|hybrid|cli",
|
||||
preferredCliTool: "codex|gemini|qwen|auto",
|
||||
enableResume: true // Always enable resume for CLI executions
|
||||
}
|
||||
```
|
||||
|
||||
**Pass to Agent**: Include `userConfig` in agent prompt for Phase 2A/2B.
|
||||
|
||||
### Phase 1: Context Preparation & Module Detection (Command Responsibility)
|
||||
|
||||
**Command prepares session paths, metadata, and detects module structure.**
|
||||
@@ -159,10 +241,21 @@ Output:
|
||||
Session ID: {session-id}
|
||||
MCP Capabilities: {exa_code, exa_web, code_index}
|
||||
|
||||
## USER CONFIGURATION (from Phase 0)
|
||||
Execution Method: ${userConfig.executionMethod} // agent|hybrid|cli
|
||||
Preferred CLI Tool: ${userConfig.preferredCliTool} // codex|gemini|qwen|auto
|
||||
Supplementary Materials: ${userConfig.supplementaryMaterials}
|
||||
|
||||
## CLI TOOL SELECTION
|
||||
Determine CLI tool usage per-step based on user's task description:
|
||||
- If user specifies "use Codex/Gemini/Qwen for X" → Add command field to relevant steps
|
||||
- Default: Agent execution (no command field) unless user explicitly requests CLI
|
||||
Based on userConfig.executionMethod:
|
||||
- "agent": No command field in implementation_approach steps
|
||||
- "hybrid": Add command field to complex steps only (agent handles simple steps)
|
||||
- "cli": Add command field to ALL implementation_approach steps
|
||||
|
||||
CLI Resume Support (MANDATORY for all CLI commands):
|
||||
- Use --resume parameter to continue from previous task execution
|
||||
- Read previous task's cliExecutionId from session state
|
||||
- Format: ccw cli exec "[prompt]" --resume ${previousCliId} --tool ${tool} --mode write
|
||||
|
||||
## EXPLORATION CONTEXT (from context-package.exploration_results)
|
||||
- Load exploration_results from context-package.json
|
||||
@@ -186,6 +279,7 @@ Determine CLI tool usage per-step based on user's task description:
|
||||
- Artifacts integration from context package
|
||||
- **focus_paths enhanced with exploration critical_files**
|
||||
- Flow control with pre_analysis steps (include exploration integration_points analysis)
|
||||
- **CLI Execution IDs and strategies (MANDATORY)**
|
||||
|
||||
2. Implementation Plan (IMPL_PLAN.md)
|
||||
- Context analysis and artifact references
|
||||
@@ -197,6 +291,27 @@ Determine CLI tool usage per-step based on user's task description:
|
||||
- Links to task JSONs and summaries
|
||||
- Matches task JSON hierarchy
|
||||
|
||||
## CLI EXECUTION ID REQUIREMENTS (MANDATORY)
|
||||
Each task JSON MUST include:
|
||||
- **cli_execution_id**: Unique ID for CLI execution (format: `{session_id}-{task_id}`)
|
||||
- **cli_execution**: Strategy object based on depends_on:
|
||||
- No deps → `{ "strategy": "new" }`
|
||||
- 1 dep (single child) → `{ "strategy": "resume", "resume_from": "parent-cli-id" }`
|
||||
- 1 dep (multiple children) → `{ "strategy": "fork", "resume_from": "parent-cli-id" }`
|
||||
- N deps → `{ "strategy": "merge_fork", "merge_from": ["id1", "id2", ...] }`
|
||||
|
||||
**CLI Execution Strategy Rules**:
|
||||
1. **new**: Task has no dependencies - starts fresh CLI conversation
|
||||
2. **resume**: Task has 1 parent AND that parent has only this child - continues same conversation
|
||||
3. **fork**: Task has 1 parent BUT parent has multiple children - creates new branch with parent context
|
||||
4. **merge_fork**: Task has multiple parents - merges all parent contexts into new conversation
|
||||
|
||||
**Execution Command Patterns**:
|
||||
- new: `ccw cli exec "[prompt]" --tool [tool] --mode write --id [cli_execution_id]`
|
||||
- resume: `ccw cli exec "[prompt]" --resume [resume_from] --tool [tool] --mode write`
|
||||
- fork: `ccw cli exec "[prompt]" --resume [resume_from] --id [cli_execution_id] --tool [tool] --mode write`
|
||||
- merge_fork: `ccw cli exec "[prompt]" --resume [merge_from.join(',')] --id [cli_execution_id] --tool [tool] --mode write`
|
||||
|
||||
## QUALITY STANDARDS
|
||||
Hard Constraints:
|
||||
- Task count <= 18 (hard limit - request re-scope if exceeded)
|
||||
|
||||
@@ -187,7 +187,7 @@ Task(subagent_type="ui-design-agent",
|
||||
CONTEXT: @**/*.css @**/*.scss @**/*.js @**/*.ts
|
||||
EXPECTED: JSON report listing conflicts with file:line, values, semantic context
|
||||
RULES: Focus on core tokens | Report ALL variants | analysis=READ-ONLY
|
||||
\" --tool gemini --cd ${source}
|
||||
\" --tool gemini --mode analysis --cd ${source}
|
||||
\`\`\`
|
||||
|
||||
**Step 1: Load file list**
|
||||
@@ -302,7 +302,7 @@ Task(subagent_type="ui-design-agent",
|
||||
CONTEXT: @**/*.css @**/*.scss @**/*.js @**/*.ts
|
||||
EXPECTED: JSON report listing frameworks, animation types, file locations
|
||||
RULES: Focus on framework consistency | Map all animations | analysis=READ-ONLY
|
||||
\" --tool gemini --cd ${source}
|
||||
\" --tool gemini --mode analysis --cd ${source}
|
||||
\`\`\`
|
||||
|
||||
**Step 1: Load file list**
|
||||
@@ -381,7 +381,7 @@ Task(subagent_type="ui-design-agent",
|
||||
CONTEXT: @**/*.css @**/*.scss @**/*.js @**/*.ts @**/*.html
|
||||
EXPECTED: JSON report categorizing components, layout patterns, naming conventions
|
||||
RULES: Focus on component reusability | Identify layout systems | analysis=READ-ONLY
|
||||
\" --tool gemini --cd ${source}
|
||||
\" --tool gemini --mode analysis --cd ${source}
|
||||
\`\`\`
|
||||
|
||||
**Step 1: Load file list**
|
||||
|
||||
@@ -74,7 +74,7 @@ ccw cli exec "<PROMPT>" --tool <gemini|qwen|codex> --mode <analysis|write>
|
||||
- **Mode is MANDATORY** - ALWAYS explicitly specify `--mode analysis|write` (no implicit defaults)
|
||||
- **One template required** - ALWAYS reference exactly ONE template in RULES (use universal fallback if no specific match)
|
||||
- **Write protection** - Require EXPLICIT `--mode write` for file operations
|
||||
- **No escape characters** - NEVER use `\$`, `\"`, `\'` in CLI commands
|
||||
- **Use double quotes for shell expansion** - Always wrap prompts in double quotes `"..."` to enable `$(cat ...)` command substitution; NEVER use single quotes or escape characters (`\$`, `\"`, `\'`)
|
||||
|
||||
---
|
||||
|
||||
@@ -276,9 +276,22 @@ ccw cli exec "..." --tool gemini --mode analysis --cd src
|
||||
- `universal/00-universal-creative-style.txt` - For exploratory tasks
|
||||
|
||||
**Command Substitution Rules**:
|
||||
- Use `$(cat ...)` directly - do NOT read template content first
|
||||
- NEVER use escape characters: `\$`, `\"`, `\'`
|
||||
- Tilde expands correctly in prompt context
|
||||
- Use `$(cat ...)` directly in **double quotes** - command substitution executes in your local shell BEFORE passing to ccw
|
||||
- Shell expands `$(cat ...)` into file content automatically - do NOT read template content first
|
||||
- NEVER use escape characters (`\$`, `\"`, `\'`) or single quotes - these prevent shell expansion
|
||||
- Tilde (`~`) expands correctly in prompt context
|
||||
|
||||
**Critical**: Use double quotes `"..."` around the entire prompt to enable `$(cat ...)` expansion:
|
||||
```bash
|
||||
# ✓ CORRECT - double quotes allow shell expansion
|
||||
ccw cli exec "RULES: $(cat ~/.claude/workflows/cli-templates/protocols/analysis-protocol.md) ..." --tool gemini
|
||||
|
||||
# ✗ WRONG - single quotes prevent expansion
|
||||
ccw cli exec 'RULES: $(cat ~/.claude/workflows/cli-templates/protocols/analysis-protocol.md) ...' --tool gemini
|
||||
|
||||
# ✗ WRONG - escaped $ prevents expansion
|
||||
ccw cli exec "RULES: \$(cat ~/.claude/workflows/cli-templates/protocols/analysis-protocol.md) ..." --tool gemini
|
||||
```
|
||||
|
||||
**Examples**:
|
||||
```bash
|
||||
|
||||
@@ -5,40 +5,67 @@ Before implementation, always:
|
||||
- Map dependencies and integration points
|
||||
- Understand testing framework and coding conventions
|
||||
|
||||
## Context Gathering
|
||||
## MCP Tools Usage
|
||||
|
||||
**MANDATORY**: Use `codex_lens` (MCP tool) for all code search and analysis.
|
||||
### smart_search - Code Search (REQUIRED)
|
||||
|
||||
### codex_lens (REQUIRED)
|
||||
**When**: Find code, understand codebase structure, locate implementations
|
||||
|
||||
**MCP Actions**: `init`, `search`, `search_files` (Advanced ops via CLI: `codexlens --help`)
|
||||
|
||||
**Initialize**:
|
||||
```
|
||||
codex_lens(action="init", path=".")
|
||||
```
|
||||
- Auto-generates embeddings if `fastembed` installed
|
||||
- Skip with `--no-embeddings` flag
|
||||
|
||||
**Search** (Auto hybrid mode):
|
||||
```
|
||||
codex_lens(action="search", query="authentication")
|
||||
```
|
||||
**Search Files**:
|
||||
```
|
||||
codex_lens(action="search_files", query="payment")
|
||||
**How**:
|
||||
```javascript
|
||||
smart_search(query="authentication logic") // Auto mode (recommended)
|
||||
smart_search(action="init", path=".") // First-time setup
|
||||
smart_search(query="LoginUser", mode="exact") // Precise matching
|
||||
smart_search(query="import", mode="ripgrep") // Fast, no index
|
||||
```
|
||||
|
||||
### read_file (MCP)
|
||||
- Read files found by codex_lens
|
||||
- Directory traversal with patterns
|
||||
- Batch operations
|
||||
**Modes**: `auto` (intelligent routing), `hybrid` (best quality), `exact` (FTS), `ripgrep` (fast)
|
||||
|
||||
### smart_search
|
||||
- Fallback when codex_lens unavailable
|
||||
- Small projects (<100 files)
|
||||
---
|
||||
|
||||
### Exa
|
||||
- External APIs, libraries, frameworks
|
||||
- Recent documentation beyond knowledge cutoff
|
||||
- Public implementation examples
|
||||
### read_file - Read File Contents
|
||||
|
||||
**When**: Read files found by smart_search
|
||||
|
||||
**How**:
|
||||
```javascript
|
||||
read_file(path="/path/to/file.ts") // Single file
|
||||
read_file(path="/src/**/*.config.ts") // Pattern matching
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### edit_file - Modify Files
|
||||
|
||||
**When**: Built-in Edit tool fails or need advanced features
|
||||
|
||||
**How**:
|
||||
```javascript
|
||||
edit_file(path="/file.ts", old_string="...", new_string="...", mode="update")
|
||||
edit_file(path="/file.ts", line=10, content="...", mode="insert_after")
|
||||
```
|
||||
|
||||
**Modes**: `update` (replace text), `insert_after`, `insert_before`, `delete_line`
|
||||
|
||||
---
|
||||
|
||||
### write_file - Create/Overwrite Files
|
||||
|
||||
**When**: Create new files or completely replace content
|
||||
|
||||
**How**:
|
||||
```javascript
|
||||
write_file(path="/new-file.ts", content="...")
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Exa - External Search
|
||||
|
||||
**When**: Find documentation/examples outside codebase
|
||||
|
||||
**How**:
|
||||
```javascript
|
||||
exa(query="React hooks 2025 documentation")
|
||||
exa(query="FastAPI auth example github")
|
||||
```
|
||||
|
||||
@@ -25,7 +25,7 @@
|
||||
1. Known single file → Built-in Read
|
||||
2. Multiple files OR pattern matching → read_file (MCP)
|
||||
3. Unknown location → smart_search then Read
|
||||
4. Large codebase + repeated access → codex_lens
|
||||
4. Large codebase + repeated access → smart_search (indexed)
|
||||
|
||||
**File Editing**:
|
||||
1. Always try built-in Edit first
|
||||
@@ -36,7 +36,7 @@
|
||||
1. External knowledge → Exa
|
||||
2. Exact pattern in small codebase → Built-in Grep
|
||||
3. Semantic/unknown location → smart_search
|
||||
4. Large codebase + repeated searches → codex_lens
|
||||
4. Large codebase + repeated searches → smart_search (indexed)
|
||||
|
||||
## Decision Triggers
|
||||
|
||||
@@ -46,48 +46,3 @@
|
||||
**Use indexed search** for large, stable codebases
|
||||
**Use Exa** for external/public knowledge
|
||||
|
||||
## ⚡ Core Search Tools
|
||||
|
||||
**rg (ripgrep)**: Fast content search with regex support
|
||||
**find**: File/directory location by name patterns
|
||||
**grep**: Built-in pattern matching (fallback when rg unavailable)
|
||||
**get_modules_by_depth**: Program architecture analysis (MANDATORY before planning)
|
||||
|
||||
|
||||
## 🔧 Quick Command Reference
|
||||
|
||||
```bash
|
||||
# Semantic File Discovery (codebase-retrieval via CCW)
|
||||
ccw cli exec "
|
||||
PURPOSE: Discover files relevant to task/feature
|
||||
TASK: • List all files related to [task/feature description]
|
||||
MODE: analysis
|
||||
CONTEXT: @**/*
|
||||
EXPECTED: Relevant file paths with relevance explanation
|
||||
RULES: Focus on direct relevance to task requirements | analysis=READ-ONLY
|
||||
" --tool gemini --cd [directory]
|
||||
|
||||
# Program Architecture (MANDATORY before planning)
|
||||
ccw tool exec get_modules_by_depth '{}'
|
||||
|
||||
# Content Search (rg preferred)
|
||||
rg "pattern" --type js -n # Search JS files with line numbers
|
||||
rg -i "case-insensitive" # Ignore case
|
||||
rg -C 3 "context" # Show 3 lines before/after
|
||||
|
||||
# File Search
|
||||
find . -name "*.ts" -type f # Find TypeScript files
|
||||
find . -path "*/node_modules" -prune -o -name "*.js" -print
|
||||
|
||||
# Workflow Examples
|
||||
rg "IMPL-\d+" .workflow/ --type json # Find task IDs
|
||||
find .workflow/ -name "*.json" -path "*/.task/*" # Locate task files
|
||||
rg "status.*pending" .workflow/.task/ # Find pending tasks
|
||||
```
|
||||
|
||||
## ⚡ Performance Tips
|
||||
|
||||
- **rg > grep** for content search
|
||||
- **Use --type filters** to limit file types
|
||||
- **Exclude dirs**: `--glob '!node_modules'`
|
||||
- **Use -F** for literal strings (no regex)
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
import chalk from 'chalk';
|
||||
import http from 'http';
|
||||
import { executeTool } from '../tools/index.js';
|
||||
import { resolveFilePath, PathResolutionError, type ResolverContext } from './session-path-resolver.js';
|
||||
|
||||
// Handle EPIPE errors gracefully (occurs when piping to head/jq that closes early)
|
||||
process.stdout.on('error', (err: NodeJS.ErrnoException) => {
|
||||
@@ -22,6 +23,8 @@ interface ListOptions {
|
||||
|
||||
interface InitOptions {
|
||||
type?: string;
|
||||
content?: string; // JSON string for custom metadata
|
||||
location?: string; // Session location: active | lite-plan | lite-fix
|
||||
}
|
||||
|
||||
interface ReadOptions {
|
||||
@@ -146,14 +149,64 @@ async function listAction(options: ListOptions): Promise<void> {
|
||||
async function initAction(sessionId: string | undefined, options: InitOptions): Promise<void> {
|
||||
if (!sessionId) {
|
||||
console.error(chalk.red('Session ID is required'));
|
||||
console.error(chalk.gray('Usage: ccw session init <session_id> [--type <type>]'));
|
||||
console.error(chalk.gray('Usage: ccw session init <session_id> [--location <location>] [--type <type>] [--content <json>]'));
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const params = {
|
||||
// Auto-infer location from type if not explicitly provided
|
||||
// When type is 'lite-plan' or 'lite-fix', default location should match the type
|
||||
const sessionLocation = options.location ||
|
||||
(options.type === 'lite-plan' ? 'lite-plan' :
|
||||
options.type === 'lite-fix' ? 'lite-fix' :
|
||||
'active');
|
||||
|
||||
// Infer type from location if not explicitly provided
|
||||
const sessionType = options.type || (sessionLocation === 'active' ? 'workflow' : sessionLocation);
|
||||
|
||||
// Parse custom metadata from --content if provided
|
||||
let customMetadata: any = {};
|
||||
if (options.content) {
|
||||
try {
|
||||
customMetadata = JSON.parse(options.content);
|
||||
} catch (e) {
|
||||
const error = e as Error;
|
||||
console.error(chalk.red('Invalid JSON in --content parameter'));
|
||||
console.error(chalk.gray(`Parse error: ${error.message}`));
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
// Filter custom metadata: only allow safe fields, block system-critical fields
|
||||
const blockedFields = ['session_id', 'type', 'status', 'created_at', 'updated_at', 'archived_at'];
|
||||
const filteredCustomMetadata: any = {};
|
||||
for (const key in customMetadata) {
|
||||
if (!blockedFields.includes(key)) {
|
||||
filteredCustomMetadata[key] = customMetadata[key];
|
||||
} else {
|
||||
console.warn(chalk.yellow(`⚠ WARNING: Field '${key}' in --content is reserved and will be ignored`));
|
||||
}
|
||||
}
|
||||
|
||||
// Merge metadata: defaults < custom (filtered) < required fields
|
||||
const metadata: any = Object.assign(
|
||||
{
|
||||
session_id: sessionId,
|
||||
type: sessionType,
|
||||
status: 'planning',
|
||||
created_at: new Date().toISOString()
|
||||
},
|
||||
filteredCustomMetadata, // User custom fields (filtered)
|
||||
{
|
||||
session_id: sessionId, // Force override - always use CLI param
|
||||
type: sessionType // Force override - always use --type or default
|
||||
}
|
||||
);
|
||||
|
||||
const params: any = {
|
||||
operation: 'init',
|
||||
session_id: sessionId,
|
||||
session_type: options.type || 'workflow'
|
||||
metadata: metadata,
|
||||
location: sessionLocation // Always pass location to session_manager
|
||||
};
|
||||
|
||||
const result = await executeTool('session_manager', params);
|
||||
@@ -170,16 +223,146 @@ async function initAction(sessionId: string | undefined, options: InitOptions):
|
||||
payload: result.result
|
||||
});
|
||||
|
||||
// Lite sessions (lite-plan, lite-fix) use session-metadata.json, others use workflow-session.json
|
||||
const metadataFile = sessionLocation.startsWith('lite-') ? 'session-metadata.json' : 'workflow-session.json';
|
||||
|
||||
console.log(chalk.green(`✓ Session "${sessionId}" initialized`));
|
||||
console.log(chalk.gray(` Location: ${(result.result as any).path}`));
|
||||
console.log(chalk.gray(` Metadata: ${metadataFile} created`));
|
||||
}
|
||||
|
||||
/**
|
||||
* Read session content
|
||||
* Get session information (location and path)
|
||||
* Helper function for path resolution
|
||||
*/
|
||||
async function getSessionInfo(sessionId: string): Promise<{ path: string; location: 'active' | 'archived' | 'lite-plan' | 'lite-fix' }> {
|
||||
// Use session_manager to find the session
|
||||
const findParams = {
|
||||
operation: 'list',
|
||||
location: 'all',
|
||||
include_metadata: false
|
||||
};
|
||||
|
||||
const result = await executeTool('session_manager', findParams);
|
||||
|
||||
if (!result.success) {
|
||||
throw new Error(`Failed to list sessions: ${result.error}`);
|
||||
}
|
||||
|
||||
const resultData = result.result as any;
|
||||
const allSessions = [
|
||||
...(resultData.active || []).map((s: any) => ({ ...s, location: 'active' as const })),
|
||||
...(resultData.archived || []).map((s: any) => ({ ...s, location: 'archived' as const })),
|
||||
...(resultData.litePlan || []).map((s: any) => ({ ...s, location: 'lite-plan' as const })),
|
||||
...(resultData.liteFix || []).map((s: any) => ({ ...s, location: 'lite-fix' as const })),
|
||||
];
|
||||
|
||||
const session = allSessions.find((s: any) => s.session_id === sessionId || s.id === sessionId);
|
||||
|
||||
if (!session) {
|
||||
throw new Error(`Session "${sessionId}" not found in active, archived, lite-plan, or lite-fix locations`);
|
||||
}
|
||||
|
||||
// Return actual session path from the session object
|
||||
return {
|
||||
path: session.path || '',
|
||||
location: session.location
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Read session content (NEW - with path resolution)
|
||||
* @param {string} sessionId - Session ID
|
||||
* @param {string} filename - Filename or relative path
|
||||
* @param {Object} options - CLI options
|
||||
*/
|
||||
async function readAction(
|
||||
sessionId: string | undefined,
|
||||
filename: string | undefined,
|
||||
options: ReadOptions
|
||||
): Promise<void> {
|
||||
if (!sessionId) {
|
||||
console.error(chalk.red('Session ID is required'));
|
||||
console.error(chalk.gray('Usage: ccw session <session-id> read <filename|path>'));
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Backward compatibility: if --type is provided, use legacy implementation
|
||||
if (options.type) {
|
||||
console.warn(chalk.yellow('⚠ WARNING: --type parameter is deprecated'));
|
||||
console.warn(chalk.gray(' Old: ccw session read WFS-001 --type task --task-id IMPL-001'));
|
||||
console.warn(chalk.gray(' New: ccw session WFS-001 read IMPL-001.json'));
|
||||
console.log();
|
||||
return readActionLegacy(sessionId, options);
|
||||
}
|
||||
|
||||
if (!filename) {
|
||||
console.error(chalk.red('Filename is required'));
|
||||
console.error(chalk.gray('Usage: ccw session <session-id> read <filename|path>'));
|
||||
console.error(chalk.gray(''));
|
||||
console.error(chalk.gray('Examples:'));
|
||||
console.error(chalk.gray(' ccw session WFS-001 read IMPL-001.json'));
|
||||
console.error(chalk.gray(' ccw session WFS-001 read IMPL_PLAN.md'));
|
||||
console.error(chalk.gray(' ccw session WFS-001 read .task/IMPL-001.json'));
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
try {
|
||||
// Get session context
|
||||
const session = await getSessionInfo(sessionId);
|
||||
const context: ResolverContext = {
|
||||
sessionPath: session.path,
|
||||
sessionLocation: session.location
|
||||
};
|
||||
|
||||
// Resolve filename to content_type
|
||||
const resolved = resolveFilePath(filename, context);
|
||||
|
||||
// Call session_manager tool
|
||||
const params: any = {
|
||||
operation: 'read',
|
||||
session_id: sessionId,
|
||||
content_type: resolved.contentType,
|
||||
};
|
||||
|
||||
if (resolved.pathParams) {
|
||||
params.path_params = resolved.pathParams;
|
||||
}
|
||||
|
||||
const result = await executeTool('session_manager', params);
|
||||
|
||||
if (!result.success) {
|
||||
console.error(chalk.red(`Error: ${result.error}`));
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Output raw content for piping
|
||||
if (options.raw) {
|
||||
console.log(typeof (result.result as any).content === 'string'
|
||||
? (result.result as any).content
|
||||
: JSON.stringify((result.result as any).content, null, 2));
|
||||
} else {
|
||||
console.log(JSON.stringify(result, null, 2));
|
||||
}
|
||||
} catch (error: any) {
|
||||
if (error instanceof PathResolutionError) {
|
||||
console.error(chalk.red(`Error: ${error.message}`));
|
||||
if (error.suggestions.length > 0) {
|
||||
console.log(chalk.yellow('\nSuggestions:'));
|
||||
error.suggestions.forEach(s => console.log(chalk.gray(` ${s}`)));
|
||||
}
|
||||
process.exit(1);
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Read session content (LEGACY - with --type parameter)
|
||||
* @param {string} sessionId - Session ID
|
||||
* @param {Object} options - CLI options
|
||||
*/
|
||||
async function readAction(sessionId: string | undefined, options: ReadOptions): Promise<void> {
|
||||
async function readActionLegacy(sessionId: string | undefined, options: ReadOptions): Promise<void> {
|
||||
if (!sessionId) {
|
||||
console.error(chalk.red('Session ID is required'));
|
||||
console.error(chalk.gray('Usage: ccw session read <session_id> --type <content_type>'));
|
||||
@@ -193,10 +376,10 @@ async function readAction(sessionId: string | undefined, options: ReadOptions):
|
||||
};
|
||||
|
||||
// Add path_params if provided
|
||||
if (options.taskId) params.path_params = { ...params.path_params, task_id: options.taskId };
|
||||
if (options.filename) params.path_params = { ...params.path_params, filename: options.filename };
|
||||
if (options.dimension) params.path_params = { ...params.path_params, dimension: options.dimension };
|
||||
if (options.iteration) params.path_params = { ...params.path_params, iteration: options.iteration };
|
||||
if (options.taskId) params.path_params = { ...(params.path_params || {}), task_id: options.taskId };
|
||||
if (options.filename) params.path_params = { ...(params.path_params || {}), filename: options.filename };
|
||||
if (options.dimension) params.path_params = { ...(params.path_params || {}), dimension: options.dimension };
|
||||
if (options.iteration) params.path_params = { ...(params.path_params || {}), iteration: options.iteration };
|
||||
|
||||
const result = await executeTool('session_manager', params);
|
||||
|
||||
@@ -216,11 +399,144 @@ async function readAction(sessionId: string | undefined, options: ReadOptions):
|
||||
}
|
||||
|
||||
/**
|
||||
* Write session content
|
||||
* Write session content (NEW - with path resolution)
|
||||
* @param {string} sessionId - Session ID
|
||||
* @param {string} filename - Filename or relative path
|
||||
* @param {string} contentString - Content to write
|
||||
* @param {Object} options - CLI options
|
||||
*/
|
||||
async function writeAction(
|
||||
sessionId: string | undefined,
|
||||
filename: string | undefined,
|
||||
contentString: string | undefined,
|
||||
options: WriteOptions
|
||||
): Promise<void> {
|
||||
if (!sessionId) {
|
||||
console.error(chalk.red('Session ID is required'));
|
||||
console.error(chalk.gray('Usage: ccw session <session-id> write <filename|path> <content>'));
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Backward compatibility: if --type is provided, use legacy implementation
|
||||
if (options.type) {
|
||||
console.warn(chalk.yellow('⚠ WARNING: --type parameter is deprecated'));
|
||||
console.warn(chalk.gray(' Old: ccw session write WFS-001 --type plan --content "# Plan"'));
|
||||
console.warn(chalk.gray(' New: ccw session WFS-001 write IMPL_PLAN.md "# Plan"'));
|
||||
console.log();
|
||||
return writeActionLegacy(sessionId, options);
|
||||
}
|
||||
|
||||
if (!filename || !contentString) {
|
||||
console.error(chalk.red('Filename and content are required'));
|
||||
console.error(chalk.gray('Usage: ccw session <session-id> write <filename|path> <content>'));
|
||||
console.error(chalk.gray(''));
|
||||
console.error(chalk.gray('Examples:'));
|
||||
console.error(chalk.gray(' ccw session WFS-001 write IMPL_PLAN.md "# Implementation Plan"'));
|
||||
console.error(chalk.gray(' ccw session WFS-001 write IMPL-001.json \'{"id":"IMPL-001","status":"pending"}\''));
|
||||
console.error(chalk.gray(' ccw session WFS-001 write .task/IMPL-001.json \'{"status":"completed"}\''));
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
try {
|
||||
// Get session context
|
||||
const session = await getSessionInfo(sessionId);
|
||||
const context: ResolverContext = {
|
||||
sessionPath: session.path,
|
||||
sessionLocation: session.location
|
||||
};
|
||||
|
||||
// Resolve filename to content_type
|
||||
const resolved = resolveFilePath(filename, context);
|
||||
|
||||
// Parse content (try JSON first, fallback to string)
|
||||
let content: any;
|
||||
try {
|
||||
content = JSON.parse(contentString);
|
||||
} catch {
|
||||
content = contentString;
|
||||
}
|
||||
|
||||
// Call session_manager tool
|
||||
const params: any = {
|
||||
operation: 'write',
|
||||
session_id: sessionId,
|
||||
content_type: resolved.contentType,
|
||||
content,
|
||||
};
|
||||
|
||||
if (resolved.pathParams) {
|
||||
params.path_params = resolved.pathParams;
|
||||
}
|
||||
|
||||
const result = await executeTool('session_manager', params);
|
||||
|
||||
if (!result.success) {
|
||||
console.error(chalk.red(`Error: ${result.error}`));
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Emit granular event based on content_type
|
||||
const contentType = resolved.contentType;
|
||||
let eventType = 'CONTENT_WRITTEN';
|
||||
let entityId = null;
|
||||
|
||||
switch (contentType) {
|
||||
case 'task':
|
||||
eventType = 'TASK_CREATED';
|
||||
entityId = resolved.pathParams?.task_id || content.task_id;
|
||||
break;
|
||||
case 'summary':
|
||||
eventType = 'SUMMARY_WRITTEN';
|
||||
entityId = resolved.pathParams?.task_id;
|
||||
break;
|
||||
case 'plan':
|
||||
eventType = 'PLAN_UPDATED';
|
||||
break;
|
||||
case 'review-dim':
|
||||
eventType = 'REVIEW_UPDATED';
|
||||
entityId = resolved.pathParams?.dimension;
|
||||
break;
|
||||
case 'review-iter':
|
||||
eventType = 'REVIEW_UPDATED';
|
||||
entityId = resolved.pathParams?.iteration;
|
||||
break;
|
||||
case 'review-fix':
|
||||
eventType = 'REVIEW_UPDATED';
|
||||
entityId = resolved.pathParams?.filename;
|
||||
break;
|
||||
case 'session':
|
||||
eventType = 'SESSION_UPDATED';
|
||||
break;
|
||||
}
|
||||
|
||||
notifyDashboard({
|
||||
type: eventType,
|
||||
sessionId: sessionId,
|
||||
entityId: entityId,
|
||||
contentType: contentType,
|
||||
payload: (result.result as any).written_content || content
|
||||
});
|
||||
|
||||
console.log(chalk.green(`✓ Content written to ${resolved.resolvedPath}`));
|
||||
} catch (error: any) {
|
||||
if (error instanceof PathResolutionError) {
|
||||
console.error(chalk.red(`Error: ${error.message}`));
|
||||
if (error.suggestions.length > 0) {
|
||||
console.log(chalk.yellow('\nSuggestions:'));
|
||||
error.suggestions.forEach(s => console.log(chalk.gray(` ${s}`)));
|
||||
}
|
||||
process.exit(1);
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Write session content (LEGACY - with --type parameter)
|
||||
* @param {string} sessionId - Session ID
|
||||
* @param {Object} options - CLI options
|
||||
*/
|
||||
async function writeAction(sessionId: string | undefined, options: WriteOptions): Promise<void> {
|
||||
async function writeActionLegacy(sessionId: string | undefined, options: WriteOptions): Promise<void> {
|
||||
if (!sessionId) {
|
||||
console.error(chalk.red('Session ID is required'));
|
||||
console.error(chalk.gray('Usage: ccw session write <session_id> --type <content_type> --content <json>'));
|
||||
@@ -248,8 +564,8 @@ async function writeAction(sessionId: string | undefined, options: WriteOptions)
|
||||
};
|
||||
|
||||
// Add path_params if provided
|
||||
if (options.taskId) params.path_params = { ...params.path_params, task_id: options.taskId };
|
||||
if (options.filename) params.path_params = { ...params.path_params, filename: options.filename };
|
||||
if (options.taskId) params.path_params = { ...(params.path_params || {}), task_id: options.taskId };
|
||||
if (options.filename) params.path_params = { ...(params.path_params || {}), filename: options.filename };
|
||||
|
||||
const result = await executeTool('session_manager', params);
|
||||
|
||||
@@ -712,7 +1028,38 @@ export async function sessionCommand(
|
||||
args: string | string[],
|
||||
options: any
|
||||
): Promise<void> {
|
||||
const argsArray = Array.isArray(args) ? args : (args ? [args] : []);
|
||||
let argsArray = Array.isArray(args) ? args : (args ? [args] : []);
|
||||
|
||||
// Detect new format: ccw session WFS-xxx <operation> <args>
|
||||
// If subcommand looks like a session ID, rearrange parameters
|
||||
// Exception: 'init' should always use traditional format (ccw session init WFS-xxx)
|
||||
const isSessionId = subcommand && (
|
||||
subcommand.startsWith('WFS-') ||
|
||||
subcommand === 'manifest' ||
|
||||
subcommand === 'project' ||
|
||||
/^[A-Z][A-Z0-9]*-[A-Z0-9]+/.test(subcommand) // Generic session ID pattern (uppercase prefix + dash + alphanumeric)
|
||||
);
|
||||
|
||||
if (isSessionId && argsArray.length > 0) {
|
||||
const operation = argsArray[0];
|
||||
|
||||
// Reject new format for init operation (semantic error)
|
||||
if (operation === 'init') {
|
||||
console.error(chalk.red('Error: Invalid format for init operation'));
|
||||
console.error(chalk.gray('Correct: ccw session init <session-id>'));
|
||||
console.error(chalk.gray(`Wrong: ccw session <session-id> init`));
|
||||
console.error(chalk.yellow('\nReason: Session must be initialized before it can be referenced'));
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// New format detected: session-id comes first
|
||||
const sessionId = subcommand;
|
||||
const operationArgs = argsArray.slice(1);
|
||||
|
||||
// Rearrange: operation becomes subcommand, session-id goes into args
|
||||
subcommand = operation;
|
||||
argsArray = [sessionId, ...operationArgs];
|
||||
}
|
||||
|
||||
switch (subcommand) {
|
||||
case 'list':
|
||||
@@ -722,10 +1069,12 @@ export async function sessionCommand(
|
||||
await initAction(argsArray[0], options);
|
||||
break;
|
||||
case 'read':
|
||||
await readAction(argsArray[0], options);
|
||||
// args[0] = session-id, args[1] = filename (optional for backward compat)
|
||||
await readAction(argsArray[0], argsArray[1], options);
|
||||
break;
|
||||
case 'write':
|
||||
await writeAction(argsArray[0], options);
|
||||
// args[0] = session-id, args[1] = filename, args[2] = content
|
||||
await writeAction(argsArray[0], argsArray[1], argsArray[2], options);
|
||||
break;
|
||||
case 'update':
|
||||
await updateAction(argsArray[0], options);
|
||||
@@ -754,18 +1103,26 @@ export async function sessionCommand(
|
||||
default:
|
||||
console.log(chalk.bold.cyan('\nCCW Session Management\n'));
|
||||
console.log('Subcommands:');
|
||||
console.log(chalk.gray(' list List all sessions'));
|
||||
console.log(chalk.gray(' init <session_id> Initialize new session'));
|
||||
console.log(chalk.gray(' status <session_id> <status> Update session status'));
|
||||
console.log(chalk.gray(' task <session_id> <task_id> <status> Update task status'));
|
||||
console.log(chalk.gray(' stats <session_id> Get session statistics'));
|
||||
console.log(chalk.gray(' delete <session_id> <file_path> Delete file within session'));
|
||||
console.log(chalk.gray(' read <session_id> Read session content'));
|
||||
console.log(chalk.gray(' write <session_id> Write session content'));
|
||||
console.log(chalk.gray(' update <session_id> Update session (merge)'));
|
||||
console.log(chalk.gray(' archive <session_id> Archive session'));
|
||||
console.log(chalk.gray(' mkdir <session_id> Create subdirectory'));
|
||||
console.log(chalk.gray(' exec <json> Execute raw operation'));
|
||||
console.log(chalk.gray(' list List all sessions'));
|
||||
console.log(chalk.gray(' <session-id> init [metadata] Initialize new session'));
|
||||
console.log(chalk.gray(' <session-id> read <filename|path> Read session content'));
|
||||
console.log(chalk.gray(' <session-id> write <filename> <content> Write session content'));
|
||||
console.log(chalk.gray(' <session-id> stats Get session statistics'));
|
||||
console.log(chalk.gray(' <session-id> archive Archive session'));
|
||||
console.log(chalk.gray(' <session-id> status <status> Update session status'));
|
||||
console.log(chalk.gray(' <session-id> task <task-id> <status> Update task status'));
|
||||
console.log(chalk.gray(' <session-id> delete <file-path> Delete file within session'));
|
||||
console.log(chalk.gray(' <session-id> update Update session (merge)'));
|
||||
console.log(chalk.gray(' <session-id> mkdir Create subdirectory'));
|
||||
console.log(chalk.gray(' exec <json> Execute raw operation'));
|
||||
console.log();
|
||||
console.log('Filename/Path Examples:');
|
||||
console.log(chalk.gray(' IMPL-001.json Task file (auto: .task/)'));
|
||||
console.log(chalk.gray(' .task/IMPL-001.json Task file (explicit path)'));
|
||||
console.log(chalk.gray(' IMPL_PLAN.md Implementation plan'));
|
||||
console.log(chalk.gray(' TODO_LIST.md TODO list'));
|
||||
console.log(chalk.gray(' workflow-session.json Session metadata'));
|
||||
console.log(chalk.gray(' .review/dimensions/security.json Review dimension'));
|
||||
console.log();
|
||||
console.log('Status Values:');
|
||||
console.log(chalk.gray(' Session: planning, active, implementing, reviewing, completed, paused'));
|
||||
@@ -773,11 +1130,12 @@ export async function sessionCommand(
|
||||
console.log();
|
||||
console.log('Examples:');
|
||||
console.log(chalk.gray(' ccw session list'));
|
||||
console.log(chalk.gray(' ccw session init WFS-my-feature'));
|
||||
console.log(chalk.gray(' ccw session status WFS-my-feature active'));
|
||||
console.log(chalk.gray(' ccw session task WFS-my-feature IMPL-001 completed'));
|
||||
console.log(chalk.gray(' ccw session stats WFS-my-feature'));
|
||||
console.log(chalk.gray(' ccw session delete WFS-my-feature .archiving'));
|
||||
console.log(chalk.gray(' ccw session archive WFS-my-feature'));
|
||||
console.log(chalk.gray(' ccw session WFS-001 init'));
|
||||
console.log(chalk.gray(' ccw session WFS-001 read IMPL_PLAN.md'));
|
||||
console.log(chalk.gray(' ccw session WFS-001 read IMPL-001.json'));
|
||||
console.log(chalk.gray(' ccw session WFS-001 write IMPL_PLAN.md "# Plan"'));
|
||||
console.log(chalk.gray(' ccw session WFS-001 write IMPL-001.json \'{"status":"pending"}\''));
|
||||
console.log(chalk.gray(' ccw session WFS-001 stats'));
|
||||
console.log(chalk.gray(' ccw session WFS-001 archive'));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -354,10 +354,14 @@ function executeInit(params: Params): any {
|
||||
// Validate session_id format
|
||||
validateSessionId(session_id);
|
||||
|
||||
// Determine session location (default: active for WFS, or specified for lite)
|
||||
const sessionLocation = (location === 'lite-plan' || location === 'lite-fix')
|
||||
? location
|
||||
: 'active';
|
||||
// Auto-infer location from metadata.type if location not explicitly provided
|
||||
// Priority: explicit location > metadata.type > default 'active'
|
||||
const sessionLocation: 'active' | 'archived' | 'lite-plan' | 'lite-fix' =
|
||||
(location === 'active' || location === 'archived' || location === 'lite-plan' || location === 'lite-fix')
|
||||
? location
|
||||
: (metadata?.type === 'lite-plan' ? 'lite-plan' :
|
||||
metadata?.type === 'lite-fix' ? 'lite-fix' :
|
||||
'active');
|
||||
|
||||
// Check if session already exists (auto-detect all locations)
|
||||
const existing = findSession(session_id);
|
||||
@@ -392,7 +396,7 @@ function executeInit(params: Params): any {
|
||||
|
||||
const sessionData = {
|
||||
session_id,
|
||||
type: sessionLocation,
|
||||
type: metadata?.type || sessionLocation, // Preserve user-specified type if provided
|
||||
status: 'initialized',
|
||||
created_at: new Date().toISOString(),
|
||||
...metadata,
|
||||
|
||||
Reference in New Issue
Block a user