feat: 添加执行ID和子命令功能,增强CLI工具的输出选项和文档生成

This commit is contained in:
catlog22
2026-03-07 17:57:30 +08:00
parent ece4afcac8
commit eaaadcd164
6 changed files with 514 additions and 37 deletions

View File

@@ -321,6 +321,12 @@ ccw cli -p "..." --tool gemini --mode analysis --rule analysis-review-architectu
- Description: Additional directories (comma-separated, quote if paths contain spaces) - Description: Additional directories (comma-separated, quote if paths contain spaces)
- Default: none - Default: none
- **`--id <id>`**
- Description: Execution ID (recommended, auto-generated if omitted)
- Default: Auto-generated in format `{prefix}-{HHmmss}-{rand4}` (e.g., `gem-143022-x7k2`)
- Prefix mapping: gemini→gem, qwen→qwn, codex→cdx, claude→cld, opencode→opc
- Note: ID is always output to stderr as `[CCW_EXEC_ID=<id>]` for programmatic capture
- **`--resume [id]`** - **`--resume [id]`**
- Description: Resume previous session - Description: Resume previous session
- Default: - - Default: -
@@ -385,6 +391,65 @@ ASSISTANT RESPONSE: [Previous output]
[Your new prompt] [Your new prompt]
``` ```
### Subcommands
#### `show` — List All Executions
```bash
ccw cli show # Active + recent completed executions
ccw cli show --all # Include full history
```
Displays a unified table of running and recent executions with: ID, Tool, Mode, Status, Duration, Prompt Preview.
#### `watch <id>` — Stream Execution Output
```bash
ccw cli watch <id> # Stream until completion (output to stderr)
ccw cli watch <id> --timeout 120 # Auto-exit after 120 seconds
```
Behavior:
- Output written to **stderr** (does not pollute stdout)
- Exit code: 0 = success, 1 = error, 2 = timeout
- Callers can `ccw cli watch <id> 2>/dev/null` to silently wait
#### `output <id>` — Get Execution Output
```bash
ccw cli output <id> # Final result only (default)
ccw cli output <id> --verbose # Full metadata + raw output
ccw cli output <id> --raw # Raw stdout (for piping)
```
Default returns the first available of `finalOutput`, `parsedOutput`, then `stdout` — the agent's final response text only.
`--verbose` shows full metadata (ID, turn, status, project) plus raw stdout/stderr.
#### ID Workflow Example
```bash
# Execute with auto-generated ID
ccw cli -p "analyze code" --tool gemini --mode analysis
# stderr outputs: [CCW_EXEC_ID=gem-143022-x7k2]
# Execute with custom ID
ccw cli -p "implement feature" --tool gemini --mode write --id my-task-1
# stderr outputs: [CCW_EXEC_ID=my-task-1]
# Check status
ccw cli show
# Watch running execution
ccw cli watch gem-143022-x7k2
# Get final result
ccw cli output gem-143022-x7k2
# Capture ID programmatically
EXEC_ID=$(ccw cli -p "test" --tool gemini --mode analysis 2>&1 | grep -oP 'CCW_EXEC_ID=\K[^\]]+')
ccw cli output $EXEC_ID
```
### Command Examples ### Command Examples
#### Task-Type Specific Templates #### Task-Type Specific Templates

View File

@@ -153,6 +153,27 @@ If DeepWiki is available (`deepwiki_feature_to_symbol_index` exists in doc-index
**Graceful degradation**: If DeepWiki unavailable → log warning → skip symbol injection → continue flow. **Graceful degradation**: If DeepWiki unavailable → log warning → skip symbol injection → continue flow.
### Phase 1.8: Persist Doc Context Package
After building doc_context (including symbol_docs from Phase 1.7), persist it as a reusable context package:
1. Bundle doc_context into JSON structure:
```json
{
"affected_features": ["feat-auth"],
"affected_requirements": ["REQ-001", "REQ-002"],
"affected_components": ["tech-auth-service"],
"architecture_constraints": ["ADR-001"],
"index_path": ".workflow/.doc-index/doc-index.json",
"symbol_docs": [...]
}
```
2. Write to session folder: `{sessionFolder}/.process/doc-context-package.json`
3. Store relative path for task.json population: `../.process/doc-context-package.json`
**Error handling**: If write fails → log warning → continue without context package (backward compatible).
--- ---
## Phase 2: Doc-Index-Guided Exploration (NEW) ## Phase 2: Doc-Index-Guided Exploration (NEW)
@@ -318,6 +339,93 @@ Agent(subagent_type="cli-lite-planning-agent", prompt="
") ")
``` ```
### 4.3.1 Populate Task Artifacts (TASK-002)
After task generation, enrich each TASK-*.json with artifacts[] field:
1. Load doc-index.json from `.workflow/.doc-index/doc-index.json`
2. For each task, extract feature_ids from task.doc_context
3. Filter doc-index features/requirements matching task scope:
- Match by feature_ids in task.doc_context.feature_ids
- Include linked requirements via requirementIds
- Include linked components via componentIds
4. Populate task.artifacts[] with filtered references:
```json
{
"artifacts": [
{
"type": "feature_spec",
"source": "doc-index",
"path": ".workflow/.doc-index/feature-maps/auth.md",
"feature_id": "feat-auth",
"usage": "Reference for authentication requirements"
},
{
"type": "requirement",
"source": "doc-index",
"path": ".workflow/.doc-index/doc-index.json#requirements[0]",
"feature_id": "feat-auth",
"requirement_id": "REQ-001",
"usage": "Acceptance criteria source"
},
{
"type": "component_doc",
"source": "doc-index",
"path": ".workflow/.doc-index/tech-registry/auth-service.md",
"component_id": "tech-auth-service",
"usage": "Implementation reference"
}
]
}
```
**Loading pattern** (following brainstorm pattern from action-planning-agent.md:200-214):
- Load doc-index.json once for catalog
- Filter by task-relevant feature IDs (1-3 per task)
- Only include artifacts directly referenced in task scope
- Use relative paths from task file location
### 4.3.2 Populate Context Package Path (TASK-001)
Set context_package_path field in each TASK-*.json:
```json
{
"context_package_path": "../.process/doc-context-package.json"
}
```
Relative path from `.task/TASK-*.json` to `.process/doc-context-package.json`.
### 4.3.3 Add Navigation Links Block (TASK-003)
Add links{} navigation block to each TASK-*.json for improved discoverability:
```json
{
"links": {
"plan": "../plan.json",
"doc_index": "../../.doc-index/doc-index.json",
"feature_maps": [
"../../.doc-index/feature-maps/auth.md"
],
"related_tasks": [
"TASK-002.json",
"TASK-003.json"
]
}
}
```
**Path computation**:
- `plan`: Relative path from `.task/TASK-*.json` to `plan.json` (sibling of .task/)
- `doc_index`: Relative path to `.workflow/.doc-index/doc-index.json`
- `feature_maps`: Paths to feature-map docs from task.doc_context.feature_docs
- `related_tasks`: Task IDs from task.depends_on or tasks sharing same feature_ids
**Backward compatibility**: links{} is an optional field (task-schema allows additionalProperties).
### 4.4 Output Schema: plan.json ### 4.4 Output Schema: plan.json
Follows `plan-overview-base-schema` with ddd-specific `doc_context` extension: Follows `plan-overview-base-schema` with ddd-specific `doc_context` extension:

View File

@@ -277,6 +277,234 @@ Write the index with code-first markers:
} }
``` ```
## Phase 7: Layer-Based Document Generation (TASK-004)
**Generation Strategy**: Layer 3 → Layer 2 → Layer 1 (bottom-up, following memory-manage pattern)
### 7.1 Layer Definition
| Layer | Content | Generation Order | Dependencies |
|-------|---------|------------------|--------------|
| **Layer 3** | Component docs (`tech-registry/{slug}.md`) | First | Source code only |
| **Layer 2** | Feature docs (`feature-maps/{slug}.md`) | Second | Layer 3 component docs |
| **Layer 1** | Index docs (`README.md`, `ARCHITECTURE.md`, `_index.md`) | Third | Layer 2 feature docs |
### 7.2 Layer 3: Component Documentation
For each component in `technicalComponents[]`:
```bash
ccw cli -p "PURPOSE: Generate component documentation for {component.name}
TASK:
• Document component purpose and responsibility
• List exported symbols (classes, functions, types)
• Document dependencies (internal and external)
• Include code examples for key APIs
• Document integration points with other components
MODE: write
CONTEXT: @{component.codeLocations[].path}
EXPECTED: Markdown file with: Overview, API Reference, Dependencies, Usage Examples
CONSTRAINTS: Focus on public API | Include type signatures
" --tool gemini --mode write --cd .workflow/.doc-index/tech-registry/
```
Output: `.workflow/.doc-index/tech-registry/{component-slug}.md`
Add layer metadata to generated doc:
```markdown
---
layer: 3
component_id: tech-auth-service
generated_at: ISO8601
---
```
### 7.3 Layer 2: Feature Documentation
For each feature in `features[]`:
```bash
ccw cli -p "PURPOSE: Generate feature documentation for {feature.name}
TASK:
• Describe feature purpose and business value
• List requirements (from requirementIds)
• Document components involved (from techComponentIds)
• Include architecture decisions (from adrIds)
• Provide integration guide
MODE: write
CONTEXT: @.workflow/.doc-index/tech-registry/{related-components}.md
EXPECTED: Markdown file with: Overview, Requirements, Components, Architecture, Integration
CONSTRAINTS: Reference Layer 3 component docs | Business-focused language
" --tool gemini --mode write --cd .workflow/.doc-index/feature-maps/
```
Output: `.workflow/.doc-index/feature-maps/{feature-slug}.md`
Add layer metadata:
```markdown
---
layer: 2
feature_id: feat-auth
depends_on_layer3: [tech-auth-service, tech-user-model]
generated_at: ISO8601
---
```
### 7.4 Layer 1: Index Documentation
Generate top-level overview documents:
1. **README.md** - Project overview with navigation
2. **ARCHITECTURE.md** - System architecture overview
3. **feature-maps/_index.md** - Feature catalog
4. **tech-registry/_index.md** - Component catalog
5. **sessions/_index.md** - Planning sessions index
```bash
ccw cli -p "PURPOSE: Generate project overview documentation
TASK:
• Create README.md with project summary and navigation
• Create ARCHITECTURE.md with system design overview
• Create _index.md files for feature-maps and tech-registry
• Include links to all Layer 2 feature docs
MODE: write
CONTEXT: @.workflow/.doc-index/feature-maps/*.md @.workflow/.doc-index/tech-registry/*.md @.workflow/.doc-index/doc-index.json
EXPECTED: Overview documents with navigation links
CONSTRAINTS: High-level only | Link to Layer 2 docs for details
" --tool gemini --mode write --cd .workflow/.doc-index/
```
Add layer metadata:
```markdown
---
layer: 1
depends_on_layer2: [feat-auth, feat-orders]
generated_at: ISO8601
---
```
## Phase 8: Project Overview Documentation (TASK-005)
Generate standard overview documents as entry points for navigation.
### 8.1 README.md Template
```bash
ccw cli -p "PURPOSE: Generate project README with overview and navigation
TASK:
• Project summary and purpose
• Quick start guide
• Navigation to features, components, and architecture
• Link to doc-index.json
MODE: write
CONTEXT: @.workflow/.doc-index/doc-index.json @.workflow/.doc-index/feature-maps/_index.md
EXPECTED: README.md with: Overview, Quick Start, Navigation, Links
CONSTRAINTS: High-level only | Entry point for new developers
" --tool gemini --mode write --cd .workflow/.doc-index/
```
Output: `.workflow/.doc-index/README.md`
### 8.2 ARCHITECTURE.md Template
```bash
ccw cli -p "PURPOSE: Generate architecture overview document
TASK:
• System design overview
• Component relationships and dependencies
• Key architecture decisions (from ADRs)
• Technology stack
MODE: write
CONTEXT: @.workflow/.doc-index/doc-index.json @.workflow/.doc-index/tech-registry/*.md
EXPECTED: ARCHITECTURE.md with: System Design, Component Diagram, ADRs, Tech Stack
CONSTRAINTS: Architecture-focused | Reference component docs for details
" --tool gemini --mode write --cd .workflow/.doc-index/
```
Output: `.workflow/.doc-index/ARCHITECTURE.md`
### 8.3 sessions/_index.md Template
```bash
ccw cli -p "PURPOSE: Generate planning sessions index
TASK:
• List all planning session folders chronologically
• Link to each session's plan.json
• Show session status and task count
MODE: write
CONTEXT: @.workflow/.doc-index/planning/*/plan.json
EXPECTED: sessions/_index.md with: Session List, Links, Status
CONSTRAINTS: Chronological order | Link to session folders
" --tool gemini --mode write --cd .workflow/.doc-index/sessions/
```
Output: `.workflow/.doc-index/sessions/_index.md`
### 8.4 Update ddd:sync for Overview Docs
Add to Phase 4 (Refresh Documents):
- If Layer 2 changed significantly → regenerate README.md and ARCHITECTURE.md
- If new planning session created → update sessions/_index.md
## Phase 9: Schema Versioning (TASK-006)
### 9.1 Add schema_version to doc-index.json
Update Phase 6 doc-index.json generation to include:
```json
{
"schema_version": "1.0",
"version": "1.0",
"project": "{project-name}",
...
}
```
### 9.2 Create SCHEMA.md
```bash
ccw cli -p "PURPOSE: Document doc-index.json schema structure and versioning
TASK:
• Document current schema structure (all fields)
• Define versioning policy (semver: major.minor)
• Document migration protocol for version upgrades
• Provide examples for each schema section
MODE: write
CONTEXT: @.workflow/.doc-index/doc-index.json
EXPECTED: SCHEMA.md with: Schema Structure, Versioning Policy, Migration Protocol, Examples
CONSTRAINTS: Complete field documentation | Clear migration steps
" --tool gemini --mode write --cd .workflow/.doc-index/
```
Output: `.workflow/.doc-index/SCHEMA.md`
### 9.3 Version Check Logic
Add to ddd:plan and ddd:sync Phase 1:
```javascript
const docIndex = JSON.parse(Read('.workflow/.doc-index/doc-index.json'));
const schemaVersion = docIndex.schema_version || '0.0'; // Default for legacy
if (schemaVersion !== '1.0') {
console.warn(`Schema version mismatch: found ${schemaVersion}, expected 1.0`);
console.warn('Consider running schema migration or regenerating doc-index');
}
```
### 9.4 Versioning Policy
**Semantic Versioning**:
- **Major** (X.0): Breaking changes (field removal, type changes, incompatible structure)
- **Minor** (X.Y): Non-breaking additions (new optional fields, new sections)
**Migration Protocol**:
1. Detect version mismatch in ddd:plan/ddd:sync
2. Log warning with migration instructions
3. Provide migration script or regeneration option
4. Update schema_version after successful migration
## Phase 6.5: Build DeepWiki Feature-to-Symbol Index ## Phase 6.5: Build DeepWiki Feature-to-Symbol Index
If DeepWiki is available (`.codexlens/deepwiki_index.db` exists): If DeepWiki is available (`.codexlens/deepwiki_index.db` exists):

View File

@@ -79,7 +79,9 @@ For each changed file, determine:
- **Category**: source | test | config | docs | other - **Category**: source | test | config | docs | other
- **Symbols affected**: parse diff for changed functions/classes (use Gemini if complex) - **Symbols affected**: parse diff for changed functions/classes (use Gemini if complex)
## Phase 2: Impact Tracing ## Phase 2: Impact Tracing (Layer-Based, TASK-004)
**Strategy**: Trace impact through layers (files → components → features → indexes) following memory-manage pattern.
### 2.1 Match to Index ### 2.1 Match to Index
@@ -87,9 +89,9 @@ For each changed file path:
``` ```
Search doc-index.json.technicalComponents[].codeLocations[].path Search doc-index.json.technicalComponents[].codeLocations[].path
→ Find matching component IDs → Find matching component IDs (Layer 3)
→ From components, find linked featureIds → From components, find linked featureIds (Layer 2)
→ From features, find linked requirementIds → From features, find linked requirementIds (Layer 2)
``` ```
### 2.2 Discover New Components ### 2.2 Discover New Components
@@ -228,23 +230,102 @@ For each affected feature:
Set `doc-index.json.last_updated` to current time. Set `doc-index.json.last_updated` to current time.
## Phase 4: Refresh Documents ## Phase 4: Refresh Documents (Layer-Based Regeneration, TASK-004)
### 4.1 Update Feature Maps **Strategy**: Regenerate affected documents following Layer 3 → Layer 2 → Layer 1 order.
For each affected feature's `feature-maps/{slug}.md`: ### 4.1 Identify Affected Layers
- Update "Change History" table with new action entry
- Update component list if new components were added
- Update status if changed
### 4.2 Update Tech Registry From Phase 2 impact tracing:
- **Layer 3 affected**: Components with modified codeLocations
- **Layer 2 affected**: Features linked to affected components
- **Layer 1 affected**: Index docs if Layer 2 changed
### 4.2 Layer 3: Update Component Docs
For each affected component's `tech-registry/{slug}.md`: For each affected component's `tech-registry/{slug}.md`:
- Update code locations
- Update symbol list
- Add action to change history
### 4.3 Update Action Log ```bash
ccw cli -p "PURPOSE: Update component documentation for {component.name} after code changes
TASK:
• Update code locations and line ranges
• Update symbol list (add new exports, remove deleted)
• Add change entry to history table
• Refresh usage examples if API changed
MODE: write
CONTEXT: @{component.codeLocations[].path}
EXPECTED: Updated markdown with current API state
CONSTRAINTS: Preserve existing structure | Only update changed sections
" --tool gemini --mode write --cd .workflow/.doc-index/tech-registry/
```
Update layer metadata:
```markdown
---
layer: 3
component_id: tech-auth-service
generated_at: ISO8601
last_updated: ISO8601
---
```
### 4.3 Layer 2: Update Feature Docs
For each affected feature's `feature-maps/{slug}.md`:
```bash
ccw cli -p "PURPOSE: Update feature documentation for {feature.name} after component changes
TASK:
• Update component list if new components added
• Update status if requirements now fully implemented
• Add change entry to history table
• Refresh integration guide if component APIs changed
MODE: write
CONTEXT: @.workflow/.doc-index/tech-registry/{affected-components}.md
EXPECTED: Updated markdown reflecting current feature state
CONSTRAINTS: Reference updated Layer 3 docs | Preserve business language
" --tool gemini --mode write --cd .workflow/.doc-index/feature-maps/
```
Update layer metadata:
```markdown
---
layer: 2
feature_id: feat-auth
depends_on_layer3: [tech-auth-service, tech-user-model]
generated_at: ISO8601
last_updated: ISO8601
---
```
### 4.4 Layer 1: Update Index Docs (if needed)
If Layer 2 changed significantly (new features, status changes):
```bash
ccw cli -p "PURPOSE: Update project overview docs after feature changes
TASK:
• Update README.md feature list
• Update ARCHITECTURE.md if new components added
• Update _index.md files with new entries
MODE: write
CONTEXT: @.workflow/.doc-index/feature-maps/*.md @.workflow/.doc-index/doc-index.json
EXPECTED: Updated overview docs with current project state
CONSTRAINTS: High-level only | Link to Layer 2 for details
" --tool gemini --mode write --cd .workflow/.doc-index/
```
Update layer metadata:
```markdown
---
layer: 1
depends_on_layer2: [feat-auth, feat-orders]
generated_at: ISO8601
last_updated: ISO8601
---
```
### 4.5 Update Action Log
Create `.workflow/.doc-index/action-logs/{task-id}.md`: Create `.workflow/.doc-index/action-logs/{task-id}.md`:

View File

@@ -226,8 +226,8 @@ export function run(argv: string[]): void {
.option('--output-type <type>', 'Output type: stdout, stderr, both', 'both') .option('--output-type <type>', 'Output type: stdout, stderr, both', 'both')
.option('--turn <n>', 'Turn number for cache (default: latest)') .option('--turn <n>', 'Turn number for cache (default: latest)')
.option('--raw', 'Raw output only (no formatting)') .option('--raw', 'Raw output only (no formatting)')
.option('--final', 'Output final result only (legacy, now default)') .option('--final', 'Output final result only (agent_message content, now default)')
.option('--verbose', 'Show full metadata in output view') .option('--verbose', 'Show full metadata + raw output')
.option('--timeout <seconds>', 'Timeout for watch command') .option('--timeout <seconds>', 'Timeout for watch command')
.option('--all', 'Show all executions in show command') .option('--all', 'Show all executions in show command')
.option('--to-file <path>', 'Save output to file') .option('--to-file <path>', 'Save output to file')

View File

@@ -181,8 +181,8 @@ interface OutputViewOptions {
outputType?: 'stdout' | 'stderr' | 'both'; outputType?: 'stdout' | 'stderr' | 'both';
turn?: string; turn?: string;
raw?: boolean; raw?: boolean;
final?: boolean; // Only output final result with usage hint final?: boolean; // Explicit --final (same as default, kept for compatibility)
verbose?: boolean; // Show full metadata (original default behavior) verbose?: boolean; // Show full metadata + raw stdout/stderr
project?: string; // Optional project path for lookup project?: string; // Optional project path for lookup
} }
@@ -429,13 +429,13 @@ async function outputAction(conversationId: string | undefined, options: OutputV
} }
if (options.raw) { if (options.raw) {
// Raw output only (for piping) // Raw output only (for piping) — unprocessed stdout
if (result.stdout) console.log(result.stdout.content); if (result.stdout) console.log(result.stdout.content);
return; return;
} }
if (options.verbose) { if (options.verbose) {
// Verbose: full metadata + output (original default behavior) // Verbose: full metadata + raw stdout/stderr (for debugging)
console.log(chalk.bold.cyan('Execution Output\n')); console.log(chalk.bold.cyan('Execution Output\n'));
console.log(` ${chalk.gray('ID:')} ${result.conversationId}`); console.log(` ${chalk.gray('ID:')} ${result.conversationId}`);
console.log(` ${chalk.gray('Turn:')} ${result.turnNumber}`); console.log(` ${chalk.gray('Turn:')} ${result.turnNumber}`);
@@ -470,21 +470,12 @@ async function outputAction(conversationId: string | undefined, options: OutputV
return; return;
} }
// Default: final result only (equivalent to --final) // Default (and --final): output final result only
// Prefer finalOutput (agent_message only) > parsedOutput (filtered) > raw stdout // Prefer finalOutput (agent_message only) > parsedOutput (filtered) > raw stdout
const outputContent = result.finalOutput?.content || result.parsedOutput?.content || result.stdout?.content; const outputContent = result.finalOutput?.content || result.parsedOutput?.content || result.stdout?.content;
if (outputContent) { if (outputContent) {
console.log(outputContent); console.log(outputContent);
} }
console.log();
console.log(chalk.gray('\u2500'.repeat(60)));
console.log(chalk.dim(`Usage: ccw cli output ${conversationId} [options]`));
console.log(chalk.dim(' --verbose Show full metadata'));
console.log(chalk.dim(' --raw Raw output (no formatting)'));
console.log(chalk.dim(' --offset <n> Start from byte offset'));
console.log(chalk.dim(' --limit <n> Limit output bytes'));
console.log(chalk.dim(' --project <p> Specify project path explicitly'));
console.log(chalk.dim(` --resume ccw cli -p "..." --resume ${conversationId}`));
} }
/** /**
@@ -926,6 +917,9 @@ async function execAction(positionalPrompt: string | undefined, options: CliExec
} }
// Generate execution ID for streaming (use custom ID or auto-generated readable ID) // Generate execution ID for streaming (use custom ID or auto-generated readable ID)
if (!id) {
console.error(chalk.yellow('[WARN] --id not provided. Use --id <id> for reliable tracking. Auto-generating ID.'));
}
const executionId = id || generateExecutionId(tool); const executionId = id || generateExecutionId(tool);
const startTime = Date.now(); const startTime = Date.now();
const modelInfo = model ? ` @${model}` : ''; const modelInfo = model ? ` @${model}` : '';
@@ -1160,7 +1154,7 @@ async function execAction(positionalPrompt: string | undefined, options: CliExec
} }
console.log(chalk.dim(` Continue: ccw cli -p "..." --resume ${result.execution.id}`)); console.log(chalk.dim(` Continue: ccw cli -p "..." --resume ${result.execution.id}`));
if (!stream) { if (!stream) {
console.log(chalk.dim(` Output (optional): ccw cli output ${result.execution.id}`)); console.log(chalk.dim(` Output: ccw cli output ${result.execution.id}`));
} }
if (toFile) { if (toFile) {
const { resolve } = await import('path'); const { resolve } = await import('path');
@@ -1386,7 +1380,7 @@ async function showAction(options: { all?: boolean }): Promise<void> {
rows.push({ rows.push({
id: exec.id, id: exec.id,
tool: exec.tool, tool: exec.tool,
mode: exec.mode || 'analysis', mode: '-',
status: exec.status, status: exec.status,
prompt: exec.prompt_preview.replace(/\n/g, ' ').substring(0, 50), prompt: exec.prompt_preview.replace(/\n/g, ' ').substring(0, 50),
time: timeAgo, time: timeAgo,
@@ -1567,7 +1561,7 @@ async function historyAction(options: HistoryOptions): Promise<void> {
console.log(); console.log();
console.log(chalk.gray(' ' + '─'.repeat(70))); console.log(chalk.gray(' ' + '─'.repeat(70)));
console.log(chalk.dim(' Filter: ccw cli history --tool <gemini|codex|qwen> --limit <n>')); console.log(chalk.dim(' Filter: ccw cli history --tool <gemini|codex|qwen> --limit <n>'));
console.log(chalk.dim(' Output: ccw cli output <id> --final')); console.log(chalk.dim(' Output: ccw cli output <id>'));
console.log(); console.log();
} }
@@ -1768,7 +1762,7 @@ export async function cliCommand(
console.log(chalk.gray(' ccw cli --resume --tool gemini')); console.log(chalk.gray(' ccw cli --resume --tool gemini'));
console.log(chalk.gray(' ccw cli -p "..." --cache "@src/**/*.ts" --tool codex')); console.log(chalk.gray(' ccw cli -p "..." --cache "@src/**/*.ts" --tool codex'));
console.log(chalk.gray(' ccw cli -p "..." --cache "@src/**/*" --inject-mode progressive --tool gemini')); console.log(chalk.gray(' ccw cli -p "..." --cache "@src/**/*" --inject-mode progressive --tool gemini'));
console.log(chalk.gray(' ccw cli output <id> --final # View result with usage hint')); console.log(chalk.gray(' ccw cli output <id> # View final result (default)'));
console.log(); console.log();
console.log(' Cache format:'); console.log(' Cache format:');
console.log(chalk.gray(' --cache "@src/**/*.ts,@CLAUDE.md" # @patterns to pack')); console.log(chalk.gray(' --cache "@src/**/*.ts,@CLAUDE.md" # @patterns to pack'));
@@ -1781,8 +1775,9 @@ export async function cliCommand(
console.log(chalk.gray(' progressive: inject first 64KB with MCP continuation hint')); console.log(chalk.gray(' progressive: inject first 64KB with MCP continuation hint'));
console.log(); console.log();
console.log(' Output options (ccw cli output <id>):'); console.log(' Output options (ccw cli output <id>):');
console.log(chalk.gray(' --final Final result only with usage hint')); console.log(chalk.gray(' (default) Final result only (agent response text)'));
console.log(chalk.gray(' --raw Raw output only (no formatting, for piping)')); console.log(chalk.gray(' --verbose Full metadata + raw stdout/stderr'));
console.log(chalk.gray(' --raw Raw stdout only (no formatting, for piping)'));
console.log(chalk.gray(' --offset <n> Start from byte offset')); console.log(chalk.gray(' --offset <n> Start from byte offset'));
console.log(chalk.gray(' --limit <n> Limit output bytes')); console.log(chalk.gray(' --limit <n> Limit output bytes'));
console.log(); console.log();