mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-02-05 01:50:27 +08:00
feat: unify CLI output handling and enhance theme variables
- Updated `CliStreamMonitorNew`, `CliStreamMonitorLegacy`, and `CliViewerPage` components to prioritize `unitContent` from payloads, falling back to `data` when necessary.
- Enhanced `colorGenerator` to include legacy variables for compatibility with shadcn/ui.
- Refactored orchestrator index to unify node exports under a single module.
- Improved `appStore` to clear both new and legacy CSS variables when applying themes.
- Added new options to CLI execution for raw and final output modes, improving programmatic output handling.
- Enhanced `cli-output-converter` to normalize cumulative delta frames and avoid duplication in streaming outputs.
- Introduced a new unified workflow specification for prompt-template-based workflows, replacing the previous multi-type node system.
- Added tests for CLI final output handling and streaming output converter to ensure correct behavior in various scenarios.
@@ -1,37 +1,56 @@
---
name: flow-coordinator
description: Template-driven workflow coordinator with minimal state tracking. Executes command chains from workflow templates with slash-command execution (mainprocess/async). Triggers on "flow-coordinator", "workflow template", "orchestrate".
description: Template-driven workflow coordinator with minimal state tracking. Executes command chains from workflow templates OR unified PromptTemplate workflows. Supports slash-command and DAG-based execution. Triggers on "flow-coordinator", "workflow template", "orchestrate".
allowed-tools: Task, AskUserQuestion, Read, Write, Bash, Glob, Grep
---

# Flow Coordinator

Lightweight workflow coordinator that executes command chains from predefined templates, supporting slash-command execution with mainprocess (blocking) and async (background) modes.
Lightweight workflow coordinator supporting two workflow formats:

1. **Legacy Templates**: Command chains with slash-command execution
2. **Unified Workflows**: DAG-based PromptTemplate nodes (spec: `spec/unified-workflow-spec.md`)

## Specification Reference

- **Unified Workflow Spec**: @spec/unified-workflow-spec.md
- **Demo Workflow**: `ccw/data/flows/demo-unified-workflow.json`

## Architecture

```
User Task → Select Template → status.json Init → Execute Steps → Complete
     ↑                                                 │
     └──────────────── Resume (from status.json) ─────┘
User Task → Detect Format → Select Workflow → Init Status → Execute → Complete
                 │                                              │
                 ├─ Legacy Template                             │
                 │    └─ Sequential cmd execution               │
                 │                                              │
                 └─ Unified Workflow                            │
                      └─ DAG traversal with contextRefs         │
                                                                │
     └──────────────── Resume (from status.json) ──────────────┘

Step Execution:
  execution mode?
  ├─ mainprocess → SlashCommand (blocking, main process)
  └─ async → ccw cli --tool claude --mode write (background)
Execution Modes:
  ├─ analysis → Read-only, CLI --mode analysis
  ├─ write → File changes, CLI --mode write
  ├─ mainprocess → Blocking, synchronous
  └─ async → Background, ccw cli
```

## Core Concepts

**Template-Driven**: Workflows defined as JSON templates in `templates/`, decoupled from coordinator logic.

**Dual Format Support**:
- Legacy: `templates/*.json` with `cmd`, `args`, `execution`
- Unified: `ccw/data/flows/*.json` with `nodes`, `edges`, `contextRefs`

**Execution Type**: `slash-command` only
- ALL workflow commands (`/workflow:*`) use `slash-command` type
- Two execution modes:
  - `mainprocess`: SlashCommand (blocking, main process)
  - `async`: CLI background (ccw cli with claude tool)

**Unified PromptTemplate Model**: All workflow steps are natural language instructions with:
- `instruction`: What to execute (natural language)
- `outputName`: Name for output reference
- `contextRefs`: References to previous step outputs
- `tool`: Optional CLI tool (gemini/qwen/codex/claude)
- `mode`: Execution mode (analysis/write/mainprocess/async)
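For illustration, a single step under this model might carry a `data` payload like the following sketch (the specific values here are hypothetical, not taken from a shipped workflow):

```javascript
// Hypothetical PromptTemplate node data (illustrative only).
const nodeData = {
  label: 'Review Auth Module',
  instruction: 'Review {{plan}} and flag security issues in the auth module.',
  outputName: 'review_notes',   // later steps reference {{review_notes}}
  contextRefs: ['plan'],        // must match an upstream outputName
  tool: 'gemini',               // optional: gemini / qwen / codex / claude
  mode: 'analysis'              // analysis | write | mainprocess | async
};
```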
**Dynamic Discovery**: Templates discovered at runtime via Glob, not hardcoded.

**DAG Execution**: Unified workflows execute as directed acyclic graphs with parallel branches and conditional edges.

**Dynamic Discovery**: Both formats discovered at runtime via Glob.

---

@@ -83,7 +102,139 @@ async function executeSteps(status, statusPath) {

---

## Template Discovery
## Unified Workflow Execution

For workflows using the unified PromptTemplate format (`ccw/data/flows/*.json`):

```javascript
async function executeUnifiedWorkflow(workflow, task) {
  // 1. Initialize execution state
  const sessionId = `ufc-${timestamp()}`;
  const statusPath = `.workflow/.flow-coordinator/${sessionId}/status.json`;
  const state = {
    id: sessionId,
    workflow: workflow.id,
    goal: task,
    nodeStates: {},   // nodeId -> { status, result, error }
    outputs: {},      // outputName -> result
    complete: false
  };

  // 2. Topological sort for execution order
  const executionOrder = topologicalSort(workflow.nodes, workflow.edges);

  // 3. Execute nodes respecting DAG dependencies
  await executeDAG(workflow, executionOrder, state, statusPath);
}

async function executeDAG(workflow, order, state, statusPath) {
  for (const nodeId of order) {
    const node = workflow.nodes.find(n => n.id === nodeId);
    const data = node.data;

    // Check if all dependencies are satisfied
    if (!areDependenciesSatisfied(nodeId, workflow.edges, state)) {
      continue; // Will be executed when dependencies complete
    }

    // Resolve context references
    const resolvedInstruction = resolveContextRefs(
      data.instruction,
      data.contextRefs || [],
      state.outputs
    );

    // Execute based on mode
    state.nodeStates[nodeId] = { status: 'running' };
    write(statusPath, JSON.stringify(state, null, 2));

    const result = await executeNode(resolvedInstruction, data.tool, data.mode);

    // Store output for downstream nodes
    state.nodeStates[nodeId] = { status: 'completed', result };
    if (data.outputName) {
      state.outputs[data.outputName] = result;
    }
    write(statusPath, JSON.stringify(state, null, 2));
  }

  state.complete = true;
  write(statusPath, JSON.stringify(state, null, 2));
}

function resolveContextRefs(instruction, refs, outputs) {
  let resolved = instruction;
  for (const ref of refs) {
    const value = outputs[ref];
    const placeholder = `{{${ref}}}`;
    resolved = resolved.replace(new RegExp(placeholder, 'g'),
      typeof value === 'object' ? JSON.stringify(value) : String(value));
  }
  return resolved;
}

async function executeNode(instruction, tool, mode) {
  // Build CLI command based on tool and mode
  const cliTool = tool || 'gemini';
  const cliMode = mode === 'write' ? 'write' : 'analysis';

  if (mode === 'async') {
    // Background execution
    return Bash(
      `ccw cli -p "${escapePrompt(instruction)}" --tool ${cliTool} --mode ${cliMode}`,
      { run_in_background: true }
    );
  } else {
    // Synchronous execution
    return Bash(
      `ccw cli -p "${escapePrompt(instruction)}" --tool ${cliTool} --mode ${cliMode}`
    );
  }
}
```
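The pseudocode above relies on `topologicalSort` (and `escapePrompt`) helpers the hunk does not show. A minimal Kahn's-algorithm sketch of the sort, assuming edges are `{ source, target }` pairs as defined in the spec; the shipped helper may differ:

```javascript
// Minimal Kahn's-algorithm sketch (illustrative only).
function topologicalSort(nodes, edges) {
  const inDegree = new Map(nodes.map(n => [n.id, 0]));
  const downstream = new Map(nodes.map(n => [n.id, []]));

  for (const { source, target } of edges) {
    inDegree.set(target, (inDegree.get(target) || 0) + 1);
    downstream.get(source).push(target);
  }

  // Start with nodes that have no incoming edges
  const queue = nodes.filter(n => inDegree.get(n.id) === 0).map(n => n.id);
  const order = [];

  while (queue.length > 0) {
    const id = queue.shift();
    order.push(id);
    for (const next of downstream.get(id)) {
      inDegree.set(next, inDegree.get(next) - 1);
      if (inDegree.get(next) === 0) queue.push(next);
    }
  }

  if (order.length !== nodes.length) {
    throw new Error('Workflow contains a cycle (not a valid DAG)');
  }
  return order;
}
```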
### Unified Workflow Discovery

```javascript
async function discoverUnifiedWorkflows() {
  const files = Glob('*.json', { path: 'ccw/data/flows/' });

  const workflows = [];
  for (const file of files) {
    const content = JSON.parse(Read(file));
    // Detect unified format by checking for 'nodes' array
    if (content.nodes && Array.isArray(content.nodes)) {
      workflows.push({
        id: content.id,
        name: content.name,
        description: content.description,
        nodeCount: content.nodes.length,
        format: 'unified',
        file: file
      });
    }
  }
  return workflows;
}
```

### Format Detection

```javascript
function detectWorkflowFormat(content) {
  if (content.nodes && content.edges) {
    return 'unified'; // PromptTemplate DAG format
  }
  if (content.steps && content.steps[0]?.cmd) {
    return 'legacy'; // Command chain format
  }
  throw new Error('Unknown workflow format');
}
```

---

## Legacy Template Discovery

**Dynamic query** - never hardcode template list:
@@ -391,4 +542,14 @@ Templates discovered from `templates/*.json`:

| Document | Purpose |
|----------|---------|
| templates/*.json | Workflow templates (dynamic discovery) |
| spec/unified-workflow-spec.md | Unified PromptTemplate workflow specification |
| ccw/data/flows/*.json | Unified workflows (DAG format, dynamic discovery) |
| templates/*.json | Legacy workflow templates (command chain format) |

### Demo Workflows (Unified Format)

| File | Description | Nodes |
|------|-------------|-------|
| `demo-unified-workflow.json` | Auth implementation | 7 nodes: Analyze → Plan → Implement → Review → Tests → Report |
| `parallel-ci-workflow.json` | CI/CD pipeline | 8 nodes: Parallel checks → Merge → Conditional notify |
| `simple-analysis-workflow.json` | Analysis pipeline | 3 nodes: Explore → Analyze → Report |

.claude/skills/flow-coordinator/spec/unified-workflow-spec.md (new file, 324 lines)
@@ -0,0 +1,324 @@
# Unified Workflow Specification v1.0

> Standard format for PromptTemplate-based workflow definitions

## Overview

This specification defines the JSON schema for unified workflows where **all nodes are prompt templates** with natural language instructions. This replaces the previous multi-type node system with a single, flexible model.

**Design Philosophy**: Every workflow step is a natural language instruction that can optionally specify execution tool and mode. Data flows through named outputs referenced by subsequent steps.

---

## Schema Definition

### Root Object: `Flow`

```typescript
interface Flow {
  id: string;                          // Unique identifier (kebab-case)
  name: string;                        // Display name
  description?: string;                // Human-readable description
  version: number;                     // Schema version (currently 1)
  created_at: string;                  // ISO 8601 timestamp
  updated_at: string;                  // ISO 8601 timestamp
  nodes: FlowNode[];                   // Workflow steps
  edges: FlowEdge[];                   // Step connections (DAG)
  variables: Record<string, unknown>;  // Global workflow variables
  metadata: FlowMetadata;              // Classification and source info
}
```

### FlowNode

```typescript
interface FlowNode {
  id: string;                          // Unique node ID
  type: 'prompt-template';             // Always 'prompt-template'
  position: { x: number; y: number };  // Canvas position
  data: PromptTemplateNodeData;        // Node configuration
}
```

### PromptTemplateNodeData

```typescript
interface PromptTemplateNodeData {
  // === Required ===
  label: string;                       // Display label in editor
  instruction: string;                 // Natural language instruction

  // === Data Flow ===
  outputName?: string;                 // Name for output reference
  contextRefs?: string[];              // References to previous outputs

  // === Execution Config ===
  tool?: CliTool;                      // 'gemini' | 'qwen' | 'codex' | 'claude'
  mode?: ExecutionMode;                // 'analysis' | 'write' | 'mainprocess' | 'async'

  // === Runtime State (populated during execution) ===
  executionStatus?: ExecutionStatus;
  executionError?: string;
  executionResult?: unknown;
}
```

### FlowEdge

```typescript
interface FlowEdge {
  id: string;                          // Unique edge ID
  source: string;                      // Source node ID
  target: string;                      // Target node ID
  type?: string;                       // Edge type (default: 'default')
  data?: {
    label?: string;                    // Edge label (e.g., 'parallel')
    condition?: string;                // Conditional expression
  };
}
```

### FlowMetadata

```typescript
interface FlowMetadata {
  source?: 'template' | 'custom' | 'imported';
  tags?: string[];
  category?: string;
}
```

---

## Instruction Syntax

### Context References

Use `{{outputName}}` syntax to reference outputs from previous steps:

```
Analyze {{requirements_analysis}} and create implementation plan.
```

### Nested Property Access

```
If {{ci_report.status}} === 'failed', stop execution.
```
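The coordinator sketch shown earlier only substitutes whole output names; resolving a dotted path like `ci_report.status` needs a nested lookup. A minimal sketch of that, assuming dot-separated paths into the `outputs` map (helper name is illustrative, not part of the spec):

```javascript
// Illustrative nested-path resolver; the actual runtime may substitute differently.
function resolveRefPath(refPath, outputs) {
  const [name, ...path] = refPath.split('.');
  let value = outputs[name];
  for (const key of path) {
    if (value == null) return undefined;
    value = value[key];
  }
  return value;
}

// resolveRefPath('ci_report.status', { ci_report: { status: 'failed' } }) === 'failed'
```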
### Multiple References

```
Combine {{lint_result}}, {{typecheck_result}}, and {{test_result}} into report.
```

---

## Execution Modes

| Mode | Behavior | Use Case |
|------|----------|----------|
| `analysis` | Read-only, no file changes | Code review, exploration |
| `write` | Can create/modify/delete files | Implementation, fixes |
| `mainprocess` | Blocking, synchronous | Interactive steps |
| `async` | Background, non-blocking | Long-running tasks |
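As a rough illustration of how a coordinator might dispatch on these modes (the skill's `executeNode` above only distinguishes async vs. synchronous CLI runs; routing `mainprocess` through `SlashCommand` follows the coordinator's architecture diagram and is an assumption here, not spec text):

```javascript
// Illustrative mode dispatch; the shipped coordinator may route differently.
async function dispatchByMode(instruction, tool, mode) {
  switch (mode) {
    case 'mainprocess':
      // Blocking execution in the main process (per the coordinator architecture)
      return SlashCommand(instruction);
    case 'async':
      // Background CLI execution, non-blocking
      return Bash(`ccw cli -p "${instruction}" --tool ${tool || 'claude'} --mode write`,
        { run_in_background: true });
    case 'write':
      return Bash(`ccw cli -p "${instruction}" --tool ${tool || 'gemini'} --mode write`);
    case 'analysis':
    default:
      // Read-only analysis is the safe default
      return Bash(`ccw cli -p "${instruction}" --tool ${tool || 'gemini'} --mode analysis`);
  }
}
```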
---

## DAG Execution Semantics

### Sequential Execution

Nodes with a single input edge execute after their predecessor completes.

```
[A] ──▶ [B] ──▶ [C]
```

### Parallel Execution

Multiple edges from the same source trigger parallel execution:

```
      ┌──▶ [B]
[A] ──┤
      └──▶ [C]
```
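The coordinator pseudocode earlier walks the topological order one node at a time; actually running sibling branches concurrently requires batching nodes whose dependencies are already complete. A minimal scheduling sketch under that assumption, reusing `areDependenciesSatisfied` from the skill pseudocode and a hypothetical `runNode` callback:

```javascript
// Illustrative parallel scheduler: repeatedly run every node whose
// predecessors have completed, awaiting each batch together.
async function executeDAGInParallel(workflow, state, runNode) {
  const pending = new Set(workflow.nodes.map(n => n.id));

  while (pending.size > 0) {
    const ready = [...pending].filter(id =>
      areDependenciesSatisfied(id, workflow.edges, state));

    if (ready.length === 0) {
      throw new Error('No runnable nodes left - cycle or unsatisfied dependency');
    }

    // Sibling branches (e.g. lint and test fanning out from one source) run together.
    await Promise.all(ready.map(async (id) => {
      const node = workflow.nodes.find(n => n.id === id);
      await runNode(node, state);
      pending.delete(id);
    }));
  }
}
```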
### Merge Point

Node with multiple input edges waits for all predecessors:

```
[B] ──┐
      ├──▶ [D]
[C] ──┘
```
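A minimal sketch of the `areDependenciesSatisfied` check used in the coordinator pseudocode, assuming node completion is tracked in `state.nodeStates` as shown there:

```javascript
// A node is runnable only when every incoming edge's source node has completed.
function areDependenciesSatisfied(nodeId, edges, state) {
  const predecessors = edges
    .filter(e => e.target === nodeId)
    .map(e => e.source);

  return predecessors.every(
    id => state.nodeStates[id]?.status === 'completed'
  );
}
```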
### Conditional Branching

Edge `data.condition` specifies branch condition:

```json
{
  "id": "e-decision-success",
  "source": "decision",
  "target": "notify-success",
  "data": { "condition": "decision.result === 'pass'" }
}
```
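The spec does not pin down how condition strings are evaluated. One conservative sketch is to restrict them to `<outputPath> === '<literal>'` comparisons resolved against prior outputs rather than executing arbitrary code; the helper name and grammar below are assumptions, and `resolveRefPath` is the nested-lookup sketch from the Instruction Syntax section:

```javascript
// Illustrative condition evaluator for edges like
//   "decision.result === 'pass'"
// Only simple equality against a quoted literal is supported in this sketch.
function evaluateEdgeCondition(condition, outputs) {
  if (!condition) return true; // unconditional edge

  const match = condition.match(/^\s*([\w.]+)\s*===\s*'([^']*)'\s*$/);
  if (!match) {
    throw new Error(`Unsupported condition expression: ${condition}`);
  }

  const [, refPath, expected] = match;
  return String(resolveRefPath(refPath, outputs)) === expected;
}
```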
---

## Example: Minimal Workflow

```json
{
  "id": "simple-analysis",
  "name": "Simple Analysis",
  "version": 1,
  "created_at": "2026-02-04T00:00:00.000Z",
  "updated_at": "2026-02-04T00:00:00.000Z",
  "nodes": [
    {
      "id": "analyze",
      "type": "prompt-template",
      "position": { "x": 100, "y": 100 },
      "data": {
        "label": "Analyze Code",
        "instruction": "Analyze the authentication module for security issues.",
        "outputName": "analysis",
        "tool": "gemini",
        "mode": "analysis"
      }
    },
    {
      "id": "report",
      "type": "prompt-template",
      "position": { "x": 100, "y": 250 },
      "data": {
        "label": "Generate Report",
        "instruction": "Based on {{analysis}}, generate a security report with recommendations.",
        "outputName": "report",
        "contextRefs": ["analysis"]
      }
    }
  ],
  "edges": [
    { "id": "e1", "source": "analyze", "target": "report" }
  ],
  "variables": {},
  "metadata": { "source": "custom", "tags": ["security"] }
}
```

---

## Example: Parallel with Merge

```json
{
  "nodes": [
    {
      "id": "start",
      "type": "prompt-template",
      "position": { "x": 200, "y": 50 },
      "data": {
        "label": "Prepare",
        "instruction": "Set up build environment",
        "outputName": "env"
      }
    },
    {
      "id": "lint",
      "type": "prompt-template",
      "position": { "x": 100, "y": 200 },
      "data": {
        "label": "Lint",
        "instruction": "Run linter checks",
        "outputName": "lint_result",
        "tool": "codex",
        "mode": "analysis",
        "contextRefs": ["env"]
      }
    },
    {
      "id": "test",
      "type": "prompt-template",
      "position": { "x": 300, "y": 200 },
      "data": {
        "label": "Test",
        "instruction": "Run unit tests",
        "outputName": "test_result",
        "tool": "codex",
        "mode": "analysis",
        "contextRefs": ["env"]
      }
    },
    {
      "id": "merge",
      "type": "prompt-template",
      "position": { "x": 200, "y": 350 },
      "data": {
        "label": "Merge Results",
        "instruction": "Combine {{lint_result}} and {{test_result}} into CI report",
        "outputName": "ci_report",
        "contextRefs": ["lint_result", "test_result"]
      }
    }
  ],
  "edges": [
    { "id": "e1", "source": "start", "target": "lint", "data": { "label": "parallel" } },
    { "id": "e2", "source": "start", "target": "test", "data": { "label": "parallel" } },
    { "id": "e3", "source": "lint", "target": "merge" },
    { "id": "e4", "source": "test", "target": "merge" }
  ]
}
```

---

## Migration from Old Format

### Old Template Step

```json
{
  "cmd": "/workflow:lite-plan",
  "args": "\"{{goal}}\"",
  "execution": { "type": "slash-command", "mode": "mainprocess" }
}
```

### New PromptTemplate Node

```json
{
  "id": "plan",
  "type": "prompt-template",
  "data": {
    "label": "Create Plan",
    "instruction": "Execute /workflow:lite-plan for: {{goal}}",
    "outputName": "plan_result",
    "mode": "mainprocess"
  }
}
```
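For bulk migration, a mechanical conversion along these lines seems plausible: a sketch under the assumptions that the old step's `mode` carries over, `{{goal}}`-style placeholders are kept verbatim, and generated ids/positions are arbitrary (all names here are illustrative):

```javascript
// Illustrative converter from a legacy command-chain step to a PromptTemplate node.
function migrateLegacyStep(step, index) {
  return {
    id: `step-${index + 1}`,
    type: 'prompt-template',
    position: { x: 100, y: 100 + index * 150 },   // simple vertical layout
    data: {
      label: step.cmd,
      // Keep the original slash command and arguments as a natural language instruction.
      instruction: `Execute ${step.cmd} for: ${step.args ?? ''}`.trim(),
      outputName: `step_${index + 1}_result`,
      mode: step.execution?.mode ?? 'mainprocess'
    }
  };
}
```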
---

## Validation Rules

1. **Unique IDs**: All node and edge IDs must be unique within the flow
2. **Valid References**: `contextRefs` must reference existing `outputName` values
3. **DAG Structure**: No circular dependencies allowed
4. **Required Fields**: `id`, `name`, `version`, `nodes`, `edges` are required
5. **Node Type**: All nodes must have `type: 'prompt-template'`
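A minimal sketch that checks the rules above against a parsed flow object (illustrative; the shipped editor/runtime may validate differently, and the cycle check reuses the Kahn's-algorithm sketch shown earlier):

```javascript
// Illustrative validator for the rules listed above.
function validateFlow(flow) {
  const errors = [];

  for (const field of ['id', 'name', 'version', 'nodes', 'edges']) {
    if (flow[field] === undefined) errors.push(`Missing required field: ${field}`);
  }

  const nodeIds = new Set();
  const outputNames = new Set();
  for (const node of flow.nodes ?? []) {
    if (nodeIds.has(node.id)) errors.push(`Duplicate node id: ${node.id}`);
    nodeIds.add(node.id);
    if (node.type !== 'prompt-template') errors.push(`Invalid node type on ${node.id}`);
    if (node.data?.outputName) outputNames.add(node.data.outputName);
  }

  const edgeIds = new Set();
  for (const edge of flow.edges ?? []) {
    if (edgeIds.has(edge.id)) errors.push(`Duplicate edge id: ${edge.id}`);
    edgeIds.add(edge.id);
  }

  for (const node of flow.nodes ?? []) {
    for (const ref of node.data?.contextRefs ?? []) {
      if (!outputNames.has(ref.split('.')[0])) {
        errors.push(`Node ${node.id} references unknown output: ${ref}`);
      }
    }
  }

  // DAG check: the topologicalSort sketch throws on cycles.
  try { topologicalSort(flow.nodes ?? [], flow.edges ?? []); }
  catch (e) { errors.push(e.message); }

  return errors;
}
```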
---

## File Location

Workflow files stored in: `ccw/data/flows/*.json`

Template discovery: `Glob('*.json', { path: 'ccw/data/flows/' })`

@@ -250,7 +250,7 @@ export function CliStreamMonitorNew({ isOpen, onClose }: CliStreamMonitorNewProp
        invalidateActive();
      } else if (type === 'CLI_OUTPUT') {
        const p = payload as CliStreamOutputPayload;
        const unitContent = p.unit?.content;
        const unitContent = p.unit?.content ?? p.data;
        const unitType = p.unit?.type || p.chunkType;

        let content: string;
@@ -267,7 +267,7 @@ export function CliStreamMonitorNew({ isOpen, onClose }: CliStreamMonitorNewProp
            content = JSON.stringify(unitContent);
          }
        } else {
          content = typeof p.data === 'string' ? p.data : JSON.stringify(p.data);
          content = typeof unitContent === 'string' ? unitContent : JSON.stringify(unitContent);
        }

        const lines = content.split('\n');

@@ -257,7 +257,7 @@ export function CliStreamMonitor({ isOpen, onClose }: CliStreamMonitorProps) {
        invalidateActive();
      } else if (type === 'CLI_OUTPUT') {
        const p = payload as CliStreamOutputPayload;
        const unitContent = p.unit?.content;
        const unitContent = p.unit?.content ?? p.data;
        const unitType = p.unit?.type || p.chunkType;

        let content: string;
@@ -274,7 +274,7 @@ export function CliStreamMonitor({ isOpen, onClose }: CliStreamMonitorProps) {
            content = JSON.stringify(unitContent);
          }
        } else {
          content = typeof p.data === 'string' ? p.data : JSON.stringify(p.data);
          content = typeof unitContent === 'string' ? unitContent : JSON.stringify(unitContent);
        }

        const lines = content.split('\n');
@@ -98,6 +98,25 @@ export function generateThemeFromHue(
    vars['--active'] = `${normalizedHue} 15% 90%`;
    vars['--focus'] = `${normalizedHue} 70% 60%`;

    // Legacy variables for shadcn/ui compatibility
    vars['--background'] = vars['--bg'];
    vars['--foreground'] = vars['--text'];
    vars['--card'] = vars['--surface'];
    vars['--card-foreground'] = vars['--text'];
    vars['--primary-foreground'] = `0 0% 100%`;
    vars['--secondary-foreground'] = `0 0% 100%`;
    vars['--accent-foreground'] = `0 0% 100%`;
    vars['--destructive-foreground'] = `0 0% 100%`;
    vars['--muted-foreground'] = vars['--text-secondary'];
    vars['--sidebar-background'] = `${normalizedHue} 30% 97%`;
    vars['--sidebar-foreground'] = vars['--text'];
    vars['--input'] = vars['--border'];
    vars['--ring'] = vars['--accent'];
    vars['--indigo'] = `239 65% 60%`;
    vars['--indigo-light'] = `239 65% 92%`;
    vars['--orange'] = `25 90% 55%`;
    vars['--orange-light'] = `25 90% 92%`;
  } else {
    // Dark mode: Medium saturation, low lightness backgrounds
    vars['--bg'] = `${normalizedHue} 20% 10%`;
@@ -163,6 +182,25 @@ export function generateThemeFromHue(
    vars['--hover'] = `${normalizedHue} 18% 16%`;
    vars['--active'] = `${normalizedHue} 20% 20%`;
    vars['--focus'] = `${normalizedHue} 70% 60%`;

    // Legacy variables for shadcn/ui compatibility
    vars['--background'] = vars['--bg'];
    vars['--foreground'] = vars['--text'];
    vars['--card'] = vars['--surface'];
    vars['--card-foreground'] = vars['--text'];
    vars['--primary-foreground'] = `${normalizedHue} 30% 10%`;
    vars['--secondary-foreground'] = `0 0% 100%`;
    vars['--accent-foreground'] = `${normalizedHue} 30% 10%`;
    vars['--destructive-foreground'] = `0 0% 100%`;
    vars['--muted-foreground'] = vars['--text-secondary'];
    vars['--sidebar-background'] = `${normalizedHue} 25% 12%`;
    vars['--sidebar-foreground'] = vars['--text'];
    vars['--input'] = vars['--border'];
    vars['--ring'] = vars['--accent'];
    vars['--indigo'] = `239 60% 55%`;
    vars['--indigo-light'] = `239 40% 20%`;
    vars['--orange'] = `25 85% 50%`;
    vars['--orange-light'] = `25 50% 20%`;
  }

  return vars;
@@ -232,7 +232,7 @@ export function CliViewerPage() {
        invalidateActive();
      } else if (type === 'CLI_OUTPUT') {
        const p = payload as CliStreamOutputPayload;
        const unitContent = p.unit?.content;
        const unitContent = p.unit?.content ?? p.data;
        const unitType = p.unit?.type || p.chunkType;

        let content: string;
@@ -249,7 +249,7 @@ export function CliViewerPage() {
            content = JSON.stringify(unitContent);
          }
        } else {
          content = typeof p.data === 'string' ? p.data : JSON.stringify(p.data);
          content = typeof unitContent === 'string' ? unitContent : JSON.stringify(unitContent);
        }

        const lines = content.split('\n');

@@ -8,8 +8,5 @@ export { NodePalette } from './NodePalette';
export { PropertyPanel } from './PropertyPanel';
export { FlowToolbar } from './FlowToolbar';

// Node components
export { SlashCommandNode } from './nodes/SlashCommandNode';
export { FileOperationNode } from './nodes/FileOperationNode';
export { ConditionalNode } from './nodes/ConditionalNode';
export { ParallelNode } from './nodes/ParallelNode';
// Node components (unified system)
export { NodeWrapper, PromptTemplateNode, nodeTypes } from './nodes';

@@ -51,8 +51,9 @@ const applyThemeToDocument = (
  document.documentElement.classList.remove('light', 'dark');
  document.documentElement.classList.add(resolvedTheme);

  // Clear custom CSS variables list
  // Clear custom CSS variables list (includes both new and legacy variables)
  const customVars = [
    // New theme system variables
    '--bg', '--bg-secondary', '--surface', '--surface-hover',
    '--border', '--border-hover', '--text', '--text-secondary',
    '--text-tertiary', '--text-disabled', '--accent', '--accent-hover',
@@ -63,7 +64,13 @@ const applyThemeToDocument = (
    '--warning', '--warning-light', '--warning-text', '--error',
    '--error-light', '--error-text', '--info', '--info-light',
    '--info-text', '--destructive', '--destructive-hover', '--destructive-light',
    '--hover', '--active', '--focus'
    '--hover', '--active', '--focus',
    // Legacy shadcn/ui compatibility variables
    '--background', '--foreground', '--card', '--card-foreground',
    '--primary-foreground', '--secondary-foreground', '--accent-foreground',
    '--destructive-foreground', '--muted-foreground', '--sidebar-background',
    '--sidebar-foreground', '--input', '--ring', '--indigo', '--indigo-light',
    '--orange', '--orange-light'
  ];

  // Apply custom theme or preset theme
@@ -141,6 +141,8 @@ interface CliExecOptions {
  // Template/Rules options
  rule?: string;      // Template name for auto-discovery (defines $PROTO and $TMPL env vars)
  // Output options
  raw?: boolean;      // Raw output only (best for piping)
  final?: boolean;    // Final agent result only (best for piping)
  toFile?: string;    // Save output to file
}

@@ -590,7 +592,30 @@ async function statusAction(debug?: boolean): Promise<void> {
 * @param {Object} options - CLI options
 */
async function execAction(positionalPrompt: string | undefined, options: CliExecOptions): Promise<void> {
  const { prompt: optionPrompt, file, tool: userTool, mode = 'analysis', model, cd, includeDirs, stream, resume, id, noNative, cache, injectMode, debug, uncommitted, base, commit, title, rule, toFile } = options;
  const {
    prompt: optionPrompt,
    file,
    tool: userTool,
    mode = 'analysis',
    model,
    cd,
    includeDirs,
    stream,
    resume,
    id,
    noNative,
    cache,
    injectMode,
    debug,
    uncommitted,
    base,
    commit,
    title,
    rule,
    toFile,
    raw,
    final: finalOnly,
  } = options;
  // Determine the tool to use: explicit --tool option, or defaultTool from config
  let tool = userTool;

@@ -857,8 +882,17 @@ async function execAction(positionalPrompt: string | undefined, options: CliExec
  const nativeMode = noNative ? ' (prompt-concat)' : '';
  const idInfo = id ? ` [${id}]` : '';

  // Programmatic output mode:
  // - `--raw`: stdout/stderr passthrough semantics (minimal noise)
  // - `--final`: agent-message only semantics (minimal noise)
  // - non-TTY stdout (e.g. called from another process): default to final-only unless `--stream` is used
  const programmaticOutput = Boolean(raw || finalOnly) || (!process.stdout.isTTY && !stream);
  const showUi = !programmaticOutput;
  const useRawOutput = Boolean(raw);
  const useFinalOnlyOutput = Boolean(finalOnly) || (!useRawOutput && !process.stdout.isTTY && !stream);

  // Show merge details
  if (isMerge) {
  if (isMerge && showUi) {
    console.log(chalk.gray('  Merging conversations:'));
    for (const rid of resumeIds) {
      console.log(chalk.gray(`    • ${rid}`));

@@ -871,9 +905,11 @@ async function execAction(positionalPrompt: string | undefined, options: CliExec
  const startTime = Date.now();
  const modelInfo = model ? ` @${model}` : '';
  const spinnerBaseText = `Executing ${tool}${modelInfo} (${mode} mode${resumeInfo}${nativeMode})${idInfo}...`;
  console.log();
  if (showUi) {
    console.log();
  }

  const spinner = stream ? null : createSpinner(`  ${spinnerBaseText}`).start();
  const spinner = (showUi && !stream) ? createSpinner(`  ${spinnerBaseText}`).start() : null;
  const elapsedInterval = spinner
    ? setInterval(() => {
        const elapsedSeconds = Math.floor((Date.now() - startTime) / 1000);
@@ -882,7 +918,7 @@ async function execAction(positionalPrompt: string | undefined, options: CliExec
    : null;
  elapsedInterval?.unref?.();

  if (!spinner) {
  if (showUi && !spinner) {
    console.log(chalk.cyan(`  ${spinnerBaseText}\n`));
  }

@@ -892,7 +928,7 @@ async function execAction(positionalPrompt: string | undefined, options: CliExec
    if (elapsedInterval) clearInterval(elapsedInterval);
    if (spinner) {
      spinner.warn(`Interrupted by ${signal} (${Math.floor(duration / 1000)}s elapsed)`);
    } else {
    } else if (showUi) {
      console.log(chalk.yellow(`\n  Interrupted by ${signal}`));
    }

@@ -1028,9 +1064,15 @@ async function execAction(positionalPrompt: string | undefined, options: CliExec
    // If not streaming (default), print output now
    // Prefer parsedOutput (from stream parser) over raw stdout for better formatting
    if (!stream) {
      const output = result.parsedOutput || result.stdout;
      const output = useRawOutput
        ? result.stdout
        : (useFinalOnlyOutput ? (result.finalOutput || result.parsedOutput || result.stdout) : (result.parsedOutput || result.stdout));
      if (output) {
        console.log(output);
        if (programmaticOutput) {
          process.stdout.write(output);
        } else {
          console.log(output);
        }

        // Save to file if --to-file is specified
        if (toFile) {
@@ -1051,8 +1093,11 @@ async function execAction(positionalPrompt: string | undefined, options: CliExec
      }
    }

    // Print summary with execution ID and turn info
    console.log();
    // Print summary with execution ID and turn info (interactive mode only)
    if (showUi) {
      console.log();
    }

    if (result.success) {
      // Save streaming output to file if needed
      if (stream && toFile && streamBuffer) {
@@ -1068,31 +1113,34 @@ async function execAction(positionalPrompt: string | undefined, options: CliExec
        }
      }

      if (!spinner) {
      if (showUi && !spinner) {
        const turnInfo = result.conversation.turn_count > 1
          ? ` (turn ${result.conversation.turn_count})`
          : '';
        console.log(chalk.green(`  ✓ Completed in ${(result.execution.duration_ms / 1000).toFixed(1)}s${turnInfo}`));
      }
      console.log(chalk.gray(`  ID: ${result.execution.id}`));
      if (isMerge && !id) {
        // Merge without custom ID: updated all source conversations
        console.log(chalk.gray(`  Updated ${resumeIds.length} conversations: ${resumeIds.join(', ')}`));
      } else if (isMerge && id) {
        // Merge with custom ID: created new merged conversation
        console.log(chalk.gray(`  Created merged conversation from ${resumeIds.length} sources`));
      }
      if (result.conversation.turn_count > 1) {
        console.log(chalk.gray(`  Total: ${result.conversation.turn_count} turns, ${(result.conversation.total_duration_ms / 1000).toFixed(1)}s`));
      }
      console.log(chalk.dim(`  Continue: ccw cli -p "..." --resume ${result.execution.id}`));
      if (!stream) {
        console.log(chalk.dim(`  Output (optional): ccw cli output ${result.execution.id}`));
      }
      if (toFile) {
        const { resolve } = await import('path');
        const filePath = resolve(cd || process.cwd(), toFile);
        console.log(chalk.green(`  Saved to: ${filePath}`));

      if (showUi) {
        console.log(chalk.gray(`  ID: ${result.execution.id}`));
        if (isMerge && !id) {
          // Merge without custom ID: updated all source conversations
          console.log(chalk.gray(`  Updated ${resumeIds.length} conversations: ${resumeIds.join(', ')}`));
        } else if (isMerge && id) {
          // Merge with custom ID: created new merged conversation
          console.log(chalk.gray(`  Created merged conversation from ${resumeIds.length} sources`));
        }
        if (result.conversation.turn_count > 1) {
          console.log(chalk.gray(`  Total: ${result.conversation.turn_count} turns, ${(result.conversation.total_duration_ms / 1000).toFixed(1)}s`));
        }
        console.log(chalk.dim(`  Continue: ccw cli -p "..." --resume ${result.execution.id}`));
        if (!stream) {
          console.log(chalk.dim(`  Output (optional): ccw cli output ${result.execution.id}`));
        }
        if (toFile) {
          const { resolve } = await import('path');
          const filePath = resolve(cd || process.cwd(), toFile);
          console.log(chalk.green(`  Saved to: ${filePath}`));
        }
      }

      // Notify dashboard: execution completed (legacy)

@@ -1129,41 +1177,48 @@ async function execAction(positionalPrompt: string | undefined, options: CliExec
        process.exit(0);
      }, 500);
    } else {
      if (!spinner) {
        console.log(chalk.red(`  ✗ Failed (${result.execution.status})`));
      }
      console.log(chalk.gray(`  ID: ${result.execution.id}`));
      console.log(chalk.gray(`  Duration: ${(result.execution.duration_ms / 1000).toFixed(1)}s`));
      console.log(chalk.gray(`  Exit Code: ${result.execution.exit_code}`));

      // Show stderr with better formatting
      if (result.stderr) {
        console.log();
        console.log(chalk.red.bold('  Error Output:'));
        console.log(chalk.gray('  ' + '─'.repeat(60)));
        // Indent stderr for better readability
        const stderrLines = result.stderr.split('\n');
        for (const line of stderrLines.slice(0, 30)) { // Limit to 30 lines
          console.error(chalk.red(`  ${line}`));
      if (!showUi) {
        // Programmatic mode: avoid banners/hints; write stderr only if available.
        if (result.stderr) {
          process.stderr.write(result.stderr);
        }
        if (stderrLines.length > 30) {
          console.log(chalk.yellow(`  ... ${stderrLines.length - 30} more lines`));
          console.log(chalk.cyan(`  💡 View full output: ccw cli output ${result.execution.id}`));
      } else {
        if (!spinner) {
          console.log(chalk.red(`  ✗ Failed (${result.execution.status})`));
        }
        console.log(chalk.gray(`  ID: ${result.execution.id}`));
        console.log(chalk.gray(`  Duration: ${(result.execution.duration_ms / 1000).toFixed(1)}s`));
        console.log(chalk.gray(`  Exit Code: ${result.execution.exit_code}`));

        // Show stderr with better formatting
        if (result.stderr) {
          console.log();
          console.log(chalk.red.bold('  Error Output:'));
          console.log(chalk.gray('  ' + '─'.repeat(60)));
          // Indent stderr for better readability
          const stderrLines = result.stderr.split('\n');
          for (const line of stderrLines.slice(0, 30)) { // Limit to 30 lines
            console.error(chalk.red(`  ${line}`));
          }
          if (stderrLines.length > 30) {
            console.log(chalk.yellow(`  ... ${stderrLines.length - 30} more lines`));
            console.log(chalk.cyan(`  💡 View full output: ccw cli output ${result.execution.id}`));
            console.log();
          }
          console.log(chalk.gray('  ' + '─'.repeat(60)));
        }
        console.log(chalk.gray('  ' + '─'.repeat(60)));
      }

      // Show troubleshooting hints
      console.log();
      console.log(chalk.yellow.bold('  Troubleshooting:'));
      console.log(chalk.gray(`  • Check if ${tool} is properly installed: ccw cli status`));
      console.log(chalk.gray(`  • Enable debug mode: DEBUG=true ccw cli -p "..." or set DEBUG=true && ccw cli -p "..."`));
      if (result.stderr?.includes('API key') || result.stderr?.includes('Authentication')) {
        console.log(chalk.gray(`  • Check API key configuration for ${tool}`));
      }
      if (result.stderr?.includes('rate limit')) {
        console.log(chalk.gray(`  • Wait and retry - rate limit exceeded`));
        // Show troubleshooting hints
        console.log();
        console.log(chalk.yellow.bold('  Troubleshooting:'));
        console.log(chalk.gray(`  • Check if ${tool} is properly installed: ccw cli status`));
        console.log(chalk.gray(`  • Enable debug mode: DEBUG=true ccw cli -p "..." or set DEBUG=true && ccw cli -p "..."`));
        if (result.stderr?.includes('API key') || result.stderr?.includes('Authentication')) {
          console.log(chalk.gray(`  • Check API key configuration for ${tool}`));
        }
        if (result.stderr?.includes('rate limit')) {
          console.log(chalk.gray(`  • Wait and retry - rate limit exceeded`));
        }
      }

      // Notify dashboard: execution failed (legacy)
@@ -107,6 +107,12 @@ export class PlainTextParser implements IOutputParser {
export class JsonLinesParser implements IOutputParser {
  private buffer: string = '';

  // Gemini "message" frames may be true deltas OR cumulative content (varies by CLI/version).
  // Track cumulative assistant content so we can normalize cumulative frames into true deltas and
  // avoid emitting duplicated content downstream (terminal + dashboard + final reconstruction).
  private geminiAssistantCumulative: string = '';
  private geminiSawAssistantDelta: boolean = false;

  /**
   * Classify non-JSON content to determine appropriate output type
   * Helps distinguish real errors from normal progress/output sent to stderr
@@ -294,12 +300,67 @@ export class JsonLinesParser implements IOutputParser {
    if (json.type === 'message' && json.role) {
      // Gemini assistant/user message
      if (json.role === 'assistant') {
        // Delta messages use 'streaming_content' type - aggregated to agent_message later
        // Non-delta (final) messages use 'agent_message' type directly
        const outputType = json.delta === true ? 'streaming_content' : 'agent_message';
        const content = json.content || '';
        if (!content) {
          return null;
        }

        // Delta messages use 'streaming_content' type (should be incremental).
        // Some CLIs send delta=true with cumulative content; normalize to a suffix-delta when possible.
        if (json.delta === true) {
          this.geminiSawAssistantDelta = true;

          // Duplicate frame
          if (content === this.geminiAssistantCumulative) {
            return null;
          }

          // Cumulative frame (new content starts with previous content)
          if (this.geminiAssistantCumulative && content.startsWith(this.geminiAssistantCumulative)) {
            const delta = content.slice(this.geminiAssistantCumulative.length);
            this.geminiAssistantCumulative = content;
            if (!delta) {
              return null;
            }
            return {
              type: 'streaming_content',
              content: delta,
              timestamp
            };
          }

          // Unexpected reset/shortening: treat as a fresh stream restart to avoid negative slicing
          if (this.geminiAssistantCumulative && this.geminiAssistantCumulative.startsWith(content)) {
            this.geminiAssistantCumulative = content;
            return {
              type: 'streaming_content',
              content,
              timestamp
            };
          }

          // True delta frame (append-only)
          this.geminiAssistantCumulative += content;
          return {
            type: 'streaming_content',
            content,
            timestamp
          };
        }

        // Non-delta (final) messages use 'agent_message' type directly.
        // If we already streamed deltas for this assistant message, skip this final frame to avoid duplication
        // in streaming UIs (frontend already has the assembled content from deltas).
        if (this.geminiSawAssistantDelta) {
          // Keep cumulative for potential later comparisons but do not emit.
          this.geminiAssistantCumulative = content;
          return null;
        }

        this.geminiAssistantCumulative = content;
        return {
          type: outputType,
          content: json.content || '',
          type: 'agent_message',
          content,
          timestamp
        };
      }

@@ -1141,17 +1202,24 @@ export function flattenOutputUnits(
  let processedUnits = units;
  const streamingUnits = units.filter(u => u.type === 'streaming_content');
  if (streamingUnits.length > 0) {
    // Concatenate all streaming_content into one
    const concatenatedContent = streamingUnits
      .map(u => typeof u.content === 'string' ? u.content : '')
      .join('');
    const hasAgentMessage = units.some(u => u.type === 'agent_message');

    // If a non-delta final agent_message already exists, prefer it and simply drop streaming_content.
    // This avoids duplicated final output when providers emit BOTH streaming deltas and a final message frame.
    processedUnits = units.filter(u => u.type !== 'streaming_content');
    // Add concatenated content as agent_message type for final output
    processedUnits.push({
      type: 'agent_message',
      content: concatenatedContent,
      timestamp: streamingUnits[streamingUnits.length - 1].timestamp
    });

    // If no agent_message exists, synthesize one from streaming_content (delta-only streams).
    if (!hasAgentMessage) {
      const concatenatedContent = streamingUnits
        .map(u => typeof u.content === 'string' ? u.content : '')
        .join('');

      processedUnits.push({
        type: 'agent_message',
        content: concatenatedContent,
        timestamp: streamingUnits[streamingUnits.length - 1].timestamp
      });
    }
  }

  // Filter units by type

ccw/tests/cli-final-only-output.test.js (new file, 76 lines)
@@ -0,0 +1,76 @@
/**
 * ccw cli exec --final output mode
 *
 * Ensures programmatic callers can get a clean final agent result without
 * banners/spinner/summary noise on stdout.
 */

import { afterEach, describe, it, mock } from 'node:test';
import assert from 'node:assert/strict';
import http from 'node:http';

const cliCommandPath = new URL('../dist/commands/cli.js', import.meta.url).href;
const cliExecutorPath = new URL('../dist/tools/cli-executor.js', import.meta.url).href;

function stubHttpRequest() {
  mock.method(http, 'request', () => {
    const req = {
      on(event, handler) {
        if (event === 'socket') handler({ unref() {} });
        return req;
      },
      write() {},
      end() {},
      destroy() {},
    };
    return req;
  });
}

describe('ccw cli exec --final', async () => {
  afterEach(() => {
    mock.restoreAll();
  });

  it('writes only finalOutput to stdout (no banner/summary)', async () => {
    stubHttpRequest();

    const cliModule = await import(cliCommandPath);
    const cliExecutorModule = await import(cliExecutorPath);

    const stdoutWrites = [];
    mock.method(process.stdout, 'write', (chunk) => {
      stdoutWrites.push(String(chunk));
      return true;
    });
    mock.method(console, 'log', () => {});
    mock.method(console, 'error', () => {});

    mock.method(cliExecutorModule.cliExecutorTool, 'execute', async () => {
      return {
        success: true,
        stdout: 'STDOUT_SHOULD_NOT_WIN',
        stderr: '',
        parsedOutput: 'PARSED_SHOULD_NOT_WIN',
        finalOutput: 'FINAL',
        execution: { id: 'EXEC-FINAL', duration_ms: 1, status: 'success' },
        conversation: { turn_count: 1, total_duration_ms: 1 },
      };
    });

    // Prevent the command from terminating the test runner.
    mock.method(process, 'exit', () => {});

    // Ensure the CLI's internal delayed exit timer doesn't keep the test process alive.
    const realSetTimeout = globalThis.setTimeout;
    mock.method(globalThis, 'setTimeout', (fn, ms, ...args) => {
      const t = realSetTimeout(fn, ms, ...args);
      t?.unref?.();
      return t;
    });

    await cliModule.cliCommand('exec', [], { prompt: 'Hello', tool: 'gemini', final: true });

    assert.equal(stdoutWrites.join(''), 'FINAL');
  });
});

ccw/tests/cli-output-converter.test.js (new file, 66 lines)
@@ -0,0 +1,66 @@
/**
 * CLI Output Converter - Streaming/Final de-duplication tests
 *
 * Runs against the shipped runtime in `ccw/dist`.
 */

import { describe, it } from 'node:test';
import assert from 'node:assert/strict';

import { createOutputParser, flattenOutputUnits } from '../dist/tools/cli-output-converter.js';

describe('cli-output-converter (streaming de-dup)', () => {
  it('normalizes cumulative Gemini delta frames into suffix deltas', () => {
    const parser = createOutputParser('json-lines');
    const ts0 = '2026-02-04T00:00:00.000Z';
    const ts1 = '2026-02-04T00:00:01.000Z';

    const input = [
      JSON.stringify({ type: 'message', timestamp: ts0, role: 'assistant', content: 'Hello', delta: true }),
      JSON.stringify({ type: 'message', timestamp: ts1, role: 'assistant', content: 'Hello world', delta: true }),
      '',
    ].join('\n');

    const units = parser.parse(Buffer.from(input, 'utf8'), 'stdout');

    assert.equal(units.length, 2);
    assert.equal(units[0].type, 'streaming_content');
    assert.equal(units[0].content, 'Hello');
    assert.equal(units[1].type, 'streaming_content');
    assert.equal(units[1].content, ' world');
  });

  it('skips non-delta final assistant frame after deltas (avoids stream duplication)', () => {
    const parser = createOutputParser('json-lines');
    const ts0 = '2026-02-04T00:00:00.000Z';
    const ts1 = '2026-02-04T00:00:01.000Z';
    const ts2 = '2026-02-04T00:00:02.000Z';

    const input = [
      JSON.stringify({ type: 'message', timestamp: ts0, role: 'assistant', content: 'Hello', delta: true }),
      JSON.stringify({ type: 'message', timestamp: ts1, role: 'assistant', content: ' world', delta: true }),
      // Some CLIs send a final non-delta message repeating the full content
      JSON.stringify({ type: 'message', timestamp: ts2, role: 'assistant', content: 'Hello world', delta: false }),
      '',
    ].join('\n');

    const units = parser.parse(Buffer.from(input, 'utf8'), 'stdout');
    assert.equal(units.some((u) => u.type === 'agent_message'), false);
    assert.equal(units.filter((u) => u.type === 'streaming_content').length, 2);

    const reconstructed = flattenOutputUnits(units, { includeTypes: ['agent_message'] });
    assert.equal(reconstructed, 'Hello world');
  });

  it('does not synthesize an extra agent_message when one already exists', () => {
    const units = [
      { type: 'streaming_content', content: 'a', timestamp: '2026-02-04T00:00:00.000Z' },
      { type: 'streaming_content', content: 'b', timestamp: '2026-02-04T00:00:01.000Z' },
      { type: 'agent_message', content: 'ab', timestamp: '2026-02-04T00:00:02.000Z' },
    ];

    const out = flattenOutputUnits(units, { includeTypes: ['agent_message'] });
    assert.equal(out, 'ab');
  });
});