mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-03-11 17:21:03 +08:00
Add comprehensive tests for CLI functionality and CodexLens compatibility
- Introduced tests for stale running fallback in CLI watch functionality to ensure proper handling of saved conversations. - Added compatibility tests for CodexLens CLI to verify index initialization despite compatibility conflicts. - Implemented tests for Smart Search MCP usage to validate default settings and path handling. - Created tests for UV Manager to ensure Python preference handling works as expected. - Added a detailed guide for CCW/Codex commands and skills, covering core commands, execution modes, and templates.
This commit is contained in:
@@ -12,7 +12,10 @@ import { router } from './router';
|
||||
import queryClient from './lib/query-client';
|
||||
import type { Locale } from './lib/i18n';
|
||||
import { useWorkflowStore } from '@/stores/workflowStore';
|
||||
import { useActiveCliExecutions } from '@/hooks/useActiveCliExecutions';
|
||||
import { useCliStreamStore } from '@/stores/cliStreamStore';
|
||||
import { useExecutionMonitorStore } from '@/stores/executionMonitorStore';
|
||||
import { useTerminalPanelStore } from '@/stores/terminalPanelStore';
|
||||
import { useActiveCliExecutions, ACTIVE_CLI_EXECUTIONS_QUERY_KEY } from '@/hooks/useActiveCliExecutions';
|
||||
import { DialogStyleProvider } from '@/contexts/DialogStyleContext';
|
||||
import { initializeCsrfToken } from './lib/api';
|
||||
|
||||
@@ -55,6 +58,10 @@ function QueryInvalidator() {
|
||||
useEffect(() => {
|
||||
// Register callback to invalidate all workspace-related queries on workspace switch
|
||||
const callback = () => {
|
||||
useCliStreamStore.getState().resetState();
|
||||
useExecutionMonitorStore.getState().resetState();
|
||||
useTerminalPanelStore.getState().resetState();
|
||||
queryClient.invalidateQueries({ queryKey: ACTIVE_CLI_EXECUTIONS_QUERY_KEY });
|
||||
queryClient.invalidateQueries({
|
||||
predicate: (query) => {
|
||||
const queryKey = query.queryKey;
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
// ========================================
|
||||
// Redesigned CLI streaming monitor with smart parsing and message-based layout
|
||||
|
||||
import { useState, useCallback, useMemo } from 'react';
|
||||
import { useState, useEffect, useCallback, useMemo } from 'react';
|
||||
import { useIntl } from 'react-intl';
|
||||
import {
|
||||
Terminal,
|
||||
@@ -11,6 +11,7 @@ import {
|
||||
import { cn } from '@/lib/utils';
|
||||
import { useCliStreamStore, type CliOutputLine } from '@/stores/cliStreamStore';
|
||||
import { useActiveCliExecutions } from '@/hooks/useActiveCliExecutions';
|
||||
import { useWorkflowStore, selectProjectPath } from '@/stores/workflowStore';
|
||||
import { useCliStreamWebSocket } from '@/hooks/useCliStreamWebSocket';
|
||||
|
||||
// New layout components
|
||||
@@ -169,6 +170,7 @@ export function CliStreamMonitorNew({ isOpen, onClose }: CliStreamMonitorNewProp
|
||||
const executions = useCliStreamStore((state) => state.executions);
|
||||
const currentExecutionId = useCliStreamStore((state) => state.currentExecutionId);
|
||||
const removeExecution = useCliStreamStore((state) => state.removeExecution);
|
||||
const projectPath = useWorkflowStore(selectProjectPath);
|
||||
|
||||
// Active execution sync
|
||||
const { isLoading: isSyncing, refetch } = useActiveCliExecutions(isOpen);
|
||||
@@ -221,6 +223,12 @@ export function CliStreamMonitorNew({ isOpen, onClose }: CliStreamMonitorNewProp
|
||||
return filtered;
|
||||
}, [messages, filter, searchQuery]);
|
||||
|
||||
useEffect(() => {
|
||||
setSearchQuery('');
|
||||
setFilter('all');
|
||||
setViewMode('preview');
|
||||
}, [projectPath]);
|
||||
|
||||
// Copy message content
|
||||
const handleCopy = useCallback(async (content: string) => {
|
||||
try {
|
||||
|
||||
@@ -25,6 +25,7 @@ import { Badge } from '@/components/ui/Badge';
|
||||
import { LogBlockList } from '@/components/shared/LogBlock';
|
||||
import { useCliStreamStore, type CliOutputLine } from '@/stores/cliStreamStore';
|
||||
import { useActiveCliExecutions } from '@/hooks/useActiveCliExecutions';
|
||||
import { useWorkflowStore, selectProjectPath } from '@/stores/workflowStore';
|
||||
import { useCliStreamWebSocket } from '@/hooks/useCliStreamWebSocket';
|
||||
|
||||
// New components for Tab + JSON Cards
|
||||
@@ -186,6 +187,7 @@ export function CliStreamMonitor({ isOpen, onClose }: CliStreamMonitorProps) {
|
||||
const setCurrentExecution = useCliStreamStore((state) => state.setCurrentExecution);
|
||||
const removeExecution = useCliStreamStore((state) => state.removeExecution);
|
||||
const markExecutionClosedByUser = useCliStreamStore((state) => state.markExecutionClosedByUser);
|
||||
const projectPath = useWorkflowStore(selectProjectPath);
|
||||
|
||||
// Active execution sync
|
||||
const { isLoading: isSyncing, refetch } = useActiveCliExecutions(isOpen);
|
||||
@@ -214,6 +216,13 @@ export function CliStreamMonitor({ isOpen, onClose }: CliStreamMonitorProps) {
|
||||
}
|
||||
}, [executions, currentExecutionId, autoScroll, isUserScrolling]);
|
||||
|
||||
useEffect(() => {
|
||||
setSearchQuery('');
|
||||
setAutoScroll(true);
|
||||
setIsUserScrolling(false);
|
||||
setViewMode('list');
|
||||
}, [projectPath]);
|
||||
|
||||
// Handle scroll to detect user scrolling (with debounce for performance)
|
||||
const handleScrollRef = useRef<NodeJS.Timeout | null>(null);
|
||||
const handleScroll = useCallback(() => {
|
||||
|
||||
288
ccw/frontend/src/hooks/useActiveCliExecutions.test.tsx
Normal file
288
ccw/frontend/src/hooks/useActiveCliExecutions.test.tsx
Normal file
@@ -0,0 +1,288 @@
|
||||
// ========================================
|
||||
// useActiveCliExecutions Hook Tests
|
||||
// ========================================
|
||||
|
||||
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
|
||||
import { renderHook, waitFor } from '@testing-library/react';
|
||||
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
|
||||
import * as React from 'react';
|
||||
import * as api from '@/lib/api';
|
||||
import { useActiveCliExecutions } from './useActiveCliExecutions';
|
||||
|
||||
const mockProjectState = vi.hoisted(() => ({
|
||||
projectPath: '/test/project',
|
||||
}));
|
||||
|
||||
const mockStoreState = vi.hoisted(() => ({
|
||||
executions: {} as Record<string, any>,
|
||||
cleanupUserClosedExecutions: vi.fn(),
|
||||
isExecutionClosedByUser: vi.fn(() => false),
|
||||
removeExecution: vi.fn(),
|
||||
upsertExecution: vi.fn(),
|
||||
setCurrentExecution: vi.fn(),
|
||||
}));
|
||||
|
||||
const mockUseCliStreamStore = vi.hoisted(() => {
|
||||
const store = vi.fn();
|
||||
Object.assign(store, {
|
||||
getState: vi.fn(() => mockStoreState),
|
||||
});
|
||||
return store;
|
||||
});
|
||||
|
||||
vi.mock('@/stores/cliStreamStore', () => ({
|
||||
useCliStreamStore: mockUseCliStreamStore,
|
||||
}));
|
||||
|
||||
vi.mock('@/stores/workflowStore', () => ({
|
||||
useWorkflowStore: vi.fn((selector?: (state: { projectPath: string }) => unknown) => (
|
||||
selector
|
||||
? selector({ projectPath: mockProjectState.projectPath })
|
||||
: { projectPath: mockProjectState.projectPath }
|
||||
)),
|
||||
selectProjectPath: (state: { projectPath: string }) => state.projectPath,
|
||||
}));
|
||||
|
||||
vi.mock('@/lib/api', async () => {
|
||||
const actual = await vi.importActual<typeof import('@/lib/api')>('@/lib/api');
|
||||
return {
|
||||
...actual,
|
||||
fetchExecutionDetail: vi.fn(),
|
||||
};
|
||||
});
|
||||
|
||||
const fetchMock = vi.fn();
|
||||
|
||||
function createTestQueryClient() {
|
||||
return new QueryClient({
|
||||
defaultOptions: {
|
||||
queries: {
|
||||
retry: false,
|
||||
},
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
function createWrapper() {
|
||||
const queryClient = createTestQueryClient();
|
||||
|
||||
return ({ children }: { children: React.ReactNode }) => (
|
||||
<QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
|
||||
);
|
||||
}
|
||||
|
||||
function createActiveResponse(executions: Array<Record<string, unknown>>) {
|
||||
return {
|
||||
ok: true,
|
||||
statusText: 'OK',
|
||||
json: vi.fn().mockResolvedValue({ executions }),
|
||||
};
|
||||
}
|
||||
|
||||
describe('useActiveCliExecutions', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
vi.stubGlobal('fetch', fetchMock);
|
||||
|
||||
mockProjectState.projectPath = '/test/project';
|
||||
mockStoreState.executions = {};
|
||||
mockStoreState.cleanupUserClosedExecutions.mockReset();
|
||||
mockStoreState.isExecutionClosedByUser.mockReset();
|
||||
mockStoreState.isExecutionClosedByUser.mockReturnValue(false);
|
||||
mockStoreState.removeExecution.mockReset();
|
||||
mockStoreState.upsertExecution.mockReset();
|
||||
mockStoreState.setCurrentExecution.mockReset();
|
||||
(mockUseCliStreamStore as any).getState.mockReset();
|
||||
(mockUseCliStreamStore as any).getState.mockImplementation(() => mockStoreState);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.unstubAllGlobals();
|
||||
});
|
||||
|
||||
it('requests active executions with scoped project path', async () => {
|
||||
fetchMock.mockResolvedValue(createActiveResponse([]));
|
||||
|
||||
const { result } = renderHook(() => useActiveCliExecutions(true, 60_000), {
|
||||
wrapper: createWrapper(),
|
||||
});
|
||||
|
||||
await waitFor(() => {
|
||||
expect(result.current.data).toEqual([]);
|
||||
});
|
||||
|
||||
expect(fetchMock).toHaveBeenCalledWith('/api/cli/active?path=%2Ftest%2Fproject');
|
||||
});
|
||||
|
||||
it('filters stale recovered running executions when saved detail is newer', async () => {
|
||||
const startTime = 1_741_392_000_000;
|
||||
mockStoreState.executions = {
|
||||
'exec-stale': {
|
||||
tool: 'codex',
|
||||
mode: 'analysis',
|
||||
status: 'running',
|
||||
output: [],
|
||||
startTime,
|
||||
recovered: true,
|
||||
},
|
||||
};
|
||||
|
||||
fetchMock.mockResolvedValue(createActiveResponse([
|
||||
{
|
||||
id: 'exec-stale',
|
||||
tool: 'codex',
|
||||
mode: 'analysis',
|
||||
status: 'running',
|
||||
output: '[响应] stale output',
|
||||
startTime,
|
||||
},
|
||||
]));
|
||||
|
||||
vi.mocked(api.fetchExecutionDetail).mockResolvedValue({
|
||||
id: 'exec-stale',
|
||||
tool: 'codex',
|
||||
mode: 'analysis',
|
||||
turns: [],
|
||||
turn_count: 1,
|
||||
created_at: new Date(startTime - 2_000).toISOString(),
|
||||
updated_at: new Date(startTime + 2_000).toISOString(),
|
||||
} as any);
|
||||
|
||||
const { result } = renderHook(() => useActiveCliExecutions(true, 60_000), {
|
||||
wrapper: createWrapper(),
|
||||
});
|
||||
|
||||
await waitFor(() => {
|
||||
expect(result.current.data).toEqual([]);
|
||||
});
|
||||
|
||||
expect(api.fetchExecutionDetail).toHaveBeenCalledWith('exec-stale', '/test/project');
|
||||
expect(mockStoreState.removeExecution).toHaveBeenCalledWith('exec-stale');
|
||||
expect(mockStoreState.upsertExecution).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('removes recovered running executions that are absent from the current workspace active list', async () => {
|
||||
mockStoreState.executions = {
|
||||
'exec-old-workspace': {
|
||||
tool: 'codex',
|
||||
mode: 'analysis',
|
||||
status: 'running',
|
||||
output: [],
|
||||
startTime: 1_741_394_000_000,
|
||||
recovered: true,
|
||||
},
|
||||
};
|
||||
|
||||
fetchMock.mockResolvedValue(createActiveResponse([]));
|
||||
|
||||
const { result } = renderHook(() => useActiveCliExecutions(true, 60_000), {
|
||||
wrapper: createWrapper(),
|
||||
});
|
||||
|
||||
await waitFor(() => {
|
||||
expect(result.current.data).toEqual([]);
|
||||
});
|
||||
|
||||
expect(mockStoreState.removeExecution).toHaveBeenCalledWith('exec-old-workspace');
|
||||
expect(api.fetchExecutionDetail).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('reselects the best remaining execution when current selection becomes invalid', async () => {
|
||||
mockStoreState.executions = {
|
||||
'exec-running': {
|
||||
tool: 'codex',
|
||||
mode: 'analysis',
|
||||
status: 'running',
|
||||
output: [],
|
||||
startTime: 1_741_395_000_000,
|
||||
recovered: false,
|
||||
},
|
||||
'exec-completed': {
|
||||
tool: 'codex',
|
||||
mode: 'analysis',
|
||||
status: 'completed',
|
||||
output: [],
|
||||
startTime: 1_741_394_000_000,
|
||||
recovered: false,
|
||||
},
|
||||
};
|
||||
|
||||
(mockUseCliStreamStore as any).getState.mockImplementation(() => ({
|
||||
...mockStoreState,
|
||||
currentExecutionId: 'exec-missing',
|
||||
}));
|
||||
fetchMock.mockResolvedValue(createActiveResponse([]));
|
||||
|
||||
const { result } = renderHook(() => useActiveCliExecutions(true, 60_000), {
|
||||
wrapper: createWrapper(),
|
||||
});
|
||||
|
||||
await waitFor(() => {
|
||||
expect(result.current.data).toEqual([]);
|
||||
});
|
||||
|
||||
expect(mockStoreState.setCurrentExecution).toHaveBeenCalledWith('exec-running');
|
||||
});
|
||||
|
||||
it('clears current selection when no executions remain after sync', async () => {
|
||||
mockStoreState.executions = {};
|
||||
(mockUseCliStreamStore as any).getState.mockImplementation(() => ({
|
||||
...mockStoreState,
|
||||
currentExecutionId: 'exec-missing',
|
||||
}));
|
||||
fetchMock.mockResolvedValue(createActiveResponse([]));
|
||||
|
||||
const { result } = renderHook(() => useActiveCliExecutions(true, 60_000), {
|
||||
wrapper: createWrapper(),
|
||||
});
|
||||
|
||||
await waitFor(() => {
|
||||
expect(result.current.data).toEqual([]);
|
||||
});
|
||||
|
||||
expect(mockStoreState.setCurrentExecution).toHaveBeenCalledWith(null);
|
||||
});
|
||||
|
||||
it('keeps running executions when saved detail is older than active start time', async () => {
|
||||
const startTime = 1_741_393_000_000;
|
||||
|
||||
fetchMock.mockResolvedValue(createActiveResponse([
|
||||
{
|
||||
id: 'exec-live',
|
||||
tool: 'codex',
|
||||
mode: 'analysis',
|
||||
status: 'running',
|
||||
output: '[响应] live output',
|
||||
startTime,
|
||||
},
|
||||
]));
|
||||
|
||||
vi.mocked(api.fetchExecutionDetail).mockResolvedValue({
|
||||
id: 'exec-live',
|
||||
tool: 'codex',
|
||||
mode: 'analysis',
|
||||
turns: [],
|
||||
turn_count: 1,
|
||||
created_at: new Date(startTime - 20_000).toISOString(),
|
||||
updated_at: new Date(startTime - 10_000).toISOString(),
|
||||
} as any);
|
||||
|
||||
const { result } = renderHook(() => useActiveCliExecutions(true, 60_000), {
|
||||
wrapper: createWrapper(),
|
||||
});
|
||||
|
||||
await waitFor(() => {
|
||||
expect(result.current.data?.map((execution) => execution.id)).toEqual(['exec-live']);
|
||||
});
|
||||
|
||||
expect(mockStoreState.removeExecution).not.toHaveBeenCalled();
|
||||
expect(mockStoreState.upsertExecution).toHaveBeenCalledWith(
|
||||
'exec-live',
|
||||
expect.objectContaining({
|
||||
status: 'running',
|
||||
recovered: true,
|
||||
})
|
||||
);
|
||||
expect(mockStoreState.setCurrentExecution).toHaveBeenCalledWith('exec-live');
|
||||
});
|
||||
});
|
||||
@@ -4,7 +4,9 @@
|
||||
// Hook for syncing active CLI executions from server
|
||||
|
||||
import { useQuery, useQueryClient } from '@tanstack/react-query';
|
||||
import { useCliStreamStore } from '@/stores/cliStreamStore';
|
||||
import { fetchExecutionDetail, type ConversationRecord } from '@/lib/api';
|
||||
import { useCliStreamStore, type CliExecutionState } from '@/stores/cliStreamStore';
|
||||
import { useWorkflowStore, selectProjectPath } from '@/stores/workflowStore';
|
||||
|
||||
/**
|
||||
* Response type from /api/cli/active endpoint
|
||||
@@ -84,6 +86,104 @@ function parseHistoricalOutput(rawOutput: string, startTime: number) {
|
||||
return historicalLines;
|
||||
}
|
||||
|
||||
function normalizeTimestampMs(value: unknown): number | undefined {
|
||||
if (value instanceof Date) {
|
||||
const time = value.getTime();
|
||||
return Number.isFinite(time) ? time : undefined;
|
||||
}
|
||||
|
||||
if (typeof value === 'number' && Number.isFinite(value)) {
|
||||
return value > 0 && value < 1_000_000_000_000 ? value * 1000 : value;
|
||||
}
|
||||
|
||||
if (typeof value === 'string') {
|
||||
const trimmed = value.trim();
|
||||
if (!trimmed) return undefined;
|
||||
|
||||
const numericValue = Number(trimmed);
|
||||
if (Number.isFinite(numericValue)) {
|
||||
return numericValue > 0 && numericValue < 1_000_000_000_000 ? numericValue * 1000 : numericValue;
|
||||
}
|
||||
|
||||
const parsed = Date.parse(trimmed);
|
||||
return Number.isNaN(parsed) ? undefined : parsed;
|
||||
}
|
||||
|
||||
return undefined;
|
||||
}
|
||||
|
||||
function isSavedExecutionNewerThanActive(activeStartTime: unknown, savedTimestamp: unknown): boolean {
|
||||
const activeStartTimeMs = normalizeTimestampMs(activeStartTime);
|
||||
if (activeStartTimeMs === undefined) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const savedTimestampMs = normalizeTimestampMs(savedTimestamp);
|
||||
if (savedTimestampMs === undefined) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return savedTimestampMs >= activeStartTimeMs;
|
||||
}
|
||||
|
||||
async function filterSupersededRunningExecutions(
|
||||
executions: ActiveCliExecution[],
|
||||
currentExecutions: Record<string, CliExecutionState>,
|
||||
projectPath?: string
|
||||
): Promise<{ filteredExecutions: ActiveCliExecution[]; removedIds: string[] }> {
|
||||
const candidates = executions.filter((execution) => {
|
||||
if (execution.status !== 'running') {
|
||||
return false;
|
||||
}
|
||||
|
||||
const existing = currentExecutions[execution.id];
|
||||
return !existing || existing.recovered;
|
||||
});
|
||||
|
||||
if (candidates.length === 0) {
|
||||
return { filteredExecutions: executions, removedIds: [] };
|
||||
}
|
||||
|
||||
const removedIds = new Set<string>();
|
||||
|
||||
await Promise.all(candidates.map(async (execution) => {
|
||||
try {
|
||||
const detail = await fetchExecutionDetail(execution.id, projectPath) as ConversationRecord & { _active?: boolean };
|
||||
if (detail._active) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (isSavedExecutionNewerThanActive(
|
||||
execution.startTime,
|
||||
detail.updated_at || detail.created_at
|
||||
)) {
|
||||
removedIds.add(execution.id);
|
||||
}
|
||||
} catch {
|
||||
// Ignore detail lookup failures and keep server active state.
|
||||
}
|
||||
}));
|
||||
|
||||
if (removedIds.size === 0) {
|
||||
return { filteredExecutions: executions, removedIds: [] };
|
||||
}
|
||||
|
||||
return {
|
||||
filteredExecutions: executions.filter((execution) => !removedIds.has(execution.id)),
|
||||
removedIds: Array.from(removedIds),
|
||||
};
|
||||
}
|
||||
|
||||
function pickPreferredExecutionId(executions: Record<string, CliExecutionState>): string | null {
|
||||
const sortedEntries = Object.entries(executions).sort(([, executionA], [, executionB]) => {
|
||||
if (executionA.status === 'running' && executionB.status !== 'running') return -1;
|
||||
if (executionA.status !== 'running' && executionB.status === 'running') return 1;
|
||||
return executionB.startTime - executionA.startTime;
|
||||
});
|
||||
|
||||
return sortedEntries[0]?.[0] ?? null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Query key for active CLI executions
|
||||
*/
|
||||
@@ -104,42 +204,52 @@ export function useActiveCliExecutions(
|
||||
enabled: boolean,
|
||||
refetchInterval: number = 5000
|
||||
) {
|
||||
const projectPath = useWorkflowStore(selectProjectPath);
|
||||
|
||||
return useQuery({
|
||||
queryKey: ACTIVE_CLI_EXECUTIONS_QUERY_KEY,
|
||||
queryKey: [...ACTIVE_CLI_EXECUTIONS_QUERY_KEY, projectPath || 'default'],
|
||||
queryFn: async () => {
|
||||
// Access store state at execution time to avoid stale closures
|
||||
const store = useCliStreamStore.getState();
|
||||
const currentExecutions = store.executions;
|
||||
const params = new URLSearchParams();
|
||||
if (projectPath) {
|
||||
params.set('path', projectPath);
|
||||
}
|
||||
|
||||
const response = await fetch('/api/cli/active');
|
||||
const activeUrl = params.size > 0
|
||||
? `/api/cli/active?${params.toString()}`
|
||||
: '/api/cli/active';
|
||||
const response = await fetch(activeUrl);
|
||||
if (!response.ok) {
|
||||
throw new Error(`Failed to fetch active executions: ${response.statusText}`);
|
||||
}
|
||||
const data: ActiveCliExecutionsResponse = await response.json();
|
||||
const { filteredExecutions, removedIds } = await filterSupersededRunningExecutions(
|
||||
data.executions,
|
||||
currentExecutions,
|
||||
projectPath || undefined
|
||||
);
|
||||
|
||||
// Get server execution IDs
|
||||
const serverIds = new Set(data.executions.map(e => e.id));
|
||||
removedIds.forEach((executionId) => {
|
||||
store.removeExecution(executionId);
|
||||
});
|
||||
|
||||
const serverIds = new Set(filteredExecutions.map(e => e.id));
|
||||
|
||||
// Clean up userClosedExecutions - remove those no longer on server
|
||||
store.cleanupUserClosedExecutions(serverIds);
|
||||
|
||||
// Remove executions that are no longer on server and were closed by user
|
||||
for (const [id, exec] of Object.entries(currentExecutions)) {
|
||||
if (store.isExecutionClosedByUser(id)) {
|
||||
// User closed this execution, remove from local state
|
||||
store.removeExecution(id);
|
||||
} else if (exec.status !== 'running' && !serverIds.has(id) && exec.recovered) {
|
||||
// Not running, not on server, and was recovered (not user-created)
|
||||
} else if (exec.recovered && !serverIds.has(id)) {
|
||||
store.removeExecution(id);
|
||||
}
|
||||
}
|
||||
|
||||
// Process executions and sync to store
|
||||
let hasNewExecution = false;
|
||||
const now = Date.now();
|
||||
|
||||
for (const exec of data.executions) {
|
||||
// Skip if user closed this execution
|
||||
for (const exec of filteredExecutions) {
|
||||
if (store.isExecutionClosedByUser(exec.id)) {
|
||||
continue;
|
||||
}
|
||||
@@ -151,13 +261,10 @@ export function useActiveCliExecutions(
|
||||
hasNewExecution = true;
|
||||
}
|
||||
|
||||
// Merge existing output with historical output
|
||||
const existingOutput = existing?.output || [];
|
||||
const existingContentSet = new Set(existingOutput.map(o => o.content));
|
||||
const missingLines = historicalOutput.filter(h => !existingContentSet.has(h.content));
|
||||
|
||||
// Prepend missing historical lines before existing output
|
||||
// Skip system start message when prepending
|
||||
const systemMsgIndex = existingOutput.findIndex(o => o.type === 'system');
|
||||
const insertIndex = systemMsgIndex >= 0 ? systemMsgIndex + 1 : 0;
|
||||
|
||||
@@ -166,12 +273,10 @@ export function useActiveCliExecutions(
|
||||
mergedOutput.splice(insertIndex, 0, ...missingLines);
|
||||
}
|
||||
|
||||
// Trim if too long
|
||||
if (mergedOutput.length > MAX_OUTPUT_LINES) {
|
||||
mergedOutput.splice(0, mergedOutput.length - MAX_OUTPUT_LINES);
|
||||
}
|
||||
|
||||
// Add system message for new executions
|
||||
let finalOutput = mergedOutput;
|
||||
if (!existing) {
|
||||
finalOutput = [
|
||||
@@ -195,19 +300,27 @@ export function useActiveCliExecutions(
|
||||
});
|
||||
}
|
||||
|
||||
// Set current execution to first running execution if none selected
|
||||
if (hasNewExecution) {
|
||||
const runningExec = data.executions.find(e => e.status === 'running' && !store.isExecutionClosedByUser(e.id));
|
||||
const runningExec = filteredExecutions.find(e => e.status === 'running' && !store.isExecutionClosedByUser(e.id));
|
||||
if (runningExec && !currentExecutions[runningExec.id]) {
|
||||
store.setCurrentExecution(runningExec.id);
|
||||
}
|
||||
}
|
||||
|
||||
return data.executions;
|
||||
const nextState = useCliStreamStore.getState();
|
||||
const currentExecutionId = nextState.currentExecutionId;
|
||||
if (!currentExecutionId || !nextState.executions[currentExecutionId]) {
|
||||
const preferredExecutionId = pickPreferredExecutionId(nextState.executions);
|
||||
if (preferredExecutionId !== currentExecutionId) {
|
||||
store.setCurrentExecution(preferredExecutionId);
|
||||
}
|
||||
}
|
||||
|
||||
return filteredExecutions;
|
||||
},
|
||||
enabled,
|
||||
refetchInterval,
|
||||
staleTime: 2000, // Consider data fresh for 2 seconds
|
||||
staleTime: 2000,
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
55
ccw/frontend/src/pages/CliViewerPage.test.ts
Normal file
55
ccw/frontend/src/pages/CliViewerPage.test.ts
Normal file
@@ -0,0 +1,55 @@
|
||||
// ========================================
|
||||
// CliViewerPage Helper Tests
|
||||
// ========================================
|
||||
|
||||
import { describe, it, expect } from 'vitest';
|
||||
import { getStaleViewerTabs } from './cliViewerPage.utils';
|
||||
|
||||
describe('getStaleViewerTabs', () => {
|
||||
it('returns tabs whose execution ids are missing from the current execution map', () => {
|
||||
const panes = {
|
||||
'pane-1': {
|
||||
id: 'pane-1',
|
||||
activeTabId: 'tab-1',
|
||||
tabs: [
|
||||
{ id: 'tab-1', executionId: 'exec-stale', title: 'stale', isPinned: false, order: 1 },
|
||||
{ id: 'tab-2', executionId: 'exec-live', title: 'live', isPinned: false, order: 2 },
|
||||
],
|
||||
},
|
||||
'pane-2': {
|
||||
id: 'pane-2',
|
||||
activeTabId: 'tab-3',
|
||||
tabs: [
|
||||
{ id: 'tab-3', executionId: 'exec-missing', title: 'missing', isPinned: true, order: 1 },
|
||||
],
|
||||
},
|
||||
};
|
||||
|
||||
const executions = {
|
||||
'exec-live': { tool: 'codex', mode: 'analysis' },
|
||||
};
|
||||
|
||||
expect(getStaleViewerTabs(panes as any, executions)).toEqual([
|
||||
{ paneId: 'pane-1', tabId: 'tab-1', executionId: 'exec-stale' },
|
||||
{ paneId: 'pane-2', tabId: 'tab-3', executionId: 'exec-missing' },
|
||||
]);
|
||||
});
|
||||
|
||||
it('returns an empty list when all tabs map to current executions', () => {
|
||||
const panes = {
|
||||
'pane-1': {
|
||||
id: 'pane-1',
|
||||
activeTabId: 'tab-1',
|
||||
tabs: [
|
||||
{ id: 'tab-1', executionId: 'exec-live', title: 'live', isPinned: false, order: 1 },
|
||||
],
|
||||
},
|
||||
};
|
||||
|
||||
const executions = {
|
||||
'exec-live': { tool: 'codex', mode: 'analysis' },
|
||||
};
|
||||
|
||||
expect(getStaleViewerTabs(panes as any, executions)).toEqual([]);
|
||||
});
|
||||
});
|
||||
@@ -18,6 +18,7 @@ import {
|
||||
import { useCliStreamStore } from '@/stores/cliStreamStore';
|
||||
import { useActiveCliExecutions } from '@/hooks/useActiveCliExecutions';
|
||||
import { useCliStreamWebSocket } from '@/hooks/useCliStreamWebSocket';
|
||||
import { getStaleViewerTabs } from './cliViewerPage.utils';
|
||||
|
||||
// ========================================
|
||||
// Constants
|
||||
@@ -61,13 +62,13 @@ export function CliViewerPage() {
|
||||
const layout = useViewerLayout();
|
||||
const panes = useViewerPanes();
|
||||
const focusedPaneId = useFocusedPaneId();
|
||||
const { initializeDefaultLayout, addTab } = useViewerStore();
|
||||
const { initializeDefaultLayout, addTab, removeTab } = useViewerStore();
|
||||
|
||||
// CLI Stream Store hooks
|
||||
const executions = useCliStreamStore((state) => state.executions);
|
||||
|
||||
// Active execution sync from server
|
||||
useActiveCliExecutions(true);
|
||||
const { isLoading: isSyncing, isFetching: isRefreshing } = useActiveCliExecutions(true);
|
||||
|
||||
// CENTRALIZED WebSocket handler - processes each message only ONCE globally
|
||||
useCliStreamWebSocket();
|
||||
@@ -106,6 +107,18 @@ export function CliViewerPage() {
|
||||
});
|
||||
}, [executions, panes]);
|
||||
|
||||
useEffect(() => {
|
||||
if (isSyncing || isRefreshing) return;
|
||||
|
||||
const staleTabs = getStaleViewerTabs(panes, executions);
|
||||
if (staleTabs.length === 0) return;
|
||||
|
||||
staleTabs.forEach(({ paneId, tabId, executionId }) => {
|
||||
addedExecutionsRef.current.delete(executionId);
|
||||
removeTab(paneId, tabId);
|
||||
});
|
||||
}, [executions, isRefreshing, isSyncing, panes, removeTab]);
|
||||
|
||||
// Initialize layout if empty
|
||||
useEffect(() => {
|
||||
const paneCount = countPanes(layout);
|
||||
|
||||
22
ccw/frontend/src/pages/cliViewerPage.utils.ts
Normal file
22
ccw/frontend/src/pages/cliViewerPage.utils.ts
Normal file
@@ -0,0 +1,22 @@
|
||||
// ========================================
|
||||
// CliViewerPage Utilities
|
||||
// ========================================
|
||||
|
||||
import type { PaneId, PaneState, TabId } from '@/stores/viewerStore';
|
||||
|
||||
export function getStaleViewerTabs(
|
||||
panes: Record<PaneId, PaneState>,
|
||||
executions: Record<string, unknown>
|
||||
): Array<{ paneId: PaneId; tabId: TabId; executionId: string }> {
|
||||
const executionIds = new Set(Object.keys(executions));
|
||||
|
||||
return Object.entries(panes).flatMap(([paneId, pane]) => (
|
||||
pane.tabs
|
||||
.filter((tab) => !executionIds.has(tab.executionId))
|
||||
.map((tab) => ({
|
||||
paneId,
|
||||
tabId: tab.id,
|
||||
executionId: tab.executionId,
|
||||
}))
|
||||
));
|
||||
}
|
||||
63
ccw/frontend/src/stores/cliStreamStore.test.ts
Normal file
63
ccw/frontend/src/stores/cliStreamStore.test.ts
Normal file
@@ -0,0 +1,63 @@
|
||||
// ========================================
|
||||
// CLI Stream Store Tests
|
||||
// ========================================
|
||||
|
||||
import { describe, it, expect, beforeEach } from 'vitest';
|
||||
import { useCliStreamStore, selectActiveExecutionCount } from './cliStreamStore';
|
||||
|
||||
describe('cliStreamStore', () => {
|
||||
beforeEach(() => {
|
||||
useCliStreamStore.getState().resetState();
|
||||
});
|
||||
|
||||
it('removeExecution clears outputs and execution state together', () => {
|
||||
const store = useCliStreamStore.getState();
|
||||
|
||||
store.upsertExecution('exec-1', {
|
||||
tool: 'codex',
|
||||
mode: 'analysis',
|
||||
status: 'running',
|
||||
output: [],
|
||||
startTime: 1_741_400_000_000,
|
||||
});
|
||||
store.addOutput('exec-1', {
|
||||
type: 'stdout',
|
||||
content: 'hello',
|
||||
timestamp: 1_741_400_000_100,
|
||||
});
|
||||
|
||||
expect(useCliStreamStore.getState().outputs['exec-1']).toHaveLength(1);
|
||||
expect(useCliStreamStore.getState().executions['exec-1']).toBeDefined();
|
||||
|
||||
store.removeExecution('exec-1');
|
||||
|
||||
expect(useCliStreamStore.getState().outputs['exec-1']).toBeUndefined();
|
||||
expect(useCliStreamStore.getState().executions['exec-1']).toBeUndefined();
|
||||
});
|
||||
|
||||
it('resetState clears execution badge state for workspace switches', () => {
|
||||
const store = useCliStreamStore.getState();
|
||||
|
||||
store.upsertExecution('exec-running', {
|
||||
tool: 'codex',
|
||||
mode: 'analysis',
|
||||
status: 'running',
|
||||
output: [],
|
||||
startTime: 1_741_401_000_000,
|
||||
});
|
||||
store.setCurrentExecution('exec-running');
|
||||
store.markExecutionClosedByUser('exec-running');
|
||||
|
||||
expect(selectActiveExecutionCount(useCliStreamStore.getState() as any)).toBe(1);
|
||||
expect(useCliStreamStore.getState().currentExecutionId).toBe('exec-running');
|
||||
|
||||
store.resetState();
|
||||
|
||||
const nextState = useCliStreamStore.getState();
|
||||
expect(selectActiveExecutionCount(nextState as any)).toBe(0);
|
||||
expect(nextState.currentExecutionId).toBeNull();
|
||||
expect(Object.keys(nextState.executions)).toEqual([]);
|
||||
expect(Object.keys(nextState.outputs)).toEqual([]);
|
||||
expect(nextState.userClosedExecutions.size).toBe(0);
|
||||
});
|
||||
});
|
||||
@@ -93,6 +93,7 @@ interface CliStreamState extends BlockCacheState {
|
||||
isExecutionClosedByUser: (executionId: string) => boolean;
|
||||
cleanupUserClosedExecutions: (serverIds: Set<string>) => void;
|
||||
setCurrentExecution: (executionId: string | null) => void;
|
||||
resetState: () => void;
|
||||
|
||||
// Block cache methods
|
||||
getBlocks: (executionId: string) => LogBlockData[];
|
||||
@@ -462,15 +463,18 @@ export const useCliStreamStore = create<CliStreamState>()(
|
||||
|
||||
removeExecution: (executionId: string) => {
|
||||
set((state) => {
|
||||
const newOutputs = { ...state.outputs };
|
||||
const newExecutions = { ...state.executions };
|
||||
const newBlocks = { ...state.blocks };
|
||||
const newLastUpdate = { ...state.lastUpdate };
|
||||
const newDeduplicationWindows = { ...state.deduplicationWindows };
|
||||
delete newOutputs[executionId];
|
||||
delete newExecutions[executionId];
|
||||
delete newBlocks[executionId];
|
||||
delete newLastUpdate[executionId];
|
||||
delete newDeduplicationWindows[executionId];
|
||||
return {
|
||||
outputs: newOutputs,
|
||||
executions: newExecutions,
|
||||
blocks: newBlocks,
|
||||
lastUpdate: newLastUpdate,
|
||||
@@ -513,6 +517,18 @@ export const useCliStreamStore = create<CliStreamState>()(
|
||||
set({ currentExecutionId: executionId }, false, 'cliStream/setCurrentExecution');
|
||||
},
|
||||
|
||||
resetState: () => {
|
||||
set({
|
||||
outputs: {},
|
||||
executions: {},
|
||||
currentExecutionId: null,
|
||||
userClosedExecutions: new Set<string>(),
|
||||
deduplicationWindows: {},
|
||||
blocks: {},
|
||||
lastUpdate: {},
|
||||
}, false, 'cliStream/resetState');
|
||||
},
|
||||
|
||||
// Block cache methods
|
||||
getBlocks: (executionId: string) => {
|
||||
const state = get();
|
||||
|
||||
46
ccw/frontend/src/stores/executionMonitorStore.test.ts
Normal file
46
ccw/frontend/src/stores/executionMonitorStore.test.ts
Normal file
@@ -0,0 +1,46 @@
|
||||
// ========================================
// Execution Monitor Store Tests
// ========================================

import { beforeEach, describe, expect, it } from 'vitest';
import {
  useExecutionMonitorStore,
  selectActiveExecutionCount,
  type ExecutionWSMessage,
} from './executionMonitorStore';

describe('executionMonitorStore', () => {
  // Start each test from a pristine store so state cannot leak between cases.
  beforeEach(() => {
    useExecutionMonitorStore.getState().resetState();
  });

  it('resetState clears workspace-scoped execution monitor state', () => {
    const store = useExecutionMonitorStore.getState();
    // Simulate the websocket message that registers a running execution.
    const startMessage: ExecutionWSMessage = {
      type: 'EXECUTION_STARTED',
      payload: {
        executionId: 'exec-running',
        flowId: 'flow-1',
        sessionKey: 'session-1',
        stepName: 'Workspace Flow',
        totalSteps: 3,
        timestamp: '2026-03-08T12:00:00.000Z',
      },
    };

    store.handleExecutionMessage(startMessage);

    // Sanity: the start message tracked the execution and opened the panel.
    const activeState = useExecutionMonitorStore.getState();
    expect(selectActiveExecutionCount(activeState as any)).toBe(1);
    expect(activeState.currentExecutionId).toBe('exec-running');
    expect(activeState.isPanelOpen).toBe(true);

    store.resetState();

    // After reset (e.g. on workspace switch) the monitor must look untouched.
    const nextState = useExecutionMonitorStore.getState();
    expect(selectActiveExecutionCount(nextState as any)).toBe(0);
    expect(nextState.activeExecutions).toEqual({});
    expect(nextState.currentExecutionId).toBeNull();
    expect(nextState.isPanelOpen).toBe(false);
  });
});
|
||||
@@ -81,6 +81,7 @@ interface ExecutionMonitorActions {
|
||||
setPanelOpen: (open: boolean) => void;
|
||||
clearExecution: (executionId: string) => void;
|
||||
clearAllExecutions: () => void;
|
||||
resetState: () => void;
|
||||
}
|
||||
|
||||
type ExecutionMonitorStore = ExecutionMonitorState & ExecutionMonitorActions;
|
||||
@@ -318,6 +319,10 @@ export const useExecutionMonitorStore = create<ExecutionMonitorStore>()(
|
||||
clearAllExecutions: () => {
|
||||
set({ activeExecutions: {}, currentExecutionId: null }, false, 'clearAllExecutions');
|
||||
},
|
||||
|
||||
resetState: () => {
|
||||
set({ ...initialState }, false, 'resetState');
|
||||
},
|
||||
}),
|
||||
{ name: 'ExecutionMonitorStore' }
|
||||
)
|
||||
|
||||
35
ccw/frontend/src/stores/terminalPanelStore.test.ts
Normal file
35
ccw/frontend/src/stores/terminalPanelStore.test.ts
Normal file
@@ -0,0 +1,35 @@
|
||||
// ========================================
// Terminal Panel Store Tests
// ========================================

import { beforeEach, describe, expect, it } from 'vitest';
import { useTerminalPanelStore, selectTerminalCount } from './terminalPanelStore';

describe('terminalPanelStore', () => {
  // Start each test from a pristine store so state cannot leak between cases.
  beforeEach(() => {
    useTerminalPanelStore.getState().resetState();
  });

  it('resetState clears workspace-scoped terminal tabs and selection', () => {
    const store = useTerminalPanelStore.getState();

    // Seed two terminal tabs and move the panel off its default view.
    store.openTerminal('session-a');
    store.addTerminal('session-b');
    store.setPanelView('queue');

    // Sanity: both tabs exist, the first opened tab is active, panel is open.
    const activeState = useTerminalPanelStore.getState();
    expect(selectTerminalCount(activeState as any)).toBe(2);
    expect(activeState.activeTerminalId).toBe('session-a');
    expect(activeState.panelView).toBe('queue');
    expect(activeState.isPanelOpen).toBe(true);

    store.resetState();

    // After reset all tabs, selection, view and open state return to defaults.
    const nextState = useTerminalPanelStore.getState();
    expect(selectTerminalCount(nextState as any)).toBe(0);
    expect(nextState.terminalOrder).toEqual([]);
    expect(nextState.activeTerminalId).toBeNull();
    expect(nextState.panelView).toBe('terminal');
    expect(nextState.isPanelOpen).toBe(false);
  });
});
|
||||
@@ -38,6 +38,8 @@ export interface TerminalPanelActions {
|
||||
addTerminal: (sessionKey: string) => void;
|
||||
/** Remove a terminal from the order list and adjust active if needed */
|
||||
removeTerminal: (sessionKey: string) => void;
|
||||
/** Reset workspace-scoped terminal panel UI state */
|
||||
resetState: () => void;
|
||||
}
|
||||
|
||||
export type TerminalPanelStore = TerminalPanelState & TerminalPanelActions;
|
||||
@@ -153,6 +155,10 @@ export const useTerminalPanelStore = create<TerminalPanelStore>()(
|
||||
'removeTerminal'
|
||||
);
|
||||
},
|
||||
|
||||
resetState: () => {
|
||||
set({ ...initialState }, false, 'resetState');
|
||||
},
|
||||
}),
|
||||
{ name: 'TerminalPanelStore' }
|
||||
)
|
||||
|
||||
@@ -112,8 +112,7 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
},
|
||||
sessionDataStore,
|
||||
},
|
||||
false,
|
||||
'setSessions'
|
||||
false
|
||||
);
|
||||
},
|
||||
|
||||
@@ -131,8 +130,7 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
[key]: session,
|
||||
},
|
||||
}),
|
||||
false,
|
||||
'addSession'
|
||||
false
|
||||
);
|
||||
},
|
||||
|
||||
@@ -140,7 +138,7 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
const key = sessionKey(sessionId);
|
||||
|
||||
set(
|
||||
(state) => {
|
||||
(state: WorkflowState) => {
|
||||
const session = state.sessionDataStore[key];
|
||||
if (!session) return state;
|
||||
|
||||
@@ -163,8 +161,7 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
},
|
||||
};
|
||||
},
|
||||
false,
|
||||
'updateSession'
|
||||
false
|
||||
);
|
||||
},
|
||||
|
||||
@@ -172,7 +169,7 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
const key = sessionKey(sessionId);
|
||||
|
||||
set(
|
||||
(state) => {
|
||||
(state: WorkflowState) => {
|
||||
const { [key]: removed, ...remainingStore } = state.sessionDataStore;
|
||||
|
||||
return {
|
||||
@@ -187,8 +184,7 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
},
|
||||
};
|
||||
},
|
||||
false,
|
||||
'removeSession'
|
||||
false
|
||||
);
|
||||
},
|
||||
|
||||
@@ -196,7 +192,7 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
const key = sessionKey(sessionId);
|
||||
|
||||
set(
|
||||
(state) => {
|
||||
(state: WorkflowState) => {
|
||||
const session = state.sessionDataStore[key];
|
||||
if (!session || session.location === 'archived') return state;
|
||||
|
||||
@@ -220,8 +216,7 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
},
|
||||
};
|
||||
},
|
||||
false,
|
||||
'archiveSession'
|
||||
false
|
||||
);
|
||||
},
|
||||
|
||||
@@ -231,7 +226,7 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
const key = sessionKey(sessionId);
|
||||
|
||||
set(
|
||||
(state) => {
|
||||
(state: WorkflowState) => {
|
||||
const session = state.sessionDataStore[key];
|
||||
if (!session) return state;
|
||||
|
||||
@@ -252,8 +247,7 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
},
|
||||
};
|
||||
},
|
||||
false,
|
||||
'addTask'
|
||||
false
|
||||
);
|
||||
},
|
||||
|
||||
@@ -261,7 +255,7 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
const key = sessionKey(sessionId);
|
||||
|
||||
set(
|
||||
(state) => {
|
||||
(state: WorkflowState) => {
|
||||
const session = state.sessionDataStore[key];
|
||||
if (!session?.tasks) return state;
|
||||
|
||||
@@ -284,8 +278,7 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
},
|
||||
};
|
||||
},
|
||||
false,
|
||||
'updateTask'
|
||||
false
|
||||
);
|
||||
},
|
||||
|
||||
@@ -293,7 +286,7 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
const key = sessionKey(sessionId);
|
||||
|
||||
set(
|
||||
(state) => {
|
||||
(state: WorkflowState) => {
|
||||
const session = state.sessionDataStore[key];
|
||||
if (!session?.tasks) return state;
|
||||
|
||||
@@ -310,8 +303,7 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
},
|
||||
};
|
||||
},
|
||||
false,
|
||||
'removeTask'
|
||||
false
|
||||
);
|
||||
},
|
||||
|
||||
@@ -325,8 +317,7 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
[key]: session,
|
||||
},
|
||||
}),
|
||||
false,
|
||||
'setLiteTaskSession'
|
||||
false
|
||||
);
|
||||
},
|
||||
|
||||
@@ -336,8 +327,7 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
const { [key]: removed, ...remaining } = state.liteTaskDataStore;
|
||||
return { liteTaskDataStore: remaining };
|
||||
},
|
||||
false,
|
||||
'removeLiteTaskSession'
|
||||
false
|
||||
);
|
||||
},
|
||||
|
||||
@@ -351,8 +341,7 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
[key]: data,
|
||||
},
|
||||
}),
|
||||
false,
|
||||
'setTaskJson'
|
||||
false
|
||||
);
|
||||
},
|
||||
|
||||
@@ -362,38 +351,36 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
const { [key]: removed, ...remaining } = state.taskJsonStore;
|
||||
return { taskJsonStore: remaining };
|
||||
},
|
||||
false,
|
||||
'removeTaskJson'
|
||||
false
|
||||
);
|
||||
},
|
||||
|
||||
// ========== Active Session ==========
|
||||
|
||||
setActiveSessionId: (sessionId: string | null) => {
|
||||
set({ activeSessionId: sessionId }, false, 'setActiveSessionId');
|
||||
set({ activeSessionId: sessionId }, false);
|
||||
},
|
||||
|
||||
// ========== Project Path ==========
|
||||
|
||||
setProjectPath: (path: string) => {
|
||||
set({ projectPath: path }, false, 'setProjectPath');
|
||||
set({ projectPath: path }, false);
|
||||
},
|
||||
|
||||
addRecentPath: (path: string) => {
|
||||
set(
|
||||
(state) => {
|
||||
(state: WorkflowState) => {
|
||||
// Remove if exists, add to front
|
||||
const filtered = state.recentPaths.filter((p) => p !== path);
|
||||
const updated = [path, ...filtered].slice(0, 10); // Keep max 10
|
||||
return { recentPaths: updated };
|
||||
},
|
||||
false,
|
||||
'addRecentPath'
|
||||
false
|
||||
);
|
||||
},
|
||||
|
||||
setServerPlatform: (platform: 'win32' | 'darwin' | 'linux') => {
|
||||
set({ serverPlatform: platform }, false, 'setServerPlatform');
|
||||
set({ serverPlatform: platform }, false);
|
||||
},
|
||||
|
||||
// ========== Workspace Actions ==========
|
||||
@@ -418,8 +405,7 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
},
|
||||
sessionDataStore,
|
||||
},
|
||||
false,
|
||||
'switchWorkspace'
|
||||
false
|
||||
);
|
||||
|
||||
// Persist projectPath to localStorage manually
|
||||
@@ -434,16 +420,16 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
|
||||
removeRecentPath: async (path: string) => {
|
||||
const updatedPaths = await apiRemoveRecentPath(path);
|
||||
set({ recentPaths: updatedPaths }, false, 'removeRecentPath');
|
||||
set({ recentPaths: updatedPaths }, false);
|
||||
},
|
||||
|
||||
refreshRecentPaths: async () => {
|
||||
const paths = await fetchRecentPaths();
|
||||
set({ recentPaths: paths }, false, 'refreshRecentPaths');
|
||||
set({ recentPaths: paths }, false);
|
||||
},
|
||||
|
||||
registerQueryInvalidator: (callback: () => void) => {
|
||||
set({ _invalidateQueriesCallback: callback }, false, 'registerQueryInvalidator');
|
||||
set({ _invalidateQueriesCallback: callback }, false);
|
||||
},
|
||||
|
||||
// ========== Filters and Sorting ==========
|
||||
@@ -453,8 +439,7 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
(state) => ({
|
||||
filters: { ...state.filters, ...filters },
|
||||
}),
|
||||
false,
|
||||
'setFilters'
|
||||
false
|
||||
);
|
||||
},
|
||||
|
||||
@@ -463,13 +448,12 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
(state) => ({
|
||||
sorting: { ...state.sorting, ...sorting },
|
||||
}),
|
||||
false,
|
||||
'setSorting'
|
||||
false
|
||||
);
|
||||
},
|
||||
|
||||
resetFilters: () => {
|
||||
set({ filters: defaultFilters, sorting: defaultSorting }, false, 'resetFilters');
|
||||
set({ filters: defaultFilters, sorting: defaultSorting }, false);
|
||||
},
|
||||
|
||||
// ========== Computed Selectors ==========
|
||||
|
||||
@@ -226,7 +226,7 @@ export function run(argv: string[]): void {
|
||||
.option('--output-type <type>', 'Output type: stdout, stderr, both', 'both')
|
||||
.option('--turn <n>', 'Turn number for cache (default: latest)')
|
||||
.option('--raw', 'Raw output only (no formatting)')
|
||||
.option('--final', 'Output final result only (agent_message content, now default)')
|
||||
.option('--final', 'Output strict final result only (no parsed/stdout fallback)')
|
||||
.option('--verbose', 'Show full metadata + raw output')
|
||||
.option('--timeout <seconds>', 'Timeout for watch command')
|
||||
.option('--all', 'Show all executions in show command')
|
||||
|
||||
@@ -181,7 +181,7 @@ interface OutputViewOptions {
|
||||
outputType?: 'stdout' | 'stderr' | 'both';
|
||||
turn?: string;
|
||||
raw?: boolean;
|
||||
final?: boolean; // Explicit --final (same as default, kept for compatibility)
|
||||
final?: boolean; // Explicit --final (strict final result, no parsed/stdout fallback)
|
||||
verbose?: boolean; // Show full metadata + raw stdout/stderr
|
||||
project?: string; // Optional project path for lookup
|
||||
}
|
||||
@@ -470,10 +470,23 @@ async function outputAction(conversationId: string | undefined, options: OutputV
|
||||
return;
|
||||
}
|
||||
|
||||
// Default (and --final): output final result only
|
||||
// Prefer finalOutput (agent_message only) > parsedOutput (filtered) > raw stdout
|
||||
const outputContent = result.finalOutput?.content || result.parsedOutput?.content || result.stdout?.content;
|
||||
if (outputContent) {
|
||||
const finalOutputContent = result.finalOutput?.content;
|
||||
|
||||
if (options.final) {
|
||||
if (finalOutputContent !== undefined) {
|
||||
console.log(finalOutputContent);
|
||||
return;
|
||||
}
|
||||
|
||||
console.error(chalk.yellow('No final agent result found in cached output.'));
|
||||
console.error(chalk.gray(' Try without --final for best-effort output, or use --verbose to inspect raw stdout/stderr.'));
|
||||
process.exit(1);
|
||||
return;
|
||||
}
|
||||
|
||||
// Default output: prefer strict final result, then fall back to best-effort parsed/plain output.
|
||||
const outputContent = finalOutputContent ?? result.parsedOutput?.content ?? result.stdout?.content;
|
||||
if (outputContent !== undefined) {
|
||||
console.log(outputContent);
|
||||
}
|
||||
}
|
||||
@@ -1351,7 +1364,7 @@ async function showAction(options: { all?: boolean }): Promise<void> {
|
||||
// 1. Try to fetch active executions from dashboard
|
||||
let activeExecs: Array<{
|
||||
id: string; tool: string; mode: string; status: string;
|
||||
prompt: string; startTime: number; isComplete?: boolean;
|
||||
prompt: string; startTime: number | string | Date; isComplete?: boolean;
|
||||
}> = [];
|
||||
|
||||
try {
|
||||
@@ -1382,6 +1395,7 @@ async function showAction(options: { all?: boolean }): Promise<void> {
|
||||
// 2. Get recent history from SQLite
|
||||
const historyLimit = options.all ? 100 : 20;
|
||||
const history = await getExecutionHistoryAsync(process.cwd(), { limit: historyLimit, recursive: true });
|
||||
const historyById = new Map(history.executions.map(exec => [exec.id, exec]));
|
||||
|
||||
// 3. Build unified list: active first, then history (de-duped)
|
||||
const seenIds = new Set<string>();
|
||||
@@ -1393,16 +1407,26 @@ async function showAction(options: { all?: boolean }): Promise<void> {
|
||||
// Active executions (running)
|
||||
for (const exec of activeExecs) {
|
||||
if (exec.status === 'running') {
|
||||
const normalizedStartTime = normalizeTimestampMs(exec.startTime);
|
||||
const matchingHistory = historyById.get(exec.id);
|
||||
const shouldSuppressActiveRow = matchingHistory !== undefined && isSavedExecutionNewerThanActive(
|
||||
normalizedStartTime,
|
||||
matchingHistory.updated_at || matchingHistory.timestamp
|
||||
);
|
||||
|
||||
if (shouldSuppressActiveRow) {
|
||||
continue;
|
||||
}
|
||||
|
||||
seenIds.add(exec.id);
|
||||
const elapsed = Math.floor((Date.now() - exec.startTime) / 1000);
|
||||
rows.push({
|
||||
id: exec.id,
|
||||
tool: exec.tool,
|
||||
mode: exec.mode,
|
||||
status: 'running',
|
||||
prompt: (exec.prompt || '').replace(/\n/g, ' ').substring(0, 50),
|
||||
time: `${elapsed}s ago`,
|
||||
duration: `${elapsed}s...`,
|
||||
time: normalizedStartTime !== undefined ? getTimeAgo(new Date(normalizedStartTime)) : 'unknown',
|
||||
duration: normalizedStartTime !== undefined ? formatRunningDuration(Date.now() - normalizedStartTime) : 'running',
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -1513,6 +1537,18 @@ async function watchAction(watchId: string | undefined, options: { timeout?: str
|
||||
}
|
||||
|
||||
if (exec.status === 'running') {
|
||||
const savedConversation = getHistoryStore(process.cwd()).getConversation(watchId);
|
||||
const shouldPreferSavedConversation = !!savedConversation && isSavedExecutionNewerThanActive(
|
||||
normalizeTimestampMs((exec as { startTime?: unknown }).startTime),
|
||||
savedConversation.updated_at || savedConversation.created_at
|
||||
);
|
||||
|
||||
if (shouldPreferSavedConversation) {
|
||||
process.stderr.write(chalk.gray(`\nExecution already completed (status: ${savedConversation.latest_status}).\n`));
|
||||
process.stderr.write(chalk.dim(`Use: ccw cli output ${watchId}\n`));
|
||||
return savedConversation.latest_status === 'success' ? 0 : 1;
|
||||
}
|
||||
|
||||
// Still running — wait and poll again
|
||||
await new Promise(r => setTimeout(r, 1000));
|
||||
return poll();
|
||||
@@ -1667,7 +1703,7 @@ async function detailAction(conversationId: string | undefined): Promise<void> {
|
||||
* @returns {string}
|
||||
*/
|
||||
function getTimeAgo(date: Date): string {
|
||||
const seconds = Math.floor((new Date().getTime() - date.getTime()) / 1000);
|
||||
const seconds = Math.floor((Date.now() - date.getTime()) / 1000);
|
||||
|
||||
if (seconds < 60) return 'just now';
|
||||
if (seconds < 3600) return `${Math.floor(seconds / 60)}m ago`;
|
||||
@@ -1676,6 +1712,71 @@ function getTimeAgo(date: Date): string {
|
||||
return date.toLocaleDateString();
|
||||
}
|
||||
|
||||
function normalizeTimestampMs(value: unknown): number | undefined {
|
||||
if (value instanceof Date) {
|
||||
const time = value.getTime();
|
||||
return Number.isFinite(time) ? time : undefined;
|
||||
}
|
||||
|
||||
if (typeof value === 'number' && Number.isFinite(value)) {
|
||||
return value > 0 && value < 1_000_000_000_000 ? value * 1000 : value;
|
||||
}
|
||||
|
||||
if (typeof value === 'string') {
|
||||
const trimmed = value.trim();
|
||||
if (!trimmed) return undefined;
|
||||
|
||||
const numericValue = Number(trimmed);
|
||||
if (Number.isFinite(numericValue)) {
|
||||
return numericValue > 0 && numericValue < 1_000_000_000_000 ? numericValue * 1000 : numericValue;
|
||||
}
|
||||
|
||||
const parsed = Date.parse(trimmed);
|
||||
return Number.isNaN(parsed) ? undefined : parsed;
|
||||
}
|
||||
|
||||
return undefined;
|
||||
}
|
||||
|
||||
function formatRunningDuration(elapsedMs: number): string {
|
||||
const safeElapsedMs = Math.max(0, elapsedMs);
|
||||
const totalSeconds = Math.floor(safeElapsedMs / 1000);
|
||||
|
||||
if (totalSeconds < 60) return `${totalSeconds}s...`;
|
||||
|
||||
const minutes = Math.floor(totalSeconds / 60);
|
||||
const seconds = totalSeconds % 60;
|
||||
if (totalSeconds < 3600) {
|
||||
return seconds === 0 ? `${minutes}m...` : `${minutes}m ${seconds}s...`;
|
||||
}
|
||||
|
||||
const hours = Math.floor(totalSeconds / 3600);
|
||||
const remainingMinutes = Math.floor((totalSeconds % 3600) / 60);
|
||||
if (totalSeconds < 86400) {
|
||||
return remainingMinutes === 0 ? `${hours}h...` : `${hours}h ${remainingMinutes}m...`;
|
||||
}
|
||||
|
||||
const days = Math.floor(totalSeconds / 86400);
|
||||
const remainingHours = Math.floor((totalSeconds % 86400) / 3600);
|
||||
return remainingHours === 0 ? `${days}d...` : `${days}d ${remainingHours}h...`;
|
||||
}
|
||||
|
||||
function isSavedExecutionNewerThanActive(
|
||||
activeStartTimeMs: number | undefined,
|
||||
savedTimestamp: unknown
|
||||
): boolean {
|
||||
if (activeStartTimeMs === undefined) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const savedTimestampMs = normalizeTimestampMs(savedTimestamp);
|
||||
if (savedTimestampMs === undefined) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return savedTimestampMs >= activeStartTimeMs;
|
||||
}
|
||||
|
||||
/**ccw cli -p
|
||||
* CLI command entry point
|
||||
* @param {string} subcommand - Subcommand (status, exec, history, detail)
|
||||
|
||||
@@ -24,6 +24,8 @@ import {
|
||||
getEnrichedConversation,
|
||||
getHistoryWithNativeInfo
|
||||
} from '../../tools/cli-executor.js';
|
||||
import { getHistoryStore } from '../../tools/cli-history-store.js';
|
||||
import { StoragePaths } from '../../config/storage-paths.js';
|
||||
import { listAllNativeSessions } from '../../tools/native-session-discovery.js';
|
||||
import { SmartContentFormatter } from '../../tools/cli-output-converter.js';
|
||||
import { generateSmartContext, formatSmartContext } from '../../tools/smart-context.js';
|
||||
@@ -51,6 +53,7 @@ import {
|
||||
getCodeIndexMcp
|
||||
} from '../../tools/claude-cli-tools.js';
|
||||
import type { RouteContext } from './types.js';
|
||||
import { existsSync } from 'fs';
|
||||
import { resolve, normalize } from 'path';
|
||||
import { homedir } from 'os';
|
||||
|
||||
@@ -171,6 +174,84 @@ export function getActiveExecutions(): ActiveExecutionDto[] {
|
||||
}));
|
||||
}
|
||||
|
||||
/**
 * Normalize a timestamp of unknown provenance into epoch milliseconds.
 *
 * Accepts a Date, a finite number (positive values below 1e12 are assumed to
 * be epoch seconds and are scaled by 1000), or a string (numeric strings get
 * the same seconds heuristic; other strings go through Date.parse).
 * Returns undefined when the value cannot be interpreted.
 *
 * NOTE(review): an identical helper exists in the CLI command module —
 * consider extracting it to a shared util.
 */
function normalizeTimestampMs(value: unknown): number | undefined {
  if (value instanceof Date) {
    const time = value.getTime();
    // Guard against Invalid Date (getTime() === NaN).
    return Number.isFinite(time) ? time : undefined;
  }

  if (typeof value === 'number' && Number.isFinite(value)) {
    // Heuristic: positive values below 1e12 (~Sep 2001 in ms) are epoch seconds.
    return value > 0 && value < 1_000_000_000_000 ? value * 1000 : value;
  }

  if (typeof value === 'string') {
    const trimmed = value.trim();
    if (!trimmed) return undefined;

    const numericValue = Number(trimmed);
    if (Number.isFinite(numericValue)) {
      return numericValue > 0 && numericValue < 1_000_000_000_000 ? numericValue * 1000 : numericValue;
    }

    // Fall back to date-string parsing (ISO 8601 etc.).
    const parsed = Date.parse(trimmed);
    return Number.isNaN(parsed) ? undefined : parsed;
  }

  return undefined;
}
|
||||
|
||||
/**
 * True when a saved history record's timestamp is at least as recent as the
 * in-memory active execution's start time, i.e. the persisted record
 * supersedes the active one. Returns false whenever either timestamp is
 * missing or unparseable (fail open: keep treating the execution as active).
 */
function isSavedExecutionNewerThanActive(activeStartTimeMs: number | undefined, savedTimestamp: unknown): boolean {
  if (activeStartTimeMs === undefined) {
    return false;
  }

  const savedTimestampMs = normalizeTimestampMs(savedTimestamp);
  if (savedTimestampMs === undefined) {
    return false;
  }

  return savedTimestampMs >= activeStartTimeMs;
}
|
||||
|
||||
/**
 * Best-effort lookup of a saved conversation (with native session info) for
 * an execution id. Returns null when the project's history DB file does not
 * exist yet, or when the store query throws — callers treat null simply as
 * "no saved record".
 */
function getSavedConversationWithNativeInfo(projectPath: string, executionId: string) {
  const historyDbPath = StoragePaths.project(projectPath).historyDb;
  if (!existsSync(historyDbPath)) {
    return null;
  }

  try {
    return getHistoryStore(projectPath).getConversationWithNativeInfo(executionId);
  } catch {
    // Swallow store errors deliberately: a failed lookup is equivalent
    // to the record not existing for this best-effort path.
    return null;
  }
}
|
||||
|
||||
function cleanupSupersededActiveExecutions(projectPath: string): void {
|
||||
const supersededIds: string[] = [];
|
||||
|
||||
for (const [executionId, activeExec] of activeExecutions.entries()) {
|
||||
const savedConversation = getSavedConversationWithNativeInfo(projectPath, executionId);
|
||||
if (!savedConversation) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (isSavedExecutionNewerThanActive(
|
||||
normalizeTimestampMs(activeExec.startTime),
|
||||
savedConversation.updated_at || savedConversation.created_at
|
||||
)) {
|
||||
supersededIds.push(executionId);
|
||||
}
|
||||
}
|
||||
|
||||
supersededIds.forEach(executionId => {
|
||||
activeExecutions.delete(executionId);
|
||||
});
|
||||
|
||||
if (supersededIds.length > 0) {
|
||||
console.log(`[ActiveExec] Removed ${supersededIds.length} superseded execution(s): ${supersededIds.join(', ')}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Update active execution state from hook events
|
||||
* Called by hooks-routes when CLI events are received from terminal execution
|
||||
@@ -240,6 +321,10 @@ export async function handleCliRoutes(ctx: RouteContext): Promise<boolean> {
|
||||
|
||||
// API: Get Active CLI Executions (for state recovery)
|
||||
if (pathname === '/api/cli/active' && req.method === 'GET') {
|
||||
const projectPath = url.searchParams.get('path') || initialPath;
|
||||
cleanupStaleExecutions();
|
||||
cleanupSupersededActiveExecutions(projectPath);
|
||||
|
||||
const executions = getActiveExecutions().map(exec => ({
|
||||
...exec,
|
||||
isComplete: exec.status !== 'running'
|
||||
@@ -537,6 +622,8 @@ export async function handleCliRoutes(ctx: RouteContext): Promise<boolean> {
|
||||
// API: CLI Execution Detail (GET) or Delete (DELETE)
|
||||
if (pathname === '/api/cli/execution') {
|
||||
const projectPath = url.searchParams.get('path') || initialPath;
|
||||
cleanupStaleExecutions();
|
||||
cleanupSupersededActiveExecutions(projectPath);
|
||||
const executionId = url.searchParams.get('id');
|
||||
|
||||
if (!executionId) {
|
||||
@@ -564,10 +651,17 @@ export async function handleCliRoutes(ctx: RouteContext): Promise<boolean> {
|
||||
return true;
|
||||
}
|
||||
|
||||
const conversation = getSavedConversationWithNativeInfo(projectPath, executionId) || getConversationDetailWithNativeInfo(projectPath, executionId);
|
||||
|
||||
// Handle GET request - return conversation with native session info
|
||||
// First check in-memory active executions (for running/recently completed)
|
||||
const activeExec = activeExecutions.get(executionId);
|
||||
if (activeExec) {
|
||||
const shouldPreferSavedConversation = !!activeExec && !!conversation && isSavedExecutionNewerThanActive(
|
||||
normalizeTimestampMs(activeExec.startTime),
|
||||
conversation.updated_at || conversation.created_at
|
||||
);
|
||||
|
||||
if (activeExec && !shouldPreferSavedConversation) {
|
||||
// Return active execution data as conversation record format
|
||||
// Note: Convert output array buffer back to string for API compatibility
|
||||
const activeConversation = {
|
||||
@@ -594,8 +688,6 @@ export async function handleCliRoutes(ctx: RouteContext): Promise<boolean> {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Fall back to database query for saved conversations
|
||||
const conversation = getConversationDetailWithNativeInfo(projectPath, executionId);
|
||||
if (!conversation) {
|
||||
res.writeHead(404, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ error: 'Conversation not found' }));
|
||||
|
||||
@@ -9,7 +9,7 @@ import { join, dirname, resolve } from 'path';
|
||||
import { parseSessionFile, formatConversation, extractConversationPairs, type ParsedSession, type ParsedTurn } from './session-content-parser.js';
|
||||
import { getDiscoverer, getNativeSessions } from './native-session-discovery.js';
|
||||
import { StoragePaths, ensureStorageDir, getProjectId, getCCWHome } from '../config/storage-paths.js';
|
||||
import type { CliOutputUnit } from './cli-output-converter.js';
|
||||
import { createOutputParser, flattenOutputUnits, type CliOutputUnit } from './cli-output-converter.js';
|
||||
|
||||
// Debug logging for history save investigation (Iteration 4)
|
||||
const DEBUG_SESSION_ID = 'DBG-parallel-ccw-cli-test-2026-03-07';
|
||||
@@ -34,6 +34,27 @@ function writeDebugLog(event: string, data: Record<string, any>): void {
|
||||
}
|
||||
}
|
||||
|
||||
function reconstructFinalOutputFromStdout(rawStdout: string, canTrustStdout: boolean): string | undefined {
|
||||
if (!canTrustStdout || !rawStdout.trim()) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
try {
|
||||
const parser = createOutputParser('json-lines');
|
||||
const units = parser.parse(Buffer.from(rawStdout, 'utf8'), 'stdout');
|
||||
units.push(...parser.flush());
|
||||
|
||||
const reconstructed = flattenOutputUnits(units, {
|
||||
includeTypes: ['agent_message'],
|
||||
stripCommandJsonBlocks: true
|
||||
});
|
||||
|
||||
return reconstructed || undefined;
|
||||
} catch {
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
// Types
|
||||
export interface ConversationTurn {
|
||||
turn: number;
|
||||
@@ -764,8 +785,14 @@ export class CliHistoryStore {
|
||||
}
|
||||
|
||||
// Add final output if available (agent_message only for --final flag)
|
||||
if (turn.final_output) {
|
||||
const finalContent = turn.final_output;
|
||||
// For older records that lack final_output, attempt reconstruction from raw JSONL stdout.
|
||||
const canTrustStdoutForFinal = !!(turn.cached || !turn.truncated);
|
||||
const reconstructedFinalOutput = turn.final_output
|
||||
? undefined
|
||||
: reconstructFinalOutputFromStdout(turn.cached ? (turn.stdout_full || '') : (turn.stdout || ''), canTrustStdoutForFinal);
|
||||
const finalContent = turn.final_output ?? reconstructedFinalOutput;
|
||||
|
||||
if (finalContent !== undefined) {
|
||||
const totalBytes = finalContent.length;
|
||||
const content = finalContent.substring(offset, offset + limit);
|
||||
result.finalOutput = {
|
||||
|
||||
@@ -185,6 +185,7 @@ interface ExecuteResult {
|
||||
output?: string;
|
||||
error?: string;
|
||||
message?: string;
|
||||
warning?: string;
|
||||
results?: unknown;
|
||||
files?: unknown;
|
||||
symbols?: unknown;
|
||||
@@ -1228,6 +1229,143 @@ function parseProgressLine(line: string): ProgressInfo | null {
|
||||
return null;
|
||||
}
|
||||
|
||||
function shouldRetryWithoutEnrich(args: string[], error?: string): boolean {
|
||||
return args.includes('--enrich') && Boolean(error && /No such option:\s+--enrich/i.test(error));
|
||||
}
|
||||
|
||||
function shouldRetryWithoutLanguageFilters(args: string[], error?: string): boolean {
|
||||
return args.includes('--language') && Boolean(error && /Got unexpected extra arguments?\b/i.test(error));
|
||||
}
|
||||
|
||||
function stripFlag(args: string[], flag: string): string[] {
|
||||
return args.filter((arg) => arg !== flag);
|
||||
}
|
||||
|
||||
function stripOptionWithValues(args: string[], option: string): string[] {
|
||||
const nextArgs: string[] = [];
|
||||
for (let index = 0; index < args.length; index += 1) {
|
||||
if (args[index] === option) {
|
||||
index += 1;
|
||||
continue;
|
||||
}
|
||||
nextArgs.push(args[index]);
|
||||
}
|
||||
return nextArgs;
|
||||
}
|
||||
|
||||
function shouldRetryWithAstGrepPreference(args: string[], error?: string): boolean {
|
||||
return !args.includes('--use-astgrep')
|
||||
&& !args.includes('--no-use-astgrep')
|
||||
&& Boolean(error && /Options --use-astgrep and --no-use-astgrep are mutually exclusive/i.test(error));
|
||||
}
|
||||
|
||||
function shouldRetryWithStaticGraphPreference(args: string[], error?: string): boolean {
|
||||
return !args.includes('--static-graph')
|
||||
&& !args.includes('--no-static-graph')
|
||||
&& Boolean(error && /Options --static-graph and --no-static-graph are mutually exclusive/i.test(error));
|
||||
}
|
||||
|
||||
function stripAnsiCodes(value: string): string {
|
||||
return value
|
||||
.replace(/\x1b\[[0-9;]*m/g, '')
|
||||
.replace(/\x1b\][0-9;]*\x07/g, '')
|
||||
.replace(/\x1b\][^\x07]*\x07/g, '');
|
||||
}
|
||||
|
||||
function tryExtractJsonPayload(raw: string): unknown | null {
|
||||
const cleanOutput = stripAnsiCodes(raw).trim();
|
||||
const jsonStart = cleanOutput.search(/[\[{]/);
|
||||
if (jsonStart === -1) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const startChar = cleanOutput[jsonStart];
|
||||
const endChar = startChar === '{' ? '}' : ']';
|
||||
let depth = 0;
|
||||
let inString = false;
|
||||
let escapeNext = false;
|
||||
let jsonEnd = -1;
|
||||
|
||||
for (let index = jsonStart; index < cleanOutput.length; index += 1) {
|
||||
const char = cleanOutput[index];
|
||||
|
||||
if (escapeNext) {
|
||||
escapeNext = false;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (char === '\\' && inString) {
|
||||
escapeNext = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (char === '"') {
|
||||
inString = !inString;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!inString) {
|
||||
if (char === startChar) {
|
||||
depth += 1;
|
||||
} else if (char === endChar) {
|
||||
depth -= 1;
|
||||
if (depth === 0) {
|
||||
jsonEnd = index + 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (jsonEnd === -1) {
|
||||
return null;
|
||||
}
|
||||
|
||||
try {
|
||||
return JSON.parse(cleanOutput.slice(jsonStart, jsonEnd));
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function extractStructuredError(payload: unknown): string | null {
|
||||
if (!payload || typeof payload !== 'object' || Array.isArray(payload)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const record = payload as Record<string, unknown>;
|
||||
if (typeof record.error === 'string' && record.error.trim()) {
|
||||
return record.error.trim();
|
||||
}
|
||||
if (typeof record.message === 'string' && record.message.trim()) {
|
||||
return record.message.trim();
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function extractCodexLensFailure(stdout: string, stderr: string, code: number | null): string {
|
||||
const structuredStdout = extractStructuredError(tryExtractJsonPayload(stdout));
|
||||
if (structuredStdout) {
|
||||
return structuredStdout;
|
||||
}
|
||||
|
||||
const structuredStderr = extractStructuredError(tryExtractJsonPayload(stderr));
|
||||
if (structuredStderr) {
|
||||
return structuredStderr;
|
||||
}
|
||||
|
||||
const cleanStdout = stripAnsiCodes(stdout).trim();
|
||||
const cleanStderr = stripAnsiCodes(stderr)
|
||||
.split(/\r?\n/)
|
||||
.map((line) => line.trim())
|
||||
.filter((line) => line && !/^DEBUG\b/i.test(line))
|
||||
.join('\n')
|
||||
.trim();
|
||||
|
||||
return cleanStderr || cleanStdout || stripAnsiCodes(stderr).trim() || `Process exited with code ${code ?? 'unknown'}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute CodexLens CLI command with real-time progress updates
|
||||
* @param args - CLI arguments
|
||||
@@ -1235,6 +1373,64 @@ function parseProgressLine(line: string): ProgressInfo | null {
|
||||
* @returns Execution result
|
||||
*/
|
||||
async function executeCodexLens(args: string[], options: ExecuteOptions = {}): Promise<ExecuteResult> {
|
||||
let attemptArgs = [...args];
|
||||
let result = await executeCodexLensOnce(attemptArgs, options);
|
||||
const compatibilityWarnings: string[] = [];
|
||||
|
||||
const compatibilityRetries = [
|
||||
{
|
||||
shouldRetry: shouldRetryWithoutEnrich,
|
||||
transform: (currentArgs: string[]) => stripFlag(currentArgs, '--enrich'),
|
||||
warning: 'CodexLens CLI does not support --enrich; retried without it.',
|
||||
},
|
||||
{
|
||||
shouldRetry: shouldRetryWithoutLanguageFilters,
|
||||
transform: (currentArgs: string[]) => stripOptionWithValues(currentArgs, '--language'),
|
||||
warning: 'CodexLens CLI rejected --language filters; retried without language scoping.',
|
||||
},
|
||||
{
|
||||
shouldRetry: shouldRetryWithAstGrepPreference,
|
||||
transform: (currentArgs: string[]) => [...currentArgs, '--use-astgrep'],
|
||||
warning: 'CodexLens CLI hit a Typer ast-grep option conflict; retried with explicit --use-astgrep.',
|
||||
},
|
||||
{
|
||||
shouldRetry: shouldRetryWithStaticGraphPreference,
|
||||
transform: (currentArgs: string[]) => [...currentArgs, '--static-graph'],
|
||||
warning: 'CodexLens CLI hit a Typer static-graph option conflict; retried with explicit --static-graph.',
|
||||
},
|
||||
];
|
||||
|
||||
for (const retry of compatibilityRetries) {
|
||||
if (result.success || !retry.shouldRetry(attemptArgs, result.error)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
compatibilityWarnings.push(retry.warning);
|
||||
attemptArgs = retry.transform(attemptArgs);
|
||||
const retryResult = await executeCodexLensOnce(attemptArgs, options);
|
||||
result = retryResult.success
|
||||
? retryResult
|
||||
: {
|
||||
...retryResult,
|
||||
error: retryResult.error
|
||||
? `${retryResult.error} (after compatibility retry; initial error: ${result.error})`
|
||||
: result.error,
|
||||
};
|
||||
}
|
||||
|
||||
if (compatibilityWarnings.length === 0) {
|
||||
return result;
|
||||
}
|
||||
|
||||
const warning = compatibilityWarnings.join(' ');
|
||||
return {
|
||||
...result,
|
||||
warning,
|
||||
message: result.message ? `${result.message} ${warning}` : warning,
|
||||
};
|
||||
}
|
||||
|
||||
async function executeCodexLensOnce(args: string[], options: ExecuteOptions = {}): Promise<ExecuteResult> {
|
||||
const { timeout = 300000, cwd = process.cwd(), onProgress } = options; // Default 5 min
|
||||
|
||||
// Ensure ready
|
||||
@@ -1362,7 +1558,11 @@ async function executeCodexLens(args: string[], options: ExecuteOptions = {}): P
|
||||
if (code === 0) {
|
||||
safeResolve({ success: true, output: stdout.trim() });
|
||||
} else {
|
||||
safeResolve({ success: false, error: stderr.trim() || `Process exited with code ${code}` });
|
||||
safeResolve({
|
||||
success: false,
|
||||
error: extractCodexLensFailure(stdout, stderr, code),
|
||||
output: stdout.trim() || undefined,
|
||||
});
|
||||
}
|
||||
});
|
||||
});
|
||||
@@ -1379,7 +1579,7 @@ async function initIndex(params: Params): Promise<ExecuteResult> {
|
||||
// Use 'index init' subcommand (new CLI structure)
|
||||
const args = ['index', 'init', path];
|
||||
if (languages && languages.length > 0) {
|
||||
args.push('--language', languages.join(','));
|
||||
args.push(...languages.flatMap((language) => ['--language', language]));
|
||||
}
|
||||
|
||||
return executeCodexLens(args, { cwd: path });
|
||||
|
||||
@@ -20,6 +20,8 @@
|
||||
import { z } from 'zod';
|
||||
import type { ToolSchema, ToolResult } from '../types/tool.js';
|
||||
import { spawn, execSync } from 'child_process';
|
||||
import { statSync } from 'fs';
|
||||
import { dirname, resolve } from 'path';
|
||||
import {
|
||||
ensureReady as ensureCodexLensReady,
|
||||
executeCodexLens,
|
||||
@@ -398,17 +400,106 @@ function splitResultsWithExtraFiles<T extends { file: string }>(
|
||||
return { results, extra_files };
|
||||
}
|
||||
|
||||
interface SearchScope {
|
||||
workingDirectory: string;
|
||||
searchPaths: string[];
|
||||
targetFile?: string;
|
||||
}
|
||||
|
||||
function sanitizeSearchQuery(query: string | undefined): string | undefined {
|
||||
if (!query) {
|
||||
return query;
|
||||
}
|
||||
|
||||
return query.replace(/\r?\n\s*/g, ' ').trim();
|
||||
}
|
||||
|
||||
function sanitizeSearchPath(pathValue: string | undefined): string | undefined {
|
||||
if (!pathValue) {
|
||||
return pathValue;
|
||||
}
|
||||
|
||||
return pathValue.replace(/\r?\n\s*/g, '').trim();
|
||||
}
|
||||
|
||||
function resolveSearchScope(pathValue: string = '.', paths: string[] = []): SearchScope {
|
||||
const normalizedPath = sanitizeSearchPath(pathValue) || '.';
|
||||
const normalizedPaths = paths.map((item) => sanitizeSearchPath(item) || item);
|
||||
const fallbackPath = normalizedPath || getProjectRoot();
|
||||
|
||||
try {
|
||||
const resolvedPath = resolve(fallbackPath);
|
||||
const stats = statSync(resolvedPath);
|
||||
|
||||
if (stats.isFile()) {
|
||||
return {
|
||||
workingDirectory: dirname(resolvedPath),
|
||||
searchPaths: normalizedPaths.length > 0 ? normalizedPaths : [resolvedPath],
|
||||
targetFile: resolvedPath,
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
workingDirectory: resolvedPath,
|
||||
searchPaths: normalizedPaths.length > 0 ? normalizedPaths : ['.'],
|
||||
};
|
||||
} catch {
|
||||
return {
|
||||
workingDirectory: fallbackPath,
|
||||
searchPaths: normalizedPaths.length > 0 ? normalizedPaths : [normalizedPath || '.'],
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
function normalizeResultFilePath(filePath: string, workingDirectory: string): string {
|
||||
return resolve(workingDirectory, filePath).replace(/\\/g, '/');
|
||||
}
|
||||
|
||||
function filterResultsToTargetFile<T extends { file: string }>(results: T[], scope: SearchScope): T[] {
|
||||
if (!scope.targetFile) {
|
||||
return results;
|
||||
}
|
||||
|
||||
const normalizedTarget = scope.targetFile.replace(/\\/g, '/');
|
||||
return results.filter((result) => normalizeResultFilePath(result.file, scope.workingDirectory) === normalizedTarget);
|
||||
}
|
||||
|
||||
function collectBackendError(
|
||||
errors: string[],
|
||||
backendName: string,
|
||||
backendResult: PromiseSettledResult<SearchResult>,
|
||||
): void {
|
||||
if (backendResult.status === 'rejected') {
|
||||
errors.push(`${backendName}: ${String(backendResult.reason)}`);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!backendResult.value.success) {
|
||||
errors.push(`${backendName}: ${backendResult.value.error || 'unknown error'}`);
|
||||
}
|
||||
}
|
||||
|
||||
function mergeWarnings(...warnings: Array<string | undefined>): string | undefined {
|
||||
const merged = [...new Set(
|
||||
warnings
|
||||
.filter((warning): warning is string => typeof warning === 'string' && warning.trim().length > 0)
|
||||
.map((warning) => warning.trim())
|
||||
)];
|
||||
return merged.length > 0 ? merged.join(' | ') : undefined;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if CodexLens index exists for current directory
|
||||
* @param path - Directory path to check
|
||||
* @returns Index status
|
||||
*/
|
||||
async function checkIndexStatus(path: string = '.'): Promise<IndexStatus> {
|
||||
const scope = resolveSearchScope(path);
|
||||
try {
|
||||
// Fetch both status and config in parallel
|
||||
const [statusResult, configResult] = await Promise.all([
|
||||
executeCodexLens(['status', '--json'], { cwd: path }),
|
||||
executeCodexLens(['config', 'show', '--json'], { cwd: path }),
|
||||
executeCodexLens(['status', '--json'], { cwd: scope.workingDirectory }),
|
||||
executeCodexLens(['config', 'show', '--json'], { cwd: scope.workingDirectory }),
|
||||
]);
|
||||
|
||||
// Parse config
|
||||
@@ -694,6 +785,7 @@ function buildRipgrepCommand(params: {
|
||||
*/
|
||||
async function executeInitAction(params: Params, force: boolean = false): Promise<SearchResult> {
|
||||
const { path = '.', languages } = params;
|
||||
const scope = resolveSearchScope(path);
|
||||
|
||||
// Check CodexLens availability
|
||||
const readyStatus = await ensureCodexLensReady();
|
||||
@@ -706,12 +798,12 @@ async function executeInitAction(params: Params, force: boolean = false): Promis
|
||||
|
||||
// Build args with --no-embeddings for FTS-only index (faster)
|
||||
// Use 'index init' subcommand (new CLI structure)
|
||||
const args = ['index', 'init', path, '--no-embeddings'];
|
||||
const args = ['index', 'init', scope.workingDirectory, '--no-embeddings'];
|
||||
if (force) {
|
||||
args.push('--force'); // Force full rebuild
|
||||
}
|
||||
if (languages && languages.length > 0) {
|
||||
args.push('--language', languages.join(','));
|
||||
args.push(...languages.flatMap((language) => ['--language', language]));
|
||||
}
|
||||
|
||||
// Track progress updates
|
||||
@@ -719,7 +811,7 @@ async function executeInitAction(params: Params, force: boolean = false): Promis
|
||||
let lastProgress: ProgressInfo | null = null;
|
||||
|
||||
const result = await executeCodexLens(args, {
|
||||
cwd: path,
|
||||
cwd: scope.workingDirectory,
|
||||
timeout: 1800000, // 30 minutes for large codebases
|
||||
onProgress: (progress: ProgressInfo) => {
|
||||
progressUpdates.push(progress);
|
||||
@@ -730,7 +822,7 @@ async function executeInitAction(params: Params, force: boolean = false): Promis
|
||||
// Build metadata with progress info
|
||||
const metadata: SearchMetadata = {
|
||||
action: force ? 'init_force' : 'init',
|
||||
path,
|
||||
path: scope.workingDirectory,
|
||||
};
|
||||
|
||||
if (lastProgress !== null) {
|
||||
@@ -766,8 +858,9 @@ async function executeInitAction(params: Params, force: boolean = false): Promis
|
||||
*/
|
||||
async function executeStatusAction(params: Params): Promise<SearchResult> {
|
||||
const { path = '.' } = params;
|
||||
const scope = resolveSearchScope(path);
|
||||
|
||||
const indexStatus = await checkIndexStatus(path);
|
||||
const indexStatus = await checkIndexStatus(scope.workingDirectory);
|
||||
|
||||
// Build detailed status message
|
||||
const statusParts: string[] = [];
|
||||
@@ -815,6 +908,7 @@ async function executeStatusAction(params: Params): Promise<SearchResult> {
|
||||
*/
|
||||
async function executeUpdateAction(params: Params): Promise<SearchResult> {
|
||||
const { path = '.', languages } = params;
|
||||
const scope = resolveSearchScope(path);
|
||||
|
||||
// Check CodexLens availability
|
||||
const readyStatus = await ensureCodexLensReady();
|
||||
@@ -826,7 +920,7 @@ async function executeUpdateAction(params: Params): Promise<SearchResult> {
|
||||
}
|
||||
|
||||
// Check if index exists first
|
||||
const indexStatus = await checkIndexStatus(path);
|
||||
const indexStatus = await checkIndexStatus(scope.workingDirectory);
|
||||
if (!indexStatus.indexed) {
|
||||
return {
|
||||
success: false,
|
||||
@@ -836,9 +930,9 @@ async function executeUpdateAction(params: Params): Promise<SearchResult> {
|
||||
|
||||
// Build args for incremental init (without --force)
|
||||
// Use 'index init' subcommand (new CLI structure)
|
||||
const args = ['index', 'init', path];
|
||||
const args = ['index', 'init', scope.workingDirectory];
|
||||
if (languages && languages.length > 0) {
|
||||
args.push('--language', languages.join(','));
|
||||
args.push(...languages.flatMap((language) => ['--language', language]));
|
||||
}
|
||||
|
||||
// Track progress updates
|
||||
@@ -846,7 +940,7 @@ async function executeUpdateAction(params: Params): Promise<SearchResult> {
|
||||
let lastProgress: ProgressInfo | null = null;
|
||||
|
||||
const result = await executeCodexLens(args, {
|
||||
cwd: path,
|
||||
cwd: scope.workingDirectory,
|
||||
timeout: 600000, // 10 minutes for incremental updates
|
||||
onProgress: (progress: ProgressInfo) => {
|
||||
progressUpdates.push(progress);
|
||||
@@ -891,6 +985,7 @@ async function executeUpdateAction(params: Params): Promise<SearchResult> {
|
||||
*/
|
||||
async function executeWatchAction(params: Params): Promise<SearchResult> {
|
||||
const { path = '.', languages, debounce = 1000 } = params;
|
||||
const scope = resolveSearchScope(path);
|
||||
|
||||
// Check CodexLens availability
|
||||
const readyStatus = await ensureCodexLensReady();
|
||||
@@ -902,7 +997,7 @@ async function executeWatchAction(params: Params): Promise<SearchResult> {
|
||||
}
|
||||
|
||||
// Check if index exists first
|
||||
const indexStatus = await checkIndexStatus(path);
|
||||
const indexStatus = await checkIndexStatus(scope.workingDirectory);
|
||||
if (!indexStatus.indexed) {
|
||||
return {
|
||||
success: false,
|
||||
@@ -911,15 +1006,15 @@ async function executeWatchAction(params: Params): Promise<SearchResult> {
|
||||
}
|
||||
|
||||
// Build args for watch command
|
||||
const args = ['watch', path, '--debounce', debounce.toString()];
|
||||
const args = ['watch', scope.workingDirectory, '--debounce', debounce.toString()];
|
||||
if (languages && languages.length > 0) {
|
||||
args.push('--language', languages.join(','));
|
||||
args.push(...languages.flatMap((language) => ['--language', language]));
|
||||
}
|
||||
|
||||
// Start watcher in background (non-blocking)
|
||||
// Note: The watcher runs until manually stopped
|
||||
const result = await executeCodexLens(args, {
|
||||
cwd: path,
|
||||
cwd: scope.workingDirectory,
|
||||
timeout: 5000, // Short timeout for initial startup check
|
||||
});
|
||||
|
||||
@@ -975,11 +1070,11 @@ async function executeFuzzyMode(params: Params): Promise<SearchResult> {
|
||||
// If both failed, return error
|
||||
if (resultsMap.size === 0) {
|
||||
const errors: string[] = [];
|
||||
if (ftsResult.status === 'rejected') errors.push(`FTS: ${ftsResult.reason}`);
|
||||
if (ripgrepResult.status === 'rejected') errors.push(`Ripgrep: ${ripgrepResult.reason}`);
|
||||
collectBackendError(errors, 'FTS', ftsResult);
|
||||
collectBackendError(errors, 'Ripgrep', ripgrepResult);
|
||||
return {
|
||||
success: false,
|
||||
error: `Both search backends failed: ${errors.join('; ')}`,
|
||||
error: `Both search backends failed: ${errors.join('; ') || 'unknown error'}`,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1032,6 +1127,7 @@ async function executeFuzzyMode(params: Params): Promise<SearchResult> {
|
||||
*/
|
||||
async function executeAutoMode(params: Params): Promise<SearchResult> {
|
||||
const { query, path = '.' } = params;
|
||||
const scope = resolveSearchScope(path);
|
||||
|
||||
if (!query) {
|
||||
return {
|
||||
@@ -1041,7 +1137,7 @@ async function executeAutoMode(params: Params): Promise<SearchResult> {
|
||||
}
|
||||
|
||||
// Check index status
|
||||
const indexStatus = await checkIndexStatus(path);
|
||||
const indexStatus = await checkIndexStatus(scope.workingDirectory);
|
||||
|
||||
// Classify intent with index and embeddings awareness
|
||||
const classification = classifyIntent(
|
||||
@@ -1098,6 +1194,7 @@ async function executeAutoMode(params: Params): Promise<SearchResult> {
|
||||
*/
|
||||
async function executeRipgrepMode(params: Params): Promise<SearchResult> {
|
||||
const { query, paths = [], contextLines = 0, maxResults = 5, extraFilesCount = 10, maxContentLength = 200, includeHidden = false, path = '.', regex = true, caseSensitive = true, tokenize = true, codeOnly = true, withDoc = false, excludeExtensions } = params;
|
||||
const scope = resolveSearchScope(path, paths);
|
||||
// withDoc overrides codeOnly
|
||||
const effectiveCodeOnly = withDoc ? false : codeOnly;
|
||||
|
||||
@@ -1126,7 +1223,7 @@ async function executeRipgrepMode(params: Params): Promise<SearchResult> {
|
||||
|
||||
// Use CodexLens fts mode as fallback
|
||||
const args = ['search', query, '--limit', totalToFetch.toString(), '--method', 'fts', '--json'];
|
||||
const result = await executeCodexLens(args, { cwd: path });
|
||||
const result = await executeCodexLens(args, { cwd: scope.workingDirectory });
|
||||
|
||||
if (!result.success) {
|
||||
return {
|
||||
@@ -1156,8 +1253,10 @@ async function executeRipgrepMode(params: Params): Promise<SearchResult> {
|
||||
// Keep empty results
|
||||
}
|
||||
|
||||
const scopedResults = filterResultsToTargetFile(allResults, scope);
|
||||
|
||||
// Split results: first N with full content, rest as file paths only
|
||||
const { results, extra_files } = splitResultsWithExtraFiles(allResults, maxResults, extraFilesCount);
|
||||
const { results, extra_files } = splitResultsWithExtraFiles(scopedResults, maxResults, extraFilesCount);
|
||||
|
||||
return {
|
||||
success: true,
|
||||
@@ -1176,7 +1275,7 @@ async function executeRipgrepMode(params: Params): Promise<SearchResult> {
|
||||
// Use ripgrep - request more results to support split
|
||||
const { command, args, tokens } = buildRipgrepCommand({
|
||||
query,
|
||||
paths: paths.length > 0 ? paths : [path],
|
||||
paths: scope.searchPaths,
|
||||
contextLines,
|
||||
maxResults: totalToFetch, // Fetch more to support split
|
||||
includeHidden,
|
||||
@@ -1187,7 +1286,7 @@ async function executeRipgrepMode(params: Params): Promise<SearchResult> {
|
||||
|
||||
return new Promise((resolve) => {
|
||||
const child = spawn(command, args, {
|
||||
cwd: path || getProjectRoot(),
|
||||
cwd: scope.workingDirectory || getProjectRoot(),
|
||||
stdio: ['ignore', 'pipe', 'pipe'],
|
||||
});
|
||||
|
||||
@@ -1312,6 +1411,7 @@ async function executeRipgrepMode(params: Params): Promise<SearchResult> {
|
||||
*/
|
||||
async function executeCodexLensExactMode(params: Params): Promise<SearchResult> {
|
||||
const { query, path = '.', maxResults = 5, extraFilesCount = 10, maxContentLength = 200, enrich = false, excludeExtensions, codeOnly = true, withDoc = false, offset = 0 } = params;
|
||||
const scope = resolveSearchScope(path);
|
||||
// withDoc overrides codeOnly
|
||||
const effectiveCodeOnly = withDoc ? false : codeOnly;
|
||||
|
||||
@@ -1332,7 +1432,7 @@ async function executeCodexLensExactMode(params: Params): Promise<SearchResult>
|
||||
}
|
||||
|
||||
// Check index status
|
||||
const indexStatus = await checkIndexStatus(path);
|
||||
const indexStatus = await checkIndexStatus(scope.workingDirectory);
|
||||
|
||||
// Request more results to support split (full content + extra files)
|
||||
const totalToFetch = maxResults + extraFilesCount;
|
||||
@@ -1348,7 +1448,7 @@ async function executeCodexLensExactMode(params: Params): Promise<SearchResult>
|
||||
if (excludeExtensions && excludeExtensions.length > 0) {
|
||||
args.push('--exclude-extensions', excludeExtensions.join(','));
|
||||
}
|
||||
const result = await executeCodexLens(args, { cwd: path });
|
||||
const result = await executeCodexLens(args, { cwd: scope.workingDirectory });
|
||||
|
||||
if (!result.success) {
|
||||
return {
|
||||
@@ -1359,7 +1459,7 @@ async function executeCodexLensExactMode(params: Params): Promise<SearchResult>
|
||||
backend: 'codexlens',
|
||||
count: 0,
|
||||
query,
|
||||
warning: indexStatus.warning,
|
||||
warning: mergeWarnings(indexStatus.warning, result.warning),
|
||||
},
|
||||
};
|
||||
}
|
||||
@@ -1379,6 +1479,8 @@ async function executeCodexLensExactMode(params: Params): Promise<SearchResult>
|
||||
// Keep empty results
|
||||
}
|
||||
|
||||
allResults = filterResultsToTargetFile(allResults, scope);
|
||||
|
||||
// Fallback to fuzzy mode if exact returns no results
|
||||
if (allResults.length === 0) {
|
||||
const fuzzyArgs = ['search', query, '--limit', totalToFetch.toString(), '--offset', offset.toString(), '--method', 'fts', '--use-fuzzy', '--json'];
|
||||
@@ -1393,18 +1495,18 @@ async function executeCodexLensExactMode(params: Params): Promise<SearchResult>
|
||||
if (excludeExtensions && excludeExtensions.length > 0) {
|
||||
fuzzyArgs.push('--exclude-extensions', excludeExtensions.join(','));
|
||||
}
|
||||
const fuzzyResult = await executeCodexLens(fuzzyArgs, { cwd: path });
|
||||
const fuzzyResult = await executeCodexLens(fuzzyArgs, { cwd: scope.workingDirectory });
|
||||
|
||||
if (fuzzyResult.success) {
|
||||
try {
|
||||
const parsed = JSON.parse(stripAnsi(fuzzyResult.output || '{}'));
|
||||
const data = parsed.result?.results || parsed.results || parsed;
|
||||
allResults = (Array.isArray(data) ? data : []).map((item: any) => ({
|
||||
allResults = filterResultsToTargetFile((Array.isArray(data) ? data : []).map((item: any) => ({
|
||||
file: item.path || item.file,
|
||||
score: item.score || 0,
|
||||
content: truncateContent(item.content || item.excerpt, maxContentLength),
|
||||
symbol: item.symbol || null,
|
||||
}));
|
||||
})), scope);
|
||||
} catch {
|
||||
// Keep empty results
|
||||
}
|
||||
@@ -1421,7 +1523,7 @@ async function executeCodexLensExactMode(params: Params): Promise<SearchResult>
|
||||
backend: 'codexlens',
|
||||
count: results.length,
|
||||
query,
|
||||
warning: indexStatus.warning,
|
||||
warning: mergeWarnings(indexStatus.warning, fuzzyResult.warning),
|
||||
note: 'No exact matches found, showing fuzzy results',
|
||||
fallback: 'fuzzy',
|
||||
},
|
||||
@@ -1442,7 +1544,7 @@ async function executeCodexLensExactMode(params: Params): Promise<SearchResult>
|
||||
backend: 'codexlens',
|
||||
count: results.length,
|
||||
query,
|
||||
warning: indexStatus.warning,
|
||||
warning: mergeWarnings(indexStatus.warning, result.warning),
|
||||
},
|
||||
};
|
||||
}
|
||||
@@ -1455,6 +1557,7 @@ async function executeCodexLensExactMode(params: Params): Promise<SearchResult>
|
||||
async function executeHybridMode(params: Params): Promise<SearchResult> {
|
||||
const timer = createTimer();
|
||||
const { query, path = '.', maxResults = 5, extraFilesCount = 10, maxContentLength = 200, enrich = false, excludeExtensions, codeOnly = true, withDoc = false, offset = 0 } = params;
|
||||
const scope = resolveSearchScope(path);
|
||||
// withDoc overrides codeOnly
|
||||
const effectiveCodeOnly = withDoc ? false : codeOnly;
|
||||
|
||||
@@ -1476,7 +1579,7 @@ async function executeHybridMode(params: Params): Promise<SearchResult> {
|
||||
}
|
||||
|
||||
// Check index status
|
||||
const indexStatus = await checkIndexStatus(path);
|
||||
const indexStatus = await checkIndexStatus(scope.workingDirectory);
|
||||
timer.mark('index_status_check');
|
||||
|
||||
// Request more results to support split (full content + extra files)
|
||||
@@ -1493,7 +1596,7 @@ async function executeHybridMode(params: Params): Promise<SearchResult> {
|
||||
if (excludeExtensions && excludeExtensions.length > 0) {
|
||||
args.push('--exclude-extensions', excludeExtensions.join(','));
|
||||
}
|
||||
const result = await executeCodexLens(args, { cwd: path });
|
||||
const result = await executeCodexLens(args, { cwd: scope.workingDirectory });
|
||||
timer.mark('codexlens_search');
|
||||
|
||||
if (!result.success) {
|
||||
@@ -1506,7 +1609,7 @@ async function executeHybridMode(params: Params): Promise<SearchResult> {
|
||||
backend: 'codexlens',
|
||||
count: 0,
|
||||
query,
|
||||
warning: indexStatus.warning,
|
||||
warning: mergeWarnings(indexStatus.warning, result.warning),
|
||||
},
|
||||
};
|
||||
}
|
||||
@@ -1519,7 +1622,7 @@ async function executeHybridMode(params: Params): Promise<SearchResult> {
|
||||
try {
|
||||
const parsed = JSON.parse(stripAnsi(result.output || '{}'));
|
||||
const data = parsed.result?.results || parsed.results || parsed;
|
||||
allResults = (Array.isArray(data) ? data : []).map((item: any) => {
|
||||
allResults = filterResultsToTargetFile((Array.isArray(data) ? data : []).map((item: any) => {
|
||||
const rawScore = item.score || 0;
|
||||
// Hybrid mode returns distance scores (lower is better).
|
||||
// Convert to similarity scores (higher is better) for consistency.
|
||||
@@ -1531,7 +1634,7 @@ async function executeHybridMode(params: Params): Promise<SearchResult> {
|
||||
content: truncateContent(item.content || item.excerpt, maxContentLength),
|
||||
symbol: item.symbol || null,
|
||||
};
|
||||
});
|
||||
}), scope);
|
||||
timer.mark('parse_results');
|
||||
|
||||
initialCount = allResults.length;
|
||||
@@ -1562,7 +1665,7 @@ async function executeHybridMode(params: Params): Promise<SearchResult> {
|
||||
backend: 'codexlens',
|
||||
count: 0,
|
||||
query,
|
||||
warning: indexStatus.warning || 'Failed to parse JSON output',
|
||||
warning: mergeWarnings(indexStatus.warning, result.warning, 'Failed to parse JSON output'),
|
||||
},
|
||||
};
|
||||
}
|
||||
@@ -1591,7 +1694,7 @@ async function executeHybridMode(params: Params): Promise<SearchResult> {
|
||||
count: results.length,
|
||||
query,
|
||||
note,
|
||||
warning: indexStatus.warning,
|
||||
warning: mergeWarnings(indexStatus.warning, result.warning),
|
||||
suggested_weights: getRRFWeights(query),
|
||||
timing: TIMING_ENABLED ? timings : undefined,
|
||||
},
|
||||
@@ -1943,6 +2046,7 @@ function withTimeout<T>(promise: Promise<T>, ms: number, modeName: string): Prom
|
||||
*/
|
||||
async function executePriorityFallbackMode(params: Params): Promise<SearchResult> {
|
||||
const { query, path = '.' } = params;
|
||||
const scope = resolveSearchScope(path);
|
||||
const fallbackHistory: string[] = [];
|
||||
|
||||
if (!query) {
|
||||
@@ -1950,7 +2054,7 @@ async function executePriorityFallbackMode(params: Params): Promise<SearchResult
|
||||
}
|
||||
|
||||
// Check index status first
|
||||
const indexStatus = await checkIndexStatus(path);
|
||||
const indexStatus = await checkIndexStatus(scope.workingDirectory);
|
||||
|
||||
// 1. Try Hybrid search (highest priority) - 90s timeout for large indexes
|
||||
if (indexStatus.indexed && indexStatus.has_embeddings) {
|
||||
@@ -2034,13 +2138,15 @@ export const schema: ToolSchema = {
|
||||
name: 'smart_search',
|
||||
description: `Unified code search tool. Choose an action and provide its required parameters.
|
||||
|
||||
Recommended MCP flow: use **action=\"search\"** for lookups, **action=\"init\"** to create a static FTS index, and **action=\"update\"** when files change. Use **watch** only for explicit long-running auto-update sessions.
|
||||
|
||||
**Actions & Required Parameters:**
|
||||
|
||||
* **search** (default): Search file content.
|
||||
* **query** (string, **REQUIRED**): Content to search for.
|
||||
* *mode* (string): 'fuzzy' (default, FTS+ripgrep) or 'semantic' (dense+reranker).
|
||||
* *limit* (number): Max results (default: 20).
|
||||
* *path* (string): Directory to search (default: current).
|
||||
* *mode* (string): 'fuzzy' (default, FTS+ripgrep for stage-1 lexical search) or 'semantic' (dense+reranker, best when embeddings exist).
|
||||
* *limit* (number): Max results with full content (default: 5).
|
||||
* *path* (string): Directory or single file to search (default: current directory; file paths are auto-scoped back to that file).
|
||||
* *contextLines* (number): Context lines around matches (default: 0).
|
||||
* *regex* (boolean): Use regex matching (default: true).
|
||||
* *caseSensitive* (boolean): Case-sensitive search (default: true).
|
||||
@@ -2051,11 +2157,11 @@ export const schema: ToolSchema = {
|
||||
* *offset* (number): Pagination offset (default: 0).
|
||||
* *includeHidden* (boolean): Include hidden files (default: false).
|
||||
|
||||
* **init**: Create FTS index (incremental, skips existing).
|
||||
* **init**: Create a static FTS index (incremental, skips existing, no embeddings).
|
||||
* *path* (string): Directory to index (default: current).
|
||||
* *languages* (array): Languages to index (e.g., ["javascript", "typescript"]).
|
||||
|
||||
* **init_force**: Force full rebuild (delete and recreate index).
|
||||
* **init_force**: Force full rebuild (delete and recreate static index).
|
||||
* *path* (string): Directory to index (default: current).
|
||||
|
||||
* **status**: Check index status. (No required params)
|
||||
@@ -2070,7 +2176,7 @@ export const schema: ToolSchema = {
|
||||
smart_search(query="authentication logic") # Content search (default action)
|
||||
smart_search(query="MyClass", mode="semantic") # Semantic search
|
||||
smart_search(action="find_files", pattern="*.ts") # Find TypeScript files
|
||||
smart_search(action="init", path="/project") # Initialize index
|
||||
smart_search(action="init", path="/project") # Build static FTS index
|
||||
smart_search(query="auth", limit=10, offset=0) # Paginated search`,
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
@@ -2078,12 +2184,12 @@ export const schema: ToolSchema = {
|
||||
action: {
|
||||
type: 'string',
|
||||
enum: ['init', 'init_force', 'search', 'find_files', 'status', 'update', 'watch', 'search_files'],
|
||||
description: 'Action: search (content search), find_files (path pattern matching), init (create index, incremental), init_force (force full rebuild), status (check index), update (incremental update), watch (auto-update). Note: search_files is deprecated.',
|
||||
description: 'Action: search (content search; default and recommended), find_files (path pattern matching), init (create static FTS index, incremental), init_force (force full rebuild), status (check index), update (incremental refresh), watch (auto-update watcher; opt-in). Note: search_files is deprecated.',
|
||||
default: 'search',
|
||||
},
|
||||
query: {
|
||||
type: 'string',
|
||||
description: 'Content search query (for action="search")',
|
||||
description: 'Content search query (for action="search"). Recommended default workflow: action=search with fuzzy mode, plus init/update for static indexing.',
|
||||
},
|
||||
pattern: {
|
||||
type: 'string',
|
||||
@@ -2092,7 +2198,7 @@ export const schema: ToolSchema = {
|
||||
mode: {
|
||||
type: 'string',
|
||||
enum: SEARCH_MODES,
|
||||
description: 'Search mode: fuzzy (FTS + ripgrep fusion, default), semantic (dense + reranker for natural language queries)',
|
||||
description: 'Search mode: fuzzy (FTS + ripgrep fusion, default) or semantic (dense + reranker for natural language queries when embeddings exist).',
|
||||
default: 'fuzzy',
|
||||
},
|
||||
output_mode: {
|
||||
@@ -2103,7 +2209,7 @@ export const schema: ToolSchema = {
|
||||
},
|
||||
path: {
|
||||
type: 'string',
|
||||
description: 'Directory path for init/search actions (default: current directory)',
|
||||
description: 'Directory path for init/search actions (default: current directory). For action=search, a single file path is also accepted and results are automatically scoped back to that file.',
|
||||
},
|
||||
paths: {
|
||||
type: 'array',
|
||||
@@ -2120,13 +2226,13 @@ export const schema: ToolSchema = {
|
||||
},
|
||||
maxResults: {
|
||||
type: 'number',
|
||||
description: 'Maximum number of results (default: 20)',
|
||||
default: 20,
|
||||
description: 'Maximum number of full-content results (default: 5)',
|
||||
default: 5,
|
||||
},
|
||||
limit: {
|
||||
type: 'number',
|
||||
description: 'Alias for maxResults (default: 20)',
|
||||
default: 20,
|
||||
description: 'Alias for maxResults (default: 5)',
|
||||
default: 5,
|
||||
},
|
||||
extraFilesCount: {
|
||||
type: 'number',
|
||||
@@ -2184,6 +2290,7 @@ export const schema: ToolSchema = {
|
||||
*/
|
||||
async function executeFindFilesAction(params: Params): Promise<SearchResult> {
|
||||
const { pattern, path = '.', limit = 20, offset = 0, includeHidden = false, caseSensitive = true } = params;
|
||||
const scope = resolveSearchScope(path);
|
||||
|
||||
if (!pattern) {
|
||||
return {
|
||||
@@ -2207,7 +2314,7 @@ async function executeFindFilesAction(params: Params): Promise<SearchResult> {
|
||||
|
||||
// Try CodexLens file list command
|
||||
const args = ['list-files', '--json'];
|
||||
const result = await executeCodexLens(args, { cwd: path });
|
||||
const result = await executeCodexLens(args, { cwd: scope.workingDirectory });
|
||||
|
||||
if (!result.success) {
|
||||
return {
|
||||
@@ -2290,7 +2397,7 @@ async function executeFindFilesAction(params: Params): Promise<SearchResult> {
|
||||
}
|
||||
|
||||
const child = spawn('rg', args, {
|
||||
cwd: path || getProjectRoot(),
|
||||
cwd: scope.workingDirectory || getProjectRoot(),
|
||||
stdio: ['ignore', 'pipe', 'pipe'],
|
||||
});
|
||||
|
||||
@@ -2485,11 +2592,20 @@ export async function handler(params: Record<string, unknown>): Promise<ToolResu
|
||||
return { success: false, error: `Invalid params: ${parsed.error.message}` };
|
||||
}
|
||||
|
||||
parsed.data.query = sanitizeSearchQuery(parsed.data.query);
|
||||
parsed.data.pattern = sanitizeSearchPath(parsed.data.pattern);
|
||||
parsed.data.path = sanitizeSearchPath(parsed.data.path);
|
||||
parsed.data.paths = parsed.data.paths.map((item) => sanitizeSearchPath(item) || item);
|
||||
|
||||
const { action, mode, output_mode, offset = 0 } = parsed.data;
|
||||
|
||||
// Sync limit and maxResults - use the larger of the two if both provided
|
||||
// This ensures user-provided values take precedence over defaults
|
||||
const effectiveLimit = Math.max(parsed.data.limit || 20, parsed.data.maxResults || 20);
|
||||
// Sync limit and maxResults while preserving explicit small values.
|
||||
// If both are provided, use the larger one. If only one is provided, honor it.
|
||||
const rawLimit = typeof params.limit === 'number' ? params.limit : undefined;
|
||||
const rawMaxResults = typeof params.maxResults === 'number' ? params.maxResults : undefined;
|
||||
const effectiveLimit = rawLimit !== undefined && rawMaxResults !== undefined
|
||||
? Math.max(rawLimit, rawMaxResults)
|
||||
: rawMaxResults ?? rawLimit ?? parsed.data.maxResults ?? parsed.data.limit ?? 5;
|
||||
parsed.data.maxResults = effectiveLimit;
|
||||
parsed.data.limit = effectiveLimit;
|
||||
|
||||
@@ -2613,7 +2729,7 @@ export async function executeInitWithProgress(
|
||||
args.push('--force'); // Force full rebuild
|
||||
}
|
||||
if (languages && languages.length > 0) {
|
||||
args.push('--language', languages.join(','));
|
||||
args.push(...languages.flatMap((language) => ['--language', language]));
|
||||
}
|
||||
|
||||
// Track progress updates
|
||||
|
||||
@@ -765,6 +765,37 @@ export class UvManager {
|
||||
}
|
||||
}
|
||||
|
||||
export function getPreferredCodexLensPythonSpec(): string {
|
||||
const override = process.env.CCW_PYTHON?.trim();
|
||||
if (override) {
|
||||
return override;
|
||||
}
|
||||
|
||||
if (!IS_WINDOWS) {
|
||||
return '>=3.10,<3.13';
|
||||
}
|
||||
|
||||
// Prefer 3.11/3.10 on Windows because current CodexLens semantic GPU extras
|
||||
// depend on onnxruntime 1.15.x wheels, which are not consistently available for cp312.
|
||||
const preferredVersions = ['3.11', '3.10', '3.12'];
|
||||
for (const version of preferredVersions) {
|
||||
try {
|
||||
const output = execSync(`py -${version} --version`, {
|
||||
encoding: 'utf-8',
|
||||
timeout: EXEC_TIMEOUTS.PYTHON_VERSION,
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
});
|
||||
if (output.includes(`Python ${version}`)) {
|
||||
return version;
|
||||
}
|
||||
} catch {
|
||||
// Try next installed version
|
||||
}
|
||||
}
|
||||
|
||||
return '>=3.10,<3.13';
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a UvManager with default settings for CodexLens
|
||||
* @param dataDir - Base data directory (defaults to ~/.codexlens)
|
||||
@@ -772,9 +803,10 @@ export class UvManager {
|
||||
*/
|
||||
export function createCodexLensUvManager(dataDir?: string): UvManager {
|
||||
const baseDir = dataDir ?? getCodexLensDataDir();
|
||||
void baseDir;
|
||||
return new UvManager({
|
||||
venvPath: getCodexLensVenvDir(),
|
||||
pythonVersion: '>=3.10,<3.13', // onnxruntime compatibility
|
||||
pythonVersion: getPreferredCodexLensPythonSpec(),
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
162
ccw/tests/cli-output-command-final.test.js
Normal file
162
ccw/tests/cli-output-command-final.test.js
Normal file
@@ -0,0 +1,162 @@
|
||||
/**
|
||||
* ccw cli output --final regression tests
|
||||
*
|
||||
* Verifies strict final-result behavior for cached executions.
|
||||
*/
|
||||
|
||||
import { after, afterEach, before, describe, it, mock } from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
import { existsSync, mkdirSync, mkdtempSync, rmSync } from 'node:fs';
|
||||
import { tmpdir } from 'node:os';
|
||||
import { join } from 'node:path';
|
||||
|
||||
const TEST_CCW_HOME = mkdtempSync(join(tmpdir(), 'ccw-cli-output-final-home-'));
|
||||
|
||||
const cliCommandPath = new URL('../dist/commands/cli.js', import.meta.url).href;
|
||||
const historyStorePath = new URL('../dist/tools/cli-history-store.js', import.meta.url).href;
|
||||
|
||||
function createTestProjectRoot() {
|
||||
const dirPath = mkdtempSync(join(tmpdir(), 'ccw-cli-output-final-project-'));
|
||||
if (!existsSync(dirPath)) {
|
||||
mkdirSync(dirPath, { recursive: true });
|
||||
}
|
||||
return dirPath;
|
||||
}
|
||||
|
||||
function createConversation({ id, stdoutFull = '', parsedOutput, finalOutput }) {
|
||||
return {
|
||||
id,
|
||||
created_at: new Date().toISOString(),
|
||||
updated_at: new Date().toISOString(),
|
||||
tool: 'codex',
|
||||
model: 'default',
|
||||
mode: 'analysis',
|
||||
category: 'user',
|
||||
total_duration_ms: 100,
|
||||
turn_count: 1,
|
||||
latest_status: 'success',
|
||||
turns: [
|
||||
{
|
||||
turn: 1,
|
||||
timestamp: new Date().toISOString(),
|
||||
prompt: 'test prompt',
|
||||
duration_ms: 100,
|
||||
status: 'success',
|
||||
exit_code: 0,
|
||||
output: {
|
||||
stdout: stdoutFull,
|
||||
stderr: '',
|
||||
truncated: false,
|
||||
cached: true,
|
||||
stdout_full: stdoutFull,
|
||||
stderr_full: '',
|
||||
parsed_output: parsedOutput,
|
||||
final_output: finalOutput,
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
describe('ccw cli output --final', async () => {
|
||||
let cliModule;
|
||||
let historyStoreModule;
|
||||
|
||||
before(async () => {
|
||||
process.env.CCW_DATA_DIR = TEST_CCW_HOME;
|
||||
cliModule = await import(cliCommandPath);
|
||||
historyStoreModule = await import(historyStorePath);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
mock.restoreAll();
|
||||
try {
|
||||
historyStoreModule?.closeAllStores?.();
|
||||
} catch {
|
||||
// ignore
|
||||
}
|
||||
});
|
||||
|
||||
after(() => {
|
||||
try {
|
||||
historyStoreModule?.closeAllStores?.();
|
||||
} catch {
|
||||
// ignore
|
||||
}
|
||||
rmSync(TEST_CCW_HOME, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it('reconstructs the final agent message from raw JSONL when final_output is missing', async () => {
|
||||
const projectRoot = createTestProjectRoot();
|
||||
const store = new historyStoreModule.CliHistoryStore(projectRoot);
|
||||
const stdoutFull = [
|
||||
JSON.stringify({ type: 'thread.started', thread_id: 'THREAD-1' }),
|
||||
JSON.stringify({ type: 'turn.started' }),
|
||||
JSON.stringify({ type: 'item.completed', item: { id: 'item_0', type: 'agent_message', text: 'Running `pwd` now.' } }),
|
||||
JSON.stringify({ type: 'item.completed', item: { id: 'item_1', type: 'command_execution', command: 'pwd', aggregated_output: 'D:\\Claude_dms3\\ccw', exit_code: 0, status: 'completed' } }),
|
||||
JSON.stringify({ type: 'item.completed', item: { id: 'item_2', type: 'agent_message', text: 'Waiting for the command output, then I’ll return it verbatim.' } }),
|
||||
JSON.stringify({ type: 'item.completed', item: { id: 'item_3', type: 'agent_message', text: 'D:\\Claude_dms3\\ccw' } }),
|
||||
JSON.stringify({ type: 'turn.completed', usage: { input_tokens: 1, output_tokens: 1 } }),
|
||||
'',
|
||||
].join('\n');
|
||||
|
||||
try {
|
||||
store.saveConversation(createConversation({
|
||||
id: 'EXEC-RECONSTRUCT-FINAL',
|
||||
stdoutFull,
|
||||
parsedOutput: 'Running `pwd` now.\nWaiting for the command output, then I’ll return it verbatim.\nD:\\Claude_dms3\\ccw',
|
||||
finalOutput: undefined,
|
||||
}));
|
||||
|
||||
const logs = [];
|
||||
mock.method(console, 'log', (...args) => {
|
||||
logs.push(args.map(String).join(' '));
|
||||
});
|
||||
mock.method(console, 'error', () => {});
|
||||
|
||||
await cliModule.cliCommand('output', ['EXEC-RECONSTRUCT-FINAL'], { final: true, project: projectRoot });
|
||||
|
||||
assert.deepEqual(logs, ['D:\\Claude_dms3\\ccw']);
|
||||
} finally {
|
||||
store.close();
|
||||
rmSync(projectRoot, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it('fails fast for explicit --final when no final agent result can be recovered', async () => {
|
||||
const projectRoot = createTestProjectRoot();
|
||||
const store = new historyStoreModule.CliHistoryStore(projectRoot);
|
||||
|
||||
try {
|
||||
store.saveConversation(createConversation({
|
||||
id: 'EXEC-NO-FINAL',
|
||||
stdoutFull: 'plain stdout without JSONL final message',
|
||||
parsedOutput: 'INTERMEDIATE_STATUS_LINE',
|
||||
finalOutput: undefined,
|
||||
}));
|
||||
|
||||
const logs = [];
|
||||
const errors = [];
|
||||
const exitCodes = [];
|
||||
|
||||
mock.method(console, 'log', (...args) => {
|
||||
logs.push(args.map(String).join(' '));
|
||||
});
|
||||
mock.method(console, 'error', (...args) => {
|
||||
errors.push(args.map(String).join(' '));
|
||||
});
|
||||
mock.method(process, 'exit', (code) => {
|
||||
exitCodes.push(code);
|
||||
});
|
||||
|
||||
await cliModule.cliCommand('output', ['EXEC-NO-FINAL'], { final: true, project: projectRoot });
|
||||
|
||||
assert.deepEqual(logs, []);
|
||||
assert.deepEqual(exitCodes, [1]);
|
||||
assert.ok(errors.some((line) => line.includes('No final agent result found in cached output.')));
|
||||
} finally {
|
||||
store.close();
|
||||
rmSync(projectRoot, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
263
ccw/tests/cli-show-running-time.test.js
Normal file
263
ccw/tests/cli-show-running-time.test.js
Normal file
@@ -0,0 +1,263 @@
|
||||
/**
|
||||
* ccw cli show - running execution time formatting tests
|
||||
*/
|
||||
|
||||
import { after, afterEach, before, describe, it, mock } from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
import http from 'node:http';
|
||||
import { mkdtempSync, rmSync } from 'node:fs';
|
||||
import { tmpdir } from 'node:os';
|
||||
import { join } from 'node:path';
|
||||
|
||||
const TEST_CCW_HOME = mkdtempSync(join(tmpdir(), 'ccw-cli-show-time-home-'));
|
||||
process.env.CCW_DATA_DIR = TEST_CCW_HOME;
|
||||
|
||||
const cliCommandPath = new URL('../dist/commands/cli.js', import.meta.url).href;
|
||||
const historyStorePath = new URL('../dist/tools/cli-history-store.js', import.meta.url).href;
|
||||
|
||||
function createConversationRecord({ id, prompt, updatedAt, durationMs = 2000 }) {
|
||||
return {
|
||||
id,
|
||||
created_at: updatedAt,
|
||||
updated_at: updatedAt,
|
||||
tool: 'codex',
|
||||
model: 'default',
|
||||
mode: 'analysis',
|
||||
category: 'user',
|
||||
total_duration_ms: durationMs,
|
||||
turn_count: 1,
|
||||
latest_status: 'success',
|
||||
turns: [
|
||||
{
|
||||
turn: 1,
|
||||
timestamp: updatedAt,
|
||||
prompt,
|
||||
duration_ms: durationMs,
|
||||
status: 'success',
|
||||
exit_code: 0,
|
||||
output: {
|
||||
stdout: 'saved output',
|
||||
stderr: '',
|
||||
truncated: false,
|
||||
cached: false,
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
function stubActiveExecutionsResponse(executions) {
|
||||
mock.method(http, 'request', (_options, callback) => {
|
||||
const payload = JSON.stringify({ executions });
|
||||
const res = {
|
||||
on(event, handler) {
|
||||
if (event === 'data') {
|
||||
handler(Buffer.from(payload, 'utf8'));
|
||||
}
|
||||
if (event === 'end') {
|
||||
handler();
|
||||
}
|
||||
return res;
|
||||
}
|
||||
};
|
||||
|
||||
if (callback) {
|
||||
callback(res);
|
||||
}
|
||||
|
||||
const req = {
|
||||
on() { return req; },
|
||||
write() {},
|
||||
end() {},
|
||||
destroy() {},
|
||||
};
|
||||
return req;
|
||||
});
|
||||
}
|
||||
|
||||
describe('ccw cli show running time formatting', async () => {
|
||||
let cliModule;
|
||||
let historyStoreModule;
|
||||
|
||||
before(async () => {
|
||||
cliModule = await import(cliCommandPath);
|
||||
historyStoreModule = await import(historyStorePath);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
mock.restoreAll();
|
||||
try {
|
||||
historyStoreModule?.closeAllStores?.();
|
||||
} catch {
|
||||
// ignore
|
||||
}
|
||||
});
|
||||
|
||||
after(() => {
|
||||
try {
|
||||
historyStoreModule?.closeAllStores?.();
|
||||
} catch {
|
||||
// ignore
|
||||
}
|
||||
rmSync(TEST_CCW_HOME, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it('formats running time with the same humanized style as history rows', async () => {
|
||||
const now = 1_741_392_000_000;
|
||||
stubActiveExecutionsResponse([
|
||||
{
|
||||
id: 'EXEC-RUN-125S',
|
||||
tool: 'codex',
|
||||
mode: 'analysis',
|
||||
status: 'running',
|
||||
prompt: 'long task',
|
||||
startTime: now - 125_000,
|
||||
output: ''
|
||||
}
|
||||
]);
|
||||
|
||||
mock.method(Date, 'now', () => now);
|
||||
|
||||
const logs = [];
|
||||
mock.method(console, 'log', (...args) => {
|
||||
logs.push(args.map(String).join(' '));
|
||||
});
|
||||
mock.method(console, 'error', () => {});
|
||||
|
||||
await cliModule.cliCommand('show', [], {});
|
||||
|
||||
const rendered = logs.join('\n');
|
||||
assert.match(rendered, /2m ago/);
|
||||
assert.match(rendered, /2m 5s\.\.\./);
|
||||
assert.doesNotMatch(rendered, /125s ago/);
|
||||
});
|
||||
|
||||
it('normalizes second-based string timestamps for running executions', async () => {
|
||||
const now = 1_741_392_000_000;
|
||||
const startTimeSeconds = String(Math.floor((now - 3_600_000) / 1000));
|
||||
|
||||
stubActiveExecutionsResponse([
|
||||
{
|
||||
id: 'EXEC-RUN-1H',
|
||||
tool: 'gemini',
|
||||
mode: 'write',
|
||||
status: 'running',
|
||||
prompt: 'hour task',
|
||||
startTime: startTimeSeconds,
|
||||
output: ''
|
||||
}
|
||||
]);
|
||||
|
||||
mock.method(Date, 'now', () => now);
|
||||
|
||||
const logs = [];
|
||||
mock.method(console, 'log', (...args) => {
|
||||
logs.push(args.map(String).join(' '));
|
||||
});
|
||||
mock.method(console, 'error', () => {});
|
||||
|
||||
await cliModule.cliCommand('show', [], {});
|
||||
|
||||
const rendered = logs.join('\n');
|
||||
assert.match(rendered, /1h ago/);
|
||||
assert.match(rendered, /1h\.\.\./);
|
||||
});
|
||||
|
||||
it('suppresses stale running rows when saved history is newer than the active start time', async () => {
|
||||
const projectRoot = mkdtempSync(join(tmpdir(), 'ccw-cli-show-stale-project-'));
|
||||
const previousCwd = process.cwd();
|
||||
const now = 1_741_392_000_000;
|
||||
const savedUpdatedAt = new Date(now - 5_000).toISOString();
|
||||
|
||||
try {
|
||||
process.chdir(projectRoot);
|
||||
const store = new historyStoreModule.CliHistoryStore(projectRoot);
|
||||
store.saveConversation(createConversationRecord({
|
||||
id: 'EXEC-STALE-RUNNING',
|
||||
prompt: 'HISTORY PROMPT SHOULD WIN',
|
||||
updatedAt: savedUpdatedAt,
|
||||
durationMs: 2300,
|
||||
}));
|
||||
store.close();
|
||||
|
||||
stubActiveExecutionsResponse([
|
||||
{
|
||||
id: 'EXEC-STALE-RUNNING',
|
||||
tool: 'codex',
|
||||
mode: 'analysis',
|
||||
status: 'running',
|
||||
prompt: 'ACTIVE PROMPT SHOULD BE HIDDEN',
|
||||
startTime: now - 60_000,
|
||||
output: ''
|
||||
}
|
||||
]);
|
||||
|
||||
mock.method(Date, 'now', () => now);
|
||||
|
||||
const logs = [];
|
||||
mock.method(console, 'log', (...args) => {
|
||||
logs.push(args.map(String).join(' '));
|
||||
});
|
||||
mock.method(console, 'error', () => {});
|
||||
|
||||
await cliModule.cliCommand('show', [], {});
|
||||
|
||||
const rendered = logs.join('\n');
|
||||
assert.match(rendered, /HISTORY PROMPT SHOULD WIN/);
|
||||
assert.doesNotMatch(rendered, /ACTIVE PROMPT SHOULD BE HIDDEN/);
|
||||
assert.match(rendered, /2\.3s/);
|
||||
} finally {
|
||||
process.chdir(previousCwd);
|
||||
rmSync(projectRoot, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it('keeps active running rows when saved history is older than the active start time \(resume-safe\)', async () => {
|
||||
const projectRoot = mkdtempSync(join(tmpdir(), 'ccw-cli-show-resume-project-'));
|
||||
const previousCwd = process.cwd();
|
||||
const now = 1_741_392_000_000;
|
||||
const savedUpdatedAt = new Date(now - 120_000).toISOString();
|
||||
|
||||
try {
|
||||
process.chdir(projectRoot);
|
||||
const store = new historyStoreModule.CliHistoryStore(projectRoot);
|
||||
store.saveConversation(createConversationRecord({
|
||||
id: 'EXEC-RESUME-RUNNING',
|
||||
prompt: 'OLD HISTORY PROMPT',
|
||||
updatedAt: savedUpdatedAt,
|
||||
durationMs: 1800,
|
||||
}));
|
||||
store.close();
|
||||
|
||||
stubActiveExecutionsResponse([
|
||||
{
|
||||
id: 'EXEC-RESUME-RUNNING',
|
||||
tool: 'codex',
|
||||
mode: 'analysis',
|
||||
status: 'running',
|
||||
prompt: 'ACTIVE RESUME PROMPT',
|
||||
startTime: now - 30_000,
|
||||
output: ''
|
||||
}
|
||||
]);
|
||||
|
||||
mock.method(Date, 'now', () => now);
|
||||
|
||||
const logs = [];
|
||||
mock.method(console, 'log', (...args) => {
|
||||
logs.push(args.map(String).join(' '));
|
||||
});
|
||||
mock.method(console, 'error', () => {});
|
||||
|
||||
await cliModule.cliCommand('show', [], {});
|
||||
|
||||
const rendered = logs.join('\n');
|
||||
assert.match(rendered, /ACTIVE RESUME PROMPT/);
|
||||
assert.doesNotMatch(rendered, /OLD HISTORY PROMPT/);
|
||||
assert.match(rendered, /just now/);
|
||||
} finally {
|
||||
process.chdir(previousCwd);
|
||||
rmSync(projectRoot, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
131
ccw/tests/cli-watch-stale-running.test.js
Normal file
131
ccw/tests/cli-watch-stale-running.test.js
Normal file
@@ -0,0 +1,131 @@
|
||||
/**
|
||||
* ccw cli watch - stale running fallback tests
|
||||
*/
|
||||
|
||||
import { after, afterEach, before, describe, it, mock } from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
import http from 'node:http';
|
||||
import { mkdtempSync, rmSync } from 'node:fs';
|
||||
import { tmpdir } from 'node:os';
|
||||
import { join } from 'node:path';
|
||||
|
||||
const TEST_CCW_HOME = mkdtempSync(join(tmpdir(), 'ccw-cli-watch-home-'));
|
||||
process.env.CCW_DATA_DIR = TEST_CCW_HOME;
|
||||
|
||||
const cliCommandPath = new URL('../dist/commands/cli.js', import.meta.url).href;
|
||||
const historyStorePath = new URL('../dist/tools/cli-history-store.js', import.meta.url).href;
|
||||
|
||||
describe('ccw cli watch stale running fallback', async () => {
|
||||
let cliModule;
|
||||
let historyStoreModule;
|
||||
|
||||
before(async () => {
|
||||
cliModule = await import(cliCommandPath);
|
||||
historyStoreModule = await import(historyStorePath);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
mock.restoreAll();
|
||||
try {
|
||||
historyStoreModule?.closeAllStores?.();
|
||||
} catch {
|
||||
// ignore
|
||||
}
|
||||
});
|
||||
|
||||
after(() => {
|
||||
try {
|
||||
historyStoreModule?.closeAllStores?.();
|
||||
} catch {
|
||||
// ignore
|
||||
}
|
||||
rmSync(TEST_CCW_HOME, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it('treats stale active running state as completed when saved conversation is newer', async () => {
|
||||
const projectRoot = mkdtempSync(join(tmpdir(), 'ccw-cli-watch-project-'));
|
||||
const previousCwd = process.cwd();
|
||||
const now = Date.now();
|
||||
const executionId = `EXEC-WATCH-STALE-${now}`;
|
||||
|
||||
try {
|
||||
process.chdir(projectRoot);
|
||||
const store = new historyStoreModule.CliHistoryStore(projectRoot);
|
||||
store.saveConversation({
|
||||
id: executionId,
|
||||
created_at: new Date(now - 10_000).toISOString(),
|
||||
updated_at: new Date(now - 5_000).toISOString(),
|
||||
tool: 'codex',
|
||||
model: 'default',
|
||||
mode: 'analysis',
|
||||
category: 'user',
|
||||
total_duration_ms: 2100,
|
||||
turn_count: 1,
|
||||
latest_status: 'success',
|
||||
turns: [{
|
||||
turn: 1,
|
||||
timestamp: new Date(now - 5_000).toISOString(),
|
||||
prompt: 'saved prompt',
|
||||
duration_ms: 2100,
|
||||
status: 'success',
|
||||
exit_code: 0,
|
||||
output: {
|
||||
stdout: 'saved output',
|
||||
stderr: '',
|
||||
truncated: false,
|
||||
cached: false,
|
||||
}
|
||||
}]
|
||||
});
|
||||
store.close();
|
||||
|
||||
mock.method(http, 'request', (_options, callback) => {
|
||||
const payload = JSON.stringify({
|
||||
executions: [{
|
||||
id: executionId,
|
||||
tool: 'codex',
|
||||
mode: 'analysis',
|
||||
status: 'running',
|
||||
prompt: 'stale active prompt',
|
||||
startTime: now - 60_000,
|
||||
output: ''
|
||||
}]
|
||||
});
|
||||
const res = {
|
||||
on(event, handler) {
|
||||
if (event === 'data') handler(Buffer.from(payload, 'utf8'));
|
||||
if (event === 'end') handler();
|
||||
return res;
|
||||
}
|
||||
};
|
||||
if (callback) callback(res);
|
||||
const req = {
|
||||
on() { return req; },
|
||||
end() {},
|
||||
destroy() {},
|
||||
};
|
||||
return req;
|
||||
});
|
||||
|
||||
const stderrWrites = [];
|
||||
const exitCodes = [];
|
||||
mock.method(process.stderr, 'write', (chunk) => {
|
||||
stderrWrites.push(String(chunk));
|
||||
return true;
|
||||
});
|
||||
mock.method(process, 'exit', (code) => {
|
||||
exitCodes.push(code);
|
||||
});
|
||||
|
||||
await cliModule.cliCommand('watch', [executionId], { timeout: '1' });
|
||||
|
||||
const rendered = stderrWrites.join('');
|
||||
assert.match(rendered, /Execution already completed/);
|
||||
assert.match(rendered, new RegExp(`Use: ccw cli output ${executionId}`));
|
||||
assert.deepEqual(exitCodes, [0]);
|
||||
} finally {
|
||||
process.chdir(previousCwd);
|
||||
rmSync(projectRoot, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
35
ccw/tests/codex-lens-cli-compat.test.js
Normal file
35
ccw/tests/codex-lens-cli-compat.test.js
Normal file
@@ -0,0 +1,35 @@
|
||||
import { after, describe, it } from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
import { mkdtempSync, rmSync, writeFileSync } from 'node:fs';
|
||||
import { join } from 'node:path';
|
||||
import { tmpdir } from 'node:os';
|
||||
|
||||
const tempDirs = [];
|
||||
|
||||
after(() => {
|
||||
for (const dir of tempDirs) {
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
describe('CodexLens CLI compatibility retries', () => {
|
||||
it('initializes a tiny index even when CLI emits compatibility conflicts first', async () => {
|
||||
const moduleUrl = new URL(`../dist/tools/codex-lens.js?compat=${Date.now()}`, import.meta.url).href;
|
||||
const { checkVenvStatus, executeCodexLens } = await import(moduleUrl);
|
||||
|
||||
const ready = await checkVenvStatus(true);
|
||||
if (!ready.ready) {
|
||||
console.log('Skipping: CodexLens not ready');
|
||||
return;
|
||||
}
|
||||
|
||||
const projectDir = mkdtempSync(join(tmpdir(), 'codexlens-init-'));
|
||||
tempDirs.push(projectDir);
|
||||
writeFileSync(join(projectDir, 'sample.ts'), 'export const sample = 1;\n');
|
||||
|
||||
const result = await executeCodexLens(['index', 'init', projectDir, '--force'], { timeout: 600000 });
|
||||
|
||||
assert.equal(result.success, true, result.error ?? 'Expected init to succeed');
|
||||
assert.ok((result.output ?? '').length > 0 || (result.warning ?? '').length > 0, 'Expected init output or compatibility warning');
|
||||
});
|
||||
});
|
||||
@@ -199,6 +199,202 @@ describe('cli routes integration', async () => {
|
||||
}
|
||||
});
|
||||
|
||||
it('GET /api/cli/execution prefers newer saved conversation over stale active running state', async () => {
|
||||
const broadcasts: any[] = [];
|
||||
const { server, baseUrl } = await createServer(PROJECT_ROOT, broadcasts);
|
||||
const historyStoreUrl = new URL('../../dist/tools/cli-history-store.js', import.meta.url);
|
||||
const historyStoreMod: any = await import(historyStoreUrl.href);
|
||||
const executionId = `EXEC-STALE-DETAIL-${Date.now()}`;
|
||||
const now = 1_741_392_000_000;
|
||||
|
||||
try {
|
||||
const store = new historyStoreMod.CliHistoryStore(PROJECT_ROOT);
|
||||
store.saveConversation({
|
||||
id: executionId,
|
||||
created_at: new Date(now - 10_000).toISOString(),
|
||||
updated_at: new Date(now - 5_000).toISOString(),
|
||||
tool: 'codex',
|
||||
model: 'default',
|
||||
mode: 'analysis',
|
||||
category: 'user',
|
||||
total_duration_ms: 2300,
|
||||
turn_count: 1,
|
||||
latest_status: 'success',
|
||||
turns: [{
|
||||
turn: 1,
|
||||
timestamp: new Date(now - 5_000).toISOString(),
|
||||
prompt: 'SAVED DETAIL SHOULD WIN',
|
||||
duration_ms: 2300,
|
||||
status: 'success',
|
||||
exit_code: 0,
|
||||
output: {
|
||||
stdout: 'saved output',
|
||||
stderr: '',
|
||||
truncated: false,
|
||||
cached: false,
|
||||
}
|
||||
}]
|
||||
});
|
||||
store.close();
|
||||
|
||||
mock.method(Date, 'now', () => now - 60_000);
|
||||
mod.updateActiveExecution({
|
||||
type: 'started',
|
||||
executionId,
|
||||
tool: 'codex',
|
||||
mode: 'analysis',
|
||||
prompt: 'STALE ACTIVE DETAIL'
|
||||
});
|
||||
mock.restoreAll();
|
||||
mock.method(console, 'log', () => {});
|
||||
mock.method(console, 'error', () => {});
|
||||
|
||||
const res = await requestJson(
|
||||
baseUrl,
|
||||
'GET',
|
||||
`/api/cli/execution?path=${encodeURIComponent(PROJECT_ROOT)}&id=${encodeURIComponent(executionId)}`,
|
||||
);
|
||||
|
||||
assert.equal(res.status, 200);
|
||||
assert.equal(res.json?._active, undefined);
|
||||
assert.equal(res.json?.turns?.[0]?.prompt, 'SAVED DETAIL SHOULD WIN');
|
||||
assert.equal(res.json?.latest_status, 'success');
|
||||
} finally {
|
||||
await new Promise<void>((resolve) => server.close(() => resolve()));
|
||||
}
|
||||
});
|
||||
|
||||
it('GET /api/cli/active filters stale running state when saved conversation is newer', async () => {
|
||||
const broadcasts: any[] = [];
|
||||
const { server, baseUrl } = await createServer(PROJECT_ROOT, broadcasts);
|
||||
const historyStoreUrl = new URL('../../dist/tools/cli-history-store.js', import.meta.url);
|
||||
const historyStoreMod: any = await import(historyStoreUrl.href);
|
||||
const executionId = `EXEC-STALE-ACTIVE-${Date.now()}`;
|
||||
const now = 1_741_392_500_000;
|
||||
|
||||
try {
|
||||
const store = new historyStoreMod.CliHistoryStore(PROJECT_ROOT);
|
||||
store.saveConversation({
|
||||
id: executionId,
|
||||
created_at: new Date(now - 12_000).toISOString(),
|
||||
updated_at: new Date(now - 4_000).toISOString(),
|
||||
tool: 'codex',
|
||||
model: 'default',
|
||||
mode: 'analysis',
|
||||
category: 'user',
|
||||
total_duration_ms: 3200,
|
||||
turn_count: 1,
|
||||
latest_status: 'success',
|
||||
turns: [{
|
||||
turn: 1,
|
||||
timestamp: new Date(now - 4_000).toISOString(),
|
||||
prompt: 'SAVED ACTIVE SHOULD WIN',
|
||||
duration_ms: 3200,
|
||||
status: 'success',
|
||||
exit_code: 0,
|
||||
output: {
|
||||
stdout: 'saved output',
|
||||
stderr: '',
|
||||
truncated: false,
|
||||
cached: false,
|
||||
}
|
||||
}]
|
||||
});
|
||||
store.close();
|
||||
|
||||
mock.method(Date, 'now', () => now - 60_000);
|
||||
mod.updateActiveExecution({
|
||||
type: 'started',
|
||||
executionId,
|
||||
tool: 'codex',
|
||||
mode: 'analysis',
|
||||
prompt: 'STALE ACTIVE SHOULD DISAPPEAR'
|
||||
});
|
||||
mock.restoreAll();
|
||||
mock.method(console, 'log', () => {});
|
||||
mock.method(console, 'error', () => {});
|
||||
|
||||
const res = await requestJson(
|
||||
baseUrl,
|
||||
'GET',
|
||||
`/api/cli/active?path=${encodeURIComponent(PROJECT_ROOT)}`,
|
||||
);
|
||||
|
||||
assert.equal(res.status, 200);
|
||||
assert.equal(Array.isArray(res.json?.executions), true);
|
||||
assert.equal(res.json.executions.some((exec: any) => exec.id === executionId), false);
|
||||
} finally {
|
||||
await new Promise<void>((resolve) => server.close(() => resolve()));
|
||||
}
|
||||
});
|
||||
|
||||
it('GET /api/cli/active keeps running state when saved conversation is older', async () => {
|
||||
const broadcasts: any[] = [];
|
||||
const { server, baseUrl } = await createServer(PROJECT_ROOT, broadcasts);
|
||||
const historyStoreUrl = new URL('../../dist/tools/cli-history-store.js', import.meta.url);
|
||||
const historyStoreMod: any = await import(historyStoreUrl.href);
|
||||
const executionId = `EXEC-ACTIVE-RESUME-${Date.now()}`;
|
||||
const now = 1_741_393_000_000;
|
||||
|
||||
try {
|
||||
const store = new historyStoreMod.CliHistoryStore(PROJECT_ROOT);
|
||||
store.saveConversation({
|
||||
id: executionId,
|
||||
created_at: new Date(now - 120_000).toISOString(),
|
||||
updated_at: new Date(now - 110_000).toISOString(),
|
||||
tool: 'codex',
|
||||
model: 'default',
|
||||
mode: 'analysis',
|
||||
category: 'user',
|
||||
total_duration_ms: 1200,
|
||||
turn_count: 1,
|
||||
latest_status: 'success',
|
||||
turns: [{
|
||||
turn: 1,
|
||||
timestamp: new Date(now - 110_000).toISOString(),
|
||||
prompt: 'OLDER SAVED TURN',
|
||||
duration_ms: 1200,
|
||||
status: 'success',
|
||||
exit_code: 0,
|
||||
output: {
|
||||
stdout: 'older output',
|
||||
stderr: '',
|
||||
truncated: false,
|
||||
cached: false,
|
||||
}
|
||||
}]
|
||||
});
|
||||
store.close();
|
||||
|
||||
mock.method(Date, 'now', () => now - 20_000);
|
||||
mod.updateActiveExecution({
|
||||
type: 'started',
|
||||
executionId,
|
||||
tool: 'codex',
|
||||
mode: 'analysis',
|
||||
prompt: 'NEWER ACTIVE SHOULD STAY'
|
||||
});
|
||||
mock.restoreAll();
|
||||
mock.method(console, 'log', () => {});
|
||||
mock.method(console, 'error', () => {});
|
||||
|
||||
const res = await requestJson(
|
||||
baseUrl,
|
||||
'GET',
|
||||
`/api/cli/active?path=${encodeURIComponent(PROJECT_ROOT)}`,
|
||||
);
|
||||
|
||||
assert.equal(res.status, 200);
|
||||
assert.equal(Array.isArray(res.json?.executions), true);
|
||||
const activeExecution = res.json.executions.find((exec: any) => exec.id === executionId);
|
||||
assert.ok(activeExecution);
|
||||
assert.equal(activeExecution.status, 'running');
|
||||
assert.equal(activeExecution.prompt, 'NEWER ACTIVE SHOULD STAY');
|
||||
} finally {
|
||||
await new Promise<void>((resolve) => server.close(() => resolve()));
|
||||
}
|
||||
});
|
||||
|
||||
it('PUT /api/cli/config/gemini updates config and broadcasts event', async () => {
|
||||
const broadcasts: any[] = [];
|
||||
const { server, baseUrl } = await createServer(PROJECT_ROOT, broadcasts);
|
||||
|
||||
135
ccw/tests/smart-search-mcp-usage.test.js
Normal file
135
ccw/tests/smart-search-mcp-usage.test.js
Normal file
@@ -0,0 +1,135 @@
|
||||
import { afterEach, before, describe, it } from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
import { mkdirSync, mkdtempSync, rmSync, writeFileSync } from 'node:fs';
|
||||
import { tmpdir } from 'node:os';
|
||||
import { join } from 'node:path';
|
||||
|
||||
const smartSearchPath = new URL('../dist/tools/smart-search.js', import.meta.url).href;
|
||||
|
||||
describe('Smart Search MCP usage defaults and path handling', async () => {
|
||||
let smartSearchModule;
|
||||
const tempDirs = [];
|
||||
|
||||
before(async () => {
|
||||
try {
|
||||
smartSearchModule = await import(smartSearchPath);
|
||||
} catch (err) {
|
||||
console.log('Note: smart-search module import skipped:', err?.message ?? String(err));
|
||||
}
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
while (tempDirs.length > 0) {
|
||||
rmSync(tempDirs.pop(), { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
function createWorkspace() {
|
||||
const dir = mkdtempSync(join(tmpdir(), 'ccw-smart-search-'));
|
||||
tempDirs.push(dir);
|
||||
return dir;
|
||||
}
|
||||
|
||||
it('keeps schema defaults aligned with runtime docs', () => {
|
||||
if (!smartSearchModule) return;
|
||||
|
||||
const { schema } = smartSearchModule;
|
||||
const props = schema.inputSchema.properties;
|
||||
|
||||
assert.equal(props.maxResults.default, 5);
|
||||
assert.equal(props.limit.default, 5);
|
||||
assert.match(schema.description, /static FTS index/i);
|
||||
assert.match(props.path.description, /single file path/i);
|
||||
});
|
||||
|
||||
it('honors explicit small limit values', async () => {
|
||||
if (!smartSearchModule) return;
|
||||
|
||||
const dir = createWorkspace();
|
||||
const file = join(dir, 'many.ts');
|
||||
writeFileSync(file, ['const hit = 1;', 'const hit = 2;', 'const hit = 3;'].join('\n'));
|
||||
|
||||
const toolResult = await smartSearchModule.handler({
|
||||
action: 'search',
|
||||
query: 'hit',
|
||||
path: dir,
|
||||
limit: 1,
|
||||
regex: false,
|
||||
tokenize: false,
|
||||
});
|
||||
|
||||
assert.equal(toolResult.success, true, toolResult.error);
|
||||
assert.equal(toolResult.result.success, true);
|
||||
assert.equal(toolResult.result.results.length, 1);
|
||||
assert.equal(toolResult.result.metadata.pagination.limit, 1);
|
||||
});
|
||||
|
||||
it('scopes search results to a single file path', async () => {
|
||||
if (!smartSearchModule) return;
|
||||
|
||||
const dir = createWorkspace();
|
||||
const target = join(dir, 'target.ts');
|
||||
const other = join(dir, 'other.ts');
|
||||
writeFileSync(target, 'const TARGET_TOKEN = 1;\n');
|
||||
writeFileSync(other, 'const TARGET_TOKEN = 2;\n');
|
||||
|
||||
const toolResult = await smartSearchModule.handler({
|
||||
action: 'search',
|
||||
query: 'TARGET_TOKEN',
|
||||
path: target,
|
||||
regex: false,
|
||||
tokenize: false,
|
||||
});
|
||||
|
||||
assert.equal(toolResult.success, true, toolResult.error);
|
||||
assert.equal(toolResult.result.success, true);
|
||||
assert.ok(Array.isArray(toolResult.result.results));
|
||||
assert.ok(toolResult.result.results.length >= 1);
|
||||
|
||||
const normalizedFiles = toolResult.result.results.map((item) => String(item.file).replace(/\\/g, '/'));
|
||||
assert.ok(normalizedFiles.every((file) => file.endsWith('/target.ts') || file === 'target.ts'));
|
||||
assert.ok(normalizedFiles.every((file) => !file.endsWith('/other.ts')));
|
||||
});
|
||||
|
||||
it('normalizes wrapped multiline query and file path inputs', async () => {
|
||||
if (!smartSearchModule) return;
|
||||
|
||||
const dir = createWorkspace();
|
||||
const nestedDir = join(dir, 'hydro_generator_module', 'builders');
|
||||
mkdirSync(nestedDir, { recursive: true });
|
||||
const target = join(nestedDir, 'full_machine_builders.py');
|
||||
writeFileSync(target, 'def _resolve_rotor_inner():\n return rotor_main_seg\n');
|
||||
|
||||
const wrappedPath = target.replace(/([\\/])builders([\\/])/, '$1\n builders$2');
|
||||
const wrappedQuery = '_resolve_rotor_inner OR\n rotor_main_seg';
|
||||
|
||||
const toolResult = await smartSearchModule.handler({
|
||||
action: 'search',
|
||||
query: wrappedQuery,
|
||||
path: wrappedPath,
|
||||
regex: false,
|
||||
caseSensitive: false,
|
||||
});
|
||||
|
||||
assert.equal(toolResult.success, true, toolResult.error);
|
||||
assert.equal(toolResult.result.success, true);
|
||||
assert.ok(toolResult.result.results.length >= 1);
|
||||
});
|
||||
|
||||
it('surfaces backend failure details when fuzzy search fully fails', async () => {
|
||||
if (!smartSearchModule) return;
|
||||
|
||||
const missingPath = join(createWorkspace(), 'missing-folder', 'missing.ts');
|
||||
const toolResult = await smartSearchModule.handler({
|
||||
action: 'search',
|
||||
query: 'TARGET_TOKEN',
|
||||
path: missingPath,
|
||||
regex: false,
|
||||
tokenize: false,
|
||||
});
|
||||
|
||||
assert.equal(toolResult.success, false);
|
||||
assert.match(toolResult.error, /Both search backends failed:/);
|
||||
assert.match(toolResult.error, /(FTS|Ripgrep)/);
|
||||
});
|
||||
});
|
||||
51
ccw/tests/uv-manager-codexlens-python.test.js
Normal file
51
ccw/tests/uv-manager-codexlens-python.test.js
Normal file
@@ -0,0 +1,51 @@
|
||||
import { afterEach, before, describe, it } from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
import { execSync } from 'node:child_process';
|
||||
|
||||
const uvManagerPath = new URL('../dist/utils/uv-manager.js', import.meta.url).href;
|
||||
|
||||
describe('CodexLens UV python preference', async () => {
|
||||
let mod;
|
||||
const originalPython = process.env.CCW_PYTHON;
|
||||
|
||||
before(async () => {
|
||||
mod = await import(uvManagerPath);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
if (originalPython === undefined) {
|
||||
delete process.env.CCW_PYTHON;
|
||||
return;
|
||||
}
|
||||
process.env.CCW_PYTHON = originalPython;
|
||||
});
|
||||
|
||||
it('honors CCW_PYTHON override', () => {
|
||||
process.env.CCW_PYTHON = 'C:/Custom/Python/python.exe';
|
||||
assert.equal(mod.getPreferredCodexLensPythonSpec(), 'C:/Custom/Python/python.exe');
|
||||
});
|
||||
|
||||
it('prefers Python 3.11 or 3.10 on Windows when available', () => {
|
||||
if (process.platform !== 'win32') return;
|
||||
delete process.env.CCW_PYTHON;
|
||||
|
||||
let installed = '';
|
||||
try {
|
||||
installed = execSync('py -0p', { encoding: 'utf-8' });
|
||||
} catch {
|
||||
return;
|
||||
}
|
||||
|
||||
const has311 = installed.includes('-V:3.11');
|
||||
const has310 = installed.includes('-V:3.10');
|
||||
if (!has311 && !has310) {
|
||||
return;
|
||||
}
|
||||
|
||||
const preferred = mod.getPreferredCodexLensPythonSpec();
|
||||
assert.ok(
|
||||
preferred === '3.11' || preferred === '3.10',
|
||||
`expected Windows preference to avoid 3.12 when 3.11/3.10 exists, got ${preferred}`,
|
||||
);
|
||||
});
|
||||
});
|
||||
@@ -11,7 +11,7 @@ License-File: LICENSE
|
||||
Requires-Dist: typer~=0.9.0
|
||||
Requires-Dist: click<9,>=8.0.0
|
||||
Requires-Dist: rich~=13.0.0
|
||||
Requires-Dist: pydantic>=2.5.0
|
||||
Requires-Dist: pydantic~=2.0.0
|
||||
Requires-Dist: tree-sitter~=0.20.0
|
||||
Requires-Dist: tree-sitter-python~=0.25.0
|
||||
Requires-Dist: tree-sitter-javascript~=0.25.0
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
typer~=0.9.0
|
||||
click<9,>=8.0.0
|
||||
rich~=13.0.0
|
||||
pydantic>=2.5.0
|
||||
pydantic~=2.0.0
|
||||
tree-sitter~=0.20.0
|
||||
tree-sitter-python~=0.25.0
|
||||
tree-sitter-javascript~=0.25.0
|
||||
|
||||
589
docs/CCW-CODEX-COMMANDS-SKILLS-GUIDE.md
Normal file
589
docs/CCW-CODEX-COMMANDS-SKILLS-GUIDE.md
Normal file
@@ -0,0 +1,589 @@
|
||||
# CCW / Codex 命令与 Skill 完整指南
|
||||
|
||||
> **Who(谁来用)**: 项目开发者 - 使用 Claude Code 或 Codex CLI 进行代码开发、审查、调试、规划的开发人员
|
||||
|
||||
---
|
||||
|
||||
## 一、CCW CLI 命令
|
||||
|
||||
### 1.1 核心命令
|
||||
|
||||
| 命令 | 用途 | 示例 |
|
||||
|------|------|------|
|
||||
| `ccw view` | 启动 Dashboard 服务器 | `ccw view --port 3456` |
|
||||
| `ccw stop` | 停止服务器 | `ccw stop --force` |
|
||||
| `ccw cli` | 统一 CLI 工具执行 | `ccw cli -p "..." --tool gemini` |
|
||||
| `ccw session` | 会话生命周期管理 | `ccw session list` |
|
||||
| `ccw issue` | Issue 生命周期管理 | `ccw issue create --title "..."` |
|
||||
| `ccw memory` | 上下文跟踪 | `ccw memory search --query "..."` |
|
||||
| `ccw team` | 团队消息总线 | `ccw team status --session-id <id>` |
|
||||
|
||||
### 1.2 CLI 执行命令详解
|
||||
|
||||
#### 基础语法
|
||||
|
||||
```bash
|
||||
ccw cli -p "<PROMPT>" --tool <tool> --mode <mode> [options]
|
||||
```
|
||||
|
||||
#### 三种执行模式
|
||||
|
||||
| 模式 | 权限 | 用途 | 工具支持 |
|
||||
|------|------|------|----------|
|
||||
| `analysis` | 只读 | 代码审查、架构分析、模式发现 | 全部 |
|
||||
| `write` | 创建/修改/删除 | 功能实现、Bug修复、代码重构 | 全部 |
|
||||
| `review` | 只读(Git感知) | 代码变更审查 | **codex 专用** |
|
||||
|
||||
#### CLI 工具配置 (`~/.claude/cli-tools.json`)
|
||||
|
||||
```json
|
||||
{
|
||||
"tools": {
|
||||
"gemini": { "enabled": true, "primaryModel": "gemini-2.5-pro", "tags": ["分析", "Debug"] },
|
||||
"qwen": { "enabled": true, "primaryModel": "coder-model" },
|
||||
"codex": { "enabled": true, "primaryModel": "gpt-5.2" },
|
||||
"claude": { "enabled": true, "primaryModel": "sonnet" },
|
||||
"opencode": { "enabled": true, "primaryModel": "opencode/glm-4.7-free" }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### CLI 子命令
|
||||
|
||||
```bash
|
||||
# 列出所有执行
|
||||
ccw cli show --all
|
||||
|
||||
# 流式输出(stderr)
|
||||
ccw cli watch <id> --timeout 120
|
||||
|
||||
# 获取执行结果
|
||||
ccw cli output <id> # 最终结果
|
||||
ccw cli output <id> --verbose # 完整元数据
|
||||
ccw cli output <id> --raw # 原始输出
|
||||
```
|
||||
|
||||
#### CLI 核心选项
|
||||
|
||||
| 选项 | 说明 |
|
||||
|------|------|
|
||||
| `-p, --prompt <text>` | 提示文本 |
|
||||
| `-f, --file <file>` | 从文件读取提示 |
|
||||
| `--tool <tool>` | 工具选择 (gemini/qwen/codex/claude) |
|
||||
| `--mode <mode>` | 执行模式 (analysis/write/review) |
|
||||
| `--model <model>` | 模型覆盖 |
|
||||
| `--cd <path>` | 工作目录 |
|
||||
| `--includeDirs <dirs>` | 额外目录(逗号分隔) |
|
||||
| `--rule <template>` | 模板规则名称 |
|
||||
| `--resume [id]` | 恢复会话(空=最近) |
|
||||
| `--id <id>` | 执行 ID(推荐指定) |
|
||||
| `--yes` | 自动模式 |
|
||||
|
||||
#### 目录配置
|
||||
|
||||
```bash
|
||||
# 单额外目录
|
||||
ccw cli -p "CONTEXT: @**/* @../shared/**/*" \
|
||||
--cd src/auth --includeDirs ../shared
|
||||
|
||||
# 多额外目录
|
||||
ccw cli -p "..." --cd src/auth --includeDirs ../shared,../types,../utils
|
||||
```
|
||||
|
||||
### 1.3 Codex Review 命令
|
||||
|
||||
**重要约束**: 目标标志 (`--uncommitted`, `--base`, `--commit`) 和 `[PROMPT]` **互斥**
|
||||
|
||||
```bash
|
||||
# 方式1: 带提示(审查未提交)
|
||||
ccw cli -p "聚焦安全漏洞" --tool codex --mode review
|
||||
|
||||
# 方式2: 目标标志(无提示)
|
||||
ccw cli --tool codex --mode review --uncommitted # 未提交更改
|
||||
ccw cli --tool codex --mode review --base main # 与 main 对比
|
||||
ccw cli --tool codex --mode review --commit abc123 # 特定提交
|
||||
|
||||
# ❌ 无效组合(互斥)
|
||||
ccw cli -p "review" --tool codex --mode review --uncommitted
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 二、模板规则(--rule)
|
||||
|
||||
### 2.1 分析模板
|
||||
|
||||
| 模板 | 用途 |
|
||||
|------|------|
|
||||
| `analysis-assess-security-risks` | 安全风险评估 |
|
||||
| `analysis-diagnose-bug-root-cause` | Bug 根因诊断 |
|
||||
| `analysis-review-architecture` | 架构审查 |
|
||||
| `analysis-analyze-code-patterns` | 代码模式分析 |
|
||||
| `analysis-review-code-quality` | 代码质量审查 |
|
||||
| `analysis-analyze-performance` | 性能分析 |
|
||||
| `analysis-trace-code-execution` | 执行追踪 |
|
||||
|
||||
### 2.2 规划模板
|
||||
|
||||
| 模板 | 用途 |
|
||||
|------|------|
|
||||
| `planning-plan-architecture-design` | 架构设计规划 |
|
||||
| `planning-breakdown-task-steps` | 任务步骤分解 |
|
||||
| `planning-design-component-spec` | 组件规格设计 |
|
||||
| `planning-plan-migration-strategy` | 迁移策略规划 |
|
||||
|
||||
### 2.3 开发模板
|
||||
|
||||
| 模板 | 用途 |
|
||||
|------|------|
|
||||
| `development-implement-feature` | 功能实现 |
|
||||
| `development-refactor-codebase` | 代码重构 |
|
||||
| `development-generate-tests` | 测试生成 |
|
||||
| `development-implement-component-ui` | UI 组件实现 |
|
||||
| `development-debug-runtime-issues` | 运行时调试 |
|
||||
|
||||
---
|
||||
|
||||
## 三、Claude Slash Commands
|
||||
|
||||
### 3.1 Workflow 命令
|
||||
|
||||
| 命令 | 阶段 | 用途 |
|
||||
|------|------|------|
|
||||
| `/workflow:lite-plan` | 5 | 快速任务规划(explore → clarify → plan → confirm → handoff) |
|
||||
| `/workflow-plan` | 4 | 正式规划(explore → analyze → plan → verify) |
|
||||
| `/workflow-execute` | 6+ | 执行工作流(session → tasks → commit) |
|
||||
| `/workflow-tdd-plan` | 6 | TDD 规划(Red-Green-Refactor) |
|
||||
| `/workflow-test-fix` | 5 | 测试生成与修复管道 |
|
||||
| `/workflow:debug-with-file` | 5 | 假设驱动调试 |
|
||||
| `/workflow:brainstorm-with-file` | 4 | 4视角并行头脑风暴 |
|
||||
| `/workflow:analyze-with-file` | 4 | 协作分析(带文档) |
|
||||
| `/workflow:refactor-cycle` | 4 | 重构循环 |
|
||||
| `/workflow:integration-test-cycle` | 4 | 集成测试循环 |
|
||||
|
||||
### 3.2 Issue 命令
|
||||
|
||||
| 命令 | 用途 |
|
||||
|------|------|
|
||||
| `/issue:new` | 创建新 Issue |
|
||||
| `/issue:discover` | 发现潜在问题 |
|
||||
| `/issue:discover-by-prompt` | 通过提示发现问题 |
|
||||
| `/issue:plan` | 规划解决方案 |
|
||||
| `/issue:queue` | 形成执行队列 |
|
||||
| `/issue:execute` | 执行方案队列 |
|
||||
| `/issue:convert-to-plan` | 转换为规划工件 |
|
||||
|
||||
### 3.3 DDD 命令
|
||||
|
||||
| 命令 | 用途 |
|
||||
|------|------|
|
||||
| `/ddd:auto` | 文档驱动自动化(chain) |
|
||||
| `/ddd:plan` | 文档驱动规划 |
|
||||
| `/ddd:execute` | 文档驱动执行 |
|
||||
| `/ddd:scan` | 扫描代码库构建文档 |
|
||||
| `/ddd:index-build` | 构建文档索引 |
|
||||
| `/ddd:doc-generate` | 生成完整文档树 |
|
||||
| `/ddd:doc-refresh` | 增量更新文档 |
|
||||
| `/ddd:sync` | 任务后同步 |
|
||||
| `/ddd:update` | 增量索引更新 |
|
||||
|
||||
### 3.4 Session 命令
|
||||
|
||||
| 命令 | 用途 |
|
||||
|------|------|
|
||||
| `/workflow:session:start` | 启动工作流会话 |
|
||||
| `/workflow:session:resume` | 恢复最近会话 |
|
||||
| `/workflow:session:list` | 列出所有会话 |
|
||||
| `/workflow:session:sync` | 同步到 specs |
|
||||
| `/workflow:session:complete` | 完成会话 |
|
||||
|
||||
### 3.5 Spec 命令
|
||||
|
||||
| 命令 | 用途 |
|
||||
|------|------|
|
||||
| `/workflow:spec:setup` | 初始化项目级配置 |
|
||||
| `/workflow:spec:add` | 添加规格/约束 |
|
||||
|
||||
### 3.6 UI Design 命令
|
||||
|
||||
| 命令 | 用途 |
|
||||
|------|------|
|
||||
| `/workflow:ui-design:explore-auto` | UI 设计自动工作流 |
|
||||
| `/workflow:ui-design:imitate-auto` | UI 模仿自动工作流 |
|
||||
| `/workflow:ui-design:style-extract` | 提取设计样式 |
|
||||
| `/workflow:ui-design:layout-extract` | 提取布局 |
|
||||
| `/workflow:ui-design:animation-extract` | 提取动画 |
|
||||
| `/workflow:ui-design:codify-style` | 代码化样式 |
|
||||
| `/workflow:ui-design:design-sync` | 同步设计系统 |
|
||||
| `/workflow:ui-design:import-from-code` | 从代码导入设计 |
|
||||
| `/workflow:ui-design:generate` | 组装 UI 原型 |
|
||||
| `/workflow:ui-design:reference-page-generator` | 生成参考页面 |
|
||||
|
||||
### 3.7 Memory 命令
|
||||
|
||||
| 命令 | 用途 |
|
||||
|------|------|
|
||||
| `/memory:prepare` | 准备记忆上下文 |
|
||||
| `/memory:style-skill-memory` | 生成 Skill 记忆包 |
|
||||
|
||||
### 3.8 IDAW 命令
|
||||
|
||||
| 命令 | 用途 |
|
||||
|------|------|
|
||||
| `/idaw:add` | 添加 IDAW 任务 |
|
||||
| `/idaw:run` | 执行 IDAW 任务链 |
|
||||
| `/idaw:run-coordinate` | IDAW 协调器 |
|
||||
| `/idaw:resume` | 恢复 IDAW 会话 |
|
||||
| `/idaw:status` | 查看 IDAW 状态 |
|
||||
|
||||
### 3.9 CLI 命令
|
||||
|
||||
| 命令 | 用途 |
|
||||
|------|------|
|
||||
| `/cli:cli-init` | 初始化 CLI 配置 |
|
||||
| `/cli:codex-review` | Codex 代码审查 |
|
||||
|
||||
### 3.10 其他命令
|
||||
|
||||
| 命令 | 用途 |
|
||||
|------|------|
|
||||
| `/ccw` | 主工作流协调器 |
|
||||
| `/ccw-coordinator` | 命令协调工具 |
|
||||
| `/flow-create` | Flow 模板生成器 |
|
||||
| `/workflow:clean` | 智能代码清理 |
|
||||
| `/workflow:roadmap-with-file` | 战略路线图规划 |
|
||||
| `/workflow:collaborative-plan-with-file` | 协作规划 |
|
||||
| `/workflow:unified-execute-with-file` | 统一执行引擎 |
|
||||
| `/workflow:debug-with-file` | 调试工作流 |
|
||||
| `/workflow:brainstorm-with-file` | 头脑风暴 |
|
||||
| `/workflow:analyze-with-file` | 协作分析 |
|
||||
|
||||
---
|
||||
|
||||
## 四、CCW Skills(41 个)
|
||||
|
||||
### 4.1 Workflow Skills(9 个)
|
||||
|
||||
| Skill | 阶段 | 触发 | 用途 |
|
||||
|-------|------|------|------|
|
||||
| `workflow-plan` | 4 | "workflow-plan", 正式规划 | 4阶段规划(explore → analyze → plan → verify) |
|
||||
| `workflow-execute` | 6+ | "workflow-execute", 执行 | Agent 执行编排,延迟加载阶段文件 |
|
||||
| `workflow-lite-plan` | 5 | "lite-plan", 快速任务 | 轻量级规划(explore → clarify → plan → confirm → handoff) |
|
||||
| `workflow-lite-execute` | - | "lite-execute" | 轻量级执行,支持多模式输入 |
|
||||
| `workflow-tdd-plan` | 6 | "tdd", "TDD" | 6阶段 TDD 规划(Red-Green-Refactor) |
|
||||
| `workflow-test-fix` | 5 | "test-fix" | 测试生成与执行管道 |
|
||||
| `workflow-multi-cli-plan` | - | "multi-cli" | 多 CLI 协作规划(Gemini/Codex/Claude) |
|
||||
| `workflow-skill-designer` | - | "skill-designer" | 元技能,生成新的 workflow skills |
|
||||
|
||||
### 4.2 Team Skills(21 个)
|
||||
|
||||
#### 生命周期
|
||||
|
||||
| Skill | 角色 | 用途 |
|
||||
|-------|------|------|
|
||||
| `team-lifecycle-v4` | 7+ | 完整生命周期:specification → planning → implementation → testing → review |
|
||||
| `team-planex` | 4 | 规划+执行 wave pipeline |
|
||||
| `team-roadmap-dev` | 5 | 分阶段执行,对齐路线图 |
|
||||
|
||||
#### 质量保障
|
||||
|
||||
| Skill | 角色 | 用途 |
|
||||
|-------|------|------|
|
||||
| `team-review` | 3 | 代码审查管道:scanner → reviewer → fixer |
|
||||
| `team-quality-assurance` | 5 | 完整 QA:scout → strategist → generator → executor → analyst |
|
||||
| `team-testing` | 4 | 测试管道:strategist → generator → executor → analyst |
|
||||
|
||||
#### 优化
|
||||
|
||||
| Skill | 角色 | 用途 |
|
||||
|-------|------|------|
|
||||
| `team-arch-opt` | 4 | 架构优化:analyze → optimize → verify |
|
||||
| `team-perf-opt` | 4 | 性能优化:profile → strategy → implement → benchmark |
|
||||
| `team-tech-debt` | 4 | 技术债务:scan → assess → plan → fix |
|
||||
| `team-ux-improve` | 5 | UX 改进:scan → diagnose → design → implement → test |
|
||||
|
||||
#### 开发
|
||||
|
||||
| Skill | 角色 | 用途 |
|
||||
|-------|------|------|
|
||||
| `team-frontend` | 4+ | 前端开发团队 |
|
||||
| `team-frontend-debug` | 3+ | 前端调试(Chrome DevTools MCP) |
|
||||
| `team-iterdev` | 3 | 迭代开发(Generator-Critic 循环) |
|
||||
| `team-issue` | 5 | Issue 解决:explore → plan → review → marshal → implement |
|
||||
|
||||
#### 分析与设计
|
||||
|
||||
| Skill | 角色 | 用途 |
|
||||
|-------|------|------|
|
||||
| `team-ultra-analyze` | 4 | 深度分析:explore → analyze → discuss → synthesize |
|
||||
| `team-brainstorm` | 4 | 头脑风暴(Generator-Critic 循环) |
|
||||
| `team-uidesign` | 4 | UI 设计:research → tokens → audit → implement |
|
||||
|
||||
#### 协调
|
||||
|
||||
| Skill | 角色 | 用途 |
|
||||
|-------|------|------|
|
||||
| `team-coordinate` | 动态 | 通用协调,动态角色生成 |
|
||||
| `team-executor` | - | 轻量级执行,无分析阶段 |
|
||||
| `team-edict` | 多部门 | 三省六部架构 |
|
||||
|
||||
#### 其他
|
||||
|
||||
| Skill | 用途 |
|
||||
|-------|------|
|
||||
| `team-designer` | 元技能,生成团队 skills |
|
||||
| `team-lifecycle` | Codex 完整生命周期 |
|
||||
|
||||
### 4.3 Utility Skills(11 个)
|
||||
|
||||
| Skill | 阶段 | 触发 | 用途 |
|
||||
|-------|------|------|------|
|
||||
| `review-code` | 6维度 | "review" | 6维度代码审查(正确性、可读性、性能、安全、测试、架构) |
|
||||
| `review-cycle` | 3模式 | "review-cycle" | 统一审查(session/module/fix 模式) |
|
||||
| `skill-tuning` | - | "tuning", "诊断" | Skill 诊断和调优 |
|
||||
| `skill-simplify` | 3 | "simplify" | Skill 简化(完整性验证) |
|
||||
| `skill-generator` | 5 | "create skill", "new skill" | 元技能,创建新 skills |
|
||||
| `command-generator` | 5 | "create command", "new command" | 命令文件生成器 |
|
||||
| `spec-generator` | 6 | "spec" | 规格文档生成器 |
|
||||
| `memory-capture` | 2 | "memory", "记忆" | 记忆捕获(compact/tips 模式) |
|
||||
| `memory-manage` | 5 | "CLAUDE.md" | CLAUDE.md 更新和文档生成 |
|
||||
| `brainstorm` | 2 | "brainstorm", "头脑风暴" | 双模式头脑风暴 |
|
||||
| `ccw-help` | - | "help", "帮助" | 命令帮助系统 |
|
||||
|
||||
---
|
||||
|
||||
## 五、Team Skills 架构
|
||||
|
||||
### 5.1 协调器 + Worker 模式
|
||||
|
||||
```javascript
|
||||
// SKILL.md 是路由器
|
||||
if (hasFlag('--role <name>')) {
|
||||
// Worker 模式
|
||||
Read(`roles/<name>/role.md`)
|
||||
execute Phase 2-4
|
||||
} else {
|
||||
// Coordinator 模式
|
||||
Read('roles/coordinator/role.md')
|
||||
spawn workers
|
||||
}
|
||||
```
|
||||
|
||||
### 5.2 Worker 生成模板
|
||||
|
||||
```javascript
|
||||
Agent({
|
||||
subagent_type: "team-worker",
|
||||
team_name: "planex",
|
||||
name: "<role>",
|
||||
run_in_background: true,
|
||||
prompt: `## Role Assignment
|
||||
role: <role>
|
||||
role_spec: .claude/skills/team-planex/roles/<role>/role.md
|
||||
session: <session-folder>
|
||||
session_id: <session-id>
|
||||
inner_loop: true`
|
||||
})
|
||||
```
|
||||
|
||||
### 5.3 消息总线通信
|
||||
|
||||
```javascript
|
||||
mcp__ccw-tools__team_msg({
|
||||
operation: "log",
|
||||
session_id: sessionId,
|
||||
from: role,
|
||||
type: "state_update",
|
||||
data: {
|
||||
status: "task_complete",
|
||||
task_id: "...",
|
||||
ref: "..."
|
||||
}
|
||||
})
|
||||
```
|
||||
|
||||
### 5.4 会话目录结构
|
||||
|
||||
```
|
||||
.workflow/.team/<PREFIX>-<slug>-<date>/
|
||||
├── team-session.json # 会话状态 + 角色注册
|
||||
├── artifacts/ # 交付物
|
||||
├── evidence/ # 调试证据
|
||||
├── wisdom/ # 跨任务知识
|
||||
└── .msg/ # 消息总线存储
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 六、Codex Skills
|
||||
|
||||
### 6.1 核心 Skills
|
||||
|
||||
| Skill | 触发 | 用途 |
|
||||
|-------|------|------|
|
||||
| `team-lifecycle` | `/team-lifecycle <task>` | 5阶段管道(requirements → init → tasks → coordination → report) |
|
||||
| `parallel-dev-cycle` | `/parallel-dev-cycle TASK="..."` | 4专家并行(RA, EP, CD, VAS) |
|
||||
| `analyze-with-file` | `/analyze-with-file TOPIC="..."` | 协作分析,带文档讨论 |
|
||||
| `brainstorm-with-file` | `/brainstorm-with-file TOPIC="..."` | 4视角并行分析 |
|
||||
| `debug-with-file` | `/debug-with-file BUG="..."` | 假设驱动调试 |
|
||||
| `review-cycle` | `/review-cycle <target>` | 多维代码审查 + 修复 |
|
||||
| `workflow-test-fix-cycle` | `/workflow-test-fix-cycle <tests>` | 测试-修复循环 |
|
||||
|
||||
### 6.2 专用 Skills
|
||||
|
||||
| Skill | 触发 | 用途 |
|
||||
|-------|------|------|
|
||||
| `clean` | `/clean` | 代码清理 |
|
||||
| `memory-compact` | `/memory-compact` | 记忆压缩 |
|
||||
| `issue-discover` | `/issue-discover` | 问题发现 |
|
||||
| `session-sync` | `/session-sync` | 会话同步 |
|
||||
| `spec-add` | `/spec-add` | 添加规格 |
|
||||
| `spec-setup` | `/spec-setup` | 设置规格 |
|
||||
|
||||
---
|
||||
|
||||
## 七、Skill 执行模式
|
||||
|
||||
### 7.1 Sequential 模式(固定顺序)
|
||||
|
||||
```
|
||||
Phase 01 → Phase 02 → Phase 03 → ... → Phase N
|
||||
```
|
||||
|
||||
**适用**: 流水线任务、阶段间强依赖、固定输出结构
|
||||
|
||||
**文件**: `phases/01-*.md`, `phases/02-*.md`, ...
|
||||
|
||||
### 7.2 Autonomous 模式(状态驱动)
|
||||
|
||||
```
|
||||
Orchestrator → 读取状态 → 选择 Action → 执行 → 更新状态
|
||||
```
|
||||
|
||||
**适用**: 交互式任务、阶段间无强依赖、需动态响应
|
||||
|
||||
**文件**: `phases/orchestrator.md`, `phases/actions/*.md`, `phases/state-schema.md`
|
||||
|
||||
---
|
||||
|
||||
## 八、通用参数
|
||||
|
||||
### 8.1 自动模式(-y/--yes)
|
||||
|
||||
所有命令支持跳过确认:
|
||||
|
||||
```bash
|
||||
/workflow-execute --yes
|
||||
/issue:new -y "问题描述"
|
||||
/team-planex --yes "任务描述"
|
||||
```
|
||||
|
||||
### 8.2 会话恢复(--resume)
|
||||
|
||||
```bash
|
||||
# 恢复最近会话
|
||||
/workflow-execute --resume
|
||||
|
||||
# 恢复特定会话
|
||||
/workflow-execute --resume="WFS-auth"
|
||||
```
|
||||
|
||||
### 8.3 角色指定(--role)
|
||||
|
||||
```bash
|
||||
# Team Skills 角色路由
|
||||
/team-lifecycle-v4 --role coordinator
|
||||
/team-lifecycle-v4 --role planner
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 九、使用示例
|
||||
|
||||
### 9.1 分析任务(安全模式)
|
||||
|
||||
```bash
|
||||
# 安全审计
|
||||
ccw cli -p "PURPOSE: 识别认证模块的安全漏洞
|
||||
TASK: • 扫描注入漏洞 • 检查认证绕过 • 评估会话管理
|
||||
MODE: analysis
|
||||
CONTEXT: @src/auth/**/*
|
||||
EXPECTED: 安全报告(严重级别 + 文件位置 + 修复建议)" \
|
||||
--tool gemini --mode analysis --rule analysis-assess-security-risks
|
||||
|
||||
# 架构审查
|
||||
ccw cli -p "..." --tool gemini --mode analysis --rule analysis-review-architecture
|
||||
|
||||
# 代码模式分析
|
||||
ccw cli -p "..." --tool gemini --mode analysis --rule analysis-analyze-code-patterns
|
||||
```
|
||||
|
||||
### 9.2 实现任务(写入模式)
|
||||
|
||||
```bash
|
||||
# 功能实现
|
||||
ccw cli -p "PURPOSE: 实现 API 限流
|
||||
TASK: • 创建滑动窗口中间件 • Redis 后端 • 按路由配置
|
||||
MODE: write
|
||||
CONTEXT: @src/middleware/**/*
|
||||
EXPECTED: 生产代码 + 测试 + 配置示例" \
|
||||
--tool gemini --mode write --rule development-implement-feature
|
||||
|
||||
# Bug 修复
|
||||
ccw cli -p "..." --tool gemini --mode analysis --rule analysis-diagnose-bug-root-cause
|
||||
|
||||
# 代码重构
|
||||
ccw cli -p "..." --tool gemini --mode write --rule development-refactor-codebase
|
||||
```
|
||||
|
||||
### 9.3 Codex 代码审查
|
||||
|
||||
```bash
|
||||
# 审查未提交更改(带焦点)
|
||||
ccw cli -p "聚焦安全漏洞和错误处理" --tool codex --mode review
|
||||
|
||||
# 审查特定提交
|
||||
ccw cli --tool codex --mode review --commit abc123
|
||||
|
||||
# 审查分支差异
|
||||
ccw cli --tool codex --mode review --base main
|
||||
```
|
||||
|
||||
### 9.4 工作流执行
|
||||
|
||||
```bash
|
||||
# 交互式执行
|
||||
/workflow-execute
|
||||
|
||||
# 自动模式
|
||||
/workflow-execute --yes
|
||||
|
||||
# 自动提交
|
||||
/workflow-execute --with-commit
|
||||
|
||||
# 恢复会话
|
||||
/workflow-execute --resume-session="WFS-auth"
|
||||
```
|
||||
|
||||
### 9.5 团队协作
|
||||
|
||||
```bash
|
||||
# 启动团队工作流
|
||||
/team-planex --yes "实现用户认证系统"
|
||||
|
||||
# 角色分配(自动)
|
||||
# coordinator: 协调任务分发
|
||||
# planner: 生成规划方案
|
||||
# executor: 执行具体任务
|
||||
# reviewer: 审查执行结果
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 十、相关资源
|
||||
|
||||
- [CLI Tools 使用规范](~/.ccw/workflows/cli-tools-usage.md)
|
||||
- [Skill 设计规范](.claude/skills/_shared/SKILL-DESIGN-SPEC.md)
|
||||
- [命令到 Skill 转换](.claude/skills/_shared/COMMAND-TO-SKILL-CONVERSION.md)
|
||||
- [编码哲学](~/.ccw/workflows/coding-philosophy.md)
|
||||
- [OMO 命令速查一页纸](https://linux.do/t/topic/1637568)
|
||||
Reference in New Issue
Block a user