Enhance UX and Coordinator Role Constraints in Skills Documentation

- Added detailed constraints for the Coordinator role in the team UX improvement skill, emphasizing orchestration responsibilities and workflow management.
- Updated test cases in DashboardToolbar, useIssues, and useWebSocket to improve reliability and clarity.
- Introduced new tests for configStore and ignore patterns in Codex Lens to ensure proper functionality and configuration handling.
- Enhanced smart search functionality with improved embedding selection logic and added tests for various scenarios.
- Updated installation and usage documentation to reflect changes in directory structure and role specifications.
This commit is contained in:
catlog22
2026-03-08 23:43:44 +08:00
parent f3ae78f95e
commit 61ea9d47a6
110 changed files with 1516 additions and 218 deletions

View File

@@ -105,7 +105,7 @@ describe('DashboardToolbar', () => {
/>
);
fireEvent.click(screen.getByTitle('Click to configure and launch a CLI session'));
fireEvent.click(screen.getByRole('button', { name: 'New Session' }));
expect(screen.getByTestId('cli-config-modal')).toBeInTheDocument();
mockState.currentProjectPath = 'D:/workspace-b';

View File

@@ -4,7 +4,7 @@
// Tests for issue-related hooks with queue and discovery
import { describe, it, expect, beforeEach, vi } from 'vitest';
import { renderHook, waitFor } from '@testing-library/react';
import { act, renderHook, waitFor } from '@testing-library/react';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import {
useIssueQueue,
@@ -203,14 +203,18 @@ describe('useIssueDiscovery', () => {
});
// Select a session to load findings
result.current.selectSession('1');
act(() => {
result.current.selectSession('1');
});
await waitFor(() => {
expect(result.current.findings).toHaveLength(2);
});
// Apply severity filter
result.current.setFilters({ severity: 'critical' as const });
act(() => {
result.current.setFilters({ severity: 'critical' as const });
});
await waitFor(() => {
expect(result.current.filteredFindings).toHaveLength(1);
@@ -239,14 +243,18 @@ describe('useIssueDiscovery', () => {
});
// Select a session to load findings
result.current.selectSession('1');
act(() => {
result.current.selectSession('1');
});
await waitFor(() => {
expect(result.current.findings).toHaveLength(2);
});
// Apply type filter
result.current.setFilters({ type: 'bug' });
act(() => {
result.current.setFilters({ type: 'bug' });
});
await waitFor(() => {
expect(result.current.filteredFindings).toHaveLength(1);
@@ -275,14 +283,18 @@ describe('useIssueDiscovery', () => {
});
// Select a session to load findings
result.current.selectSession('1');
act(() => {
result.current.selectSession('1');
});
await waitFor(() => {
expect(result.current.findings).toHaveLength(2);
});
// Apply search filter
result.current.setFilters({ search: 'authentication' });
act(() => {
result.current.setFilters({ search: 'authentication' });
});
await waitFor(() => {
expect(result.current.filteredFindings).toHaveLength(1);
@@ -310,7 +322,9 @@ describe('useIssueDiscovery', () => {
});
// Select a session to load findings
result.current.selectSession('1');
act(() => {
result.current.selectSession('1');
});
await waitFor(() => {
expect(result.current.findings).toHaveLength(1);

View File

@@ -77,6 +77,7 @@ describe('useWebSocket workspace scoping', () => {
useSessionManagerStore.getState().resetState();
useWorkflowStore.setState({ projectPath: 'D:\\workspace-a' });
vi.spyOn(console, 'log').mockImplementation(() => {});
vi.stubGlobal('WebSocket', MockWebSocket as unknown as typeof WebSocket);
});
@@ -84,6 +85,7 @@ describe('useWebSocket workspace scoping', () => {
useCliSessionStore.getState().resetState();
useExecutionMonitorStore.getState().resetState();
useSessionManagerStore.getState().resetState();
vi.restoreAllMocks();
vi.unstubAllGlobals();
});

View File

@@ -137,6 +137,7 @@ describe('OrchestrationPlanBuilder', () => {
});
it('should detect cycles and throw an error', () => {
const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
const flow: Flow = {
id: 'flow-cycle',
name: 'Cyclic Flow',
@@ -156,7 +157,11 @@ describe('OrchestrationPlanBuilder', () => {
metadata: {},
};
expect(() => OrchestrationPlanBuilder.fromFlow(flow)).toThrow('Cycle detected in flow graph. Cannot build orchestration plan from cyclic flow.');
try {
expect(() => OrchestrationPlanBuilder.fromFlow(flow)).toThrow('Cycle detected in flow graph. Cannot build orchestration plan from cyclic flow.');
} finally {
consoleErrorSpy.mockRestore();
}
});
it('should correctly map sessionStrategy and executionType from node data', () => {

View File

@@ -0,0 +1,67 @@
// ========================================
// Config Store Tests
// ========================================
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
const CONFIG_STORE_MODULE_PATH = './configStore';
// Verifies that configStore only contacts the backend when explicitly asked,
// and that a successful sync hydrates the store from the backend payload.
describe('configStore backend sync', () => {
  beforeEach(() => {
    // Drop the module cache so each test's dynamic import re-evaluates
    // configStore's module-level code from a clean slate.
    vi.resetModules();
    vi.clearAllMocks();
    // Start with empty storage (configStore presumably persists here — confirm).
    localStorage.clear();
    // Pin the URL to '/' so window.location.origin is deterministic below.
    window.history.replaceState({}, '', '/');
  });
  afterEach(() => {
    // Remove the fetch stubs installed via vi.stubGlobal in individual tests.
    vi.unstubAllGlobals();
  });
  it('does not fetch backend config during module import', async () => {
    const fetchMock = vi.fn();
    vi.stubGlobal('fetch', fetchMock);
    // Importing the module must be side-effect free with respect to the network.
    await import(CONFIG_STORE_MODULE_PATH);
    expect(fetchMock).not.toHaveBeenCalled();
  });
  it('syncs backend config explicitly with an absolute URL', async () => {
    // Minimal successful /api/cli/config response containing one tool entry.
    const fetchMock = vi.fn().mockResolvedValue({
      json: vi.fn().mockResolvedValue({
        config: {
          tools: {
            codex: {
              enabled: true,
              primaryModel: 'gpt-5',
              secondaryModel: 'gpt-5-mini',
              tags: ['analysis', 'debug'],
              type: 'builtin',
              envFile: '.env.codex',
              settingsFile: 'codex.settings.json',
              availableModels: ['gpt-5', 'gpt-5-mini'],
            },
          },
        },
      }),
    });
    vi.stubGlobal('fetch', fetchMock);
    const { syncConfigStoreFromBackend, useConfigStore } = await import(CONFIG_STORE_MODULE_PATH);
    await syncConfigStoreFromBackend(true);
    // The request must use an absolute URL built from the current origin.
    expect(fetchMock).toHaveBeenCalledWith(`${window.location.origin}/api/cli/config`);
    // The backend tool entry should land in the store's cliTools slice.
    expect(useConfigStore.getState().cliTools.codex).toMatchObject({
      enabled: true,
      primaryModel: 'gpt-5',
      secondaryModel: 'gpt-5-mini',
      tags: ['analysis', 'debug'],
      type: 'builtin',
      envFile: '.env.codex',
      settingsFile: 'codex.settings.json',
      availableModels: ['gpt-5', 'gpt-5-mini'],
    });
  });
});

View File

@@ -10,6 +10,8 @@ import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import { MemoryRouter } from 'react-router-dom';
import { vi } from 'vitest';
import type { Locale } from '../types/store';
import enMessages from '../locales/en/index';
import zhMessages from '../locales/zh/index';
// Mock translation messages for testing
const mockMessages: Record<Locale, Record<string, string>> = {
@@ -677,13 +679,36 @@ interface I18nWrapperProps {
locale?: Locale;
}
// Real locale bundles merged with the mock overrides, so tests can resolve
// both production message ids and test-only ids; mocks win on key collisions
// because they are spread last.
const testMessages: Record<Locale, Record<string, string>> = {
  en: { ...enMessages, ...mockMessages.en },
  zh: { ...zhMessages, ...mockMessages.zh },
};
/**
 * onError handler for the test IntlProvider: silently ignores
 * MISSING_TRANSLATION errors (expected when tests run with partial
 * message sets) and forwards everything else to console.error.
 */
function handleIntlTestError(error: unknown): void {
  const maybeIntlError = error as { code?: string } | undefined;
  if (maybeIntlError?.code !== 'MISSING_TRANSLATION') {
    console.error(error);
  }
}
// Opt in to the React Router v7 future-flag behaviors so the test router
// runs with the upcoming semantics (and the associated v6 upgrade warnings
// stay out of test output).
const testRouterFutureConfig = {
  v7_startTransition: true,
  v7_relativeSplatPath: true,
} as const;
function I18nWrapper({ children, locale = 'en' }: I18nWrapperProps) {
const queryClient = createTestQueryClient();
return (
<MemoryRouter>
<MemoryRouter future={testRouterFutureConfig}>
<QueryClientProvider client={queryClient}>
<IntlProvider locale={locale} messages={mockMessages[locale]}>
<IntlProvider
locale={locale}
messages={testMessages[locale]}
onError={handleIntlTestError}
>
{children}
</IntlProvider>
</QueryClientProvider>
@@ -726,9 +751,9 @@ export const mockLocaleUtils = {
export function mockI18nContext(locale: Locale = 'en') {
return {
locale,
messages: mockMessages[locale],
messages: testMessages[locale],
formatMessage: (id: string, values?: Record<string, unknown>) => {
const message = mockMessages[locale][id];
const message = testMessages[locale][id];
if (!message) return id;
if (!values) return message;

View File

@@ -85,8 +85,8 @@ const ParamsSchema = z.object({
maxResults: z.number().default(5), // Default 5 with full content
includeHidden: z.boolean().default(false),
languages: z.array(z.string()).optional(),
embeddingBackend: z.string().optional().describe('Embedding backend for action="embed": fastembed/local or litellm/api.'),
embeddingModel: z.string().optional().describe('Embedding model/profile for action="embed". Examples: "code", "fast", "qwen3-embedding-sf".'),
embeddingBackend: z.string().optional().describe('Embedding backend for action="embed": fastembed/local or litellm/api. Default bulk preset: local-fast.'),
embeddingModel: z.string().optional().describe('Embedding model/profile for action="embed". Examples: "code", "fast", "qwen3-embedding-sf". Default bulk preset uses "fast".'),
apiMaxWorkers: z.number().int().min(1).optional().describe('Max concurrent API embedding workers for action="embed". Recommended: 8-16 for litellm/api when multiple endpoints are configured.'),
force: z.boolean().default(false).describe('Force regeneration for action="embed".'),
limit: z.number().default(5), // Default 5 with full content
@@ -328,6 +328,7 @@ interface SearchMetadata {
use_gpu?: boolean;
cascade_strategy?: string;
staged_stage2_mode?: string;
preset?: string;
}
interface SearchResult {
@@ -951,6 +952,42 @@ function normalizeEmbeddingBackend(backend?: string): string | undefined {
return normalized;
}
/**
 * Resolve which embedding backend + model/profile an action="embed" request
 * should use, combining the caller's explicit arguments with the project's
 * CodexLens config.
 *
 * Priority:
 *  1. Explicit backend from the caller (preset 'explicit'); model falls back
 *     to the configured embedding_model when not also given.
 *  2. Model only: backend comes from config, or is assumed 'fastembed' for
 *     the built-in 'fast'/'code' profiles; unknown model with no config
 *     leaves backend undefined.
 *  3. Nothing requested: the bulk-indexing default 'fastembed' + 'fast'
 *     (preset 'bulk-local-fast'), with a caller-facing note — even when the
 *     config points at a different backend.
 *
 * @param requestedBackend backend name or alias from the tool call (e.g. 'api', 'local')
 * @param requestedModel   model/profile name from the tool call
 * @param config           current CodexLens config, if an index exists
 * @returns chosen backend/model, which preset rule fired, and an optional note
 */
function resolveEmbeddingSelection(
  requestedBackend: string | undefined,
  requestedModel: string | undefined,
  config: CodexLensConfig | null | undefined,
): { backend?: string; model?: string; preset: 'explicit' | 'config' | 'bulk-local-fast'; note?: string } {
  // normalizeEmbeddingBackend maps user-facing aliases to canonical names
  // (e.g. 'api' -> 'litellm' per the tool docs) — see its definition.
  const normalizedRequestedBackend = normalizeEmbeddingBackend(requestedBackend);
  const normalizedRequestedModel = requestedModel?.trim() || undefined;
  if (normalizedRequestedBackend) {
    // Caller named a backend explicitly: honor it verbatim.
    return {
      backend: normalizedRequestedBackend,
      model: normalizedRequestedModel || config?.embedding_model,
      preset: 'explicit',
    };
  }
  if (normalizedRequestedModel) {
    // Model only: prefer the configured backend; otherwise infer 'fastembed'
    // for the built-in local profiles.
    const inferredBackend = config?.embedding_backend
      || (['fast', 'code'].includes(normalizedRequestedModel) ? 'fastembed' : undefined);
    return {
      backend: inferredBackend,
      model: normalizedRequestedModel,
      preset: inferredBackend ? 'config' : 'explicit',
    };
  }
  // No explicit selection: default to the recommended local bulk preset and
  // tell the caller how to force remote API embeddings instead.
  return {
    backend: 'fastembed',
    model: 'fast',
    preset: 'bulk-local-fast',
    note: config?.embedding_backend && config.embedding_backend !== 'fastembed'
      ? `Using recommended bulk indexing preset: local-fast instead of configured ${config.embedding_backend}. Pass embeddingBackend="api" to force remote API embeddings.`
      : 'Using recommended bulk indexing preset: local-fast. Pass embeddingBackend="api" to force remote API embeddings.',
  };
}
const EMBED_PROGRESS_PREFIX = '__CCW_EMBED_PROGRESS__';
function resolveEmbeddingEndpoints(backend?: string): RotationEndpointConfig[] {
@@ -1214,8 +1251,9 @@ async function executeEmbedAction(params: Params): Promise<SearchResult> {
}
const currentStatus = await checkIndexStatus(scope.workingDirectory);
const normalizedBackend = normalizeEmbeddingBackend(embeddingBackend) || currentStatus.config?.embedding_backend;
const trimmedModel = embeddingModel?.trim() || currentStatus.config?.embedding_model;
const embeddingSelection = resolveEmbeddingSelection(embeddingBackend, embeddingModel, currentStatus.config);
const normalizedBackend = embeddingSelection.backend;
const trimmedModel = embeddingSelection.model;
const endpoints = resolveEmbeddingEndpoints(normalizedBackend);
const configuredApiMaxWorkers = currentStatus.config?.api_max_workers;
const effectiveApiMaxWorkers = typeof apiMaxWorkers === 'number'
@@ -1261,12 +1299,13 @@ async function executeEmbedAction(params: Params): Promise<SearchResult> {
path: scope.workingDirectory,
backend: normalizedBackend || indexStatus?.config?.embedding_backend,
embeddings_coverage_percent: coverage,
api_max_workers: effectiveApiMaxWorkers,
api_max_workers: normalizedBackend === 'litellm' ? effectiveApiMaxWorkers : undefined,
endpoint_count: endpoints.length,
use_gpu: true,
cascade_strategy: currentStatus.config?.cascade_strategy,
staged_stage2_mode: currentStatus.config?.staged_stage2_mode,
note: progressMessage,
note: [embeddingSelection.note, progressMessage].filter(Boolean).join(' | ') || undefined,
preset: embeddingSelection.preset,
},
status: indexStatus,
};
@@ -2590,8 +2629,8 @@ Recommended MCP flow: use **action=\"search\"** for lookups, **action=\"init\"**
* **embed**: Generate semantic/vector embeddings for an indexed project.
* *path* (string): Directory to embed (default: current).
* *embeddingBackend* (string): 'litellm'/'api' for remote API embeddings, 'fastembed'/'local' for local embeddings.
* *embeddingModel* (string): Embedding model/profile to use.
* *embeddingBackend* (string): 'litellm'/'api' for remote API embeddings, 'fastembed'/'local' for local embeddings. Default bulk preset: local-fast.
* *embeddingModel* (string): Embedding model/profile to use. Default bulk preset uses 'fast'.
* *apiMaxWorkers* (number): Max concurrent API embedding workers. Defaults to auto-sizing from the configured endpoint pool.
* *force* (boolean): Regenerate embeddings even if they already exist.
@@ -2693,11 +2732,11 @@ Recommended MCP flow: use **action=\"search\"** for lookups, **action=\"init\"**
},
embeddingBackend: {
type: 'string',
description: 'Embedding backend for action="embed": litellm/api (remote API) or fastembed/local (local GPU/CPU).',
description: 'Embedding backend for action="embed": litellm/api (remote API) or fastembed/local (local GPU/CPU). Default bulk preset: local-fast.',
},
embeddingModel: {
type: 'string',
description: 'Embedding model/profile for action="embed". Examples: "code", "fast", "qwen3-embedding-sf".',
description: 'Embedding model/profile for action="embed". Examples: "code", "fast", "qwen3-embedding-sf". Default bulk preset uses "fast".',
},
apiMaxWorkers: {
type: 'number',
@@ -3163,6 +3202,7 @@ export const __testables = {
parseCodexLensJsonOutput,
parsePlainTextFileMatches,
hasCentralizedVectorArtifacts,
resolveEmbeddingSelection,
};
export async function executeInitWithProgress(

View File

@@ -121,6 +121,66 @@ describe('Smart Search MCP usage defaults and path handling', async () => {
assert.ok(toolResult.result.results.length >= 1);
});
// Default selection: with no explicit backend/model, bulk embedding falls back
// to the local-fast preset even when the config prefers a remote backend.
it('defaults embed selection to local-fast for bulk indexing', () => {
  if (!smartSearchModule) return;
  const selection = smartSearchModule.__testables.resolveEmbeddingSelection(undefined, undefined, {
    embedding_backend: 'litellm',
    embedding_model: 'qwen3-embedding-sf',
  });
  assert.equal(selection.backend, 'fastembed');
  assert.equal(selection.model, 'fast');
  assert.equal(selection.preset, 'bulk-local-fast');
  // The note must tell the caller that the preset overrode the configured backend.
  assert.match(selection.note, /local-fast/i);
});
// Explicit request: the 'api' alias normalizes to litellm and wins over config.
it('keeps explicit api embedding selection when requested', () => {
  if (!smartSearchModule) return;
  const selection = smartSearchModule.__testables.resolveEmbeddingSelection('api', 'qwen3-embedding-sf', {
    embedding_backend: 'fastembed',
    embedding_model: 'fast',
  });
  assert.equal(selection.backend, 'litellm');
  assert.equal(selection.model, 'qwen3-embedding-sf');
  assert.equal(selection.preset, 'explicit');
});
// Output parsing: tolerate warning lines printed before the JSON payload, and
// fall back to plain-text file lists when no JSON is emitted at all.
it('parses warning-prefixed JSON and plain-text file lists for semantic fallback', () => {
  if (!smartSearchModule) return;
  const dir = createWorkspace();
  const target = join(dir, 'target.ts');
  writeFileSync(target, 'export const target = 1;\n');
  const parsed = smartSearchModule.__testables.parseCodexLensJsonOutput([
    'RuntimeWarning: compatibility shim',
    JSON.stringify({ results: [{ file: 'target.ts', score: 0.25, excerpt: 'target' }] }),
  ].join('\n'));
  assert.equal(Array.isArray(parsed.results), true);
  assert.equal(parsed.results[0].file, 'target.ts');
  const matches = smartSearchModule.__testables.parsePlainTextFileMatches(target, {
    workingDirectory: dir,
    searchPaths: ['.'],
  });
  assert.equal(matches.length, 1);
  // Normalize Windows path separators before matching the file suffix.
  assert.match(String(matches[0].file).replace(/\\/g, '/'), /target\.ts$/);
});
// Coverage detection: the three centralized vector artifact files together
// count as evidence of full embedding coverage.
it('detects centralized vector artifacts as full embedding coverage evidence', () => {
  if (!smartSearchModule) return;
  const dir = createWorkspace();
  writeFileSync(join(dir, '_vectors.hnsw'), 'hnsw');
  writeFileSync(join(dir, '_vectors_meta.db'), 'meta');
  writeFileSync(join(dir, '_binary_vectors.mmap'), 'mmap');
  assert.equal(smartSearchModule.__testables.hasCentralizedVectorArtifacts(dir), true);
});
it('surfaces backend failure details when fuzzy search fully fails', async () => {
if (!smartSearchModule) return;