-
+
+ {/* Section 1: Claude ↔ Codex Sync */}
+
+
+
- {formatMessage({ id: 'mcp.crossCli.title' })}
+ {formatMessage({ id: 'mcp.sync.title' })}
-
- {formatMessage({ id: 'mcp.crossCli.selectServersHint' })}
-
- refetch()}
+
+ refetch()} />
+
+
+
+ {/* Section 2: Project Overview */}
+
+
+
+
+ {formatMessage({ id: 'mcp.projects.title' })}
+
+
+
+ {formatMessage({ id: 'mcp.projects.description' })}
+
+ console.log('Open project:', path)}
+ onOpenNewWindow={(path) => window.open(`/?project=${encodeURIComponent(path)}`, '_blank')}
/>
-
+
- {/* All Projects Table */}
-
console.log('Open project:', path)}
- onOpenNewWindow={(path) => window.open(`/?project=${encodeURIComponent(path)}`, '_blank')}
- />
-
- {/* Other Projects Section */}
- {
- console.log('Imported server:', serverName, 'from:', sourceProject);
- refetch();
- }}
- />
+ {/* Section 3: Cross-Project Import */}
+
+
+
+
+ {formatMessage({ id: 'mcp.crossProject.title' })}
+
+
+
+ {formatMessage({ id: 'mcp.crossProject.description' })}
+
+ {
+ console.log('Imported server:', serverName, 'from:', sourceProject);
+ refetch();
+ }}
+ />
+
)}
diff --git a/ccw/src/.gitignore b/ccw/src/.gitignore
new file mode 100644
index 00000000..b4a7d405
--- /dev/null
+++ b/ccw/src/.gitignore
@@ -0,0 +1 @@
+.ace-tool/
diff --git a/ccw/src/core/a2ui/A2UIWebSocketHandler.ts b/ccw/src/core/a2ui/A2UIWebSocketHandler.ts
index 234c354f..7430af43 100644
--- a/ccw/src/core/a2ui/A2UIWebSocketHandler.ts
+++ b/ccw/src/core/a2ui/A2UIWebSocketHandler.ts
@@ -222,20 +222,30 @@ export class A2UIWebSocketHandler {
});
const req = http.request({
- hostname: 'localhost',
+ hostname: '127.0.0.1',
port: DASHBOARD_PORT,
path: '/api/hook',
method: 'POST',
+ timeout: 2000,
headers: {
'Content-Type': 'application/json',
'Content-Length': Buffer.byteLength(body),
},
});
+ // Fire-and-forget: don't keep the process alive due to an open socket
+ req.on('socket', (socket) => {
+ socket.unref();
+ });
+
req.on('error', (err) => {
console.error(`[A2UI] Failed to forward surface ${surfaceUpdate.surfaceId} to Dashboard:`, err.message);
});
+ req.on('timeout', () => {
+ req.destroy(new Error('Request timed out'));
+ });
+
req.write(body);
req.end();
diff --git a/ccw/src/core/auth/middleware.ts b/ccw/src/core/auth/middleware.ts
index ae2d266a..1a4132d2 100644
--- a/ccw/src/core/auth/middleware.ts
+++ b/ccw/src/core/auth/middleware.ts
@@ -90,6 +90,7 @@ const LOCALHOST_PUBLIC_PATHS = [
'/api/litellm-api/providers',
'/api/litellm-api/endpoints',
'/api/health',
+ '/api/a2ui/answer',
];
/**
diff --git a/ccw/src/tools/ask-question.ts b/ccw/src/tools/ask-question.ts
index 857c7ba8..6eccfd42 100644
--- a/ccw/src/tools/ask-question.ts
+++ b/ccw/src/tools/ask-question.ts
@@ -562,11 +568,17 @@ function startAnswerPolling(questionId: string, isComposite: boolean = false): void {
return;
}
- const req = http.get({ hostname: '127.0.0.1', port: DASHBOARD_PORT, path: pollPath }, (res) => {
+ const req = http.get({ hostname: '127.0.0.1', port: DASHBOARD_PORT, path: pollPath, timeout: 2000 }, (res) => {
let data = '';
res.on('data', (chunk: Buffer) => { data += chunk.toString(); });
res.on('end', () => {
try {
+ if (res.statusCode && res.statusCode >= 400) {
+ console.error(`[A2UI-Poll] HTTP ${res.statusCode} from Dashboard (first 200 chars):`, data.slice(0, 200));
+ setTimeout(poll, POLL_INTERVAL_MS);
+ return;
+ }
+
const parsed = JSON.parse(data);
if (parsed.pending) {
// No answer yet, schedule next poll
@@ -599,6 +605,10 @@ function startAnswerPolling(questionId: string, isComposite: boolean = false): void {
setTimeout(poll, POLL_INTERVAL_MS);
}
});
+
+ req.on('timeout', () => {
+ req.destroy(new Error('Request timed out'));
+ });
};
// Start first poll after a short delay to give the Dashboard time to receive the surface
diff --git a/ccw/src/tools/edit-file.ts b/ccw/src/tools/edit-file.ts
index 7e2319c4..a6e9485b 100644
--- a/ccw/src/tools/edit-file.ts
+++ b/ccw/src/tools/edit-file.ts
@@ -23,25 +23,64 @@ const EditItemSchema = z.object({
newText: z.string(),
});
-const ParamsSchema = z.object({
+// Base schema with common parameters
+const BaseParamsSchema = z.object({
path: z.string().min(1, 'Path is required'),
- mode: z.enum(['update', 'line']).default('update'),
dryRun: z.boolean().default(false),
- // Update mode params
+});
+
+// Update mode schema
+const UpdateModeSchema = BaseParamsSchema.extend({
+ mode: z.literal('update').default('update'),
oldText: z.string().optional(),
newText: z.string().optional(),
edits: z.array(EditItemSchema).optional(),
- replaceAll: z.boolean().optional(),
- // Line mode params
- operation: z.enum(['insert_before', 'insert_after', 'replace', 'delete']).optional(),
- line: z.number().optional(),
- end_line: z.number().optional(),
+ replaceAll: z.boolean().default(false),
+}).refine(
+ (data) => {
+ const hasSingle = data.oldText !== undefined;
+ const hasBatch = data.edits !== undefined;
+ // Reject only the case where both oldText/newText and edits are provided
+ return !(hasSingle && hasBatch);
+ },
+ {
+ message: 'Use either oldText/newText or edits array, not both',
+ }
+);
+
+// Line mode schema
+const LineModeSchema = BaseParamsSchema.extend({
+ mode: z.literal('line'),
+ operation: z.enum(['insert_before', 'insert_after', 'replace', 'delete']),
+ line: z.number().int().positive('Line must be a positive integer'),
+ end_line: z.number().int().positive().optional(),
text: z.string().optional(),
-});
+}).refine(
+ (data) => {
+ // text is required for insert_before, insert_after, and replace operations
+ if (['insert_before', 'insert_after', 'replace'].includes(data.operation)) {
+ return data.text !== undefined;
+ }
+ return true;
+ },
+ {
+ message: 'Parameter "text" is required for insert_before, insert_after, and replace operations',
+ }
+);
+
+// Discriminated union schema
+const ParamsSchema = z.discriminatedUnion('mode', [
+ UpdateModeSchema,
+ LineModeSchema,
+]);
type Params = z.infer<typeof ParamsSchema>;
type EditItem = z.infer<typeof EditItemSchema>;
+// Extract specific types for each mode
+type UpdateModeParams = z.infer<typeof UpdateModeSchema>;
+type LineModeParams = z.infer<typeof LineModeSchema>;
+
interface UpdateModeResult {
content: string;
modified: boolean;
@@ -229,7 +268,7 @@ function createUnifiedDiff(original: string, modified: string, filePath: string)
* Auto-adapts line endings (CRLF/LF)
* Supports multiple edits via 'edits' array
*/
-function executeUpdateMode(content: string, params: Params, filePath: string): UpdateModeResult {
+function executeUpdateMode(content: string, params: UpdateModeParams, filePath: string): UpdateModeResult {
const { oldText, newText, replaceAll, edits, dryRun = false } = params;
// Detect original line ending
@@ -334,11 +373,10 @@ function executeUpdateMode(content: string, params: Params, filePath: string): UpdateModeResult {
* Mode: line - Line-based operations
* Operations: insert_before, insert_after, replace, delete
*/
-function executeLineMode(content: string, params: Params): LineModeResult {
+function executeLineMode(content: string, params: LineModeParams): LineModeResult {
const { operation, line, text, end_line } = params;
- if (!operation) throw new Error('Parameter "operation" is required for line mode');
- if (line === undefined) throw new Error('Parameter "line" is required for line mode');
+ // No need for additional validation - Zod schema already ensures required fields
// Detect original line ending and normalize for processing
const hasCRLF = content.includes('\r\n');
@@ -418,15 +456,30 @@ export const schema: ToolSchema = {
name: 'edit_file',
description: `Edit file using two modes: "update" for text replacement (default) and "line" for line-based operations.
-Usage (update mode):
+**Update Mode** (default):
+- Use oldText/newText for single replacement OR edits for multiple replacements
+- Parameters: oldText, newText, replaceAll, dryRun
+- Cannot use line mode parameters (operation, line, end_line, text)
+- Validation: oldText/newText and edits are mutually exclusive
+
+**Line Mode**:
+- Use for precise line-based operations
+- Parameters: operation (insert_before/insert_after/replace/delete), line, end_line, text, dryRun
+- Cannot use update mode parameters (oldText, newText, edits, replaceAll)
+
+Usage (update mode - single replacement):
edit_file(path="f.js", oldText="old", newText="new")
+
+Usage (update mode - multiple replacements):
edit_file(path="f.js", edits=[{oldText:"a",newText:"b"},{oldText:"c",newText:"d"}])
Usage (line mode):
edit_file(path="f.js", mode="line", operation="insert_after", line=10, text="new line")
edit_file(path="f.js", mode="line", operation="delete", line=5, end_line=8)
-Options: dryRun=true (preview diff), replaceAll=true (update mode only)`,
+Options: dryRun=true (preview diff), replaceAll=true (update mode only)
+
+**Important**: Each mode only accepts its own parameters. Providing parameters from both modes will cause a validation error.`,
inputSchema: {
type: 'object',
properties: {
@@ -448,7 +501,7 @@ Options: dryRun=true (preview diff), replaceAll=true (update mode only)`,
// Update mode params
oldText: {
type: 'string',
- description: '[update mode] Text to find and replace (use oldText/newText OR edits array)',
+ description: '[update mode] Text to find and replace. **Mutually exclusive with edits parameter** - use either oldText/newText or edits, not both.',
},
newText: {
type: 'string',
@@ -456,7 +509,7 @@ Options: dryRun=true (preview diff), replaceAll=true (update mode only)`,
},
edits: {
type: 'array',
- description: '[update mode] Array of {oldText, newText} for multiple replacements',
+ description: '[update mode] Array of {oldText, newText} for multiple replacements. **Mutually exclusive with oldText/newText** - use either oldText/newText or edits, not both.',
items: {
type: 'object',
properties: {
@@ -474,19 +527,19 @@ Options: dryRun=true (preview diff), replaceAll=true (update mode only)`,
operation: {
type: 'string',
enum: ['insert_before', 'insert_after', 'replace', 'delete'],
- description: '[line mode] Line operation type',
+ description: '[line mode] Line operation type. **Only valid in line mode** - cannot be combined with update mode parameters.',
},
line: {
type: 'number',
- description: '[line mode] Line number (1-based)',
+ description: '[line mode] Line number (1-based). **Only valid in line mode** - cannot be combined with update mode parameters.',
},
end_line: {
type: 'number',
- description: '[line mode] End line for range operations',
+ description: '[line mode] End line for range operations. **Only valid in line mode** - cannot be combined with update mode parameters.',
},
text: {
type: 'string',
- description: '[line mode] Text for insert/replace operations',
+ description: '[line mode] Text for insert/replace operations. **Only valid in line mode** - cannot be combined with update mode parameters.',
},
},
required: ['path'],
diff --git a/ccw/src/tools/read-file.ts b/ccw/src/tools/read-file.ts
--- a/ccw/src/tools/read-file.ts
+++ b/ccw/src/tools/read-file.ts
+}).refine((data) => {
+ // Validate: offset/limit only allowed for single file mode
+ const hasPagination = data.offset !== undefined || data.limit !== undefined;
+ const isMultiple = Array.isArray(data.paths) && data.paths.length > 1;
+ return !(hasPagination && isMultiple);
+}, {
+ message: 'offset/limit parameters are only supported for single file mode. Cannot use with multiple paths.',
+ path: ['offset', 'limit', 'paths'],
});
type Params = z.infer<typeof ParamsSchema>;
@@ -267,12 +275,12 @@ Returns compact file list with optional content. Use offset/limit for large file
},
offset: {
type: 'number',
- description: 'Line offset to start reading from (0-based, for single file only)',
+ description: 'Line offset to start reading from (0-based). **Only for single file mode** - validation error if used with multiple paths.',
minimum: 0,
},
limit: {
type: 'number',
- description: 'Number of lines to read (for single file only)',
+ description: 'Number of lines to read. **Only for single file mode** - validation error if used with multiple paths.',
minimum: 1,
},
},
diff --git a/ccw/tests/e2e/ask-question-answer-broker.e2e.test.ts b/ccw/tests/e2e/ask-question-answer-broker.e2e.test.ts
new file mode 100644
index 00000000..49a282e3
--- /dev/null
+++ b/ccw/tests/e2e/ask-question-answer-broker.e2e.test.ts
@@ -0,0 +1,271 @@
+/**
+ * E2E: ask_question Answer Broker
+ *
+ * Verifies that when the MCP server runs as a separate stdio process (no local WS clients),
+ * `ask_question` forwards the surface to the Dashboard via /api/hook and later retrieves
+ * the user's answer via /api/a2ui/answer polling.
+ */
+import { after, before, describe, it, mock } from 'node:test';
+import assert from 'node:assert/strict';
+import http from 'node:http';
+import { spawn, type ChildProcess } from 'node:child_process';
+import { mkdtempSync, rmSync } from 'node:fs';
+import { tmpdir } from 'node:os';
+import { dirname, join } from 'node:path';
+import { fileURLToPath } from 'node:url';
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = dirname(__filename);
+
+const serverUrl = new URL('../../dist/core/server.js', import.meta.url);
+serverUrl.searchParams.set('t', String(Date.now()));
+
+interface JsonRpcRequest {
+ jsonrpc: string;
+ id: number;
+ method: string;
+ params: any;
+}
+
+interface JsonRpcResponse {
+ jsonrpc: string;
+ id: number;
+ result?: any;
+ error?: { code: number; message: string; data?: any };
+}
+
+class McpClient {
+ private serverProcess!: ChildProcess;
+ private requestId = 0;
+ private pendingRequests = new Map<number, { resolve: (response: JsonRpcResponse) => void; reject: (e: Error) => void }>();
+
+ private env: Record<string, string>;
+
+ constructor(env: Record<string, string>) {
+ this.env = env;
+ }
+
+ async start(): Promise<void> {
+ const serverPath = join(__dirname, '../../bin/ccw-mcp.js');
+ this.serverProcess = spawn('node', [serverPath], {
+ stdio: ['pipe', 'pipe', 'pipe'],
+ env: { ...process.env, ...this.env },
+ });
+
+ await new Promise<void>((resolve, reject) => {
+ const timeout = setTimeout(() => reject(new Error('MCP server start timeout')), 15000);
+ this.serverProcess.stderr!.on('data', (data) => {
+ const message = data.toString();
+ if (message.includes('started') || message.includes('ccw-tools')) {
+ clearTimeout(timeout);
+ resolve();
+ }
+ });
+ this.serverProcess.on('error', (err) => {
+ clearTimeout(timeout);
+ reject(err);
+ });
+ });
+
+ this.serverProcess.stdout!.on('data', (data) => {
+ try {
+ const lines = data.toString().split('\n').filter((l: string) => l.trim());
+ for (const line of lines) {
+ const response: JsonRpcResponse = JSON.parse(line);
+ const pending = this.pendingRequests.get(response.id);
+ if (pending) {
+ this.pendingRequests.delete(response.id);
+ pending.resolve(response);
+ }
+ }
+ } catch {
+ // ignore parse errors
+ }
+ });
+ }
+
+ async call(method: string, params: any = {}, timeoutMs = 10000): Promise<JsonRpcResponse> {
+ const id = ++this.requestId;
+ const request: JsonRpcRequest = { jsonrpc: '2.0', id, method, params };
+
+ return new Promise((resolve, reject) => {
+ const timeout = setTimeout(() => {
+ this.pendingRequests.delete(id);
+ reject(new Error(`Request timeout for ${method}`));
+ }, timeoutMs);
+
+ this.pendingRequests.set(id, {
+ resolve: (response) => {
+ clearTimeout(timeout);
+ resolve(response);
+ },
+ reject: (error) => {
+ clearTimeout(timeout);
+ reject(error);
+ },
+ });
+
+ this.serverProcess.stdin!.write(JSON.stringify(request) + '\n');
+ });
+ }
+
+ stop(): void {
+ this.serverProcess?.kill();
+ }
+}
+
+function waitForWebSocketOpen(ws: WebSocket, timeoutMs = 10000): Promise<void> {
+ return new Promise((resolve, reject) => {
+ const t = setTimeout(() => reject(new Error('WebSocket open timeout')), timeoutMs);
+ ws.addEventListener('open', () => {
+ clearTimeout(t);
+ resolve();
+ });
+ ws.addEventListener('error', () => {
+ clearTimeout(t);
+ reject(new Error('WebSocket error'));
+ });
+ });
+}
+
+function waitForA2UISurface(ws: WebSocket, timeoutMs = 15000): Promise<any> {
+ return new Promise((resolve, reject) => {
+ const t = setTimeout(() => reject(new Error('Timed out waiting for a2ui-surface')), timeoutMs);
+ const handler = (event: MessageEvent) => {
+ try {
+ const data = JSON.parse(String(event.data));
+ if (data?.type === 'a2ui-surface' && data?.payload?.initialState?.questionId) {
+ clearTimeout(t);
+ ws.removeEventListener('message', handler);
+ resolve(data);
+ }
+ } catch {
+ // ignore
+ }
+ };
+ ws.addEventListener('message', handler);
+ });
+}
+
+function httpRequest(options: http.RequestOptions, body?: string, timeout = 10000): Promise<{ status: number; body: string }> {
+ return new Promise((resolve, reject) => {
+ const req = http.request(options, (res) => {
+ let data = '';
+ res.on('data', (chunk) => (data += chunk));
+ res.on('end', () => resolve({ status: res.statusCode || 0, body: data }));
+ });
+ req.on('error', reject);
+ req.setTimeout(timeout, () => {
+ req.destroy();
+ reject(new Error('Request timeout'));
+ });
+ if (body) req.write(body);
+ req.end();
+ });
+}
+
+describe('E2E: ask_question Answer Broker', async () => {
+ let server: http.Server;
+ let port: number;
+ let projectRoot: string;
+ const originalCwd = process.cwd();
+ let mcp: McpClient;
+ let ws: WebSocket;
+
+ before(async () => {
+ process.env.CCW_DISABLE_WARMUP = '1';
+
+ projectRoot = mkdtempSync(join(tmpdir(), 'ccw-e2e-askq-'));
+ process.chdir(projectRoot);
+
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const serverMod: any = await import(serverUrl.href);
+ mock.method(console, 'log', () => {});
+ mock.method(console, 'error', () => {});
+
+ server = await serverMod.startServer({ initialPath: projectRoot, port: 0 });
+ const addr = server.address();
+ port = typeof addr === 'object' && addr ? addr.port : 0;
+ assert.ok(port > 0, 'Server should start on a valid port');
+
+ ws = new WebSocket(`ws://127.0.0.1:${port}/ws`);
+ await waitForWebSocketOpen(ws);
+
+ mcp = new McpClient({
+ CCW_PROJECT_ROOT: projectRoot,
+ CCW_ENABLED_TOOLS: 'all',
+ CCW_PORT: String(port),
+ CCW_DISABLE_WARMUP: '1',
+ });
+ await mcp.start();
+
+ // Sanity: broker endpoint should be reachable without auth from localhost
+ const broker = await httpRequest({ hostname: '127.0.0.1', port, path: '/api/a2ui/answer?questionId=nonexistent', method: 'GET' });
+ assert.equal(broker.status, 200);
+ });
+
+ after(async () => {
+ try {
+ ws?.close();
+ } catch {}
+ mcp?.stop();
+
+ await new Promise<void>((resolve) => {
+ server.close(() => resolve());
+ });
+ process.chdir(originalCwd);
+ rmSync(projectRoot, { recursive: true, force: true });
+ mock.restoreAll();
+ });
+
+ it('returns the answered value via MCP tool call', async () => {
+ const questionId = `e2e-q-${Date.now()}`;
+
+ const toolCallPromise = mcp.call(
+ 'tools/call',
+ {
+ name: 'ask_question',
+ arguments: {
+ question: {
+ id: questionId,
+ type: 'confirm',
+ title: 'E2E Confirm',
+ message: 'Confirm this in the test harness',
+ },
+ timeout: 15000,
+ },
+ },
+ 30000,
+ );
+
+ const surfaceMsg = await waitForA2UISurface(ws, 15000);
+ const surfaceId = surfaceMsg.payload.surfaceId as string;
+ const receivedQuestionId = surfaceMsg.payload.initialState.questionId as string;
+ assert.equal(receivedQuestionId, questionId);
+
+ ws.send(
+ JSON.stringify({
+ type: 'a2ui-action',
+ actionId: 'confirm',
+ surfaceId,
+ parameters: { questionId },
+ timestamp: new Date().toISOString(),
+ }),
+ );
+
+ const response = await toolCallPromise;
+ assert.equal(response.jsonrpc, '2.0');
+ assert.ok(response.result);
+ assert.ok(Array.isArray(response.result.content));
+
+ const text = response.result.content[0]?.text as string;
+ const parsed = JSON.parse(text);
+ const resultObj = parsed.result ?? parsed;
+
+ assert.equal(resultObj.success, true);
+ assert.equal(resultObj.cancelled, false);
+ assert.ok(Array.isArray(resultObj.answers));
+ assert.equal(resultObj.answers[0].questionId, questionId);
+ assert.equal(resultObj.answers[0].value, true);
+ });
+});
diff --git a/ccw/tests/hook-quoting-fix.test.js b/ccw/tests/hook-quoting-fix.test.js
index e9a1b819..8836f463 100644
--- a/ccw/tests/hook-quoting-fix.test.js
+++ b/ccw/tests/hook-quoting-fix.test.js
@@ -5,7 +5,8 @@
* Tests that bash -c commands use single quotes to avoid jq escaping issues
*/
-import { describe, it, expect } from 'vitest';
+import { describe, it } from 'node:test';
+import assert from 'node:assert/strict';
// Import the convertToClaudeCodeFormat function logic
// Since it's in a browser JS file, we'll recreate it here for testing
@@ -58,9 +59,9 @@ describe('Hook Quoting Fix (Issue #73)', () => {
const result = convertToClaudeCodeFormat(hookData);
- expect(result.hooks[0].command).toMatch(/^bash -c '/);
- expect(result.hooks[0].command).toMatch(/'$/);
- expect(result.hooks[0].command).not.toMatch(/^bash -c "/);
+ assert.match(result.hooks[0].command, /^bash -c '/);
+ assert.match(result.hooks[0].command, /'$/);
+ assert.doesNotMatch(result.hooks[0].command, /^bash -c "/);
});
it('should preserve jq command double quotes without excessive escaping', () => {
@@ -73,9 +74,9 @@ describe('Hook Quoting Fix (Issue #73)', () => {
const cmd = result.hooks[0].command;
// The jq pattern should remain readable
- expect(cmd).toContain('jq -r ".tool_input.command // empty"');
+ assert.ok(cmd.includes('jq -r ".tool_input.command // empty"'));
// Should not have excessive escaping like \\\"
- expect(cmd).not.toContain('\\\\\\"');
+ assert.ok(!cmd.includes('\\\\\\"'));
});
it('should correctly escape single quotes in script using \'\\\'\'', () => {
@@ -88,8 +89,8 @@ describe('Hook Quoting Fix (Issue #73)', () => {
const cmd = result.hooks[0].command;
// Single quotes should be escaped as '\''
- expect(cmd).toContain("'\\''");
- expect(cmd).toBe("bash -c 'echo '\\''hello world'\\'''");
+ assert.ok(cmd.includes("'\\''"));
+ assert.equal(cmd, "bash -c 'echo '\\''hello world'\\'''");
});
it('should handle danger-bash-confirm hook template correctly', () => {
@@ -102,11 +103,11 @@ describe('Hook Quoting Fix (Issue #73)', () => {
const cmd = result.hooks[0].command;
// Should use single quotes
- expect(cmd).toMatch(/^bash -c '/);
+ assert.match(cmd, /^bash -c '/);
// jq pattern should be intact
- expect(cmd).toContain('jq -r ".tool_input.command // empty"');
+ assert.ok(cmd.includes('jq -r ".tool_input.command // empty"'));
// JSON output should have escaped double quotes (in shell)
- expect(cmd).toContain('{\\"hookSpecificOutput\\"');
+ assert.ok(cmd.includes('{\\"hookSpecificOutput\\"'));
});
it('should handle non-bash commands with original logic', () => {
@@ -117,7 +118,7 @@ describe('Hook Quoting Fix (Issue #73)', () => {
const result = convertToClaudeCodeFormat(hookData);
- expect(result.hooks[0].command).toBe('ccw memory track --type file --action read');
+ assert.equal(result.hooks[0].command, 'ccw memory track --type file --action read');
});
it('should handle bash commands without -c flag with original logic', () => {
@@ -128,7 +129,7 @@ describe('Hook Quoting Fix (Issue #73)', () => {
const result = convertToClaudeCodeFormat(hookData);
- expect(result.hooks[0].command).toBe('bash script.sh --arg value');
+ assert.equal(result.hooks[0].command, 'bash script.sh --arg value');
});
it('should handle args with spaces in non-bash commands', () => {
@@ -139,7 +140,7 @@ describe('Hook Quoting Fix (Issue #73)', () => {
const result = convertToClaudeCodeFormat(hookData);
- expect(result.hooks[0].command).toBe('echo "hello world" "another arg"');
+ assert.equal(result.hooks[0].command, 'echo "hello world" "another arg"');
});
it('should handle already formatted hook data', () => {
@@ -152,7 +153,7 @@ describe('Hook Quoting Fix (Issue #73)', () => {
const result = convertToClaudeCodeFormat(hookData);
- expect(result).toBe(hookData);
+ assert.equal(result, hookData);
});
it('should handle additional args after bash -c script', () => {
@@ -164,8 +165,8 @@ describe('Hook Quoting Fix (Issue #73)', () => {
const result = convertToClaudeCodeFormat(hookData);
const cmd = result.hooks[0].command;
- expect(cmd).toMatch(/^bash -c 'echo \$1'/);
- expect(cmd).toContain('"hello world"');
+ assert.match(cmd, /^bash -c 'echo \$1'/);
+ assert.ok(cmd.includes('"hello world"'));
});
});
@@ -195,11 +196,11 @@ describe('Hook Quoting Fix (Issue #73)', () => {
const cmd = result.hooks[0].command;
// All bash -c commands should use single quotes
- expect(cmd).toMatch(/^bash -c '/);
- expect(cmd).toMatch(/'$/);
+ assert.match(cmd, /^bash -c '/);
+ assert.match(cmd, /'$/);
// jq patterns should be intact
- expect(cmd).toContain('jq -r ".');
+ assert.ok(cmd.includes('jq -r ".'));
});
}
});
diff --git a/ccw/tests/smart-search-enrich.test.js b/ccw/tests/smart-search-enrich.test.js
index c426883c..2950c17d 100644
--- a/ccw/tests/smart-search-enrich.test.js
+++ b/ccw/tests/smart-search-enrich.test.js
@@ -206,9 +206,8 @@ describe('Smart Search Tool Definition', async () => {
const modeEnum = params.properties.mode?.enum;
assert.ok(modeEnum, 'Should have mode enum');
- assert.ok(modeEnum.includes('auto'), 'Should support auto mode');
- assert.ok(modeEnum.includes('hybrid'), 'Should support hybrid mode');
- assert.ok(modeEnum.includes('exact'), 'Should support exact mode');
+ assert.ok(modeEnum.includes('fuzzy'), 'Should support fuzzy mode');
+ assert.ok(modeEnum.includes('semantic'), 'Should support semantic mode');
});
});
diff --git a/codex-lens/src/codexlens/config.py b/codex-lens/src/codexlens/config.py
index c46deb04..d333b308 100644
--- a/codex-lens/src/codexlens/config.py
+++ b/codex-lens/src/codexlens/config.py
@@ -146,9 +146,12 @@ class Config:
staged_coarse_k: int = 200 # Number of coarse candidates from Stage 1 binary search
staged_lsp_depth: int = 2 # LSP relationship expansion depth in Stage 2
staged_stage2_mode: str = "precomputed" # "precomputed" (graph_neighbors) | "realtime" (LSP)
- staged_realtime_lsp_timeout_s: float = 10.0 # Max time budget for realtime LSP expansion
- staged_realtime_lsp_max_nodes: int = 100 # Node cap for realtime graph expansion
- staged_realtime_lsp_warmup_s: float = 2.0 # Wait for server analysis after opening seed docs
+ staged_realtime_lsp_timeout_s: float = 30.0 # Max time budget for realtime LSP expansion
+ staged_realtime_lsp_depth: int = 1 # BFS depth for realtime LSP expansion
+ staged_realtime_lsp_max_nodes: int = 50 # Node cap for realtime graph expansion
+ staged_realtime_lsp_max_seeds: int = 1 # Seed cap for realtime graph expansion
+ staged_realtime_lsp_max_concurrent: int = 2 # Max concurrent LSP requests during graph expansion
+ staged_realtime_lsp_warmup_s: float = 3.0 # Wait for server analysis after opening seed docs
staged_realtime_lsp_resolve_symbols: bool = False # If True, resolves symbol names via documentSymbol (slower)
staged_clustering_strategy: str = "auto" # "auto", "hdbscan", "dbscan", "frequency", "noop"
staged_clustering_min_size: int = 3 # Minimum cluster size for Stage 3 grouping
diff --git a/codex-lens/src/codexlens/lsp/lsp_bridge.py b/codex-lens/src/codexlens/lsp/lsp_bridge.py
index d3e2523b..0e41cc24 100644
--- a/codex-lens/src/codexlens/lsp/lsp_bridge.py
+++ b/codex-lens/src/codexlens/lsp/lsp_bridge.py
@@ -14,6 +14,7 @@ Features:
from __future__ import annotations
import asyncio
+import logging
import os
import time
from collections import OrderedDict
@@ -22,6 +23,8 @@ from pathlib import Path
from typing import Any, Dict, List, Optional, TYPE_CHECKING
from urllib.parse import unquote
+logger = logging.getLogger(__name__)
+
if TYPE_CHECKING:
from codexlens.lsp.standalone_manager import StandaloneLspManager
@@ -362,6 +365,14 @@ class LspBridge:
except (KeyError, TypeError):
continue
+ logger.debug(
+ "LSP references for %s (%s:%s:%s): %d",
+ symbol.id,
+ symbol.file_path,
+ symbol.range.start_line,
+ symbol.range.start_character,
+ len(locations),
+ )
self._cache(cache_key, symbol.file_path, locations)
return locations
@@ -542,6 +553,14 @@ class LspBridge:
detail="Inferred from reference",
))
+ logger.debug(
+ "LSP call hierarchy for %s (%s:%s:%s): %d",
+ symbol.id,
+ symbol.file_path,
+ symbol.range.start_line,
+ symbol.range.start_character,
+ len(items),
+ )
self._cache(cache_key, symbol.file_path, items)
return items
diff --git a/codex-lens/src/codexlens/search/chain_search.py b/codex-lens/src/codexlens/search/chain_search.py
index d609ed26..50200deb 100644
--- a/codex-lens/src/codexlens/search/chain_search.py
+++ b/codex-lens/src/codexlens/search/chain_search.py
@@ -854,7 +854,7 @@ class ChainSearchEngine:
# ========== Stage 2: LSP Graph Expansion ==========
stage2_start = time.time()
- expanded_results = self._stage2_lsp_expand(coarse_results, index_root)
+ expanded_results = self._stage2_lsp_expand(coarse_results, index_root, query=query)
stage_times["stage2_expand_ms"] = (time.time() - stage2_start) * 1000
stage_counts["stage2_expanded"] = len(expanded_results)
@@ -969,8 +969,9 @@ class ChainSearchEngine:
# Try centralized BinarySearcher first (preferred for mmap indexes)
index_root = index_paths[0].parent if index_paths else None
- coarse_candidates: List[Tuple[int, int, Path]] = [] # (chunk_id, distance, index_path)
+ coarse_candidates: List[Tuple[int, float, Path]] = [] # (chunk_id, distance, index_path)
used_centralized = False
+ using_dense_fallback = False
if index_root:
binary_searcher = self._get_centralized_binary_searcher(index_root)
@@ -992,30 +993,78 @@ class ChainSearchEngine:
self.logger.debug(f"Centralized binary search failed: {exc}")
if not used_centralized:
- # Fallback to per-directory binary indexes
- use_gpu = True
- if self._config is not None:
- use_gpu = getattr(self._config, "embedding_use_gpu", True)
+ # Fallback to per-directory binary indexes (legacy BinaryANNIndex).
+ #
+ # Generating the query binary embedding can be expensive (depending on embedding backend).
+ # If no legacy binary vector files exist, skip this path and fall back to dense ANN search.
+ has_legacy_binary_vectors = any(
+ (p.parent / f"{p.stem}_binary_vectors.bin").exists() for p in index_paths
+ )
+ if not has_legacy_binary_vectors:
+ self.logger.debug(
+ "No legacy binary vector files found; skipping legacy binary search fallback"
+ )
+ else:
+ use_gpu = True
+ if self._config is not None:
+ use_gpu = getattr(self._config, "embedding_use_gpu", True)
- try:
- binary_backend = BinaryEmbeddingBackend(use_gpu=use_gpu)
- query_binary = binary_backend.embed_packed([query])[0]
- except Exception as exc:
- self.logger.warning(f"Failed to generate binary query embedding: {exc}")
- return [], index_root
-
- for index_path in index_paths:
+ query_binary = None
try:
- binary_index = self._get_or_create_binary_index(index_path)
- if binary_index is None or binary_index.count() == 0:
- continue
- ids, distances = binary_index.search(query_binary, coarse_k)
- for chunk_id, dist in zip(ids, distances):
- coarse_candidates.append((chunk_id, dist, index_path))
+ binary_backend = BinaryEmbeddingBackend(use_gpu=use_gpu)
+ query_binary = binary_backend.embed_packed([query])[0]
except Exception as exc:
- self.logger.debug(
- "Binary search failed for %s: %s", index_path, exc
- )
+ self.logger.warning(f"Failed to generate binary query embedding: {exc}")
+ query_binary = None
+
+ if query_binary is not None:
+ for index_path in index_paths:
+ try:
+ binary_index = self._get_or_create_binary_index(index_path)
+ if binary_index is None or binary_index.count() == 0:
+ continue
+ ids, distances = binary_index.search(query_binary, coarse_k)
+ for chunk_id, dist in zip(ids, distances):
+ coarse_candidates.append((chunk_id, float(dist), index_path))
+ except Exception as exc:
+ self.logger.debug(
+ "Binary search failed for %s: %s", index_path, exc
+ )
+
+ if not coarse_candidates:
+ # Final fallback: dense ANN coarse search (HNSW) over existing dense vector indexes.
+ #
+ # This allows the staged pipeline (LSP expansion + clustering) to run even when
+ # binary vectors are not generated for the current project.
+ dense_candidates: List[Tuple[int, float, Path]] = []
+ try:
+ from codexlens.semantic.ann_index import ANNIndex
+ from codexlens.semantic.embedder import Embedder
+
+ embedder = Embedder()
+ query_dense = embedder.embed_to_numpy([query])[0]
+ dim = int(getattr(query_dense, "shape", (len(query_dense),))[0])
+
+ for index_path in index_paths:
+ try:
+ ann_index = ANNIndex(index_path, dim=dim)
+ if not ann_index.load() or ann_index.count() == 0:
+ continue
+ ids, distances = ann_index.search(query_dense, top_k=coarse_k)
+ for chunk_id, dist in zip(ids, distances):
+ dense_candidates.append((chunk_id, float(dist), index_path))
+ except Exception as exc:
+ self.logger.debug(
+ "Dense coarse search failed for %s: %s", index_path, exc
+ )
+ except Exception as exc:
+ self.logger.debug("Dense coarse search fallback unavailable: %s", exc)
+ dense_candidates = []
+
+ if dense_candidates:
+ dense_candidates.sort(key=lambda x: x[1])
+ coarse_candidates = dense_candidates[:coarse_k]
+ using_dense_fallback = True
if not coarse_candidates:
return [], index_root
@@ -1086,7 +1135,11 @@ class ChainSearchEngine:
(d for cid, d, _ in coarse_candidates if cid == chunk_id),
256
)
- score = 1.0 - (distance / 256.0)
+ if using_dense_fallback:
+ # Cosine distance in [0, 2] -> clamp to [0, 1] score
+ score = max(0.0, 1.0 - float(distance))
+ else:
+ score = 1.0 - (int(distance) / 256.0)
content = chunk.get("content", "")
@@ -1129,6 +1182,7 @@ class ChainSearchEngine:
self,
coarse_results: List[SearchResult],
index_root: Optional[Path],
+ query: Optional[str] = None,
) -> List[SearchResult]:
"""Stage 2: LSP/graph expansion for staged cascade.
@@ -1152,7 +1206,11 @@ class ChainSearchEngine:
mode = (getattr(self._config, "staged_stage2_mode", "precomputed") or "precomputed").strip().lower()
if mode in {"realtime", "live"}:
- return self._stage2_realtime_lsp_expand(coarse_results, index_root=index_root)
+ return self._stage2_realtime_lsp_expand(
+ coarse_results,
+ index_root=index_root,
+ query=query,
+ )
return self._stage2_precomputed_graph_expand(coarse_results, index_root=index_root)
@@ -1209,6 +1267,7 @@ class ChainSearchEngine:
coarse_results: List[SearchResult],
*,
index_root: Path,
+ query: Optional[str] = None,
) -> List[SearchResult]:
"""Stage 2 (realtime): compute expansion graph via live LSP servers."""
import asyncio
@@ -1217,16 +1276,27 @@ class ChainSearchEngine:
from codexlens.hybrid_search.data_structures import CodeSymbolNode, Range
from codexlens.lsp import LspBridge, LspGraphBuilder
- max_depth = 2
- timeout_s = 10.0
- max_nodes = 100
- warmup_s = 2.0
+ max_depth = 1
+ timeout_s = 30.0
+ max_nodes = 50
+ max_seeds = 1
+ max_concurrent = 2
+ warmup_s = 3.0
resolve_symbols = False
if self._config is not None:
- max_depth = int(getattr(self._config, "staged_lsp_depth", 2) or 2)
- timeout_s = float(getattr(self._config, "staged_realtime_lsp_timeout_s", 10.0) or 10.0)
- max_nodes = int(getattr(self._config, "staged_realtime_lsp_max_nodes", 100) or 100)
- warmup_s = float(getattr(self._config, "staged_realtime_lsp_warmup_s", 2.0) or 0.0)
+ max_depth = int(
+ getattr(
+ self._config,
+ "staged_realtime_lsp_depth",
+ getattr(self._config, "staged_lsp_depth", 1),
+ )
+ or 1
+ )
+ timeout_s = float(getattr(self._config, "staged_realtime_lsp_timeout_s", 30.0) or 30.0)
+ max_nodes = int(getattr(self._config, "staged_realtime_lsp_max_nodes", 50) or 50)
+ warmup_s = float(getattr(self._config, "staged_realtime_lsp_warmup_s", 3.0) or 0.0)
+ max_seeds = int(getattr(self._config, "staged_realtime_lsp_max_seeds", 1) or 1)
+ max_concurrent = int(getattr(self._config, "staged_realtime_lsp_max_concurrent", 2) or 2)
resolve_symbols = bool(getattr(self._config, "staged_realtime_lsp_resolve_symbols", False))
try:
@@ -1234,13 +1304,189 @@ class ChainSearchEngine:
except Exception:
source_root = Path(coarse_results[0].path).resolve().parent
- workspace_root = self._find_lsp_workspace_root(source_root)
+ lsp_config_file = self._find_lsp_config_file(source_root)
+ workspace_root = Path(source_root).resolve()
- max_expand = min(10, len(coarse_results))
+ max_expand = min(max(1, max_seeds), len(coarse_results))
seed_nodes: List[CodeSymbolNode] = []
seed_ids: set[str] = set()
- for seed in list(coarse_results)[:max_expand]:
+ selected_results = list(coarse_results)
+ if query:
+ import re
+
+ terms = {
+ t.lower()
+ for t in re.findall(r"[A-Za-z_][A-Za-z0-9_]*", query)
+ if t
+ }
+
+ def _priority(result: SearchResult) -> float:
+ sym = (result.symbol_name or "").strip().lower()
+ stem = Path(result.path).stem.lower() if result.path else ""
+ score = 0.0
+ if sym and sym in terms:
+ score += 5.0
+ if sym:
+ score += 2.0
+ if stem and stem in terms:
+ score += 1.0
+ if result.symbol_kind:
+ score += 0.5
+ if result.start_line:
+ score += 0.2
+ return score
+
+ indexed = list(enumerate(selected_results))
+ indexed.sort(
+ key=lambda pair: (
+ _priority(pair[1]),
+ float(pair[1].score),
+ -pair[0],
+ ),
+ reverse=True,
+ )
+ selected_results = [r for _, r in indexed]
+ else:
+ indexed = list(enumerate(selected_results))
+ indexed.sort(
+ key=lambda pair: (
+ 1.0 if pair[1].symbol_name else 0.0,
+ float(pair[1].score),
+ -pair[0],
+ ),
+ reverse=True,
+ )
+ selected_results = [r for _, r in indexed]
+
+ # Prefer symbol-definition seeds when possible (improves LSP reference/call-hierarchy results).
+ #
+ # NOTE: We avoid relying purely on the stored symbol index here because its ranges may be
+ # imprecise in some projects. Instead, we attempt a lightweight definition-line detection
+ # for query identifiers within the top coarse candidate files.
+ if query:
+ try:
+ import re
+
+ terms_raw = [
+ t for t in re.findall(r"[A-Za-z_][A-Za-z0-9_]*", query) if t
+ ]
+ stopwords = {
+ "class", "def", "function", "method", "import", "from", "return",
+ "async", "await", "public", "private", "protected", "static",
+ "const", "let", "var", "new",
+ }
+ candidate_terms = [
+ t for t in terms_raw
+ if t.lower() not in stopwords and len(t) >= 3
+ ]
+
+ candidate_terms.sort(key=len, reverse=True)
+
+ # Candidate files (best-first): de-dupe while preserving ordering.
+ candidate_files: List[str] = []
+ seen_files: set[str] = set()
+ for r in selected_results:
+ if r.path and r.path not in seen_files:
+ seen_files.add(r.path)
+ candidate_files.append(r.path)
+ if len(candidate_files) >= 50:
+ break
+
+ # Also consider files whose *names* match query identifiers (helps when coarse retrieval
+ # misses the defining file for a symbol like `Config`).
+ try:
+ if source_root and candidate_terms:
+ allow_suffix = {".py", ".ts", ".tsx", ".js", ".jsx"}
+ name_terms = [t.lower() for t in candidate_terms[:3]]
+ for dirpath, _, filenames in os.walk(source_root):
+ for filename in filenames:
+ suffix = Path(filename).suffix.lower()
+ if suffix not in allow_suffix:
+ continue
+ lowered = filename.lower()
+ if any(t in lowered for t in name_terms):
+ fp = str(Path(dirpath) / filename)
+ if fp not in seen_files:
+ seen_files.add(fp)
+ candidate_files.append(fp)
+ if len(candidate_files) >= 120:
+ break
+ except Exception:
+ pass
+
+ for term in candidate_terms[:5]:
+ if len(seed_nodes) >= max_expand:
+ break
+
+ escaped = re.escape(term)
+ py_class = re.compile(rf"^\s*class\s+{escaped}\b")
+ py_def = re.compile(rf"^\s*(?:async\s+)?def\s+{escaped}\b")
+ ts_class = re.compile(rf"^\s*(?:export\s+)?class\s+{escaped}\b")
+ ts_func = re.compile(rf"^\s*(?:export\s+)?(?:async\s+)?function\s+{escaped}\b")
+
+ for file_path in candidate_files:
+ if len(seed_nodes) >= max_expand:
+ break
+ suffix = Path(file_path).suffix.lower()
+ if suffix not in {".py", ".ts", ".tsx", ".js", ".jsx"}:
+ continue
+
+ try:
+ lines = Path(file_path).read_text(encoding="utf-8", errors="ignore").splitlines()
+ except Exception:
+ continue
+
+ for i, line in enumerate(lines):
+ kind = None
+ if suffix == ".py":
+ if py_class.search(line):
+ kind = "class"
+ elif py_def.search(line):
+ kind = "function"
+ else:
+ if ts_class.search(line):
+ kind = "class"
+ elif ts_func.search(line):
+ kind = "function"
+
+ if not kind:
+ continue
+
+ start_line = i + 1
+ idx = line.find(term)
+ if idx >= 0:
+ start_character = idx + 1
+ else:
+ stripped = line.lstrip()
+ start_character = (len(line) - len(stripped)) + 1 if stripped else 1
+
+ node_id = f"{file_path}:{term}:{start_line}"
+ if node_id in seed_ids:
+ break
+
+ seed_ids.add(node_id)
+ seed_nodes.append(
+ CodeSymbolNode(
+ id=node_id,
+ name=term,
+ kind=kind,
+ file_path=file_path,
+ range=Range(
+ start_line=start_line,
+ start_character=start_character,
+ end_line=start_line,
+ end_character=start_character,
+ ),
+ )
+ )
+ break
+ except Exception:
+ pass
+
+ for seed in selected_results:
+ if len(seed_nodes) >= max_expand:
+ break
if not seed.path:
continue
name = seed.symbol_name or Path(seed.path).stem
@@ -1249,14 +1495,21 @@ class ChainSearchEngine:
end_line = int(seed.end_line or start_line)
start_character = 1
try:
- if seed.symbol_name and start_line >= 1:
+ if start_line >= 1:
line_text = Path(seed.path).read_text(encoding="utf-8", errors="ignore").splitlines()[start_line - 1]
- idx = line_text.find(seed.symbol_name)
- if idx >= 0:
- start_character = idx + 1 # 1-based for StandaloneLspManager
+ if seed.symbol_name:
+ idx = line_text.find(seed.symbol_name)
+ if idx >= 0:
+ start_character = idx + 1 # 1-based for StandaloneLspManager
+ else:
+ stripped = line_text.lstrip()
+ if stripped:
+ start_character = (len(line_text) - len(stripped)) + 1
except Exception:
start_character = 1
node_id = f"{seed.path}:{name}:{start_line}"
+ if node_id in seed_ids:
+ continue
seed_ids.add(node_id)
seed_nodes.append(
CodeSymbolNode(
@@ -1268,7 +1521,7 @@ class ChainSearchEngine:
start_line=start_line,
start_character=start_character,
end_line=end_line,
- end_character=1,
+ end_character=start_character if end_line == start_line else 1,
),
raw_code=seed.content or "",
docstring=seed.excerpt or "",
@@ -1279,7 +1532,11 @@ class ChainSearchEngine:
return coarse_results
async def expand_graph():
- async with LspBridge(workspace_root=str(workspace_root), timeout=timeout_s) as bridge:
+ async with LspBridge(
+ workspace_root=str(workspace_root),
+ config_file=str(lsp_config_file) if lsp_config_file else None,
+ timeout=timeout_s,
+ ) as bridge:
# Warm up analysis: open seed docs and wait a bit so references/call hierarchy are populated.
if warmup_s > 0:
for seed in seed_nodes[:3]:
@@ -1288,12 +1545,14 @@ class ChainSearchEngine:
except Exception:
continue
try:
- await asyncio.sleep(min(warmup_s, max(0.0, timeout_s - 0.5)))
+ warmup_budget = min(warmup_s, max(0.0, timeout_s * 0.1))
+ await asyncio.sleep(min(warmup_budget, max(0.0, timeout_s - 0.5)))
except Exception:
pass
builder = LspGraphBuilder(
max_depth=max_depth,
max_nodes=max_nodes,
+ max_concurrent=max(1, max_concurrent),
resolve_symbols=resolve_symbols,
)
return await builder.build_from_seeds(seed_nodes, bridge)
@@ -1314,9 +1573,21 @@ class ChainSearchEngine:
else:
graph = run_coro_blocking()
except Exception as exc:
- self.logger.debug("Stage 2 (realtime) expansion failed: %s", exc)
+ self.logger.debug("Stage 2 (realtime) expansion failed: %r", exc)
return coarse_results
+ try:
+ node_count = len(getattr(graph, "nodes", {}) or {})
+ edge_count = len(getattr(graph, "edges", []) or [])
+ except Exception:
+ node_count, edge_count = 0, 0
+ self.logger.debug(
+ "Stage 2 (realtime) graph built: seeds=%d nodes=%d edges=%d",
+ len(seed_nodes),
+ node_count,
+ edge_count,
+ )
+
related_results: List[SearchResult] = []
for node_id, node in getattr(graph, "nodes", {}).items():
if node_id in seed_ids or getattr(node, "id", "") in seed_ids:
@@ -1395,6 +1666,21 @@ class ChainSearchEngine:
return start
+ def _find_lsp_config_file(self, start_path: Path) -> Optional[Path]:
+ """Find a lsp-servers.json by walking up from start_path."""
+ start = Path(start_path).resolve()
+ if start.is_file():
+ start = start.parent
+
+ for current in [start, *list(start.parents)]:
+ try:
+ candidate = current / "lsp-servers.json"
+ if candidate.is_file():
+ return candidate
+ except OSError:
+ continue
+ return None
+
def _stage3_cluster_prune(
self,
expanded_results: List[SearchResult],
diff --git a/codex-lens/src/codexlens/storage/path_mapper.py b/codex-lens/src/codexlens/storage/path_mapper.py
index a7cbd701..2c238355 100644
--- a/codex-lens/src/codexlens/storage/path_mapper.py
+++ b/codex-lens/src/codexlens/storage/path_mapper.py
@@ -290,7 +290,7 @@ class PathMapper:
# Check if first part is a drive letter
if len(parts[0]) == 1 and parts[0].isalpha():
# D/path/to/dir → D:/path/to/dir
- drive = f"{parts[0]}:"
+ drive = f"{parts[0]}:/"
if len(parts) > 1:
return Path(drive) / Path(*parts[1:])
return Path(drive)
diff --git a/codex-lens/tests/test_path_mapper_windows_drive.py b/codex-lens/tests/test_path_mapper_windows_drive.py
new file mode 100644
index 00000000..21522b8d
--- /dev/null
+++ b/codex-lens/tests/test_path_mapper_windows_drive.py
@@ -0,0 +1,19 @@
+from __future__ import annotations
+
+import platform
+from pathlib import Path
+
+from codexlens.storage.path_mapper import PathMapper
+
+
+def test_denormalize_path_windows_drive_is_absolute() -> None:
+ if platform.system() != "Windows":
+ return
+
+ mapper = PathMapper(index_root=Path("C:/tmp/codexlens_indexes"))
+ mapped = mapper.denormalize_path("D/Claude_dms3/codex-lens/src")
+
+ assert mapped.is_absolute()
+ assert str(mapped).lower().startswith("d:\\") or str(mapped).lower().startswith("d:/")
+ assert mapped == Path("D:/Claude_dms3/codex-lens/src")
+
diff --git a/codex-lens/tests/test_stage1_binary_search_uses_chunk_lines.py b/codex-lens/tests/test_stage1_binary_search_uses_chunk_lines.py
index 92d0cf6b..ed566c9b 100644
--- a/codex-lens/tests/test_stage1_binary_search_uses_chunk_lines.py
+++ b/codex-lens/tests/test_stage1_binary_search_uses_chunk_lines.py
@@ -63,3 +63,69 @@ def test_stage1_binary_search_prefers_chunk_start_line(tmp_path: Path) -> None:
finally:
engine.close()
+
+def test_stage1_binary_search_dense_fallback(tmp_path: Path) -> None:
+ registry = RegistryStore(db_path=tmp_path / "registry.db")
+ registry.initialize()
+ mapper = PathMapper(index_root=tmp_path / "indexes")
+ engine = ChainSearchEngine(registry, mapper, config=Config(data_dir=tmp_path / "data"))
+
+ try:
+ index_root = tmp_path / "fake_index_root"
+ index_root.mkdir(parents=True, exist_ok=True)
+ index_db = index_root / "_index.db"
+ index_db.write_text("", encoding="utf-8")
+ (index_root / VECTORS_META_DB_NAME).write_text("", encoding="utf-8")
+
+ class _DummyEmbedder:
+ def embed_to_numpy(self, texts):
+ _ = texts
+ # Only dim matters for ANNIndex initialization
+ return [[0.0, 1.0, 2.0]]
+
+ class _DummyANNIndex:
+ def __init__(self, *args, **kwargs) -> None:
+ pass
+
+ def load(self) -> bool:
+ return True
+
+ def count(self) -> int:
+ return 1
+
+ def search(self, query_vec, top_k: int = 10):
+ _ = query_vec
+ _ = top_k
+ return [123], [0.2]
+
+ dummy_meta_store = MagicMock()
+ dummy_meta_store.get_chunks_by_ids.return_value = [
+ {
+ "chunk_id": 123,
+ "file_path": str(tmp_path / "b.py"),
+ "content": "def b():\n return 2\n",
+ "start_line": 20,
+ "end_line": 22,
+ "metadata": {},
+ "category": "code",
+ }
+ ]
+
+ with patch.object(engine, "_get_centralized_binary_searcher", return_value=None):
+ with patch("codexlens.search.chain_search.VectorMetadataStore", return_value=dummy_meta_store):
+ with patch("codexlens.semantic.embedder.Embedder", return_value=_DummyEmbedder()):
+ with patch("codexlens.semantic.ann_index.ANNIndex", _DummyANNIndex):
+ coarse_results, returned_root = engine._stage1_binary_search(
+ "b",
+ [index_db],
+ coarse_k=1,
+ stats=SearchStats(),
+ )
+
+ assert returned_root == index_root
+ assert len(coarse_results) == 1
+ assert coarse_results[0].start_line == 20
+ assert coarse_results[0].end_line == 22
+ assert coarse_results[0].score == 0.8
+ finally:
+ engine.close()
diff --git a/test-mcp-tools.mjs b/test-mcp-tools.mjs
new file mode 100644
index 00000000..c142c655
--- /dev/null
+++ b/test-mcp-tools.mjs
@@ -0,0 +1,182 @@
+#!/usr/bin/env node
+/**
+ * MCP Tools Test Script
+ * Tests the modified read_file and edit_file tools with parameter validation
+ */
+
+import { executeTool } from './ccw/dist/tools/index.js';
+
+console.log('🧪 MCP Tools Test Suite\n');
+console.log('Testing modified parameters:\n');
+
+let passed = 0;
+let failed = 0;
+
+// Test helper
+async function test(name, testFn) {
+ try {
+ await testFn();
+ console.log(`✅ ${name}`);
+ passed++;
+ } catch (error) {
+ console.log(`❌ ${name}`);
+ console.error(` Error: ${error.message}`);
+ failed++;
+ }
+}
+
+// Test 1: read_file - single file with offset/limit (should succeed)
+await test('read_file: single file + offset/limit (valid)', async () => {
+ const result = await executeTool('read_file', {
+ paths: 'README.md',
+ offset: 0,
+ limit: 5
+ });
+
+ if (!result.success) {
+ throw new Error(result.error);
+ }
+ console.log(` → Read ${result.result.files.length} file, ${result.result.message}`);
+});
+
+// Test 2: read_file - multiple files with offset/limit (should FAIL with new validation)
+await test('read_file: multiple files + offset/limit (validation error)', async () => {
+ const result = await executeTool('read_file', {
+ paths: ['README.md', 'package.json'],
+ offset: 0,
+ limit: 5
+ });
+
+ if (result.success) {
+ throw new Error('Expected validation error but succeeded');
+ }
+
+ if (!result.error.includes('offset/limit')) {
+ throw new Error(`Expected error message about offset/limit, got: ${result.error}`);
+ }
+ console.log(` → Got expected error: ${result.error.substring(0, 60)}...`);
+});
+
+// Test 3: read_file - multiple files without offset/limit (should succeed)
+await test('read_file: multiple files without offset/limit (valid)', async () => {
+ const result = await executeTool('read_file', {
+ paths: ['README.md', 'package.json']
+ });
+
+ if (!result.success) {
+ throw new Error(result.error);
+ }
+ console.log(` → Read ${result.result.files.length} files`);
+});
+
+// Test 4: edit_file - update mode with oldText/newText (should succeed)
+await test('edit_file: update mode + oldText/newText (valid)', async () => {
+ const result = await executeTool('edit_file', {
+ path: 'README.md',
+ mode: 'update',
+ oldText: 'old content',
+ newText: 'new content',
+ dryRun: true
+ });
+
+ if (!result.success) {
+ throw new Error(result.error);
+ }
+ console.log(` → ${result.result.message}`);
+});
+
+// Test 5: edit_file - update mode with edits (should succeed)
+await test('edit_file: update mode + edits (valid)', async () => {
+ const result = await executeTool('edit_file', {
+ path: 'README.md',
+ mode: 'update',
+ edits: [{ oldText: 'old', newText: 'new' }],
+ dryRun: true
+ });
+
+ if (!result.success) {
+ throw new Error(result.error);
+ }
+ console.log(` → ${result.result.message}`);
+});
+
+// Test 6: edit_file - update mode with BOTH oldText/newText AND edits (should FAIL)
+await test('edit_file: update mode + both oldText/newText AND edits (validation error)', async () => {
+ const result = await executeTool('edit_file', {
+ path: 'README.md',
+ mode: 'update',
+ oldText: 'old',
+ newText: 'new',
+ edits: [{ oldText: 'old2', newText: 'new2' }],
+ dryRun: true
+ });
+
+ if (result.success) {
+ throw new Error('Expected validation error but succeeded');
+ }
+
+ if (!result.error.includes('oldText/newText') && !result.error.includes('edits')) {
+ throw new Error(`Expected error about oldText/newText or edits, got: ${result.error}`);
+ }
+ console.log(` → Got expected error: ${result.error.substring(0, 80)}...`);
+});
+
+// Test 7: edit_file - update mode without proper parameters (should FAIL - no oldText/newText or edits)
+await test('edit_file: update mode without proper parameters (validation error)', async () => {
+ const result = await executeTool('edit_file', {
+ path: 'README.md',
+ mode: 'update'
+ // Missing both oldText/newText and edits
+ });
+
+ if (result.success) {
+ throw new Error('Expected validation error but succeeded');
+ }
+ console.log(` → Got expected error: ${result.error.substring(0, 80)}...`);
+});
+
+// Test 8: edit_file - line mode with line mode parameters (should succeed)
+await test('edit_file: line mode + line mode parameters (valid)', async () => {
+ const result = await executeTool('edit_file', {
+ path: 'README.md',
+ mode: 'line',
+ operation: 'insert_after',
+ line: 1,
+ text: 'new line'
+ });
+
+ if (!result.success) {
+ throw new Error(result.error);
+ }
+ console.log(` → ${result.result.message}`);
+});
+
+// Test 9: edit_file - line mode missing required text (should FAIL)
+await test('edit_file: line mode + insert without text (validation error)', async () => {
+ const result = await executeTool('edit_file', {
+ path: 'README.md',
+ mode: 'line',
+ operation: 'insert_after',
+ line: 1
+ // missing 'text' parameter
+ });
+
+ if (result.success) {
+ throw new Error('Expected validation error but succeeded');
+ }
+ console.log(` → Got expected error: ${result.error.substring(0, 80)}...`);
+});
+
+// Summary
+console.log(`\n📊 Test Results:`);
+console.log(` ✅ Passed: ${passed}`);
+console.log(` ❌ Failed: ${failed}`);
+console.log(` 📈 Success Rate: ${((passed / (passed + failed)) * 100).toFixed(1)}%`);
+
+if (failed === 0) {
+ console.log('\n🎉 All tests passed!');
+ process.exit(0);
+} else {
+ console.log(`\n⚠️ ${failed} test(s) failed`);
+ process.exit(1);
+}