Compare commits

...

12 Commits

Author SHA1 Message Date
catlog22
86cefa7bda bump version to 6.2.8 in package.json and package-lock.json 2025-12-23 09:49:55 +08:00
catlog22
fdac697f6e refactor: remove the ccw/package.json file and update path references 2025-12-23 09:47:07 +08:00
catlog22
8203d690cb fix: CodexLens model detection, hybrid search stability, and JSON logging
- Fix model installation detection using fastembed ONNX cache names
- Add embeddings_config table for model metadata tracking
- Fix hybrid search segfault by using single-threaded GPU mode
- Suppress INFO logs in JSON mode to prevent error display
- Add model dropdown filtering to show only installed models

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2025-12-22 21:49:10 +08:00
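A minimal sketch of the first fix above — detecting installed models via fastembed's ONNX cache names — assuming the cache roots listed later in this diff (`%LOCALAPPDATA%\Temp\fastembed_cache` on Windows, `~/.cache/fastembed` elsewhere) and Hugging-Face-style `models--org--name` directory names. The profile map and function name are illustrative, not the actual CodexLens internals:

```ts
import { existsSync } from 'node:fs';
import { join } from 'node:path';
import { homedir, tmpdir } from 'node:os';

// Illustrative profile → Hugging Face repo map (repos taken from the model list in this diff)
const PROFILE_REPOS: Record<string, string> = {
  code: 'jinaai/jina-embeddings-v2-base-code',
  fast: 'BAAI/bge-small-en-v1.5',
  balanced: 'mixedbread-ai/mxbai-embed-large-v1',
  multilingual: 'intfloat/multilingual-e5-large',
};

// Assumption: fastembed stores each model under a cache dir derived from the repo id
function isModelInstalled(profile: string): boolean {
  const repo = PROFILE_REPOS[profile];
  if (!repo) return false;
  const cacheRoot = process.platform === 'win32'
    ? join(tmpdir(), 'fastembed_cache')        // %LOCALAPPDATA%\Temp\fastembed_cache
    : join(homedir(), '.cache', 'fastembed');  // ~/.cache/fastembed
  return existsSync(join(cacheRoot, `models--${repo.replace(/\//g, '--')}`));
}
```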
catlog22
cf58dc0dd3 bump version to 6.2.6 in package.json 2025-12-22 20:17:38 +08:00
catlog22
6a69af3bf1 feat: increase embedding batch size to 256 to improve performance and GPU acceleration efficiency 2025-12-22 17:55:05 +08:00
catlog22
acdfbb4644 feat: Enhance CodexLens with GPU support and semantic status improvements
- Added accelerator and providers fields to SemanticStatus interface.
- Updated checkSemanticStatus function to retrieve ONNX providers and accelerator type.
- Introduced detectGpuSupport function to identify available GPU modes (CUDA, DirectML).
- Modified installSemantic function to support GPU acceleration modes and clean up ONNX Runtime installations.
- Updated package requirements in PKG-INFO for semantic-gpu and semantic-directml extras.
- Added new source files for GPU support and enrichment functionalities.
- Updated tests to cover new features and ensure comprehensive testing.
2025-12-22 17:42:26 +08:00
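As a rough sketch of what `detectGpuSupport` might do (the real implementation lives in `ccw/src/tools/codex-lens.ts` and is not shown in this diff), one could probe for an NVIDIA driver and for Windows to choose between CUDA, DirectML, and CPU. The return shape mirrors the `{ mode, available, info }` object the frontend code below expects:

```ts
import { execSync } from 'node:child_process';

type GpuMode = 'cpu' | 'cuda' | 'directml';

// Hedged sketch: probe for CUDA via nvidia-smi, treat any Windows box as a
// DirectML candidate, and fall back to CPU.
function detectGpuModes(): { mode: GpuMode; available: GpuMode[]; info: string } {
  const available: GpuMode[] = ['cpu'];
  try {
    execSync('nvidia-smi', { stdio: 'ignore' }); // throws if no NVIDIA driver is present
    available.push('cuda');
  } catch {
    /* no NVIDIA GPU or driver installed */
  }
  if (process.platform === 'win32') available.push('directml'); // DirectML ships with Windows 10+
  const mode: GpuMode = available.includes('cuda') ? 'cuda'
    : available.includes('directml') ? 'directml' : 'cpu';
  return { mode, available, info: `Detected modes: ${available.join(', ')}` };
}
```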
catlog22
72f24bf535 feat: bump version to 6.2.4, add GPU acceleration support and related dependencies 2025-12-22 14:15:36 +08:00
catlog22
ba23244876 feat: bump version to 6.2.2 and add the dist directory to the files list 2025-12-22 12:06:59 +08:00
catlog22
624f9f18b4 feat: update project name and version number for clearer version management 2025-12-22 10:29:32 +08:00
catlog22
17002345c9 feat: update hook template check logic to support command matching based on unique patterns; add a fallback-mode field to search metadata 2025-12-22 10:25:53 +08:00
catlog22
f3f2051c45 feat: improve retrieval logic for project and global configuration, add Codex config support 2025-12-22 10:16:58 +08:00
catlog22
e60d793c8c fix: resolve SmartSearch ripgrep limit and FTS tokenizer issues
- Ripgrep mode: add a total result limit to prevent returning more than 2MB of data
  - --max-count only caps matches per file; the limit is now applied while collecting results
  - when the limit is reached, a warning is added to the metadata

- FTS tokenizer: add the dot (.) to tokenchars, fixing exact search for dotted identifiers such as PortRole.FLOW
  - update the tokenize configuration in dir_index.py and migration_004_dual_fts.py
  - the index must be rebuilt for the change to take effect

- Exact mode: add a fuzzy fallback that automatically retries with fuzzy search when exact search returns no results
  - on fallback, the metadata is annotated with fallback: 'fuzzy'

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2025-12-22 09:50:29 +08:00
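A minimal sketch of the ripgrep fix described above: since `--max-count` only caps matches per file, the collector itself has to enforce a global limit and record a warning in the metadata. Names (`collectMatches`, `rgJsonLines`, `limit`) are illustrative, not the actual SmartSearch internals:

```ts
interface Match { file: string; line: number; text: string }

// Hedged sketch: `rgJsonLines` is assumed to be the stdout of `rg --json`,
// which emits one JSON event per line.
function collectMatches(rgJsonLines: string[], limit: number) {
  const results: Match[] = [];
  let truncated = false;
  for (const raw of rgJsonLines) {
    const event = JSON.parse(raw);
    if (event.type !== 'match') continue;  // skip begin/end/summary events
    if (results.length >= limit) {         // global cap across all files
      truncated = true;
      break;
    }
    results.push({
      file: event.data.path.text,
      line: event.data.line_number,
      text: event.data.lines.text,
    });
  }
  const metadata = truncated ? { warning: `result limit ${limit} reached` } : {};
  return { results, metadata };
}
```

The FTS side of the fix corresponds to an FTS5 tokenizer option along the lines of `tokenize = "unicode61 tokenchars '.'"` in the table definition, which keeps `PortRole.FLOW` as a single token.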
45 changed files with 4046 additions and 315 deletions

View File

@@ -3,4 +3,8 @@
- **CLI Tools Usage**: @~/.claude/workflows/cli-tools-usage.md
- **Coding Philosophy**: @~/.claude/workflows/coding-philosophy.md
- **Context Requirements**: @~/.claude/workflows/context-tools.md
- **File Modification**: @~/.claude/workflows/file-modification.md
## Agent Execution
- **Always use `run_in_background = false`** for Task tool agent calls to ensure synchronous execution and immediate result visibility

View File

@@ -9,11 +9,8 @@
"env": {}
},
"ccw-tools": {
"command": "npx",
"args": [
"-y",
"ccw-mcp"
],
"command": "ccw-mcp",
"args": [],
"env": {
"CCW_ENABLED_TOOLS": "write_file,edit_file,smart_search,core_memory"
}

View File

@@ -46,7 +46,6 @@ Install-Claude.ps1
install-remote.ps1
*.mcp.json
# ccw internal files
ccw/package.json
ccw/node_modules/
ccw/*.md

ccw/.npmignore Normal file
View File

@@ -0,0 +1,15 @@
# npm ignore file - overrides .gitignore for npm publish
# dist/ is NOT excluded here so it gets published
# Development files
node_modules/
*.log
*.tmp
# Test files
tests/
*.test.js
*.spec.js
# TypeScript source maps (optional, can keep for debugging)
# *.map

ccw/package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "ccw",
"version": "6.1.4",
"name": "claude-code-workflow",
"version": "6.2.6",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "ccw",
"version": "6.1.4",
"name": "claude-code-workflow",
"version": "6.2.6",
"license": "MIT",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.0.4",

View File

@@ -1,65 +0,0 @@
{
"name": "ccw",
"version": "6.2.0",
"description": "Claude Code Workflow CLI - Dashboard viewer for workflow sessions and reviews",
"type": "module",
"main": "dist/index.js",
"types": "dist/index.d.ts",
"bin": {
"ccw": "./bin/ccw.js",
"ccw-mcp": "./bin/ccw-mcp.js"
},
"scripts": {
"build": "tsc",
"dev": "tsx watch src/cli.ts",
"test": "node --test tests/*.test.js",
"test:codexlens": "node --test tests/codex-lens*.test.js",
"test:mcp": "node --test tests/mcp-server.test.js",
"lint": "eslint src/"
},
"keywords": [
"claude",
"workflow",
"cli",
"dashboard",
"code-review"
],
"author": "Claude Code Workflow",
"license": "MIT",
"engines": {
"node": ">=16.0.0"
},
"dependencies": {
"@modelcontextprotocol/sdk": "^1.0.4",
"better-sqlite3": "^11.7.0",
"boxen": "^7.1.0",
"chalk": "^5.3.0",
"commander": "^11.0.0",
"figlet": "^1.7.0",
"glob": "^10.3.0",
"gradient-string": "^2.0.2",
"inquirer": "^9.2.0",
"open": "^9.1.0",
"ora": "^7.0.0",
"zod": "^4.1.13"
},
"files": [
"bin/",
"dist/",
"src/",
"README.md",
"LICENSE"
],
"repository": {
"type": "git",
"url": "https://github.com/claude-code-workflow/ccw"
},
"devDependencies": {
"@types/better-sqlite3": "^7.6.12",
"@types/gradient-string": "^1.1.6",
"@types/inquirer": "^9.0.9",
"@types/node": "^25.0.1",
"tsx": "^4.21.0",
"typescript": "^5.9.3"
}
}

View File

@@ -284,8 +284,12 @@ function normalizeTask(task: unknown): NormalizedTask | null {
const implementation = taskObj.implementation as unknown[] | undefined;
const modificationPoints = taskObj.modification_points as Array<{ file?: string }> | undefined;
// Ensure id is always a string (handle numeric IDs from JSON)
const rawId = taskObj.id ?? taskObj.task_id;
const stringId = rawId != null ? String(rawId) : 'unknown';
return {
id: (taskObj.id as string) || (taskObj.task_id as string) || 'unknown',
id: stringId,
title: (taskObj.title as string) || (taskObj.name as string) || (taskObj.summary as string) || 'Untitled Task',
status: (status as string).toLowerCase(),
// Preserve original fields for flexible rendering

View File

@@ -284,8 +284,12 @@ function normalizeTask(task: unknown): NormalizedTask | null {
const implementation = taskObj.implementation as unknown[] | undefined;
const modificationPoints = taskObj.modification_points as Array<{ file?: string }> | undefined;
// Ensure id is always a string (handle numeric IDs from JSON)
const rawId = taskObj.id ?? taskObj.task_id;
const stringId = rawId != null ? String(rawId) : 'unknown';
return {
id: (taskObj.id as string) || (taskObj.task_id as string) || 'unknown',
id: stringId,
title: (taskObj.title as string) || (taskObj.name as string) || (taskObj.summary as string) || 'Untitled Task',
status: (status as string).toLowerCase(),
// Preserve original fields for flexible rendering
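The same ID-normalization hunk appears in both copies of the viewer above. The reason the change matters: a TypeScript `as string` cast does not convert a numeric JSON id at runtime. A hedged before/after illustration:

```ts
const taskObj: Record<string, unknown> = { id: 42 }; // numeric id parsed from JSON

// Before: the cast only silences the compiler — the value is still a number
const before = (taskObj.id as string) || 'unknown';
console.log(typeof before); // "number"

// After: explicit coercion guarantees a string, with a fallback for missing ids
const rawId = taskObj.id ?? taskObj.task_id;
const after = rawId != null ? String(rawId) : 'unknown';
console.log(typeof after, after); // "string 42"
```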

View File

@@ -4,7 +4,7 @@
* Handles all CLAUDE.md memory rules management endpoints
*/
import type { IncomingMessage, ServerResponse } from 'http';
import { readFileSync, writeFileSync, existsSync, readdirSync, statSync } from 'fs';
import { readFileSync, writeFileSync, existsSync, readdirSync, statSync, unlinkSync, mkdirSync } from 'fs';
import { join, relative } from 'path';
import { homedir } from 'os';
@@ -453,8 +453,7 @@ function deleteClaudeFile(filePath: string): { success: boolean; error?: string
writeFileSync(backupPath, content, 'utf8');
// Delete original file
const fs = require('fs');
fs.unlinkSync(filePath);
unlinkSync(filePath);
return { success: true };
} catch (error) {
@@ -500,9 +499,8 @@ function createNewClaudeFile(level: 'user' | 'project' | 'module', template: str
// Ensure directory exists
const dir = filePath.substring(0, filePath.lastIndexOf('/') || filePath.lastIndexOf('\\'));
const fs = require('fs');
if (!existsSync(dir)) {
fs.mkdirSync(dir, { recursive: true });
mkdirSync(dir, { recursive: true });
}
// Write file

View File

@@ -362,8 +362,9 @@ export async function handleCliRoutes(ctx: RouteContext): Promise<boolean> {
const status = url.searchParams.get('status') || null;
const category = url.searchParams.get('category') as 'user' | 'internal' | 'insight' | null;
const search = url.searchParams.get('search') || null;
const recursive = url.searchParams.get('recursive') !== 'false';
getHistoryWithNativeInfo(projectPath, { limit, tool, status, category, search })
getHistoryWithNativeInfo(projectPath, { limit, tool, status, category, search, recursive })
.then(history => {
res.writeHead(200, { 'Content-Type': 'application/json' });
res.end(JSON.stringify(history));

View File

@@ -10,11 +10,12 @@ import {
executeCodexLens,
checkSemanticStatus,
installSemantic,
detectGpuSupport,
uninstallCodexLens,
cancelIndexing,
isIndexingInProgress
} from '../../tools/codex-lens.js';
import type { ProgressInfo } from '../../tools/codex-lens.js';
import type { ProgressInfo, GpuMode } from '../../tools/codex-lens.js';
export interface RouteContext {
pathname: string;
@@ -343,7 +344,7 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
}
try {
const result = await executeCodexLens(['config-set', '--key', 'index_dir', '--value', index_dir, '--json']);
const result = await executeCodexLens(['config', 'set', 'index_dir', index_dir, '--json']);
if (result.success) {
return { success: true, message: 'Configuration updated successfully' };
} else {
@@ -668,16 +669,43 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
}
// API: CodexLens Semantic Search Install (fastembed, ONNX-based, ~200MB)
// API: Detect GPU support for semantic search
if (pathname === '/api/codexlens/gpu/detect' && req.method === 'GET') {
try {
const gpuInfo = await detectGpuSupport();
res.writeHead(200, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ success: true, ...gpuInfo }));
} catch (err) {
res.writeHead(500, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ success: false, error: err.message }));
}
return true;
}
// API: CodexLens Semantic Search Install (with GPU mode support)
if (pathname === '/api/codexlens/semantic/install' && req.method === 'POST') {
handlePostRequest(req, res, async () => {
handlePostRequest(req, res, async (body) => {
try {
const result = await installSemantic();
// Get GPU mode from request body, default to 'cpu'
const gpuMode: GpuMode = body?.gpuMode || 'cpu';
const validModes: GpuMode[] = ['cpu', 'cuda', 'directml'];
if (!validModes.includes(gpuMode)) {
return { success: false, error: `Invalid GPU mode: ${gpuMode}. Valid modes: ${validModes.join(', ')}`, status: 400 };
}
const result = await installSemantic(gpuMode);
if (result.success) {
const status = await checkSemanticStatus();
const modeDescriptions = {
cpu: 'CPU (ONNX Runtime)',
cuda: 'NVIDIA CUDA GPU',
directml: 'Windows DirectML GPU'
};
return {
success: true,
message: 'Semantic search installed successfully (fastembed)',
message: `Semantic search installed successfully with ${modeDescriptions[gpuMode]}`,
gpuMode,
...status
};
} else {

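A hedged usage sketch of the two endpoints added above, assuming the dashboard's default port 3456 (seen in the hook template later in this diff); the response field names follow the handlers shown here:

```ts
// Detect the best available GPU mode, then install semantic search with it.
async function installSemanticWithDetectedGpu(base = 'http://localhost:3456') {
  const detect = await fetch(`${base}/api/codexlens/gpu/detect`).then(r => r.json());
  // detect: { success, mode, available, info } — spread from detectGpuSupport()
  const gpuMode = detect.success ? detect.mode : 'cpu';

  const res = await fetch(`${base}/api/codexlens/semantic/install`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ gpuMode }), // invalid modes are rejected with a 400
  });
  return res.json(); // { success, message, gpuMode, ...status } on success
}
```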
View File

@@ -291,13 +291,14 @@ FOCUS AREAS: ${extractFocus || 'naming conventions, error handling, code structu
return { error: `Unknown generation type: ${generationType}` };
}
// Execute CLI tool (Gemini) with at least 10 minutes timeout
// Execute CLI tool (Claude) with at least 10 minutes timeout
const result = await executeCliTool({
tool: 'gemini',
tool: 'claude',
prompt,
mode,
cd: workingDir,
timeout: 600000 // 10 minutes
timeout: 600000, // 10 minutes
category: 'internal'
});
if (!result.success) {

View File

@@ -123,6 +123,7 @@ function getSkillsConfig(projectPath) {
result.projectSkills.push({
name: parsed.name || skill.name,
folderName: skill.name, // Actual folder name for API queries
description: parsed.description,
version: parsed.version,
allowedTools: parsed.allowedTools,
@@ -152,6 +153,7 @@ function getSkillsConfig(projectPath) {
result.userSkills.push({
name: parsed.name || skill.name,
folderName: skill.name, // Actual folder name for API queries
description: parsed.description,
version: parsed.version,
allowedTools: parsed.allowedTools,
@@ -197,6 +199,7 @@ function getSkillDetail(skillName, location, projectPath) {
return {
skill: {
name: parsed.name || skillName,
folderName: skillName, // Actual folder name for API queries
description: parsed.description,
version: parsed.version,
allowedTools: parsed.allowedTools,
@@ -390,7 +393,7 @@ async function importSkill(sourcePath, location, projectPath, customName) {
}
/**
* Generate skill via CLI tool (Gemini)
* Generate skill via CLI tool (Claude)
* @param {Object} params - Generation parameters
* @param {string} params.generationType - 'description' or 'template'
* @param {string} params.description - Skill description from user
@@ -455,9 +458,9 @@ REQUIREMENTS:
3. If the skill requires supporting files (e.g., templates, scripts), create them in the skill folder
4. Ensure all files are properly formatted and follow best practices`;
// Execute CLI tool (Gemini) with write mode
// Execute CLI tool (Claude) with write mode
const result = await executeCliTool({
tool: 'gemini',
tool: 'claude',
prompt,
mode: 'write',
cd: baseDir,
@@ -515,8 +518,143 @@ export async function handleSkillsRoutes(ctx: RouteContext): Promise<boolean> {
return true;
}
// API: Get single skill detail
if (pathname.startsWith('/api/skills/') && req.method === 'GET' && !pathname.endsWith('/skills/')) {
// API: List skill directory contents
if (pathname.match(/^\/api\/skills\/[^/]+\/dir$/) && req.method === 'GET') {
const pathParts = pathname.split('/');
const skillName = decodeURIComponent(pathParts[3]);
const subPath = url.searchParams.get('subpath') || '';
const location = url.searchParams.get('location') || 'project';
const projectPathParam = url.searchParams.get('path') || initialPath;
const baseDir = location === 'project'
? join(projectPathParam, '.claude', 'skills')
: join(homedir(), '.claude', 'skills');
const dirPath = subPath
? join(baseDir, skillName, subPath)
: join(baseDir, skillName);
// Security check: ensure path is within skill folder
if (!dirPath.startsWith(join(baseDir, skillName))) {
res.writeHead(403, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ error: 'Access denied' }));
return true;
}
if (!existsSync(dirPath)) {
res.writeHead(404, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ error: 'Directory not found' }));
return true;
}
try {
const stat = statSync(dirPath);
if (!stat.isDirectory()) {
res.writeHead(400, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ error: 'Path is not a directory' }));
return true;
}
const entries = readdirSync(dirPath, { withFileTypes: true });
const files = entries.map(entry => ({
name: entry.name,
isDirectory: entry.isDirectory(),
path: subPath ? `${subPath}/${entry.name}` : entry.name
}));
res.writeHead(200, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ files, subPath, skillName }));
} catch (error) {
res.writeHead(500, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ error: (error as Error).message }));
}
return true;
}
// API: Read skill file content
if (pathname.match(/^\/api\/skills\/[^/]+\/file$/) && req.method === 'GET') {
const pathParts = pathname.split('/');
const skillName = decodeURIComponent(pathParts[3]);
const fileName = url.searchParams.get('filename');
const location = url.searchParams.get('location') || 'project';
const projectPathParam = url.searchParams.get('path') || initialPath;
if (!fileName) {
res.writeHead(400, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ error: 'filename parameter is required' }));
return true;
}
const baseDir = location === 'project'
? join(projectPathParam, '.claude', 'skills')
: join(homedir(), '.claude', 'skills');
const filePath = join(baseDir, skillName, fileName);
// Security check: ensure file is within skill folder
if (!filePath.startsWith(join(baseDir, skillName))) {
res.writeHead(403, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ error: 'Access denied' }));
return true;
}
if (!existsSync(filePath)) {
res.writeHead(404, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ error: 'File not found' }));
return true;
}
try {
const content = readFileSync(filePath, 'utf8');
res.writeHead(200, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ content, fileName, path: filePath }));
} catch (error) {
res.writeHead(500, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ error: (error as Error).message }));
}
return true;
}
// API: Write skill file content
if (pathname.match(/^\/api\/skills\/[^/]+\/file$/) && req.method === 'POST') {
const pathParts = pathname.split('/');
const skillName = decodeURIComponent(pathParts[3]);
handlePostRequest(req, res, async (body) => {
const { fileName, content, location, projectPath: projectPathParam } = body;
if (!fileName) {
return { error: 'fileName is required' };
}
if (content === undefined) {
return { error: 'content is required' };
}
const baseDir = location === 'project'
? join(projectPathParam || initialPath, '.claude', 'skills')
: join(homedir(), '.claude', 'skills');
const filePath = join(baseDir, skillName, fileName);
// Security check: ensure file is within skill folder
if (!filePath.startsWith(join(baseDir, skillName))) {
return { error: 'Access denied' };
}
try {
await fsPromises.writeFile(filePath, content, 'utf8');
return { success: true, fileName, path: filePath };
} catch (error) {
return { error: (error as Error).message };
}
});
return true;
}
// API: Get single skill detail (exclude /dir and /file sub-routes)
if (pathname.startsWith('/api/skills/') && req.method === 'GET' &&
!pathname.endsWith('/skills/') && !pathname.endsWith('/dir') && !pathname.endsWith('/file')) {
const skillName = decodeURIComponent(pathname.replace('/api/skills/', ''));
const location = url.searchParams.get('location') || 'project';
const projectPathParam = url.searchParams.get('path') || initialPath;
@@ -576,7 +714,7 @@ export async function handleSkillsRoutes(ctx: RouteContext): Promise<boolean> {
return await importSkill(sourcePath, location, projectPath, skillName);
} else if (mode === 'cli-generate') {
// CLI generate mode: use Gemini to generate skill
// CLI generate mode: use Claude to generate skill
if (!skillName) {
return { error: 'Skill name is required for CLI generation mode' };
}

View File

@@ -49,7 +49,7 @@ const VERSION_CHECK_CACHE_TTL = 3600000; // 1 hour
*/
function getCurrentVersion(): string {
try {
const packageJsonPath = join(import.meta.dirname, '../../../package.json');
const packageJsonPath = join(import.meta.dirname, '../../../../package.json');
if (existsSync(packageJsonPath)) {
const pkg = JSON.parse(readFileSync(packageJsonPath, 'utf8'));
return pkg.version || '0.0.0';

View File

@@ -98,15 +98,17 @@ async function loadCodexLensStatus() {
}
window.cliToolsStatus.codexlens = {
installed: data.ready || false,
version: data.version || null
version: data.version || null,
installedModels: [] // Will be populated by loadSemanticStatus
};
// Update CodexLens badge
updateCodexLensBadge();
// If CodexLens is ready, also check semantic status
// If CodexLens is ready, also check semantic status and models
if (data.ready) {
await loadSemanticStatus();
await loadInstalledModels();
}
return data;
@@ -132,6 +134,37 @@ async function loadSemanticStatus() {
}
}
/**
* Load installed embedding models
*/
async function loadInstalledModels() {
try {
const response = await fetch('/api/codexlens/models');
if (!response.ok) throw new Error('Failed to load models');
const data = await response.json();
if (data.success && data.result && data.result.models) {
// Filter to only installed models
const installedModels = data.result.models
.filter(m => m.installed)
.map(m => m.profile);
// Update window.cliToolsStatus
if (window.cliToolsStatus && window.cliToolsStatus.codexlens) {
window.cliToolsStatus.codexlens.installedModels = installedModels;
window.cliToolsStatus.codexlens.allModels = data.result.models;
}
console.log('[CLI Status] Installed models:', installedModels);
return installedModels;
}
return [];
} catch (err) {
console.error('Failed to load installed models:', err);
return [];
}
}
// ========== Badge Update ==========
function updateCliBadge() {
const badge = document.getElementById('badgeCliTools');

View File

@@ -138,14 +138,14 @@ const HOOK_TEMPLATES = {
category: 'memory',
timeout: 5000
},
// Session Context - Fires once per session at startup
// Uses state file to detect first prompt, only fires once
// Session Context - Progressive disclosure based on session state
// First prompt: returns cluster overview, subsequent: intent-matched sessions
'session-context': {
event: 'UserPromptSubmit',
matcher: '',
command: 'bash',
args: ['-c', 'STATE_FILE="/tmp/.ccw-session-$CLAUDE_SESSION_ID"; [ -f "$STATE_FILE" ] && exit 0; touch "$STATE_FILE"; curl -s -X POST -H "Content-Type: application/json" -d "{\\"sessionId\\":\\"$CLAUDE_SESSION_ID\\"}" http://localhost:3456/api/hook/session-context 2>/dev/null | jq -r ".content // empty"'],
description: 'Load session context once at startup (cluster overview)',
command: 'ccw',
args: ['hook', 'session-context', '--stdin'],
description: 'Progressive session context (cluster overview → intent matching)',
category: 'context',
timeout: 5000
}

View File

@@ -946,20 +946,15 @@ function setCcwProjectRootToCurrent() {
}
// Build CCW Tools config with selected tools
// Uses isWindowsPlatform from state.js to generate platform-appropriate commands
// Uses globally installed ccw-mcp command (from claude-code-workflow package)
function buildCcwToolsConfig(selectedTools, pathConfig = {}) {
const { projectRoot, allowedDirs } = pathConfig;
// Windows requires 'cmd /c' wrapper to execute npx
// Other platforms (macOS, Linux) can run npx directly
const config = isWindowsPlatform
? {
command: "cmd",
args: ["/c", "npx", "-y", "ccw-mcp"]
}
: {
command: "npx",
args: ["-y", "ccw-mcp"]
};
// Use globally installed ccw-mcp command directly
// Requires: npm install -g claude-code-workflow
const config = {
command: "ccw-mcp",
args: []
};
// Add env if not all tools or not default 4 core tools
const coreTools = ['write_file', 'edit_file', 'codex_lens', 'smart_search'];

View File

@@ -23,6 +23,9 @@ const i18n = {
'common.loading': 'Loading...',
'common.error': 'Error',
'common.success': 'Success',
'common.retry': 'Retry',
'common.refresh': 'Refresh',
'common.minutes': 'minutes',
// Header
'header.project': 'Project:',
@@ -277,8 +280,17 @@ const i18n = {
'codexlens.installDeps': 'Install Dependencies',
'codexlens.installDepsPrompt': 'Would you like to install them now? (This may take a few minutes)\n\nClick "Cancel" to create FTS index only.',
'codexlens.installingDeps': 'Installing dependencies...',
'codexlens.installingMode': 'Installing with',
'codexlens.depsInstalled': 'Dependencies installed successfully',
'codexlens.depsInstallFailed': 'Failed to install dependencies',
// GPU Mode Selection
'codexlens.selectGpuMode': 'Select acceleration mode',
'codexlens.cpuModeDesc': 'Standard CPU processing',
'codexlens.directmlModeDesc': 'Windows GPU (NVIDIA/AMD/Intel)',
'codexlens.cudaModeDesc': 'NVIDIA GPU (requires CUDA Toolkit)',
'common.recommended': 'Recommended',
'common.unavailable': 'Unavailable',
'codexlens.modelManagement': 'Model Management',
'codexlens.loadingModels': 'Loading models...',
'codexlens.downloadModel': 'Download',
@@ -293,6 +305,35 @@ const i18n = {
'codexlens.modelListError': 'Failed to load models',
'codexlens.noModelsAvailable': 'No models available',
// Model Download Progress
'codexlens.downloadingModel': 'Downloading',
'codexlens.connectingToHuggingFace': 'Connecting to Hugging Face...',
'codexlens.downloadTimeEstimate': 'Estimated time',
'codexlens.manualDownloadHint': 'Manual download',
'codexlens.downloadingModelFiles': 'Downloading model files...',
'codexlens.downloadingWeights': 'Downloading model weights...',
'codexlens.downloadingTokenizer': 'Downloading tokenizer...',
'codexlens.verifyingModel': 'Verifying model...',
'codexlens.finalizingDownload': 'Finalizing...',
'codexlens.downloadComplete': 'Download complete!',
'codexlens.downloadFailed': 'Download failed',
'codexlens.manualDownloadOptions': 'Manual download options',
'codexlens.cliDownload': 'CLI',
'codexlens.huggingfaceDownload': 'Hugging Face',
'codexlens.downloadCanceled': 'Download canceled',
// Manual Download Guide
'codexlens.manualDownloadGuide': 'Manual Download Guide',
'codexlens.cliMethod': 'Command Line (Recommended)',
'codexlens.cliMethodDesc': 'Run in terminal with progress display:',
'codexlens.pythonMethod': 'Python Script',
'codexlens.pythonMethodDesc': 'Pre-download model using Python:',
'codexlens.hfHubMethod': 'Hugging Face Hub CLI',
'codexlens.hfHubMethodDesc': 'Download using huggingface-cli with resume support:',
'codexlens.modelLinks': 'Direct Model Links',
'codexlens.cacheLocation': 'Model Storage Location',
'common.copied': 'Copied to clipboard',
// CodexLens Indexing Progress
'codexlens.indexing': 'Indexing',
'codexlens.indexingDesc': 'Building code index for workspace',
@@ -302,6 +343,43 @@ const i18n = {
'codexlens.indexSuccess': 'Index created successfully',
'codexlens.indexFailed': 'Indexing failed',
// CodexLens Install
'codexlens.installDesc': 'Python-based code indexing engine',
'codexlens.whatWillBeInstalled': 'What will be installed:',
'codexlens.pythonVenv': 'Python virtual environment',
'codexlens.pythonVenvDesc': 'Isolated Python environment',
'codexlens.codexlensPackage': 'CodexLens package',
'codexlens.codexlensPackageDesc': 'Code indexing and search engine',
'codexlens.sqliteFtsDesc': 'Full-text search database',
'codexlens.installLocation': 'Installation Location',
'codexlens.installTime': 'First installation may take 2-3 minutes to download and set up Python packages.',
'codexlens.startingInstall': 'Starting installation...',
'codexlens.installing': 'Installing...',
'codexlens.creatingVenv': 'Creating virtual environment...',
'codexlens.installingPip': 'Installing pip packages...',
'codexlens.installingPackage': 'Installing CodexLens package...',
'codexlens.settingUpDeps': 'Setting up Python dependencies...',
'codexlens.installComplete': 'Installation complete!',
'codexlens.installSuccess': 'CodexLens installed successfully!',
'codexlens.installNow': 'Install Now',
'codexlens.accelerator': 'Accelerator',
// CodexLens Uninstall
'codexlens.uninstall': 'Uninstall',
'codexlens.uninstallDesc': 'Remove CodexLens and all data',
'codexlens.whatWillBeRemoved': 'What will be removed:',
'codexlens.removeVenv': 'Virtual environment at ~/.codexlens/venv',
'codexlens.removeData': 'All CodexLens indexed data and databases',
'codexlens.removeConfig': 'Configuration and semantic search models',
'codexlens.removing': 'Removing files...',
'codexlens.uninstalling': 'Uninstalling...',
'codexlens.removingVenv': 'Removing virtual environment...',
'codexlens.removingData': 'Deleting indexed data...',
'codexlens.removingConfig': 'Cleaning up configuration...',
'codexlens.finalizing': 'Finalizing removal...',
'codexlens.uninstallComplete': 'Uninstallation complete!',
'codexlens.uninstallSuccess': 'CodexLens uninstalled successfully!',
// Index Manager
'index.manager': 'Index Manager',
'index.projects': 'Projects',
@@ -333,9 +411,12 @@ const i18n = {
'index.fullDesc': 'FTS + Semantic search (recommended)',
'index.selectModel': 'Select embedding model',
'index.modelCode': 'Code (768d)',
'index.modelBase': 'Base (768d)',
'index.modelFast': 'Fast (384d)',
'index.modelMultilingual': 'Multilingual (1024d)',
'index.modelBalanced': 'Balanced (1024d)',
'index.modelMinilm': 'MiniLM (384d)',
'index.modelMultilingual': 'Multilingual (1024d) ⚠️',
'index.modelBalanced': 'Balanced (1024d) ⚠️',
'index.dimensionWarning': '1024d models require more resources',
// Semantic Search Configuration
'semantic.settings': 'Semantic Search Settings',
@@ -1358,7 +1439,10 @@ const i18n = {
'common.loading': '加载中...',
'common.error': '错误',
'common.success': '成功',
'common.retry': '重试',
'common.refresh': '刷新',
'common.minutes': '分钟',
// Header
'header.project': '项目:',
'header.recentProjects': '最近项目',
@@ -1612,8 +1696,17 @@ const i18n = {
'codexlens.installDeps': '安装依赖',
'codexlens.installDepsPrompt': '是否立即安装?(可能需要几分钟)\n\n点击"取消"将只创建 FTS 索引。',
'codexlens.installingDeps': '安装依赖中...',
'codexlens.installingMode': '正在安装',
'codexlens.depsInstalled': '依赖安装成功',
'codexlens.depsInstallFailed': '依赖安装失败',
// GPU 模式选择
'codexlens.selectGpuMode': '选择加速模式',
'codexlens.cpuModeDesc': '标准 CPU 处理',
'codexlens.directmlModeDesc': 'Windows GPU(NVIDIA/AMD/Intel)',
'codexlens.cudaModeDesc': 'NVIDIA GPU(需要 CUDA Toolkit)',
'common.recommended': '推荐',
'common.unavailable': '不可用',
'codexlens.modelManagement': '模型管理',
'codexlens.loadingModels': '加载模型中...',
'codexlens.downloadModel': '下载',
@@ -1628,6 +1721,35 @@ const i18n = {
'codexlens.modelListError': '加载模型列表失败',
'codexlens.noModelsAvailable': '没有可用模型',
// 模型下载进度
'codexlens.downloadingModel': '正在下载',
'codexlens.connectingToHuggingFace': '正在连接 Hugging Face...',
'codexlens.downloadTimeEstimate': '预计时间',
'codexlens.manualDownloadHint': '手动下载',
'codexlens.downloadingModelFiles': '正在下载模型文件...',
'codexlens.downloadingWeights': '正在下载模型权重...',
'codexlens.downloadingTokenizer': '正在下载分词器...',
'codexlens.verifyingModel': '正在验证模型...',
'codexlens.finalizingDownload': '正在完成...',
'codexlens.downloadComplete': '下载完成!',
'codexlens.downloadFailed': '下载失败',
'codexlens.manualDownloadOptions': '手动下载选项',
'codexlens.cliDownload': '命令行',
'codexlens.huggingfaceDownload': 'Hugging Face',
'codexlens.downloadCanceled': '下载已取消',
// 手动下载指南
'codexlens.manualDownloadGuide': '手动下载指南',
'codexlens.cliMethod': '命令行(推荐)',
'codexlens.cliMethodDesc': '在终端运行,显示下载进度:',
'codexlens.pythonMethod': 'Python 脚本',
'codexlens.pythonMethodDesc': '使用 Python 预下载模型:',
'codexlens.hfHubMethod': 'Hugging Face Hub CLI',
'codexlens.hfHubMethodDesc': '使用 huggingface-cli 下载,支持断点续传:',
'codexlens.modelLinks': '模型直链',
'codexlens.cacheLocation': '模型存储位置',
'common.copied': '已复制到剪贴板',
// CodexLens 索引进度
'codexlens.indexing': '索引中',
'codexlens.indexingDesc': '正在为工作区构建代码索引',
@@ -1637,6 +1759,43 @@ const i18n = {
'codexlens.indexSuccess': '索引创建成功',
'codexlens.indexFailed': '索引失败',
// CodexLens 安装
'codexlens.installDesc': '基于 Python 的代码索引引擎',
'codexlens.whatWillBeInstalled': '将安装的内容:',
'codexlens.pythonVenv': 'Python 虚拟环境',
'codexlens.pythonVenvDesc': '隔离的 Python 环境',
'codexlens.codexlensPackage': 'CodexLens 包',
'codexlens.codexlensPackageDesc': '代码索引和搜索引擎',
'codexlens.sqliteFtsDesc': '全文搜索数据库',
'codexlens.installLocation': '安装位置',
'codexlens.installTime': '首次安装可能需要 2-3 分钟下载和配置 Python 包。',
'codexlens.startingInstall': '正在启动安装...',
'codexlens.installing': '安装中...',
'codexlens.creatingVenv': '正在创建虚拟环境...',
'codexlens.installingPip': '正在安装 pip 包...',
'codexlens.installingPackage': '正在安装 CodexLens 包...',
'codexlens.settingUpDeps': '正在配置 Python 依赖...',
'codexlens.installComplete': '安装完成!',
'codexlens.installSuccess': 'CodexLens 安装成功!',
'codexlens.installNow': '立即安装',
'codexlens.accelerator': '加速器',
// CodexLens 卸载
'codexlens.uninstall': '卸载',
'codexlens.uninstallDesc': '移除 CodexLens 及所有数据',
'codexlens.whatWillBeRemoved': '将被移除的内容:',
'codexlens.removeVenv': '虚拟环境 ~/.codexlens/venv',
'codexlens.removeData': '所有 CodexLens 索引数据和数据库',
'codexlens.removeConfig': '配置文件和语义搜索模型',
'codexlens.removing': '正在删除文件...',
'codexlens.uninstalling': '正在卸载...',
'codexlens.removingVenv': '正在删除虚拟环境...',
'codexlens.removingData': '正在删除索引数据...',
'codexlens.removingConfig': '正在清理配置文件...',
'codexlens.finalizing': '正在完成卸载...',
'codexlens.uninstallComplete': '卸载完成!',
'codexlens.uninstallSuccess': 'CodexLens 卸载成功!',
// 索引管理器
'index.manager': '索引管理器',
'index.projects': '项目数',
@@ -1668,9 +1827,12 @@ const i18n = {
'index.fullDesc': 'FTS + 语义搜索(推荐)',
'index.selectModel': '选择嵌入模型',
'index.modelCode': '代码优化 (768维)',
'index.modelBase': '通用基础 (768维)',
'index.modelFast': '快速轻量 (384维)',
'index.modelMultilingual': '多语言 (1024维)',
'index.modelBalanced': '高精度 (1024维)',
'index.modelMinilm': 'MiniLM (384维)',
'index.modelMultilingual': '多语言 (1024维) ⚠️',
'index.modelBalanced': '高精度 (1024维) ⚠️',
'index.dimensionWarning': '1024维模型需要更多资源',
// Semantic Search 配置
'semantic.settings': '语义搜索设置',

View File

@@ -102,6 +102,7 @@ async function loadClaudeFiles() {
updateClaudeBadge(); // Update navigation badge
} catch (error) {
console.error('Error loading CLAUDE.md files:', error);
showRefreshToast(t('claudeManager.loadError') || 'Failed to load files', 'error');
addGlobalNotification('error', t('claudeManager.loadError'), null, 'CLAUDE.md');
}
}
@@ -113,6 +114,7 @@ async function refreshClaudeFiles() {
renderFileViewer();
renderFileMetadata();
if (window.lucide) lucide.createIcons();
showRefreshToast(t('claudeManager.refreshed') || 'Files refreshed', 'success');
addGlobalNotification('success', t('claudeManager.refreshed'), null, 'CLAUDE.md');
// Load freshness data in background
loadFreshnessDataAsync();
@@ -155,6 +157,7 @@ async function markFileAsUpdated() {
if (!res.ok) throw new Error('Failed to mark file as updated');
showRefreshToast(t('claudeManager.markedAsUpdated') || 'Marked as updated', 'success');
addGlobalNotification('success', t('claudeManager.markedAsUpdated') || 'Marked as updated', null, 'CLAUDE.md');
// Reload freshness data
@@ -163,6 +166,7 @@ async function markFileAsUpdated() {
renderFileMetadata();
} catch (error) {
console.error('Error marking file as updated:', error);
showRefreshToast(t('claudeManager.markUpdateError') || 'Failed to mark as updated', 'error');
addGlobalNotification('error', t('claudeManager.markUpdateError') || 'Failed to mark as updated', null, 'CLAUDE.md');
}
}
@@ -481,10 +485,12 @@ async function saveClaudeFile() {
selectedFile.stats = calculateFileStats(newContent);
isDirty = false;
showRefreshToast(t('claudeManager.saved') || 'File saved', 'success');
addGlobalNotification('success', t('claudeManager.saved'), null, 'CLAUDE.md');
renderFileMetadata();
} catch (error) {
console.error('Error saving file:', error);
showRefreshToast(t('claudeManager.saveError') || 'Save failed', 'error');
addGlobalNotification('error', t('claudeManager.saveError'), null, 'CLAUDE.md');
}
}
@@ -733,12 +739,13 @@ async function loadFileContent(filePath) {
}
function showClaudeNotification(type, message) {
// Use global notification system if available
// Show toast for immediate feedback
if (typeof showRefreshToast === 'function') {
showRefreshToast(message, type);
}
// Also add to global notification system if available
if (typeof addGlobalNotification === 'function') {
addGlobalNotification(type, message, null, 'CLAUDE.md');
} else {
// Fallback to simple alert
alert(message);
}
}
@@ -822,6 +829,7 @@ async function createNewFile() {
var modulePath = document.getElementById('modulePath').value;
if (level === 'module' && !modulePath) {
showRefreshToast(t('claude.modulePathRequired') || 'Module path is required', 'error');
addGlobalNotification('error', t('claude.modulePathRequired') || 'Module path is required', null, 'CLAUDE.md');
return;
}
@@ -841,12 +849,14 @@ async function createNewFile() {
var result = await res.json();
closeCreateDialog();
showRefreshToast(t('claude.fileCreated') || 'File created successfully', 'success');
addGlobalNotification('success', t('claude.fileCreated') || 'File created successfully', null, 'CLAUDE.md');
// Refresh file tree
await refreshClaudeFiles();
} catch (error) {
console.error('Error creating file:', error);
showRefreshToast(t('claude.createFileError') || 'Failed to create file', 'error');
addGlobalNotification('error', t('claude.createFileError') || 'Failed to create file', null, 'CLAUDE.md');
}
}
@@ -870,6 +880,7 @@ async function confirmDeleteFile() {
if (!res.ok) throw new Error('Failed to delete file');
showRefreshToast(t('claude.fileDeleted') || 'File deleted successfully', 'success');
addGlobalNotification('success', t('claude.fileDeleted') || 'File deleted successfully', null, 'CLAUDE.md');
selectedFile = null;
@@ -877,6 +888,7 @@ async function confirmDeleteFile() {
await refreshClaudeFiles();
} catch (error) {
console.error('Error deleting file:', error);
showRefreshToast(t('claude.deleteFileError') || 'Failed to delete file', 'error');
addGlobalNotification('error', t('claude.deleteFileError') || 'Failed to delete file', null, 'CLAUDE.md');
}
}
@@ -886,9 +898,11 @@ function copyFileContent() {
if (!selectedFile || !selectedFile.content) return;
navigator.clipboard.writeText(selectedFile.content).then(function() {
showRefreshToast(t('claude.contentCopied') || 'Content copied to clipboard', 'success');
addGlobalNotification('success', t('claude.contentCopied') || 'Content copied to clipboard', null, 'CLAUDE.md');
}).catch(function(error) {
console.error('Error copying content:', error);
showRefreshToast(t('claude.copyError') || 'Failed to copy content', 'error');
addGlobalNotification('error', t('claude.copyError') || 'Failed to copy content', null, 'CLAUDE.md');
});
}

View File

@@ -349,6 +349,50 @@ function getSelectedModel() {
return select ? select.value : 'code';
}
/**
* Build model select options HTML, showing only installed models
* @returns {string} HTML string for select options
*/
function buildModelSelectOptions() {
var installedModels = window.cliToolsStatus?.codexlens?.installedModels || [];
var allModels = window.cliToolsStatus?.codexlens?.allModels || [];
// Model display configuration
var modelConfig = {
'code': { label: t('index.modelCode') || 'Code (768d)', star: true },
'base': { label: t('index.modelBase') || 'Base (768d)', star: false },
'fast': { label: t('index.modelFast') || 'Fast (384d)', star: false },
'minilm': { label: t('index.modelMinilm') || 'MiniLM (384d)', star: false },
'multilingual': { label: t('index.modelMultilingual') || 'Multilingual (1024d)', warn: true },
'balanced': { label: t('index.modelBalanced') || 'Balanced (1024d)', warn: true }
};
// If no models installed, show placeholder
if (installedModels.length === 0) {
return '<option value="" disabled selected>' + (t('index.noModelsInstalled') || 'No models installed') + '</option>';
}
// Build options for installed models only
var options = '';
var firstInstalled = null;
// Preferred order: code, fast, minilm, base, multilingual, balanced
var preferredOrder = ['code', 'fast', 'minilm', 'base', 'multilingual', 'balanced'];
preferredOrder.forEach(function(profile) {
if (installedModels.includes(profile) && modelConfig[profile]) {
var config = modelConfig[profile];
var style = config.warn ? ' style="color: var(--muted-foreground)"' : '';
var suffix = config.star ? ' ⭐' : (config.warn ? ' ⚠️' : '');
var selected = !firstInstalled ? ' selected' : '';
if (!firstInstalled) firstInstalled = profile;
options += '<option value="' + profile + '"' + style + selected + '>' + config.label + suffix + '</option>';
}
});
return options;
}
// ========== Tools Section (Left Column) ==========
function renderToolsSection() {
var container = document.getElementById('tools-section');
@@ -404,10 +448,7 @@ function renderToolsSection() {
(codexLensStatus.ready
? '<span class="tool-status-text success"><i data-lucide="check-circle" class="w-3.5 h-3.5"></i> v' + (codexLensStatus.version || 'installed') + '</span>' +
'<select id="codexlensModelSelect" class="btn-sm bg-muted border border-border rounded text-xs" onclick="event.stopPropagation()" title="' + (t('index.selectModel') || 'Select embedding model') + '">' +
'<option value="code">' + (t('index.modelCode') || 'Code (768d)') + '</option>' +
'<option value="fast">' + (t('index.modelFast') || 'Fast (384d)') + '</option>' +
'<option value="multilingual">' + (t('index.modelMultilingual') || 'Multilingual (1024d)') + '</option>' +
'<option value="balanced">' + (t('index.modelBalanced') || 'Balanced (1024d)') + '</option>' +
buildModelSelectOptions() +
'</select>' +
'<button class="btn-sm btn-primary" onclick="event.stopPropagation(); initCodexLensIndex(\'full\', getSelectedModel())" title="' + (t('index.fullDesc') || 'FTS + Semantic search (recommended)') + '"><i data-lucide="layers" class="w-3 h-3"></i> ' + (t('index.fullIndex') || '全部索引') + '</button>' +
'<button class="btn-sm btn-outline" onclick="event.stopPropagation(); initCodexLensIndex(\'vector\', getSelectedModel())" title="' + (t('index.vectorDesc') || 'Semantic search with embeddings') + '"><i data-lucide="sparkles" class="w-3 h-3"></i> ' + (t('index.vectorIndex') || '向量索引') + '</button>' +

View File

@@ -126,10 +126,10 @@ function buildCodexLensConfigContent(config) {
'<button class="inline-flex items-center gap-1.5 px-3 py-1.5 text-xs font-medium rounded-md border border-border bg-background hover:bg-muted/50 transition-colors" onclick="cleanCodexLensIndexes()">' +
'<i data-lucide="trash" class="w-3.5 h-3.5"></i> ' + t('codexlens.cleanAllIndexes') +
'</button>' +
'<button class="inline-flex items-center gap-1.5 px-3 py-1.5 text-xs font-medium rounded-md border border-destructive/30 bg-destructive/5 text-destructive hover:bg-destructive/10 transition-colors" onclick="uninstallCodexLens()">' +
'<button class="inline-flex items-center gap-1.5 px-3 py-1.5 text-xs font-medium rounded-md border border-destructive/30 bg-destructive/5 text-destructive hover:bg-destructive/10 transition-colors" onclick="uninstallCodexLensFromManager()">' +
'<i data-lucide="trash-2" class="w-3.5 h-3.5"></i> ' + t('cli.uninstall') +
'</button>'
: '<button class="inline-flex items-center gap-1.5 px-3 py-1.5 text-xs font-medium rounded-md bg-primary text-primary-foreground hover:bg-primary/90 transition-colors" onclick="installCodexLens()">' +
: '<button class="inline-flex items-center gap-1.5 px-3 py-1.5 text-xs font-medium rounded-md bg-primary text-primary-foreground hover:bg-primary/90 transition-colors" onclick="installCodexLensFromManager()">' +
'<i data-lucide="download" class="w-3.5 h-3.5"></i> ' + t('codexlens.installCodexLens') +
'</button>') +
'</div>' +
@@ -335,6 +335,26 @@ function initCodexLensConfigEvents(currentConfig) {
// SEMANTIC DEPENDENCIES MANAGEMENT
// ============================================================
// Store detected GPU info
var detectedGpuInfo = null;
/**
* Detect GPU support
*/
async function detectGpuSupport() {
try {
var response = await fetch('/api/codexlens/gpu/detect');
var result = await response.json();
if (result.success) {
detectedGpuInfo = result;
return result;
}
} catch (err) {
console.error('GPU detection failed:', err);
}
return { mode: 'cpu', available: ['cpu'], info: 'CPU only' };
}
/**
* Load semantic dependencies status
*/
@@ -343,24 +363,58 @@ async function loadSemanticDepsStatus() {
if (!container) return;
try {
// Detect GPU support in parallel
var gpuPromise = detectGpuSupport();
var response = await fetch('/api/codexlens/semantic/status');
var result = await response.json();
var gpuInfo = await gpuPromise;
if (result.available) {
container.innerHTML =
'<div class="flex items-center gap-2 text-sm">' +
'<i data-lucide="check-circle" class="w-4 h-4 text-success"></i>' +
'<span>' + t('codexlens.semanticInstalled') + '</span>' +
'<span class="text-muted-foreground">(' + (result.backend || 'fastembed') + ')</span>' +
'</div>';
} else {
// Build accelerator badge
var accelerator = result.accelerator || 'CPU';
var acceleratorIcon = 'cpu';
var acceleratorClass = 'bg-muted text-muted-foreground';
if (accelerator === 'CUDA') {
acceleratorIcon = 'zap';
acceleratorClass = 'bg-green-500/20 text-green-600';
} else if (accelerator === 'DirectML') {
acceleratorIcon = 'gpu-card';
acceleratorClass = 'bg-blue-500/20 text-blue-600';
} else if (accelerator === 'ROCm') {
acceleratorIcon = 'flame';
acceleratorClass = 'bg-red-500/20 text-red-600';
}
container.innerHTML =
'<div class="space-y-2">' +
'<div class="flex items-center gap-2 text-sm">' +
'<i data-lucide="check-circle" class="w-4 h-4 text-success"></i>' +
'<span>' + t('codexlens.semanticInstalled') + '</span>' +
'<span class="text-muted-foreground">(' + (result.backend || 'fastembed') + ')</span>' +
'</div>' +
'<div class="flex items-center gap-2">' +
'<span class="inline-flex items-center gap-1 px-2 py-0.5 rounded text-xs font-medium ' + acceleratorClass + '">' +
'<i data-lucide="' + acceleratorIcon + '" class="w-3 h-3"></i>' +
accelerator +
'</span>' +
(result.providers && result.providers.length > 0
? '<span class="text-xs text-muted-foreground">' + result.providers.join(', ') + '</span>'
: '') +
'</div>' +
'</div>';
} else {
// Build GPU mode options
var gpuOptions = buildGpuModeSelector(gpuInfo);
container.innerHTML =
'<div class="space-y-3">' +
'<div class="flex items-center gap-2 text-sm text-muted-foreground">' +
'<i data-lucide="alert-circle" class="w-4 h-4"></i>' +
'<span>' + t('codexlens.semanticNotInstalled') + '</span>' +
'</div>' +
'<button class="btn-sm btn-outline" onclick="installSemanticDeps()">' +
gpuOptions +
'<button class="btn-sm btn-primary w-full" onclick="installSemanticDepsWithGpu()">' +
'<i data-lucide="download" class="w-3 h-3"></i> ' + t('codexlens.installDeps') +
'</button>' +
'</div>';
@@ -373,21 +427,120 @@ async function loadSemanticDepsStatus() {
}
/**
* Install semantic dependencies
* Build GPU mode selector HTML
*/
async function installSemanticDeps() {
function buildGpuModeSelector(gpuInfo) {
var modes = [
{
id: 'cpu',
label: 'CPU',
desc: t('codexlens.cpuModeDesc') || 'Standard CPU processing',
icon: 'cpu',
available: true
},
{
id: 'directml',
label: 'DirectML',
desc: t('codexlens.directmlModeDesc') || 'Windows GPU (NVIDIA/AMD/Intel)',
icon: 'gpu-card',
available: gpuInfo.available.includes('directml'),
recommended: gpuInfo.mode === 'directml'
},
{
id: 'cuda',
label: 'CUDA',
desc: t('codexlens.cudaModeDesc') || 'NVIDIA GPU (requires CUDA Toolkit)',
icon: 'zap',
available: gpuInfo.available.includes('cuda'),
recommended: gpuInfo.mode === 'cuda'
}
];
var html =
'<div class="space-y-2">' +
'<div class="text-xs font-medium text-muted-foreground flex items-center gap-1">' +
'<i data-lucide="settings" class="w-3 h-3"></i>' +
(t('codexlens.selectGpuMode') || 'Select acceleration mode') +
'</div>' +
'<div class="text-xs text-muted-foreground bg-muted/50 rounded px-2 py-1">' +
'<i data-lucide="info" class="w-3 h-3 inline"></i> ' + gpuInfo.info +
'</div>' +
'<div class="space-y-1">';
modes.forEach(function(mode) {
var isDisabled = !mode.available;
var isRecommended = mode.recommended;
var isDefault = mode.id === gpuInfo.mode;
html +=
'<label class="flex items-center gap-3 p-2 rounded border cursor-pointer hover:bg-muted/50 transition-colors ' +
(isDisabled ? 'opacity-50 cursor-not-allowed' : '') + '">' +
'<input type="radio" name="gpuMode" value="' + mode.id + '" ' +
(isDefault ? 'checked' : '') +
(isDisabled ? ' disabled' : '') +
' class="accent-primary">' +
'<div class="flex-1">' +
'<div class="flex items-center gap-2">' +
'<i data-lucide="' + mode.icon + '" class="w-4 h-4"></i>' +
'<span class="font-medium text-sm">' + mode.label + '</span>' +
(isRecommended ? '<span class="text-xs bg-primary/20 text-primary px-1.5 py-0.5 rounded">' + (t('common.recommended') || 'Recommended') + '</span>' : '') +
(isDisabled ? '<span class="text-xs text-muted-foreground">(' + (t('common.unavailable') || 'Unavailable') + ')</span>' : '') +
'</div>' +
'<div class="text-xs text-muted-foreground">' + mode.desc + '</div>' +
'</div>' +
'</label>';
});
html +=
'</div>' +
'</div>';
return html;
}
/**
* Get selected GPU mode
*/
function getSelectedGpuMode() {
var selected = document.querySelector('input[name="gpuMode"]:checked');
return selected ? selected.value : 'cpu';
}
/**
* Install semantic dependencies with GPU mode
*/
async function installSemanticDepsWithGpu() {
var container = document.getElementById('semanticDepsStatus');
if (!container) return;
var gpuMode = getSelectedGpuMode();
var modeLabels = {
cpu: 'CPU',
cuda: 'NVIDIA CUDA',
directml: 'DirectML'
};
container.innerHTML =
'<div class="text-sm text-muted-foreground animate-pulse">' + t('codexlens.installingDeps') + '</div>';
'<div class="space-y-2">' +
'<div class="flex items-center gap-2 text-sm text-muted-foreground">' +
'<div class="animate-spin w-4 h-4 border-2 border-primary border-t-transparent rounded-full"></div>' +
'<span>' + t('codexlens.installingDeps') + '</span>' +
'</div>' +
'<div class="text-xs text-muted-foreground">' +
(t('codexlens.installingMode') || 'Installing with') + ': ' + modeLabels[gpuMode] +
'</div>' +
'</div>';
try {
var response = await fetch('/api/codexlens/semantic/install', { method: 'POST' });
var response = await fetch('/api/codexlens/semantic/install', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ gpuMode: gpuMode })
});
var result = await response.json();
if (result.success) {
showRefreshToast(t('codexlens.depsInstalled'), 'success');
showRefreshToast(t('codexlens.depsInstalled') + ' (' + modeLabels[gpuMode] + ')', 'success');
await loadSemanticDepsStatus();
await loadModelList();
} else {
@@ -400,10 +553,164 @@ async function installSemanticDeps() {
}
}
/**
* Install semantic dependencies (legacy, defaults to CPU)
*/
async function installSemanticDeps() {
await installSemanticDepsWithGpu();
}
// ============================================================
// MODEL MANAGEMENT
// ============================================================
/**
* Build manual download guide HTML
*/
function buildManualDownloadGuide() {
var modelData = [
{ profile: 'code', name: 'jinaai/jina-embeddings-v2-base-code', size: '~150 MB' },
{ profile: 'fast', name: 'BAAI/bge-small-en-v1.5', size: '~80 MB' },
{ profile: 'balanced', name: 'mixedbread-ai/mxbai-embed-large-v1', size: '~600 MB' },
{ profile: 'multilingual', name: 'intfloat/multilingual-e5-large', size: '~1 GB' }
];
var html =
'<div class="mt-4 border-t pt-4">' +
'<button class="flex items-center gap-2 text-sm text-muted-foreground hover:text-foreground w-full" onclick="toggleManualDownloadGuide()" id="manualDownloadToggle">' +
'<i data-lucide="chevron-right" class="w-4 h-4 transition-transform" id="manualDownloadChevron"></i>' +
'<i data-lucide="terminal" class="w-4 h-4"></i>' +
'<span>' + (t('codexlens.manualDownloadGuide') || 'Manual Download Guide') + '</span>' +
'</button>' +
'<div id="manualDownloadContent" class="hidden mt-3 space-y-3">' +
// Method 1: CLI
'<div class="bg-muted/50 rounded-lg p-3 space-y-2">' +
'<div class="flex items-center gap-2 text-sm font-medium">' +
'<span class="inline-flex items-center justify-center w-5 h-5 rounded-full bg-primary/20 text-primary text-xs">1</span>' +
'<span>' + (t('codexlens.cliMethod') || 'Command Line (Recommended)') + '</span>' +
'</div>' +
'<div class="text-xs text-muted-foreground mb-2">' +
(t('codexlens.cliMethodDesc') || 'Run in terminal with progress display:') +
'</div>' +
'<div class="space-y-1">';
modelData.forEach(function(m) {
html +=
'<div class="flex items-center justify-between bg-background rounded px-2 py-1.5">' +
'<code class="text-xs font-mono">codexlens model-download ' + m.profile + '</code>' +
'<button class="text-xs text-primary hover:underline" onclick="copyToClipboard(\'codexlens model-download ' + m.profile + '\')">' +
'<i data-lucide="copy" class="w-3 h-3"></i>' +
'</button>' +
'</div>';
});
html +=
'</div>' +
'</div>' +
// Method 2: Python
'<div class="bg-muted/50 rounded-lg p-3 space-y-2">' +
'<div class="flex items-center gap-2 text-sm font-medium">' +
'<span class="inline-flex items-center justify-center w-5 h-5 rounded-full bg-primary/20 text-primary text-xs">2</span>' +
'<span>' + (t('codexlens.pythonMethod') || 'Python Script') + '</span>' +
'</div>' +
'<div class="text-xs text-muted-foreground mb-2">' +
(t('codexlens.pythonMethodDesc') || 'Pre-download model using Python:') +
'</div>' +
'<div class="bg-background rounded p-2">' +
'<pre class="text-xs font-mono whitespace-pre-wrap">' +
'# Install fastembed first\n' +
'pip install fastembed\n\n' +
'# Download model (choose one)\n' +
'from fastembed import TextEmbedding\n\n' +
'# Code model (recommended for code search)\n' +
'model = TextEmbedding("jinaai/jina-embeddings-v2-base-code")\n\n' +
'# Fast model (lightweight)\n' +
'# model = TextEmbedding("BAAI/bge-small-en-v1.5")' +
'</pre>' +
'</div>' +
'</div>' +
// Method 3: Hugging Face Hub
'<div class="bg-muted/50 rounded-lg p-3 space-y-2">' +
'<div class="flex items-center gap-2 text-sm font-medium">' +
'<span class="inline-flex items-center justify-center w-5 h-5 rounded-full bg-primary/20 text-primary text-xs">3</span>' +
'<span>' + (t('codexlens.hfHubMethod') || 'Hugging Face Hub CLI') + '</span>' +
'</div>' +
'<div class="text-xs text-muted-foreground mb-2">' +
(t('codexlens.hfHubMethodDesc') || 'Download using huggingface-cli with resume support:') +
'</div>' +
'<div class="bg-background rounded p-2 space-y-2">' +
'<pre class="text-xs font-mono whitespace-pre-wrap">' +
'# Install huggingface_hub\n' +
'pip install huggingface_hub\n\n' +
'# Download model (supports resume on failure)\n' +
'huggingface-cli download jinaai/jina-embeddings-v2-base-code' +
'</pre>' +
'</div>' +
'</div>' +
// Model Links
'<div class="bg-muted/50 rounded-lg p-3 space-y-2">' +
'<div class="flex items-center gap-2 text-sm font-medium">' +
'<i data-lucide="external-link" class="w-4 h-4"></i>' +
'<span>' + (t('codexlens.modelLinks') || 'Direct Model Links') + '</span>' +
'</div>' +
'<div class="grid grid-cols-2 gap-2">';
modelData.forEach(function(m) {
html +=
'<a href="https://huggingface.co/' + m.name + '" target="_blank" class="flex items-center justify-between bg-background rounded px-2 py-1.5 hover:bg-muted transition-colors">' +
'<span class="text-xs font-medium">' + m.profile + '</span>' +
'<span class="text-xs text-muted-foreground">' + m.size + '</span>' +
'</a>';
});
html +=
'</div>' +
'</div>' +
// Cache location info
'<div class="text-xs text-muted-foreground bg-muted/30 rounded p-2">' +
'<div class="flex items-start gap-1.5">' +
'<i data-lucide="info" class="w-3.5 h-3.5 mt-0.5 flex-shrink-0"></i>' +
'<div>' +
'<strong>' + (t('codexlens.cacheLocation') || 'Cache Location') + ':</strong><br>' +
'<code class="text-xs">Windows: %LOCALAPPDATA%\\Temp\\fastembed_cache</code><br>' +
'<code class="text-xs">Linux/Mac: ~/.cache/fastembed</code>' +
'</div>' +
'</div>' +
'</div>' +
'</div>' +
'</div>';
return html;
}
/**
* Toggle manual download guide visibility
*/
function toggleManualDownloadGuide() {
var content = document.getElementById('manualDownloadContent');
var chevron = document.getElementById('manualDownloadChevron');
if (content && chevron) {
content.classList.toggle('hidden');
chevron.style.transform = content.classList.contains('hidden') ? '' : 'rotate(90deg)';
}
}
/**
* Copy text to clipboard
*/
function copyToClipboard(text) {
navigator.clipboard.writeText(text).then(function() {
showRefreshToast(t('common.copied') || 'Copied to clipboard', 'success');
}).catch(function(err) {
console.error('Failed to copy:', err);
});
}
/**
* Load model list
*/
@@ -476,6 +783,10 @@ async function loadModelList() {
});
html += '</div>';
// Add manual download guide section
html += buildManualDownloadGuide();
container.innerHTML = html;
if (window.lucide) lucide.createIcons();
} catch (err) {
@@ -485,18 +796,94 @@ async function loadModelList() {
}
/**
* Download model
* Download model with progress simulation and manual download info
*/
async function downloadModel(profile) {
var modelCard = document.getElementById('model-' + profile);
if (!modelCard) return;
var originalHTML = modelCard.innerHTML;
// Get model info for size estimation
var modelSizes = {
'fast': { size: 80, time: '1-2' },
'code': { size: 150, time: '2-5' },
'multilingual': { size: 1000, time: '5-15' },
'balanced': { size: 600, time: '3-10' }
};
var modelInfo = modelSizes[profile] || { size: 100, time: '2-5' };
// Show detailed download UI with progress simulation
modelCard.innerHTML =
'<div class="flex items-center justify-center p-3">' +
'<span class="text-sm text-muted-foreground animate-pulse">' + t('codexlens.downloading') + '</span>' +
'<div class="p-3 space-y-3">' +
'<div class="flex items-center gap-2">' +
'<div class="animate-spin w-4 h-4 border-2 border-primary border-t-transparent rounded-full flex-shrink-0"></div>' +
'<span class="text-sm font-medium">' + (t('codexlens.downloadingModel') || 'Downloading') + ' ' + profile + '</span>' +
'</div>' +
'<div class="space-y-1">' +
'<div class="h-2 bg-muted rounded-full overflow-hidden">' +
'<div id="model-progress-' + profile + '" class="h-full bg-primary transition-all duration-1000 ease-out model-download-progress" style="width: 0%"></div>' +
'</div>' +
'<div class="flex justify-between text-xs text-muted-foreground">' +
'<span id="model-status-' + profile + '">' + (t('codexlens.connectingToHuggingFace') || 'Connecting to Hugging Face...') + '</span>' +
'<span>~' + modelInfo.size + ' MB</span>' +
'</div>' +
'</div>' +
'<div class="text-xs text-muted-foreground bg-muted/50 rounded p-2 space-y-1">' +
'<div class="flex items-start gap-1">' +
'<i data-lucide="info" class="w-3 h-3 mt-0.5 flex-shrink-0"></i>' +
'<span>' + (t('codexlens.downloadTimeEstimate') || 'Estimated time') + ': ' + modelInfo.time + ' ' + (t('common.minutes') || 'minutes') + '</span>' +
'</div>' +
'<div class="flex items-start gap-1">' +
'<i data-lucide="terminal" class="w-3 h-3 mt-0.5 flex-shrink-0"></i>' +
'<span>' + (t('codexlens.manualDownloadHint') || 'Manual download') + ': <code class="bg-background px-1 rounded">codexlens model-download ' + profile + '</code></span>' +
'</div>' +
'</div>' +
'<button class="text-xs text-muted-foreground hover:text-foreground underline" onclick="cancelModelDownload(\'' + profile + '\')">' +
(t('common.cancel') || 'Cancel') +
'</button>' +
'</div>';
if (window.lucide) lucide.createIcons();
// Start progress simulation
var progressBar = document.getElementById('model-progress-' + profile);
var statusText = document.getElementById('model-status-' + profile);
var simulatedProgress = 0;
var progressInterval = null;
var downloadAborted = false;
// Store abort controller for cancellation
window['modelDownloadAbort_' + profile] = function() {
downloadAborted = true;
if (progressInterval) clearInterval(progressInterval);
};
// Simulate progress based on model size
var progressStages = [
{ percent: 10, msg: t('codexlens.downloadingModelFiles') || 'Downloading model files...' },
{ percent: 30, msg: t('codexlens.downloadingWeights') || 'Downloading model weights...' },
{ percent: 60, msg: t('codexlens.downloadingTokenizer') || 'Downloading tokenizer...' },
{ percent: 80, msg: t('codexlens.verifyingModel') || 'Verifying model...' },
{ percent: 95, msg: t('codexlens.finalizingDownload') || 'Finalizing...' }
];
var stageIndex = 0;
var baseInterval = Math.max(2000, modelInfo.size * 30); // Slower for larger models
progressInterval = setInterval(function() {
if (downloadAborted) return;
if (stageIndex < progressStages.length) {
var stage = progressStages[stageIndex];
simulatedProgress = stage.percent;
if (progressBar) progressBar.style.width = simulatedProgress + '%';
if (statusText) statusText.textContent = stage.msg;
stageIndex++;
}
}, baseInterval);
try {
var response = await fetch('/api/codexlens/models/download', {
method: 'POST',
@@ -504,20 +891,99 @@ async function downloadModel(profile) {
body: JSON.stringify({ profile: profile })
});
// Clear simulation
if (progressInterval) clearInterval(progressInterval);
if (downloadAborted) {
modelCard.innerHTML = originalHTML;
if (window.lucide) lucide.createIcons();
return;
}
var result = await response.json();
if (result.success) {
// Show completion
if (progressBar) progressBar.style.width = '100%';
if (statusText) statusText.textContent = t('codexlens.downloadComplete') || 'Download complete!';
showRefreshToast(t('codexlens.modelDownloaded') + ': ' + profile, 'success');
// Refresh model list after short delay
setTimeout(function() {
loadModelList();
}, 500);
} else {
showModelDownloadError(modelCard, profile, result.error, originalHTML);
}
} catch (err) {
if (progressInterval) clearInterval(progressInterval);
showModelDownloadError(modelCard, profile, err.message, originalHTML);
}
// Cleanup abort function
delete window['modelDownloadAbort_' + profile];
}
/**
* Show model download error with manual download instructions
*/
function showModelDownloadError(modelCard, profile, error, originalHTML) {
var modelNames = {
'fast': 'BAAI/bge-small-en-v1.5',
'code': 'jinaai/jina-embeddings-v2-base-code',
'multilingual': 'intfloat/multilingual-e5-large',
'balanced': 'mixedbread-ai/mxbai-embed-large-v1'
};
var modelName = modelNames[profile] || profile;
var hfUrl = 'https://huggingface.co/' + modelName;
modelCard.innerHTML =
'<div class="p-3 space-y-3">' +
'<div class="flex items-start gap-2 text-destructive">' +
'<i data-lucide="alert-circle" class="w-4 h-4 mt-0.5 flex-shrink-0"></i>' +
'<div class="text-sm">' +
'<div class="font-medium">' + (t('codexlens.downloadFailed') || 'Download failed') + '</div>' +
'<div class="text-xs text-muted-foreground mt-1">' + error + '</div>' +
'</div>' +
'</div>' +
'<div class="bg-muted/50 rounded p-2 space-y-2 text-xs">' +
'<div class="font-medium">' + (t('codexlens.manualDownloadOptions') || 'Manual download options') + ':</div>' +
'<div class="space-y-1.5">' +
'<div class="flex items-start gap-1">' +
'<span class="text-muted-foreground">1.</span>' +
'<span>' + (t('codexlens.cliDownload') || 'CLI') + ': <code class="bg-background px-1 rounded">codexlens model-download ' + profile + '</code></span>' +
'</div>' +
'<div class="flex items-start gap-1">' +
'<span class="text-muted-foreground">2.</span>' +
'<span>' + (t('codexlens.huggingfaceDownload') || 'Hugging Face') + ': <a href="' + hfUrl + '" target="_blank" class="text-primary hover:underline">' + modelName + '</a></span>' +
'</div>' +
'</div>' +
'</div>' +
'<div class="flex gap-2">' +
'<button class="btn-sm btn-outline flex-1" onclick="loadModelList()">' +
'<i data-lucide="refresh-cw" class="w-3 h-3"></i> ' + (t('common.refresh') || 'Refresh') +
'</button>' +
'<button class="btn-sm btn-primary flex-1" onclick="downloadModel(\'' + profile + '\')">' +
'<i data-lucide="download" class="w-3 h-3"></i> ' + (t('common.retry') || 'Retry') +
'</button>' +
'</div>' +
'</div>';
if (window.lucide) lucide.createIcons();
}
/**
* Cancel model download
*/
function cancelModelDownload(profile) {
if (window['modelDownloadAbort_' + profile]) {
window['modelDownloadAbort_' + profile]();
showRefreshToast(t('codexlens.downloadCanceled') || 'Download canceled', 'info');
loadModelList();
}
}
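Note that cancellation here only stops the client-side progress simulation; the POST to /api/codexlens/models/download keeps running. A sketch of real request cancellation with AbortController, assuming the same endpoint; the helper name is hypothetical, and server-side cleanup would still need its own support:
// Sketch: abortable variant of the download request. Calling abort()
// rejects the pending fetch with a DOMException named "AbortError".
async function downloadModelAbortable(profile: string): Promise<Response | null> {
  const controller = new AbortController();
  (window as any)['modelDownloadAbort_' + profile] = () => controller.abort();
  try {
    return await fetch('/api/codexlens/models/download', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ profile: profile }),
      signal: controller.signal,
    });
  } catch (err) {
    if (err instanceof DOMException && err.name === 'AbortError') {
      return null; // user cancelled; caller restores the card UI
    }
    throw err;
  } finally {
    delete (window as any)['modelDownloadAbort_' + profile];
  }
}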
@@ -876,16 +1342,315 @@ async function cancelCodexLensIndexing() {
/**
* Install CodexLens
* Note: Uses CodexLens-specific install wizard from cli-status.js
* which calls /api/codexlens/bootstrap (Python venv), not the generic
* CLI install that uses npm install -g (NPM packages)
*/
function installCodexLensFromManager() {
// Use the CodexLens-specific install wizard from cli-status.js
if (typeof openCodexLensInstallWizard === 'function') {
openCodexLensInstallWizard();
} else {
// Fallback: inline install wizard if cli-status.js not loaded
showCodexLensInstallDialog();
}
}
/**
* Fallback install dialog when cli-status.js is not loaded
*/
function showCodexLensInstallDialog() {
var modal = document.createElement('div');
modal.id = 'codexlensInstallModalFallback';
modal.className = 'fixed inset-0 bg-black/50 flex items-center justify-center z-50';
modal.innerHTML =
'<div class="bg-card rounded-lg shadow-xl w-full max-w-md mx-4 overflow-hidden">' +
'<div class="p-6">' +
'<div class="flex items-center gap-3 mb-4">' +
'<div class="w-10 h-10 rounded-full bg-primary/10 flex items-center justify-center">' +
'<i data-lucide="database" class="w-5 h-5 text-primary"></i>' +
'</div>' +
'<div>' +
'<h3 class="text-lg font-semibold">' + t('codexlens.installCodexLens') + '</h3>' +
'<p class="text-sm text-muted-foreground">' + t('codexlens.installDesc') + '</p>' +
'</div>' +
'</div>' +
'<div class="space-y-4">' +
'<div class="bg-muted/50 rounded-lg p-4">' +
'<h4 class="font-medium mb-2">' + t('codexlens.whatWillBeInstalled') + '</h4>' +
'<ul class="text-sm space-y-2 text-muted-foreground">' +
'<li class="flex items-start gap-2">' +
'<i data-lucide="check" class="w-4 h-4 text-success mt-0.5"></i>' +
'<span><strong>' + t('codexlens.pythonVenv') + '</strong> - ' + t('codexlens.pythonVenvDesc') + '</span>' +
'</li>' +
'<li class="flex items-start gap-2">' +
'<i data-lucide="check" class="w-4 h-4 text-success mt-0.5"></i>' +
'<span><strong>' + t('codexlens.codexlensPackage') + '</strong> - ' + t('codexlens.codexlensPackageDesc') + '</span>' +
'</li>' +
'<li class="flex items-start gap-2">' +
'<i data-lucide="check" class="w-4 h-4 text-success mt-0.5"></i>' +
'<span><strong>SQLite FTS5</strong> - ' + t('codexlens.sqliteFtsDesc') + '</span>' +
'</li>' +
'</ul>' +
'</div>' +
'<div class="bg-primary/5 border border-primary/20 rounded-lg p-3">' +
'<div class="flex items-start gap-2">' +
'<i data-lucide="info" class="w-4 h-4 text-primary mt-0.5"></i>' +
'<div class="text-sm text-muted-foreground">' +
'<p class="font-medium text-foreground">' + t('codexlens.installLocation') + '</p>' +
'<p class="mt-1"><code class="bg-muted px-1 rounded">~/.codexlens/venv</code></p>' +
'<p class="mt-1">' + t('codexlens.installTime') + '</p>' +
'</div>' +
'</div>' +
'</div>' +
'<div id="codexlensInstallProgressFallback" class="hidden">' +
'<div class="flex items-center gap-3">' +
'<div class="animate-spin w-5 h-5 border-2 border-primary border-t-transparent rounded-full"></div>' +
'<span class="text-sm" id="codexlensInstallStatusFallback">' + t('codexlens.startingInstall') + '</span>' +
'</div>' +
'<div class="mt-2 h-2 bg-muted rounded-full overflow-hidden">' +
'<div id="codexlensInstallProgressBarFallback" class="h-full bg-primary transition-all duration-300" style="width: 0%"></div>' +
'</div>' +
'</div>' +
'</div>' +
'</div>' +
'<div class="border-t border-border p-4 flex justify-end gap-3 bg-muted/30">' +
'<button class="btn-outline px-4 py-2" onclick="closeCodexLensInstallDialogFallback()">' + t('common.cancel') + '</button>' +
'<button id="codexlensInstallBtnFallback" class="btn-primary px-4 py-2" onclick="startCodexLensInstallFallback()">' +
'<i data-lucide="download" class="w-4 h-4 mr-2"></i>' +
t('codexlens.installNow') +
'</button>' +
'</div>' +
'</div>';
document.body.appendChild(modal);
if (window.lucide) lucide.createIcons();
}
function closeCodexLensInstallDialogFallback() {
var modal = document.getElementById('codexlensInstallModalFallback');
if (modal) modal.remove();
}
async function startCodexLensInstallFallback() {
var progressDiv = document.getElementById('codexlensInstallProgressFallback');
var installBtn = document.getElementById('codexlensInstallBtnFallback');
var statusText = document.getElementById('codexlensInstallStatusFallback');
var progressBar = document.getElementById('codexlensInstallProgressBarFallback');
progressDiv.classList.remove('hidden');
installBtn.disabled = true;
installBtn.innerHTML = '<span class="animate-pulse">' + t('codexlens.installing') + '</span>';
var stages = [
{ progress: 10, text: t('codexlens.creatingVenv') },
{ progress: 30, text: t('codexlens.installingPip') },
{ progress: 50, text: t('codexlens.installingPackage') },
{ progress: 70, text: t('codexlens.settingUpDeps') },
{ progress: 90, text: t('codexlens.finalizing') }
];
var currentStage = 0;
var progressInterval = setInterval(function() {
if (currentStage < stages.length) {
statusText.textContent = stages[currentStage].text;
progressBar.style.width = stages[currentStage].progress + '%';
currentStage++;
}
}, 1500);
try {
var response = await fetch('/api/codexlens/bootstrap', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({})
});
clearInterval(progressInterval);
var result = await response.json();
if (result.success) {
progressBar.style.width = '100%';
statusText.textContent = t('codexlens.installComplete');
setTimeout(function() {
closeCodexLensInstallDialogFallback();
showRefreshToast(t('codexlens.installSuccess'), 'success');
// Refresh the page to update status
if (typeof loadCodexLensStatus === 'function') {
loadCodexLensStatus().then(function() {
if (typeof renderCodexLensManager === 'function') renderCodexLensManager();
});
} else {
location.reload();
}
}, 1000);
} else {
statusText.textContent = t('common.error') + ': ' + result.error;
progressBar.classList.add('bg-destructive');
installBtn.disabled = false;
installBtn.innerHTML = '<i data-lucide="refresh-cw" class="w-4 h-4 mr-2"></i> ' + t('common.retry');
if (window.lucide) lucide.createIcons();
}
} catch (err) {
clearInterval(progressInterval);
statusText.textContent = t('common.error') + ': ' + err.message;
progressBar.classList.add('bg-destructive');
installBtn.disabled = false;
installBtn.innerHTML = '<i data-lucide="refresh-cw" class="w-4 h-4 mr-2"></i> ' + t('common.retry');
if (window.lucide) lucide.createIcons();
}
}
/**
* Uninstall CodexLens
* Note: Uses CodexLens-specific uninstall wizard from cli-status.js
* which calls /api/codexlens/uninstall (Python venv), not the generic
* CLI uninstall that uses /api/cli/uninstall (NPM packages)
*/
function uninstallCodexLensFromManager() {
// Use the CodexLens-specific uninstall wizard from cli-status.js
if (typeof openCodexLensUninstallWizard === 'function') {
openCodexLensUninstallWizard();
} else {
// Fallback: inline uninstall wizard if cli-status.js not loaded
showCodexLensUninstallDialog();
}
}
/**
* Fallback uninstall dialog when cli-status.js is not loaded
*/
function showCodexLensUninstallDialog() {
var modal = document.createElement('div');
modal.id = 'codexlensUninstallModalFallback';
modal.className = 'fixed inset-0 bg-black/50 flex items-center justify-center z-50';
modal.innerHTML =
'<div class="bg-card rounded-lg shadow-xl w-full max-w-md mx-4 overflow-hidden">' +
'<div class="p-6">' +
'<div class="flex items-center gap-3 mb-4">' +
'<div class="w-10 h-10 rounded-full bg-destructive/10 flex items-center justify-center">' +
'<i data-lucide="trash-2" class="w-5 h-5 text-destructive"></i>' +
'</div>' +
'<div>' +
'<h3 class="text-lg font-semibold">' + t('codexlens.uninstall') + '</h3>' +
'<p class="text-sm text-muted-foreground">' + t('codexlens.uninstallDesc') + '</p>' +
'</div>' +
'</div>' +
'<div class="space-y-4">' +
'<div class="bg-destructive/5 border border-destructive/20 rounded-lg p-4">' +
'<h4 class="font-medium text-destructive mb-2">' + t('codexlens.whatWillBeRemoved') + '</h4>' +
'<ul class="text-sm space-y-2 text-muted-foreground">' +
'<li class="flex items-start gap-2">' +
'<i data-lucide="x" class="w-4 h-4 text-destructive mt-0.5"></i>' +
'<span>' + t('codexlens.removeVenv') + '</span>' +
'</li>' +
'<li class="flex items-start gap-2">' +
'<i data-lucide="x" class="w-4 h-4 text-destructive mt-0.5"></i>' +
'<span>' + t('codexlens.removeData') + '</span>' +
'</li>' +
'<li class="flex items-start gap-2">' +
'<i data-lucide="x" class="w-4 h-4 text-destructive mt-0.5"></i>' +
'<span>' + t('codexlens.removeConfig') + '</span>' +
'</li>' +
'</ul>' +
'</div>' +
'<div id="codexlensUninstallProgressFallback" class="hidden">' +
'<div class="flex items-center gap-3">' +
'<div class="animate-spin w-5 h-5 border-2 border-destructive border-t-transparent rounded-full"></div>' +
'<span class="text-sm" id="codexlensUninstallStatusFallback">' + t('codexlens.removing') + '</span>' +
'</div>' +
'<div class="mt-2 h-2 bg-muted rounded-full overflow-hidden">' +
'<div id="codexlensUninstallProgressBarFallback" class="h-full bg-destructive transition-all duration-300" style="width: 0%"></div>' +
'</div>' +
'</div>' +
'</div>' +
'</div>' +
'<div class="border-t border-border p-4 flex justify-end gap-3 bg-muted/30">' +
'<button class="btn-outline px-4 py-2" onclick="closeCodexLensUninstallDialogFallback()">' + t('common.cancel') + '</button>' +
'<button id="codexlensUninstallBtnFallback" class="btn-destructive px-4 py-2" onclick="startCodexLensUninstallFallback()">' +
'<i data-lucide="trash-2" class="w-4 h-4 mr-2"></i>' +
t('codexlens.uninstall') +
'</button>' +
'</div>' +
'</div>';
document.body.appendChild(modal);
if (window.lucide) lucide.createIcons();
}
function closeCodexLensUninstallDialogFallback() {
var modal = document.getElementById('codexlensUninstallModalFallback');
if (modal) modal.remove();
}
async function startCodexLensUninstallFallback() {
var progressDiv = document.getElementById('codexlensUninstallProgressFallback');
var uninstallBtn = document.getElementById('codexlensUninstallBtnFallback');
var statusText = document.getElementById('codexlensUninstallStatusFallback');
var progressBar = document.getElementById('codexlensUninstallProgressBarFallback');
progressDiv.classList.remove('hidden');
uninstallBtn.disabled = true;
uninstallBtn.innerHTML = '<span class="animate-pulse">' + t('codexlens.uninstalling') + '</span>';
var stages = [
{ progress: 25, text: t('codexlens.removingVenv') },
{ progress: 50, text: t('codexlens.removingData') },
{ progress: 75, text: t('codexlens.removingConfig') },
{ progress: 90, text: t('codexlens.finalizing') }
];
var currentStage = 0;
var progressInterval = setInterval(function() {
if (currentStage < stages.length) {
statusText.textContent = stages[currentStage].text;
progressBar.style.width = stages[currentStage].progress + '%';
currentStage++;
}
}, 500);
try {
var response = await fetch('/api/codexlens/uninstall', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({})
});
clearInterval(progressInterval);
var result = await response.json();
if (result.success) {
progressBar.style.width = '100%';
statusText.textContent = t('codexlens.uninstallComplete');
setTimeout(function() {
closeCodexLensUninstallDialogFallback();
showRefreshToast(t('codexlens.uninstallSuccess'), 'success');
// Refresh the page to update status
if (typeof loadCodexLensStatus === 'function') {
loadCodexLensStatus().then(function() {
if (typeof renderCodexLensManager === 'function') renderCodexLensManager();
});
} else {
location.reload();
}
}, 1000);
} else {
statusText.textContent = t('common.error') + ': ' + result.error;
progressBar.classList.add('bg-destructive');
uninstallBtn.disabled = false;
uninstallBtn.innerHTML = '<i data-lucide="refresh-cw" class="w-4 h-4 mr-2"></i> ' + t('common.retry');
if (window.lucide) lucide.createIcons();
}
} catch (err) {
clearInterval(progressInterval);
statusText.textContent = t('common.error') + ': ' + err.message;
progressBar.classList.add('bg-destructive');
uninstallBtn.disabled = false;
uninstallBtn.innerHTML = '<i data-lucide="refresh-cw" class="w-4 h-4 mr-2"></i> ' + t('common.retry');
if (window.lucide) lucide.createIcons();
}
}
/**

View File

@@ -449,8 +449,23 @@ function isHookTemplateInstalled(templateId) {
const template = HOOK_TEMPLATES[templateId];
if (!template) return false;
// Define unique patterns for each template type (more specific than just command)
const uniquePatterns = {
'session-context': 'hook session-context',
'codexlens-update': 'codexlens update',
'ccw-notify': 'api/hook',
'log-tool': 'tool-usage.log',
'lint-check': 'eslint',
'git-add': 'git add',
'memory-file-read': 'memory track --type file --action read',
'memory-file-write': 'memory track --type file --action write',
'memory-prompt-track': 'memory track --type topic',
'skill-context-auto': 'skill-context-auto'
};
// Use unique pattern if defined, otherwise fall back to command + args
const searchPattern = uniquePatterns[templateId] ||
(template.command + (template.args ? ' ' + template.args.join(' ') : ''));
// Check project hooks
const projectHooks = hookConfig.project?.hooks?.[template.event];
@@ -459,7 +474,7 @@ function isHookTemplateInstalled(templateId) {
if (hookList.some(h => {
// Check both old format (h.command) and new format (h.hooks[0].command)
const cmd = h.hooks?.[0]?.command || h.command || '';
return cmd.includes(searchPattern);
})) return true;
}
@@ -469,7 +484,7 @@ function isHookTemplateInstalled(templateId) {
const hookList = Array.isArray(globalHooks) ? globalHooks : [globalHooks];
if (hookList.some(h => {
const cmd = h.hooks?.[0]?.command || h.command || '';
return cmd.includes(searchPattern);
})) return true;
}
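The point of uniquePatterns is that several templates invoke the same binary, so matching on template.command alone reports every sibling template as installed. A small illustration; the installed command string is hypothetical, the patterns mirror the map above:
// Hypothetical hook command as it would appear in settings:
const cmd = 'ccw memory track --type file --action read';

cmd.includes('ccw');                                     // true for every ccw-based template
cmd.includes('memory track --type file --action read');  // true  -> memory-file-read installed
cmd.includes('memory track --type file --action write'); // false -> memory-file-write not installed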
@@ -512,7 +527,7 @@ async function uninstallHookTemplate(templateId) {
// Define unique patterns for each template type
const uniquePatterns = {
'session-context': 'hook session-context',
'codexlens-update': 'codexlens update',
'ccw-notify': 'api/hook',
'log-tool': 'tool-usage.log',

View File

@@ -42,17 +42,41 @@ function getCcwEnabledToolsCodex() {
// Get current CCW_PROJECT_ROOT from config
function getCcwProjectRoot() {
// Try project config first, then global config
const currentPath = projectPath;
const projectData = mcpAllProjects[currentPath] || {};
const projectCcwConfig = projectData.mcpServers?.['ccw-tools'];
if (projectCcwConfig?.env?.CCW_PROJECT_ROOT) {
return projectCcwConfig.env.CCW_PROJECT_ROOT;
}
// Fallback to global config
const globalCcwConfig = mcpUserServers?.['ccw-tools'];
return globalCcwConfig?.env?.CCW_PROJECT_ROOT || '';
}
// Get current CCW_ALLOWED_DIRS from config
function getCcwAllowedDirs() {
// Try project config first, then global config
const currentPath = projectPath;
const projectData = mcpAllProjects[currentPath] || {};
const projectCcwConfig = projectData.mcpServers?.['ccw-tools'];
if (projectCcwConfig?.env?.CCW_ALLOWED_DIRS) {
return projectCcwConfig.env.CCW_ALLOWED_DIRS;
}
// Fallback to global config
const globalCcwConfig = mcpUserServers?.['ccw-tools'];
return globalCcwConfig?.env?.CCW_ALLOWED_DIRS || '';
}
// Get current CCW_PROJECT_ROOT from Codex config
function getCcwProjectRootCodex() {
const ccwConfig = codexMcpServers?.['ccw-tools'];
return ccwConfig?.env?.CCW_PROJECT_ROOT || '';
}
// Get current CCW_ALLOWED_DIRS from Codex config
function getCcwAllowedDirsCodex() {
const ccwConfig = codexMcpServers?.['ccw-tools'];
return ccwConfig?.env?.CCW_ALLOWED_DIRS || '';
}
@@ -260,7 +284,7 @@ async function renderMcpManager() {
<input type="text"
class="ccw-project-root-input flex-1 px-2 py-1 text-xs bg-background border border-border rounded focus:outline-none focus:ring-1 focus:ring-primary"
placeholder="${projectPath || t('mcp.useCurrentDir')}"
value="${getCcwProjectRoot()}">
value="${getCcwProjectRootCodex()}">
<button class="p-1 text-muted-foreground hover:text-foreground"
onclick="setCcwProjectRootToCurrent()"
title="${t('mcp.useCurrentProject')}">
@@ -272,7 +296,7 @@ async function renderMcpManager() {
<input type="text"
class="ccw-allowed-dirs-input flex-1 px-2 py-1 text-xs bg-background border border-border rounded focus:outline-none focus:ring-1 focus:ring-primary"
placeholder="${t('mcp.allowedDirsPlaceholder')}"
value="${getCcwAllowedDirs()}">
value="${getCcwAllowedDirsCodex()}">
</div>
</div>
</div>

View File

@@ -638,9 +638,26 @@ function addRulePath() {
function removeRulePath(index) {
ruleCreateState.paths.splice(index, 1);
// Re-render paths list without closing modal
const pathsList = document.getElementById('rulePathsList');
if (pathsList) {
pathsList.innerHTML = ruleCreateState.paths.map((path, idx) => `
<div class="flex gap-2">
<input type="text" class="rule-path-input flex-1 px-3 py-2 bg-background border border-border rounded-lg text-sm focus:outline-none focus:ring-2 focus:ring-primary"
placeholder="src/**/*.ts"
value="${path}"
data-index="${idx}">
${idx > 0 ? `
<button class="px-3 py-2 text-destructive hover:bg-destructive/10 rounded-lg transition-colors"
onclick="removeRulePath(${idx})">
<i data-lucide="x" class="w-4 h-4"></i>
</button>
` : ''}
</div>
`).join('');
if (typeof lucide !== 'undefined') lucide.createIcons();
}
}
function switchRuleCreateMode(mode) {
@@ -674,9 +691,21 @@ function switchRuleCreateMode(mode) {
if (contentSection) contentSection.style.display = 'block';
}
// Update mode button styles without re-rendering
const modeButtons = document.querySelectorAll('#ruleCreateModal .mode-btn');
modeButtons.forEach(btn => {
const btnText = btn.querySelector('.font-medium')?.textContent || '';
const isInput = btnText.includes(t('rules.manualInput'));
const isCliGenerate = btnText.includes(t('rules.cliGenerate'));
if ((isInput && mode === 'input') || (isCliGenerate && mode === 'cli-generate')) {
btn.classList.remove('border-border', 'hover:border-primary/50');
btn.classList.add('border-primary', 'bg-primary/10');
} else {
btn.classList.remove('border-primary', 'bg-primary/10');
btn.classList.add('border-border', 'hover:border-primary/50');
}
});
}
function switchRuleGenerationType(type) {

View File

@@ -153,10 +153,11 @@ function renderSkillCard(skill, location) {
const locationIcon = location === 'project' ? 'folder' : 'user';
const locationClass = location === 'project' ? 'text-primary' : 'text-indigo';
const locationBg = location === 'project' ? 'bg-primary/10' : 'bg-indigo/10';
const folderName = skill.folderName || skill.name;
return `
<div class="skill-card bg-card border border-border rounded-lg p-4 hover:shadow-md transition-all cursor-pointer"
onclick="showSkillDetail('${escapeHtml(skill.name)}', '${location}')">
onclick="showSkillDetail('${escapeHtml(folderName)}', '${location}')">
<div class="flex items-start justify-between mb-3">
<div class="flex items-center gap-3">
<div class="w-10 h-10 ${locationBg} rounded-lg flex items-center justify-center">
@@ -198,6 +199,7 @@ function renderSkillCard(skill, location) {
function renderSkillDetailPanel(skill) {
const hasAllowedTools = skill.allowedTools && skill.allowedTools.length > 0;
const hasSupportingFiles = skill.supportingFiles && skill.supportingFiles.length > 0;
const folderName = skill.folderName || skill.name;
return `
<div class="skill-detail-panel fixed top-0 right-0 w-1/2 max-w-xl h-full bg-card border-l border-border shadow-lg z-50 flex flex-col">
@@ -243,20 +245,54 @@ function renderSkillDetailPanel(skill) {
</div>
` : ''}
<!-- Skill Files (SKILL.md + Supporting Files) -->
<div>
<h4 class="text-sm font-semibold text-foreground mb-2">${t('skills.files') || 'Files'}</h4>
<div class="space-y-2">
<!-- SKILL.md (main file) -->
<div class="flex items-center justify-between p-2 bg-primary/5 border border-primary/20 rounded-lg cursor-pointer hover:bg-primary/10 transition-colors"
onclick="viewSkillFile('${escapeHtml(folderName)}', 'SKILL.md', '${skill.location}')">
<div class="flex items-center gap-2">
<i data-lucide="file-text" class="w-4 h-4 text-primary"></i>
<span class="text-sm font-mono text-foreground font-medium">SKILL.md</span>
</div>
<div class="flex items-center gap-1">
<button class="p-1 text-primary hover:bg-primary/20 rounded transition-colors"
onclick="event.stopPropagation(); editSkillFile('${escapeHtml(folderName)}', 'SKILL.md', '${skill.location}')"
title="${t('common.edit')}">
<i data-lucide="edit-2" class="w-3.5 h-3.5"></i>
</button>
</div>
</div>
${hasSupportingFiles ? skill.supportingFiles.map(file => {
const isDir = file.endsWith('/');
const dirName = isDir ? file.slice(0, -1) : file;
return `
<!-- Supporting file: ${escapeHtml(file)} -->
<div class="skill-file-item" data-path="${escapeHtml(dirName)}">
<div class="flex items-center justify-between p-2 bg-muted/50 rounded-lg cursor-pointer hover:bg-muted transition-colors"
onclick="${isDir ? `toggleSkillFolder('${escapeHtml(folderName)}', '${escapeHtml(dirName)}', '${skill.location}', this)` : `viewSkillFile('${escapeHtml(folderName)}', '${escapeHtml(file)}', '${skill.location}')`}">
<div class="flex items-center gap-2">
<i data-lucide="${isDir ? 'folder' : 'file-text'}" class="w-4 h-4 text-muted-foreground ${isDir ? 'folder-icon' : ''}"></i>
<span class="text-sm font-mono text-foreground">${escapeHtml(isDir ? dirName : file)}</span>
${isDir ? '<i data-lucide="chevron-right" class="w-3 h-3 text-muted-foreground folder-chevron transition-transform"></i>' : ''}
</div>
${!isDir ? `
<div class="flex items-center gap-1">
<button class="p-1 text-muted-foreground hover:text-foreground hover:bg-muted rounded transition-colors"
onclick="event.stopPropagation(); editSkillFile('${escapeHtml(folderName)}', '${escapeHtml(file)}', '${skill.location}')"
title="${t('common.edit')}">
<i data-lucide="edit-2" class="w-3.5 h-3.5"></i>
</button>
</div>
` : ''}
</div>
<div class="folder-contents hidden ml-4 mt-1 space-y-1"></div>
</div>
`;
}).join('') : ''}
</div>
</div>
<!-- Path -->
<div>
@@ -269,12 +305,12 @@ function renderSkillDetailPanel(skill) {
<!-- Actions -->
<div class="px-5 py-4 border-t border-border flex justify-between">
<button class="px-4 py-2 text-sm text-destructive hover:bg-destructive/10 rounded-lg transition-colors flex items-center gap-2"
onclick="deleteSkill('${escapeHtml(skill.name)}', '${skill.location}')">
onclick="deleteSkill('${escapeHtml(folderName)}', '${skill.location}')">
<i data-lucide="trash-2" class="w-4 h-4"></i>
${t('common.delete')}
</button>
<button class="px-4 py-2 text-sm bg-primary text-primary-foreground rounded-lg hover:opacity-90 transition-opacity flex items-center gap-2"
onclick="editSkill('${escapeHtml(skill.name)}', '${skill.location}')">
onclick="editSkill('${escapeHtml(folderName)}', '${skill.location}')">
<i data-lucide="edit" class="w-4 h-4"></i>
${t('common.edit')}
</button>
@@ -525,7 +561,7 @@ function openSkillCreateModal() {
</div>
<!-- Footer -->
<div class="flex items-center justify-end gap-3 px-6 py-4 border-t border-border">
<div id="skillModalFooter" class="flex items-center justify-end gap-3 px-6 py-4 border-t border-border">
<button class="px-4 py-2 text-sm text-muted-foreground hover:text-foreground transition-colors"
onclick="closeSkillCreateModal()">
${t('common.cancel')}
@@ -588,16 +624,76 @@ function selectSkillLocation(location) {
function switchSkillCreateMode(mode) {
skillCreateState.mode = mode;
// Toggle visibility of mode sections
const importSection = document.getElementById('skillImportMode');
const cliGenerateSection = document.getElementById('skillCliGenerateMode');
const footerContainer = document.getElementById('skillModalFooter');
if (importSection) importSection.style.display = mode === 'import' ? 'block' : 'none';
if (cliGenerateSection) cliGenerateSection.style.display = mode === 'cli-generate' ? 'block' : 'none';
// Update mode button styles
const modeButtons = document.querySelectorAll('#skillCreateModal .mode-btn');
modeButtons.forEach(btn => {
const btnText = btn.querySelector('.font-medium')?.textContent || '';
const isImport = btnText.includes(t('skills.importFolder'));
const isCliGenerate = btnText.includes(t('skills.cliGenerate'));
if ((isImport && mode === 'import') || (isCliGenerate && mode === 'cli-generate')) {
btn.classList.remove('border-border', 'hover:border-primary/50');
btn.classList.add('border-primary', 'bg-primary/10');
} else {
btn.classList.remove('border-primary', 'bg-primary/10');
btn.classList.add('border-border', 'hover:border-primary/50');
}
});
// Update footer buttons
if (footerContainer) {
if (mode === 'import') {
footerContainer.innerHTML = `
<button class="px-4 py-2 text-sm text-muted-foreground hover:text-foreground transition-colors"
onclick="closeSkillCreateModal()">
${t('common.cancel')}
</button>
<button class="px-4 py-2 text-sm bg-primary/10 text-primary rounded-lg hover:bg-primary/20 transition-colors"
onclick="validateSkillImport()">
${t('skills.validate')}
</button>
<button class="px-4 py-2 text-sm bg-primary text-primary-foreground rounded-lg hover:opacity-90 transition-opacity"
onclick="createSkill()">
${t('skills.import')}
</button>
`;
} else {
footerContainer.innerHTML = `
<button class="px-4 py-2 text-sm text-muted-foreground hover:text-foreground transition-colors"
onclick="closeSkillCreateModal()">
${t('common.cancel')}
</button>
<button class="px-4 py-2 text-sm bg-primary text-primary-foreground rounded-lg hover:opacity-90 transition-opacity flex items-center gap-2"
onclick="createSkill()">
<i data-lucide="sparkles" class="w-4 h-4"></i>
${t('skills.generate')}
</button>
`;
}
if (typeof lucide !== 'undefined') lucide.createIcons();
}
}
function switchSkillGenerationType(type) {
skillCreateState.generationType = type;
// Toggle visibility of description area
const descriptionArea = document.getElementById('skillDescriptionArea');
if (descriptionArea) {
descriptionArea.style.display = type === 'description' ? 'block' : 'none';
}
// Update generation type button styles (only the description button is active, template is disabled)
// No need to update button styles since template button is disabled
}
function browseSkillFolder() {
@@ -817,3 +913,271 @@ async function createSkill() {
}
}
}
// ========== Skill File View/Edit Functions ==========
var skillFileEditorState = {
skillName: '',
fileName: '',
location: '',
content: '',
isEditing: false
};
async function viewSkillFile(skillName, fileName, location) {
try {
const response = await fetch(
'/api/skills/' + encodeURIComponent(skillName) + '/file?filename=' + encodeURIComponent(fileName) +
'&location=' + location + '&path=' + encodeURIComponent(projectPath)
);
if (!response.ok) {
const error = await response.json();
throw new Error(error.error || 'Failed to load file');
}
const data = await response.json();
skillFileEditorState = {
skillName,
fileName,
location,
content: data.content,
isEditing: false
};
renderSkillFileModal();
} catch (err) {
console.error('Failed to load skill file:', err);
if (window.showToast) {
showToast(err.message || t('skills.fileLoadError') || 'Failed to load file', 'error');
}
}
}
function editSkillFile(skillName, fileName, location) {
viewSkillFile(skillName, fileName, location).then(() => {
skillFileEditorState.isEditing = true;
renderSkillFileModal();
});
}
function renderSkillFileModal() {
// Remove existing modal if any
const existingModal = document.getElementById('skillFileModal');
if (existingModal) existingModal.remove();
const { skillName, fileName, content, isEditing, location } = skillFileEditorState;
const modalHtml = `
<div class="modal-overlay fixed inset-0 bg-black/50 z-[60] flex items-center justify-center" onclick="closeSkillFileModal(event)">
<div class="modal-dialog bg-card rounded-lg shadow-lg w-full max-w-4xl max-h-[90vh] mx-4 flex flex-col" onclick="event.stopPropagation()">
<!-- Header -->
<div class="flex items-center justify-between px-6 py-4 border-b border-border">
<div class="flex items-center gap-3">
<i data-lucide="file-text" class="w-5 h-5 text-primary"></i>
<div>
<h3 class="text-lg font-semibold text-foreground font-mono">${escapeHtml(fileName)}</h3>
<p class="text-xs text-muted-foreground">${escapeHtml(skillName)} / ${location}</p>
</div>
</div>
<div class="flex items-center gap-2">
${!isEditing ? `
<button class="px-3 py-1.5 text-sm bg-primary/10 text-primary rounded-lg hover:bg-primary/20 transition-colors flex items-center gap-1"
onclick="toggleSkillFileEdit()">
<i data-lucide="edit-2" class="w-4 h-4"></i>
${t('common.edit')}
</button>
` : ''}
<button class="w-8 h-8 flex items-center justify-center text-xl text-muted-foreground hover:text-foreground hover:bg-hover rounded"
onclick="closeSkillFileModal()">&times;</button>
</div>
</div>
<!-- Content -->
<div class="flex-1 overflow-hidden p-4">
${isEditing ? `
<textarea id="skillFileContent"
class="w-full h-full min-h-[400px] px-4 py-3 bg-background border border-border rounded-lg text-sm font-mono focus:outline-none focus:ring-2 focus:ring-primary resize-none"
spellcheck="false">${escapeHtml(content)}</textarea>
` : `
<div class="w-full h-full min-h-[400px] overflow-auto">
<pre class="px-4 py-3 bg-muted/30 rounded-lg text-sm font-mono whitespace-pre-wrap break-words">${escapeHtml(content)}</pre>
</div>
`}
</div>
<!-- Footer -->
${isEditing ? `
<div class="flex items-center justify-end gap-3 px-6 py-4 border-t border-border">
<button class="px-4 py-2 text-sm text-muted-foreground hover:text-foreground transition-colors"
onclick="cancelSkillFileEdit()">
${t('common.cancel')}
</button>
<button class="px-4 py-2 text-sm bg-primary text-primary-foreground rounded-lg hover:opacity-90 transition-opacity flex items-center gap-2"
onclick="saveSkillFile()">
<i data-lucide="save" class="w-4 h-4"></i>
${t('common.save')}
</button>
</div>
` : ''}
</div>
</div>
`;
const modalContainer = document.createElement('div');
modalContainer.id = 'skillFileModal';
modalContainer.innerHTML = modalHtml;
document.body.appendChild(modalContainer);
if (typeof lucide !== 'undefined') lucide.createIcons();
}
function closeSkillFileModal(event) {
if (event && event.target !== event.currentTarget) return;
const modal = document.getElementById('skillFileModal');
if (modal) modal.remove();
skillFileEditorState = { skillName: '', fileName: '', location: '', content: '', isEditing: false };
}
function toggleSkillFileEdit() {
skillFileEditorState.isEditing = true;
renderSkillFileModal();
}
function cancelSkillFileEdit() {
skillFileEditorState.isEditing = false;
renderSkillFileModal();
}
async function saveSkillFile() {
const contentTextarea = document.getElementById('skillFileContent');
if (!contentTextarea) return;
const newContent = contentTextarea.value;
const { skillName, fileName, location } = skillFileEditorState;
try {
const response = await fetch('/api/skills/' + encodeURIComponent(skillName) + '/file', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
fileName,
content: newContent,
location,
projectPath
})
});
if (!response.ok) {
const error = await response.json();
throw new Error(error.error || 'Failed to save file');
}
// Update state and close edit mode
skillFileEditorState.content = newContent;
skillFileEditorState.isEditing = false;
renderSkillFileModal();
// Refresh skill detail if SKILL.md was edited
if (fileName === 'SKILL.md') {
await loadSkillsData();
// Reload current skill detail
if (selectedSkill) {
await showSkillDetail(skillName, location);
}
}
if (window.showToast) {
showToast(t('skills.fileSaved') || 'File saved successfully', 'success');
}
} catch (err) {
console.error('Failed to save skill file:', err);
if (window.showToast) {
showToast(err.message || t('skills.fileSaveError') || 'Failed to save file', 'error');
}
}
}
// ========== Skill Folder Expansion Functions ==========
var expandedFolders = new Set();
async function toggleSkillFolder(skillName, subPath, location, element) {
const fileItem = element.closest('.skill-file-item');
if (!fileItem) return;
const contentsDiv = fileItem.querySelector('.folder-contents');
const chevron = element.querySelector('.folder-chevron');
const folderIcon = element.querySelector('.folder-icon');
const folderKey = `${skillName}:${subPath}:${location}`;
if (expandedFolders.has(folderKey)) {
// Collapse folder
expandedFolders.delete(folderKey);
contentsDiv.classList.add('hidden');
contentsDiv.innerHTML = '';
if (chevron) chevron.style.transform = '';
if (folderIcon) folderIcon.setAttribute('data-lucide', 'folder');
if (typeof lucide !== 'undefined') lucide.createIcons();
} else {
// Expand folder
try {
const response = await fetch(
'/api/skills/' + encodeURIComponent(skillName) + '/dir?subpath=' + encodeURIComponent(subPath) +
'&location=' + location + '&path=' + encodeURIComponent(projectPath)
);
if (!response.ok) {
const error = await response.json();
throw new Error(error.error || 'Failed to load folder');
}
const data = await response.json();
expandedFolders.add(folderKey);
if (chevron) chevron.style.transform = 'rotate(90deg)';
if (folderIcon) folderIcon.setAttribute('data-lucide', 'folder-open');
// Render folder contents
contentsDiv.innerHTML = data.files.map(file => {
const filePath = file.path;
const isDir = file.isDirectory;
return `
<div class="skill-file-item" data-path="${escapeHtml(filePath)}">
<div class="flex items-center justify-between p-2 bg-muted/30 rounded-lg cursor-pointer hover:bg-muted/50 transition-colors"
onclick="${isDir ? `toggleSkillFolder('${escapeHtml(skillName)}', '${escapeHtml(filePath)}', '${location}', this)` : `viewSkillFile('${escapeHtml(skillName)}', '${escapeHtml(filePath)}', '${location}')`}">
<div class="flex items-center gap-2">
<i data-lucide="${isDir ? 'folder' : 'file-text'}" class="w-4 h-4 text-muted-foreground ${isDir ? 'folder-icon' : ''}"></i>
<span class="text-sm font-mono text-foreground">${escapeHtml(file.name)}</span>
${isDir ? '<i data-lucide="chevron-right" class="w-3 h-3 text-muted-foreground folder-chevron transition-transform"></i>' : ''}
</div>
${!isDir ? `
<div class="flex items-center gap-1">
<button class="p-1 text-muted-foreground hover:text-foreground hover:bg-muted rounded transition-colors"
onclick="event.stopPropagation(); editSkillFile('${escapeHtml(skillName)}', '${escapeHtml(filePath)}', '${location}')"
title="${t('common.edit')}">
<i data-lucide="edit-2" class="w-3.5 h-3.5"></i>
</button>
</div>
` : ''}
</div>
<div class="folder-contents hidden ml-4 mt-1 space-y-1"></div>
</div>
`;
}).join('');
contentsDiv.classList.remove('hidden');
if (typeof lucide !== 'undefined') lucide.createIcons();
} catch (err) {
console.error('Failed to load folder contents:', err);
if (window.showToast) {
showToast(err.message || 'Failed to load folder', 'error');
}
}
}
}

View File

@@ -5,6 +5,7 @@
import { z } from 'zod';
import type { ToolSchema, ToolResult } from '../types/tool.js';
import type { HistoryIndexEntry } from './cli-history-store.js';
import { spawn, ChildProcess } from 'child_process';
import { existsSync, mkdirSync, readFileSync, writeFileSync, unlinkSync, readdirSync, statSync } from 'fs';
import { join, relative } from 'path';
@@ -1982,6 +1983,7 @@ export async function getEnrichedConversation(baseDir: string, ccwId: string) {
/**
* Get history with native session info
* Supports recursive querying of child projects
*/
export async function getHistoryWithNativeInfo(baseDir: string, options?: {
limit?: number;
@@ -1990,9 +1992,75 @@ export async function getHistoryWithNativeInfo(baseDir: string, options?: {
status?: string | null;
category?: ExecutionCategory | null;
search?: string | null;
recursive?: boolean;
}) {
const { limit = 50, recursive = false, ...queryOptions } = options || {};
// Non-recursive mode: query single project
if (!recursive) {
const store = await getSqliteStore(baseDir);
return store.getHistoryWithNativeInfo({ limit, ...queryOptions });
}
// Recursive mode: aggregate data from parent and all child projects
const { scanChildProjectsAsync } = await import('../config/storage-paths.js');
const childProjects = await scanChildProjectsAsync(baseDir);
// Use the same type as store.getHistoryWithNativeInfo returns
type ExecutionWithNativeAndSource = HistoryIndexEntry & {
hasNativeSession: boolean;
nativeSessionId?: string;
nativeSessionPath?: string;
};
const allExecutions: ExecutionWithNativeAndSource[] = [];
let totalCount = 0;
// Query parent project
try {
const parentStore = await getSqliteStore(baseDir);
const parentResult = parentStore.getHistoryWithNativeInfo({ limit, ...queryOptions });
totalCount += parentResult.total;
for (const exec of parentResult.executions) {
allExecutions.push({ ...exec, sourceDir: baseDir });
}
} catch (error) {
if (process.env.DEBUG) {
console.error(`[CLI History] Failed to query parent project ${baseDir}:`, error);
}
}
// Query all child projects
for (const child of childProjects) {
try {
const childStore = await getSqliteStore(child.projectPath);
const childResult = childStore.getHistoryWithNativeInfo({ limit, ...queryOptions });
totalCount += childResult.total;
for (const exec of childResult.executions) {
allExecutions.push({ ...exec, sourceDir: child.projectPath });
}
} catch (error) {
if (process.env.DEBUG) {
console.error(`[CLI History] Failed to query child project ${child.projectPath}:`, error);
}
}
}
// Sort by updated_at descending and apply limit
allExecutions.sort((a, b) => {
const timeA = a.updated_at ? new Date(a.updated_at).getTime() : new Date(a.timestamp).getTime();
const timeB = b.updated_at ? new Date(b.updated_at).getTime() : new Date(b.timestamp).getTime();
return timeB - timeA;
});
const limitedExecutions = allExecutions.slice(0, limit);
return {
total: totalCount,
count: limitedExecutions.length,
executions: limitedExecutions
};
}
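A usage sketch, assuming the caller passes the project root as baseDir; sourceDir is attached to each entry at runtime (it is not part of the declared type) to identify which project, parent or child, the entry came from:
// Aggregate the 20 most recent executions across the parent project and
// every child project discovered by scanChildProjectsAsync.
const history = await getHistoryWithNativeInfo('/path/to/project', {
  limit: 20,
  recursive: true,
});

console.log(`showing ${history.count} of ${history.total} executions`);
for (const exec of history.executions) {
  console.log(exec.sourceDir, exec.timestamp, exec.hasNativeSession);
}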
// Export types

View File

@@ -75,6 +75,8 @@ interface ReadyStatus {
interface SemanticStatus {
available: boolean;
backend?: string;
accelerator?: string;
providers?: string[];
error?: string;
}
@@ -190,18 +192,39 @@ async function checkSemanticStatus(): Promise<SemanticStatus> {
return { available: false, error: 'CodexLens not installed' };
}
// Check semantic module availability and accelerator info
return new Promise((resolve) => {
const checkCode = `
import sys
import json
try:
from codexlens.semantic import SEMANTIC_AVAILABLE, SEMANTIC_BACKEND
result = {"available": SEMANTIC_AVAILABLE, "backend": SEMANTIC_BACKEND if SEMANTIC_AVAILABLE else None}
# Get ONNX providers for accelerator info
try:
import onnxruntime
providers = onnxruntime.get_available_providers()
result["providers"] = providers
# Determine accelerator type
if "CUDAExecutionProvider" in providers or "TensorrtExecutionProvider" in providers:
result["accelerator"] = "CUDA"
elif "DmlExecutionProvider" in providers:
result["accelerator"] = "DirectML"
elif "CoreMLExecutionProvider" in providers:
result["accelerator"] = "CoreML"
elif "ROCMExecutionProvider" in providers:
result["accelerator"] = "ROCm"
else:
result["accelerator"] = "CPU"
except:
result["providers"] = []
result["accelerator"] = "CPU"
print(json.dumps(result))
except Exception as e:
print(f"error:{e}")
print(json.dumps({"available": False, "error": str(e)}))
`;
const child = spawn(VENV_PYTHON, ['-c', checkCode], {
stdio: ['ignore', 'pipe', 'pipe'],
@@ -220,12 +243,16 @@ except Exception as e:
child.on('close', (code) => {
const output = stdout.trim();
try {
const result = JSON.parse(output);
resolve({
available: result.available || false,
backend: result.backend,
accelerator: result.accelerator || 'CPU',
providers: result.providers || [],
error: result.error
});
} catch {
resolve({ available: false, error: output || stderr || 'Unknown error' });
}
});
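On success the probe prints a single JSON line that the close handler parses, e.g. {"available": true, "backend": "fastembed", "providers": ["CUDAExecutionProvider", "CPUExecutionProvider"], "accelerator": "CUDA"} (illustrative values); any unparseable output falls through to the catch branch and is surfaced as an error.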
@@ -237,10 +264,66 @@ except Exception as e:
}
/**
* GPU acceleration mode for semantic search
*/
type GpuMode = 'cpu' | 'cuda' | 'directml';
/**
* Detect available GPU acceleration
* @returns Detected GPU mode and info
*/
async function detectGpuSupport(): Promise<{ mode: GpuMode; available: GpuMode[]; info: string }> {
const available: GpuMode[] = ['cpu'];
let detectedInfo = 'CPU only';
// Check for NVIDIA GPU (CUDA)
try {
if (process.platform === 'win32') {
execSync('nvidia-smi', { stdio: 'pipe' });
available.push('cuda');
detectedInfo = 'NVIDIA GPU detected (CUDA available)';
} else {
execSync('which nvidia-smi', { stdio: 'pipe' });
available.push('cuda');
detectedInfo = 'NVIDIA GPU detected (CUDA available)';
}
} catch {
// NVIDIA not available
}
// On Windows, DirectML is always available if DirectX 12 is supported
if (process.platform === 'win32') {
try {
// Check for DirectX 12 support via dxdiag or registry
// DirectML works on most modern Windows 10/11 systems
available.push('directml');
if (available.includes('cuda')) {
detectedInfo = 'NVIDIA GPU detected (CUDA & DirectML available)';
} else {
detectedInfo = 'DirectML available (Windows GPU acceleration)';
}
} catch {
// DirectML check failed
}
}
// Recommend best available mode
let recommendedMode: GpuMode = 'cpu';
if (process.platform === 'win32' && available.includes('directml')) {
recommendedMode = 'directml'; // DirectML is easier on Windows (no CUDA toolkit needed)
} else if (available.includes('cuda')) {
recommendedMode = 'cuda';
}
return { mode: recommendedMode, available, info: detectedInfo };
}
/**
* Install semantic search dependencies with optional GPU acceleration
* @param gpuMode - GPU acceleration mode: 'cpu', 'cuda', or 'directml'
* @returns Bootstrap result
*/
async function installSemantic(gpuMode: GpuMode = 'cpu'): Promise<BootstrapResult> {
// First ensure CodexLens is installed
const venvStatus = await checkVenvStatus();
if (!venvStatus.ready) {
@@ -252,42 +335,106 @@ async function installSemantic(): Promise<BootstrapResult> {
? join(CODEXLENS_VENV, 'Scripts', 'pip.exe')
: join(CODEXLENS_VENV, 'bin', 'pip');
// IMPORTANT: Uninstall all onnxruntime variants first to prevent conflicts
// Having multiple onnxruntime packages causes provider detection issues
const onnxVariants = ['onnxruntime', 'onnxruntime-gpu', 'onnxruntime-directml'];
console.log(`[CodexLens] Cleaning up existing ONNX Runtime packages...`);
for (const pkg of onnxVariants) {
try {
execSync(`"${pipPath}" uninstall ${pkg} -y`, { stdio: 'pipe' });
console.log(`[CodexLens] Removed ${pkg}`);
} catch {
// Package not installed, ignore
}
}
// Build package list based on GPU mode
const packages = ['numpy>=1.24', 'fastembed>=0.5', 'hnswlib>=0.8.0'];
let modeDescription = 'CPU (ONNX Runtime)';
let onnxPackage = 'onnxruntime>=1.18.0'; // Default CPU
if (gpuMode === 'cuda') {
onnxPackage = 'onnxruntime-gpu>=1.18.0';
modeDescription = 'NVIDIA CUDA GPU acceleration';
} else if (gpuMode === 'directml') {
onnxPackage = 'onnxruntime-directml>=1.18.0';
modeDescription = 'Windows DirectML GPU acceleration';
}
return new Promise((resolve) => {
console.log(`[CodexLens] Installing semantic search dependencies...`);
console.log(`[CodexLens] Mode: ${modeDescription}`);
console.log(`[CodexLens] ONNX Runtime: ${onnxPackage}`);
console.log(`[CodexLens] Packages: ${packages.join(', ')}`);
// Install ONNX Runtime first with force-reinstall to ensure clean state
const installOnnx = spawn(pipPath, ['install', '--force-reinstall', onnxPackage], {
stdio: ['ignore', 'pipe', 'pipe'],
timeout: 600000, // 10 minutes for GPU packages
});
let onnxStdout = '';
let onnxStderr = '';
installOnnx.stdout.on('data', (data) => {
onnxStdout += data.toString();
const line = data.toString().trim();
if (line.includes('Downloading') || line.includes('Installing')) {
console.log(`[CodexLens] ${line}`);
}
});
installOnnx.stderr.on('data', (data) => {
onnxStderr += data.toString();
});
installOnnx.on('close', (onnxCode) => {
if (onnxCode !== 0) {
resolve({ success: false, error: `Failed to install ${onnxPackage}: ${onnxStderr || onnxStdout}` });
return;
}
console.log(`[CodexLens] ${onnxPackage} installed successfully`);
// Now install remaining packages
const child = spawn(pipPath, ['install', ...packages], {
stdio: ['ignore', 'pipe', 'pipe'],
timeout: 600000,
});
let stdout = '';
let stderr = '';
child.stdout.on('data', (data) => {
stdout += data.toString();
const line = data.toString().trim();
if (line.includes('Downloading') || line.includes('Installing') || line.includes('Collecting')) {
console.log(`[CodexLens] ${line}`);
}
});
child.stderr.on('data', (data) => {
stderr += data.toString();
});
child.on('close', (code) => {
if (code === 0) {
console.log(`[CodexLens] Semantic dependencies installed successfully (${gpuMode} mode)`);
resolve({ success: true, message: `Installed with ${modeDescription}` });
} else {
resolve({ success: false, error: `Installation failed: ${stderr || stdout}` });
}
});
child.on('error', (err) => {
resolve({ success: false, error: `Failed to run pip: ${err.message}` });
});
});
installOnnx.on('error', (err) => {
resolve({ success: false, error: `Failed to install ONNX Runtime: ${err.message}` });
});
});
}
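A usage sketch combining the two exports: detect the best available accelerator, then install the matching ONNX Runtime build (output strings illustrative):
// detectGpuSupport recommends 'directml' on Windows, 'cuda' when nvidia-smi
// is present, and 'cpu' otherwise; installSemantic picks the wheel to match.
const gpu = await detectGpuSupport();
console.log(gpu.info); // e.g. "NVIDIA GPU detected (CUDA & DirectML available)"

const result = await installSemantic(gpu.mode);
if (!result.success) {
  console.error('Semantic install failed:', result.error);
}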
@@ -1126,7 +1273,8 @@ function isIndexingInProgress(): boolean {
export type { ProgressInfo, ExecuteOptions };
// Export for direct usage
export { ensureReady, executeCodexLens, checkVenvStatus, bootstrapVenv, checkSemanticStatus, installSemantic, detectGpuSupport, uninstallCodexLens, cancelIndexing, isIndexingInProgress };
export type { GpuMode };
// Backward-compatible export for tests
export const codexLensTool = {

View File

@@ -244,6 +244,7 @@ interface SearchMetadata {
warning?: string;
note?: string;
index_status?: 'indexed' | 'not_indexed' | 'partial';
fallback?: string; // Fallback mode used (e.g., 'fuzzy')
fallback_history?: string[];
suggested_weights?: Record<string, number>;
// Tokenization metadata (ripgrep mode)
@@ -774,6 +775,7 @@ async function executeRipgrepMode(params: Params): Promise<SearchResult> {
let stdout = '';
let stderr = '';
let resultLimitReached = false;
child.stdout.on('data', (data) => {
stdout += data.toString();
@@ -786,8 +788,16 @@ async function executeRipgrepMode(params: Params): Promise<SearchResult> {
child.on('close', (code) => {
const results: ExactMatch[] = [];
const lines = stdout.split('\n').filter((line) => line.trim());
// Limit total results to prevent memory overflow (--max-count only limits per-file)
const effectiveLimit = maxResults > 0 ? maxResults : 500;
for (const line of lines) {
// Stop collecting if we've reached the limit
if (results.length >= effectiveLimit) {
resultLimitReached = true;
break;
}
try {
const item = JSON.parse(line);
@@ -817,6 +827,15 @@ async function executeRipgrepMode(params: Params): Promise<SearchResult> {
const scoredResults = tokens.length > 1 ? scoreByTokenMatch(results, tokens) : results;
if (code === 0 || code === 1 || (isWindowsDeviceError && scoredResults.length > 0)) {
// Build warning message for various conditions
const warnings: string[] = [];
if (resultLimitReached) {
warnings.push(`Result limit reached (${effectiveLimit}). Use a more specific query or increase limit.`);
}
if (isWindowsDeviceError) {
warnings.push('Some Windows device files were skipped');
}
resolve({
success: true,
results: scoredResults,
@@ -827,7 +846,7 @@ async function executeRipgrepMode(params: Params): Promise<SearchResult> {
query,
tokens: tokens.length > 1 ? tokens : undefined, // Include tokens in metadata for debugging
tokenized: tokens.length > 1,
...(warnings.length > 0 && { warning: warnings.join('; ') }),
},
});
} else if (isWindowsDeviceError && results.length === 0) {
@@ -923,6 +942,46 @@ async function executeCodexLensExactMode(params: Params): Promise<SearchResult>
// Keep empty results
}
// Fallback to fuzzy mode if exact returns no results
if (results.length === 0) {
const fuzzyArgs = ['search', query, '--limit', maxResults.toString(), '--mode', 'fuzzy', '--json'];
if (enrich) {
fuzzyArgs.push('--enrich');
}
const fuzzyResult = await executeCodexLens(fuzzyArgs, { cwd: path });
if (fuzzyResult.success) {
try {
const parsed = JSON.parse(stripAnsi(fuzzyResult.output || '{}'));
const data = parsed.result?.results || parsed.results || parsed;
results = (Array.isArray(data) ? data : []).map((item: any) => ({
file: item.path || item.file,
score: item.score || 0,
content: item.excerpt || item.content || '',
symbol: item.symbol || null,
}));
} catch {
// Keep empty results
}
if (results.length > 0) {
return {
success: true,
results,
metadata: {
mode: 'exact',
backend: 'codexlens',
count: results.length,
query,
warning: indexStatus.warning,
note: 'No exact matches found, showing fuzzy results',
fallback: 'fuzzy',
},
};
}
}
}
return {
success: true,
results,

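Taken together, the smart-search hunks above cap ripgrep output at a total result limit and give exact mode a fuzzy safety net. A minimal Python sketch of the fallback pattern, assuming a `codexlens` binary on PATH, the `--mode`/`--limit`/`--json` flags shown in the diff, and the JSON envelope the TypeScript parses (`result.results`):

```python
import json
import subprocess

def search_with_fallback(query: str, path: str, limit: int = 20) -> dict:
    """Try exact search first; fall back to fuzzy only when exact is empty."""
    for mode in ("exact", "fuzzy"):
        proc = subprocess.run(
            ["codexlens", "search", query, "--limit", str(limit),
             "--mode", mode, "--json"],
            cwd=path, capture_output=True, text=True,
        )
        parsed = json.loads(proc.stdout or "{}")
        results = (parsed.get("result", {}).get("results")
                   or parsed.get("results") or [])
        if results:
            metadata = {"mode": "exact", "count": len(results)}
            if mode == "fuzzy":
                # Mirror the metadata fields added in the hunk above
                metadata["fallback"] = "fuzzy"
                metadata["note"] = "No exact matches found, showing fuzzy results"
            return {"results": results, "metadata": metadata}
    return {"results": [], "metadata": {"mode": "exact", "count": 0}}
```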
View File

@@ -31,6 +31,24 @@ semantic = [
"hnswlib>=0.8.0",
]
# GPU acceleration for semantic search (NVIDIA CUDA)
# Install with: pip install codexlens[semantic-gpu]
semantic-gpu = [
"numpy>=1.24",
"fastembed>=0.2",
"hnswlib>=0.8.0",
"onnxruntime-gpu>=1.15.0", # CUDA support
]
# GPU acceleration for Windows (DirectML - supports NVIDIA/AMD/Intel)
# Install with: pip install codexlens[semantic-directml]
semantic-directml = [
"numpy>=1.24",
"fastembed>=0.2",
"hnswlib>=0.8.0",
"onnxruntime-directml>=1.15.0", # DirectML support
]
# Encoding detection for non-UTF8 files
encoding = [
"chardet>=5.0",

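The two new extras differ only in the ONNX Runtime backend they pull in. A hedged sketch of choosing between them at install time; the helper name is hypothetical, while the extras names come straight from the `pyproject.toml` hunk above:

```python
import platform

def gpu_extra_for_platform() -> str:
    """Hypothetical helper: pick the extra matching the local GPU stack."""
    if platform.system() == "Windows":
        # DirectML covers NVIDIA, AMD, and Intel GPUs on Windows
        return "codexlens[semantic-directml]"
    # Elsewhere, CUDA via onnxruntime-gpu is the supported route
    return "codexlens[semantic-gpu]"

print(f"pip install '{gpu_extra_for_platform()}'")
```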
View File

@@ -18,6 +18,17 @@ Requires-Dist: pathspec>=0.11
Provides-Extra: semantic
Requires-Dist: numpy>=1.24; extra == "semantic"
Requires-Dist: fastembed>=0.2; extra == "semantic"
Requires-Dist: hnswlib>=0.8.0; extra == "semantic"
Provides-Extra: semantic-gpu
Requires-Dist: numpy>=1.24; extra == "semantic-gpu"
Requires-Dist: fastembed>=0.2; extra == "semantic-gpu"
Requires-Dist: hnswlib>=0.8.0; extra == "semantic-gpu"
Requires-Dist: onnxruntime-gpu>=1.15.0; extra == "semantic-gpu"
Provides-Extra: semantic-directml
Requires-Dist: numpy>=1.24; extra == "semantic-directml"
Requires-Dist: fastembed>=0.2; extra == "semantic-directml"
Requires-Dist: hnswlib>=0.8.0; extra == "semantic-directml"
Requires-Dist: onnxruntime-directml>=1.15.0; extra == "semantic-directml"
Provides-Extra: encoding
Requires-Dist: chardet>=5.0; extra == "encoding"
Provides-Extra: full

View File

@@ -11,8 +11,11 @@ src/codexlens/entities.py
src/codexlens/errors.py
src/codexlens/cli/__init__.py
src/codexlens/cli/commands.py
src/codexlens/cli/embedding_manager.py
src/codexlens/cli/model_manager.py
src/codexlens/cli/output.py
src/codexlens/indexing/__init__.py
src/codexlens/indexing/symbol_extractor.py
src/codexlens/parsers/__init__.py
src/codexlens/parsers/encoding.py
src/codexlens/parsers/factory.py
@@ -20,15 +23,16 @@ src/codexlens/parsers/tokenizer.py
src/codexlens/parsers/treesitter_parser.py
src/codexlens/search/__init__.py
src/codexlens/search/chain_search.py
src/codexlens/search/enrichment.py
src/codexlens/search/hybrid_search.py
src/codexlens/search/query_parser.py
src/codexlens/search/ranking.py
src/codexlens/semantic/__init__.py
src/codexlens/semantic/ann_index.py
src/codexlens/semantic/chunker.py
src/codexlens/semantic/code_extractor.py
src/codexlens/semantic/embedder.py
src/codexlens/semantic/graph_analyzer.py
src/codexlens/semantic/llm_enhancer.py
src/codexlens/semantic/gpu_support.py
src/codexlens/semantic/vector_store.py
src/codexlens/storage/__init__.py
src/codexlens/storage/dir_index.py
@@ -42,38 +46,38 @@ src/codexlens/storage/sqlite_utils.py
src/codexlens/storage/migrations/__init__.py
src/codexlens/storage/migrations/migration_001_normalize_keywords.py
src/codexlens/storage/migrations/migration_002_add_token_metadata.py
src/codexlens/storage/migrations/migration_003_code_relationships.py
src/codexlens/storage/migrations/migration_004_dual_fts.py
src/codexlens/storage/migrations/migration_005_cleanup_unused_fields.py
tests/test_chain_search_engine.py
tests/test_ann_index.py
tests/test_cli_hybrid_search.py
tests/test_cli_output.py
tests/test_code_extractor.py
tests/test_config.py
tests/test_dual_fts.py
tests/test_encoding.py
tests/test_enrichment.py
tests/test_entities.py
tests/test_errors.py
tests/test_file_cache.py
tests/test_graph_analyzer.py
tests/test_graph_cli.py
tests/test_graph_storage.py
tests/test_hybrid_chunker.py
tests/test_hybrid_search_e2e.py
tests/test_incremental_indexing.py
tests/test_llm_enhancer.py
tests/test_parser_integration.py
tests/test_parsers.py
tests/test_performance_optimizations.py
tests/test_pure_vector_search.py
tests/test_query_parser.py
tests/test_result_grouping.py
tests/test_rrf_fusion.py
tests/test_schema_cleanup_migration.py
tests/test_search_comparison.py
tests/test_search_comprehensive.py
tests/test_search_full_coverage.py
tests/test_search_performance.py
tests/test_semantic.py
tests/test_semantic_search.py
tests/test_storage.py
tests/test_symbol_extractor.py
tests/test_token_chunking.py
tests/test_token_storage.py
tests/test_tokenizer.py

View File

@@ -16,3 +16,16 @@ tiktoken>=0.5.0
[semantic]
numpy>=1.24
fastembed>=0.2
hnswlib>=0.8.0
[semantic-directml]
numpy>=1.24
fastembed>=0.2
hnswlib>=0.8.0
onnxruntime-directml>=1.15.0
[semantic-gpu]
numpy>=1.24
fastembed>=0.2
hnswlib>=0.8.0
onnxruntime-gpu>=1.15.0

View File

@@ -35,8 +35,17 @@ from .output import (
app = typer.Typer(help="CodexLens CLI — local code indexing and search.")
def _configure_logging(verbose: bool) -> None:
level = logging.DEBUG if verbose else logging.INFO
def _configure_logging(verbose: bool, json_mode: bool = False) -> None:
"""Configure logging level.
In JSON mode, suppress INFO logs to keep stderr clean for error parsing.
Only WARNING and above are shown to avoid mixing logs with JSON output.
"""
if json_mode and not verbose:
# In JSON mode, suppress INFO logs to keep stderr clean
level = logging.WARNING
else:
level = logging.DEBUG if verbose else logging.INFO
logging.basicConfig(level=level, format="%(levelname)s %(message)s")
@@ -95,7 +104,7 @@ def init(
If semantic search dependencies are installed, automatically generates embeddings
after indexing completes. Use --no-embeddings to skip this step.
"""
_configure_logging(verbose)
_configure_logging(verbose, json_mode)
config = Config()
languages = _parse_languages(language)
base_path = path.expanduser().resolve()
@@ -314,7 +323,7 @@ def search(
# Force hybrid mode
codexlens search "authentication" --mode hybrid
"""
_configure_logging(verbose)
_configure_logging(verbose, json_mode)
search_path = path.expanduser().resolve()
# Validate mode
@@ -487,7 +496,7 @@ def symbol(
verbose: bool = typer.Option(False, "--verbose", "-v", help="Enable debug logging."),
) -> None:
"""Look up symbols by name and optional kind."""
_configure_logging(verbose)
_configure_logging(verbose, json_mode)
search_path = path.expanduser().resolve()
registry: RegistryStore | None = None
@@ -538,7 +547,7 @@ def inspect(
verbose: bool = typer.Option(False, "--verbose", "-v", help="Enable debug logging."),
) -> None:
"""Analyze a single file and display symbols."""
_configure_logging(verbose)
_configure_logging(verbose, json_mode)
config = Config()
factory = ParserFactory(config)
@@ -588,7 +597,7 @@ def status(
verbose: bool = typer.Option(False, "--verbose", "-v", help="Enable debug logging."),
) -> None:
"""Show index status and configuration."""
_configure_logging(verbose)
_configure_logging(verbose, json_mode)
registry: RegistryStore | None = None
try:
@@ -648,7 +657,7 @@ def status(
# Embedding manager not available
pass
except Exception as e:
logger.debug(f"Failed to get embeddings status: {e}")
logging.debug(f"Failed to get embeddings status: {e}")
stats = {
"index_root": str(index_root),
@@ -737,7 +746,7 @@ def projects(
- show <path>: Show details for a specific project
- remove <path>: Remove a project from the registry
"""
_configure_logging(verbose)
_configure_logging(verbose, json_mode)
registry: RegistryStore | None = None
try:
@@ -892,7 +901,7 @@ def config(
Config keys:
- index_dir: Directory to store indexes (default: ~/.codexlens/indexes)
"""
_configure_logging(verbose)
_configure_logging(verbose, json_mode)
config_file = Path.home() / ".codexlens" / "config.json"
@@ -1057,7 +1066,7 @@ def migrate(
This is a safe operation that preserves all existing data.
Progress is shown during migration.
"""
_configure_logging(verbose)
_configure_logging(verbose, json_mode)
base_path = path.expanduser().resolve()
registry: RegistryStore | None = None
@@ -1183,7 +1192,7 @@ def clean(
With path, removes that project's indexes.
With --all, removes all indexes (use with caution).
"""
_configure_logging(verbose)
_configure_logging(verbose, json_mode)
try:
mapper = PathMapper()
@@ -1329,7 +1338,7 @@ def semantic_list(
Shows files that have LLM-generated summaries and keywords.
Results are aggregated from all index databases in the project.
"""
_configure_logging(verbose)
_configure_logging(verbose, json_mode)
base_path = path.expanduser().resolve()
registry: Optional[RegistryStore] = None
@@ -1798,7 +1807,7 @@ def embeddings_generate(
codexlens embeddings-generate ~/.codexlens/indexes/project/_index.db # Specific index
codexlens embeddings-generate ~/projects/my-app --model fast --force # Regenerate with fast model
"""
_configure_logging(verbose)
_configure_logging(verbose, json_mode)
from codexlens.cli.embedding_manager import generate_embeddings, generate_embeddings_recursive

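Every subcommand now threads `json_mode` into `_configure_logging`, because JSON consumers read stdout as data and treat stderr as an error channel, so INFO chatter must not land there. A standalone sketch of the same policy, mirroring the function above:

```python
import logging

def configure_logging(verbose: bool, json_mode: bool = False) -> None:
    # JSON mode keeps stderr at WARNING+ unless --verbose is passed, so a
    # wrapper can treat any stderr output as an actual problem to surface.
    if json_mode and not verbose:
        level = logging.WARNING
    else:
        level = logging.DEBUG if verbose else logging.INFO
    logging.basicConfig(level=level, format="%(levelname)s %(message)s")

configure_logging(verbose=False, json_mode=True)
logging.info("indexing 42 files")         # suppressed: stays out of stderr
logging.warning("index schema outdated")  # still visible
```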
View File

@@ -20,8 +20,8 @@ except ImportError:
logger = logging.getLogger(__name__)
# Embedding batch size - larger values improve throughput on modern hardware
# Default 64 balances memory usage and GPU/CPU utilization
EMBEDDING_BATCH_SIZE = 64 # Increased from 8 for better performance
# Benchmark: 256 gives ~2.35x speedup over 64 with DirectML GPU acceleration
EMBEDDING_BATCH_SIZE = 256 # Optimized from 64 based on batch size benchmarks
def _generate_chunks_from_cursor(
@@ -275,10 +275,25 @@ def generate_embeddings(
total_chunks_created = 0
total_files_processed = 0
FILE_BATCH_SIZE = 100 # Process 100 files at a time
# EMBEDDING_BATCH_SIZE is defined at module level (default: 64)
# EMBEDDING_BATCH_SIZE is defined at module level (default: 256)
try:
with VectorStore(index_path) as vector_store:
# Check model compatibility with existing embeddings
if not force:
is_compatible, warning = vector_store.check_model_compatibility(
model_profile, embedder.model_name, embedder.embedding_dim
)
if not is_compatible:
return {
"success": False,
"error": warning,
}
# Set/update model configuration for this index
vector_store.set_model_config(
model_profile, embedder.model_name, embedder.embedding_dim
)
# Use bulk insert mode for efficient batch ANN index building
# This defers ANN updates until end_bulk_insert() is called
with vector_store.bulk_insert():

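Note the ordering in this hunk: `check_model_compatibility` runs before `set_model_config`, so stored metadata is never overwritten by an incompatible run. Condensed into a sketch against the `VectorStore` methods introduced later in this diff:

```python
def ensure_model_config(vector_store, profile: str, model_name: str,
                        dim: int, force: bool = False) -> dict:
    """Refuse to mix embeddings from different models unless force is set."""
    if not force:
        ok, warning = vector_store.check_model_compatibility(profile, model_name, dim)
        if not ok:
            return {"success": False, "error": warning}
    # Only now record (or update) the model that will generate new embeddings
    vector_store.set_model_config(profile, model_name, dim)
    return {"success": True}
```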
View File

@@ -14,34 +14,63 @@ except ImportError:
# Model profiles with metadata
# Note: 768d is max recommended dimension for optimal performance/quality balance
# 1024d models are available but not recommended due to higher resource usage
# cache_name: The actual Hugging Face repo name used by fastembed for ONNX caching
MODEL_PROFILES = {
"fast": {
"model_name": "BAAI/bge-small-en-v1.5",
"cache_name": "qdrant/bge-small-en-v1.5-onnx-q", # fastembed uses ONNX version
"dimensions": 384,
"size_mb": 80,
"description": "Fast, lightweight, English-optimized",
"use_case": "Quick prototyping, resource-constrained environments",
"recommended": True,
},
"base": {
"model_name": "BAAI/bge-base-en-v1.5",
"cache_name": "qdrant/bge-base-en-v1.5-onnx-q", # fastembed uses ONNX version
"dimensions": 768,
"size_mb": 220,
"description": "General purpose, good balance of speed and quality",
"use_case": "General text search, documentation",
"recommended": True,
},
"code": {
"model_name": "jinaai/jina-embeddings-v2-base-code",
"cache_name": "jinaai/jina-embeddings-v2-base-code", # Uses original name
"dimensions": 768,
"size_mb": 150,
"description": "Code-optimized, best for programming languages",
"use_case": "Open source projects, code semantic search",
"recommended": True,
},
"minilm": {
"model_name": "sentence-transformers/all-MiniLM-L6-v2",
"cache_name": "qdrant/all-MiniLM-L6-v2-onnx", # fastembed uses ONNX version
"dimensions": 384,
"size_mb": 90,
"description": "Popular lightweight model, good quality",
"use_case": "General purpose, low resource environments",
"recommended": True,
},
"multilingual": {
"model_name": "intfloat/multilingual-e5-large",
"cache_name": "qdrant/multilingual-e5-large-onnx", # fastembed uses ONNX version
"dimensions": 1024,
"size_mb": 1000,
"description": "Multilingual + code support",
"description": "Multilingual + code support (high resource usage)",
"use_case": "Enterprise multilingual projects",
"recommended": False, # 1024d not recommended
},
"balanced": {
"model_name": "mixedbread-ai/mxbai-embed-large-v1",
"cache_name": "mixedbread-ai/mxbai-embed-large-v1", # Uses original name
"dimensions": 1024,
"size_mb": 600,
"description": "High accuracy, general purpose",
"description": "High accuracy, general purpose (high resource usage)",
"use_case": "High-quality semantic search, balanced performance",
"recommended": False, # 1024d not recommended
},
}
@@ -65,6 +94,23 @@ def get_cache_dir() -> Path:
return cache_dir
def _get_model_cache_path(cache_dir: Path, info: Dict) -> Path:
"""Get the actual cache path for a model.
fastembed uses ONNX versions of models with different names than the original.
This function returns the correct path based on the cache_name field.
Args:
cache_dir: The fastembed cache directory
info: Model profile info dictionary
Returns:
Path to the model cache directory
"""
cache_name = info.get("cache_name", info["model_name"])
return cache_dir / f"models--{cache_name.replace('/', '--')}"
def list_models() -> Dict[str, any]:
"""List available model profiles and their installation status.
@@ -84,13 +130,13 @@ def list_models() -> Dict[str, any]:
for profile, info in MODEL_PROFILES.items():
model_name = info["model_name"]
# Check if model is cached
# Check if model is cached using the actual cache name
installed = False
cache_size_mb = 0
if cache_exists:
# Check for model directory in cache
model_cache_path = cache_dir / f"models--{model_name.replace('/', '--')}"
# Check for model directory in cache using correct cache_name
model_cache_path = _get_model_cache_path(cache_dir, info)
if model_cache_path.exists():
installed = True
# Calculate cache size
@@ -144,7 +190,8 @@ def download_model(profile: str, progress_callback: Optional[callable] = None) -
"error": f"Unknown profile: {profile}. Available: {', '.join(MODEL_PROFILES.keys())}",
}
model_name = MODEL_PROFILES[profile]["model_name"]
info = MODEL_PROFILES[profile]
model_name = info["model_name"]
try:
# Download model by instantiating TextEmbedding
@@ -157,9 +204,9 @@ def download_model(profile: str, progress_callback: Optional[callable] = None) -
if progress_callback:
progress_callback(f"Model {model_name} downloaded successfully")
# Get cache info
# Get cache info using correct cache_name
cache_dir = get_cache_dir()
model_cache_path = cache_dir / f"models--{model_name.replace('/', '--')}"
model_cache_path = _get_model_cache_path(cache_dir, info)
cache_size = 0
if model_cache_path.exists():
@@ -202,9 +249,10 @@ def delete_model(profile: str) -> Dict[str, any]:
"error": f"Unknown profile: {profile}. Available: {', '.join(MODEL_PROFILES.keys())}",
}
model_name = MODEL_PROFILES[profile]["model_name"]
info = MODEL_PROFILES[profile]
model_name = info["model_name"]
cache_dir = get_cache_dir()
model_cache_path = cache_dir / f"models--{model_name.replace('/', '--')}"
model_cache_path = _get_model_cache_path(cache_dir, info)
if not model_cache_path.exists():
return {
@@ -259,9 +307,9 @@ def get_model_info(profile: str) -> Dict[str, any]:
info = MODEL_PROFILES[profile]
model_name = info["model_name"]
# Check installation status
# Check installation status using correct cache_name
cache_dir = get_cache_dir()
model_cache_path = cache_dir / f"models--{model_name.replace('/', '--')}"
model_cache_path = _get_model_cache_path(cache_dir, info)
installed = model_cache_path.exists()
cache_size_mb = None

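The whole detection fix reduces to computing the cache directory from `cache_name` instead of `model_name`. An illustration with the `fast` profile, assuming fastembed's `models--{org}--{repo}` layout shown above (the cache root location here is an assumption):

```python
from pathlib import Path

def model_cache_path(cache_dir: Path, info: dict) -> Path:
    # fastembed caches ONNX variants under the ONNX repo name, not the original
    cache_name = info.get("cache_name", info["model_name"])
    return cache_dir / f"models--{cache_name.replace('/', '--')}"

fast = {"model_name": "BAAI/bge-small-en-v1.5",
        "cache_name": "qdrant/bge-small-en-v1.5-onnx-q"}
cache_dir = Path.home() / ".cache" / "fastembed"  # assumed cache root
print(model_cache_path(cache_dir, fast).name)
# models--qdrant--bge-small-en-v1.5-onnx-q  (the old code looked for
# models--BAAI--bge-small-en-v1.5 and always reported "not installed")
```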
View File

@@ -396,7 +396,20 @@ class ChainSearchEngine:
all_results = []
stats = SearchStats()
executor = self._get_executor(options.max_workers)
# Force single-threaded execution for vector/hybrid search to avoid GPU crashes
# DirectML/ONNX have threading issues when multiple threads access GPU resources
effective_workers = options.max_workers
if options.enable_vector or options.hybrid_mode:
effective_workers = 1
self.logger.debug("Using single-threaded mode for vector search (GPU safety)")
# Pre-load embedder to avoid initialization overhead per-search
try:
from codexlens.semantic.embedder import get_embedder
get_embedder(profile="code", use_gpu=True)
except Exception:
pass # Ignore pre-load failures
executor = self._get_executor(effective_workers)
# Submit all search tasks
future_to_path = {
executor.submit(

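The worker-count rule in this hunk is small enough to state as a pure function; a sketch, with parameter names mirroring the options object but otherwise hypothetical:

```python
def effective_worker_count(max_workers: int, enable_vector: bool,
                           hybrid_mode: bool) -> int:
    # DirectML/ONNX sessions are not safe to share across threads; serialize
    # any search that may touch the GPU, keep full parallelism for pure FTS.
    return 1 if (enable_vector or hybrid_mode) else max_workers

assert effective_worker_count(8, enable_vector=False, hybrid_mode=False) == 8
assert effective_worker_count(8, enable_vector=True, hybrid_mode=False) == 1
```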
View File

@@ -274,19 +274,32 @@ class HybridSearchEngine:
)
return []
# Auto-detect embedding dimension and select appropriate profile
detected_dim = vector_store.dimension
if detected_dim is None:
self.logger.info("Vector store dimension unknown, using default profile")
profile = "code" # Default fallback
elif detected_dim == 384:
profile = "fast"
elif detected_dim == 768:
profile = "code"
elif detected_dim == 1024:
profile = "multilingual" # or balanced, both are 1024
# Get stored model configuration (preferred) or auto-detect from dimension
model_config = vector_store.get_model_config()
if model_config:
profile = model_config["model_profile"]
self.logger.debug(
"Using stored model config: %s (%s, %dd)",
profile, model_config["model_name"], model_config["embedding_dim"]
)
else:
profile = "code" # Default fallback
# Fallback: auto-detect from embedding dimension
detected_dim = vector_store.dimension
if detected_dim is None:
self.logger.info("Vector store dimension unknown, using default profile")
profile = "code" # Default fallback
elif detected_dim == 384:
profile = "fast"
elif detected_dim == 768:
profile = "code"
elif detected_dim == 1024:
profile = "multilingual" # or balanced, both are 1024
else:
profile = "code" # Default fallback
self.logger.debug(
"No stored model config, auto-detected profile '%s' from dimension %s",
profile, detected_dim
)
# Use cached embedder (singleton) for performance
embedder = get_embedder(profile=profile)

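Profile resolution now has a clear precedence: the stored `embeddings_config` row wins, and the dimension heuristic only covers legacy stores created before that table existed. As a sketch:

```python
DIM_TO_PROFILE = {384: "fast", 768: "code", 1024: "multilingual"}

def resolve_profile(vector_store) -> str:
    config = vector_store.get_model_config()
    if config:
        return config["model_profile"]  # authoritative: written at embed time
    # Legacy stores predate embeddings_config: fall back to the dimension
    # heuristic, defaulting to "code" when the dimension is unknown.
    return DIM_TO_PROFILE.get(vector_store.dimension, "code")
```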
View File

@@ -2,38 +2,75 @@
Install with: pip install codexlens[semantic]
Uses fastembed (ONNX-based, lightweight ~200MB)
GPU Acceleration:
- Automatic GPU detection and usage when available
- Supports CUDA (NVIDIA), TensorRT, DirectML (Windows), ROCm (AMD), CoreML (Apple)
- Install GPU support: pip install onnxruntime-gpu (NVIDIA) or onnxruntime-directml (Windows)
"""
from __future__ import annotations
SEMANTIC_AVAILABLE = False
SEMANTIC_BACKEND: str | None = None
GPU_AVAILABLE = False
_import_error: str | None = None
def _detect_backend() -> tuple[bool, str | None, str | None]:
"""Detect if fastembed is available."""
def _detect_backend() -> tuple[bool, str | None, bool, str | None]:
"""Detect if fastembed and GPU are available."""
try:
import numpy as np
except ImportError as e:
return False, None, f"numpy not available: {e}"
return False, None, False, f"numpy not available: {e}"
try:
from fastembed import TextEmbedding
return True, "fastembed", None
except ImportError:
return False, None, False, "fastembed not available. Install with: pip install codexlens[semantic]"
# Check GPU availability
gpu_available = False
try:
from .gpu_support import is_gpu_available
gpu_available = is_gpu_available()
except ImportError:
pass
return False, None, "fastembed not available. Install with: pip install codexlens[semantic]"
return True, "fastembed", gpu_available, None
# Initialize on module load
SEMANTIC_AVAILABLE, SEMANTIC_BACKEND, _import_error = _detect_backend()
SEMANTIC_AVAILABLE, SEMANTIC_BACKEND, GPU_AVAILABLE, _import_error = _detect_backend()
def check_semantic_available() -> tuple[bool, str | None]:
"""Check if semantic search dependencies are available."""
return SEMANTIC_AVAILABLE, _import_error
def check_gpu_available() -> tuple[bool, str]:
"""Check if GPU acceleration is available.
Returns:
Tuple of (is_available, status_message)
"""
if not SEMANTIC_AVAILABLE:
return False, "Semantic search not available"
try:
from .gpu_support import is_gpu_available, get_gpu_summary
if is_gpu_available():
return True, get_gpu_summary()
return False, "No GPU detected (using CPU)"
except ImportError:
return False, "GPU support module not available"
__all__ = [
"SEMANTIC_AVAILABLE",
"SEMANTIC_BACKEND",
"GPU_AVAILABLE",
"check_semantic_available",
"check_gpu_available",
]

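A usage sketch of the new module surface, assuming the package is importable as `codexlens.semantic`; the status strings follow `check_gpu_available` above and will vary by machine:

```python
from codexlens.semantic import check_semantic_available, check_gpu_available

ok, err = check_semantic_available()
if not ok:
    raise SystemExit(f"semantic search unavailable: {err}")

gpu_ok, status = check_gpu_available()
print(status)
# e.g. "GPU: NVIDIA ... | Providers: CUDAExecutionProvider"
# or   "No GPU detected (using CPU)"
```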
View File

@@ -1,22 +1,29 @@
"""Embedder for semantic code search using fastembed."""
"""Embedder for semantic code search using fastembed.
Supports GPU acceleration via ONNX execution providers (CUDA, TensorRT, DirectML, ROCm, CoreML).
GPU acceleration is automatic when available, with transparent CPU fallback.
"""
from __future__ import annotations
import gc
import logging
import threading
from typing import Dict, Iterable, List, Optional
import numpy as np
from . import SEMANTIC_AVAILABLE
from .gpu_support import get_optimal_providers, is_gpu_available, get_gpu_summary
logger = logging.getLogger(__name__)
# Global embedder cache for singleton pattern
_embedder_cache: Dict[str, "Embedder"] = {}
_cache_lock = threading.Lock()
def get_embedder(profile: str = "code") -> "Embedder":
def get_embedder(profile: str = "code", use_gpu: bool = True) -> "Embedder":
"""Get or create a cached Embedder instance (thread-safe singleton).
This function provides significant performance improvement by reusing
@@ -25,27 +32,38 @@ def get_embedder(profile: str = "code") -> "Embedder":
Args:
profile: Model profile ("fast", "code", "multilingual", "balanced")
use_gpu: If True, use GPU acceleration when available (default: True)
Returns:
Cached Embedder instance for the given profile
"""
global _embedder_cache
# Cache key includes GPU preference to support mixed configurations
cache_key = f"{profile}:{'gpu' if use_gpu else 'cpu'}"
# Fast path: check cache without lock
if profile in _embedder_cache:
return _embedder_cache[profile]
if cache_key in _embedder_cache:
return _embedder_cache[cache_key]
# Slow path: acquire lock for initialization
with _cache_lock:
# Double-check after acquiring lock
if profile in _embedder_cache:
return _embedder_cache[profile]
if cache_key in _embedder_cache:
return _embedder_cache[cache_key]
# Create new embedder and cache it
embedder = Embedder(profile=profile)
embedder = Embedder(profile=profile, use_gpu=use_gpu)
# Pre-load model to ensure it's ready
embedder._load_model()
_embedder_cache[profile] = embedder
_embedder_cache[cache_key] = embedder
# Log GPU status on first embedder creation
if use_gpu and is_gpu_available():
logger.info(f"Embedder initialized with GPU: {get_gpu_summary()}")
elif use_gpu:
logger.debug("GPU not available, using CPU for embeddings")
return embedder
@@ -96,13 +114,21 @@ class Embedder:
DEFAULT_MODEL = "BAAI/bge-small-en-v1.5"
DEFAULT_PROFILE = "fast"
def __init__(self, model_name: str | None = None, profile: str | None = None) -> None:
def __init__(
self,
model_name: str | None = None,
profile: str | None = None,
use_gpu: bool = True,
providers: List[str] | None = None,
) -> None:
"""Initialize embedder with model or profile.
Args:
model_name: Explicit model name (e.g., "jinaai/jina-embeddings-v2-base-code")
profile: Model profile shortcut ("fast", "code", "multilingual", "balanced")
If both provided, model_name takes precedence.
use_gpu: If True, use GPU acceleration when available (default: True)
providers: Explicit ONNX providers list (overrides use_gpu if provided)
"""
if not SEMANTIC_AVAILABLE:
raise ImportError(
@@ -118,6 +144,13 @@ class Embedder:
else:
self.model_name = self.DEFAULT_MODEL
# Configure ONNX execution providers
if providers is not None:
self._providers = providers
else:
self._providers = get_optimal_providers(use_gpu=use_gpu)
self._use_gpu = use_gpu
self._model = None
@property
@@ -125,13 +158,39 @@ class Embedder:
"""Get embedding dimension for current model."""
return self.MODEL_DIMS.get(self.model_name, 768) # Default to 768 if unknown
@property
def providers(self) -> List[str]:
"""Get configured ONNX execution providers."""
return self._providers
@property
def is_gpu_enabled(self) -> bool:
"""Check if GPU acceleration is enabled for this embedder."""
gpu_providers = {"CUDAExecutionProvider", "TensorrtExecutionProvider",
"DmlExecutionProvider", "ROCMExecutionProvider", "CoreMLExecutionProvider"}
return any(p in gpu_providers for p in self._providers)
def _load_model(self) -> None:
"""Lazy load the embedding model."""
"""Lazy load the embedding model with configured providers."""
if self._model is not None:
return
from fastembed import TextEmbedding
self._model = TextEmbedding(model_name=self.model_name)
# fastembed supports 'providers' parameter for ONNX execution providers
try:
self._model = TextEmbedding(
model_name=self.model_name,
providers=self._providers,
)
logger.debug(f"Model loaded with providers: {self._providers}")
except TypeError:
# Fallback for older fastembed versions without providers parameter
logger.warning(
"fastembed version doesn't support 'providers' parameter. "
"Upgrade fastembed for GPU acceleration: pip install --upgrade fastembed"
)
self._model = TextEmbedding(model_name=self.model_name)
def embed(self, texts: str | Iterable[str]) -> List[List[float]]:
"""Generate embeddings for one or more texts.

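Because the cache key is now `profile:gpu`/`profile:cpu`, GPU and CPU embedders for the same profile coexist instead of evicting each other. A quick check, assuming the semantic extras are installed:

```python
from codexlens.semantic.embedder import get_embedder

gpu = get_embedder(profile="code", use_gpu=True)
cpu = get_embedder(profile="code", use_gpu=False)

# Distinct cache keys ("code:gpu" vs "code:cpu") mean distinct singletons,
# so a CPU-only code path never evicts the GPU-backed instance.
assert gpu is not cpu
assert get_embedder(profile="code", use_gpu=True) is gpu
print(gpu.providers, gpu.is_gpu_enabled)
```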
View File

@@ -0,0 +1,192 @@
"""GPU acceleration support for semantic embeddings.
This module provides GPU detection, initialization, and fallback handling
for ONNX-based embedding generation.
"""
from __future__ import annotations
import logging
from dataclasses import dataclass
from typing import List, Optional
logger = logging.getLogger(__name__)
@dataclass
class GPUInfo:
"""GPU availability and configuration info."""
gpu_available: bool = False
cuda_available: bool = False
gpu_count: int = 0
gpu_name: Optional[str] = None
onnx_providers: List[str] = None
def __post_init__(self):
if self.onnx_providers is None:
self.onnx_providers = ["CPUExecutionProvider"]
_gpu_info_cache: Optional[GPUInfo] = None
def detect_gpu(force_refresh: bool = False) -> GPUInfo:
"""Detect available GPU resources for embedding acceleration.
Args:
force_refresh: If True, re-detect GPU even if cached.
Returns:
GPUInfo with detection results.
"""
global _gpu_info_cache
if _gpu_info_cache is not None and not force_refresh:
return _gpu_info_cache
info = GPUInfo()
# Check PyTorch CUDA availability (most reliable detection)
try:
import torch
if torch.cuda.is_available():
info.cuda_available = True
info.gpu_available = True
info.gpu_count = torch.cuda.device_count()
if info.gpu_count > 0:
info.gpu_name = torch.cuda.get_device_name(0)
logger.debug(f"PyTorch CUDA detected: {info.gpu_count} GPU(s)")
except ImportError:
logger.debug("PyTorch not available for GPU detection")
# Check ONNX Runtime providers with validation
try:
import onnxruntime as ort
available_providers = ort.get_available_providers()
# Build provider list with priority order
providers = []
# Test each provider to ensure it actually works
def test_provider(provider_name: str) -> bool:
"""Test if a provider actually works by creating a dummy session."""
try:
# Create a minimal ONNX model to test provider
import numpy as np
# Simple test: just check if provider can be instantiated
sess_options = ort.SessionOptions()
sess_options.log_severity_level = 4 # Suppress warnings
return True
except Exception:
return False
# CUDA provider (NVIDIA GPU) - check if CUDA runtime is available
if "CUDAExecutionProvider" in available_providers:
# Verify CUDA is actually usable by checking for cuBLAS
cuda_works = False
try:
import ctypes
# Try to load cuBLAS to verify CUDA installation
try:
ctypes.CDLL("cublas64_12.dll")
cuda_works = True
except OSError:
try:
ctypes.CDLL("cublas64_11.dll")
cuda_works = True
except OSError:
pass
except Exception:
pass
if cuda_works:
providers.append("CUDAExecutionProvider")
info.gpu_available = True
logger.debug("ONNX CUDAExecutionProvider available and working")
else:
logger.debug("ONNX CUDAExecutionProvider listed but CUDA runtime not found")
# TensorRT provider (optimized NVIDIA inference)
if "TensorrtExecutionProvider" in available_providers:
# TensorRT requires additional libraries, skip for now
logger.debug("ONNX TensorrtExecutionProvider available (requires TensorRT SDK)")
# DirectML provider (Windows GPU - AMD/Intel/NVIDIA)
if "DmlExecutionProvider" in available_providers:
providers.append("DmlExecutionProvider")
info.gpu_available = True
logger.debug("ONNX DmlExecutionProvider available (DirectML)")
# ROCm provider (AMD GPU on Linux)
if "ROCMExecutionProvider" in available_providers:
providers.append("ROCMExecutionProvider")
info.gpu_available = True
logger.debug("ONNX ROCMExecutionProvider available (AMD)")
# CoreML provider (Apple Silicon)
if "CoreMLExecutionProvider" in available_providers:
providers.append("CoreMLExecutionProvider")
info.gpu_available = True
logger.debug("ONNX CoreMLExecutionProvider available (Apple)")
# Always include CPU as fallback
providers.append("CPUExecutionProvider")
info.onnx_providers = providers
except ImportError:
logger.debug("ONNX Runtime not available")
info.onnx_providers = ["CPUExecutionProvider"]
_gpu_info_cache = info
return info
def get_optimal_providers(use_gpu: bool = True) -> List[str]:
"""Get optimal ONNX execution providers based on availability.
Args:
use_gpu: If True, include GPU providers when available.
If False, force CPU-only execution.
Returns:
List of provider names in priority order.
"""
if not use_gpu:
return ["CPUExecutionProvider"]
gpu_info = detect_gpu()
return gpu_info.onnx_providers
def is_gpu_available() -> bool:
"""Check if any GPU acceleration is available."""
return detect_gpu().gpu_available
def get_gpu_summary() -> str:
"""Get human-readable GPU status summary."""
info = detect_gpu()
if not info.gpu_available:
return "GPU: Not available (using CPU)"
parts = []
if info.gpu_name:
parts.append(f"GPU: {info.gpu_name}")
if info.gpu_count > 1:
parts.append(f"({info.gpu_count} devices)")
# Show active providers (excluding CPU fallback)
gpu_providers = [p for p in info.onnx_providers if p != "CPUExecutionProvider"]
if gpu_providers:
parts.append(f"Providers: {', '.join(gpu_providers)}")
return " | ".join(parts) if parts else "GPU: Available"
def clear_gpu_cache() -> None:
"""Clear cached GPU detection info."""
global _gpu_info_cache
_gpu_info_cache = None

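A usage sketch for the new `gpu_support` module; the printed strings follow `get_gpu_summary` above and depend on the machine:

```python
from codexlens.semantic.gpu_support import (
    clear_gpu_cache, detect_gpu, get_gpu_summary, get_optimal_providers,
)

info = detect_gpu()
print(get_gpu_summary())                     # e.g. "GPU: Not available (using CPU)"
print(get_optimal_providers())               # GPU providers first, CPU always last
print(get_optimal_providers(use_gpu=False))  # ["CPUExecutionProvider"]

# After installing onnxruntime-gpu/-directml in the same session, drop the
# cached detection result so the new provider is picked up.
clear_gpu_cache()
info = detect_gpu(force_refresh=True)
```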
View File

@@ -116,6 +116,17 @@ class VectorStore:
CREATE INDEX IF NOT EXISTS idx_chunks_file
ON semantic_chunks(file_path)
""")
# Model configuration table - tracks which model generated the embeddings
conn.execute("""
CREATE TABLE IF NOT EXISTS embeddings_config (
id INTEGER PRIMARY KEY CHECK (id = 1),
model_profile TEXT NOT NULL,
model_name TEXT NOT NULL,
embedding_dim INTEGER NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
""")
conn.commit()
def _init_ann_index(self) -> None:
@@ -932,6 +943,92 @@ class VectorStore:
return self._ann_index.count()
return 0
def get_model_config(self) -> Optional[Dict[str, Any]]:
"""Get the model configuration used for embeddings in this store.
Returns:
Dictionary with model_profile, model_name, embedding_dim, or None if not set.
"""
with sqlite3.connect(self.db_path) as conn:
row = conn.execute(
"SELECT model_profile, model_name, embedding_dim, created_at, updated_at "
"FROM embeddings_config WHERE id = 1"
).fetchone()
if row:
return {
"model_profile": row[0],
"model_name": row[1],
"embedding_dim": row[2],
"created_at": row[3],
"updated_at": row[4],
}
return None
def set_model_config(
self, model_profile: str, model_name: str, embedding_dim: int
) -> None:
"""Set the model configuration for embeddings in this store.
This should be called when generating new embeddings. If a different
model was previously used, this will update the configuration.
Args:
model_profile: Model profile name (fast, code, minilm, etc.)
model_name: Full model name (e.g., jinaai/jina-embeddings-v2-base-code)
embedding_dim: Embedding dimension (e.g., 768)
"""
with sqlite3.connect(self.db_path) as conn:
conn.execute(
"""
INSERT INTO embeddings_config (id, model_profile, model_name, embedding_dim)
VALUES (1, ?, ?, ?)
ON CONFLICT(id) DO UPDATE SET
model_profile = excluded.model_profile,
model_name = excluded.model_name,
embedding_dim = excluded.embedding_dim,
updated_at = CURRENT_TIMESTAMP
""",
(model_profile, model_name, embedding_dim)
)
conn.commit()
def check_model_compatibility(
self, model_profile: str, model_name: str, embedding_dim: int
) -> Tuple[bool, Optional[str]]:
"""Check if the given model is compatible with existing embeddings.
Args:
model_profile: Model profile to check
model_name: Model name to check
embedding_dim: Embedding dimension to check
Returns:
Tuple of (is_compatible, warning_message).
is_compatible is True if no existing config or configs match.
warning_message is a user-friendly message if incompatible.
"""
existing = self.get_model_config()
if existing is None:
return True, None
# Check dimension first (most critical)
if existing["embedding_dim"] != embedding_dim:
return False, (
f"Dimension mismatch: existing embeddings use {existing['embedding_dim']}d "
f"({existing['model_profile']}), but requested model uses {embedding_dim}d "
f"({model_profile}). Use --force to regenerate all embeddings."
)
# Check model (different models with same dimension may have different semantic spaces)
if existing["model_profile"] != model_profile:
return False, (
f"Model mismatch: existing embeddings use '{existing['model_profile']}' "
f"({existing['model_name']}), but requested '{model_profile}' "
f"({model_name}). Use --force to regenerate all embeddings."
)
return True, None
def close(self) -> None:
"""Close the vector store and release resources.

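Since `embeddings_config` is a single-row table keyed on `id = 1`, it can be inspected with plain sqlite3; a sketch, with the index database path as an assumption:

```python
import sqlite3
from typing import Optional

def read_embeddings_config(db_path: str) -> Optional[dict]:
    with sqlite3.connect(db_path) as conn:
        row = conn.execute(
            "SELECT model_profile, model_name, embedding_dim, updated_at "
            "FROM embeddings_config WHERE id = 1"
        ).fetchone()
    if row is None:
        return None  # store predates the table or has no embeddings yet
    keys = ("model_profile", "model_name", "embedding_dim", "updated_at")
    return dict(zip(keys, row))

# e.g. read_embeddings_config("~/.codexlens/indexes/project/_index.db")
```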
View File

@@ -1651,16 +1651,17 @@ class DirIndexStore:
from codexlens.storage.sqlite_utils import check_trigram_support
has_trigram = check_trigram_support(conn)
fuzzy_tokenizer = "trigram" if has_trigram else "unicode61 tokenchars '_-'"
fuzzy_tokenizer = "trigram" if has_trigram else "unicode61 tokenchars '_-.'"
# Exact FTS table with unicode61 tokenizer
# Note: tokenchars includes '.' to properly tokenize qualified names like PortRole.FLOW
conn.execute(
"""
CREATE VIRTUAL TABLE IF NOT EXISTS files_fts_exact USING fts5(
name, full_path UNINDEXED, content,
content='files',
content_rowid='id',
tokenize="unicode61 tokenchars '_-'"
tokenize="unicode61 tokenchars '_-.'"
)
"""
)

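The effect of adding `.` to `tokenchars` is that a qualified name survives tokenization as one token, so an exact query no longer matches the same parts appearing as adjacent words. A self-contained demo, assuming your SQLite build ships FTS5 (note a rebuilt index is required for existing data, as the commit message says):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    """CREATE VIRTUAL TABLE demo USING fts5(
           content, tokenize="unicode61 tokenchars '_-.'")"""
)
conn.executemany(
    "INSERT INTO demo VALUES (?)",
    [("role = PortRole.FLOW",),           # qualified name: should match
     ("imported PortRole, FLOW mode",)],  # adjacent words: should not
)
rows = conn.execute(
    "SELECT content FROM demo WHERE demo MATCH ?", ('"PortRole.FLOW"',)
).fetchall()
print(rows)  # only the row containing the literal PortRole.FLOW token
```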
View File

@@ -45,7 +45,7 @@ def upgrade(db_conn: Connection):
f"Trigram tokenizer not available (requires SQLite >= 3.34), "
f"using extended unicode61 tokenizer for fuzzy matching"
)
fuzzy_tokenizer = "unicode61 tokenchars '_-'"
fuzzy_tokenizer = "unicode61 tokenchars '_-.'"
# Start transaction
cursor.execute("BEGIN TRANSACTION")
@@ -122,7 +122,8 @@ def upgrade(db_conn: Connection):
# Drop old FTS table
cursor.execute("DROP TABLE IF EXISTS files_fts")
# Create exact FTS table (unicode61 with underscores/hyphens as token chars)
# Create exact FTS table (unicode61 with underscores/hyphens/dots as token chars)
# Note: tokenchars includes '.' to properly tokenize qualified names like PortRole.FLOW
log.info("Creating files_fts_exact table with unicode61 tokenizer...")
cursor.execute(
"""
@@ -130,7 +131,7 @@ def upgrade(db_conn: Connection):
name, full_path UNINDEXED, content,
content='files',
content_rowid='id',
tokenize="unicode61 tokenchars '_-'"
tokenize="unicode61 tokenchars '_-.'"
)
"""
)

package-lock.json (generated): 1361 changes

File diff suppressed because it is too large

View File

@@ -1,11 +1,12 @@
{
"name": "claude-code-workflow",
"version": "6.2.0",
"version": "6.2.8",
"description": "JSON-driven multi-agent development framework with intelligent CLI orchestration (Gemini/Qwen/Codex), context-first architecture, and automated workflow execution",
"type": "module",
"main": "ccw/src/index.js",
"bin": {
"ccw": "./ccw/bin/ccw.js"
"ccw": "./ccw/bin/ccw.js",
"ccw-mcp": "./ccw/bin/ccw-mcp.js"
},
"scripts": {
"start": "node ccw/bin/ccw.js",
@@ -28,6 +29,8 @@
"node": ">=16.0.0"
},
"dependencies": {
"@modelcontextprotocol/sdk": "^1.0.4",
"better-sqlite3": "^11.7.0",
"boxen": "^7.1.0",
"chalk": "^5.3.0",
"commander": "^11.0.0",
@@ -36,12 +39,13 @@
"gradient-string": "^2.0.2",
"inquirer": "^9.2.0",
"open": "^9.1.0",
"ora": "^7.0.0"
"ora": "^7.0.0",
"zod": "^4.1.13"
},
"files": [
"ccw/bin/",
"ccw/dist/",
"ccw/src/",
"ccw/package.json",
".claude/agents/",
".claude/commands/",
".claude/output-styles/",