feat: Enhance CodexLens with GPU support and semantic status improvements

- Added accelerator and providers fields to SemanticStatus interface.
- Updated checkSemanticStatus function to retrieve ONNX providers and accelerator type.
- Introduced detectGpuSupport function to identify available GPU modes (CUDA, DirectML).
- Modified installSemantic function to support GPU acceleration modes and clean up ONNX Runtime installations.
- Updated package requirements in PKG-INFO for semantic-gpu and semantic-directml extras.
- Added new source files for GPU support and search enrichment functionality.
- Updated tests to cover the new features (a usage sketch of the new API follows below).
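
For reference, here is a minimal usage sketch of the new API surface, assuming the exports and signatures shown in the codex-lens.ts diff below; the import path is illustrative:

import { detectGpuSupport, installSemantic, checkSemanticStatus } from './tools/codex-lens.js';
import type { GpuMode } from './tools/codex-lens.js';

async function setupSemanticSearch(): Promise<void> {
  // Pick the recommended acceleration mode, falling back to CPU.
  const gpu = await detectGpuSupport();            // { mode, available, info }
  const mode: GpuMode = gpu.mode ?? 'cpu';

  // Installs the matching onnxruntime variant plus fastembed/hnswlib.
  const result = await installSemantic(mode);
  if (!result.success) {
    throw new Error(`Semantic install failed: ${result.error}`);
  }

  // Report which accelerator and ONNX execution providers ended up active.
  const status = await checkSemanticStatus();      // { available, backend, accelerator, providers }
  console.log(`Accelerator: ${status.accelerator}`, status.providers);
}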
Author: catlog22
Date: 2025-12-22 17:42:26 +08:00
Parent: 72f24bf535
Commit: acdfbb4644
9 changed files with 1215 additions and 98 deletions

View File

@@ -9,11 +9,8 @@
"env": {}
},
"ccw-tools": {
"command": "npx",
"args": [
"-y",
"ccw-mcp"
],
"command": "ccw-mcp",
"args": [],
"env": {
"CCW_ENABLED_TOOLS": "write_file,edit_file,smart_search,core_memory"
}

View File

@@ -10,11 +10,12 @@ import {
executeCodexLens,
checkSemanticStatus,
installSemantic,
detectGpuSupport,
uninstallCodexLens,
cancelIndexing,
isIndexingInProgress
} from '../../tools/codex-lens.js';
import type { ProgressInfo } from '../../tools/codex-lens.js';
import type { ProgressInfo, GpuMode } from '../../tools/codex-lens.js';
export interface RouteContext {
pathname: string;
@@ -343,7 +344,7 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
}
try {
const result = await executeCodexLens(['config-set', '--key', 'index_dir', '--value', index_dir, '--json']);
const result = await executeCodexLens(['config', 'set', 'index_dir', index_dir, '--json']);
if (result.success) {
return { success: true, message: 'Configuration updated successfully' };
} else {
@@ -668,16 +669,43 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
}
// API: CodexLens Semantic Search Install (fastembed, ONNX-based, ~200MB)
// API: Detect GPU support for semantic search
if (pathname === '/api/codexlens/gpu/detect' && req.method === 'GET') {
try {
const gpuInfo = await detectGpuSupport();
res.writeHead(200, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ success: true, ...gpuInfo }));
} catch (err) {
res.writeHead(500, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ success: false, error: err.message }));
}
return true;
}
// API: CodexLens Semantic Search Install (with GPU mode support)
if (pathname === '/api/codexlens/semantic/install' && req.method === 'POST') {
handlePostRequest(req, res, async () => {
handlePostRequest(req, res, async (body) => {
try {
const result = await installSemantic();
// Get GPU mode from request body, default to 'cpu'
const gpuMode: GpuMode = body?.gpuMode || 'cpu';
const validModes: GpuMode[] = ['cpu', 'cuda', 'directml'];
if (!validModes.includes(gpuMode)) {
return { success: false, error: `Invalid GPU mode: ${gpuMode}. Valid modes: ${validModes.join(', ')}`, status: 400 };
}
const result = await installSemantic(gpuMode);
if (result.success) {
const status = await checkSemanticStatus();
const modeDescriptions = {
cpu: 'CPU (ONNX Runtime)',
cuda: 'NVIDIA CUDA GPU',
directml: 'Windows DirectML GPU'
};
return {
success: true,
message: 'Semantic search installed successfully (fastembed)',
message: `Semantic search installed successfully with ${modeDescriptions[gpuMode]}`,
gpuMode,
...status
};
} else {
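
The two handlers above define the HTTP contract consumed by the dashboard code later in this commit. A sketch of the response shapes, inferred from the handler code; the interface names are illustrative, not part of the codebase:

type GpuMode = 'cpu' | 'cuda' | 'directml';

// GET /api/codexlens/gpu/detect
interface GpuDetectResponse {
  success: boolean;
  mode: GpuMode;          // recommended mode for this machine
  available: GpuMode[];   // all usable modes; always includes 'cpu'
  info: string;           // e.g. "NVIDIA GPU detected (CUDA & DirectML available)"
  error?: string;         // only on failure (HTTP 500)
}

// POST /api/codexlens/semantic/install  (body: { gpuMode?: GpuMode })
interface SemanticInstallResponse {
  success: boolean;
  message?: string;       // "Semantic search installed successfully with ..."
  gpuMode?: GpuMode;
  available?: boolean;    // spread in from checkSemanticStatus()
  backend?: string;       // e.g. "fastembed"
  accelerator?: string;   // "CPU" | "CUDA" | "DirectML" | "CoreML" | "ROCm"
  providers?: string[];   // ONNX execution providers reported by onnxruntime
  error?: string;         // an invalid gpuMode is rejected with status 400
}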

View File

@@ -946,20 +946,15 @@ function setCcwProjectRootToCurrent() {
}
// Build CCW Tools config with selected tools
// Uses isWindowsPlatform from state.js to generate platform-appropriate commands
// Uses globally installed ccw-mcp command (from claude-code-workflow package)
function buildCcwToolsConfig(selectedTools, pathConfig = {}) {
const { projectRoot, allowedDirs } = pathConfig;
// Windows requires 'cmd /c' wrapper to execute npx
// Other platforms (macOS, Linux) can run npx directly
const config = isWindowsPlatform
? {
command: "cmd",
args: ["/c", "npx", "-y", "ccw-mcp"]
}
: {
command: "npx",
args: ["-y", "ccw-mcp"]
};
// Use globally installed ccw-mcp command directly
// Requires: npm install -g claude-code-workflow
const config = {
command: "ccw-mcp",
args: []
};
// Add env if not all tools or not default 4 core tools
const coreTools = ['write_file', 'edit_file', 'codex_lens', 'smart_search'];
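
With the npx wrapper removed, the generated server entry matches the .mcp.json hunk at the top of this commit. A sketch of the output for a non-default tool selection, assuming ccw-mcp has been installed globally via npm install -g claude-code-workflow:

// Shape of the entry buildCcwToolsConfig() now emits (values mirror the hunk above).
const ccwToolsEntry = {
  command: "ccw-mcp",
  args: [] as string[],
  env: {
    // Added only when the selection differs from the default core tool set.
    CCW_ENABLED_TOOLS: "write_file,edit_file,smart_search,core_memory"
  }
};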

View File

@@ -23,6 +23,9 @@ const i18n = {
'common.loading': 'Loading...',
'common.error': 'Error',
'common.success': 'Success',
'common.retry': 'Retry',
'common.refresh': 'Refresh',
'common.minutes': 'minutes',
// Header
'header.project': 'Project:',
@@ -277,8 +280,17 @@ const i18n = {
'codexlens.installDeps': 'Install Dependencies',
'codexlens.installDepsPrompt': 'Would you like to install them now? (This may take a few minutes)\n\nClick "Cancel" to create FTS index only.',
'codexlens.installingDeps': 'Installing dependencies...',
'codexlens.installingMode': 'Installing with',
'codexlens.depsInstalled': 'Dependencies installed successfully',
'codexlens.depsInstallFailed': 'Failed to install dependencies',
// GPU Mode Selection
'codexlens.selectGpuMode': 'Select acceleration mode',
'codexlens.cpuModeDesc': 'Standard CPU processing',
'codexlens.directmlModeDesc': 'Windows GPU (NVIDIA/AMD/Intel)',
'codexlens.cudaModeDesc': 'NVIDIA GPU (requires CUDA Toolkit)',
'common.recommended': 'Recommended',
'common.unavailable': 'Unavailable',
'codexlens.modelManagement': 'Model Management',
'codexlens.loadingModels': 'Loading models...',
'codexlens.downloadModel': 'Download',
@@ -293,6 +305,35 @@ const i18n = {
'codexlens.modelListError': 'Failed to load models',
'codexlens.noModelsAvailable': 'No models available',
// Model Download Progress
'codexlens.downloadingModel': 'Downloading',
'codexlens.connectingToHuggingFace': 'Connecting to Hugging Face...',
'codexlens.downloadTimeEstimate': 'Estimated time',
'codexlens.manualDownloadHint': 'Manual download',
'codexlens.downloadingModelFiles': 'Downloading model files...',
'codexlens.downloadingWeights': 'Downloading model weights...',
'codexlens.downloadingTokenizer': 'Downloading tokenizer...',
'codexlens.verifyingModel': 'Verifying model...',
'codexlens.finalizingDownload': 'Finalizing...',
'codexlens.downloadComplete': 'Download complete!',
'codexlens.downloadFailed': 'Download failed',
'codexlens.manualDownloadOptions': 'Manual download options',
'codexlens.cliDownload': 'CLI',
'codexlens.huggingfaceDownload': 'Hugging Face',
'codexlens.downloadCanceled': 'Download canceled',
// Manual Download Guide
'codexlens.manualDownloadGuide': 'Manual Download Guide',
'codexlens.cliMethod': 'Command Line (Recommended)',
'codexlens.cliMethodDesc': 'Run in terminal with progress display:',
'codexlens.pythonMethod': 'Python Script',
'codexlens.pythonMethodDesc': 'Pre-download model using Python:',
'codexlens.hfHubMethod': 'Hugging Face Hub CLI',
'codexlens.hfHubMethodDesc': 'Download using huggingface-cli with resume support:',
'codexlens.modelLinks': 'Direct Model Links',
'codexlens.cacheLocation': 'Model Storage Location',
'common.copied': 'Copied to clipboard',
// CodexLens Indexing Progress
'codexlens.indexing': 'Indexing',
'codexlens.indexingDesc': 'Building code index for workspace',
@@ -302,6 +343,43 @@ const i18n = {
'codexlens.indexSuccess': 'Index created successfully',
'codexlens.indexFailed': 'Indexing failed',
// CodexLens Install
'codexlens.installDesc': 'Python-based code indexing engine',
'codexlens.whatWillBeInstalled': 'What will be installed:',
'codexlens.pythonVenv': 'Python virtual environment',
'codexlens.pythonVenvDesc': 'Isolated Python environment',
'codexlens.codexlensPackage': 'CodexLens package',
'codexlens.codexlensPackageDesc': 'Code indexing and search engine',
'codexlens.sqliteFtsDesc': 'Full-text search database',
'codexlens.installLocation': 'Installation Location',
'codexlens.installTime': 'First installation may take 2-3 minutes to download and setup Python packages.',
'codexlens.startingInstall': 'Starting installation...',
'codexlens.installing': 'Installing...',
'codexlens.creatingVenv': 'Creating virtual environment...',
'codexlens.installingPip': 'Installing pip packages...',
'codexlens.installingPackage': 'Installing CodexLens package...',
'codexlens.settingUpDeps': 'Setting up Python dependencies...',
'codexlens.installComplete': 'Installation complete!',
'codexlens.installSuccess': 'CodexLens installed successfully!',
'codexlens.installNow': 'Install Now',
'codexlens.accelerator': 'Accelerator',
// CodexLens Uninstall
'codexlens.uninstall': 'Uninstall',
'codexlens.uninstallDesc': 'Remove CodexLens and all data',
'codexlens.whatWillBeRemoved': 'What will be removed:',
'codexlens.removeVenv': 'Virtual environment at ~/.codexlens/venv',
'codexlens.removeData': 'All CodexLens indexed data and databases',
'codexlens.removeConfig': 'Configuration and semantic search models',
'codexlens.removing': 'Removing files...',
'codexlens.uninstalling': 'Uninstalling...',
'codexlens.removingVenv': 'Removing virtual environment...',
'codexlens.removingData': 'Deleting indexed data...',
'codexlens.removingConfig': 'Cleaning up configuration...',
'codexlens.finalizing': 'Finalizing removal...',
'codexlens.uninstallComplete': 'Uninstallation complete!',
'codexlens.uninstallSuccess': 'CodexLens uninstalled successfully!',
// Index Manager
'index.manager': 'Index Manager',
'index.projects': 'Projects',
@@ -1358,7 +1436,10 @@ const i18n = {
'common.loading': '加载中...',
'common.error': '错误',
'common.success': '成功',
'common.retry': '重试',
'common.refresh': '刷新',
'common.minutes': '分钟',
// Header
'header.project': '项目:',
'header.recentProjects': '最近项目',
@@ -1612,8 +1693,17 @@ const i18n = {
'codexlens.installDeps': '安装依赖',
'codexlens.installDepsPrompt': '是否立即安装?(可能需要几分钟)\n\n点击"取消"将只创建 FTS 索引。',
'codexlens.installingDeps': '安装依赖中...',
'codexlens.installingMode': '正在安装',
'codexlens.depsInstalled': '依赖安装成功',
'codexlens.depsInstallFailed': '依赖安装失败',
// GPU 模式选择
'codexlens.selectGpuMode': '选择加速模式',
'codexlens.cpuModeDesc': '标准 CPU 处理',
'codexlens.directmlModeDesc': 'Windows GPU(NVIDIA/AMD/Intel)',
'codexlens.cudaModeDesc': 'NVIDIA GPU(需要 CUDA Toolkit)',
'common.recommended': '推荐',
'common.unavailable': '不可用',
'codexlens.modelManagement': '模型管理',
'codexlens.loadingModels': '加载模型中...',
'codexlens.downloadModel': '下载',
@@ -1628,6 +1718,35 @@ const i18n = {
'codexlens.modelListError': '加载模型列表失败',
'codexlens.noModelsAvailable': '没有可用模型',
// 模型下载进度
'codexlens.downloadingModel': '正在下载',
'codexlens.connectingToHuggingFace': '正在连接 Hugging Face...',
'codexlens.downloadTimeEstimate': '预计时间',
'codexlens.manualDownloadHint': '手动下载',
'codexlens.downloadingModelFiles': '正在下载模型文件...',
'codexlens.downloadingWeights': '正在下载模型权重...',
'codexlens.downloadingTokenizer': '正在下载分词器...',
'codexlens.verifyingModel': '正在验证模型...',
'codexlens.finalizingDownload': '正在完成...',
'codexlens.downloadComplete': '下载完成!',
'codexlens.downloadFailed': '下载失败',
'codexlens.manualDownloadOptions': '手动下载选项',
'codexlens.cliDownload': '命令行',
'codexlens.huggingfaceDownload': 'Hugging Face',
'codexlens.downloadCanceled': '下载已取消',
// 手动下载指南
'codexlens.manualDownloadGuide': '手动下载指南',
'codexlens.cliMethod': '命令行(推荐)',
'codexlens.cliMethodDesc': '在终端运行,显示下载进度:',
'codexlens.pythonMethod': 'Python 脚本',
'codexlens.pythonMethodDesc': '使用 Python 预下载模型:',
'codexlens.hfHubMethod': 'Hugging Face Hub CLI',
'codexlens.hfHubMethodDesc': '使用 huggingface-cli 下载,支持断点续传:',
'codexlens.modelLinks': '模型直链',
'codexlens.cacheLocation': '模型存储位置',
'common.copied': '已复制到剪贴板',
// CodexLens 索引进度
'codexlens.indexing': '索引中',
'codexlens.indexingDesc': '正在为工作区构建代码索引',
@@ -1637,6 +1756,43 @@ const i18n = {
'codexlens.indexSuccess': '索引创建成功',
'codexlens.indexFailed': '索引失败',
// CodexLens 安装
'codexlens.installDesc': '基于 Python 的代码索引引擎',
'codexlens.whatWillBeInstalled': '将安装的内容:',
'codexlens.pythonVenv': 'Python 虚拟环境',
'codexlens.pythonVenvDesc': '隔离的 Python 环境',
'codexlens.codexlensPackage': 'CodexLens 包',
'codexlens.codexlensPackageDesc': '代码索引和搜索引擎',
'codexlens.sqliteFtsDesc': '全文搜索数据库',
'codexlens.installLocation': '安装位置',
'codexlens.installTime': '首次安装可能需要 2-3 分钟下载和配置 Python 包。',
'codexlens.startingInstall': '正在启动安装...',
'codexlens.installing': '安装中...',
'codexlens.creatingVenv': '正在创建虚拟环境...',
'codexlens.installingPip': '正在安装 pip 包...',
'codexlens.installingPackage': '正在安装 CodexLens 包...',
'codexlens.settingUpDeps': '正在配置 Python 依赖...',
'codexlens.installComplete': '安装完成!',
'codexlens.installSuccess': 'CodexLens 安装成功!',
'codexlens.installNow': '立即安装',
'codexlens.accelerator': '加速器',
// CodexLens 卸载
'codexlens.uninstall': '卸载',
'codexlens.uninstallDesc': '移除 CodexLens 及所有数据',
'codexlens.whatWillBeRemoved': '将被移除的内容:',
'codexlens.removeVenv': '虚拟环境 ~/.codexlens/venv',
'codexlens.removeData': '所有 CodexLens 索引数据和数据库',
'codexlens.removeConfig': '配置文件和语义搜索模型',
'codexlens.removing': '正在删除文件...',
'codexlens.uninstalling': '正在卸载...',
'codexlens.removingVenv': '正在删除虚拟环境...',
'codexlens.removingData': '正在删除索引数据...',
'codexlens.removingConfig': '正在清理配置文件...',
'codexlens.finalizing': '正在完成卸载...',
'codexlens.uninstallComplete': '卸载完成!',
'codexlens.uninstallSuccess': 'CodexLens 卸载成功!',
// 索引管理器
'index.manager': '索引管理器',
'index.projects': '项目数',

View File

@@ -126,10 +126,10 @@ function buildCodexLensConfigContent(config) {
'<button class="inline-flex items-center gap-1.5 px-3 py-1.5 text-xs font-medium rounded-md border border-border bg-background hover:bg-muted/50 transition-colors" onclick="cleanCodexLensIndexes()">' +
'<i data-lucide="trash" class="w-3.5 h-3.5"></i> ' + t('codexlens.cleanAllIndexes') +
'</button>' +
'<button class="inline-flex items-center gap-1.5 px-3 py-1.5 text-xs font-medium rounded-md border border-destructive/30 bg-destructive/5 text-destructive hover:bg-destructive/10 transition-colors" onclick="uninstallCodexLens()">' +
'<button class="inline-flex items-center gap-1.5 px-3 py-1.5 text-xs font-medium rounded-md border border-destructive/30 bg-destructive/5 text-destructive hover:bg-destructive/10 transition-colors" onclick="uninstallCodexLensFromManager()">' +
'<i data-lucide="trash-2" class="w-3.5 h-3.5"></i> ' + t('cli.uninstall') +
'</button>'
: '<button class="inline-flex items-center gap-1.5 px-3 py-1.5 text-xs font-medium rounded-md bg-primary text-primary-foreground hover:bg-primary/90 transition-colors" onclick="installCodexLens()">' +
: '<button class="inline-flex items-center gap-1.5 px-3 py-1.5 text-xs font-medium rounded-md bg-primary text-primary-foreground hover:bg-primary/90 transition-colors" onclick="installCodexLensFromManager()">' +
'<i data-lucide="download" class="w-3.5 h-3.5"></i> ' + t('codexlens.installCodexLens') +
'</button>') +
'</div>' +
@@ -335,6 +335,26 @@ function initCodexLensConfigEvents(currentConfig) {
// SEMANTIC DEPENDENCIES MANAGEMENT
// ============================================================
// Store detected GPU info
var detectedGpuInfo = null;
/**
* Detect GPU support
*/
async function detectGpuSupport() {
try {
var response = await fetch('/api/codexlens/gpu/detect');
var result = await response.json();
if (result.success) {
detectedGpuInfo = result;
return result;
}
} catch (err) {
console.error('GPU detection failed:', err);
}
return { mode: 'cpu', available: ['cpu'], info: 'CPU only' };
}
/**
* Load semantic dependencies status
*/
@@ -343,24 +363,58 @@ async function loadSemanticDepsStatus() {
if (!container) return;
try {
// Detect GPU support in parallel
var gpuPromise = detectGpuSupport();
var response = await fetch('/api/codexlens/semantic/status');
var result = await response.json();
var gpuInfo = await gpuPromise;
if (result.available) {
container.innerHTML =
'<div class="flex items-center gap-2 text-sm">' +
'<i data-lucide="check-circle" class="w-4 h-4 text-success"></i>' +
'<span>' + t('codexlens.semanticInstalled') + '</span>' +
'<span class="text-muted-foreground">(' + (result.backend || 'fastembed') + ')</span>' +
'</div>';
} else {
// Build accelerator badge
var accelerator = result.accelerator || 'CPU';
var acceleratorIcon = 'cpu';
var acceleratorClass = 'bg-muted text-muted-foreground';
if (accelerator === 'CUDA') {
acceleratorIcon = 'zap';
acceleratorClass = 'bg-green-500/20 text-green-600';
} else if (accelerator === 'DirectML') {
acceleratorIcon = 'gpu-card';
acceleratorClass = 'bg-blue-500/20 text-blue-600';
} else if (accelerator === 'ROCm') {
acceleratorIcon = 'flame';
acceleratorClass = 'bg-red-500/20 text-red-600';
}
container.innerHTML =
'<div class="space-y-2">' +
'<div class="flex items-center gap-2 text-sm">' +
'<i data-lucide="check-circle" class="w-4 h-4 text-success"></i>' +
'<span>' + t('codexlens.semanticInstalled') + '</span>' +
'<span class="text-muted-foreground">(' + (result.backend || 'fastembed') + ')</span>' +
'</div>' +
'<div class="flex items-center gap-2">' +
'<span class="inline-flex items-center gap-1 px-2 py-0.5 rounded text-xs font-medium ' + acceleratorClass + '">' +
'<i data-lucide="' + acceleratorIcon + '" class="w-3 h-3"></i>' +
accelerator +
'</span>' +
(result.providers && result.providers.length > 0
? '<span class="text-xs text-muted-foreground">' + result.providers.join(', ') + '</span>'
: '') +
'</div>' +
'</div>';
} else {
// Build GPU mode options
var gpuOptions = buildGpuModeSelector(gpuInfo);
container.innerHTML =
'<div class="space-y-3">' +
'<div class="flex items-center gap-2 text-sm text-muted-foreground">' +
'<i data-lucide="alert-circle" class="w-4 h-4"></i>' +
'<span>' + t('codexlens.semanticNotInstalled') + '</span>' +
'</div>' +
'<button class="btn-sm btn-outline" onclick="installSemanticDeps()">' +
gpuOptions +
'<button class="btn-sm btn-primary w-full" onclick="installSemanticDepsWithGpu()">' +
'<i data-lucide="download" class="w-3 h-3"></i> ' + t('codexlens.installDeps') +
'</button>' +
'</div>';
@@ -373,21 +427,120 @@ async function loadSemanticDepsStatus() {
}
/**
* Install semantic dependencies
* Build GPU mode selector HTML
*/
async function installSemanticDeps() {
function buildGpuModeSelector(gpuInfo) {
var modes = [
{
id: 'cpu',
label: 'CPU',
desc: t('codexlens.cpuModeDesc') || 'Standard CPU processing',
icon: 'cpu',
available: true
},
{
id: 'directml',
label: 'DirectML',
desc: t('codexlens.directmlModeDesc') || 'Windows GPU (NVIDIA/AMD/Intel)',
icon: 'gpu-card',
available: gpuInfo.available.includes('directml'),
recommended: gpuInfo.mode === 'directml'
},
{
id: 'cuda',
label: 'CUDA',
desc: t('codexlens.cudaModeDesc') || 'NVIDIA GPU (requires CUDA Toolkit)',
icon: 'zap',
available: gpuInfo.available.includes('cuda'),
recommended: gpuInfo.mode === 'cuda'
}
];
var html =
'<div class="space-y-2">' +
'<div class="text-xs font-medium text-muted-foreground flex items-center gap-1">' +
'<i data-lucide="settings" class="w-3 h-3"></i>' +
(t('codexlens.selectGpuMode') || 'Select acceleration mode') +
'</div>' +
'<div class="text-xs text-muted-foreground bg-muted/50 rounded px-2 py-1">' +
'<i data-lucide="info" class="w-3 h-3 inline"></i> ' + gpuInfo.info +
'</div>' +
'<div class="space-y-1">';
modes.forEach(function(mode) {
var isDisabled = !mode.available;
var isRecommended = mode.recommended;
var isDefault = mode.id === gpuInfo.mode;
html +=
'<label class="flex items-center gap-3 p-2 rounded border cursor-pointer hover:bg-muted/50 transition-colors ' +
(isDisabled ? 'opacity-50 cursor-not-allowed' : '') + '">' +
'<input type="radio" name="gpuMode" value="' + mode.id + '" ' +
(isDefault ? 'checked' : '') +
(isDisabled ? ' disabled' : '') +
' class="accent-primary">' +
'<div class="flex-1">' +
'<div class="flex items-center gap-2">' +
'<i data-lucide="' + mode.icon + '" class="w-4 h-4"></i>' +
'<span class="font-medium text-sm">' + mode.label + '</span>' +
(isRecommended ? '<span class="text-xs bg-primary/20 text-primary px-1.5 py-0.5 rounded">' + (t('common.recommended') || 'Recommended') + '</span>' : '') +
(isDisabled ? '<span class="text-xs text-muted-foreground">(' + (t('common.unavailable') || 'Unavailable') + ')</span>' : '') +
'</div>' +
'<div class="text-xs text-muted-foreground">' + mode.desc + '</div>' +
'</div>' +
'</label>';
});
html +=
'</div>' +
'</div>';
return html;
}
/**
* Get selected GPU mode
*/
function getSelectedGpuMode() {
var selected = document.querySelector('input[name="gpuMode"]:checked');
return selected ? selected.value : 'cpu';
}
/**
* Install semantic dependencies with GPU mode
*/
async function installSemanticDepsWithGpu() {
var container = document.getElementById('semanticDepsStatus');
if (!container) return;
var gpuMode = getSelectedGpuMode();
var modeLabels = {
cpu: 'CPU',
cuda: 'NVIDIA CUDA',
directml: 'DirectML'
};
container.innerHTML =
'<div class="text-sm text-muted-foreground animate-pulse">' + t('codexlens.installingDeps') + '</div>';
'<div class="space-y-2">' +
'<div class="flex items-center gap-2 text-sm text-muted-foreground">' +
'<div class="animate-spin w-4 h-4 border-2 border-primary border-t-transparent rounded-full"></div>' +
'<span>' + t('codexlens.installingDeps') + '</span>' +
'</div>' +
'<div class="text-xs text-muted-foreground">' +
(t('codexlens.installingMode') || 'Installing with') + ': ' + modeLabels[gpuMode] +
'</div>' +
'</div>';
try {
var response = await fetch('/api/codexlens/semantic/install', { method: 'POST' });
var response = await fetch('/api/codexlens/semantic/install', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ gpuMode: gpuMode })
});
var result = await response.json();
if (result.success) {
showRefreshToast(t('codexlens.depsInstalled'), 'success');
showRefreshToast(t('codexlens.depsInstalled') + ' (' + modeLabels[gpuMode] + ')', 'success');
await loadSemanticDepsStatus();
await loadModelList();
} else {
@@ -400,10 +553,164 @@ async function installSemanticDeps() {
}
}
/**
* Install semantic dependencies (legacy, defaults to CPU)
*/
async function installSemanticDeps() {
await installSemanticDepsWithGpu();
}
// ============================================================
// MODEL MANAGEMENT
// ============================================================
/**
* Build manual download guide HTML
*/
function buildManualDownloadGuide() {
var modelData = [
{ profile: 'code', name: 'jinaai/jina-embeddings-v2-base-code', size: '~150 MB' },
{ profile: 'fast', name: 'BAAI/bge-small-en-v1.5', size: '~80 MB' },
{ profile: 'balanced', name: 'mixedbread-ai/mxbai-embed-large-v1', size: '~600 MB' },
{ profile: 'multilingual', name: 'intfloat/multilingual-e5-large', size: '~1 GB' }
];
var html =
'<div class="mt-4 border-t pt-4">' +
'<button class="flex items-center gap-2 text-sm text-muted-foreground hover:text-foreground w-full" onclick="toggleManualDownloadGuide()" id="manualDownloadToggle">' +
'<i data-lucide="chevron-right" class="w-4 h-4 transition-transform" id="manualDownloadChevron"></i>' +
'<i data-lucide="terminal" class="w-4 h-4"></i>' +
'<span>' + (t('codexlens.manualDownloadGuide') || 'Manual Download Guide') + '</span>' +
'</button>' +
'<div id="manualDownloadContent" class="hidden mt-3 space-y-3">' +
// Method 1: CLI
'<div class="bg-muted/50 rounded-lg p-3 space-y-2">' +
'<div class="flex items-center gap-2 text-sm font-medium">' +
'<span class="inline-flex items-center justify-center w-5 h-5 rounded-full bg-primary/20 text-primary text-xs">1</span>' +
'<span>' + (t('codexlens.cliMethod') || 'Command Line (Recommended)') + '</span>' +
'</div>' +
'<div class="text-xs text-muted-foreground mb-2">' +
(t('codexlens.cliMethodDesc') || 'Run in terminal with progress display:') +
'</div>' +
'<div class="space-y-1">';
modelData.forEach(function(m) {
html +=
'<div class="flex items-center justify-between bg-background rounded px-2 py-1.5">' +
'<code class="text-xs font-mono">codexlens model-download ' + m.profile + '</code>' +
'<button class="text-xs text-primary hover:underline" onclick="copyToClipboard(\'codexlens model-download ' + m.profile + '\')">' +
'<i data-lucide="copy" class="w-3 h-3"></i>' +
'</button>' +
'</div>';
});
html +=
'</div>' +
'</div>' +
// Method 2: Python
'<div class="bg-muted/50 rounded-lg p-3 space-y-2">' +
'<div class="flex items-center gap-2 text-sm font-medium">' +
'<span class="inline-flex items-center justify-center w-5 h-5 rounded-full bg-primary/20 text-primary text-xs">2</span>' +
'<span>' + (t('codexlens.pythonMethod') || 'Python Script') + '</span>' +
'</div>' +
'<div class="text-xs text-muted-foreground mb-2">' +
(t('codexlens.pythonMethodDesc') || 'Pre-download model using Python:') +
'</div>' +
'<div class="bg-background rounded p-2">' +
'<pre class="text-xs font-mono whitespace-pre-wrap">' +
'# Install fastembed first\n' +
'pip install fastembed\n\n' +
'# Download model (choose one)\n' +
'from fastembed import TextEmbedding\n\n' +
'# Code model (recommended for code search)\n' +
'model = TextEmbedding("jinaai/jina-embeddings-v2-base-code")\n\n' +
'# Fast model (lightweight)\n' +
'# model = TextEmbedding("BAAI/bge-small-en-v1.5")' +
'</pre>' +
'</div>' +
'</div>' +
// Method 3: Hugging Face Hub
'<div class="bg-muted/50 rounded-lg p-3 space-y-2">' +
'<div class="flex items-center gap-2 text-sm font-medium">' +
'<span class="inline-flex items-center justify-center w-5 h-5 rounded-full bg-primary/20 text-primary text-xs">3</span>' +
'<span>' + (t('codexlens.hfHubMethod') || 'Hugging Face Hub CLI') + '</span>' +
'</div>' +
'<div class="text-xs text-muted-foreground mb-2">' +
(t('codexlens.hfHubMethodDesc') || 'Download using huggingface-cli with resume support:') +
'</div>' +
'<div class="bg-background rounded p-2 space-y-2">' +
'<pre class="text-xs font-mono whitespace-pre-wrap">' +
'# Install huggingface_hub\n' +
'pip install huggingface_hub\n\n' +
'# Download model (supports resume on failure)\n' +
'huggingface-cli download jinaai/jina-embeddings-v2-base-code' +
'</pre>' +
'</div>' +
'</div>' +
// Model Links
'<div class="bg-muted/50 rounded-lg p-3 space-y-2">' +
'<div class="flex items-center gap-2 text-sm font-medium">' +
'<i data-lucide="external-link" class="w-4 h-4"></i>' +
'<span>' + (t('codexlens.modelLinks') || 'Direct Model Links') + '</span>' +
'</div>' +
'<div class="grid grid-cols-2 gap-2">';
modelData.forEach(function(m) {
html +=
'<a href="https://huggingface.co/' + m.name + '" target="_blank" class="flex items-center justify-between bg-background rounded px-2 py-1.5 hover:bg-muted transition-colors">' +
'<span class="text-xs font-medium">' + m.profile + '</span>' +
'<span class="text-xs text-muted-foreground">' + m.size + '</span>' +
'</a>';
});
html +=
'</div>' +
'</div>' +
// Cache location info
'<div class="text-xs text-muted-foreground bg-muted/30 rounded p-2">' +
'<div class="flex items-start gap-1.5">' +
'<i data-lucide="info" class="w-3.5 h-3.5 mt-0.5 flex-shrink-0"></i>' +
'<div>' +
'<strong>' + (t('codexlens.cacheLocation') || 'Cache Location') + ':</strong><br>' +
'<code class="text-xs">Windows: %LOCALAPPDATA%\\Temp\\fastembed_cache</code><br>' +
'<code class="text-xs">Linux/Mac: ~/.cache/fastembed</code>' +
'</div>' +
'</div>' +
'</div>' +
'</div>' +
'</div>';
return html;
}
/**
* Toggle manual download guide visibility
*/
function toggleManualDownloadGuide() {
var content = document.getElementById('manualDownloadContent');
var chevron = document.getElementById('manualDownloadChevron');
if (content && chevron) {
content.classList.toggle('hidden');
chevron.style.transform = content.classList.contains('hidden') ? '' : 'rotate(90deg)';
}
}
/**
* Copy text to clipboard
*/
function copyToClipboard(text) {
navigator.clipboard.writeText(text).then(function() {
showRefreshToast(t('common.copied') || 'Copied to clipboard', 'success');
}).catch(function(err) {
console.error('Failed to copy:', err);
});
}
/**
* Load model list
*/
@@ -476,6 +783,10 @@ async function loadModelList() {
});
html += '</div>';
// Add manual download guide section
html += buildManualDownloadGuide();
container.innerHTML = html;
if (window.lucide) lucide.createIcons();
} catch (err) {
@@ -485,18 +796,94 @@ async function loadModelList() {
}
/**
* Download model
* Download model with progress simulation and manual download info
*/
async function downloadModel(profile) {
var modelCard = document.getElementById('model-' + profile);
if (!modelCard) return;
var originalHTML = modelCard.innerHTML;
// Get model info for size estimation
var modelSizes = {
'fast': { size: 80, time: '1-2' },
'code': { size: 150, time: '2-5' },
'multilingual': { size: 1000, time: '5-15' },
'balanced': { size: 600, time: '3-10' }
};
var modelInfo = modelSizes[profile] || { size: 100, time: '2-5' };
// Show detailed download UI with progress simulation
modelCard.innerHTML =
'<div class="flex items-center justify-center p-3">' +
'<span class="text-sm text-muted-foreground animate-pulse">' + t('codexlens.downloading') + '</span>' +
'<div class="p-3 space-y-3">' +
'<div class="flex items-center gap-2">' +
'<div class="animate-spin w-4 h-4 border-2 border-primary border-t-transparent rounded-full flex-shrink-0"></div>' +
'<span class="text-sm font-medium">' + (t('codexlens.downloadingModel') || 'Downloading') + ' ' + profile + '</span>' +
'</div>' +
'<div class="space-y-1">' +
'<div class="h-2 bg-muted rounded-full overflow-hidden">' +
'<div id="model-progress-' + profile + '" class="h-full bg-primary transition-all duration-1000 ease-out model-download-progress" style="width: 0%"></div>' +
'</div>' +
'<div class="flex justify-between text-xs text-muted-foreground">' +
'<span id="model-status-' + profile + '">' + (t('codexlens.connectingToHuggingFace') || 'Connecting to Hugging Face...') + '</span>' +
'<span>~' + modelInfo.size + ' MB</span>' +
'</div>' +
'</div>' +
'<div class="text-xs text-muted-foreground bg-muted/50 rounded p-2 space-y-1">' +
'<div class="flex items-start gap-1">' +
'<i data-lucide="info" class="w-3 h-3 mt-0.5 flex-shrink-0"></i>' +
'<span>' + (t('codexlens.downloadTimeEstimate') || 'Estimated time') + ': ' + modelInfo.time + ' ' + (t('common.minutes') || 'minutes') + '</span>' +
'</div>' +
'<div class="flex items-start gap-1">' +
'<i data-lucide="terminal" class="w-3 h-3 mt-0.5 flex-shrink-0"></i>' +
'<span>' + (t('codexlens.manualDownloadHint') || 'Manual download') + ': <code class="bg-background px-1 rounded">codexlens model-download ' + profile + '</code></span>' +
'</div>' +
'</div>' +
'<button class="text-xs text-muted-foreground hover:text-foreground underline" onclick="cancelModelDownload(\'' + profile + '\')">' +
(t('common.cancel') || 'Cancel') +
'</button>' +
'</div>';
if (window.lucide) lucide.createIcons();
// Start progress simulation
var progressBar = document.getElementById('model-progress-' + profile);
var statusText = document.getElementById('model-status-' + profile);
var simulatedProgress = 0;
var progressInterval = null;
var downloadAborted = false;
// Store abort controller for cancellation
window['modelDownloadAbort_' + profile] = function() {
downloadAborted = true;
if (progressInterval) clearInterval(progressInterval);
};
// Simulate progress based on model size
var progressStages = [
{ percent: 10, msg: t('codexlens.downloadingModelFiles') || 'Downloading model files...' },
{ percent: 30, msg: t('codexlens.downloadingWeights') || 'Downloading model weights...' },
{ percent: 60, msg: t('codexlens.downloadingTokenizer') || 'Downloading tokenizer...' },
{ percent: 80, msg: t('codexlens.verifyingModel') || 'Verifying model...' },
{ percent: 95, msg: t('codexlens.finalizingDownload') || 'Finalizing...' }
];
var stageIndex = 0;
var baseInterval = Math.max(2000, modelInfo.size * 30); // Slower for larger models
progressInterval = setInterval(function() {
if (downloadAborted) return;
if (stageIndex < progressStages.length) {
var stage = progressStages[stageIndex];
simulatedProgress = stage.percent;
if (progressBar) progressBar.style.width = simulatedProgress + '%';
if (statusText) statusText.textContent = stage.msg;
stageIndex++;
}
}, baseInterval);
try {
var response = await fetch('/api/codexlens/models/download', {
method: 'POST',
@@ -504,20 +891,99 @@ async function downloadModel(profile) {
body: JSON.stringify({ profile: profile })
});
// Clear simulation
if (progressInterval) clearInterval(progressInterval);
if (downloadAborted) {
modelCard.innerHTML = originalHTML;
if (window.lucide) lucide.createIcons();
return;
}
var result = await response.json();
if (result.success) {
// Show completion
if (progressBar) progressBar.style.width = '100%';
if (statusText) statusText.textContent = t('codexlens.downloadComplete') || 'Download complete!';
showRefreshToast(t('codexlens.modelDownloaded') + ': ' + profile, 'success');
await loadModelList();
// Refresh model list after short delay
setTimeout(function() {
loadModelList();
}, 500);
} else {
showRefreshToast(t('codexlens.modelDownloadFailed') + ': ' + result.error, 'error');
modelCard.innerHTML = originalHTML;
if (window.lucide) lucide.createIcons();
showModelDownloadError(modelCard, profile, result.error, originalHTML);
}
} catch (err) {
if (progressInterval) clearInterval(progressInterval);
showRefreshToast(t('common.error') + ': ' + err.message, 'error');
modelCard.innerHTML = originalHTML;
if (window.lucide) lucide.createIcons();
showModelDownloadError(modelCard, profile, err.message, originalHTML);
}
// Cleanup abort function
delete window['modelDownloadAbort_' + profile];
}
/**
* Show model download error with manual download instructions
*/
function showModelDownloadError(modelCard, profile, error, originalHTML) {
var modelNames = {
'fast': 'BAAI/bge-small-en-v1.5',
'code': 'jinaai/jina-embeddings-v2-base-code',
'multilingual': 'intfloat/multilingual-e5-large',
'balanced': 'mixedbread-ai/mxbai-embed-large-v1'
};
var modelName = modelNames[profile] || profile;
var hfUrl = 'https://huggingface.co/' + modelName;
modelCard.innerHTML =
'<div class="p-3 space-y-3">' +
'<div class="flex items-start gap-2 text-destructive">' +
'<i data-lucide="alert-circle" class="w-4 h-4 mt-0.5 flex-shrink-0"></i>' +
'<div class="text-sm">' +
'<div class="font-medium">' + (t('codexlens.downloadFailed') || 'Download failed') + '</div>' +
'<div class="text-xs text-muted-foreground mt-1">' + error + '</div>' +
'</div>' +
'</div>' +
'<div class="bg-muted/50 rounded p-2 space-y-2 text-xs">' +
'<div class="font-medium">' + (t('codexlens.manualDownloadOptions') || 'Manual download options') + ':</div>' +
'<div class="space-y-1.5">' +
'<div class="flex items-start gap-1">' +
'<span class="text-muted-foreground">1.</span>' +
'<span>' + (t('codexlens.cliDownload') || 'CLI') + ': <code class="bg-background px-1 rounded">codexlens model-download ' + profile + '</code></span>' +
'</div>' +
'<div class="flex items-start gap-1">' +
'<span class="text-muted-foreground">2.</span>' +
'<span>' + (t('codexlens.huggingfaceDownload') || 'Hugging Face') + ': <a href="' + hfUrl + '" target="_blank" class="text-primary hover:underline">' + modelName + '</a></span>' +
'</div>' +
'</div>' +
'</div>' +
'<div class="flex gap-2">' +
'<button class="btn-sm btn-outline flex-1" onclick="loadModelList()">' +
'<i data-lucide="refresh-cw" class="w-3 h-3"></i> ' + (t('common.refresh') || 'Refresh') +
'</button>' +
'<button class="btn-sm btn-primary flex-1" onclick="downloadModel(\'' + profile + '\')">' +
'<i data-lucide="download" class="w-3 h-3"></i> ' + (t('common.retry') || 'Retry') +
'</button>' +
'</div>' +
'</div>';
if (window.lucide) lucide.createIcons();
}
/**
* Cancel model download
*/
function cancelModelDownload(profile) {
if (window['modelDownloadAbort_' + profile]) {
window['modelDownloadAbort_' + profile]();
showRefreshToast(t('codexlens.downloadCanceled') || 'Download canceled', 'info');
loadModelList();
}
}
@@ -876,16 +1342,315 @@ async function cancelCodexLensIndexing() {
/**
* Install CodexLens
* Note: Uses CodexLens-specific install wizard from cli-status.js
* which calls /api/codexlens/bootstrap (Python venv), not the generic
* CLI install that uses npm install -g (NPM packages)
*/
function installCodexLens() {
openCliInstallWizard('codexlens');
function installCodexLensFromManager() {
// Use the CodexLens-specific install wizard from cli-status.js
if (typeof openCodexLensInstallWizard === 'function') {
openCodexLensInstallWizard();
} else {
// Fallback: inline install wizard if cli-status.js not loaded
showCodexLensInstallDialog();
}
}
/**
* Fallback install dialog when cli-status.js is not loaded
*/
function showCodexLensInstallDialog() {
var modal = document.createElement('div');
modal.id = 'codexlensInstallModalFallback';
modal.className = 'fixed inset-0 bg-black/50 flex items-center justify-center z-50';
modal.innerHTML =
'<div class="bg-card rounded-lg shadow-xl w-full max-w-md mx-4 overflow-hidden">' +
'<div class="p-6">' +
'<div class="flex items-center gap-3 mb-4">' +
'<div class="w-10 h-10 rounded-full bg-primary/10 flex items-center justify-center">' +
'<i data-lucide="database" class="w-5 h-5 text-primary"></i>' +
'</div>' +
'<div>' +
'<h3 class="text-lg font-semibold">' + t('codexlens.installCodexLens') + '</h3>' +
'<p class="text-sm text-muted-foreground">' + t('codexlens.installDesc') + '</p>' +
'</div>' +
'</div>' +
'<div class="space-y-4">' +
'<div class="bg-muted/50 rounded-lg p-4">' +
'<h4 class="font-medium mb-2">' + t('codexlens.whatWillBeInstalled') + '</h4>' +
'<ul class="text-sm space-y-2 text-muted-foreground">' +
'<li class="flex items-start gap-2">' +
'<i data-lucide="check" class="w-4 h-4 text-success mt-0.5"></i>' +
'<span><strong>' + t('codexlens.pythonVenv') + '</strong> - ' + t('codexlens.pythonVenvDesc') + '</span>' +
'</li>' +
'<li class="flex items-start gap-2">' +
'<i data-lucide="check" class="w-4 h-4 text-success mt-0.5"></i>' +
'<span><strong>' + t('codexlens.codexlensPackage') + '</strong> - ' + t('codexlens.codexlensPackageDesc') + '</span>' +
'</li>' +
'<li class="flex items-start gap-2">' +
'<i data-lucide="check" class="w-4 h-4 text-success mt-0.5"></i>' +
'<span><strong>SQLite FTS5</strong> - ' + t('codexlens.sqliteFtsDesc') + '</span>' +
'</li>' +
'</ul>' +
'</div>' +
'<div class="bg-primary/5 border border-primary/20 rounded-lg p-3">' +
'<div class="flex items-start gap-2">' +
'<i data-lucide="info" class="w-4 h-4 text-primary mt-0.5"></i>' +
'<div class="text-sm text-muted-foreground">' +
'<p class="font-medium text-foreground">' + t('codexlens.installLocation') + '</p>' +
'<p class="mt-1"><code class="bg-muted px-1 rounded">~/.codexlens/venv</code></p>' +
'<p class="mt-1">' + t('codexlens.installTime') + '</p>' +
'</div>' +
'</div>' +
'</div>' +
'<div id="codexlensInstallProgressFallback" class="hidden">' +
'<div class="flex items-center gap-3">' +
'<div class="animate-spin w-5 h-5 border-2 border-primary border-t-transparent rounded-full"></div>' +
'<span class="text-sm" id="codexlensInstallStatusFallback">' + t('codexlens.startingInstall') + '</span>' +
'</div>' +
'<div class="mt-2 h-2 bg-muted rounded-full overflow-hidden">' +
'<div id="codexlensInstallProgressBarFallback" class="h-full bg-primary transition-all duration-300" style="width: 0%"></div>' +
'</div>' +
'</div>' +
'</div>' +
'</div>' +
'<div class="border-t border-border p-4 flex justify-end gap-3 bg-muted/30">' +
'<button class="btn-outline px-4 py-2" onclick="closeCodexLensInstallDialogFallback()">' + t('common.cancel') + '</button>' +
'<button id="codexlensInstallBtnFallback" class="btn-primary px-4 py-2" onclick="startCodexLensInstallFallback()">' +
'<i data-lucide="download" class="w-4 h-4 mr-2"></i>' +
t('codexlens.installNow') +
'</button>' +
'</div>' +
'</div>';
document.body.appendChild(modal);
if (window.lucide) lucide.createIcons();
}
function closeCodexLensInstallDialogFallback() {
var modal = document.getElementById('codexlensInstallModalFallback');
if (modal) modal.remove();
}
async function startCodexLensInstallFallback() {
var progressDiv = document.getElementById('codexlensInstallProgressFallback');
var installBtn = document.getElementById('codexlensInstallBtnFallback');
var statusText = document.getElementById('codexlensInstallStatusFallback');
var progressBar = document.getElementById('codexlensInstallProgressBarFallback');
progressDiv.classList.remove('hidden');
installBtn.disabled = true;
installBtn.innerHTML = '<span class="animate-pulse">' + t('codexlens.installing') + '</span>';
var stages = [
{ progress: 10, text: t('codexlens.creatingVenv') },
{ progress: 30, text: t('codexlens.installingPip') },
{ progress: 50, text: t('codexlens.installingPackage') },
{ progress: 70, text: t('codexlens.settingUpDeps') },
{ progress: 90, text: t('codexlens.finalizing') }
];
var currentStage = 0;
var progressInterval = setInterval(function() {
if (currentStage < stages.length) {
statusText.textContent = stages[currentStage].text;
progressBar.style.width = stages[currentStage].progress + '%';
currentStage++;
}
}, 1500);
try {
var response = await fetch('/api/codexlens/bootstrap', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({})
});
clearInterval(progressInterval);
var result = await response.json();
if (result.success) {
progressBar.style.width = '100%';
statusText.textContent = t('codexlens.installComplete');
setTimeout(function() {
closeCodexLensInstallDialogFallback();
showRefreshToast(t('codexlens.installSuccess'), 'success');
// Refresh the page to update status
if (typeof loadCodexLensStatus === 'function') {
loadCodexLensStatus().then(function() {
if (typeof renderCodexLensManager === 'function') renderCodexLensManager();
});
} else {
location.reload();
}
}, 1000);
} else {
statusText.textContent = t('common.error') + ': ' + result.error;
progressBar.classList.add('bg-destructive');
installBtn.disabled = false;
installBtn.innerHTML = '<i data-lucide="refresh-cw" class="w-4 h-4 mr-2"></i> ' + t('common.retry');
if (window.lucide) lucide.createIcons();
}
} catch (err) {
clearInterval(progressInterval);
statusText.textContent = t('common.error') + ': ' + err.message;
progressBar.classList.add('bg-destructive');
installBtn.disabled = false;
installBtn.innerHTML = '<i data-lucide="refresh-cw" class="w-4 h-4 mr-2"></i> ' + t('common.retry');
if (window.lucide) lucide.createIcons();
}
}
/**
* Uninstall CodexLens
* Note: Uses CodexLens-specific uninstall wizard from cli-status.js
* which calls /api/codexlens/uninstall (Python venv), not the generic
* CLI uninstall that uses /api/cli/uninstall (NPM packages)
*/
function uninstallCodexLens() {
openCliUninstallWizard('codexlens');
function uninstallCodexLensFromManager() {
// Use the CodexLens-specific uninstall wizard from cli-status.js
if (typeof openCodexLensUninstallWizard === 'function') {
openCodexLensUninstallWizard();
} else {
// Fallback: inline uninstall wizard if cli-status.js not loaded
showCodexLensUninstallDialog();
}
}
/**
* Fallback uninstall dialog when cli-status.js is not loaded
*/
function showCodexLensUninstallDialog() {
var modal = document.createElement('div');
modal.id = 'codexlensUninstallModalFallback';
modal.className = 'fixed inset-0 bg-black/50 flex items-center justify-center z-50';
modal.innerHTML =
'<div class="bg-card rounded-lg shadow-xl w-full max-w-md mx-4 overflow-hidden">' +
'<div class="p-6">' +
'<div class="flex items-center gap-3 mb-4">' +
'<div class="w-10 h-10 rounded-full bg-destructive/10 flex items-center justify-center">' +
'<i data-lucide="trash-2" class="w-5 h-5 text-destructive"></i>' +
'</div>' +
'<div>' +
'<h3 class="text-lg font-semibold">' + t('codexlens.uninstall') + '</h3>' +
'<p class="text-sm text-muted-foreground">' + t('codexlens.uninstallDesc') + '</p>' +
'</div>' +
'</div>' +
'<div class="space-y-4">' +
'<div class="bg-destructive/5 border border-destructive/20 rounded-lg p-4">' +
'<h4 class="font-medium text-destructive mb-2">' + t('codexlens.whatWillBeRemoved') + '</h4>' +
'<ul class="text-sm space-y-2 text-muted-foreground">' +
'<li class="flex items-start gap-2">' +
'<i data-lucide="x" class="w-4 h-4 text-destructive mt-0.5"></i>' +
'<span>' + t('codexlens.removeVenv') + '</span>' +
'</li>' +
'<li class="flex items-start gap-2">' +
'<i data-lucide="x" class="w-4 h-4 text-destructive mt-0.5"></i>' +
'<span>' + t('codexlens.removeData') + '</span>' +
'</li>' +
'<li class="flex items-start gap-2">' +
'<i data-lucide="x" class="w-4 h-4 text-destructive mt-0.5"></i>' +
'<span>' + t('codexlens.removeConfig') + '</span>' +
'</li>' +
'</ul>' +
'</div>' +
'<div id="codexlensUninstallProgressFallback" class="hidden">' +
'<div class="flex items-center gap-3">' +
'<div class="animate-spin w-5 h-5 border-2 border-destructive border-t-transparent rounded-full"></div>' +
'<span class="text-sm" id="codexlensUninstallStatusFallback">' + t('codexlens.removing') + '</span>' +
'</div>' +
'<div class="mt-2 h-2 bg-muted rounded-full overflow-hidden">' +
'<div id="codexlensUninstallProgressBarFallback" class="h-full bg-destructive transition-all duration-300" style="width: 0%"></div>' +
'</div>' +
'</div>' +
'</div>' +
'</div>' +
'<div class="border-t border-border p-4 flex justify-end gap-3 bg-muted/30">' +
'<button class="btn-outline px-4 py-2" onclick="closeCodexLensUninstallDialogFallback()">' + t('common.cancel') + '</button>' +
'<button id="codexlensUninstallBtnFallback" class="btn-destructive px-4 py-2" onclick="startCodexLensUninstallFallback()">' +
'<i data-lucide="trash-2" class="w-4 h-4 mr-2"></i>' +
t('codexlens.uninstall') +
'</button>' +
'</div>' +
'</div>';
document.body.appendChild(modal);
if (window.lucide) lucide.createIcons();
}
function closeCodexLensUninstallDialogFallback() {
var modal = document.getElementById('codexlensUninstallModalFallback');
if (modal) modal.remove();
}
async function startCodexLensUninstallFallback() {
var progressDiv = document.getElementById('codexlensUninstallProgressFallback');
var uninstallBtn = document.getElementById('codexlensUninstallBtnFallback');
var statusText = document.getElementById('codexlensUninstallStatusFallback');
var progressBar = document.getElementById('codexlensUninstallProgressBarFallback');
progressDiv.classList.remove('hidden');
uninstallBtn.disabled = true;
uninstallBtn.innerHTML = '<span class="animate-pulse">' + t('codexlens.uninstalling') + '</span>';
var stages = [
{ progress: 25, text: t('codexlens.removingVenv') },
{ progress: 50, text: t('codexlens.removingData') },
{ progress: 75, text: t('codexlens.removingConfig') },
{ progress: 90, text: t('codexlens.finalizing') }
];
var currentStage = 0;
var progressInterval = setInterval(function() {
if (currentStage < stages.length) {
statusText.textContent = stages[currentStage].text;
progressBar.style.width = stages[currentStage].progress + '%';
currentStage++;
}
}, 500);
try {
var response = await fetch('/api/codexlens/uninstall', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({})
});
clearInterval(progressInterval);
var result = await response.json();
if (result.success) {
progressBar.style.width = '100%';
statusText.textContent = t('codexlens.uninstallComplete');
setTimeout(function() {
closeCodexLensUninstallDialogFallback();
showRefreshToast(t('codexlens.uninstallSuccess'), 'success');
// Refresh the page to update status
if (typeof loadCodexLensStatus === 'function') {
loadCodexLensStatus().then(function() {
if (typeof renderCodexLensManager === 'function') renderCodexLensManager();
});
} else {
location.reload();
}
}, 1000);
} else {
statusText.textContent = t('common.error') + ': ' + result.error;
progressBar.classList.add('bg-destructive');
uninstallBtn.disabled = false;
uninstallBtn.innerHTML = '<i data-lucide="refresh-cw" class="w-4 h-4 mr-2"></i> ' + t('common.retry');
if (window.lucide) lucide.createIcons();
}
} catch (err) {
clearInterval(progressInterval);
statusText.textContent = t('common.error') + ': ' + err.message;
progressBar.classList.add('bg-destructive');
uninstallBtn.disabled = false;
uninstallBtn.innerHTML = '<i data-lucide="refresh-cw" class="w-4 h-4 mr-2"></i> ' + t('common.retry');
if (window.lucide) lucide.createIcons();
}
}
/**

View File

@@ -75,6 +75,8 @@ interface ReadyStatus {
interface SemanticStatus {
available: boolean;
backend?: string;
accelerator?: string;
providers?: string[];
error?: string;
}
@@ -190,18 +192,39 @@ async function checkSemanticStatus(): Promise<SemanticStatus> {
return { available: false, error: 'CodexLens not installed' };
}
// Check semantic module availability
// Check semantic module availability and accelerator info
return new Promise((resolve) => {
const checkCode = `
import sys
import json
try:
from codexlens.semantic import SEMANTIC_AVAILABLE, SEMANTIC_BACKEND
if SEMANTIC_AVAILABLE:
print(f"available:{SEMANTIC_BACKEND}")
else:
print("unavailable")
result = {"available": SEMANTIC_AVAILABLE, "backend": SEMANTIC_BACKEND if SEMANTIC_AVAILABLE else None}
# Get ONNX providers for accelerator info
try:
import onnxruntime
providers = onnxruntime.get_available_providers()
result["providers"] = providers
# Determine accelerator type
if "CUDAExecutionProvider" in providers or "TensorrtExecutionProvider" in providers:
result["accelerator"] = "CUDA"
elif "DmlExecutionProvider" in providers:
result["accelerator"] = "DirectML"
elif "CoreMLExecutionProvider" in providers:
result["accelerator"] = "CoreML"
elif "ROCMExecutionProvider" in providers:
result["accelerator"] = "ROCm"
else:
result["accelerator"] = "CPU"
except:
result["providers"] = []
result["accelerator"] = "CPU"
print(json.dumps(result))
except Exception as e:
print(f"error:{e}")
print(json.dumps({"available": False, "error": str(e)}))
`;
const child = spawn(VENV_PYTHON, ['-c', checkCode], {
stdio: ['ignore', 'pipe', 'pipe'],
@@ -220,12 +243,16 @@ except Exception as e:
child.on('close', (code) => {
const output = stdout.trim();
if (output.startsWith('available:')) {
const backend = output.split(':')[1];
resolve({ available: true, backend });
} else if (output === 'unavailable') {
resolve({ available: false, error: 'Semantic dependencies not installed' });
} else {
try {
const result = JSON.parse(output);
resolve({
available: result.available || false,
backend: result.backend,
accelerator: result.accelerator || 'CPU',
providers: result.providers || [],
error: result.error
});
} catch {
resolve({ available: false, error: output || stderr || 'Unknown error' });
}
});
@@ -237,10 +264,66 @@ except Exception as e:
}
/**
* Install semantic search dependencies (fastembed, ONNX-based, ~200MB)
* GPU acceleration mode for semantic search
*/
type GpuMode = 'cpu' | 'cuda' | 'directml';
/**
* Detect available GPU acceleration
* @returns Detected GPU mode and info
*/
async function detectGpuSupport(): Promise<{ mode: GpuMode; available: GpuMode[]; info: string }> {
const available: GpuMode[] = ['cpu'];
let detectedInfo = 'CPU only';
// Check for NVIDIA GPU (CUDA)
try {
if (process.platform === 'win32') {
execSync('nvidia-smi', { stdio: 'pipe' });
available.push('cuda');
detectedInfo = 'NVIDIA GPU detected (CUDA available)';
} else {
execSync('which nvidia-smi', { stdio: 'pipe' });
available.push('cuda');
detectedInfo = 'NVIDIA GPU detected (CUDA available)';
}
} catch {
// NVIDIA not available
}
// On Windows, DirectML is always available if DirectX 12 is supported
if (process.platform === 'win32') {
try {
// Check for DirectX 12 support via dxdiag or registry
// DirectML works on most modern Windows 10/11 systems
available.push('directml');
if (available.includes('cuda')) {
detectedInfo = 'NVIDIA GPU detected (CUDA & DirectML available)';
} else {
detectedInfo = 'DirectML available (Windows GPU acceleration)';
}
} catch {
// DirectML check failed
}
}
// Recommend best available mode
let recommendedMode: GpuMode = 'cpu';
if (process.platform === 'win32' && available.includes('directml')) {
recommendedMode = 'directml'; // DirectML is easier on Windows (no CUDA toolkit needed)
} else if (available.includes('cuda')) {
recommendedMode = 'cuda';
}
return { mode: recommendedMode, available, info: detectedInfo };
}
/**
* Install semantic search dependencies with optional GPU acceleration
* @param gpuMode - GPU acceleration mode: 'cpu', 'cuda', or 'directml'
* @returns Bootstrap result
*/
async function installSemantic(): Promise<BootstrapResult> {
async function installSemantic(gpuMode: GpuMode = 'cpu'): Promise<BootstrapResult> {
// First ensure CodexLens is installed
const venvStatus = await checkVenvStatus();
if (!venvStatus.ready) {
@@ -252,42 +335,106 @@ async function installSemantic(): Promise<BootstrapResult> {
? join(CODEXLENS_VENV, 'Scripts', 'pip.exe')
: join(CODEXLENS_VENV, 'bin', 'pip');
return new Promise((resolve) => {
console.log('[CodexLens] Installing semantic search dependencies (fastembed)...');
console.log('[CodexLens] Using ONNX-based fastembed backend (~200MB)');
// IMPORTANT: Uninstall all onnxruntime variants first to prevent conflicts
// Having multiple onnxruntime packages causes provider detection issues
const onnxVariants = ['onnxruntime', 'onnxruntime-gpu', 'onnxruntime-directml'];
console.log(`[CodexLens] Cleaning up existing ONNX Runtime packages...`);
const child = spawn(pipPath, ['install', 'numpy>=1.24', 'fastembed>=0.2'], {
for (const pkg of onnxVariants) {
try {
execSync(`"${pipPath}" uninstall ${pkg} -y`, { stdio: 'pipe' });
console.log(`[CodexLens] Removed ${pkg}`);
} catch {
// Package not installed, ignore
}
}
// Build package list based on GPU mode
const packages = ['numpy>=1.24', 'fastembed>=0.5', 'hnswlib>=0.8.0'];
let modeDescription = 'CPU (ONNX Runtime)';
let onnxPackage = 'onnxruntime>=1.18.0'; // Default CPU
if (gpuMode === 'cuda') {
onnxPackage = 'onnxruntime-gpu>=1.18.0';
modeDescription = 'NVIDIA CUDA GPU acceleration';
} else if (gpuMode === 'directml') {
onnxPackage = 'onnxruntime-directml>=1.18.0';
modeDescription = 'Windows DirectML GPU acceleration';
}
return new Promise((resolve) => {
console.log(`[CodexLens] Installing semantic search dependencies...`);
console.log(`[CodexLens] Mode: ${modeDescription}`);
console.log(`[CodexLens] ONNX Runtime: ${onnxPackage}`);
console.log(`[CodexLens] Packages: ${packages.join(', ')}`);
// Install ONNX Runtime first with force-reinstall to ensure clean state
const installOnnx = spawn(pipPath, ['install', '--force-reinstall', onnxPackage], {
stdio: ['ignore', 'pipe', 'pipe'],
timeout: 600000, // 10 minutes for potential model download
timeout: 600000, // 10 minutes for GPU packages
});
let stdout = '';
let stderr = '';
let onnxStdout = '';
let onnxStderr = '';
child.stdout.on('data', (data) => {
stdout += data.toString();
// Log progress
installOnnx.stdout.on('data', (data) => {
onnxStdout += data.toString();
const line = data.toString().trim();
if (line.includes('Downloading') || line.includes('Installing') || line.includes('Collecting')) {
if (line.includes('Downloading') || line.includes('Installing')) {
console.log(`[CodexLens] ${line}`);
}
});
child.stderr.on('data', (data) => {
stderr += data.toString();
installOnnx.stderr.on('data', (data) => {
onnxStderr += data.toString();
});
child.on('close', (code) => {
if (code === 0) {
console.log('[CodexLens] Semantic dependencies installed successfully');
resolve({ success: true });
} else {
resolve({ success: false, error: `Installation failed: ${stderr || stdout}` });
installOnnx.on('close', (onnxCode) => {
if (onnxCode !== 0) {
resolve({ success: false, error: `Failed to install ${onnxPackage}: ${onnxStderr || onnxStdout}` });
return;
}
console.log(`[CodexLens] ${onnxPackage} installed successfully`);
// Now install remaining packages
const child = spawn(pipPath, ['install', ...packages], {
stdio: ['ignore', 'pipe', 'pipe'],
timeout: 600000,
});
let stdout = '';
let stderr = '';
child.stdout.on('data', (data) => {
stdout += data.toString();
const line = data.toString().trim();
if (line.includes('Downloading') || line.includes('Installing') || line.includes('Collecting')) {
console.log(`[CodexLens] ${line}`);
}
});
child.stderr.on('data', (data) => {
stderr += data.toString();
});
child.on('close', (code) => {
if (code === 0) {
console.log(`[CodexLens] Semantic dependencies installed successfully (${gpuMode} mode)`);
resolve({ success: true, message: `Installed with ${modeDescription}` });
} else {
resolve({ success: false, error: `Installation failed: ${stderr || stdout}` });
}
});
child.on('error', (err) => {
resolve({ success: false, error: `Failed to run pip: ${err.message}` });
});
});
child.on('error', (err) => {
resolve({ success: false, error: `Failed to run pip: ${err.message}` });
installOnnx.on('error', (err) => {
resolve({ success: false, error: `Failed to install ONNX Runtime: ${err.message}` });
});
});
}
@@ -1126,7 +1273,8 @@ function isIndexingInProgress(): boolean {
export type { ProgressInfo, ExecuteOptions };
// Export for direct usage
export { ensureReady, executeCodexLens, checkVenvStatus, bootstrapVenv, checkSemanticStatus, installSemantic, uninstallCodexLens, cancelIndexing, isIndexingInProgress };
export { ensureReady, executeCodexLens, checkVenvStatus, bootstrapVenv, checkSemanticStatus, installSemantic, detectGpuSupport, uninstallCodexLens, cancelIndexing, isIndexingInProgress };
export type { GpuMode };
// Backward-compatible export for tests
export const codexLensTool = {

View File

@@ -1,6 +1,6 @@
Metadata-Version: 2.4
Name: codex-lens
Version: 0.1.0
Version: 0.2.0
Summary: CodexLens multi-modal code analysis platform
Author: CodexLens contributors
License: MIT
@@ -17,7 +17,18 @@ Requires-Dist: tree-sitter-typescript>=0.23
Requires-Dist: pathspec>=0.11
Provides-Extra: semantic
Requires-Dist: numpy>=1.24; extra == "semantic"
Requires-Dist: fastembed>=0.2; extra == "semantic"
Requires-Dist: fastembed>=0.5; extra == "semantic"
Requires-Dist: hnswlib>=0.8.0; extra == "semantic"
Provides-Extra: semantic-gpu
Requires-Dist: numpy>=1.24; extra == "semantic-gpu"
Requires-Dist: fastembed>=0.5; extra == "semantic-gpu"
Requires-Dist: hnswlib>=0.8.0; extra == "semantic-gpu"
Requires-Dist: onnxruntime-gpu>=1.18.0; extra == "semantic-gpu"
Provides-Extra: semantic-directml
Requires-Dist: numpy>=1.24; extra == "semantic-directml"
Requires-Dist: fastembed>=0.5; extra == "semantic-directml"
Requires-Dist: hnswlib>=0.8.0; extra == "semantic-directml"
Requires-Dist: onnxruntime-directml>=1.18.0; extra == "semantic-directml"
Provides-Extra: encoding
Requires-Dist: chardet>=5.0; extra == "encoding"
Provides-Extra: full

View File

@@ -11,8 +11,11 @@ src/codexlens/entities.py
src/codexlens/errors.py
src/codexlens/cli/__init__.py
src/codexlens/cli/commands.py
src/codexlens/cli/embedding_manager.py
src/codexlens/cli/model_manager.py
src/codexlens/cli/output.py
src/codexlens/indexing/__init__.py
src/codexlens/indexing/symbol_extractor.py
src/codexlens/parsers/__init__.py
src/codexlens/parsers/encoding.py
src/codexlens/parsers/factory.py
@@ -20,15 +23,16 @@ src/codexlens/parsers/tokenizer.py
src/codexlens/parsers/treesitter_parser.py
src/codexlens/search/__init__.py
src/codexlens/search/chain_search.py
src/codexlens/search/enrichment.py
src/codexlens/search/hybrid_search.py
src/codexlens/search/query_parser.py
src/codexlens/search/ranking.py
src/codexlens/semantic/__init__.py
src/codexlens/semantic/ann_index.py
src/codexlens/semantic/chunker.py
src/codexlens/semantic/code_extractor.py
src/codexlens/semantic/embedder.py
src/codexlens/semantic/graph_analyzer.py
src/codexlens/semantic/llm_enhancer.py
src/codexlens/semantic/gpu_support.py
src/codexlens/semantic/vector_store.py
src/codexlens/storage/__init__.py
src/codexlens/storage/dir_index.py
@@ -42,38 +46,38 @@ src/codexlens/storage/sqlite_utils.py
src/codexlens/storage/migrations/__init__.py
src/codexlens/storage/migrations/migration_001_normalize_keywords.py
src/codexlens/storage/migrations/migration_002_add_token_metadata.py
src/codexlens/storage/migrations/migration_003_code_relationships.py
src/codexlens/storage/migrations/migration_004_dual_fts.py
src/codexlens/storage/migrations/migration_005_cleanup_unused_fields.py
tests/test_chain_search_engine.py
tests/test_ann_index.py
tests/test_cli_hybrid_search.py
tests/test_cli_output.py
tests/test_code_extractor.py
tests/test_config.py
tests/test_dual_fts.py
tests/test_encoding.py
tests/test_enrichment.py
tests/test_entities.py
tests/test_errors.py
tests/test_file_cache.py
tests/test_graph_analyzer.py
tests/test_graph_cli.py
tests/test_graph_storage.py
tests/test_hybrid_chunker.py
tests/test_hybrid_search_e2e.py
tests/test_incremental_indexing.py
tests/test_llm_enhancer.py
tests/test_parser_integration.py
tests/test_parsers.py
tests/test_performance_optimizations.py
tests/test_pure_vector_search.py
tests/test_query_parser.py
tests/test_result_grouping.py
tests/test_rrf_fusion.py
tests/test_schema_cleanup_migration.py
tests/test_search_comparison.py
tests/test_search_comprehensive.py
tests/test_search_full_coverage.py
tests/test_search_performance.py
tests/test_semantic.py
tests/test_semantic_search.py
tests/test_storage.py
tests/test_symbol_extractor.py
tests/test_token_chunking.py
tests/test_token_storage.py
tests/test_tokenizer.py

View File

@@ -15,4 +15,17 @@ tiktoken>=0.5.0
[semantic]
numpy>=1.24
fastembed>=0.2
fastembed>=0.5
hnswlib>=0.8.0
[semantic-directml]
numpy>=1.24
fastembed>=0.5
hnswlib>=0.8.0
onnxruntime-directml>=1.18.0
[semantic-gpu]
numpy>=1.24
fastembed>=0.5
hnswlib>=0.8.0
onnxruntime-gpu>=1.18.0
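
These extras mirror what installSemantic() installs package by package. For completeness, a sketch of installing them through pip extras instead, reusing the venv pip path from the tool; it assumes codex-lens is available from a local checkout or an index that publishes these extras:

import { execSync } from 'node:child_process';
import { join } from 'node:path';
import { homedir } from 'node:os';

// Venv location per the uninstall text above (~/.codexlens/venv); resolution here is illustrative.
const CODEXLENS_VENV = join(homedir(), '.codexlens', 'venv');
const pipPath = process.platform === 'win32'
  ? join(CODEXLENS_VENV, 'Scripts', 'pip.exe')
  : join(CODEXLENS_VENV, 'bin', 'pip');

// Extras: "semantic" (CPU), "semantic-gpu" (NVIDIA CUDA), "semantic-directml" (Windows DirectML).
const extra = process.platform === 'win32' ? 'semantic-directml' : 'semantic';
execSync(`"${pipPath}" install "codex-lens[${extra}]"`, { stdio: 'inherit' });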