Add comprehensive code review specifications and templates

- Introduced best practices requirements specification covering code quality, performance, maintainability, error handling, and documentation standards.
- Established quality standards with overall quality metrics and mandatory checks for security, code quality, performance, and maintainability.
- Created security requirements specification aligned with OWASP Top 10 and CWE Top 25, detailing checks and patterns for common vulnerabilities.
- Developed templates for documenting best practice findings, security findings, and generating reports, including structured markdown and JSON formats.
- Updated project dependencies to maintain compatibility and stability.
- Added test files and README documentation for vector indexing tests.
This commit is contained in:
catlog22
2026-01-06 23:11:15 +08:00
parent 02d66325a0
commit ef770ff29b
32 changed files with 4530 additions and 164 deletions

View File

@@ -791,8 +791,12 @@ async function execAction(positionalPrompt: string | undefined, options: CliExec
}, onOutput); // Always pass onOutput for real-time dashboard streaming
// If not streaming (default), print output now
if (!stream && result.stdout) {
console.log(result.stdout);
// Prefer parsedOutput (from stream parser) over raw stdout for better formatting
if (!stream) {
const output = result.parsedOutput || result.stdout;
if (output) {
console.log(output);
}
}
// Print summary with execution ID and turn info

View File

@@ -622,7 +622,7 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
// API: CodexLens Init (Initialize workspace index)
if (pathname === '/api/codexlens/init' && req.method === 'POST') {
handlePostRequest(req, res, async (body) => {
const { path: projectPath, indexType = 'vector', embeddingModel = 'code', embeddingBackend = 'fastembed', maxWorkers = 1 } = body;
const { path: projectPath, indexType = 'vector', embeddingModel = 'code', embeddingBackend = 'fastembed', maxWorkers = 1, incremental = true } = body;
const targetPath = projectPath || initialPath;
// Ensure LiteLLM backend dependencies are installed before running the CLI
@@ -636,6 +636,13 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
// Build CLI arguments based on index type
// Use 'index init' subcommand (new CLI structure)
const args = ['index', 'init', targetPath, '--json'];
// Force mode: when incremental=false, add --force to rebuild all files
// CLI defaults to incremental mode (skip unchanged files)
if (!incremental) {
args.push('--force');
}
if (indexType === 'normal') {
args.push('--no-embeddings');
} else {
@@ -728,6 +735,98 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
return true;
}
// API: Generate embeddings only (without FTS rebuild)
if (pathname === '/api/codexlens/embeddings/generate' && req.method === 'POST') {
handlePostRequest(req, res, async (body) => {
const { path: projectPath, incremental = false, backend = 'litellm', maxWorkers = 4, model } = body;
const targetPath = projectPath || initialPath;
// Ensure LiteLLM backend dependencies are installed
if (backend === 'litellm') {
try {
await ensureLiteLLMEmbedderReady();
} catch (err) {
return { success: false, error: `LiteLLM embedder setup failed: ${err.message}` };
}
}
// Build CLI arguments for embeddings generation
// Use 'index embeddings' subcommand
const args = ['index', 'embeddings', targetPath, '--json'];
// Add backend option
if (backend && backend !== 'fastembed') {
args.push('--backend', backend);
}
// Add model if specified
if (model) {
args.push('--model', model);
}
// Add max workers for API backend
if (backend === 'litellm' && maxWorkers > 1) {
args.push('--max-workers', String(maxWorkers));
}
// Force mode: always use --force for litellm backend to avoid model conflict
// (litellm uses different embeddings than fastembed, so regeneration is required)
// For true incremental updates with same model, use fastembed backend
if (!incremental || backend === 'litellm') {
args.push('--force'); // Force regenerate embeddings
}
try {
// Broadcast progress start
broadcastToClients({
type: 'CODEXLENS_INDEX_PROGRESS',
payload: { stage: 'embeddings', message: 'Generating embeddings...', percent: 10 }
});
const result = await executeCodexLens(args, {
cwd: targetPath,
onProgress: (progress: ProgressInfo) => {
broadcastToClients({
type: 'CODEXLENS_INDEX_PROGRESS',
payload: {
stage: 'embeddings',
message: progress.message || 'Processing...',
percent: progress.percent || 50
}
});
}
});
if (result.success) {
broadcastToClients({
type: 'CODEXLENS_INDEX_PROGRESS',
payload: { stage: 'complete', message: 'Embeddings generated', percent: 100 }
});
try {
const parsed = extractJSON(result.output || '{}');
return { success: true, result: parsed };
} catch {
return { success: true, result: { message: 'Embeddings generated successfully' } };
}
} else {
broadcastToClients({
type: 'CODEXLENS_INDEX_PROGRESS',
payload: { stage: 'error', message: result.error || 'Failed', percent: 0 }
});
return { success: false, error: result.error };
}
} catch (err) {
broadcastToClients({
type: 'CODEXLENS_INDEX_PROGRESS',
payload: { stage: 'error', message: err.message, percent: 0 }
});
return { success: false, error: err.message, status: 500 };
}
});
return true;
}
// API: CodexLens Semantic Search Status
if (pathname === '/api/codexlens/semantic/status') {
const status = await checkSemanticStatus();

View File

@@ -429,34 +429,45 @@ async function generateSkillViaCLI({ generationType, description, skillName, loc
await fsPromises.mkdir(baseDir, { recursive: true });
}
// Build CLI prompt
// Build structured skill parameters for /skill-generator
const targetLocationDisplay = location === 'project'
? '.claude/skills/'
: '~/.claude/skills/';
const prompt = `PURPOSE: Generate a complete Claude Code skill from description
TASK: • Parse skill requirements • Create SKILL.md with proper frontmatter (name, description, version, allowed-tools) • Generate supporting files if needed in skill folder
MODE: write
CONTEXT: @**/*
EXPECTED: Complete skill folder structure with SKILL.md and all necessary files
RULES: $(cat ~/.claude/workflows/cli-templates/prompts/universal/00-universal-rigorous-style.txt) | Follow Claude Code skill format | Include name, description in frontmatter | write=CREATE
// Structured fields from user input
const skillParams = {
skill_name: skillName,
description: description || 'Generate a basic skill template',
target_location: targetLocationDisplay,
target_path: targetPath,
location_type: location // 'project' | 'user'
};
SKILL DESCRIPTION:
${description || 'Generate a basic skill template'}
// Prompt that invokes /skill-generator skill with structured parameters
const prompt = `/skill-generator
SKILL NAME: ${skillName}
TARGET LOCATION: ${targetLocationDisplay}
TARGET PATH: ${targetPath}
## Skill Parameters (Structured Input)
REQUIREMENTS:
1. Create SKILL.md with frontmatter containing:
- name: "${skillName}"
- description: Brief description of the skill
- version: "1.0.0"
- allowed-tools: List of tools this skill can use (e.g., [Read, Write, Edit, Bash])
2. Add skill content below frontmatter explaining what the skill does and how to use it
3. If the skill requires supporting files (e.g., templates, scripts), create them in the skill folder
4. Ensure all files are properly formatted and follow best practices`;
\`\`\`json
${JSON.stringify(skillParams, null, 2)}
\`\`\`
## User Request
Create a new Claude Code skill with the following specifications:
- **Skill Name**: ${skillName}
- **Description**: ${description || 'Generate a basic skill template'}
- **Target Location**: ${targetLocationDisplay}${skillName}
- **Location Type**: ${location === 'project' ? 'Project-level (.claude/skills/)' : 'User-level (~/.claude/skills/)'}
## Instructions
1. Use the skill-generator to create a complete skill structure
2. Generate SKILL.md with proper frontmatter (name, description, version, allowed-tools)
3. Create necessary supporting files (phases, specs, templates as needed)
4. Follow Claude Code skill design patterns and best practices
5. Output all files to: ${targetPath}`;
// Execute CLI tool (Claude) with write mode
const result = await executeCliTool({

View File

@@ -299,10 +299,30 @@
color: hsl(38 92% 50%);
}
.icon-btn.favorite-active svg {
stroke: hsl(38 92% 50%);
fill: hsl(38 92% 50% / 0.2);
}
.icon-btn.favorite-active:hover {
color: hsl(38 92% 40%);
}
.icon-btn.favorite-active:hover svg {
stroke: hsl(38 92% 40%);
fill: hsl(38 92% 40% / 0.3);
}
/* Favorite star icon in memory-id */
.favorite-star {
color: hsl(38 92% 50%);
}
.favorite-star svg {
stroke: hsl(38 92% 50%);
fill: hsl(38 92% 50% / 0.2);
}
.icon-btn i {
width: 18px;
height: 18px;

View File

@@ -429,6 +429,136 @@
color: hsl(200 80% 70%);
}
/* ===== Formatted Message Types ===== */
.cli-stream-line.formatted {
display: flex;
align-items: flex-start;
gap: 8px;
padding: 6px 10px;
margin: 2px 0;
border-radius: 4px;
transition: all 0.15s ease;
color: hsl(0 0% 90%); /* Ensure text is visible */
}
.cli-stream-line.formatted:hover {
background: hsl(0 0% 100% / 0.05);
}
/* Message Badge */
.cli-msg-badge {
display: inline-flex;
align-items: center;
gap: 4px;
padding: 2px 8px;
border-radius: 4px;
font-size: 0.625rem;
font-weight: 600;
text-transform: uppercase;
letter-spacing: 0.3px;
white-space: nowrap;
flex-shrink: 0;
}
.cli-msg-badge i,
.cli-msg-badge svg {
width: 12px;
height: 12px;
}
.cli-msg-content {
flex: 1;
word-break: break-word;
}
/* System Message */
.cli-stream-line.formatted.system {
background: hsl(210 50% 20% / 0.3);
border-left: 3px solid hsl(210 80% 55%);
}
.cli-msg-badge.cli-msg-system {
background: hsl(210 80% 55% / 0.2);
color: hsl(210 80% 70%);
}
/* Thinking Message */
.cli-stream-line.formatted.thinking {
background: hsl(280 50% 20% / 0.3);
border-left: 3px solid hsl(280 70% 65%);
font-style: italic;
}
.cli-msg-badge.cli-msg-thinking {
background: hsl(280 70% 65% / 0.2);
color: hsl(280 70% 75%);
}
/* Response Message */
.cli-stream-line.formatted.response {
background: hsl(145 40% 18% / 0.3);
border-left: 3px solid hsl(145 60% 50%);
}
.cli-msg-badge.cli-msg-response {
background: hsl(145 60% 50% / 0.2);
color: hsl(145 60% 65%);
}
/* Result Message */
.cli-stream-line.formatted.result {
background: hsl(160 50% 18% / 0.4);
border-left: 3px solid hsl(160 80% 45%);
}
.cli-msg-badge.cli-msg-result {
background: hsl(160 80% 45% / 0.25);
color: hsl(160 80% 60%);
}
/* Error Message */
.cli-stream-line.formatted.error {
background: hsl(0 50% 20% / 0.4);
border-left: 3px solid hsl(0 70% 55%);
}
.cli-msg-badge.cli-msg-error {
background: hsl(0 70% 55% / 0.25);
color: hsl(0 70% 70%);
}
/* Warning Message */
.cli-stream-line.formatted.warning {
background: hsl(45 60% 18% / 0.4);
border-left: 3px solid hsl(45 80% 55%);
}
.cli-msg-badge.cli-msg-warning {
background: hsl(45 80% 55% / 0.25);
color: hsl(45 80% 65%);
}
/* Info Message */
.cli-stream-line.formatted.info {
background: hsl(200 50% 18% / 0.3);
border-left: 3px solid hsl(200 70% 60%);
}
.cli-msg-badge.cli-msg-info {
background: hsl(200 70% 60% / 0.2);
color: hsl(200 70% 70%);
}
/* Inline Code */
.cli-inline-code {
padding: 1px 5px;
background: hsl(0 0% 25%);
border-radius: 3px;
font-family: var(--font-mono, 'Consolas', 'Monaco', 'Courier New', monospace);
font-size: 0.85em;
color: hsl(45 80% 70%);
}
/* JSON/Code syntax coloring in output */
.cli-stream-line .json-key {
color: hsl(200 80% 70%);

View File

@@ -269,6 +269,106 @@ function handleCliStreamError(payload) {
updateStreamBadge();
}
// ===== Message Type Parsing =====
const MESSAGE_TYPE_PATTERNS = {
system: /^\[系统\]/,
thinking: /^\[思考\]/,
response: /^\[响应\]/,
result: /^\[结果\]/,
error: /^\[错误\]/,
warning: /^\[警告\]/,
info: /^\[信息\]/
};
const MESSAGE_TYPE_ICONS = {
system: 'settings',
thinking: 'brain',
response: 'message-circle',
result: 'check-circle',
error: 'alert-circle',
warning: 'alert-triangle',
info: 'info'
};
const MESSAGE_TYPE_LABELS = {
system: '系统',
thinking: '思考',
response: '响应',
result: '结果',
error: '错误',
warning: '警告',
info: '信息'
};
/**
* Parse message content to extract type and clean content
* @param {string} content - Raw message content
* @returns {{ type: string, label: string, content: string, hasPrefix: boolean }}
*/
function parseMessageType(content) {
for (const [type, pattern] of Object.entries(MESSAGE_TYPE_PATTERNS)) {
if (pattern.test(content)) {
return {
type,
label: MESSAGE_TYPE_LABELS[type],
content: content.replace(pattern, '').trim(),
hasPrefix: true
};
}
}
return {
type: 'default',
label: '',
content: content,
hasPrefix: false
};
}
/**
* Render a formatted message line with type badge
* @param {Object} line - Line object with type and content
* @param {string} searchFilter - Current search filter
* @returns {string} - HTML string
*/
function renderFormattedLine(line, searchFilter) {
const parsed = parseMessageType(line.content);
let content = escapeHtml(parsed.content);
// Apply search highlighting
if (searchFilter && searchFilter.trim()) {
const searchRegex = new RegExp(`(${escapeRegex(searchFilter)})`, 'gi');
content = content.replace(searchRegex, '<mark class="cli-stream-highlight">$1</mark>');
}
// Format code blocks
content = formatCodeBlocks(content);
// Format inline code
content = content.replace(/`([^`]+)`/g, '<code class="cli-inline-code">$1</code>');
// Build type badge if has prefix
const typeBadge = parsed.hasPrefix ?
`<span class="cli-msg-badge cli-msg-${parsed.type}">
<i data-lucide="${MESSAGE_TYPE_ICONS[parsed.type] || 'circle'}"></i>
<span>${parsed.label}</span>
</span>` : '';
// Determine line class based on original type and parsed type
const lineClass = parsed.hasPrefix ? `cli-stream-line formatted ${parsed.type}` :
`cli-stream-line ${line.type}`;
return `<div class="${lineClass}">${typeBadge}<span class="cli-msg-content">${content}</span></div>`;
}
/**
* Format code blocks in content
*/
function formatCodeBlocks(content) {
// Handle multi-line code blocks (already escaped)
// Just apply styling class for now
return content;
}
// ===== UI Rendering =====
function renderStreamTabs() {
const tabsContainer = document.getElementById('cliStreamTabs');
@@ -351,16 +451,15 @@ function renderStreamContent(executionId) {
);
}
// Render output lines with search highlighting
contentContainer.innerHTML = filteredOutput.map(line => {
let content = escapeHtml(line.content);
// Highlight search matches
if (searchFilter.trim()) {
const searchRegex = new RegExp(`(${escapeRegex(searchFilter)})`, 'gi');
content = content.replace(searchRegex, '<mark class="cli-stream-highlight">$1</mark>');
}
return `<div class="cli-stream-line ${line.type}">${content}</div>`;
}).join('');
// Render output lines with formatted styling
contentContainer.innerHTML = filteredOutput.map(line =>
renderFormattedLine(line, searchFilter)
).join('');
// Initialize Lucide icons for message badges
if (typeof lucide !== 'undefined') {
lucide.createIcons({ attrs: { class: 'cli-msg-icon' } });
}
// Show filter result count if filtering
if (searchFilter.trim() && filteredOutput.length !== exec.output.length) {

View File

@@ -298,6 +298,8 @@ const i18n = {
'codexlens.configuredInApiSettings': 'Configured in API Settings',
'codexlens.commonModels': 'Common Models',
'codexlens.selectApiModel': 'Select API model...',
'codexlens.selectLocalModel': 'Select local model...',
'codexlens.noConfiguredModels': 'No models configured in API Settings',
'codexlens.autoDownloadHint': 'Models are auto-downloaded on first use',
'codexlens.embeddingBackend': 'Embedding Backend',
'codexlens.localFastembed': 'Local (FastEmbed)',
@@ -2305,6 +2307,8 @@ const i18n = {
'codexlens.configuredInApiSettings': '已在 API 设置中配置',
'codexlens.commonModels': '常用模型',
'codexlens.selectApiModel': '选择 API 模型...',
'codexlens.selectLocalModel': '选择本地模型...',
'codexlens.noConfiguredModels': '未在 API 设置中配置模型',
'codexlens.autoDownloadHint': '模型会在首次使用时自动下载',
'codexlens.embeddingBackend': '嵌入后端',
'codexlens.localFastembed': '本地 (FastEmbed)',

View File

@@ -147,14 +147,40 @@ function buildCodexLensConfigContent(config) {
'</div>' +
'</div>' +
// Quick Actions
// Index Operations - 4 buttons grid
'<div class="space-y-2">' +
'<h4 class="text-xs font-medium text-muted-foreground uppercase tracking-wide mb-2">Quick Actions</h4>' +
'<div class="grid grid-cols-2 gap-2">' +
(isInstalled
? '<button class="flex items-center justify-center gap-2 px-3 py-2 text-sm font-medium rounded-lg border border-primary/30 bg-primary/5 text-primary hover:bg-primary/10 transition-colors" onclick="initCodexLensIndex()">' +
'<i data-lucide="refresh-cw" class="w-4 h-4"></i> Update Index' +
'<h4 class="text-xs font-medium text-muted-foreground uppercase tracking-wide mb-2">' + (t('codexlens.indexOperations') || 'Index Operations') + '</h4>' +
(isInstalled
? '<div class="grid grid-cols-2 gap-2">' +
// FTS Full Index
'<button class="flex items-center justify-center gap-2 px-3 py-2 text-sm font-medium rounded-lg border border-blue-500/30 bg-blue-500/5 text-blue-600 hover:bg-blue-500/10 transition-colors" onclick="runFtsFullIndex()" title="' + (t('codexlens.ftsFullIndexDesc') || 'Rebuild full-text search index') + '">' +
'<i data-lucide="file-text" class="w-4 h-4"></i> FTS ' + (t('codexlens.fullIndex') || 'Full') +
'</button>' +
// FTS Incremental
'<button class="flex items-center justify-center gap-2 px-3 py-2 text-sm font-medium rounded-lg border border-blue-500/30 bg-background text-blue-600 hover:bg-blue-500/5 transition-colors" onclick="runFtsIncrementalUpdate()" title="' + (t('codexlens.ftsIncrementalDesc') || 'Update FTS index for changed files') + '">' +
'<i data-lucide="file-plus" class="w-4 h-4"></i> FTS ' + (t('codexlens.incremental') || 'Incremental') +
'</button>' +
// Vector Full Index
'<button class="flex items-center justify-center gap-2 px-3 py-2 text-sm font-medium rounded-lg border border-purple-500/30 bg-purple-500/5 text-purple-600 hover:bg-purple-500/10 transition-colors" onclick="runVectorFullIndex()" title="' + (t('codexlens.vectorFullIndexDesc') || 'Generate all embeddings') + '">' +
'<i data-lucide="brain" class="w-4 h-4"></i> Vector ' + (t('codexlens.fullIndex') || 'Full') +
'</button>' +
// Vector Incremental
'<button class="flex items-center justify-center gap-2 px-3 py-2 text-sm font-medium rounded-lg border border-purple-500/30 bg-background text-purple-600 hover:bg-purple-500/5 transition-colors" onclick="runVectorIncrementalUpdate()" title="' + (t('codexlens.vectorIncrementalDesc') || 'Generate embeddings for new files only') + '">' +
'<i data-lucide="brain" class="w-4 h-4"></i> Vector ' + (t('codexlens.incremental') || 'Incremental') +
'</button>' +
'</div>'
: '<div class="grid grid-cols-2 gap-2">' +
'<button class="col-span-2 flex items-center justify-center gap-2 px-4 py-3 text-sm font-medium rounded-lg bg-primary text-primary-foreground hover:bg-primary/90 transition-colors" onclick="installCodexLensFromManager()">' +
'<i data-lucide="download" class="w-4 h-4"></i> Install CodexLens' +
'</button>' +
'</div>') +
'</div>' +
// Quick Actions
'<div class="space-y-2 mt-3">' +
'<h4 class="text-xs font-medium text-muted-foreground uppercase tracking-wide mb-2">' + (t('codexlens.quickActions') || 'Quick Actions') + '</h4>' +
(isInstalled
? '<div class="grid grid-cols-2 gap-2">' +
'<button class="flex items-center justify-center gap-2 px-3 py-2 text-sm font-medium rounded-lg border border-border bg-background hover:bg-muted/50 transition-colors" onclick="showWatcherControlModal()">' +
'<i data-lucide="eye" class="w-4 h-4"></i> File Watcher' +
'</button>' +
@@ -163,11 +189,9 @@ function buildCodexLensConfigContent(config) {
'</button>' +
'<button class="flex items-center justify-center gap-2 px-3 py-2 text-sm font-medium rounded-lg border border-border bg-background hover:bg-muted/50 transition-colors" onclick="cleanCurrentWorkspaceIndex()">' +
'<i data-lucide="eraser" class="w-4 h-4"></i> Clean Workspace' +
'</button>'
: '<button class="col-span-2 flex items-center justify-center gap-2 px-4 py-3 text-sm font-medium rounded-lg bg-primary text-primary-foreground hover:bg-primary/90 transition-colors" onclick="installCodexLensFromManager()">' +
'<i data-lucide="download" class="w-4 h-4"></i> Install CodexLens' +
'</button>') +
'</div>' +
'</button>' +
'</div>'
: '') +
'</div>' +
'</div>' +
@@ -684,9 +708,10 @@ var ENV_VAR_GROUPS = {
{ group: 'Jina', items: ['jina-embeddings-v3', 'jina-embeddings-v2-base-en', 'jina-embeddings-v2-base-zh'] }
]
},
'CODEXLENS_USE_GPU': { label: 'Use GPU', type: 'select', options: ['true', 'false'], default: 'true', settingsPath: 'embedding.use_gpu', showWhen: function(env) { return env['CODEXLENS_EMBEDDING_BACKEND'] !== 'litellm'; } },
'CODEXLENS_EMBEDDING_STRATEGY': { label: 'Load Balance', type: 'select', options: ['round_robin', 'latency_aware', 'weighted_random'], default: 'latency_aware', settingsPath: 'embedding.strategy', showWhen: function(env) { return env['CODEXLENS_EMBEDDING_BACKEND'] === 'litellm'; } },
'CODEXLENS_EMBEDDING_COOLDOWN': { label: 'Rate Limit Cooldown (s)', type: 'number', placeholder: '60', default: '60', settingsPath: 'embedding.cooldown', min: 0, max: 300, showWhen: function(env) { return env['CODEXLENS_EMBEDDING_BACKEND'] === 'litellm'; } }
'CODEXLENS_USE_GPU': { label: 'Use GPU', type: 'select', options: ['true', 'false'], default: 'true', settingsPath: 'embedding.use_gpu', showWhen: function(env) { return env['CODEXLENS_EMBEDDING_BACKEND'] === 'local'; } },
'CODEXLENS_EMBEDDING_POOL_ENABLED': { label: 'High Availability', type: 'select', options: ['true', 'false'], default: 'false', settingsPath: 'embedding.pool_enabled', showWhen: function(env) { return env['CODEXLENS_EMBEDDING_BACKEND'] === 'api'; } },
'CODEXLENS_EMBEDDING_STRATEGY': { label: 'Load Balance Strategy', type: 'select', options: ['round_robin', 'latency_aware', 'weighted_random'], default: 'latency_aware', settingsPath: 'embedding.strategy', showWhen: function(env) { return env['CODEXLENS_EMBEDDING_BACKEND'] === 'api' && env['CODEXLENS_EMBEDDING_POOL_ENABLED'] === 'true'; } },
'CODEXLENS_EMBEDDING_COOLDOWN': { label: 'Rate Limit Cooldown (s)', type: 'number', placeholder: '60', default: '60', settingsPath: 'embedding.cooldown', min: 0, max: 300, showWhen: function(env) { return env['CODEXLENS_EMBEDDING_BACKEND'] === 'api' && env['CODEXLENS_EMBEDDING_POOL_ENABLED'] === 'true'; } }
}
},
reranker: {
@@ -711,7 +736,10 @@ var ENV_VAR_GROUPS = {
{ group: 'Jina', items: ['jina-reranker-v2-base-multilingual', 'jina-reranker-v1-base-en'] }
]
},
'CODEXLENS_RERANKER_TOP_K': { label: 'Top K Results', type: 'number', placeholder: '50', default: '50', settingsPath: 'reranker.top_k', min: 5, max: 200 }
'CODEXLENS_RERANKER_TOP_K': { label: 'Top K Results', type: 'number', placeholder: '50', default: '50', settingsPath: 'reranker.top_k', min: 5, max: 200 },
'CODEXLENS_RERANKER_POOL_ENABLED': { label: 'High Availability', type: 'select', options: ['true', 'false'], default: 'false', settingsPath: 'reranker.pool_enabled', showWhen: function(env) { return env['CODEXLENS_RERANKER_BACKEND'] === 'api'; } },
'CODEXLENS_RERANKER_STRATEGY': { label: 'Load Balance Strategy', type: 'select', options: ['round_robin', 'latency_aware', 'weighted_random'], default: 'latency_aware', settingsPath: 'reranker.strategy', showWhen: function(env) { return env['CODEXLENS_RERANKER_BACKEND'] === 'api' && env['CODEXLENS_RERANKER_POOL_ENABLED'] === 'true'; } },
'CODEXLENS_RERANKER_COOLDOWN': { label: 'Rate Limit Cooldown (s)', type: 'number', placeholder: '60', default: '60', settingsPath: 'reranker.cooldown', min: 0, max: 300, showWhen: function(env) { return env['CODEXLENS_RERANKER_BACKEND'] === 'api' && env['CODEXLENS_RERANKER_POOL_ENABLED'] === 'true'; } }
}
},
concurrency: {
@@ -730,15 +758,6 @@ var ENV_VAR_GROUPS = {
'CODEXLENS_CASCADE_COARSE_K': { label: 'Coarse K (1st stage)', type: 'number', placeholder: '100', default: '100', settingsPath: 'cascade.coarse_k', min: 10, max: 500 },
'CODEXLENS_CASCADE_FINE_K': { label: 'Fine K (final)', type: 'number', placeholder: '10', default: '10', settingsPath: 'cascade.fine_k', min: 1, max: 100 }
}
},
llm: {
labelKey: 'codexlens.envGroup.llm',
icon: 'sparkles',
collapsed: true,
vars: {
'CODEXLENS_LLM_ENABLED': { label: 'Enable LLM', type: 'select', options: ['true', 'false'], default: 'false', settingsPath: 'llm.enabled' },
'CODEXLENS_LLM_BATCH_SIZE': { label: 'Batch Size', type: 'number', placeholder: '5', default: '5', settingsPath: 'llm.batch_size', min: 1, max: 20 }
}
}
};
@@ -859,12 +878,11 @@ async function loadEnvVariables() {
for (var key in group.vars) {
var config = group.vars[key];
// Check variable-level showWhen condition
if (config.showWhen && !config.showWhen(env)) {
continue;
}
// Check variable-level showWhen condition - render but hide if condition is false
var shouldShow = !config.showWhen || config.showWhen(env);
var hiddenStyle = shouldShow ? '' : ' style="display:none"';
// Priority: env file > settings.json > hardcoded default
var value = env[key] || settings[key] || config.default || '';
@@ -874,7 +892,7 @@ async function loadEnvVariables() {
if (key === 'CODEXLENS_EMBEDDING_BACKEND' || key === 'CODEXLENS_RERANKER_BACKEND') {
onchangeHandler = ' onchange="updateModelOptionsOnBackendChange(\'' + key + '\', this.value)"';
}
html += '<div class="flex items-center gap-2">' +
html += '<div class="flex items-center gap-2"' + hiddenStyle + '>' +
'<label class="text-xs text-muted-foreground w-28 flex-shrink-0">' + escapeHtml(config.label) + '</label>' +
'<select class="tool-config-input flex-1 text-xs py-1" data-env-key="' + escapeHtml(key) + '"' + onchangeHandler + '>';
config.options.forEach(function(opt) {
@@ -897,7 +915,7 @@ async function loadEnvVariables() {
// Fallback preset list for API models
var apiModelList = config.apiModels || [];
html += '<div class="flex items-center gap-2">' +
html += '<div class="flex items-center gap-2"' + hiddenStyle + '>' +
'<label class="text-xs text-muted-foreground w-28 flex-shrink-0" title="' + escapeHtml(key) + '">' + escapeHtml(config.label) + '</label>' +
'<div class="relative flex-1">' +
'<input type="text" class="tool-config-input w-full text-xs py-1 pr-6" ' +
@@ -908,7 +926,8 @@ async function loadEnvVariables() {
'<datalist id="' + datalistId + '">';
if (isApiBackend) {
// For API backend: show configured models from API settings first
// For API backend: show ONLY configured models from API settings
// (don't show unconfigured preset models - they won't work without configuration)
if (configuredModels.length > 0) {
html += '<option value="" disabled>-- ' + (t('codexlens.configuredModels') || 'Configured in API Settings') + ' --</option>';
configuredModels.forEach(function(model) {
@@ -918,19 +937,8 @@ async function loadEnvVariables() {
(providers ? ' (' + escapeHtml(providers) + ')' : '') +
'</option>';
});
}
// Then show common API models as suggestions
if (apiModelList.length > 0) {
html += '<option value="" disabled>-- ' + (t('codexlens.commonModels') || 'Common Models') + ' --</option>';
apiModelList.forEach(function(group) {
group.items.forEach(function(model) {
// Skip if already in configured list
var exists = configuredModels.some(function(m) { return m.modelId === model; });
if (!exists) {
html += '<option value="' + escapeHtml(model) + '">' + escapeHtml(group.group) + ': ' + escapeHtml(model) + '</option>';
}
});
});
} else {
html += '<option value="" disabled>-- ' + (t('codexlens.noConfiguredModels') || 'No models configured in API Settings') + ' --</option>';
}
} else {
// For local backend (fastembed): show actually downloaded models
@@ -959,7 +967,7 @@ async function loadEnvVariables() {
if (config.max !== undefined) extraAttrs += ' max="' + config.max + '"';
extraAttrs += ' step="1"';
}
html += '<div class="flex items-center gap-2">' +
html += '<div class="flex items-center gap-2"' + hiddenStyle + '>' +
'<label class="text-xs text-muted-foreground w-28 flex-shrink-0" title="' + escapeHtml(key) + '">' + escapeHtml(config.label) + '</label>' +
'<input type="' + inputType + '" class="tool-config-input flex-1 text-xs py-1" ' +
'data-env-key="' + escapeHtml(key) + '" value="' + escapeHtml(value) + '" placeholder="' + escapeHtml(config.placeholder || '') + '"' + extraAttrs + ' />' +
@@ -1021,7 +1029,8 @@ async function loadEnvVariables() {
var optionsHtml = '';
if (isApiBackend) {
// For API backend: show configured models from API settings first
// For API backend: show ONLY configured models from API settings
// (don't show unconfigured preset models - they won't work without configuration)
if (apiConfiguredModels.length > 0) {
optionsHtml += '<option value="" disabled>-- ' + (t('codexlens.configuredModels') || 'Configured in API Settings') + ' --</option>';
apiConfiguredModels.forEach(function(model) {
@@ -1031,18 +1040,8 @@ async function loadEnvVariables() {
(providers ? ' (' + escapeHtml(providers) + ')' : '') +
'</option>';
});
}
// Then show common API models as suggestions
if (apiModelList.length > 0) {
optionsHtml += '<option value="" disabled>-- ' + (t('codexlens.commonModels') || 'Common Models') + ' --</option>';
apiModelList.forEach(function(group) {
group.items.forEach(function(model) {
var exists = apiConfiguredModels.some(function(m) { return m.modelId === model; });
if (!exists) {
optionsHtml += '<option value="' + escapeHtml(model) + '">' + escapeHtml(group.group) + ': ' + escapeHtml(model) + '</option>';
}
});
});
} else {
optionsHtml += '<option value="" disabled>-- ' + (t('codexlens.noConfiguredModels') || 'No models configured in API Settings') + ' --</option>';
}
} else {
// For local backend: show actually downloaded models
@@ -1070,9 +1069,65 @@ async function loadEnvVariables() {
}
}
}
// Update visibility of dependent fields based on new backend value
var prefix = isEmbedding ? 'CODEXLENS_EMBEDDING_' : 'CODEXLENS_RERANKER_';
var gpuField = document.querySelector('[data-env-key="' + prefix + 'USE_GPU"]');
var poolField = document.querySelector('[data-env-key="' + prefix + 'POOL_ENABLED"]');
var strategyField = document.querySelector('[data-env-key="' + prefix + 'STRATEGY"]');
var cooldownField = document.querySelector('[data-env-key="' + prefix + 'COOLDOWN"]');
// GPU only for local backend
if (gpuField) {
var gpuRow = gpuField.closest('.flex.items-center');
if (gpuRow) gpuRow.style.display = isApiBackend ? 'none' : '';
}
// Pool, Strategy, Cooldown only for API backend
if (poolField) {
var poolRow = poolField.closest('.flex.items-center');
if (poolRow) poolRow.style.display = isApiBackend ? '' : 'none';
// Reset pool value when switching to local
if (!isApiBackend) poolField.value = 'false';
}
// Strategy and Cooldown depend on pool being enabled
var poolEnabled = poolField && poolField.value === 'true';
if (strategyField) {
var strategyRow = strategyField.closest('.flex.items-center');
if (strategyRow) strategyRow.style.display = (isApiBackend && poolEnabled) ? '' : 'none';
}
if (cooldownField) {
var cooldownRow = cooldownField.closest('.flex.items-center');
if (cooldownRow) cooldownRow.style.display = (isApiBackend && poolEnabled) ? '' : 'none';
}
// Note: No auto-save here - user must click Save button
});
});
// Add change handler for pool_enabled selects to show/hide strategy and cooldown
var poolSelects = container.querySelectorAll('select[data-env-key*="POOL_ENABLED"]');
poolSelects.forEach(function(select) {
select.addEventListener('change', function() {
var poolKey = select.getAttribute('data-env-key');
var poolEnabled = select.value === 'true';
var isEmbedding = poolKey.indexOf('EMBEDDING') !== -1;
var prefix = isEmbedding ? 'CODEXLENS_EMBEDDING_' : 'CODEXLENS_RERANKER_';
var strategyField = document.querySelector('[data-env-key="' + prefix + 'STRATEGY"]');
var cooldownField = document.querySelector('[data-env-key="' + prefix + 'COOLDOWN"]');
if (strategyField) {
var strategyRow = strategyField.closest('.flex.items-center');
if (strategyRow) strategyRow.style.display = poolEnabled ? '' : 'none';
}
if (cooldownField) {
var cooldownRow = cooldownField.closest('.flex.items-center');
if (cooldownRow) cooldownRow.style.display = poolEnabled ? '' : 'none';
}
});
});
} catch (err) {
container.innerHTML = '<div class="text-xs text-error">' + escapeHtml(err.message) + '</div>';
}
@@ -2213,6 +2268,9 @@ async function loadModelList() {
'<div class="flex items-center gap-2">' +
statusIcon +
'<span class="text-sm font-medium">' + model.profile + '</span>' +
'<button class="text-muted-foreground hover:text-foreground p-0.5" onclick="copyToClipboard(\'' + escapeHtml(model.model_name) + '\')" title="' + escapeHtml(model.model_name) + '">' +
'<i data-lucide="copy" class="w-3 h-3"></i>' +
'</button>' +
'<span class="text-xs text-muted-foreground">' + model.dimensions + 'd</span>' +
'</div>' +
'<div class="flex items-center gap-3">' +
@@ -2491,6 +2549,9 @@ async function loadRerankerModelList() {
'<div class="flex items-center gap-2">' +
statusIcon +
'<span class="text-sm font-medium">' + model.id + recBadge + '</span>' +
'<button class="text-muted-foreground hover:text-foreground p-0.5" onclick="copyToClipboard(\'' + escapeHtml(model.name) + '\')" title="' + escapeHtml(model.name) + '">' +
'<i data-lucide="copy" class="w-3 h-3"></i>' +
'</button>' +
'<span class="text-xs text-muted-foreground">' + model.desc + '</span>' +
'</div>' +
'<div class="flex items-center gap-3">' +
@@ -2901,12 +2962,14 @@ async function updateSemanticStatusBadge() {
* @param {string} embeddingModel - Model profile: 'code', 'fast'
* @param {string} embeddingBackend - Backend: 'fastembed' (local) or 'litellm' (API)
* @param {number} maxWorkers - Max concurrent API calls for embedding generation (default: 1)
* @param {boolean} incremental - Incremental mode: true=skip unchanged, false=full rebuild (default: false)
*/
async function initCodexLensIndex(indexType, embeddingModel, embeddingBackend, maxWorkers) {
async function initCodexLensIndex(indexType, embeddingModel, embeddingBackend, maxWorkers, incremental) {
indexType = indexType || 'vector';
embeddingModel = embeddingModel || 'code';
embeddingBackend = embeddingBackend || 'fastembed';
maxWorkers = maxWorkers || 1;
incremental = incremental !== undefined ? incremental : false; // Default: full rebuild
// For vector/full index with local backend, check if semantic dependencies are available
// LiteLLM backend uses remote embeddings and does not require fastembed/ONNX deps.
@@ -3011,7 +3074,7 @@ async function initCodexLensIndex(indexType, embeddingModel, embeddingBackend, m
var apiIndexType = (indexType === 'full') ? 'vector' : indexType;
// Start indexing with specified type and model
startCodexLensIndexing(apiIndexType, embeddingModel, embeddingBackend, maxWorkers);
startCodexLensIndexing(apiIndexType, embeddingModel, embeddingBackend, maxWorkers, incremental);
}
/**
@@ -3020,12 +3083,14 @@ async function initCodexLensIndex(indexType, embeddingModel, embeddingBackend, m
* @param {string} embeddingModel - Model profile: 'code', 'fast'
* @param {string} embeddingBackend - Backend: 'fastembed' (local) or 'litellm' (API)
* @param {number} maxWorkers - Max concurrent API calls for embedding generation (default: 1)
* @param {boolean} incremental - Incremental mode (default: false for full rebuild)
*/
async function startCodexLensIndexing(indexType, embeddingModel, embeddingBackend, maxWorkers) {
async function startCodexLensIndexing(indexType, embeddingModel, embeddingBackend, maxWorkers, incremental) {
indexType = indexType || 'vector';
embeddingModel = embeddingModel || 'code';
embeddingBackend = embeddingBackend || 'fastembed';
maxWorkers = maxWorkers || 1;
incremental = incremental !== undefined ? incremental : false; // Default: full rebuild
var statusText = document.getElementById('codexlensIndexStatus');
var progressBar = document.getElementById('codexlensIndexProgressBar');
var percentText = document.getElementById('codexlensIndexPercent');
@@ -3057,11 +3122,11 @@ async function startCodexLensIndexing(indexType, embeddingModel, embeddingBacken
}
try {
console.log('[CodexLens] Starting index for:', projectPath, 'type:', indexType, 'model:', embeddingModel, 'backend:', embeddingBackend, 'maxWorkers:', maxWorkers);
console.log('[CodexLens] Starting index for:', projectPath, 'type:', indexType, 'model:', embeddingModel, 'backend:', embeddingBackend, 'maxWorkers:', maxWorkers, 'incremental:', incremental);
var response = await fetch('/api/codexlens/init', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ path: projectPath, indexType: indexType, embeddingModel: embeddingModel, embeddingBackend: embeddingBackend, maxWorkers: maxWorkers })
body: JSON.stringify({ path: projectPath, indexType: indexType, embeddingModel: embeddingModel, embeddingBackend: embeddingBackend, maxWorkers: maxWorkers, incremental: incremental })
});
var result = await response.json();
@@ -4165,6 +4230,121 @@ function initCodexLensIndexFromPage(indexType) {
}
}
// ============================================================
// INDEX OPERATIONS - 4 Button Functions
// ============================================================
/**
 * Rebuild the full-text search index from scratch.
 * FTS-only: no embeddings are generated.
 */
window.runFtsFullIndex = async function runFtsFullIndex() {
  var message = t('codexlens.startingFtsFullIndex') || 'Starting FTS full index...';
  showRefreshToast(message, 'info');
  // indexType 'normal' = FTS-only; incremental=false forces a full rebuild
  initCodexLensIndex('normal', null, 'fastembed', 1, false);
}
/**
 * Incrementally update the FTS index.
 * Only files that changed since the last run are re-indexed.
 */
window.runFtsIncrementalUpdate = async function runFtsIncrementalUpdate() {
  var projectPath = window.CCW_PROJECT_ROOT || '.';
  showRefreshToast(t('codexlens.startingFtsIncremental') || 'Starting FTS incremental update...', 'info');
  try {
    // FTS incremental goes through the init endpoint with incremental=true
    var payload = {
      path: projectPath,
      indexType: 'normal', // FTS only
      incremental: true
    };
    var response = await fetch('/api/codexlens/init', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(payload)
    });
    var result = await response.json();
    if (!result.success) {
      showRefreshToast((t('codexlens.ftsIncrementalFailed') || 'FTS incremental failed') + ': ' + (result.error || 'Unknown error'), 'error');
      return;
    }
    showRefreshToast(t('codexlens.ftsIncrementalComplete') || 'FTS incremental update completed', 'success');
    renderCodexLensManager();
  } catch (err) {
    showRefreshToast((t('common.error') || 'Error') + ': ' + err.message, 'error');
  }
}
/**
 * Generate embeddings for all files (full vector index build).
 * Resolves the embedding model from env settings, falling back to the
 * default 'code' profile if the lookup fails.
 */
window.runVectorFullIndex = async function runVectorFullIndex() {
  showRefreshToast(t('codexlens.startingVectorFullIndex') || 'Starting Vector full index...', 'info');
  try {
    // Look up the embedding model configured in env settings
    var envRes = await fetch('/api/codexlens/env');
    var env = await envRes.json();
    var model = env.CODEXLENS_EMBEDDING_MODEL || env.LITELLM_EMBEDDING_MODEL || 'code';
    // litellm backend, 4 workers, full rebuild (incremental=false)
    initCodexLensIndex('vector', model, 'litellm', 4, false);
  } catch (err) {
    // Env lookup failed - proceed with the default 'code' profile
    initCodexLensIndex('vector', 'code', 'litellm', 4, false);
  }
}
/**
 * Generate embeddings for new/changed files only (vector incremental).
 * Uses the embeddings endpoint rather than a full index init.
 */
window.runVectorIncrementalUpdate = async function runVectorIncrementalUpdate() {
  var projectPath = window.CCW_PROJECT_ROOT || '.';
  showRefreshToast(t('codexlens.startingVectorIncremental') || 'Starting Vector incremental update...', 'info');
  try {
    // Look up the embedding model configured in env settings (null = backend default)
    var envRes = await fetch('/api/codexlens/env');
    var env = await envRes.json();
    var configuredModel = env.CODEXLENS_EMBEDDING_MODEL || env.LITELLM_EMBEDDING_MODEL || null;
    var payload = {
      path: projectPath,
      incremental: true, // Only new/changed files
      backend: 'litellm',
      maxWorkers: 4
    };
    // Only send a model when one is explicitly configured
    if (configuredModel) {
      payload.model = configuredModel;
    }
    var response = await fetch('/api/codexlens/embeddings/generate', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(payload)
    });
    var result = await response.json();
    if (!result.success) {
      showRefreshToast((t('codexlens.vectorIncrementalFailed') || 'Vector incremental failed') + ': ' + (result.error || 'Unknown error'), 'error');
      return;
    }
    var stats = result.result || {};
    var message = (t('codexlens.vectorIncrementalComplete') || 'Vector incremental completed') +
      (stats.chunks_created ? ': ' + stats.chunks_created + ' chunks' : '');
    showRefreshToast(message, 'success');
    renderCodexLensManager();
  } catch (err) {
    showRefreshToast((t('common.error') || 'Error') + ': ' + err.message, 'error');
  }
}
/**
* Run incremental update on the current workspace index
*/

View File

@@ -228,15 +228,31 @@ function renderMemoryCard(memory) {
const updatedDate = memory.updated_at ? new Date(memory.updated_at).toLocaleString() : createdDate;
const isArchived = memory.archived || false;
const metadata = memory.metadata || {};
// Parse metadata - it may be double-encoded JSON string from the backend
let metadata = {};
if (memory.metadata) {
try {
let parsed = typeof memory.metadata === 'string' ? JSON.parse(memory.metadata) : memory.metadata;
// Handle double-encoded JSON (string within string)
if (typeof parsed === 'string') {
parsed = JSON.parse(parsed);
}
metadata = parsed;
console.log('[DEBUG] Memory', memory.id, 'metadata parsed:', metadata, 'favorite:', metadata.favorite);
} catch (e) {
console.warn('Failed to parse memory metadata:', e);
}
}
const tags = metadata.tags || [];
const priority = metadata.priority || 'medium';
const isFavorite = metadata.favorite === true;
console.log('[DEBUG] Memory', memory.id, 'isFavorite:', isFavorite);
return `
<div class="memory-card ${isArchived ? 'archived' : ''}" data-memory-id="${memory.id}" onclick="viewMemoryDetail('${memory.id}')">
<div class="memory-card-header">
<div class="memory-id">
${metadata.favorite ? '<i data-lucide="star"></i>' : ''}
${isFavorite ? '<i data-lucide="star" class="favorite-star"></i>' : ''}
<span>${memory.id}</span>
${isArchived ? `<span class="badge badge-archived">${t('common.archived')}</span>` : ''}
${priority !== 'medium' ? `<span class="badge badge-priority-${priority}">${priority}</span>` : ''}
@@ -245,7 +261,7 @@ function renderMemoryCard(memory) {
<button class="icon-btn" onclick="editMemory('${memory.id}')" title="${t('common.edit')}">
<i data-lucide="edit"></i>
</button>
<button class="icon-btn ${metadata.favorite ? 'favorite-active' : ''}" onclick="toggleFavorite('${memory.id}')" title="${t('coreMemory.toggleFavorite') || 'Toggle Favorite'}">
<button class="icon-btn ${isFavorite ? 'favorite-active' : ''}" onclick="toggleFavorite('${memory.id}')" title="${t('coreMemory.toggleFavorite') || 'Toggle Favorite'}">
<i data-lucide="star"></i>
</button>
${!isArchived
@@ -312,7 +328,8 @@ function renderMemoryCard(memory) {
// API Functions
async function fetchCoreMemories(archived = false) {
try {
const response = await fetch(`/api/core-memory/memories?path=${encodeURIComponent(projectPath)}&archived=${archived}`);
// Add timestamp to prevent browser caching
const response = await fetch(`/api/core-memory/memories?path=${encodeURIComponent(projectPath)}&archived=${archived}&_t=${Date.now()}`);
if (!response.ok) throw new Error(`HTTP ${response.status}`);
const data = await response.json();
return data.memories || [];
@@ -325,7 +342,8 @@ async function fetchCoreMemories(archived = false) {
async function fetchMemoryById(memoryId) {
try {
const response = await fetch(`/api/core-memory/memories/${memoryId}?path=${encodeURIComponent(projectPath)}`);
// Add timestamp to prevent browser caching
const response = await fetch(`/api/core-memory/memories/${memoryId}?path=${encodeURIComponent(projectPath)}&_t=${Date.now()}`);
if (!response.ok) throw new Error(`HTTP ${response.status}`);
const data = await response.json();
return data.memory || null;
@@ -356,7 +374,9 @@ async function editMemory(memoryId) {
document.getElementById('memoryModalTitle').textContent = t('coreMemory.edit');
document.getElementById('memoryContent').value = memory.content || '';
document.getElementById('memorySummary').value = memory.summary || '';
document.getElementById('memoryMetadata').value = memory.metadata ? JSON.stringify(memory.metadata, null, 2) : '';
document.getElementById('memoryMetadata').value = memory.metadata
? (typeof memory.metadata === 'string' ? memory.metadata : JSON.stringify(memory.metadata, null, 2))
: '';
modal.dataset.editId = memoryId;
modal.style.display = 'flex';
lucide.createIcons();
@@ -523,13 +543,23 @@ async function viewMemoryDetail(memoryId) {
<pre class="detail-code">${escapeHtml(memory.content)}</pre>
</div>
${memory.metadata && Object.keys(memory.metadata).length > 0
? `<div class="detail-section">
${(() => {
if (!memory.metadata) return '';
try {
let metadataObj = typeof memory.metadata === 'string' ? JSON.parse(memory.metadata) : memory.metadata;
// Handle double-encoded JSON
if (typeof metadataObj === 'string') {
metadataObj = JSON.parse(metadataObj);
}
if (Object.keys(metadataObj).length === 0) return '';
return `<div class="detail-section">
<h3>${t('coreMemory.metadata')}</h3>
<pre class="detail-code">${escapeHtml(JSON.stringify(memory.metadata, null, 2))}</pre>
</div>`
: ''
}
<pre class="detail-code">${escapeHtml(JSON.stringify(metadataObj, null, 2))}</pre>
</div>`;
} catch (e) {
return '';
}
})()}
${memory.raw_output
? `<div class="detail-section">
@@ -644,7 +674,19 @@ function showClustersView() {
// Favorites Functions
async function refreshFavorites() {
const allMemories = await fetchCoreMemories(false);
const favorites = allMemories.filter(m => m.metadata && m.metadata.favorite);
const favorites = allMemories.filter(m => {
if (!m.metadata) return false;
try {
let parsed = typeof m.metadata === 'string' ? JSON.parse(m.metadata) : m.metadata;
// Handle double-encoded JSON
if (typeof parsed === 'string') {
parsed = JSON.parse(parsed);
}
return parsed.favorite === true;
} catch (e) {
return false;
}
});
const countEl = document.getElementById('totalFavoritesCount');
const gridEl = document.getElementById('favoritesGridContent');
@@ -670,7 +712,7 @@ async function refreshFavorites() {
async function showMemoryRelations(memoryId) {
try {
// Fetch all clusters
const response = await fetch(`/api/core-memory/clusters?path=${encodeURIComponent(projectPath)}`);
const response = await fetch(`/api/core-memory/clusters?path=${encodeURIComponent(projectPath)}&_t=${Date.now()}`);
if (!response.ok) throw new Error(`HTTP ${response.status}`);
const result = await response.json();
@@ -679,7 +721,7 @@ async function showMemoryRelations(memoryId) {
// Find clusters containing this memory
const relatedClusters = [];
for (const cluster of clusters) {
const detailRes = await fetch(`/api/core-memory/clusters/${cluster.id}?path=${encodeURIComponent(projectPath)}`);
const detailRes = await fetch(`/api/core-memory/clusters/${cluster.id}?path=${encodeURIComponent(projectPath)}&_t=${Date.now()}`);
if (detailRes.ok) {
const detail = await detailRes.json();
const members = detail.members || [];
@@ -749,7 +791,20 @@ async function toggleFavorite(memoryId) {
const memory = await fetchMemoryById(memoryId);
if (!memory) return;
const metadata = memory.metadata || {};
// Parse metadata - it may be double-encoded JSON string from the backend
let metadata = {};
if (memory.metadata) {
try {
let parsed = typeof memory.metadata === 'string' ? JSON.parse(memory.metadata) : memory.metadata;
// Handle double-encoded JSON
if (typeof parsed === 'string') {
parsed = JSON.parse(parsed);
}
metadata = parsed;
} catch (e) {
console.warn('Failed to parse memory metadata:', e);
}
}
metadata.favorite = !metadata.favorite;
const response = await fetch('/api/core-memory/memories', {

View File

@@ -20,14 +20,15 @@ export interface CliConfig {
tools: Record<string, CliToolConfig>;
}
export type CliToolName = 'gemini' | 'qwen' | 'codex';
export type CliToolName = 'gemini' | 'qwen' | 'codex' | 'claude';
// ========== Constants ==========
export const PREDEFINED_MODELS: Record<CliToolName, string[]> = {
gemini: ['gemini-2.5-pro', 'gemini-2.5-flash', 'gemini-2.0-flash', 'gemini-1.5-pro', 'gemini-1.5-flash'],
qwen: ['coder-model', 'vision-model', 'qwen2.5-coder-32b'],
codex: ['gpt-5.2', 'gpt-4.1', 'o4-mini', 'o3']
codex: ['gpt-5.2', 'gpt-4.1', 'o4-mini', 'o3'],
claude: ['sonnet', 'opus', 'haiku', 'claude-sonnet-4-5-20250929', 'claude-opus-4-5-20251101']
};
export const DEFAULT_CONFIG: CliConfig = {
@@ -47,6 +48,11 @@ export const DEFAULT_CONFIG: CliConfig = {
enabled: true,
primaryModel: 'gpt-5.2',
secondaryModel: 'gpt-5.2'
},
claude: {
enabled: true,
primaryModel: 'sonnet',
secondaryModel: 'haiku'
}
}
};
@@ -63,7 +69,7 @@ function ensureConfigDirForProject(baseDir: string): void {
}
function isValidToolName(tool: string): tool is CliToolName {
return ['gemini', 'qwen', 'codex'].includes(tool);
return ['gemini', 'qwen', 'codex', 'claude'].includes(tool);
}
function validateConfig(config: unknown): config is CliConfig {
@@ -74,7 +80,7 @@ function validateConfig(config: unknown): config is CliConfig {
if (!c.tools || typeof c.tools !== 'object') return false;
const tools = c.tools as Record<string, unknown>;
for (const toolName of ['gemini', 'qwen', 'codex']) {
for (const toolName of ['gemini', 'qwen', 'codex', 'claude']) {
const tool = tools[toolName];
if (!tool || typeof tool !== 'object') return false;

View File

@@ -66,6 +66,309 @@ function errorLog(category: string, message: string, error?: Error | unknown, co
}
}
// ========== Unified Stream-JSON Parser ==========
/**
 * One newline-delimited JSON event from the Claude CLI's
 * `--output-format stream-json` mode. Fields are optional because each
 * event type populates a different subset.
 */
interface ClaudeStreamMessage {
  // Event kind; determines which of the fields below are meaningful
  type: 'system' | 'assistant' | 'result' | 'error';
  // Refines 'system' ('init') and 'result' ('success'/'error') events
  subtype?: 'init' | 'success' | 'error';
  // Session id reported on system/init events
  session_id?: string;
  // Model name reported on system/init events
  model?: string;
  // Assistant payload: ordered text chunks (type='assistant')
  message?: {
    content: Array<{ type: 'text'; text: string }>;
  };
  // Final result text (type='result')
  result?: string;
  // Total execution cost in USD, reported on result events
  total_cost_usd?: number;
  // Token usage reported on result events
  usage?: {
    input_tokens?: number;
    output_tokens?: number;
  };
  // Error description (type='error')
  error?: string;
}
/**
 * One newline-delimited JSON event from the Gemini/Qwen CLIs'
 * `--output-format stream-json` mode (both tools share this schema).
 */
interface GeminiStreamMessage {
  // Event kind: session init, a chat message, or the final result
  type: 'init' | 'message' | 'result';
  timestamp?: string;
  // Session id reported on init events
  session_id?: string;
  // Model name reported on init events
  model?: string;
  // Message author (type='message'); only 'assistant' messages carry output
  role?: 'user' | 'assistant';
  // Message text; either a full message or a streamed fragment (see `delta`)
  content?: string;
  // true when `content` is a streaming fragment rather than a full message
  delta?: boolean;
  // Final status reported on result events
  status?: 'success' | 'error';
  // Token statistics reported on result events
  stats?: {
    total_tokens?: number;
    input_tokens?: number;
    output_tokens?: number;
  };
}
/**
 * One JSON event from the Codex CLI's `--json` output mode.
 * Events describe a thread lifecycle: thread start, turn start,
 * completed items (reasoning / agent messages), and turn completion.
 */
interface CodexStreamMessage {
  // Event kind; determines which of the fields below are meaningful
  type: 'thread.started' | 'turn.started' | 'item.completed' | 'turn.completed';
  // Thread id reported on thread.started events
  thread_id?: string;
  // Completed item payload (type='item.completed')
  item?: {
    type: 'reasoning' | 'agent_message';
    text: string;
  };
  // Token usage reported on turn.completed events
  usage?: {
    input_tokens?: number;
    output_tokens?: number;
  };
}
/**
 * Unified stream-JSON parser for Claude, Gemini, Qwen, and Codex CLIs.
 *
 * Each CLI emits newline-delimited JSON in its own schema; this class
 * buffers partial lines across chunks, dispatches each complete line to
 * a tool-specific parser, and accumulates three things as side effects:
 *   - extractedText: concatenated assistant/agent response text
 *   - sessionInfo:   session/thread id and model reported at init
 *   - usageInfo:     cost (Claude only) and token counts from result events
 * The string returned by processChunk() is a human-readable rendering of
 * the stream (with `[系统]`/`[响应]`/`[思考]`/`[结果]`/`[错误]` prefixes)
 * intended for live dashboard output.
 */
class UnifiedStreamParser {
  // Which CLI's JSON schema to expect; fixed at construction
  private tool: 'claude' | 'gemini' | 'qwen' | 'codex';
  // Trailing partial line carried over until its terminating '\n' arrives
  private lineBuffer = '';
  // Concatenation of all assistant/agent response text seen so far
  private extractedText = '';
  // session_id/model (Claude, Gemini, Qwen) or thread_id (Codex)
  private sessionInfo: { session_id?: string; model?: string; thread_id?: string } = {};
  // cost is only set by the Claude parser; tokens by all result/turn events
  private usageInfo: { cost?: number; tokens?: { input: number; output: number } } = {};

  constructor(tool: 'claude' | 'gemini' | 'qwen' | 'codex') {
    this.tool = tool;
  }

  /**
   * Process an incoming raw stdout chunk.
   *
   * Splits on newlines, keeps any trailing incomplete line buffered for
   * the next call, and parses each complete line. Lines that fail
   * JSON.parse are passed through verbatim (with their newline) rather
   * than dropped, so non-JSON CLI output is still visible.
   *
   * @param data Raw chunk as received from the child process stdout
   * @returns Rendered text to forward, with message type prefixes
   */
  processChunk(data: string): string {
    this.lineBuffer += data;
    const lines = this.lineBuffer.split('\n');
    // Keep last incomplete line in buffer
    this.lineBuffer = lines.pop() || '';
    let output = '';
    for (const line of lines) {
      const trimmed = line.trim();
      if (!trimmed) continue;
      try {
        output += this.parseJsonLine(trimmed);
      } catch (err) {
        // Not valid JSON or not a stream-json line - pass through as-is
        debugLog('STREAM_PARSER', `Non-JSON line (passing through): ${trimmed.substring(0, 100)}`);
        output += line + '\n';
      }
    }
    return output;
  }

  /**
   * Dispatch a single JSON line to the tool-specific parser.
   * Gemini and Qwen share one schema. Throws (SyntaxError) on invalid
   * JSON; processChunk() catches that and passes the line through.
   */
  private parseJsonLine(line: string): string {
    switch (this.tool) {
      case 'claude':
        return this.parseClaudeLine(line);
      case 'gemini':
      case 'qwen':
        return this.parseGeminiQwenLine(line);
      case 'codex':
        return this.parseCodexLine(line);
      default:
        return '';
    }
  }

  /**
   * Parse one line of Claude's stream-json format.
   * Records session metadata on system/init, appends assistant text to
   * extractedText, captures cost/token usage on result, and renders
   * error events. Returns the rendered text for this line.
   */
  private parseClaudeLine(line: string): string {
    const msg: ClaudeStreamMessage = JSON.parse(line);
    let output = '';
    // Extract session metadata
    if (msg.type === 'system' && msg.subtype === 'init') {
      this.sessionInfo.session_id = msg.session_id;
      this.sessionInfo.model = msg.model;
      debugLog('STREAM_PARSER', 'Claude session initialized', this.sessionInfo);
      output += `[系统] 会话初始化: ${msg.model || 'unknown'}\n`;
    }
    // Extract assistant response text
    if (msg.type === 'assistant' && msg.message?.content) {
      for (const item of msg.message.content) {
        if (item.type === 'text' && item.text && item.text.trim()) { // Filter empty/whitespace-only text
          this.extractedText += item.text;
          output += `[响应] ${item.text}\n`; // Add newline for proper line separation
        }
      }
    }
    // Extract result metadata (cost + token usage)
    if (msg.type === 'result') {
      if (msg.total_cost_usd !== undefined) {
        this.usageInfo.cost = msg.total_cost_usd;
      }
      if (msg.usage) {
        this.usageInfo.tokens = {
          input: msg.usage.input_tokens || 0,
          output: msg.usage.output_tokens || 0
        };
      }
      debugLog('STREAM_PARSER', 'Claude execution result received', {
        subtype: msg.subtype,
        cost: this.usageInfo.cost,
        tokens: this.usageInfo.tokens
      });
      output += `[结果] 状态: ${msg.subtype || 'completed'}\n`;
    }
    // Handle errors
    if (msg.type === 'error') {
      errorLog('STREAM_PARSER', `Claude error in stream: ${msg.error || 'Unknown error'}`);
      output += `[错误] ${msg.error || 'Unknown error'}\n`;
    }
    return output;
  }

  // Last event kind seen by the Gemini/Qwen parser. Used so that in delta
  // (streaming) mode only the first fragment of an assistant message gets
  // the `[响应]` prefix, and the result line gets a separating newline.
  private lastMessageType: string = '';

  /**
   * Parse one line of Gemini/Qwen stream-json format.
   * Handles init (session metadata), assistant messages (full or delta
   * streaming), and result (status + token stats) events.
   */
  private parseGeminiQwenLine(line: string): string {
    const msg: GeminiStreamMessage = JSON.parse(line);
    let output = '';
    // Extract session metadata
    if (msg.type === 'init') {
      this.sessionInfo.session_id = msg.session_id;
      this.sessionInfo.model = msg.model;
      debugLog('STREAM_PARSER', `${this.tool} session initialized`, this.sessionInfo);
      output += `[系统] 会话初始化: ${msg.model || 'unknown'}\n`;
      this.lastMessageType = 'init';
    }
    // Extract assistant message (user messages are ignored)
    if (msg.type === 'message' && msg.role === 'assistant' && msg.content) {
      const contentText = msg.content.trim(); // Filter empty/whitespace-only content
      if (contentText) {
        this.extractedText += msg.content;
        if (msg.delta) {
          // Delta mode: add prefix only for first chunk of a streamed message
          if (this.lastMessageType !== 'assistant') {
            output += `[响应] ${msg.content}`;
          } else {
            output += msg.content;
          }
        } else {
          // Full message mode
          output += `[响应] ${msg.content}\n`;
        }
        this.lastMessageType = 'assistant';
      }
    }
    // Extract result statistics
    if (msg.type === 'result') {
      // Add newline before result if last was delta streaming
      if (this.lastMessageType === 'assistant') {
        output += '\n';
      }
      if (msg.stats) {
        this.usageInfo.tokens = {
          input: msg.stats.input_tokens || 0,
          output: msg.stats.output_tokens || 0
        };
      }
      debugLog('STREAM_PARSER', `${this.tool} execution result received`, {
        status: msg.status,
        tokens: this.usageInfo.tokens
      });
      output += `[结果] 状态: ${msg.status || 'success'}\n`;
      this.lastMessageType = 'result';
    }
    return output;
  }

  /**
   * Parse one line of Codex's JSON output format.
   * Records the thread id, renders reasoning items as `[思考]` (not added
   * to extractedText), appends agent messages to extractedText, and
   * captures token usage on turn completion.
   */
  private parseCodexLine(line: string): string {
    const msg: CodexStreamMessage = JSON.parse(line);
    let output = '';
    // Extract thread metadata
    if (msg.type === 'thread.started' && msg.thread_id) {
      this.sessionInfo.thread_id = msg.thread_id;
      debugLog('STREAM_PARSER', 'Codex thread started', { thread_id: msg.thread_id });
      output += `[系统] 线程启动: ${msg.thread_id}\n`;
    }
    // Extract reasoning text (display-only)
    if (msg.type === 'item.completed' && msg.item?.type === 'reasoning') {
      output += `[思考] ${msg.item.text}\n`;
    }
    // Extract agent message
    if (msg.type === 'item.completed' && msg.item?.type === 'agent_message') {
      this.extractedText += msg.item.text;
      output += `[响应] ${msg.item.text}\n`;
    }
    // Extract usage statistics
    if (msg.type === 'turn.completed' && msg.usage) {
      this.usageInfo.tokens = {
        input: msg.usage.input_tokens || 0,
        output: msg.usage.output_tokens || 0
      };
      debugLog('STREAM_PARSER', 'Codex turn completed', {
        tokens: this.usageInfo.tokens
      });
      output += `[结果] 回合完成\n`;
    }
    return output;
  }

  /**
   * Flush any buffered trailing line at stream end by injecting a
   * newline so processChunk() treats it as complete. Call once after
   * the child process closes; returns rendered text for the remainder.
   */
  flush(): string {
    if (this.lineBuffer.trim()) {
      return this.processChunk('\n'); // Force process remaining line
    }
    return '';
  }

  /**
   * Full concatenated assistant/agent response text extracted so far.
   */
  getExtractedText(): string {
    return this.extractedText;
  }

  /**
   * Session metadata (session_id/model, or thread_id for Codex).
   */
  getSessionInfo() {
    return this.sessionInfo;
  }

  /**
   * Usage metadata: cost in USD (Claude only) and token counts.
   */
  getUsageInfo() {
    return this.usageInfo;
  }
}
// LiteLLM integration
import { executeLiteLLMEndpoint } from './litellm-executor.js';
import { findEndpointById } from '../config/litellm-api-config-manager.js';
@@ -116,7 +419,7 @@ function getSqliteStoreSync(baseDir: string) {
// Define Zod schema for validation
const ParamsSchema = z.object({
tool: z.enum(['gemini', 'qwen', 'codex']),
tool: z.enum(['gemini', 'qwen', 'codex', 'claude']),
prompt: z.string().min(1, 'Prompt is required'),
mode: z.enum(['analysis', 'write', 'auto']).default('analysis'),
format: z.enum(['plain', 'yaml', 'json']).default('plain'), // Multi-turn prompt concatenation format
@@ -255,6 +558,7 @@ interface ExecutionOutput {
conversation: ConversationRecord; // Full conversation record
stdout: string;
stderr: string;
parsedOutput?: string; // Parsed output from stream parser (for stream-json tools)
}
/**
@@ -380,6 +684,8 @@ function buildCommand(params: {
if (include) {
args.push('--include-directories', include);
}
// Enable stream-json output for unified parsing
args.push('--output-format', 'stream-json');
break;
case 'qwen':
@@ -400,6 +706,8 @@ function buildCommand(params: {
if (include) {
args.push('--include-directories', include);
}
// Enable stream-json output for unified parsing
args.push('--output-format', 'stream-json');
break;
case 'codex':
@@ -434,6 +742,8 @@ function buildCommand(params: {
args.push('--add-dir', addDir);
}
}
// Enable JSON output for unified parsing
args.push('--json');
// Use `-` to indicate reading prompt from stdin
args.push('-');
} else {
@@ -458,6 +768,8 @@ function buildCommand(params: {
args.push('--add-dir', addDir);
}
}
// Enable JSON output for unified parsing
args.push('--json');
// Use `-` to indicate reading prompt from stdin (avoids Windows escaping issues)
args.push('-');
}
@@ -483,8 +795,9 @@ function buildCommand(params: {
} else {
args.push('--permission-mode', 'default');
}
// Output format for better parsing
args.push('--output-format', 'text');
// Output format: stream-json for real-time parsing, text for backward compatibility
args.push('--output-format', 'stream-json');
args.push('--verbose'); // Required for stream-json format
// Add directories
if (include) {
const dirs = include.split(',').map(d => d.trim()).filter(d => d);
@@ -962,11 +1275,23 @@ async function executeCliTool(
let stderr = '';
let timedOut = false;
// Initialize unified stream parser for all tools
const streamParser = ['claude', 'gemini', 'qwen', 'codex'].includes(tool)
? new UnifiedStreamParser(tool as 'claude' | 'gemini' | 'qwen' | 'codex')
: null;
// Handle stdout
child.stdout!.on('data', (data) => {
const text = data.toString();
stdout += text;
if (onOutput) {
// Parse stream-json for all supported tools
if (streamParser && onOutput) {
const parsedText = streamParser.processChunk(text);
if (parsedText) {
onOutput({ type: 'stdout', data: parsedText });
}
} else if (onOutput) {
onOutput({ type: 'stdout', data: text });
}
});
@@ -985,6 +1310,23 @@ async function executeCliTool(
// Clear current child process reference
currentChildProcess = null;
// Flush unified parser buffer if present
if (streamParser && onOutput) {
const remaining = streamParser.flush();
if (remaining) {
onOutput({ type: 'stdout', data: remaining });
}
// Log usage information if available
const usageInfo = streamParser.getUsageInfo();
if (usageInfo.cost !== undefined || usageInfo.tokens) {
debugLog('STREAM_USAGE', `${tool} execution usage`, {
cost_usd: usageInfo.cost,
tokens: usageInfo.tokens
});
}
}
const endTime = Date.now();
const duration = endTime - startTime;
@@ -1212,7 +1554,8 @@ async function executeCliTool(
execution,
conversation,
stdout,
stderr
stderr,
parsedOutput: streamParser?.getExtractedText() || undefined
});
});

View File

@@ -2,7 +2,8 @@
* Smart Search Tool - Unified intelligent search with CodexLens integration
*
* Features:
* - Intent classification with automatic mode selection
* - Fuzzy mode: FTS + ripgrep fusion with RRF ranking (default)
* - Semantic mode: Dense coarse retrieval + cross-encoder reranking
* - CodexLens integration (init, dense_rerank, fts)
* - Ripgrep fallback for exact mode
* - Index status checking and warnings
@@ -10,7 +11,7 @@
*
* Actions:
* - init: Initialize CodexLens index
* - search: Intelligent search with auto mode selection
* - search: Intelligent search with fuzzy (default) or semantic mode
* - status: Check index status
* - update: Incremental index update for changed files
* - watch: Start file watcher for automatic updates
@@ -66,7 +67,7 @@ const ParamsSchema = z.object({
action: z.enum(['init', 'search', 'search_files', 'find_files', 'status', 'update', 'watch']).default('search'),
query: z.string().optional().describe('Content search query (for action="search")'),
pattern: z.string().optional().describe('Glob pattern for path matching (for action="find_files")'),
mode: z.enum(['auto', 'hybrid', 'exact', 'ripgrep', 'priority']).default('auto'),
mode: z.enum(['fuzzy', 'semantic']).default('fuzzy'),
output_mode: z.enum(['full', 'files_only', 'count']).default('full'),
path: z.string().optional(),
paths: z.array(z.string()).default([]),
@@ -94,7 +95,7 @@ const ParamsSchema = z.object({
type Params = z.infer<typeof ParamsSchema>;
// Search mode constants
const SEARCH_MODES = ['auto', 'hybrid', 'exact', 'ripgrep', 'priority'] as const;
const SEARCH_MODES = ['fuzzy', 'semantic'] as const;
// Classification confidence threshold
const CONFIDENCE_THRESHOLD = 0.7;
@@ -850,6 +851,93 @@ async function executeWatchAction(params: Params): Promise<SearchResult> {
};
}
/**
 * Mode: fuzzy - FTS + ripgrep fusion with RRF ranking.
 *
 * Runs the exact (FTS) and ripgrep backends in parallel, merges their
 * result lists with Reciprocal Rank Fusion, then splits the fused list
 * into full-content results plus extra file paths.
 *
 * Fix over previous version: a backend that *resolved* with
 * `success: false` was silently ignored when building the combined
 * error message (only rejected promises were reported), so a total
 * failure could surface as "Both search backends failed: " with no
 * detail. Failure reasons are now collected from both rejected and
 * unsuccessful-but-fulfilled results. Also removed the unused `path`
 * destructure.
 *
 * @param params Validated search parameters (query required)
 * @returns SearchResult with fused results, or an error describing why
 *          both backends failed
 */
async function executeFuzzyMode(params: Params): Promise<SearchResult> {
  const { query, maxResults = 5, extraFilesCount = 10 } = params;

  if (!query) {
    return {
      success: false,
      error: 'Query is required for search',
    };
  }

  const timer = createTimer();

  // Run both backends concurrently; allSettled so one failure cannot sink the other
  const [ftsResult, ripgrepResult] = await Promise.allSettled([
    executeCodexLensExactMode(params),
    executeRipgrepMode(params),
  ]);
  timer.mark('parallel_search');

  // Collect per-backend result lists, and failure reasons for diagnostics
  const resultsMap = new Map<string, any[]>();
  const errors: string[] = [];

  // Record a settled backend outcome: either its results or why it failed
  const collect = (settled: PromiseSettledResult<SearchResult>, key: string, label: string): void => {
    if (settled.status === 'rejected') {
      errors.push(`${label}: ${settled.reason}`);
      return;
    }
    if (settled.value.success && settled.value.results) {
      resultsMap.set(key, settled.value.results as any[]);
    } else if (settled.value.error) {
      errors.push(`${label}: ${settled.value.error}`);
    }
  };
  collect(ftsResult, 'exact', 'FTS');
  collect(ripgrepResult, 'ripgrep', 'Ripgrep');

  // If neither backend produced results, surface every collected reason
  if (resultsMap.size === 0) {
    return {
      success: false,
      error: `Both search backends failed: ${errors.join('; ') || 'no results from either backend'}`,
    };
  }

  // Apply RRF fusion with fuzzy-optimized weights
  // Fuzzy mode: balanced between exact and ripgrep
  const fusionWeights = { exact: 0.5, ripgrep: 0.5 };
  const totalToFetch = maxResults + extraFilesCount;
  const fusedResults = applyRRFFusion(resultsMap, fusionWeights, totalToFetch);
  timer.mark('rrf_fusion');

  // Normalize results to the common SearchResult item shape
  const normalizedResults = fusedResults.map((item: any) => ({
    file: item.file || item.path,
    line: item.line || 0,
    column: item.column || 0,
    content: item.content || '',
    score: item.fusion_score || 0,
    matchCount: item.matchCount,
    matchScore: item.matchScore,
  }));

  // Split results: first N with full content, rest as file paths only
  const { results, extra_files } = splitResultsWithExtraFiles(normalizedResults, maxResults, extraFilesCount);

  // Log timing
  timer.log();
  const timings = timer.getTimings();

  return {
    success: true,
    results,
    extra_files: extra_files.length > 0 ? extra_files : undefined,
    metadata: {
      mode: 'fuzzy',
      backend: 'fts+ripgrep',
      count: results.length,
      query,
      note: `Fuzzy search using RRF fusion of FTS and ripgrep (weights: exact=${fusionWeights.exact}, ripgrep=${fusionWeights.ripgrep})`,
      timing: TIMING_ENABLED ? timings : undefined,
    },
  };
}
/**
* Mode: auto - Intent classification and mode selection
* Routes to: hybrid (NL + index) | exact (index) | ripgrep (no index)
@@ -1832,10 +1920,9 @@ export const schema: ToolSchema = {
- watch: Start file watcher for automatic updates
**Content Search (action="search"):**
smart_search(query="authentication logic") # auto mode - routes to best backend
smart_search(query="MyClass", mode="exact") # exact mode - precise FTS matching
smart_search(query="auth", mode="ripgrep") # ripgrep mode - fast literal search
smart_search(query="how to auth", mode="hybrid") # hybrid mode - semantic + fuzzy search
smart_search(query="authentication logic") # fuzzy mode (default) - FTS + ripgrep fusion
smart_search(query="MyClass", mode="fuzzy") # fuzzy mode - fast hybrid search
smart_search(query="how to auth", mode="semantic") # semantic mode - dense + reranker
**File Discovery (action="find_files"):**
smart_search(action="find_files", pattern="*.ts") # find all TypeScript files
@@ -1852,17 +1939,7 @@ export const schema: ToolSchema = {
smart_search(query="auth", limit=10, offset=0) # first page
smart_search(query="auth", limit=10, offset=10) # second page
**Multi-Word Search (ripgrep mode with tokenization):**
smart_search(query="CCW_PROJECT_ROOT CCW_ALLOWED_DIRS", mode="ripgrep") # tokenized OR matching
smart_search(query="auth login user", mode="ripgrep") # matches any token, ranks by match count
smart_search(query="exact phrase", mode="ripgrep", tokenize=false) # disable tokenization
**Regex Search (ripgrep mode):**
smart_search(query="class.*Builder") # auto-detects regex pattern
smart_search(query="def.*\\(.*\\):") # find function definitions
smart_search(query="import.*from", caseSensitive=false) # case-insensitive
**Modes:** auto (intelligent routing), hybrid (semantic+fuzzy), exact (FTS), ripgrep (fast with tokenization), priority (fallback chain)`,
**Modes:** fuzzy (FTS + ripgrep fusion, default), semantic (dense + reranker)`,
inputSchema: {
type: 'object',
properties: {
@@ -1883,8 +1960,8 @@ export const schema: ToolSchema = {
mode: {
type: 'string',
enum: SEARCH_MODES,
description: 'Search mode: auto, hybrid (best quality), exact (CodexLens FTS), ripgrep (fast, no index), priority (fallback chain)',
default: 'auto',
description: 'Search mode: fuzzy (FTS + ripgrep fusion, default), semantic (dense + reranker for natural language queries)',
default: 'fuzzy',
},
output_mode: {
type: 'string',
@@ -2323,25 +2400,16 @@ export async function handler(params: Record<string, unknown>): Promise<ToolResu
case 'search':
default:
// Handle search modes: auto | hybrid | exact | ripgrep | priority
// Handle search modes: fuzzy | semantic
switch (mode) {
case 'auto':
result = await executeAutoMode(parsed.data);
case 'fuzzy':
result = await executeFuzzyMode(parsed.data);
break;
case 'hybrid':
case 'semantic':
result = await executeHybridMode(parsed.data);
break;
case 'exact':
result = await executeCodexLensExactMode(parsed.data);
break;
case 'ripgrep':
result = await executeRipgrepMode(parsed.data);
break;
case 'priority':
result = await executePriorityFallbackMode(parsed.data);
break;
default:
throw new Error(`Unsupported mode: ${mode}. Use: auto, hybrid, exact, ripgrep, or priority`);
throw new Error(`Unsupported mode: ${mode}. Use: fuzzy or semantic`);
}
break;
}

View File

@@ -1 +1 @@
{"root":["./src/cli.ts","./src/index.ts","./src/commands/cli.ts","./src/commands/core-memory.ts","./src/commands/hook.ts","./src/commands/install.ts","./src/commands/list.ts","./src/commands/memory.ts","./src/commands/serve.ts","./src/commands/session-path-resolver.ts","./src/commands/session.ts","./src/commands/stop.ts","./src/commands/tool.ts","./src/commands/uninstall.ts","./src/commands/upgrade.ts","./src/commands/view.ts","./src/config/litellm-api-config-manager.ts","./src/config/provider-models.ts","./src/config/storage-paths.ts","./src/core/cache-manager.ts","./src/core/claude-freshness.ts","./src/core/core-memory-store.ts","./src/core/dashboard-generator-patch.ts","./src/core/dashboard-generator.ts","./src/core/data-aggregator.ts","./src/core/history-importer.ts","./src/core/lite-scanner-complete.ts","./src/core/lite-scanner.ts","./src/core/manifest.ts","./src/core/memory-embedder-bridge.ts","./src/core/memory-store.ts","./src/core/server.ts","./src/core/session-clustering-service.ts","./src/core/session-scanner.ts","./src/core/websocket.ts","./src/core/routes/ccw-routes.ts","./src/core/routes/claude-routes.ts","./src/core/routes/cli-routes.ts","./src/core/routes/codexlens-routes.ts","./src/core/routes/core-memory-routes.ts","./src/core/routes/files-routes.ts","./src/core/routes/graph-routes.ts","./src/core/routes/help-routes.ts","./src/core/routes/hooks-routes.ts","./src/core/routes/litellm-api-routes.ts","./src/core/routes/litellm-routes.ts","./src/core/routes/mcp-routes.ts","./src/core/routes/mcp-templates-db.ts","./src/core/routes/memory-routes.ts","./src/core/routes/rules-routes.ts","./src/core/routes/session-routes.ts","./src/core/routes/skills-routes.ts","./src/core/routes/status-routes.ts","./src/core/routes/system-routes.ts","./src/mcp-server/index.ts","./src/tools/classify-folders.ts","./src/tools/claude-cli-tools.ts","./src/tools/cli-config-manager.ts","./src/tools/cli-executor.ts","./src/tools/cli-history-store.ts","./src/tools/codex-lens.ts","./
src/tools/context-cache-store.ts","./src/tools/context-cache.ts","./src/tools/convert-tokens-to-css.ts","./src/tools/core-memory.ts","./src/tools/detect-changed-modules.ts","./src/tools/discover-design-files.ts","./src/tools/edit-file.ts","./src/tools/generate-module-docs.ts","./src/tools/get-modules-by-depth.ts","./src/tools/index.ts","./src/tools/litellm-client.ts","./src/tools/litellm-executor.ts","./src/tools/native-session-discovery.ts","./src/tools/notifier.ts","./src/tools/pattern-parser.ts","./src/tools/read-file.ts","./src/tools/resume-strategy.ts","./src/tools/session-content-parser.ts","./src/tools/session-manager.ts","./src/tools/smart-context.ts","./src/tools/smart-search.ts","./src/tools/storage-manager.ts","./src/tools/ui-generate-preview.js","./src/tools/ui-instantiate-prototypes.js","./src/tools/update-module-claude.js","./src/tools/write-file.ts","./src/types/config.ts","./src/types/index.ts","./src/types/litellm-api-config.ts","./src/types/session.ts","./src/types/tool.ts","./src/utils/browser-launcher.ts","./src/utils/file-utils.ts","./src/utils/path-resolver.ts","./src/utils/path-validator.ts","./src/utils/ui.ts"],"version":"5.9.3"}
{"root":["./src/cli.ts","./src/index.ts","./src/commands/cli.ts","./src/commands/core-memory.ts","./src/commands/hook.ts","./src/commands/install.ts","./src/commands/issue.ts","./src/commands/list.ts","./src/commands/memory.ts","./src/commands/serve.ts","./src/commands/session-path-resolver.ts","./src/commands/session.ts","./src/commands/stop.ts","./src/commands/tool.ts","./src/commands/uninstall.ts","./src/commands/upgrade.ts","./src/commands/view.ts","./src/config/litellm-api-config-manager.ts","./src/config/provider-models.ts","./src/config/storage-paths.ts","./src/core/cache-manager.ts","./src/core/claude-freshness.ts","./src/core/core-memory-store.ts","./src/core/dashboard-generator-patch.ts","./src/core/dashboard-generator.ts","./src/core/data-aggregator.ts","./src/core/history-importer.ts","./src/core/lite-scanner-complete.ts","./src/core/lite-scanner.ts","./src/core/manifest.ts","./src/core/memory-embedder-bridge.ts","./src/core/memory-store.ts","./src/core/server.ts","./src/core/session-clustering-service.ts","./src/core/session-scanner.ts","./src/core/websocket.ts","./src/core/routes/ccw-routes.ts","./src/core/routes/claude-routes.ts","./src/core/routes/cli-routes.ts","./src/core/routes/codexlens-routes.ts","./src/core/routes/core-memory-routes.ts","./src/core/routes/discovery-routes.ts","./src/core/routes/files-routes.ts","./src/core/routes/graph-routes.ts","./src/core/routes/help-routes.ts","./src/core/routes/hooks-routes.ts","./src/core/routes/issue-routes.ts","./src/core/routes/litellm-api-routes.ts","./src/core/routes/litellm-routes.ts","./src/core/routes/mcp-routes.ts","./src/core/routes/mcp-templates-db.ts","./src/core/routes/memory-routes.ts","./src/core/routes/nav-status-routes.ts","./src/core/routes/rules-routes.ts","./src/core/routes/session-routes.ts","./src/core/routes/skills-routes.ts","./src/core/routes/status-routes.ts","./src/core/routes/system-routes.ts","./src/mcp-server/index.ts","./src/tools/classify-folders.ts","./src/tools/claude-cli
-tools.ts","./src/tools/cli-config-manager.ts","./src/tools/cli-executor.ts","./src/tools/cli-history-store.ts","./src/tools/codex-lens.ts","./src/tools/context-cache-store.ts","./src/tools/context-cache.ts","./src/tools/convert-tokens-to-css.ts","./src/tools/core-memory.ts","./src/tools/detect-changed-modules.ts","./src/tools/discover-design-files.ts","./src/tools/edit-file.ts","./src/tools/generate-module-docs.ts","./src/tools/get-modules-by-depth.ts","./src/tools/index.ts","./src/tools/litellm-client.ts","./src/tools/litellm-executor.ts","./src/tools/native-session-discovery.ts","./src/tools/notifier.ts","./src/tools/pattern-parser.ts","./src/tools/read-file.ts","./src/tools/resume-strategy.ts","./src/tools/session-content-parser.ts","./src/tools/session-manager.ts","./src/tools/smart-context.ts","./src/tools/smart-search.ts","./src/tools/storage-manager.ts","./src/tools/ui-generate-preview.js","./src/tools/ui-instantiate-prototypes.js","./src/tools/update-module-claude.js","./src/tools/write-file.ts","./src/types/config.ts","./src/types/index.ts","./src/types/litellm-api-config.ts","./src/types/session.ts","./src/types/tool.ts","./src/utils/browser-launcher.ts","./src/utils/file-utils.ts","./src/utils/path-resolver.ts","./src/utils/path-validator.ts","./src/utils/python-utils.ts","./src/utils/ui.ts"],"version":"5.9.3"}