feat: 更新执行命令的参数提示,支持指定现有工作树路径,增强工作树管理功能

This commit is contained in:
catlog22
2026-01-07 16:54:23 +08:00
parent 87d38a3374
commit 42fbc1936d
8 changed files with 496 additions and 41 deletions

View File

@@ -1,7 +1,7 @@
---
name: execute
description: Execute queue with DAG-based parallel orchestration (one commit per solution)
argument-hint: "[--worktree] [--queue <queue-id>]"
argument-hint: "[--worktree [<existing-path>]] [--queue <queue-id>]"
allowed-tools: TodoWrite(*), Bash(*), Read(*), AskUserQuestion(*)
---

View File

@@ -1,6 +1,6 @@
---
description: Execute all solutions from issue queue with git commit after each solution
argument-hint: "[--worktree] [--queue <queue-id>]"
argument-hint: "[--worktree [<existing-path>]] [--queue <queue-id>]"
---
# Issue Execute (Codex Version)
@@ -11,7 +11,11 @@ argument-hint: "[--worktree] [--queue <queue-id>]"
## Worktree Mode (Recommended for Parallel Execution)
When `--worktree` is specified, create a separate git worktree to isolate work.
When `--worktree` is specified, create or use a git worktree to isolate work.
**Usage**:
- `--worktree` - Create a new worktree with timestamp-based name
- `--worktree <existing-path>` - Resume in an existing worktree (for recovery/continuation)
**Note**: `ccw issue` commands auto-detect worktree and redirect to main repo automatically.
@@ -21,17 +25,38 @@ When `--worktree` is specified, create a separate git worktree to isolate work.
# Use absolute paths to avoid issues when running from subdirectories
REPO_ROOT=$(git rev-parse --show-toplevel)
WORKTREE_BASE="${REPO_ROOT}/.ccw/worktrees"

# Check if existing worktree path was provided
EXISTING_WORKTREE="${1:-}" # Pass as argument or empty

# Prune stale worktrees from previous interrupted executions
git worktree prune

if [[ -n "${EXISTING_WORKTREE}" && -d "${EXISTING_WORKTREE}" ]]; then
  # Resume mode: reuse the existing worktree as-is.
  # Do NOT run `git worktree add` here - the path (and its branch)
  # already exist, so adding again would fail.
  WORKTREE_PATH="${EXISTING_WORKTREE}"
  WORKTREE_NAME=$(basename "${WORKTREE_PATH}")
  # Verify it's a valid git worktree before resuming
  if ! git -C "${WORKTREE_PATH}" rev-parse --is-inside-work-tree &>/dev/null; then
    echo "Error: ${EXISTING_WORKTREE} is not a valid git worktree"
    exit 1
  fi
  echo "Resuming in existing worktree: ${WORKTREE_PATH}"
else
  # Create mode: new worktree with timestamp-based name
  WORKTREE_NAME="issue-exec-$(date +%Y%m%d-%H%M%S)"
  WORKTREE_PATH="${WORKTREE_BASE}/${WORKTREE_NAME}"
  # Ensure worktree base directory exists (gitignored)
  mkdir -p "${WORKTREE_BASE}"
  # Create worktree from current branch on a new branch of the same name
  git worktree add "${WORKTREE_PATH}" -b "${WORKTREE_NAME}"
  echo "Created new worktree: ${WORKTREE_PATH}"
fi
# Setup cleanup trap for graceful failure handling
cleanup_worktree() {
@@ -64,6 +89,17 @@ cd "${WORKTREE_PATH}"
- Parallel executors don't conflict with each other
- Main working directory stays clean
- Easy cleanup after execution
- **Resume support**: Pass existing worktree path to continue interrupted executions
**Resume Examples:**
```bash
# List existing worktrees to find interrupted execution
git worktree list
# Resume in existing worktree (pass path as argument)
# The worktree path will be used instead of creating a new one
codex -p "@.codex/prompts/issue-execute.md --worktree /path/to/existing/worktree"
```
**Completion - User Choice:**

View File

@@ -25,6 +25,350 @@ import {
} from '../../tools/codex-lens.js';
import type { ProgressInfo, GpuMode } from '../../tools/codex-lens.js';
import { loadLiteLLMApiConfig } from '../../config/litellm-api-config-manager.js';
import { spawn, ChildProcess } from 'child_process';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';
// ============================================================
// WATCHER PERSISTENCE CONFIGURATION
// ============================================================
/** Persisted per-path watcher settings (one entry per watched root in watchers.json). */
interface WatcherConfig {
  enabled: boolean;      // whether this watcher should be (re)started on server boot
  debounce_ms: number;   // debounce window forwarded to the watch CLI
}

/** Runtime statistics for a single active watcher process. */
interface WatcherStats {
  running: boolean;           // false once the process has exited or been stopped
  root_path: string;          // directory being watched (as given by the caller)
  events_processed: number;   // count of "Processed N events" lines seen on stdout
  start_time: Date | null;    // null after the watcher is stopped
}

/** A live watcher: the spawned child process plus its running stats. */
interface ActiveWatcher {
  process: ChildProcess;
  stats: WatcherStats;
}

// Configuration file path: ~/.codexlens/watchers.json
const WATCHER_CONFIG_DIR = path.join(os.homedir(), '.codexlens');
const WATCHER_CONFIG_FILE = path.join(WATCHER_CONFIG_DIR, 'watchers.json');

// Active watchers Map: normalized_path -> { process, stats }
// Keys come from normalizePath() so lookups are case-stable on Windows.
const activeWatchers = new Map<string, ActiveWatcher>();
// Flag to ensure watchers are initialized only once
// (was previously interleaved inside the JSDoc below, breaking the comment)
let watchersInitialized = false;

/**
 * Normalize a filesystem path for consistent use as an activeWatchers key.
 * - Resolves to an absolute path.
 * - Lowercases on Windows, where paths are case-insensitive.
 *
 * Note: path separators are left as-is; keys are only ever compared against
 * other normalizePath() outputs, so separator style stays consistent.
 */
function normalizePath(inputPath: string): string {
  const resolved = path.resolve(inputPath);
  // Windows filesystems are case-insensitive, so lowercase for stable comparison
  return process.platform === 'win32' ? resolved.toLowerCase() : resolved;
}
/**
* Read watcher configuration from ~/.codexlens/watchers.json
* Returns empty object if file doesn't exist or has errors
*/
/**
 * Load the persisted watcher configuration from ~/.codexlens/watchers.json.
 * A missing file, unreadable file, or malformed JSON all yield an empty
 * object (with a warning) so server startup never fails on bad config.
 */
function readWatcherConfig(): Record<string, WatcherConfig> {
  try {
    if (!fs.existsSync(WATCHER_CONFIG_FILE)) {
      return {};
    }
    const raw = fs.readFileSync(WATCHER_CONFIG_FILE, 'utf-8');
    return JSON.parse(raw) as Record<string, WatcherConfig>;
  } catch (err) {
    console.warn('[CodexLens] Failed to read watcher config:', err);
    return {};
  }
}
/**
* Write watcher configuration to ~/.codexlens/watchers.json
* Creates directory if it doesn't exist
*/
/**
 * Persist watcher configuration to ~/.codexlens/watchers.json, creating the
 * config directory on first write. Errors are logged and rethrown so callers
 * can surface the failure to the user.
 */
function writeWatcherConfig(config: Record<string, WatcherConfig>): void {
  try {
    if (!fs.existsSync(WATCHER_CONFIG_DIR)) {
      fs.mkdirSync(WATCHER_CONFIG_DIR, { recursive: true });
    }
    const serialized = JSON.stringify(config, null, 2);
    fs.writeFileSync(WATCHER_CONFIG_FILE, serialized, 'utf-8');
  } catch (err) {
    console.error('[CodexLens] Failed to write watcher config:', err);
    throw err;
  }
}
// ============================================================
// ============================================================
// PROCESS MANAGEMENT FUNCTIONS
// ============================================================
/**
 * Start a watcher child process for the given directory.
 *
 * Validates the path, checks the CodexLens venv is installed, verifies the
 * directory is indexed (best-effort), spawns `python -m codexlens watch`,
 * registers the process in activeWatchers, wires stdout/stderr/exit/error
 * handlers, and broadcasts a CODEXLENS_WATCHER_STATUS event to clients.
 *
 * @param targetPath        directory to watch (must exist and be a directory)
 * @param debounce_ms       debounce window forwarded to the watch CLI
 * @param broadcastToClients callback used to push status events to connected clients
 * @returns { success, pid } on success, { success: false, error } otherwise
 */
async function startWatcherProcess(
  targetPath: string,
  debounce_ms: number,
  broadcastToClients: (data: unknown) => void
): Promise<{ success: boolean; error?: string; pid?: number }> {
  const normalizedPath = normalizePath(targetPath);
  // Check if watcher already running for this path (keyed by normalized path)
  if (activeWatchers.has(normalizedPath)) {
    return { success: false, error: 'Watcher already running for this path' };
  }
  try {
    const { existsSync, statSync } = await import('fs');
    // Validate path exists and is a directory
    if (!existsSync(targetPath)) {
      return { success: false, error: `Path does not exist: ${targetPath}` };
    }
    const pathStat = statSync(targetPath);
    if (!pathStat.isDirectory()) {
      return { success: false, error: `Path is not a directory: ${targetPath}` };
    }
    // Get the codexlens CLI path (venv must be installed and ready)
    const venvStatus = await checkVenvStatus();
    if (!venvStatus.ready) {
      return { success: false, error: 'CodexLens not installed' };
    }
    // Verify directory is indexed before starting watcher (best-effort:
    // any failure here is logged and the watcher is started anyway)
    try {
      const statusResult = await executeCodexLens(['projects', 'list', '--json']);
      if (statusResult.success && statusResult.stdout) {
        const parsed = extractJSON(statusResult.stdout);
        const projects = parsed.result || parsed || [];
        // Compare case-insensitively with forward slashes so Windows paths match
        const normalizedTarget = targetPath.toLowerCase().replace(/\\/g, '/');
        const isIndexed = Array.isArray(projects) && projects.some((p: { source_root: string }) =>
          p.source_root && p.source_root.toLowerCase().replace(/\\/g, '/') === normalizedTarget
        );
        if (!isIndexed) {
          return {
            success: false,
            error: `Directory is not indexed: ${targetPath}. Run 'codexlens init' first.`
          };
        }
      }
    } catch (err) {
      console.warn('[CodexLens] Could not verify index status:', err);
      // Continue anyway - watcher will fail with proper error if not indexed
    }
    // Spawn watch process using Python (no shell: true for security)
    const pythonPath = getVenvPythonPath();
    const args = ['-m', 'codexlens', 'watch', targetPath, '--debounce', String(debounce_ms)];
    const childProcess = spawn(pythonPath, args, {
      cwd: targetPath,
      stdio: ['ignore', 'pipe', 'pipe'],
      env: { ...process.env }
    });
    const stats: WatcherStats = {
      running: true,
      root_path: targetPath,
      events_processed: 0,
      start_time: new Date()
    };
    // Register in activeWatchers Map before wiring handlers so the
    // stdout handler below can find and update the entry
    activeWatchers.set(normalizedPath, { process: childProcess, stats });
    // Capture stderr for error messages (capped at 4KB to prevent memory leak;
    // keeps the tail, which usually contains the final error)
    const MAX_STDERR_SIZE = 4096;
    let stderrBuffer = '';
    if (childProcess.stderr) {
      childProcess.stderr.on('data', (data: Buffer) => {
        stderrBuffer += data.toString();
        if (stderrBuffer.length > MAX_STDERR_SIZE) {
          stderrBuffer = stderrBuffer.slice(-MAX_STDERR_SIZE);
        }
      });
    }
    // Handle process output for event counting: each "Processed N events"
    // line emitted by the CLI increments the counter by one batch
    if (childProcess.stdout) {
      childProcess.stdout.on('data', (data: Buffer) => {
        const output = data.toString();
        const matches = output.match(/Processed \d+ events?/g);
        if (matches) {
          const watcher = activeWatchers.get(normalizedPath);
          if (watcher) {
            watcher.stats.events_processed += matches.length;
          }
        }
      });
    }
    // Handle spawn errors (e.g., ENOENT): deregister and notify clients
    childProcess.on('error', (err: Error) => {
      console.error(`[CodexLens] Watcher spawn error for ${targetPath}: ${err.message}`);
      const watcher = activeWatchers.get(normalizedPath);
      if (watcher) {
        watcher.stats.running = false;
      }
      activeWatchers.delete(normalizedPath);
      broadcastToClients({
        type: 'CODEXLENS_WATCHER_STATUS',
        payload: { running: false, path: targetPath, error: `Spawn error: ${err.message}` }
      });
    });
    // Handle process exit: deregister, then broadcast with the captured
    // stderr tail (ANSI-stripped) when the exit code is non-zero
    childProcess.on('exit', (code: number) => {
      console.log(`[CodexLens] Watcher exited with code ${code} for ${targetPath}`);
      const watcher = activeWatchers.get(normalizedPath);
      if (watcher) {
        watcher.stats.running = false;
      }
      activeWatchers.delete(normalizedPath);
      // Broadcast error if exited with non-zero code
      if (code !== 0) {
        const errorMsg = stderrBuffer.trim() || `Exited with code ${code}`;
        const cleanError = stripAnsiCodes(errorMsg);
        broadcastToClients({
          type: 'CODEXLENS_WATCHER_STATUS',
          payload: { running: false, path: targetPath, error: cleanError }
        });
      } else {
        broadcastToClients({
          type: 'CODEXLENS_WATCHER_STATUS',
          payload: { running: false, path: targetPath }
        });
      }
    });
    // Broadcast watcher started
    broadcastToClients({
      type: 'CODEXLENS_WATCHER_STATUS',
      payload: { running: true, path: targetPath }
    });
    console.log(`[CodexLens] Watcher started for ${targetPath} (PID: ${childProcess.pid})`);
    return {
      success: true,
      pid: childProcess.pid
    };
  } catch (err: any) {
    return { success: false, error: err.message };
  }
}
/**
 * Stop the watcher process for the given path.
 *
 * Sends SIGTERM, waits 500ms for graceful shutdown, escalates to SIGKILL if
 * still alive, removes the entry from activeWatchers, and broadcasts the
 * stopped status to clients.
 *
 * @returns final { events_processed, uptime_seconds } stats on success
 */
async function stopWatcherProcess(
  targetPath: string,
  broadcastToClients: (data: unknown) => void
): Promise<{ success: boolean; error?: string; stats?: { events_processed: number; uptime_seconds: number } }> {
  const normalizedPath = normalizePath(targetPath);
  const watcher = activeWatchers.get(normalizedPath);
  if (!watcher || !watcher.stats.running) {
    return { success: false, error: 'Watcher not running for this path' };
  }
  try {
    // Send SIGTERM to gracefully stop the watcher
    watcher.process.kill('SIGTERM');
    // Wait a moment for graceful shutdown
    await new Promise(resolve => setTimeout(resolve, 500));
    // Force kill if still running
    if (watcher.process && !watcher.process.killed) {
      watcher.process.kill('SIGKILL');
    }
    // Snapshot final stats before clearing them
    const finalStats = {
      events_processed: watcher.stats.events_processed,
      uptime_seconds: watcher.stats.start_time
        ? Math.floor((Date.now() - watcher.stats.start_time.getTime()) / 1000)
        : 0
    };
    // Update stats and remove from Map
    watcher.stats.running = false;
    watcher.stats.root_path = '';
    watcher.stats.events_processed = 0;
    watcher.stats.start_time = null;
    activeWatchers.delete(normalizedPath);
    // Broadcast watcher stopped
    broadcastToClients({
      type: 'CODEXLENS_WATCHER_STATUS',
      payload: { running: false, path: targetPath }
    });
    console.log(`[CodexLens] Watcher stopped for ${targetPath}`);
    return {
      success: true,
      stats: finalStats
    };
  } catch (err: any) {
    return { success: false, error: err.message };
  }
}

// ============================================================
// AUTO-RECOVERY ON SERVER START
// ============================================================

/**
 * Initialize watchers from persisted configuration.
 * Called on server startup to restore watchers from ~/.codexlens/watchers.json.
 * A watcher that fails to restore is marked disabled in the config so it is
 * not retried on every boot (it can be re-enabled manually).
 *
 * NOTE(review): this function was previously nested inside stopWatcherProcess's
 * catch block due to missing closing braces; it is now a proper top-level function.
 */
async function initializeWatchers(broadcastToClients: (data: unknown) => void): Promise<void> {
  const config = readWatcherConfig();
  const enabledWatchers = Object.entries(config).filter(([_, cfg]) => cfg.enabled);
  if (enabledWatchers.length === 0) {
    console.log('[CodexLens] No watchers to restore');
    return;
  }
  console.log(`[CodexLens] Restoring ${enabledWatchers.length} watcher(s) from config...`);
  for (const [watchPath, cfg] of enabledWatchers) {
    try {
      const result = await startWatcherProcess(watchPath, cfg.debounce_ms, broadcastToClients);
      if (result.success) {
        console.log(`[CodexLens] Restored watcher for ${watchPath}`);
      } else {
        console.warn(`[CodexLens] Failed to restore watcher for ${watchPath}: ${result.error}`);
        // Keep config entry but mark as disabled (will be re-enabled manually)
        config[watchPath].enabled = false;
        writeWatcherConfig(config);
      }
    } catch (err: any) {
      console.error(`[CodexLens] Error restoring watcher for ${watchPath}:`, err.message);
    }
  }
}
// LEGACY STATE (Deprecated - use activeWatchers Map instead)
// ============================================================
// File watcher state (persisted across requests)
// NOTE(review): single-process legacy handle superseded by the per-path
// activeWatchers Map above; kept only for backward compatibility with
// older call sites — confirm before removing.
let watcherProcess: any = null;

View File

@@ -288,6 +288,20 @@ const i18n = {
'codexlens.envGroup.concurrency': 'Concurrency Settings',
'codexlens.envGroup.cascade': 'Cascade Search Settings',
'codexlens.envGroup.llm': 'LLM Features',
// Environment variable field labels
'codexlens.envField.backend': 'Backend',
'codexlens.envField.model': 'Model',
'codexlens.envField.useGpu': 'Use GPU',
'codexlens.envField.highAvailability': 'High Availability',
'codexlens.envField.loadBalanceStrategy': 'Load Balance Strategy',
'codexlens.envField.rateLimitCooldown': 'Rate Limit Cooldown (s)',
'codexlens.envField.enabled': 'Enabled',
'codexlens.envField.topKResults': 'Top K Results',
'codexlens.envField.maxWorkers': 'Max Workers',
'codexlens.envField.batchSize': 'Batch Size',
'codexlens.envField.searchStrategy': 'Search Strategy',
'codexlens.envField.coarseK': 'Coarse K (1st stage)',
'codexlens.envField.fineK': 'Fine K (final)',
'codexlens.usingApiReranker': 'Using API Reranker',
'codexlens.currentModel': 'Current Model',
'codexlens.localModels': 'Local Models',
@@ -1713,6 +1727,7 @@ const i18n = {
'common.removeFromRecent': 'Remove from recent',
'common.noDescription': 'No description',
'common.saving': 'Saving...',
'common.saveSuccess': 'Saved successfully',
'common.saveFailed': 'Failed to save',
'common.unknownError': 'Unknown error',
'common.exception': 'Exception',
@@ -2306,6 +2321,20 @@ const i18n = {
'codexlens.envGroup.concurrency': '并发设置',
'codexlens.envGroup.cascade': '级联搜索设置',
'codexlens.envGroup.llm': 'LLM 功能',
// 环境变量字段标签
'codexlens.envField.backend': '后端',
'codexlens.envField.model': '模型',
'codexlens.envField.useGpu': '使用 GPU',
'codexlens.envField.highAvailability': '高可用',
'codexlens.envField.loadBalanceStrategy': '负载均衡策略',
'codexlens.envField.rateLimitCooldown': '限流冷却 (秒)',
'codexlens.envField.enabled': '启用',
'codexlens.envField.topKResults': 'Top K 结果数',
'codexlens.envField.maxWorkers': '最大工作线程数',
'codexlens.envField.batchSize': '批处理大小',
'codexlens.envField.searchStrategy': '搜索策略',
'codexlens.envField.coarseK': '粗筛 K (第一阶段)',
'codexlens.envField.fineK': '精筛 K (最终)',
'codexlens.usingApiReranker': '使用 API 重排序',
'codexlens.currentModel': '当前模型',
'codexlens.localModels': '本地模型',
@@ -3742,6 +3771,7 @@ const i18n = {
'common.removeFromRecent': '从最近中移除',
'common.noDescription': '无描述',
'common.saving': '保存中...',
'common.saveSuccess': '保存成功',
'common.saveFailed': '保存失败',
'common.unknownError': '未知错误',
'common.exception': '异常',

View File

@@ -1640,6 +1640,10 @@ function showAddModelModal(providerId, modelType) {
'</label>' +
'</div>'
: isReranker ?
'<div class="form-group">' +
'<label>' + t('apiSettings.embeddingMaxTokens') + '</label>' +
'<input type="number" id="model-max-tokens" class="cli-input" value="8192" min="128" />' +
'</div>' +
'<div class="form-group">' +
'<label>' + t('apiSettings.rerankerTopK') + '</label>' +
'<input type="number" id="model-top-k" class="cli-input" value="10" min="1" max="100" />' +
@@ -1846,13 +1850,15 @@ function saveNewModel(event, providerId, modelType) {
};
} else if (isReranker) {
var topKEl = document.getElementById('model-top-k');
var maxTokensEl = document.getElementById('model-max-tokens');
newModel.capabilities = {
maxInputTokens: maxTokensEl ? parseInt(maxTokensEl.value) || 8192 : 8192,
topK: topKEl ? parseInt(topKEl.value) || 10 : 10
};
} else {
newModel.capabilities = {
embeddingDimension: parseInt(document.getElementById('model-dimensions').value) || 1536,
contextWindow: parseInt(document.getElementById('model-max-tokens').value) || 8192
maxInputTokens: parseInt(document.getElementById('model-max-tokens').value) || 8192
};
}
@@ -1878,7 +1884,8 @@ function saveNewModel(event, providerId, modelType) {
})
.then(function() {
closeAddModelModal();
return loadApiSettings();
// Force refresh to get latest data including newly created model
return loadApiSettings(true);
})
.then(function() {
if (selectedProviderId === providerId) {

View File

@@ -837,9 +837,9 @@ var ENV_VAR_GROUPS = {
labelKey: 'codexlens.envGroup.embedding',
icon: 'box',
vars: {
'CODEXLENS_EMBEDDING_BACKEND': { label: 'Backend', type: 'select', options: ['local', 'api'], default: 'local', settingsPath: 'embedding.backend' },
'CODEXLENS_EMBEDDING_BACKEND': { labelKey: 'codexlens.envField.backend', type: 'select', options: ['local', 'api'], default: 'local', settingsPath: 'embedding.backend' },
'CODEXLENS_EMBEDDING_MODEL': {
label: 'Model',
labelKey: 'codexlens.envField.model',
type: 'model-select',
placeholder: 'Select or enter model...',
default: 'fast',
@@ -855,20 +855,20 @@ var ENV_VAR_GROUPS = {
{ group: 'Jina', items: ['jina-embeddings-v3', 'jina-embeddings-v2-base-en', 'jina-embeddings-v2-base-zh'] }
]
},
'CODEXLENS_USE_GPU': { label: 'Use GPU', type: 'select', options: ['true', 'false'], default: 'true', settingsPath: 'embedding.use_gpu', showWhen: function(env) { return env['CODEXLENS_EMBEDDING_BACKEND'] === 'local'; } },
'CODEXLENS_EMBEDDING_POOL_ENABLED': { label: 'High Availability', type: 'select', options: ['true', 'false'], default: 'false', settingsPath: 'embedding.pool_enabled', showWhen: function(env) { return env['CODEXLENS_EMBEDDING_BACKEND'] === 'api'; } },
'CODEXLENS_EMBEDDING_STRATEGY': { label: 'Load Balance Strategy', type: 'select', options: ['round_robin', 'latency_aware', 'weighted_random'], default: 'latency_aware', settingsPath: 'embedding.strategy', showWhen: function(env) { return env['CODEXLENS_EMBEDDING_BACKEND'] === 'api' && env['CODEXLENS_EMBEDDING_POOL_ENABLED'] === 'true'; } },
'CODEXLENS_EMBEDDING_COOLDOWN': { label: 'Rate Limit Cooldown (s)', type: 'number', placeholder: '60', default: '60', settingsPath: 'embedding.cooldown', min: 0, max: 300, showWhen: function(env) { return env['CODEXLENS_EMBEDDING_BACKEND'] === 'api' && env['CODEXLENS_EMBEDDING_POOL_ENABLED'] === 'true'; } }
'CODEXLENS_USE_GPU': { labelKey: 'codexlens.envField.useGpu', type: 'select', options: ['true', 'false'], default: 'true', settingsPath: 'embedding.use_gpu', showWhen: function(env) { return env['CODEXLENS_EMBEDDING_BACKEND'] === 'local'; } },
'CODEXLENS_EMBEDDING_POOL_ENABLED': { labelKey: 'codexlens.envField.highAvailability', type: 'select', options: ['true', 'false'], default: 'false', settingsPath: 'embedding.pool_enabled', showWhen: function(env) { return env['CODEXLENS_EMBEDDING_BACKEND'] === 'api'; } },
'CODEXLENS_EMBEDDING_STRATEGY': { labelKey: 'codexlens.envField.loadBalanceStrategy', type: 'select', options: ['round_robin', 'latency_aware', 'weighted_random'], default: 'latency_aware', settingsPath: 'embedding.strategy', showWhen: function(env) { return env['CODEXLENS_EMBEDDING_BACKEND'] === 'api' && env['CODEXLENS_EMBEDDING_POOL_ENABLED'] === 'true'; } },
'CODEXLENS_EMBEDDING_COOLDOWN': { labelKey: 'codexlens.envField.rateLimitCooldown', type: 'number', placeholder: '60', default: '60', settingsPath: 'embedding.cooldown', min: 0, max: 300, showWhen: function(env) { return env['CODEXLENS_EMBEDDING_BACKEND'] === 'api' && env['CODEXLENS_EMBEDDING_POOL_ENABLED'] === 'true'; } }
}
},
reranker: {
labelKey: 'codexlens.envGroup.reranker',
icon: 'arrow-up-down',
vars: {
'CODEXLENS_RERANKER_ENABLED': { label: 'Enabled', type: 'select', options: ['true', 'false'], default: 'true', settingsPath: 'reranker.enabled' },
'CODEXLENS_RERANKER_BACKEND': { label: 'Backend', type: 'select', options: ['local', 'api'], default: 'local', settingsPath: 'reranker.backend' },
'CODEXLENS_RERANKER_ENABLED': { labelKey: 'codexlens.envField.enabled', type: 'select', options: ['true', 'false'], default: 'true', settingsPath: 'reranker.enabled' },
'CODEXLENS_RERANKER_BACKEND': { labelKey: 'codexlens.envField.backend', type: 'select', options: ['local', 'api'], default: 'local', settingsPath: 'reranker.backend' },
'CODEXLENS_RERANKER_MODEL': {
label: 'Model',
labelKey: 'codexlens.envField.model',
type: 'model-select',
placeholder: 'Select or enter model...',
default: 'Xenova/ms-marco-MiniLM-L-6-v2',
@@ -883,27 +883,27 @@ var ENV_VAR_GROUPS = {
{ group: 'Jina', items: ['jina-reranker-v2-base-multilingual', 'jina-reranker-v1-base-en'] }
]
},
'CODEXLENS_RERANKER_TOP_K': { label: 'Top K Results', type: 'number', placeholder: '50', default: '50', settingsPath: 'reranker.top_k', min: 5, max: 200 },
'CODEXLENS_RERANKER_POOL_ENABLED': { label: 'High Availability', type: 'select', options: ['true', 'false'], default: 'false', settingsPath: 'reranker.pool_enabled', showWhen: function(env) { return env['CODEXLENS_RERANKER_BACKEND'] === 'api'; } },
'CODEXLENS_RERANKER_STRATEGY': { label: 'Load Balance Strategy', type: 'select', options: ['round_robin', 'latency_aware', 'weighted_random'], default: 'latency_aware', settingsPath: 'reranker.strategy', showWhen: function(env) { return env['CODEXLENS_RERANKER_BACKEND'] === 'api' && env['CODEXLENS_RERANKER_POOL_ENABLED'] === 'true'; } },
'CODEXLENS_RERANKER_COOLDOWN': { label: 'Rate Limit Cooldown (s)', type: 'number', placeholder: '60', default: '60', settingsPath: 'reranker.cooldown', min: 0, max: 300, showWhen: function(env) { return env['CODEXLENS_RERANKER_BACKEND'] === 'api' && env['CODEXLENS_RERANKER_POOL_ENABLED'] === 'true'; } }
'CODEXLENS_RERANKER_TOP_K': { labelKey: 'codexlens.envField.topKResults', type: 'number', placeholder: '50', default: '50', settingsPath: 'reranker.top_k', min: 5, max: 200 },
'CODEXLENS_RERANKER_POOL_ENABLED': { labelKey: 'codexlens.envField.highAvailability', type: 'select', options: ['true', 'false'], default: 'false', settingsPath: 'reranker.pool_enabled', showWhen: function(env) { return env['CODEXLENS_RERANKER_BACKEND'] === 'api'; } },
'CODEXLENS_RERANKER_STRATEGY': { labelKey: 'codexlens.envField.loadBalanceStrategy', type: 'select', options: ['round_robin', 'latency_aware', 'weighted_random'], default: 'latency_aware', settingsPath: 'reranker.strategy', showWhen: function(env) { return env['CODEXLENS_RERANKER_BACKEND'] === 'api' && env['CODEXLENS_RERANKER_POOL_ENABLED'] === 'true'; } },
'CODEXLENS_RERANKER_COOLDOWN': { labelKey: 'codexlens.envField.rateLimitCooldown', type: 'number', placeholder: '60', default: '60', settingsPath: 'reranker.cooldown', min: 0, max: 300, showWhen: function(env) { return env['CODEXLENS_RERANKER_BACKEND'] === 'api' && env['CODEXLENS_RERANKER_POOL_ENABLED'] === 'true'; } }
}
},
concurrency: {
labelKey: 'codexlens.envGroup.concurrency',
icon: 'cpu',
vars: {
'CODEXLENS_API_MAX_WORKERS': { label: 'Max Workers', type: 'number', placeholder: '4', default: '4', settingsPath: 'api.max_workers', min: 1, max: 32 },
'CODEXLENS_API_BATCH_SIZE': { label: 'Batch Size', type: 'number', placeholder: '8', default: '8', settingsPath: 'api.batch_size', min: 1, max: 64 }
'CODEXLENS_API_MAX_WORKERS': { labelKey: 'codexlens.envField.maxWorkers', type: 'number', placeholder: '4', default: '4', settingsPath: 'api.max_workers', min: 1, max: 32 },
'CODEXLENS_API_BATCH_SIZE': { labelKey: 'codexlens.envField.batchSize', type: 'number', placeholder: '8', default: '8', settingsPath: 'api.batch_size', min: 1, max: 64 }
}
},
cascade: {
labelKey: 'codexlens.envGroup.cascade',
icon: 'git-branch',
vars: {
'CODEXLENS_CASCADE_STRATEGY': { label: 'Search Strategy', type: 'select', options: ['binary', 'hybrid', 'binary_rerank', 'dense_rerank'], default: 'dense_rerank', settingsPath: 'cascade.strategy' },
'CODEXLENS_CASCADE_COARSE_K': { label: 'Coarse K (1st stage)', type: 'number', placeholder: '100', default: '100', settingsPath: 'cascade.coarse_k', min: 10, max: 500 },
'CODEXLENS_CASCADE_FINE_K': { label: 'Fine K (final)', type: 'number', placeholder: '10', default: '10', settingsPath: 'cascade.fine_k', min: 1, max: 100 }
'CODEXLENS_CASCADE_STRATEGY': { labelKey: 'codexlens.envField.searchStrategy', type: 'select', options: ['binary', 'hybrid', 'binary_rerank', 'dense_rerank'], default: 'dense_rerank', settingsPath: 'cascade.strategy' },
'CODEXLENS_CASCADE_COARSE_K': { labelKey: 'codexlens.envField.coarseK', type: 'number', placeholder: '100', default: '100', settingsPath: 'cascade.coarse_k', min: 10, max: 500 },
'CODEXLENS_CASCADE_FINE_K': { labelKey: 'codexlens.envField.fineK', type: 'number', placeholder: '10', default: '10', settingsPath: 'cascade.fine_k', min: 1, max: 100 }
}
}
};
@@ -1039,8 +1039,9 @@ async function loadEnvVariables() {
if (key === 'CODEXLENS_EMBEDDING_BACKEND' || key === 'CODEXLENS_RERANKER_BACKEND') {
onchangeHandler = ' onchange="updateModelOptionsOnBackendChange(\'' + key + '\', this.value)"';
}
var fieldLabel = config.labelKey ? t(config.labelKey) : config.label;
html += '<div class="flex items-center gap-2"' + hiddenStyle + '>' +
'<label class="text-xs text-muted-foreground w-28 flex-shrink-0">' + escapeHtml(config.label) + '</label>' +
'<label class="text-xs text-muted-foreground w-28 flex-shrink-0">' + escapeHtml(fieldLabel) + '</label>' +
'<select class="tool-config-input flex-1 text-xs py-1" data-env-key="' + escapeHtml(key) + '"' + onchangeHandler + '>';
config.options.forEach(function(opt) {
html += '<option value="' + escapeHtml(opt) + '"' + (value === opt ? ' selected' : '') + '>' + escapeHtml(opt) + '</option>';
@@ -1062,8 +1063,9 @@ async function loadEnvVariables() {
// Fallback preset list for API models
var apiModelList = config.apiModels || [];
var modelFieldLabel = config.labelKey ? t(config.labelKey) : config.label;
html += '<div class="flex items-center gap-2"' + hiddenStyle + '>' +
'<label class="text-xs text-muted-foreground w-28 flex-shrink-0" title="' + escapeHtml(key) + '">' + escapeHtml(config.label) + '</label>' +
'<label class="text-xs text-muted-foreground w-28 flex-shrink-0" title="' + escapeHtml(key) + '">' + escapeHtml(modelFieldLabel) + '</label>' +
'<div class="relative flex-1">' +
'<input type="text" class="tool-config-input w-full text-xs py-1 pr-6" ' +
'data-env-key="' + escapeHtml(key) + '" value="' + escapeHtml(value) + '" ' +
@@ -1114,8 +1116,9 @@ async function loadEnvVariables() {
if (config.max !== undefined) extraAttrs += ' max="' + config.max + '"';
extraAttrs += ' step="1"';
}
var inputFieldLabel = config.labelKey ? t(config.labelKey) : config.label;
html += '<div class="flex items-center gap-2"' + hiddenStyle + '>' +
'<label class="text-xs text-muted-foreground w-28 flex-shrink-0" title="' + escapeHtml(key) + '">' + escapeHtml(config.label) + '</label>' +
'<label class="text-xs text-muted-foreground w-28 flex-shrink-0" title="' + escapeHtml(key) + '">' + escapeHtml(inputFieldLabel) + '</label>' +
'<input type="' + inputType + '" class="tool-config-input flex-1 text-xs py-1" ' +
'data-env-key="' + escapeHtml(key) + '" value="' + escapeHtml(value) + '" placeholder="' + escapeHtml(config.placeholder || '') + '"' + extraAttrs + ' />' +
'</div>';
@@ -3905,6 +3908,8 @@ async function renderCodexLensManager() {
loadIndexStatsForPage();
// Check index health based on git history
checkIndexHealth();
// Load workspace index status (FTS and Vector coverage)
refreshWorkspaceIndexStatus();
}
} catch (err) {
container.innerHTML = '<div class="text-center py-12 text-destructive"><i data-lucide="alert-circle" class="w-8 h-8 mx-auto mb-2"></i><p>' + t('common.error') + ': ' + escapeHtml(err.message) + '</p></div>';
@@ -5938,24 +5943,37 @@ function startWatcherStatusPolling() {
watcherPollingInterval = setInterval(async function() {
try {
// Check if modal elements still exist (modal may be closed)
var eventsCountEl = document.getElementById('watcherEventsCount');
var uptimeEl = document.getElementById('watcherUptime');
var toggleEl = document.getElementById('watcherToggle');
var statsEl = document.getElementById('watcherStats');
var configEl = document.getElementById('watcherStartConfig');
// If modal elements don't exist, stop polling
if (!eventsCountEl && !toggleEl) {
stopWatcherStatusPolling();
return;
}
var response = await fetch('/api/codexlens/watch/status');
var status = await response.json();
if (status.running) {
document.getElementById('watcherEventsCount').textContent = status.events_processed || 0;
if (eventsCountEl) eventsCountEl.textContent = status.events_processed || 0;
// Format uptime
var seconds = status.uptime_seconds || 0;
var formatted = seconds < 60 ? seconds + 's' :
seconds < 3600 ? Math.floor(seconds / 60) + 'm ' + (seconds % 60) + 's' :
Math.floor(seconds / 3600) + 'h ' + Math.floor((seconds % 3600) / 60) + 'm';
document.getElementById('watcherUptime').textContent = formatted;
if (uptimeEl) uptimeEl.textContent = formatted;
} else {
// Watcher stopped externally
stopWatcherStatusPolling();
document.getElementById('watcherToggle').checked = false;
document.getElementById('watcherStats').style.display = 'none';
document.getElementById('watcherStartConfig').style.display = 'block';
if (toggleEl) toggleEl.checked = false;
if (statsEl) statsEl.style.display = 'none';
if (configEl) configEl.style.display = 'block';
}
} catch (err) {
console.error('Failed to poll watcher status:', err);

View File

@@ -499,6 +499,13 @@ class Config:
except ValueError:
log.warning("Invalid RERANKER_COOLDOWN in .env: %r", env_vars["RERANKER_COOLDOWN"])
if "RERANKER_MAX_INPUT_TOKENS" in env_vars:
try:
self.reranker_max_input_tokens = int(env_vars["RERANKER_MAX_INPUT_TOKENS"])
log.debug("Overriding reranker_max_input_tokens from .env: %s", self.reranker_max_input_tokens)
except ValueError:
log.warning("Invalid RERANKER_MAX_INPUT_TOKENS in .env: %r", env_vars["RERANKER_MAX_INPUT_TOKENS"])
@classmethod
def load(cls) -> "Config":
"""Load config with settings from file."""

View File

@@ -294,7 +294,12 @@ class APIReranker(BaseReranker):
return payload
def _estimate_tokens(self, text: str) -> int:
"""Estimate token count using fast heuristic (len/4)."""
"""Estimate token count using fast heuristic.
Uses len(text) // 4 as approximation (~4 chars per token for English).
Not perfectly accurate for all models/languages but sufficient for
batch sizing decisions where exact counts aren't critical.
"""
return len(text) // 4
def _create_token_aware_batches(
@@ -317,7 +322,15 @@ class APIReranker(BaseReranker):
for idx, doc in enumerate(documents):
doc_tokens = self._estimate_tokens(doc)
# If single doc + query exceeds limit, include it anyway (will be truncated by API)
# Warn if single document exceeds token limit (will be truncated by API)
if doc_tokens > max_tokens - query_tokens:
logger.warning(
f"Document {idx} exceeds token limit: ~{doc_tokens} tokens "
f"(limit: {max_tokens - query_tokens} after query overhead). "
"Document will likely be truncated by the API."
)
# If batch would exceed limit, start new batch
if current_tokens + doc_tokens > max_tokens and current_batch:
batches.append(current_batch)
current_batch = []