fix: resolve GitHub issues #63, #66, #67, #68, #69, #70

- #70: Fix API Key Tester URL handling - normalize trailing slashes before
  version suffix detection to prevent double-slash URLs like //models
- #69: Fix memory embedder ignoring CodexLens config - add error handling
  for CodexLensConfig.load() with fallback to defaults
- #68: Fix ccw CLI using wrong Python environment - add getCodexLensVenvPython()
  to resolve the correct venv interpreter path on Windows/Unix (see the sketch
  after this list)
- #67: Fix LiteLLM API Provider test endpoint - actually test API key connection
  instead of just checking ccw-litellm installation
- #66: Fix help-routes.ts path configuration - use correct 'ccw-help' directory
  name and refactor getIndexDir to pure function
- #63: Fix CodexLens install state refresh - add cache invalidation after
  config save in codexlens-manager.js
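
A minimal sketch of the venv-resolution idea behind the #68 fix (Python here for
illustration only; the actual getCodexLensVenvPython() helper lives on the JS
side of ccw, and the helper name and venv location below are hypothetical):

    import sys
    from pathlib import Path

    def resolve_venv_python(venv_dir: str) -> Path:
        """Pick the venv interpreter, accounting for platform layout."""
        venv = Path(venv_dir).expanduser()
        if sys.platform == "win32":
            # Windows venvs keep the interpreter under Scripts\python.exe
            candidate = venv / "Scripts" / "python.exe"
        else:
            # POSIX venvs keep it under bin/python
            candidate = venv / "bin" / "python"
        # Fall back to the current interpreter if the venv is missing
        return candidate if candidate.exists() else Path(sys.executable)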

Also includes targeted unit tests for the URL normalization logic (sketched below).
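
The normalization itself is small: strip trailing slashes before checking for a
version suffix, so a base URL ending in "/" can no longer produce paths like
//models. A rough Python sketch (the real tester is TypeScript; the helper name
and the /v1 suffix handling here are illustrative, not the actual implementation):

    def build_models_url(base_url: str) -> str:
        """Strip trailing slashes, then append the version suffix exactly once."""
        base = base_url.rstrip("/")
        if not base.endswith("/v1"):
            base = base + "/v1"
        return base + "/models"

    assert build_models_url("https://api.example.com/") == "https://api.example.com/v1/models"
    assert build_models_url("https://api.example.com/v1/") == "https://api.example.com/v1/models"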

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
Author: catlog22
Date: 2026-01-13 18:20:54 +08:00
parent 61cef8019a
commit 340137d347
12 changed files with 546 additions and 29 deletions

@@ -26,7 +26,9 @@ except ImportError:
     sys.exit(1)
 
 try:
-    from codexlens.semantic.embedder import get_embedder, clear_embedder_cache
+    from codexlens.semantic.factory import get_embedder as get_embedder_factory
+    from codexlens.semantic.factory import clear_embedder_cache
+    from codexlens.config import Config as CodexLensConfig
 except ImportError:
     print("Error: CodexLens not found. Install with: pip install codexlens[semantic]", file=sys.stderr)
     sys.exit(1)
@@ -35,8 +37,6 @@ except ImportError:
 class MemoryEmbedder:
     """Generate and search embeddings for memory chunks."""
 
-    EMBEDDING_DIM = 768  # jina-embeddings-v2-base-code dimension
-
     def __init__(self, db_path: str):
         """Initialize embedder with database path."""
         self.db_path = Path(db_path)
@@ -46,14 +46,61 @@ class MemoryEmbedder:
         self.conn = sqlite3.connect(str(self.db_path))
         self.conn.row_factory = sqlite3.Row
 
+        # Load CodexLens configuration for embedding settings
+        try:
+            self._config = CodexLensConfig.load()
+        except Exception as e:
+            print(f"Warning: Could not load CodexLens config, using defaults. Error: {e}", file=sys.stderr)
+            self._config = CodexLensConfig()  # Use default config
+
         # Lazy-load embedder to avoid ~0.8s model loading for status command
         self._embedder = None
+        self._embedding_dim = None
 
     @property
+    def embedding_dim(self) -> int:
+        """Get embedding dimension from the embedder."""
+        if self._embedding_dim is None:
+            # Access embedder to get its dimension
+            self._embedding_dim = self.embedder.embedding_dim
+        return self._embedding_dim
+
+    @property
     def embedder(self):
-        """Lazy-load the embedder on first access."""
+        """Lazy-load the embedder on first access using CodexLens config."""
         if self._embedder is None:
-            self._embedder = get_embedder(profile="code")
+            # Use CodexLens configuration settings
+            backend = self._config.embedding_backend
+            model = self._config.embedding_model
+            use_gpu = self._config.embedding_use_gpu
+
+            # Use factory to create embedder based on backend type
+            if backend == "fastembed":
+                self._embedder = get_embedder_factory(
+                    backend="fastembed",
+                    profile=model,
+                    use_gpu=use_gpu
+                )
+            elif backend == "litellm":
+                # For litellm backend, also pass endpoints if configured
+                endpoints = self._config.embedding_endpoints
+                strategy = self._config.embedding_strategy
+                cooldown = self._config.embedding_cooldown
+                self._embedder = get_embedder_factory(
+                    backend="litellm",
+                    model=model,
+                    endpoints=endpoints if endpoints else None,
+                    strategy=strategy,
+                    cooldown=cooldown,
+                )
+            else:
+                # Fallback to fastembed with code profile
+                self._embedder = get_embedder_factory(
+                    backend="fastembed",
+                    profile="code",
+                    use_gpu=True
+                )
         return self._embedder
 
     def close(self):
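
For context, a minimal usage sketch of the new lazy path (the database path below
is hypothetical and assumed to already exist): constructing MemoryEmbedder only
loads the CodexLens config, and no embedding model is pulled in until the embedder
(or embedding_dim) property is first touched, which keeps the status command fast.

    embedder = MemoryEmbedder("path/to/memory.db")  # hypothetical path
    # Cheap: config loaded (or defaulted), model not yet constructed.
    print(embedder.db_path)
    # First access builds the backend selected by the CodexLens config
    # (fastembed or litellm) via get_embedder_factory().
    print(embedder.embedding_dim)
    embedder.close()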