feat: Enhance embedding management and model configuration

- Updated embedding_manager.py to pass the embedding backend when storing model configuration.
- Modified model_manager.py to use cache_name when resolving cache paths for mapped ONNX models.
- Refactored hybrid_search.py to initialize the embedder based on the stored backend type.
- Added a backend column in vector_store.py for better model configuration management.
- Implemented a migration that adds backend information to existing databases.
- Enhanced the API settings implementation with comprehensive provider and endpoint management.
- Introduced a LiteLLM integration guide detailing configuration and usage.
- Added TypeScript examples of LiteLLM usage.
Author: catlog22
Date: 2025-12-24 14:03:59 +08:00
Parent: 9b926d1a1e
Commit: b00113d212
22 changed files with 5507 additions and 706 deletions
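
Taken together, the diffs below thread a backend identifier from index build time to query time: the indexer records which backend produced the vectors, and the search engine recreates a matching embedder from that record. A minimal sketch of the round trip, assuming the `VectorStore` and factory `get_embedder` signatures shown in the hunks (the index path and the "openai" profile label are illustrative):

```python
from codexlens.semantic.factory import get_embedder
from codexlens.semantic.vector_store import VectorStore

store = VectorStore("index/vectors.db")  # hypothetical index path

# Index time: record which backend produced the stored embeddings.
embedder = get_embedder(backend="litellm", model="text-embedding-3-small")
store.set_model_config("openai", embedder.model_name, embedder.embedding_dim,
                       backend="litellm")

# Query time: rebuild the same embedder from the persisted config.
config = store.get_model_config()  # set above, so not None here
if config.get("backend", "fastembed") == "litellm":
    embedder = get_embedder(backend="litellm", model=config["model_name"])
else:
    embedder = get_embedder(backend="fastembed", profile=config["model_profile"])
```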

embedding_manager.py

@@ -309,7 +309,7 @@ def generate_embeddings(
     # Set/update model configuration for this index
     vector_store.set_model_config(
-        model_profile, embedder.model_name, embedder.embedding_dim
+        model_profile, embedder.model_name, embedder.embedding_dim, backend=embedding_backend
     )
     # Use bulk insert mode for efficient batch ANN index building
     # This defers ANN updates until end_bulk_insert() is called

model_manager.py

@@ -107,8 +107,9 @@ def _get_model_cache_path(cache_dir: Path, info: Dict) -> Path:
         Path to the model cache directory
     """
     # HuggingFace Hub naming: models--{org}--{model}
-    model_name = info["model_name"]
-    sanitized_name = f"models--{model_name.replace('/', '--')}"
+    # Use cache_name if available (for mapped ONNX models), else model_name
+    target_name = info.get("cache_name", info["model_name"])
+    sanitized_name = f"models--{target_name.replace('/', '--')}"
     return cache_dir / sanitized_name
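
The effect of the `cache_name` fallback is easiest to see with concrete inputs. A small standalone illustration of the same naming rule (the model and cache names are examples, not values from this repo):

```python
from pathlib import Path

def model_cache_path(cache_dir: Path, info: dict) -> Path:
    # Same rule as _get_model_cache_path: prefer cache_name for mapped ONNX models.
    target_name = info.get("cache_name", info["model_name"])
    return cache_dir / f"models--{target_name.replace('/', '--')}"

cache = Path.home() / ".cache" / "huggingface"
# Plain model: the directory derives from model_name.
print(model_cache_path(cache, {"model_name": "BAAI/bge-small-en-v1.5"}))
# -> .../models--BAAI--bge-small-en-v1.5
# Mapped ONNX model: cache_name overrides the directory name.
print(model_cache_path(cache, {"model_name": "BAAI/bge-small-en-v1.5",
                               "cache_name": "Qdrant/bge-small-en-v1.5-onnx"}))
# -> .../models--Qdrant--bge-small-en-v1.5-onnx
```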

hybrid_search.py

@@ -260,7 +260,7 @@ class HybridSearchEngine:
             return []

         # Initialize embedder and vector store
-        from codexlens.semantic.embedder import get_embedder
+        from codexlens.semantic.factory import get_embedder
         from codexlens.semantic.vector_store import VectorStore

         vector_store = VectorStore(index_path)
@@ -277,32 +277,51 @@ class HybridSearchEngine:
         # Get stored model configuration (preferred) or auto-detect from dimension
         model_config = vector_store.get_model_config()
         if model_config:
-            profile = model_config["model_profile"]
+            backend = model_config.get("backend", "fastembed")
+            model_name = model_config["model_name"]
+            model_profile = model_config["model_profile"]
             self.logger.debug(
-                "Using stored model config: %s (%s, %dd)",
-                profile, model_config["model_name"], model_config["embedding_dim"]
+                "Using stored model config: %s backend, %s (%s, %dd)",
+                backend, model_profile, model_name, model_config["embedding_dim"]
             )
+
+            # Get embedder based on backend
+            if backend == "litellm":
+                embedder = get_embedder(backend="litellm", model=model_name)
+            else:
+                embedder = get_embedder(backend="fastembed", profile=model_profile)
         else:
             # Fallback: auto-detect from embedding dimension
             detected_dim = vector_store.dimension
             if detected_dim is None:
                 self.logger.info("Vector store dimension unknown, using default profile")
-                profile = "code"  # Default fallback
+                embedder = get_embedder(backend="fastembed", profile="code")
             elif detected_dim == 384:
-                profile = "fast"
+                embedder = get_embedder(backend="fastembed", profile="fast")
             elif detected_dim == 768:
-                profile = "code"
+                embedder = get_embedder(backend="fastembed", profile="code")
             elif detected_dim == 1024:
-                profile = "multilingual"  # or balanced, both are 1024
+                embedder = get_embedder(backend="fastembed", profile="multilingual")
+            elif detected_dim == 1536:
+                # Likely OpenAI text-embedding-3-small or ada-002
+                self.logger.info(
+                    "Detected 1536-dim embeddings (likely OpenAI), using litellm backend with text-embedding-3-small"
+                )
+                embedder = get_embedder(backend="litellm", model="text-embedding-3-small")
+            elif detected_dim == 3072:
+                # Likely OpenAI text-embedding-3-large
+                self.logger.info(
+                    "Detected 3072-dim embeddings (likely OpenAI), using litellm backend with text-embedding-3-large"
+                )
+                embedder = get_embedder(backend="litellm", model="text-embedding-3-large")
             else:
-                profile = "code"  # Default fallback
-            self.logger.debug(
-                "No stored model config, auto-detected profile '%s' from dimension %s",
-                profile, detected_dim
-            )
-
-        # Use cached embedder (singleton) for performance
-        embedder = get_embedder(profile=profile)
+                self.logger.debug(
+                    "Unknown dimension %s, using default fastembed profile 'code'",
+                    detected_dim
+                )
+                embedder = get_embedder(backend="fastembed", profile="code")

         # Generate query embedding
         query_embedding = embedder.embed_single(query)
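
The dimension fallback above is effectively a lookup table, so a dispatch dict expresses the same logic more compactly. A sketch under the same assumptions (the mapping is copied from the diff; as its comments concede, 1024 and 1536 are inherently ambiguous guesses):

```python
from codexlens.semantic.factory import get_embedder

# Known dimensions -> get_embedder keyword arguments (mapping taken from the diff above).
DIM_TO_EMBEDDER = {
    384:  {"backend": "fastembed", "profile": "fast"},
    768:  {"backend": "fastembed", "profile": "code"},
    1024: {"backend": "fastembed", "profile": "multilingual"},  # balanced is also 1024
    1536: {"backend": "litellm", "model": "text-embedding-3-small"},
    3072: {"backend": "litellm", "model": "text-embedding-3-large"},
}

def embedder_for_dimension(detected_dim):
    # None and unknown dimensions fall back to the default fastembed 'code' profile.
    kwargs = DIM_TO_EMBEDDER.get(detected_dim, {"backend": "fastembed", "profile": "code"})
    return get_embedder(**kwargs)
```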

vector_store.py

@@ -123,12 +123,34 @@ class VectorStore:
                     model_profile TEXT NOT NULL,
                     model_name TEXT NOT NULL,
                     embedding_dim INTEGER NOT NULL,
+                    backend TEXT NOT NULL DEFAULT 'fastembed',
                     created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                     updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                 )
             """)
+
+            # Migration: Add backend column to existing tables
+            self._migrate_backend_column(conn)
+
             conn.commit()

+    def _migrate_backend_column(self, conn: sqlite3.Connection) -> None:
+        """Add backend column to existing embeddings_config table if not present.
+
+        Args:
+            conn: Active SQLite connection
+        """
+        # Check if backend column exists
+        cursor = conn.execute("PRAGMA table_info(embeddings_config)")
+        columns = [row[1] for row in cursor.fetchall()]
+
+        if 'backend' not in columns:
+            logger.info("Migrating embeddings_config table: adding backend column")
+            conn.execute("""
+                ALTER TABLE embeddings_config
+                ADD COLUMN backend TEXT NOT NULL DEFAULT 'fastembed'
+            """)
+
     def _init_ann_index(self) -> None:
         """Initialize ANN index (lazy loading from existing data)."""
         if not HNSWLIB_AVAILABLE:
@@ -947,11 +969,11 @@ class VectorStore:
         """Get the model configuration used for embeddings in this store.

         Returns:
-            Dictionary with model_profile, model_name, embedding_dim, or None if not set.
+            Dictionary with model_profile, model_name, embedding_dim, backend, or None if not set.
         """
         with sqlite3.connect(self.db_path) as conn:
             row = conn.execute(
-                "SELECT model_profile, model_name, embedding_dim, created_at, updated_at "
+                "SELECT model_profile, model_name, embedding_dim, backend, created_at, updated_at "
                 "FROM embeddings_config WHERE id = 1"
             ).fetchone()
             if row:
@@ -959,13 +981,14 @@
                     "model_profile": row[0],
                     "model_name": row[1],
                     "embedding_dim": row[2],
-                    "created_at": row[3],
-                    "updated_at": row[4],
+                    "backend": row[3],
+                    "created_at": row[4],
+                    "updated_at": row[5],
                 }
             return None

     def set_model_config(
-        self, model_profile: str, model_name: str, embedding_dim: int
+        self, model_profile: str, model_name: str, embedding_dim: int, backend: str = 'fastembed'
     ) -> None:
         """Set the model configuration for embeddings in this store.
@@ -976,19 +999,21 @@
             model_profile: Model profile name (fast, code, minilm, etc.)
             model_name: Full model name (e.g., jinaai/jina-embeddings-v2-base-code)
             embedding_dim: Embedding dimension (e.g., 768)
+            backend: Backend used for embeddings (fastembed or litellm, default: fastembed)
         """
         with sqlite3.connect(self.db_path) as conn:
             conn.execute(
                 """
-                INSERT INTO embeddings_config (id, model_profile, model_name, embedding_dim)
-                VALUES (1, ?, ?, ?)
+                INSERT INTO embeddings_config (id, model_profile, model_name, embedding_dim, backend)
+                VALUES (1, ?, ?, ?, ?)
                 ON CONFLICT(id) DO UPDATE SET
                     model_profile = excluded.model_profile,
                     model_name = excluded.model_name,
                     embedding_dim = excluded.embedding_dim,
+                    backend = excluded.backend,
                     updated_at = CURRENT_TIMESTAMP
                 """,
-                (model_profile, model_name, embedding_dim)
+                (model_profile, model_name, embedding_dim, backend)
             )
             conn.commit()
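
The `_migrate_backend_column` helper follows the standard idempotent SQLite migration pattern: inspect `PRAGMA table_info`, and issue `ALTER TABLE ... ADD COLUMN` only when the column is missing, so it is safe to run on every startup. A self-contained demonstration of the pattern (the table name matches the diff; the surrounding schema is illustrative):

```python
import sqlite3

def has_column(conn: sqlite3.Connection, table: str, column: str) -> bool:
    # PRAGMA table_info yields one row per column; index 1 holds the column name.
    return column in [row[1] for row in conn.execute(f"PRAGMA table_info({table})")]

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE embeddings_config (id INTEGER PRIMARY KEY, model_name TEXT)")

# Idempotent: the ALTER runs at most once, however often this executes.
# SQLite requires a non-null DEFAULT when adding a NOT NULL column to a
# populated table, which the 'fastembed' default provides.
if not has_column(conn, "embeddings_config", "backend"):
    conn.execute(
        "ALTER TABLE embeddings_config "
        "ADD COLUMN backend TEXT NOT NULL DEFAULT 'fastembed'"
    )

assert has_column(conn, "embeddings_config", "backend")
```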