Add comprehensive code review specifications and templates

- Introduced best practices requirements specification covering code quality, performance, maintainability, error handling, and documentation standards.
- Established quality standards with overall quality metrics and mandatory checks for security, code quality, performance, and maintainability.
- Created security requirements specification aligned with OWASP Top 10 and CWE Top 25, detailing checks and patterns for common vulnerabilities.
- Developed templates for documenting best-practice findings and security findings, and for generating reports in structured Markdown and JSON formats.
- Updated project dependencies to maintain compatibility and stability.
- Added test files and README documentation for vector indexing tests.
This commit is contained in:
catlog22
2026-01-06 23:11:15 +08:00
parent 02d66325a0
commit ef770ff29b
32 changed files with 4530 additions and 164 deletions

View File

@@ -430,6 +430,7 @@ def search(
query: str = typer.Argument(..., help="Search query."),
path: Path = typer.Option(Path("."), "--path", "-p", help="Directory to search from."),
limit: int = typer.Option(20, "--limit", "-n", min=1, max=500, help="Max results."),
offset: int = typer.Option(0, "--offset", min=0, help="Pagination offset - skip first N results."),
depth: int = typer.Option(-1, "--depth", "-d", help="Search depth (-1 = unlimited, 0 = current only)."),
files_only: bool = typer.Option(False, "--files-only", "-f", help="Return only file paths without content snippets."),
method: str = typer.Option("dense_rerank", "--method", "-m", help="Search method: 'dense_rerank' (semantic, default), 'fts' (exact keyword)."),

View File

@@ -161,9 +161,15 @@ class Config:
# Multi-endpoint configuration for litellm backend
embedding_endpoints: List[Dict[str, Any]] = field(default_factory=list)
# List of endpoint configs: [{"model": "...", "api_key": "...", "api_base": "...", "weight": 1.0}]
embedding_pool_enabled: bool = False # Enable high availability pool for embeddings
embedding_strategy: str = "latency_aware" # round_robin, latency_aware, weighted_random
embedding_cooldown: float = 60.0 # Default cooldown seconds for rate-limited endpoints
# Reranker multi-endpoint configuration
reranker_pool_enabled: bool = False # Enable high availability pool for reranker
reranker_strategy: str = "latency_aware" # round_robin, latency_aware, weighted_random
reranker_cooldown: float = 60.0 # Default cooldown seconds for rate-limited endpoints
# API concurrency settings
api_max_workers: int = 4 # Max concurrent API calls for embedding/reranking
api_batch_size: int = 8 # Batch size for API requests
@@ -254,12 +260,13 @@ class Config:
"backend": self.embedding_backend,
"model": self.embedding_model,
"use_gpu": self.embedding_use_gpu,
"pool_enabled": self.embedding_pool_enabled,
"strategy": self.embedding_strategy,
"cooldown": self.embedding_cooldown,
}
# Include multi-endpoint config if present
if self.embedding_endpoints:
embedding_config["endpoints"] = self.embedding_endpoints
embedding_config["strategy"] = self.embedding_strategy
embedding_config["cooldown"] = self.embedding_cooldown
settings = {
"embedding": embedding_config,
@@ -274,6 +281,9 @@ class Config:
"backend": self.reranker_backend,
"model": self.reranker_model,
"top_k": self.reranker_top_k,
"pool_enabled": self.reranker_pool_enabled,
"strategy": self.reranker_strategy,
"cooldown": self.reranker_cooldown,
},
"cascade": {
"strategy": self.cascade_strategy,
@@ -317,6 +327,8 @@ class Config:
# Load multi-endpoint configuration
if "endpoints" in embedding:
self.embedding_endpoints = embedding["endpoints"]
if "pool_enabled" in embedding:
self.embedding_pool_enabled = embedding["pool_enabled"]
if "strategy" in embedding:
self.embedding_strategy = embedding["strategy"]
if "cooldown" in embedding:
@@ -351,6 +363,12 @@ class Config:
self.reranker_model = reranker["model"]
if "top_k" in reranker:
self.reranker_top_k = reranker["top_k"]
if "pool_enabled" in reranker:
self.reranker_pool_enabled = reranker["pool_enabled"]
if "strategy" in reranker:
self.reranker_strategy = reranker["strategy"]
if "cooldown" in reranker:
self.reranker_cooldown = reranker["cooldown"]
# Load cascade settings
cascade = settings.get("cascade", {})
@@ -394,9 +412,15 @@ class Config:
Supported variables:
EMBEDDING_MODEL: Override embedding model/profile
EMBEDDING_BACKEND: Override embedding backend (fastembed/litellm)
EMBEDDING_POOL_ENABLED: Enable embedding high availability pool
EMBEDDING_STRATEGY: Load balance strategy for embedding
EMBEDDING_COOLDOWN: Rate limit cooldown for embedding
RERANKER_MODEL: Override reranker model
RERANKER_BACKEND: Override reranker backend
RERANKER_ENABLED: Override reranker enabled state (true/false)
RERANKER_POOL_ENABLED: Enable reranker high availability pool
RERANKER_STRATEGY: Load balance strategy for reranker
RERANKER_COOLDOWN: Rate limit cooldown for reranker
"""
from .env_config import load_global_env
@@ -417,6 +441,26 @@ class Config:
else:
log.warning("Invalid EMBEDDING_BACKEND in .env: %r", backend)
if "EMBEDDING_POOL_ENABLED" in env_vars:
value = env_vars["EMBEDDING_POOL_ENABLED"].lower()
self.embedding_pool_enabled = value in {"true", "1", "yes", "on"}
log.debug("Overriding embedding_pool_enabled from .env: %s", self.embedding_pool_enabled)
if "EMBEDDING_STRATEGY" in env_vars:
strategy = env_vars["EMBEDDING_STRATEGY"].lower()
if strategy in {"round_robin", "latency_aware", "weighted_random"}:
self.embedding_strategy = strategy
log.debug("Overriding embedding_strategy from .env: %s", strategy)
else:
log.warning("Invalid EMBEDDING_STRATEGY in .env: %r", strategy)
if "EMBEDDING_COOLDOWN" in env_vars:
try:
self.embedding_cooldown = float(env_vars["EMBEDDING_COOLDOWN"])
log.debug("Overriding embedding_cooldown from .env: %s", self.embedding_cooldown)
except ValueError:
log.warning("Invalid EMBEDDING_COOLDOWN in .env: %r", env_vars["EMBEDDING_COOLDOWN"])
# Reranker overrides
if "RERANKER_MODEL" in env_vars:
self.reranker_model = env_vars["RERANKER_MODEL"]
@@ -435,6 +479,26 @@ class Config:
self.enable_cross_encoder_rerank = value in {"true", "1", "yes", "on"}
log.debug("Overriding reranker_enabled from .env: %s", self.enable_cross_encoder_rerank)
if "RERANKER_POOL_ENABLED" in env_vars:
value = env_vars["RERANKER_POOL_ENABLED"].lower()
self.reranker_pool_enabled = value in {"true", "1", "yes", "on"}
log.debug("Overriding reranker_pool_enabled from .env: %s", self.reranker_pool_enabled)
if "RERANKER_STRATEGY" in env_vars:
strategy = env_vars["RERANKER_STRATEGY"].lower()
if strategy in {"round_robin", "latency_aware", "weighted_random"}:
self.reranker_strategy = strategy
log.debug("Overriding reranker_strategy from .env: %s", strategy)
else:
log.warning("Invalid RERANKER_STRATEGY in .env: %r", strategy)
if "RERANKER_COOLDOWN" in env_vars:
try:
self.reranker_cooldown = float(env_vars["RERANKER_COOLDOWN"])
log.debug("Overriding reranker_cooldown from .env: %s", self.reranker_cooldown)
except ValueError:
log.warning("Invalid RERANKER_COOLDOWN in .env: %r", env_vars["RERANKER_COOLDOWN"])
@classmethod
def load(cls) -> "Config":
"""Load config with settings from file."""

View File

@@ -27,11 +27,17 @@ ENV_VARS = {
"RERANKER_API_KEY": "API key for reranker service (SiliconFlow/Cohere/Jina)",
"RERANKER_API_BASE": "Base URL for reranker API (overrides provider default)",
"RERANKER_PROVIDER": "Reranker provider: siliconflow, cohere, jina",
"RERANKER_POOL_ENABLED": "Enable reranker high availability pool: true/false",
"RERANKER_STRATEGY": "Reranker load balance strategy: round_robin, latency_aware, weighted_random",
"RERANKER_COOLDOWN": "Reranker rate limit cooldown in seconds",
# Embedding configuration (overrides settings.json)
"EMBEDDING_MODEL": "Embedding model/profile name (overrides settings.json)",
"EMBEDDING_BACKEND": "Embedding backend: fastembed, litellm",
"EMBEDDING_API_KEY": "API key for embedding service",
"EMBEDDING_API_BASE": "Base URL for embedding API",
"EMBEDDING_POOL_ENABLED": "Enable embedding high availability pool: true/false",
"EMBEDDING_STRATEGY": "Embedding load balance strategy: round_robin, latency_aware, weighted_random",
"EMBEDDING_COOLDOWN": "Embedding rate limit cooldown in seconds",
# LiteLLM configuration
"LITELLM_API_KEY": "API key for LiteLLM",
"LITELLM_API_BASE": "Base URL for LiteLLM",

View File

@@ -1338,8 +1338,9 @@ class ChainSearchEngine:
(d for cid, d, _ in coarse_candidates if cid == chunk_id),
1.0
)
# Convert cosine distance to score
score = 1.0 - distance
# Convert cosine distance to score (clamp to [0, 1] for Pydantic validation)
# Cosine distance can be > 1 for anti-correlated vectors, causing negative scores
score = max(0.0, 1.0 - distance)
content = chunk.get("content", "")
result = SearchResult(