fix(codexlens): correct fastembed 0.7.4 cache path and download trigger

- Update cache path to ~/.cache/huggingface (HuggingFace Hub default)
- Fix model path format: models--{org}--{model}
- Add .embed() call to trigger actual download in download_model()
- Ensure cross-platform compatibility (Linux/Windows)
This commit is contained in:
rhyme
2025-12-23 14:51:08 +08:00
parent 86cefa7bda
commit 1998f3ae8a


@@ -79,36 +79,37 @@ def get_cache_dir() -> Path:
"""Get fastembed cache directory. """Get fastembed cache directory.
Returns: Returns:
Path to cache directory (usually ~/.cache/fastembed or %LOCALAPPDATA%\\Temp\\fastembed_cache) Path to cache directory (~/.cache/huggingface or custom path)
""" """
# Check HF_HOME environment variable first # Check HF_HOME environment variable first
if "HF_HOME" in os.environ: if "HF_HOME" in os.environ:
return Path(os.environ["HF_HOME"]) return Path(os.environ["HF_HOME"])
# Default cache locations # fastembed 0.7.4+ uses HuggingFace cache when cache_dir is specified
if os.name == "nt": # Windows # Models are stored directly under the cache directory
cache_dir = Path(os.environ.get("LOCALAPPDATA", Path.home() / "AppData" / "Local")) / "Temp" / "fastembed_cache" return Path.home() / ".cache" / "huggingface"
else: # Unix-like
cache_dir = Path.home() / ".cache" / "fastembed"
return cache_dir
def _get_model_cache_path(cache_dir: Path, info: Dict) -> Path: def _get_model_cache_path(cache_dir: Path, info: Dict) -> Path:
"""Get the actual cache path for a model. """Get the actual cache path for a model.
fastembed uses ONNX versions of models with different names than the original. fastembed 0.7.4+ uses HuggingFace Hub's naming convention:
This function returns the correct path based on the cache_name field. - Prefix: 'models--'
- Replace '/' with '--' in model name
Example: jinaai/jina-embeddings-v2-base-code
-> models--jinaai--jina-embeddings-v2-base-code
Args: Args:
cache_dir: The fastembed cache directory cache_dir: The fastembed cache directory (HuggingFace hub path)
info: Model profile info dictionary info: Model profile info dictionary
Returns: Returns:
Path to the model cache directory Path to the model cache directory
""" """
cache_name = info.get("cache_name", info["model_name"]) # HuggingFace Hub naming: models--{org}--{model}
return cache_dir / f"models--{cache_name.replace('/', '--')}" model_name = info["model_name"]
sanitized_name = f"models--{model_name.replace('/', '--')}"
return cache_dir / sanitized_name
def list_models() -> Dict[str, any]: def list_models() -> Dict[str, any]:
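
For reference, a minimal standalone sketch of the path resolution the new _get_model_cache_path performs; the hub_cache_path helper and the example model name below are illustrative, not part of the module:

    from pathlib import Path

    def hub_cache_path(cache_dir: Path, model_name: str) -> Path:
        # HuggingFace Hub layout: models--{org}--{model}
        return cache_dir / f"models--{model_name.replace('/', '--')}"

    cache_dir = Path.home() / ".cache" / "huggingface"
    print(hub_cache_path(cache_dir, "jinaai/jina-embeddings-v2-base-code"))
    # expected: ~/.cache/huggingface/models--jinaai--jina-embeddings-v2-base-code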
@@ -194,18 +195,29 @@ def download_model(profile: str, progress_callback: Optional[callable] = None) -
model_name = info["model_name"] model_name = info["model_name"]
try: try:
# Download model by instantiating TextEmbedding # Get cache directory
# This will automatically download to cache if not present cache_dir = get_cache_dir()
# Download model by instantiating TextEmbedding with explicit cache_dir
# This ensures fastembed uses the correct HuggingFace Hub cache location
if progress_callback: if progress_callback:
progress_callback(f"Downloading {model_name}...") progress_callback(f"Downloading {model_name}...")
embedder = TextEmbedding(model_name=model_name) # CRITICAL: Must specify cache_dir to use HuggingFace cache
# and call embed() to trigger actual download
embedder = TextEmbedding(model_name=model_name, cache_dir=str(cache_dir))
# Trigger actual download by calling embed
# TextEmbedding.__init__ alone doesn't download files
if progress_callback:
progress_callback(f"Initializing {model_name}...")
list(embedder.embed(["test"])) # Trigger download
if progress_callback: if progress_callback:
progress_callback(f"Model {model_name} downloaded successfully") progress_callback(f"Model {model_name} downloaded successfully")
# Get cache info using correct cache_name # Get cache info using correct HuggingFace Hub path
cache_dir = get_cache_dir()
model_cache_path = _get_model_cache_path(cache_dir, info) model_cache_path = _get_model_cache_path(cache_dir, info)
cache_size = 0 cache_size = 0
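
As a usage note, a hedged sketch of the download-and-verify flow this hunk implements; the model name and the final existence check are illustrative and assume the cache layout described above:

    from pathlib import Path
    from fastembed import TextEmbedding

    cache_dir = Path.home() / ".cache" / "huggingface"
    embedder = TextEmbedding(
        model_name="jinaai/jina-embeddings-v2-base-code",
        cache_dir=str(cache_dir),
    )
    # Instantiation alone may not fetch all model files; embedding one
    # short document forces the download, mirroring download_model() above.
    list(embedder.embed(["test"]))

    # The files should now sit under the HuggingFace Hub-style directory.
    model_path = cache_dir / "models--jinaai--jina-embeddings-v2-base-code"
    print(model_path.exists())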