# Mirror of https://github.com/catlog22/Claude-Code-Workflow.git
# Synced 2026-02-05 01:50:27 +08:00
#
# Recent changes:
# - Added `splade_encoder.py` for ONNX-optimized SPLADE encoding, including
#   methods for encoding text and batch processing.
# - Created `SPLADE_IMPLEMENTATION.md` to document the SPLADE encoder's
#   functionality, design patterns, and integration points.
# - Introduced migration script `migration_009_add_splade.py` to add SPLADE
#   metadata and posting list tables to the database.
# - Developed `splade_index.py` for managing the SPLADE inverted index,
#   supporting efficient sparse vector retrieval.
# - Added verification script `verify_watcher.py` to test FileWatcher event
#   filtering and debouncing functionality.

# PEP 517/518 build configuration: setuptools >= 61 is required for
# PEP 621 [project] metadata support.
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

# Core package metadata (PEP 621).
[project]
name = "codex-lens"
version = "0.1.0"
description = "CodexLens multi-modal code analysis platform"
readme = "README.md"
requires-python = ">=3.10"
license = { text = "MIT" }
authors = [
    { name = "CodexLens contributors" },
]
# Runtime dependencies (PEP 508 specifiers), sorted alphabetically.
dependencies = [
    "pathspec>=0.11",
    "pydantic>=2.0",
    "rich>=13",
    "tree-sitter>=0.20",
    "tree-sitter-javascript>=0.25",
    "tree-sitter-python>=0.25",
    "tree-sitter-typescript>=0.23",
    "typer>=0.9",
    "watchdog>=3.0",
]

# Optional feature sets. Each extra is installable as
# `pip install codexlens[<extra>]`; lists are sorted alphabetically.
[project.optional-dependencies]
# Semantic search using fastembed (ONNX-based, lightweight ~200MB)
semantic = [
    "fastembed>=0.2",
    "hnswlib>=0.8.0",
    "numpy>=1.24",
]

# GPU acceleration for semantic search (NVIDIA CUDA)
# Install with: pip install codexlens[semantic-gpu]
semantic-gpu = [
    "fastembed>=0.2",
    "hnswlib>=0.8.0",
    "numpy>=1.24",
    "onnxruntime-gpu>=1.15.0",  # CUDA support
]

# GPU acceleration for Windows (DirectML - supports NVIDIA/AMD/Intel)
# Install with: pip install codexlens[semantic-directml]
semantic-directml = [
    "fastembed>=0.2",
    "hnswlib>=0.8.0",
    "numpy>=1.24",
    "onnxruntime-directml>=1.15.0",  # DirectML support
]

# Cross-encoder reranking (second-stage, optional)
# Install with: pip install codexlens[reranker] (default: ONNX backend)
reranker-onnx = [
    "onnxruntime>=1.15",
    "optimum>=1.16",
    "transformers>=4.36",
]

# Remote reranking via HTTP API
reranker-api = [
    "httpx>=0.25",
]

# LLM-based reranking via ccw-litellm
reranker-litellm = [
    "ccw-litellm>=0.1",
]

# Legacy sentence-transformers CrossEncoder reranker
reranker-legacy = [
    "sentence-transformers>=2.2",
]

# Backward-compatible alias for the default (ONNX) reranker backend;
# must stay in sync with reranker-onnx above.
reranker = [
    "onnxruntime>=1.15",
    "optimum>=1.16",
    "transformers>=4.36",
]

# SPLADE sparse retrieval
splade = [
    "optimum[onnxruntime]>=1.16",
    "transformers>=4.36",
]

# SPLADE with GPU acceleration (CUDA)
splade-gpu = [
    "optimum[onnxruntime-gpu]>=1.16",
    "transformers>=4.36",
]

# Encoding detection for non-UTF8 files
encoding = [
    "chardet>=5.0",
]

# Full features including tiktoken for accurate token counting
full = [
    "tiktoken>=0.5.0",
]

# Project links shown on PyPI.
[project.urls]
Homepage = "https://github.com/openai/codex-lens"

# src-layout: packages live under src/, discovered from there by setuptools.
[tool.setuptools]
package-dir = { "" = "src" }