chore: move ccw-skill-hub to standalone repository

Migrated ccw-skill-hub to D:/ccw-skill-hub as an independent git project.
Removed nested git repos (ccw/frontend/ccw-skill-hub, skill-hub-repo, skill-hub-temp).
This commit is contained in:
catlog22
2026-02-24 11:57:26 +08:00
parent 6f0bbe84ea
commit 61e313a0c1
35 changed files with 3189 additions and 362 deletions

View File

@@ -13,95 +13,95 @@ authors = [
{ name = "CodexLens contributors" }
]
dependencies = [
"typer>=0.9",
"rich>=13",
"pydantic>=2.0",
"tree-sitter>=0.20",
"tree-sitter-python>=0.25",
"tree-sitter-javascript>=0.25",
"tree-sitter-typescript>=0.23",
"pathspec>=0.11",
"watchdog>=3.0",
"typer~=0.9.0",
"rich~=13.0.0",
"pydantic~=2.0.0",
"tree-sitter~=0.20.0",
"tree-sitter-python~=0.25.0",
"tree-sitter-javascript~=0.25.0",
"tree-sitter-typescript~=0.23.0",
"pathspec~=0.11.0",
"watchdog~=3.0.0",
# ast-grep for pattern-based AST matching (PyO3 bindings)
# ast-grep-py 0.40+ supports Python 3.13
"ast-grep-py>=0.40.0",
"ast-grep-py~=0.40.0",
]
[project.optional-dependencies]
# Semantic search using fastembed (ONNX-based, lightweight ~200MB)
semantic = [
"numpy>=1.24",
"fastembed>=0.2",
"hnswlib>=0.8.0",
"numpy~=1.24.0",
"fastembed~=0.2.0",
"hnswlib~=0.8.0",
]
# GPU acceleration for semantic search (NVIDIA CUDA)
# Install with: pip install codexlens[semantic-gpu]
semantic-gpu = [
"numpy>=1.24",
"fastembed>=0.2",
"hnswlib>=0.8.0",
"onnxruntime-gpu>=1.15.0", # CUDA support
"numpy~=1.24.0",
"fastembed~=0.2.0",
"hnswlib~=0.8.0",
"onnxruntime-gpu~=1.15.0", # CUDA support
]
# GPU acceleration for Windows (DirectML - supports NVIDIA/AMD/Intel)
# Install with: pip install codexlens[semantic-directml]
semantic-directml = [
"numpy>=1.24",
"fastembed>=0.2",
"hnswlib>=0.8.0",
"onnxruntime-directml>=1.15.0", # DirectML support
"numpy~=1.24.0",
"fastembed~=0.2.0",
"hnswlib~=0.8.0",
"onnxruntime-directml~=1.15.0", # DirectML support
]
# Cross-encoder reranking (second-stage, optional)
# Install with: pip install codexlens[reranker] (default: ONNX backend)
reranker-onnx = [
"optimum>=1.16",
"onnxruntime>=1.15",
"transformers>=4.36",
"optimum~=1.16.0",
"onnxruntime~=1.15.0",
"transformers~=4.36.0",
]
# Remote reranking via HTTP API
reranker-api = [
"httpx>=0.25",
"httpx~=0.25.0",
]
# LLM-based reranking via ccw-litellm
reranker-litellm = [
"ccw-litellm>=0.1",
"ccw-litellm~=0.1.0",
]
# Legacy sentence-transformers CrossEncoder reranker
reranker-legacy = [
"sentence-transformers>=2.2",
"sentence-transformers~=2.2.0",
]
# Backward-compatible alias for default reranker backend
reranker = [
"optimum>=1.16",
"onnxruntime>=1.15",
"transformers>=4.36",
"optimum~=1.16.0",
"onnxruntime~=1.15.0",
"transformers~=4.36.0",
]
# Encoding detection for non-UTF8 files
encoding = [
"chardet>=5.0",
"chardet~=5.0.0",
]
# Clustering for staged hybrid search (HDBSCAN + sklearn)
clustering = [
"hdbscan>=0.8.1",
"scikit-learn>=1.3.0",
"hdbscan~=0.8.1",
"scikit-learn~=1.3.0",
]
# Full features including tiktoken for accurate token counting
full = [
"tiktoken>=0.5.0",
"tiktoken~=0.5.0",
]
# Language Server Protocol support
lsp = [
"pygls>=1.3.0",
"pygls~=1.3.0",
]
[project.scripts]