mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-03-03 15:43:11 +08:00
feat: add documentation for Checkbox, Input, and Select components; enhance Queue and Terminal features
- Introduced Checkbox component documentation in Chinese, covering usage, properties, and examples. - Added Input component documentation in Chinese, detailing its attributes and various states. - Created Select component documentation in Chinese, including subcomponents and usage examples. - Developed Queue management documentation, outlining its core functionalities and component structure. - Added Terminal dashboard documentation, describing its layout, core features, and usage examples. - Documented team workflows, detailing various team skills and their applications in project management.
This commit is contained in:
@@ -30,7 +30,7 @@ dependencies = [
 [project.optional-dependencies]
 # Semantic search using fastembed (ONNX-based, lightweight ~200MB)
 semantic = [
-    "numpy~=1.24.0",
+    "numpy~=1.26.0",
     "fastembed~=0.2.0",
     "hnswlib~=0.8.0",
 ]
@@ -38,7 +38,7 @@ semantic = [
 # GPU acceleration for semantic search (NVIDIA CUDA)
 # Install with: pip install codexlens[semantic-gpu]
 semantic-gpu = [
-    "numpy~=1.24.0",
+    "numpy~=1.26.0",
     "fastembed~=0.2.0",
     "hnswlib~=0.8.0",
     "onnxruntime-gpu~=1.15.0", # CUDA support
@@ -47,7 +47,7 @@ semantic-gpu = [
 # GPU acceleration for Windows (DirectML - supports NVIDIA/AMD/Intel)
 # Install with: pip install codexlens[semantic-directml]
 semantic-directml = [
-    "numpy~=1.24.0",
+    "numpy~=1.26.0",
     "fastembed~=0.2.0",
     "hnswlib~=0.8.0",
     "onnxruntime-directml~=1.15.0", # DirectML support
@@ -19,16 +19,16 @@ Requires-Dist: pathspec~=0.11.0
 Requires-Dist: watchdog~=3.0.0
 Requires-Dist: ast-grep-py~=0.40.0
 Provides-Extra: semantic
-Requires-Dist: numpy~=1.24.0; extra == "semantic"
+Requires-Dist: numpy~=1.26.0; extra == "semantic"
 Requires-Dist: fastembed~=0.2.0; extra == "semantic"
 Requires-Dist: hnswlib~=0.8.0; extra == "semantic"
 Provides-Extra: semantic-gpu
-Requires-Dist: numpy~=1.24.0; extra == "semantic-gpu"
+Requires-Dist: numpy~=1.26.0; extra == "semantic-gpu"
 Requires-Dist: fastembed~=0.2.0; extra == "semantic-gpu"
 Requires-Dist: hnswlib~=0.8.0; extra == "semantic-gpu"
 Requires-Dist: onnxruntime-gpu~=1.15.0; extra == "semantic-gpu"
 Provides-Extra: semantic-directml
-Requires-Dist: numpy~=1.24.0; extra == "semantic-directml"
+Requires-Dist: numpy~=1.26.0; extra == "semantic-directml"
 Requires-Dist: fastembed~=0.2.0; extra == "semantic-directml"
 Requires-Dist: hnswlib~=0.8.0; extra == "semantic-directml"
 Requires-Dist: onnxruntime-directml~=1.15.0; extra == "semantic-directml"
@@ -42,18 +42,18 @@ onnxruntime~=1.15.0
 transformers~=4.36.0
 
 [semantic]
-numpy~=1.24.0
+numpy~=1.26.0
 fastembed~=0.2.0
 hnswlib~=0.8.0
 
 [semantic-directml]
-numpy~=1.24.0
+numpy~=1.26.0
 fastembed~=0.2.0
 hnswlib~=0.8.0
 onnxruntime-directml~=1.15.0
 
 [semantic-gpu]
-numpy~=1.24.0
+numpy~=1.26.0
 fastembed~=0.2.0
 hnswlib~=0.8.0
 onnxruntime-gpu~=1.15.0
Reference in New Issue
Block a user