feat: add hooks command to simplify the Claude Code hooks interface, with support for session context loading and notifications

catlog22
2025-12-21 23:28:19 +08:00
parent 6d3f10d1d7
commit 210f0f1012
7 changed files with 432 additions and 11 deletions

View File

@@ -151,10 +151,21 @@ def init(
     if not json_mode:
         console.print("\n[bold]Generating embeddings...[/bold]")
         console.print(f"Model: [cyan]{embedding_model}[/cyan]")
+    else:
+        # Output progress message for JSON mode (parsed by Node.js)
+        print("Generating embeddings...", flush=True)
 
-    # Progress callback for non-json mode
+    # Progress callback - outputs progress for both json and non-json modes
+    # Node.js parseProgressLine() expects formats like:
+    #   - "Batch X: N files, M chunks"
+    #   - "Processing N files"
+    #   - "Finalizing index"
     def progress_update(msg: str):
-        if not json_mode and verbose:
+        if json_mode:
+            # Output without prefix so Node.js can parse it
+            # Strip leading spaces that embedding_manager adds
+            print(msg.strip(), flush=True)
+        elif verbose:
             console.print(f" {msg}")
 
     embed_result = generate_embeddings_recursive(
@@ -162,12 +173,16 @@ def init(
         model_profile=embedding_model,
         force=False,  # Don't force regenerate during init
         chunk_size=2000,
-        progress_callback=progress_update if not json_mode else None,
+        progress_callback=progress_update,  # Always use callback
     )
 
     if embed_result["success"]:
         embed_data = embed_result["result"]
+        # Output completion message for Node.js to parse
+        if json_mode:
+            print(f"Embeddings complete: {embed_data['total_chunks_created']} chunks", flush=True)
+
+        # Get comprehensive coverage statistics
         status_result = get_embeddings_status(index_root)
         if status_result["success"]:
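
The comments in this file pin down a small cross-process contract: Python prints bare, flushed lines, and the Node.js side's parseProgressLine() recognizes them by shape. The real parser is Node.js and is not part of this commit; as a reference for what the contract implies, here is a minimal Python mirror, assuming the formats listed in the comments (plus the completion line above) are the complete set:

import re

# Hypothetical Python mirror of the Node.js parseProgressLine() contract.
# Only message shapes visible in this diff are assumed.
PROGRESS_PATTERNS = [
    ("batch", re.compile(r"^Batch (\d+): (\d+) files, (\d+) chunks")),
    ("processing", re.compile(r"^Processing (\d+) files")),
    ("finalizing", re.compile(r"^Finalizing index")),
    ("complete", re.compile(r"^Embeddings complete: (\d+) chunks")),
]

def parse_progress_line(line: str):
    """Return (kind, groups) for a recognized progress line, else None."""
    line = line.strip()  # mirrors the msg.strip() done on the producer side
    for kind, pattern in PROGRESS_PATTERNS:
        match = pattern.match(line)
        if match:
            return kind, match.groups()
    return None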

View File

@@ -235,7 +235,8 @@ def generate_embeddings(
         return {"success": False, "error": "No files found in index"}
 
     if progress_callback:
-        progress_callback(f"Processing {total_files} files in batches of {FILE_BATCH_SIZE}...")
+        # Format must match Node.js parseProgressLine: "Processing N files" with 'embed' keyword
+        progress_callback(f"Processing {total_files} files for embeddings in batches of {FILE_BATCH_SIZE}...")
 
     cursor = conn.execute(f"SELECT {path_column}, content, language FROM files")
     batch_number = 0
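
The 'embed' keyword mentioned in the new comment is easy to miss: the reworded message only qualifies because "for embeddings" contains that substring. A consumer-side sketch of the assumed check (the actual logic lives in Node.js, so both the shape and the keyword rule here are assumptions taken from the comment):

import re

def is_embedding_progress(line: str) -> bool:
    # Assumed rule from the comment above: a "Processing N files" shape
    # plus an 'embed' substring somewhere in the line.
    return bool(re.match(r"^Processing \d+ files\b", line)) and "embed" in line

# The new wording passes; the old wording, with no 'embed' substring, did not:
assert is_embedding_progress("Processing 42 files for embeddings in batches of 8...")
assert not is_embedding_progress("Processing 42 files in batches of 8...")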
@@ -325,10 +326,24 @@ def generate_embeddings(
             progress_callback(f"Finalizing index... Building ANN index for {total_chunks_created} chunks")
 
     except Exception as e:
+        # Cleanup on error to prevent process hanging
+        try:
+            clear_embedder_cache()
+            gc.collect()
+        except Exception:
+            pass
         return {"success": False, "error": f"Failed to read or process files: {str(e)}"}
 
     elapsed_time = time.time() - start_time
 
+    # Final cleanup: release ONNX resources to allow process exit
+    # This is critical - without it, ONNX Runtime threads prevent Python from exiting
+    try:
+        clear_embedder_cache()
+        gc.collect()
+    except Exception:
+        pass
+
     return {
         "success": True,
         "result": {
@@ -418,7 +433,8 @@ def generate_embeddings_recursive(
             rel_path = index_path.relative_to(index_root)
         except ValueError:
             rel_path = index_path
-        progress_callback(f"[{idx}/{len(index_files)}] Processing {rel_path}")
+        # Format: "Processing file X/Y: path" to match Node.js parseProgressLine
+        progress_callback(f"Processing file {idx}/{len(index_files)}: {rel_path}")
 
     result = generate_embeddings(
         index_path,
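
Since the message format is a contract between a Python producer and a Node.js consumer, a cheap guard against drift is a test asserting the emitted string matches the expected pattern. A minimal pytest-style sketch, with the regex as an assumed stand-in for what parseProgressLine() accepts:

import re

# Assumed pattern for the new per-index message "Processing file X/Y: path".
FILE_PROGRESS = re.compile(r"^Processing file (\d+)/(\d+): (.+)$")

def test_file_progress_format():
    msg = "Processing file 3/7: some/index/path"  # illustrative values
    match = FILE_PROGRESS.match(msg)
    assert match is not None
    assert match.group(1) == "3"
    assert match.group(3) == "some/index/path"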
@@ -443,6 +459,15 @@ def generate_embeddings_recursive(
     successful = sum(1 for r in all_results if r["success"])
 
+    # Final cleanup after processing all indexes
+    # Each generate_embeddings() call does its own cleanup, but do a final one to be safe
+    try:
+        if SEMANTIC_AVAILABLE:
+            clear_embedder_cache()
+        gc.collect()
+    except Exception:
+        pass
+
     return {
         "success": successful > 0,
         "result": {