mirror of https://github.com/catlog22/Claude-Code-Workflow.git (synced 2026-02-09 02:24:11 +08:00)
Add help view and core memory styles
- Introduced styles for the help view including tab transitions, accordion animations, search highlighting, and responsive design.
- Implemented core memory styles with modal base styles, memory card designs, and knowledge graph visualization.
- Enhanced dark mode support across various components.
- Added loading states and empty state designs for better user experience.
@@ -376,7 +376,7 @@ class DirIndexStore:
conn.execute("DELETE FROM symbols WHERE file_id=?", (file_id,))
if symbols:
# Insert symbols without token_count and symbol_type
# Insert symbols
symbol_rows = []
for s in symbols:
symbol_rows.append(
@@ -819,22 +819,23 @@ class DirIndexStore:
return results
else:
# Fallback to original query for backward compatibility
# Fallback using normalized tables with contains matching (slower but more flexible)
keyword_pattern = f"%{keyword}%"
rows = conn.execute(
"""
SELECT f.id, f.name, f.full_path, f.language, f.mtime, f.line_count, sm.keywords
SELECT f.id, f.name, f.full_path, f.language, f.mtime, f.line_count,
GROUP_CONCAT(k.keyword, ',') as keywords
FROM files f
JOIN semantic_metadata sm ON f.id = sm.file_id
WHERE sm.keywords LIKE ? COLLATE NOCASE
JOIN file_keywords fk ON f.id = fk.file_id
JOIN keywords k ON fk.keyword_id = k.id
WHERE k.keyword LIKE ? COLLATE NOCASE
GROUP BY f.id, f.name, f.full_path, f.language, f.mtime, f.line_count
ORDER BY f.name
""",
(keyword_pattern,),
).fetchall()
import json
results = []
for row in rows:
file_entry = FileEntry(
@@ -845,7 +846,7 @@ class DirIndexStore:
mtime=float(row["mtime"]) if row["mtime"] else 0.0,
line_count=int(row["line_count"]) if row["line_count"] else 0,
)
keywords = json.loads(row["keywords"]) if row["keywords"] else []
keywords = row["keywords"].split(',') if row["keywords"] else []
results.append((file_entry, keywords))
return results
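A rough sketch of how the new normalized-table fallback can be exercised on its own, assuming a sqlite3 connection with row_factory set to sqlite3.Row and the files/file_keywords/keywords tables shown in the diff; the helper name is illustrative, not part of the codebase.

import sqlite3

def search_files_by_keyword(conn: sqlite3.Connection, keyword: str):
    """Contains-style keyword search over the normalized keyword tables."""
    pattern = f"%{keyword}%"
    rows = conn.execute(
        """
        SELECT f.id, f.name, f.full_path,
               GROUP_CONCAT(k.keyword, ',') AS keywords
        FROM files f
        JOIN file_keywords fk ON f.id = fk.file_id
        JOIN keywords k ON fk.keyword_id = k.id
        WHERE k.keyword LIKE ? COLLATE NOCASE
        GROUP BY f.id, f.name, f.full_path
        ORDER BY f.name
        """,
        (pattern,),
    ).fetchall()
    # GROUP_CONCAT yields a comma-joined string, so split it instead of json.loads
    return [
        (row["full_path"], row["keywords"].split(",") if row["keywords"] else [])
        for row in rows
    ]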
@@ -1432,7 +1433,7 @@ class DirIndexStore:
"""
)
# Symbols table (v5: removed token_count and symbol_type)
# Symbols table with token metadata
conn.execute(
"""
CREATE TABLE IF NOT EXISTS symbols (
@@ -143,6 +143,9 @@ class IndexTreeBuilder:
index_root = self.mapper.source_to_index_dir(source_root)
project_info = self.registry.register_project(source_root, index_root)
# Report progress: discovering files (5%)
print("Discovering files...", flush=True)
# Collect directories by depth
dirs_by_depth = self._collect_dirs_by_depth(source_root, languages)
@@ -157,6 +160,13 @@ class IndexTreeBuilder:
errors=["No indexable directories found"],
)
# Calculate total directories for progress tracking
total_dirs_to_process = sum(len(dirs) for dirs in dirs_by_depth.values())
processed_dirs = 0
# Report progress: building index (10%)
print("Building index...", flush=True)
total_files = 0
total_dirs = 0
all_errors: List[str] = []
@@ -179,10 +189,17 @@ class IndexTreeBuilder:
for result in results:
if result.error:
all_errors.append(f"{result.source_path}: {result.error}")
processed_dirs += 1
continue
total_files += result.files_count
total_dirs += 1
processed_dirs += 1
# Report progress for each processed directory (10-80%)
# Use "Processing file" format for frontend parser compatibility
progress_percent = 10 + int((processed_dirs / total_dirs_to_process) * 70)
print(f"Processing file {processed_dirs}/{total_dirs_to_process}: {result.source_path.name}", flush=True)
# Register directory in registry
self.registry.register_dir(
@@ -193,6 +210,9 @@ class IndexTreeBuilder:
files_count=result.files_count,
)
# Report progress: linking subdirectories (80%)
print("Linking subdirectories...", flush=True)
# After building all directories, link subdirectories to parents
# This needs to happen after all indexes exist
for result in all_results:
@@ -203,6 +223,8 @@ class IndexTreeBuilder:
# Cleanup deleted files if in incremental mode
if use_incremental:
# Report progress: cleaning up (90%)
print("Cleaning up deleted files...", flush=True)
self.logger.info("Cleaning up deleted files...")
total_deleted = 0
for result in all_results:
@@ -220,9 +242,15 @@ class IndexTreeBuilder:
if total_deleted > 0:
self.logger.info("Removed %d deleted files from index", total_deleted)
# Report progress: finalizing (95%)
print("Finalizing...", flush=True)
# Update project statistics
self.registry.update_project_stats(source_root, total_files, total_dirs)
# Report completion (100%)
print(f"Indexed {total_files} files", flush=True)
self.logger.info(
"Index build complete: %d files, %d directories, %d errors",
total_files,
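The per-directory progress reserves the 10-80% band, which is just a linear mapping of processed directories onto that range; a minimal sketch, with the helper name assumed for illustration.

def directory_progress_percent(processed: int, total: int) -> int:
    """Map processed/total onto the 10-80% band reserved for directory indexing."""
    if total <= 0:
        return 10
    return 10 + int((processed / total) * 70)

# e.g. 5 of 20 directories -> 10 + int(0.25 * 70) = 27
assert directory_progress_percent(5, 20) == 27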
@@ -102,7 +102,7 @@ class MigrationManager:
This method checks the current database version and applies all
subsequent migrations in order. Each migration is applied within
a transaction.
a transaction, unless the migration manages its own transactions.
"""
current_version = self.get_current_version()
log.info(f"Current database schema version: {current_version}")
@@ -111,21 +111,36 @@ class MigrationManager:
if migration.version > current_version:
log.info(f"Applying migration {migration.version}: {migration.name}...")
try:
self.db_conn.execute("BEGIN")
# Check if a transaction is already in progress
in_transaction = self.db_conn.in_transaction
# Only start transaction if not already in one
if not in_transaction:
self.db_conn.execute("BEGIN")
migration.upgrade(self.db_conn)
self.set_version(migration.version)
self.db_conn.execute("COMMIT")
# Only commit if we started the transaction and it's still active
if not in_transaction and self.db_conn.in_transaction:
self.db_conn.execute("COMMIT")
log.info(
f"Successfully applied migration {migration.version}: {migration.name}"
)
except Exception as e:
log.error(
f"Failed to apply migration {migration.version}: {migration.name}. Rolling back. Error: {e}",
f"Failed to apply migration {migration.version}: {migration.name}. Error: {e}",
exc_info=True,
)
self.db_conn.execute("ROLLBACK")
# Try to rollback if transaction is active
try:
if self.db_conn.in_transaction:
self.db_conn.execute("ROLLBACK")
except Exception:
pass  # Ignore rollback errors
raise
latest_migration_version = self.migrations[-1].version if self.migrations else 0
if current_version < latest_migration_version:
# This case can be hit if migrations were applied but the loop was exited
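The reworked loop only opens and commits a transaction when none is already active, so migrations that manage their own transactions are left alone. A minimal sketch of that pattern, assuming Python's sqlite3 (whose Connection exposes in_transaction), a connection in autocommit mode, and a hypothetical migration object with an upgrade() method; this is not the project's actual MigrationManager.

import sqlite3

def apply_migration(conn: sqlite3.Connection, migration) -> None:
    """Apply one migration, starting/committing a transaction only if none is active."""
    started_here = not conn.in_transaction
    try:
        if started_here:
            conn.execute("BEGIN")
        migration.upgrade(conn)
        # Commit only if we opened the transaction and the migration left it open
        if started_here and conn.in_transaction:
            conn.execute("COMMIT")
    except Exception:
        # Roll back whatever is still open, then re-raise for the caller
        if conn.in_transaction:
            conn.execute("ROLLBACK")
        raise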
@@ -64,6 +64,14 @@ def upgrade(db_conn: Connection):
log.info("No 'semantic_metadata' table found, skipping data migration.")
return
# Check if 'keywords' column exists in semantic_metadata table
# (current schema may already use normalized tables without this column)
cursor.execute("PRAGMA table_info(semantic_metadata)")
columns = {row[1] for row in cursor.fetchall()}
if "keywords" not in columns:
log.info("No 'keywords' column in semantic_metadata table, skipping data migration.")
return
cursor.execute("SELECT file_id, keywords FROM semantic_metadata WHERE keywords IS NOT NULL AND keywords != ''")
files_to_migrate = cursor.fetchall()
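The guard added here boils down to "does this column exist?", answered via PRAGMA table_info. A standalone sketch of that check; the helper name is chosen for illustration and is not part of the migration module.

import sqlite3

def column_exists(conn: sqlite3.Connection, table: str, column: str) -> bool:
    """Return True if `column` is present in `table` (empty result if the table is missing)."""
    rows = conn.execute(f"PRAGMA table_info({table})").fetchall()
    return any(row[1] == column for row in rows)  # row[1] is the column name

# Usage mirroring the migration guard:
# if not column_exists(conn, "semantic_metadata", "keywords"):
#     return  # nothing to migrate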
@@ -36,22 +36,27 @@ log = logging.getLogger(__name__)
def upgrade(db_conn: Connection):
"""Remove unused and redundant fields from schema.
Note: Transaction management is handled by MigrationManager.
This migration should NOT start its own transaction.
Args:
db_conn: The SQLite database connection.
"""
cursor = db_conn.cursor()
try:
cursor.execute("BEGIN TRANSACTION")
# Step 1: Remove semantic_metadata.keywords (if column exists)
log.info("Checking semantic_metadata.keywords column...")
# Step 1: Remove semantic_metadata.keywords
log.info("Removing semantic_metadata.keywords column...")
cursor.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='semantic_metadata'"
)
if cursor.fetchone():
# Check if keywords column exists
cursor.execute("PRAGMA table_info(semantic_metadata)")
columns = {row[1] for row in cursor.fetchall()}
# Check if semantic_metadata table exists
cursor.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='semantic_metadata'"
)
if cursor.fetchone():
if "keywords" in columns:
log.info("Removing semantic_metadata.keywords column...")
cursor.execute("""
CREATE TABLE semantic_metadata_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
@@ -79,16 +84,23 @@ def upgrade(db_conn: Connection):
)
log.info("Removed semantic_metadata.keywords column")
else:
log.info("semantic_metadata table does not exist, skipping")
log.info("semantic_metadata.keywords column does not exist, skipping")
else:
log.info("semantic_metadata table does not exist, skipping")
# Step 2: Remove symbols.token_count and symbols.symbol_type
log.info("Removing symbols.token_count and symbols.symbol_type columns...")
# Step 2: Remove symbols.token_count and symbols.symbol_type (if columns exist)
log.info("Checking symbols.token_count and symbols.symbol_type columns...")
# Check if symbols table exists
cursor.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='symbols'"
)
if cursor.fetchone():
cursor.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='symbols'"
)
if cursor.fetchone():
# Check if token_count or symbol_type columns exist
cursor.execute("PRAGMA table_info(symbols)")
columns = {row[1] for row in cursor.fetchall()}
if "token_count" in columns or "symbol_type" in columns:
log.info("Removing symbols.token_count and symbols.symbol_type columns...")
cursor.execute("""
CREATE TABLE symbols_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
@@ -110,21 +122,28 @@ def upgrade(db_conn: Connection):
cursor.execute("DROP TABLE symbols")
cursor.execute("ALTER TABLE symbols_new RENAME TO symbols")
# Recreate indexes (excluding idx_symbols_type which indexed symbol_type)
# Recreate indexes
cursor.execute("CREATE INDEX IF NOT EXISTS idx_symbols_file ON symbols(file_id)")
cursor.execute("CREATE INDEX IF NOT EXISTS idx_symbols_name ON symbols(name)")
log.info("Removed symbols.token_count and symbols.symbol_type columns")
else:
log.info("symbols table does not exist, skipping")
log.info("symbols.token_count/symbol_type columns do not exist, skipping")
else:
log.info("symbols table does not exist, skipping")
# Step 3: Remove subdirs.direct_files
log.info("Removing subdirs.direct_files column...")
# Step 3: Remove subdirs.direct_files (if column exists)
log.info("Checking subdirs.direct_files column...")
# Check if subdirs table exists
cursor.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='subdirs'"
)
if cursor.fetchone():
cursor.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='subdirs'"
)
if cursor.fetchone():
# Check if direct_files column exists
cursor.execute("PRAGMA table_info(subdirs)")
columns = {row[1] for row in cursor.fetchall()}
if "direct_files" in columns:
log.info("Removing subdirs.direct_files column...")
cursor.execute("""
CREATE TABLE subdirs_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
@@ -148,26 +167,15 @@ def upgrade(db_conn: Connection):
cursor.execute("CREATE INDEX IF NOT EXISTS idx_subdirs_name ON subdirs(name)")
log.info("Removed subdirs.direct_files column")
else:
log.info("subdirs table does not exist, skipping")
log.info("subdirs.direct_files column does not exist, skipping")
else:
log.info("subdirs table does not exist, skipping")
cursor.execute("COMMIT")
log.info("Migration 005 completed successfully")
log.info("Migration 005 completed successfully")
# Vacuum to reclaim space (outside transaction)
try:
log.info("Running VACUUM to reclaim space...")
cursor.execute("VACUUM")
log.info("VACUUM completed successfully")
except Exception as e:
log.warning(f"VACUUM failed (non-critical): {e}")
except Exception as e:
log.error(f"Migration 005 failed: {e}")
try:
cursor.execute("ROLLBACK")
except Exception:
pass
raise
# Vacuum to reclaim space (outside transaction, optional)
# Note: VACUUM cannot run inside a transaction, so we skip it here
# The caller can run VACUUM separately if desired
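Each step of migration 005 uses the portable SQLite "rebuild the table without the column" pattern, since older SQLite builds lack ALTER TABLE ... DROP COLUMN. A condensed sketch of that pattern for a generic example table (the table, column, and index names below are illustrative, not the project's schema); VACUUM is deliberately left to the caller because it cannot run inside a transaction.

import sqlite3

def drop_column_by_rebuild(conn: sqlite3.Connection) -> None:
    """Drop a column the portable way: rebuild the table without it, then swap names."""
    cur = conn.cursor()
    # 1. Create the trimmed replacement table
    cur.execute(
        """
        CREATE TABLE example_new (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            name TEXT NOT NULL
        )
        """
    )
    # 2. Copy over only the columns that survive
    cur.execute("INSERT INTO example_new (id, name) SELECT id, name FROM example")
    # 3. Swap the tables
    cur.execute("DROP TABLE example")
    cur.execute("ALTER TABLE example_new RENAME TO example")
    # 4. Recreate the indexes that should survive (indexes on dropped columns are gone)
    cur.execute("CREATE INDEX IF NOT EXISTS idx_example_name ON example(name)")
    # VACUUM must be issued separately, outside any open transaction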
def downgrade(db_conn: Connection):
@@ -7,7 +7,7 @@ import threading
import time
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional
from typing import Any, Dict, List, Optional
from codexlens.errors import StorageError
@@ -462,6 +462,66 @@ class RegistryStore:
row = conn.execute(query, paths_to_check).fetchone()
return self._row_to_dir_mapping(row) if row else None
def find_by_source_path(self, source_path: str) -> Optional[Dict[str, str]]:
"""Find project by source path (exact or nearest match).
Searches for a project whose source_root matches or contains
the given source_path.
Args:
source_path: Source directory path as string
Returns:
Dict with project info including 'index_root', or None if not found
"""
with self._lock:
conn = self._get_connection()
source_path_resolved = str(Path(source_path).resolve())
# First try exact match on projects table
row = conn.execute(
"SELECT * FROM projects WHERE source_root=?", (source_path_resolved,)
).fetchone()
if row:
return {
"id": str(row["id"]),
"source_root": row["source_root"],
"index_root": row["index_root"],
"status": row["status"] or "active",
}
# Try finding project that contains this path
# Build list of all parent paths
paths_to_check = []
current = Path(source_path_resolved)
while True:
paths_to_check.append(str(current))
parent = current.parent
if parent == current:
break
current = parent
if paths_to_check:
placeholders = ','.join('?' * len(paths_to_check))
query = f"""
SELECT * FROM projects
WHERE source_root IN ({placeholders})
ORDER BY LENGTH(source_root) DESC
LIMIT 1
"""
row = conn.execute(query, paths_to_check).fetchone()
if row:
return {
"id": str(row["id"]),
"source_root": row["source_root"],
"index_root": row["index_root"],
"status": row["status"] or "active",
}
return None
def get_project_dirs(self, project_id: int) -> List[DirMapping]:
"""Get all directory mappings for a project.
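The nearest-match lookup in find_by_source_path works by collecting every ancestor of the query path and letting SQL pick the longest registered source_root among them. A compact standalone sketch of that idea, assuming a projects table shaped like the one in the diff; the function name is illustrative only.

import sqlite3
from pathlib import Path
from typing import Optional

def find_project_index_root(conn: sqlite3.Connection, source_path: str) -> Optional[str]:
    """Return the index_root of the project whose source_root is the nearest ancestor."""
    candidates = []
    current = Path(source_path).resolve()
    while True:
        candidates.append(str(current))
        if current.parent == current:  # reached the filesystem root
            break
        current = current.parent
    placeholders = ",".join("?" * len(candidates))
    row = conn.execute(
        f"""
        SELECT index_root FROM projects
        WHERE source_root IN ({placeholders})
        ORDER BY LENGTH(source_root) DESC
        LIMIT 1
        """,
        candidates,
    ).fetchone()
    return row[0] if row else None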
@@ -204,13 +204,11 @@ class SQLiteStore:
if indexed_file.symbols:
conn.executemany(
"""
INSERT INTO symbols(file_id, name, kind, start_line, end_line, token_count, symbol_type)
VALUES(?, ?, ?, ?, ?, ?, ?)
INSERT INTO symbols(file_id, name, kind, start_line, end_line)
VALUES(?, ?, ?, ?, ?)
""",
[
(file_id, s.name, s.kind, s.range[0], s.range[1],
getattr(s, 'token_count', None),
getattr(s, 'symbol_type', None) or s.kind)
(file_id, s.name, s.kind, s.range[0], s.range[1])
for s in indexed_file.symbols
],
)
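With token_count and symbol_type gone, the bulk insert reduces to a fixed five-column executemany; a minimal sketch assuming symbol objects with name, kind, and a (start, end) range tuple as in the diff, with the helper name invented for illustration.

import sqlite3

def insert_symbols(conn: sqlite3.Connection, file_id: int, symbols) -> None:
    """Bulk-insert symbols using the trimmed five-column schema."""
    conn.executemany(
        """
        INSERT INTO symbols(file_id, name, kind, start_line, end_line)
        VALUES(?, ?, ?, ?, ?)
        """,
        [(file_id, s.name, s.kind, s.range[0], s.range[1]) for s in symbols],
    )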
@@ -255,13 +253,11 @@ class SQLiteStore:
if indexed_file.symbols:
conn.executemany(
"""
INSERT INTO symbols(file_id, name, kind, start_line, end_line, token_count, symbol_type)
VALUES(?, ?, ?, ?, ?, ?, ?)
INSERT INTO symbols(file_id, name, kind, start_line, end_line)
VALUES(?, ?, ?, ?, ?)
""",
[
(file_id, s.name, s.kind, s.range[0], s.range[1],
getattr(s, 'token_count', None),
getattr(s, 'symbol_type', None) or s.kind)
(file_id, s.name, s.kind, s.range[0], s.range[1])
for s in indexed_file.symbols
],
)
@@ -611,15 +607,12 @@ class SQLiteStore:
name TEXT NOT NULL,
kind TEXT NOT NULL,
start_line INTEGER NOT NULL,
end_line INTEGER NOT NULL,
token_count INTEGER,
symbol_type TEXT
end_line INTEGER NOT NULL
)
"""
)
conn.execute("CREATE INDEX IF NOT EXISTS idx_symbols_name ON symbols(name)")
conn.execute("CREATE INDEX IF NOT EXISTS idx_symbols_kind ON symbols(kind)")
conn.execute("CREATE INDEX IF NOT EXISTS idx_symbols_type ON symbols(symbol_type)")
conn.execute(
"""
CREATE TABLE IF NOT EXISTS code_relationships (