From 79a29538620ec31106b9685d9571c825dc849b03 Mon Sep 17 00:00:00 2001 From: catlog22 Date: Sun, 14 Dec 2025 17:17:09 +0800 Subject: [PATCH] Add comprehensive tests for vector/semantic search functionality - Implement full coverage tests for Embedder model loading and embedding generation - Add CRUD operations and caching tests for VectorStore - Include cosine similarity computation tests - Validate semantic search accuracy and relevance through various queries - Establish performance benchmarks for embedding and search operations - Ensure edge cases and error handling are covered - Test thread safety and concurrent access scenarios - Verify availability of semantic search dependencies --- .claude/rules/active_memory.md | 13 + .claude/rules/active_memory_config.json | 4 + .claude/rules/cli-tools-usage.md | 36 + .../{CLAUDE.md => rules/coding-philosophy.md} | 55 +- .claude/rules/context-requirements.md | 7 + .../intelligent-tools-strategy.md | 113 +- .claude/rules/project-integration.md | 22 + .claude/rules/tool-selection.md | 88 + .claude/workflows/context-search-strategy.md | 6 - .claude/workflows/tool-strategy.md | 256 +- ccw/src/core/lite-scanner-complete.ts | 469 ++ ccw/src/core/routes/ccw-routes.ts | 96 + ccw/src/core/routes/cli-routes.ts | 561 +++ ccw/src/core/routes/codexlens-routes.ts | 175 + ccw/src/core/routes/files-routes.ts | 428 ++ ccw/src/core/routes/memory-routes.ts | 1129 +++++ ccw/src/core/routes/rules-routes.ts | 266 + ccw/src/core/routes/session-routes.ts | 406 ++ ccw/src/core/routes/skills-routes.ts | 300 ++ ccw/src/core/routes/system-routes.ts | 329 ++ ccw/src/core/server.ts | 4312 +---------------- ccw/src/core/websocket.ts | 190 + ccw/src/templates/dashboard-css/10-cli.css | 506 ++ ccw/src/templates/dashboard-css/11-memory.css | 210 + .../dashboard-js/components/cli-history.js | 62 +- .../dashboard-js/components/cli-status.js | 572 ++- .../dashboard-js/components/notifications.js | 98 + .../dashboard-js/views/cli-manager.js | 280 +- .../dashboard-js/views/prompt-history.js | 5 +- ccw/src/tools/cli-config-manager.ts | 272 ++ ccw/src/tools/cli-executor.ts | 207 +- ccw/src/tools/cli-history-store.ts | 20 + ccw/src/tools/generate-module-docs.ts | 12 +- codex-lens/SEMANTIC_SEARCH_USAGE.md | 83 + codex-lens/_debug_output.txt | 19 + codex-lens/_test_output.txt | 22 + codex-lens/_test_prompt.txt | 25 + codex-lens/_test_result.txt | 19 + codex-lens/src/codexlens/cli/commands.py | 129 + codex-lens/src/codexlens/config.py | 5 + .../src/codexlens/search/chain_search.py | 34 +- codex-lens/src/codexlens/semantic/__init__.py | 36 +- .../src/codexlens/semantic/llm_enhancer.py | 667 +++ codex-lens/src/codexlens/storage/dir_index.py | 232 + codex-lens/tests/test_llm_enhancer.py | 831 ++++ codex-lens/tests/test_search_full_coverage.py | 1190 +++++ codex-lens/tests/test_vector_search_full.py | 747 +++ 47 files changed, 11208 insertions(+), 4336 deletions(-) create mode 100644 .claude/rules/active_memory.md create mode 100644 .claude/rules/active_memory_config.json create mode 100644 .claude/rules/cli-tools-usage.md rename .claude/{CLAUDE.md => rules/coding-philosophy.md} (54%) create mode 100644 .claude/rules/context-requirements.md rename .claude/{workflows => rules}/intelligent-tools-strategy.md (69%) create mode 100644 .claude/rules/project-integration.md create mode 100644 .claude/rules/tool-selection.md create mode 100644 ccw/src/core/lite-scanner-complete.ts create mode 100644 ccw/src/core/routes/ccw-routes.ts create mode 100644 ccw/src/core/routes/cli-routes.ts create mode 100644 
ccw/src/core/routes/codexlens-routes.ts create mode 100644 ccw/src/core/routes/files-routes.ts create mode 100644 ccw/src/core/routes/memory-routes.ts create mode 100644 ccw/src/core/routes/rules-routes.ts create mode 100644 ccw/src/core/routes/session-routes.ts create mode 100644 ccw/src/core/routes/skills-routes.ts create mode 100644 ccw/src/core/routes/system-routes.ts create mode 100644 ccw/src/core/websocket.ts create mode 100644 ccw/src/tools/cli-config-manager.ts create mode 100644 codex-lens/SEMANTIC_SEARCH_USAGE.md create mode 100644 codex-lens/_debug_output.txt create mode 100644 codex-lens/_test_output.txt create mode 100644 codex-lens/_test_prompt.txt create mode 100644 codex-lens/_test_result.txt create mode 100644 codex-lens/src/codexlens/semantic/llm_enhancer.py create mode 100644 codex-lens/tests/test_llm_enhancer.py create mode 100644 codex-lens/tests/test_search_full_coverage.py create mode 100644 codex-lens/tests/test_vector_search_full.py diff --git a/.claude/rules/active_memory.md b/.claude/rules/active_memory.md new file mode 100644 index 00000000..a3effdea --- /dev/null +++ b/.claude/rules/active_memory.md @@ -0,0 +1,13 @@ +# Active Memory + +> Auto-generated understanding of frequently accessed files using GEMINI. +> Last updated: 2025-12-14T08:59:41.526Z +> Files analyzed: 10 +> CLI Tool: gemini + +--- + +[object Object] + +--- + diff --git a/.claude/rules/active_memory_config.json b/.claude/rules/active_memory_config.json new file mode 100644 index 00000000..f1b9cd57 --- /dev/null +++ b/.claude/rules/active_memory_config.json @@ -0,0 +1,4 @@ +{ + "interval": "manual", + "tool": "gemini" +} \ No newline at end of file diff --git a/.claude/rules/cli-tools-usage.md b/.claude/rules/cli-tools-usage.md new file mode 100644 index 00000000..57b51a71 --- /dev/null +++ b/.claude/rules/cli-tools-usage.md @@ -0,0 +1,36 @@ +# CLI Tools Usage Rules + +## Tool Selection + +### Gemini & Qwen +**Use for**: Analysis, documentation, code exploration, architecture review +- Default MODE: `analysis` (read-only) +- Prefer Gemini; use Qwen as fallback +- Large context window, pattern recognition + +### Codex +**Use for**: Feature implementation, bug fixes, autonomous development +- Requires explicit `--mode auto` or `--mode write` +- Best for: Implementation, testing, automation + +## Core Principles + +- Use tools early and often - tools are faster and more thorough +- Always use `ccw cli exec` for consistent parameter handling +- ALWAYS reference exactly ONE template in RULES section +- Require EXPLICIT `--mode write` or `--mode auto` for modifications +- NEVER use escape characters (`\$`, `\"`, `\'`) in CLI commands + +## Permission Framework + +- `analysis` (default): Read-only, safe for auto-execution +- `write`: Requires explicit `--mode write` - creates/modifies/deletes files +- `auto`: Requires explicit `--mode auto` - full autonomous operations (Codex only) + +## Timeout Guidelines + +- Simple (5-10min): Analysis, search +- Medium (10-20min): Refactoring, documentation +- Complex (20-60min): Implementation, migration +- Heavy (60-120min): Large codebase, multi-file operations +- Codex multiplier: 3x allocated time (minimum 15min) diff --git a/.claude/CLAUDE.md b/.claude/rules/coding-philosophy.md similarity index 54% rename from .claude/CLAUDE.md rename to .claude/rules/coding-philosophy.md index ffa467be..31200653 100644 --- a/.claude/CLAUDE.md +++ b/.claude/rules/coding-philosophy.md @@ -1,19 +1,6 @@ +# Coding Philosophy -### Tool Context Protocols -For all tool usage, 
command syntax, and integration guidelines: -- **Tool Strategy**: @~/.claude/workflows/tool-strategy.md -- **Intelligent Context Strategy**: @~/.claude/workflows/intelligent-tools-strategy.md -- **Context Search Commands**: @~/.claude/workflows/context-search-strategy.md - -**Context Requirements**: -- Identify 3+ existing similar patterns before implementation -- Map dependencies and integration points -- Understand testing framework and coding conventions - - -## Philosophy - -### Core Beliefs +## Core Beliefs - **Pursue good taste** - Eliminate edge cases to make code logic natural and elegant - **Embrace extreme simplicity** - Complexity is the root of all evil @@ -27,33 +14,16 @@ For all tool usage, command syntax, and integration guidelines: - **Minimize changes** - Only modify what's directly required; avoid refactoring, adding features, or "improving" code beyond the request - **No unsolicited documentation** - NEVER generate reports, documentation files, or summaries without explicit user request. If required, save to .workflow/.scratchpad/ -### Simplicity Means +## Simplicity Means - Single responsibility per function/class - Avoid premature abstractions - No clever tricks - choose the boring solution - If you need to explain it, it's too complex -## Project Integration +## Fix, Don't Hide -### Learning the Codebase - -- Find 3 similar features/components -- Identify common patterns and conventions -- Use same libraries/utilities when possible -- Follow existing test patterns - -### Tooling - -- Use project's existing build system -- Use project's test framework -- Use project's formatter/linter settings -- Don't introduce new tools without strong justification - - -### Fix, Don't Hide - -- **Solve problems, don't silence symptoms** - Skipped tests, `@ts-ignore`, empty catch, `as any`, excessive timeouts = hiding bugs, not fixing them +**Solve problems, don't silence symptoms** - Skipped tests, `@ts-ignore`, empty catch, `as any`, excessive timeouts = hiding bugs, not fixing them **NEVER**: - Make assumptions - verify with existing code @@ -70,18 +40,3 @@ For all tool usage, command syntax, and integration guidelines: - Learn from existing implementations - Stop after 3 failed attempts and reassess - **Edit fallback**: When Edit tool fails 2+ times on same file, try Bash sed/awk first, then Write to recreate if still failing - -## Platform-Specific Guidelines - -### Windows Path Format Guidelines -- **MCP Tools**: Double backslash `D:\\path\\file.txt` -- **Bash Commands**: Forward slash `D:/path/file.txt` or `/d/path/file.txt` -- **Relative Paths**: Universal (works in both) - -#### **Content Uniqueness Rules** - -- **Each layer owns its abstraction level** - no content sharing between layers -- **Reference, don't duplicate** - point to other layers, never copy content -- **Maintain perspective** - each layer sees the system at its appropriate scale -- **Avoid implementation creep** - higher layers stay architectural - diff --git a/.claude/rules/context-requirements.md b/.claude/rules/context-requirements.md new file mode 100644 index 00000000..72f77d89 --- /dev/null +++ b/.claude/rules/context-requirements.md @@ -0,0 +1,7 @@ +# Context Requirements + +Before implementation, always: + +- Identify 3+ existing similar patterns before implementation +- Map dependencies and integration points +- Understand testing framework and coding conventions diff --git a/.claude/workflows/intelligent-tools-strategy.md b/.claude/rules/intelligent-tools-strategy.md similarity index 69% rename from 
.claude/workflows/intelligent-tools-strategy.md rename to .claude/rules/intelligent-tools-strategy.md index a5402391..da2e5bc4 100644 --- a/.claude/workflows/intelligent-tools-strategy.md +++ b/.claude/rules/intelligent-tools-strategy.md @@ -15,14 +15,25 @@ ### Universal Prompt Template ``` -PURPOSE: [objective + why + success criteria] -TASK: • [step 1] • [step 2] • [step 3] +PURPOSE: [what] + [why] + [success criteria] + [constraints/scope] +TASK: • [step 1: specific action] • [step 2: specific action] • [step 3: specific action] MODE: [analysis|write|auto] -CONTEXT: @**/* | Memory: [session/tech/module context] -EXPECTED: [format + quality + structure] -RULES: $(cat ~/.claude/workflows/cli-templates/prompts/[category]/[template].txt) | [constraints] | MODE=[permission] +CONTEXT: @[file patterns] | Memory: [session/tech/module context] +EXPECTED: [deliverable format] + [quality criteria] + [structure requirements] +RULES: $(cat ~/.claude/workflows/cli-templates/prompts/[category]/[template].txt) | [domain constraints] | MODE=[permission] ``` +### Intent Capture Checklist (Before CLI Execution) + +**⚠️ CRITICAL**: Before executing any CLI command, verify these intent dimensions: +**Intent Validation Questions**: +- [ ] Is the objective specific and measurable? +- [ ] Are success criteria defined? +- [ ] Is the scope clearly bounded? +- [ ] Are constraints and limitations stated? +- [ ] Is the expected output format clear? +- [ ] Is the action level (read/write) explicit? + ### Tool Selection | Task Type | Tool | Fallback | @@ -128,14 +139,15 @@ ASSISTANT RESPONSE: [Previous output] Every command MUST include these fields: -| Field | Purpose | Example | -|-------|---------|---------| -| **PURPOSE** | Goal, why needed, success criteria | "Analyze auth module for security vulnerabilities" | -| **TASK** | Actionable steps (• bullet format) | "• Review patterns • Identify risks • Document findings" | -| **MODE** | Permission level | `analysis` / `write` / `auto` | -| **CONTEXT** | File patterns + Memory context | `@src/**/* | Memory: Previous refactoring (abc123)` | -| **EXPECTED** | Deliverable format, quality criteria | "Security report with risk levels and recommendations" | -| **RULES** | **Template (REQUIRED)** + constraints | `$(cat ~/.claude/.../analysis/02-analyze-code-patterns.txt) | Focus on auth | analysis=READ-ONLY` | +| Field | Purpose | Components | Bad Example | Good Example | +|-------|---------|------------|-------------|--------------| +| **PURPOSE** | Goal + motivation + success | What + Why + Success Criteria + Constraints | "Analyze code" | "Identify security vulnerabilities in auth module to pass compliance audit; success = all OWASP Top 10 addressed; scope = src/auth/** only" | +| **TASK** | Actionable steps | Specific verbs + targets | "• Review code • Find issues" | "• Scan for SQL injection in query builders • Check XSS in template rendering • Verify CSRF token validation" | +| **MODE** | Permission level | analysis / write / auto | (missing) | "analysis" or "write" | +| **CONTEXT** | File scope + history | File patterns + Memory | "@**/*" | "@src/auth/**/*.ts @shared/utils/security.ts \| Memory: Previous auth refactoring (WFS-001)" | +| **EXPECTED** | Output specification | Format + Quality + Structure | "Report" | "Markdown report with: severity levels (Critical/High/Medium/Low), file:line references, remediation code snippets, priority ranking" | +| **RULES** | Template + constraints | $(cat template) + domain rules | (missing) | "$(cat ~/.claude/.../security.txt) 
\| Focus on authentication \| Ignore test files \| analysis=READ-ONLY" | + ### CONTEXT Configuration @@ -303,42 +315,55 @@ CCW automatically maps to tool-specific syntax: ### Command Examples +#### Task-Type Specific Templates + +**Analysis Task** (Security Audit): ```bash -# Analysis (default) ccw cli exec " -PURPOSE: Analyze authentication -TASK: • Review patterns • Identify risks +PURPOSE: Identify OWASP Top 10 vulnerabilities in authentication module to pass security audit; success = all critical/high issues documented with remediation +TASK: • Scan for injection flaws (SQL, command, LDAP) • Check authentication bypass vectors • Evaluate session management • Assess sensitive data exposure MODE: analysis -CONTEXT: @**/* @../shared/**/* -EXPECTED: Analysis report -RULES: $(cat ~/.claude/workflows/cli-templates/prompts/analysis/02-analyze-code-patterns.txt) | analysis=READ-ONLY -" --tool gemini --cd src/auth --includeDirs ../shared - -# Write mode -ccw cli exec " -PURPOSE: Generate API docs -TASK: • Create docs • Add examples -MODE: write -CONTEXT: @src/api/**/* -EXPECTED: Complete documentation -RULES: $(cat ~/.claude/workflows/cli-templates/prompts/development/02-implement-feature.txt) | write=CREATE/MODIFY/DELETE -" --tool gemini --mode write - -# Auto mode (Codex) -ccw cli exec " -PURPOSE: Implement auth module -TASK: • Create service • Add validation • Setup JWT -MODE: auto -CONTEXT: @**/* | Memory: Following project security patterns -EXPECTED: Complete module with tests -RULES: $(cat ~/.claude/workflows/cli-templates/prompts/development/02-implement-feature.txt) | auto=FULL -" --tool codex --mode auto - -# Fallback strategy -ccw cli exec "" --tool gemini # Primary -ccw cli exec "" --tool qwen # Fallback +CONTEXT: @src/auth/**/* @src/middleware/auth.ts | Memory: Using bcrypt for passwords, JWT for sessions +EXPECTED: Security report with: severity matrix, file:line references, CVE mappings where applicable, remediation code snippets prioritized by risk +RULES: $(cat ~/.claude/workflows/cli-templates/prompts/analysis/03-assess-security-risks.txt) | Focus on authentication | Ignore test files | analysis=READ-ONLY +" --tool gemini --cd src/auth --timeout 600000 ``` +**Implementation Task** (New Feature): +```bash +ccw cli exec " +PURPOSE: Implement rate limiting for API endpoints to prevent abuse; must be configurable per-endpoint; backward compatible with existing clients +TASK: • Create rate limiter middleware with sliding window • Implement per-route configuration • Add Redis backend for distributed state • Include bypass for internal services +MODE: auto +CONTEXT: @src/middleware/**/* @src/config/**/* | Memory: Using Express.js, Redis already configured, existing middleware pattern in auth.ts +EXPECTED: Production-ready code with: TypeScript types, unit tests, integration test, configuration example, migration guide +RULES: $(cat ~/.claude/workflows/cli-templates/prompts/development/02-implement-feature.txt) | Follow existing middleware patterns | No breaking changes | auto=FULL +" --tool codex --mode auto --timeout 1800000 +``` + +**Bug Fix Task**: +```bash +ccw cli exec " +PURPOSE: Fix memory leak in WebSocket connection handler causing server OOM after 24h; root cause must be identified before any fix +TASK: • Trace connection lifecycle from open to close • Identify event listener accumulation • Check cleanup on disconnect • Verify garbage collection eligibility +MODE: analysis +CONTEXT: @src/websocket/**/* @src/services/connection-manager.ts | Memory: Using ws library, ~5000 
concurrent connections in production +EXPECTED: Root cause analysis with: memory profile, leak source (file:line), fix recommendation with code, verification steps +RULES: $(cat ~/.claude/workflows/cli-templates/prompts/analysis/01-diagnose-bug-root-cause.txt) | Focus on resource cleanup | analysis=READ-ONLY +" --tool gemini --cd src --timeout 900000 +``` + +**Refactoring Task**: +```bash +ccw cli exec " +PURPOSE: Refactor payment processing to use strategy pattern for multi-gateway support; no functional changes; all existing tests must pass +TASK: • Extract gateway interface from current implementation • Create strategy classes for Stripe, PayPal • Implement factory for gateway selection • Migrate existing code to use strategies +MODE: write +CONTEXT: @src/payments/**/* @src/types/payment.ts | Memory: Currently only Stripe, adding PayPal next sprint, must support future gateways +EXPECTED: Refactored code with: strategy interface, concrete implementations, factory class, updated tests, migration checklist +RULES: $(cat ~/.claude/workflows/cli-templates/prompts/development/02-refactor-codebase.txt) | Preserve all existing behavior | Tests must pass | write=CREATE/MODIFY/DELETE +" --tool gemini --mode write --timeout 1200000 +``` --- ## Configuration diff --git a/.claude/rules/project-integration.md b/.claude/rules/project-integration.md new file mode 100644 index 00000000..b8475c5f --- /dev/null +++ b/.claude/rules/project-integration.md @@ -0,0 +1,22 @@ +# Project Integration Rules + +## Learning the Codebase + +- Find 3 similar features/components +- Identify common patterns and conventions +- Use same libraries/utilities when possible +- Follow existing test patterns + +## Tooling + +- Use project's existing build system +- Use project's test framework +- Use project's formatter/linter settings +- Don't introduce new tools without strong justification + +## Content Uniqueness Rules + +- **Each layer owns its abstraction level** - no content sharing between layers +- **Reference, don't duplicate** - point to other layers, never copy content +- **Maintain perspective** - each layer sees the system at its appropriate scale +- **Avoid implementation creep** - higher layers stay architectural diff --git a/.claude/rules/tool-selection.md b/.claude/rules/tool-selection.md new file mode 100644 index 00000000..6c60d309 --- /dev/null +++ b/.claude/rules/tool-selection.md @@ -0,0 +1,88 @@ +# Tool Selection Rules + +## Context Gathering + +### Use Exa +- Researching external APIs, libraries, frameworks +- Need recent documentation beyond knowledge cutoff +- Looking for implementation examples in public repos +- User mentions specific library/framework names +- Questions about "best practices" or "how does X work" + +### Use read_file (MCP) +- Reading multiple related files at once +- Directory traversal with pattern matching +- Searching file content with regex +- Need to limit depth/file count for large directories +- Batch operations on multiple files +- Pattern-based filtering (glob + content regex) + +### Use codex_lens +- Large codebase (>500 files) requiring repeated searches +- Need semantic understanding of code relationships +- Working across multiple sessions +- Symbol-level navigation needed +- Finding all implementations of interface/class +- Tracking function calls across codebase + +### Use smart_search +- Unknown file locations +- Concept/semantic search ("authentication logic", "payment processing") +- Medium-sized codebase (100-500 files) +- One-time or infrequent searches +- 
Natural language queries about code structure + +**Mode Selection**: +- `auto`: Let tool decide (default) +- `exact`: Known exact pattern +- `fuzzy`: Typo-tolerant search +- `semantic`: Concept-based search +- `graph`: Dependency analysis + +## File Modification + +### Use edit_file (MCP) +- Built-in Edit tool failed 1+ times +- Need dry-run preview before applying changes +- Need line-based operations (insert_after, insert_before) +- Need to replace all occurrences at once +- Built-in Edit returns "old_string not found" +- Whitespace/formatting issues in built-in Edit + +**Mode Selection**: +- `mode=update`: Replace text +- `mode=line`: Line-based operations + +### Use write_file (MCP) +- Creating brand new files +- MCP edit_file still fails (last resort) +- Need to completely replace file content +- Need backup before overwriting +- User explicitly asks to "recreate file" + +## Priority Logic + +**File Reading**: +1. Known single file → Built-in Read +2. Multiple files OR pattern matching → read_file (MCP) +3. Unknown location → smart_search then Read +4. Large codebase + repeated access → codex_lens + +**File Editing**: +1. Always try built-in Edit first +2. Fails 1+ times → edit_file (MCP) +3. Still fails → write_file (MCP) + +**Search**: +1. External knowledge → Exa +2. Exact pattern in small codebase → Built-in Grep +3. Semantic/unknown location → smart_search +4. Large codebase + repeated searches → codex_lens + +## Decision Triggers + +**Start with simplest tool** (Read, Edit, Grep) +**Escalate to MCP tools** when built-ins fail or inappropriate +**Use semantic search** for exploratory tasks +**Use indexed search** for large, stable codebases +**Use Exa** for external/public knowledge diff --git a/.claude/workflows/context-search-strategy.md b/.claude/workflows/context-search-strategy.md index 06146595..f659721e 100644 --- a/.claude/workflows/context-search-strategy.md +++ b/.claude/workflows/context-search-strategy.md @@ -1,9 +1,3 @@ ---- -name: context-search-strategy -description: Strategic guidelines for context search commands -type: search-guideline ---- - # Context Search Strategy ## ⚡ Execution Environment diff --git a/.claude/workflows/tool-strategy.md b/.claude/workflows/tool-strategy.md index e6d6ed76..84a13fcd 100644 --- a/.claude/workflows/tool-strategy.md +++ b/.claude/workflows/tool-strategy.md @@ -1,88 +1,216 @@ -# Tool Strategy +# Tool Strategy - When to Use What -## ⚡ Exa Triggering Mechanisms +> **Focus**: Decision triggers and selection logic, NOT syntax (already registered with Claude) -**Auto-Trigger**: -- User mentions "exa-code" or code-related queries → `mcp__exa__get_code_context_exa` -- Need current web information → `mcp__exa__web_search_exa` - -**Manual Trigger**: -- Complex API research → Exa Code Context -- Real-time information needs → Exa Web Search - -## ⚡ CCW MCP Tools - - -### edit_file - -**When to Use**: Edit tool fails 1+ times on same file +## Quick Decision Tree ``` -mcp__ccw-tools__edit_file(path="file.py", oldText="old", newText="new") -mcp__ccw-tools__edit_file(path="file.py", oldText="old", newText="new", dryRun=true) -mcp__ccw-tools__edit_file(path="file.py", oldText="old", newText="new", replaceAll=true) -mcp__ccw-tools__edit_file(path="file.py", mode="line", operation="insert_after", line=10, text="new line") +Need context? +├─ Exa available? → Use Exa (fastest, most comprehensive) +├─ Large codebase (>500 files)? → codex_lens +├─ Known files (<5)? → Read tool +└─ Unknown files? → smart_search → Read tool + +Need to modify files? 
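+├─ Not yet tried built-in Edit? → built-in Edit first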
+├─ Built-in Edit fails? → mcp__ccw-tools__edit_file +└─ Still fails? → mcp__ccw-tools__write_file + +Need to search? +├─ Semantic/concept search? → smart_search (mode=semantic) +├─ Exact pattern match? → Grep tool +└─ Multiple search modes needed? → smart_search (mode=auto) ``` -**Options**: `dryRun` (preview diff), `replaceAll`, `mode` (update|line), `operation`, `line`, `text` +--- -### write_file +## 1. Context Gathering Tools -**When to Use**: Create new files or overwrite existing content +### Exa (`mcp__exa__get_code_context_exa`) + +**Use When**: +- ✅ Researching external APIs, libraries, frameworks +- ✅ Need recent documentation (post-cutoff knowledge) +- ✅ Looking for implementation examples in public repos +- ✅ Comparing architectural patterns across projects + +**Don't Use When**: +- ❌ Searching internal codebase (use smart_search/codex_lens) +- ❌ Files already in working directory (use Read) + +**Trigger Indicators**: +- User mentions specific library/framework names +- Questions about "best practices", "how does X work" +- Need to verify current API signatures + +--- + +### read_file (`mcp__ccw-tools__read_file`) + +**Use When**: +- ✅ Reading multiple related files at once (batch reading) +- ✅ Need directory traversal with pattern matching +- ✅ Searching file content with regex (`contentPattern`) +- ✅ Want to limit depth/file count for large directories + +**Don't Use When**: +- ❌ Single file read → Use built-in Read tool (faster) +- ❌ Unknown file locations → Use smart_search first +- ❌ Need semantic search → Use smart_search or codex_lens + +**Trigger Indicators**: +- Need to read "all TypeScript files in src/" +- Need to find "files containing TODO comments" +- Want to read "up to 20 config files" + +**Advantages over Built-in Read**: +- Batch operation (multiple files in one call) +- Pattern-based filtering (glob + content regex) +- Directory traversal with depth control + +--- + +### codex_lens (`mcp__ccw-tools__codex_lens`) + +**Use When**: +- ✅ Large codebase (>500 files) requiring repeated searches +- ✅ Need semantic understanding of code relationships +- ✅ Working across multiple sessions (persistent index) +- ✅ Symbol-level navigation needed + +**Don't Use When**: +- ❌ Small project (<100 files) → Use smart_search (no indexing overhead) +- ❌ One-time search → Use smart_search or Grep +- ❌ Files change frequently → Indexing overhead not worth it + +**Trigger Indicators**: +- "Find all implementations of interface X" +- "What calls this function across the codebase?" 
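+
+  For instance, a minimal sketch of a typical indexed-search flow (action names follow the Action Selection list below; the query string is illustrative):
+
+  ```
+  mcp__ccw-tools__codex_lens(action="init", path=".")
+  mcp__ccw-tools__codex_lens(action="search", query="function main", path=".")
+  mcp__ccw-tools__codex_lens(action="status")
+  ```
+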
+- Multi-session workflow on same codebase + +**Action Selection**: +- `init`: First time in new codebase +- `search`: Find code patterns +- `search_files`: Find files by path/name pattern +- `symbol`: Get symbols in specific file +- `status`: Check if index exists/is stale +- `clean`: Remove stale index + +--- + +### smart_search (`mcp__ccw-tools__smart_search`) + +**Use When**: +- ✅ Don't know exact file locations +- ✅ Need concept/semantic search ("authentication logic") +- ✅ Medium-sized codebase (100-500 files) +- ✅ One-time or infrequent searches + +**Don't Use When**: +- ❌ Known exact file path → Use Read directly +- ❌ Large codebase + repeated searches → Use codex_lens +- ❌ Exact pattern match → Use Grep (faster) + +**Mode Selection**: +- `auto`: Let tool decide (default, safest) +- `exact`: Know exact pattern, need fast results +- `fuzzy`: Typo-tolerant file/symbol names +- `semantic`: Concept-based ("error handling", "data validation") +- `graph`: Dependency/relationship analysis + +**Trigger Indicators**: +- "Find files related to user authentication" +- "Where is the payment processing logic?" +- "Locate database connection setup" + +--- + +## 2. File Modification Tools + +### edit_file (`mcp__ccw-tools__edit_file`) + +**Use When**: +- ✅ Built-in Edit tool failed 1+ times +- ✅ Need dry-run preview before applying +- ✅ Need line-based operations (insert_after, insert_before) +- ✅ Need to replace all occurrences + +**Don't Use When**: +- ❌ Built-in Edit hasn't failed yet → Try built-in first +- ❌ Need to create new file → Use write_file + +**Trigger Indicators**: +- Built-in Edit returns "old_string not found" +- Built-in Edit fails due to whitespace/formatting +- Need to verify changes before applying (dryRun=true) + +**Mode Selection**: +- `mode=update`: Replace text (similar to built-in Edit) +- `mode=line`: Line-based operations (insert_after, insert_before, delete) + +--- + +### write_file (`mcp__ccw-tools__write_file`) + +**Use When**: +- ✅ Creating brand new files +- ✅ MCP edit_file still fails (last resort) +- ✅ Need to completely replace file content +- ✅ Need backup before overwriting + +**Don't Use When**: +- ❌ File exists + small change → Use Edit tools +- ❌ Built-in Edit hasn't been tried → Try built-in Edit first + +**Trigger Indicators**: +- All Edit attempts failed +- Need to create new file with specific content +- User explicitly asks to "recreate file" + +--- + +## 3. Decision Logic + +### File Reading Priority ``` -mcp__ccw-tools__write_file(path="file.txt", content="Hello") -mcp__ccw-tools__write_file(path="file.txt", content="code with `backticks` and ${vars}", backup=true) +1. Known single file? → Built-in Read +2. Multiple files OR pattern matching? → mcp__ccw-tools__read_file +3. Unknown location? → smart_search, then Read +4. Large codebase + repeated access? → codex_lens ``` -**Options**: `backup`, `createDirectories`, `encoding` - -### read_file - -**When to Use**: Read multiple files, directory traversal, content search +### File Editing Priority ``` -mcp__ccw-tools__read_file(paths="file.ts") # Single file -mcp__ccw-tools__read_file(paths=["a.ts", "b.ts"]) # Multiple files -mcp__ccw-tools__read_file(paths="src/", pattern="*.ts") # Directory + glob -mcp__ccw-tools__read_file(paths="src/", contentPattern="TODO") # Regex search +1. Always try built-in Edit first +2. Fails 1+ times? → mcp__ccw-tools__edit_file +3. Still fails? 
→ mcp__ccw-tools__write_file (last resort) ``` -**Options**: `pattern`, `contentPattern`, `maxDepth` (3), `includeContent` (true), `maxFiles` (50) - -### codex_lens - -**When to Use**: Code indexing, semantic search, cache management +### Search Tool Priority ``` -mcp__ccw-tools__codex_lens(action="init", path=".") -mcp__ccw-tools__codex_lens(action="search", query="function main", path=".") -mcp__ccw-tools__codex_lens(action="search_files", query="pattern", limit=20) -mcp__ccw-tools__codex_lens(action="symbol", file="src/main.py") -mcp__ccw-tools__codex_lens(action="status") -mcp__ccw-tools__codex_lens(action="config_show") -mcp__ccw-tools__codex_lens(action="config_set", key="index_dir", value="/path") -mcp__ccw-tools__codex_lens(action="config_migrate", newPath="/new/path") -mcp__ccw-tools__codex_lens(action="clean", path=".") -mcp__ccw-tools__codex_lens(action="clean", all=true) +1. External knowledge? → Exa +2. Exact pattern in small codebase? → Built-in Grep +3. Semantic/unknown location? → smart_search +4. Large codebase + repeated searches? → codex_lens ``` -**Actions**: `init`, `search`, `search_files`, `symbol`, `status`, `config_show`, `config_set`, `config_migrate`, `clean` +--- -### smart_search +## 4. Anti-Patterns -**When to Use**: Quick search without indexing, natural language queries +**Don't**: +- Use codex_lens for one-time searches in small projects +- Use smart_search when file path is already known +- Use write_file before trying Edit tools +- Use Exa for internal codebase searches +- Use read_file for single file when Read tool works -``` -mcp__ccw-tools__smart_search(query="function main", path=".") -mcp__ccw-tools__smart_search(query="def init", mode="exact") -mcp__ccw-tools__smart_search(query="authentication logic", mode="semantic") -``` - -**Modes**: `auto` (default), `exact`, `fuzzy`, `semantic`, `graph` - -### Fallback Strategy - -1. **Edit fails 1+ times** → `mcp__ccw-tools__edit_file` -2. 
**Still fails** → `mcp__ccw-tools__write_file` +**Do**: +- Start with simplest tool (Read, Edit, Grep) +- Escalate to MCP tools when built-ins fail +- Use semantic search (smart_search) for exploratory tasks +- Use indexed search (codex_lens) for large, stable codebases +- Use Exa for external/public knowledge diff --git a/ccw/src/core/lite-scanner-complete.ts b/ccw/src/core/lite-scanner-complete.ts new file mode 100644 index 00000000..e089886a --- /dev/null +++ b/ccw/src/core/lite-scanner-complete.ts @@ -0,0 +1,469 @@ +import { existsSync, readdirSync, readFileSync, statSync } from 'fs'; +import { join } from 'path'; + +interface TaskMeta { + type: string; + agent: string | null; + scope: string | null; + module: string | null; +} + +interface TaskContext { + requirements: string[]; + focus_paths: string[]; + acceptance: string[]; + depends_on: string[]; +} + +interface TaskFlowControl { + implementation_approach: Array<{ + step: string; + action: string; + }>; +} + +interface NormalizedTask { + id: string; + title: string; + status: string; + meta: TaskMeta; + context: TaskContext; + flow_control: TaskFlowControl; + _raw: unknown; +} + +interface Progress { + total: number; + completed: number; + percentage: number; +} + +interface DiagnosisItem { + id: string; + filename: string; + [key: string]: unknown; +} + +interface Diagnoses { + manifest: unknown | null; + items: DiagnosisItem[]; +} + +interface LiteSession { + id: string; + type: string; + path: string; + createdAt: string; + plan: unknown | null; + tasks: NormalizedTask[]; + diagnoses?: Diagnoses; + progress: Progress; +} + +interface LiteTasks { + litePlan: LiteSession[]; + liteFix: LiteSession[]; +} + +interface LiteTaskDetail { + id: string; + type: string; + path: string; + plan: unknown | null; + tasks: NormalizedTask[]; + explorations: unknown[]; + clarifications: unknown | null; + diagnoses?: Diagnoses; +} + +/** + * Scan lite-plan and lite-fix directories for task sessions + * @param workflowDir - Path to .workflow directory + * @returns Lite tasks data + */ +export async function scanLiteTasks(workflowDir: string): Promise { + const litePlanDir = join(workflowDir, '.lite-plan'); + const liteFixDir = join(workflowDir, '.lite-fix'); + + return { + litePlan: scanLiteDir(litePlanDir, 'lite-plan'), + liteFix: scanLiteDir(liteFixDir, 'lite-fix') + }; +} + +/** + * Scan a lite task directory + * @param dir - Directory path + * @param type - Task type ('lite-plan' or 'lite-fix') + * @returns Array of lite task sessions + */ +function scanLiteDir(dir: string, type: string): LiteSession[] { + if (!existsSync(dir)) return []; + + try { + const sessions = readdirSync(dir, { withFileTypes: true }) + .filter(d => d.isDirectory()) + .map(d => { + const sessionPath = join(dir, d.name); + const session: LiteSession = { + id: d.name, + type, + path: sessionPath, + createdAt: getCreatedTime(sessionPath), + plan: loadPlanJson(sessionPath), + tasks: loadTaskJsons(sessionPath), + progress: { total: 0, completed: 0, percentage: 0 } + }; + + // For lite-fix sessions, also load diagnoses separately + if (type === 'lite-fix') { + session.diagnoses = loadDiagnoses(sessionPath); + } + + // Calculate progress + session.progress = calculateProgress(session.tasks); + + return session; + }) + .sort((a, b) => new Date(b.createdAt).getTime() - new Date(a.createdAt).getTime()); + + return sessions; + } catch (err) { + console.error(`Error scanning ${dir}:`, (err as Error).message); + return []; + } +} + +/** + * Load plan.json or fix-plan.json from 
session directory + * @param sessionPath - Session directory path + * @returns Plan data or null + */ +function loadPlanJson(sessionPath: string): unknown | null { + // Try fix-plan.json first (for lite-fix), then plan.json (for lite-plan) + const fixPlanPath = join(sessionPath, 'fix-plan.json'); + const planPath = join(sessionPath, 'plan.json'); + + // Try fix-plan.json first + if (existsSync(fixPlanPath)) { + try { + const content = readFileSync(fixPlanPath, 'utf8'); + return JSON.parse(content); + } catch { + // Continue to try plan.json + } + } + + // Fallback to plan.json + if (existsSync(planPath)) { + try { + const content = readFileSync(planPath, 'utf8'); + return JSON.parse(content); + } catch { + return null; + } + } + + return null; +} + +/** + * Load all task JSON files from session directory + * Supports multiple task formats: + * 1. .task/IMPL-*.json files + * 2. tasks array in plan.json + * 3. task-*.json files in session root + * @param sessionPath - Session directory path + * @returns Array of task objects + */ +function loadTaskJsons(sessionPath: string): NormalizedTask[] { + let tasks: NormalizedTask[] = []; + + // Method 1: Check .task/IMPL-*.json files + const taskDir = join(sessionPath, '.task'); + if (existsSync(taskDir)) { + try { + const implTasks = readdirSync(taskDir) + .filter(f => f.endsWith('.json') && ( + f.startsWith('IMPL-') || + f.startsWith('TASK-') || + f.startsWith('task-') || + f.startsWith('diagnosis-') || + /^T\d+\.json$/i.test(f) + )) + .map(f => { + const taskPath = join(taskDir, f); + try { + const content = readFileSync(taskPath, 'utf8'); + return normalizeTask(JSON.parse(content)); + } catch { + return null; + } + }) + .filter((t): t is NormalizedTask => t !== null); + tasks = tasks.concat(implTasks); + } catch { + // Continue to other methods + } + } + + // Method 2: Check plan.json or fix-plan.json for embedded tasks array + if (tasks.length === 0) { + // Try fix-plan.json first (for lite-fix), then plan.json (for lite-plan) + const fixPlanPath = join(sessionPath, 'fix-plan.json'); + const planPath = join(sessionPath, 'plan.json'); + + const planFile = existsSync(fixPlanPath) ? fixPlanPath : + existsSync(planPath) ? 
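+        // if neither plan file exists (or it fails to parse), Method 3 below is the last resort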
planPath : null; + + if (planFile) { + try { + const plan = JSON.parse(readFileSync(planFile, 'utf8')) as { tasks?: unknown[] }; + if (Array.isArray(plan.tasks)) { + tasks = plan.tasks.map(t => normalizeTask(t)).filter((t): t is NormalizedTask => t !== null); + } + } catch { + // Continue to other methods + } + } + } + + // Method 3: Check for task-*.json and diagnosis-*.json files in session root + if (tasks.length === 0) { + try { + const rootTasks = readdirSync(sessionPath) + .filter(f => f.endsWith('.json') && ( + f.startsWith('task-') || + f.startsWith('TASK-') || + f.startsWith('diagnosis-') || + /^T\d+\.json$/i.test(f) + )) + .map(f => { + const taskPath = join(sessionPath, f); + try { + const content = readFileSync(taskPath, 'utf8'); + return normalizeTask(JSON.parse(content)); + } catch { + return null; + } + }) + .filter((t): t is NormalizedTask => t !== null); + tasks = tasks.concat(rootTasks); + } catch { + // No tasks found + } + } + + // Sort tasks by ID + return tasks.sort((a, b) => { + const aNum = parseInt(a.id?.replace(/\D/g, '') || '0'); + const bNum = parseInt(b.id?.replace(/\D/g, '') || '0'); + return aNum - bNum; + }); +} + +/** + * Normalize task object to consistent structure + * @param task - Raw task object + * @returns Normalized task + */ +function normalizeTask(task: unknown): NormalizedTask | null { + if (!task || typeof task !== 'object') return null; + + const taskObj = task as Record; + + // Determine status - support various status formats + let status = (taskObj.status as string | { state?: string; value?: string }) || 'pending'; + if (typeof status === 'object') { + status = status.state || status.value || 'pending'; + } + + const meta = taskObj.meta as Record | undefined; + const context = taskObj.context as Record | undefined; + const flowControl = taskObj.flow_control as Record | undefined; + const implementation = taskObj.implementation as unknown[] | undefined; + const modificationPoints = taskObj.modification_points as Array<{ file?: string }> | undefined; + + return { + id: (taskObj.id as string) || (taskObj.task_id as string) || 'unknown', + title: (taskObj.title as string) || (taskObj.name as string) || (taskObj.summary as string) || 'Untitled Task', + status: (status as string).toLowerCase(), + // Preserve original fields for flexible rendering + meta: meta ? { + type: (meta.type as string) || (taskObj.type as string) || (taskObj.action as string) || 'task', + agent: (meta.agent as string) || (taskObj.agent as string) || null, + scope: (meta.scope as string) || (taskObj.scope as string) || null, + module: (meta.module as string) || (taskObj.module as string) || null + } : { + type: (taskObj.type as string) || (taskObj.action as string) || 'task', + agent: (taskObj.agent as string) || null, + scope: (taskObj.scope as string) || null, + module: (taskObj.module as string) || null + }, + context: context ? { + requirements: (context.requirements as string[]) || [], + focus_paths: (context.focus_paths as string[]) || [], + acceptance: (context.acceptance as string[]) || [], + depends_on: (context.depends_on as string[]) || [] + } : { + requirements: (taskObj.requirements as string[]) || (taskObj.description ? [taskObj.description as string] : []), + focus_paths: (taskObj.focus_paths as string[]) || modificationPoints?.map(m => m.file).filter((f): f is string => !!f) || [], + acceptance: (taskObj.acceptance as string[]) || [], + depends_on: (taskObj.depends_on as string[]) || [] + }, + flow_control: flowControl ? 
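+      // explicit flow_control takes priority; otherwise steps are synthesized from an alternate `implementation` array below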
{ + implementation_approach: (flowControl.implementation_approach as Array<{ step: string; action: string }>) || [] + } : { + implementation_approach: implementation?.map((step, i) => ({ + step: `Step ${i + 1}`, + action: step as string + })) || [] + }, + // Keep all original fields for raw JSON view + _raw: task + }; +} + +/** + * Get directory creation time + * @param dirPath - Directory path + * @returns ISO date string + */ +function getCreatedTime(dirPath: string): string { + try { + const stat = statSync(dirPath); + return stat.birthtime.toISOString(); + } catch { + return new Date().toISOString(); + } +} + +/** + * Calculate progress from tasks + * @param tasks - Array of task objects + * @returns Progress info + */ +function calculateProgress(tasks: NormalizedTask[]): Progress { + if (!tasks || tasks.length === 0) { + return { total: 0, completed: 0, percentage: 0 }; + } + + const total = tasks.length; + const completed = tasks.filter(t => t.status === 'completed').length; + const percentage = Math.round((completed / total) * 100); + + return { total, completed, percentage }; +} + +/** + * Get detailed lite task info + * @param workflowDir - Workflow directory + * @param type - 'lite-plan' or 'lite-fix' + * @param sessionId - Session ID + * @returns Detailed task info + */ +export function getLiteTaskDetail(workflowDir: string, type: string, sessionId: string): LiteTaskDetail | null { + const dir = type === 'lite-plan' + ? join(workflowDir, '.lite-plan', sessionId) + : join(workflowDir, '.lite-fix', sessionId); + + if (!existsSync(dir)) return null; + + const detail: LiteTaskDetail = { + id: sessionId, + type, + path: dir, + plan: loadPlanJson(dir), + tasks: loadTaskJsons(dir), + explorations: loadExplorations(dir), + clarifications: loadClarifications(dir) + }; + + // For lite-fix sessions, also load diagnoses + if (type === 'lite-fix') { + detail.diagnoses = loadDiagnoses(dir); + } + + return detail; +} + +/** + * Load exploration results + * @param sessionPath - Session directory path + * @returns Exploration results + */ +function loadExplorations(sessionPath: string): unknown[] { + const explorePath = join(sessionPath, 'explorations.json'); + if (!existsSync(explorePath)) return []; + + try { + const content = readFileSync(explorePath, 'utf8'); + return JSON.parse(content); + } catch { + return []; + } +} + +/** + * Load clarification data + * @param sessionPath - Session directory path + * @returns Clarification data + */ +function loadClarifications(sessionPath: string): unknown | null { + const clarifyPath = join(sessionPath, 'clarifications.json'); + if (!existsSync(clarifyPath)) return null; + + try { + const content = readFileSync(clarifyPath, 'utf8'); + return JSON.parse(content); + } catch { + return null; + } +} + +/** + * Load diagnosis files for lite-fix sessions + * Loads diagnosis-*.json files from session root directory + * @param sessionPath - Session directory path + * @returns Diagnoses data with manifest and items + */ +function loadDiagnoses(sessionPath: string): Diagnoses { + const result: Diagnoses = { + manifest: null, + items: [] + }; + + // Try to load diagnoses-manifest.json first + const manifestPath = join(sessionPath, 'diagnoses-manifest.json'); + if (existsSync(manifestPath)) { + try { + result.manifest = JSON.parse(readFileSync(manifestPath, 'utf8')); + } catch { + // Continue without manifest + } + } + + // Load all diagnosis-*.json files from session root + try { + const diagnosisFiles = readdirSync(sessionPath) + .filter(f => 
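+      // keep only diagnosis-*.json files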
f.startsWith('diagnosis-') && f.endsWith('.json')); + + for (const file of diagnosisFiles) { + const filePath = join(sessionPath, file); + try { + const content = JSON.parse(readFileSync(filePath, 'utf8')) as Record; + result.items.push({ + id: file.replace('diagnosis-', '').replace('.json', ''), + filename: file, + ...content + }); + } catch { + // Skip invalid files + } + } + } catch { + // Return empty items if directory read fails + } + + return result; +} diff --git a/ccw/src/core/routes/ccw-routes.ts b/ccw/src/core/routes/ccw-routes.ts new file mode 100644 index 00000000..425681ae --- /dev/null +++ b/ccw/src/core/routes/ccw-routes.ts @@ -0,0 +1,96 @@ +// @ts-nocheck +/** + * CCW Routes Module + * Handles all CCW-related API endpoints + */ +import type { IncomingMessage, ServerResponse } from 'http'; +import { getAllManifests } from '../manifest.js'; +import { listTools } from '../../tools/index.js'; + +export interface RouteContext { + pathname: string; + url: URL; + req: IncomingMessage; + res: ServerResponse; + initialPath: string; + handlePostRequest: (req: IncomingMessage, res: ServerResponse, handler: (body: unknown) => Promise) => void; + broadcastToClients: (data: unknown) => void; +} + +/** + * Handle CCW routes + * @returns true if route was handled, false otherwise + */ +export async function handleCcwRoutes(ctx: RouteContext): Promise { + const { pathname, url, req, res, initialPath, handlePostRequest, broadcastToClients } = ctx; + + // API: CCW Installation Status + if (pathname === '/api/ccw/installations') { + const manifests = getAllManifests(); + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ installations: manifests })); + return true; + } + + // API: CCW Endpoint Tools List + if (pathname === '/api/ccw/tools') { + const tools = listTools(); + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ tools })); + return true; + } + + // API: CCW Upgrade + if (pathname === '/api/ccw/upgrade' && req.method === 'POST') { + handlePostRequest(req, res, async (body) => { + const { path: installPath } = body; + + try { + const { spawn } = await import('child_process'); + + // Run ccw upgrade command + const args = installPath ? 
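+      // NOTE: both ternary branches below are identical, so `installPath` currently has no effect on the command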
['upgrade', '--all'] : ['upgrade', '--all']; + const upgradeProcess = spawn('ccw', args, { + shell: true, + stdio: ['ignore', 'pipe', 'pipe'] + }); + + let stdout = ''; + let stderr = ''; + + upgradeProcess.stdout.on('data', (data) => { + stdout += data.toString(); + }); + + upgradeProcess.stderr.on('data', (data) => { + stderr += data.toString(); + }); + + return new Promise((resolve) => { + upgradeProcess.on('close', (code) => { + if (code === 0) { + resolve({ success: true, message: 'Upgrade completed', output: stdout }); + } else { + resolve({ success: false, error: stderr || 'Upgrade failed', output: stdout, status: 500 }); + } + }); + + upgradeProcess.on('error', (err) => { + resolve({ success: false, error: err.message, status: 500 }); + }); + + // Timeout after 2 minutes + setTimeout(() => { + upgradeProcess.kill(); + resolve({ success: false, error: 'Upgrade timed out', status: 504 }); + }, 120000); + }); + } catch (err) { + return { success: false, error: err.message, status: 500 }; + } + }); + return true; + } + + return false; +} diff --git a/ccw/src/core/routes/cli-routes.ts b/ccw/src/core/routes/cli-routes.ts new file mode 100644 index 00000000..0678401e --- /dev/null +++ b/ccw/src/core/routes/cli-routes.ts @@ -0,0 +1,561 @@ +// @ts-nocheck +/** + * CLI Routes Module + * Handles all CLI-related API endpoints + */ +import type { IncomingMessage, ServerResponse } from 'http'; +import { + getCliToolsStatus, + getCliToolsFullStatus, + installCliTool, + uninstallCliTool, + enableCliTool, + disableCliTool, + getExecutionHistory, + getExecutionHistoryAsync, + getExecutionDetail, + getConversationDetail, + getConversationDetailWithNativeInfo, + deleteExecution, + deleteExecutionAsync, + batchDeleteExecutionsAsync, + executeCliTool, + getNativeSessionContent, + getFormattedNativeConversation, + getEnrichedConversation, + getHistoryWithNativeInfo +} from '../../tools/cli-executor.js'; +import { generateSmartContext, formatSmartContext } from '../../tools/smart-context.js'; +import { + loadCliConfig, + getToolConfig, + updateToolConfig, + getFullConfigResponse, + PREDEFINED_MODELS +} from '../../tools/cli-config-manager.js'; + +export interface RouteContext { + pathname: string; + url: URL; + req: IncomingMessage; + res: ServerResponse; + initialPath: string; + handlePostRequest: (req: IncomingMessage, res: ServerResponse, handler: (body: unknown) => Promise) => void; + broadcastToClients: (data: unknown) => void; +} + +/** + * Handle CLI routes + * @returns true if route was handled, false otherwise + */ +export async function handleCliRoutes(ctx: RouteContext): Promise { + const { pathname, url, req, res, initialPath, handlePostRequest, broadcastToClients } = ctx; + + // API: CLI Tools Status + if (pathname === '/api/cli/status') { + const status = await getCliToolsStatus(); + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify(status)); + return true; + } + + // API: CLI Tools Full Status (with enabled state) + if (pathname === '/api/cli/full-status') { + const status = await getCliToolsFullStatus(); + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify(status)); + return true; + } + + // API: Install CLI Tool + if (pathname === '/api/cli/install' && req.method === 'POST') { + handlePostRequest(req, res, async (body: unknown) => { + const { tool } = body as { tool: string }; + if (!tool) { + return { error: 'Tool name is required', status: 400 }; + } + + const result = await installCliTool(tool); + if (result.success) 
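+    // success path: notify connected clients via broadcast, then acknowledge the caller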
{ + // Broadcast tool installed event + broadcastToClients({ + type: 'CLI_TOOL_INSTALLED', + payload: { tool, timestamp: new Date().toISOString() } + }); + return { success: true, message: `${tool} installed successfully` }; + } else { + return { success: false, error: result.error, status: 500 }; + } + }); + return true; + } + + // API: Uninstall CLI Tool + if (pathname === '/api/cli/uninstall' && req.method === 'POST') { + handlePostRequest(req, res, async (body: unknown) => { + const { tool } = body as { tool: string }; + if (!tool) { + return { error: 'Tool name is required', status: 400 }; + } + + const result = await uninstallCliTool(tool); + if (result.success) { + // Broadcast tool uninstalled event + broadcastToClients({ + type: 'CLI_TOOL_UNINSTALLED', + payload: { tool, timestamp: new Date().toISOString() } + }); + return { success: true, message: `${tool} uninstalled successfully` }; + } else { + return { success: false, error: result.error, status: 500 }; + } + }); + return true; + } + + // API: Enable CLI Tool + if (pathname === '/api/cli/enable' && req.method === 'POST') { + handlePostRequest(req, res, async (body: unknown) => { + const { tool } = body as { tool: string }; + if (!tool) { + return { error: 'Tool name is required', status: 400 }; + } + + const result = enableCliTool(tool); + // Broadcast tool enabled event + broadcastToClients({ + type: 'CLI_TOOL_ENABLED', + payload: { tool, timestamp: new Date().toISOString() } + }); + return { success: true, message: `${tool} enabled` }; + }); + return true; + } + + // API: Disable CLI Tool + if (pathname === '/api/cli/disable' && req.method === 'POST') { + handlePostRequest(req, res, async (body: unknown) => { + const { tool } = body as { tool: string }; + if (!tool) { + return { error: 'Tool name is required', status: 400 }; + } + + const result = disableCliTool(tool); + // Broadcast tool disabled event + broadcastToClients({ + type: 'CLI_TOOL_DISABLED', + payload: { tool, timestamp: new Date().toISOString() } + }); + return { success: true, message: `${tool} disabled` }; + }); + return true; + } + + // API: Get Full CLI Config (with predefined models) + if (pathname === '/api/cli/config' && req.method === 'GET') { + try { + const response = getFullConfigResponse(initialPath); + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify(response)); + } catch (err) { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: (err as Error).message })); + } + return true; + } + + // API: Get/Update Tool Config + const configMatch = pathname.match(/^\/api\/cli\/config\/(gemini|qwen|codex)$/); + if (configMatch) { + const tool = configMatch[1]; + + // GET: Get single tool config + if (req.method === 'GET') { + try { + const toolConfig = getToolConfig(initialPath, tool); + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify(toolConfig)); + } catch (err) { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: (err as Error).message })); + } + return true; + } + + // PUT: Update tool config + if (req.method === 'PUT') { + handlePostRequest(req, res, async (body: unknown) => { + try { + const updates = body as { enabled?: boolean; primaryModel?: string; secondaryModel?: string }; + const updated = updateToolConfig(initialPath, tool, updates); + + // Broadcast config updated event + broadcastToClients({ + type: 'CLI_CONFIG_UPDATED', + payload: { tool, config: updated, timestamp: new 
Date().toISOString() } + }); + + return { success: true, config: updated }; + } catch (err) { + return { error: (err as Error).message, status: 500 }; + } + }); + return true; + } + } + + // API: CLI Execution History + if (pathname === '/api/cli/history') { + const projectPath = url.searchParams.get('path') || initialPath; + const limit = parseInt(url.searchParams.get('limit') || '50', 10); + const tool = url.searchParams.get('tool') || null; + const status = url.searchParams.get('status') || null; + const category = url.searchParams.get('category') as 'user' | 'internal' | 'insight' | null; + const search = url.searchParams.get('search') || null; + const recursive = url.searchParams.get('recursive') !== 'false'; + + getExecutionHistoryAsync(projectPath, { limit, tool, status, category, search, recursive }) + .then(history => { + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify(history)); + }) + .catch(err => { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: err.message })); + }); + return true; + } + + // API: CLI Execution Detail (GET) or Delete (DELETE) + if (pathname === '/api/cli/execution') { + const projectPath = url.searchParams.get('path') || initialPath; + const executionId = url.searchParams.get('id'); + + if (!executionId) { + res.writeHead(400, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: 'Execution ID is required' })); + return true; + } + + // Handle DELETE request + if (req.method === 'DELETE') { + deleteExecutionAsync(projectPath, executionId) + .then(result => { + if (result.success) { + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ success: true, message: 'Execution deleted' })); + } else { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: result.error || 'Delete failed' })); + } + }) + .catch(err => { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: err.message })); + }); + return true; + } + + // Handle GET request - return conversation with native session info + const conversation = getConversationDetailWithNativeInfo(projectPath, executionId); + if (!conversation) { + res.writeHead(404, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: 'Conversation not found' })); + return true; + } + + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify(conversation)); + return true; + } + + // API: Batch Delete CLI Executions + if (pathname === '/api/cli/batch-delete' && req.method === 'POST') { + handlePostRequest(req, res, async (body) => { + const { path: projectPath, ids } = body as { path?: string; ids: string[] }; + + if (!ids || !Array.isArray(ids) || ids.length === 0) { + return { error: 'ids array is required', status: 400 }; + } + + const basePath = projectPath || initialPath; + return await batchDeleteExecutionsAsync(basePath, ids); + }); + return true; + } + + // API: Get Native Session Content + if (pathname === '/api/cli/native-session') { + const projectPath = url.searchParams.get('path') || initialPath; + const executionId = url.searchParams.get('id'); + const format = url.searchParams.get('format') || 'json'; + + if (!executionId) { + res.writeHead(400, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: 'Execution ID is required' })); + return true; + } + + try { + let result; + if (format === 'text') { + result = await 
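+          // human-readable transcript; thoughts, tool calls, and token stats are opt-in via query params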
getFormattedNativeConversation(projectPath, executionId, { + includeThoughts: url.searchParams.get('thoughts') === 'true', + includeToolCalls: url.searchParams.get('tools') === 'true', + includeTokens: url.searchParams.get('tokens') === 'true' + }); + } else if (format === 'pairs') { + const enriched = await getEnrichedConversation(projectPath, executionId); + result = enriched?.merged || null; + } else { + result = await getNativeSessionContent(projectPath, executionId); + } + + if (!result) { + res.writeHead(404, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: 'Native session not found' })); + return true; + } + + res.writeHead(200, { 'Content-Type': format === 'text' ? 'text/plain' : 'application/json' }); + res.end(format === 'text' ? result : JSON.stringify(result)); + } catch (err) { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: (err as Error).message })); + } + return true; + } + + // API: Get Enriched Conversation + if (pathname === '/api/cli/enriched') { + const projectPath = url.searchParams.get('path') || initialPath; + const executionId = url.searchParams.get('id'); + + if (!executionId) { + res.writeHead(400, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: 'Execution ID is required' })); + return true; + } + + getEnrichedConversation(projectPath, executionId) + .then(result => { + if (!result) { + res.writeHead(404, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: 'Conversation not found' })); + return; + } + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify(result)); + }) + .catch(err => { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: (err as Error).message })); + }); + return true; + } + + // API: Get History with Native Session Info + if (pathname === '/api/cli/history-native') { + const projectPath = url.searchParams.get('path') || initialPath; + const limit = parseInt(url.searchParams.get('limit') || '50', 10); + const tool = url.searchParams.get('tool') || null; + const status = url.searchParams.get('status') || null; + const category = url.searchParams.get('category') as 'user' | 'internal' | 'insight' | null; + const search = url.searchParams.get('search') || null; + + getHistoryWithNativeInfo(projectPath, { limit, tool, status, category, search }) + .then(history => { + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify(history)); + }) + .catch(err => { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: (err as Error).message })); + }); + return true; + } + + // API: Execute CLI Tool + if (pathname === '/api/cli/execute' && req.method === 'POST') { + handlePostRequest(req, res, async (body) => { + const { tool, prompt, mode, format, model, dir, includeDirs, timeout, smartContext, parentExecutionId, category } = body as any; + + if (!tool || !prompt) { + return { error: 'tool and prompt are required', status: 400 }; + } + + // Generate smart context if enabled + let finalPrompt = prompt; + if (smartContext?.enabled) { + try { + const contextResult = await generateSmartContext(prompt, { + enabled: true, + maxFiles: smartContext.maxFiles || 10, + searchMode: 'text' + }, dir || initialPath); + + const contextAppendage = formatSmartContext(contextResult); + if (contextAppendage) { + finalPrompt = prompt + contextAppendage; + } + } catch (err) { + console.warn('[Smart Context] Failed 
to generate:', err); + } + } + + const executionId = `${Date.now()}-${tool}`; + + // Broadcast execution started + broadcastToClients({ + type: 'CLI_EXECUTION_STARTED', + payload: { + executionId, + tool, + mode: mode || 'analysis', + parentExecutionId, + timestamp: new Date().toISOString() + } + }); + + try { + const result = await executeCliTool({ + tool, + prompt: finalPrompt, + mode: mode || 'analysis', + format: format || 'plain', + model, + cd: dir || initialPath, + includeDirs, + timeout: timeout || 300000, + category: category || 'user', + parentExecutionId, + stream: true + }, (chunk) => { + broadcastToClients({ + type: 'CLI_OUTPUT', + payload: { + executionId, + chunkType: chunk.type, + data: chunk.data + } + }); + }); + + // Broadcast completion + broadcastToClients({ + type: 'CLI_EXECUTION_COMPLETED', + payload: { + executionId, + success: result.success, + status: result.execution.status, + duration_ms: result.execution.duration_ms + } + }); + + return { + success: result.success, + execution: result.execution + }; + + } catch (error: unknown) { + broadcastToClients({ + type: 'CLI_EXECUTION_ERROR', + payload: { + executionId, + error: (error as Error).message + } + }); + + return { error: (error as Error).message, status: 500 }; + } + }); + return true; + } + + // API: CLI Review - Submit review for an execution + if (pathname.startsWith('/api/cli/review/') && req.method === 'POST') { + const executionId = pathname.replace('/api/cli/review/', ''); + handlePostRequest(req, res, async (body) => { + const { status, rating, comments, reviewer } = body as { + status: 'pending' | 'approved' | 'rejected' | 'changes_requested'; + rating?: number; + comments?: string; + reviewer?: string; + }; + + if (!status) { + return { error: 'status is required', status: 400 }; + } + + try { + const historyStore = await import('../../tools/cli-history-store.js').then(m => m.getHistoryStore(initialPath)); + + const execution = historyStore.getConversation(executionId); + if (!execution) { + return { error: 'Execution not found', status: 404 }; + } + + const review = historyStore.saveReview({ + execution_id: executionId, + status, + rating, + comments, + reviewer + }); + + broadcastToClients({ + type: 'CLI_REVIEW_UPDATED', + payload: { + executionId, + review, + timestamp: new Date().toISOString() + } + }); + + return { success: true, review }; + } catch (error: unknown) { + return { error: (error as Error).message, status: 500 }; + } + }); + return true; + } + + // API: CLI Review - Get review for an execution + if (pathname.startsWith('/api/cli/review/') && req.method === 'GET') { + const executionId = pathname.replace('/api/cli/review/', ''); + try { + const historyStore = await import('../../tools/cli-history-store.js').then(m => m.getHistoryStore(initialPath)); + const review = historyStore.getReview(executionId); + + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ review })); + } catch (error: unknown) { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: (error as Error).message })); + } + return true; + } + + // API: CLI Reviews - List all reviews + if (pathname === '/api/cli/reviews' && req.method === 'GET') { + try { + const historyStore = await import('../../tools/cli-history-store.js').then(m => m.getHistoryStore(initialPath)); + const statusFilter = url.searchParams.get('status') as 'pending' | 'approved' | 'rejected' | 'changes_requested' | null; + const limit = parseInt(url.searchParams.get('limit') || 
'50', 10); + + const reviews = historyStore.getReviews({ + status: statusFilter || undefined, + limit + }); + + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ reviews, count: reviews.length })); + } catch (error: unknown) { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: (error as Error).message })); + } + return true; + } + + return false; +} diff --git a/ccw/src/core/routes/codexlens-routes.ts b/ccw/src/core/routes/codexlens-routes.ts new file mode 100644 index 00000000..ff977282 --- /dev/null +++ b/ccw/src/core/routes/codexlens-routes.ts @@ -0,0 +1,175 @@ +// @ts-nocheck +/** + * CodexLens Routes Module + * Handles all CodexLens-related API endpoints + */ +import type { IncomingMessage, ServerResponse } from 'http'; +import { + checkVenvStatus, + bootstrapVenv, + executeCodexLens, + checkSemanticStatus, + installSemantic +} from '../../tools/codex-lens.js'; + +export interface RouteContext { + pathname: string; + url: URL; + req: IncomingMessage; + res: ServerResponse; + initialPath: string; + handlePostRequest: (req: IncomingMessage, res: ServerResponse, handler: (body: unknown) => Promise) => void; + broadcastToClients: (data: unknown) => void; +} + +/** + * Handle CodexLens routes + * @returns true if route was handled, false otherwise + */ +export async function handleCodexLensRoutes(ctx: RouteContext): Promise { + const { pathname, url, req, res, initialPath, handlePostRequest, broadcastToClients } = ctx; + + // API: CodexLens Status + if (pathname === '/api/codexlens/status') { + const status = await checkVenvStatus(); + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify(status)); + return true; + } + + // API: CodexLens Bootstrap (Install) + if (pathname === '/api/codexlens/bootstrap' && req.method === 'POST') { + handlePostRequest(req, res, async () => { + try { + const result = await bootstrapVenv(); + if (result.success) { + const status = await checkVenvStatus(); + return { success: true, message: 'CodexLens installed successfully', version: status.version }; + } else { + return { success: false, error: result.error, status: 500 }; + } + } catch (err) { + return { success: false, error: err.message, status: 500 }; + } + }); + return true; + } + + // API: CodexLens Init (Initialize workspace index) + if (pathname === '/api/codexlens/init' && req.method === 'POST') { + handlePostRequest(req, res, async (body) => { + const { path: projectPath } = body; + const targetPath = projectPath || initialPath; + + try { + const result = await executeCodexLens(['init', targetPath, '--json'], { cwd: targetPath }); + if (result.success) { + try { + const parsed = JSON.parse(result.output); + return { success: true, result: parsed }; + } catch { + return { success: true, output: result.output }; + } + } else { + return { success: false, error: result.error, status: 500 }; + } + } catch (err) { + return { success: false, error: err.message, status: 500 }; + } + }); + return true; + } + + // API: CodexLens Semantic Search Status + if (pathname === '/api/codexlens/semantic/status') { + const status = await checkSemanticStatus(); + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify(status)); + return true; + } + + // API: CodexLens Semantic Metadata List + if (pathname === '/api/codexlens/semantic/metadata') { + const offset = parseInt(url.searchParams.get('offset') || '0', 10); + const limit = parseInt(url.searchParams.get('limit') || '50', 
10); + const tool = url.searchParams.get('tool') || ''; + const projectPath = url.searchParams.get('path') || initialPath; + + try { + const args = [ + 'semantic-list', + '--path', projectPath, + '--offset', offset.toString(), + '--limit', limit.toString(), + '--json' + ]; + if (tool) { + args.push('--tool', tool); + } + + const result = await executeCodexLens(args, { cwd: projectPath }); + + if (result.success) { + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(result.output); + } else { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ success: false, error: result.error })); + } + } catch (err) { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ success: false, error: err.message })); + } + return true; + } + + // API: CodexLens LLM Enhancement (run enhance command) + if (pathname === '/api/codexlens/enhance' && req.method === 'POST') { + handlePostRequest(req, res, async (body) => { + const { path: projectPath, tool = 'gemini', batchSize = 5, timeoutMs = 300000 } = body; + const targetPath = projectPath || initialPath; + + try { + const args = ['enhance', targetPath, '--tool', tool, '--batch-size', batchSize.toString()]; + const result = await executeCodexLens(args, { cwd: targetPath, timeout: timeoutMs + 30000 }); + if (result.success) { + try { + const parsed = JSON.parse(result.output); + return { success: true, result: parsed }; + } catch { + return { success: true, output: result.output }; + } + } else { + return { success: false, error: result.error, status: 500 }; + } + } catch (err) { + return { success: false, error: err.message, status: 500 }; + } + }); + return true; + } + + // API: CodexLens Semantic Search Install (fastembed, ONNX-based, ~200MB) + if (pathname === '/api/codexlens/semantic/install' && req.method === 'POST') { + handlePostRequest(req, res, async () => { + try { + const result = await installSemantic(); + if (result.success) { + const status = await checkSemanticStatus(); + return { + success: true, + message: 'Semantic search installed successfully (fastembed)', + ...status + }; + } else { + return { success: false, error: result.error, status: 500 }; + } + } catch (err) { + return { success: false, error: err.message, status: 500 }; + } + }); + return true; + } + + return false; +} diff --git a/ccw/src/core/routes/files-routes.ts b/ccw/src/core/routes/files-routes.ts new file mode 100644 index 00000000..ff09d919 --- /dev/null +++ b/ccw/src/core/routes/files-routes.ts @@ -0,0 +1,428 @@ +// @ts-nocheck +/** + * Files Routes Module + * Handles all file browsing related API endpoints + */ +import type { IncomingMessage, ServerResponse } from 'http'; +import { existsSync, readFileSync, readdirSync, statSync } from 'fs'; +import { join } from 'path'; + +export interface RouteContext { + pathname: string; + url: URL; + req: IncomingMessage; + res: ServerResponse; + initialPath: string; + handlePostRequest: (req: IncomingMessage, res: ServerResponse, handler: (body: unknown) => Promise) => void; + broadcastToClients: (data: unknown) => void; +} + +// ======================================== +// Constants +// ======================================== + +// Directories to always exclude from file tree +const EXPLORER_EXCLUDE_DIRS = [ + '.git', '__pycache__', 'node_modules', '.venv', 'venv', 'env', + 'dist', 'build', '.cache', '.pytest_cache', '.mypy_cache', + 'coverage', '.nyc_output', 'logs', 'tmp', 'temp', '.next', + '.nuxt', '.output', '.turbo', '.parcel-cache' +]; 
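+ +// Sanity sketch (hypothetical inputs, documentation only) of how the exclusions above combine +// with the .gitignore matching in shouldIgnore() below: +// shouldIgnore('node_modules', [], true) -> true (always-excluded directory) +// shouldIgnore('debug.log', ['*.log'], false) -> true ('*.log' wildcard pattern) +// shouldIgnore('src', [], true) -> false (ordinary directory is kept)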
+ +// File extensions to language mapping for syntax highlighting +const EXT_TO_LANGUAGE = { + '.js': 'javascript', + '.jsx': 'javascript', + '.ts': 'typescript', + '.tsx': 'typescript', + '.py': 'python', + '.rb': 'ruby', + '.java': 'java', + '.go': 'go', + '.rs': 'rust', + '.c': 'c', + '.cpp': 'cpp', + '.h': 'c', + '.hpp': 'cpp', + '.cs': 'csharp', + '.php': 'php', + '.swift': 'swift', + '.kt': 'kotlin', + '.scala': 'scala', + '.sh': 'bash', + '.bash': 'bash', + '.zsh': 'bash', + '.ps1': 'powershell', + '.sql': 'sql', + '.html': 'html', + '.htm': 'html', + '.css': 'css', + '.scss': 'scss', + '.sass': 'sass', + '.less': 'less', + '.json': 'json', + '.xml': 'xml', + '.yaml': 'yaml', + '.yml': 'yaml', + '.toml': 'toml', + '.ini': 'ini', + '.cfg': 'ini', + '.conf': 'nginx', + '.md': 'markdown', + '.markdown': 'markdown', + '.txt': 'plaintext', + '.log': 'plaintext', + '.env': 'bash', + '.dockerfile': 'dockerfile', + '.vue': 'html', + '.svelte': 'html' +}; + +// ======================================== +// Helper Functions +// ======================================== + +/** + * Parse .gitignore file and return patterns + * @param {string} gitignorePath - Path to .gitignore file + * @returns {string[]} Array of gitignore patterns + */ +function parseGitignore(gitignorePath) { + try { + if (!existsSync(gitignorePath)) return []; + const content = readFileSync(gitignorePath, 'utf8'); + return content + .split('\n') + .map(line => line.trim()) + .filter(line => line && !line.startsWith('#')); + } catch { + return []; + } +} + +/** + * Check if a file/directory should be ignored based on gitignore patterns + * Simple pattern matching (supports basic glob patterns) + * @param {string} name - File or directory name + * @param {string[]} patterns - Gitignore patterns + * @param {boolean} isDirectory - Whether the entry is a directory + * @returns {boolean} + */ +function shouldIgnore(name, patterns, isDirectory) { + // Always exclude certain directories + if (isDirectory && EXPLORER_EXCLUDE_DIRS.includes(name)) { + return true; + } + + // Skip hidden files/directories (starting with .) 
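+ // ('.claude' and '.workflow' are exempted just below so workflow config stays browsable)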
+ if (name.startsWith('.') && name !== '.claude' && name !== '.workflow') { + return true; + } + + for (const pattern of patterns) { + let p = pattern; + + // Handle negation patterns (we skip them for simplicity) + if (p.startsWith('!')) continue; + + // Handle directory-only patterns + if (p.endsWith('/')) { + if (!isDirectory) continue; + p = p.slice(0, -1); + } + + // Simple pattern matching + if (p === name) return true; + + // Handle wildcard patterns + if (p.includes('*')) { + const regex = new RegExp('^' + p.replace(/\./g, '\\.').replace(/\*/g, '.*') + '$'); + if (regex.test(name)) return true; + } + + // Handle extension patterns like *.log + if (p.startsWith('*.')) { + const ext = p.slice(1); + if (name.endsWith(ext)) return true; + } + } + + return false; +} + +/** + * List directory files with .gitignore filtering + * @param {string} dirPath - Directory path to list + * @returns {Promise} + */ +async function listDirectoryFiles(dirPath) { + try { + // Normalize path + let normalizedPath = dirPath.replace(/\\/g, '/'); + if (normalizedPath.match(/^\/[a-zA-Z]\//)) { + normalizedPath = normalizedPath.charAt(1).toUpperCase() + ':' + normalizedPath.slice(2); + } + + if (!existsSync(normalizedPath)) { + return { error: 'Directory not found', files: [] }; + } + + if (!statSync(normalizedPath).isDirectory()) { + return { error: 'Not a directory', files: [] }; + } + + // Parse .gitignore patterns + const gitignorePath = join(normalizedPath, '.gitignore'); + const gitignorePatterns = parseGitignore(gitignorePath); + + // Read directory entries + const entries = readdirSync(normalizedPath, { withFileTypes: true }); + + const files = []; + for (const entry of entries) { + const isDirectory = entry.isDirectory(); + + // Check if should be ignored + if (shouldIgnore(entry.name, gitignorePatterns, isDirectory)) { + continue; + } + + const entryPath = join(normalizedPath, entry.name); + const fileInfo = { + name: entry.name, + type: isDirectory ? 
'directory' : 'file', + path: entryPath.replace(/\\/g, '/') + }; + + // Check if directory has CLAUDE.md + if (isDirectory) { + const claudeMdPath = join(entryPath, 'CLAUDE.md'); + fileInfo.hasClaudeMd = existsSync(claudeMdPath); + } + + files.push(fileInfo); + } + + // Sort: directories first, then alphabetically + files.sort((a, b) => { + if (a.type === 'directory' && b.type !== 'directory') return -1; + if (a.type !== 'directory' && b.type === 'directory') return 1; + return a.name.localeCompare(b.name); + }); + + return { + path: normalizedPath.replace(/\\/g, '/'), + files, + gitignorePatterns + }; + } catch (error: unknown) { + console.error('Error listing directory:', error); + return { error: (error as Error).message, files: [] }; + } +} + +/** + * Get file content for preview + * @param {string} filePath - Path to file + * @returns {Promise} + */ +async function getFileContent(filePath) { + try { + // Normalize path + let normalizedPath = filePath.replace(/\\/g, '/'); + if (normalizedPath.match(/^\/[a-zA-Z]\//)) { + normalizedPath = normalizedPath.charAt(1).toUpperCase() + ':' + normalizedPath.slice(2); + } + + if (!existsSync(normalizedPath)) { + return { error: 'File not found' }; + } + + const stats = statSync(normalizedPath); + if (stats.isDirectory()) { + return { error: 'Cannot read directory' }; + } + + // Check file size (limit to 1MB for preview) + if (stats.size > 1024 * 1024) { + return { error: 'File too large for preview (max 1MB)', size: stats.size }; + } + + // Read file content + const content = readFileSync(normalizedPath, 'utf8'); + const ext = normalizedPath.substring(normalizedPath.lastIndexOf('.')).toLowerCase(); + const language = EXT_TO_LANGUAGE[ext] || 'plaintext'; + const isMarkdown = ext === '.md' || ext === '.markdown'; + const fileName = normalizedPath.split('/').pop(); + + return { + content, + language, + isMarkdown, + fileName, + path: normalizedPath, + size: stats.size, + lines: content.split('\n').length + }; + } catch (error: unknown) { + console.error('Error reading file:', error); + return { error: (error as Error).message }; + } +} + +/** + * Trigger update-module-claude tool (async execution) + * @param {string} targetPath - Directory path to update + * @param {string} tool - CLI tool to use (gemini, qwen, codex, claude) + * @param {string} strategy - Update strategy (single-layer, multi-layer) + * @returns {Promise} + */ +async function triggerUpdateClaudeMd(targetPath, tool, strategy) { + const { spawn } = await import('child_process'); + + // Normalize path + let normalizedPath = targetPath.replace(/\\/g, '/'); + if (normalizedPath.match(/^\/[a-zA-Z]\//)) { + normalizedPath = normalizedPath.charAt(1).toUpperCase() + ':' + normalizedPath.slice(2); + } + + if (!existsSync(normalizedPath)) { + return { error: 'Directory not found' }; + } + + if (!statSync(normalizedPath).isDirectory()) { + return { error: 'Not a directory' }; + } + + // Build ccw tool command with JSON parameters + const params = JSON.stringify({ + strategy, + path: normalizedPath, + tool + }); + + console.log(`[Explorer] Running async: ccw tool exec update_module_claude with ${tool} (${strategy})`); + + return new Promise((resolve) => { + const isWindows = process.platform === 'win32'; + + // Spawn the process + const child = spawn('ccw', ['tool', 'exec', 'update_module_claude', params], { + cwd: normalizedPath, + shell: isWindows, + stdio: ['ignore', 'pipe', 'pipe'] + }); + + let stdout = ''; + let stderr = ''; + + child.stdout.on('data', (data) => { + stdout += 
data.toString(); + }); + + child.stderr.on('data', (data) => { + stderr += data.toString(); + }); + + child.on('close', (code) => { + clearTimeout(timer); + if (code === 0) { + // Parse the JSON output from the tool + let result; + try { + result = JSON.parse(stdout); + } catch { + result = { output: stdout }; + } + + if (result.success === false || result.error) { + resolve({ + success: false, + error: result.error || result.message || 'Update failed', + output: stdout + }); + } else { + resolve({ + success: true, + message: result.message || `CLAUDE.md updated successfully using ${tool} (${strategy})`, + output: stdout, + path: normalizedPath + }); + } + } else { + resolve({ + success: false, + error: stderr || `Process exited with code ${code}`, + output: stdout + stderr + }); + } + }); + + child.on('error', (error) => { + clearTimeout(timer); + console.error('Error spawning process:', error); + resolve({ + success: false, + error: (error as Error).message, + output: '' + }); + }); + + // Timeout after 5 minutes; cleared once the process settles so the timer cannot fire late + const timer = setTimeout(() => { + child.kill(); + resolve({ + success: false, + error: 'Timeout: Process took longer than 5 minutes', + output: stdout + }); + }, 300000); + }); +} + +// ======================================== +// Route Handler +// ======================================== + +/** + * Handle files routes + * @returns true if route was handled, false otherwise + */ +export async function handleFilesRoutes(ctx: RouteContext): Promise<boolean> { + const { pathname, url, req, res, initialPath, handlePostRequest } = ctx; + + // API: List directory files with .gitignore filtering (Explorer view) + if (pathname === '/api/files') { + const dirPath = url.searchParams.get('path') || initialPath; + const filesData = await listDirectoryFiles(dirPath); + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify(filesData)); + return true; + } + + // API: Get file content for preview (Explorer view) + if (pathname === '/api/file-content') { + const filePath = url.searchParams.get('path'); + if (!filePath) { + res.writeHead(400, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: 'File path is required' })); + return true; + } + const fileData = await getFileContent(filePath); + res.writeHead(fileData.error ? 
404 : 200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify(fileData)); + return true; + } + + // API: Update CLAUDE.md using CLI tools (Explorer view) + if (pathname === '/api/update-claude-md' && req.method === 'POST') { + handlePostRequest(req, res, async (body) => { + const { path: targetPath, tool = 'gemini', strategy = 'single-layer' } = body; + if (!targetPath) { + return { error: 'path is required', status: 400 }; + } + return await triggerUpdateClaudeMd(targetPath, tool, strategy); + }); + return true; + } + + return false; +} diff --git a/ccw/src/core/routes/memory-routes.ts b/ccw/src/core/routes/memory-routes.ts new file mode 100644 index 00000000..434ebd82 --- /dev/null +++ b/ccw/src/core/routes/memory-routes.ts @@ -0,0 +1,1129 @@ +// @ts-nocheck +import http from 'http'; +import { URL } from 'url'; +import { readFileSync, writeFileSync, existsSync, mkdirSync, statSync, unlinkSync } from 'fs'; +import { join, isAbsolute, extname } from 'path'; +import { homedir } from 'os'; +import { getMemoryStore } from '../memory-store.js'; +import { executeCliTool } from '../../tools/cli-executor.js'; + +/** + * Route context interface + */ +interface RouteContext { + pathname: string; + url: URL; + req: http.IncomingMessage; + res: http.ServerResponse; + initialPath: string; + handlePostRequest: (req: http.IncomingMessage, res: http.ServerResponse, handler: (body: any) => Promise) => void; + broadcastToClients: (data: any) => void; +} + +/** + * Derive prompt intent from text + */ +function derivePromptIntent(text: string): string { + const lower = text.toLowerCase(); + + // Implementation/coding patterns + if (/实现|implement|create|add|build|write|develop|make/.test(lower)) return 'implement'; + if (/修复|fix|bug|error|issue|problem|解决/.test(lower)) return 'fix'; + if (/重构|refactor|optimize|improve|clean/.test(lower)) return 'refactor'; + if (/测试|test|spec|coverage/.test(lower)) return 'test'; + + // Analysis patterns + if (/分析|analyze|review|check|examine|audit/.test(lower)) return 'analyze'; + if (/解释|explain|what|how|why|understand/.test(lower)) return 'explain'; + if (/搜索|search|find|look|where|locate/.test(lower)) return 'search'; + + // Documentation patterns + if (/文档|document|readme|comment|注释/.test(lower)) return 'document'; + + // Planning patterns + if (/计划|plan|design|architect|strategy/.test(lower)) return 'plan'; + + // Configuration patterns + if (/配置|config|setup|install|设置/.test(lower)) return 'configure'; + + // Default + return 'general'; +} + +/** + * Calculate prompt quality score (0-100) + */ +function calculateQualityScore(text: string): number { + let score = 50; // Base score + + // Length factors + const length = text.length; + if (length > 50 && length < 500) score += 15; + else if (length >= 500 && length < 1000) score += 10; + else if (length < 20) score -= 20; + + // Specificity indicators + if (/file|path|function|class|method|variable/i.test(text)) score += 10; + if (/src\/|\.ts|\.js|\.py|\.go/i.test(text)) score += 10; + + // Context indicators + if (/when|after|before|because|since/i.test(text)) score += 5; + + // Action clarity + if (/please|要|请|帮|help/i.test(text)) score += 5; + + // Structure indicators + if (/\d+\.|•|-\s/.test(text)) score += 10; // Lists + + // Cap at 100 + return Math.min(100, Math.max(0, score)); +} + +/** + * Handle Memory API routes + * @returns true if route was handled, false otherwise + */ +export async function handleMemoryRoutes(ctx: RouteContext): Promise { + const { pathname, url, req, res, initialPath, 
handlePostRequest, broadcastToClients } = ctx; + + // API: Memory Module - Track entity access + if (pathname === '/api/memory/track' && req.method === 'POST') { + handlePostRequest(req, res, async (body) => { + const { type, action, value, sessionId, metadata, path: projectPath } = body; + + if (!type || !action || !value) { + return { error: 'type, action, and value are required', status: 400 }; + } + + const basePath = projectPath || initialPath; + + try { + const memoryStore = getMemoryStore(basePath); + const now = new Date().toISOString(); + + // Normalize the value + const normalizedValue = value.toLowerCase().trim(); + + // Upsert entity + const entityId = memoryStore.upsertEntity({ + type, + value, + normalized_value: normalizedValue, + first_seen_at: now, + last_seen_at: now, + metadata: metadata ? JSON.stringify(metadata) : undefined + }); + + // Log access + memoryStore.logAccess({ + entity_id: entityId, + action, + session_id: sessionId, + timestamp: now, + context_summary: metadata?.context + }); + + // Update stats + memoryStore.updateStats(entityId, action); + + // Calculate new heat score + const heatScore = memoryStore.calculateHeatScore(entityId); + const stats = memoryStore.getStats(entityId); + + // Broadcast MEMORY_UPDATED event via WebSocket + broadcastToClients({ + type: 'MEMORY_UPDATED', + payload: { + entity: { id: entityId, type, value }, + stats: { + read_count: stats?.read_count || 0, + write_count: stats?.write_count || 0, + mention_count: stats?.mention_count || 0, + heat_score: heatScore + }, + timestamp: now + } + }); + + return { + success: true, + entity_id: entityId, + heat_score: heatScore + }; + } catch (error: unknown) { + return { error: (error as Error).message, status: 500 }; + } + }); + return true; + } + + // API: Memory Module - Get native Claude history from ~/.claude/history.jsonl + if (pathname === '/api/memory/native-history') { + const projectPath = url.searchParams.get('path') || initialPath; + const limit = parseInt(url.searchParams.get('limit') || '100', 10); + const historyFile = join(homedir(), '.claude', 'history.jsonl'); + + try { + if (!existsSync(historyFile)) { + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ prompts: [], total: 0, message: 'No history file found' })); + return true; + } + + const content = readFileSync(historyFile, 'utf8'); + const lines = content.trim().split('\n').filter(line => line.trim()); + const allPrompts = []; + + for (const line of lines) { + try { + const entry = JSON.parse(line); + // Filter by project if specified + if (projectPath && entry.project) { + const normalizedProject = entry.project.replace(/\\/g, '/').toLowerCase(); + const normalizedPath = projectPath.replace(/\\/g, '/').toLowerCase(); + if (!normalizedProject.includes(normalizedPath) && !normalizedPath.includes(normalizedProject)) { + continue; + } + } + + allPrompts.push({ + id: `${entry.sessionId}-${entry.timestamp}`, + text: entry.display || '', + timestamp: new Date(entry.timestamp).toISOString(), + project: entry.project || '', + session_id: entry.sessionId || '', + pasted_contents: entry.pastedContents || {}, + // Derive intent from content keywords + intent: derivePromptIntent(entry.display || ''), + quality_score: calculateQualityScore(entry.display || '') + }); + } catch (parseError) { + // Skip malformed lines + } + } + + // Sort by timestamp descending + allPrompts.sort((a, b) => new Date(b.timestamp).getTime() - new Date(a.timestamp).getTime()); + + // Apply limit + const prompts = 
allPrompts.slice(0, limit); + + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ prompts, total: allPrompts.length })); + } catch (error: unknown) { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: (error as Error).message })); + } + return true; + } + + // API: Memory Module - Get prompt history + if (pathname === '/api/memory/prompts') { + const projectPath = url.searchParams.get('path') || initialPath; + const limit = parseInt(url.searchParams.get('limit') || '50', 10); + const search = url.searchParams.get('search') || null; + + try { + const memoryStore = getMemoryStore(projectPath); + let prompts; + + if (search) { + prompts = memoryStore.searchPrompts(search, limit); + } else { + // Get all recent prompts (we'll need to add this method to MemoryStore) + const stmt = memoryStore['db'].prepare(` + SELECT * FROM prompt_history + ORDER BY timestamp DESC + LIMIT ? + `); + prompts = stmt.all(limit); + } + + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ prompts })); + } catch (error: unknown) { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: (error as Error).message })); + } + return true; + } + + // API: Memory Module - Get insights (from prompt_patterns) + if (pathname === '/api/memory/insights' && req.method === 'GET') { + const projectPath = url.searchParams.get('path') || initialPath; + const limitParam = url.searchParams.get('limit'); + const tool = url.searchParams.get('tool') || undefined; + + // Check if this is a request for insights history (has limit or tool param) + if (limitParam || tool) { + const limit = parseInt(limitParam || '20', 10); + try { + const storeModule = await import('../../tools/cli-history-store.js'); + const store = storeModule.getHistoryStore(projectPath); + const insights = store.getInsights({ limit, tool }); + + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ success: true, insights })); + } catch (error: unknown) { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: (error as Error).message })); + } + return true; + } + + // Default: Get prompt pattern insights + try { + const memoryStore = getMemoryStore(projectPath); + + // Get total prompt count + const countStmt = memoryStore['db'].prepare(`SELECT COUNT(*) as count FROM prompt_history`); + const { count: totalPrompts } = countStmt.get() as { count: number }; + + // Get top intent + const topIntentStmt = memoryStore['db'].prepare(` + SELECT intent_label, COUNT(*) as count + FROM prompt_history + WHERE intent_label IS NOT NULL + GROUP BY intent_label + ORDER BY count DESC + LIMIT 1 + `); + const topIntentRow = topIntentStmt.get() as { intent_label: string; count: number } | undefined; + + // Get average prompt length + const avgLengthStmt = memoryStore['db'].prepare(` + SELECT AVG(LENGTH(prompt_text)) as avg_length + FROM prompt_history + WHERE prompt_text IS NOT NULL + `); + const { avg_length: avgLength } = avgLengthStmt.get() as { avg_length: number }; + + // Get prompt patterns + const patternsStmt = memoryStore['db'].prepare(` + SELECT * FROM prompt_patterns + ORDER BY frequency DESC + LIMIT 10 + `); + const patterns = patternsStmt.all(); + + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ + stats: { + totalPrompts, + topIntent: topIntentRow?.intent_label || 'unknown', + avgLength: Math.round(avgLength || 
0) + }, + patterns: patterns.map((p: any) => ({ + type: p.pattern_type, + description: `Pattern detected in prompts`, + occurrences: p.frequency, + suggestion: `Consider using more specific prompts for ${p.pattern_type}` + })) + })); + } catch (error: unknown) { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: (error as Error).message })); + } + return true; + } + + // API: Memory Module - Trigger async CLI-based insights analysis + if (pathname === '/api/memory/insights/analyze' && req.method === 'POST') { + handlePostRequest(req, res, async (body: any) => { + const projectPath = body.path || initialPath; + const tool = body.tool || 'gemini'; // gemini, qwen, codex, claude + const prompts = body.prompts || []; + const lang = body.lang || 'en'; // Language preference + + if (prompts.length === 0) { + return { error: 'No prompts provided for analysis', status: 400 }; + } + + // Prepare prompt summary for CLI analysis + const promptSummary = prompts.slice(0, 20).map((p: any, i: number) => { + return `${i + 1}. [${p.intent || 'unknown'}] ${(p.text || '').substring(0, 100)}...`; + }).join('\n'); + + const langInstruction = lang === 'zh' + ? '请用中文回复。所有 description、suggestion、title 字段必须使用中文。' + : 'Respond in English. All description, suggestion, title fields must be in English.'; + + const analysisPrompt = ` +PURPOSE: Analyze prompt patterns and provide optimization suggestions +TASK: +• Review the following prompt history summary +• Identify common patterns (vague requests, repetitive queries, incomplete context) +• Suggest specific improvements for prompt quality +• Detect areas where prompts could be more effective +MODE: analysis +CONTEXT: ${prompts.length} prompts from project: ${projectPath} +EXPECTED: JSON with patterns array and suggestions array +LANGUAGE: ${langInstruction} + +PROMPT HISTORY: +${promptSummary} + +Return ONLY valid JSON in this exact format (no markdown, no code blocks, just pure JSON): +{ + "patterns": [ + {"type": "pattern_type", "description": "description", "occurrences": count, "severity": "low|medium|high", "suggestion": "how to improve"} + ], + "suggestions": [ + {"title": "title", "description": "description", "example": "example prompt"} + ] +}`; + + try { + // Queue CLI execution + const result = await executeCliTool({ + tool, + prompt: analysisPrompt, + mode: 'analysis', + timeout: 120000 + }); + + // Try to parse JSON from response + let insights = { patterns: [], suggestions: [] }; + if (result.stdout) { + let outputText = result.stdout; + + // Strip markdown code blocks if present + const codeBlockMatch = outputText.match(/```(?:json)?\s*([\s\S]*?)```/); + if (codeBlockMatch) { + outputText = codeBlockMatch[1].trim(); + } + + // Find JSON object in the response + const jsonMatch = outputText.match(/\{[\s\S]*\}/); + if (jsonMatch) { + try { + insights = JSON.parse(jsonMatch[0]); + // Ensure arrays exist + if (!Array.isArray(insights.patterns)) insights.patterns = []; + if (!Array.isArray(insights.suggestions)) insights.suggestions = []; + } catch (e) { + console.error('[insights/analyze] JSON parse error:', e); + // Return raw output if JSON parse fails + insights = { + patterns: [{ type: 'raw_analysis', description: result.stdout.substring(0, 500), occurrences: 1, severity: 'low', suggestion: '' }], + suggestions: [] + }; + } + } else { + // No JSON found, wrap raw output + insights = { + patterns: [{ type: 'raw_analysis', description: result.stdout.substring(0, 500), occurrences: 1, severity: 'low', 
suggestion: '' }], + suggestions: [] + }; + } + } + + // Save insight to database + try { + const storeModule = await import('../../tools/cli-history-store.js'); + const store = storeModule.getHistoryStore(projectPath); + const insightId = `insight-${Date.now()}`; + store.saveInsight({ + id: insightId, + tool, + promptCount: prompts.length, + patterns: insights.patterns, + suggestions: insights.suggestions, + rawOutput: result.stdout || '', + executionId: result.execution?.id, + lang + }); + console.log('[Insights] Saved insight:', insightId); + } catch (saveErr) { + console.warn('[Insights] Failed to save insight:', (saveErr as Error).message); + } + + return { + success: true, + insights, + tool, + executionId: result.execution?.id + }; + } catch (error: unknown) { + return { error: (error as Error).message, status: 500 }; + } + }); + return true; + } + + // API: Get single insight detail + if (pathname.startsWith('/api/memory/insights/') && req.method === 'GET') { + const insightId = pathname.replace('/api/memory/insights/', ''); + const projectPath = url.searchParams.get('path') || initialPath; + + if (!insightId || insightId === 'analyze') { + // Skip - handled by other routes + return false; + } + + try { + const storeModule = await import('../../tools/cli-history-store.js'); + const store = storeModule.getHistoryStore(projectPath); + const insight = store.getInsight(insightId); + + if (insight) { + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ success: true, insight })); + } else { + res.writeHead(404, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: 'Insight not found' })); + } + } catch (error: unknown) { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: (error as Error).message })); + } + return true; + } + + // API: Delete insight + if (pathname.startsWith('/api/memory/insights/') && req.method === 'DELETE') { + const insightId = pathname.replace('/api/memory/insights/', ''); + const projectPath = url.searchParams.get('path') || initialPath; + + try { + const storeModule = await import('../../tools/cli-history-store.js'); + const store = storeModule.getHistoryStore(projectPath); + const deleted = store.deleteInsight(insightId); + + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ success: deleted })); + } catch (error: unknown) { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: (error as Error).message })); + } + return true; + } + + // API: Memory Module - Get hotspot statistics + if (pathname === '/api/memory/stats') { + const projectPath = url.searchParams.get('path') || initialPath; + const filter = url.searchParams.get('filter') || 'all'; // today, week, all + const limit = parseInt(url.searchParams.get('limit') || '10', 10); + + try { + const memoryStore = getMemoryStore(projectPath); + const hotEntities = memoryStore.getHotEntities(limit * 4); + + // Filter by time if needed + let filtered = hotEntities; + if (filter === 'today') { + const today = new Date(); + today.setHours(0, 0, 0, 0); + filtered = hotEntities.filter((e: any) => new Date(e.last_seen_at) >= today); + } else if (filter === 'week') { + const weekAgo = new Date(); + weekAgo.setDate(weekAgo.getDate() - 7); + filtered = hotEntities.filter((e: any) => new Date(e.last_seen_at) >= weekAgo); + } + + // Separate into mostRead and mostEdited + const fileEntities = filtered.filter((e: any) => e.type === 'file'); + + const 
mostRead = fileEntities + .filter((e: any) => e.stats.read_count > 0) + .sort((a: any, b: any) => b.stats.read_count - a.stats.read_count) + .slice(0, limit) + .map((e: any) => ({ + path: e.value, + file: e.value.split(/[/\\]/).pop(), + heat: e.stats.read_count, + count: e.stats.read_count, + lastSeen: e.last_seen_at + })); + + const mostEdited = fileEntities + .filter((e: any) => e.stats.write_count > 0) + .sort((a: any, b: any) => b.stats.write_count - a.stats.write_count) + .slice(0, limit) + .map((e: any) => ({ + path: e.value, + file: e.value.split(/[/\\]/).pop(), + heat: e.stats.write_count, + count: e.stats.write_count, + lastSeen: e.last_seen_at + })); + + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ stats: { mostRead, mostEdited } })); + } catch (error: unknown) { + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ stats: { mostRead: [], mostEdited: [] } })); + } + return true; + } + + // API: Memory Module - Get memory graph (file associations with modules and components) + if (pathname === '/api/memory/graph') { + const projectPath = url.searchParams.get('path') || initialPath; + + try { + const memoryStore = getMemoryStore(projectPath); + const hotEntities = memoryStore.getHotEntities(100); + + // Build file nodes from entities + const fileEntities = hotEntities.filter((e: any) => e.type === 'file'); + const fileNodes = fileEntities.map((e: any) => { + const fileName = e.value.split(/[/\\]/).pop() || ''; + // Detect component type based on file name patterns + const isComponent = /\.(tsx|jsx|vue|svelte)$/.test(fileName) || + /^[A-Z][a-zA-Z]+\.(ts|js)$/.test(fileName) || + fileName.includes('.component.') || + fileName.includes('.controller.'); + + return { + id: e.value, + name: fileName, + path: e.value, + type: isComponent ? 
'component' : 'file', + heat: Math.min(25, 8 + e.stats.heat_score / 10) + }; + }); + + // Extract unique modules (directories) from file paths + const moduleMap = new Map(); + for (const file of fileEntities) { + const parts = file.value.split(/[/\\]/); + // Get parent directory as module (skip if root level) + if (parts.length > 1) { + const modulePath = parts.slice(0, -1).join('/'); + const moduleName = parts[parts.length - 2] || modulePath; + // Skip common non-module directories + if (['node_modules', '.git', 'dist', 'build', '.next', '.nuxt'].includes(moduleName)) continue; + + if (!moduleMap.has(modulePath)) { + moduleMap.set(modulePath, { heat: 0, files: [] }); + } + const mod = moduleMap.get(modulePath)!; + mod.heat += file.stats.heat_score / 20; + mod.files.push(file.value); + } + } + + // Create module nodes (limit to top modules by heat) + const moduleNodes = Array.from(moduleMap.entries()) + .sort((a, b) => b[1].heat - a[1].heat) + .slice(0, 15) + .map(([modulePath, data]) => ({ + id: modulePath, + name: modulePath.split(/[/\\]/).pop() || modulePath, + path: modulePath, + type: 'module', + heat: Math.min(20, 12 + data.heat / 5), + fileCount: data.files.length + })); + + // Combine all nodes + const nodes = [...fileNodes, ...moduleNodes]; + const nodeIds = new Set(nodes.map(n => n.id)); + + // Build edges from associations + const edges: any[] = []; + const edgeSet = new Set(); // Prevent duplicate edges + + // Add file-to-file associations + for (const entity of hotEntities) { + if (!entity.id || entity.type !== 'file') continue; + const associations = memoryStore.getAssociations(entity.id, 10); + for (const assoc of associations) { + if (assoc.target && nodeIds.has(assoc.target.value)) { + const edgeKey = [entity.value, assoc.target.value].sort().join('|'); + if (!edgeSet.has(edgeKey)) { + edgeSet.add(edgeKey); + edges.push({ + source: entity.value, + target: assoc.target.value, + weight: assoc.weight + }); + } + } + } + } + + // Add file-to-module edges (files belong to their parent modules) + for (const [modulePath, data] of moduleMap.entries()) { + if (!nodeIds.has(modulePath)) continue; + for (const filePath of data.files) { + if (nodeIds.has(filePath)) { + const edgeKey = [modulePath, filePath].sort().join('|'); + if (!edgeSet.has(edgeKey)) { + edgeSet.add(edgeKey); + edges.push({ + source: modulePath, + target: filePath, + weight: 2 // Lower weight for structural relationships + }); + } + } + } + } + + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ graph: { nodes, edges } })); + } catch (error: unknown) { + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ graph: { nodes: [], edges: [] } })); + } + return true; + } + + // API: Memory Module - Get recent context activities + if (pathname === '/api/memory/recent') { + const projectPath = url.searchParams.get('path') || initialPath; + const limit = parseInt(url.searchParams.get('limit') || '20', 10); + + try { + const memoryStore = getMemoryStore(projectPath); + + // Get recent access logs with entity info - filter to file type only + const db = (memoryStore as any).db; + const recentLogs = db.prepare(` + SELECT a.*, e.type, e.value + FROM access_logs a + JOIN entities e ON a.entity_id = e.id + WHERE e.type = 'file' + ORDER BY a.timestamp DESC + LIMIT ? + `).all(limit * 2) as any[]; // Fetch more to account for filtering + + // Filter out invalid entries (JSON strings, error messages, etc.) 
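+ // Heuristic only: a value is kept when it has a file extension or a path separator; +// strings that look like serialized JSON (quoted status/content keys, leading '{' or '[') +// are treated as mis-tracked entities and dropped.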
+ const validLogs = recentLogs.filter((log: any) => { + const value = log.value || ''; + // Skip if value looks like JSON or contains error-like patterns + if (value.includes('"status"') || value.includes('"content"') || + value.includes('"activeForm"') || value.startsWith('{') || + value.startsWith('[') || value.includes('graph 400')) { + return false; + } + // Must have a file extension or look like a valid path + const hasExtension = /\.[a-zA-Z0-9]{1,10}$/.test(value); + const looksLikePath = value.includes('/') || value.includes('\\'); + return hasExtension || looksLikePath; + }).slice(0, limit); + + const recent = validLogs.map((log: any) => ({ + type: log.action, // read, write, mention + timestamp: log.timestamp, + prompt: log.context_summary || '', + files: [log.value], + description: `${log.action}: ${log.value.split(/[/\\]/).pop()}` + })); + + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ recent })); + } catch (error: unknown) { + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ recent: [] })); + } + return true; + } + + // API: Active Memory - Get status + if (pathname === '/api/memory/active/status') { + const projectPath = url.searchParams.get('path') || initialPath; + + if (!projectPath) { + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ enabled: false, status: null, config: { interval: 'manual', tool: 'gemini' } })); + return true; + } + + try { + const configPath = join(projectPath, '.claude', 'rules', 'active_memory.md'); + const configJsonPath = join(projectPath, '.claude', 'rules', 'active_memory_config.json'); + const enabled = existsSync(configPath); + let lastSync: string | null = null; + let fileCount = 0; + let config = { interval: 'manual', tool: 'gemini' }; + + if (enabled) { + const stats = statSync(configPath); + lastSync = stats.mtime.toISOString(); + const content = readFileSync(configPath, 'utf-8'); + // Count file sections + fileCount = (content.match(/^## /gm) || []).length; + } + + // Load config if exists + if (existsSync(configJsonPath)) { + try { + config = JSON.parse(readFileSync(configJsonPath, 'utf-8')); + } catch (e) { /* ignore parse errors */ } + } + + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ + enabled, + status: enabled ? 
{ lastSync, fileCount } : null, + config + })); + } catch (error: unknown) { + console.error('Active Memory status error:', error); + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ enabled: false, status: null, config: { interval: 'manual', tool: 'gemini' } })); + } + return true; + } + + // API: Active Memory - Toggle + if (pathname === '/api/memory/active/toggle' && req.method === 'POST') { + let body = ''; + req.on('data', (chunk: Buffer) => { body += chunk.toString(); }); + req.on('end', async () => { + try { + const { enabled, config } = JSON.parse(body || '{}'); + const projectPath = initialPath; + + if (!projectPath) { + res.writeHead(400, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: 'No project path configured' })); + return; + } + + const rulesDir = join(projectPath, '.claude', 'rules'); + const configPath = join(rulesDir, 'active_memory.md'); + const configJsonPath = join(rulesDir, 'active_memory_config.json'); + + if (enabled) { + // Enable: Create directory and initial file + if (!existsSync(rulesDir)) { + mkdirSync(rulesDir, { recursive: true }); + } + + // Save config + if (config) { + writeFileSync(configJsonPath, JSON.stringify(config, null, 2), 'utf-8'); + } + + // Create initial active_memory.md with header + const initialContent = `# Active Memory + +> Auto-generated understanding of frequently accessed files. +> Last updated: ${new Date().toISOString()} + +--- + +*No files analyzed yet. Click "Sync Now" to analyze hot files.* +`; + writeFileSync(configPath, initialContent, 'utf-8'); + + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ enabled: true, message: 'Active Memory enabled' })); + } else { + // Disable: Remove the files + if (existsSync(configPath)) { + unlinkSync(configPath); + } + if (existsSync(configJsonPath)) { + unlinkSync(configJsonPath); + } + + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ enabled: false, message: 'Active Memory disabled' })); + } + } catch (error: unknown) { + console.error('Active Memory toggle error:', error); + if (!res.headersSent) { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: (error as Error).message })); + } + } + }); + return true; + } + + // API: Active Memory - Update Config + if (pathname === '/api/memory/active/config' && req.method === 'POST') { + let body = ''; + req.on('data', (chunk: Buffer) => { body += chunk.toString(); }); + req.on('end', async () => { + try { + const { config } = JSON.parse(body || '{}'); + const projectPath = initialPath; + const rulesDir = join(projectPath, '.claude', 'rules'); + const configJsonPath = join(rulesDir, 'active_memory_config.json'); + + if (!existsSync(rulesDir)) { + mkdirSync(rulesDir, { recursive: true }); + } + + writeFileSync(configJsonPath, JSON.stringify(config, null, 2), 'utf-8'); + + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ success: true, config })); + } catch (error: unknown) { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: (error as Error).message })); + } + }); + return true; + } + + // API: Active Memory - Sync (analyze hot files using CLI and update active_memory.md) + if (pathname === '/api/memory/active/sync' && req.method === 'POST') { + let body = ''; + req.on('data', (chunk: Buffer) => { body += chunk.toString(); }); + req.on('end', async () => { + try { + const { tool = 
'gemini' } = JSON.parse(body || '{}'); + const projectPath = initialPath; + + if (!projectPath) { + res.writeHead(400, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: 'No project path configured' })); + return; + } + + const rulesDir = join(projectPath, '.claude', 'rules'); + const configPath = join(rulesDir, 'active_memory.md'); + + // Get hot files from memory store - with fallback + let hotFiles: any[] = []; + try { + const memoryStore = getMemoryStore(projectPath); + const hotEntities = memoryStore.getHotEntities(20); + hotFiles = hotEntities + .filter((e: any) => e.type === 'file') + .slice(0, 10); + } catch (memErr) { + console.warn('[Active Memory] Memory store error, using empty list:', (memErr as Error).message); + } + + // Build file list for CLI analysis + const filePaths = hotFiles.map((f: any) => { + const filePath = f.value; + return isAbsolute(filePath) ? filePath : join(projectPath, filePath); + }).filter((p: string) => existsSync(p)); + + // Build the active memory content header + let content = `# Active Memory + +> Auto-generated understanding of frequently accessed files using ${tool.toUpperCase()}. +> Last updated: ${new Date().toISOString()} +> Files analyzed: ${hotFiles.length} +> CLI Tool: ${tool} + +--- + +`; + + // Use CCW CLI tool to analyze files + let cliOutput = ''; + + // Build CLI prompt + const cliPrompt = `PURPOSE: Analyze the following hot files and provide a concise understanding of each. +TASK: For each file, describe its purpose, key exports, dependencies, and how it relates to other files. +MODE: analysis +CONTEXT: ${filePaths.map((p: string) => '@' + p).join(' ')} +EXPECTED: Markdown format with ## headings for each file, bullet points for key information. +RULES: Be concise. Focus on practical understanding. Include function signatures for key exports.`; + + // Try to execute CLI using CCW's built-in executor + try { + const syncId = `active-memory-${Date.now()}`; + const result = await executeCliTool({ + tool: tool === 'qwen' ? 'qwen' : 'gemini', + prompt: cliPrompt, + mode: 'analysis', + format: 'plain', + cd: projectPath, + timeout: 120000, + stream: false, + category: 'internal', + id: syncId + }); + + if (result.success && result.execution?.output) { + cliOutput = result.execution.output; + } + + // Add CLI output to content + content += cliOutput + '\n\n---\n\n'; + + } catch (cliErr) { + // Fallback to basic analysis if CLI fails + console.warn('[Active Memory] CLI analysis failed, using basic analysis:', (cliErr as Error).message); + + // Basic analysis fallback + for (const file of hotFiles) { + const fileName = file.value.split(/[/\\]/).pop() || file.value; + const filePath = file.value; + const heat = file.stats?.heat_score || 0; + const readCount = file.stats?.read_count || 0; + const writeCount = file.stats?.write_count || 0; + + content += `## ${fileName} + +- **Path**: \`${filePath}\` +- **Heat Score**: ${heat} +- **Access**: ${readCount} reads, ${writeCount} writes +- **Last Seen**: ${file.last_seen_at || 'Unknown'} + +`; + + // Try to read file and generate summary + try { + const fullPath = isAbsolute(filePath) ? 
filePath : join(projectPath, filePath); + + if (existsSync(fullPath)) { + const stat = statSync(fullPath); + const ext = extname(fullPath).toLowerCase(); + + content += `- **Size**: ${(stat.size / 1024).toFixed(1)} KB\n`; + content += `- **Type**: ${ext || 'unknown'}\n`; + + const textExts = ['.ts', '.js', '.tsx', '.jsx', '.md', '.json', '.css', '.html', '.vue', '.svelte', '.py', '.go', '.rs']; + if (textExts.includes(ext) && stat.size < 100000) { + const fileContent = readFileSync(fullPath, 'utf-8'); + const lines = fileContent.split('\n').slice(0, 30); + + const exports = lines.filter(l => + l.includes('export ') || l.includes('function ') || + l.includes('class ') || l.includes('interface ') + ).slice(0, 8); + + if (exports.length > 0) { + content += `\n**Key Exports**:\n\`\`\`\n${exports.join('\n')}\n\`\`\`\n`; + } + } + } + } catch (fileErr) { + // Skip file analysis errors + } + + content += '\n---\n\n'; + } + } + + // Ensure directory exists + if (!existsSync(rulesDir)) { + mkdirSync(rulesDir, { recursive: true }); + } + + // Write the file + writeFileSync(configPath, content, 'utf-8'); + + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ + success: true, + filesAnalyzed: hotFiles.length, + path: configPath, + usedCli: cliOutput.length > 0 + })); + } catch (error: unknown) { + console.error('[Active Memory] Sync error:', error); + if (!res.headersSent) { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: (error as Error).message })); + } + } + }); + return true; + } + + // API: Memory Module - Get conversations index + if (pathname === '/api/memory/conversations') { + const projectPath = url.searchParams.get('path') || initialPath; + const project = url.searchParams.get('project') || null; + const limit = parseInt(url.searchParams.get('limit') || '20', 10); + + try { + const memoryStore = getMemoryStore(projectPath); + + let conversations; + if (project) { + const stmt = memoryStore['db'].prepare(` + SELECT * FROM conversations + WHERE project_name = ? + ORDER BY updated_at DESC + LIMIT ? + `); + conversations = stmt.all(project, limit); + } else { + conversations = memoryStore.getConversations(limit); + } + + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ conversations })); + } catch (error: unknown) { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: (error as Error).message })); + } + return true; + } + + // API: Memory Module - Replay conversation + if (pathname.startsWith('/api/memory/replay/')) { + const conversationId = pathname.replace('/api/memory/replay/', ''); + const projectPath = url.searchParams.get('path') || initialPath; + + if (!conversationId) { + res.writeHead(400, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: 'Conversation ID is required' })); + return true; + } + + try { + const memoryStore = getMemoryStore(projectPath); + const conversation = memoryStore.getConversation(conversationId); + + if (!conversation) { + res.writeHead(404, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: 'Conversation not found' })); + return true; + } + + const messages = memoryStore.getMessages(conversationId); + + // Enhance messages with tool calls + const messagesWithTools = []; + for (const message of messages) { + const toolCalls = message.id ? 
memoryStore.getToolCalls(message.id) : []; + messagesWithTools.push({ + ...message, + tool_calls: toolCalls + }); + } + + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ + conversation, + messages: messagesWithTools + })); + } catch (error: unknown) { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: (error as Error).message })); + } + return true; + } + + // API: Memory Module - Import history (async task) + if (pathname === '/api/memory/import' && req.method === 'POST') { + handlePostRequest(req, res, async (body) => { + const { source = 'all', project, path: projectPath } = body; + const basePath = projectPath || initialPath; + + // Generate task ID for async operation + const taskId = `import-${Date.now()}`; + + // TODO: Implement actual history import using HistoryImporter + // For now, return a placeholder response + console.log(`[Memory] Import task ${taskId} started: source=${source}, project=${project}`); + + return { + success: true, + taskId, + message: 'Import task started (not yet implemented)', + source, + project + }; + }); + return true; + } + + return false; +} diff --git a/ccw/src/core/routes/rules-routes.ts b/ccw/src/core/routes/rules-routes.ts new file mode 100644 index 00000000..f516898f --- /dev/null +++ b/ccw/src/core/routes/rules-routes.ts @@ -0,0 +1,266 @@ +// @ts-nocheck +/** + * Rules Routes Module + * Handles all Rules-related API endpoints + */ +import type { IncomingMessage, ServerResponse } from 'http'; +import { readFileSync, existsSync, readdirSync, unlinkSync } from 'fs'; +import { join } from 'path'; +import { homedir } from 'os'; + +export interface RouteContext { + pathname: string; + url: URL; + req: IncomingMessage; + res: ServerResponse; + initialPath: string; + handlePostRequest: (req: IncomingMessage, res: ServerResponse, handler: (body: unknown) => Promise) => void; + broadcastToClients: (data: unknown) => void; +} + +/** + * Parse rule frontmatter + * @param {string} content + * @returns {Object} + */ +function parseRuleFrontmatter(content) { + const result = { + paths: [], + content: content + }; + + // Check for YAML frontmatter + if (content.startsWith('---')) { + const endIndex = content.indexOf('---', 3); + if (endIndex > 0) { + const frontmatter = content.substring(3, endIndex).trim(); + result.content = content.substring(endIndex + 3).trim(); + + // Parse frontmatter lines + const lines = frontmatter.split('\n'); + for (const line of lines) { + const colonIndex = line.indexOf(':'); + if (colonIndex > 0) { + const key = line.substring(0, colonIndex).trim().toLowerCase(); + const value = line.substring(colonIndex + 1).trim(); + + if (key === 'paths') { + // Parse as comma-separated or YAML array + result.paths = value.replace(/^\[|\]$/g, '').split(',').map(t => t.trim()).filter(Boolean); + } + } + } + } + } + + return result; +} + +/** + * Recursively scan rules directory for .md files + * @param {string} dirPath + * @param {string} location + * @param {string} subdirectory + * @returns {Object[]} + */ +function scanRulesDirectory(dirPath, location, subdirectory) { + const rules = []; + + try { + const entries = readdirSync(dirPath, { withFileTypes: true }); + for (const entry of entries) { + const fullPath = join(dirPath, entry.name); + + if (entry.isFile() && entry.name.endsWith('.md')) { + const content = readFileSync(fullPath, 'utf8'); + const parsed = parseRuleFrontmatter(content); + + rules.push({ + name: entry.name, + paths: parsed.paths, + content: 
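+          // Illustrative example (assumed rule format, mirroring parseRuleFrontmatter above):
+          // a rule file such as
+          //   ---
+          //   paths: [src/**, lib/**]
+          //   ---
+          //   Always run the linter before committing.
+          // yields paths = ['src/**', 'lib/**'] and content = the body text after the
+          // closing delimiter.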
parsed.content, + location, + path: fullPath, + subdirectory: subdirectory || null + }); + } else if (entry.isDirectory()) { + // Recursively scan subdirectories + const subRules = scanRulesDirectory(fullPath, location, subdirectory ? `${subdirectory}/${entry.name}` : entry.name); + rules.push(...subRules); + } + } + } catch (e) { + // Ignore errors + } + + return rules; +} + +/** + * Get rules configuration from project and user directories + * @param {string} projectPath + * @returns {Object} + */ +function getRulesConfig(projectPath) { + const result = { + projectRules: [], + userRules: [] + }; + + try { + // Project rules: .claude/rules/ + const projectRulesDir = join(projectPath, '.claude', 'rules'); + if (existsSync(projectRulesDir)) { + const rules = scanRulesDirectory(projectRulesDir, 'project', ''); + result.projectRules = rules; + } + + // User rules: ~/.claude/rules/ + const userRulesDir = join(homedir(), '.claude', 'rules'); + if (existsSync(userRulesDir)) { + const rules = scanRulesDirectory(userRulesDir, 'user', ''); + result.userRules = rules; + } + } catch (error) { + console.error('Error reading rules config:', error); + } + + return result; +} + +/** + * Find rule file in directory (including subdirectories) + * @param {string} baseDir + * @param {string} ruleName + * @returns {string|null} + */ +function findRuleFile(baseDir, ruleName) { + try { + // Direct path + const directPath = join(baseDir, ruleName); + if (existsSync(directPath)) { + return directPath; + } + + // Search in subdirectories + const entries = readdirSync(baseDir, { withFileTypes: true }); + for (const entry of entries) { + if (entry.isDirectory()) { + const subPath = findRuleFile(join(baseDir, entry.name), ruleName); + if (subPath) return subPath; + } + } + } catch (e) { + // Ignore errors + } + return null; +} + +/** + * Get single rule detail + * @param {string} ruleName + * @param {string} location - 'project' or 'user' + * @param {string} projectPath + * @returns {Object} + */ +function getRuleDetail(ruleName, location, projectPath) { + try { + const baseDir = location === 'project' + ? join(projectPath, '.claude', 'rules') + : join(homedir(), '.claude', 'rules'); + + // Find the rule file (could be in subdirectory) + const rulePath = findRuleFile(baseDir, ruleName); + + if (!rulePath) { + return { error: 'Rule not found' }; + } + + const content = readFileSync(rulePath, 'utf8'); + const parsed = parseRuleFrontmatter(content); + + return { + rule: { + name: ruleName, + paths: parsed.paths, + content: parsed.content, + location, + path: rulePath + } + }; + } catch (error) { + return { error: (error as Error).message }; + } +} + +/** + * Delete a rule + * @param {string} ruleName + * @param {string} location + * @param {string} projectPath + * @returns {Object} + */ +function deleteRule(ruleName, location, projectPath) { + try { + const baseDir = location === 'project' + ? 
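+    // Illustrative note (not part of the original patch): 'location' selects the
+    // search root -- 'project' maps to <projectPath>/.claude/rules, anything else
+    // to ~/.claude/rules -- and findRuleFile then walks subdirectories recursively.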
join(projectPath, '.claude', 'rules') + : join(homedir(), '.claude', 'rules'); + + const rulePath = findRuleFile(baseDir, ruleName); + + if (!rulePath) { + return { error: 'Rule not found' }; + } + + unlinkSync(rulePath); + + return { success: true, ruleName, location }; + } catch (error) { + return { error: (error as Error).message }; + } +} + +/** + * Handle Rules routes + * @returns true if route was handled, false otherwise + */ +export async function handleRulesRoutes(ctx: RouteContext): Promise { + const { pathname, url, req, res, initialPath, handlePostRequest } = ctx; + + // API: Get all rules + if (pathname === '/api/rules') { + const projectPathParam = url.searchParams.get('path') || initialPath; + const rulesData = getRulesConfig(projectPathParam); + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify(rulesData)); + return true; + } + + // API: Get single rule detail + if (pathname.startsWith('/api/rules/') && req.method === 'GET' && !pathname.endsWith('/rules/')) { + const ruleName = decodeURIComponent(pathname.replace('/api/rules/', '')); + const location = url.searchParams.get('location') || 'project'; + const projectPathParam = url.searchParams.get('path') || initialPath; + const ruleDetail = getRuleDetail(ruleName, location, projectPathParam); + if (ruleDetail.error) { + res.writeHead(404, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify(ruleDetail)); + } else { + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify(ruleDetail)); + } + return true; + } + + // API: Delete rule + if (pathname.startsWith('/api/rules/') && req.method === 'DELETE') { + const ruleName = decodeURIComponent(pathname.replace('/api/rules/', '')); + handlePostRequest(req, res, async (body) => { + const { location, projectPath: projectPathParam } = body; + return deleteRule(ruleName, location, projectPathParam || initialPath); + }); + return true; + } + + return false; +} diff --git a/ccw/src/core/routes/session-routes.ts b/ccw/src/core/routes/session-routes.ts new file mode 100644 index 00000000..eb31a954 --- /dev/null +++ b/ccw/src/core/routes/session-routes.ts @@ -0,0 +1,406 @@ +// @ts-nocheck +/** + * Session Routes Module + * Handles all Session/Task-related API endpoints + */ +import type { IncomingMessage, ServerResponse } from 'http'; +import { readFileSync, writeFileSync, existsSync, readdirSync } from 'fs'; +import { join } from 'path'; + +export interface RouteContext { + pathname: string; + url: URL; + req: IncomingMessage; + res: ServerResponse; + initialPath: string; + handlePostRequest: (req: IncomingMessage, res: ServerResponse, handler: (body: unknown) => Promise) => void; + broadcastToClients: (data: unknown) => void; +} + +/** + * Get session detail data (context, summaries, impl-plan, review) + * @param {string} sessionPath - Path to session directory + * @param {string} dataType - Type of data to load ('all', 'context', 'tasks', 'summary', 'plan', 'explorations', 'conflict', 'impl-plan', 'review') + * @returns {Promise} + */ +async function getSessionDetailData(sessionPath, dataType) { + const result = {}; + + // Normalize path + const normalizedPath = sessionPath.replace(/\\/g, '/'); + + try { + // Load context-package.json (in .process/ subfolder) + if (dataType === 'context' || dataType === 'all') { + // Try .process/context-package.json first (common location) + let contextFile = join(normalizedPath, '.process', 'context-package.json'); + if (!existsSync(contextFile)) { + // Fallback to session 
root + contextFile = join(normalizedPath, 'context-package.json'); + } + if (existsSync(contextFile)) { + try { + result.context = JSON.parse(readFileSync(contextFile, 'utf8')); + } catch (e) { + result.context = null; + } + } + } + + // Load task JSONs from .task/ folder + if (dataType === 'tasks' || dataType === 'all') { + const taskDir = join(normalizedPath, '.task'); + result.tasks = []; + if (existsSync(taskDir)) { + const files = readdirSync(taskDir).filter(f => f.endsWith('.json') && f.startsWith('IMPL-')); + for (const file of files) { + try { + const content = JSON.parse(readFileSync(join(taskDir, file), 'utf8')); + result.tasks.push({ + filename: file, + task_id: file.replace('.json', ''), + ...content + }); + } catch (e) { + // Skip unreadable files + } + } + // Sort by task ID + result.tasks.sort((a, b) => a.task_id.localeCompare(b.task_id)); + } + } + + // Load summaries from .summaries/ + if (dataType === 'summary' || dataType === 'all') { + const summariesDir = join(normalizedPath, '.summaries'); + result.summaries = []; + if (existsSync(summariesDir)) { + const files = readdirSync(summariesDir).filter(f => f.endsWith('.md')); + for (const file of files) { + try { + const content = readFileSync(join(summariesDir, file), 'utf8'); + result.summaries.push({ name: file.replace('.md', ''), content }); + } catch (e) { + // Skip unreadable files + } + } + } + } + + // Load plan.json (for lite tasks) + if (dataType === 'plan' || dataType === 'all') { + const planFile = join(normalizedPath, 'plan.json'); + if (existsSync(planFile)) { + try { + result.plan = JSON.parse(readFileSync(planFile, 'utf8')); + } catch (e) { + result.plan = null; + } + } + } + + // Load explorations (exploration-*.json files) - check .process/ first, then session root + if (dataType === 'context' || dataType === 'explorations' || dataType === 'all') { + result.explorations = { manifest: null, data: {} }; + + // Try .process/ first (standard workflow sessions), then session root (lite tasks) + const searchDirs = [ + join(normalizedPath, '.process'), + normalizedPath + ]; + + for (const searchDir of searchDirs) { + if (!existsSync(searchDir)) continue; + + // Look for explorations-manifest.json + const manifestFile = join(searchDir, 'explorations-manifest.json'); + if (existsSync(manifestFile)) { + try { + result.explorations.manifest = JSON.parse(readFileSync(manifestFile, 'utf8')); + + // Load each exploration file based on manifest + const explorations = result.explorations.manifest.explorations || []; + for (const exp of explorations) { + const expFile = join(searchDir, exp.file); + if (existsSync(expFile)) { + try { + result.explorations.data[exp.angle] = JSON.parse(readFileSync(expFile, 'utf8')); + } catch (e) { + // Skip unreadable exploration files + } + } + } + break; // Found manifest, stop searching + } catch (e) { + result.explorations.manifest = null; + } + } else { + // Fallback: scan for exploration-*.json files directly + try { + const files = readdirSync(searchDir).filter(f => f.startsWith('exploration-') && f.endsWith('.json')); + if (files.length > 0) { + // Create synthetic manifest + result.explorations.manifest = { + exploration_count: files.length, + explorations: files.map((f, i) => ({ + angle: f.replace('exploration-', '').replace('.json', ''), + file: f, + index: i + 1 + })) + }; + + // Load each file + for (const file of files) { + const angle = file.replace('exploration-', '').replace('.json', ''); + try { + result.explorations.data[angle] = JSON.parse(readFileSync(join(searchDir, 
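+              // Illustrative example (assumed file layout): a session directory containing
+              //   exploration-architecture.json, exploration-risks.json
+              // produces the angles 'architecture' and 'risks' in the synthetic manifest,
+              // each loaded below into result.explorations.data[angle].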
file), 'utf8')); + } catch (e) { + // Skip unreadable files + } + } + break; // Found explorations, stop searching + } + } catch (e) { + // Directory read failed + } + } + } + } + + // Load conflict resolution decisions (conflict-resolution-decisions.json) + if (dataType === 'context' || dataType === 'conflict' || dataType === 'all') { + result.conflictResolution = null; + + // Try .process/ first (standard workflow sessions) + const conflictFiles = [ + join(normalizedPath, '.process', 'conflict-resolution-decisions.json'), + join(normalizedPath, 'conflict-resolution-decisions.json') + ]; + + for (const conflictFile of conflictFiles) { + if (existsSync(conflictFile)) { + try { + result.conflictResolution = JSON.parse(readFileSync(conflictFile, 'utf8')); + break; // Found file, stop searching + } catch (e) { + // Skip unreadable file + } + } + } + } + + // Load IMPL_PLAN.md + if (dataType === 'impl-plan' || dataType === 'all') { + const implPlanFile = join(normalizedPath, 'IMPL_PLAN.md'); + if (existsSync(implPlanFile)) { + try { + result.implPlan = readFileSync(implPlanFile, 'utf8'); + } catch (e) { + result.implPlan = null; + } + } + } + + // Load review data from .review/ + if (dataType === 'review' || dataType === 'all') { + const reviewDir = join(normalizedPath, '.review'); + result.review = { + state: null, + dimensions: [], + severityDistribution: null, + totalFindings: 0 + }; + + if (existsSync(reviewDir)) { + // Load review-state.json + const stateFile = join(reviewDir, 'review-state.json'); + if (existsSync(stateFile)) { + try { + const state = JSON.parse(readFileSync(stateFile, 'utf8')); + result.review.state = state; + result.review.severityDistribution = state.severity_distribution || {}; + result.review.totalFindings = state.total_findings || 0; + result.review.phase = state.phase || 'unknown'; + result.review.dimensionSummaries = state.dimension_summaries || {}; + result.review.crossCuttingConcerns = state.cross_cutting_concerns || []; + result.review.criticalFiles = state.critical_files || []; + } catch (e) { + // Skip unreadable state + } + } + + // Load dimension findings + const dimensionsDir = join(reviewDir, 'dimensions'); + if (existsSync(dimensionsDir)) { + const files = readdirSync(dimensionsDir).filter(f => f.endsWith('.json')); + for (const file of files) { + try { + const dimName = file.replace('.json', ''); + const data = JSON.parse(readFileSync(join(dimensionsDir, file), 'utf8')); + + // Handle array structure: [ { findings: [...] 
} ] + let findings = []; + let summary = null; + + if (Array.isArray(data) && data.length > 0) { + const dimData = data[0]; + findings = dimData.findings || []; + summary = dimData.summary || null; + } else if (data.findings) { + findings = data.findings; + summary = data.summary || null; + } + + result.review.dimensions.push({ + name: dimName, + findings: findings, + summary: summary, + count: findings.length + }); + } catch (e) { + // Skip unreadable files + } + } + } + } + } + + } catch (error: unknown) { + console.error('Error loading session detail:', error); + result.error = (error as Error).message; + } + + return result; +} + +/** + * Update task status in a task JSON file + * @param {string} sessionPath - Path to session directory + * @param {string} taskId - Task ID (e.g., IMPL-001) + * @param {string} newStatus - New status (pending, in_progress, completed) + * @returns {Promise} + */ +async function updateTaskStatus(sessionPath, taskId, newStatus) { + // Normalize path (handle both forward and back slashes) + let normalizedPath = sessionPath.replace(/\\/g, '/'); + + // Handle Windows drive letter format + if (normalizedPath.match(/^[a-zA-Z]:\//)) { + // Already in correct format + } else if (normalizedPath.match(/^\/[a-zA-Z]\//)) { + // Convert /D/path to D:/path + normalizedPath = normalizedPath.charAt(1).toUpperCase() + ':' + normalizedPath.slice(2); + } + + const taskDir = join(normalizedPath, '.task'); + + // Check if task directory exists + if (!existsSync(taskDir)) { + throw new Error(`Task directory not found: ${taskDir}`); + } + + // Try to find the task file + let taskFile = join(taskDir, `${taskId}.json`); + + if (!existsSync(taskFile)) { + // Try without .json if taskId already has it + if (taskId.endsWith('.json')) { + taskFile = join(taskDir, taskId); + } + if (!existsSync(taskFile)) { + throw new Error(`Task file not found: ${taskId}.json in ${taskDir}`); + } + } + + try { + const content = JSON.parse(readFileSync(taskFile, 'utf8')); + const oldStatus = content.status || 'pending'; + content.status = newStatus; + + // Add status change timestamp + if (!content.status_history) { + content.status_history = []; + } + content.status_history.push({ + from: oldStatus, + to: newStatus, + changed_at: new Date().toISOString() + }); + + writeFileSync(taskFile, JSON.stringify(content, null, 2), 'utf8'); + + return { + success: true, + taskId, + oldStatus, + newStatus, + file: taskFile + }; + } catch (error: unknown) { + throw new Error(`Failed to update task ${taskId}: ${(error as Error).message}`); + } +} + +/** + * Handle Session routes + * @returns true if route was handled, false otherwise + */ +export async function handleSessionRoutes(ctx: RouteContext): Promise { + const { pathname, url, req, res, handlePostRequest } = ctx; + + // API: Get session detail data (context, summaries, impl-plan, review) + if (pathname === '/api/session-detail') { + const sessionPath = url.searchParams.get('path'); + const dataType = url.searchParams.get('type') || 'all'; + + if (!sessionPath) { + res.writeHead(400, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: 'Session path is required' })); + return true; + } + + const detail = await getSessionDetailData(sessionPath, dataType); + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify(detail)); + return true; + } + + // API: Update task status + if (pathname === '/api/update-task-status' && req.method === 'POST') { + handlePostRequest(req, res, async (body) => { + const { 
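+      // Illustrative request sketch (assumed values, matching the destructuring below):
+      //   POST /api/update-task-status
+      //   { "sessionPath": "/repo/.workflow/some-session",
+      //     "taskId": "IMPL-001", "newStatus": "completed" }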
sessionPath, taskId, newStatus } = body; + + if (!sessionPath || !taskId || !newStatus) { + return { error: 'sessionPath, taskId, and newStatus are required', status: 400 }; + } + + return await updateTaskStatus(sessionPath, taskId, newStatus); + }); + return true; + } + + // API: Bulk update task status + if (pathname === '/api/bulk-update-task-status' && req.method === 'POST') { + handlePostRequest(req, res, async (body) => { + const { sessionPath, taskIds, newStatus } = body; + + if (!sessionPath || !taskIds || !newStatus) { + return { error: 'sessionPath, taskIds, and newStatus are required', status: 400 }; + } + + const results = []; + for (const taskId of taskIds) { + try { + const result = await updateTaskStatus(sessionPath, taskId, newStatus); + results.push(result); + } catch (err) { + results.push({ taskId, error: err.message }); + } + } + return { success: true, results }; + }); + return true; + } + + return false; +} diff --git a/ccw/src/core/routes/skills-routes.ts b/ccw/src/core/routes/skills-routes.ts new file mode 100644 index 00000000..4d130f87 --- /dev/null +++ b/ccw/src/core/routes/skills-routes.ts @@ -0,0 +1,300 @@ +// @ts-nocheck +/** + * Skills Routes Module + * Handles all Skills-related API endpoints + */ +import type { IncomingMessage, ServerResponse } from 'http'; +import { readFileSync, existsSync, readdirSync, statSync, unlinkSync, promises as fsPromises } from 'fs'; +import { join } from 'path'; +import { homedir } from 'os'; + +export interface RouteContext { + pathname: string; + url: URL; + req: IncomingMessage; + res: ServerResponse; + initialPath: string; + handlePostRequest: (req: IncomingMessage, res: ServerResponse, handler: (body: unknown) => Promise) => void; + broadcastToClients: (data: unknown) => void; +} + +// ========== Skills Helper Functions ========== + +/** + * Parse skill frontmatter (YAML header) + * @param {string} content - Skill file content + * @returns {Object} Parsed frontmatter and content + */ +function parseSkillFrontmatter(content) { + const result = { + name: '', + description: '', + version: null, + allowedTools: [], + content: '' + }; + + // Check for YAML frontmatter + if (content.startsWith('---')) { + const endIndex = content.indexOf('---', 3); + if (endIndex > 0) { + const frontmatter = content.substring(3, endIndex).trim(); + result.content = content.substring(endIndex + 3).trim(); + + // Parse frontmatter lines + const lines = frontmatter.split('\n'); + for (const line of lines) { + const colonIndex = line.indexOf(':'); + if (colonIndex > 0) { + const key = line.substring(0, colonIndex).trim().toLowerCase(); + const value = line.substring(colonIndex + 1).trim(); + + if (key === 'name') { + result.name = value.replace(/^["']|["']$/g, ''); + } else if (key === 'description') { + result.description = value.replace(/^["']|["']$/g, ''); + } else if (key === 'version') { + result.version = value.replace(/^["']|["']$/g, ''); + } else if (key === 'allowed-tools' || key === 'allowedtools') { + // Parse as comma-separated or YAML array + result.allowedTools = value.replace(/^\[|\]$/g, '').split(',').map(t => t.trim()).filter(Boolean); + } + } + } + } + } else { + result.content = content; + } + + return result; +} + +/** + * Get list of supporting files for a skill + * @param {string} skillDir + * @returns {string[]} + */ +function getSupportingFiles(skillDir) { + const files = []; + try { + const entries = readdirSync(skillDir, { withFileTypes: true }); + for (const entry of entries) { + if (entry.name !== 'SKILL.md') { + if 
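+      // Illustrative example (assumed layout, not part of the original patch):
+      // a skill directory such as
+      //   my-skill/SKILL.md, my-skill/prompts/, my-skill/helper.py
+      // reports supportingFiles like ['prompts/', 'helper.py'] (SKILL.md excluded,
+      // directories suffixed with '/').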
(entry.isFile()) { + files.push(entry.name); + } else if (entry.isDirectory()) { + files.push(entry.name + '/'); + } + } + } + } catch (e) { + // Ignore errors + } + return files; +} + +/** + * Get skills configuration from project and user directories + * @param {string} projectPath + * @returns {Object} + */ +function getSkillsConfig(projectPath) { + const result = { + projectSkills: [], + userSkills: [] + }; + + try { + // Project skills: .claude/skills/ + const projectSkillsDir = join(projectPath, '.claude', 'skills'); + if (existsSync(projectSkillsDir)) { + const skills = readdirSync(projectSkillsDir, { withFileTypes: true }); + for (const skill of skills) { + if (skill.isDirectory()) { + const skillMdPath = join(projectSkillsDir, skill.name, 'SKILL.md'); + if (existsSync(skillMdPath)) { + const content = readFileSync(skillMdPath, 'utf8'); + const parsed = parseSkillFrontmatter(content); + + // Get supporting files + const skillDir = join(projectSkillsDir, skill.name); + const supportingFiles = getSupportingFiles(skillDir); + + result.projectSkills.push({ + name: parsed.name || skill.name, + description: parsed.description, + version: parsed.version, + allowedTools: parsed.allowedTools, + location: 'project', + path: skillDir, + supportingFiles + }); + } + } + } + } + + // User skills: ~/.claude/skills/ + const userSkillsDir = join(homedir(), '.claude', 'skills'); + if (existsSync(userSkillsDir)) { + const skills = readdirSync(userSkillsDir, { withFileTypes: true }); + for (const skill of skills) { + if (skill.isDirectory()) { + const skillMdPath = join(userSkillsDir, skill.name, 'SKILL.md'); + if (existsSync(skillMdPath)) { + const content = readFileSync(skillMdPath, 'utf8'); + const parsed = parseSkillFrontmatter(content); + + // Get supporting files + const skillDir = join(userSkillsDir, skill.name); + const supportingFiles = getSupportingFiles(skillDir); + + result.userSkills.push({ + name: parsed.name || skill.name, + description: parsed.description, + version: parsed.version, + allowedTools: parsed.allowedTools, + location: 'user', + path: skillDir, + supportingFiles + }); + } + } + } + } + } catch (error) { + console.error('Error reading skills config:', error); + } + + return result; +} + +/** + * Get single skill detail + * @param {string} skillName + * @param {string} location - 'project' or 'user' + * @param {string} projectPath + * @returns {Object} + */ +function getSkillDetail(skillName, location, projectPath) { + try { + const baseDir = location === 'project' + ? join(projectPath, '.claude', 'skills') + : join(homedir(), '.claude', 'skills'); + + const skillDir = join(baseDir, skillName); + const skillMdPath = join(skillDir, 'SKILL.md'); + + if (!existsSync(skillMdPath)) { + return { error: 'Skill not found' }; + } + + const content = readFileSync(skillMdPath, 'utf8'); + const parsed = parseSkillFrontmatter(content); + const supportingFiles = getSupportingFiles(skillDir); + + return { + skill: { + name: parsed.name || skillName, + description: parsed.description, + version: parsed.version, + allowedTools: parsed.allowedTools, + content: parsed.content, + location, + path: skillDir, + supportingFiles + } + }; + } catch (error) { + return { error: (error as Error).message }; + } +} + +/** + * Delete a skill + * @param {string} skillName + * @param {string} location + * @param {string} projectPath + * @returns {Object} + */ +function deleteSkill(skillName, location, projectPath) { + try { + const baseDir = location === 'project' + ? 
join(projectPath, '.claude', 'skills')
+      : join(homedir(), '.claude', 'skills');
+
+    const skillDir = join(baseDir, skillName);
+
+    if (!existsSync(skillDir)) {
+      return { error: 'Skill not found' };
+    }
+
+    // Recursively delete the skill directory; fsPromises.rm removes nested
+    // files and subdirectories in a single operation. Resolve to the usual
+    // result object so the async route handler can simply await this call.
+    return fsPromises.rm(skillDir, { recursive: true, force: true })
+      .then(() => ({ success: true, skillName, location }))
+      .catch((error) => ({ error: (error as Error).message }));
+  } catch (error) {
+    return { error: (error as Error).message };
+  }
+}
+
+// ========== Skills API Routes ==========
+
+/**
+ * Handle Skills routes
+ * @returns true if route was handled, false otherwise
+ */
+export async function handleSkillsRoutes(ctx: RouteContext): Promise<boolean> {
+  const { pathname, url, req, res, initialPath, handlePostRequest } = ctx;
+
+  // API: Get all skills (project and user)
+  if (pathname === '/api/skills') {
+    const projectPathParam = url.searchParams.get('path') || initialPath;
+    const skillsData = getSkillsConfig(projectPathParam);
+    res.writeHead(200, { 'Content-Type': 'application/json' });
+    res.end(JSON.stringify(skillsData));
+    return true;
+  }
+
+  // API: Get single skill detail
+  if (pathname.startsWith('/api/skills/') && req.method === 'GET' && !pathname.endsWith('/skills/')) {
+    const skillName = decodeURIComponent(pathname.replace('/api/skills/', ''));
+    const location = url.searchParams.get('location') || 'project';
+    const projectPathParam = url.searchParams.get('path') || initialPath;
+    const skillDetail = getSkillDetail(skillName, location, projectPathParam);
+    if (skillDetail.error) {
+      res.writeHead(404, { 'Content-Type': 'application/json' });
+      res.end(JSON.stringify(skillDetail));
+    } else {
+      res.writeHead(200, { 'Content-Type': 'application/json' });
+      res.end(JSON.stringify(skillDetail));
+    }
+    return true;
+  }
+
+  // API: Delete skill
+  if (pathname.startsWith('/api/skills/') && req.method === 'DELETE') {
+    const skillName = decodeURIComponent(pathname.replace('/api/skills/', ''));
+    handlePostRequest(req, res, async (body) => {
+      const { location, projectPath: projectPathParam } = body;
+      return deleteSkill(skillName, location, projectPathParam || initialPath);
+    });
+    return true;
+  }
+
+  return false;
+}
diff --git a/ccw/src/core/routes/system-routes.ts b/ccw/src/core/routes/system-routes.ts
new file mode 100644
index 00000000..8b29d3d4
--- /dev/null
+++ b/ccw/src/core/routes/system-routes.ts
@@ -0,0 +1,329 @@
+// @ts-nocheck
+/**
+ * System Routes Module
+ * Handles all system-related API endpoints
+ */
+import type { IncomingMessage, ServerResponse } from 'http';
+import type { Server } from 'http';
+import { readFileSync, existsSync, promises as fsPromises } from 'fs';
+import { join } from 'path';
+import { resolvePath, getRecentPaths, trackRecentPath, removeRecentPath, normalizePathForDisplay } from '../../utils/path-resolver.js';
+import { scanSessions } from '../session-scanner.js';
+import { aggregateData } from '../data-aggregator.js';
+
+export interface RouteContext {
+  pathname: string;
+  url: URL;
+  req: IncomingMessage;
+  res: ServerResponse;
+  initialPath: string;
+  handlePostRequest: (req: IncomingMessage, res: ServerResponse, handler: (body: unknown) => Promise) => void;
+  broadcastToClients: (data: unknown) => void;
+  server: Server;
+}
+
+// ========================================
+// Helper Functions
+// ======================================== + +// Package name on npm registry +const NPM_PACKAGE_NAME = 'claude-code-workflow'; + +// Cache for version check (avoid too frequent requests) +let versionCheckCache = null; +let versionCheckTime = 0; +const VERSION_CHECK_CACHE_TTL = 3600000; // 1 hour + +/** + * Get current package version from package.json + * @returns {string} + */ +function getCurrentVersion(): string { + try { + const packageJsonPath = join(import.meta.dirname, '../../../package.json'); + if (existsSync(packageJsonPath)) { + const pkg = JSON.parse(readFileSync(packageJsonPath, 'utf8')); + return pkg.version || '0.0.0'; + } + } catch (e) { + console.error('Error reading package.json:', e); + } + return '0.0.0'; +} + +/** + * Compare two semver versions + * @param {string} v1 + * @param {string} v2 + * @returns {number} 1 if v1 > v2, -1 if v1 < v2, 0 if equal + */ +function compareVersions(v1: string, v2: string): number { + const parts1 = v1.split('.').map(Number); + const parts2 = v2.split('.').map(Number); + + for (let i = 0; i < 3; i++) { + const p1 = parts1[i] || 0; + const p2 = parts2[i] || 0; + if (p1 > p2) return 1; + if (p1 < p2) return -1; + } + return 0; +} + +/** + * Check npm registry for latest version + * @returns {Promise} + */ +async function checkNpmVersion(): Promise { + // Return cached result if still valid + const now = Date.now(); + if (versionCheckCache && (now - versionCheckTime) < VERSION_CHECK_CACHE_TTL) { + return versionCheckCache; + } + + const currentVersion = getCurrentVersion(); + + try { + // Fetch latest version from npm registry + const npmUrl = 'https://registry.npmjs.org/' + encodeURIComponent(NPM_PACKAGE_NAME) + '/latest'; + const response = await fetch(npmUrl, { + headers: { 'Accept': 'application/json' } + }); + + if (!response.ok) { + throw new Error('HTTP ' + response.status); + } + + const data = await response.json(); + const latestVersion = data.version; + + // Compare versions + const hasUpdate = compareVersions(latestVersion, currentVersion) > 0; + + const result = { + currentVersion, + latestVersion, + hasUpdate, + packageName: NPM_PACKAGE_NAME, + updateCommand: 'npm update -g ' + NPM_PACKAGE_NAME, + checkedAt: new Date().toISOString() + }; + + // Cache the result + versionCheckCache = result; + versionCheckTime = now; + + return result; + } catch (error: unknown) { + console.error('Version check failed:', (error as Error).message); + return { + currentVersion, + latestVersion: null, + hasUpdate: false, + error: (error as Error).message, + checkedAt: new Date().toISOString() + }; + } +} + +/** + * Get workflow data for a project path + * @param {string} projectPath + * @returns {Promise} + */ +async function getWorkflowData(projectPath: string): Promise { + const resolvedPath = resolvePath(projectPath); + const workflowDir = join(resolvedPath, '.workflow'); + + // Track this path + trackRecentPath(resolvedPath); + + // Check if .workflow exists + if (!existsSync(workflowDir)) { + return { + generatedAt: new Date().toISOString(), + activeSessions: [], + archivedSessions: [], + liteTasks: { litePlan: [], liteFix: [] }, + reviewData: { dimensions: {} }, + projectOverview: null, + statistics: { + totalSessions: 0, + activeSessions: 0, + totalTasks: 0, + completedTasks: 0, + reviewFindings: 0, + litePlanCount: 0, + liteFixCount: 0 + }, + projectPath: normalizePathForDisplay(resolvedPath), + recentPaths: getRecentPaths() + }; + } + + // Scan and aggregate data + const sessions = await scanSessions(workflowDir); + const data = await 
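+  // Illustrative note (not part of the original patch): aggregateData is expected
+  // to return the same shape as the empty fallback above (activeSessions,
+  // archivedSessions, liteTasks, reviewData, statistics, ...), so the dashboard
+  // can render either payload without special-casing the missing-.workflow case.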
aggregateData(sessions, workflowDir); + + data.projectPath = normalizePathForDisplay(resolvedPath); + data.recentPaths = getRecentPaths(); + + return data; +} + +// ======================================== +// Route Handler +// ======================================== + +/** + * Handle System routes + * @returns true if route was handled, false otherwise + */ +export async function handleSystemRoutes(ctx: RouteContext): Promise { + const { pathname, url, req, res, initialPath, handlePostRequest, broadcastToClients, server } = ctx; + + // API: Get workflow data for a path + if (pathname === '/api/data') { + const projectPath = url.searchParams.get('path') || initialPath; + const data = await getWorkflowData(projectPath); + + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify(data)); + return true; + } + + // API: Get recent paths + if (pathname === '/api/recent-paths') { + const paths = getRecentPaths(); + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ paths })); + return true; + } + + // API: Switch workspace path (for ccw view command) + if (pathname === '/api/switch-path') { + const newPath = url.searchParams.get('path'); + if (!newPath) { + res.writeHead(400, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: 'Path is required' })); + return true; + } + + const resolved = resolvePath(newPath); + if (!existsSync(resolved)) { + res.writeHead(404, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: 'Path does not exist' })); + return true; + } + + // Track the path and return success + trackRecentPath(resolved); + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ + success: true, + path: resolved, + recentPaths: getRecentPaths() + })); + return true; + } + + // API: Health check (for ccw view to detect running server) + if (pathname === '/api/health') { + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ status: 'ok', timestamp: Date.now() })); + return true; + } + + // API: Version check (check for npm updates) + if (pathname === '/api/version-check') { + const versionData = await checkNpmVersion(); + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify(versionData)); + return true; + } + + // API: Shutdown server (for ccw stop command) + if (pathname === '/api/shutdown' && req.method === 'POST') { + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ status: 'shutting_down' })); + + // Graceful shutdown + console.log('\n Received shutdown signal...'); + setTimeout(() => { + server.close(() => { + console.log(' Server stopped.\n'); + process.exit(0); + }); + // Force exit after 3 seconds if graceful shutdown fails + setTimeout(() => process.exit(0), 3000); + }, 100); + return true; + } + + // API: Remove a recent path + if (pathname === '/api/remove-recent-path' && req.method === 'POST') { + handlePostRequest(req, res, async (body) => { + const { path } = body as { path?: string }; + if (!path) { + return { error: 'path is required', status: 400 }; + } + const removed = removeRecentPath(path); + return { success: removed, paths: getRecentPaths() }; + }); + return true; + } + + // API: Read a JSON file (for fix progress tracking) + if (pathname === '/api/file') { + const filePath = url.searchParams.get('path'); + if (!filePath) { + res.writeHead(400, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: 'File 
path is required' })); + return true; + } + + try { + const content = await fsPromises.readFile(filePath, 'utf-8'); + const json = JSON.parse(content); + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify(json)); + } catch (err) { + res.writeHead(404, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: 'File not found or invalid JSON' })); + } + return true; + } + + // API: System notify - CLI to Server communication bridge + // Allows CLI commands to trigger WebSocket broadcasts for UI updates + if (pathname === '/api/system/notify' && req.method === 'POST') { + handlePostRequest(req, res, async (body) => { + const { type, scope, data } = body as { + type: 'REFRESH_REQUIRED' | 'MEMORY_UPDATED' | 'HISTORY_UPDATED' | 'INSIGHT_GENERATED'; + scope: 'memory' | 'history' | 'insights' | 'all'; + data?: Record; + }; + + if (!type || !scope) { + return { error: 'type and scope are required', status: 400 }; + } + + // Map CLI notification types to WebSocket broadcast format + const notification = { + type, + payload: { + scope, + timestamp: new Date().toISOString(), + ...data + } + }; + + broadcastToClients(notification); + + return { success: true, broadcast: true }; + }); + return true; + } + + return false; +} diff --git a/ccw/src/core/server.ts b/ccw/src/core/server.ts index 249c2ee5..674bf9e8 100644 --- a/ccw/src/core/server.ts +++ b/ccw/src/core/server.ts @@ -1,43 +1,44 @@ // @ts-nocheck import http from 'http'; import { URL } from 'url'; -import { readFileSync, writeFileSync, existsSync, readdirSync, mkdirSync, statSync, unlinkSync, promises as fsPromises } from 'fs'; -import { join, dirname, isAbsolute, extname } from 'path'; -import { homedir } from 'os'; -import { createHash } from 'crypto'; -import { scanSessions } from './session-scanner.js'; -import { aggregateData } from './data-aggregator.js'; -import { resolvePath, getRecentPaths, trackRecentPath, removeRecentPath, normalizePathForDisplay, getWorkflowDir } from '../utils/path-resolver.js'; -import { getCliToolsStatus, getExecutionHistory, getExecutionHistoryAsync, getExecutionDetail, getConversationDetail, deleteExecution, deleteExecutionAsync, batchDeleteExecutionsAsync, executeCliTool, getNativeSessionContent, getFormattedNativeConversation, getEnrichedConversation, getHistoryWithNativeInfo } from '../tools/cli-executor.js'; -import { getAllManifests } from './manifest.js'; -import { checkVenvStatus, bootstrapVenv, executeCodexLens, checkSemanticStatus, installSemantic } from '../tools/codex-lens.js'; -import { generateSmartContext, formatSmartContext } from '../tools/smart-context.js'; -import { listTools } from '../tools/index.js'; -import { getMemoryStore } from './memory-store.js'; -import type { ServerConfig } from '../types/config.js';interface ServerOptions { port?: number; initialPath?: string; host?: string; open?: boolean;}interface PostResult { error?: string; status?: number; [key: string]: unknown;}type PostHandler = (body: unknown) => Promise; +import { readFileSync, existsSync } from 'fs'; +import { join } from 'path'; +import { resolvePath, getRecentPaths, normalizePathForDisplay } from '../utils/path-resolver.js'; -// Claude config file paths -const CLAUDE_CONFIG_PATH = join(homedir(), '.claude.json'); -const CLAUDE_SETTINGS_DIR = join(homedir(), '.claude'); -const CLAUDE_GLOBAL_SETTINGS = join(CLAUDE_SETTINGS_DIR, 'settings.json'); -const CLAUDE_GLOBAL_SETTINGS_LOCAL = join(CLAUDE_SETTINGS_DIR, 'settings.local.json'); +// Import route handlers +import { 
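+// Illustrative note (not part of the original patch): each route module exports a
+// handler that resolves to true when it served the request, so the dispatcher can
+// try modules in order and fall through to static assets otherwise.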
handleCliRoutes } from './routes/cli-routes.js'; +import { handleMemoryRoutes } from './routes/memory-routes.js'; +import { handleMcpRoutes } from './routes/mcp-routes.js'; +import { handleHooksRoutes } from './routes/hooks-routes.js'; +import { handleCodexLensRoutes } from './routes/codexlens-routes.js'; +import { handleSystemRoutes } from './routes/system-routes.js'; +import { handleFilesRoutes } from './routes/files-routes.js'; +import { handleSkillsRoutes } from './routes/skills-routes.js'; +import { handleRulesRoutes } from './routes/rules-routes.js'; +import { handleSessionRoutes } from './routes/session-routes.js'; +import { handleCcwRoutes } from './routes/ccw-routes.js'; -// Enterprise managed MCP paths (platform-specific) -function getEnterpriseMcpPath(): string { - const platform = process.platform; - if (platform === 'darwin') { - return '/Library/Application Support/ClaudeCode/managed-mcp.json'; - } else if (platform === 'win32') { - return 'C:\\Program Files\\ClaudeCode\\managed-mcp.json'; - } else { - // Linux and WSL - return '/etc/claude-code/managed-mcp.json'; - } +// Import WebSocket handling +import { handleWebSocketUpgrade, broadcastToClients } from './websocket.js'; + +import type { ServerConfig } from '../types/config.js'; + +interface ServerOptions { + port?: number; + initialPath?: string; + host?: string; + open?: boolean; } -// WebSocket clients for real-time notifications -const wsClients = new Set(); +interface PostResult { + error?: string; + status?: number; + [key: string]: unknown; +} +type PostHandler = (body: unknown) => Promise; + +// Template paths const TEMPLATE_PATH = join(import.meta.dirname, '../../src/templates/dashboard.html'); const MODULE_CSS_DIR = join(import.meta.dirname, '../../src/templates/dashboard-css'); const JS_FILE = join(import.meta.dirname, '../../src/templates/dashboard.js'); @@ -61,32 +62,6 @@ const MODULE_CSS_FILES = [ '12-skills-rules.css' ]; -/** - * Handle POST request with JSON body - */ -function handlePostRequest(req: http.IncomingMessage, res: http.ServerResponse, handler: PostHandler): void { - let body = ''; - req.on('data', chunk => { body += chunk; }); - req.on('end', async () => { - try { - const parsed = JSON.parse(body); - const result = await handler(parsed); - - if (result.error) { - const status = result.status || 500; - res.writeHead(status, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: result.error })); - } else { - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify(result)); - } - } catch (error: unknown) { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: (error as Error).message })); - } - }); -} - // Modular JS files in dependency order const MODULE_FILES = [ 'i18n.js', // Must be loaded first for translations @@ -131,6 +106,89 @@ const MODULE_FILES = [ 'views/rules-manager.js', 'main.js' ]; + +/** + * Handle POST request with JSON body + */ +function handlePostRequest(req: http.IncomingMessage, res: http.ServerResponse, handler: PostHandler): void { + let body = ''; + req.on('data', chunk => { body += chunk; }); + req.on('end', async () => { + try { + const parsed = JSON.parse(body); + const result = await handler(parsed); + + if (result.error) { + const status = result.status || 500; + res.writeHead(status, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: result.error })); + } else { + res.writeHead(200, { 'Content-Type': 'application/json' }); + 
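+        // Illustrative note (not part of the original patch): handlers signal
+        // failure by returning { error, status? } instead of throwing, e.g.
+        //   return { error: 'path is required', status: 400 };
+        // any other result object is serialized back to the client as-is here.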
res.end(JSON.stringify(result)); + } + } catch (error: unknown) { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: (error as Error).message })); + } + }); +} + +/** + * Generate dashboard HTML with embedded CSS and JS + */ +function generateServerDashboard(initialPath: string): string { + let html = readFileSync(TEMPLATE_PATH, 'utf8'); + + // Read and concatenate modular CSS files in load order + const cssContent = MODULE_CSS_FILES.map(file => { + const filePath = join(MODULE_CSS_DIR, file); + return existsSync(filePath) ? readFileSync(filePath, 'utf8') : ''; + }).join('\n\n'); + + // Read and concatenate modular JS files in dependency order + let jsContent = MODULE_FILES.map(file => { + const filePath = join(MODULE_JS_DIR, file); + return existsSync(filePath) ? readFileSync(filePath, 'utf8') : ''; + }).join('\n\n'); + + // Inject CSS content + html = html.replace('{{CSS_CONTENT}}', cssContent); + + // Prepare JS content with empty initial data (will be loaded dynamically) + const emptyData = { + generatedAt: new Date().toISOString(), + activeSessions: [], + archivedSessions: [], + liteTasks: { litePlan: [], liteFix: [] }, + reviewData: { dimensions: {} }, + projectOverview: null, + statistics: { totalSessions: 0, activeSessions: 0, totalTasks: 0, completedTasks: 0, reviewFindings: 0, litePlanCount: 0, liteFixCount: 0 } + }; + + // Replace JS placeholders + jsContent = jsContent.replace('{{WORKFLOW_DATA}}', JSON.stringify(emptyData, null, 2)); + jsContent = jsContent.replace(/\{\{PROJECT_PATH\}\}/g, normalizePathForDisplay(initialPath).replace(/\\/g, '/')); + jsContent = jsContent.replace('{{RECENT_PATHS}}', JSON.stringify(getRecentPaths())); + + // Add server mode flag at the start of JS + const serverModeScript = ` +// Server mode - load data dynamically +window.SERVER_MODE = true; +window.INITIAL_PATH = '${normalizePathForDisplay(initialPath).replace(/\\/g, '/')}'; +`; + + // Prepend server mode script to JS content + jsContent = serverModeScript + jsContent; + + // Inject JS content + html = html.replace('{{JS_CONTENT}}', jsContent); + + // Replace any remaining placeholders in HTML + html = html.replace(/\{\{PROJECT_PATH\}\}/g, normalizePathForDisplay(initialPath).replace(/\\/g, '/')); + + return html; +} + /** * Create and start the dashboard server * @param {Object} options - Server options @@ -148,7 +206,7 @@ export async function startServer(options: ServerOptions = {}): Promise { - server.close(() => { - console.log(' Server stopped.\n'); - process.exit(0); - }); - // Force exit after 3 seconds if graceful shutdown fails - setTimeout(() => process.exit(0), 3000); - }, 100); - return; - } - - // API: Remove a recent path - if (pathname === '/api/remove-recent-path' && req.method === 'POST') { - handlePostRequest(req, res, async (body) => { - const { path } = body; - if (!path) { - return { error: 'path is required', status: 400 }; - } - const removed = removeRecentPath(path); - return { success: removed, paths: getRecentPaths() }; - }); - return; - } - - // API: Read a JSON file (for fix progress tracking) - if (pathname === '/api/file') { - const filePath = url.searchParams.get('path'); - if (!filePath) { - res.writeHead(400, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: 'File path is required' })); - return; - } - - try { - const content = await fsPromises.readFile(filePath, 'utf-8'); - const json = JSON.parse(content); - res.writeHead(200, { 'Content-Type': 'application/json' }); - 
res.end(JSON.stringify(json)); - } catch (err) { - res.writeHead(404, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: 'File not found or invalid JSON' })); - } - return; - } - - // API: Get session detail data (context, summaries, impl-plan, review) - if (pathname === '/api/session-detail') { - const sessionPath = url.searchParams.get('path'); - const dataType = url.searchParams.get('type') || 'all'; - - if (!sessionPath) { - res.writeHead(400, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: 'Session path is required' })); - return; - } - - const detail = await getSessionDetailData(sessionPath, dataType); - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify(detail)); - return; - } - - // API: Update task status - if (pathname === '/api/update-task-status' && req.method === 'POST') { - handlePostRequest(req, res, async (body) => { - const { sessionPath, taskId, newStatus } = body; - - if (!sessionPath || !taskId || !newStatus) { - return { error: 'sessionPath, taskId, and newStatus are required', status: 400 }; - } - - return await updateTaskStatus(sessionPath, taskId, newStatus); - }); - return; - } - - // API: Bulk update task status - if (pathname === '/api/bulk-update-task-status' && req.method === 'POST') { - handlePostRequest(req, res, async (body) => { - const { sessionPath, taskIds, newStatus } = body; - - if (!sessionPath || !taskIds || !newStatus) { - return { error: 'sessionPath, taskIds, and newStatus are required', status: 400 }; - } - - const results = []; - for (const taskId of taskIds) { - try { - const result = await updateTaskStatus(sessionPath, taskId, newStatus); - results.push(result); - } catch (err) { - results.push({ taskId, error: err.message }); - } - } - return { success: true, results }; - }); - return; - } - - // API: Get MCP configuration - if (pathname === '/api/mcp-config') { - const mcpData = getMcpConfig(); - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify(mcpData)); - return; - } - - // API: Toggle MCP server enabled/disabled - if (pathname === '/api/mcp-toggle' && req.method === 'POST') { - handlePostRequest(req, res, async (body) => { - const { projectPath, serverName, enable } = body; - if (!projectPath || !serverName) { - return { error: 'projectPath and serverName are required', status: 400 }; - } - return toggleMcpServerEnabled(projectPath, serverName, enable); - }); - return; - } - - // API: Copy MCP server to project - if (pathname === '/api/mcp-copy-server' && req.method === 'POST') { - handlePostRequest(req, res, async (body) => { - const { projectPath, serverName, serverConfig } = body; - if (!projectPath || !serverName || !serverConfig) { - return { error: 'projectPath, serverName, and serverConfig are required', status: 400 }; - } - return addMcpServerToProject(projectPath, serverName, serverConfig); - }); - return; - } - - // API: Install CCW MCP server to project - if (pathname === '/api/mcp-install-ccw' && req.method === 'POST') { - handlePostRequest(req, res, async (body) => { - const { projectPath } = body; - if (!projectPath) { - return { error: 'projectPath is required', status: 400 }; - } - - // Generate CCW MCP server config - const ccwMcpConfig = { - command: "ccw-mcp", - args: [] - }; - - // Use existing addMcpServerToProject to install CCW MCP - return addMcpServerToProject(projectPath, 'ccw-mcp', ccwMcpConfig); - }); - return; - } - - // API: Remove MCP server from project - if (pathname === 
'/api/mcp-remove-server' && req.method === 'POST') { - handlePostRequest(req, res, async (body) => { - const { projectPath, serverName } = body; - if (!projectPath || !serverName) { - return { error: 'projectPath and serverName are required', status: 400 }; - } - return removeMcpServerFromProject(projectPath, serverName); - }); - return; - } - - // API: Hook endpoint for Claude Code notifications - if (pathname === '/api/hook' && req.method === 'POST') { - handlePostRequest(req, res, async (body) => { - const { type, filePath, sessionId, ...extraData } = body; - - // Determine session ID from file path if not provided - let resolvedSessionId = sessionId; - if (!resolvedSessionId && filePath) { - resolvedSessionId = extractSessionIdFromPath(filePath); - } - - // Broadcast to all connected WebSocket clients - const notification = { - type: type || 'session_updated', - payload: { - sessionId: resolvedSessionId, - filePath: filePath, - timestamp: new Date().toISOString(), - ...extraData // Pass through toolName, status, result, params, error, etc. - } - }; - - broadcastToClients(notification); - - return { success: true, notification }; - }); - return; - } - - // API: System notify - CLI to Server communication bridge - // Allows CLI commands to trigger WebSocket broadcasts for UI updates - if (pathname === '/api/system/notify' && req.method === 'POST') { - handlePostRequest(req, res, async (body) => { - const { type, scope, data } = body as { - type: 'REFRESH_REQUIRED' | 'MEMORY_UPDATED' | 'HISTORY_UPDATED' | 'INSIGHT_GENERATED'; - scope: 'memory' | 'history' | 'insights' | 'all'; - data?: Record; - }; - - if (!type || !scope) { - return { error: 'type and scope are required', status: 400 }; - } - - // Map CLI notification types to WebSocket broadcast format - const notification = { - type, - payload: { - scope, - timestamp: new Date().toISOString(), - ...data - } - }; - - broadcastToClients(notification); - - return { success: true, broadcast: true, clientCount: wsClients.size }; - }); - return; - } - - // API: Get hooks configuration - if (pathname === '/api/hooks' && req.method === 'GET') { - const projectPathParam = url.searchParams.get('path'); - const hooksData = getHooksConfig(projectPathParam); - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify(hooksData)); - return; - } - - // API: Save hook - if (pathname === '/api/hooks' && req.method === 'POST') { - handlePostRequest(req, res, async (body) => { - const { projectPath, scope, event, hookData } = body; - if (!scope || !event || !hookData) { - return { error: 'scope, event, and hookData are required', status: 400 }; - } - return saveHookToSettings(projectPath, scope, event, hookData); - }); - return; - } - - // API: Delete hook - if (pathname === '/api/hooks' && req.method === 'DELETE') { - handlePostRequest(req, res, async (body) => { - const { projectPath, scope, event, hookIndex } = body; - if (!scope || !event || hookIndex === undefined) { - return { error: 'scope, event, and hookIndex are required', status: 400 }; - } - return deleteHookFromSettings(projectPath, scope, event, hookIndex); - }); - return; - } - - // API: List directory files with .gitignore filtering (Explorer view) - if (pathname === '/api/files') { - const dirPath = url.searchParams.get('path') || initialPath; - const filesData = await listDirectoryFiles(dirPath); - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify(filesData)); - return; - } - - // API: Get all skills (project and user) - if 
(pathname === '/api/skills') { - const projectPathParam = url.searchParams.get('path') || initialPath; - const skillsData = getSkillsConfig(projectPathParam); - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify(skillsData)); - return; - } - - // API: Get file content for preview (Explorer view) - if (pathname === '/api/file-content') { - const filePath = url.searchParams.get('path'); - if (!filePath) { - res.writeHead(400, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: 'File path is required' })); - return; - } - const fileData = await getFileContent(filePath); - res.writeHead(fileData.error ? 404 : 200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify(fileData)); - return; - } - - // API: CLI Tools Status - if (pathname === '/api/cli/status') { - const status = await getCliToolsStatus(); - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify(status)); - return; - } - - // API: CodexLens Status - if (pathname === '/api/codexlens/status') { - const status = await checkVenvStatus(); - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify(status)); - return; - } - - // API: CodexLens Bootstrap (Install) - if (pathname === '/api/codexlens/bootstrap' && req.method === 'POST') { - handlePostRequest(req, res, async () => { - try { - const result = await bootstrapVenv(); - if (result.success) { - const status = await checkVenvStatus(); - return { success: true, message: 'CodexLens installed successfully', version: status.version }; - } else { - return { success: false, error: result.error, status: 500 }; - } - } catch (err) { - return { success: false, error: err.message, status: 500 }; - } - }); - return; - } - - // API: CodexLens Init (Initialize workspace index) - if (pathname === '/api/codexlens/init' && req.method === 'POST') { - handlePostRequest(req, res, async (body) => { - const { path: projectPath } = body; - const targetPath = projectPath || initialPath; + // Try each route handler in order + // Order matters: more specific routes should come before general ones - try { - const result = await executeCodexLens(['init', targetPath, '--json'], { cwd: targetPath }); - if (result.success) { - try { - const parsed = JSON.parse(result.output); - return { success: true, result: parsed }; - } catch { - return { success: true, output: result.output }; - } - } else { - return { success: false, error: result.error, status: 500 }; - } - } catch (err) { - return { success: false, error: err.message, status: 500 }; - } - }); - return; + // CLI routes (/api/cli/*) + if (pathname.startsWith('/api/cli/')) { + if (await handleCliRoutes(routeContext)) return; } - // API: CodexLens Semantic Search Status - if (pathname === '/api/codexlens/semantic/status') { - const status = await checkSemanticStatus(); - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify(status)); - return; + // Memory routes (/api/memory/*) + if (pathname.startsWith('/api/memory/')) { + if (await handleMemoryRoutes(routeContext)) return; } - // API: CodexLens Semantic Search Install (fastembed, ONNX-based, ~200MB) - if (pathname === '/api/codexlens/semantic/install' && req.method === 'POST') { - handlePostRequest(req, res, async () => { - try { - const result = await installSemantic(); - if (result.success) { - const status = await checkSemanticStatus(); - return { - success: true, - message: 'Semantic search installed successfully (fastembed)', - ...status - }; - 
} else { - return { success: false, error: result.error, status: 500 }; - } - } catch (err) { - return { success: false, error: err.message, status: 500 }; - } - }); - return; + // MCP routes (/api/mcp*) + if (pathname.startsWith('/api/mcp')) { + if (await handleMcpRoutes(routeContext)) return; } - // API: CCW Installation Status - if (pathname === '/api/ccw/installations') { - const manifests = getAllManifests(); - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ installations: manifests })); - return; + // Hooks routes (/api/hooks, /api/hook) + if (pathname.startsWith('/api/hook')) { + if (await handleHooksRoutes(routeContext)) return; } - // API: CCW Endpoint Tools List - if (pathname === '/api/ccw/tools') { - const tools = listTools(); - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ tools })); - return; + // CodexLens routes (/api/codexlens/*) + if (pathname.startsWith('/api/codexlens/')) { + if (await handleCodexLensRoutes(routeContext)) return; } - // API: CCW Upgrade - if (pathname === '/api/ccw/upgrade' && req.method === 'POST') { - handlePostRequest(req, res, async (body) => { - const { path: installPath } = body; - - try { - const { spawn } = await import('child_process'); - - // Run ccw upgrade command - const args = installPath ? ['upgrade', '--all'] : ['upgrade', '--all']; - const upgradeProcess = spawn('ccw', args, { - shell: true, - stdio: ['ignore', 'pipe', 'pipe'] - }); - - let stdout = ''; - let stderr = ''; - - upgradeProcess.stdout.on('data', (data) => { - stdout += data.toString(); - }); - - upgradeProcess.stderr.on('data', (data) => { - stderr += data.toString(); - }); - - return new Promise((resolve) => { - upgradeProcess.on('close', (code) => { - if (code === 0) { - resolve({ success: true, message: 'Upgrade completed', output: stdout }); - } else { - resolve({ success: false, error: stderr || 'Upgrade failed', output: stdout, status: 500 }); - } - }); - - upgradeProcess.on('error', (err) => { - resolve({ success: false, error: err.message, status: 500 }); - }); - - // Timeout after 2 minutes - setTimeout(() => { - upgradeProcess.kill(); - resolve({ success: false, error: 'Upgrade timed out', status: 504 }); - }, 120000); - }); - } catch (err) { - return { success: false, error: err.message, status: 500 }; - } - }); - return; - } - - // API: CLI Execution History - if (pathname === '/api/cli/history') { - const projectPath = url.searchParams.get('path') || initialPath; - const limit = parseInt(url.searchParams.get('limit') || '50', 10); - const tool = url.searchParams.get('tool') || null; - const status = url.searchParams.get('status') || null; - const category = url.searchParams.get('category') as 'user' | 'internal' | 'insight' | null; - const search = url.searchParams.get('search') || null; - const recursive = url.searchParams.get('recursive') !== 'false'; // Default true - - // Use async version to ensure SQLite is initialized - getExecutionHistoryAsync(projectPath, { limit, tool, status, category, search, recursive }) - .then(history => { - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify(history)); - }) - .catch(err => { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: err.message })); - }); - return; - } - - // API: CLI Execution Detail (GET) or Delete (DELETE) - if (pathname === '/api/cli/execution') { - const projectPath = url.searchParams.get('path') || initialPath; - const executionId = 
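/*
 * The removed upgrade handler arms a two-minute kill timer that is never
 * cleared after a normal exit, and both arms of its argument ternary are
 * identical. A sketch of the same promisified-spawn wrapper with the
 * timer cleared once the process settles (names illustrative):
 *
 *   function runWithTimeout(cmd: string, args: string[], ms: number) {
 *     return new Promise<{ ok: boolean; out: string; err: string }>((resolve) => {
 *       const child = spawn(cmd, args, { shell: true, stdio: ['ignore', 'pipe', 'pipe'] });
 *       let out = '', err = '', settled = false;
 *       const finish = (ok: boolean, msg?: string) => {
 *         if (settled) return;           // resolve at most once
 *         settled = true;
 *         clearTimeout(timer);           // don't kill an already-exited process
 *         resolve({ ok, out, err: msg ?? err });
 *       };
 *       const timer = setTimeout(() => { child.kill(); finish(false, 'timed out'); }, ms);
 *       child.stdout.on('data', (d) => { out += d.toString(); });
 *       child.stderr.on('data', (d) => { err += d.toString(); });
 *       child.on('close', (code) => finish(code === 0));
 *       child.on('error', (e) => finish(false, e.message));
 *     });
 *   }
 */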
url.searchParams.get('id'); - - if (!executionId) { - res.writeHead(400, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: 'Execution ID is required' })); - return; - } - - // Handle DELETE request - if (req.method === 'DELETE') { - // Use async version to ensure SQLite is initialized - deleteExecutionAsync(projectPath, executionId) - .then(result => { - if (result.success) { - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ success: true, message: 'Execution deleted' })); - } else { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: result.error || 'Delete failed' })); - } - }) - .catch(err => { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: err.message })); - }); - return; - } - - // Handle GET request - return full conversation with all turns - const conversation = getConversationDetail(projectPath, executionId); - if (!conversation) { - res.writeHead(404, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: 'Conversation not found' })); - return; - } - - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify(conversation)); - return; - } - - // API: Batch Delete CLI Executions - if (pathname === '/api/cli/batch-delete' && req.method === 'POST') { - handlePostRequest(req, res, async (body) => { - const { path: projectPath, ids } = body; - - if (!ids || !Array.isArray(ids) || ids.length === 0) { - return { error: 'ids array is required', status: 400 }; - } - - const basePath = projectPath || initialPath; - return await batchDeleteExecutionsAsync(basePath, ids); - }); - return; - } - - // API: Get Native Session Content (full conversation from native session file) - if (pathname === '/api/cli/native-session') { - const projectPath = url.searchParams.get('path') || initialPath; - const executionId = url.searchParams.get('id'); - const format = url.searchParams.get('format') || 'json'; // json, text, pairs - - if (!executionId) { - res.writeHead(400, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: 'Execution ID is required' })); - return; - } - - try { - let result; - if (format === 'text') { - // Get formatted text representation - result = await getFormattedNativeConversation(projectPath, executionId, { - includeThoughts: url.searchParams.get('thoughts') === 'true', - includeToolCalls: url.searchParams.get('tools') === 'true', - includeTokens: url.searchParams.get('tokens') === 'true' - }); - } else if (format === 'pairs') { - // Get simple prompt/response pairs - const enriched = await getEnrichedConversation(projectPath, executionId); - result = enriched?.merged || null; - } else { - // Get full parsed session data - result = await getNativeSessionContent(projectPath, executionId); - } - - if (!result) { - res.writeHead(404, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: 'Native session not found' })); - return; - } - - res.writeHead(200, { 'Content-Type': format === 'text' ? 'text/plain' : 'application/json' }); - res.end(format === 'text' ? 
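/*
 * The native-session endpoint negotiates output through ?format=: 'text'
 * renders a transcript as text/plain, 'pairs' returns merged
 * prompt/response pairs, and anything else falls back to the full parsed
 * session as JSON. A condensed sketch of that dispatch (helper names are
 * the ones used above; opts stands in for the inline flag parsing):
 *
 *   const renderers: Record<string, () => Promise<unknown>> = {
 *     text:  () => getFormattedNativeConversation(projectPath, executionId, opts),
 *     pairs: () => getEnrichedConversation(projectPath, executionId)
 *                    .then(e => e?.merged ?? null),
 *   };
 *   const render = renderers[format]
 *     ?? (() => getNativeSessionContent(projectPath, executionId));
 *   const payload = await render();
 */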
result : JSON.stringify(result)); - } catch (err) { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: (err as Error).message })); - } - return; + // CCW routes (/api/ccw/*) + if (pathname.startsWith('/api/ccw/')) { + if (await handleCcwRoutes(routeContext)) return; } - // API: Get Enriched Conversation (CCW + Native merged) - if (pathname === '/api/cli/enriched') { - const projectPath = url.searchParams.get('path') || initialPath; - const executionId = url.searchParams.get('id'); - - if (!executionId) { - res.writeHead(400, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: 'Execution ID is required' })); - return; - } - - getEnrichedConversation(projectPath, executionId) - .then(result => { - if (!result) { - res.writeHead(404, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: 'Conversation not found' })); - return; - } - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify(result)); - }) - .catch(err => { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: (err as Error).message })); - }); - return; - } - - // API: Get History with Native Session Info - if (pathname === '/api/cli/history-native') { - const projectPath = url.searchParams.get('path') || initialPath; - const limit = parseInt(url.searchParams.get('limit') || '50', 10); - const tool = url.searchParams.get('tool') || null; - const status = url.searchParams.get('status') || null; - const category = url.searchParams.get('category') as 'user' | 'internal' | 'insight' | null; - const search = url.searchParams.get('search') || null; - - getHistoryWithNativeInfo(projectPath, { limit, tool, status, category, search }) - .then(history => { - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify(history)); - }) - .catch(err => { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: (err as Error).message })); - }); - return; - } - - // API: Execute CLI Tool - if (pathname === '/api/cli/execute' && req.method === 'POST') { - handlePostRequest(req, res, async (body) => { - const { tool, prompt, mode, format, model, dir, includeDirs, timeout, smartContext, parentExecutionId, category } = body; - - if (!tool || !prompt) { - return { error: 'tool and prompt are required', status: 400 }; - } - - // Generate smart context if enabled - let finalPrompt = prompt; - if (smartContext?.enabled) { - try { - const contextResult = await generateSmartContext(prompt, { - enabled: true, - maxFiles: smartContext.maxFiles || 10, - searchMode: 'text' - }, dir || initialPath); - - const contextAppendage = formatSmartContext(contextResult); - if (contextAppendage) { - finalPrompt = prompt + contextAppendage; - } - } catch (err) { - console.warn('[Smart Context] Failed to generate:', err); - // Continue without smart context - } - } - - // Start execution - const executionId = `${Date.now()}-${tool}`; - - // Broadcast execution started - broadcastToClients({ - type: 'CLI_EXECUTION_STARTED', - payload: { - executionId, - tool, - mode: mode || 'analysis', - parentExecutionId, - timestamp: new Date().toISOString() - } - }); - - try { - // Execute with streaming output broadcast - const result = await executeCliTool({ - tool, - prompt: finalPrompt, - mode: mode || 'analysis', - format: format || 'plain', - model, - cd: dir || initialPath, - includeDirs, - timeout: timeout || 300000, - category: category || 'user', - 
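/*
 * The execute endpoint drives the dashboard over WebSocket with four
 * event types: CLI_EXECUTION_STARTED, streamed CLI_OUTPUT chunks, then
 * CLI_EXECUTION_COMPLETED or CLI_EXECUTION_ERROR. Modeled as a
 * discriminated union (payload fields taken from the broadcasts below):
 *
 *   type CliWsEvent =
 *     | { type: 'CLI_EXECUTION_STARTED';
 *         payload: { executionId: string; tool: string; mode: string;
 *                    parentExecutionId?: string; timestamp: string } }
 *     | { type: 'CLI_OUTPUT';
 *         payload: { executionId: string; chunkType: string; data: string } }
 *     | { type: 'CLI_EXECUTION_COMPLETED';
 *         payload: { executionId: string; success: boolean;
 *                    status: string; duration_ms: number } }
 *     | { type: 'CLI_EXECUTION_ERROR';
 *         payload: { executionId: string; error: string } };
 */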
parentExecutionId, - stream: true - }, (chunk) => { - // Broadcast output chunks via WebSocket - broadcastToClients({ - type: 'CLI_OUTPUT', - payload: { - executionId, - chunkType: chunk.type, - data: chunk.data - } - }); - }); - - // Broadcast completion - broadcastToClients({ - type: 'CLI_EXECUTION_COMPLETED', - payload: { - executionId, - success: result.success, - status: result.execution.status, - duration_ms: result.execution.duration_ms - } - }); - - return { - success: result.success, - execution: result.execution - }; - - } catch (error: unknown) { - // Broadcast error - broadcastToClients({ - type: 'CLI_EXECUTION_ERROR', - payload: { - executionId, - error: (error as Error).message - } - }); - - return { error: (error as Error).message, status: 500 }; - } - }); - return; - } - - // API: CLI Review - Submit review for an execution - if (pathname.startsWith('/api/cli/review/') && req.method === 'POST') { - const executionId = pathname.replace('/api/cli/review/', ''); - handlePostRequest(req, res, async (body) => { - const { status, rating, comments, reviewer } = body as { - status: 'pending' | 'approved' | 'rejected' | 'changes_requested'; - rating?: number; - comments?: string; - reviewer?: string; - }; - - if (!status) { - return { error: 'status is required', status: 400 }; - } - - try { - const historyStore = await import('../tools/cli-history-store.js').then(m => m.getHistoryStore(initialPath)); - - // Verify execution exists - const execution = historyStore.getConversation(executionId); - if (!execution) { - return { error: 'Execution not found', status: 404 }; - } - - // Save review - const review = historyStore.saveReview({ - execution_id: executionId, - status, - rating, - comments, - reviewer - }); - - // Broadcast review update - broadcastToClients({ - type: 'CLI_REVIEW_UPDATED', - payload: { - executionId, - review, - timestamp: new Date().toISOString() - } - }); - - return { success: true, review }; - } catch (error: unknown) { - return { error: (error as Error).message, status: 500 }; - } - }); - return; - } - - // API: CLI Review - Get review for an execution - if (pathname.startsWith('/api/cli/review/') && req.method === 'GET') { - const executionId = pathname.replace('/api/cli/review/', ''); - try { - const historyStore = await import('../tools/cli-history-store.js').then(m => m.getHistoryStore(initialPath)); - const review = historyStore.getReview(executionId); - - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ review })); - } catch (error: unknown) { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: (error as Error).message })); - } - return; - } - - // API: CLI Reviews - List all reviews - if (pathname === '/api/cli/reviews' && req.method === 'GET') { - try { - const historyStore = await import('../tools/cli-history-store.js').then(m => m.getHistoryStore(initialPath)); - const statusFilter = url.searchParams.get('status') as 'pending' | 'approved' | 'rejected' | 'changes_requested' | null; - const limit = parseInt(url.searchParams.get('limit') || '50', 10); - - const reviews = historyStore.getReviews({ - status: statusFilter || undefined, - limit - }); - - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ reviews, count: reviews.length })); - } catch (error: unknown) { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: (error as Error).message })); - } - return; - } - - // API: Memory Module - 
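/*
 * Reviews attach to an execution with a constrained status plus optional
 * rating/comments/reviewer, and every save is re-broadcast as
 * CLI_REVIEW_UPDATED. The record shape, as used by the handlers above:
 *
 *   interface ExecutionReview {
 *     execution_id: string;
 *     status: 'pending' | 'approved' | 'rejected' | 'changes_requested';
 *     rating?: number;
 *     comments?: string;
 *     reviewer?: string;
 *   }
 */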
Track entity access - if (pathname === '/api/memory/track' && req.method === 'POST') { - handlePostRequest(req, res, async (body) => { - const { type, action, value, sessionId, metadata, path: projectPath } = body; - - if (!type || !action || !value) { - return { error: 'type, action, and value are required', status: 400 }; - } - - const basePath = projectPath || initialPath; - - try { - const memoryStore = getMemoryStore(basePath); - const now = new Date().toISOString(); - - // Normalize the value - const normalizedValue = value.toLowerCase().trim(); - - // Upsert entity - const entityId = memoryStore.upsertEntity({ - type, - value, - normalized_value: normalizedValue, - first_seen_at: now, - last_seen_at: now, - metadata: metadata ? JSON.stringify(metadata) : undefined - }); - - // Log access - memoryStore.logAccess({ - entity_id: entityId, - action, - session_id: sessionId, - timestamp: now, - context_summary: metadata?.context - }); - - // Update stats - memoryStore.updateStats(entityId, action); - - // Calculate new heat score - const heatScore = memoryStore.calculateHeatScore(entityId); - const stats = memoryStore.getStats(entityId); - - // Broadcast MEMORY_UPDATED event via WebSocket - broadcastToClients({ - type: 'MEMORY_UPDATED', - payload: { - entity: { id: entityId, type, value }, - stats: { - read_count: stats?.read_count || 0, - write_count: stats?.write_count || 0, - mention_count: stats?.mention_count || 0, - heat_score: heatScore - }, - timestamp: now - } - }); - - return { - success: true, - entity_id: entityId, - heat_score: heatScore - }; - } catch (error: unknown) { - return { error: (error as Error).message, status: 500 }; - } - }); - return; - } - - // API: Memory Module - Get native Claude history from ~/.claude/history.jsonl - if (pathname === '/api/memory/native-history') { - const projectPath = url.searchParams.get('path') || initialPath; - const limit = parseInt(url.searchParams.get('limit') || '100', 10); - const historyFile = join(homedir(), '.claude', 'history.jsonl'); - - try { - if (!existsSync(historyFile)) { - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ prompts: [], total: 0, message: 'No history file found' })); - return; - } - - const content = readFileSync(historyFile, 'utf8'); - const lines = content.trim().split('\n').filter(line => line.trim()); - const allPrompts = []; - - for (const line of lines) { - try { - const entry = JSON.parse(line); - // Filter by project if specified - if (projectPath && entry.project) { - const normalizedProject = entry.project.replace(/\\/g, '/').toLowerCase(); - const normalizedPath = projectPath.replace(/\\/g, '/').toLowerCase(); - if (!normalizedProject.includes(normalizedPath) && !normalizedPath.includes(normalizedProject)) { - continue; - } - } - - allPrompts.push({ - id: `${entry.sessionId}-${entry.timestamp}`, - text: entry.display || '', - timestamp: new Date(entry.timestamp).toISOString(), - project: entry.project || '', - session_id: entry.sessionId || '', - pasted_contents: entry.pastedContents || {}, - // Derive intent from content keywords - intent: derivePromptIntent(entry.display || ''), - quality_score: calculateQualityScore(entry.display || '') - }); - } catch (parseError) { - // Skip malformed lines - } - } - - // Sort by timestamp descending - allPrompts.sort((a, b) => new Date(b.timestamp).getTime() - new Date(a.timestamp).getTime()); - - // Apply limit - const prompts = allPrompts.slice(0, limit); - - res.writeHead(200, { 'Content-Type': 'application/json' 
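/*
 * Each tracked access runs the same pipeline: upsert the entity, append
 * an access-log row, bump per-action counters, recompute the heat score,
 * and broadcast MEMORY_UPDATED. calculateHeatScore itself is not shown in
 * this diff; a common shape for such a score is recency-decayed weighted
 * counts, e.g. (illustrative only, not the store's actual formula):
 *
 *   function heatScore(s: { read_count: number; write_count: number;
 *                           mention_count: number }, lastSeen: Date): number {
 *     const ageDays = (Date.now() - lastSeen.getTime()) / 86_400_000;
 *     const decay = Math.exp(-ageDays / 7);   // roughly one-week falloff
 *     return (s.read_count + 3 * s.write_count + s.mention_count) * decay;
 *   }
 */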
}); - res.end(JSON.stringify({ prompts, total: allPrompts.length })); - } catch (error: unknown) { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: (error as Error).message })); - } - return; - } - - // API: Memory Module - Get prompt history - if (pathname === '/api/memory/prompts') { - const projectPath = url.searchParams.get('path') || initialPath; - const limit = parseInt(url.searchParams.get('limit') || '50', 10); - const search = url.searchParams.get('search') || null; - - try { - const memoryStore = getMemoryStore(projectPath); - let prompts; - - if (search) { - prompts = memoryStore.searchPrompts(search, limit); - } else { - // Get all recent prompts (we'll need to add this method to MemoryStore) - const stmt = memoryStore['db'].prepare(` - SELECT * FROM prompt_history - ORDER BY timestamp DESC - LIMIT ? - `); - prompts = stmt.all(limit); - } - - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ prompts })); - } catch (error: unknown) { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: (error as Error).message })); - } - return; - } - - // API: Memory Module - Get insights - if (pathname === '/api/memory/insights') { - const projectPath = url.searchParams.get('path') || initialPath; - - try { - const memoryStore = getMemoryStore(projectPath); - - // Get total prompt count - const countStmt = memoryStore['db'].prepare(`SELECT COUNT(*) as count FROM prompt_history`); - const { count: totalPrompts } = countStmt.get() as { count: number }; - - // Get top intent - const topIntentStmt = memoryStore['db'].prepare(` - SELECT intent_label, COUNT(*) as count - FROM prompt_history - WHERE intent_label IS NOT NULL - GROUP BY intent_label - ORDER BY count DESC - LIMIT 1 - `); - const topIntentRow = topIntentStmt.get() as { intent_label: string; count: number } | undefined; - - // Get average prompt length - const avgLengthStmt = memoryStore['db'].prepare(` - SELECT AVG(LENGTH(prompt_text)) as avg_length - FROM prompt_history - WHERE prompt_text IS NOT NULL - `); - const { avg_length: avgLength } = avgLengthStmt.get() as { avg_length: number }; - - // Get prompt patterns - const patternsStmt = memoryStore['db'].prepare(` - SELECT * FROM prompt_patterns - ORDER BY frequency DESC - LIMIT 10 - `); - const patterns = patternsStmt.all(); - - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ - stats: { - totalPrompts, - topIntent: topIntentRow?.intent_label || 'unknown', - avgLength: Math.round(avgLength || 0) - }, - patterns: patterns.map((p: any) => ({ - type: p.pattern_type, - description: `Pattern detected in prompts`, - occurrences: p.frequency, - suggestion: `Consider using more specific prompts for ${p.pattern_type}` - })) - })); - } catch (error: unknown) { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: (error as Error).message })); - } - return; - } - - // API: Memory Module - Trigger async CLI-based insights analysis - if (pathname === '/api/memory/insights/analyze' && req.method === 'POST') { - handlePostRequest(req, res, async (body: any) => { - const projectPath = body.path || initialPath; - const tool = body.tool || 'gemini'; // gemini, qwen, codex, claude - const prompts = body.prompts || []; - const lang = body.lang || 'en'; // Language preference - - if (prompts.length === 0) { - return { error: 'No prompts provided for analysis', status: 400 }; - } - - // Prepare prompt 
summary for CLI analysis - const promptSummary = prompts.slice(0, 20).map((p: any, i: number) => { - return `${i + 1}. [${p.intent || 'unknown'}] ${(p.text || '').substring(0, 100)}...`; - }).join('\n'); - - const langInstruction = lang === 'zh' - ? '请用中文回复。所有 description、suggestion、title 字段必须使用中文。' - : 'Respond in English. All description, suggestion, title fields must be in English.'; - - const analysisPrompt = ` -PURPOSE: Analyze prompt patterns and provide optimization suggestions -TASK: -• Review the following prompt history summary -• Identify common patterns (vague requests, repetitive queries, incomplete context) -• Suggest specific improvements for prompt quality -• Detect areas where prompts could be more effective -MODE: analysis -CONTEXT: ${prompts.length} prompts from project: ${projectPath} -EXPECTED: JSON with patterns array and suggestions array -LANGUAGE: ${langInstruction} - -PROMPT HISTORY: -${promptSummary} - -Return ONLY valid JSON in this exact format (no markdown, no code blocks, just pure JSON): -{ - "patterns": [ - {"type": "pattern_type", "description": "description", "occurrences": count, "severity": "low|medium|high", "suggestion": "how to improve"} - ], - "suggestions": [ - {"title": "title", "description": "description", "example": "example prompt"} - ] -}`; - - try { - // Queue CLI execution - const result = await executeCliTool({ - tool, - prompt: analysisPrompt, - mode: 'analysis', - timeout: 120000 - }); - - // Try to parse JSON from response - let insights = { patterns: [], suggestions: [] }; - if (result.stdout) { - let outputText = result.stdout; - - // Strip markdown code blocks if present - const codeBlockMatch = outputText.match(/```(?:json)?\s*([\s\S]*?)```/); - if (codeBlockMatch) { - outputText = codeBlockMatch[1].trim(); - } - - // Find JSON object in the response - const jsonMatch = outputText.match(/\{[\s\S]*\}/); - if (jsonMatch) { - try { - insights = JSON.parse(jsonMatch[0]); - // Ensure arrays exist - if (!Array.isArray(insights.patterns)) insights.patterns = []; - if (!Array.isArray(insights.suggestions)) insights.suggestions = []; - } catch (e) { - console.error('[insights/analyze] JSON parse error:', e); - // Return raw output if JSON parse fails - insights = { - patterns: [{ type: 'raw_analysis', description: result.stdout.substring(0, 500), occurrences: 1, severity: 'low', suggestion: '' }], - suggestions: [] - }; - } - } else { - // No JSON found, wrap raw output - insights = { - patterns: [{ type: 'raw_analysis', description: result.stdout.substring(0, 500), occurrences: 1, severity: 'low', suggestion: '' }], - suggestions: [] - }; - } - } - - // Save insight to database - try { - const storeModule = await import('../tools/cli-history-store.js'); - const store = storeModule.getHistoryStore(projectPath); - const insightId = `insight-${Date.now()}`; - store.saveInsight({ - id: insightId, - tool, - promptCount: prompts.length, - patterns: insights.patterns, - suggestions: insights.suggestions, - rawOutput: result.stdout || '', - executionId: result.execution?.id, - lang - }); - console.log('[Insights] Saved insight:', insightId); - } catch (saveErr) { - console.warn('[Insights] Failed to save insight:', (saveErr as Error).message); - } - - return { - success: true, - insights, - tool, - executionId: result.execution.id - }; - } catch (error: unknown) { - return { error: (error as Error).message, status: 500 }; - } - }); - return; - } - - // API: Get insights history - if (pathname === '/api/memory/insights') { - const projectPath = 
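/*
 * Parsing model output defensively, as above, is a three-step funnel:
 * strip a fenced ```json block if present, grab the outermost {...} span,
 * then JSON.parse with a fallback. Factored into a helper (sketch; the
 * surrounding code inlines this logic):
 *
 *   function extractJson<T>(raw: string, fallback: T): T {
 *     let text = raw;
 *     const fence = text.match(/```(?:json)?\s*([\s\S]*?)```/);
 *     if (fence) text = fence[1].trim();
 *     const obj = text.match(/\{[\s\S]*\}/);
 *     if (!obj) return fallback;
 *     try { return JSON.parse(obj[0]) as T; } catch { return fallback; }
 *   }
 */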
url.searchParams.get('path') || initialPath; - const limit = parseInt(url.searchParams.get('limit') || '20', 10); - const tool = url.searchParams.get('tool') || undefined; - - try { - const storeModule = await import('../tools/cli-history-store.js'); - const store = storeModule.getHistoryStore(projectPath); - const insights = store.getInsights({ limit, tool }); - - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ success: true, insights })); - } catch (error: unknown) { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: (error as Error).message })); - } - return; - } - - // API: Get single insight detail - if (pathname.startsWith('/api/memory/insights/') && req.method === 'GET') { - const insightId = pathname.replace('/api/memory/insights/', ''); - const projectPath = url.searchParams.get('path') || initialPath; - - if (!insightId || insightId === 'analyze') { - // Skip - handled by other routes - } else { - try { - const storeModule = await import('../tools/cli-history-store.js'); - const store = storeModule.getHistoryStore(projectPath); - const insight = store.getInsight(insightId); - - if (insight) { - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ success: true, insight })); - } else { - res.writeHead(404, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: 'Insight not found' })); - } - } catch (error: unknown) { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: (error as Error).message })); - } - return; - } - } - - // API: Delete insight - if (pathname.startsWith('/api/memory/insights/') && req.method === 'DELETE') { - const insightId = pathname.replace('/api/memory/insights/', ''); - const projectPath = url.searchParams.get('path') || initialPath; - - try { - const storeModule = await import('../tools/cli-history-store.js'); - const store = storeModule.getHistoryStore(projectPath); - const deleted = store.deleteInsight(insightId); - - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ success: deleted })); - } catch (error: unknown) { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: (error as Error).message })); - } - return; - } - - // API: Memory Module - Get hotspot statistics - if (pathname === '/api/memory/stats') { - const projectPath = url.searchParams.get('path') || initialPath; - const filter = url.searchParams.get('filter') || 'all'; // today, week, all - const limit = parseInt(url.searchParams.get('limit') || '10', 10); - - try { - const memoryStore = getMemoryStore(projectPath); - const hotEntities = memoryStore.getHotEntities(limit * 4); - - // Filter by time if needed - let filtered = hotEntities; - if (filter === 'today') { - const today = new Date(); - today.setHours(0, 0, 0, 0); - filtered = hotEntities.filter((e: any) => new Date(e.last_seen_at) >= today); - } else if (filter === 'week') { - const weekAgo = new Date(); - weekAgo.setDate(weekAgo.getDate() - 7); - filtered = hotEntities.filter((e: any) => new Date(e.last_seen_at) >= weekAgo); - } - - // Separate into mostRead and mostEdited - const fileEntities = filtered.filter((e: any) => e.type === 'file'); - - const mostRead = fileEntities - .filter((e: any) => e.stats.read_count > 0) - .sort((a: any, b: any) => b.stats.read_count - a.stats.read_count) - .slice(0, limit) - .map((e: any) => ({ - path: e.value, - file: 
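/*
 * The stats endpoint over-fetches (limit * 4 hot entities) and then
 * narrows by a time window, since heat ordering and recency filtering are
 * independent concerns. The window predicate, extracted (sketch):
 *
 *   function withinWindow(lastSeen: string,
 *                         filter: 'today' | 'week' | 'all'): boolean {
 *     if (filter === 'all') return true;
 *     const cutoff = new Date();
 *     if (filter === 'today') cutoff.setHours(0, 0, 0, 0);
 *     else cutoff.setDate(cutoff.getDate() - 7);
 *     return new Date(lastSeen) >= cutoff;
 *   }
 */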
e.value.split(/[/\\]/).pop(), - heat: e.stats.read_count, - count: e.stats.read_count, - lastSeen: e.last_seen_at - })); - - const mostEdited = fileEntities - .filter((e: any) => e.stats.write_count > 0) - .sort((a: any, b: any) => b.stats.write_count - a.stats.write_count) - .slice(0, limit) - .map((e: any) => ({ - path: e.value, - file: e.value.split(/[/\\]/).pop(), - heat: e.stats.write_count, - count: e.stats.write_count, - lastSeen: e.last_seen_at - })); - - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ stats: { mostRead, mostEdited } })); - } catch (error: unknown) { - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ stats: { mostRead: [], mostEdited: [] } })); - } - return; - } - - // API: Memory Module - Get memory graph (file associations with modules and components) - if (pathname === '/api/memory/graph') { - const projectPath = url.searchParams.get('path') || initialPath; - - try { - const memoryStore = getMemoryStore(projectPath); - const hotEntities = memoryStore.getHotEntities(100); - - // Build file nodes from entities - const fileEntities = hotEntities.filter((e: any) => e.type === 'file'); - const fileNodes = fileEntities.map((e: any) => { - const fileName = e.value.split(/[/\\]/).pop() || ''; - // Detect component type based on file name patterns - const isComponent = /\.(tsx|jsx|vue|svelte)$/.test(fileName) || - /^[A-Z][a-zA-Z]+\.(ts|js)$/.test(fileName) || - fileName.includes('.component.') || - fileName.includes('.controller.'); - - return { - id: e.value, - name: fileName, - path: e.value, - type: isComponent ? 'component' : 'file', - heat: Math.min(25, 8 + e.stats.heat_score / 10) - }; - }); - - // Extract unique modules (directories) from file paths - const moduleMap = new Map(); - for (const file of fileEntities) { - const parts = file.value.split(/[/\\]/); - // Get parent directory as module (skip if root level) - if (parts.length > 1) { - const modulePath = parts.slice(0, -1).join('/'); - const moduleName = parts[parts.length - 2] || modulePath; - // Skip common non-module directories - if (['node_modules', '.git', 'dist', 'build', '.next', '.nuxt'].includes(moduleName)) continue; - - if (!moduleMap.has(modulePath)) { - moduleMap.set(modulePath, { heat: 0, files: [] }); - } - const mod = moduleMap.get(modulePath)!; - mod.heat += file.stats.heat_score / 20; - mod.files.push(file.value); - } - } - - // Create module nodes (limit to top modules by heat) - const moduleNodes = Array.from(moduleMap.entries()) - .sort((a, b) => b[1].heat - a[1].heat) - .slice(0, 15) - .map(([modulePath, data]) => ({ - id: modulePath, - name: modulePath.split(/[/\\]/).pop() || modulePath, - path: modulePath, - type: 'module', - heat: Math.min(20, 12 + data.heat / 5), - fileCount: data.files.length - })); - - // Combine all nodes - const nodes = [...fileNodes, ...moduleNodes]; - const nodeIds = new Set(nodes.map(n => n.id)); - - // Build edges from associations - const edges: any[] = []; - const edgeSet = new Set(); // Prevent duplicate edges - - // Add file-to-file associations - for (const entity of hotEntities) { - if (!entity.id || entity.type !== 'file') continue; - const associations = memoryStore.getAssociations(entity.id, 10); - for (const assoc of associations) { - if (assoc.target && nodeIds.has(assoc.target.value)) { - const edgeKey = [entity.value, assoc.target.value].sort().join('|'); - if (!edgeSet.has(edgeKey)) { - edgeSet.add(edgeKey); - edges.push({ - source: entity.value, - target: 
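/*
 * The graph payload pairs typed nodes with undirected, deduplicated
 * edges; duplicates are avoided by keying each pair on a sorted join so
 * (a,b) and (b,a) collapse to one edge. The shapes and the dedup step,
 * named (sketch):
 *
 *   interface GraphNode { id: string; name: string; path: string;
 *                         type: 'file' | 'component' | 'module'; heat: number }
 *   interface GraphEdge { source: string; target: string; weight: number }
 *
 *   const seen = new Set<string>();
 *   function addEdge(edges: GraphEdge[], a: string, b: string, weight: number) {
 *     const key = [a, b].sort().join('|');   // canonical, order-independent
 *     if (seen.has(key)) return;
 *     seen.add(key);
 *     edges.push({ source: a, target: b, weight });
 *   }
 */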
assoc.target.value, - weight: assoc.weight - }); - } - } - } - } - - // Add file-to-module edges (files belong to their parent modules) - for (const [modulePath, data] of moduleMap.entries()) { - if (!nodeIds.has(modulePath)) continue; - for (const filePath of data.files) { - if (nodeIds.has(filePath)) { - const edgeKey = [modulePath, filePath].sort().join('|'); - if (!edgeSet.has(edgeKey)) { - edgeSet.add(edgeKey); - edges.push({ - source: modulePath, - target: filePath, - weight: 2 // Lower weight for structural relationships - }); - } - } - } - } - - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ graph: { nodes, edges } })); - } catch (error: unknown) { - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ graph: { nodes: [], edges: [] } })); - } - return; - } - - // API: Memory Module - Get recent context activities - if (pathname === '/api/memory/recent') { - const projectPath = url.searchParams.get('path') || initialPath; - const limit = parseInt(url.searchParams.get('limit') || '20', 10); - - try { - const memoryStore = getMemoryStore(projectPath); - - // Get recent access logs with entity info - filter to file type only - const db = (memoryStore as any).db; - const recentLogs = db.prepare(` - SELECT a.*, e.type, e.value - FROM access_logs a - JOIN entities e ON a.entity_id = e.id - WHERE e.type = 'file' - ORDER BY a.timestamp DESC - LIMIT ? - `).all(limit * 2) as any[]; // Fetch more to account for filtering - - // Filter out invalid entries (JSON strings, error messages, etc.) - const validLogs = recentLogs.filter((log: any) => { - const value = log.value || ''; - // Skip if value looks like JSON or contains error-like patterns - if (value.includes('"status"') || value.includes('"content"') || - value.includes('"activeForm"') || value.startsWith('{') || - value.startsWith('[') || value.includes('graph 400')) { - return false; - } - // Must have a file extension or look like a valid path - const hasExtension = /\.[a-zA-Z0-9]{1,10}$/.test(value); - const looksLikePath = value.includes('/') || value.includes('\\'); - return hasExtension || looksLikePath; - }).slice(0, limit); - - const recent = validLogs.map((log: any) => ({ - type: log.action, // read, write, mention - timestamp: log.timestamp, - prompt: log.context_summary || '', - files: [log.value], - description: `${log.action}: ${log.value.split(/[/\\]/).pop()}` - })); - - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ recent })); - } catch (error: unknown) { - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ recent: [] })); - } - return; - } - - // API: Active Memory - Get status - if (pathname === '/api/memory/active/status') { - const projectPath = url.searchParams.get('path') || initialPath; - - if (!projectPath) { - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ enabled: false, status: null, config: { interval: 'manual', tool: 'gemini' } })); - return; - } - - try { - const configPath = join(projectPath, '.claude', 'rules', 'active_memory.md'); - const configJsonPath = join(projectPath, '.claude', 'rules', 'active_memory_config.json'); - const enabled = existsSync(configPath); - let lastSync: string | null = null; - let fileCount = 0; - let config = { interval: 'manual', tool: 'gemini' }; - - if (enabled) { - const stats = statSync(configPath); - lastSync = stats.mtime.toISOString(); - const content = readFileSync(configPath, 
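/*
 * The recent-activity feed screens raw access-log values with heuristics:
 * reject anything that looks like stray JSON or an error payload, keep
 * values that carry a file extension or a path separator. A predicate in
 * the same spirit (regexes condensed from the checks above):
 *
 *   function looksLikeFile(value: string): boolean {
 *     if (/^[\[{]/.test(value)) return false;                  // stray JSON
 *     if (/"(status|content|activeForm)"/.test(value)) return false;
 *     const hasExt = /\.[a-zA-Z0-9]{1,10}$/.test(value);
 *     const hasSep = value.includes('/') || value.includes('\\');
 *     return hasExt || hasSep;
 *   }
 */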
'utf-8'); - // Count file sections - fileCount = (content.match(/^## /gm) || []).length; - } - - // Load config if exists - if (existsSync(configJsonPath)) { - try { - config = JSON.parse(readFileSync(configJsonPath, 'utf-8')); - } catch (e) { /* ignore parse errors */ } - } - - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ - enabled, - status: enabled ? { lastSync, fileCount } : null, - config - })); - } catch (error: unknown) { - console.error('Active Memory status error:', error); - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ enabled: false, status: null, config: { interval: 'manual', tool: 'gemini' } })); - } - return; - } - - // API: Active Memory - Toggle - if (pathname === '/api/memory/active/toggle' && req.method === 'POST') { - let body = ''; - req.on('data', (chunk: Buffer) => { body += chunk.toString(); }); - req.on('end', async () => { - try { - const { enabled, config } = JSON.parse(body || '{}'); - const projectPath = initialPath; - - if (!projectPath) { - res.writeHead(400, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: 'No project path configured' })); - return; - } - - const rulesDir = join(projectPath, '.claude', 'rules'); - const configPath = join(rulesDir, 'active_memory.md'); - const configJsonPath = join(rulesDir, 'active_memory_config.json'); - - if (enabled) { - // Enable: Create directory and initial file - if (!existsSync(rulesDir)) { - mkdirSync(rulesDir, { recursive: true }); - } - - // Save config - if (config) { - writeFileSync(configJsonPath, JSON.stringify(config, null, 2), 'utf-8'); - } - - // Create initial active_memory.md with header - const initialContent = `# Active Memory - -> Auto-generated understanding of frequently accessed files. -> Last updated: ${new Date().toISOString()} - ---- - -*No files analyzed yet. 
Click "Sync Now" to analyze hot files.* -`; - writeFileSync(configPath, initialContent, 'utf-8'); - - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ enabled: true, message: 'Active Memory enabled' })); - } else { - // Disable: Remove the files - if (existsSync(configPath)) { - unlinkSync(configPath); - } - if (existsSync(configJsonPath)) { - unlinkSync(configJsonPath); - } - - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ enabled: false, message: 'Active Memory disabled' })); - } - } catch (error: unknown) { - console.error('Active Memory toggle error:', error); - if (!res.headersSent) { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: (error as Error).message })); - } - } - }); - return; - } - - // API: Active Memory - Update Config - if (pathname === '/api/memory/active/config' && req.method === 'POST') { - let body = ''; - req.on('data', (chunk: Buffer) => { body += chunk.toString(); }); - req.on('end', async () => { - try { - const { config } = JSON.parse(body || '{}'); - const projectPath = initialPath; - const rulesDir = join(projectPath, '.claude', 'rules'); - const configJsonPath = join(rulesDir, 'active_memory_config.json'); - - if (!existsSync(rulesDir)) { - mkdirSync(rulesDir, { recursive: true }); - } - - writeFileSync(configJsonPath, JSON.stringify(config, null, 2), 'utf-8'); - - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ success: true, config })); - } catch (error: unknown) { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: (error as Error).message })); - } - }); - return; - } - - // API: Active Memory - Sync (analyze hot files using CLI and update active_memory.md) - if (pathname === '/api/memory/active/sync' && req.method === 'POST') { - let body = ''; - req.on('data', (chunk: Buffer) => { body += chunk.toString(); }); - req.on('end', async () => { - try { - const { tool = 'gemini' } = JSON.parse(body || '{}'); - const projectPath = initialPath; - - if (!projectPath) { - res.writeHead(400, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: 'No project path configured' })); - return; - } - - const rulesDir = join(projectPath, '.claude', 'rules'); - const configPath = join(rulesDir, 'active_memory.md'); - - // Get hot files from memory store - with fallback - let hotFiles: any[] = []; - try { - const memoryStore = getMemoryStore(projectPath); - const hotEntities = memoryStore.getHotEntities(20); - hotFiles = hotEntities - .filter((e: any) => e.type === 'file') - .slice(0, 10); - } catch (memErr) { - console.warn('[Active Memory] Memory store error, using empty list:', (memErr as Error).message); - } - - // Build file list for CLI analysis - const filePaths = hotFiles.map((f: any) => { - const filePath = f.value; - return isAbsolute(filePath) ? filePath : join(projectPath, filePath); - }).filter((p: string) => existsSync(p)); - - // Build the active memory content header - let content = `# Active Memory - -> Auto-generated understanding of frequently accessed files using ${tool.toUpperCase()}. -> Last updated: ${new Date().toISOString()} -> Files analyzed: ${hotFiles.length} -> CLI Tool: ${tool} - ---- - -`; - - // Use CCW CLI tool to analyze files - let cliOutput = ''; - - // Build CLI prompt - const cliPrompt = `PURPOSE: Analyze the following hot files and provide a concise understanding of each. 
-TASK: For each file, describe its purpose, key exports, dependencies, and how it relates to other files. -MODE: analysis -CONTEXT: ${filePaths.map((p: string) => '@' + p).join(' ')} -EXPECTED: Markdown format with ## headings for each file, bullet points for key information. -RULES: Be concise. Focus on practical understanding. Include function signatures for key exports.`; - - // Try to execute CLI using CCW's built-in executor - try { - const syncId = `active-memory-${Date.now()}`; - const result = await executeCliTool({ - tool: tool === 'qwen' ? 'qwen' : 'gemini', - prompt: cliPrompt, - mode: 'analysis', - format: 'plain', - cd: projectPath, - timeout: 120000, - stream: false, - category: 'internal', - id: syncId - }); - - if (result.success && result.execution?.output) { - cliOutput = result.execution.output; - } - - // Add CLI output to content - content += cliOutput + '\n\n---\n\n'; - - } catch (cliErr) { - // Fallback to basic analysis if CLI fails - console.warn('[Active Memory] CLI analysis failed, using basic analysis:', (cliErr as Error).message); - - // Basic analysis fallback - for (const file of hotFiles) { - const fileName = file.value.split(/[/\\]/).pop() || file.value; - const filePath = file.value; - const heat = file.stats?.heat_score || 0; - const readCount = file.stats?.read_count || 0; - const writeCount = file.stats?.write_count || 0; - - content += `## ${fileName} - -- **Path**: \`${filePath}\` -- **Heat Score**: ${heat} -- **Access**: ${readCount} reads, ${writeCount} writes -- **Last Seen**: ${file.last_seen_at || 'Unknown'} - -`; - - // Try to read file and generate summary - try { - const fullPath = isAbsolute(filePath) ? filePath : join(projectPath, filePath); - - if (existsSync(fullPath)) { - const stat = statSync(fullPath); - const ext = extname(fullPath).toLowerCase(); - - content += `- **Size**: ${(stat.size / 1024).toFixed(1)} KB\n`; - content += `- **Type**: ${ext || 'unknown'}\n`; - - const textExts = ['.ts', '.js', '.tsx', '.jsx', '.md', '.json', '.css', '.html', '.vue', '.svelte', '.py', '.go', '.rs']; - if (textExts.includes(ext) && stat.size < 100000) { - const fileContent = readFileSync(fullPath, 'utf-8'); - const lines = fileContent.split('\n').slice(0, 30); - - const exports = lines.filter(l => - l.includes('export ') || l.includes('function ') || - l.includes('class ') || l.includes('interface ') - ).slice(0, 8); - - if (exports.length > 0) { - content += `\n**Key Exports**:\n\`\`\`\n${exports.join('\n')}\n\`\`\`\n`; - } - } - } - } catch (fileErr) { - // Skip file analysis errors - } - - content += '\n---\n\n'; - } - } - - // Ensure directory exists - if (!existsSync(rulesDir)) { - mkdirSync(rulesDir, { recursive: true }); - } - - // Write the file - writeFileSync(configPath, content, 'utf-8'); - - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ - success: true, - filesAnalyzed: hotFiles.length, - path: configPath, - usedCli: cliOutput.length > 0 - })); - } catch (error: unknown) { - console.error('[Active Memory] Sync error:', error); - if (!res.headersSent) { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: (error as Error).message })); - } - } - }); - return; - } - - // API: Memory Module - Get conversations index - if (pathname === '/api/memory/conversations') { - const projectPath = url.searchParams.get('path') || initialPath; - const project = url.searchParams.get('project') || null; - const limit = parseInt(url.searchParams.get('limit') || '20', 10); - 
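/*
 * Both the insights analysis and the active-memory sync build their CLI
 * prompts from the same PURPOSE / TASK / MODE / CONTEXT / EXPECTED / RULES
 * skeleton. A small builder for that convention (sketch; section names
 * follow the prompts above):
 *
 *   function buildCliPrompt(p: { purpose: string; task: string[];
 *       mode: string; context: string; expected: string; rules?: string }): string {
 *     return [
 *       `PURPOSE: ${p.purpose}`,
 *       `TASK:\n${p.task.map(t => `• ${t}`).join('\n')}`,
 *       `MODE: ${p.mode}`,
 *       `CONTEXT: ${p.context}`,
 *       `EXPECTED: ${p.expected}`,
 *       p.rules ? `RULES: ${p.rules}` : '',
 *     ].filter(Boolean).join('\n');
 *   }
 */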
- try { - const memoryStore = getMemoryStore(projectPath); - - let conversations; - if (project) { - const stmt = memoryStore['db'].prepare(` - SELECT * FROM conversations - WHERE project_name = ? - ORDER BY updated_at DESC - LIMIT ? - `); - conversations = stmt.all(project, limit); - } else { - conversations = memoryStore.getConversations(limit); - } - - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ conversations })); - } catch (error: unknown) { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: (error as Error).message })); - } - return; - } - - // API: Memory Module - Replay conversation - if (pathname.startsWith('/api/memory/replay/')) { - const conversationId = pathname.replace('/api/memory/replay/', ''); - const projectPath = url.searchParams.get('path') || initialPath; - - if (!conversationId) { - res.writeHead(400, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: 'Conversation ID is required' })); - return; - } - - try { - const memoryStore = getMemoryStore(projectPath); - const conversation = memoryStore.getConversation(conversationId); - - if (!conversation) { - res.writeHead(404, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: 'Conversation not found' })); - return; - } - - const messages = memoryStore.getMessages(conversationId); - - // Enhance messages with tool calls - const messagesWithTools = []; - for (const message of messages) { - const toolCalls = message.id ? memoryStore.getToolCalls(message.id) : []; - messagesWithTools.push({ - ...message, - tool_calls: toolCalls - }); - } - - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ - conversation, - messages: messagesWithTools - })); - } catch (error: unknown) { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: (error as Error).message })); - } - return; - } - - // API: Memory Module - Import history (async task) - if (pathname === '/api/memory/import' && req.method === 'POST') { - handlePostRequest(req, res, async (body) => { - const { source = 'all', project, path: projectPath } = body; - const basePath = projectPath || initialPath; - - // Generate task ID for async operation - const taskId = `import-${Date.now()}`; - - // TODO: Implement actual history import using HistoryImporter - // For now, return a placeholder response - console.log(`[Memory] Import task ${taskId} started: source=${source}, project=${project}`); - - return { - success: true, - taskId, - message: 'Import task started (not yet implemented)', - source, - project - }; - }); - return; - } - - // API: Update CLAUDE.md using CLI tools (Explorer view) - if (pathname === '/api/update-claude-md' && req.method === 'POST') { - handlePostRequest(req, res, async (body) => { - const { path: targetPath, tool = 'gemini', strategy = 'single-layer' } = body; - if (!targetPath) { - return { error: 'path is required', status: 400 }; - } - return await triggerUpdateClaudeMd(targetPath, tool, strategy); - }); - return; - } - - // ========== Skills & Rules API Routes ========== - - // API: Get single skill detail - if (pathname.startsWith('/api/skills/') && req.method === 'GET' && !pathname.endsWith('/skills/')) { - const skillName = decodeURIComponent(pathname.replace('/api/skills/', '')); - const location = url.searchParams.get('location') || 'project'; - const projectPathParam = url.searchParams.get('path') || initialPath; - const skillDetail = 
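/*
 * The import endpoint already returns a taskId but leaves the work as a
 * TODO. One way to wire it up without blocking the response, assuming the
 * planned HistoryImporter exposes a run() method (importer API and event
 * names are illustrative, not part of this patch):
 *
 *   const taskId = `import-${Date.now()}`;
 *   queueMicrotask(async () => {
 *     try {
 *       const summary = await new HistoryImporter(basePath).run({ source, project });
 *       broadcastToClients({ type: 'MEMORY_IMPORT_DONE', payload: { taskId, summary } });
 *     } catch (err) {
 *       broadcastToClients({ type: 'MEMORY_IMPORT_FAILED',
 *                            payload: { taskId, error: (err as Error).message } });
 *     }
 *   });
 *   return { success: true, taskId };
 */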
getSkillDetail(skillName, location, projectPathParam); - if (skillDetail.error) { - res.writeHead(404, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify(skillDetail)); - } else { - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify(skillDetail)); - } - return; + // Skills routes (/api/skills*) + if (pathname.startsWith('/api/skills')) { + if (await handleSkillsRoutes(routeContext)) return; } - // API: Delete skill - if (pathname.startsWith('/api/skills/') && req.method === 'DELETE') { - const skillName = decodeURIComponent(pathname.replace('/api/skills/', '')); - handlePostRequest(req, res, async (body) => { - const { location, projectPath: projectPathParam } = body; - return deleteSkill(skillName, location, projectPathParam || initialPath); - }); - return; + // Rules routes (/api/rules*) + if (pathname.startsWith('/api/rules')) { + if (await handleRulesRoutes(routeContext)) return; } - // API: Get all rules - if (pathname === '/api/rules') { - const projectPathParam = url.searchParams.get('path') || initialPath; - const rulesData = getRulesConfig(projectPathParam); - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify(rulesData)); - return; + // Session routes (/api/session-detail, /api/update-task-status, /api/bulk-update-task-status) + if (pathname.includes('session') || pathname.includes('task-status')) { + if (await handleSessionRoutes(routeContext)) return; } - // API: Get single rule detail - if (pathname.startsWith('/api/rules/') && req.method === 'GET' && !pathname.endsWith('/rules/')) { - const ruleName = decodeURIComponent(pathname.replace('/api/rules/', '')); - const location = url.searchParams.get('location') || 'project'; - const projectPathParam = url.searchParams.get('path') || initialPath; - const ruleDetail = getRuleDetail(ruleName, location, projectPathParam); - if (ruleDetail.error) { - res.writeHead(404, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify(ruleDetail)); - } else { - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify(ruleDetail)); - } - return; + // Files routes (/api/files, /api/file, /api/file-content, /api/update-claude-md) + if (pathname === '/api/files' || pathname === '/api/file' || + pathname === '/api/file-content' || pathname === '/api/update-claude-md') { + if (await handleFilesRoutes(routeContext)) return; } - // API: Delete rule - if (pathname.startsWith('/api/rules/') && req.method === 'DELETE') { - const ruleName = decodeURIComponent(pathname.replace('/api/rules/', '')); - handlePostRequest(req, res, async (body) => { - const { location, projectPath: projectPathParam } = body; - return deleteRule(ruleName, location, projectPathParam || initialPath); - }); - return; + // System routes (data, health, version, paths, shutdown, notify) + if (pathname === '/api/data' || pathname === '/api/health' || + pathname === '/api/version-check' || pathname === '/api/shutdown' || + pathname === '/api/recent-paths' || pathname === '/api/switch-path' || + pathname === '/api/remove-recent-path' || pathname === '/api/system/notify') { + if (await handleSystemRoutes(routeContext)) return; } // Serve dashboard HTML @@ -2163,7 +303,14 @@ RULES: Be concise. Focus on practical understanding. 
Include function signatures return; } - // Serve static assets (js, css) + // Handle favicon.ico (return empty response to prevent 404) + if (pathname === '/favicon.ico') { + res.writeHead(204); + res.end(); + return; + } + + // Serve static assets (js, css, images, fonts) if (pathname.startsWith('/assets/')) { const assetPath = join(ASSETS_DIR, pathname.replace('/assets/', '')); if (existsSync(assetPath)) { @@ -2221,2112 +368,3 @@ RULES: Be concise. Focus on practical understanding. Include function signatures server.on('error', reject); }); } - -// ======================================== -// WebSocket Functions -// ======================================== - -/** - * Handle WebSocket upgrade - */ -function handleWebSocketUpgrade(req, socket, head) { - const key = req.headers['sec-websocket-key']; - const acceptKey = createHash('sha1') - .update(key + '258EAFA5-E914-47DA-95CA-C5AB0DC85B11') - .digest('base64'); - - const responseHeaders = [ - 'HTTP/1.1 101 Switching Protocols', - 'Upgrade: websocket', - 'Connection: Upgrade', - `Sec-WebSocket-Accept: ${acceptKey}`, - '', - '' - ].join('\r\n'); - - socket.write(responseHeaders); - - // Add to clients set - wsClients.add(socket); - console.log(`[WS] Client connected (${wsClients.size} total)`); - - // Handle incoming messages - socket.on('data', (buffer) => { - try { - const frame = parseWebSocketFrame(buffer); - if (!frame) return; - - const { opcode, payload } = frame; - - switch (opcode) { - case 0x1: // Text frame - if (payload) { - console.log('[WS] Received:', payload); - } - break; - case 0x8: // Close frame - socket.end(); - break; - case 0x9: // Ping frame - respond with Pong - const pongFrame = Buffer.alloc(2); - pongFrame[0] = 0x8A; // Pong opcode with FIN bit - pongFrame[1] = 0x00; // No payload - socket.write(pongFrame); - break; - case 0xA: // Pong frame - ignore - break; - default: - // Ignore other frame types (binary, continuation) - break; - } - } catch (e) { - // Ignore parse errors - } - }); - - // Handle disconnect - socket.on('close', () => { - wsClients.delete(socket); - console.log(`[WS] Client disconnected (${wsClients.size} remaining)`); - }); - - socket.on('error', () => { - wsClients.delete(socket); - }); -} - -/** - * Parse WebSocket frame (simplified) - * Returns { opcode, payload } or null - */ -function parseWebSocketFrame(buffer) { - if (buffer.length < 2) return null; - - const firstByte = buffer[0]; - const opcode = firstByte & 0x0f; // Extract opcode (bits 0-3) - - // Opcode types: - // 0x0 = continuation, 0x1 = text, 0x2 = binary - // 0x8 = close, 0x9 = ping, 0xA = pong - - const secondByte = buffer[1]; - const isMasked = (secondByte & 0x80) !== 0; - let payloadLength = secondByte & 0x7f; - - let offset = 2; - if (payloadLength === 126) { - payloadLength = buffer.readUInt16BE(2); - offset = 4; - } else if (payloadLength === 127) { - payloadLength = Number(buffer.readBigUInt64BE(2)); - offset = 10; - } - - let mask = null; - if (isMasked) { - mask = buffer.slice(offset, offset + 4); - offset += 4; - } - - const payload = buffer.slice(offset, offset + payloadLength); - - if (isMasked && mask) { - for (let i = 0; i < payload.length; i++) { - payload[i] ^= mask[i % 4]; - } - } - - return { opcode, payload: payload.toString('utf8') }; -} - -/** - * Create WebSocket frame - */ -function createWebSocketFrame(data) { - const payload = Buffer.from(JSON.stringify(data), 'utf8'); - const length = payload.length; - - let frame; - if (length <= 125) { - frame = Buffer.alloc(2 + length); - frame[0] = 0x81; // Text 
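/*
 * parseWebSocketFrame above reads one frame from a single 'data' buffer,
 * so fragmented frames, multiple frames per TCP chunk, and frames split
 * across chunks are not handled. A more robust reader accumulates bytes
 * and drains complete frames in a loop (sketch; tryParseFrame and
 * handleFrame are illustrative, with tryParseFrame returning null until a
 * whole frame is buffered):
 *
 *   let pending = Buffer.alloc(0);
 *   socket.on('data', (chunk: Buffer) => {
 *     pending = Buffer.concat([pending, chunk]);
 *     for (;;) {
 *       const frame = tryParseFrame(pending);   // null if incomplete
 *       if (!frame) break;
 *       pending = pending.subarray(frame.byteLength);
 *       handleFrame(frame);
 *     }
 *   });
 */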
frame, FIN - frame[1] = length; - payload.copy(frame, 2); - } else if (length <= 65535) { - frame = Buffer.alloc(4 + length); - frame[0] = 0x81; - frame[1] = 126; - frame.writeUInt16BE(length, 2); - payload.copy(frame, 4); - } else { - frame = Buffer.alloc(10 + length); - frame[0] = 0x81; - frame[1] = 127; - frame.writeBigUInt64BE(BigInt(length), 2); - payload.copy(frame, 10); - } - - return frame; -} - -/** - * Broadcast message to all connected WebSocket clients - */ -function broadcastToClients(data) { - const frame = createWebSocketFrame(data); - - for (const client of wsClients) { - try { - client.write(frame); - } catch (e) { - wsClients.delete(client); - } - } - - console.log(`[WS] Broadcast to ${wsClients.size} clients:`, data.type); -} - -/** - * Extract session ID from file path - */ -function extractSessionIdFromPath(filePath) { - // Normalize path - const normalized = filePath.replace(/\\/g, '/'); - - // Look for session pattern: WFS-xxx, WRS-xxx, etc. - const sessionMatch = normalized.match(/\/(W[A-Z]S-[^/]+)\//); - if (sessionMatch) { - return sessionMatch[1]; - } - - // Look for .workflow/.sessions/xxx pattern - const sessionsMatch = normalized.match(/\.workflow\/\.sessions\/([^/]+)/); - if (sessionsMatch) { - return sessionsMatch[1]; - } - - // Look for lite-plan/lite-fix pattern - const liteMatch = normalized.match(/\.(lite-plan|lite-fix)\/([^/]+)/); - if (liteMatch) { - return liteMatch[2]; - } - - return null; -} - -/** - * Get workflow data for a project path - * @param {string} projectPath - * @returns {Promise} - */ -async function getWorkflowData(projectPath) { - const resolvedPath = resolvePath(projectPath); - const workflowDir = join(resolvedPath, '.workflow'); - - // Track this path - trackRecentPath(resolvedPath); - - // Check if .workflow exists - if (!existsSync(workflowDir)) { - return { - generatedAt: new Date().toISOString(), - activeSessions: [], - archivedSessions: [], - liteTasks: { litePlan: [], liteFix: [] }, - reviewData: { dimensions: {} }, - projectOverview: null, - statistics: { - totalSessions: 0, - activeSessions: 0, - totalTasks: 0, - completedTasks: 0, - reviewFindings: 0, - litePlanCount: 0, - liteFixCount: 0 - }, - projectPath: normalizePathForDisplay(resolvedPath), - recentPaths: getRecentPaths() - }; - } - - // Scan and aggregate data - const sessions = await scanSessions(workflowDir); - const data = await aggregateData(sessions, workflowDir); - - data.projectPath = normalizePathForDisplay(resolvedPath); - data.recentPaths = getRecentPaths(); - - return data; -} - -/** - * Get session detail data (context, summaries, impl-plan, review) - * @param {string} sessionPath - Path to session directory - * @param {string} dataType - Type of data to load: context, summary, impl-plan, review, or all - * @returns {Promise} - */ -async function getSessionDetailData(sessionPath, dataType) { - const result = {}; - - // Normalize path - const normalizedPath = sessionPath.replace(/\\/g, '/'); - - try { - // Load context-package.json (in .process/ subfolder) - if (dataType === 'context' || dataType === 'all') { - // Try .process/context-package.json first (common location) - let contextFile = join(normalizedPath, '.process', 'context-package.json'); - if (!existsSync(contextFile)) { - // Fallback to session root - contextFile = join(normalizedPath, 'context-package.json'); - } - if (existsSync(contextFile)) { - try { - result.context = JSON.parse(readFileSync(contextFile, 'utf8')); - } catch (e) { - result.context = null; - } - } - } - - // Load task JSONs 
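/*
 * extractSessionIdFromPath recognizes three layouts in priority order:
 * a /W?S-xxx/ segment (WFS-, WRS-, ...), .workflow/.sessions/<id>, and
 * .lite-plan|.lite-fix/<id>. For example, given the regexes above:
 *
 *   extractSessionIdFromPath('a/.workflow/.sessions/WFS-auth/ctx.json')
 *     // -> 'WFS-auth' (matched by the /W[A-Z]S-/ segment pattern first)
 *   extractSessionIdFromPath('a/.lite-fix/fix-123/plan.json')
 *     // -> 'fix-123'
 *   extractSessionIdFromPath('a/src/index.ts')
 *     // -> null
 */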
from .task/ folder - if (dataType === 'tasks' || dataType === 'all') { - const taskDir = join(normalizedPath, '.task'); - result.tasks = []; - if (existsSync(taskDir)) { - const files = readdirSync(taskDir).filter(f => f.endsWith('.json') && f.startsWith('IMPL-')); - for (const file of files) { - try { - const content = JSON.parse(readFileSync(join(taskDir, file), 'utf8')); - result.tasks.push({ - filename: file, - task_id: file.replace('.json', ''), - ...content - }); - } catch (e) { - // Skip unreadable files - } - } - // Sort by task ID - result.tasks.sort((a, b) => a.task_id.localeCompare(b.task_id)); - } - } - - // Load summaries from .summaries/ - if (dataType === 'summary' || dataType === 'all') { - const summariesDir = join(normalizedPath, '.summaries'); - result.summaries = []; - if (existsSync(summariesDir)) { - const files = readdirSync(summariesDir).filter(f => f.endsWith('.md')); - for (const file of files) { - try { - const content = readFileSync(join(summariesDir, file), 'utf8'); - result.summaries.push({ name: file.replace('.md', ''), content }); - } catch (e) { - // Skip unreadable files - } - } - } - } - - // Load plan.json (for lite tasks) - if (dataType === 'plan' || dataType === 'all') { - const planFile = join(normalizedPath, 'plan.json'); - if (existsSync(planFile)) { - try { - result.plan = JSON.parse(readFileSync(planFile, 'utf8')); - } catch (e) { - result.plan = null; - } - } - } - - // Load explorations (exploration-*.json files) - check .process/ first, then session root - if (dataType === 'context' || dataType === 'explorations' || dataType === 'all') { - result.explorations = { manifest: null, data: {} }; - - // Try .process/ first (standard workflow sessions), then session root (lite tasks) - const searchDirs = [ - join(normalizedPath, '.process'), - normalizedPath - ]; - - for (const searchDir of searchDirs) { - if (!existsSync(searchDir)) continue; - - // Look for explorations-manifest.json - const manifestFile = join(searchDir, 'explorations-manifest.json'); - if (existsSync(manifestFile)) { - try { - result.explorations.manifest = JSON.parse(readFileSync(manifestFile, 'utf8')); - - // Load each exploration file based on manifest - const explorations = result.explorations.manifest.explorations || []; - for (const exp of explorations) { - const expFile = join(searchDir, exp.file); - if (existsSync(expFile)) { - try { - result.explorations.data[exp.angle] = JSON.parse(readFileSync(expFile, 'utf8')); - } catch (e) { - // Skip unreadable exploration files - } - } - } - break; // Found manifest, stop searching - } catch (e) { - result.explorations.manifest = null; - } - } else { - // Fallback: scan for exploration-*.json files directly - try { - const files = readdirSync(searchDir).filter(f => f.startsWith('exploration-') && f.endsWith('.json')); - if (files.length > 0) { - // Create synthetic manifest - result.explorations.manifest = { - exploration_count: files.length, - explorations: files.map((f, i) => ({ - angle: f.replace('exploration-', '').replace('.json', ''), - file: f, - index: i + 1 - })) - }; - - // Load each file - for (const file of files) { - const angle = file.replace('exploration-', '').replace('.json', ''); - try { - result.explorations.data[angle] = JSON.parse(readFileSync(join(searchDir, file), 'utf8')); - } catch (e) { - // Skip unreadable files - } - } - break; // Found explorations, stop searching - } - } catch (e) { - // Directory read failed - } - } - } - } - - // Load conflict resolution decisions (conflict-resolution-decisions.json) 
- if (dataType === 'context' || dataType === 'conflict' || dataType === 'all') { - result.conflictResolution = null; - - // Try .process/ first (standard workflow sessions) - const conflictFiles = [ - join(normalizedPath, '.process', 'conflict-resolution-decisions.json'), - join(normalizedPath, 'conflict-resolution-decisions.json') - ]; - - for (const conflictFile of conflictFiles) { - if (existsSync(conflictFile)) { - try { - result.conflictResolution = JSON.parse(readFileSync(conflictFile, 'utf8')); - break; // Found file, stop searching - } catch (e) { - // Skip unreadable file - } - } - } - } - - // Load IMPL_PLAN.md - if (dataType === 'impl-plan' || dataType === 'all') { - const implPlanFile = join(normalizedPath, 'IMPL_PLAN.md'); - if (existsSync(implPlanFile)) { - try { - result.implPlan = readFileSync(implPlanFile, 'utf8'); - } catch (e) { - result.implPlan = null; - } - } - } - - // Load review data from .review/ - if (dataType === 'review' || dataType === 'all') { - const reviewDir = join(normalizedPath, '.review'); - result.review = { - state: null, - dimensions: [], - severityDistribution: null, - totalFindings: 0 - }; - - if (existsSync(reviewDir)) { - // Load review-state.json - const stateFile = join(reviewDir, 'review-state.json'); - if (existsSync(stateFile)) { - try { - const state = JSON.parse(readFileSync(stateFile, 'utf8')); - result.review.state = state; - result.review.severityDistribution = state.severity_distribution || {}; - result.review.totalFindings = state.total_findings || 0; - result.review.phase = state.phase || 'unknown'; - result.review.dimensionSummaries = state.dimension_summaries || {}; - result.review.crossCuttingConcerns = state.cross_cutting_concerns || []; - result.review.criticalFiles = state.critical_files || []; - } catch (e) { - // Skip unreadable state - } - } - - // Load dimension findings - const dimensionsDir = join(reviewDir, 'dimensions'); - if (existsSync(dimensionsDir)) { - const files = readdirSync(dimensionsDir).filter(f => f.endsWith('.json')); - for (const file of files) { - try { - const dimName = file.replace('.json', ''); - const data = JSON.parse(readFileSync(join(dimensionsDir, file), 'utf8')); - - // Handle array structure: [ { findings: [...] 
} ] - let findings = []; - let summary = null; - - if (Array.isArray(data) && data.length > 0) { - const dimData = data[0]; - findings = dimData.findings || []; - summary = dimData.summary || null; - } else if (data.findings) { - findings = data.findings; - summary = data.summary || null; - } - - result.review.dimensions.push({ - name: dimName, - findings: findings, - summary: summary, - count: findings.length - }); - } catch (e) { - // Skip unreadable files - } - } - } - } - } - - } catch (error: unknown) { - console.error('Error loading session detail:', error); - result.error = (error as Error).message; - } - - return result; -} - -/** - * Update task status in a task JSON file - * @param {string} sessionPath - Path to session directory - * @param {string} taskId - Task ID (e.g., IMPL-001) - * @param {string} newStatus - New status (pending, in_progress, completed) - * @returns {Promise} - */ -async function updateTaskStatus(sessionPath, taskId, newStatus) { - // Normalize path (handle both forward and back slashes) - let normalizedPath = sessionPath.replace(/\\/g, '/'); - - // Handle Windows drive letter format - if (normalizedPath.match(/^[a-zA-Z]:\//)) { - // Already in correct format - } else if (normalizedPath.match(/^\/[a-zA-Z]\//)) { - // Convert /D/path to D:/path - normalizedPath = normalizedPath.charAt(1).toUpperCase() + ':' + normalizedPath.slice(2); - } - - const taskDir = join(normalizedPath, '.task'); - - // Check if task directory exists - if (!existsSync(taskDir)) { - throw new Error(`Task directory not found: ${taskDir}`); - } - - // Try to find the task file - let taskFile = join(taskDir, `${taskId}.json`); - - if (!existsSync(taskFile)) { - // Try without .json if taskId already has it - if (taskId.endsWith('.json')) { - taskFile = join(taskDir, taskId); - } - if (!existsSync(taskFile)) { - throw new Error(`Task file not found: ${taskId}.json in ${taskDir}`); - } - } - - try { - const content = JSON.parse(readFileSync(taskFile, 'utf8')); - const oldStatus = content.status || 'pending'; - content.status = newStatus; - - // Add status change timestamp - if (!content.status_history) { - content.status_history = []; - } - content.status_history.push({ - from: oldStatus, - to: newStatus, - changed_at: new Date().toISOString() - }); - - writeFileSync(taskFile, JSON.stringify(content, null, 2), 'utf8'); - - return { - success: true, - taskId, - oldStatus, - newStatus, - file: taskFile - }; - } catch (error: unknown) { - throw new Error(`Failed to update task ${taskId}: ${(error as Error).message}`); - } -} - -/** - * Generate dashboard HTML for server mode - * @param {string} initialPath - * @returns {string} - */ -function generateServerDashboard(initialPath) { - let html = readFileSync(TEMPLATE_PATH, 'utf8'); - - // Read and concatenate modular CSS files in load order - const cssContent = MODULE_CSS_FILES.map(file => { - const filePath = join(MODULE_CSS_DIR, file); - return existsSync(filePath) ? readFileSync(filePath, 'utf8') : ''; - }).join('\n\n'); - - // Read and concatenate modular JS files in dependency order - let jsContent = MODULE_FILES.map(file => { - const filePath = join(MODULE_JS_DIR, file); - return existsSync(filePath) ? 
readFileSync(filePath, 'utf8') : ''; - }).join('\n\n'); - - // Inject CSS content - html = html.replace('{{CSS_CONTENT}}', cssContent); - - // Prepare JS content with empty initial data (will be loaded dynamically) - const emptyData = { - generatedAt: new Date().toISOString(), - activeSessions: [], - archivedSessions: [], - liteTasks: { litePlan: [], liteFix: [] }, - reviewData: { dimensions: {} }, - projectOverview: null, - statistics: { totalSessions: 0, activeSessions: 0, totalTasks: 0, completedTasks: 0, reviewFindings: 0, litePlanCount: 0, liteFixCount: 0 } - }; - - // Replace JS placeholders - jsContent = jsContent.replace('{{WORKFLOW_DATA}}', JSON.stringify(emptyData, null, 2)); - jsContent = jsContent.replace(/\{\{PROJECT_PATH\}\}/g, normalizePathForDisplay(initialPath).replace(/\\/g, '/')); - jsContent = jsContent.replace('{{RECENT_PATHS}}', JSON.stringify(getRecentPaths())); - - // Add server mode flag at the start of JS - // Note: loadDashboardData and loadRecentPaths are defined in api.js module - const serverModeScript = ` -// Server mode - load data dynamically -window.SERVER_MODE = true; -window.INITIAL_PATH = '${normalizePathForDisplay(initialPath).replace(/\\/g, '/')}'; -`; - - // Prepend server mode script to JS content - jsContent = serverModeScript + jsContent; - - // Inject JS content - html = html.replace('{{JS_CONTENT}}', jsContent); - - // Replace any remaining placeholders in HTML - html = html.replace(/\{\{PROJECT_PATH\}\}/g, normalizePathForDisplay(initialPath).replace(/\\/g, '/')); - - return html; -} - -// ======================================== -// MCP Configuration Functions -// ======================================== - -/** - * Derive prompt intent from text content - */ -function derivePromptIntent(text: string): string { - const lower = text.toLowerCase(); - - // Implementation/coding patterns - if (/实现|implement|create|add|build|write|develop|make/.test(lower)) return 'implement'; - if (/修复|fix|bug|error|issue|problem|解决/.test(lower)) return 'fix'; - if (/重构|refactor|optimize|improve|clean/.test(lower)) return 'refactor'; - if (/测试|test|spec|coverage/.test(lower)) return 'test'; - - // Analysis patterns - if (/分析|analyze|review|check|examine|audit/.test(lower)) return 'analyze'; - if (/解释|explain|what|how|why|understand/.test(lower)) return 'explain'; - if (/搜索|search|find|look|where|locate/.test(lower)) return 'search'; - - // Documentation patterns - if (/文档|document|readme|comment|注释/.test(lower)) return 'document'; - - // Planning patterns - if (/计划|plan|design|architect|strategy/.test(lower)) return 'plan'; - - // Configuration patterns - if (/配置|config|setup|install|设置/.test(lower)) return 'configure'; - - // Default - return 'general'; -} - -/** - * Calculate prompt quality score (0-100) - */ -function calculateQualityScore(text: string): number { - let score = 50; // Base score - - // Length factors - const length = text.length; - if (length > 50 && length < 500) score += 15; - else if (length >= 500 && length < 1000) score += 10; - else if (length < 20) score -= 20; - - // Specificity indicators - if (/file|path|function|class|method|variable/i.test(text)) score += 10; - if (/src\/|\.ts|\.js|\.py|\.go/i.test(text)) score += 10; - - // Context indicators - if (/when|after|before|because|since/i.test(text)) score += 5; - - // Action clarity - if (/please|要|请|帮|help/i.test(text)) score += 5; - - // Structure indicators - if (/\d+\.|•|-\s/.test(text)) score += 10; // Lists - - // Cap at 100 - return Math.min(100, Math.max(0, score)); -} - -/** - * 
Safely read and parse JSON file - * @param {string} filePath - * @returns {Object|null} - */ -function safeReadJson(filePath) { - try { - if (!existsSync(filePath)) return null; - const content = readFileSync(filePath, 'utf8'); - return JSON.parse(content); - } catch { - return null; - } -} - -/** - * Get MCP servers from a JSON file (expects mcpServers key at top level) - * @param {string} filePath - * @returns {Object} mcpServers object or empty object - */ -function getMcpServersFromFile(filePath) { - const config = safeReadJson(filePath); - if (!config) return {}; - return config.mcpServers || {}; -} - -/** - * Get MCP configuration from multiple sources (per official Claude Code docs): - * - * Priority (highest to lowest): - * 1. Enterprise managed-mcp.json (cannot be overridden) - * 2. Local scope (project-specific private in ~/.claude.json) - * 3. Project scope (.mcp.json in project root) - * 4. User scope (mcpServers in ~/.claude.json) - * - * Note: ~/.claude/settings.json is for MCP PERMISSIONS, NOT definitions! - * - * @returns {Object} - */ -function getMcpConfig() { - try { - const result = { - projects: {}, - userServers: {}, // User-level servers from ~/.claude.json mcpServers - enterpriseServers: {}, // Enterprise managed servers (highest priority) - configSources: [] // Track where configs came from for debugging - }; - - // 1. Read Enterprise managed MCP servers (highest priority) - const enterprisePath = getEnterpriseMcpPath(); - if (existsSync(enterprisePath)) { - const enterpriseConfig = safeReadJson(enterprisePath); - if (enterpriseConfig?.mcpServers) { - result.enterpriseServers = enterpriseConfig.mcpServers; - result.configSources.push({ type: 'enterprise', path: enterprisePath, count: Object.keys(enterpriseConfig.mcpServers).length }); - } - } - - // 2. Read from ~/.claude.json - if (existsSync(CLAUDE_CONFIG_PATH)) { - const claudeConfig = safeReadJson(CLAUDE_CONFIG_PATH); - if (claudeConfig) { - // 2a. User-level mcpServers (top-level mcpServers key) - if (claudeConfig.mcpServers) { - result.userServers = claudeConfig.mcpServers; - result.configSources.push({ type: 'user', path: CLAUDE_CONFIG_PATH, count: Object.keys(claudeConfig.mcpServers).length }); - } - - // 2b. Project-specific configurations (projects[path].mcpServers) - if (claudeConfig.projects) { - result.projects = claudeConfig.projects; - } - } - } - - // 3. 
For each known project, check for .mcp.json (project-level config) - const projectPaths = Object.keys(result.projects); - for (const projectPath of projectPaths) { - const mcpJsonPath = join(projectPath, '.mcp.json'); - if (existsSync(mcpJsonPath)) { - const mcpJsonConfig = safeReadJson(mcpJsonPath); - if (mcpJsonConfig?.mcpServers) { - // Merge .mcp.json servers into project config - // Project's .mcp.json has lower priority than ~/.claude.json projects[path].mcpServers - const existingServers = result.projects[projectPath]?.mcpServers || {}; - result.projects[projectPath] = { - ...result.projects[projectPath], - mcpServers: { - ...mcpJsonConfig.mcpServers, // .mcp.json (lower priority) - ...existingServers // ~/.claude.json projects[path] (higher priority) - }, - mcpJsonPath: mcpJsonPath // Track source for debugging - }; - result.configSources.push({ type: 'project-mcp-json', path: mcpJsonPath, count: Object.keys(mcpJsonConfig.mcpServers).length }); - } - } - } - - // Build globalServers by merging user and enterprise servers - // Enterprise servers override user servers - result.globalServers = { - ...result.userServers, - ...result.enterpriseServers - }; - - return result; - } catch (error: unknown) { - console.error('Error reading MCP config:', error); - return { projects: {}, globalServers: {}, userServers: {}, enterpriseServers: {}, configSources: [], error: (error as Error).message }; - } -} - -/** - * Normalize project path for .claude.json (Windows backslash format) - * @param {string} path - * @returns {string} - */ -function normalizeProjectPathForConfig(path) { - // Convert forward slashes to backslashes for Windows .claude.json format - let normalized = path.replace(/\//g, '\\'); - - // Handle /d/path format -> D:\path - if (normalized.match(/^\\[a-zA-Z]\\/)) { - normalized = normalized.charAt(1).toUpperCase() + ':' + normalized.slice(2); - } - - return normalized; -} - -/** - * Toggle MCP server enabled/disabled - * @param {string} projectPath - * @param {string} serverName - * @param {boolean} enable - * @returns {Object} - */ -function toggleMcpServerEnabled(projectPath, serverName, enable) { - try { - if (!existsSync(CLAUDE_CONFIG_PATH)) { - return { error: '.claude.json not found' }; - } - - const content = readFileSync(CLAUDE_CONFIG_PATH, 'utf8'); - const config = JSON.parse(content); - - const normalizedPath = normalizeProjectPathForConfig(projectPath); - - if (!config.projects || !config.projects[normalizedPath]) { - return { error: `Project not found: ${normalizedPath}` }; - } - - const projectConfig = config.projects[normalizedPath]; - - // Ensure disabledMcpServers array exists - if (!projectConfig.disabledMcpServers) { - projectConfig.disabledMcpServers = []; - } - - if (enable) { - // Remove from disabled list - projectConfig.disabledMcpServers = projectConfig.disabledMcpServers.filter(s => s !== serverName); - } else { - // Add to disabled list if not already there - if (!projectConfig.disabledMcpServers.includes(serverName)) { - projectConfig.disabledMcpServers.push(serverName); - } - } - - // Write back to file - writeFileSync(CLAUDE_CONFIG_PATH, JSON.stringify(config, null, 2), 'utf8'); - - return { - success: true, - serverName, - enabled: enable, - disabledMcpServers: projectConfig.disabledMcpServers - }; - } catch (error: unknown) { - console.error('Error toggling MCP server:', error); - return { error: (error as Error).message }; - } -} - -/** - * Add MCP server to project - * @param {string} projectPath - * @param {string} serverName - * @param {Object} 
serverConfig - * @returns {Object} - */ -function addMcpServerToProject(projectPath, serverName, serverConfig) { - try { - if (!existsSync(CLAUDE_CONFIG_PATH)) { - return { error: '.claude.json not found' }; - } - - const content = readFileSync(CLAUDE_CONFIG_PATH, 'utf8'); - const config = JSON.parse(content); - - const normalizedPath = normalizeProjectPathForConfig(projectPath); - - // Create project entry if it doesn't exist - if (!config.projects) { - config.projects = {}; - } - - if (!config.projects[normalizedPath]) { - config.projects[normalizedPath] = { - allowedTools: [], - mcpContextUris: [], - mcpServers: {}, - enabledMcpjsonServers: [], - disabledMcpjsonServers: [], - hasTrustDialogAccepted: false, - projectOnboardingSeenCount: 0, - hasClaudeMdExternalIncludesApproved: false, - hasClaudeMdExternalIncludesWarningShown: false - }; - } - - const projectConfig = config.projects[normalizedPath]; - - // Ensure mcpServers exists - if (!projectConfig.mcpServers) { - projectConfig.mcpServers = {}; - } - - // Add the server - projectConfig.mcpServers[serverName] = serverConfig; - - // Write back to file - writeFileSync(CLAUDE_CONFIG_PATH, JSON.stringify(config, null, 2), 'utf8'); - - return { - success: true, - serverName, - serverConfig - }; - } catch (error: unknown) { - console.error('Error adding MCP server:', error); - return { error: (error as Error).message }; - } -} - -/** - * Remove MCP server from project - * @param {string} projectPath - * @param {string} serverName - * @returns {Object} - */ -function removeMcpServerFromProject(projectPath, serverName) { - try { - if (!existsSync(CLAUDE_CONFIG_PATH)) { - return { error: '.claude.json not found' }; - } - - const content = readFileSync(CLAUDE_CONFIG_PATH, 'utf8'); - const config = JSON.parse(content); - - const normalizedPath = normalizeProjectPathForConfig(projectPath); - - if (!config.projects || !config.projects[normalizedPath]) { - return { error: `Project not found: ${normalizedPath}` }; - } - - const projectConfig = config.projects[normalizedPath]; - - if (!projectConfig.mcpServers || !projectConfig.mcpServers[serverName]) { - return { error: `Server not found: ${serverName}` }; - } - - // Remove the server - delete projectConfig.mcpServers[serverName]; - - // Also remove from disabled list if present - if (projectConfig.disabledMcpServers) { - projectConfig.disabledMcpServers = projectConfig.disabledMcpServers.filter(s => s !== serverName); - } - - // Write back to file - writeFileSync(CLAUDE_CONFIG_PATH, JSON.stringify(config, null, 2), 'utf8'); - - return { - success: true, - serverName, - removed: true - }; - } catch (error: unknown) { - console.error('Error removing MCP server:', error); - return { error: (error as Error).message }; - } -} - -// ======================================== -// Hook Configuration Functions -// ======================================== - -const GLOBAL_SETTINGS_PATH = join(homedir(), '.claude', 'settings.json'); - -/** - * Get project settings path - * @param {string} projectPath - * @returns {string} - */ -function getProjectSettingsPath(projectPath) { - const normalizedPath = projectPath.replace(/\//g, '\\').replace(/^\\([a-zA-Z])\\/, '$1:\\'); - return join(normalizedPath, '.claude', 'settings.json'); -} - -/** - * Read settings file safely - * @param {string} filePath - * @returns {Object} - */ -function readSettingsFile(filePath) { - try { - if (!existsSync(filePath)) { - return { hooks: {} }; - } - const content = readFileSync(filePath, 'utf8'); - return JSON.parse(content); - } catch 
(error: unknown) { - console.error(`Error reading settings file ${filePath}:`, error); - return { hooks: {} }; - } -} - -/** - * Write settings file safely - * @param {string} filePath - * @param {Object} settings - */ -function writeSettingsFile(filePath, settings) { - const dirPath = dirname(filePath); - // Ensure directory exists - if (!existsSync(dirPath)) { - mkdirSync(dirPath, { recursive: true }); - } - writeFileSync(filePath, JSON.stringify(settings, null, 2), 'utf8'); -} - - -/** - * Discover SKILL packages in project - * @param {string} projectPath - Project root path - * @returns {Object} - List of discovered SKILL packages - */ -async function discoverSkillPackages(projectPath) { - const skills = []; - const skillsDir = join(projectPath, '.claude', 'skills'); - - try { - // Check if skills directory exists - if (!existsSync(skillsDir)) { - return { skills: [], skillsDir: null }; - } - - // Read all subdirectories in skills folder - const entries = readdirSync(skillsDir, { withFileTypes: true }); - - for (const entry of entries) { - if (entry.isDirectory()) { - const skillPath = join(skillsDir, entry.name); - const skillMdPath = join(skillPath, 'SKILL.md'); - - // Check if SKILL.md exists - if (existsSync(skillMdPath)) { - const skillContent = readFileSync(skillMdPath, 'utf8'); - - // Parse YAML frontmatter - let metadata = { name: entry.name, description: '' }; - const frontmatterRegex = /^---\n([\s\S]*?)\n---/; - const frontmatterMatch = skillContent.match(frontmatterRegex); - if (frontmatterMatch) { - const yaml = frontmatterMatch[1]; - const nameMatch = yaml.match(/name:\s*(.+)/); - const descMatch = yaml.match(/description:\s*(.+)/); - if (nameMatch) metadata.name = nameMatch[1].trim(); - if (descMatch) metadata.description = descMatch[1].trim(); - } - - skills.push({ - id: entry.name, - name: metadata.name, - description: metadata.description, - path: skillPath, - skillMdPath: skillMdPath - }); - } - } - } - - return { skills, skillsDir }; - } catch (err) { - console.error('Error discovering SKILL packages:', err); - return { skills: [], skillsDir: null, error: err.message }; - } -} - -/** - * Get hooks configuration from both global and project settings - * @param {string} projectPath - * @returns {Object} - */ -function getHooksConfig(projectPath) { - const globalSettings = readSettingsFile(GLOBAL_SETTINGS_PATH); - const projectSettingsPath = projectPath ? getProjectSettingsPath(projectPath) : null; - const projectSettings = projectSettingsPath ? readSettingsFile(projectSettingsPath) : { hooks: {} }; - - return { - global: { - path: GLOBAL_SETTINGS_PATH, - hooks: globalSettings.hooks || {} - }, - project: { - path: projectSettingsPath, - hooks: projectSettings.hooks || {} - } - }; -} - -/** - * Save a hook to settings file - * @param {string} projectPath - * @param {string} scope - 'global' or 'project' - * @param {string} event - Hook event type - * @param {Object} hookData - Hook configuration - * @returns {Object} - */ -function saveHookToSettings(projectPath, scope, event, hookData) { - try { - const filePath = scope === 'global' ? 
GLOBAL_SETTINGS_PATH : getProjectSettingsPath(projectPath); - const settings = readSettingsFile(filePath); - - // Ensure hooks object exists - if (!settings.hooks) { - settings.hooks = {}; - } - - // Ensure the event array exists - if (!settings.hooks[event]) { - settings.hooks[event] = []; - } - - // Ensure it's an array - if (!Array.isArray(settings.hooks[event])) { - settings.hooks[event] = [settings.hooks[event]]; - } - - // Check if we're replacing an existing hook - if (hookData.replaceIndex !== undefined) { - const index = hookData.replaceIndex; - delete hookData.replaceIndex; - if (index >= 0 && index < settings.hooks[event].length) { - settings.hooks[event][index] = hookData; - } - } else { - // Add new hook - settings.hooks[event].push(hookData); - } - - // Ensure directory exists and write file - const dirPath = dirname(filePath); - if (!existsSync(dirPath)) { - mkdirSync(dirPath, { recursive: true }); - } - writeFileSync(filePath, JSON.stringify(settings, null, 2), 'utf8'); - - return { - success: true, - event, - hookData - }; - } catch (error: unknown) { - console.error('Error saving hook:', error); - return { error: (error as Error).message }; - } -} - -/** - * Delete a hook from settings file - * @param {string} projectPath - * @param {string} scope - 'global' or 'project' - * @param {string} event - Hook event type - * @param {number} hookIndex - Index of hook to delete - * @returns {Object} - */ -function deleteHookFromSettings(projectPath, scope, event, hookIndex) { - try { - const filePath = scope === 'global' ? GLOBAL_SETTINGS_PATH : getProjectSettingsPath(projectPath); - const settings = readSettingsFile(filePath); - - if (!settings.hooks || !settings.hooks[event]) { - return { error: 'Hook not found' }; - } - - // Ensure it's an array - if (!Array.isArray(settings.hooks[event])) { - settings.hooks[event] = [settings.hooks[event]]; - } - - if (hookIndex < 0 || hookIndex >= settings.hooks[event].length) { - return { error: 'Invalid hook index' }; - } - - // Remove the hook - settings.hooks[event].splice(hookIndex, 1); - - // Remove empty event arrays - if (settings.hooks[event].length === 0) { - delete settings.hooks[event]; - } - - writeFileSync(filePath, JSON.stringify(settings, null, 2), 'utf8'); - - return { - success: true, - event, - hookIndex - }; - } catch (error: unknown) { - console.error('Error deleting hook:', error); - return { error: (error as Error).message }; - } -} - -// ======================================== -// Explorer View Functions -// ======================================== - -// Directories to always exclude from file tree -const EXPLORER_EXCLUDE_DIRS = [ - '.git', '__pycache__', 'node_modules', '.venv', 'venv', 'env', - 'dist', 'build', '.cache', '.pytest_cache', '.mypy_cache', - 'coverage', '.nyc_output', 'logs', 'tmp', 'temp', '.next', - '.nuxt', '.output', '.turbo', '.parcel-cache' -]; - -// File extensions to language mapping for syntax highlighting -const EXT_TO_LANGUAGE = { - '.js': 'javascript', - '.jsx': 'javascript', - '.ts': 'typescript', - '.tsx': 'typescript', - '.py': 'python', - '.rb': 'ruby', - '.java': 'java', - '.go': 'go', - '.rs': 'rust', - '.c': 'c', - '.cpp': 'cpp', - '.h': 'c', - '.hpp': 'cpp', - '.cs': 'csharp', - '.php': 'php', - '.swift': 'swift', - '.kt': 'kotlin', - '.scala': 'scala', - '.sh': 'bash', - '.bash': 'bash', - '.zsh': 'bash', - '.ps1': 'powershell', - '.sql': 'sql', - '.html': 'html', - '.htm': 'html', - '.css': 'css', - '.scss': 'scss', - '.sass': 'sass', - '.less': 'less', - '.json': 'json', - '.xml': 
'xml', - '.yaml': 'yaml', - '.yml': 'yaml', - '.toml': 'toml', - '.ini': 'ini', - '.cfg': 'ini', - '.conf': 'nginx', - '.md': 'markdown', - '.markdown': 'markdown', - '.txt': 'plaintext', - '.log': 'plaintext', - '.env': 'bash', - '.dockerfile': 'dockerfile', - '.vue': 'html', - '.svelte': 'html' -}; - -/** - * Parse .gitignore file and return patterns - * @param {string} gitignorePath - Path to .gitignore file - * @returns {string[]} Array of gitignore patterns - */ -function parseGitignore(gitignorePath) { - try { - if (!existsSync(gitignorePath)) return []; - const content = readFileSync(gitignorePath, 'utf8'); - return content - .split('\n') - .map(line => line.trim()) - .filter(line => line && !line.startsWith('#')); - } catch { - return []; - } -} - -/** - * Check if a file/directory should be ignored based on gitignore patterns - * Simple pattern matching (supports basic glob patterns) - * @param {string} name - File or directory name - * @param {string[]} patterns - Gitignore patterns - * @param {boolean} isDirectory - Whether the entry is a directory - * @returns {boolean} - */ -function shouldIgnore(name, patterns, isDirectory) { - // Always exclude certain directories - if (isDirectory && EXPLORER_EXCLUDE_DIRS.includes(name)) { - return true; - } - - // Skip hidden files/directories (starting with .) - if (name.startsWith('.') && name !== '.claude' && name !== '.workflow') { - return true; - } - - for (const pattern of patterns) { - let p = pattern; - - // Handle negation patterns (we skip them for simplicity) - if (p.startsWith('!')) continue; - - // Handle directory-only patterns - if (p.endsWith('/')) { - if (!isDirectory) continue; - p = p.slice(0, -1); - } - - // Simple pattern matching - if (p === name) return true; - - // Handle wildcard patterns - if (p.includes('*')) { - const regex = new RegExp('^' + p.replace(/\./g, '\\.').replace(/\*/g, '.*') + '$'); - if (regex.test(name)) return true; - } - - // Handle extension patterns like *.log - if (p.startsWith('*.')) { - const ext = p.slice(1); - if (name.endsWith(ext)) return true; - } - } - - return false; -} - -/** - * List directory files with .gitignore filtering - * @param {string} dirPath - Directory path to list - * @returns {Promise} - */ -async function listDirectoryFiles(dirPath) { - try { - // Normalize path - let normalizedPath = dirPath.replace(/\\/g, '/'); - if (normalizedPath.match(/^\/[a-zA-Z]\//)) { - normalizedPath = normalizedPath.charAt(1).toUpperCase() + ':' + normalizedPath.slice(2); - } - - if (!existsSync(normalizedPath)) { - return { error: 'Directory not found', files: [] }; - } - - if (!statSync(normalizedPath).isDirectory()) { - return { error: 'Not a directory', files: [] }; - } - - // Parse .gitignore patterns - const gitignorePath = join(normalizedPath, '.gitignore'); - const gitignorePatterns = parseGitignore(gitignorePath); - - // Read directory entries - const entries = readdirSync(normalizedPath, { withFileTypes: true }); - - const files = []; - for (const entry of entries) { - const isDirectory = entry.isDirectory(); - - // Check if should be ignored - if (shouldIgnore(entry.name, gitignorePatterns, isDirectory)) { - continue; - } - - const entryPath = join(normalizedPath, entry.name); - const fileInfo = { - name: entry.name, - type: isDirectory ? 
'directory' : 'file', - path: entryPath.replace(/\\/g, '/') - }; - - // Check if directory has CLAUDE.md - if (isDirectory) { - const claudeMdPath = join(entryPath, 'CLAUDE.md'); - fileInfo.hasClaudeMd = existsSync(claudeMdPath); - } - - files.push(fileInfo); - } - - // Sort: directories first, then alphabetically - files.sort((a, b) => { - if (a.type === 'directory' && b.type !== 'directory') return -1; - if (a.type !== 'directory' && b.type === 'directory') return 1; - return a.name.localeCompare(b.name); - }); - - return { - path: normalizedPath.replace(/\\/g, '/'), - files, - gitignorePatterns - }; - } catch (error: unknown) { - console.error('Error listing directory:', error); - return { error: (error as Error).message, files: [] }; - } -} - -/** - * Get file content for preview - * @param {string} filePath - Path to file - * @returns {Promise} - */ -async function getFileContent(filePath) { - try { - // Normalize path - let normalizedPath = filePath.replace(/\\/g, '/'); - if (normalizedPath.match(/^\/[a-zA-Z]\//)) { - normalizedPath = normalizedPath.charAt(1).toUpperCase() + ':' + normalizedPath.slice(2); - } - - if (!existsSync(normalizedPath)) { - return { error: 'File not found' }; - } - - const stats = statSync(normalizedPath); - if (stats.isDirectory()) { - return { error: 'Cannot read directory' }; - } - - // Check file size (limit to 1MB for preview) - if (stats.size > 1024 * 1024) { - return { error: 'File too large for preview (max 1MB)', size: stats.size }; - } - - // Read file content - const content = readFileSync(normalizedPath, 'utf8'); - const ext = normalizedPath.substring(normalizedPath.lastIndexOf('.')).toLowerCase(); - const language = EXT_TO_LANGUAGE[ext] || 'plaintext'; - const isMarkdown = ext === '.md' || ext === '.markdown'; - const fileName = normalizedPath.split('/').pop(); - - return { - content, - language, - isMarkdown, - fileName, - path: normalizedPath, - size: stats.size, - lines: content.split('\n').length - }; - } catch (error: unknown) { - console.error('Error reading file:', error); - return { error: (error as Error).message }; - } -} - -/** - * Trigger update-module-claude tool (async execution) - * @param {string} targetPath - Directory path to update - * @param {string} tool - CLI tool to use (gemini, qwen, codex, claude) - * @param {string} strategy - Update strategy (single-layer, multi-layer) - * @returns {Promise} - */ -async function triggerUpdateClaudeMd(targetPath, tool, strategy) { - const { spawn } = await import('child_process'); - - // Normalize path - let normalizedPath = targetPath.replace(/\\/g, '/'); - if (normalizedPath.match(/^\/[a-zA-Z]\//)) { - normalizedPath = normalizedPath.charAt(1).toUpperCase() + ':' + normalizedPath.slice(2); - } - - if (!existsSync(normalizedPath)) { - return { error: 'Directory not found' }; - } - - if (!statSync(normalizedPath).isDirectory()) { - return { error: 'Not a directory' }; - } - - // Build ccw tool command with JSON parameters - const params = JSON.stringify({ - strategy, - path: normalizedPath, - tool - }); - - console.log(`[Explorer] Running async: ccw tool exec update_module_claude with ${tool} (${strategy})`); - - return new Promise((resolve) => { - const isWindows = process.platform === 'win32'; - - // Spawn the process - const child = spawn('ccw', ['tool', 'exec', 'update_module_claude', params], { - cwd: normalizedPath, - shell: isWindows, - stdio: ['ignore', 'pipe', 'pipe'] - }); - - let stdout = ''; - let stderr = ''; - - child.stdout.on('data', (data) => { - stdout += 
data.toString(); - }); - - child.stderr.on('data', (data) => { - stderr += data.toString(); - }); - - child.on('close', (code) => { - if (code === 0) { - // Parse the JSON output from the tool - let result; - try { - result = JSON.parse(stdout); - } catch { - result = { output: stdout }; - } - - if (result.success === false || result.error) { - resolve({ - success: false, - error: result.error || result.message || 'Update failed', - output: stdout - }); - } else { - resolve({ - success: true, - message: result.message || `CLAUDE.md updated successfully using ${tool} (${strategy})`, - output: stdout, - path: normalizedPath - }); - } - } else { - resolve({ - success: false, - error: stderr || `Process exited with code ${code}`, - output: stdout + stderr - }); - } - }); - - child.on('error', (error) => { - console.error('Error spawning process:', error); - resolve({ - success: false, - error: (error as Error).message, - output: '' - }); - }); - - // Timeout after 5 minutes - setTimeout(() => { - child.kill(); - resolve({ - success: false, - error: 'Timeout: Process took longer than 5 minutes', - output: stdout - }); - }, 300000); - }); -} - - -// ======================================== -// Version Check Functions -// ======================================== - -// Package name on npm registry -const NPM_PACKAGE_NAME = 'claude-code-workflow'; - -// Cache for version check (avoid too frequent requests) -let versionCheckCache = null; -let versionCheckTime = 0; -const VERSION_CHECK_CACHE_TTL = 3600000; // 1 hour - -/** - * Get current package version from package.json - * @returns {string} - */ -function getCurrentVersion() { - try { - const packageJsonPath = join(import.meta.dirname, '../../../package.json'); - if (existsSync(packageJsonPath)) { - const pkg = JSON.parse(readFileSync(packageJsonPath, 'utf8')); - return pkg.version || '0.0.0'; - } - } catch (e) { - console.error('Error reading package.json:', e); - } - return '0.0.0'; -} - -/** - * Check npm registry for latest version - * @returns {Promise} - */ -async function checkNpmVersion() { - // Return cached result if still valid - const now = Date.now(); - if (versionCheckCache && (now - versionCheckTime) < VERSION_CHECK_CACHE_TTL) { - return versionCheckCache; - } - - const currentVersion = getCurrentVersion(); - - try { - // Fetch latest version from npm registry - const npmUrl = 'https://registry.npmjs.org/' + encodeURIComponent(NPM_PACKAGE_NAME) + '/latest'; - const response = await fetch(npmUrl, { - headers: { 'Accept': 'application/json' } - }); - - if (!response.ok) { - throw new Error('HTTP ' + response.status); - } - - const data = await response.json(); - const latestVersion = data.version; - - // Compare versions - const hasUpdate = compareVersions(latestVersion, currentVersion) > 0; - - const result = { - currentVersion, - latestVersion, - hasUpdate, - packageName: NPM_PACKAGE_NAME, - updateCommand: 'npm update -g ' + NPM_PACKAGE_NAME, - checkedAt: new Date().toISOString() - }; - - // Cache the result - versionCheckCache = result; - versionCheckTime = now; - - return result; - } catch (error: unknown) { - console.error('Version check failed:', (error as Error).message); - return { - currentVersion, - latestVersion: null, - hasUpdate: false, - error: (error as Error).message, - checkedAt: new Date().toISOString() - }; - } -} - -/** - * Compare two semver versions - * @param {string} v1 - * @param {string} v2 - * @returns {number} 1 if v1 > v2, -1 if v1 < v2, 0 if equal - */ -function compareVersions(v1, v2) { - const parts1 = 
v1.split('.').map(Number); - const parts2 = v2.split('.').map(Number); - - for (let i = 0; i < 3; i++) { - const p1 = parts1[i] || 0; - const p2 = parts2[i] || 0; - if (p1 > p2) return 1; - if (p1 < p2) return -1; - } - return 0; -} - -// ========== Skills Helper Functions ========== - -/** - * Parse SKILL.md file to extract frontmatter and content - * @param {string} content - File content - * @returns {Object} Parsed frontmatter and content - */ -function parseSkillFrontmatter(content) { - const result = { - name: '', - description: '', - version: null, - allowedTools: [], - content: '' - }; - - // Check for YAML frontmatter - if (content.startsWith('---')) { - const endIndex = content.indexOf('---', 3); - if (endIndex > 0) { - const frontmatter = content.substring(3, endIndex).trim(); - result.content = content.substring(endIndex + 3).trim(); - - // Parse frontmatter lines - const lines = frontmatter.split('\n'); - for (const line of lines) { - const colonIndex = line.indexOf(':'); - if (colonIndex > 0) { - const key = line.substring(0, colonIndex).trim().toLowerCase(); - const value = line.substring(colonIndex + 1).trim(); - - if (key === 'name') { - result.name = value.replace(/^["']|["']$/g, ''); - } else if (key === 'description') { - result.description = value.replace(/^["']|["']$/g, ''); - } else if (key === 'version') { - result.version = value.replace(/^["']|["']$/g, ''); - } else if (key === 'allowed-tools' || key === 'allowedtools') { - // Parse as comma-separated or YAML array - result.allowedTools = value.replace(/^\[|\]$/g, '').split(',').map(t => t.trim()).filter(Boolean); - } - } - } - } - } else { - result.content = content; - } - - return result; -} - -/** - * Get skills configuration from project and user directories - * @param {string} projectPath - * @returns {Object} - */ -function getSkillsConfig(projectPath) { - const result = { - projectSkills: [], - userSkills: [] - }; - - try { - // Project skills: .claude/skills/ - const projectSkillsDir = join(projectPath, '.claude', 'skills'); - if (existsSync(projectSkillsDir)) { - const skills = readdirSync(projectSkillsDir, { withFileTypes: true }); - for (const skill of skills) { - if (skill.isDirectory()) { - const skillMdPath = join(projectSkillsDir, skill.name, 'SKILL.md'); - if (existsSync(skillMdPath)) { - const content = readFileSync(skillMdPath, 'utf8'); - const parsed = parseSkillFrontmatter(content); - - // Get supporting files - const skillDir = join(projectSkillsDir, skill.name); - const supportingFiles = getSupportingFiles(skillDir); - - result.projectSkills.push({ - name: parsed.name || skill.name, - description: parsed.description, - version: parsed.version, - allowedTools: parsed.allowedTools, - location: 'project', - path: skillDir, - supportingFiles - }); - } - } - } - } - - // User skills: ~/.claude/skills/ - const userSkillsDir = join(homedir(), '.claude', 'skills'); - if (existsSync(userSkillsDir)) { - const skills = readdirSync(userSkillsDir, { withFileTypes: true }); - for (const skill of skills) { - if (skill.isDirectory()) { - const skillMdPath = join(userSkillsDir, skill.name, 'SKILL.md'); - if (existsSync(skillMdPath)) { - const content = readFileSync(skillMdPath, 'utf8'); - const parsed = parseSkillFrontmatter(content); - - // Get supporting files - const skillDir = join(userSkillsDir, skill.name); - const supportingFiles = getSupportingFiles(skillDir); - - result.userSkills.push({ - name: parsed.name || skill.name, - description: parsed.description, - version: parsed.version, - allowedTools: 
parsed.allowedTools, - location: 'user', - path: skillDir, - supportingFiles - }); - } - } - } - } - } catch (error) { - console.error('Error reading skills config:', error); - } - - return result; -} - -/** - * Get list of supporting files for a skill - * @param {string} skillDir - * @returns {string[]} - */ -function getSupportingFiles(skillDir) { - const files = []; - try { - const entries = readdirSync(skillDir, { withFileTypes: true }); - for (const entry of entries) { - if (entry.name !== 'SKILL.md') { - if (entry.isFile()) { - files.push(entry.name); - } else if (entry.isDirectory()) { - files.push(entry.name + '/'); - } - } - } - } catch (e) { - // Ignore errors - } - return files; -} - -/** - * Get single skill detail - * @param {string} skillName - * @param {string} location - 'project' or 'user' - * @param {string} projectPath - * @returns {Object} - */ -function getSkillDetail(skillName, location, projectPath) { - try { - const baseDir = location === 'project' - ? join(projectPath, '.claude', 'skills') - : join(homedir(), '.claude', 'skills'); - - const skillDir = join(baseDir, skillName); - const skillMdPath = join(skillDir, 'SKILL.md'); - - if (!existsSync(skillMdPath)) { - return { error: 'Skill not found' }; - } - - const content = readFileSync(skillMdPath, 'utf8'); - const parsed = parseSkillFrontmatter(content); - const supportingFiles = getSupportingFiles(skillDir); - - return { - skill: { - name: parsed.name || skillName, - description: parsed.description, - version: parsed.version, - allowedTools: parsed.allowedTools, - content: parsed.content, - location, - path: skillDir, - supportingFiles - } - }; - } catch (error) { - return { error: (error as Error).message }; - } -} - -/** - * Delete a skill - * @param {string} skillName - * @param {string} location - * @param {string} projectPath - * @returns {Object} - */ -function deleteSkill(skillName, location, projectPath) { - try { - const baseDir = location === 'project' - ? 
join(projectPath, '.claude', 'skills') - : join(homedir(), '.claude', 'skills'); - - const skillDir = join(baseDir, skillName); - - if (!existsSync(skillDir)) { - return { error: 'Skill not found' }; - } - - // Recursively delete directory - const deleteRecursive = (dirPath) => { - if (existsSync(dirPath)) { - readdirSync(dirPath).forEach((file) => { - const curPath = join(dirPath, file); - if (statSync(curPath).isDirectory()) { - deleteRecursive(curPath); - } else { - unlinkSync(curPath); - } - }); - fsPromises.rmdir(dirPath); - } - }; - - deleteRecursive(skillDir); - - return { success: true, skillName, location }; - } catch (error) { - return { error: (error as Error).message }; - } -} - -// ========== Rules Helper Functions ========== - -/** - * Parse rule file to extract frontmatter (paths) and content - * @param {string} content - File content - * @returns {Object} Parsed frontmatter and content - */ -function parseRuleFrontmatter(content) { - const result = { - paths: [], - content: content - }; - - // Check for YAML frontmatter - if (content.startsWith('---')) { - const endIndex = content.indexOf('---', 3); - if (endIndex > 0) { - const frontmatter = content.substring(3, endIndex).trim(); - result.content = content.substring(endIndex + 3).trim(); - - // Parse frontmatter lines - const lines = frontmatter.split('\n'); - for (const line of lines) { - const colonIndex = line.indexOf(':'); - if (colonIndex > 0) { - const key = line.substring(0, colonIndex).trim().toLowerCase(); - const value = line.substring(colonIndex + 1).trim(); - - if (key === 'paths') { - // Parse as comma-separated or YAML array - result.paths = value.replace(/^\[|\]$/g, '').split(',').map(t => t.trim()).filter(Boolean); - } - } - } - } - } - - return result; -} - -/** - * Get rules configuration from project and user directories - * @param {string} projectPath - * @returns {Object} - */ -function getRulesConfig(projectPath) { - const result = { - projectRules: [], - userRules: [] - }; - - try { - // Project rules: .claude/rules/ - const projectRulesDir = join(projectPath, '.claude', 'rules'); - if (existsSync(projectRulesDir)) { - const rules = scanRulesDirectory(projectRulesDir, 'project', ''); - result.projectRules = rules; - } - - // User rules: ~/.claude/rules/ - const userRulesDir = join(homedir(), '.claude', 'rules'); - if (existsSync(userRulesDir)) { - const rules = scanRulesDirectory(userRulesDir, 'user', ''); - result.userRules = rules; - } - } catch (error) { - console.error('Error reading rules config:', error); - } - - return result; -} - -/** - * Recursively scan rules directory for .md files - * @param {string} dirPath - * @param {string} location - * @param {string} subdirectory - * @returns {Object[]} - */ -function scanRulesDirectory(dirPath, location, subdirectory) { - const rules = []; - - try { - const entries = readdirSync(dirPath, { withFileTypes: true }); - for (const entry of entries) { - const fullPath = join(dirPath, entry.name); - - if (entry.isFile() && entry.name.endsWith('.md')) { - const content = readFileSync(fullPath, 'utf8'); - const parsed = parseRuleFrontmatter(content); - - rules.push({ - name: entry.name, - paths: parsed.paths, - content: parsed.content, - location, - path: fullPath, - subdirectory: subdirectory || null - }); - } else if (entry.isDirectory()) { - // Recursively scan subdirectories - const subRules = scanRulesDirectory(fullPath, location, subdirectory ? 
`${subdirectory}/${entry.name}` : entry.name); - rules.push(...subRules); - } - } - } catch (e) { - // Ignore errors - } - - return rules; -} - -/** - * Get single rule detail - * @param {string} ruleName - * @param {string} location - 'project' or 'user' - * @param {string} projectPath - * @returns {Object} - */ -function getRuleDetail(ruleName, location, projectPath) { - try { - const baseDir = location === 'project' - ? join(projectPath, '.claude', 'rules') - : join(homedir(), '.claude', 'rules'); - - // Find the rule file (could be in subdirectory) - const rulePath = findRuleFile(baseDir, ruleName); - - if (!rulePath) { - return { error: 'Rule not found' }; - } - - const content = readFileSync(rulePath, 'utf8'); - const parsed = parseRuleFrontmatter(content); - - return { - rule: { - name: ruleName, - paths: parsed.paths, - content: parsed.content, - location, - path: rulePath - } - }; - } catch (error) { - return { error: (error as Error).message }; - } -} - -/** - * Find rule file in directory (including subdirectories) - * @param {string} baseDir - * @param {string} ruleName - * @returns {string|null} - */ -function findRuleFile(baseDir, ruleName) { - try { - // Direct path - const directPath = join(baseDir, ruleName); - if (existsSync(directPath)) { - return directPath; - } - - // Search in subdirectories - const entries = readdirSync(baseDir, { withFileTypes: true }); - for (const entry of entries) { - if (entry.isDirectory()) { - const subPath = findRuleFile(join(baseDir, entry.name), ruleName); - if (subPath) return subPath; - } - } - } catch (e) { - // Ignore errors - } - return null; -} - -/** - * Delete a rule - * @param {string} ruleName - * @param {string} location - * @param {string} projectPath - * @returns {Object} - */ -function deleteRule(ruleName, location, projectPath) { - try { - const baseDir = location === 'project' - ? 
join(projectPath, '.claude', 'rules') - : join(homedir(), '.claude', 'rules'); - - const rulePath = findRuleFile(baseDir, ruleName); - - if (!rulePath) { - return { error: 'Rule not found' }; - } - - unlinkSync(rulePath); - - return { success: true, ruleName, location }; - } catch (error) { - return { error: (error as Error).message }; - } -} diff --git a/ccw/src/core/websocket.ts b/ccw/src/core/websocket.ts new file mode 100644 index 00000000..efeb51af --- /dev/null +++ b/ccw/src/core/websocket.ts @@ -0,0 +1,190 @@ +// @ts-nocheck +import { createHash } from 'crypto'; + +// WebSocket clients for real-time notifications +export const wsClients = new Set(); + +export function handleWebSocketUpgrade(req, socket, head) { + const key = req.headers['sec-websocket-key']; + const acceptKey = createHash('sha1') + .update(key + '258EAFA5-E914-47DA-95CA-C5AB0DC85B11') + .digest('base64'); + + const responseHeaders = [ + 'HTTP/1.1 101 Switching Protocols', + 'Upgrade: websocket', + 'Connection: Upgrade', + `Sec-WebSocket-Accept: ${acceptKey}`, + '', + '' + ].join('\r\n'); + + socket.write(responseHeaders); + + // Add to clients set + wsClients.add(socket); + console.log(`[WS] Client connected (${wsClients.size} total)`); + + // Handle incoming messages + socket.on('data', (buffer) => { + try { + const frame = parseWebSocketFrame(buffer); + if (!frame) return; + + const { opcode, payload } = frame; + + switch (opcode) { + case 0x1: // Text frame + if (payload) { + console.log('[WS] Received:', payload); + } + break; + case 0x8: // Close frame + socket.end(); + break; + case 0x9: // Ping frame - respond with Pong + const pongFrame = Buffer.alloc(2); + pongFrame[0] = 0x8A; // Pong opcode with FIN bit + pongFrame[1] = 0x00; // No payload + socket.write(pongFrame); + break; + case 0xA: // Pong frame - ignore + break; + default: + // Ignore other frame types (binary, continuation) + break; + } + } catch (e) { + // Ignore parse errors + } + }); + + // Handle disconnect + socket.on('close', () => { + wsClients.delete(socket); + console.log(`[WS] Client disconnected (${wsClients.size} remaining)`); + }); + + socket.on('error', () => { + wsClients.delete(socket); + }); +} + +/** + * Parse WebSocket frame (simplified) + * Returns { opcode, payload } or null + */ +export function parseWebSocketFrame(buffer) { + if (buffer.length < 2) return null; + + const firstByte = buffer[0]; + const opcode = firstByte & 0x0f; // Extract opcode (bits 0-3) + + // Opcode types: + // 0x0 = continuation, 0x1 = text, 0x2 = binary + // 0x8 = close, 0x9 = ping, 0xA = pong + + const secondByte = buffer[1]; + const isMasked = (secondByte & 0x80) !== 0; + let payloadLength = secondByte & 0x7f; + + let offset = 2; + if (payloadLength === 126) { + payloadLength = buffer.readUInt16BE(2); + offset = 4; + } else if (payloadLength === 127) { + payloadLength = Number(buffer.readBigUInt64BE(2)); + offset = 10; + } + + let mask = null; + if (isMasked) { + mask = buffer.slice(offset, offset + 4); + offset += 4; + } + + const payload = buffer.slice(offset, offset + payloadLength); + + if (isMasked && mask) { + for (let i = 0; i < payload.length; i++) { + payload[i] ^= mask[i % 4]; + } + } + + return { opcode, payload: payload.toString('utf8') }; +} + +/** + * Create WebSocket frame + */ +export function createWebSocketFrame(data) { + const payload = Buffer.from(JSON.stringify(data), 'utf8'); + const length = payload.length; + + let frame; + if (length <= 125) { + frame = Buffer.alloc(2 + length); + frame[0] = 0x81; // Text frame, FIN + frame[1] = 
length; + payload.copy(frame, 2); + } else if (length <= 65535) { + frame = Buffer.alloc(4 + length); + frame[0] = 0x81; + frame[1] = 126; + frame.writeUInt16BE(length, 2); + payload.copy(frame, 4); + } else { + frame = Buffer.alloc(10 + length); + frame[0] = 0x81; + frame[1] = 127; + frame.writeBigUInt64BE(BigInt(length), 2); + payload.copy(frame, 10); + } + + return frame; +} + +/** + * Broadcast message to all connected WebSocket clients + */ +export function broadcastToClients(data) { + const frame = createWebSocketFrame(data); + + for (const client of wsClients) { + try { + client.write(frame); + } catch (e) { + wsClients.delete(client); + } + } + + console.log(`[WS] Broadcast to ${wsClients.size} clients:`, data.type); +} + +/** + * Extract session ID from file path + */ +export function extractSessionIdFromPath(filePath) { + // Normalize path + const normalized = filePath.replace(/\\/g, '/'); + + // Look for session pattern: WFS-xxx, WRS-xxx, etc. + const sessionMatch = normalized.match(/\/(W[A-Z]S-[^/]+)\//); + if (sessionMatch) { + return sessionMatch[1]; + } + + // Look for .workflow/.sessions/xxx pattern + const sessionsMatch = normalized.match(/\.workflow\/\.sessions\/([^/]+)/); + if (sessionsMatch) { + return sessionsMatch[1]; + } + + // Look for lite-plan/lite-fix pattern + const liteMatch = normalized.match(/\.(lite-plan|lite-fix)\/([^/]+)/); + if (liteMatch) { + return liteMatch[2]; + } + + return null; +} diff --git a/ccw/src/templates/dashboard-css/10-cli.css b/ccw/src/templates/dashboard-css/10-cli.css index 0458a304..6db434b8 100644 --- a/ccw/src/templates/dashboard-css/10-cli.css +++ b/ccw/src/templates/dashboard-css/10-cli.css @@ -198,6 +198,11 @@ color: hsl(var(--primary)); } +.tool-type-badge.llm { + background: hsl(142 76% 36% / 0.15); + color: hsl(142 76% 36%); +} + .tool-item-right { display: flex; align-items: center; @@ -814,6 +819,15 @@ border-color: hsl(260 80% 60% / 0.7); } +.cli-tool-card.tool-semantic.clickable { + cursor: pointer; +} + +.cli-tool-card.tool-semantic.clickable:hover { + transform: translateY(-1px); + box-shadow: 0 4px 12px hsl(260 80% 60% / 0.15); +} + /* Execute Panel */ .cli-execute-header { display: flex; @@ -3064,6 +3078,211 @@ flex-wrap: wrap; } +/* ======================================== + * Enhanced Native Session Display + * ======================================== */ + +/* View Full Process Button in Execution Detail */ +.cli-detail-native-action { + margin-top: 0.75rem; + padding-top: 0.75rem; + border-top: 1px solid hsl(var(--border) / 0.5); +} + +.cli-detail-native-action .btn { + font-size: 0.8125rem; + gap: 0.5rem; +} + +/* Collapsible Thinking Process */ +.turn-thinking-details { + border: none; + margin: 0; +} + +.turn-thinking-summary { + display: flex; + align-items: center; + gap: 0.375rem; + cursor: pointer; + padding: 0.5rem; + background: hsl(var(--warning) / 0.08); + border: 1px solid hsl(var(--warning) / 0.25); + border-radius: 0.375rem; + font-size: 0.75rem; + font-weight: 600; + color: hsl(var(--warning)); + transition: all 0.2s ease; + list-style: none; +} + +.turn-thinking-summary::-webkit-details-marker { + display: none; +} + +.turn-thinking-summary:hover { + background: hsl(var(--warning) / 0.15); + border-color: hsl(var(--warning) / 0.4); +} + +.turn-thinking-summary::before { + content: '▶'; + display: inline-block; + margin-right: 0.25rem; + transition: transform 0.2s ease; + font-size: 0.6875rem; +} + +.turn-thinking-details[open] .turn-thinking-summary::before { + transform: rotate(90deg); +} + 
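For context on what the `.turn-thinking-*` rules above and below attach to: a minimal sketch of the markup shape these selectors appear to target, using native `<details>`/`<summary>` elements (which is why the native marker is hidden and a rotating `▶` is drawn via `::before`). The `renderThinkingSection` and `escapeHtml` helpers are hypothetical illustrations, not part of this patch; only the class names come from the stylesheet.

```ts
// Sketch: build the collapsible "thinking process" block these styles target.
// renderThinkingSection and escapeHtml are hypothetical; class names are from the CSS.
function renderThinkingSection(items: string[]): string {
  const bullets = items.map((t) => `<li>${escapeHtml(t)}</li>`).join('');
  return `
    <details class="turn-thinking-details">
      <summary class="turn-thinking-summary">Thinking process (${items.length})</summary>
      <div class="turn-thinking-content"><ul>${bullets}</ul></div>
    </details>`;
}

// Minimal HTML-escaping helper so the sketch is self-contained.
function escapeHtml(s: string): string {
  return s.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;');
}
```

Using `<details>` keeps the open/closed state in the browser, so the `[open]` attribute selector above can rotate the marker without any JavaScript toggle handler.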
+.turn-thinking-content { + padding: 0.75rem; + margin-top: 0.5rem; + background: hsl(var(--warning) / 0.03); + border: 1px solid hsl(var(--warning) / 0.15); + border-radius: 0.375rem; + font-style: italic; +} + +.turn-thinking-content ul { + margin: 0; + padding-left: 1.25rem; +} + +.turn-thinking-content li { + margin-bottom: 0.375rem; + font-size: 0.6875rem; + line-height: 1.6; + color: hsl(var(--foreground) / 0.85); +} + +/* Tool Calls Header */ +.turn-tool-calls-header { + display: flex; + align-items: center; + gap: 0.375rem; + font-size: 0.75rem; + font-weight: 600; + color: hsl(var(--muted-foreground)); + margin-bottom: 0.625rem; + padding-bottom: 0.375rem; + border-bottom: 1px solid hsl(var(--border) / 0.5); +} + +/* Collapsible Tool Calls */ +.turn-tool-call-details { + border: none; + margin-bottom: 0.5rem; +} + +.turn-tool-call-summary { + display: flex; + align-items: center; + justify-content: space-between; + cursor: pointer; + padding: 0.5rem 0.75rem; + background: hsl(var(--background)); + border: 1px solid hsl(var(--border)); + border-radius: 0.375rem; + font-size: 0.7rem; + transition: all 0.2s ease; + list-style: none; +} + +.turn-tool-call-summary::-webkit-details-marker { + display: none; +} + +.turn-tool-call-summary:hover { + background: hsl(var(--muted) / 0.5); + border-color: hsl(var(--primary) / 0.4); +} + +.turn-tool-call-summary::before { + content: '▶'; + display: inline-block; + margin-right: 0.5rem; + transition: transform 0.2s ease; + font-size: 0.625rem; + color: hsl(var(--muted-foreground)); +} + +.turn-tool-call-details[open] .turn-tool-call-summary::before { + transform: rotate(90deg); +} + +.native-tool-size { + font-size: 0.625rem; + color: hsl(var(--muted-foreground)); + font-weight: 400; +} + +.turn-tool-call-content { + padding: 0.75rem; + margin-top: 0.5rem; + background: hsl(var(--muted) / 0.3); + border: 1px solid hsl(var(--border)); + border-radius: 0.375rem; + border-top-left-radius: 0; + border-top-right-radius: 0; +} + +.turn-tool-input, +.turn-tool-output { + margin-bottom: 0.75rem; +} + +.turn-tool-input:last-child, +.turn-tool-output:last-child { + margin-bottom: 0; +} + +.turn-tool-input strong, +.turn-tool-output strong { + display: block; + font-size: 0.6875rem; + font-weight: 600; + color: hsl(var(--foreground)); + margin-bottom: 0.375rem; +} + +.turn-tool-input pre, +.turn-tool-output pre { + margin: 0; + padding: 0.5rem; + background: hsl(var(--background)); + border: 1px solid hsl(var(--border)); + border-radius: 0.25rem; + font-family: monospace; + font-size: 0.6875rem; + line-height: 1.5; + white-space: pre-wrap; + word-wrap: break-word; + max-height: 400px; + overflow-y: auto; +} + +/* Improved scrollbar for tool output */ +.turn-tool-output pre::-webkit-scrollbar { + width: 6px; +} + +.turn-tool-output pre::-webkit-scrollbar-track { + background: hsl(var(--muted)); + border-radius: 3px; +} + +.turn-tool-output pre::-webkit-scrollbar-thumb { + background: hsl(var(--muted-foreground) / 0.3); + border-radius: 3px; +} + +.turn-tool-output pre::-webkit-scrollbar-thumb:hover { + background: hsl(var(--muted-foreground) / 0.5); +} + /* ======================================== * Task Queue Sidebar - CLI Tab Styles * ======================================== */ @@ -3251,3 +3470,290 @@ .cli-queue-native { font-size: 0.75rem; } + +/* ======================================== + * CLI Tool Management Styles + * ======================================== */ + +/* Disabled tool card */ +.cli-tool-card.disabled { + opacity: 0.6; +} + 
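The `.disabled` card treatment here is driven by the `enabled` flag persisted in `.workflow/cli-config.json` (see cli-config-manager.ts later in this patch). A minimal sketch of how a card could be toggled from that flag; the helper name and selector usage are assumptions for illustration:

```ts
// Sketch only: renderToolsSection in this patch builds the class list
// inline; this shows the same mapping in isolation.
function applyToolCardState(card: HTMLElement, enabled: boolean): void {
  // .cli-tool-card.disabled dims the card and its header (rules above/below).
  card.classList.toggle('disabled', !enabled);
}
```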
+.cli-tool-card.disabled .cli-tool-header { + opacity: 0.8; +} + +/* Disabled status indicator */ +.cli-tool-status.status-disabled { + background: hsl(var(--warning)); +} + +/* Warning badge */ +.cli-tool-badge.badge-warning { + background: hsl(var(--warning) / 0.15); + color: hsl(var(--warning)); + font-size: 0.65rem; + padding: 0.125rem 0.375rem; + border-radius: 0.25rem; + margin-left: 0.25rem; +} + +/* Compact toggle for tool cards */ +.cli-toggle-compact { + position: relative; + display: inline-block; + width: 28px; + height: 16px; + cursor: pointer; +} + +.cli-toggle-compact input { + opacity: 0; + width: 0; + height: 0; + position: absolute; +} + +.cli-toggle-slider-compact { + position: absolute; + cursor: pointer; + top: 0; + left: 0; + right: 0; + bottom: 0; + background-color: hsl(var(--muted)); + transition: 0.2s; + border-radius: 16px; +} + +.cli-toggle-slider-compact:before { + position: absolute; + content: ""; + height: 12px; + width: 12px; + left: 2px; + bottom: 2px; + background-color: white; + transition: 0.2s; + border-radius: 50%; + box-shadow: 0 1px 2px rgba(0, 0, 0, 0.15); +} + +.cli-toggle-compact input:checked + .cli-toggle-slider-compact { + background-color: hsl(var(--primary)); +} + +.cli-toggle-compact input:checked + .cli-toggle-slider-compact:before { + transform: translateX(12px); +} + +.cli-toggle-compact input:focus + .cli-toggle-slider-compact { + box-shadow: 0 0 0 2px hsl(var(--ring) / 0.5); +} + +/* Ghost button variant for destructive actions */ +.btn-ghost.text-destructive { + color: hsl(var(--destructive)); +} + +/* ======================================== + * Tool Configuration Modal + * ======================================== */ + +/* Tool item clickable */ +.tool-item.clickable { + cursor: pointer; + transition: all 0.15s ease; +} + +.tool-item.clickable:hover { + background: hsl(var(--accent)); + border-color: hsl(var(--primary) / 0.3); +} + +.tool-item.clickable:hover .tool-config-icon { + opacity: 1; +} + +.tool-config-icon { + margin-left: 0.375rem; + color: hsl(var(--muted-foreground)); + opacity: 0; + transition: opacity 0.15s ease; +} + +/* Tool Config Modal */ +.tool-config-modal { + display: flex; + flex-direction: column; + gap: 1.25rem; +} + +.tool-config-section { + display: flex; + flex-direction: column; + gap: 0.5rem; +} + +.tool-config-section h4 { + font-size: 0.75rem; + font-weight: 600; + text-transform: uppercase; + letter-spacing: 0.025em; + color: hsl(var(--muted-foreground)); + margin: 0; +} + +.tool-config-section h4 .text-muted { + font-weight: 400; + text-transform: none; + color: hsl(var(--muted-foreground) / 0.7); +} + +/* Status Badges */ +.tool-config-badges { + display: flex; + gap: 0.5rem; + flex-wrap: wrap; +} + +.badge { + display: inline-flex; + align-items: center; + gap: 0.25rem; + padding: 0.25rem 0.5rem; + font-size: 0.6875rem; + font-weight: 500; + border-radius: 9999px; +} + +.badge-success { + background: hsl(var(--success) / 0.15); + color: hsl(var(--success)); +} + +.badge-primary { + background: hsl(var(--primary) / 0.15); + color: hsl(var(--primary)); +} + +.badge-muted { + background: hsl(var(--muted)); + color: hsl(var(--muted-foreground)); +} + +/* Config Actions */ +.tool-config-actions { + display: flex; + gap: 0.5rem; + flex-wrap: wrap; +} + +.btn-danger-outline { + border-color: hsl(var(--destructive) / 0.5); + color: hsl(var(--destructive)); +} + +.btn-danger-outline:hover { + background: hsl(var(--destructive) / 0.1); + border-color: hsl(var(--destructive)); +} + +/* Config Selects and 
Inputs */ +.tool-config-select, +.tool-config-input { + width: 100%; + padding: 0.5rem 0.75rem; + font-size: 0.8125rem; + font-family: inherit; + border: 1px solid hsl(var(--border)); + border-radius: 0.375rem; + background: hsl(var(--background)); + color: hsl(var(--foreground)); + transition: border-color 0.15s ease, box-shadow 0.15s ease; +} + +.tool-config-select:focus, +.tool-config-input:focus { + outline: none; + border-color: hsl(var(--primary)); + box-shadow: 0 0 0 2px hsl(var(--primary) / 0.2); +} + +.tool-config-select { + cursor: pointer; + appearance: none; + background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 20 20'%3e%3cpath stroke='%236b7280' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' d='M6 8l4 4 4-4'/%3e%3c/svg%3e"); + background-position: right 0.5rem center; + background-repeat: no-repeat; + background-size: 1.25rem; + padding-right: 2rem; +} + +.tool-config-input.hidden { + display: none; +} + +.tool-config-input { + margin-top: 0.375rem; +} + +/* Config Footer */ +.tool-config-footer { + display: flex; + justify-content: flex-end; + gap: 0.75rem; + padding-top: 1rem; + border-top: 1px solid hsl(var(--border)); + margin-top: 0.5rem; +} + +.tool-config-footer .btn { + display: inline-flex; + align-items: center; + justify-content: center; + gap: 0.375rem; + padding: 0.5rem 1rem; + font-size: 0.8125rem; + font-weight: 500; + border-radius: 0.375rem; + cursor: pointer; + transition: all 0.15s ease; +} + +.tool-config-footer .btn-outline { + background: transparent; + border: 1px solid hsl(var(--border)); + color: hsl(var(--foreground)); +} + +.tool-config-footer .btn-outline:hover { + background: hsl(var(--muted)); + border-color: hsl(var(--muted-foreground) / 0.3); +} + +.tool-config-footer .btn-primary { + background: hsl(var(--primary)); + border: 1px solid hsl(var(--primary)); + color: hsl(var(--primary-foreground)); +} + +.tool-config-footer .btn-primary:hover { + background: hsl(var(--primary) / 0.9); +} + +/* Model Select Group */ +.model-select-group { + display: flex; + flex-direction: column; + gap: 0.5rem; +} + +.model-select-group .tool-config-input { + margin-top: 0; +} + +.btn-ghost.text-destructive:hover { + background: hsl(var(--destructive) / 0.1); +} diff --git a/ccw/src/templates/dashboard-css/11-memory.css b/ccw/src/templates/dashboard-css/11-memory.css index f41cb50e..33bc6a8a 100644 --- a/ccw/src/templates/dashboard-css/11-memory.css +++ b/ccw/src/templates/dashboard-css/11-memory.css @@ -887,6 +887,216 @@ text-transform: uppercase; } +/* ========== Node Details Panel ========== */ +.node-details { + padding: 1rem; +} + +.node-detail-header { + display: flex; + align-items: flex-start; + gap: 0.75rem; + padding-bottom: 1rem; + border-bottom: 1px solid hsl(var(--border)); + margin-bottom: 1rem; +} + +.node-detail-icon { + display: flex; + align-items: center; + justify-content: center; + width: 2.5rem; + height: 2.5rem; + border-radius: 0.5rem; + flex-shrink: 0; +} + +.node-detail-icon.file { + background: hsl(var(--primary) / 0.1); + color: hsl(var(--primary)); +} + +.node-detail-icon.module { + background: hsl(267, 84%, 95%); + color: hsl(267, 84%, 50%); +} + +.node-detail-icon.component { + background: hsl(142, 71%, 92%); + color: hsl(142, 71%, 40%); +} + +.node-detail-info { + flex: 1; + min-width: 0; +} + +.node-detail-name { + font-size: 0.9375rem; + font-weight: 600; + color: hsl(var(--foreground)); + margin-bottom: 0.25rem; + word-break: break-word; +} + 
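These node-detail rules, together with the association-item rules that follow, style the memory graph side panel. A minimal sketch of rendering one association entry against those class names; the data shape and the inline escaping helper are assumptions, not the store's actual API:

```ts
// Sketch only: targets .association-item / .association-name /
// .association-weight as defined in the rules below.
function escapeText(s: string): string {
  return s.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;');
}

function renderAssociation(a: { name: string; weight: number }): string {
  return (
    '<div class="association-item">' +
    '<span class="association-name">' + escapeText(a.name) + '</span>' +
    '<span class="association-weight">' + a.weight + '</span>' +
    '</div>'
  );
}
```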
+.node-detail-path { + font-size: 0.75rem; + color: hsl(var(--muted-foreground)); + font-family: monospace; + word-break: break-all; +} + +.node-detail-stats { + display: grid; + grid-template-columns: repeat(3, 1fr); + gap: 0.75rem; + padding: 0.875rem; + background: hsl(var(--muted) / 0.3); + border-radius: 0.5rem; + margin-bottom: 1rem; +} + +.detail-stat { + display: flex; + flex-direction: column; + align-items: center; + gap: 0.25rem; + text-align: center; +} + +.detail-stat-label { + font-size: 0.6875rem; + color: hsl(var(--muted-foreground)); + text-transform: uppercase; + letter-spacing: 0.025em; +} + +.detail-stat-value { + font-size: 1rem; + font-weight: 600; + color: hsl(var(--foreground)); +} + +.node-associations { + margin-top: 1rem; +} + +.associations-title { + display: flex; + align-items: center; + gap: 0.375rem; + font-size: 0.8125rem; + font-weight: 600; + color: hsl(var(--foreground)); + margin: 0 0 0.75rem 0; +} + +.associations-list { + display: flex; + flex-direction: column; + gap: 0.5rem; +} + +.association-item { + display: flex; + align-items: center; + gap: 0.625rem; + padding: 0.625rem; + background: hsl(var(--background)); + border: 1px solid hsl(var(--border)); + border-radius: 0.375rem; + cursor: pointer; + transition: all 0.15s ease; +} + +.association-item:hover { + background: hsl(var(--hover)); + border-color: hsl(var(--primary) / 0.3); +} + +.association-node { + display: flex; + align-items: center; + gap: 0.5rem; + flex: 1; + min-width: 0; +} + +.association-node i { + color: hsl(var(--muted-foreground)); + flex-shrink: 0; +} + +.association-node span { + font-size: 0.8125rem; + font-weight: 500; + color: hsl(var(--foreground)); + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} + +.association-icon { + display: flex; + align-items: center; + justify-content: center; + width: 1.75rem; + height: 1.75rem; + border-radius: 0.375rem; + background: hsl(var(--muted)); + color: hsl(var(--muted-foreground)); + flex-shrink: 0; +} + +.association-icon.file { + background: hsl(var(--primary) / 0.1); + color: hsl(var(--primary)); +} + +.association-icon.module { + background: hsl(267, 84%, 95%); + color: hsl(267, 84%, 50%); +} + +.association-info { + flex: 1; + min-width: 0; +} + +.association-name { + font-size: 0.8125rem; + font-weight: 500; + color: hsl(var(--foreground)); + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} + +.association-weight { + font-size: 0.6875rem; + color: hsl(var(--muted-foreground)); + padding: 0.125rem 0.375rem; + background: hsl(var(--muted)); + border-radius: 0.25rem; + flex-shrink: 0; +} + +.node-no-associations { + padding: 1.5rem; + text-align: center; + color: hsl(var(--muted-foreground)); + font-size: 0.875rem; +} + +.associations-more { + text-align: center; + padding: 0.5rem; + font-size: 0.75rem; + color: hsl(var(--muted-foreground)); + border-top: 1px solid hsl(var(--border)); + margin-top: 0.5rem; +} + @media (max-width: 1400px) { .memory-columns { grid-template-columns: 260px 1fr 280px; diff --git a/ccw/src/templates/dashboard-js/components/cli-history.js b/ccw/src/templates/dashboard-js/components/cli-history.js index ee8219c9..fc5fa83f 100644 --- a/ccw/src/templates/dashboard-js/components/cli-history.js +++ b/ccw/src/templates/dashboard-js/components/cli-history.js @@ -330,6 +330,9 @@ async function showExecutionDetail(executionId, sourceDir) { `; } + // Check if native session is available + const hasNativeSession = conversation.hasNativeSession || 
conversation.nativeSessionId; + const modalContent = `
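The truthiness check above matches the record shape returned by `getConversationWithNativeInfo`, added to cli-history-store.ts later in this patch. A minimal sketch of that contract; the standalone guard function is illustrative only:

```ts
// Sketch only: field names come from the CliHistoryStore addition in this
// patch; the interface and helper are not patch code.
interface NativeSessionInfo {
  hasNativeSession: boolean;
  nativeSessionId?: string;
  nativeSessionPath?: string;
}

function nativeSessionAvailable(conv: Partial<NativeSessionInfo>): boolean {
  // Either the explicit flag or a recorded native session id counts.
  return Boolean(conv.hasNativeSession || conv.nativeSessionId);
}
```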
@@ -344,6 +347,13 @@ async function showExecutionDetail(executionId, sourceDir) { ${new Date(createdAt).toLocaleString()} ${executionId.split('-')[0]}
+ ${hasNativeSession ? ` +
+ +
+ ` : ''}
${turnCount > 1 ? `
@@ -665,26 +675,52 @@ async function showNativeSessionDetail(executionId) { ` : ''; - // Thoughts section + // Thoughts section (collapsible) const thoughtsHtml = turn.thoughts && turn.thoughts.length > 0 ? `
-
Thoughts
-
    - ${turn.thoughts.map(t => `
  • ${escapeHtml(t)}
  • `).join('')} -
+
+ + + 💭 Thinking Process (${turn.thoughts.length} thoughts) + +
+
    + ${turn.thoughts.map(t => `
  • ${escapeHtml(t)}
  • `).join('')} +
+
+
` : ''; - // Tool calls section + // Tool calls section (collapsible for each call) const toolCallsHtml = turn.toolCalls && turn.toolCalls.length > 0 ? `
-
Tool Calls (${turn.toolCalls.length})
+
+ + Tool Calls (${turn.toolCalls.length}) +
- ${turn.toolCalls.map(tc => ` -
- ${escapeHtml(tc.name)} - ${tc.output ? `
${escapeHtml(tc.output.substring(0, 500))}${tc.output.length > 500 ? '...' : ''}
` : ''} -
+ ${turn.toolCalls.map((tc, tcIdx) => ` +
+ + 🔧 ${escapeHtml(tc.name)} + ${tc.output ? `(${tc.output.length} chars)` : ''} + +
+ ${tc.input ? ` +
+ Input: +
${escapeHtml(JSON.stringify(tc.input, null, 2))}
+
+ ` : ''} + ${tc.output ? ` +
+ Output: +
${escapeHtml(tc.output)}
+
+ ` : ''} +
+
`).join('')}
` @@ -758,7 +794,7 @@ async function showNativeSessionDetail(executionId) { // Store for export window._currentNativeSession = nativeSession; - showModal('Native Session Detail', modalContent, 'modal-lg'); + showModal('Native Session Detail', modalContent, { size: 'lg' }); } /** diff --git a/ccw/src/templates/dashboard-js/components/cli-status.js b/ccw/src/templates/dashboard-js/components/cli-status.js index 4b9fc230..e6dbc6be 100644 --- a/ccw/src/templates/dashboard-js/components/cli-status.js +++ b/ccw/src/templates/dashboard-js/components/cli-status.js @@ -15,6 +15,15 @@ let smartContextMaxFiles = parseInt(localStorage.getItem('ccw-smart-context-max- // Native Resume settings let nativeResumeEnabled = localStorage.getItem('ccw-native-resume') !== 'false'; // default true +// LLM Enhancement settings for Semantic Search +let llmEnhancementSettings = { + enabled: localStorage.getItem('ccw-llm-enhancement-enabled') === 'true', + tool: localStorage.getItem('ccw-llm-enhancement-tool') || 'gemini', + fallbackTool: localStorage.getItem('ccw-llm-enhancement-fallback') || 'qwen', + batchSize: parseInt(localStorage.getItem('ccw-llm-enhancement-batch-size') || '5', 10), + timeoutMs: parseInt(localStorage.getItem('ccw-llm-enhancement-timeout') || '300000', 10) +}; + // ========== Initialization ========== function initCliStatus() { // Load CLI status on init @@ -182,12 +191,17 @@ function renderCliStatus() { `; // Semantic Search card (only show if CodexLens is installed) + const llmStatusBadge = llmEnhancementSettings.enabled + ? `LLM` + : ''; const semanticHtml = codexLensStatus.ready ? ` -
+
Semantic Search AI + ${llmStatusBadge}
${semanticStatus.available ? 'AI-powered code understanding' : 'Natural language code search'} @@ -200,17 +214,27 @@ function renderCliStatus() {
${!semanticStatus.available ? ` - -
- - ~500MB download +
+
+ + ~500MB +
+
` : ` -
- - bge-small-en-v1.5 +
+
+ + bge-small-en-v1.5 +
+
`}
@@ -550,3 +574,535 @@ async function startSemanticInstall() { if (window.lucide) lucide.createIcons(); } } + +// ========== Semantic Search Settings Modal ========== +function openSemanticSettingsModal() { + const availableTools = Object.entries(cliToolStatus) + .filter(function(entry) { return entry[1].available; }) + .map(function(entry) { return entry[0]; }); + + const modal = document.createElement('div'); + modal.id = 'semanticSettingsModal'; + modal.className = 'fixed inset-0 bg-black/50 flex items-center justify-center z-50'; + modal.onclick = function(e) { if (e.target === modal) closeSemanticSettingsModal(); }; + + const toolOptions = availableTools.map(function(tool) { + return ''; + }).join(''); + + const fallbackOptions = '' + availableTools.map(function(tool) { + return ''; + }).join(''); + + const disabled = !llmEnhancementSettings.enabled ? 'disabled' : ''; + const opacityClass = !llmEnhancementSettings.enabled ? 'opacity-50' : ''; + + modal.innerHTML = + '
' + + '
' + + '
' + + '
' + + '' + + '
' + + '
' + + '

Semantic Search Settings

' + + '

Configure LLM enhancement for semantic indexing

' + + '
' + + '
' + + '
' + + '
' + + '
' + + '

' + + 'LLM Enhancement

' + + '

Use LLM to generate code summaries for better semantic search

' + + '
' + + '' + + '
' + + '
' + + '
' + + '
' + + '' + + '' + + '
' + + '
' + + '' + + '' + + '
' + + '
' + + '
' + + '
' + + '' + + '' + + '
' + + '
' + + '' + + '' + + '
' + + '
' + + '
' + + '
' + + '
' + + '' + + '
' + + '

LLM enhancement generates code summaries and keywords for each file, improving semantic search accuracy.

' + + '

Run codex-lens enhance after enabling to process existing files.

' + + '
' + + '
' + + '
' + + '
' + + '' + + '' + + '
' + + '
' + + '
' + + '
' + + '' + + '
' + + '
'; + + document.body.appendChild(modal); + + var handleEscape = function(e) { + if (e.key === 'Escape') { + closeSemanticSettingsModal(); + document.removeEventListener('keydown', handleEscape); + } + }; + document.addEventListener('keydown', handleEscape); + + if (window.lucide) { + lucide.createIcons(); + } +} + +function closeSemanticSettingsModal() { + var modal = document.getElementById('semanticSettingsModal'); + if (modal) modal.remove(); +} + +function toggleLlmEnhancement(enabled) { + llmEnhancementSettings.enabled = enabled; + localStorage.setItem('ccw-llm-enhancement-enabled', enabled.toString()); + + var settingsSection = document.getElementById('llmSettingsSection'); + if (settingsSection) { + settingsSection.classList.toggle('opacity-50', !enabled); + settingsSection.querySelectorAll('select').forEach(function(el) { el.disabled = !enabled; }); + } + + renderCliStatus(); + showRefreshToast('LLM Enhancement ' + (enabled ? 'enabled' : 'disabled'), 'success'); +} + +function updateLlmTool(tool) { + llmEnhancementSettings.tool = tool; + localStorage.setItem('ccw-llm-enhancement-tool', tool); + showRefreshToast('Primary LLM tool set to ' + tool, 'success'); +} + +function updateLlmFallback(tool) { + llmEnhancementSettings.fallbackTool = tool; + localStorage.setItem('ccw-llm-enhancement-fallback', tool); + showRefreshToast('Fallback tool set to ' + (tool || 'none'), 'success'); +} + +function updateLlmBatchSize(size) { + llmEnhancementSettings.batchSize = parseInt(size, 10); + localStorage.setItem('ccw-llm-enhancement-batch-size', size); + showRefreshToast('Batch size set to ' + size + ' files', 'success'); +} + +function updateLlmTimeout(ms) { + llmEnhancementSettings.timeoutMs = parseInt(ms, 10); + localStorage.setItem('ccw-llm-enhancement-timeout', ms); + var mins = parseInt(ms, 10) / 60000; + showRefreshToast('Timeout set to ' + mins + ' minute' + (mins > 1 ? 's' : ''), 'success'); +} + +async function runEnhanceCommand() { + if (!llmEnhancementSettings.enabled) { + showRefreshToast('Please enable LLM Enhancement first', 'warning'); + return; + } + + showRefreshToast('Starting LLM enhancement...', 'info'); + closeSemanticSettingsModal(); + + try { + var response = await fetch('/api/codexlens/enhance', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + path: projectPath, + tool: llmEnhancementSettings.tool, + batchSize: llmEnhancementSettings.batchSize, + timeoutMs: llmEnhancementSettings.timeoutMs + }) + }); + + var result = await response.json(); + if (result.success) { + var enhanced = result.result?.enhanced || 0; + showRefreshToast('Enhanced ' + enhanced + ' files with LLM', 'success'); + } else { + showRefreshToast('Enhance failed: ' + result.error, 'error'); + } + } catch (err) { + showRefreshToast('Enhance error: ' + err.message, 'error'); + } +} + +function viewEnhanceStatus() { + openSemanticMetadataViewer(); +} + +// ========== Semantic Metadata Viewer ========== +var semanticMetadataCache = { + entries: [], + total: 0, + offset: 0, + limit: 50, + loading: false +}; + +async function openSemanticMetadataViewer() { + closeSemanticSettingsModal(); + + var modal = document.createElement('div'); + modal.id = 'semanticMetadataModal'; + modal.className = 'generic-modal-overlay'; + modal.onclick = function(e) { if (e.target === modal) closeSemanticMetadataViewer(); }; + + modal.innerHTML = + '
' + + '
' + + '
' + + '' + + '

Semantic Metadata Browser

' + + 'Loading...' + + '
' + + '' + + '
' + + '
' + + '
' + + '
' + + '' + + '' + + '
' + + '
' + + '-' + + '
' + + '
' + + '
' + + '
' + + '
' + + 'Loading metadata...' + + '
' + + '
' + + '' + + '
' + + '
'; + + document.body.appendChild(modal); + + requestAnimationFrame(function() { + modal.classList.add('active'); + }); + + var handleEscape = function(e) { + if (e.key === 'Escape') { + closeSemanticMetadataViewer(); + document.removeEventListener('keydown', handleEscape); + } + }; + document.addEventListener('keydown', handleEscape); + + if (window.lucide) { + lucide.createIcons(); + } + + await loadSemanticMetadata(); +} + +function closeSemanticMetadataViewer() { + var modal = document.getElementById('semanticMetadataModal'); + if (modal) { + modal.classList.remove('active'); + setTimeout(function() { modal.remove(); }, 200); + } +} + +async function loadSemanticMetadata(offset, toolFilter) { + offset = typeof offset === 'number' ? offset : semanticMetadataCache.offset; + toolFilter = toolFilter !== undefined ? toolFilter : (document.getElementById('semanticToolFilter')?.value || ''); + + semanticMetadataCache.loading = true; + + var container = document.getElementById('semanticMetadataTableContainer'); + if (container) { + container.innerHTML = + '
' + + '
' + + 'Loading metadata...' + + '
'; + } + + try { + var url = '/api/codexlens/semantic/metadata?offset=' + offset + '&limit=' + semanticMetadataCache.limit; + if (toolFilter) { + url += '&tool=' + encodeURIComponent(toolFilter); + } + + var response = await fetch(url); + var data = await response.json(); + + if (data.success && data.result) { + semanticMetadataCache.entries = data.result.entries || []; + semanticMetadataCache.total = data.result.total || 0; + semanticMetadataCache.offset = offset; + + renderSemanticMetadataTable(); + updateSemanticPagination(); + } else { + container.innerHTML = + '
' + + '' + + '

Error loading metadata: ' + (data.error || 'Unknown error') + '

' + + '
'; + if (window.lucide) lucide.createIcons(); + } + } catch (err) { + container.innerHTML = + '
' + + '' + + '

Error: ' + err.message + '

' + + '
'; + if (window.lucide) lucide.createIcons(); + } + + semanticMetadataCache.loading = false; +} + +function escapeHtmlSemantic(text) { + if (!text) return ''; + var div = document.createElement('div'); + div.textContent = text; + return div.innerHTML; +} + +function renderSemanticMetadataTable() { + var container = document.getElementById('semanticMetadataTableContainer'); + if (!container) return; + + var entries = semanticMetadataCache.entries; + + if (!entries.length) { + container.innerHTML = + '
' + + '' + + '

No semantic metadata found

' + + '

Run \'codex-lens enhance\' to generate metadata for indexed files.

' + + '' + + '
'; + if (window.lucide) lucide.createIcons(); + return; + } + + var rows = entries.map(function(entry, idx) { + var keywordsHtml = (entry.keywords || []).slice(0, 4).map(function(k) { + return '' + escapeHtmlSemantic(k) + ''; + }).join(''); + if ((entry.keywords || []).length > 4) { + keywordsHtml += '+' + (entry.keywords.length - 4) + ''; + } + + var date = entry.generated_at ? new Date(entry.generated_at * 1000).toLocaleDateString() : '-'; + + return ( + '' + + '' + + '
' + + '' + + '' + escapeHtmlSemantic(entry.file_name || '-') + '' + + '
' + + '
' + + escapeHtmlSemantic(entry.full_path || '-') + + '
' + + '' + + '' + escapeHtmlSemantic(entry.language || '-') + '' + + '' + escapeHtmlSemantic((entry.purpose || '-').substring(0, 50)) + + ((entry.purpose || '').length > 50 ? '...' : '') + '' + + '' + (keywordsHtml || '-') + '' + + '' + + '' + + escapeHtmlSemantic(entry.llm_tool || '-') + + '' + + '' + + '' + date + '' + + '' + + '' + + '' + + '
' + + '
' + + '

Summary

' + + '

' + escapeHtmlSemantic(entry.summary || 'No summary available') + '

' + + '
' + + '
' + + '

All Keywords

' + + '
' + + (entry.keywords || []).map(function(k) { + return '' + escapeHtmlSemantic(k) + ''; + }).join('') + + '
' + + '
' + + '
' + + ' ' + (entry.line_count || 0) + ' lines' + + ' ' + escapeHtmlSemantic(entry.llm_tool || 'Unknown') + '' + + ' ' + date + '' + + '
' + + '
' + + '' + + '' + ); + }).join(''); + + container.innerHTML = + '' + + '' + + '' + + '' + + '' + + '' + + '' + + '' + + '' + + '' + + '' + + '' + rows + '' + + '
FileLanguagePurposeKeywordsToolDate
'; + + if (window.lucide) lucide.createIcons(); +} + +function toggleSemanticDetail(idx) { + var detailRow = document.getElementById('semanticDetail' + idx); + if (detailRow) { + detailRow.classList.toggle('hidden'); + if (window.lucide) lucide.createIcons(); + } +} + +function updateSemanticPagination() { + var total = semanticMetadataCache.total; + var offset = semanticMetadataCache.offset; + var limit = semanticMetadataCache.limit; + var entries = semanticMetadataCache.entries; + + var countBadge = document.getElementById('semanticMetadataCount'); + if (countBadge) { + countBadge.textContent = total + ' entries'; + } + + var paginationInfo = document.getElementById('semanticPaginationInfo'); + if (paginationInfo) { + if (total > 0) { + paginationInfo.textContent = (offset + 1) + '-' + (offset + entries.length) + ' of ' + total; + } else { + paginationInfo.textContent = 'No entries'; + } + } + + var pageSelect = document.getElementById('semanticPageSelect'); + if (pageSelect) { + var totalPages = Math.ceil(total / limit) || 1; + var currentPage = Math.floor(offset / limit); + + pageSelect.innerHTML = ''; + for (var i = 0; i < totalPages; i++) { + var opt = document.createElement('option'); + opt.value = i; + opt.textContent = i + 1; + if (i === currentPage) opt.selected = true; + pageSelect.appendChild(opt); + } + } + + var prevBtn = document.getElementById('semanticPrevBtn'); + var nextBtn = document.getElementById('semanticNextBtn'); + if (prevBtn) prevBtn.disabled = offset === 0; + if (nextBtn) nextBtn.disabled = offset + limit >= total; +} + +function semanticPrevPage() { + if (semanticMetadataCache.offset > 0) { + loadSemanticMetadata(Math.max(0, semanticMetadataCache.offset - semanticMetadataCache.limit)); + } +} + +function semanticNextPage() { + if (semanticMetadataCache.offset + semanticMetadataCache.limit < semanticMetadataCache.total) { + loadSemanticMetadata(semanticMetadataCache.offset + semanticMetadataCache.limit); + } +} + +function semanticGoToPage(pageIndex) { + var offset = parseInt(pageIndex, 10) * semanticMetadataCache.limit; + loadSemanticMetadata(offset); +} + +function filterSemanticByTool(tool) { + loadSemanticMetadata(0, tool); +} + +function refreshSemanticMetadata() { + loadSemanticMetadata(semanticMetadataCache.offset); +} + +function getLlmEnhancementSettings() { + return Object.assign({}, llmEnhancementSettings); +} diff --git a/ccw/src/templates/dashboard-js/components/notifications.js b/ccw/src/templates/dashboard-js/components/notifications.js index 5211b044..e48b5967 100644 --- a/ccw/src/templates/dashboard-js/components/notifications.js +++ b/ccw/src/templates/dashboard-js/components/notifications.js @@ -194,6 +194,50 @@ function handleNotification(data) { } break; + // CLI Review Events + case 'CLI_REVIEW_UPDATED': + if (typeof handleCliReviewUpdated === 'function') { + handleCliReviewUpdated(payload); + } + // Also refresh CLI history to show review status + if (typeof refreshCliHistory === 'function') { + refreshCliHistory(); + } + break; + + // System Notify Events (from CLI commands) + case 'REFRESH_REQUIRED': + handleRefreshRequired(payload); + break; + + case 'MEMORY_UPDATED': + if (typeof handleMemoryUpdated === 'function') { + handleMemoryUpdated(payload); + } + // Force refresh of memory view if active + if (getCurrentView && getCurrentView() === 'memory') { + if (typeof loadMemoryStats === 'function') { + loadMemoryStats().then(function() { + if (typeof renderHotspotsColumn === 'function') renderHotspotsColumn(); + }); + } + } + break; + + 
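The new cases above and below extend the dashboard's WebSocket dispatch. A minimal sketch of the message contract they imply; the literal types are taken from the case labels in this patch, and bundling them into a single union is an assumption:

```ts
// Sketch only: scope values mirror handleRefreshRequired further down.
type RefreshScope = 'memory' | 'history' | 'insights' | 'all';

interface NotifyMessage {
  type:
    | 'CLI_REVIEW_UPDATED'
    | 'REFRESH_REQUIRED'
    | 'MEMORY_UPDATED'
    | 'HISTORY_UPDATED'
    | 'INSIGHT_GENERATED';
  payload?: { scope?: RefreshScope } & Record<string, unknown>;
}
```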
case 'HISTORY_UPDATED': + // Refresh CLI history when updated externally + if (typeof refreshCliHistory === 'function') { + refreshCliHistory(); + } + break; + + case 'INSIGHT_GENERATED': + // Refresh insights when new insight is generated + if (typeof loadInsightsHistory === 'function') { + loadInsightsHistory(); + } + break; + default: console.log('[WS] Unknown notification type:', type); } @@ -427,6 +471,60 @@ async function refreshWorkspaceData(newData) { lastDataHash = calculateDataHash(); } +/** + * Handle REFRESH_REQUIRED events from CLI commands + * @param {Object} payload - Contains scope (memory|history|insights|all) + */ +function handleRefreshRequired(payload) { + const scope = payload?.scope || 'all'; + console.log('[WS] Refresh required for scope:', scope); + + switch (scope) { + case 'memory': + // Refresh memory stats and graph + if (typeof loadMemoryStats === 'function') { + loadMemoryStats().then(function() { + if (typeof renderHotspotsColumn === 'function') renderHotspotsColumn(); + }); + } + if (typeof loadMemoryGraph === 'function') { + loadMemoryGraph(); + } + break; + + case 'history': + // Refresh CLI history + if (typeof refreshCliHistory === 'function') { + refreshCliHistory(); + } + break; + + case 'insights': + // Refresh insights history + if (typeof loadInsightsHistory === 'function') { + loadInsightsHistory(); + } + break; + + case 'all': + default: + // Refresh everything + refreshIfNeeded(); + if (typeof loadMemoryStats === 'function') { + loadMemoryStats().then(function() { + if (typeof renderHotspotsColumn === 'function') renderHotspotsColumn(); + }); + } + if (typeof refreshCliHistory === 'function') { + refreshCliHistory(); + } + if (typeof loadInsightsHistory === 'function') { + loadInsightsHistory(); + } + break; + } +} + // ========== Cleanup ========== function stopAutoRefresh() { if (autoRefreshInterval) { diff --git a/ccw/src/templates/dashboard-js/views/cli-manager.js b/ccw/src/templates/dashboard-js/views/cli-manager.js index 77ec91c2..c6803840 100644 --- a/ccw/src/templates/dashboard-js/views/cli-manager.js +++ b/ccw/src/templates/dashboard-js/views/cli-manager.js @@ -6,6 +6,8 @@ var currentCliExecution = null; var cliExecutionOutput = ''; var ccwInstallations = []; var ccwEndpointTools = []; +var cliToolConfig = null; // Store loaded CLI config +var predefinedModels = {}; // Store predefined models per tool // ========== CCW Installations ========== async function loadCcwInstallations() { @@ -37,6 +39,271 @@ async function loadCcwEndpointTools() { } } +// ========== CLI Tool Configuration ========== +async function loadCliToolConfig() { + try { + var response = await fetch('/api/cli/config'); + if (!response.ok) throw new Error('Failed to load CLI config'); + var data = await response.json(); + cliToolConfig = data.config || null; + predefinedModels = data.predefinedModels || {}; + return data; + } catch (err) { + console.error('Failed to load CLI config:', err); + cliToolConfig = null; + predefinedModels = {}; + return null; + } +} + +async function updateCliToolConfig(tool, updates) { + try { + var response = await fetch('/api/cli/config/' + tool, { + method: 'PUT', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(updates) + }); + if (!response.ok) throw new Error('Failed to update CLI config'); + var data = await response.json(); + if (data.success && cliToolConfig && cliToolConfig.tools) { + cliToolConfig.tools[tool] = data.config; + } + return data; + } catch (err) { + console.error('Failed to update CLI config:', 
err); + throw err; + } +} + +// ========== Tool Configuration Modal ========== +async function showToolConfigModal(toolName) { + // Load config if not already loaded + if (!cliToolConfig) { + await loadCliToolConfig(); + } + + var toolConfig = cliToolConfig && cliToolConfig.tools ? cliToolConfig.tools[toolName] : null; + var models = predefinedModels[toolName] || []; + var status = cliToolStatus[toolName] || {}; + + if (!toolConfig) { + toolConfig = { enabled: true, primaryModel: '', secondaryModel: '' }; + } + + var content = buildToolConfigModalContent(toolName, toolConfig, models, status); + showModal('Configure ' + toolName.charAt(0).toUpperCase() + toolName.slice(1), content, { size: 'md' }); + + // Initialize event handlers after modal is shown + setTimeout(function() { + initToolConfigModalEvents(toolName, toolConfig, models); + }, 100); +} + +function buildToolConfigModalContent(tool, config, models, status) { + var isAvailable = status.available; + var isEnabled = config.enabled; + + // Check if model is custom (not in predefined list or empty) + var isPrimaryCustom = !config.primaryModel || models.indexOf(config.primaryModel) === -1; + var isSecondaryCustom = !config.secondaryModel || models.indexOf(config.secondaryModel) === -1; + + var modelsOptionsHtml = function(selected, isCustom) { + var html = ''; + for (var i = 0; i < models.length; i++) { + var m = models[i]; + html += ''; + } + html += ''; + return html; + }; + + return '
' + + // Status Section + '
' + + '

Status

' + + '
' + + '' + + ' ' + + (isAvailable ? 'Installed' : 'Not Installed') + + '' + + '' + + ' ' + + (isEnabled ? 'Enabled' : 'Disabled') + + '' + + '
' + + '
' + + + // Actions Section + '
' + + '

Actions

' + + '
' + + '' + + '' + + '
' + + '
' + + + // Primary Model Section + '
' + + '

Primary Model (CLI endpoint calls)

' + + '
' + + '' + + '' + + '
' + + '
' + + + // Secondary Model Section + '
' + + '

Secondary Model (internal tools)

' + + '
' + + '' + + '' + + '
' + + '
' + + + // Footer + '' + + '
'; +} + +function initToolConfigModalEvents(tool, currentConfig, models) { + // Toggle Enable/Disable + var toggleBtn = document.getElementById('toggleEnableBtn'); + if (toggleBtn) { + toggleBtn.onclick = async function() { + var newEnabled = !currentConfig.enabled; + try { + await updateCliToolConfig(tool, { enabled: newEnabled }); + showRefreshToast(tool + ' ' + (newEnabled ? 'enabled' : 'disabled'), 'success'); + closeModal(); + renderToolsSection(); + if (window.lucide) lucide.createIcons(); + } catch (err) { + showRefreshToast('Failed to update: ' + err.message, 'error'); + } + }; + } + + // Install/Uninstall + var installBtn = document.getElementById('installBtn'); + if (installBtn) { + installBtn.onclick = async function() { + var status = cliToolStatus[tool] || {}; + var endpoint = status.available ? '/api/cli/uninstall' : '/api/cli/install'; + var action = status.available ? 'uninstalling' : 'installing'; + + showRefreshToast(tool.charAt(0).toUpperCase() + tool.slice(1) + ' ' + action + '...', 'info'); + closeModal(); + + try { + var response = await fetch(endpoint, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ tool: tool }) + }); + var result = await response.json(); + + if (result.success) { + showRefreshToast(result.message || (tool + ' ' + (status.available ? 'uninstalled' : 'installed')), 'success'); + await loadCliToolStatus(); + renderToolsSection(); + if (window.lucide) lucide.createIcons(); + } else { + showRefreshToast(result.error || 'Operation failed', 'error'); + } + } catch (err) { + showRefreshToast('Failed: ' + err.message, 'error'); + } + }; + } + + // Model select handlers + var primarySelect = document.getElementById('primaryModelSelect'); + var primaryCustom = document.getElementById('primaryModelCustom'); + var secondarySelect = document.getElementById('secondaryModelSelect'); + var secondaryCustom = document.getElementById('secondaryModelCustom'); + + if (primarySelect && primaryCustom) { + primarySelect.onchange = function() { + if (this.value === '__custom__') { + primaryCustom.style.display = 'block'; + primaryCustom.focus(); + } else { + primaryCustom.style.display = 'none'; + primaryCustom.value = ''; + } + }; + } + + if (secondarySelect && secondaryCustom) { + secondarySelect.onchange = function() { + if (this.value === '__custom__') { + secondaryCustom.style.display = 'block'; + secondaryCustom.focus(); + } else { + secondaryCustom.style.display = 'none'; + secondaryCustom.value = ''; + } + }; + } + + // Save button + var saveBtn = document.getElementById('saveConfigBtn'); + if (saveBtn) { + saveBtn.onclick = async function() { + var primaryModel = primarySelect.value === '__custom__' + ? primaryCustom.value.trim() + : primarySelect.value; + var secondaryModel = secondarySelect.value === '__custom__' + ? 
secondaryCustom.value.trim() + : secondarySelect.value; + + if (!primaryModel) { + showRefreshToast('Primary model is required', 'error'); + return; + } + if (!secondaryModel) { + showRefreshToast('Secondary model is required', 'error'); + return; + } + + try { + await updateCliToolConfig(tool, { + primaryModel: primaryModel, + secondaryModel: secondaryModel + }); + showRefreshToast('Configuration saved', 'success'); + closeModal(); + } catch (err) { + showRefreshToast('Failed to save: ' + err.message, 'error'); + } + }; + } + + // Initialize lucide icons in modal + if (window.lucide) lucide.createIcons(); +} + // ========== Rendering ========== async function renderCliManager() { var container = document.getElementById('mainContent'); @@ -94,12 +361,13 @@ function renderToolsSection() { var isAvailable = status.available; var isDefault = defaultCliTool === tool; - return '
' + + return '
' + '
' + '' + '
' + '
' + tool.charAt(0).toUpperCase() + tool.slice(1) + (isDefault ? '' + t('cli.default') + '' : '') + + '' + '
' + '
' + toolDescriptions[tool] + '
' + '
' + @@ -109,7 +377,7 @@ function renderToolsSection() { ? ' ' + t('cli.ready') + '' : ' ' + t('cli.notInstalled') + '') + (isAvailable && !isDefault - ? '' + ? '' : '') + '
' + '
'; @@ -136,11 +404,13 @@ function renderToolsSection() { // Semantic Search item (only show if CodexLens is installed) var semanticHtml = ''; if (codexLensStatus.ready) { - semanticHtml = '
' + + semanticHtml = '
' + '
' + '' + '
' + - '
Semantic Search AI
' + + '
Semantic Search AI' + + (llmEnhancementSettings.enabled ? 'LLM' : '') + + '
' + '
' + (semanticStatus.available ? 'AI-powered code understanding' : 'Natural language code search') + '
' + '
' + '
' + @@ -148,7 +418,7 @@ function renderToolsSection() { (semanticStatus.available ? ' ' + (semanticStatus.backend || 'Ready') + '' : ' Not Installed' + - '') + + '') + '
' + '
'; } diff --git a/ccw/src/templates/dashboard-js/views/prompt-history.js b/ccw/src/templates/dashboard-js/views/prompt-history.js index 13e4c4f4..8e896516 100644 --- a/ccw/src/templates/dashboard-js/views/prompt-history.js +++ b/ccw/src/templates/dashboard-js/views/prompt-history.js @@ -44,7 +44,7 @@ async function loadPromptInsights() { async function loadPromptInsightsHistory() { try { - var response = await fetch('/api/memory/insights?limit=20'); + var response = await fetch('/api/memory/insights?limit=20&path=' + encodeURIComponent(projectPath)); if (!response.ok) throw new Error('Failed to load insights history'); var data = await response.json(); promptInsightsHistory = data.insights || []; @@ -699,6 +699,9 @@ async function triggerCliInsightsAnalysis() { console.log('[PromptHistory] Insights parsed:', promptInsights); } + // Reload insights history to show the new analysis result + await loadPromptInsightsHistory(); + showRefreshToast(t('toast.completed') + ' (' + tool + ')', 'success'); } catch (err) { console.error('CLI insights analysis failed:', err); diff --git a/ccw/src/tools/cli-config-manager.ts b/ccw/src/tools/cli-config-manager.ts new file mode 100644 index 00000000..2b960cd1 --- /dev/null +++ b/ccw/src/tools/cli-config-manager.ts @@ -0,0 +1,272 @@ +/** + * CLI Configuration Manager + * Handles loading, saving, and managing CLI tool configurations + * Stores config in .workflow/cli-config.json + */ +import * as fs from 'fs'; +import * as path from 'path'; + +// ========== Types ========== + +export interface CliToolConfig { + enabled: boolean; + primaryModel: string; // For CLI endpoint calls (ccw cli exec) + secondaryModel: string; // For internal calls (llm_enhancer, generate_module_docs) +} + +export interface CliConfig { + version: number; + tools: Record; +} + +export type CliToolName = 'gemini' | 'qwen' | 'codex'; + +// ========== Constants ========== + +export const PREDEFINED_MODELS: Record = { + gemini: ['gemini-2.5-pro', 'gemini-2.5-flash', 'gemini-2.0-flash', 'gemini-1.5-pro', 'gemini-1.5-flash'], + qwen: ['coder-model', 'vision-model', 'qwen2.5-coder-32b'], + codex: ['gpt5-codex', 'gpt-4.1', 'o4-mini', 'o3'] +}; + +export const DEFAULT_CONFIG: CliConfig = { + version: 1, + tools: { + gemini: { + enabled: true, + primaryModel: 'gemini-2.5-pro', + secondaryModel: 'gemini-2.5-flash' + }, + qwen: { + enabled: true, + primaryModel: 'coder-model', + secondaryModel: 'coder-model' + }, + codex: { + enabled: true, + primaryModel: 'gpt5-codex', + secondaryModel: 'gpt5-codex' + } + } +}; + +const CONFIG_DIR = '.workflow'; +const CONFIG_FILE = 'cli-config.json'; + +// ========== Helper Functions ========== + +function getConfigPath(baseDir: string): string { + return path.join(baseDir, CONFIG_DIR, CONFIG_FILE); +} + +function ensureConfigDir(baseDir: string): void { + const configDir = path.join(baseDir, CONFIG_DIR); + if (!fs.existsSync(configDir)) { + fs.mkdirSync(configDir, { recursive: true }); + } +} + +function isValidToolName(tool: string): tool is CliToolName { + return ['gemini', 'qwen', 'codex'].includes(tool); +} + +function validateConfig(config: unknown): config is CliConfig { + if (!config || typeof config !== 'object') return false; + const c = config as Record; + + if (typeof c.version !== 'number') return false; + if (!c.tools || typeof c.tools !== 'object') return false; + + const tools = c.tools as Record; + for (const toolName of ['gemini', 'qwen', 'codex']) { + const tool = tools[toolName]; + if (!tool || typeof tool !== 'object') return false; + + 
const t = tool as Record<string, unknown>;
+    if (typeof t.enabled !== 'boolean') return false;
+    if (typeof t.primaryModel !== 'string') return false;
+    if (typeof t.secondaryModel !== 'string') return false;
+  }
+
+  return true;
+}
+
+function mergeWithDefaults(config: Partial<CliConfig>): CliConfig {
+  const result: CliConfig = {
+    version: config.version ?? DEFAULT_CONFIG.version,
+    tools: { ...DEFAULT_CONFIG.tools }
+  };
+
+  if (config.tools) {
+    for (const toolName of Object.keys(config.tools)) {
+      if (isValidToolName(toolName) && config.tools[toolName]) {
+        result.tools[toolName] = {
+          ...DEFAULT_CONFIG.tools[toolName],
+          ...config.tools[toolName]
+        };
+      }
+    }
+  }
+
+  return result;
+}
+
+// ========== Main Functions ==========
+
+/**
+ * Load CLI configuration from .workflow/cli-config.json
+ * Returns default config if file doesn't exist or is invalid
+ */
+export function loadCliConfig(baseDir: string): CliConfig {
+  const configPath = getConfigPath(baseDir);
+
+  try {
+    if (!fs.existsSync(configPath)) {
+      return { ...DEFAULT_CONFIG };
+    }
+
+    const content = fs.readFileSync(configPath, 'utf-8');
+    const parsed = JSON.parse(content);
+
+    if (validateConfig(parsed)) {
+      return mergeWithDefaults(parsed);
+    }
+
+    // Invalid config, return defaults
+    console.warn('[cli-config] Invalid config file, using defaults');
+    return { ...DEFAULT_CONFIG };
+  } catch (err) {
+    console.error('[cli-config] Error loading config:', err);
+    return { ...DEFAULT_CONFIG };
+  }
+}
+
+/**
+ * Save CLI configuration to .workflow/cli-config.json
+ */
+export function saveCliConfig(baseDir: string, config: CliConfig): void {
+  ensureConfigDir(baseDir);
+  const configPath = getConfigPath(baseDir);
+
+  try {
+    fs.writeFileSync(configPath, JSON.stringify(config, null, 2), 'utf-8');
+  } catch (err) {
+    console.error('[cli-config] Error saving config:', err);
+    throw new Error(`Failed to save CLI config: ${err}`);
+  }
+}
+
+/**
+ * Get configuration for a specific tool
+ */
+export function getToolConfig(baseDir: string, tool: string): CliToolConfig {
+  if (!isValidToolName(tool)) {
+    throw new Error(`Invalid tool name: ${tool}`);
+  }
+
+  const config = loadCliConfig(baseDir);
+  return config.tools[tool] || DEFAULT_CONFIG.tools[tool];
+}
+
+/**
+ * Update configuration for a specific tool
+ * Returns the updated tool config
+ */
+export function updateToolConfig(
+  baseDir: string,
+  tool: string,
+  updates: Partial<CliToolConfig>
+): CliToolConfig {
+  if (!isValidToolName(tool)) {
+    throw new Error(`Invalid tool name: ${tool}`);
+  }
+
+  const config = loadCliConfig(baseDir);
+  const currentToolConfig = config.tools[tool] || DEFAULT_CONFIG.tools[tool];
+
+  // Apply updates
+  const updatedToolConfig: CliToolConfig = {
+    enabled: updates.enabled !== undefined ?
updates.enabled : currentToolConfig.enabled, + primaryModel: updates.primaryModel || currentToolConfig.primaryModel, + secondaryModel: updates.secondaryModel || currentToolConfig.secondaryModel + }; + + // Save updated config + config.tools[tool] = updatedToolConfig; + saveCliConfig(baseDir, config); + + return updatedToolConfig; +} + +/** + * Enable a CLI tool + */ +export function enableTool(baseDir: string, tool: string): CliToolConfig { + return updateToolConfig(baseDir, tool, { enabled: true }); +} + +/** + * Disable a CLI tool + */ +export function disableTool(baseDir: string, tool: string): CliToolConfig { + return updateToolConfig(baseDir, tool, { enabled: false }); +} + +/** + * Check if a tool is enabled + */ +export function isToolEnabled(baseDir: string, tool: string): boolean { + try { + const config = getToolConfig(baseDir, tool); + return config.enabled; + } catch { + return true; // Default to enabled if error + } +} + +/** + * Get primary model for a tool + */ +export function getPrimaryModel(baseDir: string, tool: string): string { + try { + const config = getToolConfig(baseDir, tool); + return config.primaryModel; + } catch { + return isValidToolName(tool) ? DEFAULT_CONFIG.tools[tool].primaryModel : 'gemini-2.5-pro'; + } +} + +/** + * Get secondary model for a tool (used for internal calls) + */ +export function getSecondaryModel(baseDir: string, tool: string): string { + try { + const config = getToolConfig(baseDir, tool); + return config.secondaryModel; + } catch { + return isValidToolName(tool) ? DEFAULT_CONFIG.tools[tool].secondaryModel : 'gemini-2.5-flash'; + } +} + +/** + * Get all predefined models for a tool + */ +export function getPredefinedModels(tool: string): string[] { + if (!isValidToolName(tool)) { + return []; + } + return [...PREDEFINED_MODELS[tool]]; +} + +/** + * Get full config response for API (includes predefined models) + */ +export function getFullConfigResponse(baseDir: string): { + config: CliConfig; + predefinedModels: Record; +} { + return { + config: loadCliConfig(baseDir), + predefinedModels: { ...PREDEFINED_MODELS } + }; +} diff --git a/ccw/src/tools/cli-executor.ts b/ccw/src/tools/cli-executor.ts index 70cd75db..ce065835 100644 --- a/ccw/src/tools/cli-executor.ts +++ b/ccw/src/tools/cli-executor.ts @@ -22,6 +22,12 @@ import { getResumeModeDescription, type ResumeDecision } from './resume-strategy.js'; +import { + isToolEnabled as isToolEnabledFromConfig, + enableTool as enableToolFromConfig, + disableTool as disableToolFromConfig, + getPrimaryModel +} from './cli-config-manager.js'; // CLI History storage path const CLI_HISTORY_DIR = join(process.cwd(), '.workflow', '.cli-history'); @@ -720,12 +726,23 @@ async function executeCliTool( } } + // Determine effective model (use config's primaryModel if not explicitly provided) + let effectiveModel = model; + if (!effectiveModel) { + try { + effectiveModel = getPrimaryModel(workingDir, tool); + } catch { + // Config not available, use default (let the CLI tool use its own default) + effectiveModel = undefined; + } + } + // Build command const { command, args, useStdin } = buildCommand({ tool, prompt: finalPrompt, mode, - model, + model: effectiveModel, dir: cd, include: includeDirs, nativeResume: nativeResumeConfig @@ -1203,6 +1220,19 @@ export function getConversationDetail(baseDir: string, conversationId: string): return loadConversation(historyDir, conversationId); } +/** + * Get conversation detail with native session info + */ +export function getConversationDetailWithNativeInfo(baseDir: 
string, conversationId: string) { + try { + const store = getSqliteStoreSync(baseDir); + return store.getConversationWithNativeInfo(conversationId); + } catch { + // SQLite not initialized, return null + return null; + } +} + /** * Get execution detail by ID (legacy, returns ExecutionRecord for backward compatibility) */ @@ -1274,6 +1304,181 @@ export async function getCliToolsStatus(): Promise = { + gemini: '@google/gemini-cli', + qwen: '@qwen-code/qwen-code', + codex: '@openai/codex', + claude: '@anthropic-ai/claude-code' +}; + +// Disabled tools storage (in-memory fallback, main storage is in cli-config.json) +const disabledTools = new Set(); + +// Default working directory for config operations +let configBaseDir = process.cwd(); + +/** + * Set the base directory for config operations + */ +export function setConfigBaseDir(dir: string): void { + configBaseDir = dir; +} + +/** + * Install a CLI tool via npm + */ +export async function installCliTool(tool: string): Promise<{ success: boolean; error?: string }> { + const packageName = CLI_TOOL_PACKAGES[tool]; + if (!packageName) { + return { success: false, error: `Unknown tool: ${tool}` }; + } + + return new Promise((resolve) => { + const child = spawn('npm', ['install', '-g', packageName], { + shell: true, + stdio: ['ignore', 'pipe', 'pipe'] + }); + + let stderr = ''; + child.stderr?.on('data', (data) => { stderr += data.toString(); }); + + child.on('close', (code) => { + // Clear cache to force re-check + toolAvailabilityCache.delete(tool); + + if (code === 0) { + resolve({ success: true }); + } else { + resolve({ success: false, error: stderr || `npm install failed with code ${code}` }); + } + }); + + child.on('error', (err) => { + resolve({ success: false, error: err.message }); + }); + + // Timeout after 2 minutes + setTimeout(() => { + child.kill(); + resolve({ success: false, error: 'Installation timed out' }); + }, 120000); + }); +} + +/** + * Uninstall a CLI tool via npm + */ +export async function uninstallCliTool(tool: string): Promise<{ success: boolean; error?: string }> { + const packageName = CLI_TOOL_PACKAGES[tool]; + if (!packageName) { + return { success: false, error: `Unknown tool: ${tool}` }; + } + + return new Promise((resolve) => { + const child = spawn('npm', ['uninstall', '-g', packageName], { + shell: true, + stdio: ['ignore', 'pipe', 'pipe'] + }); + + let stderr = ''; + child.stderr?.on('data', (data) => { stderr += data.toString(); }); + + child.on('close', (code) => { + // Clear cache to force re-check + toolAvailabilityCache.delete(tool); + + if (code === 0) { + resolve({ success: true }); + } else { + resolve({ success: false, error: stderr || `npm uninstall failed with code ${code}` }); + } + }); + + child.on('error', (err) => { + resolve({ success: false, error: err.message }); + }); + + // Timeout after 1 minute + setTimeout(() => { + child.kill(); + resolve({ success: false, error: 'Uninstallation timed out' }); + }, 60000); + }); +} + +/** + * Enable a CLI tool (updates config file) + */ +export function enableCliTool(tool: string): { success: boolean } { + try { + enableToolFromConfig(configBaseDir, tool); + disabledTools.delete(tool); // Also update in-memory fallback + return { success: true }; + } catch (err) { + console.error('[cli-executor] Error enabling tool:', err); + disabledTools.delete(tool); // Fallback to in-memory + return { success: true }; + } +} + +/** + * Disable a CLI tool (updates config file) + */ +export function disableCliTool(tool: string): { success: boolean } { + try { + 
disableToolFromConfig(configBaseDir, tool); + disabledTools.add(tool); // Also update in-memory fallback + return { success: true }; + } catch (err) { + console.error('[cli-executor] Error disabling tool:', err); + disabledTools.add(tool); // Fallback to in-memory + return { success: true }; + } +} + +/** + * Check if a tool is enabled (reads from config file) + */ +export function isToolEnabled(tool: string): boolean { + try { + return isToolEnabledFromConfig(configBaseDir, tool); + } catch { + // Fallback to in-memory check + return !disabledTools.has(tool); + } +} + +/** + * Get full status of all CLI tools including enabled state + */ +export async function getCliToolsFullStatus(): Promise> { + const tools = Object.keys(CLI_TOOL_PACKAGES); + const results: Record = {}; + + await Promise.all(tools.map(async (tool) => { + const availability = await checkToolAvailability(tool); + results[tool] = { + available: availability.available, + enabled: isToolEnabled(tool), + path: availability.path, + packageName: CLI_TOOL_PACKAGES[tool] + }; + })); + + return results; +} + // ========== Prompt Concatenation System ========== /** diff --git a/ccw/src/tools/cli-history-store.ts b/ccw/src/tools/cli-history-store.ts index e4f53045..cfb89f16 100644 --- a/ccw/src/tools/cli-history-store.ts +++ b/ccw/src/tools/cli-history-store.ts @@ -463,6 +463,26 @@ export class CliHistoryStore { }; } + /** + * Get conversation with native session info + */ + getConversationWithNativeInfo(id: string): (ConversationRecord & { + hasNativeSession: boolean; + nativeSessionId?: string; + nativeSessionPath?: string; + }) | null { + const conv = this.getConversation(id); + if (!conv) return null; + + const mapping = this.getNativeSessionMapping(id); + return { + ...conv, + hasNativeSession: !!mapping, + nativeSessionId: mapping?.native_session_id, + nativeSessionPath: mapping?.native_session_path + }; + } + /** * Query execution history */ diff --git a/ccw/src/tools/generate-module-docs.ts b/ccw/src/tools/generate-module-docs.ts index 942d62a6..ea41e259 100644 --- a/ccw/src/tools/generate-module-docs.ts +++ b/ccw/src/tools/generate-module-docs.ts @@ -9,6 +9,7 @@ import { readdirSync, statSync, existsSync, readFileSync, mkdirSync, writeFileSy import { join, resolve, basename, extname, relative } from 'path'; import { execSync } from 'child_process'; import { tmpdir } from 'os'; +import { getSecondaryModel } from './cli-config-manager.js'; // Directories to exclude const EXCLUDE_DIRS = [ @@ -266,8 +267,15 @@ export async function handler(params: Record): Promise": { + "summary": "Brief description of what this code does", + "keywords": ["keyword1", "keyword2", ...], + "purpose": "category like: auth, api, util, ui, data, config, test" + } + } +} \ No newline at end of file diff --git a/codex-lens/_test_result.txt b/codex-lens/_test_result.txt new file mode 100644 index 00000000..6ab2f61f --- /dev/null +++ b/codex-lens/_test_result.txt @@ -0,0 +1,19 @@ + + Executing gemini (analysis mode)... + +Loaded cached credentials. 
+[STARTUP] StartupProfiler.flush() called with 9 phases +[STARTUP] Recording metric for phase: cli_startup duration: 1197.5227999999997 +[STARTUP] Recording metric for phase: load_settings duration: 2.119999999999891 +[STARTUP] Recording metric for phase: migrate_settings duration: 1.401600000000144 +[STARTUP] Recording metric for phase: parse_arguments duration: 18.296000000000276 +[STARTUP] Recording metric for phase: load_cli_config duration: 56.0604000000003 +[STARTUP] Recording metric for phase: initialize_app duration: 1109.9696999999996 +[STARTUP] Recording metric for phase: authenticate duration: 1104.0013 +[STARTUP] Recording metric for phase: discover_tools duration: 3.9744999999993524 +[STARTUP] Recording metric for phase: initialize_mcp_clients duration: 0.8747000000003027 +Setup complete. I am ready for your first command. + + ✓ Completed in 16.0s + ID: 1765690668720-gemini + Continue: ccw cli exec "..." --resume 1765690668720-gemini diff --git a/codex-lens/src/codexlens/cli/commands.py b/codex-lens/src/codexlens/cli/commands.py index 66240246..a5b3f6e5 100644 --- a/codex-lens/src/codexlens/cli/commands.py +++ b/codex-lens/src/codexlens/cli/commands.py @@ -1098,3 +1098,132 @@ def clean( else: console.print(f"[red]Clean failed (unexpected):[/red] {exc}") raise typer.Exit(code=1) + + +@app.command("semantic-list") +def semantic_list( + path: Path = typer.Option(Path("."), "--path", "-p", help="Project path to list metadata from."), + offset: int = typer.Option(0, "--offset", "-o", min=0, help="Number of records to skip."), + limit: int = typer.Option(50, "--limit", "-n", min=1, max=100, help="Maximum records to return."), + tool_filter: Optional[str] = typer.Option(None, "--tool", "-t", help="Filter by LLM tool (gemini/qwen)."), + json_mode: bool = typer.Option(False, "--json", help="Output JSON response."), + verbose: bool = typer.Option(False, "--verbose", "-v", help="Enable debug logging."), +) -> None: + """List semantic metadata entries for indexed files. + + Shows files that have LLM-generated summaries and keywords. + Results are aggregated from all index databases in the project. + """ + _configure_logging(verbose) + base_path = path.expanduser().resolve() + + registry: Optional[RegistryStore] = None + try: + registry = RegistryStore() + registry.initialize() + mapper = PathMapper() + + project_info = registry.find_project(base_path) + if not project_info: + raise CodexLensError(f"No index found for: {base_path}. 
Run 'codex-lens init' first.") + + index_dir = mapper.source_to_index_dir(base_path) + if not index_dir.exists(): + raise CodexLensError(f"Index directory not found: {index_dir}") + + all_results: list = [] + total_count = 0 + + index_files = sorted(index_dir.rglob("_index.db")) + + for db_path in index_files: + try: + store = DirIndexStore(db_path) + store.initialize() + + results, count = store.list_semantic_metadata( + offset=0, + limit=1000, + llm_tool=tool_filter, + ) + + source_dir = mapper.index_to_source(db_path.parent) + for r in results: + r["source_dir"] = str(source_dir) + + all_results.extend(results) + total_count += count + + store.close() + except Exception as e: + if verbose: + console.print(f"[yellow]Warning: Error reading {db_path}: {e}[/yellow]") + + all_results.sort(key=lambda x: x["generated_at"], reverse=True) + paginated = all_results[offset : offset + limit] + + result = { + "path": str(base_path), + "total": total_count, + "offset": offset, + "limit": limit, + "count": len(paginated), + "entries": paginated, + } + + if json_mode: + print_json(success=True, result=result) + else: + if not paginated: + console.print("[yellow]No semantic metadata found.[/yellow]") + console.print("Run 'codex-lens enhance' to generate metadata for indexed files.") + else: + table = Table(title=f"Semantic Metadata ({total_count} total)") + table.add_column("File", style="cyan", max_width=40) + table.add_column("Language", style="dim") + table.add_column("Purpose", max_width=30) + table.add_column("Keywords", max_width=25) + table.add_column("Tool") + + for entry in paginated: + keywords_str = ", ".join(entry["keywords"][:3]) + if len(entry["keywords"]) > 3: + keywords_str += f" (+{len(entry['keywords']) - 3})" + + table.add_row( + entry["file_name"], + entry["language"] or "-", + (entry["purpose"] or "-")[:30], + keywords_str or "-", + entry["llm_tool"] or "-", + ) + + console.print(table) + + if total_count > len(paginated): + console.print( + f"[dim]Showing {offset + 1}-{offset + len(paginated)} of {total_count}. 
" + "Use --offset and --limit for pagination.[/dim]" + ) + + except StorageError as exc: + if json_mode: + print_json(success=False, error=f"Storage error: {exc}") + else: + console.print(f"[red]Semantic-list failed (storage):[/red] {exc}") + raise typer.Exit(code=1) + except CodexLensError as exc: + if json_mode: + print_json(success=False, error=str(exc)) + else: + console.print(f"[red]Semantic-list failed:[/red] {exc}") + raise typer.Exit(code=1) + except Exception as exc: + if json_mode: + print_json(success=False, error=f"Unexpected error: {exc}") + else: + console.print(f"[red]Semantic-list failed (unexpected):[/red] {exc}") + raise typer.Exit(code=1) + finally: + if registry is not None: + registry.close() diff --git a/codex-lens/src/codexlens/config.py b/codex-lens/src/codexlens/config.py index 27b3d73f..cd8a93ad 100644 --- a/codex-lens/src/codexlens/config.py +++ b/codex-lens/src/codexlens/config.py @@ -78,6 +78,11 @@ class Config: } ) + llm_enabled: bool = False + llm_tool: str = "gemini" + llm_timeout_ms: int = 300000 + llm_batch_size: int = 5 + def __post_init__(self) -> None: try: self.data_dir = self.data_dir.expanduser().resolve() diff --git a/codex-lens/src/codexlens/search/chain_search.py b/codex-lens/src/codexlens/search/chain_search.py index 588d56d4..ffd2f913 100644 --- a/codex-lens/src/codexlens/search/chain_search.py +++ b/codex-lens/src/codexlens/search/chain_search.py @@ -30,6 +30,7 @@ class SearchOptions: total_limit: Total result limit across all directories include_symbols: Whether to include symbol search results files_only: Return only file paths without excerpts + include_semantic: Whether to include semantic keyword search results """ depth: int = -1 max_workers: int = 8 @@ -37,6 +38,7 @@ class SearchOptions: total_limit: int = 100 include_symbols: bool = False files_only: bool = False + include_semantic: bool = False @dataclass @@ -378,7 +380,8 @@ class ChainSearchEngine: idx_path, query, options.limit_per_dir, - options.files_only + options.files_only, + options.include_semantic ): idx_path for idx_path in index_paths } @@ -400,7 +403,8 @@ class ChainSearchEngine: def _search_single_index(self, index_path: Path, query: str, limit: int, - files_only: bool = False) -> List[SearchResult]: + files_only: bool = False, + include_semantic: bool = False) -> List[SearchResult]: """Search a single index database. Handles exceptions gracefully, returning empty list on failure. 
@@ -410,18 +414,40 @@ class ChainSearchEngine: query: FTS5 query string limit: Maximum results from this index files_only: If True, skip snippet generation for faster search + include_semantic: If True, also search semantic keywords and merge results Returns: List of SearchResult objects (empty on error) """ try: with DirIndexStore(index_path) as store: + # Get FTS results if files_only: # Fast path: return paths only without snippets paths = store.search_files_only(query, limit=limit) - return [SearchResult(path=p, score=0.0, excerpt="") for p in paths] + fts_results = [SearchResult(path=p, score=0.0, excerpt="") for p in paths] else: - return store.search_fts(query, limit=limit) + fts_results = store.search_fts(query, limit=limit) + + # Optionally add semantic keyword results + if include_semantic: + try: + semantic_matches = store.search_semantic_keywords(query) + # Convert semantic matches to SearchResult with 0.8x weight + for file_entry, keywords in semantic_matches: + # Create excerpt from keywords + excerpt = f"Keywords: {', '.join(keywords[:5])}" + # Use a base score of 10.0 for semantic matches, weighted by 0.8 + semantic_result = SearchResult( + path=str(file_entry.full_path), + score=10.0 * 0.8, + excerpt=excerpt + ) + fts_results.append(semantic_result) + except Exception as sem_exc: + self.logger.debug(f"Semantic search error in {index_path}: {sem_exc}") + + return fts_results except Exception as exc: self.logger.debug(f"Search error in {index_path}: {exc}") return [] diff --git a/codex-lens/src/codexlens/semantic/__init__.py b/codex-lens/src/codexlens/semantic/__init__.py index 43a5ba0f..7b6c5ac9 100644 --- a/codex-lens/src/codexlens/semantic/__init__.py +++ b/codex-lens/src/codexlens/semantic/__init__.py @@ -32,4 +32,38 @@ def check_semantic_available() -> tuple[bool, str | None]: """Check if semantic search dependencies are available.""" return SEMANTIC_AVAILABLE, _import_error -__all__ = ["SEMANTIC_AVAILABLE", "SEMANTIC_BACKEND", "check_semantic_available"] +# Export LLM enhancement classes +try: + from .llm_enhancer import ( + LLMEnhancer, + LLMConfig, + SemanticMetadata, + FileData, + EnhancedSemanticIndexer, + create_enhancer, + create_enhanced_indexer, + ) + LLM_AVAILABLE = True +except ImportError: + LLM_AVAILABLE = False + LLMEnhancer = None # type: ignore + LLMConfig = None # type: ignore + SemanticMetadata = None # type: ignore + FileData = None # type: ignore + EnhancedSemanticIndexer = None # type: ignore + create_enhancer = None # type: ignore + create_enhanced_indexer = None # type: ignore + +__all__ = [ + "SEMANTIC_AVAILABLE", + "SEMANTIC_BACKEND", + "check_semantic_available", + "LLM_AVAILABLE", + "LLMEnhancer", + "LLMConfig", + "SemanticMetadata", + "FileData", + "EnhancedSemanticIndexer", + "create_enhancer", + "create_enhanced_indexer", +] diff --git a/codex-lens/src/codexlens/semantic/llm_enhancer.py b/codex-lens/src/codexlens/semantic/llm_enhancer.py new file mode 100644 index 00000000..9a254d54 --- /dev/null +++ b/codex-lens/src/codexlens/semantic/llm_enhancer.py @@ -0,0 +1,667 @@ +"""LLM-based semantic enhancement using CCW CLI. + +This module provides LLM-generated descriptions that are then embedded +by fastembed for improved semantic search. The flow is: + + Code → LLM Summary → fastembed embedding → VectorStore → semantic search + +LLM-generated summaries match natural language queries better than raw code. 
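+
+A minimal usage sketch (hypothetical path and content; assumes the ccw
+binary is on PATH):
+
+    enhancer = LLMEnhancer(LLMConfig(batch_size=2))
+    meta = enhancer.enhance_file("src/auth.py", "def login(): ...", "python")
+    # meta.summary and meta.keywords are the text that gets embedded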
+"""
+
+from __future__ import annotations
+
+import json
+import logging
+import os
+import subprocess
+import shutil
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Optional, TYPE_CHECKING
+
+from codexlens.entities import SemanticChunk, Symbol
+
+if TYPE_CHECKING:
+    from .embedder import Embedder
+    from .vector_store import VectorStore
+
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class SemanticMetadata:
+    """LLM-generated semantic metadata for a file or symbol."""
+
+    summary: str
+    keywords: List[str]
+    purpose: str
+    file_path: Optional[str] = None
+    symbol_name: Optional[str] = None
+    llm_tool: Optional[str] = None
+
+
+@dataclass
+class FileData:
+    """File data for LLM processing."""
+
+    path: str
+    content: str
+    language: str
+    symbols: List[Symbol] = field(default_factory=list)
+
+
+@dataclass
+class LLMConfig:
+    """Configuration for LLM enhancement.
+
+    Tool selection can be overridden via environment variables:
+    - CCW_CLI_SECONDARY_TOOL: Primary tool for LLM calls (default: gemini)
+    - CCW_CLI_FALLBACK_TOOL: Fallback tool if primary fails (default: qwen)
+    """
+
+    tool: str = field(default_factory=lambda: os.environ.get("CCW_CLI_SECONDARY_TOOL", "gemini"))
+    fallback_tool: str = field(default_factory=lambda: os.environ.get("CCW_CLI_FALLBACK_TOOL", "qwen"))
+    timeout_ms: int = 300000
+    batch_size: int = 5
+    max_content_chars: int = 8000  # Max chars per file in batch prompt
+    enabled: bool = True
+
+
+class LLMEnhancer:
+    """LLM-based semantic enhancement using CCW CLI.
+
+    Generates code summaries and search keywords by calling
+    external LLM tools (gemini, qwen) via CCW CLI subprocess.
+    """
+
+    PROMPT_TEMPLATE = '''PURPOSE: Generate semantic summaries and search keywords for code files
+TASK:
+- For each code block, generate a concise summary (1-2 sentences)
+- Extract 5-10 relevant search keywords
+- Identify the functional purpose/category
+MODE: analysis
+EXPECTED: JSON format output
+
+=== CODE BLOCKS ===
+{code_blocks}
+
+=== OUTPUT FORMAT ===
+Return ONLY valid JSON (no markdown, no explanation):
+{{
+  "files": {{
+    "<file_path>": {{
+      "summary": "Brief description of what this code does",
+      "keywords": ["keyword1", "keyword2", ...],
+      "purpose": "category like: auth, api, util, ui, data, config, test"
+    }}
+  }}
+}}'''
+
+    def __init__(self, config: LLMConfig | None = None) -> None:
+        """Initialize LLM enhancer.
+
+        Args:
+            config: LLM configuration, uses defaults if None
+        """
+        self.config = config or LLMConfig()
+        self._ccw_available: Optional[bool] = None
+
+    def check_available(self) -> bool:
+        """Check if CCW CLI tool is available."""
+        if self._ccw_available is not None:
+            return self._ccw_available
+
+        self._ccw_available = shutil.which("ccw") is not None
+        if not self._ccw_available:
+            logger.warning("CCW CLI not found in PATH, LLM enhancement disabled")
+        return self._ccw_available
+
+    def enhance_files(
+        self,
+        files: List[FileData],
+        working_dir: Optional[Path] = None,
+    ) -> Dict[str, SemanticMetadata]:
+        """Enhance multiple files with LLM-generated semantic metadata.
+
+        Processes files in batches to manage token limits and API costs.
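+
+        For example, 12 files with batch_size=5 are processed as three
+        batches (5, 5, 2); a batch that raises is logged and skipped rather
+        than aborting the whole run.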
+ + Args: + files: List of file data to process + working_dir: Optional working directory for CCW CLI + + Returns: + Dict mapping file paths to SemanticMetadata + """ + if not self.config.enabled: + logger.debug("LLM enhancement disabled by config") + return {} + + if not self.check_available(): + return {} + + if not files: + return {} + + results: Dict[str, SemanticMetadata] = {} + batch_size = self.config.batch_size + + for i in range(0, len(files), batch_size): + batch = files[i:i + batch_size] + try: + batch_results = self._process_batch(batch, working_dir) + results.update(batch_results) + logger.debug( + "Processed batch %d/%d: %d files enhanced", + i // batch_size + 1, + (len(files) + batch_size - 1) // batch_size, + len(batch_results), + ) + except Exception as e: + logger.warning( + "Batch %d failed, continuing: %s", + i // batch_size + 1, + e, + ) + continue + + return results + + def enhance_file( + self, + path: str, + content: str, + language: str, + working_dir: Optional[Path] = None, + ) -> SemanticMetadata: + """Enhance a single file with LLM-generated semantic metadata. + + Convenience method that wraps enhance_files for single file processing. + + Args: + path: File path + content: File content + language: Programming language + working_dir: Optional working directory for CCW CLI + + Returns: + SemanticMetadata for the file + + Raises: + ValueError: If enhancement fails + """ + file_data = FileData(path=path, content=content, language=language) + results = self.enhance_files([file_data], working_dir) + + if path not in results: + # Return default metadata if enhancement failed + return SemanticMetadata( + summary=f"Code file written in {language}", + keywords=[language, "code"], + purpose="unknown", + file_path=path, + llm_tool=self.config.tool, + ) + + return results[path] + + + def _process_batch( + self, + files: List[FileData], + working_dir: Optional[Path] = None, + ) -> Dict[str, SemanticMetadata]: + """Process a single batch of files.""" + prompt = self._build_batch_prompt(files) + + # Try primary tool first + result = self._invoke_ccw_cli( + prompt, + tool=self.config.tool, + working_dir=working_dir, + ) + + # Fallback to secondary tool if primary fails + if not result["success"] and self.config.fallback_tool: + logger.debug( + "Primary tool %s failed, trying fallback %s", + self.config.tool, + self.config.fallback_tool, + ) + result = self._invoke_ccw_cli( + prompt, + tool=self.config.fallback_tool, + working_dir=working_dir, + ) + + if not result["success"]: + logger.warning("LLM call failed: %s", result.get("stderr", "unknown error")) + return {} + + return self._parse_response(result["stdout"], self.config.tool) + + def _build_batch_prompt(self, files: List[FileData]) -> str: + """Build prompt for batch processing.""" + code_blocks_parts: List[str] = [] + + for file_data in files: + # Truncate content if too long + content = file_data.content + if len(content) > self.config.max_content_chars: + content = content[:self.config.max_content_chars] + "\n... [truncated]" + + # Format code block + lang_hint = file_data.language or "text" + code_block = f'''[FILE: {file_data.path}] +```{lang_hint} +{content} +```''' + code_blocks_parts.append(code_block) + + code_blocks = "\n\n".join(code_blocks_parts) + return self.PROMPT_TEMPLATE.format(code_blocks=code_blocks) + + def _invoke_ccw_cli( + self, + prompt: str, + tool: str = "gemini", + working_dir: Optional[Path] = None, + ) -> Dict[str, Any]: + """Invoke CCW CLI tool via subprocess. 
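+
+        With the default config this roughly corresponds to running:
+        ccw cli exec "<prompt>" --tool gemini --mode analysis --timeout 300000
+        (plus --cd <dir> when working_dir is given).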
+ + Args: + prompt: The prompt to send to LLM + tool: Tool name (gemini, qwen, codex) + working_dir: Optional working directory + + Returns: + Dict with success, stdout, stderr, exit_code + """ + import sys + import os + + timeout_seconds = (self.config.timeout_ms / 1000) + 30 + + # Build base arguments + base_args = [ + "cli", "exec", + prompt, # Direct string argument + "--tool", tool, + "--mode", "analysis", + "--timeout", str(self.config.timeout_ms), + ] + if working_dir: + base_args.extend(["--cd", str(working_dir)]) + + try: + if sys.platform == "win32": + # On Windows, ccw is a .CMD wrapper that requires shell + # Instead, directly invoke node with the ccw.js script + ccw_path = shutil.which("ccw") + if ccw_path and ccw_path.lower().endswith(".cmd"): + # Find the ccw.js script location + npm_dir = Path(ccw_path).parent + ccw_js = npm_dir / "node_modules" / "ccw" / "bin" / "ccw.js" + if ccw_js.exists(): + cmd = ["node", str(ccw_js)] + base_args + else: + # Fallback to shell execution + cmd_str = "ccw " + " ".join(f'"{a}"' if " " in a else a for a in base_args) + result = subprocess.run( + cmd_str, shell=True, capture_output=True, text=True, + timeout=timeout_seconds, cwd=working_dir, + encoding="utf-8", errors="replace", + ) + return { + "success": result.returncode == 0, + "stdout": result.stdout, + "stderr": result.stderr, + "exit_code": result.returncode, + } + else: + cmd = ["ccw"] + base_args + else: + cmd = ["ccw"] + base_args + + result = subprocess.run( + cmd, + capture_output=True, + text=True, + timeout=timeout_seconds, + cwd=working_dir, + encoding="utf-8", + errors="replace", + ) + + return { + "success": result.returncode == 0, + "stdout": result.stdout, + "stderr": result.stderr, + "exit_code": result.returncode, + } + + except subprocess.TimeoutExpired: + logger.warning("CCW CLI timeout after %ds", self.config.timeout_ms / 1000) + return { + "success": False, + "stdout": "", + "stderr": "timeout", + "exit_code": -1, + } + except FileNotFoundError: + logger.warning("CCW CLI not found - ensure 'ccw' is in PATH") + return { + "success": False, + "stdout": "", + "stderr": "ccw command not found", + "exit_code": -1, + } + except Exception as e: + logger.warning("CCW CLI invocation failed: %s", e) + return { + "success": False, + "stdout": "", + "stderr": str(e), + "exit_code": -1, + } + + def _parse_response( + self, + stdout: str, + tool: str, + ) -> Dict[str, SemanticMetadata]: + """Parse LLM response into SemanticMetadata objects. 
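+
+        Tolerates markdown-wrapped output: a response that arrives fenced as
+        ```json ... ``` is unwrapped by _extract_json before json.loads runs.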
+ + Args: + stdout: Raw stdout from CCW CLI + tool: Tool name used for generation + + Returns: + Dict mapping file paths to SemanticMetadata + """ + results: Dict[str, SemanticMetadata] = {} + + # Extract JSON from response (may be wrapped in markdown or other text) + json_str = self._extract_json(stdout) + if not json_str: + logger.warning("No JSON found in LLM response") + return results + + try: + data = json.loads(json_str) + except json.JSONDecodeError as e: + logger.warning("Failed to parse LLM response JSON: %s", e) + return results + + # Handle expected format: {"files": {"path": {...}}} + files_data = data.get("files", data) + if not isinstance(files_data, dict): + logger.warning("Unexpected response format: expected dict") + return results + + for file_path, metadata in files_data.items(): + if not isinstance(metadata, dict): + continue + + try: + results[file_path] = SemanticMetadata( + summary=metadata.get("summary", ""), + keywords=metadata.get("keywords", []), + purpose=metadata.get("purpose", ""), + file_path=file_path, + llm_tool=tool, + ) + except Exception as e: + logger.debug("Failed to parse metadata for %s: %s", file_path, e) + continue + + return results + + def _extract_json(self, text: str) -> Optional[str]: + """Extract JSON object from text that may contain markdown or other content.""" + # Try to find JSON object boundaries + text = text.strip() + + # Remove markdown code blocks if present + if text.startswith("```"): + lines = text.split("\n") + # Remove first line (```json or ```) + lines = lines[1:] + # Find closing ``` + for i, line in enumerate(lines): + if line.strip() == "```": + lines = lines[:i] + break + text = "\n".join(lines) + + # Find JSON object + start = text.find("{") + if start == -1: + return None + + # Find matching closing brace + depth = 0 + end = start + for i, char in enumerate(text[start:], start): + if char == "{": + depth += 1 + elif char == "}": + depth -= 1 + if depth == 0: + end = i + 1 + break + + if depth != 0: + return None + + return text[start:end] + + +def create_enhancer( + tool: str = "gemini", + timeout_ms: int = 300000, + batch_size: int = 5, + enabled: bool = True, +) -> LLMEnhancer: + """Factory function to create LLM enhancer with custom config.""" + config = LLMConfig( + tool=tool, + timeout_ms=timeout_ms, + batch_size=batch_size, + enabled=enabled, + ) + return LLMEnhancer(config) + + +class EnhancedSemanticIndexer: + """Integrates LLM enhancement with fastembed vector search. + + Flow: + 1. Code files → LLM generates summaries/keywords + 2. Summaries → fastembed generates embeddings + 3. Embeddings → VectorStore for similarity search + + This produces better semantic search because: + - LLM summaries are natural language descriptions + - Natural language queries match summaries better than raw code + - Keywords expand search coverage + """ + + def __init__( + self, + enhancer: LLMEnhancer, + embedder: "Embedder", + vector_store: "VectorStore", + ) -> None: + """Initialize enhanced semantic indexer. + + Args: + enhancer: LLM enhancer for generating summaries + embedder: Fastembed embedder for vector generation + vector_store: Vector storage for similarity search + """ + self.enhancer = enhancer + self.embedder = embedder + self.vector_store = vector_store + + def index_files( + self, + files: List[FileData], + working_dir: Optional[Path] = None, + ) -> int: + """Index files with LLM-enhanced semantic search. 
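+
+        A minimal sketch (hypothetical inputs; indexer built via
+        create_enhanced_indexer):
+
+            files = [FileData(path="src/auth.py", content="def login(): ...", language="python")]
+            count = indexer.index_files(files)  # number of files stored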
+ + Args: + files: List of file data to index + working_dir: Optional working directory for LLM calls + + Returns: + Number of files successfully indexed + """ + if not files: + return 0 + + # Step 1: Generate LLM summaries + logger.info("Generating LLM summaries for %d files...", len(files)) + metadata_map = self.enhancer.enhance_files(files, working_dir) + + if not metadata_map: + logger.warning("No LLM metadata generated, falling back to raw code") + return self._index_raw_code(files) + + # Step 2: Create semantic chunks from LLM summaries + chunks_to_embed: List[SemanticChunk] = [] + file_paths: List[str] = [] + + for file_data in files: + metadata = metadata_map.get(file_data.path) + if metadata: + # Use LLM-generated summary + keywords for embedding + embeddable_text = self._create_embeddable_text(metadata, file_data) + chunk = SemanticChunk( + content=embeddable_text, + embedding=None, + metadata={ + "file": file_data.path, + "language": file_data.language, + "summary": metadata.summary, + "keywords": metadata.keywords, + "purpose": metadata.purpose, + "llm_tool": metadata.llm_tool, + "strategy": "llm_enhanced", + }, + ) + else: + # Fallback: use truncated raw code + chunk = SemanticChunk( + content=file_data.content[:2000], + embedding=None, + metadata={ + "file": file_data.path, + "language": file_data.language, + "strategy": "raw_code", + }, + ) + + chunks_to_embed.append(chunk) + file_paths.append(file_data.path) + + # Step 3: Generate embeddings + logger.info("Generating embeddings for %d chunks...", len(chunks_to_embed)) + texts = [chunk.content for chunk in chunks_to_embed] + embeddings = self.embedder.embed(texts) + + # Step 4: Store in vector store + indexed_count = 0 + for chunk, embedding, file_path in zip(chunks_to_embed, embeddings, file_paths): + chunk.embedding = embedding + try: + self.vector_store.add_chunk(chunk, file_path) + indexed_count += 1 + except Exception as e: + logger.debug("Failed to store chunk for %s: %s", file_path, e) + + logger.info("Successfully indexed %d/%d files", indexed_count, len(files)) + return indexed_count + + def _create_embeddable_text( + self, + metadata: SemanticMetadata, + file_data: FileData, + ) -> str: + """Create text optimized for embedding from LLM metadata. + + Combines summary, keywords, and purpose into a single string + that will produce good semantic matches for natural language queries. 
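+
+        For example (illustrative values), a summary of "Handles user
+        authentication" with purpose "auth" and keywords ["auth", "login"]
+        for src/auth.py yields:
+
+            Handles user authentication
+            Category: auth
+            Keywords: auth, login
+            File: auth.py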
+ """ + parts = [] + + # Summary is the primary content + if metadata.summary: + parts.append(metadata.summary) + + # Purpose adds categorical context + if metadata.purpose: + parts.append(f"Category: {metadata.purpose}") + + # Keywords expand search coverage + if metadata.keywords: + parts.append(f"Keywords: {', '.join(metadata.keywords)}") + + # Add file name for context + parts.append(f"File: {Path(file_data.path).name}") + + return "\n".join(parts) + + def _index_raw_code(self, files: List[FileData]) -> int: + """Fallback: index raw code without LLM enhancement.""" + indexed_count = 0 + + for file_data in files: + # Truncate to reasonable size + content = file_data.content[:2000] + + chunk = SemanticChunk( + content=content, + embedding=None, + metadata={ + "file": file_data.path, + "language": file_data.language, + "strategy": "raw_code", + }, + ) + + try: + embedding = self.embedder.embed_single(content) + chunk.embedding = embedding + self.vector_store.add_chunk(chunk, file_data.path) + indexed_count += 1 + except Exception as e: + logger.debug("Failed to index %s: %s", file_data.path, e) + + return indexed_count + + +def create_enhanced_indexer( + vector_store_path: Path, + llm_tool: str = "gemini", + llm_enabled: bool = True, +) -> EnhancedSemanticIndexer: + """Factory function to create an enhanced semantic indexer. + + Args: + vector_store_path: Path for the vector store database + llm_tool: LLM tool to use (gemini, qwen) + llm_enabled: Whether to enable LLM enhancement + + Returns: + Configured EnhancedSemanticIndexer instance + """ + from .embedder import Embedder + from .vector_store import VectorStore + + enhancer = create_enhancer(tool=llm_tool, enabled=llm_enabled) + embedder = Embedder() + vector_store = VectorStore(vector_store_path) + + return EnhancedSemanticIndexer(enhancer, embedder, vector_store) diff --git a/codex-lens/src/codexlens/storage/dir_index.py b/codex-lens/src/codexlens/storage/dir_index.py index 6f891585..1eeed440 100644 --- a/codex-lens/src/codexlens/storage/dir_index.py +++ b/codex-lens/src/codexlens/storage/dir_index.py @@ -347,6 +347,222 @@ class DirIndexStore: row = conn.execute("SELECT COUNT(*) AS c FROM files").fetchone() return int(row["c"]) if row else 0 + # === Semantic Metadata === + + def add_semantic_metadata( + self, + file_id: int, + summary: str, + keywords: List[str], + purpose: str, + llm_tool: str + ) -> None: + """Add or update semantic metadata for a file. + + Args: + file_id: File ID from files table + summary: LLM-generated summary + keywords: List of keywords + purpose: Purpose/role of the file + llm_tool: Tool used to generate metadata (gemini/qwen) + """ + with self._lock: + conn = self._get_connection() + + import json + import time + + keywords_json = json.dumps(keywords) + generated_at = time.time() + + conn.execute( + """ + INSERT INTO semantic_metadata(file_id, summary, keywords, purpose, llm_tool, generated_at) + VALUES(?, ?, ?, ?, ?, ?) + ON CONFLICT(file_id) DO UPDATE SET + summary=excluded.summary, + keywords=excluded.keywords, + purpose=excluded.purpose, + llm_tool=excluded.llm_tool, + generated_at=excluded.generated_at + """, + (file_id, summary, keywords_json, purpose, llm_tool, generated_at), + ) + conn.commit() + + def get_semantic_metadata(self, file_id: int) -> Optional[Dict[str, Any]]: + """Get semantic metadata for a file. 
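+
+        Example return value (illustrative):
+
+            {"summary": "Auth helpers", "keywords": ["auth", "login"],
+             "purpose": "auth", "llm_tool": "gemini", "generated_at": 1734160000.0}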
+ + Args: + file_id: File ID from files table + + Returns: + Dict with summary, keywords, purpose, llm_tool, generated_at, or None if not found + """ + with self._lock: + conn = self._get_connection() + + row = conn.execute( + """ + SELECT summary, keywords, purpose, llm_tool, generated_at + FROM semantic_metadata WHERE file_id=? + """, + (file_id,), + ).fetchone() + + if not row: + return None + + import json + + return { + "summary": row["summary"], + "keywords": json.loads(row["keywords"]) if row["keywords"] else [], + "purpose": row["purpose"], + "llm_tool": row["llm_tool"], + "generated_at": float(row["generated_at"]) if row["generated_at"] else 0.0, + } + + def get_files_without_semantic(self) -> List[FileEntry]: + """Get all files that don't have semantic metadata. + + Returns: + List of FileEntry objects without semantic metadata + """ + with self._lock: + conn = self._get_connection() + + rows = conn.execute( + """ + SELECT f.id, f.name, f.full_path, f.language, f.mtime, f.line_count + FROM files f + LEFT JOIN semantic_metadata sm ON f.id = sm.file_id + WHERE sm.id IS NULL + ORDER BY f.name + """ + ).fetchall() + + return [ + FileEntry( + id=int(row["id"]), + name=row["name"], + full_path=Path(row["full_path"]), + language=row["language"], + mtime=float(row["mtime"]) if row["mtime"] else 0.0, + line_count=int(row["line_count"]) if row["line_count"] else 0, + ) + for row in rows + ] + + def search_semantic_keywords(self, keyword: str) -> List[Tuple[FileEntry, List[str]]]: + """Search files by semantic keywords. + + Args: + keyword: Keyword to search for (case-insensitive) + + Returns: + List of (FileEntry, keywords) tuples where keyword matches + """ + with self._lock: + conn = self._get_connection() + + keyword_pattern = f"%{keyword}%" + + rows = conn.execute( + """ + SELECT f.id, f.name, f.full_path, f.language, f.mtime, f.line_count, sm.keywords + FROM files f + JOIN semantic_metadata sm ON f.id = sm.file_id + WHERE sm.keywords LIKE ? COLLATE NOCASE + ORDER BY f.name + """, + (keyword_pattern,), + ).fetchall() + + import json + + results = [] + for row in rows: + file_entry = FileEntry( + id=int(row["id"]), + name=row["name"], + full_path=Path(row["full_path"]), + language=row["language"], + mtime=float(row["mtime"]) if row["mtime"] else 0.0, + line_count=int(row["line_count"]) if row["line_count"] else 0, + ) + keywords = json.loads(row["keywords"]) if row["keywords"] else [] + results.append((file_entry, keywords)) + + return results + + def list_semantic_metadata( + self, + offset: int = 0, + limit: int = 50, + llm_tool: Optional[str] = None, + ) -> Tuple[List[Dict[str, Any]], int]: + """List all semantic metadata with file information. + + Args: + offset: Number of records to skip (for pagination) + limit: Maximum records to return (max 100) + llm_tool: Optional filter by LLM tool used + + Returns: + Tuple of (list of metadata dicts, total count) + """ + import json + + with self._lock: + conn = self._get_connection() + + base_query = """ + SELECT f.id as file_id, f.name as file_name, f.full_path, + f.language, f.line_count, + sm.summary, sm.keywords, sm.purpose, + sm.llm_tool, sm.generated_at + FROM files f + JOIN semantic_metadata sm ON f.id = sm.file_id + """ + count_query = """ + SELECT COUNT(*) as total + FROM files f + JOIN semantic_metadata sm ON f.id = sm.file_id + """ + + params: List[Any] = [] + if llm_tool: + base_query += " WHERE sm.llm_tool = ?" + count_query += " WHERE sm.llm_tool = ?" 
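+                # Illustrative: llm_tool="gemini" narrows both the page query
+                # and the count query; limit is later capped at 100.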
+ params.append(llm_tool) + + base_query += " ORDER BY sm.generated_at DESC LIMIT ? OFFSET ?" + params.extend([min(limit, 100), offset]) + + count_params = [llm_tool] if llm_tool else [] + total_row = conn.execute(count_query, count_params).fetchone() + total = int(total_row["total"]) if total_row else 0 + + rows = conn.execute(base_query, params).fetchall() + + results = [] + for row in rows: + results.append({ + "file_id": int(row["file_id"]), + "file_name": row["file_name"], + "full_path": row["full_path"], + "language": row["language"], + "line_count": int(row["line_count"]) if row["line_count"] else 0, + "summary": row["summary"], + "keywords": json.loads(row["keywords"]) if row["keywords"] else [], + "purpose": row["purpose"], + "llm_tool": row["llm_tool"], + "generated_at": float(row["generated_at"]) if row["generated_at"] else 0.0, + }) + + return results, total + # === Subdirectory Links === def register_subdir( @@ -748,12 +964,28 @@ class DirIndexStore: """ ) + # Semantic metadata table + conn.execute( + """ + CREATE TABLE IF NOT EXISTS semantic_metadata ( + id INTEGER PRIMARY KEY, + file_id INTEGER UNIQUE REFERENCES files(id) ON DELETE CASCADE, + summary TEXT, + keywords TEXT, + purpose TEXT, + llm_tool TEXT, + generated_at REAL + ) + """ + ) + # Indexes conn.execute("CREATE INDEX IF NOT EXISTS idx_files_name ON files(name)") conn.execute("CREATE INDEX IF NOT EXISTS idx_files_path ON files(full_path)") conn.execute("CREATE INDEX IF NOT EXISTS idx_subdirs_name ON subdirs(name)") conn.execute("CREATE INDEX IF NOT EXISTS idx_symbols_name ON symbols(name)") conn.execute("CREATE INDEX IF NOT EXISTS idx_symbols_file ON symbols(file_id)") + conn.execute("CREATE INDEX IF NOT EXISTS idx_semantic_file ON semantic_metadata(file_id)") except sqlite3.DatabaseError as exc: raise StorageError(f"Failed to create schema: {exc}") from exc diff --git a/codex-lens/tests/test_llm_enhancer.py b/codex-lens/tests/test_llm_enhancer.py new file mode 100644 index 00000000..e838c57c --- /dev/null +++ b/codex-lens/tests/test_llm_enhancer.py @@ -0,0 +1,831 @@ +"""Tests for LLM-based semantic enhancement functionality. 
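+
+All subprocess and CCW CLI interactions are mocked, so the suite runs
+without a real ccw binary on PATH.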
+ +Tests cover: +- LLMConfig and data classes +- LLMEnhancer initialization and configuration +- Prompt building and JSON parsing +- Batch processing logic +- CCW CLI invocation (mocked) +- EnhancedSemanticIndexer integration +- Error handling and fallback behavior +""" + +import json +import tempfile +from pathlib import Path +from typing import Dict, Any +from unittest.mock import MagicMock, patch, PropertyMock + +import pytest + +from codexlens.entities import SemanticChunk, Symbol +from codexlens.semantic.llm_enhancer import ( + SemanticMetadata, + FileData, + LLMConfig, + LLMEnhancer, + EnhancedSemanticIndexer, + create_enhancer, + create_enhanced_indexer, +) + + +# === Data Class Tests === + +class TestSemanticMetadata: + """Tests for SemanticMetadata dataclass.""" + + def test_basic_creation(self): + """Test creating SemanticMetadata with required fields.""" + metadata = SemanticMetadata( + summary="Authentication handler", + keywords=["auth", "login", "jwt"], + purpose="auth", + ) + assert metadata.summary == "Authentication handler" + assert metadata.keywords == ["auth", "login", "jwt"] + assert metadata.purpose == "auth" + assert metadata.file_path is None + assert metadata.symbol_name is None + assert metadata.llm_tool is None + + def test_full_creation(self): + """Test creating SemanticMetadata with all fields.""" + metadata = SemanticMetadata( + summary="User login function", + keywords=["login", "user"], + purpose="auth", + file_path="/test/auth.py", + symbol_name="login", + llm_tool="gemini", + ) + assert metadata.file_path == "/test/auth.py" + assert metadata.symbol_name == "login" + assert metadata.llm_tool == "gemini" + + def test_empty_keywords(self): + """Test creating SemanticMetadata with empty keywords.""" + metadata = SemanticMetadata( + summary="Empty", + keywords=[], + purpose="", + ) + assert metadata.keywords == [] + + +class TestFileData: + """Tests for FileData dataclass.""" + + def test_basic_creation(self): + """Test creating FileData with required fields.""" + data = FileData( + path="/test/file.py", + content="def hello(): pass", + language="python", + ) + assert data.path == "/test/file.py" + assert data.content == "def hello(): pass" + assert data.language == "python" + assert data.symbols == [] + + def test_with_symbols(self): + """Test creating FileData with symbols.""" + symbols = [ + Symbol(name="hello", kind="function", range=(1, 1)), + Symbol(name="MyClass", kind="class", range=(3, 10)), + ] + data = FileData( + path="/test/file.py", + content="code", + language="python", + symbols=symbols, + ) + assert len(data.symbols) == 2 + assert data.symbols[0].name == "hello" + + +class TestLLMConfig: + """Tests for LLMConfig dataclass.""" + + def test_default_values(self): + """Test default configuration values.""" + config = LLMConfig() + assert config.tool == "gemini" + assert config.fallback_tool == "qwen" + assert config.timeout_ms == 300000 + assert config.batch_size == 5 + assert config.max_content_chars == 8000 + assert config.enabled is True + + def test_custom_values(self): + """Test custom configuration values.""" + config = LLMConfig( + tool="qwen", + fallback_tool="gemini", + timeout_ms=600000, + batch_size=10, + max_content_chars=4000, + enabled=False, + ) + assert config.tool == "qwen" + assert config.fallback_tool == "gemini" + assert config.timeout_ms == 600000 + assert config.batch_size == 10 + assert config.max_content_chars == 4000 + assert config.enabled is False + + @patch.dict("os.environ", {"CCW_CLI_SECONDARY_TOOL": "codex", 
"CCW_CLI_FALLBACK_TOOL": "gemini"}) + def test_env_override(self): + """Test environment variable override.""" + config = LLMConfig() + assert config.tool == "codex" + assert config.fallback_tool == "gemini" + + +# === LLMEnhancer Tests === + +class TestLLMEnhancerInit: + """Tests for LLMEnhancer initialization.""" + + def test_default_init(self): + """Test default initialization.""" + enhancer = LLMEnhancer() + assert enhancer.config is not None + assert enhancer.config.tool == "gemini" + assert enhancer._ccw_available is None + + def test_custom_config(self): + """Test initialization with custom config.""" + config = LLMConfig(tool="qwen", batch_size=3) + enhancer = LLMEnhancer(config) + assert enhancer.config.tool == "qwen" + assert enhancer.config.batch_size == 3 + + +class TestLLMEnhancerAvailability: + """Tests for CCW CLI availability check.""" + + @patch("shutil.which") + def test_ccw_available(self, mock_which): + """Test CCW available returns True.""" + mock_which.return_value = "/usr/bin/ccw" + enhancer = LLMEnhancer() + + result = enhancer.check_available() + + assert result is True + assert enhancer._ccw_available is True + mock_which.assert_called_with("ccw") + + @patch("shutil.which") + def test_ccw_not_available(self, mock_which): + """Test CCW not available returns False.""" + mock_which.return_value = None + enhancer = LLMEnhancer() + + result = enhancer.check_available() + + assert result is False + assert enhancer._ccw_available is False + + @patch("shutil.which") + def test_ccw_availability_cached(self, mock_which): + """Test availability result is cached.""" + mock_which.return_value = "/usr/bin/ccw" + enhancer = LLMEnhancer() + + # First call + enhancer.check_available() + # Second call + enhancer.check_available() + + # which should only be called once + mock_which.assert_called_once() + + +class TestPromptBuilding: + """Tests for prompt building.""" + + def test_build_single_file_prompt(self): + """Test prompt building with single file.""" + enhancer = LLMEnhancer() + files = [ + FileData( + path="/test/auth.py", + content="def login(): pass", + language="python", + ) + ] + + prompt = enhancer._build_batch_prompt(files) + + assert "[FILE: /test/auth.py]" in prompt + assert "```python" in prompt + assert "def login(): pass" in prompt + assert "PURPOSE:" in prompt + assert "JSON format output" in prompt + + def test_build_multiple_files_prompt(self): + """Test prompt building with multiple files.""" + enhancer = LLMEnhancer() + files = [ + FileData(path="/test/a.py", content="def a(): pass", language="python"), + FileData(path="/test/b.js", content="function b() {}", language="javascript"), + ] + + prompt = enhancer._build_batch_prompt(files) + + assert "[FILE: /test/a.py]" in prompt + assert "[FILE: /test/b.js]" in prompt + assert "```python" in prompt + assert "```javascript" in prompt + + def test_build_prompt_truncates_long_content(self): + """Test prompt truncates long content.""" + config = LLMConfig(max_content_chars=100) + enhancer = LLMEnhancer(config) + + long_content = "x" * 200 + files = [FileData(path="/test/long.py", content=long_content, language="python")] + + prompt = enhancer._build_batch_prompt(files) + + assert "... 
[truncated]" in prompt + assert "x" * 200 not in prompt + + +class TestJSONParsing: + """Tests for JSON response parsing.""" + + def test_parse_valid_response(self): + """Test parsing valid JSON response.""" + enhancer = LLMEnhancer() + response = json.dumps({ + "files": { + "/test/auth.py": { + "summary": "Authentication handler", + "keywords": ["auth", "login"], + "purpose": "auth", + } + } + }) + + result = enhancer._parse_response(response, "gemini") + + assert "/test/auth.py" in result + assert result["/test/auth.py"].summary == "Authentication handler" + assert result["/test/auth.py"].keywords == ["auth", "login"] + assert result["/test/auth.py"].purpose == "auth" + assert result["/test/auth.py"].llm_tool == "gemini" + + def test_parse_response_with_markdown(self): + """Test parsing response wrapped in markdown.""" + enhancer = LLMEnhancer() + response = '''```json +{ + "files": { + "/test/file.py": { + "summary": "Test file", + "keywords": ["test"], + "purpose": "test" + } + } +} +```''' + + result = enhancer._parse_response(response, "qwen") + + assert "/test/file.py" in result + assert result["/test/file.py"].summary == "Test file" + + def test_parse_response_multiple_files(self): + """Test parsing response with multiple files.""" + enhancer = LLMEnhancer() + response = json.dumps({ + "files": { + "/test/a.py": {"summary": "File A", "keywords": ["a"], "purpose": "util"}, + "/test/b.py": {"summary": "File B", "keywords": ["b"], "purpose": "api"}, + } + }) + + result = enhancer._parse_response(response, "gemini") + + assert len(result) == 2 + assert result["/test/a.py"].summary == "File A" + assert result["/test/b.py"].summary == "File B" + + def test_parse_invalid_json(self): + """Test parsing invalid JSON returns empty dict.""" + enhancer = LLMEnhancer() + response = "not valid json at all" + + result = enhancer._parse_response(response, "gemini") + + assert result == {} + + def test_parse_empty_response(self): + """Test parsing empty response returns empty dict.""" + enhancer = LLMEnhancer() + + result = enhancer._parse_response("", "gemini") + + assert result == {} + + +class TestJSONExtraction: + """Tests for JSON extraction from mixed text.""" + + def test_extract_json_from_plain(self): + """Test extracting JSON from plain text.""" + enhancer = LLMEnhancer() + text = '{"key": "value"}' + + result = enhancer._extract_json(text) + + assert result == '{"key": "value"}' + + def test_extract_json_from_markdown(self): + """Test extracting JSON from markdown code block.""" + enhancer = LLMEnhancer() + text = '''```json +{"key": "value"} +```''' + + result = enhancer._extract_json(text) + + assert result == '{"key": "value"}' + + def test_extract_json_with_surrounding_text(self): + """Test extracting JSON with surrounding text.""" + enhancer = LLMEnhancer() + text = 'Here is the result: {"key": "value"} That is all.' 
+ + result = enhancer._extract_json(text) + + assert result == '{"key": "value"}' + + def test_extract_nested_json(self): + """Test extracting nested JSON.""" + enhancer = LLMEnhancer() + text = '{"outer": {"inner": "value"}}' + + result = enhancer._extract_json(text) + + assert '"outer"' in result + assert '"inner"' in result + + def test_extract_no_json(self): + """Test extracting from text without JSON.""" + enhancer = LLMEnhancer() + text = "No JSON here at all" + + result = enhancer._extract_json(text) + + assert result is None + + def test_extract_malformed_json(self): + """Test extracting malformed JSON returns None.""" + enhancer = LLMEnhancer() + text = '{"key": "value"' # Missing closing brace + + result = enhancer._extract_json(text) + + assert result is None + + +class TestEnhanceFiles: + """Tests for enhance_files method.""" + + @patch.object(LLMEnhancer, "check_available", return_value=False) + def test_enhance_files_ccw_not_available(self, mock_check): + """Test enhance_files returns empty when CCW not available.""" + enhancer = LLMEnhancer() + files = [FileData(path="/test/a.py", content="code", language="python")] + + result = enhancer.enhance_files(files) + + assert result == {} + + def test_enhance_files_disabled(self): + """Test enhance_files returns empty when disabled.""" + config = LLMConfig(enabled=False) + enhancer = LLMEnhancer(config) + files = [FileData(path="/test/a.py", content="code", language="python")] + + result = enhancer.enhance_files(files) + + assert result == {} + + @patch.object(LLMEnhancer, "check_available", return_value=True) + def test_enhance_files_empty_list(self, mock_check): + """Test enhance_files with empty list returns empty dict.""" + enhancer = LLMEnhancer() + + result = enhancer.enhance_files([]) + + assert result == {} + + @patch.object(LLMEnhancer, "check_available", return_value=True) + @patch.object(LLMEnhancer, "_invoke_ccw_cli") + def test_enhance_files_success(self, mock_invoke, mock_check): + """Test enhance_files successful processing.""" + mock_invoke.return_value = { + "success": True, + "stdout": json.dumps({ + "files": { + "/test/auth.py": { + "summary": "Auth module", + "keywords": ["auth"], + "purpose": "auth", + } + } + }), + "stderr": "", + "exit_code": 0, + } + + enhancer = LLMEnhancer() + files = [FileData(path="/test/auth.py", content="def login(): pass", language="python")] + + result = enhancer.enhance_files(files) + + assert "/test/auth.py" in result + assert result["/test/auth.py"].summary == "Auth module" + + @patch.object(LLMEnhancer, "check_available", return_value=True) + @patch.object(LLMEnhancer, "_invoke_ccw_cli") + def test_enhance_files_fallback(self, mock_invoke, mock_check): + """Test enhance_files falls back to secondary tool.""" + # First call fails, second succeeds + mock_invoke.side_effect = [ + {"success": False, "stdout": "", "stderr": "error", "exit_code": 1}, + { + "success": True, + "stdout": json.dumps({ + "files": { + "/test/file.py": { + "summary": "Fallback result", + "keywords": ["fallback"], + "purpose": "util", + } + } + }), + "stderr": "", + "exit_code": 0, + }, + ] + + enhancer = LLMEnhancer() + files = [FileData(path="/test/file.py", content="code", language="python")] + + result = enhancer.enhance_files(files) + + assert "/test/file.py" in result + assert result["/test/file.py"].summary == "Fallback result" + assert mock_invoke.call_count == 2 + + +class TestEnhanceFile: + """Tests for enhance_file single file method.""" + + @patch.object(LLMEnhancer, "enhance_files") + def 
test_enhance_file_success(self, mock_enhance_files): + """Test enhance_file returns metadata on success.""" + mock_enhance_files.return_value = { + "/test/auth.py": SemanticMetadata( + summary="Auth module", + keywords=["auth", "login"], + purpose="auth", + file_path="/test/auth.py", + llm_tool="gemini", + ) + } + + enhancer = LLMEnhancer() + result = enhancer.enhance_file("/test/auth.py", "def login(): pass", "python") + + assert result.summary == "Auth module" + assert result.keywords == ["auth", "login"] + + @patch.object(LLMEnhancer, "enhance_files") + def test_enhance_file_fallback_on_failure(self, mock_enhance_files): + """Test enhance_file returns default metadata on failure.""" + mock_enhance_files.return_value = {} # Enhancement failed + + enhancer = LLMEnhancer() + result = enhancer.enhance_file("/test/file.py", "code", "python") + + assert "python" in result.summary.lower() + assert "python" in result.keywords + assert result.purpose == "unknown" + + +class TestBatchProcessing: + """Tests for batch processing.""" + + @patch.object(LLMEnhancer, "check_available", return_value=True) + @patch.object(LLMEnhancer, "_process_batch") + def test_batch_processing(self, mock_process, mock_check): + """Test files are processed in batches.""" + mock_process.return_value = {} + + config = LLMConfig(batch_size=2) + enhancer = LLMEnhancer(config) + + files = [ + FileData(path=f"/test/file{i}.py", content="code", language="python") + for i in range(5) + ] + + enhancer.enhance_files(files) + + # 5 files with batch_size=2 should result in 3 batches + assert mock_process.call_count == 3 + + @patch.object(LLMEnhancer, "check_available", return_value=True) + @patch.object(LLMEnhancer, "_process_batch") + def test_batch_continues_on_error(self, mock_process, mock_check): + """Test batch processing continues on error.""" + # First batch fails, second succeeds + mock_process.side_effect = [ + Exception("Batch 1 failed"), + {"/test/file2.py": SemanticMetadata(summary="OK", keywords=[], purpose="")}, + ] + + config = LLMConfig(batch_size=1) + enhancer = LLMEnhancer(config) + + files = [ + FileData(path="/test/file1.py", content="code", language="python"), + FileData(path="/test/file2.py", content="code", language="python"), + ] + + result = enhancer.enhance_files(files) + + # Should still get results from second batch + assert "/test/file2.py" in result + + +# === CCW CLI Invocation Tests === + +class TestCCWInvocation: + """Tests for CCW CLI invocation.""" + + @patch("subprocess.run") + @patch("shutil.which", return_value="/usr/bin/ccw") + def test_invoke_success(self, mock_which, mock_run): + """Test successful CCW CLI invocation.""" + mock_run.return_value = MagicMock( + returncode=0, + stdout='{"files": {}}', + stderr="", + ) + + enhancer = LLMEnhancer() + result = enhancer._invoke_ccw_cli("test prompt", tool="gemini") + + assert result["success"] is True + assert result["exit_code"] == 0 + + @patch("subprocess.run") + @patch("shutil.which", return_value="/usr/bin/ccw") + def test_invoke_failure(self, mock_which, mock_run): + """Test failed CCW CLI invocation.""" + mock_run.return_value = MagicMock( + returncode=1, + stdout="", + stderr="Error occurred", + ) + + enhancer = LLMEnhancer() + result = enhancer._invoke_ccw_cli("test prompt", tool="gemini") + + assert result["success"] is False + assert result["exit_code"] == 1 + + @patch("subprocess.run") + @patch("shutil.which", return_value="/usr/bin/ccw") + def test_invoke_timeout(self, mock_which, mock_run): + """Test CCW CLI timeout handling.""" + 
import subprocess + mock_run.side_effect = subprocess.TimeoutExpired(cmd="ccw", timeout=300) + + enhancer = LLMEnhancer() + result = enhancer._invoke_ccw_cli("test prompt", tool="gemini") + + assert result["success"] is False + assert "timeout" in result["stderr"] + + @patch("subprocess.run") + @patch("shutil.which", return_value=None) + def test_invoke_ccw_not_found(self, mock_which, mock_run): + """Test CCW CLI not found handling.""" + mock_run.side_effect = FileNotFoundError() + + enhancer = LLMEnhancer() + result = enhancer._invoke_ccw_cli("test prompt", tool="gemini") + + assert result["success"] is False + assert "not found" in result["stderr"] + + +# === EnhancedSemanticIndexer Tests === + +class TestEnhancedSemanticIndexer: + """Tests for EnhancedSemanticIndexer integration.""" + + @pytest.fixture + def mock_enhancer(self): + """Create mock LLM enhancer.""" + enhancer = MagicMock(spec=LLMEnhancer) + enhancer.enhance_files.return_value = { + "/test/auth.py": SemanticMetadata( + summary="Authentication handler", + keywords=["auth", "login", "jwt"], + purpose="auth", + file_path="/test/auth.py", + llm_tool="gemini", + ) + } + return enhancer + + @pytest.fixture + def mock_embedder(self): + """Create mock embedder.""" + embedder = MagicMock() + embedder.embed.return_value = [[0.1] * 384] + embedder.embed_single.return_value = [0.1] * 384 + return embedder + + @pytest.fixture + def mock_vector_store(self): + """Create mock vector store.""" + store = MagicMock() + store.add_chunk.return_value = 1 + return store + + def test_index_files_empty_list(self, mock_enhancer, mock_embedder, mock_vector_store): + """Test indexing empty file list.""" + indexer = EnhancedSemanticIndexer(mock_enhancer, mock_embedder, mock_vector_store) + + result = indexer.index_files([]) + + assert result == 0 + mock_enhancer.enhance_files.assert_not_called() + + def test_index_files_with_llm_enhancement(self, mock_enhancer, mock_embedder, mock_vector_store): + """Test indexing with LLM enhancement.""" + indexer = EnhancedSemanticIndexer(mock_enhancer, mock_embedder, mock_vector_store) + files = [FileData(path="/test/auth.py", content="def login(): pass", language="python")] + + result = indexer.index_files(files) + + assert result == 1 + mock_enhancer.enhance_files.assert_called_once() + mock_embedder.embed.assert_called_once() + mock_vector_store.add_chunk.assert_called_once() + + def test_index_files_fallback_to_raw_code(self, mock_embedder, mock_vector_store): + """Test indexing falls back to raw code when LLM fails.""" + mock_enhancer = MagicMock(spec=LLMEnhancer) + mock_enhancer.enhance_files.return_value = {} # No enhancement + + indexer = EnhancedSemanticIndexer(mock_enhancer, mock_embedder, mock_vector_store) + files = [FileData(path="/test/file.py", content="code", language="python")] + + result = indexer.index_files(files) + + assert result == 1 + mock_embedder.embed_single.assert_called() + + def test_create_embeddable_text(self, mock_enhancer, mock_embedder, mock_vector_store): + """Test embeddable text creation.""" + indexer = EnhancedSemanticIndexer(mock_enhancer, mock_embedder, mock_vector_store) + + metadata = SemanticMetadata( + summary="Handles user authentication", + keywords=["auth", "login", "user"], + purpose="auth", + ) + file_data = FileData(path="/test/auth.py", content="code", language="python") + + text = indexer._create_embeddable_text(metadata, file_data) + + assert "Handles user authentication" in text + assert "auth" in text.lower() + assert "Keywords:" in text + assert "auth.py" in 
text + + +# === Factory Function Tests === + +class TestFactoryFunctions: + """Tests for factory functions.""" + + def test_create_enhancer_default(self): + """Test create_enhancer with defaults.""" + enhancer = create_enhancer() + + assert enhancer.config.tool == "gemini" + assert enhancer.config.enabled is True + + def test_create_enhancer_custom(self): + """Test create_enhancer with custom params.""" + enhancer = create_enhancer( + tool="qwen", + timeout_ms=600000, + batch_size=10, + enabled=False, + ) + + assert enhancer.config.tool == "qwen" + assert enhancer.config.timeout_ms == 600000 + assert enhancer.config.batch_size == 10 + assert enhancer.config.enabled is False + + @pytest.mark.skipif( + not pytest.importorskip("codexlens.semantic", reason="semantic not available"), + reason="Semantic dependencies not installed" + ) + def test_create_enhanced_indexer(self, tmp_path): + """Test create_enhanced_indexer factory.""" + try: + from codexlens.semantic import SEMANTIC_AVAILABLE + if not SEMANTIC_AVAILABLE: + pytest.skip("Semantic dependencies not installed") + + db_path = tmp_path / "semantic.db" + indexer = create_enhanced_indexer(db_path, llm_tool="gemini", llm_enabled=False) + + assert indexer.enhancer is not None + assert indexer.embedder is not None + assert indexer.vector_store is not None + except ImportError: + pytest.skip("Semantic dependencies not installed") + + +# === Edge Cases === + +class TestEdgeCases: + """Tests for edge cases.""" + + def test_semantic_metadata_with_special_chars(self): + """Test metadata with special characters.""" + metadata = SemanticMetadata( + summary='Test "quoted" and \'single\' quotes', + keywords=["special", "chars", "test's"], + purpose="test", + ) + assert '"quoted"' in metadata.summary + assert "test's" in metadata.keywords + + def test_file_data_with_unicode(self): + """Test FileData with unicode content.""" + data = FileData( + path="/test/中文.py", + content="def 你好(): return '世界'", + language="python", + ) + assert "中文" in data.path + assert "你好" in data.content + + @patch.object(LLMEnhancer, "check_available", return_value=True) + @patch.object(LLMEnhancer, "_invoke_ccw_cli") + def test_enhance_with_very_long_content(self, mock_invoke, mock_check): + """Test enhancement with very long content.""" + mock_invoke.return_value = { + "success": True, + "stdout": json.dumps({"files": {}}), + "stderr": "", + "exit_code": 0, + } + + config = LLMConfig(max_content_chars=100) + enhancer = LLMEnhancer(config) + + long_content = "x" * 10000 + files = [FileData(path="/test/long.py", content=long_content, language="python")] + + enhancer.enhance_files(files) + + # Should not crash, content should be truncated in prompt + mock_invoke.assert_called_once() + + def test_parse_response_with_missing_fields(self): + """Test parsing response with missing fields.""" + enhancer = LLMEnhancer() + response = json.dumps({ + "files": { + "/test/file.py": { + "summary": "Only summary provided", + # keywords and purpose missing + } + } + }) + + result = enhancer._parse_response(response, "gemini") + + assert "/test/file.py" in result + assert result["/test/file.py"].summary == "Only summary provided" + assert result["/test/file.py"].keywords == [] + assert result["/test/file.py"].purpose == "" diff --git a/codex-lens/tests/test_search_full_coverage.py b/codex-lens/tests/test_search_full_coverage.py new file mode 100644 index 00000000..6e3e8143 --- /dev/null +++ b/codex-lens/tests/test_search_full_coverage.py @@ -0,0 +1,1190 @@ +"""Full coverage tests for CodexLens 
search functionality. + +Comprehensive test suite covering: +- Chain search engine internals +- Multi-directory hierarchical search +- Result merging and deduplication +- Context manager behavior +- Semantic search integration +- Edge cases and error recovery +- Parallel search stress tests +- Boundary conditions +""" + +import tempfile +import pytest +import threading +import time +from pathlib import Path +from unittest.mock import MagicMock, patch, PropertyMock +from concurrent.futures import ThreadPoolExecutor + +from codexlens.storage.sqlite_store import SQLiteStore +from codexlens.storage.dir_index import DirIndexStore +from codexlens.storage.registry import RegistryStore +from codexlens.storage.path_mapper import PathMapper +from codexlens.search import ( + ChainSearchEngine, + SearchOptions, + SearchStats, + ChainSearchResult, + quick_search, +) +from codexlens.entities import IndexedFile, Symbol, SearchResult + + +# === Fixtures === + +@pytest.fixture +def temp_dir(): + """Create a temporary directory.""" + with tempfile.TemporaryDirectory() as tmpdir: + yield Path(tmpdir) + + +@pytest.fixture +def mock_registry(): + """Create a mock registry.""" + registry = MagicMock(spec=RegistryStore) + registry.find_nearest_index.return_value = None + return registry + + +@pytest.fixture +def mock_mapper(): + """Create a mock path mapper.""" + return MagicMock(spec=PathMapper) + + +@pytest.fixture +def sample_code_files(): + """Sample code file data for comprehensive testing.""" + return [ + # Authentication module + { + "name": "auth.py", + "language": "python", + "content": """ +def authenticate(username, password): + '''Authenticate user with credentials.''' + user = find_user(username) + if user and check_password(user, password): + return create_token(user) + return None + +def verify_token(token): + '''Verify JWT token validity.''' + try: + payload = decode_token(token) + return payload + except TokenExpired: + return None + +class AuthManager: + '''Manages authentication state.''' + def __init__(self): + self.sessions = {} + + def login(self, user): + token = authenticate(user.name, user.password) + self.sessions[user.id] = token + return token +""", + "symbols": [ + Symbol(name="authenticate", kind="function", range=(2, 8)), + Symbol(name="verify_token", kind="function", range=(10, 17)), + Symbol(name="AuthManager", kind="class", range=(19, 28)), + ], + }, + # Database module + { + "name": "database.py", + "language": "python", + "content": """ +def connect(host, port, database): + '''Establish database connection.''' + return Connection(host, port, database) + +def query(connection, sql, params=None): + '''Execute SQL query and return results.''' + cursor = connection.cursor() + cursor.execute(sql, params or []) + return cursor.fetchall() + +class DatabasePool: + '''Connection pool for database.''' + def __init__(self, size=10): + self.pool = [] + self.size = size + + def get_connection(self): + if self.pool: + return self.pool.pop() + return connect() +""", + "symbols": [ + Symbol(name="connect", kind="function", range=(2, 4)), + Symbol(name="query", kind="function", range=(6, 10)), + Symbol(name="DatabasePool", kind="class", range=(12, 21)), + ], + }, + # Utils module + { + "name": "utils.py", + "language": "python", + "content": """ +def format_date(date, fmt='%Y-%m-%d'): + return date.strftime(fmt) + +def parse_json(data): + '''Parse JSON string to dictionary.''' + import json + return json.loads(data) + +def hash_password(password, salt=None): + '''Hash password using bcrypt.''' 
+ import hashlib + salt = salt or generate_salt() + return hashlib.sha256((password + salt).encode()).hexdigest() +""", + "symbols": [ + Symbol(name="format_date", kind="function", range=(2, 3)), + Symbol(name="parse_json", kind="function", range=(5, 8)), + Symbol(name="hash_password", kind="function", range=(10, 14)), + ], + }, + ] + + +@pytest.fixture +def populated_single_store(temp_dir, sample_code_files): + """Create a single populated DirIndexStore.""" + db_path = temp_dir / "_index.db" + store = DirIndexStore(db_path) + store.initialize() + + for file_data in sample_code_files: + store.add_file( + name=file_data["name"], + full_path=str(temp_dir / file_data["name"]), + content=file_data["content"], + language=file_data["language"], + symbols=file_data["symbols"], + ) + + yield store + store.close() + + +@pytest.fixture +def hierarchical_index_structure(temp_dir, sample_code_files): + """Create a multi-level directory index structure for chain search testing. + + Structure: + project/ + _index.db (root) + src/ + _index.db + auth/ + _index.db + db/ + _index.db + tests/ + _index.db + """ + structure = {} + + # Root directory + root_dir = temp_dir / "project" + root_dir.mkdir() + root_db = root_dir / "_index.db" + root_store = DirIndexStore(root_db) + root_store.initialize() + root_store.add_file( + name="main.py", + full_path=str(root_dir / "main.py"), + content="# Main entry point\nfrom src import auth, db\ndef main(): pass", + language="python", + symbols=[Symbol(name="main", kind="function", range=(3, 3))], + ) + structure["root"] = {"path": root_dir, "db": root_db, "store": root_store} + + # src directory + src_dir = root_dir / "src" + src_dir.mkdir() + src_db = src_dir / "_index.db" + src_store = DirIndexStore(src_db) + src_store.initialize() + src_store.add_file( + name="__init__.py", + full_path=str(src_dir / "__init__.py"), + content="# Source package\nfrom .auth import authenticate\nfrom .db import connect", + language="python", + ) + structure["src"] = {"path": src_dir, "db": src_db, "store": src_store} + + # src/auth directory + auth_dir = src_dir / "auth" + auth_dir.mkdir() + auth_db = auth_dir / "_index.db" + auth_store = DirIndexStore(auth_db) + auth_store.initialize() + auth_store.add_file( + name="auth.py", + full_path=str(auth_dir / "auth.py"), + content=sample_code_files[0]["content"], + language="python", + symbols=sample_code_files[0]["symbols"], + ) + structure["auth"] = {"path": auth_dir, "db": auth_db, "store": auth_store} + + # src/db directory + db_dir = src_dir / "db" + db_dir.mkdir() + db_db = db_dir / "_index.db" + db_store = DirIndexStore(db_db) + db_store.initialize() + db_store.add_file( + name="database.py", + full_path=str(db_dir / "database.py"), + content=sample_code_files[1]["content"], + language="python", + symbols=sample_code_files[1]["symbols"], + ) + structure["db"] = {"path": db_dir, "db": db_db, "store": db_store} + + # tests directory + tests_dir = root_dir / "tests" + tests_dir.mkdir() + tests_db = tests_dir / "_index.db" + tests_store = DirIndexStore(tests_db) + tests_store.initialize() + tests_store.add_file( + name="test_auth.py", + full_path=str(tests_dir / "test_auth.py"), + content="import pytest\nfrom src.auth import authenticate\ndef test_authenticate(): assert authenticate('user', 'pass')", + language="python", + symbols=[Symbol(name="test_authenticate", kind="function", range=(3, 3))], + ) + structure["tests"] = {"path": tests_dir, "db": tests_db, "store": tests_store} + + # Link subdirectories + 
root_store.register_subdir(name="src", index_path=src_db) + root_store.register_subdir(name="tests", index_path=tests_db) + src_store.register_subdir(name="auth", index_path=auth_db) + src_store.register_subdir(name="db", index_path=db_db) + + # Close all stores before yielding to avoid Windows file locking issues + root_store.close() + src_store.close() + auth_store.close() + db_store.close() + tests_store.close() + + yield structure + + +# === Chain Search Engine Internal Tests === + +class TestChainSearchEngineInternals: + """Tests for ChainSearchEngine internal methods.""" + + def test_context_manager_enter_exit(self, mock_registry, mock_mapper): + """Test context manager protocol.""" + with ChainSearchEngine(mock_registry, mock_mapper) as engine: + assert engine is not None + assert isinstance(engine, ChainSearchEngine) + # Engine should be closed after exit + + def test_close_without_executor(self, mock_registry, mock_mapper): + """Test close() when executor was never created.""" + engine = ChainSearchEngine(mock_registry, mock_mapper) + engine.close() # Should not raise + + def test_close_with_executor(self, mock_registry, mock_mapper, temp_dir): + """Test close() properly shuts down executor.""" + # Create index + db_path = temp_dir / "_index.db" + store = DirIndexStore(db_path) + store.initialize() + store.add_file( + name="test.py", + full_path=str(temp_dir / "test.py"), + content="test content searchable", + language="python", + ) + store.close() + + mock_mapper.source_to_index_db.return_value = db_path + + engine = ChainSearchEngine(mock_registry, mock_mapper) + # Trigger executor creation + engine.search("test", temp_dir) + + # Close should work + engine.close() + assert engine._executor is None + + def test_get_executor_lazy_initialization(self, mock_registry, mock_mapper): + """Test executor is lazily initialized.""" + engine = ChainSearchEngine(mock_registry, mock_mapper) + assert engine._executor is None + + executor = engine._get_executor() + assert executor is not None + assert engine._executor is executor + + # Second call returns same instance + assert engine._get_executor() is executor + + engine.close() + + def test_get_executor_custom_workers(self, mock_registry, mock_mapper): + """Test executor with custom worker count.""" + engine = ChainSearchEngine(mock_registry, mock_mapper, max_workers=4) + executor = engine._get_executor() + assert executor is not None + engine.close() + + +class TestIndexPathCollection: + """Tests for _collect_index_paths method.""" + + def test_collect_depth_zero(self, mock_registry, mock_mapper, hierarchical_index_structure): + """Test collection with depth=0 returns only start index.""" + structure = hierarchical_index_structure + root_db = structure["root"]["db"] + + mock_mapper.source_to_index_db.return_value = root_db + + engine = ChainSearchEngine(mock_registry, mock_mapper) + paths = engine._collect_index_paths(root_db, depth=0) + + assert len(paths) == 1 + assert paths[0] == root_db.resolve() + engine.close() + + def test_collect_depth_one(self, mock_registry, mock_mapper, hierarchical_index_structure): + """Test collection with depth=1 returns root + immediate children.""" + structure = hierarchical_index_structure + root_db = structure["root"]["db"] + + mock_mapper.source_to_index_db.return_value = root_db + + engine = ChainSearchEngine(mock_registry, mock_mapper) + paths = engine._collect_index_paths(root_db, depth=1) + + # Should include root, src, tests (not auth/db which are depth 2) + assert len(paths) == 3 + engine.close() + 
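+    # Depth semantics recap for the fixture above (illustrative comment, not
+    # an extra assertion): depth=0 -> {root}; depth=1 -> {root, src, tests};
+    # depth=-1 -> {root, src, tests, auth, db}.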
+ def test_collect_depth_unlimited(self, mock_registry, mock_mapper, hierarchical_index_structure): + """Test collection with depth=-1 returns all indexes.""" + structure = hierarchical_index_structure + root_db = structure["root"]["db"] + + mock_mapper.source_to_index_db.return_value = root_db + + engine = ChainSearchEngine(mock_registry, mock_mapper) + paths = engine._collect_index_paths(root_db, depth=-1) + + # Should include all 5: root, src, tests, auth, db + assert len(paths) == 5 + engine.close() + + def test_collect_avoids_duplicates(self, mock_registry, mock_mapper, hierarchical_index_structure): + """Test collection deduplicates paths.""" + structure = hierarchical_index_structure + root_db = structure["root"]["db"] + + engine = ChainSearchEngine(mock_registry, mock_mapper) + paths = engine._collect_index_paths(root_db, depth=-1) + + # All paths should be unique + path_set = set(str(p) for p in paths) + assert len(path_set) == len(paths) + engine.close() + + def test_collect_handles_missing_subdir_index(self, mock_registry, mock_mapper, temp_dir): + """Test collection handles missing subdirectory indexes gracefully.""" + # Create root with reference to non-existent subdir + root_db = temp_dir / "_index.db" + store = DirIndexStore(root_db) + store.initialize() + store.add_file( + name="test.py", + full_path=str(temp_dir / "test.py"), + content="test", + language="python", + ) + # Add reference to non-existent index + store.register_subdir(name="missing", index_path=temp_dir / "missing" / "_index.db") + store.close() + + engine = ChainSearchEngine(mock_registry, mock_mapper) + paths = engine._collect_index_paths(root_db, depth=-1) + + # Should only include root (missing subdir is skipped) + assert len(paths) == 1 + engine.close() + + +class TestResultMergeAndRank: + """Tests for _merge_and_rank method.""" + + def test_merge_deduplicates_by_path(self, mock_registry, mock_mapper): + """Test merging deduplicates results by path.""" + engine = ChainSearchEngine(mock_registry, mock_mapper) + + results = [ + SearchResult(path="/test/file.py", score=10.0, excerpt="match 1"), + SearchResult(path="/test/file.py", score=5.0, excerpt="match 2"), + SearchResult(path="/test/other.py", score=8.0, excerpt="match 3"), + ] + + merged = engine._merge_and_rank(results, limit=10) + + assert len(merged) == 2 + # Should keep highest score for duplicate path + file_result = next(r for r in merged if r.path == "/test/file.py") + assert file_result.score == 10.0 + engine.close() + + def test_merge_sorts_by_score_descending(self, mock_registry, mock_mapper): + """Test merged results are sorted by score descending.""" + engine = ChainSearchEngine(mock_registry, mock_mapper) + + results = [ + SearchResult(path="/test/low.py", score=1.0, excerpt=""), + SearchResult(path="/test/high.py", score=100.0, excerpt=""), + SearchResult(path="/test/mid.py", score=50.0, excerpt=""), + ] + + merged = engine._merge_and_rank(results, limit=10) + + assert merged[0].path == "/test/high.py" + assert merged[1].path == "/test/mid.py" + assert merged[2].path == "/test/low.py" + engine.close() + + def test_merge_respects_limit(self, mock_registry, mock_mapper): + """Test merge respects limit parameter.""" + engine = ChainSearchEngine(mock_registry, mock_mapper) + + results = [ + SearchResult(path=f"/test/file{i}.py", score=float(i), excerpt="") + for i in range(100) + ] + + merged = engine._merge_and_rank(results, limit=5) + + assert len(merged) == 5 + # Should be the top 5 by score + assert merged[0].score == 99.0 + 
engine.close() + + def test_merge_empty_results(self, mock_registry, mock_mapper): + """Test merge handles empty results.""" + engine = ChainSearchEngine(mock_registry, mock_mapper) + merged = engine._merge_and_rank([], limit=10) + assert merged == [] + engine.close() + + +# === Hierarchical Chain Search Tests === + +class TestHierarchicalChainSearch: + """Tests for searching across directory hierarchies.""" + + def test_search_from_root(self, mock_registry, mock_mapper, hierarchical_index_structure): + """Test search starting from root finds results in all subdirectories.""" + structure = hierarchical_index_structure + root_db = structure["root"]["db"] + root_path = structure["root"]["path"] + + mock_mapper.source_to_index_db.return_value = root_db + + engine = ChainSearchEngine(mock_registry, mock_mapper) + result = engine.search("authenticate", root_path) + + # Should find authenticate in auth.py and test_auth.py + assert len(result.results) >= 1 + assert result.stats.dirs_searched == 5 # All directories + engine.close() + + def test_search_from_subdir(self, mock_registry, mock_mapper, hierarchical_index_structure): + """Test search starting from subdirectory.""" + structure = hierarchical_index_structure + src_db = structure["src"]["db"] + src_path = structure["src"]["path"] + + mock_mapper.source_to_index_db.return_value = src_db + + engine = ChainSearchEngine(mock_registry, mock_mapper) + result = engine.search("authenticate", src_path) + + # Should find only in src subtree (src, auth, db) + assert result.stats.dirs_searched == 3 + engine.close() + + def test_search_with_depth_limit(self, mock_registry, mock_mapper, hierarchical_index_structure): + """Test search respects depth limit.""" + structure = hierarchical_index_structure + root_db = structure["root"]["db"] + root_path = structure["root"]["path"] + + mock_mapper.source_to_index_db.return_value = root_db + + engine = ChainSearchEngine(mock_registry, mock_mapper) + options = SearchOptions(depth=1) + result = engine.search("authenticate", root_path, options) + + # Depth 1: root + immediate children (src, tests) = 3 + assert result.stats.dirs_searched == 3 + engine.close() + + def test_search_aggregates_results(self, mock_registry, mock_mapper, hierarchical_index_structure): + """Test search aggregates results from multiple directories.""" + structure = hierarchical_index_structure + root_db = structure["root"]["db"] + root_path = structure["root"]["path"] + + mock_mapper.source_to_index_db.return_value = root_db + + engine = ChainSearchEngine(mock_registry, mock_mapper) + # Search for term that appears in multiple files + result = engine.search("def", root_path) + + # Should find results from multiple files + assert len(result.results) >= 3 + engine.close() + + +# === Search Files Only Tests === + +class TestSearchFilesOnly: + """Tests for search_files_only method.""" + + def test_returns_list_of_strings(self, mock_registry, mock_mapper, temp_dir): + """Test search_files_only returns list of path strings.""" + db_path = temp_dir / "_index.db" + store = DirIndexStore(db_path) + store.initialize() + store.add_file( + name="test.py", + full_path=str(temp_dir / "test.py"), + content="searchable content here", + language="python", + ) + store.close() + + mock_mapper.source_to_index_db.return_value = db_path + + engine = ChainSearchEngine(mock_registry, mock_mapper) + paths = engine.search_files_only("searchable", temp_dir) + + assert isinstance(paths, list) + assert all(isinstance(p, str) for p in paths) + engine.close() + + def 
test_files_only_faster_than_full(self, mock_registry, mock_mapper, temp_dir):
+        """Test files_only search is not significantly slower than full search."""
+        db_path = temp_dir / "_index.db"
+        store = DirIndexStore(db_path)
+        store.initialize()
+
+        # Add multiple files
+        for i in range(20):
+            store.add_file(
+                name=f"file{i}.py",
+                full_path=str(temp_dir / f"file{i}.py"),
+                content=f"searchable content number {i} with more text to index",
+                language="python",
+            )
+        store.close()
+
+        mock_mapper.source_to_index_db.return_value = db_path
+
+        engine = ChainSearchEngine(mock_registry, mock_mapper)
+
+        # Time files_only
+        start = time.perf_counter()
+        for _ in range(10):
+            engine.search_files_only("searchable", temp_dir)
+        files_only_time = time.perf_counter() - start
+
+        # Time full search
+        start = time.perf_counter()
+        for _ in range(10):
+            engine.search("searchable", temp_dir)
+        full_time = time.perf_counter() - start
+
+        # files_only should not be significantly slower
+        # (it may not be faster on a dataset this small)
+        assert files_only_time <= full_time * 2
+        engine.close()
+
+
+# === Symbol Search Tests ===
+
+class TestChainSymbolSearch:
+    """Tests for chain symbol search."""
+
+    def test_symbol_search_finds_across_dirs(self, mock_registry, mock_mapper, hierarchical_index_structure):
+        """Test symbol search finds symbols across directories."""
+        structure = hierarchical_index_structure
+        root_db = structure["root"]["db"]
+        root_path = structure["root"]["path"]
+
+        mock_mapper.source_to_index_db.return_value = root_db
+
+        engine = ChainSearchEngine(mock_registry, mock_mapper)
+        symbols = engine.search_symbols("auth", root_path)
+
+        # Should find authenticate and AuthManager
+        assert len(symbols) >= 2
+        engine.close()
+
+    def test_symbol_search_with_kind_filter(self, mock_registry, mock_mapper, hierarchical_index_structure):
+        """Test symbol search with kind filter."""
+        structure = hierarchical_index_structure
+        root_db = structure["root"]["db"]
+        root_path = structure["root"]["path"]
+
+        mock_mapper.source_to_index_db.return_value = root_db
+
+        engine = ChainSearchEngine(mock_registry, mock_mapper)
+        classes = engine.search_symbols("", root_path, kind="class")
+
+        # Every returned symbol should be a class (e.g. AuthManager, DatabasePool)
+        assert all(s.kind == "class" for s in classes)
+        engine.close()
+
+    def test_symbol_search_deduplicates(self, mock_registry, mock_mapper, temp_dir):
+        """Test symbol search deduplicates by (name, kind, range), keeping same-named symbols with different ranges."""
+        # Create two indexes with same symbol name but different ranges
+        dir1 = temp_dir / "dir1"
+        dir1.mkdir()
+        db1 = dir1 / "_index.db"
+        store1 = DirIndexStore(db1)
+        store1.initialize()
+        store1.add_file(
+            name="a.py",
+            full_path=str(dir1 / "a.py"),
+            content="def foo(): pass",
+            language="python",
+            symbols=[Symbol(name="foo", kind="function", range=(1, 5))],  # Different range
+        )
+
+        dir2 = temp_dir / "dir2"
+        dir2.mkdir()
+        db2 = dir2 / "_index.db"
+        store2 = DirIndexStore(db2)
+        store2.initialize()
+        store2.add_file(
+            name="b.py",
+            full_path=str(dir2 / "b.py"),
+            content="def foo(): pass\n# more code\n",
+            language="python",
+            symbols=[Symbol(name="foo", kind="function", range=(1, 10))],  # Different range
+        )
+        store2.close()
+
+        # Register subdir after dir2 is created
+        store1.register_subdir(name="dir2", index_path=db2)
+        store1.close()
+
+        mock_mapper.source_to_index_db.return_value = db1
+
+        engine = ChainSearchEngine(mock_registry, mock_mapper)
+        symbols = engine.search_symbols("foo", dir1)
+
+        # Should have exactly 2 (different ranges make them 
unique) + assert len(symbols) == 2 + engine.close() + + +# === Search Options Tests === + +class TestSearchOptionsExtended: + """Extended tests for SearchOptions.""" + + def test_include_semantic_option(self): + """Test include_semantic option.""" + options = SearchOptions(include_semantic=True) + assert options.include_semantic is True + + options_default = SearchOptions() + assert options_default.include_semantic is False + + def test_all_options_combined(self): + """Test all options set together.""" + options = SearchOptions( + depth=5, + max_workers=16, + limit_per_dir=20, + total_limit=200, + include_symbols=True, + files_only=True, + include_semantic=True, + ) + assert options.depth == 5 + assert options.max_workers == 16 + assert options.limit_per_dir == 20 + assert options.total_limit == 200 + assert options.include_symbols is True + assert options.files_only is True + assert options.include_semantic is True + + def test_options_with_zero_values(self): + """Test options with zero values.""" + options = SearchOptions( + depth=0, + max_workers=1, + limit_per_dir=1, + total_limit=1, + ) + assert options.depth == 0 + assert options.max_workers == 1 + assert options.limit_per_dir == 1 + assert options.total_limit == 1 + + +# === Quick Search Tests === + +class TestQuickSearch: + """Tests for quick_search convenience function.""" + + def test_quick_search_returns_results(self, temp_dir): + """Test quick_search returns SearchResult list.""" + # Setup: Create index at a known location + db_path = temp_dir / "_index.db" + store = DirIndexStore(db_path) + store.initialize() + store.add_file( + name="test.py", + full_path=str(temp_dir / "test.py"), + content="searchable content for quick search test", + language="python", + ) + store.close() + + # Test requires actual registry - skip if not initialized + try: + results = quick_search("searchable", temp_dir) + assert isinstance(results, list) + except Exception: + # May fail if registry not properly set up + pytest.skip("Registry not available for quick_search test") + + def test_quick_search_with_depth(self, temp_dir): + """Test quick_search respects depth parameter.""" + db_path = temp_dir / "_index.db" + store = DirIndexStore(db_path) + store.initialize() + store.add_file( + name="test.py", + full_path=str(temp_dir / "test.py"), + content="test content", + language="python", + ) + store.close() + + try: + results = quick_search("test", temp_dir, depth=0) + assert isinstance(results, list) + except Exception: + pytest.skip("Registry not available for quick_search test") + + +# === Edge Cases and Error Handling === + +class TestSearchErrorHandling: + """Tests for search error handling.""" + + def test_search_corrupted_index(self, mock_registry, mock_mapper, temp_dir): + """Test search handles corrupted index gracefully.""" + # Create corrupted index file + db_path = temp_dir / "_index.db" + db_path.write_text("not a valid sqlite database") + + mock_mapper.source_to_index_db.return_value = db_path + + engine = ChainSearchEngine(mock_registry, mock_mapper) + try: + result = engine.search("test", temp_dir) + # Should return empty results, not crash + assert result.results == [] + finally: + engine.close() + # Force cleanup on Windows + import gc + gc.collect() + + def test_search_empty_index(self, mock_registry, mock_mapper, temp_dir): + """Test search on empty index returns empty results.""" + db_path = temp_dir / "_index.db" + store = DirIndexStore(db_path) + store.initialize() + store.close() + + mock_mapper.source_to_index_db.return_value = 
db_path + + engine = ChainSearchEngine(mock_registry, mock_mapper) + result = engine.search("anything", temp_dir) + + assert result.results == [] + assert result.stats.files_matched == 0 + engine.close() + + def test_search_special_fts_characters(self, mock_registry, mock_mapper, temp_dir): + """Test search handles FTS5 special characters.""" + db_path = temp_dir / "_index.db" + store = DirIndexStore(db_path) + store.initialize() + store.add_file( + name="test.py", + full_path=str(temp_dir / "test.py"), + content="test content", + language="python", + ) + store.close() + + mock_mapper.source_to_index_db.return_value = db_path + + engine = ChainSearchEngine(mock_registry, mock_mapper) + + # These should not crash + special_queries = [ + "test*", + "test OR other", + '"exact phrase"', + "NOT invalid", + ] + + for query in special_queries: + result = engine.search(query, temp_dir) + assert isinstance(result.results, list) + + engine.close() + + +# === Concurrent Search Tests === + +class TestConcurrentSearch: + """Tests for concurrent search operations.""" + + def test_multiple_concurrent_searches(self, mock_registry, mock_mapper, temp_dir): + """Test multiple concurrent searches don't interfere.""" + db_path = temp_dir / "_index.db" + store = DirIndexStore(db_path) + store.initialize() + for i in range(10): + store.add_file( + name=f"file{i}.py", + full_path=str(temp_dir / f"file{i}.py"), + content=f"content{i} searchable data", + language="python", + ) + store.close() + + mock_mapper.source_to_index_db.return_value = db_path + + engine = ChainSearchEngine(mock_registry, mock_mapper) + + results = [] + errors = [] + + def search_task(query): + try: + r = engine.search(query, temp_dir) + results.append(len(r.results)) + except Exception as e: + errors.append(str(e)) + + threads = [ + threading.Thread(target=search_task, args=(f"content{i}",)) + for i in range(5) + ] + + for t in threads: + t.start() + for t in threads: + t.join() + + assert len(errors) == 0 + assert len(results) == 5 + engine.close() + + def test_search_during_close(self, mock_registry, mock_mapper, temp_dir): + """Test behavior when search happens during close.""" + db_path = temp_dir / "_index.db" + store = DirIndexStore(db_path) + store.initialize() + store.add_file( + name="test.py", + full_path=str(temp_dir / "test.py"), + content="test content", + language="python", + ) + store.close() + + mock_mapper.source_to_index_db.return_value = db_path + + engine = ChainSearchEngine(mock_registry, mock_mapper) + + # Start a search then immediately close + result = engine.search("test", temp_dir) + engine.close() + + # Should complete without error + assert isinstance(result.results, list) + + +# === Search Statistics Tests === + +class TestSearchStatsExtended: + """Extended tests for search statistics.""" + + def test_stats_time_is_positive(self, mock_registry, mock_mapper, temp_dir): + """Test search time is recorded and positive.""" + db_path = temp_dir / "_index.db" + store = DirIndexStore(db_path) + store.initialize() + store.add_file( + name="test.py", + full_path=str(temp_dir / "test.py"), + content="test content", + language="python", + ) + store.close() + + mock_mapper.source_to_index_db.return_value = db_path + + engine = ChainSearchEngine(mock_registry, mock_mapper) + result = engine.search("test", temp_dir) + + assert result.stats.time_ms >= 0 + engine.close() + + def test_stats_dirs_searched_accurate(self, mock_registry, mock_mapper, hierarchical_index_structure): + """Test dirs_searched count is accurate.""" + 
structure = hierarchical_index_structure + root_db = structure["root"]["db"] + root_path = structure["root"]["path"] + + mock_mapper.source_to_index_db.return_value = root_db + + engine = ChainSearchEngine(mock_registry, mock_mapper) + + # Depth 0 + result0 = engine.search("test", root_path, SearchOptions(depth=0)) + assert result0.stats.dirs_searched == 1 + + # Depth 1 + result1 = engine.search("test", root_path, SearchOptions(depth=1)) + assert result1.stats.dirs_searched == 3 # root + src + tests + + # Unlimited + result_all = engine.search("test", root_path, SearchOptions(depth=-1)) + assert result_all.stats.dirs_searched == 5 + + engine.close() + + def test_stats_files_matched_accurate(self, mock_registry, mock_mapper, temp_dir): + """Test files_matched count is accurate.""" + db_path = temp_dir / "_index.db" + store = DirIndexStore(db_path) + store.initialize() + + # Add files with different content + store.add_file(name="match1.py", full_path=str(temp_dir / "match1.py"), + content="findme keyword", language="python") + store.add_file(name="match2.py", full_path=str(temp_dir / "match2.py"), + content="findme keyword", language="python") + store.add_file(name="nomatch.py", full_path=str(temp_dir / "nomatch.py"), + content="other content", language="python") + store.close() + + mock_mapper.source_to_index_db.return_value = db_path + + engine = ChainSearchEngine(mock_registry, mock_mapper) + result = engine.search("findme", temp_dir) + + assert result.stats.files_matched == 2 + engine.close() + + +# === Boundary Condition Tests === + +class TestBoundaryConditions: + """Tests for boundary conditions.""" + + def test_search_with_max_workers_one(self, mock_registry, mock_mapper, temp_dir): + """Test search with single worker.""" + db_path = temp_dir / "_index.db" + store = DirIndexStore(db_path) + store.initialize() + store.add_file(name="test.py", full_path=str(temp_dir / "test.py"), + content="test content", language="python") + store.close() + + mock_mapper.source_to_index_db.return_value = db_path + + engine = ChainSearchEngine(mock_registry, mock_mapper, max_workers=1) + result = engine.search("test", temp_dir, SearchOptions(max_workers=1)) + + assert isinstance(result.results, list) + engine.close() + + def test_search_with_limit_one(self, mock_registry, mock_mapper, temp_dir): + """Test search with limit=1.""" + db_path = temp_dir / "_index.db" + store = DirIndexStore(db_path) + store.initialize() + for i in range(10): + store.add_file(name=f"file{i}.py", full_path=str(temp_dir / f"file{i}.py"), + content="searchable content", language="python") + store.close() + + mock_mapper.source_to_index_db.return_value = db_path + + engine = ChainSearchEngine(mock_registry, mock_mapper) + result = engine.search("searchable", temp_dir, SearchOptions(total_limit=1)) + + assert len(result.results) <= 1 + engine.close() + + def test_search_very_long_query(self, mock_registry, mock_mapper, temp_dir): + """Test search with very long query.""" + db_path = temp_dir / "_index.db" + store = DirIndexStore(db_path) + store.initialize() + store.add_file(name="test.py", full_path=str(temp_dir / "test.py"), + content="test content", language="python") + store.close() + + mock_mapper.source_to_index_db.return_value = db_path + + engine = ChainSearchEngine(mock_registry, mock_mapper) + + # Very long query + long_query = " ".join(["word"] * 100) + result = engine.search(long_query, temp_dir) + + # Should not crash + assert isinstance(result.results, list) + engine.close() + + def test_search_unicode_query(self, 
mock_registry, mock_mapper, temp_dir):
+        """Test unicode content and queries do not crash the search."""
+        db_path = temp_dir / "_index.db"
+        store = DirIndexStore(db_path)
+        store.initialize()
+        store.add_file(
+            name="unicode.py",
+            full_path=str(temp_dir / "unicode.py"),
+            content="# 中文注释 Chinese comment\ndef hello(): return 'hello world'",
+            language="python",
+        )
+        store.close()
+
+        mock_mapper.source_to_index_db.return_value = db_path
+
+        engine = ChainSearchEngine(mock_registry, mock_mapper)
+
+        # A unicode query must not crash (matches depend on the FTS5 tokenizer)
+        unicode_result = engine.search("中文", temp_dir)
+        assert isinstance(unicode_result.results, list)
+
+        # An ASCII query against the same unicode file should match
+        result = engine.search("hello", temp_dir)
+        assert isinstance(result.results, list)
+        assert len(result.results) >= 1
+        engine.close()
+
+    def test_search_empty_directory(self, mock_registry, mock_mapper, temp_dir):
+        """Test search in directory with no files."""
+        db_path = temp_dir / "_index.db"
+        store = DirIndexStore(db_path)
+        store.initialize()
+        # Don't add any files
+        store.close()
+
+        mock_mapper.source_to_index_db.return_value = db_path
+
+        engine = ChainSearchEngine(mock_registry, mock_mapper)
+        result = engine.search("anything", temp_dir)
+
+        assert result.results == []
+        assert result.stats.files_matched == 0
+        engine.close()
+
+
+# === Include Symbols Option Tests ===
+
+class TestIncludeSymbolsOption:
+    """Tests for include_symbols search option."""
+
+    def test_search_with_include_symbols(self, mock_registry, mock_mapper, temp_dir):
+        """Test search returns symbols when include_symbols=True."""
+        db_path = temp_dir / "_index.db"
+        store = DirIndexStore(db_path)
+        store.initialize()
+        store.add_file(
+            name="test.py",
+            full_path=str(temp_dir / "test.py"),
+            content="def my_function(): pass",
+            language="python",
+            symbols=[Symbol(name="my_function", kind="function", range=(1, 1))],
+        )
+        store.close()
+
+        mock_mapper.source_to_index_db.return_value = db_path
+
+        engine = ChainSearchEngine(mock_registry, mock_mapper)
+
+        # Without include_symbols
+        result_no_symbols = engine.search("function", temp_dir, SearchOptions(include_symbols=False))
+        assert result_no_symbols.symbols == []
+
+        # With include_symbols
+        result_with_symbols = engine.search("function", temp_dir, SearchOptions(include_symbols=True))
+        # Symbols list populated (may or may not match depending on implementation)
+        assert isinstance(result_with_symbols.symbols, list)
+
+        engine.close()
+
+
+# === ChainSearchResult Tests ===
+
+class TestChainSearchResultExtended:
+    """Extended tests for ChainSearchResult dataclass."""
+
+    def test_result_field_access(self):
+        """Test ChainSearchResult exposes the fields it was built with."""
+        stats = SearchStats(dirs_searched=5, files_matched=10, time_ms=100.5)
+        results = [SearchResult(path="/test.py", score=1.0, excerpt="test")]
+        symbols = [Symbol(name="foo", kind="function", range=(1, 5))]
+
+        result = ChainSearchResult(
+            query="test query",
+            results=results,
+            symbols=symbols,
+            stats=stats,
+        )
+
+        assert result.query == "test query"
+        assert len(result.results) == 1
+        assert len(result.symbols) == 1
+        assert result.stats.dirs_searched == 5
+
+    def test_result_with_empty_collections(self):
+        """Test ChainSearchResult with empty results and symbols."""
+        result = ChainSearchResult(
+            query="no matches",
+            results=[],
+            symbols=[],
+            stats=SearchStats(),
+        )
+
+        assert result.query == "no matches"
+        assert result.results == []
+        assert result.symbols == []
+        assert result.stats.dirs_searched == 0
diff --git a/codex-lens/tests/test_vector_search_full.py b/codex-lens/tests/test_vector_search_full.py
new file mode 100644
index 
00000000..fe79f95b --- /dev/null +++ b/codex-lens/tests/test_vector_search_full.py @@ -0,0 +1,747 @@ +"""Full coverage tests for vector/semantic search functionality. + +Tests cover: +- Embedder model loading and embedding generation +- VectorStore CRUD operations and caching +- Cosine similarity computation +- Semantic search accuracy and relevance +- Performance benchmarks +- Edge cases and error handling +- Thread safety and concurrent access +""" + +import json +import tempfile +import threading +import time +from pathlib import Path +from typing import List + +import pytest + +from codexlens.entities import SemanticChunk, Symbol, SearchResult +from codexlens.semantic import SEMANTIC_AVAILABLE, check_semantic_available + +# Skip all tests if semantic dependencies not available +pytestmark = pytest.mark.skipif( + not SEMANTIC_AVAILABLE, + reason="Semantic search dependencies not installed (pip install codexlens[semantic])" +) + + +# === Fixtures === + +@pytest.fixture +def temp_db(tmp_path): + """Create temporary database path.""" + return tmp_path / "test_semantic.db" + + +@pytest.fixture +def embedder(): + """Create Embedder instance.""" + from codexlens.semantic.embedder import Embedder + return Embedder() + + +@pytest.fixture +def vector_store(temp_db): + """Create VectorStore instance.""" + from codexlens.semantic.vector_store import VectorStore + return VectorStore(temp_db) + + +@pytest.fixture +def sample_code_chunks(): + """Sample code chunks for testing.""" + return [ + { + "content": "def authenticate(username, password): return check_credentials(username, password)", + "metadata": {"symbol_name": "authenticate", "symbol_kind": "function", "start_line": 1, "end_line": 1, "language": "python"}, + }, + { + "content": "class DatabaseConnection:\n def connect(self, host, port): pass\n def execute(self, query): pass", + "metadata": {"symbol_name": "DatabaseConnection", "symbol_kind": "class", "start_line": 1, "end_line": 3, "language": "python"}, + }, + { + "content": "async function fetchUserData(userId) { return await api.get('/users/' + userId); }", + "metadata": {"symbol_name": "fetchUserData", "symbol_kind": "function", "start_line": 1, "end_line": 1, "language": "javascript"}, + }, + { + "content": "def calculate_sum(numbers): return sum(numbers)", + "metadata": {"symbol_name": "calculate_sum", "symbol_kind": "function", "start_line": 1, "end_line": 1, "language": "python"}, + }, + { + "content": "class UserProfile:\n def __init__(self, name, email):\n self.name = name\n self.email = email", + "metadata": {"symbol_name": "UserProfile", "symbol_kind": "class", "start_line": 1, "end_line": 4, "language": "python"}, + }, + ] + + +# === Embedder Tests === + +class TestEmbedder: + """Tests for Embedder class.""" + + def test_embedder_initialization(self, embedder): + """Test embedder initializes correctly.""" + assert embedder.model_name == "BAAI/bge-small-en-v1.5" + assert embedder.EMBEDDING_DIM == 384 + assert embedder._model is None # Lazy loading + + def test_embed_single_returns_correct_dimension(self, embedder): + """Test single embedding has correct dimension.""" + text = "def hello(): print('world')" + embedding = embedder.embed_single(text) + + assert isinstance(embedding, list) + assert len(embedding) == 384 + assert all(isinstance(x, float) for x in embedding) + + def test_embed_batch_returns_correct_count(self, embedder): + """Test batch embedding returns correct number of embeddings.""" + texts = [ + "def foo(): pass", + "def bar(): pass", + "def baz(): pass", + ] + 
embeddings = embedder.embed(texts) + + assert len(embeddings) == len(texts) + assert all(len(e) == 384 for e in embeddings) + + def test_embed_empty_string(self, embedder): + """Test embedding empty string.""" + embedding = embedder.embed_single("") + assert len(embedding) == 384 + + def test_embed_unicode_text(self, embedder): + """Test embedding unicode text.""" + text = "def 你好(): return '世界'" + embedding = embedder.embed_single(text) + assert len(embedding) == 384 + + def test_embed_long_text(self, embedder): + """Test embedding long text.""" + text = "def process(): pass\n" * 100 + embedding = embedder.embed_single(text) + assert len(embedding) == 384 + + def test_embed_special_characters(self, embedder): + """Test embedding text with special characters.""" + text = "def test(): return {'key': 'value', '@decorator': True}" + embedding = embedder.embed_single(text) + assert len(embedding) == 384 + + def test_lazy_model_loading(self, embedder): + """Test model loads lazily on first embed call.""" + assert embedder._model is None + embedder.embed_single("test") + assert embedder._model is not None + + def test_model_reuse(self, embedder): + """Test model is reused across multiple calls.""" + embedder.embed_single("test1") + model_ref = embedder._model + embedder.embed_single("test2") + assert embedder._model is model_ref # Same instance + + +class TestEmbeddingSimilarity: + """Tests for embedding similarity.""" + + def test_identical_text_similarity(self, embedder): + """Test identical text has similarity ~1.0.""" + from codexlens.semantic.vector_store import _cosine_similarity + + text = "def calculate_sum(a, b): return a + b" + emb1 = embedder.embed_single(text) + emb2 = embedder.embed_single(text) + + similarity = _cosine_similarity(emb1, emb2) + assert similarity > 0.99, "Identical text should have ~1.0 similarity" + + def test_similar_code_high_similarity(self, embedder): + """Test similar code has high similarity.""" + from codexlens.semantic.vector_store import _cosine_similarity + + code1 = "def add(a, b): return a + b" + code2 = "def sum_numbers(x, y): return x + y" + + emb1 = embedder.embed_single(code1) + emb2 = embedder.embed_single(code2) + + similarity = _cosine_similarity(emb1, emb2) + assert similarity > 0.6, "Similar functions should have high similarity" + + def test_different_code_lower_similarity(self, embedder): + """Test different code has lower similarity than similar code.""" + from codexlens.semantic.vector_store import _cosine_similarity + + code1 = "def add(a, b): return a + b" + code2 = "def sum_numbers(x, y): return x + y" + code3 = "class UserAuth: def login(self, user, pwd): pass" + + emb1 = embedder.embed_single(code1) + emb2 = embedder.embed_single(code2) + emb3 = embedder.embed_single(code3) + + sim_similar = _cosine_similarity(emb1, emb2) + sim_different = _cosine_similarity(emb1, emb3) + + assert sim_similar > sim_different, "Similar code should have higher similarity" + + def test_zero_vector_similarity(self): + """Test cosine similarity with zero vector.""" + from codexlens.semantic.vector_store import _cosine_similarity + + zero_vec = [0.0] * 384 + normal_vec = [1.0] * 384 + + similarity = _cosine_similarity(zero_vec, normal_vec) + assert similarity == 0.0, "Zero vector should have 0 similarity" + + +# === VectorStore Tests === + +class TestVectorStoreCRUD: + """Tests for VectorStore CRUD operations.""" + + def test_add_chunk(self, vector_store, embedder): + """Test adding a single chunk.""" + chunk = SemanticChunk( + content="def test(): pass", + 
metadata={"language": "python"}, + ) + chunk.embedding = embedder.embed_single(chunk.content) + + chunk_id = vector_store.add_chunk(chunk, "/test/file.py") + + assert chunk_id > 0 + assert vector_store.count_chunks() == 1 + + def test_add_chunk_without_embedding_raises(self, vector_store): + """Test adding chunk without embedding raises error.""" + chunk = SemanticChunk(content="def test(): pass", metadata={}) + + with pytest.raises(ValueError, match="must have embedding"): + vector_store.add_chunk(chunk, "/test/file.py") + + def test_add_chunks_batch(self, vector_store, embedder, sample_code_chunks): + """Test batch adding chunks.""" + chunks = [] + for data in sample_code_chunks: + chunk = SemanticChunk(content=data["content"], metadata=data["metadata"]) + chunk.embedding = embedder.embed_single(chunk.content) + chunks.append(chunk) + + ids = vector_store.add_chunks(chunks, "/test/multi.py") + + assert len(ids) == len(chunks) + assert vector_store.count_chunks() == len(chunks) + + def test_add_empty_batch(self, vector_store): + """Test adding empty batch returns empty list.""" + ids = vector_store.add_chunks([], "/test/empty.py") + assert ids == [] + + def test_delete_file_chunks(self, vector_store, embedder): + """Test deleting chunks by file path.""" + # Add chunks for two files + chunk1 = SemanticChunk(content="def a(): pass", metadata={}) + chunk1.embedding = embedder.embed_single(chunk1.content) + vector_store.add_chunk(chunk1, "/test/file1.py") + + chunk2 = SemanticChunk(content="def b(): pass", metadata={}) + chunk2.embedding = embedder.embed_single(chunk2.content) + vector_store.add_chunk(chunk2, "/test/file2.py") + + assert vector_store.count_chunks() == 2 + + # Delete one file's chunks + deleted = vector_store.delete_file_chunks("/test/file1.py") + + assert deleted == 1 + assert vector_store.count_chunks() == 1 + + def test_delete_nonexistent_file(self, vector_store): + """Test deleting non-existent file returns 0.""" + deleted = vector_store.delete_file_chunks("/nonexistent/file.py") + assert deleted == 0 + + def test_count_chunks_empty(self, vector_store): + """Test count on empty store.""" + assert vector_store.count_chunks() == 0 + + +class TestVectorStoreSearch: + """Tests for VectorStore search functionality.""" + + def test_search_similar_basic(self, vector_store, embedder, sample_code_chunks): + """Test basic similarity search.""" + # Add chunks + for data in sample_code_chunks: + chunk = SemanticChunk(content=data["content"], metadata=data["metadata"]) + chunk.embedding = embedder.embed_single(chunk.content) + vector_store.add_chunk(chunk, "/test/file.py") + + # Search + query = "function to authenticate user login" + query_embedding = embedder.embed_single(query) + results = vector_store.search_similar(query_embedding, top_k=3) + + assert len(results) > 0 + assert all(isinstance(r, SearchResult) for r in results) + # Top result should be auth-related + assert "authenticate" in results[0].excerpt.lower() or "auth" in results[0].path.lower() + + def test_search_respects_top_k(self, vector_store, embedder, sample_code_chunks): + """Test search respects top_k parameter.""" + # Add all chunks + for data in sample_code_chunks: + chunk = SemanticChunk(content=data["content"], metadata=data["metadata"]) + chunk.embedding = embedder.embed_single(chunk.content) + vector_store.add_chunk(chunk, "/test/file.py") + + query_embedding = embedder.embed_single("code") + + results_2 = vector_store.search_similar(query_embedding, top_k=2) + results_5 = 
vector_store.search_similar(query_embedding, top_k=5) + + assert len(results_2) <= 2 + assert len(results_5) <= 5 + + def test_search_min_score_filtering(self, vector_store, embedder): + """Test min_score filtering.""" + chunk = SemanticChunk( + content="def hello(): print('hello world')", + metadata={}, + ) + chunk.embedding = embedder.embed_single(chunk.content) + vector_store.add_chunk(chunk, "/test/hello.py") + + query_embedding = embedder.embed_single("database connection pool") + + results_no_filter = vector_store.search_similar(query_embedding, min_score=0.0) + results_high_filter = vector_store.search_similar(query_embedding, min_score=0.9) + + assert len(results_no_filter) >= len(results_high_filter) + + def test_search_returns_sorted_by_score(self, vector_store, embedder, sample_code_chunks): + """Test results are sorted by score descending.""" + for data in sample_code_chunks: + chunk = SemanticChunk(content=data["content"], metadata=data["metadata"]) + chunk.embedding = embedder.embed_single(chunk.content) + vector_store.add_chunk(chunk, "/test/file.py") + + query_embedding = embedder.embed_single("function") + results = vector_store.search_similar(query_embedding, top_k=5) + + if len(results) > 1: + for i in range(len(results) - 1): + assert results[i].score >= results[i + 1].score + + def test_search_includes_metadata(self, vector_store, embedder): + """Test search results include metadata.""" + chunk = SemanticChunk( + content="def test_function(): pass", + metadata={ + "symbol_name": "test_function", + "symbol_kind": "function", + "start_line": 10, + "end_line": 15, + }, + ) + chunk.embedding = embedder.embed_single(chunk.content) + vector_store.add_chunk(chunk, "/test/func.py") + + query_embedding = embedder.embed_single("test function") + results = vector_store.search_similar(query_embedding, top_k=1) + + assert len(results) == 1 + assert results[0].symbol_name == "test_function" + assert results[0].symbol_kind == "function" + assert results[0].start_line == 10 + assert results[0].end_line == 15 + + def test_search_empty_store_returns_empty(self, vector_store, embedder): + """Test search on empty store returns empty list.""" + query_embedding = embedder.embed_single("anything") + results = vector_store.search_similar(query_embedding) + assert results == [] + + def test_search_with_return_full_content_false(self, vector_store, embedder): + """Test search with return_full_content=False.""" + chunk = SemanticChunk( + content="def long_function(): " + "pass\n" * 100, + metadata={}, + ) + chunk.embedding = embedder.embed_single(chunk.content) + vector_store.add_chunk(chunk, "/test/long.py") + + query_embedding = embedder.embed_single("function") + results = vector_store.search_similar( + query_embedding, top_k=1, return_full_content=False + ) + + assert len(results) == 1 + assert results[0].content is None + assert results[0].excerpt is not None + + +class TestVectorStoreCache: + """Tests for VectorStore caching behavior.""" + + def test_cache_invalidation_on_add(self, vector_store, embedder): + """Test cache is invalidated when chunks are added.""" + chunk1 = SemanticChunk(content="def a(): pass", metadata={}) + chunk1.embedding = embedder.embed_single(chunk1.content) + vector_store.add_chunk(chunk1, "/test/a.py") + + # Trigger cache population + query_embedding = embedder.embed_single("function") + vector_store.search_similar(query_embedding) + + initial_version = vector_store._cache_version + + # Add another chunk + chunk2 = SemanticChunk(content="def b(): pass", 
metadata={}) + chunk2.embedding = embedder.embed_single(chunk2.content) + vector_store.add_chunk(chunk2, "/test/b.py") + + assert vector_store._cache_version > initial_version + assert vector_store._embedding_matrix is None + + def test_cache_invalidation_on_delete(self, vector_store, embedder): + """Test cache is invalidated when chunks are deleted.""" + chunk = SemanticChunk(content="def a(): pass", metadata={}) + chunk.embedding = embedder.embed_single(chunk.content) + vector_store.add_chunk(chunk, "/test/a.py") + + # Trigger cache population + query_embedding = embedder.embed_single("function") + vector_store.search_similar(query_embedding) + + initial_version = vector_store._cache_version + + # Delete chunk + vector_store.delete_file_chunks("/test/a.py") + + assert vector_store._cache_version > initial_version + + def test_manual_cache_clear(self, vector_store, embedder): + """Test manual cache clearing.""" + chunk = SemanticChunk(content="def a(): pass", metadata={}) + chunk.embedding = embedder.embed_single(chunk.content) + vector_store.add_chunk(chunk, "/test/a.py") + + # Trigger cache population + query_embedding = embedder.embed_single("function") + vector_store.search_similar(query_embedding) + + assert vector_store._embedding_matrix is not None + + vector_store.clear_cache() + + assert vector_store._embedding_matrix is None + + +# === Semantic Search Accuracy Tests === + +class TestSemanticSearchAccuracy: + """Tests for semantic search accuracy and relevance.""" + + def test_auth_query_finds_auth_code(self, vector_store, embedder, sample_code_chunks): + """Test authentication query finds auth code.""" + for data in sample_code_chunks: + chunk = SemanticChunk(content=data["content"], metadata=data["metadata"]) + chunk.embedding = embedder.embed_single(chunk.content) + vector_store.add_chunk(chunk, "/test/file.py") + + query = "user authentication login" + query_embedding = embedder.embed_single(query) + results = vector_store.search_similar(query_embedding, top_k=1) + + assert len(results) > 0 + assert "authenticate" in results[0].excerpt.lower() + + def test_database_query_finds_db_code(self, vector_store, embedder, sample_code_chunks): + """Test database query finds database code.""" + for data in sample_code_chunks: + chunk = SemanticChunk(content=data["content"], metadata=data["metadata"]) + chunk.embedding = embedder.embed_single(chunk.content) + vector_store.add_chunk(chunk, "/test/file.py") + + query = "database connection execute query" + query_embedding = embedder.embed_single(query) + results = vector_store.search_similar(query_embedding, top_k=1) + + assert len(results) > 0 + assert "database" in results[0].excerpt.lower() or "connect" in results[0].excerpt.lower() + + def test_math_query_finds_calculation_code(self, vector_store, embedder, sample_code_chunks): + """Test math query finds calculation code.""" + for data in sample_code_chunks: + chunk = SemanticChunk(content=data["content"], metadata=data["metadata"]) + chunk.embedding = embedder.embed_single(chunk.content) + vector_store.add_chunk(chunk, "/test/file.py") + + query = "sum numbers add calculation" + query_embedding = embedder.embed_single(query) + results = vector_store.search_similar(query_embedding, top_k=1) + + assert len(results) > 0 + assert "sum" in results[0].excerpt.lower() or "calculate" in results[0].excerpt.lower() + + +# === Performance Tests === + +class TestVectorSearchPerformance: + """Performance tests for vector search.""" + + def test_embedding_performance(self, embedder): + """Test 
embedding generation performance.""" + text = "def calculate_sum(a, b): return a + b" + + # Warm up + embedder.embed_single(text) + + # Measure + start = time.perf_counter() + iterations = 10 + for _ in range(iterations): + embedder.embed_single(text) + elapsed = time.perf_counter() - start + + avg_ms = (elapsed / iterations) * 1000 + assert avg_ms < 100, f"Single embedding should be <100ms, got {avg_ms:.2f}ms" + + def test_batch_embedding_performance(self, embedder): + """Test batch embedding performance.""" + texts = [f"def function_{i}(): pass" for i in range(50)] + + # Warm up + embedder.embed(texts[:5]) + + # Measure + start = time.perf_counter() + embedder.embed(texts) + elapsed = time.perf_counter() - start + + total_ms = elapsed * 1000 + per_text_ms = total_ms / len(texts) + assert per_text_ms < 20, f"Per-text embedding should be <20ms, got {per_text_ms:.2f}ms" + + def test_search_performance_small(self, vector_store, embedder): + """Test search performance with small dataset.""" + # Add 100 chunks + for i in range(100): + chunk = SemanticChunk( + content=f"def function_{i}(): return {i}", + metadata={"index": i}, + ) + chunk.embedding = embedder.embed_single(chunk.content) + vector_store.add_chunk(chunk, f"/test/file_{i}.py") + + query_embedding = embedder.embed_single("function return value") + + # Warm up + vector_store.search_similar(query_embedding) + + # Measure + start = time.perf_counter() + iterations = 10 + for _ in range(iterations): + vector_store.search_similar(query_embedding) + elapsed = time.perf_counter() - start + + avg_ms = (elapsed / iterations) * 1000 + assert avg_ms < 50, f"Search with 100 chunks should be <50ms, got {avg_ms:.2f}ms" + + def test_search_performance_medium(self, vector_store, embedder): + """Test search performance with medium dataset.""" + # Add 500 chunks in batch + chunks = [] + for i in range(500): + chunk = SemanticChunk( + content=f"def function_{i}(x): return x * {i}", + metadata={"index": i}, + ) + chunk.embedding = embedder.embed_single(chunk.content) + chunks.append(chunk) + + vector_store.add_chunks(chunks, "/test/bulk.py") + + query_embedding = embedder.embed_single("multiply value") + + # Warm up + vector_store.search_similar(query_embedding) + + # Measure + start = time.perf_counter() + iterations = 5 + for _ in range(iterations): + vector_store.search_similar(query_embedding) + elapsed = time.perf_counter() - start + + avg_ms = (elapsed / iterations) * 1000 + assert avg_ms < 100, f"Search with 500 chunks should be <100ms, got {avg_ms:.2f}ms" + + +# === Thread Safety Tests === + +class TestThreadSafety: + """Tests for thread safety.""" + + def test_concurrent_searches(self, vector_store, embedder, sample_code_chunks): + """Test concurrent searches are thread-safe.""" + # Populate store + for data in sample_code_chunks: + chunk = SemanticChunk(content=data["content"], metadata=data["metadata"]) + chunk.embedding = embedder.embed_single(chunk.content) + vector_store.add_chunk(chunk, "/test/file.py") + + results_list = [] + errors = [] + + def search_task(query): + try: + query_embedding = embedder.embed_single(query) + results = vector_store.search_similar(query_embedding, top_k=3) + results_list.append(len(results)) + except Exception as e: + errors.append(str(e)) + + queries = ["authentication", "database", "function", "class", "async"] + threads = [threading.Thread(target=search_task, args=(q,)) for q in queries] + + for t in threads: + t.start() + for t in threads: + t.join() + + assert len(errors) == 0, f"Errors during 
concurrent search: {errors}" + assert len(results_list) == len(queries) + + def test_concurrent_add_and_search(self, vector_store, embedder): + """Test concurrent add and search operations.""" + errors = [] + + def add_task(idx): + try: + chunk = SemanticChunk( + content=f"def task_{idx}(): pass", + metadata={"idx": idx}, + ) + chunk.embedding = embedder.embed_single(chunk.content) + vector_store.add_chunk(chunk, f"/test/task_{idx}.py") + except Exception as e: + errors.append(f"Add error: {e}") + + def search_task(): + try: + query_embedding = embedder.embed_single("function task") + vector_store.search_similar(query_embedding) + except Exception as e: + errors.append(f"Search error: {e}") + + threads = [] + for i in range(10): + threads.append(threading.Thread(target=add_task, args=(i,))) + threads.append(threading.Thread(target=search_task)) + + for t in threads: + t.start() + for t in threads: + t.join() + + assert len(errors) == 0, f"Errors during concurrent ops: {errors}" + + +# === Edge Cases === + +class TestEdgeCases: + """Tests for edge cases.""" + + def test_very_short_content(self, vector_store, embedder): + """Test handling very short content.""" + chunk = SemanticChunk(content="x", metadata={}) + chunk.embedding = embedder.embed_single(chunk.content) + vector_store.add_chunk(chunk, "/test/short.py") + + query_embedding = embedder.embed_single("x") + results = vector_store.search_similar(query_embedding) + + assert len(results) == 1 + + def test_special_characters_in_path(self, vector_store, embedder): + """Test handling special characters in file path.""" + chunk = SemanticChunk(content="def test(): pass", metadata={}) + chunk.embedding = embedder.embed_single(chunk.content) + + special_path = "/test/path with spaces/file-name_v2.py" + vector_store.add_chunk(chunk, special_path) + + query_embedding = embedder.embed_single("test function") + results = vector_store.search_similar(query_embedding) + + assert len(results) == 1 + assert results[0].path == special_path + + def test_json_metadata_special_chars(self, vector_store, embedder): + """Test metadata with special JSON characters.""" + metadata = { + "description": 'Test "quoted" text with \'single\' quotes', + "path": "C:\\Users\\test\\file.py", + "tags": ["tag1", "tag2"], + } + chunk = SemanticChunk(content="def test(): pass", metadata=metadata) + chunk.embedding = embedder.embed_single(chunk.content) + + vector_store.add_chunk(chunk, "/test/special.py") + + query_embedding = embedder.embed_single("test") + results = vector_store.search_similar(query_embedding) + + assert len(results) == 1 + assert results[0].metadata["description"] == metadata["description"] + + def test_search_zero_top_k(self, vector_store, embedder): + """Test search with top_k=0.""" + chunk = SemanticChunk(content="def test(): pass", metadata={}) + chunk.embedding = embedder.embed_single(chunk.content) + vector_store.add_chunk(chunk, "/test/file.py") + + query_embedding = embedder.embed_single("test") + results = vector_store.search_similar(query_embedding, top_k=0) + + assert results == [] + + def test_search_very_high_min_score(self, vector_store, embedder): + """Test search with very high min_score filters all results.""" + chunk = SemanticChunk(content="def hello(): print('world')", metadata={}) + chunk.embedding = embedder.embed_single(chunk.content) + vector_store.add_chunk(chunk, "/test/hello.py") + + # Query something unrelated with very high threshold + query_embedding = embedder.embed_single("database connection") + results = 
vector_store.search_similar(query_embedding, min_score=0.99) + + # Should filter out since unrelated + assert len(results) == 0 + + +# === Availability Check Tests === + +class TestAvailabilityCheck: + """Tests for semantic availability checking.""" + + def test_check_semantic_available(self): + """Test check_semantic_available function.""" + available, error = check_semantic_available() + assert available is True + assert error is None + + def test_semantic_available_flag(self): + """Test SEMANTIC_AVAILABLE flag is True when deps installed.""" + assert SEMANTIC_AVAILABLE is True
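+
+
+# === Usage sketch (illustrative) ===
+#
+# A minimal, non-test sketch of the flow exercised above, assuming only the
+# Embedder/VectorStore APIs these tests already use:
+#
+#     from pathlib import Path
+#     from codexlens.entities import SemanticChunk
+#     from codexlens.semantic.embedder import Embedder
+#     from codexlens.semantic.vector_store import VectorStore
+#
+#     embedder = Embedder()
+#     store = VectorStore(Path("semantic.db"))
+#
+#     chunk = SemanticChunk(content="def login(user, pwd): ...", metadata={})
+#     chunk.embedding = embedder.embed_single(chunk.content)
+#     store.add_chunk(chunk, "/src/auth.py")
+#
+#     hits = store.search_similar(embedder.embed_single("user login"), top_k=3)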