Mirror of https://github.com/catlog22/Claude-Code-Workflow.git (synced 2026-02-09 02:24:11 +08:00)
Compare commits
60 Commits
| SHA1 |
|---|
| f64f619713 |
| a742fa0f8a |
| 6894c7e80b |
| 203100431b |
| e8b9bcae92 |
| 052351ab5b |
| 9dd84e3416 |
| 211c25d969 |
| 275684d319 |
| 0f8a47e8f6 |
| 303c840464 |
| b15008fbce |
| a8cf3e1ad6 |
| 0515ef6e8b |
| 777d5df573 |
| c5f379ba01 |
| 145d38c9bd |
| eab957ce00 |
| b5fb077ad6 |
| ebcbb11cb2 |
| a1413dd1b3 |
| 4e6ee2db25 |
| 8e744597d1 |
| dfa8b541b4 |
| 1dc55f8811 |
| 501d9a05d4 |
| 229d51cd18 |
| 40e61b30d6 |
| 3c3ce55842 |
| e3e61bcae9 |
| dfca4d60ee |
| e671b45948 |
| b00113d212 |
| 9b926d1a1e |
| 98c9f1a830 |
| 46ac591fe8 |
| bf66b095c7 |
| 5228581324 |
| c9c704e671 |
| 16d4c7c646 |
| 39056292b7 |
| 87ffd283ce |
| 8eb42816f1 |
| ebdf64c0b9 |
| caab5f476e |
| 1998f3ae8a |
| 5ff2a43b70 |
| 3cd842ca1a |
| 86cefa7bda |
| fdac697f6e |
| 8203d690cb |
| cf58dc0dd3 |
| 6a69af3bf1 |
| acdfbb4644 |
| 72f24bf535 |
| ba23244876 |
| 624f9f18b4 |
| 17002345c9 |
| f3f2051c45 |
| e60d793c8c |
````diff
@@ -2,5 +2,24 @@
 - **CLI Tools Usage**: @~/.claude/workflows/cli-tools-usage.md
 - **Coding Philosophy**: @~/.claude/workflows/coding-philosophy.md
-- **Context Requirements**: @~/.claude/workflows/context-tools.md
+- **Context Requirements**: @~/.claude/workflows/context-tools-ace.md
 - **File Modification**: @~/.claude/workflows/file-modification.md
+- **CLI Endpoints Config**: @.claude/cli-tools.json
+
+## CLI Endpoints
+
+**Strictly follow the @.claude/cli-tools.json configuration**
+
+Available CLI endpoints are dynamically defined by the config file:
+- Built-in tools and their enable/disable status
+- Custom API endpoints registered via the Dashboard
+- Managed through the CCW Dashboard Status page
+
+## Agent Execution
+
+- **Always use `run_in_background: false`** for Task tool agent calls: `Task({ subagent_type: "xxx", prompt: "...", run_in_background: false })` to ensure synchronous execution and immediate result visibility
+- **TaskOutput usage**: Only use `TaskOutput({ task_id: "xxx", block: false })` + sleep loop to poll completion status. NEVER read intermediate output during agent/CLI execution - wait for final result only
+
+## Code Diagnostics
+
+- **Prefer `mcp__ide__getDiagnostics`** for code error checking over shell-based TypeScript compilation
````
**.claude/cli-tools.json** (new file, 47 lines):

```json
{
  "version": "1.0.0",
  "tools": {
    "gemini": {
      "enabled": true,
      "isBuiltin": true,
      "command": "gemini",
      "description": "Google AI for code analysis"
    },
    "qwen": {
      "enabled": true,
      "isBuiltin": true,
      "command": "qwen",
      "description": "Alibaba AI assistant"
    },
    "codex": {
      "enabled": true,
      "isBuiltin": true,
      "command": "codex",
      "description": "OpenAI code generation"
    },
    "claude": {
      "enabled": true,
      "isBuiltin": true,
      "command": "claude",
      "description": "Anthropic AI assistant"
    }
  },
  "customEndpoints": [],
  "defaultTool": "gemini",
  "settings": {
    "promptFormat": "plain",
    "smartContext": {
      "enabled": false,
      "maxFiles": 10
    },
    "nativeResume": true,
    "recursiveQuery": true,
    "cache": {
      "injectionMode": "auto",
      "defaultPrefix": "",
      "defaultSuffix": ""
    },
    "codeIndexMcp": "ace"
  },
  "$schema": "./cli-tools.schema.json"
}
```
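For reference, a hedged sketch of how a consumer might read this config and resolve the enabled tools; the file path resolution and Node.js APIs are assumptions, not something the diff specifies:

```javascript
// Minimal sketch: list enabled CLI tools from .claude/cli-tools.json (location assumed)
const fs = require('fs');

const config = JSON.parse(fs.readFileSync('.claude/cli-tools.json', 'utf8'));

const enabledTools = Object.entries(config.tools)
  .filter(([, tool]) => tool.enabled)
  .map(([name, tool]) => ({ name, command: tool.command }));

const defaultTool = config.defaultTool;         // e.g. "gemini"
const customEndpoints = config.customEndpoints; // endpoints registered via the Dashboard
console.log({ defaultTool, enabledTools, customEndpoints });
```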
````diff
@@ -140,11 +140,17 @@ function selectAngles(taskDescription, count)
 
 const selectedAngles = selectAngles(task_description, complexity === 'High' ? 4 : (complexity === 'Medium' ? 3 : 1))
 
+// Planning strategy determination
+const planningStrategy = complexity === 'Low'
+  ? 'Direct Claude Planning'
+  : 'cli-lite-planning-agent'
+
 console.log(`
 ## Exploration Plan
 
 Task Complexity: ${complexity}
 Selected Angles: ${selectedAngles.join(', ')}
+Planning Strategy: ${planningStrategy}
 
 Launching ${selectedAngles.length} parallel explorations...
 `)
````
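The hunk above calls `selectAngles`, but its body is not part of this diff. Purely as a hypothetical sketch, with an invented angle catalog and no ranking logic, to show the shape implied by the call site:

```javascript
// Hypothetical shape of selectAngles; the angle names below are illustrative placeholders only
function selectAngles(taskDescription, count) {
  const allAngles = ['architecture', 'dependencies', 'testing', 'performance'];
  // A real implementation would rank angles by relevance to taskDescription;
  // this sketch simply takes the first `count` entries.
  return allAngles.slice(0, count);
}
```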
````diff
@@ -358,10 +364,7 @@ if (dedupedClarifications.length > 0) {
 ```javascript
 // Assignment rules (highest priority first):
 // 1. Explicit user choice: "use gemini to analyze..." → gemini, "implement with codex..." → codex
-// 2. Inferred from task type:
-//    - analyze|review|evaluate|explore → gemini
-//    - implement|create|modify|fix → codex (complex) or agent (simple)
-// 3. Default → agent
+// 2. Default → agent
 
 const executorAssignments = {} // { taskId: { executor: 'gemini'|'codex'|'agent', reason: string } }
 plan.tasks.forEach(task => {
````
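A minimal sketch of the simplified rule above (explicit user choice, otherwise `agent`); the `task.id` and `task.description` fields and the regex matching are assumptions, since the loop body is not shown in the diff:

```javascript
// Sketch only: assign an executor per task under the simplified rules
plan.tasks.forEach(task => {
  const explicit = /\bgemini\b/i.test(task.description) ? 'gemini'
                 : /\bcodex\b/i.test(task.description)  ? 'codex'
                 : null;
  executorAssignments[task.id] = explicit
    ? { executor: explicit, reason: 'explicitly requested by user' }
    : { executor: 'agent', reason: 'default' };
});
```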
````diff
@@ -1,10 +1,17 @@
 # Analysis Mode Protocol
 
 ## Mode Definition
 
 **Mode**: `analysis` (READ-ONLY)
-**Tools**: Gemini, Qwen (default mode)
+## Prompt Structure
 
+```
+PURPOSE: [development goal]
+TASK: [specific implementation task]
+MODE: [auto|write]
+CONTEXT: [file patterns]
+EXPECTED: [deliverables]
+RULES: [templates | additional constraints]
+```
 ## Operation Boundaries
 
 ### ALLOWED Operations
````

````diff
@@ -27,8 +34,8 @@
 2. **Read** and analyze CONTEXT files thoroughly
 3. **Identify** patterns, issues, and dependencies
 4. **Generate** insights and recommendations
-5. **Output** structured analysis (text response only)
-6. **Validate** EXPECTED deliverables met
+5. **Validate** EXPECTED deliverables met
+6. **Output** structured analysis (text response only)
 
 ## Core Requirements
 
````
````diff
@@ -1,10 +1,14 @@
 # Write Mode Protocol
+## Prompt Structure
 
-## Mode Definition
-
-**Mode**: `write` (FILE OPERATIONS) / `auto` (FULL OPERATIONS)
-**Tools**: Codex (auto), Gemini/Qwen (write)
+```
+PURPOSE: [development goal]
+TASK: [specific implementation task]
+MODE: [auto|write]
+CONTEXT: [file patterns]
+EXPECTED: [deliverables]
+RULES: [templates | additional constraints]
+```
 ## Operation Boundaries
 
 ### MODE: write
````

````diff
@@ -15,12 +19,6 @@
 
 **Restrictions**: Follow project conventions, cannot break existing functionality
 
-### MODE: auto (Codex only)
-- All `write` mode operations
-- Run tests and builds
-- Commit code incrementally
-- Full autonomous development
-
 **Constraint**: Must test every change
 
 ## Execution Flow
````

````diff
@@ -33,16 +31,6 @@
 5. **Validate** changes
 6. **Report** file changes
 
-### MODE: auto
-1. **Parse** all 6 fields
-2. **Analyze** CONTEXT files - find 3+ similar patterns
-3. **Plan** implementation following RULES
-4. **Generate** code with tests
-5. **Run** tests continuously
-6. **Commit** working code incrementally
-7. **Validate** EXPECTED deliverables
-8. **Report** results
-
 ## Core Requirements
 
 **ALWAYS**:
````

````diff
@@ -61,17 +49,6 @@
 - Break backward compatibility
 - Exceed 3 failed attempts without stopping
 
-## Multi-Task Execution (Resume)
-
-**First subtask**: Standard execution flow
-**Subsequent subtasks** (via `resume`):
-- Recall context from previous subtasks
-- Build on previous work
-- Maintain consistency
-- Test integration
-- Report context for next subtask
-
-## Error Handling
-
 **Three-Attempt Rule**: On 3rd failure, stop and report what attempted, what failed, root cause
 
````

````diff
@@ -92,7 +69,7 @@
 
 **If template has no format** → Use default format below
 
-### Single Task Implementation
+### Task Implementation
 
 ```markdown
 # Implementation: [TASK Title]
````

````diff
@@ -124,48 +101,6 @@
 [Recommendations if any]
 ```
 
-### Multi-Task (First Subtask)
-
-```markdown
-# Subtask 1/N: [TASK Title]
-
-## Changes
-[List of file changes]
-
-## Implementation
-[Details with code references]
-
-## Testing
-✅ Tests: X passing
-
-## Context for Next Subtask
-- Key decisions: [established patterns]
-- Files created: [paths and purposes]
-- Integration points: [where next subtask should connect]
-```
-
-### Multi-Task (Subsequent Subtasks)
-
-```markdown
-# Subtask N/M: [TASK Title]
-
-## Changes
-[List of file changes]
-
-## Integration Notes
-✅ Compatible with previous subtask
-✅ Maintains established patterns
-
-## Implementation
-[Details with code references]
-
-## Testing
-✅ Tests: X passing
-
-## Context for Next Subtask
-[If not final, provide context]
-```
-
 ### Partial Completion
 
 ```markdown
````
````diff
@@ -362,10 +362,6 @@ ccw cli -p "RULES: \$(cat ~/.claude/workflows/cli-templates/protocols/analysis-p
   - Description: Additional directories (comma-separated)
   - Default: none
 
-- **`--timeout <ms>`**
-  - Description: Timeout in milliseconds
-  - Default: 300000
-
 - **`--resume [id]`**
   - Description: Resume previous session
   - Default: -
````

````diff
@@ -430,7 +426,7 @@ MODE: analysis
 CONTEXT: @src/auth/**/* @src/middleware/auth.ts | Memory: Using bcrypt for passwords, JWT for sessions
 EXPECTED: Security report with: severity matrix, file:line references, CVE mappings where applicable, remediation code snippets prioritized by risk
 RULES: $(cat ~/.claude/workflows/cli-templates/protocols/analysis-protocol.md) $(cat ~/.claude/workflows/cli-templates/prompts/analysis/03-assess-security-risks.txt) | Focus on authentication | Ignore test files
-" --tool gemini --cd src/auth --timeout 600000
+" --tool gemini --mode analysis --cd src/auth
 ```
 
 **Implementation Task** (New Feature):
````

````diff
@@ -442,7 +438,7 @@ MODE: write
 CONTEXT: @src/middleware/**/* @src/config/**/* | Memory: Using Express.js, Redis already configured, existing middleware pattern in auth.ts
 EXPECTED: Production-ready code with: TypeScript types, unit tests, integration test, configuration example, migration guide
 RULES: $(cat ~/.claude/workflows/cli-templates/protocols/write-protocol.md) $(cat ~/.claude/workflows/cli-templates/prompts/development/02-implement-feature.txt) | Follow existing middleware patterns | No breaking changes
-" --tool codex --mode write --timeout 1800000
+" --tool codex --mode write
 ```
 
 **Bug Fix Task**:
````

````diff
@@ -454,7 +450,7 @@ MODE: analysis
 CONTEXT: @src/websocket/**/* @src/services/connection-manager.ts | Memory: Using ws library, ~5000 concurrent connections in production
 EXPECTED: Root cause analysis with: memory profile, leak source (file:line), fix recommendation with code, verification steps
 RULES: $(cat ~/.claude/workflows/cli-templates/protocols/analysis-protocol.md) $(cat ~/.claude/workflows/cli-templates/prompts/analysis/01-diagnose-bug-root-cause.txt) | Focus on resource cleanup
-" --tool gemini --cd src --timeout 900000
+" --tool gemini --mode analysis --cd src
 ```
 
 **Refactoring Task**:
````

````diff
@@ -466,30 +462,25 @@ MODE: write
 CONTEXT: @src/payments/**/* @src/types/payment.ts | Memory: Currently only Stripe, adding PayPal next sprint, must support future gateways
 EXPECTED: Refactored code with: strategy interface, concrete implementations, factory class, updated tests, migration checklist
 RULES: $(cat ~/.claude/workflows/cli-templates/protocols/write-protocol.md) $(cat ~/.claude/workflows/cli-templates/prompts/development/02-refactor-codebase.txt) | Preserve all existing behavior | Tests must pass
-" --tool gemini --mode write --timeout 1200000
+" --tool gemini --mode write
 ```
 ---
 
-## Configuration
+## ⚙️ Execution Configuration
 
-### Timeout Allocation
+### Dynamic Timeout Allocation
 
-**Minimum**: 5 minutes (300000ms)
+**Minimum timeout: 5 minutes (300000ms)** - Never set below this threshold.
 
-- **Simple**: 5-10min (300000-600000ms)
-  - Examples: Analysis, search
-
-- **Medium**: 10-20min (600000-1200000ms)
-  - Examples: Refactoring, documentation
-
-- **Complex**: 20-60min (1200000-3600000ms)
-  - Examples: Implementation, migration
-
-- **Heavy**: 60-120min (3600000-7200000ms)
-  - Examples: Large codebase, multi-file
-
-**Codex Multiplier**: 3x allocated time (minimum 15min / 900000ms)
+**Timeout Ranges**:
+- **Simple** (analysis, search): 5-10min (300000-600000ms)
+- **Medium** (refactoring, documentation): 10-20min (600000-1200000ms)
+- **Complex** (implementation, migration): 20-60min (1200000-3600000ms)
+- **Heavy** (large codebase, multi-file): 60-120min (3600000-7200000ms)
 
+**Codex Multiplier**: 3x of allocated time (minimum 15min / 900000ms)
+
+**Auto-detection**: Analyze PURPOSE and TASK fields to determine timeout
 
 ### Permission Framework
 
````
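Collecting the timeout rules above in one place, a hedged sketch; the category keys map directly to the documented ranges, and nothing here is taken from the actual implementation:

```javascript
// Sketch of the timeout allocation rules (complexity detection itself is out of scope here)
const TIMEOUT_RANGES = {
  simple:  { min: 300000,  max: 600000  }, // 5-10 min: analysis, search
  medium:  { min: 600000,  max: 1200000 }, // 10-20 min: refactoring, documentation
  complex: { min: 1200000, max: 3600000 }, // 20-60 min: implementation, migration
  heavy:   { min: 3600000, max: 7200000 }, // 60-120 min: large codebase, multi-file
};

function allocateTimeout(category, tool) {
  let timeout = Math.max(TIMEOUT_RANGES[category].min, 300000); // never below 5 minutes
  if (tool === 'codex') {
    timeout = Math.max(timeout * 3, 900000); // Codex multiplier: 3x, minimum 15 min
  }
  return timeout;
}
```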
````diff
@@ -523,4 +514,3 @@ RULES: $(cat ~/.claude/workflows/cli-templates/protocols/write-protocol.md) $(ca
 - [ ] **Tool selected** - `--tool gemini|qwen|codex`
 - [ ] **Template applied (REQUIRED)** - Use specific or universal fallback template
 - [ ] **Constraints specified** - Scope, requirements
-- [ ] **Timeout configured** - Based on complexity
````
**.claude/workflows/context-tools-ace.md** (new file, 105 lines):

## MCP Tools Usage

### search_context (ACE) - Code Search (REQUIRED - HIGHEST PRIORITY)

**OVERRIDES**: All other search/discovery rules in other workflow files

**When**: ANY code discovery task, including:
- Find code, understand codebase structure, locate implementations
- Explore unknown locations
- Verify file existence before reading
- Pattern-based file discovery
- Semantic code understanding

**Priority Rule**:
1. **Always use mcp__ace-tool__search_context FIRST** for any code/file discovery
2. Only use Built-in Grep for single-file exact line search (after location confirmed)
3. Only use Built-in Read for known, confirmed file paths

**How**:
```javascript
// Natural language code search - best for understanding and exploration
mcp__ace-tool__search_context({
  project_root_path: "/path/to/project",
  query: "authentication logic"
})

// With keywords for better semantic matching
mcp__ace-tool__search_context({
  project_root_path: "/path/to/project",
  query: "I want to find where the server handles user login. Keywords: auth, login, session"
})
```

**Good Query Examples**:
- "Where is the function that handles user authentication?"
- "What tests are there for the login functionality?"
- "How is the database connected to the application?"
- "I want to find where the server handles chunk merging. Keywords: upload chunk merge"
- "Locate where the system refreshes cached data. Keywords: cache refresh, invalidation"

**Bad Query Examples** (use grep or file view instead):
- "Find definition of constructor of class Foo" (use grep tool instead)
- "Find all references to function bar" (use grep tool instead)
- "Show me how Checkout class is used in services/payment.py" (use file view tool instead)

**Key Features**:
- Real-time index of the codebase (always up-to-date)
- Cross-language retrieval support
- Semantic search with embeddings
- No manual index initialization required

---

### read_file - Read File Contents

**When**: Read files found by search_context

**How**:
```javascript
read_file(path="/path/to/file.ts")     // Single file
read_file(path="/src/**/*.config.ts")  // Pattern matching
```

---

### edit_file - Modify Files

**When**: Built-in Edit tool fails or need advanced features

**How**:
```javascript
edit_file(path="/file.ts", old_string="...", new_string="...", mode="update")
edit_file(path="/file.ts", line=10, content="...", mode="insert_after")
```

**Modes**: `update` (replace text), `insert_after`, `insert_before`, `delete_line`

---

### write_file - Create/Overwrite Files

**When**: Create new files or completely replace content

**How**:
```javascript
write_file(path="/new-file.ts", content="...")
```

---

### Exa - External Search

**When**: Find documentation/examples outside codebase

**How**:
```javascript
mcp__exa__search(query="React hooks 2025 documentation")
mcp__exa__search(query="FastAPI auth example", numResults=10)
mcp__exa__search(query="latest API docs", livecrawl="always")
```

**Parameters**:
- `query` (required): Search query string
- `numResults` (optional): Number of results to return (default: 5)
- `livecrawl` (optional): `"always"` or `"fallback"` for live crawling
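Tying the tools above together, a pseudocode sketch in the same call notation as the examples in this file; the project path, query, and file name are illustrative only:

```javascript
// Pseudocode sketch: discover, then read, then edit (paths are placeholders)
// 1. Semantic discovery
mcp__ace-tool__search_context({
  project_root_path: "/path/to/project",
  query: "Where is the token refresh logic? Keywords: token, refresh, session"
})
// 2. Read the file the search returned
read_file(path="/path/to/project/src/auth/refresh.ts")
// 3. Apply a targeted change
edit_file(path="/path/to/project/src/auth/refresh.ts", old_string="...", new_string="...", mode="update")
```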
````diff
@@ -21,8 +21,11 @@
 - Graceful degradation
 - Don't expose sensitive info
 
+
+
 ## Core Principles
 
+
 **Incremental Progress**:
 - Small, testable changes
 - Commit working code frequently
````

````diff
@@ -43,11 +46,58 @@
 - Maintain established patterns
 - Test integration between subtasks
 
+
+## System Optimization
+
+**Direct Binary Calls**: Always call binaries directly in `functions.shell`, set `workdir`, avoid shell wrappers (`bash -lc`, `cmd /c`, etc.)
+
+**Text Editing Priority**:
+1. Use `apply_patch` tool for all routine text edits
+2. Fall back to `sed` for single-line substitutions if unavailable
+3. Avoid Python editing scripts unless both fail
+
+**apply_patch invocation**:
+```json
+{
+  "command": ["apply_patch", "*** Begin Patch\n*** Update File: path/to/file\n@@\n- old\n+ new\n*** End Patch\n"],
+  "workdir": "<workdir>",
+  "justification": "Brief reason"
+}
+```
+
+**Windows UTF-8 Encoding** (before commands):
+```powershell
+[Console]::InputEncoding = [Text.UTF8Encoding]::new($false)
+[Console]::OutputEncoding = [Text.UTF8Encoding]::new($false)
+chcp 65001 > $null
+```
+
+## Context Acquisition (MCP Tools Priority)
+
+**For task context gathering and analysis, ALWAYS prefer MCP tools**:
+
+1. **smart_search** - First choice for code discovery
+   - Use `smart_search(query="...")` for semantic/keyword search
+   - Use `smart_search(action="find_files", pattern="*.ts")` for file discovery
+   - Supports modes: `auto`, `hybrid`, `exact`, `ripgrep`
+
+2. **read_file** - Batch file reading
+   - Read multiple files in parallel: `read_file(path="file1.ts")`, `read_file(path="file2.ts")`
+   - Supports glob patterns: `read_file(path="src/**/*.config.ts")`
+
+**Priority Order**:
+```
+smart_search (discovery) → read_file (batch read) → shell commands (fallback)
+```
+
+**NEVER** use shell commands (`cat`, `find`, `grep`) when MCP tools are available.
+
 ## Execution Checklist
 
 **Before**:
 - [ ] Understand PURPOSE and TASK clearly
-- [ ] Review CONTEXT files, find 3+ patterns
+- [ ] Use smart_search to discover relevant files
+- [ ] Use read_file to batch read context files, find 3+ patterns
 - [ ] Check RULES templates and constraints
 
 **During**:
````
Binary file not shown.
**.codex/prompts/compact.md** (new file, 378 lines):

---
description: Compact current session memory into structured text for session recovery
argument-hint: "[optional: session description]"
---

# Memory Compact Command (/memory:compact)

## 1. Overview

The `memory:compact` command **compresses current session working memory** into structured text optimized for **session recovery**, extracts critical information, and saves it to persistent storage via MCP `core_memory` tool.

**Core Philosophy**:
- **Session Recovery First**: Capture everything needed to resume work seamlessly
- **Minimize Re-exploration**: Include file paths, decisions, and state to avoid redundant analysis
- **Preserve Train of Thought**: Keep notes and hypotheses for complex debugging
- **Actionable State**: Record last action result and known issues

## 2. Parameters

- `"session description"` (Optional): Session description to supplement objective
  - Example: "completed core-memory module"
  - Example: "debugging JWT refresh - suspected memory leak"

## 3. Structured Output Format

```markdown
## Session ID
[WFS-ID if workflow session active, otherwise (none)]

## Project Root
[Absolute path to project root, e.g., D:\Claude_dms3]

## Objective
[High-level goal - the "North Star" of this session]

## Execution Plan
[CRITICAL: Embed the LATEST plan in its COMPLETE and DETAILED form]

### Source: [workflow | todo | user-stated | inferred]

<details>
<summary>Full Execution Plan (Click to expand)</summary>

[PRESERVE COMPLETE PLAN VERBATIM - DO NOT SUMMARIZE]
- ALL phases, tasks, subtasks
- ALL file paths (absolute)
- ALL dependencies and prerequisites
- ALL acceptance criteria
- ALL status markers ([x] done, [ ] pending)
- ALL notes and context

Example:
## Phase 1: Setup
- [x] Initialize project structure
  - Created D:\Claude_dms3\src\core\index.ts
  - Added dependencies: lodash, zod
- [ ] Configure TypeScript
  - Update tsconfig.json for strict mode

## Phase 2: Implementation
- [ ] Implement core API
  - Target: D:\Claude_dms3\src\api\handler.ts
  - Dependencies: Phase 1 complete
  - Acceptance: All tests pass

</details>

## Working Files (Modified)
[Absolute paths to actively modified files]
- D:\Claude_dms3\src\file1.ts (role: main implementation)
- D:\Claude_dms3\tests\file1.test.ts (role: unit tests)

## Reference Files (Read-Only)
[Absolute paths to context files - NOT modified but essential for understanding]
- D:\Claude_dms3\.claude\CLAUDE.md (role: project instructions)
- D:\Claude_dms3\src\types\index.ts (role: type definitions)
- D:\Claude_dms3\package.json (role: dependencies)

## Last Action
[Last significant action and its result/status]

## Decisions
- [Decision]: [Reasoning]
- [Decision]: [Reasoning]

## Constraints
- [User-specified limitation or preference]

## Dependencies
- [Added/changed packages or environment requirements]

## Known Issues
- [Deferred bug or edge case]

## Changes Made
- [Completed modification]

## Pending
- [Next step] or (none)

## Notes
[Unstructured thoughts, hypotheses, debugging trails]
```

## 4. Field Definitions

| Field | Purpose | Recovery Value |
|-------|---------|----------------|
| **Session ID** | Workflow session identifier (WFS-*) | Links memory to specific stateful task execution |
| **Project Root** | Absolute path to project directory | Enables correct path resolution in new sessions |
| **Objective** | Ultimate goal of the session | Prevents losing track of broader feature |
| **Execution Plan** | Complete plan from any source (verbatim) | Preserves full planning context, avoids re-planning |
| **Working Files** | Actively modified files (absolute paths) | Immediately identifies where work was happening |
| **Reference Files** | Read-only context files (absolute paths) | Eliminates re-exploration for critical context |
| **Last Action** | Final tool output/status | Immediate state awareness (success/failure) |
| **Decisions** | Architectural choices + reasoning | Prevents re-litigating settled decisions |
| **Constraints** | User-imposed limitations | Maintains personalized coding style |
| **Dependencies** | Package/environment changes | Prevents missing dependency errors |
| **Known Issues** | Deferred bugs/edge cases | Ensures issues aren't forgotten |
| **Changes Made** | Completed modifications | Clear record of what was done |
| **Pending** | Next steps | Immediate action items |
| **Notes** | Hypotheses, debugging trails | Preserves "train of thought" |

## 5. Execution Flow

### Step 1: Analyze Current Session

Extract the following from conversation history:

```javascript
const sessionAnalysis = {
  sessionId: "",        // WFS-* if workflow session active, null otherwise
  projectRoot: "",      // Absolute path: D:\Claude_dms3
  objective: "",        // High-level goal (1-2 sentences)
  executionPlan: {
    source: "workflow" | "todo" | "user-stated" | "inferred",
    content: ""         // Full plan content - ALWAYS preserve COMPLETE and DETAILED form
  },
  workingFiles: [],     // {absolutePath, role} - modified files
  referenceFiles: [],   // {absolutePath, role} - read-only context files
  lastAction: "",       // Last significant action + result
  decisions: [],        // {decision, reasoning}
  constraints: [],      // User-specified limitations
  dependencies: [],     // Added/changed packages
  knownIssues: [],      // Deferred bugs
  changesMade: [],      // Completed modifications
  pending: [],          // Next steps
  notes: ""             // Unstructured thoughts
}
```

### Step 2: Generate Structured Text

```javascript
// Helper: Generate execution plan section
const generateExecutionPlan = (plan) => {
  const sourceLabels = {
    'workflow': 'workflow (IMPL_PLAN.md)',
    'todo': 'todo (TodoWrite)',
    'user-stated': 'user-stated',
    'inferred': 'inferred'
  };

  // CRITICAL: Preserve complete plan content verbatim - DO NOT summarize
  return `### Source: ${sourceLabels[plan.source] || plan.source}

<details>
<summary>Full Execution Plan (Click to expand)</summary>

${plan.content}

</details>`;
};

const structuredText = `## Session ID
${sessionAnalysis.sessionId || '(none)'}

## Project Root
${sessionAnalysis.projectRoot}

## Objective
${sessionAnalysis.objective}

## Execution Plan
${generateExecutionPlan(sessionAnalysis.executionPlan)}

## Working Files (Modified)
${sessionAnalysis.workingFiles.map(f => `- ${f.absolutePath} (role: ${f.role})`).join('\n') || '(none)'}

## Reference Files (Read-Only)
${sessionAnalysis.referenceFiles.map(f => `- ${f.absolutePath} (role: ${f.role})`).join('\n') || '(none)'}

## Last Action
${sessionAnalysis.lastAction}

## Decisions
${sessionAnalysis.decisions.map(d => `- ${d.decision}: ${d.reasoning}`).join('\n') || '(none)'}

## Constraints
${sessionAnalysis.constraints.map(c => `- ${c}`).join('\n') || '(none)'}

## Dependencies
${sessionAnalysis.dependencies.map(d => `- ${d}`).join('\n') || '(none)'}

## Known Issues
${sessionAnalysis.knownIssues.map(i => `- ${i}`).join('\n') || '(none)'}

## Changes Made
${sessionAnalysis.changesMade.map(c => `- ${c}`).join('\n') || '(none)'}

## Pending
${sessionAnalysis.pending.length > 0
  ? sessionAnalysis.pending.map(p => `- ${p}`).join('\n')
  : '(none)'}

## Notes
${sessionAnalysis.notes || '(none)'}`
```

### Step 3: Import to Core Memory via MCP

Use the MCP `core_memory` tool to save the structured text:

```javascript
mcp__ccw-tools__core_memory({
  operation: "import",
  text: structuredText
})
```

Or via CLI (pipe structured text to import):

```bash
# Write structured text to temp file, then import
echo "$structuredText" | ccw core-memory import

# Or from a file
ccw core-memory import --file /path/to/session-memory.md
```

**Response Format**:
```json
{
  "operation": "import",
  "id": "CMEM-YYYYMMDD-HHMMSS",
  "message": "Created memory: CMEM-YYYYMMDD-HHMMSS"
}
```

### Step 4: Report Recovery ID

After successful import, **clearly display the Recovery ID** to the user:

```
╔════════════════════════════════════════════════════════════════════╗
║ ✓ Session Memory Saved                                              ║
║                                                                     ║
║ Recovery ID: CMEM-YYYYMMDD-HHMMSS                                   ║
║                                                                     ║
║ To restore: "Please import memory <ID>"                             ║
║ (MCP: core_memory export | CLI: ccw core-memory export --id <ID>)   ║
╚════════════════════════════════════════════════════════════════════╝
```

## 6. Quality Checklist

Before generating:
- [ ] Session ID captured if workflow session active (WFS-*)
- [ ] Project Root is absolute path (e.g., D:\Claude_dms3)
- [ ] Objective clearly states the "North Star" goal
- [ ] Execution Plan: COMPLETE plan preserved VERBATIM (no summarization)
- [ ] Plan Source: Clearly identified (workflow | todo | user-stated | inferred)
- [ ] Plan Details: ALL phases, tasks, file paths, dependencies, status markers included
- [ ] All file paths are ABSOLUTE (not relative)
- [ ] Working Files: 3-8 modified files with roles
- [ ] Reference Files: Key context files (CLAUDE.md, types, configs)
- [ ] Last Action captures final state (success/failure)
- [ ] Decisions include reasoning, not just choices
- [ ] Known Issues separates deferred from forgotten bugs
- [ ] Notes preserve debugging hypotheses if any

## 7. Path Resolution Rules

### Project Root Detection
1. Check current working directory from environment
2. Look for project markers: `.git/`, `package.json`, `.claude/`
3. Use the topmost directory containing these markers

### Absolute Path Conversion
```javascript
// Convert relative to absolute
const toAbsolutePath = (relativePath, projectRoot) => {
  if (path.isAbsolute(relativePath)) return relativePath;
  return path.join(projectRoot, relativePath);
};

// Example: "src/api/auth.ts" → "D:\Claude_dms3\src\api\auth.ts"
```

### Reference File Categories
| Category | Examples | Priority |
|----------|----------|----------|
| Project Config | `.claude/CLAUDE.md`, `package.json`, `tsconfig.json` | High |
| Type Definitions | `src/types/*.ts`, `*.d.ts` | High |
| Related Modules | Parent/sibling modules with shared interfaces | Medium |
| Test Files | Corresponding test files for modified code | Medium |
| Documentation | `README.md`, `ARCHITECTURE.md` | Low |

## 8. Plan Detection (Priority Order)

### Priority 1: Workflow Session (IMPL_PLAN.md)
```javascript
// Check for active workflow session
const manifest = await mcp__ccw-tools__session_manager({
  operation: "list",
  location: "active"
});

if (manifest.sessions?.length > 0) {
  const session = manifest.sessions[0];
  const plan = await mcp__ccw-tools__session_manager({
    operation: "read",
    session_id: session.id,
    content_type: "plan"
  });
  sessionAnalysis.sessionId = session.id;
  sessionAnalysis.executionPlan.source = "workflow";
  sessionAnalysis.executionPlan.content = plan.content;
}
```

### Priority 2: TodoWrite (Current Session Todos)
```javascript
// Extract from conversation - look for TodoWrite tool calls
// Preserve COMPLETE todo list with all details
const todos = extractTodosFromConversation();
if (todos.length > 0) {
  sessionAnalysis.executionPlan.source = "todo";
  // Format todos with full context - preserve status markers
  sessionAnalysis.executionPlan.content = todos.map(t =>
    `- [${t.status === 'completed' ? 'x' : t.status === 'in_progress' ? '>' : ' '}] ${t.content}`
  ).join('\n');
}
```

### Priority 3: User-Stated Plan
```javascript
// Look for explicit plan statements in user messages:
// - "Here's my plan: 1. ... 2. ... 3. ..."
// - "I want to: first..., then..., finally..."
// - Numbered or bulleted lists describing steps
const userPlan = extractUserStatedPlan();
if (userPlan) {
  sessionAnalysis.executionPlan.source = "user-stated";
  sessionAnalysis.executionPlan.content = userPlan;
}
```

### Priority 4: Inferred Plan
```javascript
// If no explicit plan, infer from:
// - Task description and breakdown discussion
// - Sequence of actions taken
// - Outstanding work mentioned
const inferredPlan = inferPlanFromDiscussion();
if (inferredPlan) {
  sessionAnalysis.executionPlan.source = "inferred";
  sessionAnalysis.executionPlan.content = inferredPlan;
}
```

## 9. Notes

- **Timing**: Execute at task completion or before context switch
- **Frequency**: Once per independent task or milestone
- **Recovery**: New session can immediately continue with full context
- **Knowledge Graph**: Entity relationships auto-extracted for visualization
- **Absolute Paths**: Critical for cross-session recovery on different machines
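For the restore path mentioned in the recovery box, a hedged sketch; the `id` parameter name is inferred from the import response format above and is an assumption, not a documented signature:

```javascript
// Hypothetical restore call mirroring the import call documented in Step 3
mcp__ccw-tools__core_memory({
  operation: "export",
  id: "CMEM-YYYYMMDD-HHMMSS" // Recovery ID reported after /memory:compact
})
```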
````diff
@@ -1,25 +1,62 @@
-# Gemini Code Guidelines
+## Code Quality Standards
+
+### Code Quality
+- Follow project's existing patterns
+- Match import style and naming conventions
+- Single responsibility per function/class
+- DRY (Don't Repeat Yourself)
+- YAGNI (You Aren't Gonna Need It)
+
+### Testing
+- Test all public functions
+- Test edge cases and error conditions
+- Mock external dependencies
+- Target 80%+ coverage
+
+### Error Handling
+- Proper try-catch blocks
+- Clear error messages
+- Graceful degradation
+- Don't expose sensitive info
+
 ## Core Principles
 
-**Thoroughness**:
-- Analyze ALL CONTEXT files completely
-- Check cross-file patterns and dependencies
-- Identify edge cases and quantify metrics
+**Incremental Progress**:
+- Small, testable changes
+- Commit working code frequently
+- Build on previous work (subtasks)
 
 **Evidence-Based**:
-- Quote relevant code with `file:line` references
-- Link related patterns across files
-- Support all claims with concrete examples
+- Study 3+ similar patterns before implementing
+- Match project style exactly
+- Verify with existing code
 
-**Actionable**:
-- Clear, specific recommendations (not vague)
-- Prioritized by impact
-- Incremental changes over big rewrites
+**Pragmatic**:
+- Boring solutions over clever code
+- Simple over complex
+- Adapt to project reality
 
-**Philosophy**:
-- **Simple over complex** - Avoid over-engineering
-- **Clear over clever** - Prefer obvious solutions
-- **Learn from existing** - Reference project patterns
-- **Pragmatic over dogmatic** - Adapt to project reality
-- **Incremental progress** - Small, testable changes
+**Context Continuity** (Multi-Task):
+- Leverage resume for consistency
+- Maintain established patterns
+- Test integration between subtasks
+
+## Execution Checklist
+
+**Before**:
+- [ ] Understand PURPOSE and TASK clearly
+- [ ] Review CONTEXT files, find 3+ patterns
+- [ ] Check RULES templates and constraints
+
+**During**:
+- [ ] Follow existing patterns exactly
+- [ ] Write tests alongside code
+- [ ] Run tests after every change
+- [ ] Commit working code incrementally
+
+**After**:
+- [ ] All tests pass
+- [ ] Coverage meets target
+- [ ] Build succeeds
+- [ ] All EXPECTED deliverables met
````
**.gitignore** (vendored, 1 line added):

````diff
@@ -29,3 +29,4 @@ COMMAND_TEMPLATE_ORCHESTRATOR.md
 settings.json
 *.mcp.json
 .mcp.json
+.ace-tool/
````
**.mcp.json** (deleted, 22 lines):

````diff
@@ -1,22 +0,0 @@
-{
-  "mcpServers": {
-    "chrome-devtools": {
-      "type": "stdio",
-      "command": "npx",
-      "args": [
-        "chrome-devtools-mcp@latest"
-      ],
-      "env": {}
-    },
-    "ccw-tools": {
-      "command": "npx",
-      "args": [
-        "-y",
-        "ccw-mcp"
-      ],
-      "env": {
-        "CCW_ENABLED_TOOLS": "write_file,edit_file,smart_search,core_memory"
-      }
-    }
-  }
-}
````
````diff
@@ -46,7 +46,6 @@ Install-Claude.ps1
 install-remote.ps1
 *.mcp.json
 # ccw internal files
-ccw/package.json
 ccw/node_modules/
 ccw/*.md
 
````
331
AGENTS.md
Normal file
331
AGENTS.md
Normal file
@@ -0,0 +1,331 @@
|
|||||||
|
# Codex Agent Execution Protocol
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
**Role**: Autonomous development, implementation, and testing specialist
|
||||||
|
|
||||||
|
|
||||||
|
## Prompt Structure
|
||||||
|
|
||||||
|
All prompts follow this 6-field format:
|
||||||
|
|
||||||
|
```
|
||||||
|
PURPOSE: [development goal]
|
||||||
|
TASK: [specific implementation task]
|
||||||
|
MODE: [auto|write]
|
||||||
|
CONTEXT: [file patterns]
|
||||||
|
EXPECTED: [deliverables]
|
||||||
|
RULES: [templates | additional constraints]
|
||||||
|
```
|
||||||
|
|
||||||
|
**Subtask indicator**: `Subtask N of M: [title]` or `CONTINUE TO NEXT SUBTASK`
|
||||||
|
|
||||||
|
## MODE Definitions
|
||||||
|
|
||||||
|
### MODE: auto (default)
|
||||||
|
|
||||||
|
**Permissions**:
|
||||||
|
- Full file operations (create/modify/delete)
|
||||||
|
- Run tests and builds
|
||||||
|
- Commit code incrementally
|
||||||
|
|
||||||
|
**Execute**:
|
||||||
|
1. Parse PURPOSE and TASK
|
||||||
|
2. Analyze CONTEXT files - find 3+ similar patterns
|
||||||
|
3. Plan implementation following RULES
|
||||||
|
4. Generate code with tests
|
||||||
|
5. Run tests continuously
|
||||||
|
6. Commit working code incrementally
|
||||||
|
7. Validate EXPECTED deliverables
|
||||||
|
8. Report results (with context for next subtask if multi-task)
|
||||||
|
|
||||||
|
**Constraint**: Must test every change
|
||||||
|
|
||||||
|
### MODE: write
|
||||||
|
|
||||||
|
**Permissions**:
|
||||||
|
- Focused file operations
|
||||||
|
- Create/modify specific files
|
||||||
|
- Run tests for validation
|
||||||
|
|
||||||
|
**Execute**:
|
||||||
|
1. Analyze CONTEXT files
|
||||||
|
2. Make targeted changes
|
||||||
|
3. Validate tests pass
|
||||||
|
4. Report file changes
|
||||||
|
|
||||||
|
## Execution Protocol
|
||||||
|
|
||||||
|
### Core Requirements
|
||||||
|
|
||||||
|
**ALWAYS**:
|
||||||
|
- Parse all 6 fields (PURPOSE, TASK, MODE, CONTEXT, EXPECTED, RULES)
|
||||||
|
- Study CONTEXT files - find 3+ similar patterns before implementing
|
||||||
|
- Apply RULES (templates + constraints) exactly
|
||||||
|
- Test continuously after every change
|
||||||
|
- Commit incrementally with working code
|
||||||
|
- Match project style and patterns exactly
|
||||||
|
- List all created/modified files at output beginning
|
||||||
|
- Use direct binary calls (avoid shell wrappers)
|
||||||
|
- Prefer apply_patch for text edits
|
||||||
|
- Configure Windows UTF-8 encoding for Chinese support
|
||||||
|
|
||||||
|
**NEVER**:
|
||||||
|
- Make assumptions without code verification
|
||||||
|
- Ignore existing patterns
|
||||||
|
- Skip tests
|
||||||
|
- Use clever tricks over boring solutions
|
||||||
|
- Over-engineer solutions
|
||||||
|
- Break existing code or backward compatibility
|
||||||
|
- Exceed 3 failed attempts without stopping
|
||||||
|
|
||||||
|
### RULES Processing
|
||||||
|
|
||||||
|
- Parse RULES field to extract template content and constraints
|
||||||
|
- Recognize `|` as separator: `template content | additional constraints`
|
||||||
|
- Apply ALL template guidelines as mandatory
|
||||||
|
- Apply ALL additional constraints as mandatory
|
||||||
|
- Treat rule violations as task failures
|
||||||
|
|
||||||
|
### Multi-Task Execution (Resume Pattern)
|
||||||
|
|
||||||
|
**First subtask**: Standard execution flow above
|
||||||
|
**Subsequent subtasks** (via `resume --last`):
|
||||||
|
- Recall context from previous subtasks
|
||||||
|
- Build on previous work (don't repeat)
|
||||||
|
- Maintain consistency with established patterns
|
||||||
|
- Focus on current subtask scope only
|
||||||
|
- Test integration with previous work
|
||||||
|
- Report context for next subtask
|
||||||
|
|
||||||
|
## System Optimization
|
||||||
|
|
||||||
|
**Direct Binary Calls**: Always call binaries directly in `functions.shell`, set `workdir`, avoid shell wrappers (`bash -lc`, `cmd /c`, etc.)
|
||||||
|
|
||||||
|
**Text Editing Priority**:
|
||||||
|
1. Use `apply_patch` tool for all routine text edits
|
||||||
|
2. Fall back to `sed` for single-line substitutions if unavailable
|
||||||
|
3. Avoid Python editing scripts unless both fail
|
||||||
|
|
||||||
|
**apply_patch invocation**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"command": ["apply_patch", "*** Begin Patch\n*** Update File: path/to/file\n@@\n- old\n+ new\n*** End Patch\n"],
|
||||||
|
"workdir": "<workdir>",
|
||||||
|
"justification": "Brief reason"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Windows UTF-8 Encoding** (before commands):
|
||||||
|
```powershell
|
||||||
|
[Console]::InputEncoding = [Text.UTF8Encoding]::new($false)
|
||||||
|
[Console]::OutputEncoding = [Text.UTF8Encoding]::new($false)
|
||||||
|
chcp 65001 > $null
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output Standards
|
||||||
|
|
||||||
|
### Format Priority
|
||||||
|
|
||||||
|
**If template defines output format** → Follow template format EXACTLY (all sections mandatory)
|
||||||
|
|
||||||
|
**If template has no format** → Use default format below based on task type
|
||||||
|
|
||||||
|
### Default Output Formats
|
||||||
|
|
||||||
|
#### Single Task Implementation
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
# Implementation: [TASK Title]
|
||||||
|
|
||||||
|
## Changes
|
||||||
|
- Created: `path/to/file1.ext` (X lines)
|
||||||
|
- Modified: `path/to/file2.ext` (+Y/-Z lines)
|
||||||
|
- Deleted: `path/to/file3.ext`
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
[2-3 sentence overview of what was implemented]
|
||||||
|
|
||||||
|
## Key Decisions
|
||||||
|
1. [Decision] - Rationale and reference to similar pattern
|
||||||
|
2. [Decision] - path/to/reference:line
|
## Implementation Details

[Evidence-based description with code references]

## Testing

- Tests written: X new tests
- Tests passing: Y/Z tests
- Coverage: N%

## Validation

✅ Tests: X passing
✅ Coverage: Y%
✅ Build: Success

## Next Steps

[Recommendations or future improvements]
```

#### Multi-Task Execution (with Resume)

**First Subtask**:

```markdown
# Subtask 1/N: [TASK Title]

## Changes

[List of file changes]

## Implementation

[Details with code references]

## Testing

✅ Tests: X passing
✅ Integration: Compatible with existing code

## Context for Next Subtask

- Key decisions: [established patterns]
- Files created: [paths and purposes]
- Integration points: [where next subtask should connect]
```

**Subsequent Subtasks**:

```markdown
# Subtask N/M: [TASK Title]

## Changes

[List of file changes]

## Integration Notes

✅ Compatible with subtask N-1
✅ Maintains established patterns
✅ Tests pass with previous work

## Implementation

[Details with code references]

## Testing

✅ Tests: X passing
✅ Total coverage: Y%

## Context for Next Subtask

[If not final subtask, provide context for continuation]
```

#### Partial Completion

```markdown
# Task Status: Partially Completed

## Completed

- [What worked successfully]
- Files: `path/to/completed.ext`

## Blocked

- **Issue**: [What failed]
- **Root Cause**: [Analysis of failure]
- **Attempted**: [Solutions tried - attempt X of 3]

## Required

[What's needed to proceed]

## Recommendation

[Suggested next steps or alternative approaches]
```

### Code References

**Format**: `path/to/file:line_number`

**Example**: `src/auth/jwt.ts:45` - Implemented token validation following pattern from `src/auth/session.ts:78`

### Related Files Section

**Always include at output beginning** - List ALL files analyzed, created, or modified:

```markdown
## Related Files

- `path/to/file1.ext` - [Role in implementation]
- `path/to/file2.ext` - [Reference pattern used]
- `path/to/file3.ext` - [Modified for X reason]
```

## Error Handling

### Three-Attempt Rule

**On 3rd failed attempt**:

1. Stop execution
2. Report: What attempted, what failed, root cause
3. Request guidance or suggest alternatives (a minimal sketch of this loop follows the recovery table below)

### Recovery Strategies

| Error Type | Response |
|------------|----------|
| **Syntax/Type** | Review errors → Fix → Re-run tests → Validate build |
| **Runtime** | Analyze stack trace → Add error handling → Test error cases |
| **Test Failure** | Debug in isolation → Review setup → Fix implementation/test |
| **Build Failure** | Check messages → Fix incrementally → Validate each fix |
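The three-attempt rule and the recovery table describe a single control loop: classify the failure, apply the matching recovery response, and stop with a report after the third failure. A minimal sketch, assuming a caller-supplied `run_attempt` callable and an error `classify` function; the category names and hints below are illustrative only and are not part of CCW's actual API:

```python
from typing import Callable

# Hypothetical error categories mirroring the recovery table above.
RECOVERY_HINTS = {
    "syntax": "Review errors -> fix -> re-run tests -> validate build",
    "runtime": "Analyze stack trace -> add error handling -> test error cases",
    "test": "Debug in isolation -> review setup -> fix implementation/test",
    "build": "Check messages -> fix incrementally -> validate each fix",
}


def execute_with_retries(
    run_attempt: Callable[[], None],
    classify: Callable[[Exception], str],
    max_attempts: int = 3,
) -> None:
    """Run a task, printing the matching recovery hint on failure; report after the 3rd attempt."""
    failures = []
    for attempt in range(1, max_attempts + 1):
        try:
            run_attempt()
            return  # success: stop retrying
        except Exception as exc:  # illustrative only; real code should narrow this
            category = classify(exc)
            failures.append((attempt, category, str(exc)))
            print(f"Attempt {attempt} failed ({category}): {RECOVERY_HINTS.get(category, 'n/a')}")
    # 3rd failed attempt: stop and report what was attempted, what failed, and the root cause.
    report = "\n".join(f"- attempt {a}: [{c}] {msg}" for a, c, msg in failures)
    raise RuntimeError(f"Stopping after {max_attempts} attempts; request guidance.\n{report}")
```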
## Quality Standards

### Code Quality

- Follow project's existing patterns
- Match import style and naming conventions
- Single responsibility per function/class
- DRY (Don't Repeat Yourself)
- YAGNI (You Aren't Gonna Need It)

### Testing

- Test all public functions
- Test edge cases and error conditions
- Mock external dependencies
- Target 80%+ coverage

### Error Handling

- Proper try-catch blocks
- Clear error messages
- Graceful degradation
- Don't expose sensitive info

## Core Principles

**Incremental Progress**:

- Small, testable changes
- Commit working code frequently
- Build on previous work (subtasks)

**Evidence-Based**:

- Study 3+ similar patterns before implementing
- Match project style exactly
- Verify with existing code

**Pragmatic**:

- Boring solutions over clever code
- Simple over complex
- Adapt to project reality

**Context Continuity** (Multi-Task):

- Leverage resume for consistency
- Maintain established patterns
- Test integration between subtasks

## Execution Checklist

**Before**:

- [ ] Understand PURPOSE and TASK clearly
- [ ] Review CONTEXT files, find 3+ patterns
- [ ] Check RULES templates and constraints

**During**:

- [ ] Follow existing patterns exactly
- [ ] Write tests alongside code
- [ ] Run tests after every change
- [ ] Commit working code incrementally

**After**:

- [ ] All tests pass
- [ ] Coverage meets target
- [ ] Build succeeds
- [ ] All EXPECTED deliverables met
API_SETTINGS_IMPLEMENTATION.md (new file, 196 lines)
@@ -0,0 +1,196 @@

# API Settings Page Implementation Complete

## Files Created

### 1. JavaScript File
**Location**: `ccw/src/templates/dashboard-js/views/api-settings.js` (28KB)

**Key features**:
- ✅ Provider Management
  - Add/edit/delete providers
  - Supports OpenAI, Anthropic, Google, Ollama, Azure, Mistral, DeepSeek, Custom
  - API key management (environment variables supported)
  - Connection testing

- ✅ Endpoint Management
  - Create custom endpoints
  - Link providers and models
  - Cache policy configuration
  - Shows CLI usage examples

- ✅ Cache Management
  - Global cache toggle
  - Cache statistics display
  - Clear-cache action

### 2. CSS Stylesheet
**Location**: `ccw/src/templates/dashboard-css/31-api-settings.css` (6.8KB)

**Styles include**:
- Card-based layout
- Form styles
- Progress bars
- Responsive design
- Empty-state display

### 3. Internationalization
**Location**: `ccw/src/templates/dashboard-js/i18n.js`

**Translations added**:
- English: 54 translation keys
- Chinese: 54 translation keys
- Covers all UI text, hints, and error messages

### 4. Configuration Updates

#### dashboard-generator.ts
- ✅ Added `31-api-settings.css` to the CSS module list
- ✅ Added `views/api-settings.js` to the JS module list

#### navigation.js
- ✅ Added `api-settings` route handling
- ✅ Added title update logic

#### dashboard.html
- ✅ Added a navigation menu item (Settings icon)

## Backend API Endpoints Used

The page uses the following backend APIs (already present):

### Provider APIs
- `GET /api/litellm-api/providers` - List all providers
- `POST /api/litellm-api/providers` - Create a provider
- `PUT /api/litellm-api/providers/:id` - Update a provider
- `DELETE /api/litellm-api/providers/:id` - Delete a provider
- `POST /api/litellm-api/providers/:id/test` - Test the connection

### Endpoint APIs
- `GET /api/litellm-api/endpoints` - List all endpoints
- `POST /api/litellm-api/endpoints` - Create an endpoint
- `PUT /api/litellm-api/endpoints/:id` - Update an endpoint
- `DELETE /api/litellm-api/endpoints/:id` - Delete an endpoint

### Model Discovery
- `GET /api/litellm-api/models/:providerType` - List the models supported by a provider

### Cache APIs
- `GET /api/litellm-api/cache/stats` - Get cache statistics
- `POST /api/litellm-api/cache/clear` - Clear the cache

### Config APIs
- `GET /api/litellm-api/config` - Get the full configuration
- `PUT /api/litellm-api/config/cache` - Update global cache settings
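The routes above are plain JSON-over-HTTP endpoints served by the dashboard, so they can also be exercised from a script. A minimal sketch using `requests`; the base URL/port (`http://localhost:3000`) and the request payload fields are illustrative assumptions, not taken from the route definitions:

```python
import requests

# Assumed dashboard origin; adjust to wherever the CCW Dashboard is actually served.
BASE = "http://localhost:3000"

# List providers (GET /api/litellm-api/providers).
providers = requests.get(f"{BASE}/api/litellm-api/providers", timeout=10).json()
print(providers)

# Create a provider (POST /api/litellm-api/providers).
# The payload shape here is a guess for illustration only.
new_provider = {"name": "OpenAI Production", "type": "openai", "apiKey": "${OPENAI_API_KEY}"}
resp = requests.post(f"{BASE}/api/litellm-api/providers", json=new_provider, timeout=10)
resp.raise_for_status()

# Clear the cache (POST /api/litellm-api/cache/clear).
requests.post(f"{BASE}/api/litellm-api/cache/clear", timeout=10).raise_for_status()
```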
## Page Features

### Provider Management
```
+-- Provider Card ------------------------+
| OpenAI Production [Edit] [Del] |
| Type: openai |
| Key: sk-...abc |
| URL: https://api.openai.com/v1 |
| Status: ✓ Enabled |
+-----------------------------------------+
```

### Endpoint Management
```
+-- Endpoint Card ------------------------+
| GPT-4o Code Review [Edit] [Del]|
| ID: my-gpt4o |
| Provider: OpenAI Production |
| Model: gpt-4-turbo |
| Cache: Enabled (60 min) |
| Usage: ccw cli -p "..." --model my-gpt4o|
+-----------------------------------------+
```

### Forms
- **Provider Form**:
  - Type selection (8 provider types)
  - API key input (show/hide supported)
  - Environment variable support
  - Custom base URL
  - Enable/disable toggle

- **Endpoint Form**:
  - Endpoint ID (used by the CLI)
  - Display name
  - Provider selection (loaded dynamically)
  - Model selection (loaded dynamically per provider)
  - Cache policy configuration
    - TTL (minutes)
    - Max size (KB)
    - Auto-cache file patterns

## Usage Flow

### 1. Add a Provider
1. Click "Add Provider"
2. Choose the provider type (e.g. OpenAI)
3. Enter a display name
4. Enter the API key (or use an environment variable)
5. Optional: enter a custom API base URL
6. Save

### 2. Create a Custom Endpoint
1. Click "Add Endpoint"
2. Enter an endpoint ID (used by the CLI)
3. Enter a display name
4. Choose a provider
5. Choose a model (the provider's supported models load automatically)
6. Optional: configure the cache policy
7. Save

### 3. Use the Endpoint
```bash
ccw cli -p "Analyze this code..." --model my-gpt4o
```

## Code Quality

- ✅ Follows the existing code style
- ✅ Uses i18n functions for internationalization
- ✅ Responsive design (mobile friendly)
- ✅ Full form validation
- ✅ User-friendly error messages
- ✅ Uses Lucide icons
- ✅ Modals reuse existing styles
- ✅ Fully integrated with the backend APIs

## Suggested Tests

1. **Basic functionality**:
   - Add/edit/delete providers
   - Add/edit/delete endpoints
   - Clear the cache

2. **Form validation**:
   - Required-field validation
   - API key show/hide
   - Environment variable toggle

3. **Data loading**:
   - Dynamic model list loading
   - Cache statistics display
   - Empty-state display

4. **Internationalization**:
   - Switch languages (English/Chinese)
   - Verify that all text renders correctly

## Next Steps

The page is complete and integrated into the project. After starting the CCW Dashboard:
1. The navigation bar shows an "API Settings" menu item (Settings icon)
2. Click it to access all of the features
3. All operations are synced to the configuration file in real time

## Notes

- The page uses the existing LiteLLM API routes (`litellm-api-routes.ts`)
- Configuration is stored in the project's LiteLLM config file
- Environment variable references are supported: `${VARIABLE_NAME}`
- API keys are masked when displayed (only the first 4 and last 4 characters are shown)
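A minimal sketch of that masking rule, written in Python purely for illustration; the dashboard's own implementation lives in the JavaScript view and may differ in its edge-case handling:

```python
def mask_api_key(key: str, visible: int = 4) -> str:
    """Show only the first and last `visible` characters of an API key."""
    if len(key) <= visible * 2:
        return "*" * len(key)  # too short to reveal anything safely
    return f"{key[:visible]}...{key[-visible:]}"


assert mask_api_key("sk-1234567890abcdef") == "sk-1...cdef"
```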
README_CN.md (modified, 32 lines shown)

@@ -1,5 +1,8 @@
 # 🚀 Claude Code Workflow (CCW)
 
+
+[![Add skills to Claude Code](https://smithery.ai/skills?ns=catlog22&utm_source=github&utm_medium=badge)
+
 <div align="center">
 
 [![Version](https://img.shields.io/badge/version-6.2.2-blue.svg)](https://github.com/catlog22/Claude-Code-Workflow/releases)
@@ -13,7 +16,7 @@
 ---
 
-**Claude Code Workflow (CCW)** 将 AI 开发从简单的提示词链接转变为一个强大的、上下文优先的编排系统。它通过结构化规划、确定性执行和智能多模型编排,解决了执行不确定性和误差累积的问题。
+**Claude Code Workflow (CCW)** 是一个 JSON 驱动的多智能体开发框架,具有智能 CLI 编排(Gemini/Qwen/Codex)、上下文优先架构和自动化工作流执行。它将 AI 开发从简单的提示词链接转变为一个强大的编排系统。
 
 > **🎉 版本 6.2.0: 原生 CodexLens 与 Dashboard 革新**
 >
@@ -38,8 +41,8 @@
 CCW 构建在一系列核心原则之上,这些原则使其与传统的 AI 开发方法区别开来:
 
-- **上下文优先架构**: 通过预定义的上下文收集,消除了执行过程中的不确定性,确保智能体在实现*之前*就拥有正确的信息。
+- **上下文优先架构**: 通过预定义的上下文收集消除执行过程中的不确定性,确保智能体在实现*之前*就拥有正确的信息。
-- **JSON 优先的状态管理**: 任务状态完全存储在 `.task/IMPL-*.json` 文件中,作为唯一的事实来源,实现了无需状态漂移的程序化编排。
+- **JSON 优先的状态管理**: 任务状态完全存储在 `.task/IMPL-*.json` 文件中作为唯一的事实来源,实现无状态漂移的程序化编排。
 - **自主多阶段编排**: 命令链式调用专门的子命令和智能体,以零用户干预的方式自动化复杂的工作流。
 - **多模型策略**: 充分利用不同 AI 模型(如 Gemini 用于分析,Codex 用于实现)的独特优势,以获得更优越的结果。
 - **分层内存系统**: 一个 4 层文档系统,在适当的抽象级别上提供上下文,防止信息过载。
@@ -49,18 +52,23 @@ CCW 构建在一系列核心原则之上,这些原则使其与传统的 AI 开
 ## ⚙️ 安装
 
-有关详细的安装说明,请参阅 [**INSTALL_CN.md**](INSTALL_CN.md) 指南。
+### **📦 npm 安装(推荐)**
 
-### **🚀 一键快速安装**
+通过 npm 全局安装:
+```bash
-**Windows (PowerShell):**
+npm install -g claude-code-workflow
-```powershell
-Invoke-Expression (Invoke-WebRequest -Uri "https://raw.githubusercontent.com/catlog22/Claude-Code-Workflow/main/install-remote.ps1" -UseBasicParsing).Content
-```
+```
 
-**Linux/macOS (Bash/Zsh):**
+然后将工作流文件安装到您的系统:
-```bash
+```bash
-bash <(curl -fsSL https://raw.githubusercontent.com/catlog22/Claude-Code-Workflow/main/install-remote.sh)
+# 交互式安装
+ccw install
+
+# 全局安装(到 ~/.claude)
+ccw install -m Global
+
+# 项目特定安装
+ccw install -m Path -p /path/to/project
-```
+```
 
 ### **✅ 验证安装**
@@ -283,4 +291,4 @@ CCW 提供全面的文档,帮助您快速上手并掌握高级功能:
 ## 📄 许可证
 
 此项目根据 **MIT 许可证** 授权。详见 [LICENSE](LICENSE) 文件。
ccw-litellm/README.md (new file, 180 lines)
@@ -0,0 +1,180 @@

# ccw-litellm

Unified LiteLLM interface layer shared by ccw and codex-lens projects.

## Features

- **Unified LLM Interface**: Abstract interface for LLM operations (chat, completion)
- **Unified Embedding Interface**: Abstract interface for text embeddings
- **Multi-Provider Support**: OpenAI, Anthropic, Azure, and more via LiteLLM
- **Configuration Management**: YAML-based configuration with environment variable substitution
- **Type Safety**: Full type annotations with Pydantic models

## Installation

```bash
pip install -e .
```

## Quick Start

### Configuration

Create a configuration file at `~/.ccw/config/litellm-config.yaml`:

```yaml
version: 1
default_provider: openai

providers:
  openai:
    api_key: ${OPENAI_API_KEY}
    api_base: https://api.openai.com/v1

llm_models:
  default:
    provider: openai
    model: gpt-4

embedding_models:
  default:
    provider: openai
    model: text-embedding-3-small
    dimensions: 1536
```

### Usage

#### LLM Client

```python
from ccw_litellm import LiteLLMClient, ChatMessage

# Initialize client with default model
client = LiteLLMClient(model="default")

# Chat completion
messages = [
    ChatMessage(role="user", content="Hello, how are you?")
]
response = client.chat(messages)
print(response.content)

# Text completion
response = client.complete("Once upon a time")
print(response.content)
```

#### Embedder

```python
from ccw_litellm import LiteLLMEmbedder

# Initialize embedder with default model
embedder = LiteLLMEmbedder(model="default")

# Embed single text
vector = embedder.embed("Hello world")
print(vector.shape)  # (1, 1536)

# Embed multiple texts
vectors = embedder.embed(["Text 1", "Text 2", "Text 3"])
print(vectors.shape)  # (3, 1536)
```

#### Custom Configuration

```python
from ccw_litellm import LiteLLMClient, load_config

# Load custom configuration
config = load_config("/path/to/custom-config.yaml")

# Use custom configuration
client = LiteLLMClient(model="fast", config=config)
```

## Configuration Reference

### Provider Configuration

```yaml
providers:
  <provider_name>:
    api_key: <api_key_or_${ENV_VAR}>
    api_base: <base_url>
```

Supported providers: `openai`, `anthropic`, `azure`, `vertex_ai`, `bedrock`, etc.

### LLM Model Configuration

```yaml
llm_models:
  <model_name>:
    provider: <provider_name>
    model: <model_identifier>
```

### Embedding Model Configuration

```yaml
embedding_models:
  <model_name>:
    provider: <provider_name>
    model: <model_identifier>
    dimensions: <embedding_dimensions>
```

## Environment Variables

The configuration supports environment variable substitution using the `${VAR}` or `${VAR:-default}` syntax:

```yaml
providers:
  openai:
    api_key: ${OPENAI_API_KEY}  # Required
    api_base: ${OPENAI_API_BASE:-https://api.openai.com/v1}  # With default
```

## API Reference

### Interfaces

- `AbstractLLMClient`: Abstract base class for LLM clients
- `AbstractEmbedder`: Abstract base class for embedders
- `ChatMessage`: Message data class (role, content)
- `LLMResponse`: Response data class (content, raw)

### Implementations

- `LiteLLMClient`: LiteLLM implementation of AbstractLLMClient
- `LiteLLMEmbedder`: LiteLLM implementation of AbstractEmbedder

### Configuration

- `LiteLLMConfig`: Root configuration model
- `ProviderConfig`: Provider configuration model
- `LLMModelConfig`: LLM model configuration model
- `EmbeddingModelConfig`: Embedding model configuration model
- `load_config(path)`: Load configuration from YAML file
- `get_config(path, reload)`: Get global configuration singleton
- `reset_config()`: Reset global configuration (for testing)
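The singleton helpers can also be used directly, for example to inspect which models a configuration defines. A small sketch (the printed model names depend on whatever configuration file is actually present on the machine):

```python
from ccw_litellm import get_config, reset_config

config = get_config()  # loads the config on first call, then caches the instance
print("LLM models:", list(config.llm_models))
print("Embedding models:", list(config.embedding_models))

reset_config()                    # drop the cached singleton (handy in tests)
config = get_config(reload=True)  # or force a re-read from disk
```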
## Development

### Running Tests

```bash
pytest tests/ -v
```

### Type Checking

```bash
mypy src/ccw_litellm
```

## License

MIT
ccw-litellm/litellm-config.yaml.example (new file, 53 lines)
@@ -0,0 +1,53 @@

# LiteLLM Unified Configuration
# Copy to ~/.ccw/config/litellm-config.yaml

version: 1

# Default provider for LLM calls
default_provider: openai

# Provider configurations
providers:
  openai:
    api_key: ${OPENAI_API_KEY}
    api_base: https://api.openai.com/v1

  anthropic:
    api_key: ${ANTHROPIC_API_KEY}

  ollama:
    api_base: http://localhost:11434

  azure:
    api_key: ${AZURE_API_KEY}
    api_base: ${AZURE_API_BASE}

# LLM model configurations
llm_models:
  default:
    provider: openai
    model: gpt-4o
  fast:
    provider: openai
    model: gpt-4o-mini
  claude:
    provider: anthropic
    model: claude-sonnet-4-20250514
  local:
    provider: ollama
    model: llama3.2

# Embedding model configurations
embedding_models:
  default:
    provider: openai
    model: text-embedding-3-small
    dimensions: 1536
  large:
    provider: openai
    model: text-embedding-3-large
    dimensions: 3072
  ada:
    provider: openai
    model: text-embedding-ada-002
    dimensions: 1536
ccw-litellm/pyproject.toml (new file, 35 lines)
@@ -0,0 +1,35 @@

[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "ccw-litellm"
version = "0.1.0"
description = "Unified LiteLLM interface layer shared by ccw and codex-lens"
requires-python = ">=3.10"
authors = [{ name = "ccw-litellm contributors" }]
dependencies = [
    "litellm>=1.0.0",
    "pyyaml",
    "numpy",
    "pydantic>=2.0",
]

[project.optional-dependencies]
dev = [
    "pytest>=7.0",
]

[project.scripts]
ccw-litellm = "ccw_litellm.cli:main"

[tool.setuptools]
package-dir = { "" = "src" }

[tool.setuptools.packages.find]
where = ["src"]
include = ["ccw_litellm*"]

[tool.pytest.ini_options]
testpaths = ["tests"]
addopts = "-q"
ccw-litellm/src/ccw_litellm.egg-info/PKG-INFO (new file, 12 lines)
@@ -0,0 +1,12 @@

Metadata-Version: 2.4
Name: ccw-litellm
Version: 0.1.0
Summary: Unified LiteLLM interface layer shared by ccw and codex-lens
Author: ccw-litellm contributors
Requires-Python: >=3.10
Requires-Dist: litellm>=1.0.0
Requires-Dist: pyyaml
Requires-Dist: numpy
Requires-Dist: pydantic>=2.0
Provides-Extra: dev
Requires-Dist: pytest>=7.0; extra == "dev"
ccw-litellm/src/ccw_litellm.egg-info/SOURCES.txt (new file, 20 lines)
@@ -0,0 +1,20 @@

README.md
pyproject.toml
src/ccw_litellm/__init__.py
src/ccw_litellm/cli.py
src/ccw_litellm.egg-info/PKG-INFO
src/ccw_litellm.egg-info/SOURCES.txt
src/ccw_litellm.egg-info/dependency_links.txt
src/ccw_litellm.egg-info/entry_points.txt
src/ccw_litellm.egg-info/requires.txt
src/ccw_litellm.egg-info/top_level.txt
src/ccw_litellm/clients/__init__.py
src/ccw_litellm/clients/litellm_embedder.py
src/ccw_litellm/clients/litellm_llm.py
src/ccw_litellm/config/__init__.py
src/ccw_litellm/config/loader.py
src/ccw_litellm/config/models.py
src/ccw_litellm/interfaces/__init__.py
src/ccw_litellm/interfaces/embedder.py
src/ccw_litellm/interfaces/llm.py
tests/test_interfaces.py

ccw-litellm/src/ccw_litellm.egg-info/dependency_links.txt (new file, 1 empty line)
@@ -0,0 +1 @@
ccw-litellm/src/ccw_litellm.egg-info/entry_points.txt (new file, 2 lines)
@@ -0,0 +1,2 @@

[console_scripts]
ccw-litellm = ccw_litellm.cli:main
ccw-litellm/src/ccw_litellm.egg-info/requires.txt (new file, 7 lines)
@@ -0,0 +1,7 @@

litellm>=1.0.0
pyyaml
numpy
pydantic>=2.0

[dev]
pytest>=7.0
ccw-litellm/src/ccw_litellm.egg-info/top_level.txt (new file, 1 line)
@@ -0,0 +1 @@

ccw_litellm
ccw-litellm/src/ccw_litellm/__init__.py (new file, 47 lines)
@@ -0,0 +1,47 @@

"""ccw-litellm package.

This package provides a small, stable interface layer around LiteLLM to share
between the ccw and codex-lens projects.
"""

from __future__ import annotations

from .clients import LiteLLMClient, LiteLLMEmbedder
from .config import (
    EmbeddingModelConfig,
    LiteLLMConfig,
    LLMModelConfig,
    ProviderConfig,
    get_config,
    load_config,
    reset_config,
)
from .interfaces import (
    AbstractEmbedder,
    AbstractLLMClient,
    ChatMessage,
    LLMResponse,
)

__version__ = "0.1.0"

__all__ = [
    "__version__",
    # Abstract interfaces
    "AbstractEmbedder",
    "AbstractLLMClient",
    "ChatMessage",
    "LLMResponse",
    # Client implementations
    "LiteLLMClient",
    "LiteLLMEmbedder",
    # Configuration
    "LiteLLMConfig",
    "ProviderConfig",
    "LLMModelConfig",
    "EmbeddingModelConfig",
    "load_config",
    "get_config",
    "reset_config",
]
ccw-litellm/src/ccw_litellm/cli.py (new file, 108 lines)
@@ -0,0 +1,108 @@

"""CLI entry point for ccw-litellm."""

from __future__ import annotations

import argparse
import json
import sys
from pathlib import Path


def main() -> int:
    """Main CLI entry point."""
    parser = argparse.ArgumentParser(
        prog="ccw-litellm",
        description="Unified LiteLLM interface for ccw and codex-lens",
    )
    subparsers = parser.add_subparsers(dest="command", help="Available commands")

    # config command
    config_parser = subparsers.add_parser("config", help="Show configuration")
    config_parser.add_argument(
        "--path",
        type=Path,
        help="Configuration file path",
    )

    # embed command
    embed_parser = subparsers.add_parser("embed", help="Generate embeddings")
    embed_parser.add_argument("texts", nargs="+", help="Texts to embed")
    embed_parser.add_argument(
        "--model",
        default="default",
        help="Embedding model name (default: default)",
    )
    embed_parser.add_argument(
        "--output",
        choices=["json", "shape"],
        default="shape",
        help="Output format (default: shape)",
    )

    # chat command
    chat_parser = subparsers.add_parser("chat", help="Chat with LLM")
    chat_parser.add_argument("message", help="Message to send")
    chat_parser.add_argument(
        "--model",
        default="default",
        help="LLM model name (default: default)",
    )

    # version command
    subparsers.add_parser("version", help="Show version")

    args = parser.parse_args()

    if args.command == "version":
        from . import __version__

        print(f"ccw-litellm {__version__}")
        return 0

    if args.command == "config":
        from .config import get_config

        try:
            config = get_config(config_path=args.path if hasattr(args, "path") else None)
            print(config.model_dump_json(indent=2))
        except Exception as e:
            print(f"Error loading config: {e}", file=sys.stderr)
            return 1
        return 0

    if args.command == "embed":
        from .clients import LiteLLMEmbedder

        try:
            embedder = LiteLLMEmbedder(model=args.model)
            vectors = embedder.embed(args.texts)

            if args.output == "json":
                print(json.dumps(vectors.tolist()))
            else:
                print(f"Shape: {vectors.shape}")
                print(f"Dimensions: {embedder.dimensions}")
        except Exception as e:
            print(f"Error: {e}", file=sys.stderr)
            return 1
        return 0

    if args.command == "chat":
        from .clients import LiteLLMClient
        from .interfaces import ChatMessage

        try:
            client = LiteLLMClient(model=args.model)
            response = client.chat([ChatMessage(role="user", content=args.message)])
            print(response.content)
        except Exception as e:
            print(f"Error: {e}", file=sys.stderr)
            return 1
        return 0

    parser.print_help()
    return 0


if __name__ == "__main__":
    sys.exit(main())
ccw-litellm/src/ccw_litellm/clients/__init__.py (new file, 12 lines)
@@ -0,0 +1,12 @@

"""Client implementations for ccw-litellm."""

from __future__ import annotations

from .litellm_embedder import LiteLLMEmbedder
from .litellm_llm import LiteLLMClient

__all__ = [
    "LiteLLMClient",
    "LiteLLMEmbedder",
]
ccw-litellm/src/ccw_litellm/clients/litellm_embedder.py (new file, 251 lines)
@@ -0,0 +1,251 @@

"""LiteLLM embedder implementation for text embeddings."""

from __future__ import annotations

import logging
from typing import Any, Sequence

import litellm
import numpy as np
from numpy.typing import NDArray

from ..config import LiteLLMConfig, get_config
from ..interfaces.embedder import AbstractEmbedder

logger = logging.getLogger(__name__)


class LiteLLMEmbedder(AbstractEmbedder):
    """LiteLLM embedder implementation.

    Supports multiple embedding providers (OpenAI, etc.) through LiteLLM's unified interface.

    Example:
        embedder = LiteLLMEmbedder(model="default")
        vectors = embedder.embed(["Hello world", "Another text"])
        print(vectors.shape)  # (2, 1536)
    """

    def __init__(
        self,
        model: str = "default",
        config: LiteLLMConfig | None = None,
        **litellm_kwargs: Any,
    ) -> None:
        """Initialize LiteLLM embedder.

        Args:
            model: Model name from configuration (default: "default")
            config: Configuration instance (default: use global config)
            **litellm_kwargs: Additional arguments to pass to litellm.embedding()
        """
        self._config = config or get_config()
        self._model_name = model
        self._litellm_kwargs = litellm_kwargs

        # Get embedding model configuration
        try:
            self._model_config = self._config.get_embedding_model(model)
        except ValueError as e:
            logger.error(f"Failed to get embedding model configuration: {e}")
            raise

        # Get provider configuration
        try:
            self._provider_config = self._config.get_provider(self._model_config.provider)
        except ValueError as e:
            logger.error(f"Failed to get provider configuration: {e}")
            raise

        # Set up LiteLLM environment
        self._setup_litellm()

    def _setup_litellm(self) -> None:
        """Configure LiteLLM with provider settings."""
        provider = self._model_config.provider

        # Set API key
        if self._provider_config.api_key:
            litellm.api_key = self._provider_config.api_key
            # Also set environment-specific keys
            if provider == "openai":
                litellm.openai_key = self._provider_config.api_key
            elif provider == "anthropic":
                litellm.anthropic_key = self._provider_config.api_key

        # Set API base
        if self._provider_config.api_base:
            litellm.api_base = self._provider_config.api_base

    def _format_model_name(self) -> str:
        """Format model name for LiteLLM.

        Returns:
            Formatted model name (e.g., "openai/text-embedding-3-small")
        """
        provider = self._model_config.provider
        model = self._model_config.model

        # For some providers, LiteLLM expects explicit prefix
        if provider in ["azure", "vertex_ai", "bedrock"]:
            return f"{provider}/{model}"

        # For providers with custom api_base (OpenAI-compatible endpoints),
        # use openai/ prefix to tell LiteLLM to use OpenAI API format
        if self._provider_config.api_base and provider not in ["openai", "anthropic"]:
            return f"openai/{model}"

        return model

    @property
    def dimensions(self) -> int:
        """Embedding vector size."""
        return self._model_config.dimensions

    def _estimate_tokens(self, text: str) -> int:
        """Estimate token count for a text using fast heuristic.

        Args:
            text: Text to estimate tokens for

        Returns:
            Estimated token count (len/4 is a reasonable approximation)
        """
        return len(text) // 4

    def _create_batches(
        self,
        texts: list[str],
        max_tokens: int = 30000
    ) -> list[list[str]]:
        """Split texts into batches that fit within token limits.

        Args:
            texts: List of texts to batch
            max_tokens: Maximum tokens per batch (default: 30000, safe margin for 40960 limit)

        Returns:
            List of text batches
        """
        batches = []
        current_batch = []
        current_tokens = 0

        for text in texts:
            text_tokens = self._estimate_tokens(text)

            # If single text exceeds limit, truncate it
            if text_tokens > max_tokens:
                logger.warning(f"Text with {text_tokens} estimated tokens exceeds limit, truncating")
                # Truncate to fit (rough estimate: 4 chars per token)
                max_chars = max_tokens * 4
                text = text[:max_chars]
                text_tokens = self._estimate_tokens(text)

            # Start new batch if current would exceed limit
            if current_tokens + text_tokens > max_tokens and current_batch:
                batches.append(current_batch)
                current_batch = []
                current_tokens = 0

            current_batch.append(text)
            current_tokens += text_tokens

        # Add final batch
        if current_batch:
            batches.append(current_batch)

        return batches

    def embed(
        self,
        texts: str | Sequence[str],
        *,
        batch_size: int | None = None,
        max_tokens_per_batch: int = 30000,
        **kwargs: Any,
    ) -> NDArray[np.floating]:
        """Embed one or more texts.

        Args:
            texts: Single text or sequence of texts
            batch_size: Batch size for processing (deprecated, use max_tokens_per_batch)
            max_tokens_per_batch: Maximum estimated tokens per API call (default: 30000)
            **kwargs: Additional arguments for litellm.embedding()

        Returns:
            A numpy array of shape (n_texts, dimensions).

        Raises:
            Exception: If LiteLLM embedding fails
        """
        # Normalize input to list
        if isinstance(texts, str):
            text_list = [texts]
        else:
            text_list = list(texts)

        if not text_list:
            # Return empty array with correct shape
            return np.empty((0, self.dimensions), dtype=np.float32)

        # Merge kwargs
        embedding_kwargs = {**self._litellm_kwargs, **kwargs}

        # For OpenAI-compatible endpoints, ensure encoding_format is set
        if self._provider_config.api_base and "encoding_format" not in embedding_kwargs:
            embedding_kwargs["encoding_format"] = "float"

        # Split into token-aware batches
        batches = self._create_batches(text_list, max_tokens_per_batch)

        if len(batches) > 1:
            logger.info(f"Split {len(text_list)} texts into {len(batches)} batches for embedding")

        all_embeddings = []

        for batch_idx, batch in enumerate(batches):
            try:
                # Build call kwargs with explicit api_base
                call_kwargs = {**embedding_kwargs}
                if self._provider_config.api_base:
                    call_kwargs["api_base"] = self._provider_config.api_base
                if self._provider_config.api_key:
                    call_kwargs["api_key"] = self._provider_config.api_key

                # Call LiteLLM embedding for this batch
                response = litellm.embedding(
                    model=self._format_model_name(),
                    input=batch,
                    **call_kwargs,
                )

                # Extract embeddings
                batch_embeddings = [item["embedding"] for item in response.data]
                all_embeddings.extend(batch_embeddings)

            except Exception as e:
                logger.error(f"LiteLLM embedding failed for batch {batch_idx + 1}/{len(batches)}: {e}")
                raise

        # Convert to numpy array
        result = np.array(all_embeddings, dtype=np.float32)

        # Validate dimensions
        if result.shape[1] != self.dimensions:
            logger.warning(
                f"Expected {self.dimensions} dimensions, got {result.shape[1]}. "
                f"Configuration may be incorrect."
            )

        return result

    @property
    def model_name(self) -> str:
        """Get configured model name."""
        return self._model_name

    @property
    def provider(self) -> str:
        """Get configured provider name."""
        return self._model_config.provider
ccw-litellm/src/ccw_litellm/clients/litellm_llm.py (new file, 165 lines)
@@ -0,0 +1,165 @@

"""LiteLLM client implementation for LLM operations."""

from __future__ import annotations

import logging
from typing import Any, Sequence

import litellm

from ..config import LiteLLMConfig, get_config
from ..interfaces.llm import AbstractLLMClient, ChatMessage, LLMResponse

logger = logging.getLogger(__name__)


class LiteLLMClient(AbstractLLMClient):
    """LiteLLM client implementation.

    Supports multiple providers (OpenAI, Anthropic, etc.) through LiteLLM's unified interface.

    Example:
        client = LiteLLMClient(model="default")
        response = client.chat([
            ChatMessage(role="user", content="Hello!")
        ])
        print(response.content)
    """

    def __init__(
        self,
        model: str = "default",
        config: LiteLLMConfig | None = None,
        **litellm_kwargs: Any,
    ) -> None:
        """Initialize LiteLLM client.

        Args:
            model: Model name from configuration (default: "default")
            config: Configuration instance (default: use global config)
            **litellm_kwargs: Additional arguments to pass to litellm.completion()
        """
        self._config = config or get_config()
        self._model_name = model
        self._litellm_kwargs = litellm_kwargs

        # Get model configuration
        try:
            self._model_config = self._config.get_llm_model(model)
        except ValueError as e:
            logger.error(f"Failed to get model configuration: {e}")
            raise

        # Get provider configuration
        try:
            self._provider_config = self._config.get_provider(self._model_config.provider)
        except ValueError as e:
            logger.error(f"Failed to get provider configuration: {e}")
            raise

        # Set up LiteLLM environment
        self._setup_litellm()

    def _setup_litellm(self) -> None:
        """Configure LiteLLM with provider settings."""
        provider = self._model_config.provider

        # Set API key
        if self._provider_config.api_key:
            env_var = f"{provider.upper()}_API_KEY"
            litellm.api_key = self._provider_config.api_key
            # Also set environment-specific keys
            if provider == "openai":
                litellm.openai_key = self._provider_config.api_key
            elif provider == "anthropic":
                litellm.anthropic_key = self._provider_config.api_key

        # Set API base
        if self._provider_config.api_base:
            litellm.api_base = self._provider_config.api_base

    def _format_model_name(self) -> str:
        """Format model name for LiteLLM.

        Returns:
            Formatted model name (e.g., "gpt-4", "claude-3-opus-20240229")
        """
        # LiteLLM expects model names in format: "provider/model" or just "model"
        # If provider is explicit, use provider/model format
        provider = self._model_config.provider
        model = self._model_config.model

        # For some providers, LiteLLM expects explicit prefix
        if provider in ["anthropic", "azure", "vertex_ai", "bedrock"]:
            return f"{provider}/{model}"

        return model

    def chat(
        self,
        messages: Sequence[ChatMessage],
        **kwargs: Any,
    ) -> LLMResponse:
        """Chat completion for a sequence of messages.

        Args:
            messages: Sequence of chat messages
            **kwargs: Additional arguments for litellm.completion()

        Returns:
            LLM response with content and raw response

        Raises:
            Exception: If LiteLLM completion fails
        """
        # Convert messages to LiteLLM format
        litellm_messages = [
            {"role": msg.role, "content": msg.content} for msg in messages
        ]

        # Merge kwargs
        completion_kwargs = {**self._litellm_kwargs, **kwargs}

        try:
            # Call LiteLLM
            response = litellm.completion(
                model=self._format_model_name(),
                messages=litellm_messages,
                **completion_kwargs,
            )

            # Extract content
            content = response.choices[0].message.content or ""

            return LLMResponse(content=content, raw=response)

        except Exception as e:
            logger.error(f"LiteLLM completion failed: {e}")
            raise

    def complete(self, prompt: str, **kwargs: Any) -> LLMResponse:
        """Text completion for a prompt.

        Args:
            prompt: Input prompt
            **kwargs: Additional arguments for litellm.completion()

        Returns:
            LLM response with content and raw response

        Raises:
            Exception: If LiteLLM completion fails
        """
        # Convert to chat format (most modern models use chat interface)
        messages = [ChatMessage(role="user", content=prompt)]
        return self.chat(messages, **kwargs)

    @property
    def model_name(self) -> str:
        """Get configured model name."""
        return self._model_name

    @property
    def provider(self) -> str:
        """Get configured provider name."""
        return self._model_config.provider
ccw-litellm/src/ccw_litellm/config/__init__.py (new file, 22 lines)
@@ -0,0 +1,22 @@

"""Configuration management for LiteLLM integration."""

from __future__ import annotations

from .loader import get_config, load_config, reset_config
from .models import (
    EmbeddingModelConfig,
    LiteLLMConfig,
    LLMModelConfig,
    ProviderConfig,
)

__all__ = [
    "LiteLLMConfig",
    "ProviderConfig",
    "LLMModelConfig",
    "EmbeddingModelConfig",
    "load_config",
    "get_config",
    "reset_config",
]
ccw-litellm/src/ccw_litellm/config/loader.py (new file, 316 lines)
@@ -0,0 +1,316 @@

"""Configuration loader with environment variable substitution."""

from __future__ import annotations

import json
import os
import re
from pathlib import Path
from typing import Any

import yaml

from .models import LiteLLMConfig

# Default configuration paths
# JSON format (UI config) takes priority over YAML format
DEFAULT_JSON_CONFIG_PATH = Path.home() / ".ccw" / "config" / "litellm-api-config.json"
DEFAULT_YAML_CONFIG_PATH = Path.home() / ".ccw" / "config" / "litellm-config.yaml"
# Keep backward compatibility
DEFAULT_CONFIG_PATH = DEFAULT_YAML_CONFIG_PATH

# Global configuration singleton
_config_instance: LiteLLMConfig | None = None


def _substitute_env_vars(value: Any) -> Any:
    """Recursively substitute environment variables in configuration values.

    Supports ${ENV_VAR} and ${ENV_VAR:-default} syntax.

    Args:
        value: Configuration value (str, dict, list, or primitive)

    Returns:
        Value with environment variables substituted
    """
    if isinstance(value, str):
        # Pattern: ${VAR} or ${VAR:-default}
        pattern = r"\$\{([^:}]+)(?::-(.*?))?\}"

        def replace_var(match: re.Match) -> str:
            var_name = match.group(1)
            default_value = match.group(2) if match.group(2) is not None else ""
            return os.environ.get(var_name, default_value)

        return re.sub(pattern, replace_var, value)

    if isinstance(value, dict):
        return {k: _substitute_env_vars(v) for k, v in value.items()}

    if isinstance(value, list):
        return [_substitute_env_vars(item) for item in value]

    return value


def _get_default_config() -> dict[str, Any]:
    """Get default configuration when no config file exists.

    Returns:
        Default configuration dictionary
    """
    return {
        "version": 1,
        "default_provider": "openai",
        "providers": {
            "openai": {
                "api_key": "${OPENAI_API_KEY}",
                "api_base": "https://api.openai.com/v1",
            },
        },
        "llm_models": {
            "default": {
                "provider": "openai",
                "model": "gpt-4",
            },
            "fast": {
                "provider": "openai",
                "model": "gpt-3.5-turbo",
            },
        },
        "embedding_models": {
            "default": {
                "provider": "openai",
                "model": "text-embedding-3-small",
                "dimensions": 1536,
            },
        },
    }


def _convert_json_to_internal_format(json_config: dict[str, Any]) -> dict[str, Any]:
    """Convert UI JSON config format to internal format.

    The UI stores config in a different structure:
    - providers: array of {id, name, type, apiKey, apiBase, llmModels[], embeddingModels[]}

    Internal format uses:
    - providers: dict of {provider_id: {api_key, api_base}}
    - llm_models: dict of {model_id: {provider, model}}
    - embedding_models: dict of {model_id: {provider, model, dimensions}}

    Args:
        json_config: Configuration in UI JSON format

    Returns:
        Configuration in internal format
    """
    providers: dict[str, Any] = {}
    llm_models: dict[str, Any] = {}
    embedding_models: dict[str, Any] = {}
    default_provider: str | None = None

    for provider in json_config.get("providers", []):
        if not provider.get("enabled", True):
            continue

        provider_id = provider.get("id", "")
        if not provider_id:
            continue

        # Set first enabled provider as default
        if default_provider is None:
            default_provider = provider_id

        # Convert provider with advanced settings
        provider_config: dict[str, Any] = {
            "api_key": provider.get("apiKey", ""),
            "api_base": provider.get("apiBase"),
        }

        # Map advanced settings
        adv = provider.get("advancedSettings", {})
        if adv.get("timeout"):
            provider_config["timeout"] = adv["timeout"]
        if adv.get("maxRetries"):
            provider_config["max_retries"] = adv["maxRetries"]
        if adv.get("organization"):
            provider_config["organization"] = adv["organization"]
        if adv.get("apiVersion"):
            provider_config["api_version"] = adv["apiVersion"]
        if adv.get("customHeaders"):
            provider_config["custom_headers"] = adv["customHeaders"]

        providers[provider_id] = provider_config

        # Convert LLM models
        for model in provider.get("llmModels", []):
            if not model.get("enabled", True):
                continue
            model_id = model.get("id", "")
            if not model_id:
                continue

            llm_model_config: dict[str, Any] = {
                "provider": provider_id,
                "model": model.get("name", ""),
            }
            # Add model-specific endpoint settings
            endpoint = model.get("endpointSettings", {})
            if endpoint.get("baseUrl"):
                llm_model_config["api_base"] = endpoint["baseUrl"]
            if endpoint.get("timeout"):
                llm_model_config["timeout"] = endpoint["timeout"]
            if endpoint.get("maxRetries"):
                llm_model_config["max_retries"] = endpoint["maxRetries"]

            # Add capabilities
            caps = model.get("capabilities", {})
            if caps.get("contextWindow"):
                llm_model_config["context_window"] = caps["contextWindow"]
            if caps.get("maxOutputTokens"):
                llm_model_config["max_output_tokens"] = caps["maxOutputTokens"]

            llm_models[model_id] = llm_model_config

        # Convert embedding models
        for model in provider.get("embeddingModels", []):
            if not model.get("enabled", True):
                continue
            model_id = model.get("id", "")
            if not model_id:
                continue

            embedding_model_config: dict[str, Any] = {
                "provider": provider_id,
                "model": model.get("name", ""),
                "dimensions": model.get("capabilities", {}).get("embeddingDimension", 1536),
            }
            # Add model-specific endpoint settings
            endpoint = model.get("endpointSettings", {})
            if endpoint.get("baseUrl"):
                embedding_model_config["api_base"] = endpoint["baseUrl"]
            if endpoint.get("timeout"):
                embedding_model_config["timeout"] = endpoint["timeout"]

            embedding_models[model_id] = embedding_model_config

    # Ensure we have defaults if no models found
    if not llm_models:
        llm_models["default"] = {
            "provider": default_provider or "openai",
            "model": "gpt-4",
        }

    if not embedding_models:
        embedding_models["default"] = {
            "provider": default_provider or "openai",
            "model": "text-embedding-3-small",
            "dimensions": 1536,
        }

    return {
        "version": json_config.get("version", 1),
        "default_provider": default_provider or "openai",
        "providers": providers,
        "llm_models": llm_models,
        "embedding_models": embedding_models,
    }


def load_config(config_path: Path | str | None = None) -> LiteLLMConfig:
    """Load LiteLLM configuration from JSON or YAML file.

    Priority order:
    1. Explicit config_path if provided
    2. JSON config (UI format): ~/.ccw/config/litellm-api-config.json
    3. YAML config: ~/.ccw/config/litellm-config.yaml
    4. Default configuration

    Args:
        config_path: Path to configuration file (optional)

    Returns:
        Parsed and validated configuration

    Raises:
        FileNotFoundError: If config file not found and no default available
        ValueError: If configuration is invalid
    """
    raw_config: dict[str, Any] | None = None
    is_json_format = False

    if config_path is not None:
        config_path = Path(config_path)
        if config_path.exists():
            try:
                with open(config_path, "r", encoding="utf-8") as f:
                    if config_path.suffix == ".json":
                        raw_config = json.load(f)
                        is_json_format = True
                    else:
                        raw_config = yaml.safe_load(f)
            except Exception as e:
                raise ValueError(f"Failed to load configuration from {config_path}: {e}") from e

    # Check JSON config first (UI format)
    if raw_config is None and DEFAULT_JSON_CONFIG_PATH.exists():
        try:
            with open(DEFAULT_JSON_CONFIG_PATH, "r", encoding="utf-8") as f:
                raw_config = json.load(f)
                is_json_format = True
        except Exception:
            pass  # Fall through to YAML

    # Check YAML config
    if raw_config is None and DEFAULT_YAML_CONFIG_PATH.exists():
        try:
            with open(DEFAULT_YAML_CONFIG_PATH, "r", encoding="utf-8") as f:
                raw_config = yaml.safe_load(f)
        except Exception:
            pass  # Fall through to default

    # Use default configuration
    if raw_config is None:
        raw_config = _get_default_config()

    # Convert JSON format to internal format if needed
    if is_json_format:
        raw_config = _convert_json_to_internal_format(raw_config)

    # Substitute environment variables
    config_data = _substitute_env_vars(raw_config)

    # Validate and parse with Pydantic
    try:
        return LiteLLMConfig.model_validate(config_data)
    except Exception as e:
        raise ValueError(f"Invalid configuration: {e}") from e


def get_config(config_path: Path | str | None = None, reload: bool = False) -> LiteLLMConfig:
    """Get global configuration singleton.

    Args:
        config_path: Path to configuration file (default: ~/.ccw/config/litellm-config.yaml)
        reload: Force reload configuration from disk

    Returns:
        Global configuration instance
    """
    global _config_instance

    if _config_instance is None or reload:
        _config_instance = load_config(config_path)

    return _config_instance


def reset_config() -> None:
    """Reset global configuration singleton.

    Useful for testing.
    """
    global _config_instance
    _config_instance = None
ccw-litellm/src/ccw_litellm/config/models.py (new file, 130 lines)
@@ -0,0 +1,130 @@

"""Pydantic configuration models for LiteLLM integration."""

from __future__ import annotations

from typing import Any

from pydantic import BaseModel, Field


class ProviderConfig(BaseModel):
    """Provider API configuration.

    Supports environment variable substitution in the format ${ENV_VAR}.
    """

    api_key: str | None = None
    api_base: str | None = None

    model_config = {"extra": "allow"}


class LLMModelConfig(BaseModel):
    """LLM model configuration."""

    provider: str
    model: str

    model_config = {"extra": "allow"}


class EmbeddingModelConfig(BaseModel):
    """Embedding model configuration."""

    provider: str  # "openai", "fastembed", "ollama", etc.
    model: str
    dimensions: int

    model_config = {"extra": "allow"}


class LiteLLMConfig(BaseModel):
    """Root configuration for LiteLLM integration.

    Example YAML:
        version: 1
        default_provider: openai
        providers:
          openai:
            api_key: ${OPENAI_API_KEY}
            api_base: https://api.openai.com/v1
          anthropic:
            api_key: ${ANTHROPIC_API_KEY}
        llm_models:
          default:
            provider: openai
            model: gpt-4
          fast:
            provider: openai
            model: gpt-3.5-turbo
        embedding_models:
          default:
            provider: openai
            model: text-embedding-3-small
            dimensions: 1536
    """

    version: int = 1
    default_provider: str = "openai"
    providers: dict[str, ProviderConfig] = Field(default_factory=dict)
    llm_models: dict[str, LLMModelConfig] = Field(default_factory=dict)
    embedding_models: dict[str, EmbeddingModelConfig] = Field(default_factory=dict)

    model_config = {"extra": "allow"}

    def get_llm_model(self, model: str = "default") -> LLMModelConfig:
        """Get LLM model configuration by name.

        Args:
            model: Model name or "default"

        Returns:
            LLM model configuration

        Raises:
            ValueError: If model not found
        """
        if model not in self.llm_models:
            raise ValueError(
                f"LLM model '{model}' not found in configuration. "
                f"Available models: {list(self.llm_models.keys())}"
            )
        return self.llm_models[model]

    def get_embedding_model(self, model: str = "default") -> EmbeddingModelConfig:
        """Get embedding model configuration by name.

        Args:
            model: Model name or "default"

        Returns:
            Embedding model configuration

        Raises:
            ValueError: If model not found
        """
        if model not in self.embedding_models:
            raise ValueError(
                f"Embedding model '{model}' not found in configuration. "
                f"Available models: {list(self.embedding_models.keys())}"
            )
        return self.embedding_models[model]

    def get_provider(self, provider: str) -> ProviderConfig:
        """Get provider configuration by name.

        Args:
            provider: Provider name

        Returns:
            Provider configuration

        Raises:
            ValueError: If provider not found
        """
        if provider not in self.providers:
            raise ValueError(
                f"Provider '{provider}' not found in configuration. "
                f"Available providers: {list(self.providers.keys())}"
            )
        return self.providers[provider]
14
ccw-litellm/src/ccw_litellm/interfaces/__init__.py
Normal file
14
ccw-litellm/src/ccw_litellm/interfaces/__init__.py
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
"""Abstract interfaces for ccw-litellm."""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from .embedder import AbstractEmbedder
|
||||||
|
from .llm import AbstractLLMClient, ChatMessage, LLMResponse
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
"AbstractEmbedder",
|
||||||
|
"AbstractLLMClient",
|
||||||
|
"ChatMessage",
|
||||||
|
"LLMResponse",
|
||||||
|
]
|
||||||
|
|
||||||
52
ccw-litellm/src/ccw_litellm/interfaces/embedder.py
Normal file
52
ccw-litellm/src/ccw_litellm/interfaces/embedder.py
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import Any, Sequence
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from numpy.typing import NDArray
|
||||||
|
|
||||||
|
|
||||||
|
class AbstractEmbedder(ABC):
|
||||||
|
"""Embedding interface compatible with fastembed-style embedders.
|
||||||
|
|
||||||
|
Implementers only need to provide the synchronous `embed` method; an
|
||||||
|
asynchronous `aembed` wrapper is provided for convenience.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@property
|
||||||
|
@abstractmethod
|
||||||
|
def dimensions(self) -> int:
|
||||||
|
"""Embedding vector size."""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def embed(
|
||||||
|
self,
|
||||||
|
texts: str | Sequence[str],
|
||||||
|
*,
|
||||||
|
batch_size: int | None = None,
|
||||||
|
**kwargs: Any,
|
||||||
|
) -> NDArray[np.floating]:
|
||||||
|
"""Embed one or more texts.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
A numpy array of shape (n_texts, dimensions).
|
||||||
|
"""
|
||||||
|
|
||||||
|
async def aembed(
|
||||||
|
self,
|
||||||
|
texts: str | Sequence[str],
|
||||||
|
*,
|
||||||
|
batch_size: int | None = None,
|
||||||
|
**kwargs: Any,
|
||||||
|
) -> NDArray[np.floating]:
|
||||||
|
"""Async wrapper around `embed` using a worker thread by default."""
|
||||||
|
|
||||||
|
return await asyncio.to_thread(
|
||||||
|
self.embed,
|
||||||
|
texts,
|
||||||
|
batch_size=batch_size,
|
||||||
|
**kwargs,
|
||||||
|
)
|
||||||
|
|
||||||
45
ccw-litellm/src/ccw_litellm/interfaces/llm.py
Normal file
45
ccw-litellm/src/ccw_litellm/interfaces/llm.py
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from typing import Any, Literal, Sequence
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(frozen=True, slots=True)
|
||||||
|
class ChatMessage:
|
||||||
|
role: Literal["system", "user", "assistant", "tool"]
|
||||||
|
content: str
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(frozen=True, slots=True)
|
||||||
|
class LLMResponse:
|
||||||
|
content: str
|
||||||
|
raw: Any | None = None
|
||||||
|
|
||||||
|
|
||||||
|
class AbstractLLMClient(ABC):
|
||||||
|
"""LiteLLM-like client interface.
|
||||||
|
|
||||||
|
Implementers only need to provide synchronous methods; async wrappers are
|
||||||
|
provided via `asyncio.to_thread`.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> LLMResponse:
|
||||||
|
"""Chat completion for a sequence of messages."""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def complete(self, prompt: str, **kwargs: Any) -> LLMResponse:
|
||||||
|
"""Text completion for a prompt."""
|
||||||
|
|
||||||
|
async def achat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> LLMResponse:
|
||||||
|
"""Async wrapper around `chat` using a worker thread by default."""
|
||||||
|
|
||||||
|
return await asyncio.to_thread(self.chat, messages, **kwargs)
|
||||||
|
|
||||||
|
async def acomplete(self, prompt: str, **kwargs: Any) -> LLMResponse:
|
||||||
|
"""Async wrapper around `complete` using a worker thread by default."""
|
||||||
|
|
||||||
|
return await asyncio.to_thread(self.complete, prompt, **kwargs)
|
||||||
|
|
||||||
11
ccw-litellm/tests/conftest.py
Normal file
11
ccw-litellm/tests/conftest.py
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
|
||||||
|
def pytest_configure() -> None:
|
||||||
|
project_root = Path(__file__).resolve().parents[1]
|
||||||
|
src_dir = project_root / "src"
|
||||||
|
sys.path.insert(0, str(src_dir))
|
||||||
|
|
||||||
64
ccw-litellm/tests/test_interfaces.py
Normal file
64
ccw-litellm/tests/test_interfaces.py
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
from typing import Any, Sequence
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from ccw_litellm.interfaces import AbstractEmbedder, AbstractLLMClient, ChatMessage, LLMResponse
|
||||||
|
|
||||||
|
|
||||||
|
class _DummyEmbedder(AbstractEmbedder):
|
||||||
|
@property
|
||||||
|
def dimensions(self) -> int:
|
||||||
|
return 3
|
||||||
|
|
||||||
|
def embed(
|
||||||
|
self,
|
||||||
|
texts: str | Sequence[str],
|
||||||
|
*,
|
||||||
|
batch_size: int | None = None,
|
||||||
|
**kwargs: Any,
|
||||||
|
) -> np.ndarray:
|
||||||
|
if isinstance(texts, str):
|
||||||
|
texts = [texts]
|
||||||
|
_ = batch_size
|
||||||
|
_ = kwargs
|
||||||
|
return np.zeros((len(texts), self.dimensions), dtype=np.float32)
|
||||||
|
|
||||||
|
|
||||||
|
class _DummyLLM(AbstractLLMClient):
|
||||||
|
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> LLMResponse:
|
||||||
|
_ = kwargs
|
||||||
|
return LLMResponse(content="".join(m.content for m in messages))
|
||||||
|
|
||||||
|
def complete(self, prompt: str, **kwargs: Any) -> LLMResponse:
|
||||||
|
_ = kwargs
|
||||||
|
return LLMResponse(content=prompt)
|
||||||
|
|
||||||
|
|
||||||
|
def test_embed_sync_shape_and_dtype() -> None:
|
||||||
|
emb = _DummyEmbedder()
|
||||||
|
out = emb.embed(["a", "b"])
|
||||||
|
assert out.shape == (2, 3)
|
||||||
|
assert out.dtype == np.float32
|
||||||
|
|
||||||
|
|
||||||
|
def test_embed_async_wrapper() -> None:
|
||||||
|
emb = _DummyEmbedder()
|
||||||
|
out = asyncio.run(emb.aembed("x"))
|
||||||
|
assert out.shape == (1, 3)
|
||||||
|
|
||||||
|
|
||||||
|
def test_llm_sync() -> None:
|
||||||
|
llm = _DummyLLM()
|
||||||
|
out = llm.chat([ChatMessage(role="user", content="hi")])
|
||||||
|
assert out == LLMResponse(content="hi")
|
||||||
|
|
||||||
|
|
||||||
|
def test_llm_async_wrappers() -> None:
|
||||||
|
llm = _DummyLLM()
|
||||||
|
out1 = asyncio.run(llm.achat([ChatMessage(role="user", content="a")]))
|
||||||
|
out2 = asyncio.run(llm.acomplete("b"))
|
||||||
|
assert out1.content == "a"
|
||||||
|
assert out2.content == "b"
|
||||||
15
ccw/.npmignore
Normal file
15
ccw/.npmignore
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
# npm ignore file - overrides .gitignore for npm publish
|
||||||
|
# dist/ is NOT excluded here so it gets published
|
||||||
|
|
||||||
|
# Development files
|
||||||
|
node_modules/
|
||||||
|
*.log
|
||||||
|
*.tmp
|
||||||
|
|
||||||
|
# Test files
|
||||||
|
tests/
|
||||||
|
*.test.js
|
||||||
|
*.spec.js
|
||||||
|
|
||||||
|
# TypeScript source maps (optional, can keep for debugging)
|
||||||
|
# *.map
|
||||||
308
ccw/LITELLM_INTEGRATION.md
Normal file
308
ccw/LITELLM_INTEGRATION.md
Normal file
@@ -0,0 +1,308 @@
|
|||||||
|
# LiteLLM Integration Guide
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
CCW now supports custom LiteLLM endpoints with integrated context caching. You can configure multiple providers (OpenAI, Anthropic, Ollama, etc.) and create custom endpoints with file-based caching strategies.
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─────────────────────────────────────────────────────────────┐
|
||||||
|
│ CLI Executor │
|
||||||
|
│ │
|
||||||
|
│ ┌─────────────┐ ┌──────────────────────────────┐ │
|
||||||
|
│ │ --model │────────>│ Route Decision: │ │
|
||||||
|
│ │ flag │ │ - gemini/qwen/codex → CLI │ │
|
||||||
|
│ └─────────────┘ │ - custom ID → LiteLLM │ │
|
||||||
|
│ └──────────────────────────────┘ │
|
||||||
|
└─────────────────────────────────────────────────────────────┘
|
||||||
|
│
|
||||||
|
▼
|
||||||
|
┌─────────────────────────────────────────────────────────────┐
|
||||||
|
│ LiteLLM Executor │
|
||||||
|
│ │
|
||||||
|
│ 1. Load endpoint config (litellm-api-config.json) │
|
||||||
|
│ 2. Extract @patterns from prompt │
|
||||||
|
│ 3. Pack files via context-cache │
|
||||||
|
│ 4. Call LiteLLM client with cached content + prompt │
|
||||||
|
│ 5. Return result │
|
||||||
|
└─────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
### File Location
|
||||||
|
|
||||||
|
Configuration is stored per-project:
|
||||||
|
```
|
||||||
|
<project>/.ccw/storage/config/litellm-api-config.json
|
||||||
|
```
|
||||||
|
|
||||||
|
### Configuration Structure
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"version": 1,
|
||||||
|
"providers": [
|
||||||
|
{
|
||||||
|
"id": "openai-1234567890",
|
||||||
|
"name": "My OpenAI",
|
||||||
|
"type": "openai",
|
||||||
|
"apiKey": "${OPENAI_API_KEY}",
|
||||||
|
"enabled": true,
|
||||||
|
"createdAt": "2025-01-01T00:00:00.000Z",
|
||||||
|
"updatedAt": "2025-01-01T00:00:00.000Z"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"endpoints": [
|
||||||
|
{
|
||||||
|
"id": "my-gpt4o",
|
||||||
|
"name": "GPT-4o with Context Cache",
|
||||||
|
"providerId": "openai-1234567890",
|
||||||
|
"model": "gpt-4o",
|
||||||
|
"description": "GPT-4o with automatic file caching",
|
||||||
|
"cacheStrategy": {
|
||||||
|
"enabled": true,
|
||||||
|
"ttlMinutes": 60,
|
||||||
|
"maxSizeKB": 512,
|
||||||
|
"filePatterns": ["*.md", "*.ts", "*.js"]
|
||||||
|
},
|
||||||
|
"enabled": true,
|
||||||
|
"createdAt": "2025-01-01T00:00:00.000Z",
|
||||||
|
"updatedAt": "2025-01-01T00:00:00.000Z"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"defaultEndpoint": "my-gpt4o",
|
||||||
|
"globalCacheSettings": {
|
||||||
|
"enabled": true,
|
||||||
|
"cacheDir": "~/.ccw/cache/context",
|
||||||
|
"maxTotalSizeMB": 100
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Via CLI
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Use custom endpoint with --model flag
|
||||||
|
ccw cli -p "Analyze authentication flow" --tool litellm --model my-gpt4o
|
||||||
|
|
||||||
|
# With context patterns (automatically cached)
|
||||||
|
ccw cli -p "@src/auth/**/*.ts Review security" --tool litellm --model my-gpt4o
|
||||||
|
|
||||||
|
# Disable caching for specific call
|
||||||
|
ccw cli -p "Quick question" --tool litellm --model my-gpt4o --no-cache
|
||||||
|
```
|
||||||
|
|
||||||
|
### Via Dashboard API
|
||||||
|
|
||||||
|
#### Create Provider
|
||||||
|
```bash
|
||||||
|
curl -X POST http://localhost:3000/api/litellm-api/providers \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d '{
|
||||||
|
"name": "My OpenAI",
|
||||||
|
"type": "openai",
|
||||||
|
"apiKey": "${OPENAI_API_KEY}",
|
||||||
|
"enabled": true
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Create Endpoint
|
||||||
|
```bash
|
||||||
|
curl -X POST http://localhost:3000/api/litellm-api/endpoints \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d '{
|
||||||
|
"id": "my-gpt4o",
|
||||||
|
"name": "GPT-4o with Cache",
|
||||||
|
"providerId": "openai-1234567890",
|
||||||
|
"model": "gpt-4o",
|
||||||
|
"cacheStrategy": {
|
||||||
|
"enabled": true,
|
||||||
|
"ttlMinutes": 60,
|
||||||
|
"maxSizeKB": 512,
|
||||||
|
"filePatterns": ["*.md", "*.ts"]
|
||||||
|
},
|
||||||
|
"enabled": true
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Test Provider Connection
|
||||||
|
```bash
|
||||||
|
curl -X POST http://localhost:3000/api/litellm-api/providers/openai-1234567890/test
|
||||||
|
```
|
||||||
|
|
||||||
|
## Context Caching
|
||||||
|
|
||||||
|
### How It Works
|
||||||
|
|
||||||
|
1. **Pattern Detection**: LiteLLM executor scans prompt for `@patterns`
|
||||||
|
```
|
||||||
|
@src/**/*.ts
|
||||||
|
@CLAUDE.md
|
||||||
|
@../shared/**/*
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **File Packing**: Files matching patterns are packed via `context-cache` tool
|
||||||
|
- Respects `max_file_size` limit (default: 1MB per file)
|
||||||
|
- Applies TTL from endpoint config
|
||||||
|
- Generates session ID for retrieval
|
||||||
|
|
||||||
|
3. **Cache Integration**: Cached content is prepended to prompt
|
||||||
|
```
|
||||||
|
<cached files>
|
||||||
|
---
|
||||||
|
<original prompt>
|
||||||
|
```
|
||||||
|
|
||||||
|
4. **LLM Call**: Combined prompt sent to LiteLLM with provider credentials
|
||||||
|
|
||||||
|
### Cache Strategy Configuration
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
interface CacheStrategy {
|
||||||
|
enabled: boolean; // Enable/disable caching for this endpoint
|
||||||
|
ttlMinutes: number; // Cache lifetime (default: 60)
|
||||||
|
maxSizeKB: number; // Max cache size (default: 512KB)
|
||||||
|
filePatterns: string[]; // Glob patterns to cache
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example: Security Audit with Cache
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ccw cli -p "
|
||||||
|
PURPOSE: OWASP Top 10 security audit of authentication module
|
||||||
|
TASK: • Check SQL injection • Verify session management • Test XSS vectors
|
||||||
|
CONTEXT: @src/auth/**/*.ts @src/middleware/auth.ts
|
||||||
|
EXPECTED: Security report with severity levels and remediation steps
|
||||||
|
" --tool litellm --model my-security-scanner --mode analysis
|
||||||
|
```
|
||||||
|
|
||||||
|
**What happens:**
|
||||||
|
1. Executor detects `@src/auth/**/*.ts` and `@src/middleware/auth.ts`
|
||||||
|
2. Packs matching files into context cache
|
||||||
|
3. Cache entry valid for 60 minutes (per endpoint config)
|
||||||
|
4. Subsequent calls reuse cached files (no re-packing)
|
||||||
|
5. LiteLLM receives full context without manual file specification
|
||||||
|
|
||||||
|
## Environment Variables
|
||||||
|
|
||||||
|
### Provider API Keys
|
||||||
|
|
||||||
|
LiteLLM uses standard environment variable names:
|
||||||
|
|
||||||
|
| Provider | Env Var Name |
|
||||||
|
|------------|-----------------------|
|
||||||
|
| OpenAI | `OPENAI_API_KEY` |
|
||||||
|
| Anthropic | `ANTHROPIC_API_KEY` |
|
||||||
|
| Google | `GOOGLE_API_KEY` |
|
||||||
|
| Azure | `AZURE_API_KEY` |
|
||||||
|
| Mistral | `MISTRAL_API_KEY` |
|
||||||
|
| DeepSeek | `DEEPSEEK_API_KEY` |
|
||||||
|
|
||||||
|
### Configuration Syntax
|
||||||
|
|
||||||
|
Use `${ENV_VAR}` syntax in config:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"apiKey": "${OPENAI_API_KEY}"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The executor resolves these at runtime via `resolveEnvVar()`.
|
||||||
|
|
||||||
|
## API Reference
|
||||||
|
|
||||||
|
### Config Manager (`litellm-api-config-manager.ts`)
|
||||||
|
|
||||||
|
#### Provider Management
|
||||||
|
```typescript
|
||||||
|
getAllProviders(baseDir: string): ProviderCredential[]
|
||||||
|
getProvider(baseDir: string, providerId: string): ProviderCredential | null
|
||||||
|
getProviderWithResolvedEnvVars(baseDir: string, providerId: string): ProviderCredential & { resolvedApiKey: string } | null
|
||||||
|
addProvider(baseDir: string, providerData): ProviderCredential
|
||||||
|
updateProvider(baseDir: string, providerId: string, updates): ProviderCredential
|
||||||
|
deleteProvider(baseDir: string, providerId: string): boolean
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Endpoint Management
|
||||||
|
```typescript
|
||||||
|
getAllEndpoints(baseDir: string): CustomEndpoint[]
|
||||||
|
getEndpoint(baseDir: string, endpointId: string): CustomEndpoint | null
|
||||||
|
findEndpointById(baseDir: string, endpointId: string): CustomEndpoint | null
|
||||||
|
addEndpoint(baseDir: string, endpointData): CustomEndpoint
|
||||||
|
updateEndpoint(baseDir: string, endpointId: string, updates): CustomEndpoint
|
||||||
|
deleteEndpoint(baseDir: string, endpointId: string): boolean
|
||||||
|
```
|
||||||
|
|
||||||
|
### Executor (`litellm-executor.ts`)
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
interface LiteLLMExecutionOptions {
|
||||||
|
prompt: string;
|
||||||
|
endpointId: string;
|
||||||
|
baseDir: string;
|
||||||
|
cwd?: string;
|
||||||
|
includeDirs?: string[];
|
||||||
|
enableCache?: boolean;
|
||||||
|
onOutput?: (data: { type: string; data: string }) => void;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface LiteLLMExecutionResult {
|
||||||
|
success: boolean;
|
||||||
|
output: string;
|
||||||
|
model: string;
|
||||||
|
provider: string;
|
||||||
|
cacheUsed: boolean;
|
||||||
|
cachedFiles?: string[];
|
||||||
|
error?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
executeLiteLLMEndpoint(options: LiteLLMExecutionOptions): Promise<LiteLLMExecutionResult>
|
||||||
|
extractPatterns(prompt: string): string[]
|
||||||
|
```
|
||||||
|
|
||||||
|
## Dashboard Integration
|
||||||
|
|
||||||
|
The dashboard provides UI for managing LiteLLM configuration:
|
||||||
|
|
||||||
|
- **Providers**: Add/edit/delete provider credentials
|
||||||
|
- **Endpoints**: Configure custom endpoints with cache strategies
|
||||||
|
- **Cache Stats**: View cache usage and clear entries
|
||||||
|
- **Test Connections**: Verify provider API access
|
||||||
|
|
||||||
|
Routes are handled by `litellm-api-routes.ts`.
|
||||||
|
|
||||||
|
## Limitations
|
||||||
|
|
||||||
|
1. **Python Dependency**: Requires `ccw-litellm` Python package installed
|
||||||
|
2. **Model Support**: Limited to models supported by LiteLLM library
|
||||||
|
3. **Cache Scope**: Context cache is in-memory (not persisted across restarts)
|
||||||
|
4. **Pattern Syntax**: Only supports glob-style `@patterns`, not regex
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Error: "Endpoint not found"
|
||||||
|
- Verify endpoint ID matches config file
|
||||||
|
- Check `litellm-api-config.json` exists in `.ccw/storage/config/`
|
||||||
|
|
||||||
|
### Error: "API key not configured"
|
||||||
|
- Ensure environment variable is set
|
||||||
|
- Verify `${ENV_VAR}` syntax in config
|
||||||
|
- Test with `echo $OPENAI_API_KEY`
|
||||||
|
|
||||||
|
### Error: "Failed to spawn Python process"
|
||||||
|
- Install ccw-litellm: `pip install ccw-litellm`
|
||||||
|
- Verify Python accessible: `python --version`
|
||||||
|
|
||||||
|
### Cache Not Applied
|
||||||
|
- Check endpoint has `cacheStrategy.enabled: true`
|
||||||
|
- Verify prompt contains `@patterns`
|
||||||
|
- Check cache TTL hasn't expired
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
See `examples/litellm-config.json` for complete configuration template.
|
||||||
77
ccw/examples/litellm-usage.ts
Normal file
77
ccw/examples/litellm-usage.ts
Normal file
@@ -0,0 +1,77 @@
|
|||||||
|
/**
|
||||||
|
* LiteLLM Usage Examples
|
||||||
|
* Demonstrates how to use the LiteLLM TypeScript client
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { getLiteLLMClient, getLiteLLMStatus } from '../src/tools/litellm-client';
|
||||||
|
|
||||||
|
async function main() {
|
||||||
|
console.log('=== LiteLLM TypeScript Bridge Examples ===\n');
|
||||||
|
|
||||||
|
// Example 1: Check availability
|
||||||
|
console.log('1. Checking LiteLLM availability...');
|
||||||
|
const status = await getLiteLLMStatus();
|
||||||
|
console.log(' Status:', status);
|
||||||
|
console.log('');
|
||||||
|
|
||||||
|
if (!status.available) {
|
||||||
|
console.log('❌ LiteLLM is not available. Please install ccw-litellm:');
|
||||||
|
console.log(' pip install ccw-litellm');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const client = getLiteLLMClient();
|
||||||
|
|
||||||
|
// Example 2: Get configuration
|
||||||
|
console.log('2. Getting configuration...');
|
||||||
|
try {
|
||||||
|
const config = await client.getConfig();
|
||||||
|
console.log(' Config:', config);
|
||||||
|
} catch (error) {
|
||||||
|
console.log(' Error:', error.message);
|
||||||
|
}
|
||||||
|
console.log('');
|
||||||
|
|
||||||
|
// Example 3: Generate embeddings
|
||||||
|
console.log('3. Generating embeddings...');
|
||||||
|
try {
|
||||||
|
const texts = ['Hello world', 'Machine learning is amazing'];
|
||||||
|
const embedResult = await client.embed(texts, 'default');
|
||||||
|
console.log(' Dimensions:', embedResult.dimensions);
|
||||||
|
console.log(' Vectors count:', embedResult.vectors.length);
|
||||||
|
console.log(' First vector (first 5 dims):', embedResult.vectors[0]?.slice(0, 5));
|
||||||
|
} catch (error) {
|
||||||
|
console.log(' Error:', error.message);
|
||||||
|
}
|
||||||
|
console.log('');
|
||||||
|
|
||||||
|
// Example 4: Single message chat
|
||||||
|
console.log('4. Single message chat...');
|
||||||
|
try {
|
||||||
|
const response = await client.chat('What is 2+2?', 'default');
|
||||||
|
console.log(' Response:', response);
|
||||||
|
} catch (error) {
|
||||||
|
console.log(' Error:', error.message);
|
||||||
|
}
|
||||||
|
console.log('');
|
||||||
|
|
||||||
|
// Example 5: Multi-turn chat
|
||||||
|
console.log('5. Multi-turn chat...');
|
||||||
|
try {
|
||||||
|
const chatResponse = await client.chatMessages([
|
||||||
|
{ role: 'system', content: 'You are a helpful math tutor.' },
|
||||||
|
{ role: 'user', content: 'What is the Pythagorean theorem?' }
|
||||||
|
], 'default');
|
||||||
|
console.log(' Content:', chatResponse.content);
|
||||||
|
console.log(' Model:', chatResponse.model);
|
||||||
|
console.log(' Usage:', chatResponse.usage);
|
||||||
|
} catch (error) {
|
||||||
|
console.log(' Error:', error.message);
|
||||||
|
}
|
||||||
|
console.log('');
|
||||||
|
|
||||||
|
console.log('=== Examples completed ===');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run examples
|
||||||
|
main().catch(console.error);
|
||||||
3854
ccw/package-lock.json
generated
3854
ccw/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -1,65 +0,0 @@
|
|||||||
{
|
|
||||||
"name": "ccw",
|
|
||||||
"version": "6.2.0",
|
|
||||||
"description": "Claude Code Workflow CLI - Dashboard viewer for workflow sessions and reviews",
|
|
||||||
"type": "module",
|
|
||||||
"main": "dist/index.js",
|
|
||||||
"types": "dist/index.d.ts",
|
|
||||||
"bin": {
|
|
||||||
"ccw": "./bin/ccw.js",
|
|
||||||
"ccw-mcp": "./bin/ccw-mcp.js"
|
|
||||||
},
|
|
||||||
"scripts": {
|
|
||||||
"build": "tsc",
|
|
||||||
"dev": "tsx watch src/cli.ts",
|
|
||||||
"test": "node --test tests/*.test.js",
|
|
||||||
"test:codexlens": "node --test tests/codex-lens*.test.js",
|
|
||||||
"test:mcp": "node --test tests/mcp-server.test.js",
|
|
||||||
"lint": "eslint src/"
|
|
||||||
},
|
|
||||||
"keywords": [
|
|
||||||
"claude",
|
|
||||||
"workflow",
|
|
||||||
"cli",
|
|
||||||
"dashboard",
|
|
||||||
"code-review"
|
|
||||||
],
|
|
||||||
"author": "Claude Code Workflow",
|
|
||||||
"license": "MIT",
|
|
||||||
"engines": {
|
|
||||||
"node": ">=16.0.0"
|
|
||||||
},
|
|
||||||
"dependencies": {
|
|
||||||
"@modelcontextprotocol/sdk": "^1.0.4",
|
|
||||||
"better-sqlite3": "^11.7.0",
|
|
||||||
"boxen": "^7.1.0",
|
|
||||||
"chalk": "^5.3.0",
|
|
||||||
"commander": "^11.0.0",
|
|
||||||
"figlet": "^1.7.0",
|
|
||||||
"glob": "^10.3.0",
|
|
||||||
"gradient-string": "^2.0.2",
|
|
||||||
"inquirer": "^9.2.0",
|
|
||||||
"open": "^9.1.0",
|
|
||||||
"ora": "^7.0.0",
|
|
||||||
"zod": "^4.1.13"
|
|
||||||
},
|
|
||||||
"files": [
|
|
||||||
"bin/",
|
|
||||||
"dist/",
|
|
||||||
"src/",
|
|
||||||
"README.md",
|
|
||||||
"LICENSE"
|
|
||||||
],
|
|
||||||
"repository": {
|
|
||||||
"type": "git",
|
|
||||||
"url": "https://github.com/claude-code-workflow/ccw"
|
|
||||||
},
|
|
||||||
"devDependencies": {
|
|
||||||
"@types/better-sqlite3": "^7.6.12",
|
|
||||||
"@types/gradient-string": "^1.1.6",
|
|
||||||
"@types/inquirer": "^9.0.9",
|
|
||||||
"@types/node": "^25.0.1",
|
|
||||||
"tsx": "^4.21.0",
|
|
||||||
"typescript": "^5.9.3"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -28,20 +28,32 @@ interface PackageInfo {
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* Load package.json with error handling
|
* Load package.json with error handling
|
||||||
|
* Tries root package.json first (../../package.json from dist),
|
||||||
|
* then falls back to ccw package.json (../package.json from dist)
|
||||||
* @returns Package info with version
|
* @returns Package info with version
|
||||||
*/
|
*/
|
||||||
function loadPackageInfo(): PackageInfo {
|
function loadPackageInfo(): PackageInfo {
|
||||||
const pkgPath = join(__dirname, '../package.json');
|
// First try root package.json (parent of ccw directory)
|
||||||
|
const rootPkgPath = join(__dirname, '../../package.json');
|
||||||
|
// Fallback to ccw package.json
|
||||||
|
const ccwPkgPath = join(__dirname, '../package.json');
|
||||||
|
|
||||||
try {
|
try {
|
||||||
if (!existsSync(pkgPath)) {
|
// Try root package.json first
|
||||||
console.error('Fatal Error: package.json not found.');
|
if (existsSync(rootPkgPath)) {
|
||||||
console.error(`Expected location: ${pkgPath}`);
|
const content = readFileSync(rootPkgPath, 'utf8');
|
||||||
process.exit(1);
|
return JSON.parse(content) as PackageInfo;
|
||||||
}
|
}
|
||||||
|
|
||||||
const content = readFileSync(pkgPath, 'utf8');
|
// Fallback to ccw package.json
|
||||||
return JSON.parse(content) as PackageInfo;
|
if (existsSync(ccwPkgPath)) {
|
||||||
|
const content = readFileSync(ccwPkgPath, 'utf8');
|
||||||
|
return JSON.parse(content) as PackageInfo;
|
||||||
|
}
|
||||||
|
|
||||||
|
console.error('Fatal Error: package.json not found.');
|
||||||
|
console.error(`Tried locations:\n - ${rootPkgPath}\n - ${ccwPkgPath}`);
|
||||||
|
process.exit(1);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
if (error instanceof SyntaxError) {
|
if (error instanceof SyntaxError) {
|
||||||
console.error('Fatal Error: package.json contains invalid JSON.');
|
console.error('Fatal Error: package.json contains invalid JSON.');
|
||||||
@@ -169,12 +181,14 @@ export function run(argv: string[]): void {
|
|||||||
.option('--resume [id]', 'Resume previous session (empty=last, or execution ID, or comma-separated IDs for merge)')
|
.option('--resume [id]', 'Resume previous session (empty=last, or execution ID, or comma-separated IDs for merge)')
|
||||||
.option('--id <id>', 'Custom execution ID (e.g., IMPL-001-step1)')
|
.option('--id <id>', 'Custom execution ID (e.g., IMPL-001-step1)')
|
||||||
.option('--no-native', 'Force prompt concatenation instead of native resume')
|
.option('--no-native', 'Force prompt concatenation instead of native resume')
|
||||||
|
.option('--cache [items]', 'Cache: comma-separated @patterns and text content')
|
||||||
|
.option('--inject-mode <mode>', 'Inject mode: none, full, progressive (default: codex=full, others=none)')
|
||||||
// Storage options
|
// Storage options
|
||||||
.option('--project <path>', 'Project path for storage operations')
|
.option('--project <path>', 'Project path for storage operations')
|
||||||
.option('--force', 'Confirm destructive operations')
|
.option('--force', 'Confirm destructive operations')
|
||||||
.option('--cli-history', 'Target CLI history storage')
|
.option('--cli-history', 'Target CLI history storage')
|
||||||
.option('--memory', 'Target memory storage')
|
.option('--memory', 'Target memory storage')
|
||||||
.option('--cache', 'Target cache storage')
|
.option('--storage-cache', 'Target cache storage')
|
||||||
.option('--config', 'Target config storage')
|
.option('--config', 'Target config storage')
|
||||||
.action((subcommand, args, options) => cliCommand(subcommand, args, options));
|
.action((subcommand, args, options) => cliCommand(subcommand, args, options));
|
||||||
|
|
||||||
|
|||||||
@@ -78,6 +78,14 @@ interface CliExecOptions {
|
|||||||
resume?: string | boolean; // true = last, string = execution ID, comma-separated for merge
|
resume?: string | boolean; // true = last, string = execution ID, comma-separated for merge
|
||||||
id?: string; // Custom execution ID (e.g., IMPL-001-step1)
|
id?: string; // Custom execution ID (e.g., IMPL-001-step1)
|
||||||
noNative?: boolean; // Force prompt concatenation instead of native resume
|
noNative?: boolean; // Force prompt concatenation instead of native resume
|
||||||
|
cache?: string | boolean; // Cache: true = auto from CONTEXT, string = comma-separated patterns/content
|
||||||
|
injectMode?: 'none' | 'full' | 'progressive'; // Inject mode for cached content
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Cache configuration parsed from --cache */
|
||||||
|
interface CacheConfig {
|
||||||
|
patterns?: string[]; // @patterns to pack (items starting with @)
|
||||||
|
content?: string; // Additional text content (items not starting with @)
|
||||||
}
|
}
|
||||||
|
|
||||||
interface HistoryOptions {
|
interface HistoryOptions {
|
||||||
@@ -91,7 +99,7 @@ interface StorageOptions {
|
|||||||
project?: string;
|
project?: string;
|
||||||
cliHistory?: boolean;
|
cliHistory?: boolean;
|
||||||
memory?: boolean;
|
memory?: boolean;
|
||||||
cache?: boolean;
|
storageCache?: boolean;
|
||||||
config?: boolean;
|
config?: boolean;
|
||||||
force?: boolean;
|
force?: boolean;
|
||||||
}
|
}
|
||||||
@@ -173,15 +181,15 @@ async function showStorageInfo(): Promise<void> {
|
|||||||
* Clean storage
|
* Clean storage
|
||||||
*/
|
*/
|
||||||
async function cleanStorage(options: StorageOptions): Promise<void> {
|
async function cleanStorage(options: StorageOptions): Promise<void> {
|
||||||
const { all, project, force, cliHistory, memory, cache, config } = options;
|
const { all, project, force, cliHistory, memory, storageCache, config } = options;
|
||||||
|
|
||||||
// Determine what to clean
|
// Determine what to clean
|
||||||
const cleanTypes = {
|
const cleanTypes = {
|
||||||
cliHistory: cliHistory || (!cliHistory && !memory && !cache && !config),
|
cliHistory: cliHistory || (!cliHistory && !memory && !storageCache && !config),
|
||||||
memory: memory || (!cliHistory && !memory && !cache && !config),
|
memory: memory || (!cliHistory && !memory && !storageCache && !config),
|
||||||
cache: cache || (!cliHistory && !memory && !cache && !config),
|
cache: storageCache || (!cliHistory && !memory && !storageCache && !config),
|
||||||
config: config || false, // Config requires explicit flag
|
config: config || false, // Config requires explicit flag
|
||||||
all: !cliHistory && !memory && !cache && !config
|
all: !cliHistory && !memory && !storageCache && !config
|
||||||
};
|
};
|
||||||
|
|
||||||
if (project) {
|
if (project) {
|
||||||
@@ -383,7 +391,7 @@ async function statusAction(): Promise<void> {
|
|||||||
* @param {Object} options - CLI options
|
* @param {Object} options - CLI options
|
||||||
*/
|
*/
|
||||||
async function execAction(positionalPrompt: string | undefined, options: CliExecOptions): Promise<void> {
|
async function execAction(positionalPrompt: string | undefined, options: CliExecOptions): Promise<void> {
|
||||||
const { prompt: optionPrompt, file, tool = 'gemini', mode = 'analysis', model, cd, includeDirs, timeout, noStream, resume, id, noNative } = options;
|
const { prompt: optionPrompt, file, tool = 'gemini', mode = 'analysis', model, cd, includeDirs, timeout, noStream, resume, id, noNative, cache, injectMode } = options;
|
||||||
|
|
||||||
// Priority: 1. --file, 2. --prompt/-p option, 3. positional argument
|
// Priority: 1. --file, 2. --prompt/-p option, 3. positional argument
|
||||||
let finalPrompt: string | undefined;
|
let finalPrompt: string | undefined;
|
||||||
@@ -421,6 +429,128 @@ async function execAction(positionalPrompt: string | undefined, options: CliExec
|
|||||||
|
|
||||||
const prompt_to_use = finalPrompt || '';
|
const prompt_to_use = finalPrompt || '';
|
||||||
|
|
||||||
|
// Handle cache option: pack @patterns and/or content
|
||||||
|
let cacheSessionId: string | undefined;
|
||||||
|
let actualPrompt = prompt_to_use;
|
||||||
|
|
||||||
|
if (cache) {
|
||||||
|
const { handler: contextCacheHandler } = await import('../tools/context-cache.js');
|
||||||
|
|
||||||
|
// Parse cache config from comma-separated string
|
||||||
|
// Items starting with @ are patterns, others are text content
|
||||||
|
let cacheConfig: CacheConfig = {};
|
||||||
|
|
||||||
|
if (cache === true) {
|
||||||
|
// --cache without value: auto-extract from CONTEXT field
|
||||||
|
const contextMatch = prompt_to_use.match(/CONTEXT:\s*([^\n]+)/i);
|
||||||
|
if (contextMatch) {
|
||||||
|
const contextLine = contextMatch[1];
|
||||||
|
const patternMatches = contextLine.matchAll(/@[^\s|]+/g);
|
||||||
|
cacheConfig.patterns = Array.from(patternMatches).map(m => m[0]);
|
||||||
|
}
|
||||||
|
} else if (typeof cache === 'string') {
|
||||||
|
// Parse comma-separated items: @patterns and text content
|
||||||
|
const items = cache.split(',').map(s => s.trim()).filter(Boolean);
|
||||||
|
const patterns: string[] = [];
|
||||||
|
const contentParts: string[] = [];
|
||||||
|
|
||||||
|
for (const item of items) {
|
||||||
|
if (item.startsWith('@')) {
|
||||||
|
patterns.push(item);
|
||||||
|
} else {
|
||||||
|
contentParts.push(item);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (patterns.length > 0) {
|
||||||
|
cacheConfig.patterns = patterns;
|
||||||
|
}
|
||||||
|
if (contentParts.length > 0) {
|
||||||
|
cacheConfig.content = contentParts.join('\n');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Also extract patterns from CONTEXT if not provided
|
||||||
|
if ((!cacheConfig.patterns || cacheConfig.patterns.length === 0) && prompt_to_use) {
|
||||||
|
const contextMatch = prompt_to_use.match(/CONTEXT:\s*([^\n]+)/i);
|
||||||
|
if (contextMatch) {
|
||||||
|
const contextLine = contextMatch[1];
|
||||||
|
const patternMatches = contextLine.matchAll(/@[^\s|]+/g);
|
||||||
|
cacheConfig.patterns = Array.from(patternMatches).map(m => m[0]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pack if we have patterns or content
|
||||||
|
if ((cacheConfig.patterns && cacheConfig.patterns.length > 0) || cacheConfig.content) {
|
||||||
|
const patternCount = cacheConfig.patterns?.length || 0;
|
||||||
|
const hasContent = !!cacheConfig.content;
|
||||||
|
console.log(chalk.gray(` Caching: ${patternCount} pattern(s)${hasContent ? ' + text content' : ''}...`));
|
||||||
|
|
||||||
|
const cacheResult = await contextCacheHandler({
|
||||||
|
operation: 'pack',
|
||||||
|
patterns: cacheConfig.patterns,
|
||||||
|
content: cacheConfig.content,
|
||||||
|
cwd: cd || process.cwd(),
|
||||||
|
include_dirs: includeDirs ? includeDirs.split(',') : undefined,
|
||||||
|
});
|
||||||
|
|
||||||
|
if (cacheResult.success && cacheResult.result) {
|
||||||
|
const packResult = cacheResult.result as { session_id: string; files_packed: number; total_bytes: number };
|
||||||
|
cacheSessionId = packResult.session_id;
|
||||||
|
console.log(chalk.gray(` Cached: ${packResult.files_packed} files, ${packResult.total_bytes} bytes`));
|
||||||
|
console.log(chalk.gray(` Session: ${cacheSessionId}`));
|
||||||
|
|
||||||
|
// Determine inject mode:
|
||||||
|
// --inject-mode explicitly set > tool default (codex=full, others=none)
|
||||||
|
const effectiveInjectMode = injectMode ?? (tool === 'codex' ? 'full' : 'none');
|
||||||
|
|
||||||
|
if (effectiveInjectMode !== 'none' && cacheSessionId) {
|
||||||
|
if (effectiveInjectMode === 'full') {
|
||||||
|
// Read full cache content
|
||||||
|
const readResult = await contextCacheHandler({
|
||||||
|
operation: 'read',
|
||||||
|
session_id: cacheSessionId,
|
||||||
|
offset: 0,
|
||||||
|
limit: 1024 * 1024, // 1MB max
|
||||||
|
});
|
||||||
|
|
||||||
|
if (readResult.success && readResult.result) {
|
||||||
|
const { content: cachedContent, total_bytes } = readResult.result as { content: string; total_bytes: number };
|
||||||
|
console.log(chalk.gray(` Injecting ${total_bytes} bytes (full mode)...`));
|
||||||
|
actualPrompt = `=== CACHED CONTEXT (${packResult.files_packed} files) ===\n${cachedContent}\n\n=== USER PROMPT ===\n${prompt_to_use}`;
|
||||||
|
}
|
||||||
|
} else if (effectiveInjectMode === 'progressive') {
|
||||||
|
// Progressive mode: read first page only (64KB default)
|
||||||
|
const pageLimit = 65536;
|
||||||
|
const readResult = await contextCacheHandler({
|
||||||
|
operation: 'read',
|
||||||
|
session_id: cacheSessionId,
|
||||||
|
offset: 0,
|
||||||
|
limit: pageLimit,
|
||||||
|
});
|
||||||
|
|
||||||
|
if (readResult.success && readResult.result) {
|
||||||
|
const { content: cachedContent, total_bytes, has_more, next_offset } = readResult.result as {
|
||||||
|
content: string; total_bytes: number; has_more: boolean; next_offset: number | null
|
||||||
|
};
|
||||||
|
console.log(chalk.gray(` Injecting ${cachedContent.length}/${total_bytes} bytes (progressive mode)...`));
|
||||||
|
|
||||||
|
const moreInfo = has_more
|
||||||
|
? `\n[... ${total_bytes - cachedContent.length} more bytes available via: context_cache(operation="read", session_id="${cacheSessionId}", offset=${next_offset}) ...]`
|
||||||
|
: '';
|
||||||
|
|
||||||
|
actualPrompt = `=== CACHED CONTEXT (${packResult.files_packed} files, progressive) ===\n${cachedContent}${moreInfo}\n\n=== USER PROMPT ===\n${prompt_to_use}`;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log();
|
||||||
|
} else {
|
||||||
|
console.log(chalk.yellow(` Cache warning: ${cacheResult.error}`));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Parse resume IDs for merge scenario
|
// Parse resume IDs for merge scenario
|
||||||
const resumeIds = resume && typeof resume === 'string' ? resume.split(',').map(s => s.trim()).filter(Boolean) : [];
|
const resumeIds = resume && typeof resume === 'string' ? resume.split(',').map(s => s.trim()).filter(Boolean) : [];
|
||||||
const isMerge = resumeIds.length > 1;
|
const isMerge = resumeIds.length > 1;
|
||||||
@@ -462,7 +592,7 @@ async function execAction(positionalPrompt: string | undefined, options: CliExec
|
|||||||
try {
|
try {
|
||||||
const result = await cliExecutorTool.execute({
|
const result = await cliExecutorTool.execute({
|
||||||
tool,
|
tool,
|
||||||
prompt: prompt_to_use,
|
prompt: actualPrompt,
|
||||||
mode,
|
mode,
|
||||||
model,
|
model,
|
||||||
cd,
|
cd,
|
||||||
@@ -725,14 +855,28 @@ export async function cliCommand(
|
|||||||
console.log(chalk.gray(' --model <model> Model override'));
|
console.log(chalk.gray(' --model <model> Model override'));
|
||||||
console.log(chalk.gray(' --cd <path> Working directory'));
|
console.log(chalk.gray(' --cd <path> Working directory'));
|
||||||
console.log(chalk.gray(' --includeDirs <dirs> Additional directories'));
|
console.log(chalk.gray(' --includeDirs <dirs> Additional directories'));
|
||||||
console.log(chalk.gray(' --timeout <ms> Timeout (default: 300000)'));
|
console.log(chalk.gray(' --timeout <ms> Timeout (default: 0=disabled)'));
|
||||||
console.log(chalk.gray(' --resume [id] Resume previous session'));
|
console.log(chalk.gray(' --resume [id] Resume previous session'));
|
||||||
|
console.log(chalk.gray(' --cache <items> Cache: comma-separated @patterns and text'));
|
||||||
|
console.log(chalk.gray(' --inject-mode <m> Inject mode: none, full, progressive'));
|
||||||
|
console.log();
|
||||||
|
console.log(' Cache format:');
|
||||||
|
console.log(chalk.gray(' --cache "@src/**/*.ts,@CLAUDE.md" # @patterns to pack'));
|
||||||
|
console.log(chalk.gray(' --cache "@src/**/*,extra context" # patterns + text content'));
|
||||||
|
console.log(chalk.gray(' --cache # auto from CONTEXT field'));
|
||||||
|
console.log();
|
||||||
|
console.log(' Inject modes:');
|
||||||
|
console.log(chalk.gray(' none: cache only, no injection (default for gemini/qwen)'));
|
||||||
|
console.log(chalk.gray(' full: inject all cached content (default for codex)'));
|
||||||
|
console.log(chalk.gray(' progressive: inject first 64KB with MCP continuation hint'));
|
||||||
console.log();
|
console.log();
|
||||||
console.log(' Examples:');
|
console.log(' Examples:');
|
||||||
console.log(chalk.gray(' ccw cli -p "Analyze auth module" --tool gemini'));
|
console.log(chalk.gray(' ccw cli -p "Analyze auth module" --tool gemini'));
|
||||||
console.log(chalk.gray(' ccw cli -f prompt.txt --tool codex --mode write'));
|
console.log(chalk.gray(' ccw cli -f prompt.txt --tool codex --mode write'));
|
||||||
console.log(chalk.gray(' ccw cli -p "$(cat template.md)" --tool gemini'));
|
console.log(chalk.gray(' ccw cli -p "$(cat template.md)" --tool gemini'));
|
||||||
console.log(chalk.gray(' ccw cli --resume --tool gemini'));
|
console.log(chalk.gray(' ccw cli --resume --tool gemini'));
|
||||||
|
console.log(chalk.gray(' ccw cli -p "..." --cache "@src/**/*.ts" --tool codex'));
|
||||||
|
console.log(chalk.gray(' ccw cli -p "..." --cache "@src/**/*" --inject-mode progressive --tool gemini'));
|
||||||
console.log();
|
console.log();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,7 +6,7 @@
|
|||||||
import chalk from 'chalk';
|
import chalk from 'chalk';
|
||||||
import { existsSync, readFileSync, writeFileSync, mkdirSync } from 'fs';
|
import { existsSync, readFileSync, writeFileSync, mkdirSync } from 'fs';
|
||||||
import { join, dirname } from 'path';
|
import { join, dirname } from 'path';
|
||||||
import { tmpdir } from 'os';
|
import { homedir } from 'os';
|
||||||
|
|
||||||
interface HookOptions {
|
interface HookOptions {
|
||||||
stdin?: boolean;
|
stdin?: boolean;
|
||||||
@@ -53,9 +53,10 @@ async function readStdin(): Promise<string> {
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* Get session state file path
|
* Get session state file path
|
||||||
|
* Uses ~/.claude/.ccw-sessions/ for reliable persistence across sessions
|
||||||
*/
|
*/
|
||||||
function getSessionStateFile(sessionId: string): string {
|
function getSessionStateFile(sessionId: string): string {
|
||||||
const stateDir = join(tmpdir(), '.ccw-sessions');
|
const stateDir = join(homedir(), '.claude', '.ccw-sessions');
|
||||||
if (!existsSync(stateDir)) {
|
if (!existsSync(stateDir)) {
|
||||||
mkdirSync(stateDir, { recursive: true });
|
mkdirSync(stateDir, { recursive: true });
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -0,0 +1,441 @@
|
|||||||
|
/**
|
||||||
|
* LiteLLM API Config Manager
|
||||||
|
* Manages provider credentials, endpoint configurations, and model discovery
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { join } from 'path';
|
||||||
|
import { readFileSync, writeFileSync, existsSync, mkdirSync } from 'fs';
|
||||||
|
import { homedir } from 'os';
|
||||||
|
|
||||||
|
// ===========================
|
||||||
|
// Type Definitions
|
||||||
|
// ===========================
|
||||||
|
|
||||||
|
export type ProviderType =
|
||||||
|
| 'openai'
|
||||||
|
| 'anthropic'
|
||||||
|
| 'google'
|
||||||
|
| 'cohere'
|
||||||
|
| 'azure'
|
||||||
|
| 'bedrock'
|
||||||
|
| 'vertexai'
|
||||||
|
| 'huggingface'
|
||||||
|
| 'ollama'
|
||||||
|
| 'custom';
|
||||||
|
|
||||||
|
export interface ProviderCredential {
|
||||||
|
id: string;
|
||||||
|
name: string;
|
||||||
|
type: ProviderType;
|
||||||
|
apiKey?: string;
|
||||||
|
baseUrl?: string;
|
||||||
|
apiVersion?: string;
|
||||||
|
region?: string;
|
||||||
|
projectId?: string;
|
||||||
|
organizationId?: string;
|
||||||
|
enabled: boolean;
|
||||||
|
metadata?: Record<string, any>;
|
||||||
|
createdAt: string;
|
||||||
|
updatedAt: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface EndpointConfig {
|
||||||
|
id: string;
|
||||||
|
name: string;
|
||||||
|
providerId: string;
|
||||||
|
model: string;
|
||||||
|
alias?: string;
|
||||||
|
temperature?: number;
|
||||||
|
maxTokens?: number;
|
||||||
|
topP?: number;
|
||||||
|
enabled: boolean;
|
||||||
|
metadata?: Record<string, any>;
|
||||||
|
createdAt: string;
|
||||||
|
updatedAt: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface ModelInfo {
|
||||||
|
id: string;
|
||||||
|
name: string;
|
||||||
|
provider: ProviderType;
|
||||||
|
contextWindow: number;
|
||||||
|
supportsFunctions: boolean;
|
||||||
|
supportsStreaming: boolean;
|
||||||
|
inputCostPer1k?: number;
|
||||||
|
outputCostPer1k?: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface LiteLLMApiConfig {
|
||||||
|
version: string;
|
||||||
|
providers: ProviderCredential[];
|
||||||
|
endpoints: EndpointConfig[];
|
||||||
|
}
|
||||||
|
|
||||||
|
// ===========================
|
||||||
|
// Model Definitions
|
||||||
|
// ===========================
|
||||||
|
|
||||||
|
export const PROVIDER_MODELS: Record<ProviderType, ModelInfo[]> = {
|
||||||
|
openai: [
|
||||||
|
{
|
||||||
|
id: 'gpt-4-turbo',
|
||||||
|
name: 'GPT-4 Turbo',
|
||||||
|
provider: 'openai',
|
||||||
|
contextWindow: 128000,
|
||||||
|
supportsFunctions: true,
|
||||||
|
supportsStreaming: true,
|
||||||
|
inputCostPer1k: 0.01,
|
||||||
|
outputCostPer1k: 0.03,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'gpt-4',
|
||||||
|
name: 'GPT-4',
|
||||||
|
provider: 'openai',
|
||||||
|
contextWindow: 8192,
|
||||||
|
supportsFunctions: true,
|
||||||
|
supportsStreaming: true,
|
||||||
|
inputCostPer1k: 0.03,
|
||||||
|
outputCostPer1k: 0.06,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'gpt-3.5-turbo',
|
||||||
|
name: 'GPT-3.5 Turbo',
|
||||||
|
provider: 'openai',
|
||||||
|
contextWindow: 16385,
|
||||||
|
supportsFunctions: true,
|
||||||
|
supportsStreaming: true,
|
||||||
|
inputCostPer1k: 0.0005,
|
||||||
|
outputCostPer1k: 0.0015,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
anthropic: [
|
||||||
|
{
|
||||||
|
id: 'claude-3-opus-20240229',
|
||||||
|
name: 'Claude 3 Opus',
|
||||||
|
provider: 'anthropic',
|
||||||
|
contextWindow: 200000,
|
||||||
|
supportsFunctions: true,
|
||||||
|
supportsStreaming: true,
|
||||||
|
inputCostPer1k: 0.015,
|
||||||
|
outputCostPer1k: 0.075,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'claude-3-sonnet-20240229',
|
||||||
|
name: 'Claude 3 Sonnet',
|
||||||
|
provider: 'anthropic',
|
||||||
|
contextWindow: 200000,
|
||||||
|
supportsFunctions: true,
|
||||||
|
supportsStreaming: true,
|
||||||
|
inputCostPer1k: 0.003,
|
||||||
|
outputCostPer1k: 0.015,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'claude-3-haiku-20240307',
|
||||||
|
name: 'Claude 3 Haiku',
|
||||||
|
provider: 'anthropic',
|
||||||
|
contextWindow: 200000,
|
||||||
|
supportsFunctions: true,
|
||||||
|
supportsStreaming: true,
|
||||||
|
inputCostPer1k: 0.00025,
|
||||||
|
outputCostPer1k: 0.00125,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
google: [
|
||||||
|
{
|
||||||
|
id: 'gemini-pro',
|
||||||
|
name: 'Gemini Pro',
|
||||||
|
provider: 'google',
|
||||||
|
contextWindow: 32768,
|
||||||
|
supportsFunctions: true,
|
||||||
|
supportsStreaming: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'gemini-pro-vision',
|
||||||
|
name: 'Gemini Pro Vision',
|
||||||
|
provider: 'google',
|
||||||
|
contextWindow: 16384,
|
||||||
|
supportsFunctions: false,
|
||||||
|
supportsStreaming: true,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
cohere: [
|
||||||
|
{
|
||||||
|
id: 'command',
|
||||||
|
name: 'Command',
|
||||||
|
provider: 'cohere',
|
||||||
|
contextWindow: 4096,
|
||||||
|
supportsFunctions: false,
|
||||||
|
supportsStreaming: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'command-light',
|
||||||
|
name: 'Command Light',
|
||||||
|
provider: 'cohere',
|
||||||
|
contextWindow: 4096,
|
||||||
|
supportsFunctions: false,
|
||||||
|
supportsStreaming: true,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
azure: [],
|
||||||
|
bedrock: [],
|
||||||
|
vertexai: [],
|
||||||
|
huggingface: [],
|
||||||
|
ollama: [],
|
||||||
|
custom: [],
|
||||||
|
};
|
||||||
|
|
||||||
|
// ===========================
|
||||||
|
// Config File Management
|
||||||
|
// ===========================
|
||||||
|
|
||||||
|
const CONFIG_DIR = join(homedir(), '.claude', 'litellm');
|
||||||
|
const CONFIG_FILE = join(CONFIG_DIR, 'config.json');
|
||||||
|
|
||||||
|
function ensureConfigDir(): void {
|
||||||
|
if (!existsSync(CONFIG_DIR)) {
|
||||||
|
mkdirSync(CONFIG_DIR, { recursive: true });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function loadConfig(): LiteLLMApiConfig {
|
||||||
|
ensureConfigDir();
|
||||||
|
|
||||||
|
if (!existsSync(CONFIG_FILE)) {
|
||||||
|
const defaultConfig: LiteLLMApiConfig = {
|
||||||
|
version: '1.0.0',
|
||||||
|
providers: [],
|
||||||
|
endpoints: [],
|
||||||
|
};
|
||||||
|
saveConfig(defaultConfig);
|
||||||
|
return defaultConfig;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const content = readFileSync(CONFIG_FILE, 'utf-8');
|
||||||
|
return JSON.parse(content);
|
||||||
|
} catch (err) {
|
||||||
|
throw new Error(`Failed to load config: ${(err as Error).message}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function saveConfig(config: LiteLLMApiConfig): void {
|
||||||
|
ensureConfigDir();
|
||||||
|
|
||||||
|
try {
|
||||||
|
writeFileSync(CONFIG_FILE, JSON.stringify(config, null, 2), 'utf-8');
|
||||||
|
} catch (err) {
|
||||||
|
throw new Error(`Failed to save config: ${(err as Error).message}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ===========================
|
||||||
|
// Provider Management
|
||||||
|
// ===========================
|
||||||
|
|
||||||
|
export function getAllProviders(): ProviderCredential[] {
|
||||||
|
const config = loadConfig();
|
||||||
|
return config.providers;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function getProvider(id: string): ProviderCredential | null {
|
||||||
|
const config = loadConfig();
|
||||||
|
return config.providers.find((p) => p.id === id) || null;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function createProvider(
|
||||||
|
data: Omit<ProviderCredential, 'id' | 'createdAt' | 'updatedAt'>
|
||||||
|
): ProviderCredential {
|
||||||
|
const config = loadConfig();
|
||||||
|
|
||||||
|
const now = new Date().toISOString();
|
||||||
|
const provider: ProviderCredential = {
|
||||||
|
...data,
|
||||||
|
id: `provider-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
|
||||||
|
createdAt: now,
|
||||||
|
updatedAt: now,
|
||||||
|
};
  config.providers.push(provider);
  saveConfig(config);

  return provider;
}

export function updateProvider(
  id: string,
  updates: Partial<ProviderCredential>
): ProviderCredential | null {
  const config = loadConfig();

  const index = config.providers.findIndex((p) => p.id === id);
  if (index === -1) {
    return null;
  }

  const updated: ProviderCredential = {
    ...config.providers[index],
    ...updates,
    id,
    updatedAt: new Date().toISOString(),
  };

  config.providers[index] = updated;
  saveConfig(config);

  return updated;
}

export function deleteProvider(id: string): { success: boolean } {
  const config = loadConfig();

  const index = config.providers.findIndex((p) => p.id === id);
  if (index === -1) {
    return { success: false };
  }

  config.providers.splice(index, 1);

  // Also delete endpoints using this provider
  config.endpoints = config.endpoints.filter((e) => e.providerId !== id);

  saveConfig(config);

  return { success: true };
}

export async function testProviderConnection(
  providerId: string
): Promise<{ success: boolean; error?: string }> {
  const provider = getProvider(providerId);

  if (!provider) {
    return { success: false, error: 'Provider not found' };
  }

  if (!provider.enabled) {
    return { success: false, error: 'Provider is disabled' };
  }

  // Basic validation
  if (!provider.apiKey && provider.type !== 'ollama' && provider.type !== 'custom') {
    return { success: false, error: 'API key is required for this provider type' };
  }

  // TODO: Implement actual provider connection testing using litellm-client
  // For now, just validate the configuration
  return { success: true };
}

// ===========================
// Endpoint Management
// ===========================

export function getAllEndpoints(): EndpointConfig[] {
  const config = loadConfig();
  return config.endpoints;
}

export function getEndpoint(id: string): EndpointConfig | null {
  const config = loadConfig();
  return config.endpoints.find((e) => e.id === id) || null;
}

export function createEndpoint(
  data: Omit<EndpointConfig, 'id' | 'createdAt' | 'updatedAt'>
): EndpointConfig {
  const config = loadConfig();

  // Validate provider exists
  const provider = config.providers.find((p) => p.id === data.providerId);
  if (!provider) {
    throw new Error('Provider not found');
  }

  const now = new Date().toISOString();
  const endpoint: EndpointConfig = {
    ...data,
    id: `endpoint-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
    createdAt: now,
    updatedAt: now,
  };

  config.endpoints.push(endpoint);
  saveConfig(config);

  return endpoint;
}

export function updateEndpoint(
  id: string,
  updates: Partial<EndpointConfig>
): EndpointConfig | null {
  const config = loadConfig();

  const index = config.endpoints.findIndex((e) => e.id === id);
  if (index === -1) {
    return null;
  }

  // Validate provider if being updated
  if (updates.providerId) {
    const provider = config.providers.find((p) => p.id === updates.providerId);
    if (!provider) {
      throw new Error('Provider not found');
    }
  }

  const updated: EndpointConfig = {
    ...config.endpoints[index],
    ...updates,
    id,
    updatedAt: new Date().toISOString(),
  };

  config.endpoints[index] = updated;
  saveConfig(config);

  return updated;
}

export function deleteEndpoint(id: string): { success: boolean } {
  const config = loadConfig();

  const index = config.endpoints.findIndex((e) => e.id === id);
  if (index === -1) {
    return { success: false };
  }

  config.endpoints.splice(index, 1);
  saveConfig(config);

  return { success: true };
}

// ===========================
// Model Discovery
// ===========================

export function getModelsForProviderType(providerType: ProviderType): ModelInfo[] | null {
  return PROVIDER_MODELS[providerType] || null;
}

export function getAllModels(): Record<ProviderType, ModelInfo[]> {
  return PROVIDER_MODELS;
}

// ===========================
// Config Access
// ===========================

export function getFullConfig(): LiteLLMApiConfig {
  return loadConfig();
}

export function resetConfig(): void {
  const defaultConfig: LiteLLMApiConfig = {
    version: '1.0.0',
    providers: [],
    endpoints: [],
  };
  saveConfig(defaultConfig);
}
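For orientation, a minimal sketch of how the provider helpers above compose. The import path is an assumption (this part of the diff does not name the file the block belongs to), and 'provider-123' is a placeholder id.

// Illustrative sketch only — import path and provider id are assumptions.
import { getProvider, updateProvider, testProviderConnection } from './litellm-api-config-manager.js';

async function enableAndTest(providerId: string): Promise<void> {
  const provider = getProvider(providerId);
  if (!provider) return;

  // Re-enable the provider, then run the (currently validation-only) connection test
  updateProvider(providerId, { enabled: true });
  const result = await testProviderConnection(providerId);
  console.log(result.success ? 'connection config looks valid' : `failed: ${result.error}`);
}

enableAndTest('provider-123').catch(console.error);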
1012  ccw/src/config/litellm-api-config-manager.ts  (Normal file)
File diff suppressed because it is too large

222  ccw/src/config/provider-models.ts  (Normal file)
@@ -0,0 +1,222 @@
/**
 * Provider Model Presets
 *
 * Predefined model information for each supported LLM provider.
 * Used for UI dropdowns and validation.
 */

import type { ProviderType } from '../types/litellm-api-config.js';

/**
 * Model information metadata
 */
export interface ModelInfo {
  /** Model identifier (used in API calls) */
  id: string;

  /** Human-readable display name */
  name: string;

  /** Context window size in tokens */
  contextWindow: number;

  /** Whether this model supports prompt caching */
  supportsCaching: boolean;
}

/**
 * Embedding model information metadata
 */
export interface EmbeddingModelInfo {
  /** Model identifier (used in API calls) */
  id: string;

  /** Human-readable display name */
  name: string;

  /** Embedding dimensions */
  dimensions: number;

  /** Maximum input tokens */
  maxTokens: number;

  /** Provider identifier */
  provider: string;
}

/**
 * Predefined models for each API format
 * Used for UI selection and validation
 * Note: Most providers use OpenAI-compatible format
 */
export const PROVIDER_MODELS: Record<ProviderType, ModelInfo[]> = {
  // OpenAI-compatible format (used by OpenAI, DeepSeek, Ollama, etc.)
  openai: [
    {
      id: 'gpt-4o',
      name: 'GPT-4o',
      contextWindow: 128000,
      supportsCaching: true
    },
    {
      id: 'gpt-4o-mini',
      name: 'GPT-4o Mini',
      contextWindow: 128000,
      supportsCaching: true
    },
    {
      id: 'o1',
      name: 'O1',
      contextWindow: 200000,
      supportsCaching: true
    },
    {
      id: 'deepseek-chat',
      name: 'DeepSeek Chat',
      contextWindow: 64000,
      supportsCaching: false
    },
    {
      id: 'deepseek-coder',
      name: 'DeepSeek Coder',
      contextWindow: 64000,
      supportsCaching: false
    },
    {
      id: 'llama3.2',
      name: 'Llama 3.2',
      contextWindow: 128000,
      supportsCaching: false
    },
    {
      id: 'qwen2.5-coder',
      name: 'Qwen 2.5 Coder',
      contextWindow: 32000,
      supportsCaching: false
    }
  ],

  // Anthropic format
  anthropic: [
    {
      id: 'claude-sonnet-4-20250514',
      name: 'Claude Sonnet 4',
      contextWindow: 200000,
      supportsCaching: true
    },
    {
      id: 'claude-3-5-sonnet-20241022',
      name: 'Claude 3.5 Sonnet',
      contextWindow: 200000,
      supportsCaching: true
    },
    {
      id: 'claude-3-5-haiku-20241022',
      name: 'Claude 3.5 Haiku',
      contextWindow: 200000,
      supportsCaching: true
    },
    {
      id: 'claude-3-opus-20240229',
      name: 'Claude 3 Opus',
      contextWindow: 200000,
      supportsCaching: false
    }
  ],

  // Custom format
  custom: [
    {
      id: 'custom-model',
      name: 'Custom Model',
      contextWindow: 128000,
      supportsCaching: false
    }
  ]
};

/**
 * Get models for a specific provider
 * @param providerType - Provider type to get models for
 * @returns Array of model information
 */
export function getModelsForProvider(providerType: ProviderType): ModelInfo[] {
  return PROVIDER_MODELS[providerType] || [];
}

/**
 * Predefined embedding models for each API format
 * Used for UI selection and validation
 */
export const EMBEDDING_MODELS: Record<ProviderType, EmbeddingModelInfo[]> = {
  // OpenAI embedding models
  openai: [
    {
      id: 'text-embedding-3-small',
      name: 'Text Embedding 3 Small',
      dimensions: 1536,
      maxTokens: 8191,
      provider: 'openai'
    },
    {
      id: 'text-embedding-3-large',
      name: 'Text Embedding 3 Large',
      dimensions: 3072,
      maxTokens: 8191,
      provider: 'openai'
    },
    {
      id: 'text-embedding-ada-002',
      name: 'Ada 002',
      dimensions: 1536,
      maxTokens: 8191,
      provider: 'openai'
    }
  ],

  // Anthropic doesn't have embedding models
  anthropic: [],

  // Custom embedding models
  custom: [
    {
      id: 'custom-embedding',
      name: 'Custom Embedding',
      dimensions: 1536,
      maxTokens: 8192,
      provider: 'custom'
    }
  ]
};

/**
 * Get embedding models for a specific provider
 * @param providerType - Provider type to get embedding models for
 * @returns Array of embedding model information
 */
export function getEmbeddingModelsForProvider(providerType: ProviderType): EmbeddingModelInfo[] {
  return EMBEDDING_MODELS[providerType] || [];
}

/**
 * Get model information by ID within a provider
 * @param providerType - Provider type
 * @param modelId - Model identifier
 * @returns Model information or undefined if not found
 */
export function getModelInfo(providerType: ProviderType, modelId: string): ModelInfo | undefined {
  const models = PROVIDER_MODELS[providerType] || [];
  return models.find(m => m.id === modelId);
}

/**
 * Validate if a model ID is supported by a provider
 * @param providerType - Provider type
 * @param modelId - Model identifier to validate
 * @returns true if model is valid for provider
 */
export function isValidModel(providerType: ProviderType, modelId: string): boolean {
  return getModelInfo(providerType, modelId) !== undefined;
}
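A brief usage sketch for the preset helpers defined in provider-models.ts above; the relative import path is illustrative.

// Illustrative usage of the provider-model presets added in this diff.
import {
  getModelsForProvider,
  getModelInfo,
  isValidModel,
  getEmbeddingModelsForProvider,
} from './provider-models.js';

// List Anthropic chat models that support prompt caching
const cachingModels = getModelsForProvider('anthropic').filter(m => m.supportsCaching);
console.log(cachingModels.map(m => m.id));

// Validate a model id before wiring it into an endpoint
if (isValidModel('openai', 'gpt-4o-mini')) {
  console.log(getModelInfo('openai', 'gpt-4o-mini')?.contextWindow); // 128000
}

// Embedding models are looked up separately
console.log(getEmbeddingModelsForProvider('openai').map(m => `${m.id} (${m.dimensions}d)`));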
@@ -1,4 +1,4 @@
-import { existsSync, mkdirSync, readFileSync, writeFileSync, statSync } from 'fs';
+import { existsSync, mkdirSync, readFileSync, writeFileSync, statSync, unlinkSync, readdirSync } from 'fs';
 import { join, dirname } from 'path';
 import { StoragePaths, ensureStorageDir } from '../config/storage-paths.js';

@@ -118,8 +118,7 @@ export class CacheManager<T> {
   invalidate(): void {
     try {
       if (existsSync(this.cacheFile)) {
-        const fs = require('fs');
-        fs.unlinkSync(this.cacheFile);
+        unlinkSync(this.cacheFile);
       }
     } catch (err) {
       console.warn(`Cache invalidation error for ${this.cacheFile}:`, (err as Error).message);

@@ -180,8 +179,7 @@ export class CacheManager<T> {
     if (depth > 3) return; // Limit recursion depth

     try {
-      const fs = require('fs');
-      const entries = fs.readdirSync(dirPath, { withFileTypes: true });
+      const entries = readdirSync(dirPath, { withFileTypes: true });

       for (const entry of entries) {
         const fullPath = join(dirPath, entry.name);
@@ -46,7 +46,8 @@ const MODULE_CSS_FILES = [
   '27-graph-explorer.css',
   '28-mcp-manager.css',
   '29-help.css',
-  '30-core-memory.css'
+  '30-core-memory.css',
+  '31-api-settings.css'
 ];

 const MODULE_FILES = [
@@ -95,6 +96,7 @@ const MODULE_FILES = [
   'views/skills-manager.js',
   'views/rules-manager.js',
   'views/claude-manager.js',
+  'views/api-settings.js',
   'views/help.js',
   'main.js'
 ];
@@ -284,8 +284,12 @@ function normalizeTask(task: unknown): NormalizedTask | null {
   const implementation = taskObj.implementation as unknown[] | undefined;
   const modificationPoints = taskObj.modification_points as Array<{ file?: string }> | undefined;

+  // Ensure id is always a string (handle numeric IDs from JSON)
+  const rawId = taskObj.id ?? taskObj.task_id;
+  const stringId = rawId != null ? String(rawId) : 'unknown';
+
   return {
-    id: (taskObj.id as string) || (taskObj.task_id as string) || 'unknown',
+    id: stringId,
     title: (taskObj.title as string) || (taskObj.name as string) || (taskObj.summary as string) || 'Untitled Task',
     status: (status as string).toLowerCase(),
     // Preserve original fields for flexible rendering
@@ -4,7 +4,7 @@
  * Handles all CLAUDE.md memory rules management endpoints
  */
 import type { IncomingMessage, ServerResponse } from 'http';
-import { readFileSync, writeFileSync, existsSync, readdirSync, statSync } from 'fs';
+import { readFileSync, writeFileSync, existsSync, readdirSync, statSync, unlinkSync, mkdirSync } from 'fs';
 import { join, relative } from 'path';
 import { homedir } from 'os';

@@ -453,8 +453,7 @@ function deleteClaudeFile(filePath: string): { success: boolean; error?: string
     writeFileSync(backupPath, content, 'utf8');

     // Delete original file
-    const fs = require('fs');
-    fs.unlinkSync(filePath);
+    unlinkSync(filePath);

     return { success: true };
   } catch (error) {

@@ -500,9 +499,8 @@ function createNewClaudeFile(level: 'user' | 'project' | 'module', template: str

     // Ensure directory exists
     const dir = filePath.substring(0, filePath.lastIndexOf('/') || filePath.lastIndexOf('\\'));
-    const fs = require('fs');
     if (!existsSync(dir)) {
-      fs.mkdirSync(dir, { recursive: true });
+      mkdirSync(dir, { recursive: true });
     }

     // Write file
@@ -33,6 +33,17 @@ import {
   getFullConfigResponse,
   PREDEFINED_MODELS
 } from '../../tools/cli-config-manager.js';
+import {
+  loadClaudeCliTools,
+  saveClaudeCliTools,
+  updateClaudeToolEnabled,
+  updateClaudeCacheSettings,
+  getClaudeCliToolsInfo,
+  addClaudeCustomEndpoint,
+  removeClaudeCustomEndpoint,
+  updateCodeIndexMcp,
+  getCodeIndexMcp
+} from '../../tools/claude-cli-tools.js';

 export interface RouteContext {
   pathname: string;
@@ -204,6 +215,93 @@ export async function handleCliRoutes(ctx: RouteContext): Promise<boolean> {
     }
   }

+  // API: Get all custom endpoints
+  if (pathname === '/api/cli/endpoints' && req.method === 'GET') {
+    try {
+      const config = loadClaudeCliTools(initialPath);
+      res.writeHead(200, { 'Content-Type': 'application/json' });
+      res.end(JSON.stringify({ endpoints: config.customEndpoints || [] }));
+    } catch (err) {
+      res.writeHead(500, { 'Content-Type': 'application/json' });
+      res.end(JSON.stringify({ error: (err as Error).message }));
+    }
+    return true;
+  }
+
+  // API: Add/Update custom endpoint
+  if (pathname === '/api/cli/endpoints' && req.method === 'POST') {
+    handlePostRequest(req, res, async (body: unknown) => {
+      try {
+        const { id, name, enabled } = body as { id: string; name: string; enabled: boolean };
+        if (!id || !name) {
+          return { error: 'id and name are required', status: 400 };
+        }
+        const config = addClaudeCustomEndpoint(initialPath, { id, name, enabled: enabled !== false });
+
+        broadcastToClients({
+          type: 'CLI_ENDPOINT_UPDATED',
+          payload: { endpoint: { id, name, enabled }, timestamp: new Date().toISOString() }
+        });
+
+        return { success: true, endpoints: config.customEndpoints };
+      } catch (err) {
+        return { error: (err as Error).message, status: 500 };
+      }
+    });
+    return true;
+  }
+
+  // API: Update custom endpoint enabled status
+  if (pathname.match(/^\/api\/cli\/endpoints\/[^/]+$/) && req.method === 'PUT') {
+    const endpointId = pathname.split('/').pop() || '';
+    handlePostRequest(req, res, async (body: unknown) => {
+      try {
+        const { enabled, name } = body as { enabled?: boolean; name?: string };
+        const config = loadClaudeCliTools(initialPath);
+        const endpoint = config.customEndpoints.find(e => e.id === endpointId);
+
+        if (!endpoint) {
+          return { error: 'Endpoint not found', status: 404 };
+        }
+
+        if (typeof enabled === 'boolean') endpoint.enabled = enabled;
+        if (name) endpoint.name = name;
+
+        saveClaudeCliTools(initialPath, config);
+
+        broadcastToClients({
+          type: 'CLI_ENDPOINT_UPDATED',
+          payload: { endpoint, timestamp: new Date().toISOString() }
+        });
+
+        return { success: true, endpoint };
+      } catch (err) {
+        return { error: (err as Error).message, status: 500 };
+      }
+    });
+    return true;
+  }
+
+  // API: Delete custom endpoint
+  if (pathname.match(/^\/api\/cli\/endpoints\/[^/]+$/) && req.method === 'DELETE') {
+    const endpointId = pathname.split('/').pop() || '';
+    try {
+      const config = removeClaudeCustomEndpoint(initialPath, endpointId);
+
+      broadcastToClients({
+        type: 'CLI_ENDPOINT_DELETED',
+        payload: { endpointId, timestamp: new Date().toISOString() }
+      });
+
+      res.writeHead(200, { 'Content-Type': 'application/json' });
+      res.end(JSON.stringify({ success: true, endpoints: config.customEndpoints }));
+    } catch (err) {
+      res.writeHead(500, { 'Content-Type': 'application/json' });
+      res.end(JSON.stringify({ error: (err as Error).message }));
+    }
+    return true;
+  }
+
   // API: CLI Execution History
   if (pathname === '/api/cli/history') {
     const projectPath = url.searchParams.get('path') || initialPath;
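A hedged sketch of how a dashboard client might call the custom-endpoint routes added above; the base URL/port is an assumption (not taken from this diff), and Node 18+ global fetch is assumed.

// Illustrative only: exercises the /api/cli/endpoints routes registered above.
const BASE = 'http://localhost:3456'; // assumed dashboard address

async function demoCustomEndpoints(): Promise<void> {
  // Register (or update) a custom endpoint
  await fetch(`${BASE}/api/cli/endpoints`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ id: 'my-endpoint', name: 'My Endpoint', enabled: true }),
  });

  // Disable it by id
  await fetch(`${BASE}/api/cli/endpoints/my-endpoint`, {
    method: 'PUT',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ enabled: false }),
  });

  // List all custom endpoints
  const { endpoints } = await (await fetch(`${BASE}/api/cli/endpoints`)).json();
  console.log(endpoints);

  // Remove it again
  await fetch(`${BASE}/api/cli/endpoints/my-endpoint`, { method: 'DELETE' });
}

demoCustomEndpoints().catch(console.error);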
@@ -362,8 +460,9 @@ export async function handleCliRoutes(ctx: RouteContext): Promise<boolean> {
     const status = url.searchParams.get('status') || null;
     const category = url.searchParams.get('category') as 'user' | 'internal' | 'insight' | null;
     const search = url.searchParams.get('search') || null;
+    const recursive = url.searchParams.get('recursive') !== 'false';

-    getHistoryWithNativeInfo(projectPath, { limit, tool, status, category, search })
+    getHistoryWithNativeInfo(projectPath, { limit, tool, status, category, search, recursive })
       .then(history => {
         res.writeHead(200, { 'Content-Type': 'application/json' });
         res.end(JSON.stringify(history));
@@ -557,5 +656,141 @@ export async function handleCliRoutes(ctx: RouteContext): Promise<boolean> {
     return true;
   }

+  // API: Get CLI Tools Config from .claude/cli-tools.json (with fallback to global)
+  if (pathname === '/api/cli/tools-config' && req.method === 'GET') {
+    try {
+      const config = loadClaudeCliTools(initialPath);
+      const info = getClaudeCliToolsInfo(initialPath);
+      res.writeHead(200, { 'Content-Type': 'application/json' });
+      res.end(JSON.stringify({
+        ...config,
+        _configInfo: info
+      }));
+    } catch (err) {
+      res.writeHead(500, { 'Content-Type': 'application/json' });
+      res.end(JSON.stringify({ error: (err as Error).message }));
+    }
+    return true;
+  }
+
+  // API: Update CLI Tools Config
+  if (pathname === '/api/cli/tools-config' && req.method === 'PUT') {
+    handlePostRequest(req, res, async (body: unknown) => {
+      try {
+        const updates = body as Partial<any>;
+        const config = loadClaudeCliTools(initialPath);
+
+        // Merge updates
+        const updatedConfig = {
+          ...config,
+          ...updates,
+          tools: { ...config.tools, ...(updates.tools || {}) },
+          settings: {
+            ...config.settings,
+            ...(updates.settings || {}),
+            cache: {
+              ...config.settings.cache,
+              ...(updates.settings?.cache || {})
+            }
+          }
+        };
+
+        saveClaudeCliTools(initialPath, updatedConfig);
+
+        broadcastToClients({
+          type: 'CLI_TOOLS_CONFIG_UPDATED',
+          payload: { config: updatedConfig, timestamp: new Date().toISOString() }
+        });
+
+        return { success: true, config: updatedConfig };
+      } catch (err) {
+        return { error: (err as Error).message, status: 500 };
+      }
+    });
+    return true;
+  }
+
+  // API: Update specific tool enabled status
+  const toolsConfigMatch = pathname.match(/^\/api\/cli\/tools-config\/([a-zA-Z0-9_-]+)$/);
+  if (toolsConfigMatch && req.method === 'PUT') {
+    const toolName = toolsConfigMatch[1];
+    handlePostRequest(req, res, async (body: unknown) => {
+      try {
+        const { enabled } = body as { enabled: boolean };
+        const config = updateClaudeToolEnabled(initialPath, toolName, enabled);
+
+        broadcastToClients({
+          type: 'CLI_TOOL_TOGGLED',
+          payload: { tool: toolName, enabled, timestamp: new Date().toISOString() }
+        });
+
+        return { success: true, config };
+      } catch (err) {
+        return { error: (err as Error).message, status: 500 };
+      }
+    });
+    return true;
+  }
+
+  // API: Update cache settings
+  if (pathname === '/api/cli/tools-config/cache' && req.method === 'PUT') {
+    handlePostRequest(req, res, async (body: unknown) => {
+      try {
+        const cacheSettings = body as { injectionMode?: string; defaultPrefix?: string; defaultSuffix?: string };
+        const config = updateClaudeCacheSettings(initialPath, cacheSettings as any);
+
+        broadcastToClients({
+          type: 'CLI_CACHE_SETTINGS_UPDATED',
+          payload: { cache: config.settings.cache, timestamp: new Date().toISOString() }
+        });
+
+        return { success: true, config };
+      } catch (err) {
+        return { error: (err as Error).message, status: 500 };
+      }
+    });
+    return true;
+  }
+
+  // API: Get Code Index MCP provider
+  if (pathname === '/api/cli/code-index-mcp' && req.method === 'GET') {
+    try {
+      const provider = getCodeIndexMcp(initialPath);
+      res.writeHead(200, { 'Content-Type': 'application/json' });
+      res.end(JSON.stringify({ provider }));
+    } catch (err) {
+      res.writeHead(500, { 'Content-Type': 'application/json' });
+      res.end(JSON.stringify({ error: (err as Error).message }));
+    }
+    return true;
+  }
+
+  // API: Update Code Index MCP provider
+  if (pathname === '/api/cli/code-index-mcp' && req.method === 'PUT') {
+    handlePostRequest(req, res, async (body: unknown) => {
+      try {
+        const { provider } = body as { provider: 'codexlens' | 'ace' };
+        if (!provider || !['codexlens', 'ace'].includes(provider)) {
+          return { error: 'Invalid provider. Must be "codexlens" or "ace"', status: 400 };
+        }
+
+        const result = updateCodeIndexMcp(initialPath, provider);
+
+        if (result.success) {
+          broadcastToClients({
+            type: 'CODE_INDEX_MCP_UPDATED',
+            payload: { provider, timestamp: new Date().toISOString() }
+          });
+          return { success: true, provider };
+        } else {
+          return { error: result.error, status: 500 };
+        }
+      } catch (err) {
+        return { error: (err as Error).message, status: 500 };
+      }
+    });
+    return true;
+  }
+
   return false;
 }
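Similarly, a hedged sketch of driving the tools-config and code-index MCP routes above; the base URL and the 'example-tool' name are placeholders.

// Illustrative only; BASE and the tool name are assumptions.
const BASE = 'http://localhost:3456';

async function demoToolsConfig(): Promise<void> {
  // Toggle a single tool's enabled flag in .claude/cli-tools.json
  await fetch(`${BASE}/api/cli/tools-config/example-tool`, {
    method: 'PUT',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ enabled: false }),
  });

  // Switch the code-index MCP provider (only 'codexlens' or 'ace' are accepted)
  await fetch(`${BASE}/api/cli/code-index-mcp`, {
    method: 'PUT',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ provider: 'ace' }),
  });

  // Read the merged config back, including its _configInfo metadata
  const config = await (await fetch(`${BASE}/api/cli/tools-config`)).json();
  console.log(config._configInfo);
}

demoToolsConfig().catch(console.error);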
@@ -9,12 +9,14 @@ import {
   bootstrapVenv,
   executeCodexLens,
   checkSemanticStatus,
+  ensureLiteLLMEmbedderReady,
   installSemantic,
+  detectGpuSupport,
   uninstallCodexLens,
   cancelIndexing,
   isIndexingInProgress
 } from '../../tools/codex-lens.js';
-import type { ProgressInfo } from '../../tools/codex-lens.js';
+import type { ProgressInfo, GpuMode } from '../../tools/codex-lens.js';

 export interface RouteContext {
   pathname: string;
@@ -79,10 +81,22 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
   // API: CodexLens Index List - Get all indexed projects with details
   if (pathname === '/api/codexlens/indexes') {
     try {
-      // Get config for index directory path
-      const configResult = await executeCodexLens(['config', '--json']);
+      // Check if CodexLens is installed first (without auto-installing)
+      const venvStatus = await checkVenvStatus();
+      if (!venvStatus.ready) {
+        res.writeHead(200, { 'Content-Type': 'application/json' });
+        res.end(JSON.stringify({ success: true, indexes: [], totalSize: 0, totalSizeFormatted: '0 B' }));
+        return true;
+      }
+
+      // Execute all CLI commands in parallel
+      const [configResult, projectsResult, statusResult] = await Promise.all([
+        executeCodexLens(['config', '--json']),
+        executeCodexLens(['projects', 'list', '--json']),
+        executeCodexLens(['status', '--json'])
+      ]);
+
       let indexDir = '';

       if (configResult.success) {
         try {
           const config = extractJSON(configResult.output);
@@ -95,8 +109,6 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
         }
       }

-      // Get project list using 'projects list' command
-      const projectsResult = await executeCodexLens(['projects', 'list', '--json']);
       let indexes: any[] = [];
       let totalSize = 0;
       let vectorIndexCount = 0;
@@ -106,7 +118,8 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
         try {
           const projectsData = extractJSON(projectsResult.output);
           if (projectsData.success && Array.isArray(projectsData.result)) {
-            const { statSync, existsSync } = await import('fs');
+            const { stat, readdir } = await import('fs/promises');
+            const { existsSync } = await import('fs');
             const { basename, join } = await import('path');

             for (const project of projectsData.result) {
@@ -127,15 +140,14 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
               // Try to get actual index size from index_root
               if (project.index_root && existsSync(project.index_root)) {
                 try {
-                  const { readdirSync } = await import('fs');
-                  const files = readdirSync(project.index_root);
+                  const files = await readdir(project.index_root);
                   for (const file of files) {
                     try {
                       const filePath = join(project.index_root, file);
-                      const stat = statSync(filePath);
-                      projectSize += stat.size;
-                      if (!lastModified || stat.mtime > lastModified) {
-                        lastModified = stat.mtime;
+                      const fileStat = await stat(filePath);
+                      projectSize += fileStat.size;
+                      if (!lastModified || fileStat.mtime > lastModified) {
+                        lastModified = fileStat.mtime;
                       }
                       // Check for vector/embedding files
                       if (file.includes('vector') || file.includes('embedding') ||
@@ -185,8 +197,7 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
         }
       }

-      // Also get summary stats from status command
-      const statusResult = await executeCodexLens(['status', '--json']);
+      // Parse summary stats from status command (already fetched in parallel)
       let statusSummary: any = {};

       if (statusResult.success) {
@@ -241,6 +252,71 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
     return true;
   }

+  // API: CodexLens Dashboard Init - Aggregated endpoint for page initialization
+  if (pathname === '/api/codexlens/dashboard-init') {
+    try {
+      const venvStatus = await checkVenvStatus();
+
+      if (!venvStatus.ready) {
+        res.writeHead(200, { 'Content-Type': 'application/json' });
+        res.end(JSON.stringify({
+          installed: false,
+          status: venvStatus,
+          config: { index_dir: '~/.codexlens/indexes', index_count: 0 },
+          semantic: { available: false }
+        }));
+        return true;
+      }
+
+      // Parallel fetch all initialization data
+      const [configResult, statusResult, semanticStatus] = await Promise.all([
+        executeCodexLens(['config', '--json']),
+        executeCodexLens(['status', '--json']),
+        checkSemanticStatus()
+      ]);
+
+      // Parse config
+      let config = { index_dir: '~/.codexlens/indexes', index_count: 0 };
+      if (configResult.success) {
+        try {
+          const configData = extractJSON(configResult.output);
+          if (configData.success && configData.result) {
+            config.index_dir = configData.result.index_dir || configData.result.index_root || config.index_dir;
+          }
+        } catch (e) {
+          console.error('[CodexLens] Failed to parse config for dashboard init:', e.message);
+        }
+      }
+
+      // Parse status
+      let statusData: any = {};
+      if (statusResult.success) {
+        try {
+          const status = extractJSON(statusResult.output);
+          if (status.success && status.result) {
+            config.index_count = status.result.projects_count || 0;
+            statusData = status.result;
+          }
+        } catch (e) {
+          console.error('[CodexLens] Failed to parse status for dashboard init:', e.message);
+        }
+      }
+
+      res.writeHead(200, { 'Content-Type': 'application/json' });
+      res.end(JSON.stringify({
+        installed: true,
+        status: venvStatus,
+        config,
+        semantic: semanticStatus,
+        statusData
+      }));
+    } catch (err) {
+      res.writeHead(500, { 'Content-Type': 'application/json' });
+      res.end(JSON.stringify({ success: false, error: err.message }));
+    }
+    return true;
+  }
+
   // API: CodexLens Bootstrap (Install)
   if (pathname === '/api/codexlens/bootstrap' && req.method === 'POST') {
     handlePostRequest(req, res, async () => {
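A sketch of how the Status page could consume the aggregated dashboard-init route above instead of three separate calls; field names follow the payload built in the handler, while the base URL is an assumption.

// Illustrative only; BASE is an assumed dashboard address.
const BASE = 'http://localhost:3456';

async function loadCodexLensDashboard(): Promise<void> {
  const init = await (await fetch(`${BASE}/api/codexlens/dashboard-init`)).json();

  if (!init.installed) {
    console.log('CodexLens not installed; using defaults:', init.config);
    return;
  }
  console.log('index dir:', init.config.index_dir);
  console.log('indexed projects:', init.config.index_count);
  console.log('semantic status:', init.semantic);
}

loadCodexLensDashboard().catch(console.error);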
@@ -289,14 +365,24 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
   // API: CodexLens Config - GET (Get current configuration with index count)
   if (pathname === '/api/codexlens/config' && req.method === 'GET') {
     try {
+      // Check if CodexLens is installed first (without auto-installing)
+      const venvStatus = await checkVenvStatus();
+
+      let responseData = { index_dir: '~/.codexlens/indexes', index_count: 0 };
+
+      // If not installed, return default config without executing CodexLens
+      if (!venvStatus.ready) {
+        res.writeHead(200, { 'Content-Type': 'application/json' });
+        res.end(JSON.stringify(responseData));
+        return true;
+      }
+
       // Fetch both config and status to merge index_count
       const [configResult, statusResult] = await Promise.all([
         executeCodexLens(['config', '--json']),
         executeCodexLens(['status', '--json'])
       ]);

-      let responseData = { index_dir: '~/.codexlens/indexes', index_count: 0 };
-
       // Parse config (extract JSON from output that may contain log messages)
       if (configResult.success) {
         try {
@@ -343,7 +429,7 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
     }

     try {
-      const result = await executeCodexLens(['config-set', '--key', 'index_dir', '--value', index_dir, '--json']);
+      const result = await executeCodexLens(['config', 'set', 'index_dir', index_dir, '--json']);
       if (result.success) {
         return { success: true, message: 'Configuration updated successfully' };
       } else {
@@ -387,9 +473,17 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
   // API: CodexLens Init (Initialize workspace index)
   if (pathname === '/api/codexlens/init' && req.method === 'POST') {
     handlePostRequest(req, res, async (body) => {
-      const { path: projectPath, indexType = 'vector', embeddingModel = 'code' } = body;
+      const { path: projectPath, indexType = 'vector', embeddingModel = 'code', embeddingBackend = 'fastembed', maxWorkers = 1 } = body;
       const targetPath = projectPath || initialPath;

+      // Ensure LiteLLM backend dependencies are installed before running the CLI
+      if (indexType !== 'normal' && embeddingBackend === 'litellm') {
+        const installResult = await ensureLiteLLMEmbedderReady();
+        if (!installResult.success) {
+          return { success: false, error: installResult.error || 'Failed to prepare LiteLLM embedder', status: 500 };
+        }
+      }
+
       // Build CLI arguments based on index type
       const args = ['init', targetPath, '--json'];
       if (indexType === 'normal') {
@@ -397,6 +491,14 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
       } else {
         // Add embedding model selection for vector index
         args.push('--embedding-model', embeddingModel);
+        // Add embedding backend if not using default fastembed
+        if (embeddingBackend && embeddingBackend !== 'fastembed') {
+          args.push('--embedding-backend', embeddingBackend);
+        }
+        // Add max workers for concurrent API calls (useful for litellm backend)
+        if (maxWorkers && maxWorkers > 1) {
+          args.push('--max-workers', String(maxWorkers));
+        }
       }

       // Broadcast start event
@@ -551,6 +653,8 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
     const query = url.searchParams.get('query') || '';
     const limit = parseInt(url.searchParams.get('limit') || '20', 10);
     const mode = url.searchParams.get('mode') || 'exact'; // exact, fuzzy, hybrid, vector
+    const maxContentLength = parseInt(url.searchParams.get('max_content_length') || '200', 10);
+    const extraFilesCount = parseInt(url.searchParams.get('extra_files_count') || '10', 10);
     const projectPath = url.searchParams.get('path') || initialPath;

     if (!query) {
@@ -560,15 +664,46 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
     }

     try {
-      const args = ['search', query, '--path', projectPath, '--limit', limit.toString(), '--mode', mode, '--json'];
+      // Request more results to support split (full content + extra files)
+      const totalToFetch = limit + extraFilesCount;
+      const args = ['search', query, '--path', projectPath, '--limit', totalToFetch.toString(), '--mode', mode, '--json'];

       const result = await executeCodexLens(args, { cwd: projectPath });

       if (result.success) {
         try {
           const parsed = extractJSON(result.output);
+          const allResults = parsed.result?.results || [];
+
+          // Truncate content and split results
+          const truncateContent = (content: string | null | undefined): string => {
+            if (!content) return '';
+            if (content.length <= maxContentLength) return content;
+            return content.slice(0, maxContentLength) + '...';
+          };
+
+          // Split results: first N with full content, rest as file paths only
+          const resultsWithContent = allResults.slice(0, limit).map((r: any) => ({
+            ...r,
+            content: truncateContent(r.content || r.excerpt),
+            excerpt: truncateContent(r.excerpt || r.content),
+          }));
+
+          const extraResults = allResults.slice(limit, limit + extraFilesCount);
+          const extraFiles = [...new Set(extraResults.map((r: any) => r.path || r.file))];
+
           res.writeHead(200, { 'Content-Type': 'application/json' });
-          res.end(JSON.stringify({ success: true, ...parsed.result }));
+          res.end(JSON.stringify({
+            success: true,
+            results: resultsWithContent,
+            extra_files: extraFiles.length > 0 ? extraFiles : undefined,
+            metadata: {
+              total: allResults.length,
+              limit,
+              max_content_length: maxContentLength,
+              extra_files_count: extraFilesCount,
+            },
+          }));
         } catch {
           res.writeHead(200, { 'Content-Type': 'application/json' });
           res.end(JSON.stringify({ success: true, results: [], output: result.output }));
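A hedged example of the enhanced search parameters handled above; the '/api/codexlens/search' route path and the base URL are assumptions, since only the handler body is visible in this hunk.

// Illustrative only; the route path and BASE are assumptions.
const BASE = 'http://localhost:3456';

async function searchCode(query: string): Promise<void> {
  const params = new URLSearchParams({
    query,
    mode: 'hybrid',              // exact | fuzzy | hybrid | vector
    limit: '10',                 // results returned with (truncated) content
    max_content_length: '160',   // per-result content cap before '...' is appended
    extra_files_count: '5',      // overflow hits returned as paths only
  });

  const data = await (await fetch(`${BASE}/api/codexlens/search?${params}`)).json();
  console.log(data.metadata);          // { total, limit, max_content_length, extra_files_count }
  console.log(data.results.length);    // full-content results
  console.log(data.extra_files ?? []); // extra file paths, if any
}

searchCode('loadConfig').catch(console.error);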
@@ -668,16 +803,124 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
   }

-  // API: CodexLens Semantic Search Install (fastembed, ONNX-based, ~200MB)
-  if (pathname === '/api/codexlens/semantic/install' && req.method === 'POST') {
+  // API: Detect GPU support for semantic search
+  if (pathname === '/api/codexlens/gpu/detect' && req.method === 'GET') {
+    try {
+      const gpuInfo = await detectGpuSupport();
+      res.writeHead(200, { 'Content-Type': 'application/json' });
+      res.end(JSON.stringify({ success: true, ...gpuInfo }));
+    } catch (err) {
+      res.writeHead(500, { 'Content-Type': 'application/json' });
+      res.end(JSON.stringify({ success: false, error: err.message }));
+    }
+    return true;
+  }
+
+  // API: List available GPU devices for selection
+  if (pathname === '/api/codexlens/gpu/list' && req.method === 'GET') {
+    try {
+      // Check if CodexLens is installed first (without auto-installing)
+      const venvStatus = await checkVenvStatus();
+      if (!venvStatus.ready) {
+        res.writeHead(200, { 'Content-Type': 'application/json' });
+        res.end(JSON.stringify({ success: true, devices: [], selected_device_id: null }));
+        return true;
+      }
+      const result = await executeCodexLens(['gpu-list', '--json']);
+      if (result.success) {
+        try {
+          const parsed = extractJSON(result.output);
+          res.writeHead(200, { 'Content-Type': 'application/json' });
+          res.end(JSON.stringify(parsed));
+        } catch {
+          res.writeHead(200, { 'Content-Type': 'application/json' });
+          res.end(JSON.stringify({ success: true, devices: [], output: result.output }));
+        }
+      } else {
+        res.writeHead(500, { 'Content-Type': 'application/json' });
+        res.end(JSON.stringify({ success: false, error: result.error }));
+      }
+    } catch (err) {
+      res.writeHead(500, { 'Content-Type': 'application/json' });
+      res.end(JSON.stringify({ success: false, error: err.message }));
+    }
+    return true;
+  }
+
+  // API: Select GPU device for embedding
+  if (pathname === '/api/codexlens/gpu/select' && req.method === 'POST') {
+    handlePostRequest(req, res, async (body) => {
+      const { device_id } = body;
+
+      if (device_id === undefined || device_id === null) {
+        return { success: false, error: 'device_id is required', status: 400 };
+      }
+
+      try {
+        const result = await executeCodexLens(['gpu-select', String(device_id), '--json']);
+        if (result.success) {
+          try {
+            const parsed = extractJSON(result.output);
+            return parsed;
+          } catch {
+            return { success: true, message: 'GPU selected', output: result.output };
+          }
+        } else {
+          return { success: false, error: result.error, status: 500 };
+        }
+      } catch (err) {
+        return { success: false, error: err.message, status: 500 };
+      }
+    });
+    return true;
+  }
+
+  // API: Reset GPU selection to auto-detection
+  if (pathname === '/api/codexlens/gpu/reset' && req.method === 'POST') {
     handlePostRequest(req, res, async () => {
       try {
-        const result = await installSemantic();
+        const result = await executeCodexLens(['gpu-reset', '--json']);
+        if (result.success) {
+          try {
+            const parsed = extractJSON(result.output);
+            return parsed;
+          } catch {
+            return { success: true, message: 'GPU selection reset', output: result.output };
+          }
+        } else {
+          return { success: false, error: result.error, status: 500 };
+        }
+      } catch (err) {
+        return { success: false, error: err.message, status: 500 };
+      }
+    });
+    return true;
+  }
+
+  // API: CodexLens Semantic Search Install (with GPU mode support)
+  if (pathname === '/api/codexlens/semantic/install' && req.method === 'POST') {
+    handlePostRequest(req, res, async (body) => {
+      try {
+        // Get GPU mode from request body, default to 'cpu'
+        const gpuMode: GpuMode = body?.gpuMode || 'cpu';
+        const validModes: GpuMode[] = ['cpu', 'cuda', 'directml'];
+
+        if (!validModes.includes(gpuMode)) {
+          return { success: false, error: `Invalid GPU mode: ${gpuMode}. Valid modes: ${validModes.join(', ')}`, status: 400 };
+        }
+
+        const result = await installSemantic(gpuMode);
         if (result.success) {
           const status = await checkSemanticStatus();
+          const modeDescriptions = {
+            cpu: 'CPU (ONNX Runtime)',
+            cuda: 'NVIDIA CUDA GPU',
+            directml: 'Windows DirectML GPU'
+          };
           return {
             success: true,
-            message: 'Semantic search installed successfully (fastembed)',
+            message: `Semantic search installed successfully with ${modeDescriptions[gpuMode]}`,
+            gpuMode,
             ...status
           };
         } else {
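A sketch of the GPU setup flow these routes enable; the base URL and the shape of entries returned by gpu-list are assumptions.

// Illustrative only; BASE and the device object shape are assumptions.
const BASE = 'http://localhost:3456';

async function setupGpuSemanticSearch(): Promise<void> {
  const gpu = await (await fetch(`${BASE}/api/codexlens/gpu/detect`)).json();
  console.log('GPU support:', gpu);

  const { devices } = await (await fetch(`${BASE}/api/codexlens/gpu/list`)).json();
  if (Array.isArray(devices) && devices.length > 0) {
    await fetch(`${BASE}/api/codexlens/gpu/select`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ device_id: 0 }), // in practice taken from the gpu-list response
    });
  }

  // gpuMode must be one of 'cpu' | 'cuda' | 'directml'
  await fetch(`${BASE}/api/codexlens/semantic/install`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ gpuMode: 'cuda' }),
  });
}

setupGpuSemanticSearch().catch(console.error);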
@@ -693,6 +936,13 @@ export async function handleCodexLensRoutes(ctx: RouteContext): Promise<boolean>
   // API: CodexLens Model List (list available embedding models)
   if (pathname === '/api/codexlens/models' && req.method === 'GET') {
     try {
+      // Check if CodexLens is installed first (without auto-installing)
+      const venvStatus = await checkVenvStatus();
+      if (!venvStatus.ready) {
+        res.writeHead(200, { 'Content-Type': 'application/json' });
+        res.end(JSON.stringify({ success: false, error: 'CodexLens not installed' }));
+        return true;
+      }
       const result = await executeCodexLens(['model-list', '--json']);
       if (result.success) {
         try {
@@ -31,8 +31,8 @@ const GLOBAL_SETTINGS_PATH = join(homedir(), '.claude', 'settings.json');
  * @returns {string}
  */
 function getProjectSettingsPath(projectPath) {
-  const normalizedPath = projectPath.replace(/\//g, '\\').replace(/^\\([a-zA-Z])\\/, '$1:\\');
-  return join(normalizedPath, '.claude', 'settings.json');
+  // path.join automatically handles cross-platform path separators
+  return join(projectPath, '.claude', 'settings.json');
 }

 /**
@@ -181,29 +181,13 @@ function deleteHookFromSettings(projectPath, scope, event, hookIndex) {
 }

 // ========================================
-// Session State Tracking (for progressive disclosure)
+// Session State Tracking
 // ========================================
-
-// Track sessions that have received startup context
-// Key: sessionId, Value: timestamp of first context load
-const sessionContextState = new Map<string, {
-  firstLoad: string;
-  loadCount: number;
-  lastPrompt?: string;
-}>();
-
-// Cleanup old sessions (older than 24 hours)
-function cleanupOldSessions() {
-  const cutoff = Date.now() - 24 * 60 * 60 * 1000;
-  for (const [sessionId, state] of sessionContextState.entries()) {
-    if (new Date(state.firstLoad).getTime() < cutoff) {
-      sessionContextState.delete(sessionId);
-    }
-  }
-}
-
-// Run cleanup every hour
-setInterval(cleanupOldSessions, 60 * 60 * 1000);
+// NOTE: Session state is managed by the CLI command (src/commands/hook.ts)
+// using file-based persistence (~/.claude/.ccw-sessions/).
+// This ensures consistent state tracking across all invocation methods.
+// The /api/hook endpoint delegates to SessionClusteringService without
+// managing its own state, as the authoritative state lives in the CLI layer.

 // ========================================
 // Route Handler
@@ -286,7 +270,8 @@ export async function handleHooksRoutes(ctx: RouteContext): Promise<boolean> {
   }

   // API: Unified Session Context endpoint (Progressive Disclosure)
-  // Automatically detects first prompt vs subsequent prompts
+  // DEPRECATED: Use CLI command `ccw hook session-context --stdin` instead.
+  // This endpoint now uses file-based state (shared with CLI) for consistency.
   // - First prompt: returns cluster-based session overview
   // - Subsequent prompts: returns intent-matched sessions based on prompt
   if (pathname === '/api/hook/session-context' && req.method === 'POST') {
@@ -306,21 +291,30 @@ export async function handleHooksRoutes(ctx: RouteContext): Promise<boolean> {
       const { SessionClusteringService } = await import('../session-clustering-service.js');
       const clusteringService = new SessionClusteringService(projectPath);

-      // Check if this is the first prompt for this session
-      const existingState = sessionContextState.get(sessionId);
+      // Use file-based session state (shared with CLI hook.ts)
+      const sessionStateDir = join(homedir(), '.claude', '.ccw-sessions');
+      const sessionStateFile = join(sessionStateDir, `session-${sessionId}.json`);
+
+      let existingState: { firstLoad: string; loadCount: number; lastPrompt?: string } | null = null;
+      if (existsSync(sessionStateFile)) {
+        try {
+          existingState = JSON.parse(readFileSync(sessionStateFile, 'utf-8'));
+        } catch {
+          existingState = null;
+        }
+      }
+
       const isFirstPrompt = !existingState;

-      // Update session state
-      if (isFirstPrompt) {
-        sessionContextState.set(sessionId, {
-          firstLoad: new Date().toISOString(),
-          loadCount: 1,
-          lastPrompt: prompt
-        });
-      } else {
-        existingState.loadCount++;
-        existingState.lastPrompt = prompt;
-      }
+      // Update session state (file-based)
+      const newState = isFirstPrompt
+        ? { firstLoad: new Date().toISOString(), loadCount: 1, lastPrompt: prompt }
+        : { ...existingState!, loadCount: existingState!.loadCount + 1, lastPrompt: prompt };
+
+      if (!existsSync(sessionStateDir)) {
+        mkdirSync(sessionStateDir, { recursive: true });
+      }
+      writeFileSync(sessionStateFile, JSON.stringify(newState, null, 2));

       // Determine which type of context to return
       let contextType: 'session-start' | 'context';
@@ -351,7 +345,7 @@ export async function handleHooksRoutes(ctx: RouteContext): Promise<boolean> {
         success: true,
         type: contextType,
         isFirstPrompt,
-        loadCount: sessionContextState.get(sessionId)?.loadCount || 1,
+        loadCount: newState.loadCount,
         content,
         sessionId
       };
930
ccw/src/core/routes/litellm-api-routes.ts
Normal file
930
ccw/src/core/routes/litellm-api-routes.ts
Normal file
@@ -0,0 +1,930 @@
|
|||||||
|
// @ts-nocheck
|
||||||
|
/**
|
||||||
|
* LiteLLM API Routes Module
|
||||||
|
* Handles LiteLLM provider management, endpoint configuration, and cache management
|
||||||
|
*/
|
||||||
|
import type { IncomingMessage, ServerResponse } from 'http';
|
||||||
|
import { fileURLToPath } from 'url';
|
||||||
|
import { dirname, join as pathJoin } from 'path';
|
||||||
|
|
||||||
|
// Get current module path for package-relative lookups
|
||||||
|
const __filename = fileURLToPath(import.meta.url);
|
||||||
|
const __dirname = dirname(__filename);
|
||||||
|
// Package root: routes -> core -> src -> ccw -> package root
|
||||||
|
const PACKAGE_ROOT = pathJoin(__dirname, '..', '..', '..', '..');
|
||||||
|
|
||||||
|
import {
|
||||||
|
getAllProviders,
|
||||||
|
getProvider,
|
||||||
|
addProvider,
|
||||||
|
updateProvider,
|
||||||
|
deleteProvider,
|
||||||
|
getAllEndpoints,
|
||||||
|
getEndpoint,
|
||||||
|
addEndpoint,
|
||||||
|
updateEndpoint,
|
||||||
|
deleteEndpoint,
|
||||||
|
getDefaultEndpoint,
|
||||||
|
setDefaultEndpoint,
|
||||||
|
getGlobalCacheSettings,
|
||||||
|
updateGlobalCacheSettings,
|
||||||
|
loadLiteLLMApiConfig,
|
||||||
|
saveLiteLLMYamlConfig,
|
||||||
|
generateLiteLLMYamlConfig,
|
||||||
|
getCodexLensEmbeddingRotation,
|
||||||
|
updateCodexLensEmbeddingRotation,
|
||||||
|
getEmbeddingProvidersForRotation,
|
||||||
|
generateRotationEndpoints,
|
||||||
|
syncCodexLensConfig,
|
||||||
|
getEmbeddingPoolConfig,
|
||||||
|
updateEmbeddingPoolConfig,
|
||||||
|
discoverProvidersForModel,
|
||||||
|
type ProviderCredential,
|
||||||
|
type CustomEndpoint,
|
||||||
|
type ProviderType,
|
||||||
|
type CodexLensEmbeddingRotation,
|
||||||
|
type EmbeddingPoolConfig,
|
||||||
|
} from '../../config/litellm-api-config-manager.js';
|
||||||
|
import { getContextCacheStore } from '../../tools/context-cache-store.js';
|
||||||
|
import { getLiteLLMClient } from '../../tools/litellm-client.js';
|
||||||
|
|
||||||
|
// Cache for ccw-litellm status check
|
||||||
|
let ccwLitellmStatusCache: {
|
||||||
|
data: { installed: boolean; version?: string; error?: string } | null;
|
||||||
|
timestamp: number;
|
||||||
|
ttl: number;
|
||||||
|
} = {
|
||||||
|
data: null,
|
||||||
|
timestamp: 0,
|
||||||
|
ttl: 5 * 60 * 1000, // 5 minutes
|
||||||
|
};
|
||||||
|
|
||||||
|
// Clear cache (call after install)
|
||||||
|
export function clearCcwLitellmStatusCache() {
|
||||||
|
ccwLitellmStatusCache.data = null;
|
||||||
|
ccwLitellmStatusCache.timestamp = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface RouteContext {
|
||||||
|
pathname: string;
|
||||||
|
url: URL;
|
||||||
|
req: IncomingMessage;
|
||||||
|
res: ServerResponse;
|
||||||
|
initialPath: string;
|
||||||
|
handlePostRequest: (req: IncomingMessage, res: ServerResponse, handler: (body: unknown) => Promise<any>) => void;
|
||||||
|
broadcastToClients: (data: unknown) => void;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ===========================
|
||||||
|
// Model Information
|
||||||
|
// ===========================
|
||||||
|
|
||||||
|
interface ModelInfo {
|
||||||
|
id: string;
|
||||||
|
name: string;
|
||||||
|
provider: ProviderType;
|
||||||
|
description?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
const PROVIDER_MODELS: Record<ProviderType, ModelInfo[]> = {
|
||||||
|
openai: [
|
||||||
|
{ id: 'gpt-4-turbo', name: 'GPT-4 Turbo', provider: 'openai', description: '128K context' },
|
||||||
|
{ id: 'gpt-4', name: 'GPT-4', provider: 'openai', description: '8K context' },
|
||||||
|
{ id: 'gpt-3.5-turbo', name: 'GPT-3.5 Turbo', provider: 'openai', description: '16K context' },
|
||||||
|
],
|
||||||
|
anthropic: [
|
||||||
|
{ id: 'claude-3-opus-20240229', name: 'Claude 3 Opus', provider: 'anthropic', description: '200K context' },
|
||||||
|
{ id: 'claude-3-sonnet-20240229', name: 'Claude 3 Sonnet', provider: 'anthropic', description: '200K context' },
|
||||||
|
{ id: 'claude-3-haiku-20240307', name: 'Claude 3 Haiku', provider: 'anthropic', description: '200K context' },
|
||||||
|
],
|
||||||
|
google: [
|
||||||
|
{ id: 'gemini-pro', name: 'Gemini Pro', provider: 'google', description: '32K context' },
|
||||||
|
{ id: 'gemini-pro-vision', name: 'Gemini Pro Vision', provider: 'google', description: '16K context' },
|
||||||
|
],
|
||||||
|
ollama: [
|
||||||
|
{ id: 'llama2', name: 'Llama 2', provider: 'ollama', description: 'Local model' },
|
||||||
|
{ id: 'mistral', name: 'Mistral', provider: 'ollama', description: 'Local model' },
|
||||||
|
],
|
||||||
|
azure: [],
|
||||||
|
mistral: [
|
||||||
|
{ id: 'mistral-large-latest', name: 'Mistral Large', provider: 'mistral', description: '32K context' },
|
||||||
|
{ id: 'mistral-medium-latest', name: 'Mistral Medium', provider: 'mistral', description: '32K context' },
|
||||||
|
],
|
||||||
|
deepseek: [
|
||||||
|
{ id: 'deepseek-chat', name: 'DeepSeek Chat', provider: 'deepseek', description: '64K context' },
|
||||||
|
{ id: 'deepseek-coder', name: 'DeepSeek Coder', provider: 'deepseek', description: '64K context' },
|
||||||
|
],
|
||||||
|
custom: [],
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Handle LiteLLM API routes
|
||||||
|
* @returns true if route was handled, false otherwise
|
||||||
|
*/
|
||||||
|
export async function handleLiteLLMApiRoutes(ctx: RouteContext): Promise<boolean> {
|
||||||
|
const { pathname, url, req, res, initialPath, handlePostRequest, broadcastToClients } = ctx;
|
||||||
|
|
||||||
|
// ===========================
|
||||||
|
// Provider Management Routes
|
||||||
|
// ===========================
|
||||||
|
|
||||||
|
// GET /api/litellm-api/providers - List all providers
|
||||||
|
if (pathname === '/api/litellm-api/providers' && req.method === 'GET') {
|
||||||
|
try {
|
||||||
|
const providers = getAllProviders(initialPath);
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ providers, count: providers.length }));
|
||||||
|
} catch (err) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: (err as Error).message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// POST /api/litellm-api/providers - Create provider
|
||||||
|
if (pathname === '/api/litellm-api/providers' && req.method === 'POST') {
|
||||||
|
handlePostRequest(req, res, async (body: unknown) => {
|
||||||
|
const providerData = body as Omit<ProviderCredential, 'id' | 'createdAt' | 'updatedAt'>;
|
||||||
|
|
||||||
|
if (!providerData.name || !providerData.type || !providerData.apiKey) {
|
||||||
|
return { error: 'Provider name, type, and apiKey are required', status: 400 };
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const provider = addProvider(initialPath, providerData);
|
||||||
|
|
||||||
|
broadcastToClients({
|
||||||
|
type: 'LITELLM_PROVIDER_CREATED',
|
||||||
|
payload: { provider, timestamp: new Date().toISOString() }
|
||||||
|
});
|
||||||
|
|
||||||
|
return { success: true, provider };
|
||||||
|
} catch (err) {
|
||||||
|
return { error: (err as Error).message, status: 500 };
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// GET /api/litellm-api/providers/:id - Get provider by ID
|
||||||
|
const providerGetMatch = pathname.match(/^\/api\/litellm-api\/providers\/([^/]+)$/);
|
||||||
|
if (providerGetMatch && req.method === 'GET') {
|
||||||
|
const providerId = providerGetMatch[1];
|
||||||
|
|
||||||
|
try {
|
||||||
|
const provider = getProvider(initialPath, providerId);
|
||||||
|
if (!provider) {
|
||||||
|
res.writeHead(404, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: 'Provider not found' }));
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify(provider));
|
||||||
|
} catch (err) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: (err as Error).message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// PUT /api/litellm-api/providers/:id - Update provider
|
||||||
|
const providerUpdateMatch = pathname.match(/^\/api\/litellm-api\/providers\/([^/]+)$/);
|
||||||
|
if (providerUpdateMatch && req.method === 'PUT') {
|
||||||
|
const providerId = providerUpdateMatch[1];
|
||||||
|
|
||||||
|
handlePostRequest(req, res, async (body: unknown) => {
|
||||||
|
const updates = body as Partial<Omit<ProviderCredential, 'id' | 'createdAt' | 'updatedAt'>>;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const provider = updateProvider(initialPath, providerId, updates);
|
||||||
|
|
||||||
|
broadcastToClients({
|
||||||
|
type: 'LITELLM_PROVIDER_UPDATED',
|
||||||
|
payload: { provider, timestamp: new Date().toISOString() }
|
||||||
|
});
|
||||||
|
|
||||||
|
return { success: true, provider };
|
||||||
|
} catch (err) {
|
||||||
|
return { error: (err as Error).message, status: 404 };
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// DELETE /api/litellm-api/providers/:id - Delete provider
|
||||||
|
const providerDeleteMatch = pathname.match(/^\/api\/litellm-api\/providers\/([^/]+)$/);
|
||||||
|
if (providerDeleteMatch && req.method === 'DELETE') {
|
||||||
|
const providerId = providerDeleteMatch[1];
|
||||||
|
|
||||||
|
try {
|
||||||
|
const success = deleteProvider(initialPath, providerId);
|
||||||
|
|
||||||
|
if (!success) {
|
||||||
|
res.writeHead(404, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: 'Provider not found' }));
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
broadcastToClients({
|
||||||
|
type: 'LITELLM_PROVIDER_DELETED',
|
||||||
|
payload: { providerId, timestamp: new Date().toISOString() }
|
||||||
|
});
|
||||||
|
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ success: true, message: 'Provider deleted' }));
|
||||||
|
} catch (err) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: (err as Error).message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// POST /api/litellm-api/providers/:id/test - Test provider connection
|
||||||
|
const providerTestMatch = pathname.match(/^\/api\/litellm-api\/providers\/([^/]+)\/test$/);
|
||||||
|
if (providerTestMatch && req.method === 'POST') {
|
||||||
|
const providerId = providerTestMatch[1];
|
||||||
|
|
||||||
|
try {
|
||||||
|
const provider = getProvider(initialPath, providerId);
|
||||||
|
|
||||||
|
if (!provider) {
|
||||||
|
res.writeHead(404, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ success: false, error: 'Provider not found' }));
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!provider.enabled) {
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ success: false, error: 'Provider is disabled' }));
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test connection using litellm client
|
||||||
|
const client = getLiteLLMClient();
|
||||||
|
const available = await client.isAvailable();
|
||||||
|
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ success: available, provider: provider.type }));
|
||||||
|
} catch (err) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ success: false, error: (err as Error).message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ===========================
|
||||||
|
// Endpoint Management Routes
|
||||||
|
// ===========================
|
||||||
|
|
||||||
|
// GET /api/litellm-api/endpoints - List all endpoints
|
||||||
|
if (pathname === '/api/litellm-api/endpoints' && req.method === 'GET') {
|
||||||
|
try {
|
||||||
|
const endpoints = getAllEndpoints(initialPath);
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ endpoints, count: endpoints.length }));
|
||||||
|
} catch (err) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: (err as Error).message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// POST /api/litellm-api/endpoints - Create endpoint
|
||||||
|
if (pathname === '/api/litellm-api/endpoints' && req.method === 'POST') {
|
||||||
|
handlePostRequest(req, res, async (body: unknown) => {
|
||||||
|
const endpointData = body as Omit<CustomEndpoint, 'createdAt' | 'updatedAt'>;
|
||||||
|
|
||||||
|
if (!endpointData.id || !endpointData.name || !endpointData.providerId || !endpointData.model) {
|
||||||
|
return { error: 'Endpoint id, name, providerId, and model are required', status: 400 };
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const endpoint = addEndpoint(initialPath, endpointData);
|
||||||
|
|
||||||
|
broadcastToClients({
|
||||||
|
type: 'LITELLM_ENDPOINT_CREATED',
|
||||||
|
payload: { endpoint, timestamp: new Date().toISOString() }
|
||||||
|
});
|
||||||
|
|
||||||
|
return { success: true, endpoint };
|
||||||
|
} catch (err) {
|
||||||
|
return { error: (err as Error).message, status: 500 };
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// GET /api/litellm-api/endpoints/:id - Get endpoint by ID
|
||||||
|
const endpointGetMatch = pathname.match(/^\/api\/litellm-api\/endpoints\/([^/]+)$/);
|
||||||
|
if (endpointGetMatch && req.method === 'GET') {
|
||||||
|
const endpointId = endpointGetMatch[1];
|
||||||
|
|
||||||
|
try {
|
||||||
|
const endpoint = getEndpoint(initialPath, endpointId);
|
||||||
|
if (!endpoint) {
|
||||||
|
res.writeHead(404, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: 'Endpoint not found' }));
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify(endpoint));
|
||||||
|
} catch (err) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: (err as Error).message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// PUT /api/litellm-api/endpoints/:id - Update endpoint
|
||||||
|
const endpointUpdateMatch = pathname.match(/^\/api\/litellm-api\/endpoints\/([^/]+)$/);
|
||||||
|
if (endpointUpdateMatch && req.method === 'PUT') {
|
||||||
|
const endpointId = endpointUpdateMatch[1];
|
||||||
|
|
||||||
|
handlePostRequest(req, res, async (body: unknown) => {
|
||||||
|
const updates = body as Partial<Omit<CustomEndpoint, 'id' | 'createdAt' | 'updatedAt'>>;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const endpoint = updateEndpoint(initialPath, endpointId, updates);
|
||||||
|
|
||||||
|
broadcastToClients({
|
||||||
|
type: 'LITELLM_ENDPOINT_UPDATED',
|
||||||
|
payload: { endpoint, timestamp: new Date().toISOString() }
|
||||||
|
});
|
||||||
|
|
||||||
|
return { success: true, endpoint };
|
||||||
|
} catch (err) {
|
||||||
|
return { error: (err as Error).message, status: 404 };
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// DELETE /api/litellm-api/endpoints/:id - Delete endpoint
|
||||||
|
const endpointDeleteMatch = pathname.match(/^\/api\/litellm-api\/endpoints\/([^/]+)$/);
|
||||||
|
if (endpointDeleteMatch && req.method === 'DELETE') {
|
||||||
|
const endpointId = endpointDeleteMatch[1];
|
||||||
|
|
||||||
|
try {
|
||||||
|
const success = deleteEndpoint(initialPath, endpointId);
|
||||||
|
|
||||||
|
if (!success) {
|
||||||
|
res.writeHead(404, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: 'Endpoint not found' }));
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
broadcastToClients({
|
||||||
|
type: 'LITELLM_ENDPOINT_DELETED',
|
||||||
|
payload: { endpointId, timestamp: new Date().toISOString() }
|
||||||
|
});
|
||||||
|
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ success: true, message: 'Endpoint deleted' }));
|
||||||
|
} catch (err) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: (err as Error).message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ===========================
|
||||||
|
// Model Discovery Routes
|
||||||
|
// ===========================
|
||||||
|
|
||||||
|
// GET /api/litellm-api/models/:providerType - Get available models for provider type
|
||||||
|
const modelsMatch = pathname.match(/^\/api\/litellm-api\/models\/([^/]+)$/);
|
||||||
|
if (modelsMatch && req.method === 'GET') {
|
||||||
|
const providerType = modelsMatch[1] as ProviderType;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const models = PROVIDER_MODELS[providerType];
|
||||||
|
|
||||||
|
if (!models) {
|
||||||
|
res.writeHead(404, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: 'Provider type not found' }));
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ providerType, models, count: models.length }));
|
||||||
|
} catch (err) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: (err as Error).message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ===========================
|
||||||
|
// Cache Management Routes
|
||||||
|
// ===========================
|
||||||
|
|
||||||
|
// GET /api/litellm-api/cache/stats - Get cache statistics
|
||||||
|
if (pathname === '/api/litellm-api/cache/stats' && req.method === 'GET') {
|
||||||
|
try {
|
||||||
|
const cacheStore = getContextCacheStore();
|
||||||
|
const stats = cacheStore.getStatus();
|
||||||
|
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify(stats));
|
||||||
|
} catch (err) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: (err as Error).message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// POST /api/litellm-api/cache/clear - Clear cache
|
||||||
|
if (pathname === '/api/litellm-api/cache/clear' && req.method === 'POST') {
|
||||||
|
try {
|
||||||
|
const cacheStore = getContextCacheStore();
|
||||||
|
const result = cacheStore.clear();
|
||||||
|
|
||||||
|
broadcastToClients({
|
||||||
|
type: 'LITELLM_CACHE_CLEARED',
|
||||||
|
payload: { removed: result.removed, timestamp: new Date().toISOString() }
|
||||||
|
});
|
||||||
|
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ success: true, removed: result.removed }));
|
||||||
|
} catch (err) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: (err as Error).message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ===========================
|
||||||
|
// Config Management Routes
|
||||||
|
// ===========================
|
||||||
|
|
||||||
|
// GET /api/litellm-api/config - Get full config
|
||||||
|
if (pathname === '/api/litellm-api/config' && req.method === 'GET') {
|
||||||
|
try {
|
||||||
|
const config = loadLiteLLMApiConfig(initialPath);
|
||||||
|
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify(config));
|
||||||
|
} catch (err) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: (err as Error).message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// PUT /api/litellm-api/config/cache - Update global cache settings
|
||||||
|
if (pathname === '/api/litellm-api/config/cache' && req.method === 'PUT') {
|
||||||
|
handlePostRequest(req, res, async (body: unknown) => {
|
||||||
|
const settings = body as Partial<{ enabled: boolean; cacheDir: string; maxTotalSizeMB: number }>;
|
||||||
|
|
||||||
|
try {
|
||||||
|
updateGlobalCacheSettings(initialPath, settings);
|
||||||
|
|
||||||
|
const updatedSettings = getGlobalCacheSettings(initialPath);
|
||||||
|
|
||||||
|
broadcastToClients({
|
||||||
|
type: 'LITELLM_CACHE_SETTINGS_UPDATED',
|
||||||
|
payload: { settings: updatedSettings, timestamp: new Date().toISOString() }
|
||||||
|
});
|
||||||
|
|
||||||
|
return { success: true, settings: updatedSettings };
|
||||||
|
} catch (err) {
|
||||||
|
return { error: (err as Error).message, status: 500 };
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// PUT /api/litellm-api/config/default-endpoint - Set default endpoint
|
||||||
|
if (pathname === '/api/litellm-api/config/default-endpoint' && req.method === 'PUT') {
|
||||||
|
handlePostRequest(req, res, async (body: unknown) => {
|
||||||
|
const { endpointId } = body as { endpointId?: string };
|
||||||
|
|
||||||
|
try {
|
||||||
|
setDefaultEndpoint(initialPath, endpointId);
|
||||||
|
|
||||||
|
const defaultEndpoint = getDefaultEndpoint(initialPath);
|
||||||
|
|
||||||
|
broadcastToClients({
|
||||||
|
type: 'LITELLM_DEFAULT_ENDPOINT_UPDATED',
|
||||||
|
payload: { endpointId, defaultEndpoint, timestamp: new Date().toISOString() }
|
||||||
|
});
|
||||||
|
|
||||||
|
return { success: true, defaultEndpoint };
|
||||||
|
} catch (err) {
|
||||||
|
return { error: (err as Error).message, status: 500 };
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ===========================
|
||||||
|
// Config Sync Routes
|
||||||
|
// ===========================
|
||||||
|
|
||||||
|
// POST /api/litellm-api/config/sync - Sync UI config to ccw_litellm YAML config
|
||||||
|
if (pathname === '/api/litellm-api/config/sync' && req.method === 'POST') {
|
||||||
|
try {
|
||||||
|
const yamlPath = saveLiteLLMYamlConfig(initialPath);
|
||||||
|
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({
|
||||||
|
success: true,
|
||||||
|
message: 'Config synced to ccw_litellm',
|
||||||
|
yamlPath,
|
||||||
|
}));
|
||||||
|
} catch (err) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: (err as Error).message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// GET /api/litellm-api/config/yaml-preview - Preview YAML config without saving
|
||||||
|
if (pathname === '/api/litellm-api/config/yaml-preview' && req.method === 'GET') {
|
||||||
|
try {
|
||||||
|
const yamlConfig = generateLiteLLMYamlConfig(initialPath);
|
||||||
|
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({
|
||||||
|
success: true,
|
||||||
|
config: yamlConfig,
|
||||||
|
}));
|
||||||
|
} catch (err) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: (err as Error).message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ===========================
|
||||||
|
// CCW-LiteLLM Package Management
|
||||||
|
// ===========================
|
||||||
|
|
||||||
|
// GET /api/litellm-api/ccw-litellm/status - Check ccw-litellm installation status
|
||||||
|
// Supports ?refresh=true to bypass cache
|
||||||
|
if (pathname === '/api/litellm-api/ccw-litellm/status' && req.method === 'GET') {
|
||||||
|
const forceRefresh = url.searchParams.get('refresh') === 'true';
|
||||||
|
|
||||||
|
// Check cache first (unless force refresh)
|
||||||
|
if (!forceRefresh && ccwLitellmStatusCache.data &&
|
||||||
|
Date.now() - ccwLitellmStatusCache.timestamp < ccwLitellmStatusCache.ttl) {
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify(ccwLitellmStatusCache.data));
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Async check - use pip show for more reliable detection
|
||||||
|
try {
|
||||||
|
const { exec } = await import('child_process');
|
||||||
|
const { promisify } = await import('util');
|
||||||
|
const execAsync = promisify(exec);
|
||||||
|
|
||||||
|
let result: { installed: boolean; version?: string; error?: string } = { installed: false };
|
||||||
|
|
||||||
|
// Method 1: Try pip show ccw-litellm (most reliable)
|
||||||
|
try {
|
||||||
|
const { stdout } = await execAsync('pip show ccw-litellm', {
|
||||||
|
timeout: 10000,
|
||||||
|
windowsHide: true,
|
||||||
|
shell: true,
|
||||||
|
});
|
||||||
|
// Parse version from pip show output
|
||||||
|
const versionMatch = stdout.match(/Version:\s*(.+)/i);
|
||||||
|
if (versionMatch) {
|
||||||
|
result = { installed: true, version: versionMatch[1].trim() };
|
||||||
|
console.log(`[ccw-litellm status] Found via pip show: ${result.version}`);
|
||||||
|
}
|
||||||
|
} catch (pipErr) {
|
||||||
|
console.log('[ccw-litellm status] pip show failed, trying python import...');
|
||||||
|
|
||||||
|
// Method 2: Fallback to Python import
|
||||||
|
const pythonExecutables = ['python', 'python3', 'py'];
|
||||||
|
for (const pythonExe of pythonExecutables) {
|
||||||
|
try {
|
||||||
|
// Use simpler Python code without complex quotes
|
||||||
|
const { stdout } = await execAsync(`${pythonExe} -c "import ccw_litellm; print(ccw_litellm.__version__)"`, {
|
||||||
|
timeout: 5000,
|
||||||
|
windowsHide: true,
|
||||||
|
shell: true,
|
||||||
|
});
|
||||||
|
const version = stdout.trim();
|
||||||
|
if (version) {
|
||||||
|
result = { installed: true, version };
|
||||||
|
console.log(`[ccw-litellm status] Found with ${pythonExe}: ${version}`);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
} catch (err) {
|
||||||
|
result.error = (err as Error).message;
|
||||||
|
console.log(`[ccw-litellm status] ${pythonExe} failed:`, result.error.substring(0, 100));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update cache
|
||||||
|
ccwLitellmStatusCache = {
|
||||||
|
data: result,
|
||||||
|
timestamp: Date.now(),
|
||||||
|
ttl: 5 * 60 * 1000,
|
||||||
|
};
|
||||||
|
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify(result));
|
||||||
|
} catch (err) {
|
||||||
|
const errorResult = { installed: false, error: (err as Error).message };
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify(errorResult));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ===========================
|
||||||
|
// CodexLens Embedding Rotation Routes
|
||||||
|
// ===========================
|
||||||
|
|
||||||
|
// GET /api/litellm-api/codexlens/rotation - Get rotation config
|
||||||
|
if (pathname === '/api/litellm-api/codexlens/rotation' && req.method === 'GET') {
|
||||||
|
try {
|
||||||
|
const rotationConfig = getCodexLensEmbeddingRotation(initialPath);
|
||||||
|
const availableProviders = getEmbeddingProvidersForRotation(initialPath);
|
||||||
|
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({
|
||||||
|
rotationConfig: rotationConfig || null,
|
||||||
|
availableProviders,
|
||||||
|
}));
|
||||||
|
} catch (err) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: (err as Error).message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// PUT /api/litellm-api/codexlens/rotation - Update rotation config
|
||||||
|
if (pathname === '/api/litellm-api/codexlens/rotation' && req.method === 'PUT') {
|
||||||
|
handlePostRequest(req, res, async (body: unknown) => {
|
||||||
|
const rotationConfig = body as CodexLensEmbeddingRotation | null;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const { syncResult } = updateCodexLensEmbeddingRotation(initialPath, rotationConfig || undefined);
|
||||||
|
|
||||||
|
broadcastToClients({
|
||||||
|
type: 'CODEXLENS_ROTATION_UPDATED',
|
||||||
|
payload: { rotationConfig, syncResult, timestamp: new Date().toISOString() }
|
||||||
|
});
|
||||||
|
|
||||||
|
return { success: true, rotationConfig, syncResult };
|
||||||
|
} catch (err) {
|
||||||
|
return { error: (err as Error).message, status: 500 };
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// GET /api/litellm-api/codexlens/rotation/endpoints - Get generated rotation endpoints
|
||||||
|
if (pathname === '/api/litellm-api/codexlens/rotation/endpoints' && req.method === 'GET') {
|
||||||
|
try {
|
||||||
|
const endpoints = generateRotationEndpoints(initialPath);
|
||||||
|
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({
|
||||||
|
endpoints,
|
||||||
|
count: endpoints.length,
|
||||||
|
}));
|
||||||
|
} catch (err) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: (err as Error).message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// POST /api/litellm-api/codexlens/rotation/sync - Manually sync rotation config to CodexLens
|
||||||
|
if (pathname === '/api/litellm-api/codexlens/rotation/sync' && req.method === 'POST') {
|
||||||
|
try {
|
||||||
|
const syncResult = syncCodexLensConfig(initialPath);
|
||||||
|
|
||||||
|
if (syncResult.success) {
|
||||||
|
broadcastToClients({
|
||||||
|
type: 'CODEXLENS_CONFIG_SYNCED',
|
||||||
|
payload: { ...syncResult, timestamp: new Date().toISOString() }
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify(syncResult));
|
||||||
|
} catch (err) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ success: false, message: (err as Error).message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ===========================
|
||||||
|
// Embedding Pool Routes (New Generic API)
|
||||||
|
// ===========================
|
||||||
|
|
||||||
|
// GET /api/litellm-api/embedding-pool - Get pool config and available models
|
||||||
|
if (pathname === '/api/litellm-api/embedding-pool' && req.method === 'GET') {
|
||||||
|
try {
|
||||||
|
const poolConfig = getEmbeddingPoolConfig(initialPath);
|
||||||
|
|
||||||
|
// Get list of all available embedding models from all providers
|
||||||
|
const config = loadLiteLLMApiConfig(initialPath);
|
||||||
|
const availableModels: Array<{ modelId: string; modelName: string; providers: string[] }> = [];
|
||||||
|
const modelMap = new Map<string, { modelId: string; modelName: string; providers: string[] }>();
|
||||||
|
|
||||||
|
for (const provider of config.providers) {
|
||||||
|
if (!provider.enabled || !provider.embeddingModels) continue;
|
||||||
|
|
||||||
|
for (const model of provider.embeddingModels) {
|
||||||
|
if (!model.enabled) continue;
|
||||||
|
|
||||||
|
const key = model.id;
|
||||||
|
if (modelMap.has(key)) {
|
||||||
|
modelMap.get(key)!.providers.push(provider.name);
|
||||||
|
} else {
|
||||||
|
modelMap.set(key, {
|
||||||
|
modelId: model.id,
|
||||||
|
modelName: model.name,
|
||||||
|
providers: [provider.name],
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
availableModels.push(...Array.from(modelMap.values()));
|
||||||
|
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({
|
||||||
|
poolConfig: poolConfig || null,
|
||||||
|
availableModels,
|
||||||
|
}));
|
||||||
|
} catch (err) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: (err as Error).message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// PUT /api/litellm-api/embedding-pool - Update pool config
|
||||||
|
if (pathname === '/api/litellm-api/embedding-pool' && req.method === 'PUT') {
|
||||||
|
handlePostRequest(req, res, async (body: unknown) => {
|
||||||
|
const poolConfig = body as EmbeddingPoolConfig | null;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const { syncResult } = updateEmbeddingPoolConfig(initialPath, poolConfig || undefined);
|
||||||
|
|
||||||
|
broadcastToClients({
|
||||||
|
type: 'EMBEDDING_POOL_UPDATED',
|
||||||
|
payload: { poolConfig, syncResult, timestamp: new Date().toISOString() }
|
||||||
|
});
|
||||||
|
|
||||||
|
return { success: true, poolConfig, syncResult };
|
||||||
|
} catch (err) {
|
||||||
|
return { error: (err as Error).message, status: 500 };
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// GET /api/litellm-api/embedding-pool/discover/:model - Preview auto-discovery results
|
||||||
|
const discoverMatch = pathname.match(/^\/api\/litellm-api\/embedding-pool\/discover\/([^/]+)$/);
|
||||||
|
if (discoverMatch && req.method === 'GET') {
|
||||||
|
const targetModel = decodeURIComponent(discoverMatch[1]);
|
||||||
|
|
||||||
|
try {
|
||||||
|
const discovered = discoverProvidersForModel(initialPath, targetModel);
|
||||||
|
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({
|
||||||
|
targetModel,
|
||||||
|
discovered,
|
||||||
|
count: discovered.length,
|
||||||
|
}));
|
||||||
|
} catch (err) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: (err as Error).message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// POST /api/litellm-api/ccw-litellm/install - Install ccw-litellm package
|
||||||
|
if (pathname === '/api/litellm-api/ccw-litellm/install' && req.method === 'POST') {
|
||||||
|
handlePostRequest(req, res, async () => {
|
||||||
|
try {
|
||||||
|
const { spawn } = await import('child_process');
|
||||||
|
const path = await import('path');
|
||||||
|
const fs = await import('fs');
|
||||||
|
|
||||||
|
// Try to find ccw-litellm package in distribution
|
||||||
|
const possiblePaths = [
|
||||||
|
path.join(initialPath, 'ccw-litellm'),
|
||||||
|
path.join(initialPath, '..', 'ccw-litellm'),
|
||||||
|
path.join(process.cwd(), 'ccw-litellm'),
|
||||||
|
path.join(PACKAGE_ROOT, 'ccw-litellm'), // npm package internal path
|
||||||
|
];
|
||||||
|
|
||||||
|
let packagePath = '';
|
||||||
|
for (const p of possiblePaths) {
|
||||||
|
const pyproject = path.join(p, 'pyproject.toml');
|
||||||
|
if (fs.existsSync(pyproject)) {
|
||||||
|
packagePath = p;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!packagePath) {
|
||||||
|
// Try pip install from PyPI as fallback
|
||||||
|
return new Promise((resolve) => {
|
||||||
|
const proc = spawn('pip', ['install', 'ccw-litellm'], { shell: true, timeout: 300000 });
|
||||||
|
let output = '';
|
||||||
|
let error = '';
|
||||||
|
proc.stdout?.on('data', (data) => { output += data.toString(); });
|
||||||
|
proc.stderr?.on('data', (data) => { error += data.toString(); });
|
||||||
|
proc.on('close', (code) => {
|
||||||
|
if (code === 0) {
|
||||||
|
// Clear status cache after successful installation
|
||||||
|
clearCcwLitellmStatusCache();
|
||||||
|
resolve({ success: true, message: 'ccw-litellm installed from PyPI' });
|
||||||
|
} else {
|
||||||
|
resolve({ success: false, error: error || 'Installation failed' });
|
||||||
|
}
|
||||||
|
});
|
||||||
|
proc.on('error', (err) => resolve({ success: false, error: err.message }));
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Install from local package
|
||||||
|
return new Promise((resolve) => {
|
||||||
|
const proc = spawn('pip', ['install', '-e', packagePath], { shell: true, timeout: 300000 });
|
||||||
|
let output = '';
|
||||||
|
let error = '';
|
||||||
|
proc.stdout?.on('data', (data) => { output += data.toString(); });
|
||||||
|
proc.stderr?.on('data', (data) => { error += data.toString(); });
|
||||||
|
proc.on('close', (code) => {
|
||||||
|
if (code === 0) {
|
||||||
|
// Clear status cache after successful installation
|
||||||
|
clearCcwLitellmStatusCache();
|
||||||
|
|
||||||
|
// Broadcast installation event
|
||||||
|
broadcastToClients({
|
||||||
|
type: 'CCW_LITELLM_INSTALLED',
|
||||||
|
payload: { timestamp: new Date().toISOString() }
|
||||||
|
});
|
||||||
|
resolve({ success: true, message: 'ccw-litellm installed successfully', path: packagePath });
|
||||||
|
} else {
|
||||||
|
resolve({ success: false, error: error || output || 'Installation failed' });
|
||||||
|
}
|
||||||
|
});
|
||||||
|
proc.on('error', (err) => resolve({ success: false, error: err.message }));
|
||||||
|
});
|
||||||
|
} catch (err) {
|
||||||
|
return { success: false, error: (err as Error).message };
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// POST /api/litellm-api/ccw-litellm/uninstall - Uninstall ccw-litellm package
|
||||||
|
if (pathname === '/api/litellm-api/ccw-litellm/uninstall' && req.method === 'POST') {
|
||||||
|
handlePostRequest(req, res, async () => {
|
||||||
|
try {
|
||||||
|
const { spawn } = await import('child_process');
|
||||||
|
|
||||||
|
return new Promise((resolve) => {
|
||||||
|
const proc = spawn('pip', ['uninstall', '-y', 'ccw-litellm'], { shell: true, timeout: 120000 });
|
||||||
|
let output = '';
|
||||||
|
let error = '';
|
||||||
|
proc.stdout?.on('data', (data) => { output += data.toString(); });
|
||||||
|
proc.stderr?.on('data', (data) => { error += data.toString(); });
|
||||||
|
proc.on('close', (code) => {
|
||||||
|
// Clear status cache after uninstallation attempt
|
||||||
|
clearCcwLitellmStatusCache();
|
||||||
|
|
||||||
|
if (code === 0) {
|
||||||
|
broadcastToClients({
|
||||||
|
type: 'CCW_LITELLM_UNINSTALLED',
|
||||||
|
payload: { timestamp: new Date().toISOString() }
|
||||||
|
});
|
||||||
|
resolve({ success: true, message: 'ccw-litellm uninstalled successfully' });
|
||||||
|
} else {
|
||||||
|
// Check if package was not installed
|
||||||
|
if (error.includes('not installed') || output.includes('not installed')) {
|
||||||
|
resolve({ success: true, message: 'ccw-litellm was not installed' });
|
||||||
|
} else {
|
||||||
|
resolve({ success: false, error: error || output || 'Uninstallation failed' });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
proc.on('error', (err) => resolve({ success: false, error: err.message }));
|
||||||
|
});
|
||||||
|
} catch (err) {
|
||||||
|
return { success: false, error: (err as Error).message };
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
107
ccw/src/core/routes/litellm-routes.ts
Normal file
107
ccw/src/core/routes/litellm-routes.ts
Normal file
@@ -0,0 +1,107 @@
|
|||||||
|
// @ts-nocheck
|
||||||
|
/**
|
||||||
|
* LiteLLM Routes Module
|
||||||
|
* Handles all LiteLLM-related API endpoints
|
||||||
|
*/
|
||||||
|
import type { IncomingMessage, ServerResponse } from 'http';
|
||||||
|
import { getLiteLLMClient, getLiteLLMStatus, checkLiteLLMAvailable } from '../../tools/litellm-client.js';
|
||||||
|
|
||||||
|
export interface RouteContext {
|
||||||
|
pathname: string;
|
||||||
|
url: URL;
|
||||||
|
req: IncomingMessage;
|
||||||
|
res: ServerResponse;
|
||||||
|
initialPath: string;
|
||||||
|
handlePostRequest: (req: IncomingMessage, res: ServerResponse, handler: (body: unknown) => Promise<any>) => void;
|
||||||
|
broadcastToClients: (data: unknown) => void;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Handle LiteLLM routes
|
||||||
|
* @returns true if route was handled, false otherwise
|
||||||
|
*/
|
||||||
|
export async function handleLiteLLMRoutes(ctx: RouteContext): Promise<boolean> {
|
||||||
|
const { pathname, url, req, res, initialPath, handlePostRequest } = ctx;
|
||||||
|
|
||||||
|
// API: LiteLLM Status - Check availability and version
|
||||||
|
if (pathname === '/api/litellm/status') {
|
||||||
|
try {
|
||||||
|
const status = await getLiteLLMStatus();
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify(status));
|
||||||
|
} catch (err) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ available: false, error: err.message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// API: LiteLLM Config - Get configuration
|
||||||
|
if (pathname === '/api/litellm/config' && req.method === 'GET') {
|
||||||
|
try {
|
||||||
|
const client = getLiteLLMClient();
|
||||||
|
const config = await client.getConfig();
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify(config));
|
||||||
|
} catch (err) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: err.message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// API: LiteLLM Embed - Generate embeddings
|
||||||
|
if (pathname === '/api/litellm/embed' && req.method === 'POST') {
|
||||||
|
handlePostRequest(req, res, async (body) => {
|
||||||
|
const { texts, model = 'default' } = body;
|
||||||
|
|
||||||
|
if (!texts || !Array.isArray(texts)) {
|
||||||
|
return { error: 'texts array is required', status: 400 };
|
||||||
|
}
|
||||||
|
|
||||||
|
if (texts.length === 0) {
|
||||||
|
return { error: 'texts array cannot be empty', status: 400 };
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const client = getLiteLLMClient();
|
||||||
|
const result = await client.embed(texts, model);
|
||||||
|
return { success: true, ...result };
|
||||||
|
} catch (err) {
|
||||||
|
return { error: err.message, status: 500 };
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// API: LiteLLM Chat - Chat with LLM
|
||||||
|
if (pathname === '/api/litellm/chat' && req.method === 'POST') {
|
||||||
|
handlePostRequest(req, res, async (body) => {
|
||||||
|
const { message, messages, model = 'default' } = body;
|
||||||
|
|
||||||
|
// Support both single message and messages array
|
||||||
|
if (!message && (!messages || !Array.isArray(messages))) {
|
||||||
|
return { error: 'message or messages array is required', status: 400 };
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const client = getLiteLLMClient();
|
||||||
|
|
||||||
|
if (messages && Array.isArray(messages)) {
|
||||||
|
// Multi-turn chat
|
||||||
|
const result = await client.chatMessages(messages, model);
|
||||||
|
return { success: true, ...result };
|
||||||
|
} else {
|
||||||
|
// Single message chat
|
||||||
|
const content = await client.chat(message, model);
|
||||||
|
return { success: true, content, model };
|
||||||
|
}
|
||||||
|
} catch (err) {
|
||||||
|
return { error: err.message, status: 500 };
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
@@ -1000,8 +1000,8 @@ function writeSettingsFile(filePath, settings) {
|
|||||||
* @returns {string}
|
* @returns {string}
|
||||||
*/
|
*/
|
||||||
function getProjectSettingsPath(projectPath) {
|
function getProjectSettingsPath(projectPath) {
|
||||||
const normalizedPath = projectPath.replace(/\//g, '\\').replace(/^\\([a-zA-Z])\\/, '$1:\\');
|
// path.join automatically handles cross-platform path separators
|
||||||
return join(normalizedPath, '.claude', 'settings.json');
|
return join(projectPath, '.claude', 'settings.json');
|
||||||
}
|
}
|
||||||
|
|
||||||
// ========================================
|
// ========================================
|
||||||
|
|||||||
@@ -291,13 +291,14 @@ FOCUS AREAS: ${extractFocus || 'naming conventions, error handling, code structu
|
|||||||
return { error: `Unknown generation type: ${generationType}` };
|
return { error: `Unknown generation type: ${generationType}` };
|
||||||
}
|
}
|
||||||
|
|
||||||
// Execute CLI tool (Gemini) with at least 10 minutes timeout
|
// Execute CLI tool (Claude) with at least 10 minutes timeout
|
||||||
const result = await executeCliTool({
|
const result = await executeCliTool({
|
||||||
tool: 'gemini',
|
tool: 'claude',
|
||||||
prompt,
|
prompt,
|
||||||
mode,
|
mode,
|
||||||
cd: workingDir,
|
cd: workingDir,
|
||||||
timeout: 600000 // 10 minutes
|
timeout: 600000, // 10 minutes
|
||||||
|
category: 'internal'
|
||||||
});
|
});
|
||||||
|
|
||||||
if (!result.success) {
|
if (!result.success) {
|
||||||
|
|||||||
@@ -123,6 +123,7 @@ function getSkillsConfig(projectPath) {
|
|||||||
|
|
||||||
result.projectSkills.push({
|
result.projectSkills.push({
|
||||||
name: parsed.name || skill.name,
|
name: parsed.name || skill.name,
|
||||||
|
folderName: skill.name, // Actual folder name for API queries
|
||||||
description: parsed.description,
|
description: parsed.description,
|
||||||
version: parsed.version,
|
version: parsed.version,
|
||||||
allowedTools: parsed.allowedTools,
|
allowedTools: parsed.allowedTools,
|
||||||
@@ -152,6 +153,7 @@ function getSkillsConfig(projectPath) {
|
|||||||
|
|
||||||
result.userSkills.push({
|
result.userSkills.push({
|
||||||
name: parsed.name || skill.name,
|
name: parsed.name || skill.name,
|
||||||
|
folderName: skill.name, // Actual folder name for API queries
|
||||||
description: parsed.description,
|
description: parsed.description,
|
||||||
version: parsed.version,
|
version: parsed.version,
|
||||||
allowedTools: parsed.allowedTools,
|
allowedTools: parsed.allowedTools,
|
||||||
@@ -197,6 +199,7 @@ function getSkillDetail(skillName, location, projectPath) {
|
|||||||
return {
|
return {
|
||||||
skill: {
|
skill: {
|
||||||
name: parsed.name || skillName,
|
name: parsed.name || skillName,
|
||||||
|
folderName: skillName, // Actual folder name for API queries
|
||||||
description: parsed.description,
|
description: parsed.description,
|
||||||
version: parsed.version,
|
version: parsed.version,
|
||||||
allowedTools: parsed.allowedTools,
|
allowedTools: parsed.allowedTools,
|
||||||
@@ -390,7 +393,7 @@ async function importSkill(sourcePath, location, projectPath, customName) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Generate skill via CLI tool (Gemini)
|
* Generate skill via CLI tool (Claude)
|
||||||
* @param {Object} params - Generation parameters
|
* @param {Object} params - Generation parameters
|
||||||
* @param {string} params.generationType - 'description' or 'template'
|
* @param {string} params.generationType - 'description' or 'template'
|
||||||
* @param {string} params.description - Skill description from user
|
* @param {string} params.description - Skill description from user
|
||||||
@@ -455,9 +458,9 @@ REQUIREMENTS:
|
|||||||
3. If the skill requires supporting files (e.g., templates, scripts), create them in the skill folder
|
3. If the skill requires supporting files (e.g., templates, scripts), create them in the skill folder
|
||||||
4. Ensure all files are properly formatted and follow best practices`;
|
4. Ensure all files are properly formatted and follow best practices`;
|
||||||
|
|
||||||
// Execute CLI tool (Gemini) with write mode
|
// Execute CLI tool (Claude) with write mode
|
||||||
const result = await executeCliTool({
|
const result = await executeCliTool({
|
||||||
tool: 'gemini',
|
tool: 'claude',
|
||||||
prompt,
|
prompt,
|
||||||
mode: 'write',
|
mode: 'write',
|
||||||
cd: baseDir,
|
cd: baseDir,
|
||||||
@@ -515,8 +518,143 @@ export async function handleSkillsRoutes(ctx: RouteContext): Promise<boolean> {
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
// API: Get single skill detail
|
// API: List skill directory contents
|
||||||
if (pathname.startsWith('/api/skills/') && req.method === 'GET' && !pathname.endsWith('/skills/')) {
|
if (pathname.match(/^\/api\/skills\/[^/]+\/dir$/) && req.method === 'GET') {
|
||||||
|
const pathParts = pathname.split('/');
|
||||||
|
const skillName = decodeURIComponent(pathParts[3]);
|
||||||
|
const subPath = url.searchParams.get('subpath') || '';
|
||||||
|
const location = url.searchParams.get('location') || 'project';
|
||||||
|
const projectPathParam = url.searchParams.get('path') || initialPath;
|
||||||
|
|
||||||
|
const baseDir = location === 'project'
|
||||||
|
? join(projectPathParam, '.claude', 'skills')
|
||||||
|
: join(homedir(), '.claude', 'skills');
|
||||||
|
|
||||||
|
const dirPath = subPath
|
||||||
|
? join(baseDir, skillName, subPath)
|
||||||
|
: join(baseDir, skillName);
|
||||||
|
|
||||||
|
// Security check: ensure path is within skill folder
|
||||||
|
if (!dirPath.startsWith(join(baseDir, skillName))) {
|
||||||
|
res.writeHead(403, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: 'Access denied' }));
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!existsSync(dirPath)) {
|
||||||
|
res.writeHead(404, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: 'Directory not found' }));
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const stat = statSync(dirPath);
|
||||||
|
if (!stat.isDirectory()) {
|
||||||
|
res.writeHead(400, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: 'Path is not a directory' }));
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
const entries = readdirSync(dirPath, { withFileTypes: true });
|
||||||
|
const files = entries.map(entry => ({
|
||||||
|
name: entry.name,
|
||||||
|
isDirectory: entry.isDirectory(),
|
||||||
|
path: subPath ? `${subPath}/${entry.name}` : entry.name
|
||||||
|
}));
|
||||||
|
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ files, subPath, skillName }));
|
||||||
|
} catch (error) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: (error as Error).message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// API: Read skill file content
|
||||||
|
if (pathname.match(/^\/api\/skills\/[^/]+\/file$/) && req.method === 'GET') {
|
||||||
|
const pathParts = pathname.split('/');
|
||||||
|
const skillName = decodeURIComponent(pathParts[3]);
|
||||||
|
const fileName = url.searchParams.get('filename');
|
||||||
|
const location = url.searchParams.get('location') || 'project';
|
||||||
|
const projectPathParam = url.searchParams.get('path') || initialPath;
|
||||||
|
|
||||||
|
if (!fileName) {
|
||||||
|
res.writeHead(400, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: 'filename parameter is required' }));
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
const baseDir = location === 'project'
|
||||||
|
? join(projectPathParam, '.claude', 'skills')
|
||||||
|
: join(homedir(), '.claude', 'skills');
|
||||||
|
|
||||||
|
const filePath = join(baseDir, skillName, fileName);
|
||||||
|
|
||||||
|
// Security check: ensure file is within skill folder
|
||||||
|
if (!filePath.startsWith(join(baseDir, skillName))) {
|
||||||
|
res.writeHead(403, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: 'Access denied' }));
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!existsSync(filePath)) {
|
||||||
|
res.writeHead(404, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: 'File not found' }));
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const content = readFileSync(filePath, 'utf8');
|
||||||
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ content, fileName, path: filePath }));
|
||||||
|
} catch (error) {
|
||||||
|
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||||
|
res.end(JSON.stringify({ error: (error as Error).message }));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// API: Write skill file content
|
||||||
|
if (pathname.match(/^\/api\/skills\/[^/]+\/file$/) && req.method === 'POST') {
|
||||||
|
const pathParts = pathname.split('/');
|
||||||
|
const skillName = decodeURIComponent(pathParts[3]);
|
||||||
|
|
||||||
|
handlePostRequest(req, res, async (body) => {
|
||||||
|
const { fileName, content, location, projectPath: projectPathParam } = body;
|
||||||
|
|
||||||
|
if (!fileName) {
|
||||||
|
return { error: 'fileName is required' };
|
||||||
|
}
|
||||||
|
|
||||||
|
if (content === undefined) {
|
||||||
|
return { error: 'content is required' };
|
||||||
|
}
|
||||||
|
|
||||||
|
const baseDir = location === 'project'
|
||||||
|
? join(projectPathParam || initialPath, '.claude', 'skills')
|
||||||
|
: join(homedir(), '.claude', 'skills');
|
||||||
|
|
||||||
|
const filePath = join(baseDir, skillName, fileName);
|
||||||
|
|
||||||
|
// Security check: ensure file is within skill folder
|
||||||
|
if (!filePath.startsWith(join(baseDir, skillName))) {
|
||||||
|
return { error: 'Access denied' };
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
await fsPromises.writeFile(filePath, content, 'utf8');
|
||||||
|
return { success: true, fileName, path: filePath };
|
||||||
|
} catch (error) {
|
||||||
|
return { error: (error as Error).message };
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// API: Get single skill detail (exclude /dir and /file sub-routes)
|
||||||
|
if (pathname.startsWith('/api/skills/') && req.method === 'GET' &&
|
||||||
|
!pathname.endsWith('/skills/') && !pathname.endsWith('/dir') && !pathname.endsWith('/file')) {
|
||||||
const skillName = decodeURIComponent(pathname.replace('/api/skills/', ''));
|
const skillName = decodeURIComponent(pathname.replace('/api/skills/', ''));
|
||||||
const location = url.searchParams.get('location') || 'project';
|
const location = url.searchParams.get('location') || 'project';
|
||||||
const projectPathParam = url.searchParams.get('path') || initialPath;
|
const projectPathParam = url.searchParams.get('path') || initialPath;
|
||||||
@@ -576,7 +714,7 @@ export async function handleSkillsRoutes(ctx: RouteContext): Promise<boolean> {
|
|||||||
|
|
||||||
return await importSkill(sourcePath, location, projectPath, skillName);
|
return await importSkill(sourcePath, location, projectPath, skillName);
|
||||||
} else if (mode === 'cli-generate') {
|
} else if (mode === 'cli-generate') {
|
||||||
// CLI generate mode: use Gemini to generate skill
|
// CLI generate mode: use Claude to generate skill
|
||||||
if (!skillName) {
|
if (!skillName) {
|
||||||
return { error: 'Skill name is required for CLI generation mode' };
|
return { error: 'Skill name is required for CLI generation mode' };
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,9 +4,56 @@
  * Aggregated status endpoint for faster dashboard loading
  */
 import type { IncomingMessage, ServerResponse } from 'http';
+import { existsSync } from 'fs';
+import { join } from 'path';
+import { homedir } from 'os';
 import { getCliToolsStatus } from '../../tools/cli-executor.js';
 import { checkVenvStatus, checkSemanticStatus } from '../../tools/codex-lens.js';
+
+/**
+ * Check CCW installation status
+ * Verifies that required workflow files are installed in user's home directory
+ */
+function checkCcwInstallStatus(): {
+  installed: boolean;
+  workflowsInstalled: boolean;
+  missingFiles: string[];
+  installPath: string;
+} {
+  const claudeDir = join(homedir(), '.claude');
+  const workflowsDir = join(claudeDir, 'workflows');
+
+  // Required workflow files for full functionality
+  const requiredFiles = [
+    'chinese-response.md',
+    'windows-platform.md',
+    'cli-tools-usage.md',
+    'coding-philosophy.md',
+    'context-tools.md',
+    'file-modification.md'
+  ];
+
+  const missingFiles: string[] = [];
+
+  // Check each required file
+  for (const file of requiredFiles) {
+    const filePath = join(workflowsDir, file);
+    if (!existsSync(filePath)) {
+      missingFiles.push(file);
+    }
+  }
+
+  const workflowsInstalled = existsSync(workflowsDir) && missingFiles.length === 0;
+  const installed = existsSync(claudeDir) && workflowsInstalled;
+
+  return {
+    installed,
+    workflowsInstalled,
+    missingFiles,
+    installPath: claudeDir
+  };
+}
+
 export interface RouteContext {
   pathname: string;
   url: URL;

@@ -27,6 +74,9 @@ export async function handleStatusRoutes(ctx: RouteContext): Promise<boolean> {
   // API: Aggregated Status (all statuses in one call)
   if (pathname === '/api/status/all') {
     try {
+      // Check CCW installation status (sync, fast)
+      const ccwInstallStatus = checkCcwInstallStatus();
+
       // Execute all status checks in parallel
       const [cliStatus, codexLensStatus, semanticStatus] = await Promise.all([
         getCliToolsStatus(),

@@ -39,6 +89,7 @@ export async function handleStatusRoutes(ctx: RouteContext): Promise<boolean> {
         cli: cliStatus,
         codexLens: codexLensStatus,
         semantic: semanticStatus,
+        ccwInstall: ccwInstallStatus,
         timestamp: new Date().toISOString()
       };

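Note: with the ccwInstall field added above, a GET /api/status/all response takes roughly the following shape. The concrete values are illustrative only, not taken from the repository; the field names come from checkCcwInstallStatus() and the aggregated response object in this hunk.

{
  "cli": { "gemini": { "available": true }, "qwen": { "available": true }, "codex": { "available": false }, "claude": { "available": true } },
  "codexLens": { "ready": true },
  "semantic": { "available": false },
  "ccwInstall": {
    "installed": false,
    "workflowsInstalled": false,
    "missingFiles": ["context-tools.md"],
    "installPath": "/home/user/.claude"
  },
  "timestamp": "2025-01-01T00:00:00.000Z"
}
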
@@ -49,7 +49,7 @@ const VERSION_CHECK_CACHE_TTL = 3600000; // 1 hour
  */
 function getCurrentVersion(): string {
   try {
-    const packageJsonPath = join(import.meta.dirname, '../../../package.json');
+    const packageJsonPath = join(import.meta.dirname, '../../../../package.json');
     if (existsSync(packageJsonPath)) {
       const pkg = JSON.parse(readFileSync(packageJsonPath, 'utf8'));
       return pkg.version || '0.0.0';

@@ -22,6 +22,8 @@ import { handleSessionRoutes } from './routes/session-routes.js';
 import { handleCcwRoutes } from './routes/ccw-routes.js';
 import { handleClaudeRoutes } from './routes/claude-routes.js';
 import { handleHelpRoutes } from './routes/help-routes.js';
+import { handleLiteLLMRoutes } from './routes/litellm-routes.js';
+import { handleLiteLLMApiRoutes } from './routes/litellm-api-routes.js';

 // Import WebSocket handling
 import { handleWebSocketUpgrade, broadcastToClients } from './websocket.js';

@@ -83,7 +85,8 @@ const MODULE_CSS_FILES = [
   '27-graph-explorer.css',
   '28-mcp-manager.css',
   '29-help.css',
-  '30-core-memory.css'
+  '30-core-memory.css',
+  '31-api-settings.css'
 ];

 // Modular JS files in dependency order

@@ -137,6 +140,7 @@ const MODULE_FILES = [
   'views/skills-manager.js',
   'views/rules-manager.js',
   'views/claude-manager.js',
+  'views/api-settings.js',
   'views/help.js',
   'main.js'
 ];

@@ -311,6 +315,16 @@ export async function startServer(options: ServerOptions = {}): Promise<http.Server> {
       if (await handleCodexLensRoutes(routeContext)) return;
     }

+    // LiteLLM routes (/api/litellm/*)
+    if (pathname.startsWith('/api/litellm/')) {
+      if (await handleLiteLLMRoutes(routeContext)) return;
+    }
+
+    // LiteLLM API routes (/api/litellm-api/*)
+    if (pathname.startsWith('/api/litellm-api/')) {
+      if (await handleLiteLLMApiRoutes(routeContext)) return;
+    }
+
     // Graph routes (/api/graph/*)
     if (pathname.startsWith('/api/graph/')) {
       if (await handleGraphRoutes(routeContext)) return;

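The two new dispatch branches above assume handlers that follow the same contract as the other route modules in this diff: take the shared RouteContext and return true once the request has been handled. A minimal sketch of that contract follows; the endpoint path and response body are hypothetical, the real implementations live in litellm-routes.ts / litellm-api-routes.ts, and the assumption that RouteContext exposes the Node res object is not shown in this excerpt.

export async function handleLiteLLMRoutes(ctx: RouteContext): Promise<boolean> {
  const { pathname, res } = ctx;

  // Hypothetical endpoint for illustration only.
  if (pathname === '/api/litellm/health') {
    res.writeHead(200, { 'Content-Type': 'application/json' });
    res.end(JSON.stringify({ ok: true }));
    return true; // handled: startServer stops dispatching
  }

  return false; // not handled: fall through to the next route group
}
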
@@ -22,7 +22,7 @@ const ENV_PROJECT_ROOT = 'CCW_PROJECT_ROOT';
 const ENV_ALLOWED_DIRS = 'CCW_ALLOWED_DIRS';

 // Default enabled tools (core set)
-const DEFAULT_TOOLS: string[] = ['write_file', 'edit_file', 'read_file', 'smart_search', 'core_memory'];
+const DEFAULT_TOOLS: string[] = ['write_file', 'edit_file', 'read_file', 'smart_search', 'core_memory', 'context_cache'];

 /**
  * Get list of enabled tools from environment or defaults

@@ -170,6 +170,27 @@
   letter-spacing: 0.03em;
 }

+.cli-tool-badge-disabled {
+  font-size: 0.5625rem;
+  font-weight: 600;
+  padding: 0.125rem 0.375rem;
+  background: hsl(38 92% 50% / 0.2);
+  color: hsl(38 92% 50%);
+  border-radius: 9999px;
+  text-transform: uppercase;
+  letter-spacing: 0.03em;
+}
+
+/* Disabled tool card state */
+.cli-tool-card.disabled {
+  opacity: 0.7;
+  border-style: dashed;
+}
+
+.cli-tool-card.disabled .cli-tool-name {
+  color: hsl(var(--muted-foreground));
+}
+
 .cli-tool-info {
   font-size: 0.6875rem;
   margin-bottom: 0.3125rem;

@@ -773,6 +794,29 @@
   border-color: hsl(var(--destructive) / 0.5);
 }

+/* Enable/Disable button variants */
+.btn-sm.btn-outline-success {
+  background: transparent;
+  border: 1px solid hsl(142 76% 36% / 0.4);
+  color: hsl(142 76% 36%);
+}
+
+.btn-sm.btn-outline-success:hover {
+  background: hsl(142 76% 36% / 0.1);
+  border-color: hsl(142 76% 36% / 0.6);
+}
+
+.btn-sm.btn-outline-warning {
+  background: transparent;
+  border: 1px solid hsl(38 92% 50% / 0.4);
+  color: hsl(38 92% 50%);
+}
+
+.btn-sm.btn-outline-warning:hover {
+  background: hsl(38 92% 50% / 0.1);
+  border-color: hsl(38 92% 50% / 0.6);
+}
+
 /* Empty State */
 .empty-state {
   display: flex;

@@ -158,3 +158,37 @@
   pointer-events: none;
 }
+
+/* Code Index MCP Toggle Buttons */
+.code-mcp-btn {
+  padding: 0.375rem 0.75rem;
+  font-size: 0.75rem;
+  font-weight: 500;
+  border-radius: 0.375rem;
+  border: none;
+  cursor: pointer;
+  transition: all 0.15s ease;
+  background: transparent;
+  color: hsl(var(--muted-foreground));
+}
+
+.code-mcp-btn:hover {
+  color: hsl(var(--foreground));
+  background: hsl(var(--muted) / 0.5);
+}
+
+.code-mcp-btn.active,
+.code-mcp-btn[class*="bg-primary"] {
+  background: hsl(var(--primary));
+  color: hsl(var(--primary-foreground));
+  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.1);
+}
+
+.code-mcp-toggle {
+  display: flex;
+  align-items: center;
+  gap: 0.25rem;
+  background: hsl(var(--muted));
+  border-radius: 0.5rem;
+  padding: 0.125rem;
+}

ccw/src/templates/dashboard-css/31-api-settings.css (new file, 2265 lines)
File diff suppressed because it is too large

@@ -33,9 +33,13 @@ async function loadCliHistory(options = {}) {
 }

 // Load native session content for a specific execution
-async function loadNativeSessionContent(executionId) {
+async function loadNativeSessionContent(executionId, sourceDir) {
   try {
-    const url = `/api/cli/native-session?path=${encodeURIComponent(projectPath)}&id=${encodeURIComponent(executionId)}`;
+    // If sourceDir provided, use it to build the correct path
+    const basePath = sourceDir && sourceDir !== '.'
+      ? projectPath + '/' + sourceDir
+      : projectPath;
+    const url = `/api/cli/native-session?path=${encodeURIComponent(basePath)}&id=${encodeURIComponent(executionId)}`;
     const response = await fetch(url);
     if (!response.ok) return null;
     return await response.json();

@@ -133,9 +137,12 @@ function renderCliHistory() {
       </span>`
     : '';

+  // Escape sourceDir for use in onclick
+  const sourceDirEscaped = exec.sourceDir ? exec.sourceDir.replace(/'/g, "\\'") : '';
+
   return `
     <div class="cli-history-item ${hasNative ? 'has-native' : ''}">
-      <div class="cli-history-item-content" onclick="showExecutionDetail('${exec.id}')">
+      <div class="cli-history-item-content" onclick="showExecutionDetail('${exec.id}', '${sourceDirEscaped}')">
        <div class="cli-history-item-header">
          <span class="cli-tool-tag cli-tool-${exec.tool}">${exec.tool.toUpperCase()}</span>
          <span class="cli-mode-tag">${exec.mode || 'analysis'}</span>

@@ -154,14 +161,14 @@ function renderCliHistory() {
       </div>
       <div class="cli-history-actions">
         ${hasNative ? `
-          <button class="btn-icon" onclick="event.stopPropagation(); showNativeSessionDetail('${exec.id}')" title="View Native Session">
+          <button class="btn-icon" onclick="event.stopPropagation(); showNativeSessionDetail('${exec.id}', '${sourceDirEscaped}')" title="View Native Session">
            <i data-lucide="file-json" class="w-3.5 h-3.5"></i>
          </button>
        ` : ''}
-        <button class="btn-icon" onclick="event.stopPropagation(); showExecutionDetail('${exec.id}')" title="View Details">
+        <button class="btn-icon" onclick="event.stopPropagation(); showExecutionDetail('${exec.id}', '${sourceDirEscaped}')" title="View Details">
          <i data-lucide="eye" class="w-3.5 h-3.5"></i>
        </button>
-        <button class="btn-icon btn-danger" onclick="event.stopPropagation(); confirmDeleteExecution('${exec.id}')" title="Delete">
+        <button class="btn-icon btn-danger" onclick="event.stopPropagation(); confirmDeleteExecution('${exec.id}', '${sourceDirEscaped}')" title="Delete">
          <i data-lucide="trash-2" class="w-3.5 h-3.5"></i>
        </button>
       </div>

@@ -650,9 +657,9 @@ async function copyConcatenatedPrompt(executionId) {
 /**
  * Show native session detail modal with full conversation content
  */
-async function showNativeSessionDetail(executionId) {
+async function showNativeSessionDetail(executionId, sourceDir) {
   // Load native session content
-  const nativeSession = await loadNativeSessionContent(executionId);
+  const nativeSession = await loadNativeSessionContent(executionId, sourceDir);

   if (!nativeSession) {
     showRefreshToast('Native session not found', 'error');

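As a worked example of the basePath logic above (values illustrative): with projectPath '/work/repo' and exec.sourceDir 'packages/app', the request becomes /api/cli/native-session?path=%2Fwork%2Frepo%2Fpackages%2Fapp&id=<executionId>, so the native session is resolved against the sub-directory the execution was recorded in rather than the project root.
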
@@ -5,8 +5,11 @@
 let cliToolStatus = { gemini: {}, qwen: {}, codex: {}, claude: {} };
 let codexLensStatus = { ready: false };
 let semanticStatus = { available: false };
+let ccwInstallStatus = { installed: true, workflowsInstalled: true, missingFiles: [], installPath: '' };
 let defaultCliTool = 'gemini';
 let promptConcatFormat = localStorage.getItem('ccw-prompt-format') || 'plain'; // plain, yaml, json
+let cliToolsConfig = {}; // CLI tools enable/disable config
+let apiEndpoints = []; // API endpoints from LiteLLM config

 // Smart Context settings
 let smartContextEnabled = localStorage.getItem('ccw-smart-context') === 'true';

@@ -18,6 +21,9 @@ let nativeResumeEnabled = localStorage.getItem('ccw-native-resume') !== 'false';
 // Recursive Query settings (for hierarchical storage aggregation)
 let recursiveQueryEnabled = localStorage.getItem('ccw-recursive-query') !== 'false'; // default true

+// Code Index MCP provider (codexlens or ace)
+let codeIndexMcpProvider = 'codexlens';
+
 // ========== Initialization ==========
 function initCliStatus() {
   // Load all statuses in one call using aggregated endpoint

@@ -38,10 +44,18 @@ async function loadAllStatuses() {
     cliToolStatus = data.cli || { gemini: {}, qwen: {}, codex: {}, claude: {} };
     codexLensStatus = data.codexLens || { ready: false };
     semanticStatus = data.semantic || { available: false };
+    ccwInstallStatus = data.ccwInstall || { installed: true, workflowsInstalled: true, missingFiles: [], installPath: '' };
+
+    // Load CLI tools config and API endpoints
+    await Promise.all([
+      loadCliToolsConfig(),
+      loadApiEndpoints()
+    ]);

     // Update badges
     updateCliBadge();
     updateCodexLensBadge();
+    updateCcwInstallBadge();

     return data;
   } catch (err) {

@@ -98,15 +112,17 @@ async function loadCodexLensStatus() {
     }
     window.cliToolsStatus.codexlens = {
       installed: data.ready || false,
-      version: data.version || null
+      version: data.version || null,
+      installedModels: [] // Will be populated by loadSemanticStatus
     };

     // Update CodexLens badge
     updateCodexLensBadge();

-    // If CodexLens is ready, also check semantic status
+    // If CodexLens is ready, also check semantic status and models
     if (data.ready) {
       await loadSemanticStatus();
+      await loadInstalledModels();
     }

     return data;

@@ -116,6 +132,54 @@
   }
 }

+/**
+ * Load CodexLens dashboard data using aggregated endpoint (single API call)
+ * This is optimized for the CodexLens Manager page initialization
+ * @returns {Promise<object|null>} Dashboard init data or null on error
+ */
+async function loadCodexLensDashboardInit() {
+  try {
+    const response = await fetch('/api/codexlens/dashboard-init');
+    if (!response.ok) throw new Error('Failed to load CodexLens dashboard init');
+    const data = await response.json();
+
+    // Update status variables from aggregated response
+    codexLensStatus = data.status || { ready: false };
+    semanticStatus = data.semantic || { available: false };
+
+    // Expose to window for other modules
+    if (!window.cliToolsStatus) {
+      window.cliToolsStatus = {};
+    }
+    window.cliToolsStatus.codexlens = {
+      installed: data.installed || false,
+      version: data.status?.version || null,
+      installedModels: [],
+      config: data.config || {},
+      semantic: data.semantic || {}
+    };
+
+    // Store config globally for easy access
+    window.codexLensConfig = data.config || {};
+    window.codexLensStatusData = data.statusData || {};
+
+    // Update badges
+    updateCodexLensBadge();
+
+    console.log('[CLI Status] CodexLens dashboard init loaded:', {
+      installed: data.installed,
+      version: data.status?.version,
+      semanticAvailable: data.semantic?.available
+    });
+
+    return data;
+  } catch (err) {
+    console.error('Failed to load CodexLens dashboard init:', err);
+    // Fallback to individual calls
+    return await loadCodexLensStatus();
+  }
+}
+
 /**
  * Legacy: Load semantic status individually
  */

@@ -132,6 +196,103 @@ async function loadSemanticStatus() {
   }
 }

+/**
+ * Load installed embedding models
+ */
+async function loadInstalledModels() {
+  try {
+    const response = await fetch('/api/codexlens/models');
+    if (!response.ok) throw new Error('Failed to load models');
+    const data = await response.json();
+
+    if (data.success && data.result && data.result.models) {
+      // Filter to only installed models
+      const installedModels = data.result.models
+        .filter(m => m.installed)
+        .map(m => m.profile);
+
+      // Update window.cliToolsStatus
+      if (window.cliToolsStatus && window.cliToolsStatus.codexlens) {
+        window.cliToolsStatus.codexlens.installedModels = installedModels;
+        window.cliToolsStatus.codexlens.allModels = data.result.models;
+      }
+
+      console.log('[CLI Status] Installed models:', installedModels);
+      return installedModels;
+    }
+    return [];
+  } catch (err) {
+    console.error('Failed to load installed models:', err);
+    return [];
+  }
+}
+
+/**
+ * Load CLI tools config from .claude/cli-tools.json (project or global fallback)
+ */
+async function loadCliToolsConfig() {
+  try {
+    const response = await fetch('/api/cli/tools-config');
+    if (!response.ok) return null;
+    const data = await response.json();
+    // Store full config and extract tools for backward compatibility
+    cliToolsConfig = data.tools || {};
+    window.claudeCliToolsConfig = data; // Full config available globally
+
+    // Load default tool from config
+    if (data.defaultTool) {
+      defaultCliTool = data.defaultTool;
+    }
+
+    // Load Code Index MCP provider from config
+    if (data.settings?.codeIndexMcp) {
+      codeIndexMcpProvider = data.settings.codeIndexMcp;
+    }
+
+    console.log('[CLI Config] Loaded from:', data._configInfo?.source || 'unknown', '| Default:', data.defaultTool, '| CodeIndexMCP:', codeIndexMcpProvider);
+    return data;
+  } catch (err) {
+    console.error('Failed to load CLI tools config:', err);
+    return null;
+  }
+}
+
+/**
+ * Update CLI tool enabled status
+ */
+async function updateCliToolEnabled(tool, enabled) {
+  try {
+    const response = await fetch('/api/cli/tools-config/' + tool, {
+      method: 'PUT',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({ enabled: enabled })
+    });
+    if (!response.ok) throw new Error('Failed to update');
+    showRefreshToast(tool + (enabled ? ' enabled' : ' disabled'), 'success');
+    return await response.json();
+  } catch (err) {
+    console.error('Failed to update CLI tool:', err);
+    showRefreshToast('Failed to update ' + tool, 'error');
+    return null;
+  }
+}
+
+/**
+ * Load API endpoints from LiteLLM config
+ */
+async function loadApiEndpoints() {
+  try {
+    const response = await fetch('/api/litellm-api/endpoints');
+    if (!response.ok) return [];
+    const data = await response.json();
+    apiEndpoints = data.endpoints || [];
+    return apiEndpoints;
+  } catch (err) {
+    console.error('Failed to load API endpoints:', err);
+    return [];
+  }
+}
+
 // ========== Badge Update ==========
 function updateCliBadge() {
   const badge = document.getElementById('badgeCliTools');

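Usage sketch for the helpers above (the tool name is just an example): disabling a tool from any other dashboard module is a single call, which issues PUT /api/cli/tools-config/codex with body {"enabled": false} and shows the toast defined in updateCliToolEnabled.

// Illustrative usage; 'codex' is an example tool name.
const result = await updateCliToolEnabled('codex', false);
if (result) {
  await loadCliToolsConfig(); // refresh the cached config afterwards
}
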
@@ -154,6 +315,25 @@ function updateCodexLensBadge() {
   }
 }

+function updateCcwInstallBadge() {
+  const badge = document.getElementById('badgeCcwInstall');
+  if (badge) {
+    if (ccwInstallStatus.installed) {
+      badge.textContent = t('status.installed');
+      badge.classList.add('text-success');
+      badge.classList.remove('text-warning', 'text-destructive');
+    } else if (ccwInstallStatus.workflowsInstalled === false) {
+      badge.textContent = t('status.incomplete');
+      badge.classList.add('text-warning');
+      badge.classList.remove('text-success', 'text-destructive');
+    } else {
+      badge.textContent = t('status.notInstalled');
+      badge.classList.add('text-destructive');
+      badge.classList.remove('text-success', 'text-warning');
+    }
+  }
+}
+
 // ========== Rendering ==========
 function renderCliStatus() {
   const container = document.getElementById('cli-status-panel');

@@ -179,25 +359,41 @@ function renderCliStatus() {
     const status = cliToolStatus[tool] || {};
     const isAvailable = status.available;
     const isDefault = defaultCliTool === tool;
+    const config = cliToolsConfig[tool] || { enabled: true };
+    const isEnabled = config.enabled !== false;
+    const canSetDefault = isAvailable && isEnabled && !isDefault;

     return `
-      <div class="cli-tool-card tool-${tool} ${isAvailable ? 'available' : 'unavailable'}">
+      <div class="cli-tool-card tool-${tool} ${isAvailable ? 'available' : 'unavailable'} ${!isEnabled ? 'disabled' : ''}">
         <div class="cli-tool-header">
-          <span class="cli-tool-status ${isAvailable ? 'status-available' : 'status-unavailable'}"></span>
+          <span class="cli-tool-status ${isAvailable && isEnabled ? 'status-available' : 'status-unavailable'}"></span>
           <span class="cli-tool-name">${tool.charAt(0).toUpperCase() + tool.slice(1)}</span>
           ${isDefault ? '<span class="cli-tool-badge">Default</span>' : ''}
+          ${!isEnabled && isAvailable ? '<span class="cli-tool-badge-disabled">Disabled</span>' : ''}
         </div>
         <div class="cli-tool-desc text-xs text-muted-foreground mt-1">
           ${toolDescriptions[tool]}
         </div>
-        <div class="cli-tool-info mt-2">
-          ${isAvailable
-            ? `<span class="text-success flex items-center gap-1"><i data-lucide="check-circle" class="w-3 h-3"></i> Ready</span>`
-            : `<span class="text-muted-foreground flex items-center gap-1"><i data-lucide="circle-dashed" class="w-3 h-3"></i> Not Installed</span>`
-          }
+        <div class="cli-tool-info mt-2 flex items-center justify-between">
+          <div>
+            ${isAvailable
+              ? (isEnabled
+                ? `<span class="text-success flex items-center gap-1"><i data-lucide="check-circle" class="w-3 h-3"></i> Ready</span>`
+                : `<span class="text-warning flex items-center gap-1"><i data-lucide="pause-circle" class="w-3 h-3"></i> Disabled</span>`)
+              : `<span class="text-muted-foreground flex items-center gap-1"><i data-lucide="circle-dashed" class="w-3 h-3"></i> Not Installed</span>`
+            }
+          </div>
         </div>
-        <div class="cli-tool-actions mt-3">
-          ${isAvailable && !isDefault
+        <div class="cli-tool-actions mt-3 flex gap-2">
+          ${isAvailable ? (isEnabled
+            ? `<button class="btn-sm btn-outline-warning flex items-center gap-1" onclick="toggleCliTool('${tool}', false)">
+                <i data-lucide="pause" class="w-3 h-3"></i> Disable
+              </button>`
+            : `<button class="btn-sm btn-outline-success flex items-center gap-1" onclick="toggleCliTool('${tool}', true)">
+                <i data-lucide="play" class="w-3 h-3"></i> Enable
+              </button>`
+          ) : ''}
+          ${canSetDefault
             ? `<button class="btn-sm btn-outline flex items-center gap-1" onclick="setDefaultCliTool('${tool}')">
                 <i data-lucide="star" class="w-3 h-3"></i> Set Default
               </button>`

@@ -277,11 +473,75 @@ function renderCliStatus() {
     </div>
   ` : '';

+  // CCW Installation Status card (show warning if not fully installed)
+  const ccwInstallHtml = !ccwInstallStatus.installed ? `
+    <div class="cli-tool-card tool-ccw-install unavailable" style="border: 1px solid var(--warning); background: rgba(var(--warning-rgb), 0.05);">
+      <div class="cli-tool-header">
+        <span class="cli-tool-status status-unavailable" style="background: var(--warning);"></span>
+        <span class="cli-tool-name">${t('status.ccwInstall')}</span>
+        <span class="badge px-1.5 py-0.5 text-xs rounded bg-warning/20 text-warning">${t('status.required')}</span>
+      </div>
+      <div class="cli-tool-desc text-xs text-muted-foreground mt-1">
+        ${t('status.ccwInstallDesc')}
+      </div>
+      <div class="cli-tool-info mt-2">
+        <span class="text-warning flex items-center gap-1">
+          <i data-lucide="alert-triangle" class="w-3 h-3"></i>
+          ${ccwInstallStatus.missingFiles.length} ${t('status.filesMissing')}
+        </span>
+      </div>
+      <div class="cli-tool-actions flex flex-col gap-2 mt-3">
+        <div class="text-xs text-muted-foreground">
+          <p class="mb-1">${t('status.missingFiles')}:</p>
+          <ul class="list-disc list-inside text-xs opacity-70">
+            ${ccwInstallStatus.missingFiles.slice(0, 3).map(f => `<li>${f}</li>`).join('')}
+            ${ccwInstallStatus.missingFiles.length > 3 ? `<li>+${ccwInstallStatus.missingFiles.length - 3} more...</li>` : ''}
+          </ul>
+        </div>
+        <div class="bg-muted/50 rounded p-2 mt-2">
+          <p class="text-xs font-medium mb-1">${t('status.runToFix')}:</p>
+          <code class="text-xs bg-background px-2 py-1 rounded block">ccw install</code>
+        </div>
+      </div>
+    </div>
+  ` : '';
+
+  // API Endpoints section
+  const apiEndpointsHtml = apiEndpoints.length > 0 ? `
+    <div class="cli-api-endpoints-section" style="margin-top: 1.5rem;">
+      <div class="cli-section-header" style="display: flex; align-items: center; gap: 0.5rem; margin-bottom: 1rem;">
+        <h4 style="display: flex; align-items: center; gap: 0.5rem; font-weight: 600; margin: 0;">
+          <i data-lucide="link" class="w-4 h-4"></i> API Endpoints
+        </h4>
+        <span class="badge" style="padding: 0.125rem 0.5rem; font-size: 0.75rem; border-radius: 0.25rem; background: var(--muted); color: var(--muted-foreground);">${apiEndpoints.length}</span>
+      </div>
+      <div class="cli-endpoints-list" style="display: grid; grid-template-columns: repeat(auto-fill, minmax(250px, 1fr)); gap: 0.75rem;">
+        ${apiEndpoints.map(ep => `
+          <div class="cli-endpoint-card ${ep.enabled ? 'available' : 'unavailable'}" style="padding: 0.75rem; border: 1px solid var(--border); border-radius: 0.5rem; background: var(--card);">
+            <div class="cli-endpoint-header" style="display: flex; align-items: center; gap: 0.5rem; margin-bottom: 0.5rem;">
+              <span class="cli-tool-status ${ep.enabled ? 'status-available' : 'status-unavailable'}" style="width: 8px; height: 8px; border-radius: 50%; background: ${ep.enabled ? 'var(--success)' : 'var(--muted-foreground)'}; flex-shrink: 0;"></span>
+              <span class="cli-endpoint-id" style="font-weight: 500; font-size: 0.875rem;">${ep.id}</span>
+            </div>
+            <div class="cli-endpoint-info" style="margin-top: 0.25rem;">
+              <span class="text-xs text-muted-foreground" style="font-size: 0.75rem; color: var(--muted-foreground);">${ep.model}</span>
+            </div>
+          </div>
+        `).join('')}
+      </div>
+    </div>
+  ` : '';
+
+  // Config source info
+  const configInfo = window.claudeCliToolsConfig?._configInfo || {};
+  const configSourceLabel = configInfo.source === 'project' ? 'Project' : configInfo.source === 'global' ? 'Global' : 'Default';
+  const configSourceClass = configInfo.source === 'project' ? 'text-success' : configInfo.source === 'global' ? 'text-primary' : 'text-muted-foreground';
+
   // CLI Settings section
   const settingsHtml = `
     <div class="cli-settings-section">
       <div class="cli-settings-header">
         <h4><i data-lucide="settings" class="w-3.5 h-3.5"></i> Settings</h4>
+        <span class="badge text-xs ${configSourceClass}" title="${configInfo.activePath || ''}">${configSourceLabel}</span>
       </div>
       <div class="cli-settings-grid">
         <div class="cli-setting-item">

@@ -348,6 +608,39 @@ function renderCliStatus() {
           </div>
           <p class="cli-setting-desc">Maximum files to include in smart context</p>
         </div>
+        <div class="cli-setting-item">
+          <label class="cli-setting-label">
+            <i data-lucide="hard-drive" class="w-3 h-3"></i>
+            Cache Injection
+          </label>
+          <div class="cli-setting-control">
+            <select class="cli-setting-select" onchange="setCacheInjectionMode(this.value)">
+              <option value="auto" ${getCacheInjectionMode() === 'auto' ? 'selected' : ''}>Auto</option>
+              <option value="manual" ${getCacheInjectionMode() === 'manual' ? 'selected' : ''}>Manual</option>
+              <option value="disabled" ${getCacheInjectionMode() === 'disabled' ? 'selected' : ''}>Disabled</option>
+            </select>
+          </div>
+          <p class="cli-setting-desc">Cache prefix/suffix injection mode for prompts</p>
+        </div>
+        <div class="cli-setting-item">
+          <label class="cli-setting-label">
+            <i data-lucide="search" class="w-3 h-3"></i>
+            Code Index MCP
+          </label>
+          <div class="cli-setting-control">
+            <div class="flex items-center bg-muted rounded-lg p-0.5">
+              <button class="code-mcp-btn px-3 py-1.5 text-xs font-medium rounded-md transition-all ${codeIndexMcpProvider === 'codexlens' ? 'bg-primary text-primary-foreground shadow-sm' : 'text-muted-foreground hover:text-foreground'}"
+                      onclick="setCodeIndexMcpProvider('codexlens')">
+                CodexLens
+              </button>
+              <button class="code-mcp-btn px-3 py-1.5 text-xs font-medium rounded-md transition-all ${codeIndexMcpProvider === 'ace' ? 'bg-primary text-primary-foreground shadow-sm' : 'text-muted-foreground hover:text-foreground'}"
+                      onclick="setCodeIndexMcpProvider('ace')">
+                ACE
+              </button>
+            </div>
+          </div>
+          <p class="cli-setting-desc">Code search provider (updates CLAUDE.md context-tools reference)</p>
+        </div>
       </div>
     </div>
   `;

@@ -359,11 +652,13 @@ function renderCliStatus() {
         <i data-lucide="refresh-cw" class="w-4 h-4"></i>
       </button>
     </div>
+    ${ccwInstallHtml}
     <div class="cli-tools-grid">
       ${toolsHtml}
       ${codexLensHtml}
       ${semanticHtml}
     </div>
+    ${apiEndpointsHtml}
     ${settingsHtml}
   `;

@@ -375,7 +670,30 @@ function renderCliStatus() {

 // ========== Actions ==========
 function setDefaultCliTool(tool) {
+  // Validate: tool must be available and enabled
+  const status = cliToolStatus[tool] || {};
+  const config = cliToolsConfig[tool] || { enabled: true };
+
+  if (!status.available) {
+    showRefreshToast(`Cannot set ${tool} as default: not installed`, 'error');
+    return;
+  }
+
+  if (config.enabled === false) {
+    showRefreshToast(`Cannot set ${tool} as default: tool is disabled`, 'error');
+    return;
+  }
+
   defaultCliTool = tool;
+  // Save to config
+  if (window.claudeCliToolsConfig) {
+    window.claudeCliToolsConfig.defaultTool = tool;
+    fetch('/api/cli/tools-config', {
+      method: 'PUT',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({ defaultTool: tool })
+    }).catch(err => console.error('Failed to save default tool:', err));
+  }
   renderCliStatus();
   showRefreshToast(`Default CLI tool set to ${tool}`, 'success');
 }

@@ -416,11 +734,93 @@ function setRecursiveQueryEnabled(enabled) {
   showRefreshToast(`Recursive Query ${enabled ? 'enabled' : 'disabled'}`, 'success');
 }

+function getCacheInjectionMode() {
+  if (window.claudeCliToolsConfig && window.claudeCliToolsConfig.settings) {
+    return window.claudeCliToolsConfig.settings.cache?.injectionMode || 'auto';
+  }
+  return localStorage.getItem('ccw-cache-injection-mode') || 'auto';
+}
+
+async function setCacheInjectionMode(mode) {
+  try {
+    const response = await fetch('/api/cli/tools-config/cache', {
+      method: 'PUT',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({ injectionMode: mode })
+    });
+    if (response.ok) {
+      localStorage.setItem('ccw-cache-injection-mode', mode);
+      if (window.claudeCliToolsConfig) {
+        window.claudeCliToolsConfig.settings.cache.injectionMode = mode;
+      }
+      showRefreshToast(`Cache injection mode set to ${mode}`, 'success');
+    } else {
+      showRefreshToast('Failed to update cache settings', 'error');
+    }
+  } catch (err) {
+    console.error('Failed to update cache settings:', err);
+    showRefreshToast('Failed to update cache settings', 'error');
+  }
+}
+
+async function setCodeIndexMcpProvider(provider) {
+  try {
+    const response = await fetch('/api/cli/code-index-mcp', {
+      method: 'PUT',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({ provider: provider })
+    });
+    if (response.ok) {
+      codeIndexMcpProvider = provider;
+      if (window.claudeCliToolsConfig && window.claudeCliToolsConfig.settings) {
+        window.claudeCliToolsConfig.settings.codeIndexMcp = provider;
+      }
+      showRefreshToast(`Code Index MCP switched to ${provider === 'ace' ? 'ACE (Augment)' : 'CodexLens'}`, 'success');
+      // Re-render both CLI status and settings section
+      if (typeof renderCliStatus === 'function') renderCliStatus();
+      if (typeof renderCliSettingsSection === 'function') renderCliSettingsSection();
+    } else {
+      const data = await response.json();
+      showRefreshToast(`Failed to switch Code Index MCP: ${data.error}`, 'error');
+    }
+  } catch (err) {
+    console.error('Failed to switch Code Index MCP:', err);
+    showRefreshToast('Failed to switch Code Index MCP', 'error');
+  }
+}
+
 async function refreshAllCliStatus() {
   await loadAllStatuses();
   renderCliStatus();
 }

+async function toggleCliTool(tool, enabled) {
+  // If disabling the current default tool, switch to another available+enabled tool
+  if (!enabled && defaultCliTool === tool) {
+    const tools = ['gemini', 'qwen', 'codex', 'claude'];
+    const newDefault = tools.find(t => {
+      if (t === tool) return false;
+      const status = cliToolStatus[t] || {};
+      const config = cliToolsConfig[t] || { enabled: true };
+      return status.available && config.enabled !== false;
+    });
+
+    if (newDefault) {
+      defaultCliTool = newDefault;
+      if (window.claudeCliToolsConfig) {
+        window.claudeCliToolsConfig.defaultTool = newDefault;
+      }
+      showRefreshToast(`Default tool switched to ${newDefault}`, 'info');
+    } else {
+      showRefreshToast(`Warning: No other enabled tool available for default`, 'warning');
+    }
+  }
+
+  await updateCliToolEnabled(tool, enabled);
+  await loadAllStatuses();
+  renderCliStatus();
+}
+
 function installCodexLens() {
   openCodexLensInstallWizard();
 }

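Taken together, these setters read and write a settings block in cli-tools.json whose shape, as far as the code above shows, is roughly the following. The values are illustrative; only defaultTool, settings.codeIndexMcp, and settings.cache.injectionMode are actually referenced by the functions in this hunk.

{
  "defaultTool": "gemini",
  "settings": {
    "codeIndexMcp": "codexlens",
    "cache": { "injectionMode": "auto" }
  }
}
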
@@ -138,14 +138,14 @@ const HOOK_TEMPLATES = {
     category: 'memory',
     timeout: 5000
   },
-  // Session Context - Fires once per session at startup
-  // Uses state file to detect first prompt, only fires once
+  // Session Context - Progressive disclosure based on session state
+  // First prompt: returns cluster overview, subsequent: intent-matched sessions
   'session-context': {
     event: 'UserPromptSubmit',
     matcher: '',
-    command: 'bash',
-    args: ['-c', 'STATE_FILE="/tmp/.ccw-session-$CLAUDE_SESSION_ID"; [ -f "$STATE_FILE" ] && exit 0; touch "$STATE_FILE"; curl -s -X POST -H "Content-Type: application/json" -d "{\\"sessionId\\":\\"$CLAUDE_SESSION_ID\\"}" http://localhost:3456/api/hook/session-context 2>/dev/null | jq -r ".content // empty"'],
-    description: 'Load session context once at startup (cluster overview)',
+    command: 'ccw',
+    args: ['hook', 'session-context', '--stdin'],
+    description: 'Progressive session context (cluster overview → intent matching)',
     category: 'context',
     timeout: 5000
   }

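In effect the template now shells out to ccw hook session-context --stdin instead of the inline bash/curl/jq pipeline; the session-state tracking and the POST to /api/hook/session-context presumably move inside the ccw binary, which decides per session state whether to return the cluster overview (first prompt) or intent-matched sessions (later prompts).
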
@@ -946,20 +946,15 @@ function setCcwProjectRootToCurrent() {
 }

 // Build CCW Tools config with selected tools
-// Uses isWindowsPlatform from state.js to generate platform-appropriate commands
+// Uses globally installed ccw-mcp command (from claude-code-workflow package)
 function buildCcwToolsConfig(selectedTools, pathConfig = {}) {
   const { projectRoot, allowedDirs } = pathConfig;
-  // Windows requires 'cmd /c' wrapper to execute npx
-  // Other platforms (macOS, Linux) can run npx directly
-  const config = isWindowsPlatform
-    ? {
-        command: "cmd",
-        args: ["/c", "npx", "-y", "ccw-mcp"]
-      }
-    : {
-        command: "npx",
-        args: ["-y", "ccw-mcp"]
-      };
+  // Use globally installed ccw-mcp command directly
+  // Requires: npm install -g claude-code-workflow
+  const config = {
+    command: "ccw-mcp",
+    args: []
+  };

   // Add env if not all tools or not default 4 core tools
   const coreTools = ['write_file', 'edit_file', 'codex_lens', 'smart_search'];

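For reference, the object this now produces plugs into an MCP server registration roughly like the sketch below. The server key name and the env wiring from pathConfig are assumptions for illustration (the function itself only builds the command/args/env object), while CCW_PROJECT_ROOT and CCW_ALLOWED_DIRS are the variable names the MCP server reads per the DEFAULT_TOOLS hunk earlier in this diff.

"ccw-tools": {
  "command": "ccw-mcp",
  "args": [],
  "env": {
    "CCW_PROJECT_ROOT": "/path/to/project",
    "CCW_ALLOWED_DIRS": "/path/to/project"
  }
}
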
@@ -143,6 +143,18 @@ function initNavigation() {
       } else {
         console.error('renderCoreMemoryView not defined - please refresh the page');
       }
+    } else if (currentView === 'codexlens-manager') {
+      if (typeof renderCodexLensManager === 'function') {
+        renderCodexLensManager();
+      } else {
+        console.error('renderCodexLensManager not defined - please refresh the page');
+      }
+    } else if (currentView === 'api-settings') {
+      if (typeof renderApiSettings === 'function') {
+        renderApiSettings();
+      } else {
+        console.error('renderApiSettings not defined - please refresh the page');
+      }
     }
   });
 });

@@ -183,6 +195,10 @@ function updateContentTitle() {
     titleEl.textContent = t('title.helpGuide');
   } else if (currentView === 'core-memory') {
     titleEl.textContent = t('title.coreMemory');
+  } else if (currentView === 'codexlens-manager') {
+    titleEl.textContent = t('title.codexLensManager');
+  } else if (currentView === 'api-settings') {
+    titleEl.textContent = t('title.apiSettings');
   } else if (currentView === 'liteTasks') {
     const names = { 'lite-plan': t('title.litePlanSessions'), 'lite-fix': t('title.liteFixSessions') };
     titleEl.textContent = names[currentLiteType] || t('title.liteTasks');

@@ -19,10 +19,18 @@ const i18n = {
     'common.delete': 'Delete',
     'common.cancel': 'Cancel',
     'common.save': 'Save',
+    'common.include': 'Include',
     'common.close': 'Close',
     'common.loading': 'Loading...',
     'common.error': 'Error',
     'common.success': 'Success',
+    'common.deleteSuccess': 'Deleted successfully',
+    'common.deleteFailed': 'Delete failed',
+    'common.retry': 'Retry',
+    'common.refresh': 'Refresh',
+    'common.minutes': 'minutes',
+    'common.enabled': 'Enabled',
+    'common.disabled': 'Disabled',

     // Header
     'header.project': 'Project:',

@@ -38,6 +46,7 @@ const i18n = {
     'nav.explorer': 'Explorer',
     'nav.status': 'Status',
     'nav.history': 'History',
+    'nav.codexLensManager': 'CodexLens',
     'nav.memory': 'Memory',
     'nav.contextMemory': 'Context',
     'nav.coreMemory': 'Core Memory',

@@ -95,7 +104,8 @@ const i18n = {
     'title.hookManager': 'Hook Manager',
     'title.memoryModule': 'Memory Module',
     'title.promptHistory': 'Prompt History',
+    'title.codexLensManager': 'CodexLens Manager',

     // Search
     'search.placeholder': 'Search...',

@@ -212,6 +222,7 @@ const i18n = {
     'cli.default': 'Default',
     'cli.install': 'Install',
     'cli.uninstall': 'Uninstall',
+    'cli.openManager': 'Manager',
     'cli.initIndex': 'Init Index',
     'cli.geminiDesc': 'Google AI for code analysis',
     'cli.qwenDesc': 'Alibaba AI assistant',

@@ -220,12 +231,19 @@ const i18n = {
     'cli.codexLensDescFull': 'Full-text code search engine',
     'cli.semanticDesc': 'AI-powered code understanding',
     'cli.semanticDescFull': 'Natural language code search',
+    'cli.apiEndpoints': 'API Endpoints',
+    'cli.configured': 'configured',
+    'cli.addToCli': 'Add to CLI',
+    'cli.enabled': 'Enabled',
+    'cli.disabled': 'Disabled',

     // CodexLens Configuration
     'codexlens.config': 'CodexLens Configuration',
+    'codexlens.configDesc': 'Manage code indexing, semantic search, and embedding models',
     'codexlens.status': 'Status',
     'codexlens.installed': 'Installed',
     'codexlens.notInstalled': 'Not Installed',
+    'codexlens.installFirst': 'Install CodexLens to access semantic search and model management features',
     'codexlens.indexes': 'Indexes',
     'codexlens.currentWorkspace': 'Current Workspace',
     'codexlens.indexStoragePath': 'Index Storage Path',

@@ -234,6 +252,8 @@ const i18n = {
     'codexlens.newStoragePath': 'New Storage Path',
     'codexlens.pathPlaceholder': 'e.g., /path/to/indexes or ~/.codexlens/indexes',
     'codexlens.pathInfo': 'Supports ~ for home directory. Changes take effect immediately.',
+    'codexlens.pathUnchanged': 'Path unchanged',
+    'codexlens.pathEmpty': 'Path cannot be empty',
     'codexlens.migrationRequired': 'Migration Required',
     'codexlens.migrationWarning': 'After changing the path, existing indexes will need to be re-initialized for each workspace.',
     'codexlens.actions': 'Actions',

@@ -241,6 +261,50 @@ const i18n = {
     'codexlens.cleanCurrentWorkspace': 'Clean Current Workspace',
     'codexlens.cleanAllIndexes': 'Clean All Indexes',
     'codexlens.installCodexLens': 'Install CodexLens',
+    'codexlens.createIndex': 'Create Index',
+    'codexlens.embeddingBackend': 'Embedding Backend',
+    'codexlens.localFastembed': 'Local (FastEmbed)',
+    'codexlens.apiLitellm': 'API (LiteLLM)',
+    'codexlens.backendHint': 'Select local model or remote API endpoint',
+    'codexlens.noApiModels': 'No API embedding models configured',
+    'codexlens.embeddingModel': 'Embedding Model',
+    'codexlens.modelHint': 'Select embedding model for vector search (models with ✓ are installed)',
+    'codexlens.concurrency': 'API Concurrency',
+    'codexlens.concurrencyHint': 'Number of parallel API calls. Higher values speed up indexing but may hit rate limits.',
+    'codexlens.concurrencyCustom': 'Custom',
+    'codexlens.rotation': 'Multi-Provider Rotation',
+    'codexlens.rotationDesc': 'Aggregate multiple API providers and keys for parallel embedding generation',
+    'codexlens.rotationEnabled': 'Enable Rotation',
+    'codexlens.rotationStrategy': 'Rotation Strategy',
+    'codexlens.strategyRoundRobin': 'Round Robin',
+    'codexlens.strategyLatencyAware': 'Latency Aware',
+    'codexlens.strategyWeightedRandom': 'Weighted Random',
+    'codexlens.targetModel': 'Target Model',
+    'codexlens.targetModelHint': 'Model name that all providers should support (e.g., qwen3-embedding)',
+    'codexlens.cooldownSeconds': 'Cooldown (seconds)',
+    'codexlens.cooldownHint': 'Default cooldown after rate limit (60s recommended)',
+    'codexlens.rotationProviders': 'Rotation Providers',
+    'codexlens.addProvider': 'Add Provider',
+    'codexlens.noRotationProviders': 'No providers configured for rotation',
+    'codexlens.providerWeight': 'Weight',
+    'codexlens.maxConcurrentPerKey': 'Max Concurrent/Key',
+    'codexlens.useAllKeys': 'Use All Keys',
+    'codexlens.selectKeys': 'Select Keys',
+    'codexlens.configureRotation': 'Configure Rotation',
+    'codexlens.configureInApiSettings': 'Configure in API Settings',
+    'codexlens.rotationSaved': 'Rotation config saved successfully',
+    'codexlens.endpointsSynced': 'endpoints synced to CodexLens',
+    'codexlens.syncFailed': 'Sync failed',
+    'codexlens.rotationDeleted': 'Rotation config deleted',
+    'codexlens.totalEndpoints': 'Total Endpoints',
+    'codexlens.fullIndex': 'Full',
+    'codexlens.vectorIndex': 'Vector',
+    'codexlens.ftsIndex': 'FTS',
+    'codexlens.fullIndexDesc': 'FTS + Semantic search (recommended)',
+    'codexlens.vectorIndexDesc': 'Semantic search with embeddings only',
+    'codexlens.ftsIndexDesc': 'Fast full-text search only',
+    'codexlens.indexTypeHint': 'Full index includes FTS + semantic search. FTS only is faster but without AI-powered search.',
+    'codexlens.maintenance': 'Maintenance',
     'codexlens.testSearch': 'Test Search',
     'codexlens.testFunctionality': 'test CodexLens functionality',
     'codexlens.textSearch': 'Text Search',

@@ -254,6 +318,9 @@ const i18n = {
     'codexlens.runSearch': 'Run Search',
     'codexlens.results': 'Results',
     'codexlens.resultsCount': 'results',
+    'codexlens.resultLimit': 'Limit',
+    'codexlens.contentLength': 'Content Length',
+    'codexlens.extraFiles': 'Extra Files',
     'codexlens.saveConfig': 'Save Configuration',
     'codexlens.searching': 'Searching...',
     'codexlens.searchCompleted': 'Search completed',

@@ -277,8 +344,28 @@ const i18n = {
     'codexlens.installDeps': 'Install Dependencies',
     'codexlens.installDepsPrompt': 'Would you like to install them now? (This may take a few minutes)\n\nClick "Cancel" to create FTS index only.',
     'codexlens.installingDeps': 'Installing dependencies...',
+    'codexlens.installingMode': 'Installing with',
     'codexlens.depsInstalled': 'Dependencies installed successfully',
     'codexlens.depsInstallFailed': 'Failed to install dependencies',
+
+    // GPU Mode Selection
+    'codexlens.selectGpuMode': 'Select acceleration mode',
+    'codexlens.cpuModeDesc': 'Standard CPU processing',
+    'codexlens.directmlModeDesc': 'Windows GPU (NVIDIA/AMD/Intel)',
+    'codexlens.cudaModeDesc': 'NVIDIA GPU (requires CUDA Toolkit)',
+    'common.recommended': 'Recommended',
+    'common.unavailable': 'Unavailable',
+    'common.auto': 'Auto',
+
+    // GPU Device Selection
+    'codexlens.selectGpuDevice': 'Select GPU Device',
+    'codexlens.discrete': 'Discrete',
+    'codexlens.integrated': 'Integrated',
+    'codexlens.selectingGpu': 'Selecting GPU...',
+    'codexlens.gpuSelected': 'GPU selected',
+    'codexlens.resettingGpu': 'Resetting GPU selection...',
+    'codexlens.gpuReset': 'GPU selection reset to auto',
+    'codexlens.resetToAuto': 'Reset to Auto',
     'codexlens.modelManagement': 'Model Management',
     'codexlens.loadingModels': 'Loading models...',
     'codexlens.downloadModel': 'Download',

@@ -293,6 +380,35 @@ const i18n = {
     'codexlens.modelListError': 'Failed to load models',
     'codexlens.noModelsAvailable': 'No models available',
+
+    // Model Download Progress
+    'codexlens.downloadingModel': 'Downloading',
+    'codexlens.connectingToHuggingFace': 'Connecting to Hugging Face...',
+    'codexlens.downloadTimeEstimate': 'Estimated time',
+    'codexlens.manualDownloadHint': 'Manual download',
+    'codexlens.downloadingModelFiles': 'Downloading model files...',
+    'codexlens.downloadingWeights': 'Downloading model weights...',
+    'codexlens.downloadingTokenizer': 'Downloading tokenizer...',
+    'codexlens.verifyingModel': 'Verifying model...',
+    'codexlens.finalizingDownload': 'Finalizing...',
+    'codexlens.downloadComplete': 'Download complete!',
+    'codexlens.downloadFailed': 'Download failed',
+    'codexlens.manualDownloadOptions': 'Manual download options',
+    'codexlens.cliDownload': 'CLI',
|
'codexlens.huggingfaceDownload': 'Hugging Face',
|
||||||
|
'codexlens.downloadCanceled': 'Download canceled',
|
||||||
|
|
||||||
|
// Manual Download Guide
|
||||||
|
'codexlens.manualDownloadGuide': 'Manual Download Guide',
|
||||||
|
'codexlens.cliMethod': 'Command Line (Recommended)',
|
||||||
|
'codexlens.cliMethodDesc': 'Run in terminal with progress display:',
|
||||||
|
'codexlens.pythonMethod': 'Python Script',
|
||||||
|
'codexlens.pythonMethodDesc': 'Pre-download model using Python:',
|
||||||
|
'codexlens.hfHubMethod': 'Hugging Face Hub CLI',
|
||||||
|
'codexlens.hfHubMethodDesc': 'Download using huggingface-cli with resume support:',
|
||||||
|
'codexlens.modelLinks': 'Direct Model Links',
|
||||||
|
'codexlens.cacheLocation': 'Model Storage Location',
|
||||||
|
'common.copied': 'Copied to clipboard',
|
||||||
|
|
||||||
// CodexLens Indexing Progress
|
// CodexLens Indexing Progress
|
||||||
'codexlens.indexing': 'Indexing',
|
'codexlens.indexing': 'Indexing',
|
||||||
'codexlens.indexingDesc': 'Building code index for workspace',
|
'codexlens.indexingDesc': 'Building code index for workspace',
|
||||||
@@ -301,6 +417,45 @@ const i18n = {
|
|||||||
'codexlens.indexComplete': 'Index complete',
|
'codexlens.indexComplete': 'Index complete',
|
||||||
'codexlens.indexSuccess': 'Index created successfully',
|
'codexlens.indexSuccess': 'Index created successfully',
|
||||||
'codexlens.indexFailed': 'Indexing failed',
|
'codexlens.indexFailed': 'Indexing failed',
|
||||||
|
'codexlens.embeddingsFailed': 'Embeddings generation failed',
|
||||||
|
'codexlens.ftsSuccessEmbeddingsFailed': 'FTS index created, but embeddings failed',
|
||||||
|
|
||||||
|
// CodexLens Install
|
||||||
|
'codexlens.installDesc': 'Python-based code indexing engine',
|
||||||
|
'codexlens.whatWillBeInstalled': 'What will be installed:',
|
||||||
|
'codexlens.pythonVenv': 'Python virtual environment',
|
||||||
|
'codexlens.pythonVenvDesc': 'Isolated Python environment',
|
||||||
|
'codexlens.codexlensPackage': 'CodexLens package',
|
||||||
|
'codexlens.codexlensPackageDesc': 'Code indexing and search engine',
|
||||||
|
'codexlens.sqliteFtsDesc': 'Full-text search database',
|
||||||
|
'codexlens.installLocation': 'Installation Location',
|
||||||
|
'codexlens.installTime': 'First installation may take 2-3 minutes to download and setup Python packages.',
|
||||||
|
'codexlens.startingInstall': 'Starting installation...',
|
||||||
|
'codexlens.installing': 'Installing...',
|
||||||
|
'codexlens.creatingVenv': 'Creating virtual environment...',
|
||||||
|
'codexlens.installingPip': 'Installing pip packages...',
|
||||||
|
'codexlens.installingPackage': 'Installing CodexLens package...',
|
||||||
|
'codexlens.settingUpDeps': 'Setting up Python dependencies...',
|
||||||
|
'codexlens.installComplete': 'Installation complete!',
|
||||||
|
'codexlens.installSuccess': 'CodexLens installed successfully!',
|
||||||
|
'codexlens.installNow': 'Install Now',
|
||||||
|
'codexlens.accelerator': 'Accelerator',
|
||||||
|
|
||||||
|
// CodexLens Uninstall
|
||||||
|
'codexlens.uninstall': 'Uninstall',
|
||||||
|
'codexlens.uninstallDesc': 'Remove CodexLens and all data',
|
||||||
|
'codexlens.whatWillBeRemoved': 'What will be removed:',
|
||||||
|
'codexlens.removeVenv': 'Virtual environment at ~/.codexlens/venv',
|
||||||
|
'codexlens.removeData': 'All CodexLens indexed data and databases',
|
||||||
|
'codexlens.removeConfig': 'Configuration and semantic search models',
|
||||||
|
'codexlens.removing': 'Removing files...',
|
||||||
|
'codexlens.uninstalling': 'Uninstalling...',
|
||||||
|
'codexlens.removingVenv': 'Removing virtual environment...',
|
||||||
|
'codexlens.removingData': 'Deleting indexed data...',
|
||||||
|
'codexlens.removingConfig': 'Cleaning up configuration...',
|
||||||
|
'codexlens.finalizing': 'Finalizing removal...',
|
||||||
|
'codexlens.uninstallComplete': 'Uninstallation complete!',
|
||||||
|
'codexlens.uninstallSuccess': 'CodexLens uninstalled successfully!',
|
||||||
|
|
||||||
// Index Manager
|
// Index Manager
|
||||||
'index.manager': 'Index Manager',
|
'index.manager': 'Index Manager',
|
||||||
@@ -333,9 +488,12 @@ const i18n = {
|
|||||||
'index.fullDesc': 'FTS + Semantic search (recommended)',
|
'index.fullDesc': 'FTS + Semantic search (recommended)',
|
||||||
'index.selectModel': 'Select embedding model',
|
'index.selectModel': 'Select embedding model',
|
||||||
'index.modelCode': 'Code (768d)',
|
'index.modelCode': 'Code (768d)',
|
||||||
|
'index.modelBase': 'Base (768d)',
|
||||||
'index.modelFast': 'Fast (384d)',
|
'index.modelFast': 'Fast (384d)',
|
||||||
'index.modelMultilingual': 'Multilingual (1024d)',
|
'index.modelMinilm': 'MiniLM (384d)',
|
||||||
'index.modelBalanced': 'Balanced (1024d)',
|
'index.modelMultilingual': 'Multilingual (1024d) ⚠️',
|
||||||
|
'index.modelBalanced': 'Balanced (1024d) ⚠️',
|
||||||
|
'index.dimensionWarning': '1024d models require more resources',
|
||||||
|
|
||||||
// Semantic Search Configuration
|
// Semantic Search Configuration
|
||||||
'semantic.settings': 'Semantic Search Settings',
|
'semantic.settings': 'Semantic Search Settings',
|
||||||
@@ -363,6 +521,19 @@ const i18n = {
|
|||||||
'lang.windowsDisableSuccess': 'Windows platform guidelines disabled',
|
'lang.windowsDisableSuccess': 'Windows platform guidelines disabled',
|
||||||
'lang.windowsEnableFailed': 'Failed to enable Windows platform guidelines',
|
'lang.windowsEnableFailed': 'Failed to enable Windows platform guidelines',
|
||||||
'lang.windowsDisableFailed': 'Failed to disable Windows platform guidelines',
|
'lang.windowsDisableFailed': 'Failed to disable Windows platform guidelines',
|
||||||
|
'lang.installRequired': 'Run "ccw install" to enable this feature',
|
||||||
|
|
||||||
|
// CCW Installation Status
|
||||||
|
'status.installed': 'Installed',
|
||||||
|
'status.incomplete': 'Incomplete',
|
||||||
|
'status.notInstalled': 'Not Installed',
|
||||||
|
'status.ccwInstall': 'CCW Workflows',
|
||||||
|
'status.ccwInstallDesc': 'Required workflow files for full functionality',
|
||||||
|
'status.required': 'Required',
|
||||||
|
'status.filesMissing': 'files missing',
|
||||||
|
'status.missingFiles': 'Missing files',
|
||||||
|
'status.runToFix': 'Run to fix',
|
||||||
|
|
||||||
'cli.promptFormat': 'Prompt Format',
|
'cli.promptFormat': 'Prompt Format',
|
||||||
'cli.promptFormatDesc': 'Format for multi-turn conversation concatenation',
|
'cli.promptFormatDesc': 'Format for multi-turn conversation concatenation',
|
||||||
'cli.storageBackend': 'Storage Backend',
|
'cli.storageBackend': 'Storage Backend',
|
||||||
@@ -375,7 +546,9 @@ const i18n = {
|
|||||||
'cli.recursiveQueryDesc': 'Aggregate CLI history and memory data from parent and child projects',
|
'cli.recursiveQueryDesc': 'Aggregate CLI history and memory data from parent and child projects',
|
||||||
'cli.maxContextFiles': 'Max Context Files',
|
'cli.maxContextFiles': 'Max Context Files',
|
||||||
'cli.maxContextFilesDesc': 'Maximum files to include in smart context',
|
'cli.maxContextFilesDesc': 'Maximum files to include in smart context',
|
||||||
|
'cli.codeIndexMcp': 'Code Index MCP',
|
||||||
|
'cli.codeIndexMcpDesc': 'Code search provider (updates CLAUDE.md context-tools reference)',
|
||||||
|
|
||||||
// CCW Install
|
// CCW Install
|
||||||
'ccw.install': 'CCW Install',
|
'ccw.install': 'CCW Install',
|
||||||
'ccw.installations': 'installation',
|
'ccw.installations': 'installation',
|
||||||
@@ -1208,6 +1381,206 @@ const i18n = {
|
|||||||
'claude.unsupportedFileType': 'Unsupported file type',
|
'claude.unsupportedFileType': 'Unsupported file type',
|
||||||
'claude.loadFileError': 'Failed to load file',
|
'claude.loadFileError': 'Failed to load file',
|
||||||
|
|
||||||
|
|
||||||
|
// API Settings
|
||||||
|
'nav.apiSettings': 'API Settings',
|
||||||
|
'title.apiSettings': 'API Settings',
|
||||||
|
'apiSettings.providers': 'Providers',
|
||||||
|
'apiSettings.customEndpoints': 'Custom Endpoints',
|
||||||
|
'apiSettings.cacheSettings': 'Cache Settings',
|
||||||
|
'apiSettings.addProvider': 'Add Provider',
|
||||||
|
'apiSettings.editProvider': 'Edit Provider',
|
||||||
|
'apiSettings.deleteProvider': 'Delete Provider',
|
||||||
|
'apiSettings.addEndpoint': 'Add Endpoint',
|
||||||
|
'apiSettings.editEndpoint': 'Edit Endpoint',
|
||||||
|
'apiSettings.deleteEndpoint': 'Delete Endpoint',
|
||||||
|
'apiSettings.providerType': 'Provider Type',
|
||||||
|
'apiSettings.apiFormat': 'API Format',
|
||||||
|
'apiSettings.compatible': 'Compatible',
|
||||||
|
'apiSettings.customFormat': 'Custom Format',
|
||||||
|
'apiSettings.apiFormatHint': 'Most providers (DeepSeek, Ollama, etc.) use OpenAI-compatible format',
|
||||||
|
'apiSettings.displayName': 'Display Name',
|
||||||
|
'apiSettings.apiKey': 'API Key',
|
||||||
|
'apiSettings.apiBaseUrl': 'API Base URL',
|
||||||
|
'apiSettings.useEnvVar': 'Use environment variable',
|
||||||
|
'apiSettings.enableProvider': 'Enable provider',
|
||||||
|
'apiSettings.advancedSettings': 'Advanced Settings',
|
||||||
|
'apiSettings.basicInfo': 'Basic Info',
|
||||||
|
'apiSettings.endpointSettings': 'Endpoint Settings',
|
||||||
|
'apiSettings.timeout': 'Timeout (seconds)',
|
||||||
|
'apiSettings.seconds': 'seconds',
|
||||||
|
'apiSettings.timeoutHint': 'Request timeout in seconds (default: 300)',
|
||||||
|
'apiSettings.maxRetries': 'Max Retries',
|
||||||
|
'apiSettings.maxRetriesHint': 'Maximum retry attempts on failure',
|
||||||
|
'apiSettings.organization': 'Organization ID',
|
||||||
|
'apiSettings.organizationHint': 'OpenAI organization ID (org-...)',
|
||||||
|
'apiSettings.apiVersion': 'API Version',
|
||||||
|
'apiSettings.apiVersionHint': 'Azure API version (e.g., 2024-02-01)',
|
||||||
|
'apiSettings.rpm': 'RPM Limit',
|
||||||
|
'apiSettings.tpm': 'TPM Limit',
|
||||||
|
'apiSettings.unlimited': 'Unlimited',
|
||||||
|
'apiSettings.proxy': 'Proxy Server',
|
||||||
|
'apiSettings.proxyHint': 'HTTP proxy server URL',
|
||||||
|
'apiSettings.customHeaders': 'Custom Headers',
|
||||||
|
'apiSettings.customHeadersHint': 'JSON object with custom HTTP headers',
|
||||||
|
'apiSettings.invalidJsonHeaders': 'Invalid JSON in custom headers',
|
||||||
|
'apiSettings.searchProviders': 'Search providers...',
|
||||||
|
'apiSettings.selectProvider': 'Select a Provider',
|
||||||
|
'apiSettings.selectProviderHint': 'Select a provider from the list to view and manage its settings',
|
||||||
|
'apiSettings.noProvidersFound': 'No providers found',
|
||||||
|
'apiSettings.llmModels': 'LLM Models',
|
||||||
|
'apiSettings.embeddingModels': 'Embedding Models',
|
||||||
|
'apiSettings.manageModels': 'Manage',
|
||||||
|
'apiSettings.addModel': 'Add Model',
|
||||||
|
'apiSettings.multiKeySettings': 'Multi-Key Settings',
|
||||||
|
'apiSettings.noModels': 'No models configured',
|
||||||
|
'apiSettings.previewModel': 'Preview',
|
||||||
|
'apiSettings.modelSettings': 'Model Settings',
|
||||||
|
'apiSettings.deleteModel': 'Delete Model',
|
||||||
|
'apiSettings.endpointPreview': 'Endpoint Preview',
|
||||||
|
'apiSettings.modelBaseUrlOverride': 'Base URL Override',
|
||||||
|
'apiSettings.modelBaseUrlHint': 'Override the provider base URL for this specific model (leave empty to use provider default)',
|
||||||
|
'apiSettings.providerUpdated': 'Provider updated',
|
||||||
|
'apiSettings.syncToCodexLens': 'Sync to CodexLens',
|
||||||
|
'apiSettings.configSynced': 'Config synced to CodexLens',
|
||||||
|
'apiSettings.sdkAutoAppends': 'SDK auto-appends',
|
||||||
|
'apiSettings.preview': 'Preview',
|
||||||
|
'apiSettings.used': 'used',
|
||||||
|
'apiSettings.total': 'total',
|
||||||
|
'apiSettings.testConnection': 'Test Connection',
|
||||||
|
'apiSettings.endpointId': 'Endpoint ID',
|
||||||
|
'apiSettings.endpointIdHint': 'Usage: ccw cli -p "..." --model <endpoint-id>',
|
||||||
|
'apiSettings.endpoints': 'Endpoints',
|
||||||
|
'apiSettings.addEndpointHint': 'Create custom endpoint aliases for CLI usage',
|
||||||
|
'apiSettings.endpointModel': 'Model',
|
||||||
|
'apiSettings.selectEndpoint': 'Select an endpoint',
|
||||||
|
'apiSettings.selectEndpointHint': 'Choose an endpoint from the list to view or edit its settings',
|
||||||
|
'apiSettings.provider': 'Provider',
|
||||||
|
'apiSettings.model': 'Model',
|
||||||
|
'apiSettings.selectModel': 'Select model',
|
||||||
|
'apiSettings.noModelsConfigured': 'No models configured for this provider',
|
||||||
|
'apiSettings.cacheStrategy': 'Cache Strategy',
|
||||||
|
'apiSettings.enableContextCaching': 'Enable Context Caching',
|
||||||
|
'apiSettings.cacheTTL': 'TTL (minutes)',
|
||||||
|
'apiSettings.cacheMaxSize': 'Max Size (KB)',
|
||||||
|
'apiSettings.autoCachePatterns': 'Auto-cache file patterns',
|
||||||
|
'apiSettings.enableGlobalCaching': 'Enable Global Caching',
|
||||||
|
'apiSettings.cacheUsed': 'Used',
|
||||||
|
'apiSettings.cacheEntries': 'Entries',
|
||||||
|
'apiSettings.clearCache': 'Clear Cache',
|
||||||
|
'apiSettings.noProviders': 'No providers configured',
|
||||||
|
'apiSettings.noEndpoints': 'No endpoints configured',
|
||||||
|
'apiSettings.enabled': 'Enabled',
|
||||||
|
'apiSettings.disabled': 'Disabled',
|
||||||
|
'apiSettings.cacheEnabled': 'Cache Enabled',
|
||||||
|
'apiSettings.cacheDisabled': 'Cache Disabled',
|
||||||
|
'apiSettings.providerSaved': 'Provider saved successfully',
|
||||||
|
'apiSettings.providerDeleted': 'Provider deleted successfully',
|
||||||
|
'apiSettings.apiBaseUpdated': 'API Base URL updated successfully',
|
||||||
|
'apiSettings.endpointSaved': 'Endpoint saved successfully',
|
||||||
|
'apiSettings.endpointDeleted': 'Endpoint deleted successfully',
|
||||||
|
'apiSettings.cacheCleared': 'Cache cleared successfully',
|
||||||
|
'apiSettings.cacheSettingsUpdated': 'Cache settings updated',
|
||||||
|
'apiSettings.embeddingPool': 'Embedding Pool',
|
||||||
|
'apiSettings.embeddingPoolDesc': 'Auto-rotate between providers with same model',
|
||||||
|
'apiSettings.targetModel': 'Target Model',
|
||||||
|
'apiSettings.discoveredProviders': 'Discovered Providers',
|
||||||
|
'apiSettings.autoDiscover': 'Auto-discover providers',
|
||||||
|
'apiSettings.excludeProvider': 'Exclude',
|
||||||
|
'apiSettings.defaultCooldown': 'Cooldown (seconds)',
|
||||||
|
'apiSettings.defaultConcurrent': 'Concurrent per key',
|
||||||
|
'apiSettings.poolEnabled': 'Enable Embedding Pool',
|
||||||
|
'apiSettings.noProvidersFound': 'No providers found for this model',
|
||||||
|
'apiSettings.poolSaved': 'Embedding pool config saved',
|
||||||
|
'apiSettings.strategy': 'Strategy',
|
||||||
|
'apiSettings.providerKeys': 'keys',
|
||||||
|
'apiSettings.selectTargetModel': 'Select target model',
|
||||||
|
'apiSettings.confirmDeleteProvider': 'Are you sure you want to delete this provider?',
|
||||||
|
'apiSettings.confirmDeleteEndpoint': 'Are you sure you want to delete this endpoint?',
|
||||||
|
'apiSettings.confirmClearCache': 'Are you sure you want to clear the cache?',
|
||||||
|
'apiSettings.connectionSuccess': 'Connection successful',
|
||||||
|
'apiSettings.connectionFailed': 'Connection failed',
|
||||||
|
'apiSettings.saveProviderFirst': 'Please save the provider first',
|
||||||
|
'apiSettings.addProviderFirst': 'Please add a provider first',
|
||||||
|
'apiSettings.failedToLoad': 'Failed to load API settings',
|
||||||
|
'apiSettings.toggleVisibility': 'Toggle visibility',
|
||||||
|
'apiSettings.noProvidersHint': 'Add an API provider to get started',
|
||||||
|
'apiSettings.noEndpointsHint': 'Create custom endpoints for quick access to models',
|
||||||
|
'apiSettings.cache': 'Cache',
|
||||||
|
'apiSettings.off': 'Off',
|
||||||
|
'apiSettings.used': 'used',
|
||||||
|
'apiSettings.total': 'total',
|
||||||
|
'apiSettings.cacheUsage': 'Usage',
|
||||||
|
'apiSettings.cacheSize': 'Size',
|
||||||
|
'apiSettings.endpointsDescription': 'Manage custom API endpoints for quick model access',
|
||||||
|
'apiSettings.totalEndpoints': 'Total Endpoints',
|
||||||
|
'apiSettings.cachedEndpoints': 'Cached Endpoints',
|
||||||
|
'apiSettings.cacheTabHint': 'Configure global cache settings and view statistics in the main panel',
|
||||||
|
'apiSettings.cacheDescription': 'Manage response caching to improve performance and reduce costs',
|
||||||
|
'apiSettings.cachedEntries': 'Cached Entries',
|
||||||
|
'apiSettings.storageUsed': 'Storage Used',
|
||||||
|
'apiSettings.cacheActions': 'Cache Actions',
|
||||||
|
'apiSettings.cacheStatistics': 'Cache Statistics',
|
||||||
|
'apiSettings.globalCache': 'Global Cache',
|
||||||
|
|
||||||
|
// Multi-key management
|
||||||
|
'apiSettings.apiKeys': 'API Keys',
|
||||||
|
'apiSettings.addKey': 'Add Key',
|
||||||
|
'apiSettings.keyLabel': 'Label',
|
||||||
|
'apiSettings.keyValue': 'API Key',
|
||||||
|
'apiSettings.keyWeight': 'Weight',
|
||||||
|
'apiSettings.removeKey': 'Remove',
|
||||||
|
'apiSettings.noKeys': 'No API keys configured',
|
||||||
|
'apiSettings.primaryKey': 'Primary Key',
|
||||||
|
|
||||||
|
// Routing strategy
|
||||||
|
'apiSettings.routingStrategy': 'Routing Strategy',
|
||||||
|
'apiSettings.simpleShuffleRouting': 'Simple Shuffle (Random)',
|
||||||
|
'apiSettings.weightedRouting': 'Weighted Distribution',
|
||||||
|
'apiSettings.latencyRouting': 'Latency-Based',
|
||||||
|
'apiSettings.costRouting': 'Cost-Based',
|
||||||
|
'apiSettings.leastBusyRouting': 'Least Busy',
|
||||||
|
'apiSettings.routingHint': 'How to distribute requests across multiple API keys',
|
||||||
|
|
||||||
|
// Health check
|
||||||
|
'apiSettings.healthCheck': 'Health Check',
|
||||||
|
'apiSettings.enableHealthCheck': 'Enable Health Check',
|
||||||
|
'apiSettings.healthInterval': 'Check Interval (seconds)',
|
||||||
|
'apiSettings.healthCooldown': 'Cooldown (seconds)',
|
||||||
|
'apiSettings.failureThreshold': 'Failure Threshold',
|
||||||
|
'apiSettings.healthStatus': 'Status',
|
||||||
|
'apiSettings.healthy': 'Healthy',
|
||||||
|
'apiSettings.unhealthy': 'Unhealthy',
|
||||||
|
'apiSettings.unknown': 'Unknown',
|
||||||
|
'apiSettings.lastCheck': 'Last Check',
|
||||||
|
'apiSettings.testKey': 'Test Key',
|
||||||
|
'apiSettings.testingKey': 'Testing...',
|
||||||
|
'apiSettings.keyValid': 'Key is valid',
|
||||||
|
'apiSettings.keyInvalid': 'Key is invalid',
|
||||||
|
|
||||||
|
// Embedding models
|
||||||
|
'apiSettings.embeddingDimensions': 'Dimensions',
|
||||||
|
'apiSettings.embeddingMaxTokens': 'Max Tokens',
|
||||||
|
'apiSettings.selectEmbeddingModel': 'Select Embedding Model',
|
||||||
|
|
||||||
|
// Model modal
|
||||||
|
'apiSettings.addLlmModel': 'Add LLM Model',
|
||||||
|
'apiSettings.addEmbeddingModel': 'Add Embedding Model',
|
||||||
|
'apiSettings.modelId': 'Model ID',
|
||||||
|
'apiSettings.modelName': 'Display Name',
|
||||||
|
'apiSettings.modelSeries': 'Series',
|
||||||
|
'apiSettings.selectFromPresets': 'Select from Presets',
|
||||||
|
'apiSettings.customModel': 'Custom Model',
|
||||||
|
'apiSettings.capabilities': 'Capabilities',
|
||||||
|
'apiSettings.streaming': 'Streaming',
|
||||||
|
'apiSettings.functionCalling': 'Function Calling',
|
||||||
|
'apiSettings.vision': 'Vision',
|
||||||
|
'apiSettings.contextWindow': 'Context Window',
|
||||||
|
'apiSettings.description': 'Description',
|
||||||
|
'apiSettings.optional': 'Optional',
|
||||||
|
'apiSettings.modelIdExists': 'Model ID already exists',
|
||||||
|
'apiSettings.useModelTreeToManage': 'Use the model tree to manage individual models',
|
||||||
|
|
||||||
// Common
|
// Common
|
||||||
'common.cancel': 'Cancel',
|
'common.cancel': 'Cancel',
|
||||||
'common.optional': '(Optional)',
|
'common.optional': '(Optional)',
|
||||||
@@ -1231,6 +1604,7 @@ const i18n = {
|
|||||||
'common.saveFailed': 'Failed to save',
|
'common.saveFailed': 'Failed to save',
|
||||||
'common.unknownError': 'Unknown error',
|
'common.unknownError': 'Unknown error',
|
||||||
'common.exception': 'Exception',
|
'common.exception': 'Exception',
|
||||||
|
'common.status': 'Status',
|
||||||
|
|
||||||
// Core Memory
|
// Core Memory
|
||||||
'title.coreMemory': 'Core Memory',
|
'title.coreMemory': 'Core Memory',
|
||||||
@@ -1354,11 +1728,19 @@ const i18n = {
|
|||||||
'common.delete': '删除',
|
'common.delete': '删除',
|
||||||
'common.cancel': '取消',
|
'common.cancel': '取消',
|
||||||
'common.save': '保存',
|
'common.save': '保存',
|
||||||
|
'common.include': '包含',
|
||||||
'common.close': '关闭',
|
'common.close': '关闭',
|
||||||
'common.loading': '加载中...',
|
'common.loading': '加载中...',
|
||||||
'common.error': '错误',
|
'common.error': '错误',
|
||||||
'common.success': '成功',
|
'common.success': '成功',
|
||||||
|
'common.deleteSuccess': '删除成功',
|
||||||
|
'common.deleteFailed': '删除失败',
|
||||||
|
'common.retry': '重试',
|
||||||
|
'common.refresh': '刷新',
|
||||||
|
'common.minutes': '分钟',
|
||||||
|
'common.enabled': '已启用',
|
||||||
|
'common.disabled': '已禁用',
|
||||||
|
|
||||||
// Header
|
// Header
|
||||||
'header.project': '项目:',
|
'header.project': '项目:',
|
||||||
'header.recentProjects': '最近项目',
|
'header.recentProjects': '最近项目',
|
||||||
@@ -1373,6 +1755,7 @@ const i18n = {
|
|||||||
'nav.explorer': '文件浏览器',
|
'nav.explorer': '文件浏览器',
|
||||||
'nav.status': '状态',
|
'nav.status': '状态',
|
||||||
'nav.history': '历史',
|
'nav.history': '历史',
|
||||||
|
'nav.codexLensManager': 'CodexLens',
|
||||||
'nav.memory': '记忆',
|
'nav.memory': '记忆',
|
||||||
'nav.contextMemory': '活动',
|
'nav.contextMemory': '活动',
|
||||||
'nav.coreMemory': '核心记忆',
|
'nav.coreMemory': '核心记忆',
|
||||||
@@ -1430,6 +1813,7 @@ const i18n = {
|
|||||||
'title.hookManager': '钩子管理',
|
'title.hookManager': '钩子管理',
|
||||||
'title.memoryModule': '记忆模块',
|
'title.memoryModule': '记忆模块',
|
||||||
'title.promptHistory': '提示历史',
|
'title.promptHistory': '提示历史',
|
||||||
|
'title.codexLensManager': 'CodexLens 管理',
|
||||||
|
|
||||||
// Search
|
// Search
|
||||||
'search.placeholder': '搜索...',
|
'search.placeholder': '搜索...',
|
||||||
@@ -1547,6 +1931,7 @@ const i18n = {
|
|||||||
'cli.default': '默认',
|
'cli.default': '默认',
|
||||||
'cli.install': '安装',
|
'cli.install': '安装',
|
||||||
'cli.uninstall': '卸载',
|
'cli.uninstall': '卸载',
|
||||||
|
'cli.openManager': '管理',
|
||||||
'cli.initIndex': '初始化索引',
|
'cli.initIndex': '初始化索引',
|
||||||
'cli.geminiDesc': 'Google AI 代码分析',
|
'cli.geminiDesc': 'Google AI 代码分析',
|
||||||
'cli.qwenDesc': '阿里通义 AI 助手',
|
'cli.qwenDesc': '阿里通义 AI 助手',
|
||||||
@@ -1555,12 +1940,19 @@ const i18n = {
|
|||||||
'cli.codexLensDescFull': '全文代码搜索引擎',
|
'cli.codexLensDescFull': '全文代码搜索引擎',
|
||||||
'cli.semanticDesc': 'AI 驱动的代码理解',
|
'cli.semanticDesc': 'AI 驱动的代码理解',
|
||||||
'cli.semanticDescFull': '自然语言代码搜索',
|
'cli.semanticDescFull': '自然语言代码搜索',
|
||||||
|
'cli.apiEndpoints': 'API 端点',
|
||||||
|
'cli.configured': '已配置',
|
||||||
|
'cli.addToCli': '添加到 CLI',
|
||||||
|
'cli.enabled': '已启用',
|
||||||
|
'cli.disabled': '已禁用',
|
||||||
|
|
||||||
// CodexLens 配置
|
// CodexLens 配置
|
||||||
'codexlens.config': 'CodexLens 配置',
|
'codexlens.config': 'CodexLens 配置',
|
||||||
|
'codexlens.configDesc': '管理代码索引、语义搜索和嵌入模型',
|
||||||
'codexlens.status': '状态',
|
'codexlens.status': '状态',
|
||||||
'codexlens.installed': '已安装',
|
'codexlens.installed': '已安装',
|
||||||
'codexlens.notInstalled': '未安装',
|
'codexlens.notInstalled': '未安装',
|
||||||
|
'codexlens.installFirst': '安装 CodexLens 以访问语义搜索和模型管理功能',
|
||||||
'codexlens.indexes': '索引',
|
'codexlens.indexes': '索引',
|
||||||
'codexlens.currentWorkspace': '当前工作区',
|
'codexlens.currentWorkspace': '当前工作区',
|
||||||
'codexlens.indexStoragePath': '索引存储路径',
|
'codexlens.indexStoragePath': '索引存储路径',
|
||||||
@@ -1569,6 +1961,8 @@ const i18n = {
|
|||||||
'codexlens.newStoragePath': '新存储路径',
|
'codexlens.newStoragePath': '新存储路径',
|
||||||
'codexlens.pathPlaceholder': '例如:/path/to/indexes 或 ~/.codexlens/indexes',
|
'codexlens.pathPlaceholder': '例如:/path/to/indexes 或 ~/.codexlens/indexes',
|
||||||
'codexlens.pathInfo': '支持 ~ 表示用户目录。更改立即生效。',
|
'codexlens.pathInfo': '支持 ~ 表示用户目录。更改立即生效。',
|
||||||
|
'codexlens.pathUnchanged': '路径未变更',
|
||||||
|
'codexlens.pathEmpty': '路径不能为空',
|
||||||
'codexlens.migrationRequired': '需要迁移',
|
'codexlens.migrationRequired': '需要迁移',
|
||||||
'codexlens.migrationWarning': '更改路径后,需要为每个工作区重新初始化索引。',
|
'codexlens.migrationWarning': '更改路径后,需要为每个工作区重新初始化索引。',
|
||||||
'codexlens.actions': '操作',
|
'codexlens.actions': '操作',
|
||||||
@@ -1576,6 +1970,50 @@ const i18n = {
|
|||||||
'codexlens.cleanCurrentWorkspace': '清理当前工作空间',
|
'codexlens.cleanCurrentWorkspace': '清理当前工作空间',
|
||||||
'codexlens.cleanAllIndexes': '清理所有索引',
|
'codexlens.cleanAllIndexes': '清理所有索引',
|
||||||
'codexlens.installCodexLens': '安装 CodexLens',
|
'codexlens.installCodexLens': '安装 CodexLens',
|
||||||
|
'codexlens.createIndex': '创建索引',
|
||||||
|
'codexlens.embeddingBackend': '嵌入后端',
|
||||||
|
'codexlens.localFastembed': '本地 (FastEmbed)',
|
||||||
|
'codexlens.apiLitellm': 'API (LiteLLM)',
|
||||||
|
'codexlens.backendHint': '选择本地模型或远程 API 端点',
|
||||||
|
'codexlens.noApiModels': '未配置 API 嵌入模型',
|
||||||
|
'codexlens.embeddingModel': '嵌入模型',
|
||||||
|
'codexlens.modelHint': '选择向量搜索的嵌入模型(带 ✓ 的已安装)',
|
||||||
|
'codexlens.concurrency': 'API 并发数',
|
||||||
|
'codexlens.concurrencyHint': '并行 API 调用数量。较高的值可加速索引但可能触发速率限制。',
|
||||||
|
'codexlens.concurrencyCustom': '自定义',
|
||||||
|
'codexlens.rotation': '多供应商轮训',
|
||||||
|
'codexlens.rotationDesc': '聚合多个 API 供应商和密钥进行并行嵌入生成',
|
||||||
|
'codexlens.rotationEnabled': '启用轮训',
|
||||||
|
'codexlens.rotationStrategy': '轮训策略',
|
||||||
|
'codexlens.strategyRoundRobin': '轮询',
|
||||||
|
'codexlens.strategyLatencyAware': '延迟感知',
|
||||||
|
'codexlens.strategyWeightedRandom': '加权随机',
|
||||||
|
'codexlens.targetModel': '目标模型',
|
||||||
|
'codexlens.targetModelHint': '所有供应商应支持的模型名称(例如 qwen3-embedding)',
|
||||||
|
'codexlens.cooldownSeconds': '冷却时间(秒)',
|
||||||
|
'codexlens.cooldownHint': '速率限制后的默认冷却时间(推荐 60 秒)',
|
||||||
|
'codexlens.rotationProviders': '轮训供应商',
|
||||||
|
'codexlens.addProvider': '添加供应商',
|
||||||
|
'codexlens.noRotationProviders': '未配置轮训供应商',
|
||||||
|
'codexlens.providerWeight': '权重',
|
||||||
|
'codexlens.maxConcurrentPerKey': '每密钥最大并发',
|
||||||
|
'codexlens.useAllKeys': '使用所有密钥',
|
||||||
|
'codexlens.selectKeys': '选择密钥',
|
||||||
|
'codexlens.configureRotation': '配置轮训',
|
||||||
|
'codexlens.configureInApiSettings': '在 API 设置中配置',
|
||||||
|
'codexlens.rotationSaved': '轮训配置保存成功',
|
||||||
|
'codexlens.endpointsSynced': '个端点已同步到 CodexLens',
|
||||||
|
'codexlens.syncFailed': '同步失败',
|
||||||
|
'codexlens.rotationDeleted': '轮训配置已删除',
|
||||||
|
'codexlens.totalEndpoints': '总端点数',
|
||||||
|
'codexlens.fullIndex': '全部',
|
||||||
|
'codexlens.vectorIndex': '向量',
|
||||||
|
'codexlens.ftsIndex': 'FTS',
|
||||||
|
'codexlens.fullIndexDesc': 'FTS + 语义搜索(推荐)',
|
||||||
|
'codexlens.vectorIndexDesc': '仅语义嵌入搜索',
|
||||||
|
'codexlens.ftsIndexDesc': '仅快速全文搜索',
|
||||||
|
'codexlens.indexTypeHint': '完整索引包含 FTS + 语义搜索。仅 FTS 更快但无 AI 搜索功能。',
|
||||||
|
'codexlens.maintenance': '维护',
|
||||||
'codexlens.testSearch': '测试搜索',
|
'codexlens.testSearch': '测试搜索',
|
||||||
'codexlens.testFunctionality': '测试 CodexLens 功能',
|
'codexlens.testFunctionality': '测试 CodexLens 功能',
|
||||||
'codexlens.textSearch': '文本搜索',
|
'codexlens.textSearch': '文本搜索',
|
||||||
@@ -1589,6 +2027,9 @@ const i18n = {
|
|||||||
'codexlens.runSearch': '运行搜索',
|
'codexlens.runSearch': '运行搜索',
|
||||||
'codexlens.results': '结果',
|
'codexlens.results': '结果',
|
||||||
'codexlens.resultsCount': '个结果',
|
'codexlens.resultsCount': '个结果',
|
||||||
|
'codexlens.resultLimit': '数量限制',
|
||||||
|
'codexlens.contentLength': '内容长度',
|
||||||
|
'codexlens.extraFiles': '额外文件',
|
||||||
'codexlens.saveConfig': '保存配置',
|
'codexlens.saveConfig': '保存配置',
|
||||||
'codexlens.searching': '搜索中...',
|
'codexlens.searching': '搜索中...',
|
||||||
'codexlens.searchCompleted': '搜索完成',
|
'codexlens.searchCompleted': '搜索完成',
|
||||||
@@ -1612,8 +2053,29 @@ const i18n = {
|
|||||||
'codexlens.installDeps': '安装依赖',
|
'codexlens.installDeps': '安装依赖',
|
||||||
'codexlens.installDepsPrompt': '是否立即安装?(可能需要几分钟)\n\n点击"取消"将只创建 FTS 索引。',
|
'codexlens.installDepsPrompt': '是否立即安装?(可能需要几分钟)\n\n点击"取消"将只创建 FTS 索引。',
|
||||||
'codexlens.installingDeps': '安装依赖中...',
|
'codexlens.installingDeps': '安装依赖中...',
|
||||||
|
'codexlens.installingMode': '正在安装',
|
||||||
'codexlens.depsInstalled': '依赖安装成功',
|
'codexlens.depsInstalled': '依赖安装成功',
|
||||||
'codexlens.depsInstallFailed': '依赖安装失败',
|
'codexlens.depsInstallFailed': '依赖安装失败',
|
||||||
|
|
||||||
|
// GPU 模式选择
|
||||||
|
'codexlens.selectGpuMode': '选择加速模式',
|
||||||
|
'codexlens.cpuModeDesc': '标准 CPU 处理',
|
||||||
|
'codexlens.directmlModeDesc': 'Windows GPU(NVIDIA/AMD/Intel)',
|
||||||
|
'codexlens.cudaModeDesc': 'NVIDIA GPU(需要 CUDA Toolkit)',
|
||||||
|
'common.recommended': '推荐',
|
||||||
|
'common.unavailable': '不可用',
|
||||||
|
'common.auto': '自动',
|
||||||
|
|
||||||
|
// GPU 设备选择
|
||||||
|
'codexlens.selectGpuDevice': '选择 GPU 设备',
|
||||||
|
'codexlens.discrete': '独立显卡',
|
||||||
|
'codexlens.integrated': '集成显卡',
|
||||||
|
'codexlens.selectingGpu': '选择 GPU 中...',
|
||||||
|
'codexlens.gpuSelected': 'GPU 已选择',
|
||||||
|
'codexlens.resettingGpu': '重置 GPU 选择中...',
|
||||||
|
'codexlens.gpuReset': 'GPU 选择已重置为自动',
|
||||||
|
'codexlens.resetToAuto': '重置为自动',
|
||||||
|
|
||||||
'codexlens.modelManagement': '模型管理',
|
'codexlens.modelManagement': '模型管理',
|
||||||
'codexlens.loadingModels': '加载模型中...',
|
'codexlens.loadingModels': '加载模型中...',
|
||||||
'codexlens.downloadModel': '下载',
|
'codexlens.downloadModel': '下载',
|
||||||
@@ -1628,6 +2090,35 @@ const i18n = {
|
|||||||
'codexlens.modelListError': '加载模型列表失败',
|
'codexlens.modelListError': '加载模型列表失败',
|
||||||
'codexlens.noModelsAvailable': '没有可用模型',
|
'codexlens.noModelsAvailable': '没有可用模型',
|
||||||
|
|
||||||
|
// 模型下载进度
|
||||||
|
'codexlens.downloadingModel': '正在下载',
|
||||||
|
'codexlens.connectingToHuggingFace': '正在连接 Hugging Face...',
|
||||||
|
'codexlens.downloadTimeEstimate': '预计时间',
|
||||||
|
'codexlens.manualDownloadHint': '手动下载',
|
||||||
|
'codexlens.downloadingModelFiles': '正在下载模型文件...',
|
||||||
|
'codexlens.downloadingWeights': '正在下载模型权重...',
|
||||||
|
'codexlens.downloadingTokenizer': '正在下载分词器...',
|
||||||
|
'codexlens.verifyingModel': '正在验证模型...',
|
||||||
|
'codexlens.finalizingDownload': '正在完成...',
|
||||||
|
'codexlens.downloadComplete': '下载完成!',
|
||||||
|
'codexlens.downloadFailed': '下载失败',
|
||||||
|
'codexlens.manualDownloadOptions': '手动下载选项',
|
||||||
|
'codexlens.cliDownload': '命令行',
|
||||||
|
'codexlens.huggingfaceDownload': 'Hugging Face',
|
||||||
|
'codexlens.downloadCanceled': '下载已取消',
|
||||||
|
|
||||||
|
// 手动下载指南
|
||||||
|
'codexlens.manualDownloadGuide': '手动下载指南',
|
||||||
|
'codexlens.cliMethod': '命令行(推荐)',
|
||||||
|
'codexlens.cliMethodDesc': '在终端运行,显示下载进度:',
|
||||||
|
'codexlens.pythonMethod': 'Python 脚本',
|
||||||
|
'codexlens.pythonMethodDesc': '使用 Python 预下载模型:',
|
||||||
|
'codexlens.hfHubMethod': 'Hugging Face Hub CLI',
|
||||||
|
'codexlens.hfHubMethodDesc': '使用 huggingface-cli 下载,支持断点续传:',
|
||||||
|
'codexlens.modelLinks': '模型直链',
|
||||||
|
'codexlens.cacheLocation': '模型存储位置',
|
||||||
|
'common.copied': '已复制到剪贴板',
|
||||||
|
|
||||||
// CodexLens 索引进度
|
// CodexLens 索引进度
|
||||||
'codexlens.indexing': '索引中',
|
'codexlens.indexing': '索引中',
|
||||||
'codexlens.indexingDesc': '正在为工作区构建代码索引',
|
'codexlens.indexingDesc': '正在为工作区构建代码索引',
|
||||||
@@ -1636,6 +2127,45 @@ const i18n = {
|
|||||||
'codexlens.indexComplete': '索引完成',
|
'codexlens.indexComplete': '索引完成',
|
||||||
'codexlens.indexSuccess': '索引创建成功',
|
'codexlens.indexSuccess': '索引创建成功',
|
||||||
'codexlens.indexFailed': '索引失败',
|
'codexlens.indexFailed': '索引失败',
|
||||||
|
'codexlens.embeddingsFailed': '嵌入生成失败',
|
||||||
|
'codexlens.ftsSuccessEmbeddingsFailed': 'FTS 索引已创建,但嵌入生成失败',
|
||||||
|
|
||||||
|
// CodexLens 安装
|
||||||
|
'codexlens.installDesc': '基于 Python 的代码索引引擎',
|
||||||
|
'codexlens.whatWillBeInstalled': '将安装的内容:',
|
||||||
|
'codexlens.pythonVenv': 'Python 虚拟环境',
|
||||||
|
'codexlens.pythonVenvDesc': '隔离的 Python 环境',
|
||||||
|
'codexlens.codexlensPackage': 'CodexLens 包',
|
||||||
|
'codexlens.codexlensPackageDesc': '代码索引和搜索引擎',
|
||||||
|
'codexlens.sqliteFtsDesc': '全文搜索数据库',
|
||||||
|
'codexlens.installLocation': '安装位置',
|
||||||
|
'codexlens.installTime': '首次安装可能需要 2-3 分钟下载和配置 Python 包。',
|
||||||
|
'codexlens.startingInstall': '正在启动安装...',
|
||||||
|
'codexlens.installing': '安装中...',
|
||||||
|
'codexlens.creatingVenv': '正在创建虚拟环境...',
|
||||||
|
'codexlens.installingPip': '正在安装 pip 包...',
|
||||||
|
'codexlens.installingPackage': '正在安装 CodexLens 包...',
|
||||||
|
'codexlens.settingUpDeps': '正在配置 Python 依赖...',
|
||||||
|
'codexlens.installComplete': '安装完成!',
|
||||||
|
'codexlens.installSuccess': 'CodexLens 安装成功!',
|
||||||
|
'codexlens.installNow': '立即安装',
|
||||||
|
'codexlens.accelerator': '加速器',
|
||||||
|
|
||||||
|
// CodexLens 卸载
|
||||||
|
'codexlens.uninstall': '卸载',
|
||||||
|
'codexlens.uninstallDesc': '移除 CodexLens 及所有数据',
|
||||||
|
'codexlens.whatWillBeRemoved': '将被移除的内容:',
|
||||||
|
'codexlens.removeVenv': '虚拟环境 ~/.codexlens/venv',
|
||||||
|
'codexlens.removeData': '所有 CodexLens 索引数据和数据库',
|
||||||
|
'codexlens.removeConfig': '配置文件和语义搜索模型',
|
||||||
|
'codexlens.removing': '正在删除文件...',
|
||||||
|
'codexlens.uninstalling': '正在卸载...',
|
||||||
|
'codexlens.removingVenv': '正在删除虚拟环境...',
|
||||||
|
'codexlens.removingData': '正在删除索引数据...',
|
||||||
|
'codexlens.removingConfig': '正在清理配置文件...',
|
||||||
|
'codexlens.finalizing': '正在完成卸载...',
|
||||||
|
'codexlens.uninstallComplete': '卸载完成!',
|
||||||
|
'codexlens.uninstallSuccess': 'CodexLens 卸载成功!',
|
||||||
|
|
||||||
// 索引管理器
|
// 索引管理器
|
||||||
'index.manager': '索引管理器',
|
'index.manager': '索引管理器',
|
||||||
@@ -1668,9 +2198,12 @@ const i18n = {
|
|||||||
'index.fullDesc': 'FTS + 语义搜索(推荐)',
|
'index.fullDesc': 'FTS + 语义搜索(推荐)',
|
||||||
'index.selectModel': '选择嵌入模型',
|
'index.selectModel': '选择嵌入模型',
|
||||||
'index.modelCode': '代码优化 (768维)',
|
'index.modelCode': '代码优化 (768维)',
|
||||||
|
'index.modelBase': '通用基础 (768维)',
|
||||||
'index.modelFast': '快速轻量 (384维)',
|
'index.modelFast': '快速轻量 (384维)',
|
||||||
'index.modelMultilingual': '多语言 (1024维)',
|
'index.modelMinilm': 'MiniLM (384维)',
|
||||||
'index.modelBalanced': '高精度 (1024维)',
|
'index.modelMultilingual': '多语言 (1024维) ⚠️',
|
||||||
|
'index.modelBalanced': '高精度 (1024维) ⚠️',
|
||||||
|
'index.dimensionWarning': '1024维模型需要更多资源',
|
||||||
|
|
||||||
// Semantic Search 配置
|
// Semantic Search 配置
|
||||||
'semantic.settings': '语义搜索设置',
|
'semantic.settings': '语义搜索设置',
|
||||||
@@ -1698,6 +2231,19 @@ const i18n = {
|
|||||||
'lang.windowsDisableSuccess': 'Windows 平台规范已禁用',
|
'lang.windowsDisableSuccess': 'Windows 平台规范已禁用',
|
||||||
'lang.windowsEnableFailed': '启用 Windows 平台规范失败',
|
'lang.windowsEnableFailed': '启用 Windows 平台规范失败',
|
||||||
'lang.windowsDisableFailed': '禁用 Windows 平台规范失败',
|
'lang.windowsDisableFailed': '禁用 Windows 平台规范失败',
|
||||||
|
'lang.installRequired': '请运行 "ccw install" 以启用此功能',
|
||||||
|
|
||||||
|
// CCW 安装状态
|
||||||
|
'status.installed': '已安装',
|
||||||
|
'status.incomplete': '不完整',
|
||||||
|
'status.notInstalled': '未安装',
|
||||||
|
'status.ccwInstall': 'CCW 工作流',
|
||||||
|
'status.ccwInstallDesc': '完整功能所需的工作流文件',
|
||||||
|
'status.required': '必需',
|
||||||
|
'status.filesMissing': '个文件缺失',
|
||||||
|
'status.missingFiles': '缺失文件',
|
||||||
|
'status.runToFix': '修复命令',
|
||||||
|
|
||||||
'cli.promptFormat': '提示词格式',
|
'cli.promptFormat': '提示词格式',
|
||||||
'cli.promptFormatDesc': '多轮对话拼接格式',
|
'cli.promptFormatDesc': '多轮对话拼接格式',
|
||||||
'cli.storageBackend': '存储后端',
|
'cli.storageBackend': '存储后端',
|
||||||
@@ -1710,7 +2256,9 @@ const i18n = {
|
|||||||
'cli.recursiveQueryDesc': '聚合显示父项目和子项目的 CLI 历史与内存数据',
|
'cli.recursiveQueryDesc': '聚合显示父项目和子项目的 CLI 历史与内存数据',
|
||||||
'cli.maxContextFiles': '最大上下文文件数',
|
'cli.maxContextFiles': '最大上下文文件数',
|
||||||
'cli.maxContextFilesDesc': '智能上下文包含的最大文件数',
|
'cli.maxContextFilesDesc': '智能上下文包含的最大文件数',
|
||||||
|
'cli.codeIndexMcp': '代码索引 MCP',
|
||||||
|
'cli.codeIndexMcpDesc': '代码搜索提供者 (更新 CLAUDE.md 的 context-tools 引用)',
|
||||||
|
|
||||||
// CCW Install
|
// CCW Install
|
||||||
'ccw.install': 'CCW 安装',
|
'ccw.install': 'CCW 安装',
|
||||||
'ccw.installations': '个安装',
|
'ccw.installations': '个安装',
|
||||||
@@ -2552,6 +3100,205 @@ const i18n = {
|
|||||||
'claudeManager.saved': 'File saved successfully',
|
'claudeManager.saved': 'File saved successfully',
|
||||||
'claudeManager.saveError': 'Failed to save file',
|
'claudeManager.saveError': 'Failed to save file',
|
||||||
|
|
||||||
|
|
||||||
|
// API Settings
|
||||||
|
'nav.apiSettings': 'API 设置',
|
||||||
|
'title.apiSettings': 'API 设置',
|
||||||
|
'apiSettings.providers': '提供商',
|
||||||
|
'apiSettings.customEndpoints': '自定义端点',
|
||||||
|
'apiSettings.cacheSettings': '缓存设置',
|
||||||
|
'apiSettings.addProvider': '添加提供商',
|
||||||
|
'apiSettings.editProvider': '编辑提供商',
|
||||||
|
'apiSettings.deleteProvider': '删除提供商',
|
||||||
|
'apiSettings.addEndpoint': '添加端点',
|
||||||
|
'apiSettings.editEndpoint': '编辑端点',
|
||||||
|
'apiSettings.deleteEndpoint': '删除端点',
|
||||||
|
'apiSettings.providerType': '提供商类型',
|
||||||
|
'apiSettings.apiFormat': 'API 格式',
|
||||||
|
'apiSettings.compatible': '兼容',
|
||||||
|
'apiSettings.customFormat': '自定义格式',
|
||||||
|
'apiSettings.apiFormatHint': '大多数供应商(DeepSeek、Ollama 等)使用 OpenAI 兼容格式',
|
||||||
|
'apiSettings.displayName': '显示名称',
|
||||||
|
'apiSettings.apiKey': 'API 密钥',
|
||||||
|
'apiSettings.apiBaseUrl': 'API 基础 URL',
|
||||||
|
'apiSettings.useEnvVar': '使用环境变量',
|
||||||
|
'apiSettings.enableProvider': '启用提供商',
|
||||||
|
'apiSettings.advancedSettings': '高级设置',
|
||||||
|
'apiSettings.basicInfo': '基本信息',
|
||||||
|
'apiSettings.endpointSettings': '端点设置',
|
||||||
|
'apiSettings.timeout': '超时时间(秒)',
|
||||||
|
'apiSettings.seconds': '秒',
|
||||||
|
'apiSettings.timeoutHint': '请求超时时间,单位秒(默认:300)',
|
||||||
|
'apiSettings.maxRetries': '最大重试次数',
|
||||||
|
'apiSettings.maxRetriesHint': '失败后最大重试次数',
|
||||||
|
'apiSettings.organization': '组织 ID',
|
||||||
|
'apiSettings.organizationHint': 'OpenAI 组织 ID(org-...)',
|
||||||
|
'apiSettings.apiVersion': 'API 版本',
|
||||||
|
'apiSettings.apiVersionHint': 'Azure API 版本(如 2024-02-01)',
|
||||||
|
'apiSettings.rpm': 'RPM 限制',
|
||||||
|
'apiSettings.tpm': 'TPM 限制',
|
||||||
|
'apiSettings.unlimited': '无限制',
|
||||||
|
'apiSettings.proxy': '代理服务器',
|
||||||
|
'apiSettings.proxyHint': 'HTTP 代理服务器 URL',
|
||||||
|
'apiSettings.customHeaders': '自定义请求头',
|
||||||
|
'apiSettings.customHeadersHint': '自定义 HTTP 请求头的 JSON 对象',
|
||||||
|
'apiSettings.invalidJsonHeaders': '自定义请求头 JSON 格式无效',
|
||||||
|
'apiSettings.searchProviders': '搜索供应商...',
|
||||||
|
'apiSettings.selectProvider': '选择供应商',
|
||||||
|
'apiSettings.selectProviderHint': '从列表中选择一个供应商来查看和管理其设置',
|
||||||
|
'apiSettings.noProvidersFound': '未找到供应商',
|
||||||
|
'apiSettings.llmModels': '大语言模型',
|
||||||
|
'apiSettings.embeddingModels': '向量模型',
|
||||||
|
'apiSettings.manageModels': '管理',
|
||||||
|
'apiSettings.addModel': '添加模型',
|
||||||
|
'apiSettings.multiKeySettings': '多密钥设置',
|
||||||
|
'apiSettings.noModels': '暂无模型配置',
|
||||||
|
'apiSettings.previewModel': '预览',
|
||||||
|
'apiSettings.modelSettings': '模型设置',
|
||||||
|
'apiSettings.deleteModel': '删除模型',
|
||||||
|
'apiSettings.endpointPreview': '端点预览',
|
||||||
|
'apiSettings.modelBaseUrlOverride': '基础 URL 覆盖',
|
||||||
|
'apiSettings.modelBaseUrlHint': '为此模型覆盖供应商的基础 URL(留空则使用供应商默认值)',
|
||||||
|
'apiSettings.providerUpdated': '供应商已更新',
|
||||||
|
'apiSettings.syncToCodexLens': '同步到 CodexLens',
|
||||||
|
'apiSettings.configSynced': '配置已同步到 CodexLens',
|
||||||
|
'apiSettings.preview': '预览',
|
||||||
|
'apiSettings.used': '已使用',
|
||||||
|
'apiSettings.total': '总计',
|
||||||
|
'apiSettings.testConnection': '测试连接',
|
||||||
|
'apiSettings.endpointId': '端点 ID',
|
||||||
|
'apiSettings.endpointIdHint': '用法: ccw cli -p "..." --model <端点ID>',
|
||||||
|
'apiSettings.endpoints': '端点',
|
||||||
|
'apiSettings.addEndpointHint': '创建用于 CLI 的自定义端点别名',
|
||||||
|
'apiSettings.endpointModel': '模型',
|
||||||
|
'apiSettings.selectEndpoint': '选择端点',
|
||||||
|
'apiSettings.selectEndpointHint': '从列表中选择一个端点以查看或编辑其设置',
|
||||||
|
'apiSettings.provider': '提供商',
|
||||||
|
'apiSettings.model': '模型',
|
||||||
|
'apiSettings.selectModel': '选择模型',
|
||||||
|
'apiSettings.noModelsConfigured': '该供应商未配置模型',
|
||||||
|
'apiSettings.cacheStrategy': '缓存策略',
|
||||||
|
'apiSettings.enableContextCaching': '启用上下文缓存',
|
||||||
|
'apiSettings.cacheTTL': 'TTL (分钟)',
|
||||||
|
'apiSettings.cacheMaxSize': '最大大小 (KB)',
|
||||||
|
'apiSettings.autoCachePatterns': '自动缓存文件模式',
|
||||||
|
'apiSettings.enableGlobalCaching': '启用全局缓存',
|
||||||
|
'apiSettings.cacheUsed': '已使用',
|
||||||
|
'apiSettings.cacheEntries': '条目数',
|
||||||
|
'apiSettings.clearCache': '清除缓存',
|
||||||
|
'apiSettings.noProviders': '未配置提供商',
|
||||||
|
'apiSettings.noEndpoints': '未配置端点',
|
||||||
|
'apiSettings.enabled': '已启用',
|
||||||
|
'apiSettings.disabled': '已禁用',
|
||||||
|
'apiSettings.cacheEnabled': '缓存已启用',
|
||||||
|
'apiSettings.cacheDisabled': '缓存已禁用',
|
||||||
|
'apiSettings.providerSaved': '提供商保存成功',
|
||||||
|
'apiSettings.providerDeleted': '提供商删除成功',
|
||||||
|
'apiSettings.apiBaseUpdated': 'API 基础 URL 更新成功',
|
||||||
|
'apiSettings.endpointSaved': '端点保存成功',
|
||||||
|
'apiSettings.endpointDeleted': '端点删除成功',
|
||||||
|
'apiSettings.cacheCleared': '缓存清除成功',
|
||||||
|
'apiSettings.cacheSettingsUpdated': '缓存设置已更新',
|
||||||
|
'apiSettings.embeddingPool': '高可用嵌入',
|
||||||
|
'apiSettings.embeddingPoolDesc': '自动轮训相同模型的供应商',
|
||||||
|
'apiSettings.targetModel': '目标模型',
|
||||||
|
'apiSettings.discoveredProviders': '发现的供应商',
|
||||||
|
'apiSettings.autoDiscover': '自动发现供应商',
|
||||||
|
'apiSettings.excludeProvider': '排除',
|
||||||
|
'apiSettings.defaultCooldown': '冷却时间(秒)',
|
||||||
|
'apiSettings.defaultConcurrent': '每密钥并发数',
|
||||||
|
'apiSettings.poolEnabled': '启用嵌入池',
|
||||||
|
'apiSettings.noProvidersFound': '未找到提供此模型的供应商',
|
||||||
|
'apiSettings.poolSaved': '嵌入池配置已保存',
|
||||||
|
'apiSettings.strategy': '策略',
|
||||||
|
'apiSettings.providerKeys': '密钥',
|
||||||
|
'apiSettings.selectTargetModel': '选择目标模型',
|
||||||
|
'apiSettings.confirmDeleteProvider': '确定要删除此提供商吗?',
|
||||||
|
'apiSettings.confirmDeleteEndpoint': '确定要删除此端点吗?',
|
||||||
|
'apiSettings.confirmClearCache': '确定要清除缓存吗?',
|
||||||
|
'apiSettings.connectionSuccess': '连接成功',
|
||||||
|
'apiSettings.connectionFailed': '连接失败',
|
||||||
|
'apiSettings.saveProviderFirst': '请先保存提供商',
|
||||||
|
'apiSettings.addProviderFirst': '请先添加提供商',
|
||||||
|
'apiSettings.failedToLoad': '加载 API 设置失败',
|
||||||
|
'apiSettings.toggleVisibility': '切换可见性',
|
||||||
|
'apiSettings.noProvidersHint': '添加 API 提供商以开始使用',
|
||||||
|
'apiSettings.noEndpointsHint': '创建自定义端点以快速访问模型',
|
||||||
|
'apiSettings.cache': '缓存',
|
||||||
|
'apiSettings.off': '关闭',
|
||||||
|
'apiSettings.used': '已用',
|
||||||
|
'apiSettings.total': '总计',
|
||||||
|
'apiSettings.cacheUsage': '使用率',
|
||||||
|
'apiSettings.cacheSize': '大小',
|
||||||
|
'apiSettings.endpointsDescription': '管理自定义 API 端点以快速访问模型',
|
||||||
|
'apiSettings.totalEndpoints': '总端点数',
|
||||||
|
'apiSettings.cachedEndpoints': '缓存端点数',
|
||||||
|
'apiSettings.cacheTabHint': '在主面板中配置全局缓存设置并查看统计信息',
|
||||||
|
'apiSettings.cacheDescription': '管理响应缓存以提高性能并降低成本',
|
||||||
|
'apiSettings.cachedEntries': '缓存条目',
|
||||||
|
'apiSettings.storageUsed': '已用存储',
|
||||||
|
'apiSettings.cacheActions': '缓存操作',
|
||||||
|
'apiSettings.cacheStatistics': '缓存统计',
|
||||||
|
'apiSettings.globalCache': '全局缓存',
|
||||||
|
|
||||||
|
// Multi-key management
|
||||||
|
'apiSettings.apiKeys': 'API 密钥',
|
||||||
|
'apiSettings.addKey': '添加密钥',
|
||||||
|
'apiSettings.keyLabel': '标签',
|
||||||
|
'apiSettings.keyValue': 'API 密钥',
|
||||||
|
'apiSettings.keyWeight': '权重',
|
||||||
|
'apiSettings.removeKey': '移除',
|
||||||
|
'apiSettings.noKeys': '未配置 API 密钥',
|
||||||
|
'apiSettings.primaryKey': '主密钥',
|
||||||
|
|
||||||
|
// Routing strategy
|
||||||
|
'apiSettings.routingStrategy': '路由策略',
|
||||||
|
'apiSettings.simpleShuffleRouting': '简单随机',
|
||||||
|
'apiSettings.weightedRouting': '权重分配',
|
||||||
|
'apiSettings.latencyRouting': '延迟优先',
|
||||||
|
'apiSettings.costRouting': '成本优先',
|
||||||
|
'apiSettings.leastBusyRouting': '最少并发',
|
||||||
|
'apiSettings.routingHint': '如何在多个 API 密钥间分配请求',
|
||||||
|
|
||||||
|
// Health check
|
||||||
|
'apiSettings.healthCheck': '健康检查',
|
||||||
|
'apiSettings.enableHealthCheck': '启用健康检查',
|
||||||
|
'apiSettings.healthInterval': '检查间隔(秒)',
|
||||||
|
'apiSettings.healthCooldown': '冷却时间(秒)',
|
||||||
|
'apiSettings.failureThreshold': '失败阈值',
|
||||||
|
'apiSettings.healthStatus': '状态',
|
||||||
|
'apiSettings.healthy': '健康',
|
||||||
|
'apiSettings.unhealthy': '异常',
|
||||||
|
'apiSettings.unknown': '未知',
|
||||||
|
'apiSettings.lastCheck': '最后检查',
|
||||||
|
'apiSettings.testKey': '测试密钥',
|
||||||
|
'apiSettings.testingKey': '测试中...',
|
||||||
|
'apiSettings.keyValid': '密钥有效',
|
||||||
|
'apiSettings.keyInvalid': '密钥无效',
|
||||||
|
|
||||||
|
// Embedding models
|
||||||
|
'apiSettings.embeddingDimensions': '向量维度',
|
||||||
|
'apiSettings.embeddingMaxTokens': '最大 Token',
|
||||||
|
'apiSettings.selectEmbeddingModel': '选择嵌入模型',
|
||||||
|
|
||||||
|
// Model modal
|
||||||
|
'apiSettings.addLlmModel': '添加 LLM 模型',
|
||||||
|
'apiSettings.addEmbeddingModel': '添加嵌入模型',
|
||||||
|
'apiSettings.modelId': '模型 ID',
|
||||||
|
'apiSettings.modelName': '显示名称',
|
||||||
|
'apiSettings.modelSeries': '模型系列',
|
||||||
|
'apiSettings.selectFromPresets': '从预设选择',
|
||||||
|
'apiSettings.customModel': '自定义模型',
|
||||||
|
'apiSettings.capabilities': '能力',
|
||||||
|
'apiSettings.streaming': '流式输出',
|
||||||
|
'apiSettings.functionCalling': '函数调用',
|
||||||
|
'apiSettings.vision': '视觉能力',
|
||||||
|
'apiSettings.contextWindow': '上下文窗口',
|
||||||
|
'apiSettings.description': '描述',
|
||||||
|
'apiSettings.optional': '可选',
|
||||||
|
'apiSettings.modelIdExists': '模型 ID 已存在',
|
||||||
|
'apiSettings.useModelTreeToManage': '使用模型树管理各个模型',
|
||||||
|
|
||||||
// Common
|
// Common
|
||||||
'common.cancel': '取消',
|
'common.cancel': '取消',
|
||||||
'common.optional': '(可选)',
|
'common.optional': '(可选)',
|
||||||
@@ -2575,6 +3322,7 @@ const i18n = {
|
|||||||
'common.saveFailed': '保存失败',
|
'common.saveFailed': '保存失败',
|
||||||
'common.unknownError': '未知错误',
|
'common.unknownError': '未知错误',
|
||||||
'common.exception': '异常',
|
'common.exception': '异常',
|
||||||
|
'common.status': '状态',
|
||||||
|
|
||||||
// Core Memory
|
// Core Memory
|
||||||
'title.coreMemory': '核心记忆',
|
'title.coreMemory': '核心记忆',
|
||||||
|
|||||||
3362
ccw/src/templates/dashboard-js/views/api-settings.js
Normal file
3362
ccw/src/templates/dashboard-js/views/api-settings.js
Normal file
File diff suppressed because it is too large
Load Diff
@@ -102,6 +102,7 @@ async function loadClaudeFiles() {
|
|||||||
updateClaudeBadge(); // Update navigation badge
|
updateClaudeBadge(); // Update navigation badge
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.error('Error loading CLAUDE.md files:', error);
|
console.error('Error loading CLAUDE.md files:', error);
|
||||||
|
showRefreshToast(t('claudeManager.loadError') || 'Failed to load files', 'error');
|
||||||
addGlobalNotification('error', t('claudeManager.loadError'), null, 'CLAUDE.md');
|
addGlobalNotification('error', t('claudeManager.loadError'), null, 'CLAUDE.md');
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -113,6 +114,7 @@ async function refreshClaudeFiles() {
|
|||||||
renderFileViewer();
|
renderFileViewer();
|
||||||
renderFileMetadata();
|
renderFileMetadata();
|
||||||
if (window.lucide) lucide.createIcons();
|
if (window.lucide) lucide.createIcons();
|
||||||
|
showRefreshToast(t('claudeManager.refreshed') || 'Files refreshed', 'success');
|
||||||
addGlobalNotification('success', t('claudeManager.refreshed'), null, 'CLAUDE.md');
|
addGlobalNotification('success', t('claudeManager.refreshed'), null, 'CLAUDE.md');
|
||||||
// Load freshness data in background
|
// Load freshness data in background
|
||||||
loadFreshnessDataAsync();
|
loadFreshnessDataAsync();
|
||||||
@@ -155,6 +157,7 @@ async function markFileAsUpdated() {
|
|||||||
|
|
||||||
if (!res.ok) throw new Error('Failed to mark file as updated');
|
if (!res.ok) throw new Error('Failed to mark file as updated');
|
||||||
|
|
||||||
|
showRefreshToast(t('claudeManager.markedAsUpdated') || 'Marked as updated', 'success');
|
||||||
addGlobalNotification('success', t('claudeManager.markedAsUpdated') || 'Marked as updated', null, 'CLAUDE.md');
|
addGlobalNotification('success', t('claudeManager.markedAsUpdated') || 'Marked as updated', null, 'CLAUDE.md');
|
||||||
|
|
||||||
// Reload freshness data
|
// Reload freshness data
|
||||||
@@ -163,6 +166,7 @@ async function markFileAsUpdated() {
|
|||||||
renderFileMetadata();
|
renderFileMetadata();
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.error('Error marking file as updated:', error);
|
console.error('Error marking file as updated:', error);
|
||||||
|
showRefreshToast(t('claudeManager.markUpdateError') || 'Failed to mark as updated', 'error');
|
||||||
addGlobalNotification('error', t('claudeManager.markUpdateError') || 'Failed to mark as updated', null, 'CLAUDE.md');
|
addGlobalNotification('error', t('claudeManager.markUpdateError') || 'Failed to mark as updated', null, 'CLAUDE.md');
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -481,10 +485,12 @@ async function saveClaudeFile() {
|
|||||||
selectedFile.stats = calculateFileStats(newContent);
|
selectedFile.stats = calculateFileStats(newContent);
|
||||||
isDirty = false;
|
isDirty = false;
|
||||||
|
|
||||||
|
showRefreshToast(t('claudeManager.saved') || 'File saved', 'success');
|
||||||
addGlobalNotification('success', t('claudeManager.saved'), null, 'CLAUDE.md');
|
addGlobalNotification('success', t('claudeManager.saved'), null, 'CLAUDE.md');
|
||||||
renderFileMetadata();
|
renderFileMetadata();
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.error('Error saving file:', error);
|
console.error('Error saving file:', error);
|
||||||
|
showRefreshToast(t('claudeManager.saveError') || 'Save failed', 'error');
|
||||||
addGlobalNotification('error', t('claudeManager.saveError'), null, 'CLAUDE.md');
|
addGlobalNotification('error', t('claudeManager.saveError'), null, 'CLAUDE.md');
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -733,12 +739,13 @@ async function loadFileContent(filePath) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
function showClaudeNotification(type, message) {
|
function showClaudeNotification(type, message) {
|
||||||
// Use global notification system if available
|
// Show toast for immediate feedback
|
||||||
|
if (typeof showRefreshToast === 'function') {
|
||||||
|
showRefreshToast(message, type);
|
||||||
|
}
|
||||||
|
// Also add to global notification system if available
|
||||||
if (typeof addGlobalNotification === 'function') {
|
if (typeof addGlobalNotification === 'function') {
|
||||||
addGlobalNotification(type, message, null, 'CLAUDE.md');
|
addGlobalNotification(type, message, null, 'CLAUDE.md');
|
||||||
} else {
|
|
||||||
// Fallback to simple alert
|
|
||||||
alert(message);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -822,6 +829,7 @@ async function createNewFile() {
|
|||||||
var modulePath = document.getElementById('modulePath').value;
|
var modulePath = document.getElementById('modulePath').value;
|
||||||
|
|
||||||
if (level === 'module' && !modulePath) {
|
if (level === 'module' && !modulePath) {
|
||||||
|
showRefreshToast(t('claude.modulePathRequired') || 'Module path is required', 'error');
|
||||||
addGlobalNotification('error', t('claude.modulePathRequired') || 'Module path is required', null, 'CLAUDE.md');
|
addGlobalNotification('error', t('claude.modulePathRequired') || 'Module path is required', null, 'CLAUDE.md');
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@@ -841,12 +849,14 @@ async function createNewFile() {
|
|||||||
|
|
||||||
var result = await res.json();
|
var result = await res.json();
|
||||||
closeCreateDialog();
|
closeCreateDialog();
|
||||||
|
showRefreshToast(t('claude.fileCreated') || 'File created successfully', 'success');
|
||||||
addGlobalNotification('success', t('claude.fileCreated') || 'File created successfully', null, 'CLAUDE.md');
|
addGlobalNotification('success', t('claude.fileCreated') || 'File created successfully', null, 'CLAUDE.md');
|
||||||
|
|
||||||
// Refresh file tree
|
// Refresh file tree
|
||||||
await refreshClaudeFiles();
|
await refreshClaudeFiles();
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.error('Error creating file:', error);
|
console.error('Error creating file:', error);
|
||||||
|
showRefreshToast(t('claude.createFileError') || 'Failed to create file', 'error');
|
||||||
addGlobalNotification('error', t('claude.createFileError') || 'Failed to create file', null, 'CLAUDE.md');
|
addGlobalNotification('error', t('claude.createFileError') || 'Failed to create file', null, 'CLAUDE.md');
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -870,6 +880,7 @@ async function confirmDeleteFile() {

     if (!res.ok) throw new Error('Failed to delete file');

+    showRefreshToast(t('claude.fileDeleted') || 'File deleted successfully', 'success');
     addGlobalNotification('success', t('claude.fileDeleted') || 'File deleted successfully', null, 'CLAUDE.md');
     selectedFile = null;

@@ -877,6 +888,7 @@ async function confirmDeleteFile() {
     await refreshClaudeFiles();
   } catch (error) {
     console.error('Error deleting file:', error);
+    showRefreshToast(t('claude.deleteFileError') || 'Failed to delete file', 'error');
     addGlobalNotification('error', t('claude.deleteFileError') || 'Failed to delete file', null, 'CLAUDE.md');
   }
 }
@@ -886,9 +898,11 @@ function copyFileContent() {
   if (!selectedFile || !selectedFile.content) return;

   navigator.clipboard.writeText(selectedFile.content).then(function() {
+    showRefreshToast(t('claude.contentCopied') || 'Content copied to clipboard', 'success');
     addGlobalNotification('success', t('claude.contentCopied') || 'Content copied to clipboard', null, 'CLAUDE.md');
   }).catch(function(error) {
     console.error('Error copying content:', error);
+    showRefreshToast(t('claude.copyError') || 'Failed to copy content', 'error');
     addGlobalNotification('error', t('claude.copyError') || 'Failed to copy content', null, 'CLAUDE.md');
   });
 }
@@ -9,6 +9,26 @@ var ccwEndpointTools = [];
 var cliToolConfig = null; // Store loaded CLI config
 var predefinedModels = {}; // Store predefined models per tool

+// ========== Navigation Helpers ==========
+
+/**
+ * Navigate to CodexLens Manager page
+ */
+function navigateToCodexLensManager() {
+  var navItem = document.querySelector('.nav-item[data-view="codexlens-manager"]');
+  if (navItem) {
+    navItem.click();
+  } else {
+    // Fallback: try to render directly
+    if (typeof renderCodexLensManager === 'function') {
+      currentView = 'codexlens-manager';
+      renderCodexLensManager();
+    } else {
+      showRefreshToast(t('common.error') + ': CodexLens Manager not available', 'error');
+    }
+  }
+}
+
 // ========== CCW Installations ==========
 async function loadCcwInstallations() {
   try {
@@ -39,6 +59,91 @@ async function loadCcwEndpointTools() {
   }
 }

+// ========== LiteLLM API Endpoints ==========
+var litellmApiEndpoints = [];
+var cliCustomEndpoints = [];
+
+async function loadLitellmApiEndpoints() {
+  try {
+    var response = await fetch('/api/litellm-api/config');
+    if (!response.ok) throw new Error('Failed to load LiteLLM endpoints');
+    var data = await response.json();
+    litellmApiEndpoints = data.endpoints || [];
+    window.litellmApiConfig = data;
+    return litellmApiEndpoints;
+  } catch (err) {
+    console.error('Failed to load LiteLLM endpoints:', err);
+    litellmApiEndpoints = [];
+    return [];
+  }
+}
+
+async function loadCliCustomEndpoints() {
+  try {
+    var response = await fetch('/api/cli/endpoints');
+    if (!response.ok) throw new Error('Failed to load CLI custom endpoints');
+    var data = await response.json();
+    cliCustomEndpoints = data.endpoints || [];
+    return cliCustomEndpoints;
+  } catch (err) {
+    console.error('Failed to load CLI custom endpoints:', err);
+    cliCustomEndpoints = [];
+    return [];
+  }
+}
+
+async function toggleEndpointEnabled(endpointId, enabled) {
+  try {
+    var response = await fetch('/api/cli/endpoints/' + endpointId, {
+      method: 'PUT',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({ enabled: enabled })
+    });
+    if (!response.ok) throw new Error('Failed to update endpoint');
+    var data = await response.json();
+    if (data.success) {
+      // Update local state
+      var idx = cliCustomEndpoints.findIndex(function(e) { return e.id === endpointId; });
+      if (idx >= 0) {
+        cliCustomEndpoints[idx].enabled = enabled;
+      }
+      showRefreshToast((enabled ? 'Enabled' : 'Disabled') + ' endpoint: ' + endpointId, 'success');
+    }
+    return data;
+  } catch (err) {
+    showRefreshToast('Failed to update endpoint: ' + err.message, 'error');
+    throw err;
+  }
+}
+
+async function syncEndpointToCliTools(endpoint) {
+  try {
+    var response = await fetch('/api/cli/endpoints', {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({
+        id: endpoint.id,
+        name: endpoint.name,
+        enabled: true
+      })
+    });
+    if (!response.ok) throw new Error('Failed to sync endpoint');
+    var data = await response.json();
+    if (data.success) {
+      cliCustomEndpoints = data.endpoints;
+      showRefreshToast('Endpoint synced to CLI tools: ' + endpoint.id, 'success');
+      renderToolsSection();
+    }
+    return data;
+  } catch (err) {
+    showRefreshToast('Failed to sync endpoint: ' + err.message, 'error');
+    throw err;
+  }
+}
+
+window.toggleEndpointEnabled = toggleEndpointEnabled;
+window.syncEndpointToCliTools = syncEndpointToCliTools;
+
 // ========== CLI Tool Configuration ==========
 async function loadCliToolConfig() {
   try {
@@ -302,7 +407,9 @@ async function renderCliManager() {
     loadCliToolStatus(),
     loadCodexLensStatus(),
     loadCcwInstallations(),
-    loadCcwEndpointTools()
+    loadCcwEndpointTools(),
+    loadLitellmApiEndpoints(),
+    loadCliCustomEndpoints()
   ]);

   container.innerHTML = '<div class="status-manager">' +
@@ -314,8 +421,7 @@ async function renderCliManager() {
     '<div class="cli-settings-section" id="cli-settings-section" style="margin-top: 1.5rem;"></div>' +
     '<div class="cli-section" id="ccw-endpoint-tools-section" style="margin-top: 1.5rem;"></div>' +
     '</div>' +
-    '<section id="storageCard" class="mb-6"></section>' +
-    '<section id="indexCard" class="mb-6"></section>';
+    '<section id="storageCard" class="mb-6"></section>';

   // Render sub-panels
   renderToolsSection();
@@ -329,11 +435,6 @@ async function renderCliManager() {
     initStorageManager();
   }

-  // Initialize index manager card
-  if (typeof initIndexManager === 'function') {
-    initIndexManager();
-  }
-
   // Initialize Lucide icons
   if (window.lucide) lucide.createIcons();
 }
@@ -349,6 +450,50 @@ function getSelectedModel() {
   return select ? select.value : 'code';
 }

+/**
+ * Build model select options HTML, showing only installed models
+ * @returns {string} HTML string for select options
+ */
+function buildModelSelectOptions() {
+  var installedModels = window.cliToolsStatus?.codexlens?.installedModels || [];
+  var allModels = window.cliToolsStatus?.codexlens?.allModels || [];
+
+  // Model display configuration
+  var modelConfig = {
+    'code': { label: t('index.modelCode') || 'Code (768d)', star: true },
+    'base': { label: t('index.modelBase') || 'Base (768d)', star: false },
+    'fast': { label: t('index.modelFast') || 'Fast (384d)', star: false },
+    'minilm': { label: t('index.modelMinilm') || 'MiniLM (384d)', star: false },
+    'multilingual': { label: t('index.modelMultilingual') || 'Multilingual (1024d)', warn: true },
+    'balanced': { label: t('index.modelBalanced') || 'Balanced (1024d)', warn: true }
+  };
+
+  // If no models installed, show placeholder
+  if (installedModels.length === 0) {
+    return '<option value="" disabled selected>' + (t('index.noModelsInstalled') || 'No models installed') + '</option>';
+  }
+
+  // Build options for installed models only
+  var options = '';
+  var firstInstalled = null;
+
+  // Preferred order: code, fast, minilm, base, multilingual, balanced
+  var preferredOrder = ['code', 'fast', 'minilm', 'base', 'multilingual', 'balanced'];
+
+  preferredOrder.forEach(function(profile) {
+    if (installedModels.includes(profile) && modelConfig[profile]) {
+      var config = modelConfig[profile];
+      var style = config.warn ? ' style="color: var(--muted-foreground)"' : '';
+      var suffix = config.star ? ' ⭐' : (config.warn ? ' ⚠️' : '');
+      var selected = !firstInstalled ? ' selected' : '';
+      if (!firstInstalled) firstInstalled = profile;
+      options += '<option value="' + profile + '"' + style + selected + '>' + config.label + suffix + '</option>';
+    }
+  });
+
+  return options;
+}
+
 // ========== Tools Section (Left Column) ==========
 function renderToolsSection() {
   var container = document.getElementById('tools-section');
@@ -390,31 +535,22 @@ function renderToolsSection() {
     '</div>';
   }).join('');

-  // CodexLens item
-  var codexLensHtml = '<div class="tool-item clickable ' + (codexLensStatus.ready ? 'available' : 'unavailable') + '" onclick="showCodexLensConfigModal()">' +
+  // CodexLens item - simplified view with link to manager page
+  var codexLensHtml = '<div class="tool-item clickable ' + (codexLensStatus.ready ? 'available' : 'unavailable') + '" onclick="navigateToCodexLensManager()">' +
     '<div class="tool-item-left">' +
     '<span class="tool-status-dot ' + (codexLensStatus.ready ? 'status-available' : 'status-unavailable') + '"></span>' +
     '<div class="tool-item-info">' +
     '<div class="tool-item-name">CodexLens <span class="tool-type-badge">Index</span>' +
-    '<i data-lucide="settings" class="w-3 h-3 tool-config-icon"></i></div>' +
+    '<i data-lucide="external-link" class="w-3 h-3 tool-config-icon"></i></div>' +
     '<div class="tool-item-desc">' + (codexLensStatus.ready ? t('cli.codexLensDesc') : t('cli.codexLensDescFull')) + '</div>' +
     '</div>' +
     '</div>' +
     '<div class="tool-item-right">' +
     (codexLensStatus.ready
       ? '<span class="tool-status-text success"><i data-lucide="check-circle" class="w-3.5 h-3.5"></i> v' + (codexLensStatus.version || 'installed') + '</span>' +
-        '<select id="codexlensModelSelect" class="btn-sm bg-muted border border-border rounded text-xs" onclick="event.stopPropagation()" title="' + (t('index.selectModel') || 'Select embedding model') + '">' +
-        '<option value="code">' + (t('index.modelCode') || 'Code (768d)') + '</option>' +
-        '<option value="fast">' + (t('index.modelFast') || 'Fast (384d)') + '</option>' +
-        '<option value="multilingual">' + (t('index.modelMultilingual') || 'Multilingual (1024d)') + '</option>' +
-        '<option value="balanced">' + (t('index.modelBalanced') || 'Balanced (1024d)') + '</option>' +
-        '</select>' +
-        '<button class="btn-sm btn-primary" onclick="event.stopPropagation(); initCodexLensIndex(\'full\', getSelectedModel())" title="' + (t('index.fullDesc') || 'FTS + Semantic search (recommended)') + '"><i data-lucide="layers" class="w-3 h-3"></i> ' + (t('index.fullIndex') || '全部索引') + '</button>' +
-        '<button class="btn-sm btn-outline" onclick="event.stopPropagation(); initCodexLensIndex(\'vector\', getSelectedModel())" title="' + (t('index.vectorDesc') || 'Semantic search with embeddings') + '"><i data-lucide="sparkles" class="w-3 h-3"></i> ' + (t('index.vectorIndex') || '向量索引') + '</button>' +
-        '<button class="btn-sm btn-outline" onclick="event.stopPropagation(); initCodexLensIndex(\'normal\')" title="' + (t('index.normalDesc') || 'Fast full-text search only') + '"><i data-lucide="file-text" class="w-3 h-3"></i> ' + (t('index.normalIndex') || 'FTS索引') + '</button>' +
-        '<button class="btn-sm btn-outline btn-danger" onclick="event.stopPropagation(); uninstallCodexLens()"><i data-lucide="trash-2" class="w-3 h-3"></i> ' + t('cli.uninstall') + '</button>'
+        '<button class="btn-sm btn-primary" onclick="event.stopPropagation(); navigateToCodexLensManager()"><i data-lucide="settings" class="w-3 h-3"></i> ' + t('cli.openManager') + '</button>'
       : '<span class="tool-status-text muted"><i data-lucide="circle-dashed" class="w-3.5 h-3.5"></i> ' + t('cli.notInstalled') + '</span>' +
-        '<button class="btn-sm btn-primary" onclick="event.stopPropagation(); installCodexLens()"><i data-lucide="download" class="w-3 h-3"></i> ' + t('cli.install') + '</button>') +
+        '<button class="btn-sm btn-primary" onclick="event.stopPropagation(); navigateToCodexLensManager()"><i data-lucide="settings" class="w-3 h-3"></i> ' + t('cli.openManager') + '</button>') +
     '</div>' +
     '</div>';

@@ -438,6 +574,51 @@ function renderToolsSection() {
     '</div>';
   }

+  // API Endpoints section
+  var apiEndpointsHtml = '';
+  if (litellmApiEndpoints.length > 0) {
+    var endpointItems = litellmApiEndpoints.map(function(endpoint) {
+      // Check if endpoint is synced to CLI tools
+      var cliEndpoint = cliCustomEndpoints.find(function(e) { return e.id === endpoint.id; });
+      var isSynced = !!cliEndpoint;
+      var isEnabled = cliEndpoint ? cliEndpoint.enabled : false;
+
+      // Find provider info
+      var provider = (window.litellmApiConfig?.providers || []).find(function(p) { return p.id === endpoint.providerId; });
+      var providerName = provider ? provider.name : endpoint.providerId;
+
+      return '<div class="tool-item ' + (isSynced && isEnabled ? 'available' : 'unavailable') + '">' +
+        '<div class="tool-item-left">' +
+        '<span class="tool-status-dot ' + (isSynced && isEnabled ? 'status-available' : 'status-unavailable') + '"></span>' +
+        '<div class="tool-item-info">' +
+        '<div class="tool-item-name">' + endpoint.id + ' <span class="tool-type-badge">API</span></div>' +
+        '<div class="tool-item-desc">' + endpoint.model + ' (' + providerName + ')</div>' +
+        '</div>' +
+        '</div>' +
+        '<div class="tool-item-right">' +
+        (isSynced
+          ? '<label class="toggle-switch" onclick="event.stopPropagation()">' +
+            '<input type="checkbox" ' + (isEnabled ? 'checked' : '') + ' onchange="toggleEndpointEnabled(\'' + endpoint.id + '\', this.checked); renderToolsSection();">' +
+            '<span class="toggle-slider"></span>' +
+            '</label>'
+          : '<button class="btn-sm btn-primary" onclick="event.stopPropagation(); syncEndpointToCliTools({id: \'' + endpoint.id + '\', name: \'' + endpoint.name + '\'})">' +
+            '<i data-lucide="plus" class="w-3 h-3"></i> ' + (t('cli.addToCli') || 'Add to CLI') +
+            '</button>') +
+        '</div>' +
+        '</div>';
+    }).join('');
+
+    apiEndpointsHtml = '<div class="tools-subsection" style="margin-top: 1rem; padding-top: 1rem; border-top: 1px solid var(--border);">' +
+      '<div class="section-header-left" style="margin-bottom: 0.5rem;">' +
+      '<h4 style="font-size: 0.875rem; font-weight: 600; display: flex; align-items: center; gap: 0.5rem;">' +
+      '<i data-lucide="cloud" class="w-4 h-4"></i> ' + (t('cli.apiEndpoints') || 'API Endpoints') +
+      '</h4>' +
+      '<span class="section-count">' + litellmApiEndpoints.length + ' ' + (t('cli.configured') || 'configured') + '</span>' +
+      '</div>' +
+      '<div class="tools-list">' + endpointItems + '</div>' +
+      '</div>';
+  }
+
   container.innerHTML = '<div class="section-header">' +
     '<div class="section-header-left">' +
     '<h3><i data-lucide="terminal" class="w-4 h-4"></i> ' + t('cli.tools') + '</h3>' +
@@ -451,7 +632,8 @@ function renderToolsSection() {
     toolsHtml +
     codexLensHtml +
     semanticHtml +
-    '</div>';
+    '</div>' +
+    apiEndpointsHtml;

   if (window.lucide) lucide.createIcons();
 }
@@ -565,6 +747,16 @@ async function loadWindowsPlatformSettings() {

 async function toggleChineseResponse(enabled) {
   if (chineseResponseLoading) return;
+
+  // Pre-check: verify CCW workflows are installed (only when enabling)
+  if (enabled && typeof ccwInstallStatus !== 'undefined' && !ccwInstallStatus.installed) {
+    var missingFile = ccwInstallStatus.missingFiles.find(function(f) { return f === 'chinese-response.md'; });
+    if (missingFile) {
+      showRefreshToast(t('lang.installRequired'), 'warning');
+      return;
+    }
+  }
+
   chineseResponseLoading = true;

   try {
@@ -576,7 +768,14 @@ async function toggleChineseResponse(enabled) {

     if (!response.ok) {
       var errData = await response.json();
-      throw new Error(errData.error || 'Failed to update setting');
+      // Show specific error message from backend
+      var errorMsg = errData.error || 'Failed to update setting';
+      if (errorMsg.includes('not found')) {
+        showRefreshToast(t('lang.installRequired'), 'warning');
+      } else {
+        showRefreshToast((enabled ? t('lang.enableFailed') : t('lang.disableFailed')) + ': ' + errorMsg, 'error');
+      }
+      throw new Error(errorMsg);
     }

     var data = await response.json();
@@ -589,7 +788,7 @@ async function toggleChineseResponse(enabled) {
     showRefreshToast(enabled ? t('lang.enableSuccess') : t('lang.disableSuccess'), 'success');
   } catch (err) {
     console.error('Failed to toggle Chinese response:', err);
-    showRefreshToast(enabled ? t('lang.enableFailed') : t('lang.disableFailed'), 'error');
+    // Error already shown in the !response.ok block
   } finally {
     chineseResponseLoading = false;
   }
@@ -597,6 +796,16 @@ async function toggleChineseResponse(enabled) {

 async function toggleWindowsPlatform(enabled) {
   if (windowsPlatformLoading) return;
+
+  // Pre-check: verify CCW workflows are installed (only when enabling)
+  if (enabled && typeof ccwInstallStatus !== 'undefined' && !ccwInstallStatus.installed) {
+    var missingFile = ccwInstallStatus.missingFiles.find(function(f) { return f === 'windows-platform.md'; });
+    if (missingFile) {
+      showRefreshToast(t('lang.installRequired'), 'warning');
+      return;
+    }
+  }
+
   windowsPlatformLoading = true;

   try {
@@ -608,7 +817,14 @@ async function toggleWindowsPlatform(enabled) {

     if (!response.ok) {
       var errData = await response.json();
-      throw new Error(errData.error || 'Failed to update setting');
+      // Show specific error message from backend
+      var errorMsg = errData.error || 'Failed to update setting';
+      if (errorMsg.includes('not found')) {
+        showRefreshToast(t('lang.installRequired'), 'warning');
+      } else {
+        showRefreshToast((enabled ? t('lang.windowsEnableFailed') : t('lang.windowsDisableFailed')) + ': ' + errorMsg, 'error');
+      }
+      throw new Error(errorMsg);
     }

     var data = await response.json();
@@ -621,7 +837,7 @@ async function toggleWindowsPlatform(enabled) {
     showRefreshToast(enabled ? t('lang.windowsEnableSuccess') : t('lang.windowsDisableSuccess'), 'success');
   } catch (err) {
     console.error('Failed to toggle Windows platform:', err);
-    showRefreshToast(enabled ? t('lang.windowsEnableFailed') : t('lang.windowsDisableFailed'), 'error');
+    // Error already shown in the !response.ok block
   } finally {
     windowsPlatformLoading = false;
   }
@@ -771,6 +987,19 @@ function renderCliSettingsSection() {
     '</div>' +
     '<p class="cli-setting-desc">' + t('cli.maxContextFilesDesc') + '</p>' +
     '</div>' +
+    '<div class="cli-setting-item">' +
+    '<label class="cli-setting-label">' +
+    '<i data-lucide="search" class="w-3 h-3"></i>' +
+    t('cli.codeIndexMcp') +
+    '</label>' +
+    '<div class="cli-setting-control">' +
+    '<select class="cli-setting-select" onchange="setCodeIndexMcpProvider(this.value)">' +
+    '<option value="codexlens"' + (codeIndexMcpProvider === 'codexlens' ? ' selected' : '') + '>CodexLens</option>' +
+    '<option value="ace"' + (codeIndexMcpProvider === 'ace' ? ' selected' : '') + '>ACE (Augment)</option>' +
+    '</select>' +
+    '</div>' +
+    '<p class="cli-setting-desc">' + t('cli.codeIndexMcpDesc') + '</p>' +
+    '</div>' +
     '</div>';

   container.innerHTML = settingsHtml;
File diff suppressed because it is too large
@@ -449,8 +449,23 @@ function isHookTemplateInstalled(templateId) {
   const template = HOOK_TEMPLATES[templateId];
   if (!template) return false;

-  // Build expected command string
-  const templateCmd = template.command + (template.args ? ' ' + template.args.join(' ') : '');
+  // Define unique patterns for each template type (more specific than just command)
+  const uniquePatterns = {
+    'session-context': 'hook session-context',
+    'codexlens-update': 'codexlens update',
+    'ccw-notify': 'api/hook',
+    'log-tool': 'tool-usage.log',
+    'lint-check': 'eslint',
+    'git-add': 'git add',
+    'memory-file-read': 'memory track --type file --action read',
+    'memory-file-write': 'memory track --type file --action write',
+    'memory-prompt-track': 'memory track --type topic',
+    'skill-context-auto': 'skill-context-auto'
+  };
+
+  // Use unique pattern if defined, otherwise fall back to command + args
+  const searchPattern = uniquePatterns[templateId] ||
+    (template.command + (template.args ? ' ' + template.args.join(' ') : ''));

   // Check project hooks
   const projectHooks = hookConfig.project?.hooks?.[template.event];
@@ -459,7 +474,7 @@ function isHookTemplateInstalled(templateId) {
     if (hookList.some(h => {
       // Check both old format (h.command) and new format (h.hooks[0].command)
       const cmd = h.hooks?.[0]?.command || h.command || '';
-      return cmd.includes(template.command);
+      return cmd.includes(searchPattern);
     })) return true;
   }

@@ -469,7 +484,7 @@ function isHookTemplateInstalled(templateId) {
     const hookList = Array.isArray(globalHooks) ? globalHooks : [globalHooks];
     if (hookList.some(h => {
       const cmd = h.hooks?.[0]?.command || h.command || '';
-      return cmd.includes(template.command);
+      return cmd.includes(searchPattern);
     })) return true;
   }

@@ -512,7 +527,7 @@ async function uninstallHookTemplate(templateId) {

   // Define unique patterns for each template type
   const uniquePatterns = {
-    'session-context': 'api/hook/session-context',
+    'session-context': 'hook session-context',
     'codexlens-update': 'codexlens update',
     'ccw-notify': 'api/hook',
     'log-tool': 'tool-usage.log',
@@ -42,17 +42,41 @@ function getCcwEnabledToolsCodex() {

 // Get current CCW_PROJECT_ROOT from config
 function getCcwProjectRoot() {
+  // Try project config first, then global config
   const currentPath = projectPath;
   const projectData = mcpAllProjects[currentPath] || {};
-  const ccwConfig = projectData.mcpServers?.['ccw-tools'];
-  return ccwConfig?.env?.CCW_PROJECT_ROOT || '';
+  const projectCcwConfig = projectData.mcpServers?.['ccw-tools'];
+  if (projectCcwConfig?.env?.CCW_PROJECT_ROOT) {
+    return projectCcwConfig.env.CCW_PROJECT_ROOT;
+  }
+  // Fallback to global config
+  const globalCcwConfig = mcpUserServers?.['ccw-tools'];
+  return globalCcwConfig?.env?.CCW_PROJECT_ROOT || '';
 }

 // Get current CCW_ALLOWED_DIRS from config
 function getCcwAllowedDirs() {
+  // Try project config first, then global config
   const currentPath = projectPath;
   const projectData = mcpAllProjects[currentPath] || {};
-  const ccwConfig = projectData.mcpServers?.['ccw-tools'];
+  const projectCcwConfig = projectData.mcpServers?.['ccw-tools'];
+  if (projectCcwConfig?.env?.CCW_ALLOWED_DIRS) {
+    return projectCcwConfig.env.CCW_ALLOWED_DIRS;
+  }
+  // Fallback to global config
+  const globalCcwConfig = mcpUserServers?.['ccw-tools'];
+  return globalCcwConfig?.env?.CCW_ALLOWED_DIRS || '';
+}
+
+// Get current CCW_PROJECT_ROOT from Codex config
+function getCcwProjectRootCodex() {
+  const ccwConfig = codexMcpServers?.['ccw-tools'];
+  return ccwConfig?.env?.CCW_PROJECT_ROOT || '';
+}
+
+// Get current CCW_ALLOWED_DIRS from Codex config
+function getCcwAllowedDirsCodex() {
+  const ccwConfig = codexMcpServers?.['ccw-tools'];
   return ccwConfig?.env?.CCW_ALLOWED_DIRS || '';
 }

@@ -260,7 +284,7 @@ async function renderMcpManager() {
             <input type="text"
                    class="ccw-project-root-input flex-1 px-2 py-1 text-xs bg-background border border-border rounded focus:outline-none focus:ring-1 focus:ring-primary"
                    placeholder="${projectPath || t('mcp.useCurrentDir')}"
-                   value="${getCcwProjectRoot()}">
+                   value="${getCcwProjectRootCodex()}">
             <button class="p-1 text-muted-foreground hover:text-foreground"
                     onclick="setCcwProjectRootToCurrent()"
                     title="${t('mcp.useCurrentProject')}">
@@ -272,7 +296,7 @@ async function renderMcpManager() {
             <input type="text"
                    class="ccw-allowed-dirs-input flex-1 px-2 py-1 text-xs bg-background border border-border rounded focus:outline-none focus:ring-1 focus:ring-primary"
                    placeholder="${t('mcp.allowedDirsPlaceholder')}"
-                   value="${getCcwAllowedDirs()}">
+                   value="${getCcwAllowedDirsCodex()}">
           </div>
         </div>
       </div>
@@ -638,9 +638,26 @@ function addRulePath() {

 function removeRulePath(index) {
   ruleCreateState.paths.splice(index, 1);
-  // Re-render paths list
-  closeRuleCreateModal();
-  openRuleCreateModal();
+  // Re-render paths list without closing modal
+  const pathsList = document.getElementById('rulePathsList');
+  if (pathsList) {
+    pathsList.innerHTML = ruleCreateState.paths.map((path, idx) => `
+      <div class="flex gap-2">
+        <input type="text" class="rule-path-input flex-1 px-3 py-2 bg-background border border-border rounded-lg text-sm focus:outline-none focus:ring-2 focus:ring-primary"
+               placeholder="src/**/*.ts"
+               value="${path}"
+               data-index="${idx}">
+        ${idx > 0 ? `
+          <button class="px-3 py-2 text-destructive hover:bg-destructive/10 rounded-lg transition-colors"
+                  onclick="removeRulePath(${idx})">
+            <i data-lucide="x" class="w-4 h-4"></i>
+          </button>
+        ` : ''}
+      </div>
+    `).join('');
+    if (typeof lucide !== 'undefined') lucide.createIcons();
+  }
 }

 function switchRuleCreateMode(mode) {
@@ -674,9 +691,21 @@ function switchRuleCreateMode(mode) {
     if (contentSection) contentSection.style.display = 'block';
   }

-  // Re-render modal to update button states
-  closeRuleCreateModal();
-  openRuleCreateModal();
+  // Update mode button styles without re-rendering
+  const modeButtons = document.querySelectorAll('#ruleCreateModal .mode-btn');
+  modeButtons.forEach(btn => {
+    const btnText = btn.querySelector('.font-medium')?.textContent || '';
+    const isInput = btnText.includes(t('rules.manualInput'));
+    const isCliGenerate = btnText.includes(t('rules.cliGenerate'));
+
+    if ((isInput && mode === 'input') || (isCliGenerate && mode === 'cli-generate')) {
+      btn.classList.remove('border-border', 'hover:border-primary/50');
+      btn.classList.add('border-primary', 'bg-primary/10');
+    } else {
+      btn.classList.remove('border-primary', 'bg-primary/10');
+      btn.classList.add('border-border', 'hover:border-primary/50');
+    }
+  });
 }

 function switchRuleGenerationType(type) {
@@ -153,10 +153,11 @@ function renderSkillCard(skill, location) {
   const locationIcon = location === 'project' ? 'folder' : 'user';
   const locationClass = location === 'project' ? 'text-primary' : 'text-indigo';
   const locationBg = location === 'project' ? 'bg-primary/10' : 'bg-indigo/10';
+  const folderName = skill.folderName || skill.name;

   return `
     <div class="skill-card bg-card border border-border rounded-lg p-4 hover:shadow-md transition-all cursor-pointer"
-         onclick="showSkillDetail('${escapeHtml(skill.name)}', '${location}')">
+         onclick="showSkillDetail('${escapeHtml(folderName)}', '${location}')">
       <div class="flex items-start justify-between mb-3">
         <div class="flex items-center gap-3">
           <div class="w-10 h-10 ${locationBg} rounded-lg flex items-center justify-center">
@@ -198,6 +199,7 @@ function renderSkillCard(skill, location) {
 function renderSkillDetailPanel(skill) {
   const hasAllowedTools = skill.allowedTools && skill.allowedTools.length > 0;
   const hasSupportingFiles = skill.supportingFiles && skill.supportingFiles.length > 0;
+  const folderName = skill.folderName || skill.name;

   return `
     <div class="skill-detail-panel fixed top-0 right-0 w-1/2 max-w-xl h-full bg-card border-l border-border shadow-lg z-50 flex flex-col">
@@ -243,20 +245,54 @@ function renderSkillDetailPanel(skill) {
             </div>
           ` : ''}

-          <!-- Supporting Files -->
-          ${hasSupportingFiles ? `
-            <div>
-              <h4 class="text-sm font-semibold text-foreground mb-2">${t('skills.supportingFiles')}</h4>
-              <div class="space-y-2">
-                ${skill.supportingFiles.map(file => `
-                  <div class="flex items-center gap-2 p-2 bg-muted/50 rounded-lg">
-                    <i data-lucide="file-text" class="w-4 h-4 text-muted-foreground"></i>
-                    <span class="text-sm font-mono text-foreground">${escapeHtml(file)}</span>
-                  </div>
-                `).join('')}
+          <!-- Skill Files (SKILL.md + Supporting Files) -->
+          <div>
+            <h4 class="text-sm font-semibold text-foreground mb-2">${t('skills.files') || 'Files'}</h4>
+            <div class="space-y-2">
+              <!-- SKILL.md (main file) -->
+              <div class="flex items-center justify-between p-2 bg-primary/5 border border-primary/20 rounded-lg cursor-pointer hover:bg-primary/10 transition-colors"
+                   onclick="viewSkillFile('${escapeHtml(folderName)}', 'SKILL.md', '${skill.location}')">
+                <div class="flex items-center gap-2">
+                  <i data-lucide="file-text" class="w-4 h-4 text-primary"></i>
+                  <span class="text-sm font-mono text-foreground font-medium">SKILL.md</span>
+                </div>
+                <div class="flex items-center gap-1">
+                  <button class="p-1 text-primary hover:bg-primary/20 rounded transition-colors"
+                          onclick="event.stopPropagation(); editSkillFile('${escapeHtml(folderName)}', 'SKILL.md', '${skill.location}')"
+                          title="${t('common.edit')}">
+                    <i data-lucide="edit-2" class="w-3.5 h-3.5"></i>
+                  </button>
+                </div>
               </div>
+              ${hasSupportingFiles ? skill.supportingFiles.map(file => {
+                const isDir = file.endsWith('/');
+                const dirName = isDir ? file.slice(0, -1) : file;
+                return `
+                  <!-- Supporting file: ${escapeHtml(file)} -->
+                  <div class="skill-file-item" data-path="${escapeHtml(dirName)}">
+                    <div class="flex items-center justify-between p-2 bg-muted/50 rounded-lg cursor-pointer hover:bg-muted transition-colors"
+                         onclick="${isDir ? `toggleSkillFolder('${escapeHtml(folderName)}', '${escapeHtml(dirName)}', '${skill.location}', this)` : `viewSkillFile('${escapeHtml(folderName)}', '${escapeHtml(file)}', '${skill.location}')`}">
+                      <div class="flex items-center gap-2">
+                        <i data-lucide="${isDir ? 'folder' : 'file-text'}" class="w-4 h-4 text-muted-foreground ${isDir ? 'folder-icon' : ''}"></i>
+                        <span class="text-sm font-mono text-foreground">${escapeHtml(isDir ? dirName : file)}</span>
+                        ${isDir ? '<i data-lucide="chevron-right" class="w-3 h-3 text-muted-foreground folder-chevron transition-transform"></i>' : ''}
+                      </div>
+                      ${!isDir ? `
+                        <div class="flex items-center gap-1">
+                          <button class="p-1 text-muted-foreground hover:text-foreground hover:bg-muted rounded transition-colors"
+                                  onclick="event.stopPropagation(); editSkillFile('${escapeHtml(folderName)}', '${escapeHtml(file)}', '${skill.location}')"
+                                  title="${t('common.edit')}">
+                            <i data-lucide="edit-2" class="w-3.5 h-3.5"></i>
+                          </button>
+                        </div>
+                      ` : ''}
+                    </div>
+                    <div class="folder-contents hidden ml-4 mt-1 space-y-1"></div>
+                  </div>
+                `;
+              }).join('') : ''}
             </div>
-          ` : ''}
+          </div>

           <!-- Path -->
           <div>
@@ -269,12 +305,12 @@ function renderSkillDetailPanel(skill) {
       <!-- Actions -->
       <div class="px-5 py-4 border-t border-border flex justify-between">
         <button class="px-4 py-2 text-sm text-destructive hover:bg-destructive/10 rounded-lg transition-colors flex items-center gap-2"
-                onclick="deleteSkill('${escapeHtml(skill.name)}', '${skill.location}')">
+                onclick="deleteSkill('${escapeHtml(folderName)}', '${skill.location}')">
           <i data-lucide="trash-2" class="w-4 h-4"></i>
           ${t('common.delete')}
         </button>
         <button class="px-4 py-2 text-sm bg-primary text-primary-foreground rounded-lg hover:opacity-90 transition-opacity flex items-center gap-2"
-                onclick="editSkill('${escapeHtml(skill.name)}', '${skill.location}')">
+                onclick="editSkill('${escapeHtml(folderName)}', '${skill.location}')">
           <i data-lucide="edit" class="w-4 h-4"></i>
           ${t('common.edit')}
         </button>
@@ -525,7 +561,7 @@ function openSkillCreateModal() {
         </div>

         <!-- Footer -->
-        <div class="flex items-center justify-end gap-3 px-6 py-4 border-t border-border">
+        <div id="skillModalFooter" class="flex items-center justify-end gap-3 px-6 py-4 border-t border-border">
           <button class="px-4 py-2 text-sm text-muted-foreground hover:text-foreground transition-colors"
                   onclick="closeSkillCreateModal()">
             ${t('common.cancel')}
@@ -588,16 +624,76 @@ function selectSkillLocation(location) {

 function switchSkillCreateMode(mode) {
   skillCreateState.mode = mode;
-  // Re-render modal
-  closeSkillCreateModal();
-  openSkillCreateModal();
+  // Toggle visibility of mode sections
+  const importSection = document.getElementById('skillImportMode');
+  const cliGenerateSection = document.getElementById('skillCliGenerateMode');
+  const footerContainer = document.getElementById('skillModalFooter');
+
+  if (importSection) importSection.style.display = mode === 'import' ? 'block' : 'none';
+  if (cliGenerateSection) cliGenerateSection.style.display = mode === 'cli-generate' ? 'block' : 'none';
+
+  // Update mode button styles
+  const modeButtons = document.querySelectorAll('#skillCreateModal .mode-btn');
+  modeButtons.forEach(btn => {
+    const btnText = btn.querySelector('.font-medium')?.textContent || '';
+    const isImport = btnText.includes(t('skills.importFolder'));
+    const isCliGenerate = btnText.includes(t('skills.cliGenerate'));
+
+    if ((isImport && mode === 'import') || (isCliGenerate && mode === 'cli-generate')) {
+      btn.classList.remove('border-border', 'hover:border-primary/50');
+      btn.classList.add('border-primary', 'bg-primary/10');
+    } else {
+      btn.classList.remove('border-primary', 'bg-primary/10');
+      btn.classList.add('border-border', 'hover:border-primary/50');
+    }
+  });
+
+  // Update footer buttons
+  if (footerContainer) {
+    if (mode === 'import') {
+      footerContainer.innerHTML = `
+        <button class="px-4 py-2 text-sm text-muted-foreground hover:text-foreground transition-colors"
+                onclick="closeSkillCreateModal()">
+          ${t('common.cancel')}
+        </button>
+        <button class="px-4 py-2 text-sm bg-primary/10 text-primary rounded-lg hover:bg-primary/20 transition-colors"
+                onclick="validateSkillImport()">
+          ${t('skills.validate')}
+        </button>
+        <button class="px-4 py-2 text-sm bg-primary text-primary-foreground rounded-lg hover:opacity-90 transition-opacity"
+                onclick="createSkill()">
+          ${t('skills.import')}
+        </button>
+      `;
+    } else {
+      footerContainer.innerHTML = `
+        <button class="px-4 py-2 text-sm text-muted-foreground hover:text-foreground transition-colors"
+                onclick="closeSkillCreateModal()">
+          ${t('common.cancel')}
+        </button>
+        <button class="px-4 py-2 text-sm bg-primary text-primary-foreground rounded-lg hover:opacity-90 transition-opacity flex items-center gap-2"
+                onclick="createSkill()">
+          <i data-lucide="sparkles" class="w-4 h-4"></i>
+          ${t('skills.generate')}
+        </button>
+      `;
+    }
+    if (typeof lucide !== 'undefined') lucide.createIcons();
+  }
 }

 function switchSkillGenerationType(type) {
   skillCreateState.generationType = type;
-  // Re-render modal
-  closeSkillCreateModal();
-  openSkillCreateModal();
+  // Toggle visibility of description area
+  const descriptionArea = document.getElementById('skillDescriptionArea');
+  if (descriptionArea) {
+    descriptionArea.style.display = type === 'description' ? 'block' : 'none';
+  }
+
+  // Update generation type button styles (only the description button is active, template is disabled)
+  // No need to update button styles since template button is disabled
 }

 function browseSkillFolder() {
@@ -817,3 +913,271 @@ async function createSkill() {
     }
   }
 }
+
+
+// ========== Skill File View/Edit Functions ==========
+
+var skillFileEditorState = {
+  skillName: '',
+  fileName: '',
+  location: '',
+  content: '',
+  isEditing: false
+};
+
+async function viewSkillFile(skillName, fileName, location) {
+  try {
+    const response = await fetch(
+      '/api/skills/' + encodeURIComponent(skillName) + '/file?filename=' + encodeURIComponent(fileName) +
+      '&location=' + location + '&path=' + encodeURIComponent(projectPath)
+    );
+
+    if (!response.ok) {
+      const error = await response.json();
+      throw new Error(error.error || 'Failed to load file');
+    }
+
+    const data = await response.json();
+
+    skillFileEditorState = {
+      skillName,
+      fileName,
+      location,
+      content: data.content,
+      isEditing: false
+    };
+
+    renderSkillFileModal();
+  } catch (err) {
+    console.error('Failed to load skill file:', err);
+    if (window.showToast) {
+      showToast(err.message || t('skills.fileLoadError') || 'Failed to load file', 'error');
+    }
+  }
+}
+
+function editSkillFile(skillName, fileName, location) {
+  viewSkillFile(skillName, fileName, location).then(() => {
+    skillFileEditorState.isEditing = true;
+    renderSkillFileModal();
+  });
+}
+
+function renderSkillFileModal() {
+  // Remove existing modal if any
+  const existingModal = document.getElementById('skillFileModal');
+  if (existingModal) existingModal.remove();
+
+  const { skillName, fileName, content, isEditing, location } = skillFileEditorState;
+
+  const modalHtml = `
+    <div class="modal-overlay fixed inset-0 bg-black/50 z-[60] flex items-center justify-center" onclick="closeSkillFileModal(event)">
+      <div class="modal-dialog bg-card rounded-lg shadow-lg w-full max-w-4xl max-h-[90vh] mx-4 flex flex-col" onclick="event.stopPropagation()">
+        <!-- Header -->
+        <div class="flex items-center justify-between px-6 py-4 border-b border-border">
+          <div class="flex items-center gap-3">
+            <i data-lucide="file-text" class="w-5 h-5 text-primary"></i>
+            <div>
+              <h3 class="text-lg font-semibold text-foreground font-mono">${escapeHtml(fileName)}</h3>
+              <p class="text-xs text-muted-foreground">${escapeHtml(skillName)} / ${location}</p>
+            </div>
+          </div>
+          <div class="flex items-center gap-2">
+            ${!isEditing ? `
+              <button class="px-3 py-1.5 text-sm bg-primary/10 text-primary rounded-lg hover:bg-primary/20 transition-colors flex items-center gap-1"
+                      onclick="toggleSkillFileEdit()">
+                <i data-lucide="edit-2" class="w-4 h-4"></i>
+                ${t('common.edit')}
+              </button>
+            ` : ''}
+            <button class="w-8 h-8 flex items-center justify-center text-xl text-muted-foreground hover:text-foreground hover:bg-hover rounded"
+                    onclick="closeSkillFileModal()">×</button>
+          </div>
+        </div>
+
+        <!-- Content -->
+        <div class="flex-1 overflow-hidden p-4">
+          ${isEditing ? `
+            <textarea id="skillFileContent"
+                      class="w-full h-full min-h-[400px] px-4 py-3 bg-background border border-border rounded-lg text-sm font-mono focus:outline-none focus:ring-2 focus:ring-primary resize-none"
+                      spellcheck="false">${escapeHtml(content)}</textarea>
+          ` : `
+            <div class="w-full h-full min-h-[400px] overflow-auto">
+              <pre class="px-4 py-3 bg-muted/30 rounded-lg text-sm font-mono whitespace-pre-wrap break-words">${escapeHtml(content)}</pre>
+            </div>
+          `}
+        </div>
+
+        <!-- Footer -->
+        ${isEditing ? `
+          <div class="flex items-center justify-end gap-3 px-6 py-4 border-t border-border">
+            <button class="px-4 py-2 text-sm text-muted-foreground hover:text-foreground transition-colors"
+                    onclick="cancelSkillFileEdit()">
+              ${t('common.cancel')}
+            </button>
+            <button class="px-4 py-2 text-sm bg-primary text-primary-foreground rounded-lg hover:opacity-90 transition-opacity flex items-center gap-2"
+                    onclick="saveSkillFile()">
+              <i data-lucide="save" class="w-4 h-4"></i>
+              ${t('common.save')}
+            </button>
+          </div>
+        ` : ''}
+      </div>
+    </div>
+  `;
+
+  const modalContainer = document.createElement('div');
+  modalContainer.id = 'skillFileModal';
+  modalContainer.innerHTML = modalHtml;
+  document.body.appendChild(modalContainer);
+
+  if (typeof lucide !== 'undefined') lucide.createIcons();
+}
+
+function closeSkillFileModal(event) {
+  if (event && event.target !== event.currentTarget) return;
+  const modal = document.getElementById('skillFileModal');
+  if (modal) modal.remove();
+  skillFileEditorState = { skillName: '', fileName: '', location: '', content: '', isEditing: false };
+}
+
+function toggleSkillFileEdit() {
+  skillFileEditorState.isEditing = true;
+  renderSkillFileModal();
+}
+
+function cancelSkillFileEdit() {
+  skillFileEditorState.isEditing = false;
+  renderSkillFileModal();
+}
+
+async function saveSkillFile() {
+  const contentTextarea = document.getElementById('skillFileContent');
+  if (!contentTextarea) return;
+
+  const newContent = contentTextarea.value;
+  const { skillName, fileName, location } = skillFileEditorState;
+
+  try {
+    const response = await fetch('/api/skills/' + encodeURIComponent(skillName) + '/file', {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({
+        fileName,
+        content: newContent,
+        location,
+        projectPath
+      })
+    });
+
+    if (!response.ok) {
+      const error = await response.json();
+      throw new Error(error.error || 'Failed to save file');
+    }
+
+    // Update state and close edit mode
+    skillFileEditorState.content = newContent;
+    skillFileEditorState.isEditing = false;
+    renderSkillFileModal();
+
+    // Refresh skill detail if SKILL.md was edited
+    if (fileName === 'SKILL.md') {
+      await loadSkillsData();
+      // Reload current skill detail
+      if (selectedSkill) {
+        await showSkillDetail(skillName, location);
+      }
+    }
+
+    if (window.showToast) {
+      showToast(t('skills.fileSaved') || 'File saved successfully', 'success');
+    }
+  } catch (err) {
+    console.error('Failed to save skill file:', err);
+    if (window.showToast) {
+      showToast(err.message || t('skills.fileSaveError') || 'Failed to save file', 'error');
+    }
+  }
+}
+
+
+
+// ========== Skill Folder Expansion Functions ==========
+
+var expandedFolders = new Set();
+
+async function toggleSkillFolder(skillName, subPath, location, element) {
+  const fileItem = element.closest('.skill-file-item');
+  if (!fileItem) return;
+
+  const contentsDiv = fileItem.querySelector('.folder-contents');
+  const chevron = element.querySelector('.folder-chevron');
+  const folderIcon = element.querySelector('.folder-icon');
+  const folderKey = `${skillName}:${subPath}:${location}`;
+
+  if (expandedFolders.has(folderKey)) {
+    // Collapse folder
+    expandedFolders.delete(folderKey);
+    contentsDiv.classList.add('hidden');
+    contentsDiv.innerHTML = '';
+    if (chevron) chevron.style.transform = '';
+    if (folderIcon) folderIcon.setAttribute('data-lucide', 'folder');
+    if (typeof lucide !== 'undefined') lucide.createIcons();
+  } else {
+    // Expand folder
+    try {
+      const response = await fetch(
+        '/api/skills/' + encodeURIComponent(skillName) + '/dir?subpath=' + encodeURIComponent(subPath) +
+        '&location=' + location + '&path=' + encodeURIComponent(projectPath)
+      );

+      if (!response.ok) {
+        const error = await response.json();
+        throw new Error(error.error || 'Failed to load folder');
+      }
+
+      const data = await response.json();
+
+      expandedFolders.add(folderKey);
+      if (chevron) chevron.style.transform = 'rotate(90deg)';
+      if (folderIcon) folderIcon.setAttribute('data-lucide', 'folder-open');
+
+      // Render folder contents
+      contentsDiv.innerHTML = data.files.map(file => {
+        const filePath = file.path;
+        const isDir = file.isDirectory;
+        return `
+          <div class="skill-file-item" data-path="${escapeHtml(filePath)}">
+            <div class="flex items-center justify-between p-2 bg-muted/30 rounded-lg cursor-pointer hover:bg-muted/50 transition-colors"
+                 onclick="${isDir ? `toggleSkillFolder('${escapeHtml(skillName)}', '${escapeHtml(filePath)}', '${location}', this)` : `viewSkillFile('${escapeHtml(skillName)}', '${escapeHtml(filePath)}', '${location}')`}">
+              <div class="flex items-center gap-2">
+                <i data-lucide="${isDir ? 'folder' : 'file-text'}" class="w-4 h-4 text-muted-foreground ${isDir ? 'folder-icon' : ''}"></i>
+                <span class="text-sm font-mono text-foreground">${escapeHtml(file.name)}</span>
+                ${isDir ? '<i data-lucide="chevron-right" class="w-3 h-3 text-muted-foreground folder-chevron transition-transform"></i>' : ''}
+              </div>
+              ${!isDir ? `
+                <div class="flex items-center gap-1">
+                  <button class="p-1 text-muted-foreground hover:text-foreground hover:bg-muted rounded transition-colors"
+                          onclick="event.stopPropagation(); editSkillFile('${escapeHtml(skillName)}', '${escapeHtml(filePath)}', '${location}')"
+                          title="${t('common.edit')}">
+                    <i data-lucide="edit-2" class="w-3.5 h-3.5"></i>
+                  </button>
+                </div>
+              ` : ''}
+            </div>
+            <div class="folder-contents hidden ml-4 mt-1 space-y-1"></div>
+          </div>
+        `;
+      }).join('');
+
+      contentsDiv.classList.remove('hidden');
+      if (typeof lucide !== 'undefined') lucide.createIcons();
+    } catch (err) {
+      console.error('Failed to load folder contents:', err);
+      if (window.showToast) {
+        showToast(err.message || 'Failed to load folder', 'error');
+      }
+    }
+  }
+}
+
@@ -331,6 +331,15 @@
             <i data-lucide="history" class="nav-icon"></i>
             <span class="nav-text flex-1" data-i18n="nav.history">History</span>
           </li>
+          <li class="nav-item flex items-center gap-2 px-3 py-2.5 text-sm text-muted-foreground hover:bg-hover hover:text-foreground rounded cursor-pointer transition-colors" data-view="codexlens-manager" data-tooltip="CodexLens Manager">
+            <i data-lucide="search-code" class="nav-icon"></i>
+            <span class="nav-text flex-1" data-i18n="nav.codexLensManager">CodexLens</span>
+            <span class="badge px-2 py-0.5 text-xs font-semibold rounded-full bg-hover text-muted-foreground" id="badgeCodexLens">-</span>
+          </li>
+          <li class="nav-item flex items-center gap-2 px-3 py-2.5 text-sm text-muted-foreground hover:bg-hover hover:text-foreground rounded cursor-pointer transition-colors" data-view="api-settings" data-tooltip="API Settings">
+            <i data-lucide="settings" class="nav-icon"></i>
+            <span class="nav-text flex-1" data-i18n="nav.apiSettings">API Settings</span>
+          </li>
           <!-- Hidden: Code Graph Explorer (feature disabled)
           <li class="nav-item flex items-center gap-2 px-3 py-2.5 text-sm text-muted-foreground hover:bg-hover hover:text-foreground rounded cursor-pointer transition-colors" data-view="graph-explorer" data-tooltip="Code Graph Explorer">
             <i data-lucide="git-branch" class="nav-icon"></i>
ccw/src/tools/claude-cli-tools.ts (new file, 371 lines)
@@ -0,0 +1,371 @@
/**
 * Claude CLI Tools Configuration Manager
 * Manages .claude/cli-tools.json with fallback:
 * 1. Project workspace: {projectDir}/.claude/cli-tools.json (priority)
 * 2. Global: ~/.claude/cli-tools.json (fallback)
 */
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';

// ========== Types ==========

export interface ClaudeCliTool {
  enabled: boolean;
  isBuiltin: boolean;
  command: string;
  description: string;
}

export interface ClaudeCacheSettings {
  injectionMode: 'auto' | 'manual' | 'disabled';
  defaultPrefix: string;
  defaultSuffix: string;
}

export interface ClaudeCliToolsConfig {
  $schema?: string;
  version: string;
  tools: Record<string, ClaudeCliTool>;
  customEndpoints: Array<{
    id: string;
    name: string;
    enabled: boolean;
  }>;
  defaultTool: string;
  settings: {
    promptFormat: 'plain' | 'yaml' | 'json';
    smartContext: {
      enabled: boolean;
      maxFiles: number;
    };
    nativeResume: boolean;
    recursiveQuery: boolean;
    cache: ClaudeCacheSettings;
    codeIndexMcp: 'codexlens' | 'ace'; // Code Index MCP provider
  };
}

// ========== Default Config ==========

const DEFAULT_CONFIG: ClaudeCliToolsConfig = {
  version: '1.0.0',
  tools: {
    gemini: {
      enabled: true,
      isBuiltin: true,
      command: 'gemini',
      description: 'Google AI for code analysis'
    },
    qwen: {
      enabled: true,
      isBuiltin: true,
      command: 'qwen',
      description: 'Alibaba AI assistant'
    },
    codex: {
      enabled: true,
      isBuiltin: true,
      command: 'codex',
      description: 'OpenAI code generation'
    },
    claude: {
      enabled: true,
      isBuiltin: true,
      command: 'claude',
      description: 'Anthropic AI assistant'
    }
  },
  customEndpoints: [],
  defaultTool: 'gemini',
  settings: {
    promptFormat: 'plain',
    smartContext: {
      enabled: false,
      maxFiles: 10
    },
    nativeResume: true,
    recursiveQuery: true,
    cache: {
      injectionMode: 'auto',
      defaultPrefix: '',
      defaultSuffix: ''
    },
    codeIndexMcp: 'codexlens' // Default to CodexLens
  }
};

// ========== Helper Functions ==========

function getProjectConfigPath(projectDir: string): string {
  return path.join(projectDir, '.claude', 'cli-tools.json');
}

function getGlobalConfigPath(): string {
  return path.join(os.homedir(), '.claude', 'cli-tools.json');
}

/**
 * Resolve config path with fallback:
 * 1. Project: {projectDir}/.claude/cli-tools.json
 * 2. Global: ~/.claude/cli-tools.json
 * Returns { path, source } where source is 'project' | 'global' | 'default'
 */
function resolveConfigPath(projectDir: string): { path: string; source: 'project' | 'global' | 'default' } {
  const projectPath = getProjectConfigPath(projectDir);
  if (fs.existsSync(projectPath)) {
    return { path: projectPath, source: 'project' };
  }

  const globalPath = getGlobalConfigPath();
  if (fs.existsSync(globalPath)) {
    return { path: globalPath, source: 'global' };
  }

  return { path: projectPath, source: 'default' };
}

function ensureClaudeDir(projectDir: string): void {
  const claudeDir = path.join(projectDir, '.claude');
  if (!fs.existsSync(claudeDir)) {
    fs.mkdirSync(claudeDir, { recursive: true });
  }
}

// ========== Main Functions ==========

/**
 * Load CLI tools configuration with fallback:
 * 1. Project: {projectDir}/.claude/cli-tools.json
 * 2. Global: ~/.claude/cli-tools.json
 * 3. Default config
 */
export function loadClaudeCliTools(projectDir: string): ClaudeCliToolsConfig & { _source?: string } {
  const resolved = resolveConfigPath(projectDir);

  try {
    if (resolved.source === 'default') {
      // No config file found, return defaults
      return { ...DEFAULT_CONFIG, _source: 'default' };
    }

    const content = fs.readFileSync(resolved.path, 'utf-8');
    const parsed = JSON.parse(content) as Partial<ClaudeCliToolsConfig>;

    // Merge with defaults
    const config = {
      ...DEFAULT_CONFIG,
      ...parsed,
      tools: { ...DEFAULT_CONFIG.tools, ...(parsed.tools || {}) },
      settings: {
        ...DEFAULT_CONFIG.settings,
        ...(parsed.settings || {}),
        smartContext: {
          ...DEFAULT_CONFIG.settings.smartContext,
          ...(parsed.settings?.smartContext || {})
        },
        cache: {
          ...DEFAULT_CONFIG.settings.cache,
          ...(parsed.settings?.cache || {})
        }
      },
      _source: resolved.source
    };

    console.log(`[claude-cli-tools] Loaded config from ${resolved.source}: ${resolved.path}`);
    return config;
  } catch (err) {
    console.error('[claude-cli-tools] Error loading config:', err);
    return { ...DEFAULT_CONFIG, _source: 'default' };
  }
}

/**
 * Save CLI tools configuration to project .claude/cli-tools.json
 * Always saves to project directory (not global)
 */
export function saveClaudeCliTools(projectDir: string, config: ClaudeCliToolsConfig & { _source?: string }): void {
  ensureClaudeDir(projectDir);
  const configPath = getProjectConfigPath(projectDir);

  // Remove internal _source field before saving
  const { _source, ...configToSave } = config;

  try {
    fs.writeFileSync(configPath, JSON.stringify(configToSave, null, 2), 'utf-8');
    console.log(`[claude-cli-tools] Saved config to project: ${configPath}`);
  } catch (err) {
    console.error('[claude-cli-tools] Error saving config:', err);
    throw new Error(`Failed to save CLI tools config: ${err}`);
  }
}

/**
 * Update enabled status for a specific tool
 */
export function updateClaudeToolEnabled(
  projectDir: string,
  toolName: string,
  enabled: boolean
): ClaudeCliToolsConfig {
  const config = loadClaudeCliTools(projectDir);

  if (config.tools[toolName]) {
    config.tools[toolName].enabled = enabled;
    saveClaudeCliTools(projectDir, config);
  }

  return config;
}

/**
 * Update cache settings
 */
export function updateClaudeCacheSettings(
  projectDir: string,
  cacheSettings: Partial<ClaudeCacheSettings>
): ClaudeCliToolsConfig {
  const config = loadClaudeCliTools(projectDir);

  config.settings.cache = {
    ...config.settings.cache,
    ...cacheSettings
  };

  saveClaudeCliTools(projectDir, config);
  return config;
}

/**
 * Update default tool
 */
export function updateClaudeDefaultTool(
  projectDir: string,
  defaultTool: string
): ClaudeCliToolsConfig {
  const config = loadClaudeCliTools(projectDir);
  config.defaultTool = defaultTool;
  saveClaudeCliTools(projectDir, config);
  return config;
}

/**
 * Add custom endpoint
 */
export function addClaudeCustomEndpoint(
  projectDir: string,
  endpoint: { id: string; name: string; enabled: boolean }
): ClaudeCliToolsConfig {
  const config = loadClaudeCliTools(projectDir);

  // Check if endpoint already exists
  const existingIndex = config.customEndpoints.findIndex(e => e.id === endpoint.id);
  if (existingIndex >= 0) {
    config.customEndpoints[existingIndex] = endpoint;
  } else {
    config.customEndpoints.push(endpoint);
  }

  saveClaudeCliTools(projectDir, config);
  return config;
}

/**
 * Remove custom endpoint
 */
export function removeClaudeCustomEndpoint(
  projectDir: string,
  endpointId: string
): ClaudeCliToolsConfig {
  const config = loadClaudeCliTools(projectDir);
  config.customEndpoints = config.customEndpoints.filter(e => e.id !== endpointId);
  saveClaudeCliTools(projectDir, config);
  return config;
}

/**
 * Get config source info
 */
export function getClaudeCliToolsInfo(projectDir: string): {
  projectPath: string;
  globalPath: string;
  activePath: string;
  source: 'project' | 'global' | 'default';
} {
  const resolved = resolveConfigPath(projectDir);
  return {
    projectPath: getProjectConfigPath(projectDir),
    globalPath: getGlobalConfigPath(),
    activePath: resolved.path,
    source: resolved.source
  };
}

/**
 * Update Code Index MCP provider and switch CLAUDE.md reference
 * Strategy: Only modify global user-level CLAUDE.md (~/.claude/CLAUDE.md)
 * This is consistent with Chinese response and Windows platform settings
 */
export function updateCodeIndexMcp(
  projectDir: string,
  provider: 'codexlens' | 'ace'
): { success: boolean; error?: string; config?: ClaudeCliToolsConfig } {
  try {
    // Update config
    const config = loadClaudeCliTools(projectDir);
    config.settings.codeIndexMcp = provider;
    saveClaudeCliTools(projectDir, config);

    // Only update global CLAUDE.md (consistent with Chinese response / Windows platform)
    const globalClaudeMdPath = path.join(os.homedir(), '.claude', 'CLAUDE.md');

    if (!fs.existsSync(globalClaudeMdPath)) {
      // If global CLAUDE.md doesn't exist, check project-level
      const projectClaudeMdPath = path.join(projectDir, '.claude', 'CLAUDE.md');
      if (fs.existsSync(projectClaudeMdPath)) {
        let content = fs.readFileSync(projectClaudeMdPath, 'utf-8');

        // Define patterns for both formats
        const codexlensPattern = /@~\/\.claude\/workflows\/context-tools\.md/g;
        const acePattern = /@~\/\.claude\/workflows\/context-tools-ace\.md/g;

        if (provider === 'ace') {
          content = content.replace(codexlensPattern, '@~/.claude/workflows/context-tools-ace.md');
        } else {
          content = content.replace(acePattern, '@~/.claude/workflows/context-tools.md');
        }

        fs.writeFileSync(projectClaudeMdPath, content, 'utf-8');
        console.log(`[claude-cli-tools] Updated project CLAUDE.md to use ${provider} (no global CLAUDE.md found)`);
      }
    } else {
      // Update global CLAUDE.md (primary target)
      let content = fs.readFileSync(globalClaudeMdPath, 'utf-8');

      const codexlensPattern = /@~\/\.claude\/workflows\/context-tools\.md/g;
      const acePattern = /@~\/\.claude\/workflows\/context-tools-ace\.md/g;

      if (provider === 'ace') {
        content = content.replace(codexlensPattern, '@~/.claude/workflows/context-tools-ace.md');
      } else {
        content = content.replace(acePattern, '@~/.claude/workflows/context-tools.md');
      }

      fs.writeFileSync(globalClaudeMdPath, content, 'utf-8');
      console.log(`[claude-cli-tools] Updated global CLAUDE.md to use ${provider}`);
    }

    return { success: true, config };
  } catch (err) {
    console.error('[claude-cli-tools] Error updating Code Index MCP:', err);
    return { success: false, error: (err as Error).message };
  }
}

/**
 * Get current Code Index MCP provider
 */
export function getCodeIndexMcp(projectDir: string): 'codexlens' | 'ace' {
  const config = loadClaudeCliTools(projectDir);
  return config.settings.codeIndexMcp || 'codexlens';
}
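For orientation, here is a minimal usage sketch of the configuration manager introduced above. It is illustrative only: the `projectDir` value is a placeholder, and the relative `.js` import path is assumed to follow the project's ESM conventions rather than being taken from this diff.

```typescript
// Minimal sketch (not part of the diff): load, inspect, and tweak cli-tools.json
// via the manager above. The projectDir path is a placeholder.
import {
  loadClaudeCliTools,
  updateClaudeToolEnabled,
  getClaudeCliToolsInfo,
} from './claude-cli-tools.js';

const projectDir = '/path/to/workspace'; // placeholder

// Load with project -> global -> default fallback
const config = loadClaudeCliTools(projectDir);
console.log(config._source, config.defaultTool);

// Disable a built-in tool; this always writes {projectDir}/.claude/cli-tools.json
updateClaudeToolEnabled(projectDir, 'qwen', false);

// Inspect which config file is actually active
console.log(getClaudeCliToolsInfo(projectDir));
```

Note that reads fall back to the global file, but saves always target the project workspace, so the first edit effectively forks a project-local copy of the config.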
@@ -5,10 +5,15 @@
 import { z } from 'zod';
 import type { ToolSchema, ToolResult } from '../types/tool.js';
+import type { HistoryIndexEntry } from './cli-history-store.js';
 import { spawn, ChildProcess } from 'child_process';
 import { existsSync, mkdirSync, readFileSync, writeFileSync, unlinkSync, readdirSync, statSync } from 'fs';
 import { join, relative } from 'path';
+
+// LiteLLM integration
+import { executeLiteLLMEndpoint } from './litellm-executor.js';
+import { findEndpointById } from '../config/litellm-api-config-manager.js';
+
 // Native resume support
 import {
   trackNewSession,
@@ -62,7 +67,7 @@ const ParamsSchema = z.object({
   model: z.string().optional(),
   cd: z.string().optional(),
   includeDirs: z.string().optional(),
-  timeout: z.number().default(300000),
+  timeout: z.number().default(0), // 0 = no internal timeout, controlled by external caller (e.g., bash timeout)
   resume: z.union([z.boolean(), z.string()]).optional(), // true = last, string = single ID or comma-separated IDs
   id: z.string().optional(), // Custom execution ID (e.g., IMPL-001-step1)
   noNative: z.boolean().optional(), // Force prompt concatenation instead of native resume
@@ -332,9 +337,8 @@ function buildCommand(params: {
       args.push(nativeResume.sessionId);
     }
     // Codex resume still supports additional flags
-    if (dir) {
-      args.push('-C', dir);
-    }
+    // Note: -C is NOT used because spawn's cwd already sets the working directory
+    // Using both would cause path to be applied twice (e.g., codex-lens/codex-lens)
     // Permission configuration based on mode:
     // - analysis: --full-auto (read-only sandbox, no prompts) - safer for read operations
     // - write/auto: --dangerously-bypass-approvals-and-sandbox (full access for modifications)
@@ -357,9 +361,8 @@ function buildCommand(params: {
   } else {
     // Standard exec mode
     args.push('exec');
-    if (dir) {
-      args.push('-C', dir);
-    }
+    // Note: -C is NOT used because spawn's cwd already sets the working directory
+    // Using both would cause path to be applied twice (e.g., codex-lens/codex-lens)
     // Permission configuration based on mode:
     // - analysis: --full-auto (read-only sandbox, no prompts) - safer for read operations
     // - write/auto: --dangerously-bypass-approvals-and-sandbox (full access for modifications)
@@ -591,6 +594,66 @@ async function executeCliTool(
   const workingDir = cd || process.cwd();
   ensureHistoryDir(workingDir); // Ensure history directory exists
+
+  // NEW: Check if model is a custom LiteLLM endpoint ID
+  if (model && !['gemini', 'qwen', 'codex'].includes(tool)) {
+    const endpoint = findEndpointById(workingDir, model);
+    if (endpoint) {
+      // Route to LiteLLM executor
+      if (onOutput) {
+        onOutput({ type: 'stderr', data: `[Routing to LiteLLM endpoint: ${model}]\n` });
+      }
+
+      const result = await executeLiteLLMEndpoint({
+        prompt,
+        endpointId: model,
+        baseDir: workingDir,
+        cwd: cd,
+        includeDirs: includeDirs ? includeDirs.split(',').map(d => d.trim()) : undefined,
+        enableCache: true,
+        onOutput: onOutput || undefined,
+      });
+
+      // Convert LiteLLM result to ExecutionOutput format
+      const startTime = Date.now();
+      const endTime = Date.now();
+      const duration = endTime - startTime;
+
+      const execution: ExecutionRecord = {
+        id: customId || `${Date.now()}-litellm`,
+        timestamp: new Date(startTime).toISOString(),
+        tool: 'litellm',
+        model: result.model,
+        mode,
+        prompt,
+        status: result.success ? 'success' : 'error',
+        exit_code: result.success ? 0 : 1,
+        duration_ms: duration,
+        output: {
+          stdout: result.output,
+          stderr: result.error || '',
+          truncated: false,
+        },
+      };
+
+      const conversation = convertToConversation(execution);
+
+      // Try to save to history
+      try {
+        saveConversation(workingDir, conversation);
+      } catch (err) {
+        console.error('[CLI Executor] Failed to save LiteLLM history:', (err as Error).message);
+      }
+
+      return {
+        success: result.success,
+        execution,
+        conversation,
+        stdout: result.output,
+        stderr: result.error || '',
+      };
+    }
+  }
+
   // Get SQLite store for native session lookup
   const store = await getSqliteStore(workingDir);
@@ -993,19 +1056,24 @@ async function executeCliTool(
       reject(new Error(`Failed to spawn ${tool}: ${error.message}`));
     });

-    // Timeout handling
-    const timeoutId = setTimeout(() => {
-      timedOut = true;
-      child.kill('SIGTERM');
-      setTimeout(() => {
-        if (!child.killed) {
-          child.kill('SIGKILL');
-        }
-      }, 5000);
-    }, timeout);
+    // Timeout handling (timeout=0 disables internal timeout, controlled by external caller)
+    let timeoutId: NodeJS.Timeout | null = null;
+    if (timeout > 0) {
+      timeoutId = setTimeout(() => {
+        timedOut = true;
+        child.kill('SIGTERM');
+        setTimeout(() => {
+          if (!child.killed) {
+            child.kill('SIGKILL');
+          }
+        }, 5000);
+      }, timeout);
+    }

     child.on('close', () => {
-      clearTimeout(timeoutId);
+      if (timeoutId) {
+        clearTimeout(timeoutId);
+      }
     });
   });
 }
@@ -1050,8 +1118,8 @@ Modes:
       },
       timeout: {
         type: 'number',
-        description: 'Timeout in milliseconds (default: 300000 = 5 minutes)',
-        default: 300000
+        description: 'Timeout in milliseconds (default: 0 = disabled, controlled by external caller)',
+        default: 0
       }
     },
     required: ['tool', 'prompt']
@@ -1982,6 +2050,7 @@ export async function getEnrichedConversation(baseDir: string, ccwId: string) {

 /**
  * Get history with native session info
+ * Supports recursive querying of child projects
  */
 export async function getHistoryWithNativeInfo(baseDir: string, options?: {
   limit?: number;
@@ -1990,9 +2059,75 @@
   status?: string | null;
   category?: ExecutionCategory | null;
   search?: string | null;
+  recursive?: boolean;
 }) {
-  const store = await getSqliteStore(baseDir);
-  return store.getHistoryWithNativeInfo(options || {});
+  const { limit = 50, recursive = false, ...queryOptions } = options || {};
+
+  // Non-recursive mode: query single project
+  if (!recursive) {
+    const store = await getSqliteStore(baseDir);
+    return store.getHistoryWithNativeInfo({ limit, ...queryOptions });
+  }
+
+  // Recursive mode: aggregate data from parent and all child projects
+  const { scanChildProjectsAsync } = await import('../config/storage-paths.js');
+  const childProjects = await scanChildProjectsAsync(baseDir);
+
+  // Use the same type as store.getHistoryWithNativeInfo returns
+  type ExecutionWithNativeAndSource = HistoryIndexEntry & {
+    hasNativeSession: boolean;
+    nativeSessionId?: string;
+    nativeSessionPath?: string;
+  };
+
+  const allExecutions: ExecutionWithNativeAndSource[] = [];
+  let totalCount = 0;
+
+  // Query parent project
+  try {
+    const parentStore = await getSqliteStore(baseDir);
+    const parentResult = parentStore.getHistoryWithNativeInfo({ limit, ...queryOptions });
+    totalCount += parentResult.total;
+
+    for (const exec of parentResult.executions) {
+      allExecutions.push({ ...exec, sourceDir: baseDir });
+    }
+  } catch (error) {
+    if (process.env.DEBUG) {
+      console.error(`[CLI History] Failed to query parent project ${baseDir}:`, error);
+    }
+  }
+
+  // Query all child projects
+  for (const child of childProjects) {
+    try {
+      const childStore = await getSqliteStore(child.projectPath);
+      const childResult = childStore.getHistoryWithNativeInfo({ limit, ...queryOptions });
+      totalCount += childResult.total;
+
+      for (const exec of childResult.executions) {
+        allExecutions.push({ ...exec, sourceDir: child.projectPath });
+      }
+    } catch (error) {
+      if (process.env.DEBUG) {
+        console.error(`[CLI History] Failed to query child project ${child.projectPath}:`, error);
+      }
+    }
+  }
+
+  // Sort by updated_at descending and apply limit
+  allExecutions.sort((a, b) => {
+    const timeA = a.updated_at ? new Date(a.updated_at).getTime() : new Date(a.timestamp).getTime();
+    const timeB = b.updated_at ? new Date(b.updated_at).getTime() : new Date(b.timestamp).getTime();
+    return timeB - timeA;
+  });
+  const limitedExecutions = allExecutions.slice(0, limit);
+
+  return {
+    total: totalCount,
+    count: limitedExecutions.length,
+    executions: limitedExecutions
+  };
 }

 // Export types
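As a quick illustration of the recursive history API added above, the sketch below queries a workspace and all of its child projects in one call. It assumes `getHistoryWithNativeInfo` is imported from the executor module shown in this diff; `baseDir` is a placeholder path, and the fields read from each entry (`timestamp`, `tool`, `status`) follow the `HistoryIndexEntry` shape used by the surrounding code.

```typescript
// Minimal sketch (not part of the diff): aggregate CLI history across a parent
// workspace and its child projects via the recursive option.
async function printRecentHistory(baseDir: string): Promise<void> {
  const { total, count, executions } = await getHistoryWithNativeInfo(baseDir, {
    limit: 20,
    recursive: true, // aggregate parent + children, sorted by updated_at descending
  });

  console.log(`Showing ${count} of ${total} executions`);
  for (const exec of executions) {
    // Assumed HistoryIndexEntry fields; adjust to the actual index schema
    console.log(exec.timestamp, exec.tool, exec.status);
  }
}
```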
@@ -33,6 +33,14 @@ const VENV_PYTHON =
 let bootstrapChecked = false;
 let bootstrapReady = false;
+
+// Venv status cache with TTL
+interface VenvStatusCache {
+  status: ReadyStatus;
+  timestamp: number;
+}
+let venvStatusCache: VenvStatusCache | null = null;
+const VENV_STATUS_TTL = 5 * 60 * 1000; // 5 minutes TTL

 // Track running indexing process for cancellation
 let currentIndexingProcess: ReturnType<typeof spawn> | null = null;
 let currentIndexingAborted = false;
@@ -75,6 +83,9 @@ interface ReadyStatus {
 interface SemanticStatus {
   available: boolean;
   backend?: string;
+  accelerator?: string;
+  providers?: string[];
+  litellmAvailable?: boolean;
   error?: string;
 }
@@ -113,6 +124,13 @@ interface ProgressInfo {
   totalFiles?: number;
 }

+/**
+ * Clear venv status cache (call after install/uninstall operations)
+ */
+function clearVenvStatusCache(): void {
+  venvStatusCache = null;
+}
+
 /**
  * Detect available Python 3 executable
  * @returns Python executable command
@@ -135,17 +153,27 @@ function getSystemPython(): string {

 /**
  * Check if CodexLens venv exists and has required packages
+ * @param force - Force refresh cache (default: false)
  * @returns Ready status
  */
-async function checkVenvStatus(): Promise<ReadyStatus> {
+async function checkVenvStatus(force = false): Promise<ReadyStatus> {
+  // Use cached result if available and not expired
+  if (!force && venvStatusCache && (Date.now() - venvStatusCache.timestamp < VENV_STATUS_TTL)) {
+    return venvStatusCache.status;
+  }
+
   // Check venv exists
   if (!existsSync(CODEXLENS_VENV)) {
-    return { ready: false, error: 'Venv not found' };
+    const result = { ready: false, error: 'Venv not found' };
+    venvStatusCache = { status: result, timestamp: Date.now() };
+    return result;
   }

   // Check python executable exists
   if (!existsSync(VENV_PYTHON)) {
-    return { ready: false, error: 'Python executable not found in venv' };
+    const result = { ready: false, error: 'Python executable not found in venv' };
+    venvStatusCache = { status: result, timestamp: Date.now() };
+    return result;
   }

   // Check codexlens is importable
@@ -166,15 +194,21 @@ async function checkVenvStatus(): Promise<ReadyStatus> {
     });

     child.on('close', (code) => {
+      let result: ReadyStatus;
       if (code === 0) {
-        resolve({ ready: true, version: stdout.trim() });
+        result = { ready: true, version: stdout.trim() };
       } else {
-        resolve({ ready: false, error: `CodexLens not installed: ${stderr}` });
+        result = { ready: false, error: `CodexLens not installed: ${stderr}` };
       }
+      // Cache the result
+      venvStatusCache = { status: result, timestamp: Date.now() };
+      resolve(result);
     });

     child.on('error', (err) => {
-      resolve({ ready: false, error: `Failed to check venv: ${err.message}` });
+      const result = { ready: false, error: `Failed to check venv: ${err.message}` };
+      venvStatusCache = { status: result, timestamp: Date.now() };
+      resolve(result);
     });
   });
 }
@@ -190,18 +224,46 @@ async function checkSemanticStatus(): Promise<SemanticStatus> {
     return { available: false, error: 'CodexLens not installed' };
   }

-  // Check semantic module availability
+  // Check semantic module availability and accelerator info
   return new Promise((resolve) => {
     const checkCode = `
 import sys
+import json
 try:
-    from codexlens.semantic import SEMANTIC_AVAILABLE, SEMANTIC_BACKEND
-    if SEMANTIC_AVAILABLE:
-        print(f"available:{SEMANTIC_BACKEND}")
-    else:
-        print("unavailable")
+    import codexlens.semantic as semantic
+    SEMANTIC_AVAILABLE = bool(getattr(semantic, "SEMANTIC_AVAILABLE", False))
+    SEMANTIC_BACKEND = getattr(semantic, "SEMANTIC_BACKEND", None)
+    LITELLM_AVAILABLE = bool(getattr(semantic, "LITELLM_AVAILABLE", False))
+    result = {
+        "available": SEMANTIC_AVAILABLE,
+        "backend": SEMANTIC_BACKEND if SEMANTIC_AVAILABLE else None,
+        "litellm_available": LITELLM_AVAILABLE,
+    }
+
+    # Get ONNX providers for accelerator info
+    try:
+        import onnxruntime
+        providers = onnxruntime.get_available_providers()
+        result["providers"] = providers
+
+        # Determine accelerator type
+        if "CUDAExecutionProvider" in providers or "TensorrtExecutionProvider" in providers:
+            result["accelerator"] = "CUDA"
+        elif "DmlExecutionProvider" in providers:
+            result["accelerator"] = "DirectML"
+        elif "CoreMLExecutionProvider" in providers:
+            result["accelerator"] = "CoreML"
+        elif "ROCMExecutionProvider" in providers:
+            result["accelerator"] = "ROCm"
+        else:
+            result["accelerator"] = "CPU"
+    except:
+        result["providers"] = []
+        result["accelerator"] = "CPU"
+
+    print(json.dumps(result))
 except Exception as e:
-    print(f"error:{e}")
+    print(json.dumps({"available": False, "error": str(e)}))
 `;
     const child = spawn(VENV_PYTHON, ['-c', checkCode], {
       stdio: ['ignore', 'pipe', 'pipe'],
@@ -220,12 +282,17 @@ except Exception as e:

     child.on('close', (code) => {
       const output = stdout.trim();
-      if (output.startsWith('available:')) {
-        const backend = output.split(':')[1];
-        resolve({ available: true, backend });
-      } else if (output === 'unavailable') {
-        resolve({ available: false, error: 'Semantic dependencies not installed' });
-      } else {
+      try {
+        const result = JSON.parse(output);
+        resolve({
+          available: result.available || false,
+          backend: result.backend,
+          accelerator: result.accelerator || 'CPU',
+          providers: result.providers || [],
+          litellmAvailable: result.litellm_available || false,
+          error: result.error
+        });
+      } catch {
         resolve({ available: false, error: output || stderr || 'Unknown error' });
       }
     });
@@ -237,10 +304,137 @@ except Exception as e:
 }

 /**
- * Install semantic search dependencies (fastembed, ONNX-based, ~200MB)
+ * Ensure LiteLLM embedder dependencies are available in the CodexLens venv.
+ * Installs ccw-litellm into the venv if needed.
+ */
+async function ensureLiteLLMEmbedderReady(): Promise<BootstrapResult> {
+  // Ensure CodexLens venv exists and CodexLens is installed.
+  const readyStatus = await ensureReady();
+  if (!readyStatus.ready) {
+    return { success: false, error: readyStatus.error || 'CodexLens not ready' };
+  }
+
+  // Check if ccw_litellm can be imported
+  const importStatus = await new Promise<{ ok: boolean; error?: string }>((resolve) => {
+    const child = spawn(VENV_PYTHON, ['-c', 'import ccw_litellm; print("OK")'], {
+      stdio: ['ignore', 'pipe', 'pipe'],
+      timeout: 15000,
+    });
+
+    let stderr = '';
+    child.stderr.on('data', (data) => {
+      stderr += data.toString();
+    });
+
+    child.on('close', (code) => {
+      resolve({ ok: code === 0, error: stderr.trim() || undefined });
+    });
+
+    child.on('error', (err) => {
+      resolve({ ok: false, error: err.message });
+    });
+  });
+
+  if (importStatus.ok) {
+    return { success: true };
+  }
+
+  const pipPath =
+    process.platform === 'win32'
+      ? join(CODEXLENS_VENV, 'Scripts', 'pip.exe')
+      : join(CODEXLENS_VENV, 'bin', 'pip');
+
+  try {
+    console.log('[CodexLens] Installing ccw-litellm for LiteLLM embedding backend...');
+
+    const possiblePaths = [
+      join(process.cwd(), 'ccw-litellm'),
+      join(__dirname, '..', '..', '..', 'ccw-litellm'), // ccw/src/tools -> project root
+      join(homedir(), 'ccw-litellm'),
+    ];
+
+    let installed = false;
+    for (const localPath of possiblePaths) {
+      if (existsSync(join(localPath, 'pyproject.toml'))) {
+        console.log(`[CodexLens] Installing ccw-litellm from local path: ${localPath}`);
+        execSync(`"${pipPath}" install -e "${localPath}"`, { stdio: 'inherit' });
+        installed = true;
+        break;
+      }
+    }
+
+    if (!installed) {
+      console.log('[CodexLens] Installing ccw-litellm from PyPI...');
+      execSync(`"${pipPath}" install ccw-litellm`, { stdio: 'inherit' });
+    }
+
+    return { success: true };
+  } catch (err) {
+    return { success: false, error: `Failed to install ccw-litellm: ${(err as Error).message}` };
+  }
+}
+
+/**
+ * GPU acceleration mode for semantic search
+ */
+type GpuMode = 'cpu' | 'cuda' | 'directml';
+
+/**
+ * Detect available GPU acceleration
+ * @returns Detected GPU mode and info
+ */
+async function detectGpuSupport(): Promise<{ mode: GpuMode; available: GpuMode[]; info: string }> {
+  const available: GpuMode[] = ['cpu'];
+  let detectedInfo = 'CPU only';
+
+  // Check for NVIDIA GPU (CUDA)
+  try {
+    if (process.platform === 'win32') {
+      execSync('nvidia-smi', { stdio: 'pipe' });
+      available.push('cuda');
+      detectedInfo = 'NVIDIA GPU detected (CUDA available)';
+    } else {
+      execSync('which nvidia-smi', { stdio: 'pipe' });
+      available.push('cuda');
+      detectedInfo = 'NVIDIA GPU detected (CUDA available)';
+    }
+  } catch {
+    // NVIDIA not available
+  }
+
+  // On Windows, DirectML is always available if DirectX 12 is supported
+  if (process.platform === 'win32') {
+    try {
+      // Check for DirectX 12 support via dxdiag or registry
+      // DirectML works on most modern Windows 10/11 systems
+      available.push('directml');
+      if (available.includes('cuda')) {
+        detectedInfo = 'NVIDIA GPU detected (CUDA & DirectML available)';
+      } else {
+        detectedInfo = 'DirectML available (Windows GPU acceleration)';
+      }
+    } catch {
+      // DirectML check failed
+    }
+  }
+
+  // Recommend best available mode
+  let recommendedMode: GpuMode = 'cpu';
+  if (process.platform === 'win32' && available.includes('directml')) {
+    recommendedMode = 'directml'; // DirectML is easier on Windows (no CUDA toolkit needed)
+  } else if (available.includes('cuda')) {
+    recommendedMode = 'cuda';
+  }
+
+  return { mode: recommendedMode, available, info: detectedInfo };
+}
+
+/**
+ * Install semantic search dependencies with optional GPU acceleration
+ * @param gpuMode - GPU acceleration mode: 'cpu', 'cuda', or 'directml'
  * @returns Bootstrap result
  */
-async function installSemantic(): Promise<BootstrapResult> {
+async function installSemantic(gpuMode: GpuMode = 'cpu'): Promise<BootstrapResult> {
   // First ensure CodexLens is installed
   const venvStatus = await checkVenvStatus();
   if (!venvStatus.ready) {
@@ -252,42 +446,117 @@ async function installSemantic(): Promise<BootstrapResult> {
       ? join(CODEXLENS_VENV, 'Scripts', 'pip.exe')
       : join(CODEXLENS_VENV, 'bin', 'pip');

-  return new Promise((resolve) => {
-    console.log('[CodexLens] Installing semantic search dependencies (fastembed)...');
-    console.log('[CodexLens] Using ONNX-based fastembed backend (~200MB)');
+  // IMPORTANT: Uninstall all onnxruntime variants first to prevent conflicts
+  // Having multiple onnxruntime packages causes provider detection issues
+  const onnxVariants = ['onnxruntime', 'onnxruntime-gpu', 'onnxruntime-directml'];
+  console.log(`[CodexLens] Cleaning up existing ONNX Runtime packages...`);

-    const child = spawn(pipPath, ['install', 'numpy>=1.24', 'fastembed>=0.2'], {
+  for (const pkg of onnxVariants) {
+    try {
+      execSync(`"${pipPath}" uninstall ${pkg} -y`, { stdio: 'pipe' });
+      console.log(`[CodexLens] Removed ${pkg}`);
+    } catch {
+      // Package not installed, ignore
+    }
+  }
+
+  // Build package list based on GPU mode
+  const packages = ['numpy>=1.24', 'fastembed>=0.5', 'hnswlib>=0.8.0'];
+
+  let modeDescription = 'CPU (ONNX Runtime)';
+  let onnxPackage = 'onnxruntime>=1.18.0'; // Default CPU
+
+  if (gpuMode === 'cuda') {
+    onnxPackage = 'onnxruntime-gpu>=1.18.0';
+    modeDescription = 'NVIDIA CUDA GPU acceleration';
+  } else if (gpuMode === 'directml') {
+    onnxPackage = 'onnxruntime-directml>=1.18.0';
+    modeDescription = 'Windows DirectML GPU acceleration';
+  }
+
+  return new Promise((resolve) => {
+    console.log(`[CodexLens] Installing semantic search dependencies...`);
+    console.log(`[CodexLens] Mode: ${modeDescription}`);
+    console.log(`[CodexLens] ONNX Runtime: ${onnxPackage}`);
+    console.log(`[CodexLens] Packages: ${packages.join(', ')}`);
+
+    // Install ONNX Runtime first with force-reinstall to ensure clean state
+    const installOnnx = spawn(pipPath, ['install', '--force-reinstall', onnxPackage], {
       stdio: ['ignore', 'pipe', 'pipe'],
-      timeout: 600000, // 10 minutes for potential model download
+      timeout: 600000, // 10 minutes for GPU packages
     });

-    let stdout = '';
-    let stderr = '';
+    let onnxStdout = '';
+    let onnxStderr = '';

-    child.stdout.on('data', (data) => {
-      stdout += data.toString();
-      // Log progress
+    installOnnx.stdout.on('data', (data) => {
+      onnxStdout += data.toString();
       const line = data.toString().trim();
-      if (line.includes('Downloading') || line.includes('Installing') || line.includes('Collecting')) {
+      if (line.includes('Downloading') || line.includes('Installing')) {
         console.log(`[CodexLens] ${line}`);
       }
     });

-    child.stderr.on('data', (data) => {
-      stderr += data.toString();
+    installOnnx.stderr.on('data', (data) => {
+      onnxStderr += data.toString();
     });

-    child.on('close', (code) => {
-      if (code === 0) {
-        console.log('[CodexLens] Semantic dependencies installed successfully');
-        resolve({ success: true });
-      } else {
-        resolve({ success: false, error: `Installation failed: ${stderr || stdout}` });
+    installOnnx.on('close', (onnxCode) => {
+      if (onnxCode !== 0) {
+        resolve({ success: false, error: `Failed to install ${onnxPackage}: ${onnxStderr || onnxStdout}` });
+        return;
       }
+
+      console.log(`[CodexLens] ${onnxPackage} installed successfully`);
+
+      // Now install remaining packages
+      const child = spawn(pipPath, ['install', ...packages], {
+        stdio: ['ignore', 'pipe', 'pipe'],
+        timeout: 600000,
+      });
+
+      let stdout = '';
+      let stderr = '';
+
+      child.stdout.on('data', (data) => {
+        stdout += data.toString();
+        const line = data.toString().trim();
+        if (line.includes('Downloading') || line.includes('Installing') || line.includes('Collecting')) {
+          console.log(`[CodexLens] ${line}`);
+        }
+      });
+
+      child.stderr.on('data', (data) => {
+        stderr += data.toString();
+      });
+
+      child.on('close', (code) => {
+        if (code === 0) {
+          // IMPORTANT: fastembed installs onnxruntime (CPU) as dependency, which conflicts
+          // with onnxruntime-directml/gpu. Reinstall the GPU version to ensure it takes precedence.
+          if (gpuMode !== 'cpu') {
+            try {
+              console.log(`[CodexLens] Reinstalling ${onnxPackage} to ensure GPU provider works...`);
+              execSync(`"${pipPath}" install --force-reinstall ${onnxPackage}`, { stdio: 'pipe', timeout: 300000 });
+              console.log(`[CodexLens] ${onnxPackage} reinstalled successfully`);
            } catch (e) {
+              console.warn(`[CodexLens] Warning: Failed to reinstall ${onnxPackage}: ${(e as Error).message}`);
+            }
+          }
+          console.log(`[CodexLens] Semantic dependencies installed successfully (${gpuMode} mode)`);
+          resolve({ success: true, message: `Installed with ${modeDescription}` });
+        } else {
+          resolve({ success: false, error: `Installation failed: ${stderr || stdout}` });
+        }
+      });
+
+      child.on('error', (err) => {
+        resolve({ success: false, error: `Failed to run pip: ${err.message}` });
+      });
     });

-    child.on('error', (err) => {
-      resolve({ success: false, error: `Failed to run pip: ${err.message}` });
+    installOnnx.on('error', (err) => {
+      resolve({ success: false, error: `Failed to install ONNX Runtime: ${err.message}` });
     });
   });
 }
@@ -343,6 +612,8 @@ async function bootstrapVenv(): Promise<BootstrapResult> {
       execSync(`"${pipPath}" install codexlens`, { stdio: 'inherit' });
     }
+    // Clear cache after successful installation
+    clearVenvStatusCache();
     return { success: true };
   } catch (err) {
     return { success: false, error: `Failed to install codexlens: ${(err as Error).message}` };
@@ -1062,6 +1333,7 @@ async function uninstallCodexLens(): Promise<BootstrapResult> {
     // Reset bootstrap cache
     bootstrapChecked = false;
     bootstrapReady = false;
+    clearVenvStatusCache();

     console.log('[CodexLens] CodexLens uninstalled successfully');
     return { success: true, message: 'CodexLens uninstalled successfully' };
@@ -1126,7 +1398,20 @@ function isIndexingInProgress(): boolean {
 export type { ProgressInfo, ExecuteOptions };

 // Export for direct usage
-export { ensureReady, executeCodexLens, checkVenvStatus, bootstrapVenv, checkSemanticStatus, installSemantic, uninstallCodexLens, cancelIndexing, isIndexingInProgress };
+export {
+  ensureReady,
+  executeCodexLens,
+  checkVenvStatus,
+  bootstrapVenv,
+  checkSemanticStatus,
+  ensureLiteLLMEmbedderReady,
+  installSemantic,
+  detectGpuSupport,
+  uninstallCodexLens,
+  cancelIndexing,
+  isIndexingInProgress,
+};
+export type { GpuMode };

 // Backward-compatible export for tests
 export const codexLensTool = {
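To tie the new bootstrap pieces together, here is a minimal sketch of how a caller might combine GPU detection, semantic install, and the TTL-cached venv check. It assumes `detectGpuSupport`, `installSemantic`, and `checkVenvStatus` are imported from the CodexLens bootstrap module above; it is illustrative, not part of the diff.

```typescript
// Minimal sketch (not part of the diff): pick the recommended accelerator,
// install the semantic stack with it, then force-refresh the cached venv status.
async function setupSemanticSearch(): Promise<void> {
  const gpu = await detectGpuSupport();
  console.log(`[setup] ${gpu.info} (available: ${gpu.available.join(', ')})`);

  const result = await installSemantic(gpu.mode); // 'cpu' | 'cuda' | 'directml'
  if (!result.success) {
    throw new Error(result.error ?? 'Semantic install failed');
  }

  // force = true bypasses the 5-minute TTL cache added in this diff
  const status = await checkVenvStatus(true);
  console.log('[setup] CodexLens ready:', status.ready, status.version ?? '');
}
```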
ccw/src/tools/context-cache-store.ts (new file, 368 lines)
@@ -0,0 +1,368 @@
/**
 * Context Cache Store - In-memory cache with TTL and LRU eviction
 * Stores packed file contents with session-based lifecycle management
 */

/** Cache entry metadata */
export interface CacheMetadata {
  files: string[]; // Source file paths
  patterns: string[]; // Original @patterns
  total_bytes: number; // Total content bytes
  file_count: number; // Number of files packed
}

/** Cache entry structure */
export interface CacheEntry {
  session_id: string;
  created_at: number; // Timestamp ms
  accessed_at: number; // Last access timestamp
  ttl: number; // TTL in ms
  content: string; // Packed file content
  metadata: CacheMetadata;
}

/** Paginated read result */
export interface PagedReadResult {
  content: string; // Current page content
  offset: number; // Current byte offset
  limit: number; // Requested bytes
  total_bytes: number; // Total content bytes
  has_more: boolean; // Has more content
  next_offset: number | null; // Next page offset (null if no more)
}

/** Cache status info */
export interface CacheStatus {
  entries: number; // Total cache entries
  total_bytes: number; // Total bytes cached
  oldest_session: string | null;
  newest_session: string | null;
}

/** Session status info */
export interface SessionStatus {
  session_id: string;
  exists: boolean;
  files?: string[];
  file_count?: number;
  total_bytes?: number;
  created_at?: string;
  expires_at?: string;
  accessed_at?: string;
  ttl_remaining_ms?: number;
}

/** Default configuration */
const DEFAULT_MAX_ENTRIES = 100;
const DEFAULT_TTL_MS = 30 * 60 * 1000; // 30 minutes
const DEFAULT_PAGE_SIZE = 65536; // 64KB

/**
 * Context Cache Store singleton
 * Manages in-memory cache with TTL expiration and LRU eviction
 */
class ContextCacheStore {
  private cache: Map<string, CacheEntry> = new Map();
  private maxEntries: number;
  private defaultTTL: number;
  private cleanupInterval: NodeJS.Timeout | null = null;

  constructor(options: {
    maxEntries?: number;
    defaultTTL?: number;
    cleanupIntervalMs?: number;
  } = {}) {
    this.maxEntries = options.maxEntries ?? DEFAULT_MAX_ENTRIES;
    this.defaultTTL = options.defaultTTL ?? DEFAULT_TTL_MS;

    // Start periodic cleanup
    const cleanupMs = options.cleanupIntervalMs ?? 60000; // 1 minute
    this.cleanupInterval = setInterval(() => {
      this.cleanupExpired();
    }, cleanupMs);

    // Allow cleanup to not keep process alive
    if (this.cleanupInterval.unref) {
      this.cleanupInterval.unref();
    }
  }

  /**
   * Store packed content in cache
   */
  set(
    sessionId: string,
    content: string,
    metadata: CacheMetadata,
    ttl?: number
  ): CacheEntry {
    const now = Date.now();
    const entryTTL = ttl ?? this.defaultTTL;

    // Evict if at capacity
    if (this.cache.size >= this.maxEntries && !this.cache.has(sessionId)) {
      this.evictOldest();
    }

    const entry: CacheEntry = {
      session_id: sessionId,
      created_at: now,
      accessed_at: now,
      ttl: entryTTL,
      content,
      metadata,
    };

    this.cache.set(sessionId, entry);
    return entry;
  }

  /**
   * Get cache entry by session ID
   */
  get(sessionId: string): CacheEntry | null {
    const entry = this.cache.get(sessionId);

    if (!entry) {
      return null;
    }

    // Check TTL expiration
    if (this.isExpired(entry)) {
      this.cache.delete(sessionId);
      return null;
    }

    // Update access time (LRU)
    entry.accessed_at = Date.now();
    return entry;
  }

  /**
   * Read content with pagination
   */
  read(
    sessionId: string,
    offset: number = 0,
    limit: number = DEFAULT_PAGE_SIZE
  ): PagedReadResult | null {
    const entry = this.get(sessionId);

    if (!entry) {
      return null;
    }

    const content = entry.content;
    const totalBytes = Buffer.byteLength(content, 'utf-8');

    // Handle byte-based offset for UTF-8
    // For simplicity, we use character-based slicing
    // This is approximate but works for most use cases
    const charOffset = Math.min(offset, content.length);
    const charLimit = Math.min(limit, content.length - charOffset);

    const pageContent = content.slice(charOffset, charOffset + charLimit);
    const endOffset = charOffset + pageContent.length;
    const hasMore = endOffset < content.length;

    return {
      content: pageContent,
      offset: charOffset,
      limit: charLimit,
      total_bytes: totalBytes,
      has_more: hasMore,
      next_offset: hasMore ? endOffset : null,
    };
  }

  /**
   * Release (delete) cache entry
   */
  release(sessionId: string): { released: boolean; freed_bytes: number } {
    const entry = this.cache.get(sessionId);

    if (!entry) {
      return { released: false, freed_bytes: 0 };
    }

    const freedBytes = entry.metadata.total_bytes;
    this.cache.delete(sessionId);

    return { released: true, freed_bytes: freedBytes };
  }

  /**
   * Get session status
   */
  getSessionStatus(sessionId: string): SessionStatus {
    const entry = this.cache.get(sessionId);

    if (!entry) {
      return { session_id: sessionId, exists: false };
    }

    // Check if expired
|
if (this.isExpired(entry)) {
|
||||||
|
this.cache.delete(sessionId);
|
||||||
|
return { session_id: sessionId, exists: false };
|
||||||
|
}
|
||||||
|
|
||||||
|
const now = Date.now();
|
||||||
|
const expiresAt = entry.created_at + entry.ttl;
|
||||||
|
const ttlRemaining = Math.max(0, expiresAt - now);
|
||||||
|
|
||||||
|
return {
|
||||||
|
session_id: sessionId,
|
||||||
|
exists: true,
|
||||||
|
files: entry.metadata.files,
|
||||||
|
file_count: entry.metadata.file_count,
|
||||||
|
total_bytes: entry.metadata.total_bytes,
|
||||||
|
created_at: new Date(entry.created_at).toISOString(),
|
||||||
|
expires_at: new Date(expiresAt).toISOString(),
|
||||||
|
accessed_at: new Date(entry.accessed_at).toISOString(),
|
||||||
|
ttl_remaining_ms: ttlRemaining,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get overall cache status
|
||||||
|
*/
|
||||||
|
getStatus(): CacheStatus {
|
||||||
|
let totalBytes = 0;
|
||||||
|
let oldest: CacheEntry | null = null;
|
||||||
|
let newest: CacheEntry | null = null;
|
||||||
|
|
||||||
|
for (const entry of this.cache.values()) {
|
||||||
|
// Skip expired entries
|
||||||
|
if (this.isExpired(entry)) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
totalBytes += entry.metadata.total_bytes;
|
||||||
|
|
||||||
|
if (!oldest || entry.created_at < oldest.created_at) {
|
||||||
|
oldest = entry;
|
||||||
|
}
|
||||||
|
if (!newest || entry.created_at > newest.created_at) {
|
||||||
|
newest = entry;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
entries: this.cache.size,
|
||||||
|
total_bytes: totalBytes,
|
||||||
|
oldest_session: oldest?.session_id ?? null,
|
||||||
|
newest_session: newest?.session_id ?? null,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Cleanup expired entries
|
||||||
|
*/
|
||||||
|
cleanupExpired(): { removed: number } {
|
||||||
|
let removed = 0;
|
||||||
|
const now = Date.now();
|
||||||
|
|
||||||
|
for (const [sessionId, entry] of this.cache.entries()) {
|
||||||
|
if (this.isExpired(entry, now)) {
|
||||||
|
this.cache.delete(sessionId);
|
||||||
|
removed++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return { removed };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Clear all cache entries
|
||||||
|
*/
|
||||||
|
clear(): { removed: number } {
|
||||||
|
const count = this.cache.size;
|
||||||
|
this.cache.clear();
|
||||||
|
return { removed: count };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if entry is expired
|
||||||
|
*/
|
||||||
|
private isExpired(entry: CacheEntry, now?: number): boolean {
|
||||||
|
const currentTime = now ?? Date.now();
|
||||||
|
return currentTime > entry.created_at + entry.ttl;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Evict oldest entry (LRU)
|
||||||
|
*/
|
||||||
|
private evictOldest(): void {
|
||||||
|
let oldest: [string, CacheEntry] | null = null;
|
||||||
|
|
||||||
|
for (const [sessionId, entry] of this.cache.entries()) {
|
||||||
|
if (!oldest || entry.accessed_at < oldest[1].accessed_at) {
|
||||||
|
oldest = [sessionId, entry];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (oldest) {
|
||||||
|
this.cache.delete(oldest[0]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Stop cleanup timer (for graceful shutdown)
|
||||||
|
*/
|
||||||
|
destroy(): void {
|
||||||
|
if (this.cleanupInterval) {
|
||||||
|
clearInterval(this.cleanupInterval);
|
||||||
|
this.cleanupInterval = null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* List all session IDs
|
||||||
|
*/
|
||||||
|
listSessions(): string[] {
|
||||||
|
return Array.from(this.cache.keys());
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if session exists and is valid
|
||||||
|
*/
|
||||||
|
has(sessionId: string): boolean {
|
||||||
|
const entry = this.cache.get(sessionId);
|
||||||
|
if (!entry) return false;
|
||||||
|
if (this.isExpired(entry)) {
|
||||||
|
this.cache.delete(sessionId);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Singleton instance
|
||||||
|
let cacheInstance: ContextCacheStore | null = null;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the singleton cache instance
|
||||||
|
*/
|
||||||
|
export function getContextCacheStore(options?: {
|
||||||
|
maxEntries?: number;
|
||||||
|
defaultTTL?: number;
|
||||||
|
cleanupIntervalMs?: number;
|
||||||
|
}): ContextCacheStore {
|
||||||
|
if (!cacheInstance) {
|
||||||
|
cacheInstance = new ContextCacheStore(options);
|
||||||
|
}
|
||||||
|
return cacheInstance;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Reset the cache instance (for testing)
|
||||||
|
*/
|
||||||
|
export function resetContextCacheStore(): void {
|
||||||
|
if (cacheInstance) {
|
||||||
|
cacheInstance.destroy();
|
||||||
|
cacheInstance = null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export { ContextCacheStore };
|
||||||
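A minimal usage sketch of the store above, based only on the API shown in this file; the session ID, TTL, and chunk sizes are illustrative:

import { getContextCacheStore, resetContextCacheStore } from './context-cache-store.js';

// Obtain the singleton and store packed content under a session ID
const store = getContextCacheStore({ defaultTTL: 5 * 60 * 1000 }); // 5-minute TTL for this sketch
store.set('ctx-demo', 'packed file contents...', {
  files: ['src/a.ts'],
  patterns: ['src/**/*.ts'],
  total_bytes: 24,
  file_count: 1,
});

// Page through the cached content in small chunks
let offset = 0;
let page = store.read('ctx-demo', offset, 10);
while (page && page.has_more) {
  offset = page.next_offset ?? 0;
  page = store.read('ctx-demo', offset, 10);
}

store.release('ctx-demo');
resetContextCacheStore(); // stop the cleanup timer when done (e.g., in tests)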
ccw/src/tools/context-cache.ts (new file, 393 lines)
@@ -0,0 +1,393 @@
/**
 * Context Cache MCP Tool
 * Pack files by @patterns, cache in memory, paginated read by session ID
 *
 * Operations:
 * - pack: Parse @patterns and cache file contents
 * - read: Paginated read from cache by session ID
 * - status: Get cache/session status
 * - release: Release session cache
 * - cleanup: Cleanup expired caches
 */

import { z } from 'zod';
import type { ToolSchema, ToolResult } from '../types/tool.js';
import { parseAndPack } from './pattern-parser.js';
import {
  getContextCacheStore,
  type CacheMetadata,
  type PagedReadResult,
  type CacheStatus,
  type SessionStatus,
} from './context-cache-store.js';

// Zod schema for parameter validation
const OperationEnum = z.enum(['pack', 'read', 'status', 'release', 'cleanup']);

const ParamsSchema = z.object({
  operation: OperationEnum,
  // Pack parameters
  patterns: z.array(z.string()).optional(),
  content: z.string().optional(), // Direct text content to cache
  session_id: z.string().optional(),
  cwd: z.string().optional(),
  include_dirs: z.array(z.string()).optional(),
  ttl: z.number().optional(),
  include_metadata: z.boolean().optional().default(true),
  max_file_size: z.number().optional(),
  // Read parameters
  offset: z.number().optional().default(0),
  limit: z.number().optional().default(65536), // 64KB default
});

type Params = z.infer<typeof ParamsSchema>;

// Result types
interface PackResult {
  operation: 'pack';
  session_id: string;
  files_packed: number;
  files_skipped: number;
  total_bytes: number;
  patterns_matched: number;
  patterns_failed: number;
  expires_at: string;
  errors?: string[];
}

interface ReadResult {
  operation: 'read';
  session_id: string;
  content: string;
  offset: number;
  limit: number;
  total_bytes: number;
  has_more: boolean;
  next_offset: number | null;
}

interface StatusResult {
  operation: 'status';
  session_id?: string;
  session?: SessionStatus;
  cache?: CacheStatus;
}

interface ReleaseResult {
  operation: 'release';
  session_id: string;
  released: boolean;
  freed_bytes: number;
}

interface CleanupResult {
  operation: 'cleanup';
  removed: number;
  remaining: number;
}

type OperationResult = PackResult | ReadResult | StatusResult | ReleaseResult | CleanupResult;

/**
 * Generate session ID if not provided
 */
function generateSessionId(): string {
  return `ctx-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
}

/**
 * Operation: pack
 * Parse @patterns and/or cache text content directly
 */
async function executePack(params: Params): Promise<PackResult> {
  const {
    patterns,
    content,
    session_id,
    cwd,
    include_dirs,
    ttl,
    include_metadata,
    max_file_size,
  } = params;

  // Require at least patterns or content
  if ((!patterns || patterns.length === 0) && !content) {
    throw new Error('Either "patterns" or "content" is required for pack operation');
  }

  const sessionId = session_id || generateSessionId();
  const store = getContextCacheStore();

  let finalContent = '';
  let filesPacked = 0;
  let filesSkipped = 0;
  let totalBytes = 0;
  let patternsMatched = 0;
  let patternsFailed = 0;
  let errors: string[] = [];
  let files: string[] = [];
  let parsedPatterns: string[] = [];

  // Pack files from patterns if provided
  if (patterns && patterns.length > 0) {
    const result = await parseAndPack(patterns, {
      cwd: cwd || process.cwd(),
      includeDirs: include_dirs,
      includeMetadata: include_metadata,
      maxFileSize: max_file_size,
    });

    finalContent = result.content;
    filesPacked = result.packedFiles.length;
    filesSkipped = result.skippedFiles.length;
    totalBytes = result.totalBytes;
    patternsMatched = result.parseResult.stats.matched_patterns;
    patternsFailed = result.parseResult.stats.total_patterns - patternsMatched;
    errors = result.parseResult.errors;
    files = result.packedFiles;
    parsedPatterns = result.parseResult.patterns;
  }

  // Append direct content if provided
  if (content) {
    if (finalContent) {
      finalContent += '\n\n=== ADDITIONAL CONTENT ===\n' + content;
    } else {
      finalContent = content;
    }
    totalBytes += Buffer.byteLength(content, 'utf-8');
  }

  // Store in cache
  const metadata: CacheMetadata = {
    files,
    patterns: parsedPatterns,
    total_bytes: totalBytes,
    file_count: filesPacked,
  };

  const entry = store.set(sessionId, finalContent, metadata, ttl);
  const expiresAt = new Date(entry.created_at + entry.ttl).toISOString();

  return {
    operation: 'pack',
    session_id: sessionId,
    files_packed: filesPacked,
    files_skipped: filesSkipped,
    total_bytes: totalBytes,
    patterns_matched: patternsMatched,
    patterns_failed: patternsFailed,
    expires_at: expiresAt,
    errors: errors.length > 0 ? errors : undefined,
  };
}

/**
 * Operation: read
 * Paginated read from cache
 */
function executeRead(params: Params): ReadResult {
  const { session_id, offset, limit } = params;

  if (!session_id) {
    throw new Error('Parameter "session_id" is required for read operation');
  }

  const store = getContextCacheStore();
  const result = store.read(session_id, offset, limit);

  if (!result) {
    throw new Error(`Session "${session_id}" not found or expired`);
  }

  return {
    operation: 'read',
    session_id,
    content: result.content,
    offset: result.offset,
    limit: result.limit,
    total_bytes: result.total_bytes,
    has_more: result.has_more,
    next_offset: result.next_offset,
  };
}

/**
 * Operation: status
 * Get session or overall cache status
 */
function executeStatus(params: Params): StatusResult {
  const { session_id } = params;
  const store = getContextCacheStore();

  if (session_id) {
    // Session-specific status
    const sessionStatus = store.getSessionStatus(session_id);
    return {
      operation: 'status',
      session_id,
      session: sessionStatus,
    };
  }

  // Overall cache status
  const cacheStatus = store.getStatus();
  return {
    operation: 'status',
    cache: cacheStatus,
  };
}

/**
 * Operation: release
 * Release session cache
 */
function executeRelease(params: Params): ReleaseResult {
  const { session_id } = params;

  if (!session_id) {
    throw new Error('Parameter "session_id" is required for release operation');
  }

  const store = getContextCacheStore();
  const result = store.release(session_id);

  return {
    operation: 'release',
    session_id,
    released: result.released,
    freed_bytes: result.freed_bytes,
  };
}

/**
 * Operation: cleanup
 * Cleanup expired caches
 */
function executeCleanup(): CleanupResult {
  const store = getContextCacheStore();
  const result = store.cleanupExpired();
  const status = store.getStatus();

  return {
    operation: 'cleanup',
    removed: result.removed,
    remaining: status.entries,
  };
}

/**
 * Route to operation handler
 */
async function execute(params: Params): Promise<OperationResult> {
  const { operation } = params;

  switch (operation) {
    case 'pack':
      return executePack(params);
    case 'read':
      return executeRead(params);
    case 'status':
      return executeStatus(params);
    case 'release':
      return executeRelease(params);
    case 'cleanup':
      return executeCleanup();
    default:
      throw new Error(
        `Unknown operation: ${operation}. Valid operations: pack, read, status, release, cleanup`
      );
  }
}

// MCP Tool Schema
export const schema: ToolSchema = {
  name: 'context_cache',
  description: `Context file cache with @pattern and text content support, paginated reading.

Usage:
  context_cache(operation="pack", patterns=["@src/**/*.ts"], session_id="...")
  context_cache(operation="pack", content="text to cache", session_id="...")
  context_cache(operation="pack", patterns=["@src/**/*.ts"], content="extra text")
  context_cache(operation="read", session_id="...", offset=0, limit=65536)
  context_cache(operation="status", session_id="...")
  context_cache(operation="release", session_id="...")
  context_cache(operation="cleanup")

Pattern syntax:
  @src/**/*.ts - All TypeScript files in src
  @CLAUDE.md - Specific file
  @../shared/**/* - Sibling directory (needs include_dirs)`,
  inputSchema: {
    type: 'object',
    properties: {
      operation: {
        type: 'string',
        enum: ['pack', 'read', 'status', 'release', 'cleanup'],
        description: 'Operation to perform',
      },
      patterns: {
        type: 'array',
        items: { type: 'string' },
        description: '@patterns to pack (e.g., ["@src/**/*.ts"]). Either patterns or content required for pack.',
      },
      content: {
        type: 'string',
        description: 'Direct text content to cache. Either patterns or content required for pack.',
      },
      session_id: {
        type: 'string',
        description: 'Cache session ID. Auto-generated for pack if not provided.',
      },
      cwd: {
        type: 'string',
        description: 'Working directory for pattern resolution (default: process.cwd())',
      },
      include_dirs: {
        type: 'array',
        items: { type: 'string' },
        description: 'Additional directories to include for pattern matching',
      },
      ttl: {
        type: 'number',
        description: 'Cache TTL in milliseconds (default: 1800000 = 30min)',
      },
      include_metadata: {
        type: 'boolean',
        description: 'Include file metadata headers in packed content (default: true)',
      },
      max_file_size: {
        type: 'number',
        description: 'Max file size in bytes to include (default: 1MB). Larger files are skipped.',
      },
      offset: {
        type: 'number',
        description: 'Byte offset for paginated read (default: 0)',
      },
      limit: {
        type: 'number',
        description: 'Max bytes to read (default: 65536 = 64KB)',
      },
    },
    required: ['operation'],
  },
};

// Handler function
export async function handler(
  params: Record<string, unknown>
): Promise<ToolResult<OperationResult>> {
  const parsed = ParamsSchema.safeParse(params);

  if (!parsed.success) {
    return { success: false, error: `Invalid params: ${parsed.error.message}` };
  }

  try {
    const result = await execute(parsed.data);
    return { success: true, result };
  } catch (error) {
    return { success: false, error: (error as Error).message };
  }
}
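A hedged sketch of driving the exported handler directly, pairing pack with a paginated read loop; the pattern and cwd are placeholders, and the calls are assumed to run inside an async context:

import { handler as contextCache } from './context-cache.js';

const packed = await contextCache({ operation: 'pack', patterns: ['@src/**/*.ts'], cwd: process.cwd() });
if (packed.success && packed.result && packed.result.operation === 'pack') {
  const sessionId = packed.result.session_id;
  let offset = 0;
  let hasMore = true;
  while (hasMore) {
    const page = await contextCache({ operation: 'read', session_id: sessionId, offset, limit: 65536 });
    if (!page.success || !page.result || page.result.operation !== 'read') break;
    process.stdout.write(page.result.content);
    hasMore = page.result.has_more;
    offset = page.result.next_offset ?? 0;
  }
  await contextCache({ operation: 'release', session_id: sessionId });
}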
@@ -16,6 +16,8 @@ const OperationEnum = z.enum(['list', 'import', 'export', 'summary', 'embed', 's
 const ParamsSchema = z.object({
   operation: OperationEnum,
+  // Path parameter - highest priority for project resolution
+  path: z.string().optional(),
   text: z.string().optional(),
   id: z.string().optional(),
   tool: z.enum(['gemini', 'qwen']).optional().default('gemini'),
@@ -106,17 +108,21 @@ interface EmbedStatusResult {
 type OperationResult = ListResult | ImportResult | ExportResult | SummaryResult | EmbedResult | SearchResult | EmbedStatusResult;

 /**
- * Get project path from current working directory
+ * Get project path - uses explicit path if provided, otherwise falls back to current working directory
+ * Priority: path parameter > getProjectRoot()
  */
-function getProjectPath(): string {
+function getProjectPath(explicitPath?: string): string {
+  if (explicitPath) {
+    return explicitPath;
+  }
   return getProjectRoot();
 }

 /**
- * Get database path for current project
+ * Get database path for project
  */
-function getDatabasePath(): string {
-  const projectPath = getProjectPath();
+function getDatabasePath(explicitPath?: string): string {
+  const projectPath = getProjectPath(explicitPath);
   const paths = StoragePaths.project(projectPath);
   return join(paths.root, 'core-memory', 'core_memory.db');
 }
@@ -129,8 +135,8 @@ const PREVIEW_MAX_LENGTH = 100;
  * List all memories with compact output
  */
 function executeList(params: Params): ListResult {
-  const { limit } = params;
-  const store = getCoreMemoryStore(getProjectPath());
+  const { limit, path } = params;
+  const store = getCoreMemoryStore(getProjectPath(path));
   const memories = store.getMemories({ limit }) as CoreMemory[];

   // Convert to compact format with truncated preview
@@ -160,13 +166,13 @@ function executeList(params: Params): ListResult {
  * Import text as a new memory
  */
 function executeImport(params: Params): ImportResult {
-  const { text } = params;
+  const { text, path } = params;

   if (!text || text.trim() === '') {
     throw new Error('Parameter "text" is required for import operation');
   }

-  const store = getCoreMemoryStore(getProjectPath());
+  const store = getCoreMemoryStore(getProjectPath(path));
   const memory = store.upsertMemory({
     content: text.trim(),
   });
@@ -184,14 +190,14 @@ function executeImport(params: Params): ImportResult {
  * Searches current project first, then all projects if not found
  */
 function executeExport(params: Params): ExportResult {
-  const { id } = params;
+  const { id, path } = params;

   if (!id) {
     throw new Error('Parameter "id" is required for export operation');
   }

-  // Try current project first
-  const store = getCoreMemoryStore(getProjectPath());
+  // Try current project first (or explicit path if provided)
+  const store = getCoreMemoryStore(getProjectPath(path));
   let memory = store.getMemory(id);

   // If not found, search across all projects
@@ -218,13 +224,13 @@ function executeExport(params: Params): ExportResult {
  * Generate AI summary for a memory
  */
 async function executeSummary(params: Params): Promise<SummaryResult> {
-  const { id, tool = 'gemini' } = params;
+  const { id, tool = 'gemini', path } = params;

   if (!id) {
     throw new Error('Parameter "id" is required for summary operation');
   }

-  const store = getCoreMemoryStore(getProjectPath());
+  const store = getCoreMemoryStore(getProjectPath(path));
   const memory = store.getMemory(id);

   if (!memory) {
@@ -245,8 +251,8 @@ async function executeSummary(params: Params): Promise<SummaryResult> {
  * Generate embeddings for memory chunks
  */
 async function executeEmbed(params: Params): Promise<EmbedResult> {
-  const { source_id, batch_size = 8, force = false } = params;
-  const dbPath = getDatabasePath();
+  const { source_id, batch_size = 8, force = false, path } = params;
+  const dbPath = getDatabasePath(path);

   const result = await MemoryEmbedder.generateEmbeddings(dbPath, {
     sourceId: source_id,
@@ -272,13 +278,13 @@ async function executeEmbed(params: Params): Promise<EmbedResult> {
  * Search memory chunks using semantic search
  */
 async function executeSearch(params: Params): Promise<SearchResult> {
-  const { query, top_k = 10, min_score = 0.3, source_type } = params;
+  const { query, top_k = 10, min_score = 0.3, source_type, path } = params;

   if (!query) {
     throw new Error('Parameter "query" is required for search operation');
   }

-  const dbPath = getDatabasePath();
+  const dbPath = getDatabasePath(path);

   const result = await MemoryEmbedder.searchMemories(dbPath, query, {
     topK: top_k,
@@ -309,7 +315,8 @@ async function executeSearch(params: Params): Promise<SearchResult> {
  * Get embedding status statistics
  */
 async function executeEmbedStatus(params: Params): Promise<EmbedStatusResult> {
-  const dbPath = getDatabasePath();
+  const { path } = params;
+  const dbPath = getDatabasePath(path);

   const result = await MemoryEmbedder.getEmbeddingStatus(dbPath);

@@ -368,6 +375,9 @@ Usage:
 core_memory(operation="search", query="authentication") # Search memories semantically
 core_memory(operation="embed_status") # Check embedding status
+
+Path parameter (highest priority):
+core_memory(operation="list", path="/path/to/project") # Use specific project path

 Memory IDs use format: CMEM-YYYYMMDD-HHMMSS`,
   inputSchema: {
     type: 'object',
@@ -377,6 +387,10 @@ Memory IDs use format: CMEM-YYYYMMDD-HHMMSS`,
         enum: ['list', 'import', 'export', 'summary', 'embed', 'search', 'embed_status'],
         description: 'Operation to perform',
       },
+      path: {
+        type: 'string',
+        description: 'Project path (highest priority - overrides auto-detected project root)',
+      },
       text: {
        type: 'string',
        description: 'Text content to import (required for import operation)',
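With the path parameter threaded through as above, a caller can pin the target project explicitly instead of relying on auto-detection. A minimal sketch, assuming this module exposes its MCP handler as a named handler export like the other tool modules (the project path is a placeholder):

import { handler as coreMemory } from './core-memory.js';

// Resolution order after this change: explicit path argument first, then getProjectRoot()
await coreMemory({ operation: 'list', path: '/path/to/project', limit: 20 });
await coreMemory({ operation: 'search', query: 'authentication', path: '/path/to/project' });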
@@ -22,6 +22,7 @@ import { executeInitWithProgress } from './smart-search.js';
 // codex_lens removed - functionality integrated into smart_search
 import * as readFileMod from './read-file.js';
 import * as coreMemoryMod from './core-memory.js';
+import * as contextCacheMod from './context-cache.js';
 import type { ProgressInfo } from './codex-lens.js';

 // Import legacy JS tools
@@ -357,6 +358,7 @@ registerTool(toLegacyTool(smartSearchMod));
 // codex_lens removed - functionality integrated into smart_search
 registerTool(toLegacyTool(readFileMod));
 registerTool(toLegacyTool(coreMemoryMod));
+registerTool(toLegacyTool(contextCacheMod));

 // Register legacy JS tools
 registerTool(uiGeneratePreviewTool);
ccw/src/tools/litellm-client.ts (new file, 246 lines)
@@ -0,0 +1,246 @@
/**
 * LiteLLM Client - Bridge between CCW and ccw-litellm Python package
 * Provides LLM chat and embedding capabilities via spawned Python process
 *
 * Features:
 * - Chat completions with multiple models
 * - Text embeddings generation
 * - Configuration management
 * - JSON protocol communication
 */

import { spawn } from 'child_process';
import { promisify } from 'util';

export interface LiteLLMConfig {
  pythonPath?: string;  // Default 'python'
  configPath?: string;  // Configuration file path
  timeout?: number;     // Default 60000ms
}

export interface ChatMessage {
  role: 'system' | 'user' | 'assistant';
  content: string;
}

export interface ChatResponse {
  content: string;
  model: string;
  usage?: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
}

export interface EmbedResponse {
  vectors: number[][];
  dimensions: number;
  model: string;
}

export interface LiteLLMStatus {
  available: boolean;
  version?: string;
  error?: string;
}

export class LiteLLMClient {
  private pythonPath: string;
  private configPath?: string;
  private timeout: number;

  constructor(config: LiteLLMConfig = {}) {
    this.pythonPath = config.pythonPath || 'python';
    this.configPath = config.configPath;
    this.timeout = config.timeout || 60000;
  }

  /**
   * Execute Python ccw-litellm command
   */
  private async executePython(args: string[], options: { timeout?: number } = {}): Promise<string> {
    const timeout = options.timeout || this.timeout;

    return new Promise((resolve, reject) => {
      const proc = spawn(this.pythonPath, ['-m', 'ccw_litellm.cli', ...args], {
        stdio: ['pipe', 'pipe', 'pipe'],
        env: { ...process.env }
      });

      let stdout = '';
      let stderr = '';
      let timedOut = false;

      // Set up timeout
      const timeoutId = setTimeout(() => {
        timedOut = true;
        proc.kill('SIGTERM');
        reject(new Error(`Command timed out after ${timeout}ms`));
      }, timeout);

      proc.stdout.on('data', (data) => {
        stdout += data.toString();
      });

      proc.stderr.on('data', (data) => {
        stderr += data.toString();
      });

      proc.on('error', (error) => {
        clearTimeout(timeoutId);
        reject(new Error(`Failed to spawn Python process: ${error.message}`));
      });

      proc.on('close', (code) => {
        clearTimeout(timeoutId);

        if (timedOut) {
          return; // Already rejected
        }

        if (code === 0) {
          resolve(stdout.trim());
        } else {
          const errorMsg = stderr.trim() || `Process exited with code ${code}`;
          reject(new Error(errorMsg));
        }
      });
    });
  }

  /**
   * Check if ccw-litellm is available
   */
  async isAvailable(): Promise<boolean> {
    try {
      await this.executePython(['version'], { timeout: 5000 });
      return true;
    } catch {
      return false;
    }
  }

  /**
   * Get status information
   */
  async getStatus(): Promise<LiteLLMStatus> {
    try {
      const output = await this.executePython(['version'], { timeout: 5000 });
      return {
        available: true,
        version: output.trim()
      };
    } catch (error: any) {
      return {
        available: false,
        error: error.message
      };
    }
  }

  /**
   * Get current configuration
   */
  async getConfig(): Promise<any> {
    const output = await this.executePython(['config', '--json']);
    return JSON.parse(output);
  }

  /**
   * Generate embeddings for texts
   */
  async embed(texts: string[], model: string = 'default'): Promise<EmbedResponse> {
    if (!texts || texts.length === 0) {
      throw new Error('texts array cannot be empty');
    }

    const args = ['embed', '--model', model, '--output', 'json'];

    // Add texts as arguments
    for (const text of texts) {
      args.push(text);
    }

    const output = await this.executePython(args, { timeout: this.timeout * 2 });
    const vectors = JSON.parse(output);

    return {
      vectors,
      dimensions: vectors[0]?.length || 0,
      model
    };
  }

  /**
   * Chat with LLM
   */
  async chat(message: string, model: string = 'default'): Promise<string> {
    if (!message) {
      throw new Error('message cannot be empty');
    }

    const args = ['chat', '--model', model, message];
    return this.executePython(args, { timeout: this.timeout * 2 });
  }

  /**
   * Multi-turn chat with messages array
   */
  async chatMessages(messages: ChatMessage[], model: string = 'default'): Promise<ChatResponse> {
    if (!messages || messages.length === 0) {
      throw new Error('messages array cannot be empty');
    }

    // For now, just use the last user message
    // TODO: Implement full message history support in ccw-litellm
    const lastMessage = messages[messages.length - 1];
    const content = await this.chat(lastMessage.content, model);

    return {
      content,
      model,
      usage: undefined // TODO: Add usage tracking
    };
  }
}

// Singleton instance
let _client: LiteLLMClient | null = null;

/**
 * Get or create singleton LiteLLM client
 */
export function getLiteLLMClient(config?: LiteLLMConfig): LiteLLMClient {
  if (!_client) {
    _client = new LiteLLMClient(config);
  }
  return _client;
}

/**
 * Check if LiteLLM is available
 */
export async function checkLiteLLMAvailable(): Promise<boolean> {
  try {
    const client = getLiteLLMClient();
    return await client.isAvailable();
  } catch {
    return false;
  }
}

/**
 * Get LiteLLM status
 */
export async function getLiteLLMStatus(): Promise<LiteLLMStatus> {
  try {
    const client = getLiteLLMClient();
    return await client.getStatus();
  } catch (error: any) {
    return {
      available: false,
      error: error.message
    };
  }
}
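A short, hedged sketch of the client in use; the model name is a placeholder and it assumes the ccw-litellm Python CLI is installed and reachable via the configured python path:

import { getLiteLLMClient, getLiteLLMStatus } from './litellm-client.js';

const status = await getLiteLLMStatus();
if (status.available) {
  const client = getLiteLLMClient({ timeout: 30000 });
  const answer = await client.chat('Summarize this repo in one sentence.', 'default');
  const { vectors, dimensions } = await client.embed(['first text', 'second text'], 'default');
  console.log(answer, dimensions, vectors.length);
}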
ccw/src/tools/litellm-executor.ts (new file, 241 lines)
@@ -0,0 +1,241 @@
/**
 * LiteLLM Executor - Execute LiteLLM endpoints with context caching
 * Integrates with context-cache for file packing and LiteLLM client for API calls
 */

import { getLiteLLMClient } from './litellm-client.js';
import { handler as contextCacheHandler } from './context-cache.js';
import {
  findEndpointById,
  getProviderWithResolvedEnvVars,
} from '../config/litellm-api-config-manager.js';
import type { CustomEndpoint, ProviderCredential } from '../types/litellm-api-config.js';

export interface LiteLLMExecutionOptions {
  prompt: string;
  endpointId: string;       // Custom endpoint ID (e.g., "my-gpt4o")
  baseDir: string;          // Project base directory
  cwd?: string;             // Working directory for file resolution
  includeDirs?: string[];   // Additional directories for @patterns
  enableCache?: boolean;    // Override endpoint cache setting
  onOutput?: (data: { type: string; data: string }) => void;
}

export interface LiteLLMExecutionResult {
  success: boolean;
  output: string;
  model: string;
  provider: string;
  cacheUsed: boolean;
  cachedFiles?: string[];
  error?: string;
}

/**
 * Extract @patterns from prompt text
 */
export function extractPatterns(prompt: string): string[] {
  // Match @path patterns: @src/**/*.ts, @CLAUDE.md, @../shared/**/*
  const regex = /@([^\s]+)/g;
  const patterns: string[] = [];
  let match;
  while ((match = regex.exec(prompt)) !== null) {
    patterns.push('@' + match[1]);
  }
  return patterns;
}

/**
 * Execute LiteLLM endpoint with optional context caching
 */
export async function executeLiteLLMEndpoint(
  options: LiteLLMExecutionOptions
): Promise<LiteLLMExecutionResult> {
  const { prompt, endpointId, baseDir, cwd, includeDirs, enableCache, onOutput } = options;

  // 1. Find endpoint configuration
  const endpoint = findEndpointById(baseDir, endpointId);
  if (!endpoint) {
    return {
      success: false,
      output: '',
      model: '',
      provider: '',
      cacheUsed: false,
      error: `Endpoint not found: ${endpointId}`,
    };
  }

  // 2. Get provider with resolved env vars
  const provider = getProviderWithResolvedEnvVars(baseDir, endpoint.providerId);
  if (!provider) {
    return {
      success: false,
      output: '',
      model: '',
      provider: '',
      cacheUsed: false,
      error: `Provider not found: ${endpoint.providerId}`,
    };
  }

  // Verify API key is available
  if (!provider.resolvedApiKey) {
    return {
      success: false,
      output: '',
      model: endpoint.model,
      provider: provider.type,
      cacheUsed: false,
      error: `API key not configured for provider: ${provider.name}`,
    };
  }

  // 3. Process context cache if enabled
  let finalPrompt = prompt;
  let cacheUsed = false;
  let cachedFiles: string[] = [];

  const shouldCache = enableCache ?? endpoint.cacheStrategy.enabled;
  if (shouldCache) {
    const patterns = extractPatterns(prompt);
    if (patterns.length > 0) {
      if (onOutput) {
        onOutput({ type: 'stderr', data: `[Context cache: Found ${patterns.length} @patterns]\n` });
      }

      // Pack files into cache
      const packResult = await contextCacheHandler({
        operation: 'pack',
        patterns,
        cwd: cwd || process.cwd(),
        include_dirs: includeDirs,
        ttl: endpoint.cacheStrategy.ttlMinutes * 60 * 1000,
        max_file_size: endpoint.cacheStrategy.maxSizeKB * 1024,
      });

      if (packResult.success && packResult.result) {
        const pack = packResult.result as any;

        if (onOutput) {
          onOutput({
            type: 'stderr',
            data: `[Context cache: Packed ${pack.files_packed} files, ${pack.total_bytes} bytes]\n`,
          });
        }

        // Read cached content
        const readResult = await contextCacheHandler({
          operation: 'read',
          session_id: pack.session_id,
          limit: endpoint.cacheStrategy.maxSizeKB * 1024,
        });

        if (readResult.success && readResult.result) {
          const read = readResult.result as any;
          // Prepend cached content to prompt
          finalPrompt = `${read.content}\n\n---\n\n${prompt}`;
          cacheUsed = true;
          cachedFiles = pack.files_packed ? Array(pack.files_packed).fill('...') : [];

          if (onOutput) {
            onOutput({ type: 'stderr', data: `[Context cache: Applied to prompt]\n` });
          }
        }
      } else if (packResult.error) {
        if (onOutput) {
          onOutput({ type: 'stderr', data: `[Context cache warning: ${packResult.error}]\n` });
        }
      }
    }
  }

  // 4. Call LiteLLM
  try {
    if (onOutput) {
      onOutput({
        type: 'stderr',
        data: `[LiteLLM: Calling ${provider.type}/${endpoint.model}]\n`,
      });
    }

    const client = getLiteLLMClient({
      pythonPath: 'python',
      timeout: 120000, // 2 minutes
    });

    // Configure provider credentials via environment
    // LiteLLM uses standard env vars like OPENAI_API_KEY, ANTHROPIC_API_KEY
    const envVarName = getProviderEnvVarName(provider.type);
    if (envVarName) {
      process.env[envVarName] = provider.resolvedApiKey;
    }

    // Set base URL if custom
    if (provider.apiBase) {
      const baseUrlEnvVar = getProviderBaseUrlEnvVarName(provider.type);
      if (baseUrlEnvVar) {
        process.env[baseUrlEnvVar] = provider.apiBase;
      }
    }

    // Use litellm-client to call chat
    const response = await client.chat(finalPrompt, endpoint.model);

    if (onOutput) {
      onOutput({ type: 'stdout', data: response });
    }

    return {
      success: true,
      output: response,
      model: endpoint.model,
      provider: provider.type,
      cacheUsed,
      cachedFiles,
    };
  } catch (error) {
    const errorMsg = (error as Error).message;
    if (onOutput) {
      onOutput({ type: 'stderr', data: `[LiteLLM error: ${errorMsg}]\n` });
    }

    return {
      success: false,
      output: '',
      model: endpoint.model,
      provider: provider.type,
      cacheUsed,
      error: errorMsg,
    };
  }
}

/**
 * Get environment variable name for provider API key
 */
function getProviderEnvVarName(providerType: string): string | null {
  const envVarMap: Record<string, string> = {
    openai: 'OPENAI_API_KEY',
    anthropic: 'ANTHROPIC_API_KEY',
    google: 'GOOGLE_API_KEY',
    azure: 'AZURE_API_KEY',
    mistral: 'MISTRAL_API_KEY',
    deepseek: 'DEEPSEEK_API_KEY',
  };

  return envVarMap[providerType] || null;
}

/**
 * Get environment variable name for provider base URL
 */
function getProviderBaseUrlEnvVarName(providerType: string): string | null {
  const envVarMap: Record<string, string> = {
    openai: 'OPENAI_API_BASE',
    anthropic: 'ANTHROPIC_API_BASE',
    azure: 'AZURE_API_BASE',
  };

  return envVarMap[providerType] || null;
}
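A hedged sketch of invoking the executor; the endpoint ID "my-gpt4o" is the illustrative value from the comment above, the baseDir is a placeholder, and the endpoint is assumed to exist in the Dashboard-managed LiteLLM config:

import { executeLiteLLMEndpoint, extractPatterns } from './litellm-executor.js';

const prompt = 'Review @src/**/*.ts and @CLAUDE.md for dead code.';
console.log(extractPatterns(prompt)); // ['@src/**/*.ts', '@CLAUDE.md']

const result = await executeLiteLLMEndpoint({
  prompt,
  endpointId: 'my-gpt4o',        // illustrative; must match a configured custom endpoint
  baseDir: '/path/to/project',   // placeholder project root
  onOutput: ({ type, data }) => process.stderr.write(`[${type}] ${data}`),
});
if (!result.success) console.error(result.error);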
ccw/src/tools/pattern-parser.ts (new file, 329 lines)
@@ -0,0 +1,329 @@
/**
 * Pattern Parser - Parse @expression patterns to file lists
 * Supports glob patterns like @src/**.ts, @CLAUDE.md, @../shared/**
 */

import { glob } from 'glob';
import { resolve, isAbsolute, normalize } from 'path';
import { existsSync, statSync, readFileSync } from 'fs';

/** Result of parsing @patterns */
export interface PatternParseResult {
  files: string[];     // Matched file paths (absolute)
  patterns: string[];  // Original patterns
  errors: string[];    // Parse errors
  stats: {
    total_files: number;
    total_patterns: number;
    matched_patterns: number;
  };
}

/** Options for pattern parsing */
export interface PatternParseOptions {
  cwd?: string;              // Working directory
  includeDirs?: string[];    // Additional directories to include
  ignore?: string[];         // Ignore patterns
  maxFiles?: number;         // Max files to return (default: 1000)
  followSymlinks?: boolean;  // Follow symlinks (default: false)
}

/** Default ignore patterns */
const DEFAULT_IGNORE = [
  '**/node_modules/**',
  '**/.git/**',
  '**/dist/**',
  '**/build/**',
  '**/.next/**',
  '**/__pycache__/**',
  '**/*.pyc',
  '**/venv/**',
  '**/.venv/**',
];

/**
 * Extract pattern from @expression
 * Example: "@src/**.ts" -> "src/**.ts"
 */
function extractPattern(expression: string): string | null {
  const trimmed = expression.trim();
  if (!trimmed.startsWith('@')) {
    return null;
  }
  return trimmed.slice(1);
}

/**
 * Check if a pattern is a glob pattern or exact file
 */
function isGlobPattern(pattern: string): boolean {
  return pattern.includes('*') || pattern.includes('?') || pattern.includes('{') || pattern.includes('[');
}

/**
 * Validate that a path is within allowed directories
 */
function isPathAllowed(filePath: string, allowedDirs: string[]): boolean {
  const normalized = normalize(filePath);
  return allowedDirs.some(dir => normalized.startsWith(normalize(dir)));
}

/**
 * Build allowed directories list from options
 */
function buildAllowedDirs(cwd: string, includeDirs?: string[]): string[] {
  const allowed = [cwd];

  if (includeDirs) {
    for (const dir of includeDirs) {
      const absDir = isAbsolute(dir) ? dir : resolve(cwd, dir);
      if (existsSync(absDir) && statSync(absDir).isDirectory()) {
        allowed.push(absDir);
      }
    }
  }

  return allowed.map(d => normalize(d));
}

/**
 * Parse @expressions and return matched files
 */
export async function parsePatterns(
  patterns: string[],
  options: PatternParseOptions = {}
): Promise<PatternParseResult> {
  const {
    cwd = process.cwd(),
    includeDirs = [],
    ignore = [],
    maxFiles = 1000,
    followSymlinks = false,
  } = options;

  const result: PatternParseResult = {
    files: [],
    patterns: [],
    errors: [],
    stats: {
      total_files: 0,
      total_patterns: patterns.length,
      matched_patterns: 0,
    },
  };

  // Build allowed directories
  const allowedDirs = buildAllowedDirs(cwd, includeDirs);

  // Merge ignore patterns
  const allIgnore = [...DEFAULT_IGNORE, ...ignore];

  // Track unique files
  const fileSet = new Set<string>();

  for (const expr of patterns) {
    const pattern = extractPattern(expr);

    if (!pattern) {
      result.errors.push(`Invalid pattern: ${expr} (must start with @)`);
      continue;
    }

    result.patterns.push(pattern);

    try {
      if (isGlobPattern(pattern)) {
        // Glob pattern - use glob package
        // Determine base directory for pattern
        let baseDir = cwd;
        let globPattern = pattern;

        // Handle relative paths like ../shared/**
        if (pattern.startsWith('../') || pattern.startsWith('./')) {
          const parts = pattern.split('/');
          const pathParts: string[] = [];
          let i = 0;

          // Extract path prefix
          while (i < parts.length && (parts[i] === '..' || parts[i] === '.')) {
            pathParts.push(parts[i]);
            i++;
          }

          // Keep non-glob path parts
          while (i < parts.length && !isGlobPattern(parts[i])) {
            pathParts.push(parts[i]);
            i++;
          }

          // Resolve base directory
          if (pathParts.length > 0) {
            baseDir = resolve(cwd, pathParts.join('/'));
            globPattern = parts.slice(i).join('/') || '**/*';
          }
        }

        // Check if base directory is allowed
        if (!isPathAllowed(baseDir, allowedDirs)) {
          result.errors.push(`Pattern ${expr}: base directory not in allowed paths`);
          continue;
        }

        // Execute glob using the glob package
        const matches = await glob(globPattern, {
          cwd: baseDir,
          absolute: true,
          nodir: true,
          follow: followSymlinks,
          ignore: allIgnore,
          dot: false,
        });

        let matchCount = 0;
        for (const file of matches) {
          // Validate each file is in allowed directories
          if (isPathAllowed(file, allowedDirs)) {
            fileSet.add(file);
            matchCount++;
            if (fileSet.size >= maxFiles) break;
          }
        }

        if (matchCount > 0) {
          result.stats.matched_patterns++;
        }
      } else {
        // Exact file path
        const absPath = isAbsolute(pattern) ? pattern : resolve(cwd, pattern);

        // Validate path is allowed
        if (!isPathAllowed(absPath, allowedDirs)) {
          result.errors.push(`Pattern ${expr}: path not in allowed directories`);
          continue;
        }

        // Check file exists
        if (existsSync(absPath) && statSync(absPath).isFile()) {
          fileSet.add(absPath);
          result.stats.matched_patterns++;
        } else {
          result.errors.push(`Pattern ${expr}: file not found`);
        }
      }
    } catch (err) {
      result.errors.push(`Pattern ${expr}: ${(err as Error).message}`);
    }

    // Check max files limit
    if (fileSet.size >= maxFiles) {
      result.errors.push(`Max files limit (${maxFiles}) reached`);
      break;
    }
  }

  result.files = Array.from(fileSet);
  result.stats.total_files = result.files.length;

  return result;
}

/**
 * Pack files into a single content string with metadata headers
 */
export async function packFiles(
  files: string[],
  options: {
    includeMetadata?: boolean;
    separator?: string;
    maxFileSize?: number; // Max size per file in bytes (default: 1MB)
  } = {}
): Promise<{
  content: string;
  packedFiles: string[];
  skippedFiles: string[];
  totalBytes: number;
}> {
  const {
    includeMetadata = true,
    separator = '\n\n',
    maxFileSize = 1024 * 1024, // 1MB default
  } = options;

  const parts: string[] = [];
  const packedFiles: string[] = [];
  const skippedFiles: string[] = [];
  let totalBytes = 0;

  for (const file of files) {
    try {
      const stats = statSync(file);

      // Skip files that are too large
      if (stats.size > maxFileSize) {
        skippedFiles.push(file);
        continue;
      }

      const content = readFileSync(file, 'utf-8');

      if (includeMetadata) {
        // Add file header with metadata
        const header = [
          `=== FILE: ${file} ===`,
          `Size: ${stats.size} bytes`,
          `Modified: ${stats.mtime.toISOString()}`,
          '---',
        ].join('\n');
        parts.push(header + '\n' + content);
      } else {
        parts.push(content);
      }

      packedFiles.push(file);
      totalBytes += content.length;
    } catch {
      skippedFiles.push(file);
    }
  }

  return {
    content: parts.join(separator),
    packedFiles,
    skippedFiles,
    totalBytes,
  };
}

/**
 * Parse patterns and pack files in one call
 */
export async function parseAndPack(
  patterns: string[],
  options: PatternParseOptions & {
    includeMetadata?: boolean;
    separator?: string;
    maxFileSize?: number;
  } = {}
): Promise<{
  content: string;
  parseResult: PatternParseResult;
  packedFiles: string[];
  skippedFiles: string[];
  totalBytes: number;
}> {
  const parseResult = await parsePatterns(patterns, options);

  const packResult = await packFiles(parseResult.files, {
    includeMetadata: options.includeMetadata,
    separator: options.separator,
    maxFileSize: options.maxFileSize,
  });

  return {
    content: packResult.content,
|
||||||
|
parseResult,
|
||||||
|
packedFiles: packResult.packedFiles,
|
||||||
|
skippedFiles: packResult.skippedFiles,
|
||||||
|
totalBytes: packResult.totalBytes,
|
||||||
|
};
|
||||||
|
}
|
||||||
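Reviewer note: as a quick orientation for the pattern parser and packer above, a minimal usage sketch. The import path, pattern strings, and option values are illustrative assumptions, not taken from the repository.

// Hypothetical usage of parseAndPack (import path and patterns are placeholders).
import { parseAndPack } from './pattern-parser';

async function demo(): Promise<void> {
  const { content, parseResult, packedFiles, skippedFiles } = await parseAndPack(
    ['@src/**/*.ts', '@README.md'],
    { maxFiles: 200, includeMetadata: true, maxFileSize: 256 * 1024 }
  );
  // parseResult.stats reports pattern/file counts; content is the packed text with per-file headers.
  console.log(parseResult.stats, packedFiles.length, skippedFiles.length, content.length);
}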
@@ -36,10 +36,12 @@ const ParamsSchema = z.object({
   path: z.string().optional(),
   paths: z.array(z.string()).default([]),
   contextLines: z.number().default(0),
-  maxResults: z.number().default(20), // Increased default
+  maxResults: z.number().default(5), // Default 5 with full content
   includeHidden: z.boolean().default(false),
   languages: z.array(z.string()).optional(),
-  limit: z.number().default(20), // Increased default
+  limit: z.number().default(5), // Default 5 with full content
+  extraFilesCount: z.number().default(10), // Additional file-only results
+  maxContentLength: z.number().default(200), // Max content length for truncation (50-2000)
   offset: z.number().default(0), // NEW: Pagination offset (start_index)
   enrich: z.boolean().default(false),
   // Search modifiers for ripgrep mode
@@ -244,6 +246,7 @@ interface SearchMetadata {
   warning?: string;
   note?: string;
   index_status?: 'indexed' | 'not_indexed' | 'partial';
+  fallback?: string; // Fallback mode used (e.g., 'fuzzy')
   fallback_history?: string[];
   suggested_weights?: Record<string, number>;
   // Tokenization metadata (ripgrep mode)
@@ -267,6 +270,7 @@ interface SearchMetadata {
 interface SearchResult {
   success: boolean;
   results?: ExactMatch[] | SemanticMatch[] | GraphMatch[] | FileMatch[] | unknown;
+  extra_files?: string[]; // Additional file paths without content
   output?: string;
   metadata?: SearchMetadata;
   error?: string;
@@ -274,11 +278,22 @@ interface SearchResult {
   message?: string;
 }
 
+interface ModelInfo {
+  model_profile?: string;
+  model_name?: string;
+  embedding_dim?: number;
+  backend?: string;
+  created_at?: string;
+  updated_at?: string;
+}
+
 interface IndexStatus {
   indexed: boolean;
   has_embeddings: boolean;
   file_count?: number;
   embeddings_coverage_percent?: number;
+  total_chunks?: number;
+  model_info?: ModelInfo | null;
   warning?: string;
 }
 
@@ -289,6 +304,42 @@ function stripAnsi(str: string): string {
   return str.replace(/\x1b\[[0-9;]*m/g, '');
 }
 
+/** Default maximum content length to return (avoid excessive output) */
+const DEFAULT_MAX_CONTENT_LENGTH = 200;
+
+/**
+ * Truncate content to specified length with ellipsis
+ * @param content - The content to truncate
+ * @param maxLength - Maximum length (default: 200)
+ */
+function truncateContent(content: string | null | undefined, maxLength: number = DEFAULT_MAX_CONTENT_LENGTH): string {
+  if (!content) return '';
+  if (content.length <= maxLength) return content;
+  return content.slice(0, maxLength) + '...';
+}
+
+/**
+ * Split results into full content results and extra file-only results
+ * Generic function supporting both SemanticMatch and ExactMatch types
+ * @param allResults - All search results (must have 'file' property)
+ * @param fullContentLimit - Number of results with full content (default: 5)
+ * @param extraFilesCount - Number of additional file-only results (default: 10)
+ */
+function splitResultsWithExtraFiles<T extends { file: string }>(
+  allResults: T[],
+  fullContentLimit: number = 5,
+  extraFilesCount: number = 10
+): { results: T[]; extra_files: string[] } {
+  // First N results with full content
+  const results = allResults.slice(0, fullContentLimit);
+
+  // Next M results as file paths only (deduplicated)
+  const extraResults = allResults.slice(fullContentLimit, fullContentLimit + extraFilesCount);
+  const extra_files = [...new Set(extraResults.map(r => r.file))];
+
+  return { results, extra_files };
+}
+
 /**
  * Check if CodexLens index exists for current directory
  * @param path - Directory path to check
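Reviewer note: the truncation and split helpers added above can be exercised in isolation; a minimal sketch, assuming truncateContent and splitResultsWithExtraFiles are in scope (the sample data is invented).

// Invented sample data; only truncateContent and splitResultsWithExtraFiles come from the diff.
type DemoMatch = { file: string; score: number; content: string };

const hits: DemoMatch[] = Array.from({ length: 20 }, (_, i) => ({
  file: `src/module-${i % 12}.ts`,
  score: 1 - i * 0.01,
  content: 'x'.repeat(500),
}));

// First 5 keep content (truncated to 200 chars), the next 10 collapse to deduplicated file paths.
const { results, extra_files } = splitResultsWithExtraFiles(hits, 5, 10);
const trimmed = results.map(r => ({ ...r, content: truncateContent(r.content, 200) }));
console.log(trimmed.length, extra_files.length); // 5, and at most 10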
@@ -319,6 +370,18 @@ async function checkIndexStatus(path: string = '.'): Promise<IndexStatus> {
     const embeddingsData = status.embeddings || {};
     const embeddingsCoverage = embeddingsData.coverage_percent || 0;
     const has_embeddings = embeddingsCoverage >= 50; // Threshold: 50%
+    const totalChunks = embeddingsData.total_chunks || 0;
+
+    // Extract model info if available
+    const modelInfoData = embeddingsData.model_info;
+    const modelInfo: ModelInfo | undefined = modelInfoData ? {
+      model_profile: modelInfoData.model_profile,
+      model_name: modelInfoData.model_name,
+      embedding_dim: modelInfoData.embedding_dim,
+      backend: modelInfoData.backend,
+      created_at: modelInfoData.created_at,
+      updated_at: modelInfoData.updated_at,
+    } : undefined;
 
     let warning: string | undefined;
     if (!indexed) {
@@ -334,6 +397,9 @@ async function checkIndexStatus(path: string = '.'): Promise<IndexStatus> {
       has_embeddings,
       file_count: status.total_files,
       embeddings_coverage_percent: embeddingsCoverage,
+      total_chunks: totalChunks,
+      // Ensure model_info is null instead of undefined so it's included in JSON
+      model_info: modelInfo ?? null,
       warning,
     };
   } catch {
@@ -687,7 +753,7 @@ async function executeAutoMode(params: Params): Promise<SearchResult> {
  * Supports tokenized multi-word queries with OR matching and result ranking
  */
 async function executeRipgrepMode(params: Params): Promise<SearchResult> {
-  const { query, paths = [], contextLines = 0, maxResults = 10, includeHidden = false, path = '.', regex = true, caseSensitive = true, tokenize = true } = params;
+  const { query, paths = [], contextLines = 0, maxResults = 5, extraFilesCount = 10, maxContentLength = 200, includeHidden = false, path = '.', regex = true, caseSensitive = true, tokenize = true } = params;
 
   if (!query) {
     return {
@@ -699,6 +765,9 @@ async function executeRipgrepMode(params: Params): Promise<SearchResult> {
   // Check if ripgrep is available
   const hasRipgrep = checkToolAvailability('rg');
 
+  // Calculate total to fetch for split (full content + extra files)
+  const totalToFetch = maxResults + extraFilesCount;
+
   // If ripgrep not available, fall back to CodexLens exact mode
   if (!hasRipgrep) {
     const readyStatus = await ensureCodexLensReady();
@@ -710,7 +779,7 @@ async function executeRipgrepMode(params: Params): Promise<SearchResult> {
     }
 
     // Use CodexLens exact mode as fallback
-    const args = ['search', query, '--limit', maxResults.toString(), '--mode', 'exact', '--json'];
+    const args = ['search', query, '--limit', totalToFetch.toString(), '--mode', 'exact', '--json'];
     const result = await executeCodexLens(args, { cwd: path });
 
     if (!result.success) {
@@ -727,23 +796,27 @@ async function executeRipgrepMode(params: Params): Promise<SearchResult> {
     }
 
     // Parse results
-    let results: SemanticMatch[] = [];
+    let allResults: SemanticMatch[] = [];
     try {
       const parsed = JSON.parse(stripAnsi(result.output || '{}'));
       const data = parsed.result?.results || parsed.results || parsed;
-      results = (Array.isArray(data) ? data : []).map((item: any) => ({
+      allResults = (Array.isArray(data) ? data : []).map((item: any) => ({
         file: item.path || item.file,
         score: item.score || 0,
-        content: item.excerpt || item.content || '',
+        content: truncateContent(item.content || item.excerpt, maxContentLength),
        symbol: item.symbol || null,
       }));
     } catch {
       // Keep empty results
     }
 
+    // Split results: first N with full content, rest as file paths only
+    const { results, extra_files } = splitResultsWithExtraFiles(allResults, maxResults, extraFilesCount);
+
     return {
       success: true,
       results,
+      extra_files: extra_files.length > 0 ? extra_files : undefined,
       metadata: {
         mode: 'ripgrep',
         backend: 'codexlens-fallback',
@@ -754,12 +827,12 @@ async function executeRipgrepMode(params: Params): Promise<SearchResult> {
     };
   }
 
-  // Use ripgrep
+  // Use ripgrep - request more results to support split
   const { command, args, tokens } = buildRipgrepCommand({
     query,
     paths: paths.length > 0 ? paths : [path],
     contextLines,
-    maxResults,
+    maxResults: totalToFetch, // Fetch more to support split
     includeHidden,
     regex,
     caseSensitive,
@@ -774,6 +847,7 @@ async function executeRipgrepMode(params: Params): Promise<SearchResult> {
 
     let stdout = '';
     let stderr = '';
+    let resultLimitReached = false;
 
     child.stdout.on('data', (data) => {
       stdout += data.toString();
@@ -784,10 +858,18 @@ async function executeRipgrepMode(params: Params): Promise<SearchResult> {
     });
 
     child.on('close', (code) => {
-      const results: ExactMatch[] = [];
+      const allResults: ExactMatch[] = [];
       const lines = stdout.split('\n').filter((line) => line.trim());
+      // Limit total results to prevent memory overflow (--max-count only limits per-file)
+      const effectiveLimit = totalToFetch > 0 ? totalToFetch : 500;
 
       for (const line of lines) {
+        // Stop collecting if we've reached the limit
+        if (allResults.length >= effectiveLimit) {
+          resultLimitReached = true;
+          break;
+        }
+
        try {
          const item = JSON.parse(line);
 
@@ -801,7 +883,7 @@ async function executeRipgrepMode(params: Params): Promise<SearchResult> {
              : 1,
            content: item.data.lines.text.trim(),
          };
-          results.push(match);
+          allResults.push(match);
         }
       } catch {
         continue;
@@ -814,23 +896,36 @@ async function executeRipgrepMode(params: Params): Promise<SearchResult> {
 
      // Apply token-based scoring and sorting for multi-word queries
      // Results matching more tokens are ranked higher (exact matches first)
-      const scoredResults = tokens.length > 1 ? scoreByTokenMatch(results, tokens) : results;
+      const scoredResults = tokens.length > 1 ? scoreByTokenMatch(allResults, tokens) : allResults;
 
      if (code === 0 || code === 1 || (isWindowsDeviceError && scoredResults.length > 0)) {
+        // Split results: first N with full content, rest as file paths only
+        const { results, extra_files } = splitResultsWithExtraFiles(scoredResults, maxResults, extraFilesCount);
+
+        // Build warning message for various conditions
+        const warnings: string[] = [];
+        if (resultLimitReached) {
+          warnings.push(`Result limit reached (${effectiveLimit}). Use a more specific query or increase limit.`);
+        }
+        if (isWindowsDeviceError) {
+          warnings.push('Some Windows device files were skipped');
+        }
+
        resolve({
          success: true,
-          results: scoredResults,
+          results,
+          extra_files: extra_files.length > 0 ? extra_files : undefined,
          metadata: {
            mode: 'ripgrep',
            backend: 'ripgrep',
-            count: scoredResults.length,
+            count: results.length,
            query,
            tokens: tokens.length > 1 ? tokens : undefined, // Include tokens in metadata for debugging
            tokenized: tokens.length > 1,
-            ...(isWindowsDeviceError && { warning: 'Some Windows device files were skipped' }),
+            ...(warnings.length > 0 && { warning: warnings.join('; ') }),
          },
        });
-      } else if (isWindowsDeviceError && results.length === 0) {
+      } else if (isWindowsDeviceError && allResults.length === 0) {
        // Windows device error but no results - might be the only issue
        resolve({
          success: true,
@@ -867,7 +962,7 @@ async function executeRipgrepMode(params: Params): Promise<SearchResult> {
  * Requires index
  */
 async function executeCodexLensExactMode(params: Params): Promise<SearchResult> {
-  const { query, path = '.', maxResults = 10, enrich = false } = params;
+  const { query, path = '.', maxResults = 5, extraFilesCount = 10, maxContentLength = 200, enrich = false } = params;
 
   if (!query) {
     return {
@@ -888,7 +983,9 @@ async function executeCodexLensExactMode(params: Params): Promise<SearchResult>
   // Check index status
   const indexStatus = await checkIndexStatus(path);
 
-  const args = ['search', query, '--limit', maxResults.toString(), '--mode', 'exact', '--json'];
+  // Request more results to support split (full content + extra files)
+  const totalToFetch = maxResults + extraFilesCount;
+  const args = ['search', query, '--limit', totalToFetch.toString(), '--mode', 'exact', '--json'];
   if (enrich) {
     args.push('--enrich');
   }
@@ -909,23 +1006,70 @@ async function executeCodexLensExactMode(params: Params): Promise<SearchResult>
   }
 
   // Parse results
-  let results: SemanticMatch[] = [];
+  let allResults: SemanticMatch[] = [];
   try {
     const parsed = JSON.parse(stripAnsi(result.output || '{}'));
     const data = parsed.result?.results || parsed.results || parsed;
-    results = (Array.isArray(data) ? data : []).map((item: any) => ({
+    allResults = (Array.isArray(data) ? data : []).map((item: any) => ({
       file: item.path || item.file,
       score: item.score || 0,
-      content: item.excerpt || item.content || '',
+      content: truncateContent(item.content || item.excerpt, maxContentLength),
      symbol: item.symbol || null,
     }));
   } catch {
     // Keep empty results
   }
 
+  // Fallback to fuzzy mode if exact returns no results
+  if (allResults.length === 0) {
+    const fuzzyArgs = ['search', query, '--limit', totalToFetch.toString(), '--mode', 'fuzzy', '--json'];
+    if (enrich) {
+      fuzzyArgs.push('--enrich');
+    }
+    const fuzzyResult = await executeCodexLens(fuzzyArgs, { cwd: path });
+
+    if (fuzzyResult.success) {
+      try {
+        const parsed = JSON.parse(stripAnsi(fuzzyResult.output || '{}'));
+        const data = parsed.result?.results || parsed.results || parsed;
+        allResults = (Array.isArray(data) ? data : []).map((item: any) => ({
+          file: item.path || item.file,
+          score: item.score || 0,
+          content: truncateContent(item.content || item.excerpt, maxContentLength),
+          symbol: item.symbol || null,
+        }));
+      } catch {
+        // Keep empty results
+      }
+
+      if (allResults.length > 0) {
+        // Split results: first N with full content, rest as file paths only
+        const { results, extra_files } = splitResultsWithExtraFiles(allResults, maxResults, extraFilesCount);
+        return {
+          success: true,
+          results,
+          extra_files: extra_files.length > 0 ? extra_files : undefined,
+          metadata: {
+            mode: 'exact',
+            backend: 'codexlens',
+            count: results.length,
+            query,
+            warning: indexStatus.warning,
+            note: 'No exact matches found, showing fuzzy results',
+            fallback: 'fuzzy',
+          },
+        };
+      }
+    }
+  }
+
+  // Split results: first N with full content, rest as file paths only
+  const { results, extra_files } = splitResultsWithExtraFiles(allResults, maxResults, extraFilesCount);
+
   return {
     success: true,
     results,
+    extra_files: extra_files.length > 0 ? extra_files : undefined,
     metadata: {
       mode: 'exact',
       backend: 'codexlens',
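Reviewer note: the exact-to-fuzzy fallback added above is an instance of a "strict first, looser on empty" pattern; a generic sketch of the idea follows (the function and parameter names here are invented for illustration and are not the tool's API).

// Generic two-stage fallback; runSearch is assumed to resolve to [] when nothing matches.
async function searchWithFallback(
  runSearch: (mode: 'exact' | 'fuzzy') => Promise<string[]>
): Promise<{ mode: 'exact' | 'fuzzy'; hits: string[] }> {
  const exact = await runSearch('exact');
  if (exact.length > 0) return { mode: 'exact', hits: exact };
  // Only pay for the looser mode when the strict one found nothing.
  const fuzzy = await runSearch('fuzzy');
  return { mode: 'fuzzy', hits: fuzzy };
}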
@@ -942,7 +1086,7 @@ async function executeCodexLensExactMode(params: Params): Promise<SearchResult>
  * Requires index with embeddings
  */
 async function executeHybridMode(params: Params): Promise<SearchResult> {
-  const { query, path = '.', maxResults = 10, enrich = false } = params;
+  const { query, path = '.', maxResults = 5, extraFilesCount = 10, maxContentLength = 200, enrich = false } = params;
 
   if (!query) {
     return {
@@ -963,7 +1107,9 @@ async function executeHybridMode(params: Params): Promise<SearchResult> {
   // Check index status
   const indexStatus = await checkIndexStatus(path);
 
-  const args = ['search', query, '--limit', maxResults.toString(), '--mode', 'hybrid', '--json'];
+  // Request more results to support split (full content + extra files)
+  const totalToFetch = maxResults + extraFilesCount;
+  const args = ['search', query, '--limit', totalToFetch.toString(), '--mode', 'hybrid', '--json'];
   if (enrich) {
     args.push('--enrich');
   }
@@ -984,14 +1130,14 @@ async function executeHybridMode(params: Params): Promise<SearchResult> {
   }
 
   // Parse results
-  let results: SemanticMatch[] = [];
+  let allResults: SemanticMatch[] = [];
   let baselineInfo: { score: number; count: number } | null = null;
   let initialCount = 0;
 
   try {
     const parsed = JSON.parse(stripAnsi(result.output || '{}'));
     const data = parsed.result?.results || parsed.results || parsed;
-    results = (Array.isArray(data) ? data : []).map((item: any) => {
+    allResults = (Array.isArray(data) ? data : []).map((item: any) => {
       const rawScore = item.score || 0;
       // Hybrid mode returns distance scores (lower is better).
       // Convert to similarity scores (higher is better) for consistency.
@@ -1000,27 +1146,27 @@ async function executeHybridMode(params: Params): Promise<SearchResult> {
       return {
         file: item.path || item.file,
         score: similarityScore,
-        content: item.excerpt || item.content || '',
+        content: truncateContent(item.content || item.excerpt, maxContentLength),
        symbol: item.symbol || null,
       };
     });
 
-    initialCount = results.length;
+    initialCount = allResults.length;
 
     // Post-processing pipeline to improve semantic search quality
     // 0. Filter dominant baseline scores (hot spot detection)
-    const baselineResult = filterDominantBaselineScores(results);
-    results = baselineResult.filteredResults;
+    const baselineResult = filterDominantBaselineScores(allResults);
+    allResults = baselineResult.filteredResults;
     baselineInfo = baselineResult.baselineInfo;
 
     // 1. Filter noisy files (coverage, node_modules, etc.)
-    results = filterNoisyFiles(results);
+    allResults = filterNoisyFiles(allResults);
     // 2. Boost results containing query keywords
-    results = applyKeywordBoosting(results, query);
+    allResults = applyKeywordBoosting(allResults, query);
     // 3. Enforce score diversity (penalize identical scores)
-    results = enforceScoreDiversity(results);
+    allResults = enforceScoreDiversity(allResults);
     // 4. Re-sort by adjusted scores
-    results.sort((a, b) => b.score - a.score);
+    allResults.sort((a, b) => b.score - a.score);
   } catch {
     return {
       success: true,
@@ -1036,15 +1182,19 @@ async function executeHybridMode(params: Params): Promise<SearchResult> {
     };
   }
 
+  // Split results: first N with full content, rest as file paths only
+  const { results, extra_files } = splitResultsWithExtraFiles(allResults, maxResults, extraFilesCount);
+
   // Build metadata with baseline info if detected
   let note = 'Hybrid mode uses RRF fusion (exact + fuzzy + vector) for best results';
   if (baselineInfo) {
-    note += ` | Filtered ${initialCount - results.length} hot-spot results with baseline score ~${baselineInfo.score.toFixed(4)}`;
+    note += ` | Filtered ${initialCount - allResults.length} hot-spot results with baseline score ~${baselineInfo.score.toFixed(4)}`;
   }
 
   return {
     success: true,
     results,
+    extra_files: extra_files.length > 0 ? extra_files : undefined,
     metadata: {
       mode: 'hybrid',
       backend: 'codexlens',
@@ -1455,7 +1605,7 @@ export const schema: ToolSchema = {
     mode: {
       type: 'string',
       enum: SEARCH_MODES,
-      description: 'Search mode: auto (default), hybrid (best quality), exact (CodexLens FTS), ripgrep (fast, no index), priority (fallback: hybrid->exact->ripgrep)',
+      description: 'Search mode: auto, hybrid (best quality), exact (CodexLens FTS), ripgrep (fast, no index), priority (fallback chain)',
       default: 'auto',
     },
     output_mode: {
@@ -1491,6 +1641,16 @@ export const schema: ToolSchema = {
       description: 'Alias for maxResults (default: 20)',
       default: 20,
     },
+    extraFilesCount: {
+      type: 'number',
+      description: 'Number of additional file-only results (paths without content)',
+      default: 10,
+    },
+    maxContentLength: {
+      type: 'number',
+      description: 'Maximum content length for truncation (50-2000)',
+      default: 200,
+    },
     offset: {
       type: 'number',
       description: 'Pagination offset - skip first N results (default: 0)',
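Reviewer note: with the schema additions above, a caller can tune how much content comes back per search; a sketch of the parameter object follows (the invocation shape is assumed for illustration, not copied from the repository).

// Assumed parameter payload for the search tool after this change.
const searchParams = {
  query: 'truncateContent',
  mode: 'ripgrep',
  maxResults: 5,          // results returned with truncated content
  extraFilesCount: 10,    // further matches returned as file paths only
  maxContentLength: 200,  // per-result content truncation length
  offset: 0,
};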
402  ccw/src/types/litellm-api-config.ts  Normal file
@@ -0,0 +1,402 @@
/**
 * LiteLLM API Configuration Type Definitions
 *
 * Defines types for provider credentials, cache strategies, custom endpoints,
 * and the overall configuration structure for LiteLLM API integration.
 */

/**
 * API format types (simplified)
 * Most providers use OpenAI-compatible format
 */
export type ProviderType =
  | 'openai' // OpenAI-compatible format (most providers)
  | 'anthropic' // Anthropic format
  | 'custom'; // Custom format

/**
 * Advanced provider settings for LiteLLM compatibility
 * Maps to LiteLLM's provider configuration options
 */
export interface ProviderAdvancedSettings {
  /** Request timeout in seconds (default: 300) */
  timeout?: number;

  /** Maximum retry attempts on failure (default: 3) */
  maxRetries?: number;

  /** Organization ID (OpenAI-specific) */
  organization?: string;

  /** API version string (Azure-specific, e.g., "2024-02-01") */
  apiVersion?: string;

  /** Custom HTTP headers as JSON object */
  customHeaders?: Record<string, string>;

  /** Requests per minute rate limit */
  rpm?: number;

  /** Tokens per minute rate limit */
  tpm?: number;

  /** Proxy server URL (e.g., "http://proxy.example.com:8080") */
  proxy?: string;
}

/**
 * Model type classification
 */
export type ModelType = 'llm' | 'embedding';

/**
 * Model capability metadata
 */
export interface ModelCapabilities {
  /** Whether the model supports streaming responses */
  streaming?: boolean;

  /** Whether the model supports function/tool calling */
  functionCalling?: boolean;

  /** Whether the model supports vision/image input */
  vision?: boolean;

  /** Context window size in tokens */
  contextWindow?: number;

  /** Embedding dimension (for embedding models only) */
  embeddingDimension?: number;

  /** Maximum output tokens */
  maxOutputTokens?: number;
}

/**
 * Routing strategy for load balancing across multiple keys
 */
export type RoutingStrategy =
  | 'simple-shuffle' // Random selection (default, recommended)
  | 'weighted' // Weight-based distribution
  | 'latency-based' // Route to lowest latency
  | 'cost-based' // Route to lowest cost
  | 'least-busy'; // Route to least concurrent

/**
 * Individual API key configuration with optional weight
 */
export interface ApiKeyEntry {
  /** Unique identifier */
  id: string;

  /** API key value or env var reference */
  key: string;

  /** Display label for this key */
  label?: string;

  /** Weight for weighted routing (default: 1) */
  weight?: number;

  /** Whether this key is enabled */
  enabled: boolean;

  /** Last health check status */
  healthStatus?: 'healthy' | 'unhealthy' | 'unknown';

  /** Last health check timestamp */
  lastHealthCheck?: string;

  /** Error message if unhealthy */
  lastError?: string;
}

/**
 * Health check configuration
 */
export interface HealthCheckConfig {
  /** Enable automatic health checks */
  enabled: boolean;

  /** Check interval in seconds (default: 300) */
  intervalSeconds: number;

  /** Cooldown period after failure in seconds (default: 5) */
  cooldownSeconds: number;

  /** Number of failures before marking unhealthy (default: 3) */
  failureThreshold: number;
}

/**
 * Model-specific endpoint settings
 * Allows per-model configuration overrides
 */
export interface ModelEndpointSettings {
  /** Override base URL for this model */
  baseUrl?: string;

  /** Override timeout for this model */
  timeout?: number;

  /** Override max retries for this model */
  maxRetries?: number;

  /** Custom headers for this model */
  customHeaders?: Record<string, string>;

  /** Cache strategy for this model */
  cacheStrategy?: CacheStrategy;
}

/**
 * Model definition with type and grouping
 */
export interface ModelDefinition {
  /** Unique identifier for this model */
  id: string;

  /** Display name for UI */
  name: string;

  /** Model type: LLM or Embedding */
  type: ModelType;

  /** Model series for grouping (e.g., "GPT-4", "Claude-3") */
  series: string;

  /** Whether this model is enabled */
  enabled: boolean;

  /** Model capabilities */
  capabilities?: ModelCapabilities;

  /** Model-specific endpoint settings */
  endpointSettings?: ModelEndpointSettings;

  /** Optional description */
  description?: string;

  /** Creation timestamp (ISO 8601) */
  createdAt: string;

  /** Last update timestamp (ISO 8601) */
  updatedAt: string;
}

/**
 * Provider credential configuration
 * Stores API keys, base URLs, and provider metadata
 */
export interface ProviderCredential {
  /** Unique identifier for this provider configuration */
  id: string;

  /** Display name for UI */
  name: string;

  /** Provider type */
  type: ProviderType;

  /** API key or environment variable reference (e.g., ${OPENAI_API_KEY}) */
  apiKey: string;

  /** Custom API base URL (optional, overrides provider default) */
  apiBase?: string;

  /** Whether this provider is enabled */
  enabled: boolean;

  /** Advanced provider settings (optional) */
  advancedSettings?: ProviderAdvancedSettings;

  /** Multiple API keys for load balancing */
  apiKeys?: ApiKeyEntry[];

  /** Routing strategy for multi-key load balancing */
  routingStrategy?: RoutingStrategy;

  /** Health check configuration */
  healthCheck?: HealthCheckConfig;

  /** LLM models configured for this provider */
  llmModels?: ModelDefinition[];

  /** Embedding models configured for this provider */
  embeddingModels?: ModelDefinition[];

  /** Creation timestamp (ISO 8601) */
  createdAt: string;

  /** Last update timestamp (ISO 8601) */
  updatedAt: string;
}

/**
 * Cache strategy for prompt context optimization
 * Enables file-based caching to reduce token usage
 */
export interface CacheStrategy {
  /** Whether caching is enabled for this endpoint */
  enabled: boolean;

  /** Time-to-live in minutes (default: 60) */
  ttlMinutes: number;

  /** Maximum cache size in KB (default: 512) */
  maxSizeKB: number;

  /** File patterns to cache (glob patterns like "*.md", "*.ts") */
  filePatterns: string[];
}

/**
 * Custom endpoint configuration
 * Maps CLI identifiers to specific models and caching strategies
 */
export interface CustomEndpoint {
  /** Unique CLI identifier (used in --model flag, e.g., "my-gpt4o") */
  id: string;

  /** Display name for UI */
  name: string;

  /** Reference to provider credential ID */
  providerId: string;

  /** Model identifier (e.g., "gpt-4o", "claude-3-5-sonnet-20241022") */
  model: string;

  /** Optional description */
  description?: string;

  /** Cache strategy for this endpoint */
  cacheStrategy: CacheStrategy;

  /** Whether this endpoint is enabled */
  enabled: boolean;

  /** Creation timestamp (ISO 8601) */
  createdAt: string;

  /** Last update timestamp (ISO 8601) */
  updatedAt: string;
}

/**
 * Global cache settings
 * Applies to all endpoints unless overridden
 */
export interface GlobalCacheSettings {
  /** Whether caching is globally enabled */
  enabled: boolean;

  /** Cache directory path (default: ~/.ccw/cache/context) */
  cacheDir: string;

  /** Maximum total cache size in MB (default: 100) */
  maxTotalSizeMB: number;
}

/**
 * CodexLens embedding provider selection for rotation
 * Aggregates provider + model + all API keys
 */
export interface CodexLensEmbeddingProvider {
  /** Reference to provider credential ID */
  providerId: string;

  /** Embedding model ID from the provider */
  modelId: string;

  /** Whether to use all API keys from this provider (default: true) */
  useAllKeys: boolean;

  /** Specific API key IDs to use (if useAllKeys is false) */
  selectedKeyIds?: string[];

  /** Weight for weighted routing (default: 1.0, applies to all keys from this provider) */
  weight: number;

  /** Maximum concurrent requests per key (default: 4) */
  maxConcurrentPerKey: number;

  /** Whether this provider is enabled for rotation */
  enabled: boolean;
}

/**
 * CodexLens multi-provider embedding rotation configuration
 * Aggregates multiple providers with same model for parallel rotation
 */
export interface CodexLensEmbeddingRotation {
  /** Whether multi-provider rotation is enabled */
  enabled: boolean;

  /** Selection strategy: round_robin, latency_aware, weighted_random */
  strategy: 'round_robin' | 'latency_aware' | 'weighted_random';

  /** Default cooldown seconds for rate-limited endpoints (default: 60) */
  defaultCooldown: number;

  /** Target model name that all providers should support (e.g., "qwen3-embedding") */
  targetModel: string;

  /** List of providers to aggregate for rotation */
  providers: CodexLensEmbeddingProvider[];
}

/**
 * Generic embedding pool configuration (refactored from CodexLensEmbeddingRotation)
 * Supports automatic discovery of all providers offering a specific model
 */
export interface EmbeddingPoolConfig {
  /** Whether embedding pool is enabled */
  enabled: boolean;

  /** Target embedding model name (e.g., "text-embedding-3-small") */
  targetModel: string;

  /** Selection strategy: round_robin, latency_aware, weighted_random */
  strategy: 'round_robin' | 'latency_aware' | 'weighted_random';

  /** Whether to automatically discover all providers offering targetModel */
  autoDiscover: boolean;

  /** Provider IDs to exclude from auto-discovery (optional) */
  excludedProviderIds?: string[];

  /** Default cooldown seconds for rate-limited endpoints (default: 60) */
  defaultCooldown: number;

  /** Default maximum concurrent requests per key (default: 4) */
  defaultMaxConcurrentPerKey: number;
}

/**
 * Complete LiteLLM API configuration
 * Root configuration object stored in JSON file
 */
export interface LiteLLMApiConfig {
  /** Configuration schema version */
  version: number;

  /** List of configured providers */
  providers: ProviderCredential[];

  /** List of custom endpoints */
  endpoints: CustomEndpoint[];

  /** Default endpoint ID (optional) */
  defaultEndpoint?: string;

  /** Global cache settings */
  globalCacheSettings: GlobalCacheSettings;

  /** CodexLens multi-provider embedding rotation config (deprecated, use embeddingPoolConfig) */
  codexlensEmbeddingRotation?: CodexLensEmbeddingRotation;

  /** Generic embedding pool configuration with auto-discovery support */
  embeddingPoolConfig?: EmbeddingPoolConfig;
}
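Reviewer note: for reference, a minimal object satisfying the LiteLLMApiConfig shape above; every identifier, path, and key value is a placeholder, and the import path is assumed.

// Placeholder values throughout; only the shape follows the type definitions above.
import type { LiteLLMApiConfig } from './litellm-api-config';

const exampleConfig: LiteLLMApiConfig = {
  version: 1,
  providers: [{
    id: 'openai-main',
    name: 'OpenAI (example)',
    type: 'openai',
    apiKey: '${OPENAI_API_KEY}',
    enabled: true,
    createdAt: new Date().toISOString(),
    updatedAt: new Date().toISOString(),
  }],
  endpoints: [{
    id: 'my-gpt4o',
    name: 'GPT-4o via OpenAI',
    providerId: 'openai-main',
    model: 'gpt-4o',
    cacheStrategy: { enabled: true, ttlMinutes: 60, maxSizeKB: 512, filePatterns: ['*.md', '*.ts'] },
    enabled: true,
    createdAt: new Date().toISOString(),
    updatedAt: new Date().toISOString(),
  }],
  globalCacheSettings: { enabled: true, cacheDir: '~/.ccw/cache/context', maxTotalSizeMB: 100 },
};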
96  ccw/tests/litellm-client.test.ts  Normal file
@@ -0,0 +1,96 @@
/**
 * LiteLLM Client Tests
 * Tests for the LiteLLM TypeScript bridge
 */

import { describe, it, expect, beforeEach } from '@jest/globals';
import { LiteLLMClient, getLiteLLMClient, checkLiteLLMAvailable, getLiteLLMStatus } from '../src/tools/litellm-client';

describe('LiteLLMClient', () => {
  let client: LiteLLMClient;

  beforeEach(() => {
    client = new LiteLLMClient({ timeout: 5000 });
  });

  describe('Constructor', () => {
    it('should create client with default config', () => {
      const defaultClient = new LiteLLMClient();
      expect(defaultClient).toBeDefined();
    });

    it('should create client with custom config', () => {
      const customClient = new LiteLLMClient({
        pythonPath: 'python3',
        timeout: 10000
      });
      expect(customClient).toBeDefined();
    });
  });

  describe('isAvailable', () => {
    it('should check if ccw-litellm is available', async () => {
      const available = await client.isAvailable();
      expect(typeof available).toBe('boolean');
    });
  });

  describe('getStatus', () => {
    it('should return status object', async () => {
      const status = await client.getStatus();
      expect(status).toHaveProperty('available');
      expect(typeof status.available).toBe('boolean');
    });
  });

  describe('embed', () => {
    it('should throw error for empty texts array', async () => {
      await expect(client.embed([])).rejects.toThrow('texts array cannot be empty');
    });

    it('should throw error for null texts', async () => {
      await expect(client.embed(null as any)).rejects.toThrow();
    });
  });

  describe('chat', () => {
    it('should throw error for empty message', async () => {
      await expect(client.chat('')).rejects.toThrow('message cannot be empty');
    });
  });

  describe('chatMessages', () => {
    it('should throw error for empty messages array', async () => {
      await expect(client.chatMessages([])).rejects.toThrow('messages array cannot be empty');
    });

    it('should throw error for null messages', async () => {
      await expect(client.chatMessages(null as any)).rejects.toThrow();
    });
  });
});

describe('Singleton Functions', () => {
  describe('getLiteLLMClient', () => {
    it('should return singleton instance', () => {
      const client1 = getLiteLLMClient();
      const client2 = getLiteLLMClient();
      expect(client1).toBe(client2);
    });
  });

  describe('checkLiteLLMAvailable', () => {
    it('should return boolean', async () => {
      const available = await checkLiteLLMAvailable();
      expect(typeof available).toBe('boolean');
    });
  });

  describe('getLiteLLMStatus', () => {
    it('should return status object', async () => {
      const status = await getLiteLLMStatus();
      expect(status).toHaveProperty('available');
      expect(typeof status.available).toBe('boolean');
    });
  });
});
1  ccw/tsconfig.tsbuildinfo  Normal file
@@ -0,0 +1 @@
{"root":["./src/cli.ts","./src/index.ts","./src/commands/cli.ts","./src/commands/core-memory.ts","./src/commands/hook.ts","./src/commands/install.ts","./src/commands/list.ts","./src/commands/memory.ts","./src/commands/serve.ts","./src/commands/session-path-resolver.ts","./src/commands/session.ts","./src/commands/stop.ts","./src/commands/tool.ts","./src/commands/uninstall.ts","./src/commands/upgrade.ts","./src/commands/view.ts","./src/config/litellm-api-config-manager.ts","./src/config/provider-models.ts","./src/config/storage-paths.ts","./src/core/cache-manager.ts","./src/core/claude-freshness.ts","./src/core/core-memory-store.ts","./src/core/dashboard-generator-patch.ts","./src/core/dashboard-generator.ts","./src/core/data-aggregator.ts","./src/core/history-importer.ts","./src/core/lite-scanner-complete.ts","./src/core/lite-scanner.ts","./src/core/manifest.ts","./src/core/memory-embedder-bridge.ts","./src/core/memory-store.ts","./src/core/server.ts","./src/core/session-clustering-service.ts","./src/core/session-scanner.ts","./src/core/websocket.ts","./src/core/routes/ccw-routes.ts","./src/core/routes/claude-routes.ts","./src/core/routes/cli-routes.ts","./src/core/routes/codexlens-routes.ts","./src/core/routes/core-memory-routes.ts","./src/core/routes/files-routes.ts","./src/core/routes/graph-routes.ts","./src/core/routes/help-routes.ts","./src/core/routes/hooks-routes.ts","./src/core/routes/litellm-api-routes.ts","./src/core/routes/litellm-routes.ts","./src/core/routes/mcp-routes.ts","./src/core/routes/mcp-templates-db.ts","./src/core/routes/memory-routes.ts","./src/core/routes/rules-routes.ts","./src/core/routes/session-routes.ts","./src/core/routes/skills-routes.ts","./src/core/routes/status-routes.ts","./src/core/routes/system-routes.ts","./src/mcp-server/index.ts","./src/tools/classify-folders.ts","./src/tools/claude-cli-tools.ts","./src/tools/cli-config-manager.ts","./src/tools/cli-executor.ts","./src/tools/cli-history-store.ts","./src/tools/codex-lens.ts","./src/tools/context-cache-store.ts","./src/tools/context-cache.ts","./src/tools/convert-tokens-to-css.ts","./src/tools/core-memory.ts","./src/tools/detect-changed-modules.ts","./src/tools/discover-design-files.ts","./src/tools/edit-file.ts","./src/tools/generate-module-docs.ts","./src/tools/get-modules-by-depth.ts","./src/tools/index.ts","./src/tools/litellm-client.ts","./src/tools/litellm-executor.ts","./src/tools/native-session-discovery.ts","./src/tools/notifier.ts","./src/tools/pattern-parser.ts","./src/tools/read-file.ts","./src/tools/resume-strategy.ts","./src/tools/session-content-parser.ts","./src/tools/session-manager.ts","./src/tools/smart-context.ts","./src/tools/smart-search.ts","./src/tools/storage-manager.ts","./src/tools/ui-generate-preview.js","./src/tools/ui-instantiate-prototypes.js","./src/tools/update-module-claude.js","./src/tools/write-file.ts","./src/types/config.ts","./src/types/index.ts","./src/types/litellm-api-config.ts","./src/types/session.ts","./src/types/tool.ts","./src/utils/browser-launcher.ts","./src/utils/file-utils.ts","./src/utils/path-resolver.ts","./src/utils/path-validator.ts","./src/utils/ui.ts"],"version":"5.9.3"}
@@ -31,6 +31,24 @@ semantic = [
     "hnswlib>=0.8.0",
 ]
 
+# GPU acceleration for semantic search (NVIDIA CUDA)
+# Install with: pip install codexlens[semantic-gpu]
+semantic-gpu = [
+    "numpy>=1.24",
+    "fastembed>=0.2",
+    "hnswlib>=0.8.0",
+    "onnxruntime-gpu>=1.15.0",  # CUDA support
+]
+
+# GPU acceleration for Windows (DirectML - supports NVIDIA/AMD/Intel)
+# Install with: pip install codexlens[semantic-directml]
+semantic-directml = [
+    "numpy>=1.24",
+    "fastembed>=0.2",
+    "hnswlib>=0.8.0",
+    "onnxruntime-directml>=1.15.0",  # DirectML support
+]
+
 # Encoding detection for non-UTF8 files
 encoding = [
     "chardet>=5.0",
Some files were not shown because too many files have changed in this diff.