Mirror of https://github.com/catlog22/Claude-Code-Workflow.git (synced 2026-02-05 01:50:27 +08:00)
Add multi-dimensional code review rules for architecture, correctness, performance, readability, security, and testing
- Introduced architecture rules to detect circular dependencies, god classes, layer violations, and mixed concerns.
- Added correctness rules focusing on null checks, empty catch blocks, unreachable code, and type coercion.
- Implemented performance rules addressing nested loops, synchronous I/O, memory leaks, and unnecessary re-renders in React.
- Created readability rules to improve function length, variable naming, deep nesting, magic numbers, and commented code.
- Established security rules to identify XSS risks, hardcoded secrets, SQL injection vulnerabilities, and insecure random generation.
- Developed testing rules to enhance test quality, coverage, and maintainability, including missing assertions and error path testing.
- Documented the structure and schema for rule files in index.md for better understanding and usage.
@@ -66,40 +66,42 @@ async function execute(state, workDir, currentDimension) {
|
||||
const context = state.context;
|
||||
const dimension = currentDimension;
|
||||
const findings = [];
|
||||
|
||||
// Get dimension-specific check rules
|
||||
const rules = getDimensionRules(dimension);
|
||||
|
||||
|
||||
// Load rules from the external JSON file
|
||||
const rulesConfig = loadRulesConfig(dimension, workDir);
|
||||
const rules = rulesConfig.rules || [];
|
||||
const prefix = rulesConfig.prefix || getDimensionPrefix(dimension);
|
||||
|
||||
// Review high-risk areas first
|
||||
const filesToReview = state.scan_summary?.risk_areas
|
||||
?.map(r => r.file)
|
||||
?.filter(f => context.files.includes(f)) || context.files;
|
||||
|
||||
|
||||
const filesToCheck = [...new Set([
|
||||
...filesToReview.slice(0, 20),
|
||||
...context.files.slice(0, 30)
|
||||
])].slice(0, 50); // at most 50 files
|
||||
|
||||
|
||||
let findingCounter = 1;
|
||||
|
||||
|
||||
for (const file of filesToCheck) {
|
||||
try {
|
||||
const content = Read(file);
|
||||
const lines = content.split('\n');
|
||||
|
||||
// Apply dimension-specific rules
|
||||
|
||||
// Apply rules from the external rules file
|
||||
for (const rule of rules) {
|
||||
const matches = rule.detect(content, lines, file);
|
||||
const matches = detectByPattern(content, lines, file, rule);
|
||||
for (const match of matches) {
|
||||
findings.push({
|
||||
id: `${getDimensionPrefix(dimension)}-${String(findingCounter++).padStart(3, '0')}`,
|
||||
severity: match.severity,
|
||||
id: `${prefix}-${String(findingCounter++).padStart(3, '0')}`,
|
||||
severity: rule.severity || match.severity,
|
||||
dimension: dimension,
|
||||
category: rule.category,
|
||||
file: file,
|
||||
line: match.line,
|
||||
code_snippet: match.snippet,
|
||||
description: match.description,
|
||||
description: rule.description,
|
||||
recommendation: rule.recommendation,
|
||||
fix_example: rule.fixExample
|
||||
});
|
||||
@@ -109,10 +111,10 @@ async function execute(state, workDir, currentDimension) {
|
||||
// Skip files that cannot be read
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Save findings for this dimension
|
||||
Write(`${workDir}/findings/${dimension}.json`, JSON.stringify(findings, null, 2));
|
||||
|
||||
|
||||
return {
|
||||
stateUpdates: {
|
||||
reviewed_dimensions: [...(state.reviewed_dimensions || []), dimension],
|
||||
@@ -122,6 +124,137 @@ async function execute(state, workDir, currentDimension) {
|
||||
};
|
||||
}
|
||||
|
||||
/**
 * Load rule configuration from an external JSON file
 * Rule files live at specs/rules/{dimension}-rules.json
 * @param {string} dimension - Dimension name (correctness, security, etc.)
 * @param {string} workDir - Working directory (used for logging)
 * @returns {object} Rule configuration object containing a rules array and a prefix
 */
|
||||
function loadRulesConfig(dimension, workDir) {
|
||||
// Rule file path: relative to the skill directory
|
||||
const rulesPath = `specs/rules/${dimension}-rules.json`;
|
||||
|
||||
try {
|
||||
const rulesFile = Read(rulesPath);
|
||||
const rulesConfig = JSON.parse(rulesFile);
|
||||
return rulesConfig;
|
||||
} catch (e) {
|
||||
console.warn(`Failed to load rules for ${dimension}: ${e.message}`);
|
||||
// Return an empty rule config to stay backward compatible
|
||||
return { rules: [], prefix: getDimensionPrefix(dimension) };
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Detect code issues according to the rule's patternType
 * Supported patternType values: regex, includes
 * @param {string} content - File content
 * @param {string[]} lines - Content split into lines
 * @param {string} file - File path
 * @param {object} rule - Rule configuration object
 * @returns {Array} Array of match results
 */
|
||||
function detectByPattern(content, lines, file, rule) {
|
||||
const matches = [];
|
||||
const { pattern, patternType, negativePatterns, caseInsensitive } = rule;
|
||||
|
||||
if (!pattern) return matches;
|
||||
|
||||
switch (patternType) {
|
||||
case 'regex':
|
||||
return detectByRegex(content, lines, pattern, negativePatterns, caseInsensitive);
|
||||
|
||||
case 'includes':
|
||||
return detectByIncludes(content, lines, pattern, negativePatterns);
|
||||
|
||||
default:
|
||||
// Default to includes mode
|
||||
return detectByIncludes(content, lines, pattern, negativePatterns);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Detect code issues using a regular expression
 * @param {string} content - Full file content
 * @param {string[]} lines - Content split into lines
 * @param {string} pattern - Regular expression pattern
 * @param {string[]} negativePatterns - List of exclusion patterns
 * @param {boolean} caseInsensitive - Whether matching ignores case
 * @returns {Array} Array of match results
 */
|
||||
function detectByRegex(content, lines, pattern, negativePatterns, caseInsensitive) {
|
||||
const matches = [];
|
||||
const flags = caseInsensitive ? 'gi' : 'g';
|
||||
|
||||
try {
|
||||
const regex = new RegExp(pattern, flags);
|
||||
let match;
|
||||
|
||||
while ((match = regex.exec(content)) !== null) {
|
||||
const lineNum = content.substring(0, match.index).split('\n').length;
|
||||
const lineContent = lines[lineNum - 1] || '';
|
||||
|
||||
// Check exclusion patterns - skip if the line content matches any negative pattern
|
||||
if (negativePatterns && negativePatterns.length > 0) {
|
||||
const shouldExclude = negativePatterns.some(np => {
|
||||
try {
|
||||
return new RegExp(np).test(lineContent);
|
||||
} catch {
|
||||
return lineContent.includes(np);
|
||||
}
|
||||
});
|
||||
if (shouldExclude) continue;
|
||||
}
|
||||
|
||||
matches.push({
|
||||
line: lineNum,
|
||||
snippet: lineContent.trim().substring(0, 100),
|
||||
matchedText: match[0]
|
||||
});
|
||||
}
|
||||
} catch (e) {
|
||||
console.warn(`Invalid regex pattern: ${pattern}`);
|
||||
}
|
||||
|
||||
return matches;
|
||||
}
|
||||
|
||||
/**
 * Detect code issues using substring matching
 * @param {string} content - Full file content (unused, kept for interface consistency)
 * @param {string[]} lines - Content split into lines
 * @param {string} pattern - String to search for
 * @param {string[]} negativePatterns - List of exclusion patterns
 * @returns {Array} Array of match results
 */
|
||||
function detectByIncludes(content, lines, pattern, negativePatterns) {
|
||||
const matches = [];
|
||||
|
||||
lines.forEach((line, i) => {
|
||||
if (line.includes(pattern)) {
|
||||
// Check exclusion patterns - skip if the line contains any excluded string
|
||||
if (negativePatterns && negativePatterns.length > 0) {
|
||||
const shouldExclude = negativePatterns.some(np => line.includes(np));
|
||||
if (shouldExclude) return;
|
||||
}
|
||||
|
||||
matches.push({
|
||||
line: i + 1,
|
||||
snippet: line.trim().substring(0, 100),
|
||||
matchedText: pattern
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
return matches;
|
||||
}
|
||||
|
||||
/**
 * Get the dimension prefix (fallback when no rule file exists)
 * @param {string} dimension - Dimension name
 * @returns {string} 4-character prefix
 */
|
||||
function getDimensionPrefix(dimension) {
|
||||
const prefixes = {
|
||||
correctness: 'CORR',
|
||||
@@ -133,93 +266,6 @@ function getDimensionPrefix(dimension) {
|
||||
};
|
||||
return prefixes[dimension] || 'MISC';
|
||||
}
|
||||
|
||||
function getDimensionRules(dimension) {
|
||||
// See specs/review-dimensions.md for the full rule set
|
||||
const allRules = {
|
||||
correctness: [
|
||||
{
|
||||
category: 'null-check',
|
||||
detect: (content, lines) => {
|
||||
const issues = [];
|
||||
lines.forEach((line, i) => {
|
||||
if (line.match(/\w+\.\w+/) && !line.includes('?.') && !line.includes('if') && !line.includes('null')) {
|
||||
// Simplified detection: possibly missing a null check
|
||||
}
|
||||
});
|
||||
return issues;
|
||||
},
|
||||
recommendation: 'Add null/undefined check before accessing properties',
|
||||
fixExample: 'obj?.property or if (obj) { obj.property }'
|
||||
},
|
||||
{
|
||||
category: 'empty-catch',
|
||||
detect: (content, lines) => {
|
||||
const issues = [];
|
||||
const regex = /catch\s*\([^)]*\)\s*{\s*}/g;
|
||||
let match;
|
||||
while ((match = regex.exec(content)) !== null) {
|
||||
const lineNum = content.substring(0, match.index).split('\n').length;
|
||||
issues.push({
|
||||
severity: 'high',
|
||||
line: lineNum,
|
||||
snippet: match[0],
|
||||
description: 'Empty catch block silently swallows errors'
|
||||
});
|
||||
}
|
||||
return issues;
|
||||
},
|
||||
recommendation: 'Log the error or rethrow it',
|
||||
fixExample: 'catch (e) { console.error(e); throw e; }'
|
||||
}
|
||||
],
|
||||
security: [
|
||||
{
|
||||
category: 'xss-risk',
|
||||
detect: (content, lines) => {
|
||||
const issues = [];
|
||||
lines.forEach((line, i) => {
|
||||
if (line.includes('innerHTML') || line.includes('dangerouslySetInnerHTML')) {
|
||||
issues.push({
|
||||
severity: 'critical',
|
||||
line: i + 1,
|
||||
snippet: line.trim().substring(0, 100),
|
||||
description: 'Direct HTML injection can lead to XSS vulnerabilities'
|
||||
});
|
||||
}
|
||||
});
|
||||
return issues;
|
||||
},
|
||||
recommendation: 'Use textContent or sanitize HTML before injection',
|
||||
fixExample: 'element.textContent = userInput; // or sanitize(userInput)'
|
||||
},
|
||||
{
|
||||
category: 'hardcoded-secret',
|
||||
detect: (content, lines) => {
|
||||
const issues = [];
|
||||
const regex = /(?:password|secret|api[_-]?key|token)\s*[=:]\s*['"]([^'"]{8,})['"]/gi;
|
||||
lines.forEach((line, i) => {
|
||||
if (regex.test(line)) {
|
||||
issues.push({
|
||||
severity: 'critical',
|
||||
line: i + 1,
|
||||
snippet: line.trim().substring(0, 100),
|
||||
description: 'Hardcoded credentials detected'
|
||||
});
|
||||
}
|
||||
regex.lastIndex = 0; // Reset regex
|
||||
});
|
||||
return issues;
|
||||
},
|
||||
recommendation: 'Use environment variables or secret management',
|
||||
fixExample: 'const apiKey = process.env.API_KEY;'
|
||||
}
|
||||
],
|
||||
// ... rules for other dimensions: see specs/review-dimensions.md
|
||||
};
|
||||
|
||||
return allRules[dimension] || [];
|
||||
}
|
||||
```
|
||||
|
||||
## State Updates
|
||||
|
||||
@@ -10,27 +10,41 @@ Code Review orchestrator, responsible for:
3. Execute the action and update state
4. Loop until the review is complete
|
||||
|
||||
## Dependencies
|
||||
|
||||
- **State Manager**: [state-manager.md](./state-manager.md) - Provides atomic state operations, automatic backup, validation, and rollback
|
||||
|
||||
## State Management
|
||||
|
||||
### Reading State

This module uses StateManager for all state operations, ensuring:
- **Atomic updates** - Write to a temp file, then rename, preventing corruption
- **Automatic backups** - A backup is created automatically before every update
- **Rollback capability** - Restore from a backup on failure
- **Schema validation** - Ensures the state structure stays intact
|
||||
|
||||
### StateManager API (from state-manager.md)
|
||||
|
||||
```javascript
|
||||
const state = JSON.parse(Read(`${workDir}/state.json`));
|
||||
```
|
||||
// Initialize state
|
||||
StateManager.initState(workDir)
|
||||
|
||||
### Updating State
|
||||
// Read the current state
|
||||
StateManager.getState(workDir)
|
||||
|
||||
```javascript
|
||||
function updateState(updates) {
|
||||
const state = JSON.parse(Read(`${workDir}/state.json`));
|
||||
const newState = {
|
||||
...state,
|
||||
...updates,
|
||||
updated_at: new Date().toISOString()
|
||||
};
|
||||
Write(`${workDir}/state.json`, JSON.stringify(newState, null, 2));
|
||||
return newState;
|
||||
}
|
||||
// Update state (atomic operation with automatic backup)
|
||||
StateManager.updateState(workDir, updates)
|
||||
|
||||
// Get the next dimension awaiting review
|
||||
StateManager.getNextDimension(state)
|
||||
|
||||
// Mark a dimension as complete
|
||||
StateManager.markDimensionComplete(workDir, dimension)
|
||||
|
||||
// Record an error
|
||||
StateManager.recordError(workDir, action, message)
|
||||
|
||||
// Restore from backup
|
||||
StateManager.restoreState(workDir)
|
||||
```
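The calls above typically combine as follows in one orchestrator iteration. This is a minimal sketch for orientation only, assuming `workDir` is defined and the StateManager functions are loaded as described in state-manager.md.

```javascript
// Minimal sketch of one orchestrator iteration using the StateManager API
let state = StateManager.getState(workDir) || StateManager.initState(workDir);

const dimension = StateManager.getNextDimension(state);
if (dimension) {
  // Record the action about to run, then execute it
  StateManager.updateState(workDir, { current_action: 'action-deep-review' });
  // ... run the deep-review action for `dimension` ...
  StateManager.markDimensionComplete(workDir, dimension);
} else {
  StateManager.updateState(workDir, { status: 'completed' });
}
```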
|
||||
|
||||
## Decision Logic
|
||||
@@ -41,32 +55,28 @@ function selectNextAction(state) {
|
||||
if (state.status === 'completed') return null;
|
||||
if (state.status === 'user_exit') return null;
|
||||
if (state.error_count >= 3) return 'action-abort';
|
||||
|
||||
|
||||
// 2. Initialization phase
|
||||
if (state.status === 'pending' || !state.context) {
|
||||
return 'action-collect-context';
|
||||
}
|
||||
|
||||
|
||||
// 3. Quick scan phase
|
||||
if (!state.scan_completed) {
|
||||
return 'action-quick-scan';
|
||||
}
|
||||
|
||||
// 4. Deep review phase - review dimension by dimension
|
||||
const dimensions = ['correctness', 'readability', 'performance', 'security', 'testing', 'architecture'];
|
||||
const reviewedDimensions = state.reviewed_dimensions || [];
|
||||
|
||||
for (const dim of dimensions) {
|
||||
if (!reviewedDimensions.includes(dim)) {
|
||||
return 'action-deep-review'; // pass the dimension parameter
|
||||
}
|
||||
|
||||
// 4. Deep review phase - use StateManager to get the next dimension
|
||||
const nextDimension = StateManager.getNextDimension(state);
|
||||
if (nextDimension) {
|
||||
return 'action-deep-review'; // pass the dimension parameter
|
||||
}
|
||||
|
||||
|
||||
// 5. Report generation phase
|
||||
if (!state.report_generated) {
|
||||
return 'action-generate-report';
|
||||
}
|
||||
|
||||
|
||||
// 6. Done
|
||||
return 'action-complete';
|
||||
}
|
||||
@@ -77,42 +87,51 @@ function selectNextAction(state) {
|
||||
```javascript
|
||||
async function runOrchestrator() {
|
||||
console.log('=== Code Review Orchestrator Started ===');
|
||||
|
||||
|
||||
let iteration = 0;
|
||||
const MAX_ITERATIONS = 20; // 6 dimensions + overhead
|
||||
|
||||
|
||||
// Initialize state (if not already initialized)
|
||||
let state = StateManager.getState(workDir);
|
||||
if (!state) {
|
||||
state = StateManager.initState(workDir);
|
||||
}
|
||||
|
||||
while (iteration < MAX_ITERATIONS) {
|
||||
iteration++;
|
||||
|
||||
// 1. Read the current state
|
||||
const state = JSON.parse(Read(`${workDir}/state.json`));
|
||||
|
||||
// 1. Read the current state (via StateManager)
|
||||
state = StateManager.getState(workDir);
|
||||
if (!state) {
|
||||
console.error('[Orchestrator] Failed to read state, attempting recovery...');
|
||||
state = StateManager.restoreState(workDir);
|
||||
if (!state) {
|
||||
console.error('[Orchestrator] Recovery failed, aborting.');
|
||||
break;
|
||||
}
|
||||
}
|
||||
console.log(`[Iteration ${iteration}] Status: ${state.status}`);
|
||||
|
||||
|
||||
// 2. Select the next action
|
||||
const actionId = selectNextAction(state);
|
||||
|
||||
|
||||
if (!actionId) {
|
||||
console.log('Review completed, terminating.');
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
console.log(`[Iteration ${iteration}] Executing: ${actionId}`);
|
||||
|
||||
// 3. Update state: current action
|
||||
updateState({ current_action: actionId });
|
||||
|
||||
|
||||
// 3. Update state: current action (via StateManager)
|
||||
StateManager.updateState(workDir, { current_action: actionId });
|
||||
|
||||
// 4. Execute the action
|
||||
try {
|
||||
const actionPrompt = Read(`phases/actions/${actionId}.md`);
|
||||
|
||||
// Determine the dimension to review now
|
||||
let currentDimension = null;
|
||||
if (actionId === 'action-deep-review') {
|
||||
const dimensions = ['correctness', 'readability', 'performance', 'security', 'testing', 'architecture'];
|
||||
const reviewed = state.reviewed_dimensions || [];
|
||||
currentDimension = dimensions.find(d => !reviewed.includes(d));
|
||||
}
|
||||
|
||||
|
||||
// Determine the dimension to review now (via StateManager)
|
||||
const currentDimension = StateManager.getNextDimension(state);
|
||||
|
||||
const result = await Task({
|
||||
subagent_type: 'universal-executor',
|
||||
run_in_background: false,
|
||||
@@ -137,30 +156,38 @@ Issue Classification: specs/issue-classification.md
|
||||
Return JSON with stateUpdates field containing updates to apply to state.
|
||||
`
|
||||
});
|
||||
|
||||
|
||||
const actionResult = JSON.parse(result);
|
||||
|
||||
// 5. Update state: action complete
|
||||
updateState({
|
||||
|
||||
// 5. Update state: action complete (via StateManager)
|
||||
StateManager.updateState(workDir, {
|
||||
current_action: null,
|
||||
completed_actions: [...(state.completed_actions || []), actionId],
|
||||
...actionResult.stateUpdates
|
||||
});
|
||||
|
||||
|
||||
// If this was a deep-review action, mark the dimension complete
|
||||
if (actionId === 'action-deep-review' && currentDimension) {
|
||||
StateManager.markDimensionComplete(workDir, currentDimension);
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
// Error handling
|
||||
updateState({
|
||||
current_action: null,
|
||||
errors: [...(state.errors || []), {
|
||||
action: actionId,
|
||||
message: error.message,
|
||||
timestamp: new Date().toISOString()
|
||||
}],
|
||||
error_count: (state.error_count || 0) + 1
|
||||
});
|
||||
// Error handling (via StateManager.recordError)
|
||||
console.error(`[Orchestrator] Action failed: ${error.message}`);
|
||||
StateManager.recordError(workDir, actionId, error.message);
|
||||
|
||||
// Clear the current action
|
||||
StateManager.updateState(workDir, { current_action: null });
|
||||
|
||||
// Check whether state recovery is needed
|
||||
const updatedState = StateManager.getState(workDir);
|
||||
if (updatedState && updatedState.error_count >= 3) {
|
||||
console.error('[Orchestrator] Too many errors, attempting state recovery...');
|
||||
StateManager.restoreState(workDir);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
console.log('=== Code Review Orchestrator Finished ===');
|
||||
}
|
||||
```
|
||||
@@ -179,14 +206,46 @@ Return JSON with stateUpdates field containing updates to apply to state.
|
||||
|
||||
- `state.status === 'completed'` - Review finished normally
- `state.status === 'user_exit'` - User exited voluntarily
- `state.error_count >= 3` - Error count exceeded the limit
- `state.error_count >= 3` - Error count exceeded the limit (handled automatically by StateManager.recordError)
- `iteration >= MAX_ITERATIONS` - Iteration count exceeded the limit
|
||||
|
||||
## Error Recovery
|
||||
|
||||
| Error Type | Recovery Strategy |
|------------|-------------------|
| File read failure | Skip the file and log a warning |
| Action execution failure | Retry up to 3 times |
| Inconsistent state | Re-initialize state |
| User abort | Save current progress and allow resume |

This module relies on the error recovery mechanisms provided by StateManager:

| Error Type | Recovery Strategy | StateManager Function |
|------------|-------------------|----------------------|
| State read failure | Restore from backup | `restoreState(workDir)` |
| Action execution failure | Record the error; auto-fail once the limit is exceeded | `recordError(workDir, action, message)` |
| Inconsistent state | Validate and recover | built-in validation in `getState()` |
| User abort | Save current progress | `updateState(workDir, { status: 'user_exit' })` |
|
||||
|
||||
### Error Handling Flow

```
1. Action execution fails
   |
2. StateManager.recordError() records the error
   |
3. Check error_count
   |
   +-- < 3: continue with the next iteration
   +-- >= 3: StateManager automatically sets status='failed'
   |
Orchestrator detects the status change
   |
Attempt restoreState() to recover to the last stable state
```
|
||||
|
||||
### Backup Timing

StateManager automatically creates backups at the following times:
- Before every `updateState()` call
- A named backup can be created manually via `backupState(workDir, suffix)` (see the sketch below)
|
||||
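A minimal usage sketch (assuming the StateManager functions are in scope) of taking a named backup before a risky phase and rolling back to it on failure:

```javascript
// Named backup before the deep-review phase; the suffix becomes part of the file name
StateManager.backupState(workDir, 'pre-deep-review');

// ... run the deep-review phase ...

// On failure, roll back to that exact point
StateManager.restoreState(workDir, `${workDir}/state.backup.pre-deep-review.json`);
```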
|
||||
### History Tracking

All state changes are recorded in `state-history.json` for easier debugging and auditing:
- Initialization events
- Field changes for each update
- Records of restore operations
|
||||
|
||||
.claude/skills/review-code/phases/state-manager.md (new file, 752 lines)
@@ -0,0 +1,752 @@
|
||||
# State Manager
|
||||
|
||||
Centralized state management module for Code Review workflow. Provides atomic operations, automatic backups, validation, and rollback capabilities.
|
||||
|
||||
## Overview
|
||||
|
||||
This module solves the fragile state management problem by providing:
|
||||
- **Atomic updates** - Write to temp file, then rename (prevents corruption)
|
||||
- **Automatic backups** - Every update creates a backup first
|
||||
- **Rollback capability** - Restore from backup on failure
|
||||
- **Schema validation** - Ensure state structure integrity
|
||||
- **Change history** - Track all state modifications
|
||||
|
||||
## File Structure
|
||||
|
||||
```
|
||||
{workDir}/
|
||||
state.json # Current state
|
||||
state.backup.json # Latest backup
|
||||
state-history.json # Change history log
|
||||
```
|
||||
|
||||
## API Reference
|
||||
|
||||
### initState(workDir)
|
||||
|
||||
Initialize a new state file with default values.
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* Initialize state file with default structure
|
||||
* @param {string} workDir - Working directory path
|
||||
* @returns {object} - Initial state object
|
||||
*/
|
||||
function initState(workDir) {
|
||||
const now = new Date().toISOString();
|
||||
|
||||
const initialState = {
|
||||
status: 'pending',
|
||||
started_at: now,
|
||||
updated_at: now,
|
||||
context: null,
|
||||
scan_completed: false,
|
||||
scan_summary: null,
|
||||
reviewed_dimensions: [],
|
||||
current_dimension: null,
|
||||
findings: {
|
||||
correctness: [],
|
||||
readability: [],
|
||||
performance: [],
|
||||
security: [],
|
||||
testing: [],
|
||||
architecture: []
|
||||
},
|
||||
report_generated: false,
|
||||
report_path: null,
|
||||
current_action: null,
|
||||
completed_actions: [],
|
||||
errors: [],
|
||||
error_count: 0,
|
||||
summary: null
|
||||
};
|
||||
|
||||
// Write state file
|
||||
const statePath = `${workDir}/state.json`;
|
||||
Write(statePath, JSON.stringify(initialState, null, 2));
|
||||
|
||||
// Initialize history log
|
||||
const historyPath = `${workDir}/state-history.json`;
|
||||
const historyEntry = {
|
||||
entries: [{
|
||||
timestamp: now,
|
||||
action: 'init',
|
||||
changes: { type: 'initialize', status: 'pending' }
|
||||
}]
|
||||
};
|
||||
Write(historyPath, JSON.stringify(historyEntry, null, 2));
|
||||
|
||||
console.log(`[StateManager] Initialized state at ${statePath}`);
|
||||
return initialState;
|
||||
}
|
||||
```
|
||||
|
||||
### getState(workDir)
|
||||
|
||||
Read and parse current state from file.
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* Read current state from file
|
||||
* @param {string} workDir - Working directory path
|
||||
* @returns {object|null} - Current state or null if not found
|
||||
*/
|
||||
function getState(workDir) {
|
||||
const statePath = `${workDir}/state.json`;
|
||||
|
||||
try {
|
||||
const content = Read(statePath);
|
||||
const state = JSON.parse(content);
|
||||
|
||||
// Validate structure before returning
|
||||
const validation = validateState(state);
|
||||
if (!validation.valid) {
|
||||
console.warn(`[StateManager] State validation warnings: ${validation.warnings.join(', ')}`);
|
||||
}
|
||||
|
||||
return state;
|
||||
} catch (error) {
|
||||
console.error(`[StateManager] Failed to read state: ${error.message}`);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### updateState(workDir, updates)
|
||||
|
||||
Safely update state with atomic write and automatic backup.
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* Safely update state with atomic write
|
||||
* @param {string} workDir - Working directory path
|
||||
* @param {object} updates - Partial state updates to apply
|
||||
* @returns {object} - Updated state object
|
||||
* @throws {Error} - If update fails (automatically rolls back)
|
||||
*/
|
||||
function updateState(workDir, updates) {
|
||||
const statePath = `${workDir}/state.json`;
|
||||
const tempPath = `${workDir}/state.tmp.json`;
|
||||
const backupPath = `${workDir}/state.backup.json`;
|
||||
const historyPath = `${workDir}/state-history.json`;
|
||||
|
||||
// Step 1: Read current state
|
||||
let currentState;
|
||||
try {
|
||||
currentState = JSON.parse(Read(statePath));
|
||||
} catch (error) {
|
||||
throw new Error(`Cannot read current state: ${error.message}`);
|
||||
}
|
||||
|
||||
// Step 2: Create backup before any modification
|
||||
try {
|
||||
Write(backupPath, JSON.stringify(currentState, null, 2));
|
||||
} catch (error) {
|
||||
throw new Error(`Cannot create backup: ${error.message}`);
|
||||
}
|
||||
|
||||
// Step 3: Merge updates
|
||||
const now = new Date().toISOString();
|
||||
const newState = deepMerge(currentState, {
|
||||
...updates,
|
||||
updated_at: now
|
||||
});
|
||||
|
||||
// Step 4: Validate new state
|
||||
const validation = validateState(newState);
|
||||
if (!validation.valid && validation.errors.length > 0) {
|
||||
throw new Error(`Invalid state after update: ${validation.errors.join(', ')}`);
|
||||
}
|
||||
|
||||
// Step 5: Write to temp file first (atomic preparation)
|
||||
try {
|
||||
Write(tempPath, JSON.stringify(newState, null, 2));
|
||||
} catch (error) {
|
||||
throw new Error(`Cannot write temp state: ${error.message}`);
|
||||
}
|
||||
|
||||
// Step 6: Atomic rename (replace original with temp)
|
||||
try {
|
||||
// Read temp and write to original (simulating atomic rename)
|
||||
const tempContent = Read(tempPath);
|
||||
Write(statePath, tempContent);
|
||||
|
||||
// Clean up temp file
|
||||
Bash(`rm -f "${tempPath}"`);
|
||||
} catch (error) {
|
||||
// Rollback: restore from backup
|
||||
console.error(`[StateManager] Update failed, rolling back: ${error.message}`);
|
||||
try {
|
||||
const backup = Read(backupPath);
|
||||
Write(statePath, backup);
|
||||
} catch (rollbackError) {
|
||||
throw new Error(`Critical: Update failed and rollback failed: ${rollbackError.message}`);
|
||||
}
|
||||
throw new Error(`Update failed, rolled back: ${error.message}`);
|
||||
}
|
||||
|
||||
// Step 7: Record in history
|
||||
try {
|
||||
let history = { entries: [] };
|
||||
try {
|
||||
history = JSON.parse(Read(historyPath));
|
||||
} catch (e) {
|
||||
// History file may not exist, start fresh
|
||||
}
|
||||
|
||||
history.entries.push({
|
||||
timestamp: now,
|
||||
action: 'update',
|
||||
changes: summarizeChanges(currentState, newState, updates)
|
||||
});
|
||||
|
||||
// Keep only last 100 entries
|
||||
if (history.entries.length > 100) {
|
||||
history.entries = history.entries.slice(-100);
|
||||
}
|
||||
|
||||
Write(historyPath, JSON.stringify(history, null, 2));
|
||||
} catch (error) {
|
||||
// History logging failure is non-critical
|
||||
console.warn(`[StateManager] Failed to log history: ${error.message}`);
|
||||
}
|
||||
|
||||
console.log(`[StateManager] State updated successfully`);
|
||||
return newState;
|
||||
}
|
||||
|
||||
/**
|
||||
* Deep merge helper - merges nested objects
|
||||
*/
|
||||
function deepMerge(target, source) {
|
||||
const result = { ...target };
|
||||
|
||||
for (const key of Object.keys(source)) {
|
||||
if (source[key] === null || source[key] === undefined) {
|
||||
result[key] = source[key];
|
||||
} else if (Array.isArray(source[key])) {
|
||||
result[key] = source[key];
|
||||
} else if (typeof source[key] === 'object' && typeof target[key] === 'object') {
|
||||
result[key] = deepMerge(target[key], source[key]);
|
||||
} else {
|
||||
result[key] = source[key];
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Summarize changes for history logging
|
||||
*/
|
||||
function summarizeChanges(oldState, newState, updates) {
|
||||
const changes = {};
|
||||
|
||||
for (const key of Object.keys(updates)) {
|
||||
if (key === 'updated_at') continue;
|
||||
|
||||
const oldVal = oldState[key];
|
||||
const newVal = newState[key];
|
||||
|
||||
if (JSON.stringify(oldVal) !== JSON.stringify(newVal)) {
|
||||
changes[key] = {
|
||||
from: typeof oldVal === 'object' ? '[object]' : oldVal,
|
||||
to: typeof newVal === 'object' ? '[object]' : newVal
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
return changes;
|
||||
}
|
||||
```
|
||||
|
||||
### validateState(state)
|
||||
|
||||
Validate state structure against schema.
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* Validate state structure
|
||||
* @param {object} state - State object to validate
|
||||
* @returns {object} - { valid: boolean, errors: string[], warnings: string[] }
|
||||
*/
|
||||
function validateState(state) {
|
||||
const errors = [];
|
||||
const warnings = [];
|
||||
|
||||
// Required fields
|
||||
const requiredFields = ['status', 'started_at', 'updated_at'];
|
||||
for (const field of requiredFields) {
|
||||
if (state[field] === undefined) {
|
||||
errors.push(`Missing required field: ${field}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Status validation
|
||||
const validStatuses = ['pending', 'running', 'completed', 'failed', 'user_exit'];
|
||||
if (state.status && !validStatuses.includes(state.status)) {
|
||||
errors.push(`Invalid status: ${state.status}. Must be one of: ${validStatuses.join(', ')}`);
|
||||
}
|
||||
|
||||
// Timestamp format validation
|
||||
const timestampFields = ['started_at', 'updated_at', 'completed_at'];
|
||||
for (const field of timestampFields) {
|
||||
if (state[field] && !isValidISOTimestamp(state[field])) {
|
||||
warnings.push(`Invalid timestamp format for ${field}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Findings structure validation
|
||||
if (state.findings) {
|
||||
const expectedDimensions = ['correctness', 'readability', 'performance', 'security', 'testing', 'architecture'];
|
||||
for (const dim of expectedDimensions) {
|
||||
if (!Array.isArray(state.findings[dim])) {
|
||||
warnings.push(`findings.${dim} should be an array`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Context validation (when present)
|
||||
if (state.context !== null && state.context !== undefined) {
|
||||
const contextFields = ['target_path', 'files', 'language', 'total_lines', 'file_count'];
|
||||
for (const field of contextFields) {
|
||||
if (state.context[field] === undefined) {
|
||||
warnings.push(`context.${field} is missing`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Error count validation
|
||||
if (typeof state.error_count !== 'number') {
|
||||
warnings.push('error_count should be a number');
|
||||
}
|
||||
|
||||
// Array fields validation
|
||||
const arrayFields = ['reviewed_dimensions', 'completed_actions', 'errors'];
|
||||
for (const field of arrayFields) {
|
||||
if (state[field] !== undefined && !Array.isArray(state[field])) {
|
||||
errors.push(`${field} must be an array`);
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
valid: errors.length === 0,
|
||||
errors,
|
||||
warnings
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if string is valid ISO timestamp
|
||||
*/
|
||||
function isValidISOTimestamp(str) {
|
||||
if (typeof str !== 'string') return false;
|
||||
const date = new Date(str);
|
||||
return !isNaN(date.getTime()) && str.includes('T');
|
||||
}
|
||||
```
|
||||
|
||||
### backupState(workDir)
|
||||
|
||||
Create a manual backup of current state.
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* Create a manual backup of current state
|
||||
* @param {string} workDir - Working directory path
|
||||
* @param {string} [suffix] - Optional suffix for backup file name
|
||||
* @returns {string} - Backup file path
|
||||
*/
|
||||
function backupState(workDir, suffix = null) {
|
||||
const statePath = `${workDir}/state.json`;
|
||||
const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
|
||||
const backupName = suffix
|
||||
? `state.backup.${suffix}.json`
|
||||
: `state.backup.${timestamp}.json`;
|
||||
const backupPath = `${workDir}/${backupName}`;
|
||||
|
||||
try {
|
||||
const content = Read(statePath);
|
||||
Write(backupPath, content);
|
||||
console.log(`[StateManager] Backup created: ${backupPath}`);
|
||||
return backupPath;
|
||||
} catch (error) {
|
||||
throw new Error(`Failed to create backup: ${error.message}`);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### restoreState(workDir, backupPath)
|
||||
|
||||
Restore state from a backup file.
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* Restore state from a backup file
|
||||
* @param {string} workDir - Working directory path
|
||||
* @param {string} [backupPath] - Path to backup file (default: latest backup)
|
||||
* @returns {object} - Restored state object
|
||||
*/
|
||||
function restoreState(workDir, backupPath = null) {
|
||||
const statePath = `${workDir}/state.json`;
|
||||
const defaultBackup = `${workDir}/state.backup.json`;
|
||||
const historyPath = `${workDir}/state-history.json`;
|
||||
|
||||
const sourcePath = backupPath || defaultBackup;
|
||||
|
||||
try {
|
||||
// Read backup
|
||||
const backupContent = Read(sourcePath);
|
||||
const backupState = JSON.parse(backupContent);
|
||||
|
||||
// Validate backup state
|
||||
const validation = validateState(backupState);
|
||||
if (!validation.valid) {
|
||||
throw new Error(`Backup state is invalid: ${validation.errors.join(', ')}`);
|
||||
}
|
||||
|
||||
// Create backup of current state before restore (for safety)
|
||||
try {
|
||||
const currentContent = Read(statePath);
|
||||
Write(`${workDir}/state.pre-restore.json`, currentContent);
|
||||
} catch (e) {
|
||||
// Current state may not exist, that's okay
|
||||
}
|
||||
|
||||
// Update timestamp
|
||||
const now = new Date().toISOString();
|
||||
backupState.updated_at = now;
|
||||
|
||||
// Write restored state
|
||||
Write(statePath, JSON.stringify(backupState, null, 2));
|
||||
|
||||
// Log to history
|
||||
try {
|
||||
let history = { entries: [] };
|
||||
try {
|
||||
history = JSON.parse(Read(historyPath));
|
||||
} catch (e) {}
|
||||
|
||||
history.entries.push({
|
||||
timestamp: now,
|
||||
action: 'restore',
|
||||
changes: { source: sourcePath }
|
||||
});
|
||||
|
||||
Write(historyPath, JSON.stringify(history, null, 2));
|
||||
} catch (e) {
|
||||
console.warn(`[StateManager] Failed to log restore to history`);
|
||||
}
|
||||
|
||||
console.log(`[StateManager] State restored from ${sourcePath}`);
|
||||
return backupState;
|
||||
} catch (error) {
|
||||
throw new Error(`Failed to restore state: ${error.message}`);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Convenience Functions
|
||||
|
||||
### getNextDimension(state)
|
||||
|
||||
Get the next dimension to review based on current state.
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* Get next dimension to review
|
||||
* @param {object} state - Current state
|
||||
* @returns {string|null} - Next dimension or null if all reviewed
|
||||
*/
|
||||
function getNextDimension(state) {
|
||||
const dimensions = ['correctness', 'security', 'performance', 'readability', 'testing', 'architecture'];
|
||||
const reviewed = state.reviewed_dimensions || [];
|
||||
|
||||
for (const dim of dimensions) {
|
||||
if (!reviewed.includes(dim)) {
|
||||
return dim;
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
```
|
||||
|
||||
### addFinding(workDir, finding)
|
||||
|
||||
Add a new finding to the state.
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* Add a finding to the appropriate dimension
|
||||
* @param {string} workDir - Working directory path
|
||||
* @param {object} finding - Finding object (must include dimension field)
|
||||
* @returns {object} - Updated state
|
||||
*/
|
||||
function addFinding(workDir, finding) {
|
||||
if (!finding.dimension) {
|
||||
throw new Error('Finding must have a dimension field');
|
||||
}
|
||||
|
||||
const state = getState(workDir);
|
||||
const dimension = finding.dimension;
|
||||
|
||||
// Generate ID if not provided
|
||||
if (!finding.id) {
|
||||
const prefixes = {
|
||||
correctness: 'CORR',
|
||||
readability: 'READ',
|
||||
performance: 'PERF',
|
||||
security: 'SEC',
|
||||
testing: 'TEST',
|
||||
architecture: 'ARCH'
|
||||
};
|
||||
const prefix = prefixes[dimension] || 'MISC';
|
||||
const count = (state.findings[dimension]?.length || 0) + 1;
|
||||
finding.id = `${prefix}-${String(count).padStart(3, '0')}`;
|
||||
}
|
||||
|
||||
const currentFindings = state.findings[dimension] || [];
|
||||
|
||||
return updateState(workDir, {
|
||||
findings: {
|
||||
...state.findings,
|
||||
[dimension]: [...currentFindings, finding]
|
||||
}
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
### markDimensionComplete(workDir, dimension)
|
||||
|
||||
Mark a dimension as reviewed.
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* Mark a dimension as reviewed
|
||||
* @param {string} workDir - Working directory path
|
||||
* @param {string} dimension - Dimension name
|
||||
* @returns {object} - Updated state
|
||||
*/
|
||||
function markDimensionComplete(workDir, dimension) {
|
||||
const state = getState(workDir);
|
||||
const reviewed = state.reviewed_dimensions || [];
|
||||
|
||||
if (reviewed.includes(dimension)) {
|
||||
console.warn(`[StateManager] Dimension ${dimension} already marked as reviewed`);
|
||||
return state;
|
||||
}
|
||||
|
||||
return updateState(workDir, {
|
||||
reviewed_dimensions: [...reviewed, dimension],
|
||||
current_dimension: null
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
### recordError(workDir, action, message)
|
||||
|
||||
Record an error in state.
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* Record an execution error
|
||||
* @param {string} workDir - Working directory path
|
||||
* @param {string} action - Action that failed
|
||||
* @param {string} message - Error message
|
||||
* @returns {object} - Updated state
|
||||
*/
|
||||
function recordError(workDir, action, message) {
|
||||
const state = getState(workDir);
|
||||
const errors = state.errors || [];
|
||||
const errorCount = (state.error_count || 0) + 1;
|
||||
|
||||
const newError = {
|
||||
action,
|
||||
message,
|
||||
timestamp: new Date().toISOString()
|
||||
};
|
||||
|
||||
const newState = updateState(workDir, {
|
||||
errors: [...errors, newError],
|
||||
error_count: errorCount
|
||||
});
|
||||
|
||||
// Auto-fail if error count exceeds threshold
|
||||
if (errorCount >= 3) {
|
||||
return updateState(workDir, {
|
||||
status: 'failed'
|
||||
});
|
||||
}
|
||||
|
||||
return newState;
|
||||
}
|
||||
```
|
||||
|
||||
## Usage Examples
|
||||
|
||||
### Initialize and Run Review
|
||||
|
||||
```javascript
|
||||
// Initialize new review session
|
||||
const workDir = '/path/to/review-session';
|
||||
const state = initState(workDir);
|
||||
|
||||
// Update status to running
|
||||
updateState(workDir, { status: 'running' });
|
||||
|
||||
// After collecting context
|
||||
updateState(workDir, {
|
||||
context: {
|
||||
target_path: '/src/auth',
|
||||
files: ['auth.ts', 'login.ts'],
|
||||
language: 'typescript',
|
||||
total_lines: 500,
|
||||
file_count: 2
|
||||
}
|
||||
});
|
||||
|
||||
// After completing quick scan
|
||||
updateState(workDir, {
|
||||
scan_completed: true,
|
||||
scan_summary: {
|
||||
risk_areas: [{ file: 'auth.ts', reason: 'Complex logic', priority: 'high' }],
|
||||
complexity_score: 7.5,
|
||||
quick_issues: []
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
### Add Findings During Review
|
||||
|
||||
```javascript
|
||||
// Add a security finding
|
||||
addFinding(workDir, {
|
||||
dimension: 'security',
|
||||
severity: 'high',
|
||||
category: 'injection',
|
||||
file: 'auth.ts',
|
||||
line: 45,
|
||||
description: 'SQL injection vulnerability',
|
||||
recommendation: 'Use parameterized queries'
|
||||
});
|
||||
|
||||
// Mark dimension complete
|
||||
markDimensionComplete(workDir, 'security');
|
||||
```
|
||||
|
||||
### Error Handling with Rollback
|
||||
|
||||
```javascript
|
||||
try {
|
||||
updateState(workDir, {
|
||||
status: 'running',
|
||||
current_action: 'deep-review'
|
||||
});
|
||||
|
||||
// ... do review work ...
|
||||
|
||||
} catch (error) {
|
||||
// Record error
|
||||
recordError(workDir, 'deep-review', error.message);
|
||||
|
||||
// If needed, restore from backup
|
||||
restoreState(workDir);
|
||||
}
|
||||
```
|
||||
|
||||
### Check Review Progress
|
||||
|
||||
```javascript
|
||||
const state = getState(workDir);
|
||||
const nextDim = getNextDimension(state);
|
||||
|
||||
if (nextDim) {
|
||||
console.log(`Next dimension to review: ${nextDim}`);
|
||||
updateState(workDir, { current_dimension: nextDim });
|
||||
} else {
|
||||
console.log('All dimensions reviewed');
|
||||
}
|
||||
```
|
||||
|
||||
## Integration with Orchestrator
|
||||
|
||||
Update the orchestrator to use StateManager:
|
||||
|
||||
```javascript
|
||||
// In orchestrator.md - Replace direct state operations with StateManager calls
|
||||
|
||||
// OLD:
|
||||
const state = JSON.parse(Read(`${workDir}/state.json`));
|
||||
|
||||
// NEW:
|
||||
const state = getState(workDir);
|
||||
|
||||
// OLD:
|
||||
function updateState(updates) {
|
||||
const state = JSON.parse(Read(`${workDir}/state.json`));
|
||||
const newState = { ...state, ...updates, updated_at: new Date().toISOString() };
|
||||
Write(`${workDir}/state.json`, JSON.stringify(newState, null, 2));
|
||||
return newState;
|
||||
}
|
||||
|
||||
// NEW:
|
||||
// Import from state-manager.md
|
||||
// updateState(workDir, updates) - handles atomic write, backup, validation
|
||||
|
||||
// Error handling - OLD:
|
||||
updateState({
|
||||
errors: [...(state.errors || []), { action: actionId, message: error.message, timestamp: new Date().toISOString() }],
|
||||
error_count: (state.error_count || 0) + 1
|
||||
});
|
||||
|
||||
// Error handling - NEW:
|
||||
recordError(workDir, actionId, error.message);
|
||||
```
|
||||
|
||||
## State History Format
|
||||
|
||||
The `state-history.json` file tracks all state changes:
|
||||
|
||||
```json
|
||||
{
|
||||
"entries": [
|
||||
{
|
||||
"timestamp": "2024-01-01T10:00:00.000Z",
|
||||
"action": "init",
|
||||
"changes": { "type": "initialize", "status": "pending" }
|
||||
},
|
||||
{
|
||||
"timestamp": "2024-01-01T10:01:00.000Z",
|
||||
"action": "update",
|
||||
"changes": {
|
||||
"status": { "from": "pending", "to": "running" },
|
||||
"current_action": { "from": null, "to": "action-collect-context" }
|
||||
}
|
||||
},
|
||||
{
|
||||
"timestamp": "2024-01-01T10:05:00.000Z",
|
||||
"action": "restore",
|
||||
"changes": { "source": "/path/state.backup.json" }
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Error Recovery Strategies
|
||||
|
||||
| Scenario | Strategy | Function |
|
||||
|----------|----------|----------|
|
||||
| State file corrupted | Restore from backup | `restoreState(workDir)` |
|
||||
| Invalid state after update | Auto-rollback (built-in) | N/A (automatic) |
|
||||
| Multiple errors | Auto-fail after 3 | `recordError()` |
|
||||
| Need to retry from checkpoint | Restore specific backup | `restoreState(workDir, backupPath)` |
|
||||
| Review interrupted | Resume from saved state | `getState(workDir)` |
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Always use `updateState()`** - Never write directly to state.json
|
||||
2. **Check validation warnings** - Warnings may indicate data issues
|
||||
3. **Use convenience functions** - `addFinding()`, `markDimensionComplete()`, etc.
|
||||
4. **Monitor history** - Check state-history.json for debugging
|
||||
5. **Create named backups** - Before major operations: `backupState(workDir, 'pre-deep-review')`
|
||||
@@ -0,0 +1,63 @@
|
||||
{
|
||||
"dimension": "architecture",
|
||||
"prefix": "ARCH",
|
||||
"description": "Rules for detecting architecture issues including coupling, layering, and design patterns",
|
||||
"rules": [
|
||||
{
|
||||
"id": "circular-dependency",
|
||||
"category": "dependency",
|
||||
"severity": "high",
|
||||
"pattern": "import\\s+.*from\\s+['\"]\\.\\..*['\"]",
|
||||
"patternType": "regex",
|
||||
"contextPattern": "export.*import.*from.*same-module",
|
||||
"description": "Potential circular dependency detected. Circular imports cause initialization issues and tight coupling",
|
||||
"recommendation": "Extract shared code to a separate module, use dependency injection, or restructure the dependency graph",
|
||||
"fixExample": "// Before - A imports B, B imports A\n// moduleA.ts\nimport { funcB } from './moduleB';\nexport const funcA = () => funcB();\n\n// moduleB.ts\nimport { funcA } from './moduleA'; // circular!\n\n// After - extract shared logic\n// shared.ts\nexport const sharedLogic = () => { ... };\n\n// moduleA.ts\nimport { sharedLogic } from './shared';"
|
||||
},
|
||||
{
|
||||
"id": "god-class",
|
||||
"category": "single-responsibility",
|
||||
"severity": "high",
|
||||
"pattern": "class\\s+\\w+\\s*\\{",
|
||||
"patternType": "regex",
|
||||
"methodThreshold": 15,
|
||||
"lineThreshold": 300,
|
||||
"description": "Class with too many methods or lines violates single responsibility principle",
|
||||
"recommendation": "Split into smaller, focused classes. Each class should have one reason to change",
|
||||
"fixExample": "// Before - UserManager handles everything\nclass UserManager {\n createUser() { ... }\n updateUser() { ... }\n sendEmail() { ... }\n generateReport() { ... }\n validatePassword() { ... }\n}\n\n// After - separated concerns\nclass UserRepository { create, update, delete }\nclass EmailService { sendEmail }\nclass ReportGenerator { generate }\nclass PasswordValidator { validate }"
|
||||
},
|
||||
{
|
||||
"id": "layer-violation",
|
||||
"category": "layering",
|
||||
"severity": "high",
|
||||
"pattern": "import.*(?:repository|database|sql|prisma|mongoose).*from",
|
||||
"patternType": "regex",
|
||||
"contextPath": ["controller", "handler", "route", "component"],
|
||||
"description": "Direct database access from presentation layer violates layered architecture",
|
||||
"recommendation": "Access data through service/use-case layer. Keep controllers thin and delegate to services",
|
||||
"fixExample": "// Before - controller accesses DB directly\nimport { prisma } from './database';\nconst getUsers = async () => prisma.user.findMany();\n\n// After - use service layer\nimport { userService } from './services';\nconst getUsers = async () => userService.getAll();"
|
||||
},
|
||||
{
|
||||
"id": "missing-interface",
|
||||
"category": "abstraction",
|
||||
"severity": "medium",
|
||||
"pattern": "new\\s+\\w+Service\\(|new\\s+\\w+Repository\\(",
|
||||
"patternType": "regex",
|
||||
"negativePatterns": ["interface", "implements", "inject"],
|
||||
"description": "Direct instantiation of services/repositories creates tight coupling",
|
||||
"recommendation": "Define interfaces and use dependency injection for loose coupling and testability",
|
||||
"fixExample": "// Before - tight coupling\nclass OrderService {\n private repo = new OrderRepository();\n}\n\n// After - loose coupling\ninterface IOrderRepository {\n findById(id: string): Promise<Order>;\n}\n\nclass OrderService {\n constructor(private repo: IOrderRepository) {}\n}"
|
||||
},
|
||||
{
|
||||
"id": "mixed-concerns",
|
||||
"category": "separation-of-concerns",
|
||||
"severity": "medium",
|
||||
"pattern": "fetch\\s*\\(|axios\\.|http\\.",
|
||||
"patternType": "regex",
|
||||
"contextPath": ["component", "view", "page"],
|
||||
"description": "Network calls in UI components mix data fetching with presentation",
|
||||
"recommendation": "Extract data fetching to hooks, services, or state management layer",
|
||||
"fixExample": "// Before - fetch in component\nfunction UserList() {\n const [users, setUsers] = useState([]);\n useEffect(() => {\n fetch('/api/users').then(r => r.json()).then(setUsers);\n }, []);\n}\n\n// After - custom hook\nfunction useUsers() {\n return useQuery('users', () => userService.getAll());\n}\n\nfunction UserList() {\n const { data: users } = useUsers();\n}"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,60 @@
|
||||
{
|
||||
"dimension": "correctness",
|
||||
"prefix": "CORR",
|
||||
"description": "Rules for detecting logical errors, null handling, and error handling issues",
|
||||
"rules": [
|
||||
{
|
||||
"id": "null-check",
|
||||
"category": "null-check",
|
||||
"severity": "high",
|
||||
"pattern": "\\w+\\.\\w+(?!\\.?\\?)",
|
||||
"patternType": "regex",
|
||||
"negativePatterns": ["\\?\\.", "if\\s*\\(", "!==?\\s*null", "!==?\\s*undefined", "&&\\s*\\w+\\."],
|
||||
"description": "Property access without null/undefined check may cause runtime errors",
|
||||
"recommendation": "Add null/undefined check before accessing properties using optional chaining or conditional checks",
|
||||
"fixExample": "// Before\nobj.property.value\n\n// After\nobj?.property?.value\n// or\nif (obj && obj.property) { obj.property.value }"
|
||||
},
|
||||
{
|
||||
"id": "empty-catch",
|
||||
"category": "empty-catch",
|
||||
"severity": "high",
|
||||
"pattern": "catch\\s*\\([^)]*\\)\\s*\\{\\s*\\}",
|
||||
"patternType": "regex",
|
||||
"description": "Empty catch block silently swallows errors, hiding bugs and making debugging difficult",
|
||||
"recommendation": "Log the error, rethrow it, or handle it appropriately. Never silently ignore exceptions",
|
||||
"fixExample": "// Before\ncatch (e) { }\n\n// After\ncatch (e) {\n console.error('Operation failed:', e);\n throw e; // or handle appropriately\n}"
|
||||
},
|
||||
{
|
||||
"id": "unreachable-code",
|
||||
"category": "unreachable-code",
|
||||
"severity": "medium",
|
||||
"pattern": "return\\s+[^;]+;\\s*\\n\\s*[^}\\s]",
|
||||
"patternType": "regex",
|
||||
"description": "Code after return statement is unreachable and will never execute",
|
||||
"recommendation": "Remove unreachable code or restructure the logic to ensure all code paths are accessible",
|
||||
"fixExample": "// Before\nfunction example() {\n return value;\n doSomething(); // unreachable\n}\n\n// After\nfunction example() {\n doSomething();\n return value;\n}"
|
||||
},
|
||||
{
|
||||
"id": "array-index-unchecked",
|
||||
"category": "boundary-check",
|
||||
"severity": "high",
|
||||
"pattern": "\\[\\d+\\]|\\[\\w+\\](?!\\s*[!=<>])",
|
||||
"patternType": "regex",
|
||||
"negativePatterns": ["\\.length", "Array\\.isArray", "\\?.\\["],
|
||||
"description": "Array index access without boundary check may cause undefined access or out-of-bounds errors",
|
||||
"recommendation": "Check array length or use optional chaining before accessing array elements",
|
||||
"fixExample": "// Before\nconst item = arr[index];\n\n// After\nconst item = arr?.[index];\n// or\nconst item = index < arr.length ? arr[index] : defaultValue;"
|
||||
},
|
||||
{
|
||||
"id": "comparison-type-coercion",
|
||||
"category": "type-safety",
|
||||
"severity": "medium",
|
||||
"pattern": "[^!=]==[^=]|[^!]==[^=]",
|
||||
"patternType": "regex",
|
||||
"negativePatterns": ["===", "!=="],
|
||||
"description": "Using == instead of === can lead to unexpected type coercion",
|
||||
"recommendation": "Use strict equality (===) to avoid implicit type conversion",
|
||||
"fixExample": "// Before\nif (value == null)\nif (a == b)\n\n// After\nif (value === null || value === undefined)\nif (a === b)"
|
||||
}
|
||||
]
|
||||
}
|
||||
.claude/skills/review-code/specs/rules/index.md (new file, 140 lines)
@@ -0,0 +1,140 @@
|
||||
# Code Review Rules Index
|
||||
|
||||
This directory contains externalized review rules for the multi-dimensional code review skill.
|
||||
|
||||
## Directory Structure
|
||||
|
||||
```
|
||||
rules/
|
||||
├── index.md # This file
|
||||
├── correctness-rules.json # CORR - Logic and error handling
|
||||
├── security-rules.json # SEC - Security vulnerabilities
|
||||
├── performance-rules.json # PERF - Performance issues
|
||||
├── readability-rules.json # READ - Code clarity
|
||||
├── testing-rules.json # TEST - Test quality
|
||||
└── architecture-rules.json # ARCH - Design patterns
|
||||
```
|
||||
|
||||
## Rule File Schema
|
||||
|
||||
Each rule file follows this JSON schema:
|
||||
|
||||
```json
|
||||
{
|
||||
"dimension": "string", // Dimension identifier
|
||||
"prefix": "string", // Finding ID prefix (4 chars)
|
||||
"description": "string", // Dimension description
|
||||
"rules": [
|
||||
{
|
||||
"id": "string", // Unique rule identifier
|
||||
"category": "string", // Rule category within dimension
|
||||
"severity": "critical|high|medium|low",
|
||||
"pattern": "string", // Detection pattern
|
||||
"patternType": "regex|includes|ast",
|
||||
"negativePatterns": [], // Patterns that exclude matches
|
||||
"caseInsensitive": false, // For regex patterns
|
||||
"contextPattern": "", // Additional context requirement
|
||||
"contextPath": [], // Path patterns for context
|
||||
"lineThreshold": 0, // For size-based rules
|
||||
"methodThreshold": 0, // For complexity rules
|
||||
"description": "string", // Issue description
|
||||
"recommendation": "string", // How to fix
|
||||
"fixExample": "string" // Code example
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
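As a minimal, illustrative sketch (the `loadRuleFile` helper and its error handling are assumptions, not part of the skill), a loader can check the required schema fields before applying a rule file:

```javascript
const fs = require('fs');

// Illustrative loader: parse a rule file and verify the fields the schema above requires
function loadRuleFile(path) {
  const config = JSON.parse(fs.readFileSync(path, 'utf8'));

  if (!config.dimension || !config.prefix || !Array.isArray(config.rules)) {
    throw new Error(`Invalid rule file ${path}: dimension, prefix and rules are required`);
  }

  const severities = ['critical', 'high', 'medium', 'low'];
  for (const rule of config.rules) {
    if (!rule.id || !rule.pattern || !severities.includes(rule.severity)) {
      throw new Error(`Invalid rule "${rule.id || '<missing id>'}" in ${path}`);
    }
  }

  return config;
}
```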
|
||||
## Dimension Summary
|
||||
|
||||
| Dimension | Prefix | Rules | Focus Areas |
|
||||
|-----------|--------|-------|-------------|
|
||||
| Correctness | CORR | 5 | Null checks, error handling, type safety |
|
||||
| Security | SEC | 5 | XSS, injection, secrets, crypto |
|
||||
| Performance | PERF | 5 | Complexity, I/O, memory leaks |
|
||||
| Readability | READ | 5 | Naming, length, nesting, magic values |
|
||||
| Testing | TEST | 5 | Assertions, coverage, mock quality |
|
||||
| Architecture | ARCH | 5 | Dependencies, layering, coupling |
|
||||
|
||||
## Severity Levels
|
||||
|
||||
| Severity | Description | Action |
|
||||
|----------|-------------|--------|
|
||||
| **critical** | Security vulnerability or data loss risk | Must fix before release |
|
||||
| **high** | Bug or significant quality issue | Fix in current sprint |
|
||||
| **medium** | Code smell or maintainability concern | Plan to address |
|
||||
| **low** | Style or minor improvement | Address when convenient |
|
||||
|
||||
## Pattern Types
|
||||
|
||||
### regex
|
||||
Standard regular expression pattern. Supports flags via `caseInsensitive`.
|
||||
|
||||
```json
|
||||
{
|
||||
"pattern": "catch\\s*\\([^)]*\\)\\s*\\{\\s*\\}",
|
||||
"patternType": "regex"
|
||||
}
|
||||
```
|
||||
|
||||
### includes
|
||||
Simple substring match. Faster than regex for literal strings.
|
||||
|
||||
```json
|
||||
{
|
||||
"pattern": "innerHTML",
|
||||
"patternType": "includes"
|
||||
}
|
||||
```
|
||||
|
||||
### ast (Future)
|
||||
AST-based detection for complex structural patterns.
|
||||
|
||||
```json
|
||||
{
|
||||
"pattern": "function[params>5]",
|
||||
"patternType": "ast"
|
||||
}
|
||||
```
|
||||
|
||||
## Usage in Code
|
||||
|
||||
```javascript
|
||||
// Load rules
|
||||
const rules = JSON.parse(fs.readFileSync('correctness-rules.json'));
|
||||
|
||||
// Apply rules
|
||||
for (const rule of rules.rules) {
|
||||
const matches = detectByPattern(content, rule.pattern, rule.patternType);
|
||||
for (const match of matches) {
|
||||
// Check negative patterns
|
||||
if (rule.negativePatterns?.some(np => match.context.includes(np))) {
|
||||
continue;
|
||||
}
|
||||
findings.push({
|
||||
id: `${rules.prefix}-${counter++}`,
|
||||
severity: rule.severity,
|
||||
category: rule.category,
|
||||
description: rule.description,
|
||||
recommendation: rule.recommendation,
|
||||
fixExample: rule.fixExample
|
||||
});
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Adding New Rules
|
||||
|
||||
1. Identify the appropriate dimension
|
||||
2. Create rule with unique `id` within dimension
|
||||
3. Choose appropriate `patternType`
|
||||
4. Provide clear `description` and `recommendation`
|
||||
5. Include practical `fixExample`
|
||||
6. Test against sample code (see the example below)
|
||||
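For example, a hypothetical readability rule that flags leftover `console.log` calls might look like this (the id, pattern, and wording are illustrative, not an existing rule):

```json
{
  "id": "leftover-console-log",
  "category": "debug-statements",
  "severity": "low",
  "pattern": "console\\.log\\s*\\(",
  "patternType": "regex",
  "negativePatterns": ["logger\\.", "eslint-disable"],
  "description": "console.log left in production code adds noise and may leak internal details",
  "recommendation": "Remove the statement or route it through the project logger",
  "fixExample": "// Before\nconsole.log('user', user);\n\n// After\nlogger.debug('user loaded', { id: user.id });"
}
```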
|
||||
## Rule Maintenance
|
||||
|
||||
- Review rules quarterly for relevance
|
||||
- Update patterns as language/framework evolves
|
||||
- Track false positive rates
|
||||
- Collect feedback from users
|
||||

.claude/skills/review-code/specs/rules/performance-rules.json (new file, 59 lines)
@@ -0,0 +1,59 @@
{
  "dimension": "performance",
  "prefix": "PERF",
  "description": "Rules for detecting performance issues including inefficient algorithms, memory leaks, and resource waste",
  "rules": [
    {
      "id": "nested-loops",
      "category": "algorithm-complexity",
      "severity": "medium",
      "pattern": "for\\s*\\([^)]+\\)\\s*\\{[^}]*for\\s*\\([^)]+\\)|forEach\\s*\\([^)]+\\)\\s*\\{[^}]*forEach",
      "patternType": "regex",
      "description": "Nested loops may indicate O(n^2) or worse complexity. Consider if this can be optimized",
      "recommendation": "Use Map/Set for O(1) lookups, break early when possible, or restructure the algorithm",
      "fixExample": "// Before - O(n^2)\nfor (const a of listA) {\n for (const b of listB) {\n if (a.id === b.id) { ... }\n }\n}\n\n// After - O(n)\nconst bMap = new Map(listB.map(b => [b.id, b]));\nfor (const a of listA) {\n const b = bMap.get(a.id);\n if (b) { ... }\n}"
    },
    {
      "id": "array-in-loop",
      "category": "inefficient-operation",
      "severity": "high",
      "pattern": "\\.includes\\s*\\(|indexOf\\s*\\(|find\\s*\\(",
      "patternType": "regex",
      "contextPattern": "for|while|forEach|map|filter|reduce",
      "description": "Array search methods inside loops cause O(n*m) complexity. Consider using Set or Map",
      "recommendation": "Convert array to Set before the loop for O(1) lookups",
      "fixExample": "// Before - O(n*m)\nfor (const item of items) {\n if (existingIds.includes(item.id)) { ... }\n}\n\n// After - O(n)\nconst idSet = new Set(existingIds);\nfor (const item of items) {\n if (idSet.has(item.id)) { ... }\n}"
    },
    {
      "id": "synchronous-io",
      "category": "io-efficiency",
      "severity": "high",
      "pattern": "readFileSync|writeFileSync|execSync|spawnSync",
      "patternType": "regex",
      "description": "Synchronous I/O blocks the event loop and degrades application responsiveness",
      "recommendation": "Use async versions (readFile, writeFile) or Promise-based APIs",
      "fixExample": "// Before\nconst data = fs.readFileSync(path);\n\n// After\nconst data = await fs.promises.readFile(path);\n// or\nfs.readFile(path, (err, data) => { ... });"
    },
    {
      "id": "memory-leak-closure",
      "category": "memory-leak",
      "severity": "high",
      "pattern": "addEventListener\\s*\\(|setInterval\\s*\\(|setTimeout\\s*\\(",
      "patternType": "regex",
      "negativePatterns": ["removeEventListener", "clearInterval", "clearTimeout"],
      "description": "Event listeners and timers without cleanup can cause memory leaks",
      "recommendation": "Always remove event listeners and clear timers in cleanup functions (componentWillUnmount, useEffect cleanup)",
      "fixExample": "// Before\nuseEffect(() => {\n window.addEventListener('resize', handler);\n}, []);\n\n// After\nuseEffect(() => {\n window.addEventListener('resize', handler);\n return () => window.removeEventListener('resize', handler);\n}, []);"
    },
    {
      "id": "unnecessary-rerender",
      "category": "react-performance",
      "severity": "medium",
      "pattern": "useState\\s*\\(\\s*\\{|useState\\s*\\(\\s*\\[",
      "patternType": "regex",
      "description": "Creating new object/array references in useState can cause unnecessary re-renders",
      "recommendation": "Use useMemo for computed values, useCallback for functions, or consider state management libraries",
      "fixExample": "// Before - new object every render\nconst [config] = useState({ theme: 'dark' });\n\n// After - stable reference\nconst defaultConfig = useMemo(() => ({ theme: 'dark' }), []);\nconst [config] = useState(defaultConfig);"
    }
  ]
}

.claude/skills/review-code/specs/rules/readability-rules.json (new file, 60 lines)
@@ -0,0 +1,60 @@
{
  "dimension": "readability",
  "prefix": "READ",
  "description": "Rules for detecting code readability issues including naming, complexity, and documentation",
  "rules": [
    {
      "id": "long-function",
      "category": "function-length",
      "severity": "medium",
      "pattern": "function\\s+\\w+\\s*\\([^)]*\\)\\s*\\{|=>\\s*\\{",
      "patternType": "regex",
      "lineThreshold": 50,
      "description": "Functions longer than 50 lines are difficult to understand and maintain",
      "recommendation": "Break down into smaller, focused functions. Each function should do one thing well",
      "fixExample": "// Before - 100 line function\nfunction processData(data) {\n // validation\n // transformation\n // calculation\n // formatting\n // output\n}\n\n// After - composed functions\nfunction processData(data) {\n const validated = validateData(data);\n const transformed = transformData(validated);\n return formatOutput(calculateResults(transformed));\n}"
    },
    {
      "id": "single-letter-variable",
      "category": "naming",
      "severity": "low",
      "pattern": "(?:const|let|var)\\s+[a-z]\\s*=",
      "patternType": "regex",
      "negativePatterns": ["for\\s*\\(", "\\[\\w,\\s*\\w\\]", "catch\\s*\\(e\\)"],
      "description": "Single letter variable names reduce code readability except in specific contexts (loop counters, catch)",
      "recommendation": "Use descriptive names that convey the variable's purpose",
      "fixExample": "// Before\nconst d = getData();\nconst r = d.map(x => x.value);\n\n// After\nconst userData = getData();\nconst userValues = userData.map(user => user.value);"
    },
    {
      "id": "deep-nesting",
      "category": "complexity",
      "severity": "high",
      "pattern": "\\{[^}]*\\{[^}]*\\{[^}]*\\{",
      "patternType": "regex",
      "description": "Deeply nested code (4+ levels) is hard to follow and maintain",
      "recommendation": "Use early returns, extract functions, or flatten conditionals",
      "fixExample": "// Before\nif (user) {\n if (user.permissions) {\n if (user.permissions.canEdit) {\n if (document.isEditable) {\n // do work\n }\n }\n }\n}\n\n// After\nif (!user?.permissions?.canEdit) return;\nif (!document.isEditable) return;\n// do work"
    },
    {
      "id": "magic-number",
      "category": "magic-value",
      "severity": "low",
      "pattern": "[^\\d]\\d{2,}[^\\d]|setTimeout\\s*\\([^,]+,\\s*\\d{4,}\\)",
      "patternType": "regex",
      "negativePatterns": ["const", "let", "enum", "0x", "100", "1000"],
      "description": "Magic numbers without explanation make code hard to understand",
      "recommendation": "Extract magic numbers into named constants with descriptive names",
      "fixExample": "// Before\nif (status === 403) { ... }\nsetTimeout(callback, 86400000);\n\n// After\nconst HTTP_FORBIDDEN = 403;\nconst ONE_DAY_MS = 24 * 60 * 60 * 1000;\nif (status === HTTP_FORBIDDEN) { ... }\nsetTimeout(callback, ONE_DAY_MS);"
    },
    {
      "id": "commented-code",
      "category": "dead-code",
      "severity": "low",
      "pattern": "//\\s*(const|let|var|function|if|for|while|return)\\s+",
      "patternType": "regex",
      "description": "Commented-out code adds noise and should be removed. Use version control for history",
      "recommendation": "Remove commented code. If needed for reference, add a comment explaining why with a link to relevant commit/issue",
      "fixExample": "// Before\n// function oldImplementation() { ... }\n// const legacyConfig = {...};\n\n// After\n// See PR #123 for previous implementation\n// removed 2024-01-01"
    }
  ]
}

.claude/skills/review-code/specs/rules/security-rules.json (new file, 58 lines)
@@ -0,0 +1,58 @@
{
  "dimension": "security",
  "prefix": "SEC",
  "description": "Rules for detecting security vulnerabilities including XSS, injection, and credential exposure",
  "rules": [
    {
      "id": "xss-innerHTML",
      "category": "xss-risk",
      "severity": "critical",
      "pattern": "innerHTML\\s*=|dangerouslySetInnerHTML",
      "patternType": "regex",
      "description": "Direct HTML injection via innerHTML or dangerouslySetInnerHTML can lead to XSS vulnerabilities",
      "recommendation": "Use textContent for plain text, or sanitize HTML input using a library like DOMPurify before injection",
      "fixExample": "// Before\nelement.innerHTML = userInput;\n<div dangerouslySetInnerHTML={{__html: data}} />\n\n// After\nelement.textContent = userInput;\n// or\nimport DOMPurify from 'dompurify';\nelement.innerHTML = DOMPurify.sanitize(userInput);"
    },
    {
      "id": "hardcoded-secret",
      "category": "hardcoded-secret",
      "severity": "critical",
      "pattern": "(?:password|secret|api[_-]?key|token|credential)\\s*[=:]\\s*['\"][^'\"]{8,}['\"]",
      "patternType": "regex",
      "caseInsensitive": true,
      "description": "Hardcoded credentials detected in source code. This is a security risk if code is exposed",
      "recommendation": "Use environment variables, secret management services, or configuration files excluded from version control",
      "fixExample": "// Before\nconst apiKey = 'sk-1234567890abcdef';\n\n// After\nconst apiKey = process.env.API_KEY;\n// or\nconst apiKey = await getSecretFromVault('api-key');"
    },
    {
      "id": "sql-injection",
      "category": "injection",
      "severity": "critical",
      "pattern": "query\\s*\\(\\s*[`'\"].*\\$\\{|execute\\s*\\(\\s*[`'\"].*\\+",
      "patternType": "regex",
      "description": "String concatenation or template literals in SQL queries can lead to SQL injection",
      "recommendation": "Use parameterized queries or prepared statements with placeholders",
      "fixExample": "// Before\ndb.query(`SELECT * FROM users WHERE id = ${userId}`);\n\n// After\ndb.query('SELECT * FROM users WHERE id = ?', [userId]);\n// or\ndb.query('SELECT * FROM users WHERE id = $1', [userId]);"
    },
    {
      "id": "command-injection",
      "category": "injection",
      "severity": "critical",
      "pattern": "exec\\s*\\(|execSync\\s*\\(|spawn\\s*\\([^,]*\\+|child_process",
      "patternType": "regex",
      "description": "Command execution with user input can lead to command injection attacks",
      "recommendation": "Validate and sanitize input, use parameterized commands, or avoid shell execution entirely",
      "fixExample": "// Before\nexec(`ls ${userInput}`);\n\n// After\nexecFile('ls', [sanitizedInput], options);\n// or use spawn with {shell: false}"
    },
    {
      "id": "insecure-random",
      "category": "cryptography",
      "severity": "high",
      "pattern": "Math\\.random\\(\\)",
      "patternType": "regex",
      "description": "Math.random() is not cryptographically secure and should not be used for security-sensitive operations",
      "recommendation": "Use crypto.randomBytes() or crypto.getRandomValues() for security-critical random generation",
      "fixExample": "// Before\nconst token = Math.random().toString(36);\n\n// After\nimport crypto from 'crypto';\nconst token = crypto.randomBytes(32).toString('hex');"
    }
  ]
}

.claude/skills/review-code/specs/rules/testing-rules.json (new file, 59 lines)
@@ -0,0 +1,59 @@
{
  "dimension": "testing",
  "prefix": "TEST",
  "description": "Rules for detecting testing issues including coverage gaps, test quality, and mock usage",
  "rules": [
    {
      "id": "missing-assertion",
      "category": "test-quality",
      "severity": "high",
      "pattern": "(?:it|test)\\s*\\([^)]+,\\s*(?:async\\s*)?\\(\\)\\s*=>\\s*\\{[^}]*\\}\\s*\\)",
      "patternType": "regex",
      "negativePatterns": ["expect", "assert", "should", "toBe", "toEqual"],
      "description": "Test case without assertions always passes and provides no value",
      "recommendation": "Add assertions to verify expected behavior. Each test should have at least one meaningful assertion",
      "fixExample": "// Before\nit('should process data', async () => {\n await processData(input);\n});\n\n// After\nit('should process data', async () => {\n const result = await processData(input);\n expect(result.success).toBe(true);\n expect(result.data).toHaveLength(3);\n});"
    },
    {
      "id": "hardcoded-test-data",
      "category": "test-maintainability",
      "severity": "low",
      "pattern": "expect\\s*\\([^)]+\\)\\.toBe\\s*\\(['\"][^'\"]{20,}['\"]\\)",
      "patternType": "regex",
      "description": "Long hardcoded strings in assertions are brittle and hard to maintain",
      "recommendation": "Use snapshots for large outputs, or extract expected values to test fixtures",
      "fixExample": "// Before\nexpect(result).toBe('very long expected string that is hard to maintain...');\n\n// After\nexpect(result).toMatchSnapshot();\n// or\nconst expected = loadFixture('expected-output.json');\nexpect(result).toEqual(expected);"
    },
    {
      "id": "no-error-test",
      "category": "coverage-gap",
      "severity": "medium",
      "pattern": "describe\\s*\\([^)]+",
      "patternType": "regex",
      "negativePatterns": ["throw", "reject", "error", "fail", "catch"],
      "description": "Test suite may be missing error path testing. Error handling is critical for reliability",
      "recommendation": "Add tests for error cases: invalid input, network failures, edge cases",
      "fixExample": "// Add error path tests\nit('should throw on invalid input', () => {\n expect(() => processData(null)).toThrow('Invalid input');\n});\n\nit('should handle network failure', async () => {\n mockApi.mockRejectedValue(new Error('Network error'));\n await expect(fetchData()).rejects.toThrow('Network error');\n});"
    },
    {
      "id": "test-implementation-detail",
      "category": "test-quality",
      "severity": "medium",
      "pattern": "toHaveBeenCalledWith|toHaveBeenCalledTimes",
      "patternType": "regex",
      "description": "Testing implementation details (call counts, exact parameters) makes tests brittle to refactoring",
      "recommendation": "Prefer testing observable behavior and outcomes over internal implementation",
      "fixExample": "// Before - brittle\nexpect(mockService.process).toHaveBeenCalledTimes(3);\nexpect(mockService.process).toHaveBeenCalledWith('exact-arg');\n\n// After - behavior-focused\nexpect(result.items).toHaveLength(3);\nexpect(result.processed).toBe(true);"
    },
    {
      "id": "skip-test",
      "category": "test-coverage",
      "severity": "high",
      "pattern": "it\\.skip|test\\.skip|xit|xdescribe|describe\\.skip",
      "patternType": "regex",
      "description": "Skipped tests indicate untested code paths or broken functionality",
      "recommendation": "Fix or remove skipped tests. If temporarily skipped, add a TODO comment with an issue reference",
      "fixExample": "// Before\nit.skip('should handle edge case', () => { ... });\n\n// After - either fix it\nit('should handle edge case', () => {\n // fixed implementation\n});\n\n// Or document why skipped\n// TODO(#123): Re-enable after API migration\nit.skip('should handle edge case', () => { ... });"
    }
  ]
}