feat: Implement fuzzy search functionality in smart-search.js

- Added buildFuzzyRegex function for approximate matching.
- Enhanced buildRipgrepCommand to support fuzzy parameter.
- Updated executeAutoMode to handle fuzzy search case.
- Implemented executeFuzzyMode for executing fuzzy search using ripgrep.
- Refactored import and export parsing functions for better modularity.
- Improved dependency graph building and circular dependency detection.
- Added caching mechanism for dependency graph to optimize performance.
catlog22 committed 2025-12-11 23:28:35 +08:00
parent e8f1caa219
commit 811382775d
11 changed files with 2119 additions and 30 deletions
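For orientation, a rough sketch of what the new fuzzy path is meant to do (parameter and result names are taken from the diffs below; calling executeFuzzyMode directly like this is an assumption, since auto mode normally routes to it via query classification):

// Hypothetical direct invocation of the new mode (sketch, not a documented API):
const result = await executeFuzzyMode({ query: 'getUserById', paths: ['src'], maxResults: 50 });
// On success: result.metadata.mode === 'fuzzy', result.metadata.backend === 'ripgrep-regex',
// and result.results is [{ file, line, column, content }, ...].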


@@ -0,0 +1,429 @@
#!/usr/bin/env python3
import re
# Read the file
with open('ccw/src/tools/smart-search.js', 'r', encoding='utf-8') as f:
content = f.read()
# Fix 1: Update imports
content = content.replace(
"import { existsSync, readdirSync, statSync } from 'fs';",
"import { existsSync, readdirSync, statSync, readFileSync, writeFileSync, mkdirSync } from 'fs';"
)
# Fix 2: Remove duplicate query declaration in buildRipgrepCommand (keep fuzzy version)
content = re.sub(
r'(function buildRipgrepCommand\(params\) \{\s*const \{ query, paths = \[.*?\], contextLines = 0, maxResults = 100, includeHidden = false \} = params;\s*)',
'',
content,
count=1
)
# Fix 3: Remove errant 'n' character
content = re.sub(r'\nn/\*\*', r'\n/**', content)
# Fix 4: Remove duplicated lines in buildRipgrepCommand
lines = content.split('\n')
fixed_lines = []
skip_next = False
for i, line in enumerate(lines):
if skip_next:
skip_next = False
continue
# Skip duplicate ripgrep command logic
if '// Use literal/fixed string matching for exact mode' in line:
# Skip old version
if i + 3 < len(lines) and 'args.push(...paths)' in lines[i + 3]:
skip_next = False
continue
if '// Use fuzzy regex or literal matching based on mode' in line:
# Keep fuzzy version
fixed_lines.append(line)
continue
fixed_lines.append(line)
content = '\n'.join(fixed_lines)
# Fix 5: Replace executeGraphMode implementation
graph_impl = '''/**
* Parse import statements from file content
* @param {string} fileContent - File content to parse
* @returns {Array<{source: string, specifiers: string[]}>}
*/
function parseImports(fileContent) {
const imports = [];
// Pattern 1: ES6 import statements
const es6ImportPattern = /import\\s+(?:(?:(\\*\\s+as\\s+\\w+)|(\\w+)|(?:\\{([^}]+)\\}))\\s+from\\s+)?['\"]([^'\"]+)['\"]/g;
let match;
while ((match = es6ImportPattern.exec(fileContent)) !== null) {
const source = match[4];
const specifiers = [];
if (match[1]) specifiers.push(match[1]);
else if (match[2]) specifiers.push(match[2]);
else if (match[3]) {
const named = match[3].split(',').map(s => s.trim());
specifiers.push(...named);
}
imports.push({ source, specifiers });
}
// Pattern 2: CommonJS require()
const requirePattern = /require\\(['\"]([^'\"]+)['\"]\\)/g;
while ((match = requirePattern.exec(fileContent)) !== null) {
imports.push({ source: match[1], specifiers: [] });
}
// Pattern 3: Dynamic import()
const dynamicImportPattern = /import\\(['\"]([^'\"]+)['\"]\\)/g;
while ((match = dynamicImportPattern.exec(fileContent)) !== null) {
imports.push({ source: match[1], specifiers: [] });
}
// Pattern 4: TypeScript import type
const typeImportPattern = /import\\s+type\\s+(?:\\{([^}]+)\\})\\s+from\\s+['\"]([^'\"]+)['\"]/g;
while ((match = typeImportPattern.exec(fileContent)) !== null) {
const source = match[2];
const specifiers = match[1].split(',').map(s => s.trim());
imports.push({ source, specifiers });
}
return imports;
}
/**
* Parse export statements from file content
* @param {string} fileContent - File content to parse
* @returns {Array<{name: string, type: string}>}
*/
function parseExports(fileContent) {
const exports = [];
// Pattern 1: export default
const defaultExportPattern = /export\\s+default\\s+(?:class|function|const|let|var)?\\s*(\\w+)?/g;
let match;
while ((match = defaultExportPattern.exec(fileContent)) !== null) {
exports.push({ name: match[1] || 'default', type: 'default' });
}
// Pattern 2: export named declarations
const namedDeclPattern = /export\\s+(?:const|let|var|function|class)\\s+(\\w+)/g;
while ((match = namedDeclPattern.exec(fileContent)) !== null) {
exports.push({ name: match[1], type: 'named' });
}
// Pattern 3: export { ... }
const namedExportPattern = /export\\s+\\{([^}]+)\\}/g;
while ((match = namedExportPattern.exec(fileContent)) !== null) {
const names = match[1].split(',').map(s => {
const parts = s.trim().split(/\\s+as\\s+/);
return parts[parts.length - 1];
});
names.forEach(name => {
exports.push({ name: name.trim(), type: 'named' });
});
}
return exports;
}
/**
* Build dependency graph by scanning project files
* @param {string} rootPath - Root directory to scan
* @param {string[]} gitignorePatterns - Patterns to exclude
* @returns {{nodes: Array, edges: Array, metadata: Object}}
*/
function buildDependencyGraph(rootPath, gitignorePatterns = []) {
const nodes = [];
const edges = [];
const processedFiles = new Set();
const SYSTEM_EXCLUDES = [
'.git', 'node_modules', '.npm', '.yarn', '.pnpm',
'dist', 'build', 'out', 'coverage', '.cache',
'.next', '.nuxt', '.vite', '__pycache__', 'venv'
];
function shouldExclude(name) {
if (SYSTEM_EXCLUDES.includes(name)) return true;
for (const pattern of gitignorePatterns) {
if (name === pattern) return true;
if (pattern.includes('*')) {
const regex = new RegExp('^' + pattern.replace(/\\*/g, '.*') + '$');
if (regex.test(name)) return true;
}
}
return false;
}
function scanDirectory(dirPath) {
if (!existsSync(dirPath)) return;
try {
const entries = readdirSync(dirPath, { withFileTypes: true });
for (const entry of entries) {
if (shouldExclude(entry.name)) continue;
const fullPath = join(dirPath, entry.name);
if (entry.isDirectory()) {
scanDirectory(fullPath);
} else if (entry.isFile()) {
const ext = entry.name.split('.').pop();
if (['js', 'mjs', 'cjs', 'ts', 'tsx', 'jsx'].includes(ext)) {
processFile(fullPath);
}
}
}
} catch (err) {
// Skip directories we can't read
}
}
function processFile(filePath) {
if (processedFiles.has(filePath)) return;
processedFiles.add(filePath);
try {
const content = readFileSync(filePath, 'utf8');
const relativePath = './' + filePath.replace(rootPath, '').replace(/\\\\/g, '/').replace(/^\\//, '');
const fileExports = parseExports(content);
nodes.push({
id: relativePath,
path: filePath,
exports: fileExports
});
const imports = parseImports(content);
imports.forEach(imp => {
let targetPath = imp.source;
if (!targetPath.startsWith('.') && !targetPath.startsWith('/')) {
return;
}
const targetRelative = './' + targetPath.replace(/^\\.\\//, '');
edges.push({
from: relativePath,
to: targetRelative,
imports: imp.specifiers
});
});
} catch (err) {
// Skip files we can't read or parse
}
}
scanDirectory(rootPath);
const circularDeps = detectCircularDependencies(edges);
return {
nodes,
edges,
metadata: {
timestamp: Date.now(),
rootPath,
nodeCount: nodes.length,
edgeCount: edges.length,
circular_deps_detected: circularDeps.length > 0,
circular_deps: circularDeps
}
};
}
/**
* Detect circular dependencies in the graph
* @param {Array} edges - Graph edges
* @returns {Array} List of circular dependency chains
*/
function detectCircularDependencies(edges) {
const cycles = [];
const visited = new Set();
const recStack = new Set();
const graph = {};
edges.forEach(edge => {
if (!graph[edge.from]) graph[edge.from] = [];
graph[edge.from].push(edge.to);
});
function dfs(node, path = []) {
if (recStack.has(node)) {
const cycleStart = path.indexOf(node);
if (cycleStart !== -1) {
cycles.push(path.slice(cycleStart).concat(node));
}
return;
}
if (visited.has(node)) return;
visited.add(node);
recStack.add(node);
path.push(node);
const neighbors = graph[node] || [];
for (const neighbor of neighbors) {
dfs(neighbor, [...path]);
}
recStack.delete(node);
}
Object.keys(graph).forEach(node => {
if (!visited.has(node)) {
dfs(node);
}
});
return cycles;
}
/**
* Mode: graph - Dependency and relationship traversal
* Analyzes code relationships (imports, exports, dependencies)
*/
async function executeGraphMode(params) {
const { query, paths = [], maxResults = 100 } = params;
const rootPath = resolve(process.cwd(), paths[0] || '.');
const cacheDir = join(process.cwd(), '.ccw-cache');
const cacheFile = join(cacheDir, 'dependency-graph.json');
const CACHE_TTL = 5 * 60 * 1000;
let graph;
if (existsSync(cacheFile)) {
try {
const cached = JSON.parse(readFileSync(cacheFile, 'utf8'));
const age = Date.now() - cached.metadata.timestamp;
if (age < CACHE_TTL) {
graph = cached;
}
} catch (err) {
// Cache invalid, will rebuild
}
}
if (!graph) {
const gitignorePatterns = [];
const gitignorePath = join(rootPath, '.gitignore');
if (existsSync(gitignorePath)) {
const content = readFileSync(gitignorePath, 'utf8');
content.split('\\n').forEach(line => {
line = line.trim();
if (!line || line.startsWith('#')) return;
gitignorePatterns.push(line.replace(/\\/$/, ''));
});
}
graph = buildDependencyGraph(rootPath, gitignorePatterns);
try {
mkdirSync(cacheDir, { recursive: true });
writeFileSync(cacheFile, JSON.stringify(graph, null, 2), 'utf8');
} catch (err) {
// Cache write failed, continue
}
}
const queryLower = query.toLowerCase();
let queryType = 'unknown';
let filteredNodes = [];
let filteredEdges = [];
let queryPaths = [];
if (queryLower.match(/imports?\\s+(\\S+)/)) {
queryType = 'imports';
const target = queryLower.match(/imports?\\s+(\\S+)/)[1];
filteredEdges = graph.edges.filter(edge =>
edge.to.includes(target) || edge.imports.some(imp => imp.toLowerCase().includes(target))
);
const nodeIds = new Set(filteredEdges.map(e => e.from));
filteredNodes = graph.nodes.filter(n => nodeIds.has(n.id));
} else if (queryLower.match(/exports?\\s+(\\S+)/)) {
queryType = 'exports';
const target = queryLower.match(/exports?\\s+(\\S+)/)[1];
filteredNodes = graph.nodes.filter(node =>
node.exports.some(exp => exp.name.toLowerCase().includes(target))
);
const nodeIds = new Set(filteredNodes.map(n => n.id));
filteredEdges = graph.edges.filter(e => nodeIds.has(e.from) || nodeIds.has(e.to));
} else if (queryLower.includes('dependency') || queryLower.includes('chain') || queryLower.includes('depends')) {
queryType = 'dependency_chain';
filteredNodes = graph.nodes.slice(0, maxResults);
filteredEdges = graph.edges;
if (graph.metadata.circular_deps && graph.metadata.circular_deps.length > 0) {
queryPaths = graph.metadata.circular_deps.slice(0, 10);
}
} else {
queryType = 'module_search';
filteredNodes = graph.nodes.filter(node =>
node.id.toLowerCase().includes(queryLower) ||
node.path.toLowerCase().includes(queryLower)
);
const nodeIds = new Set(filteredNodes.map(n => n.id));
filteredEdges = graph.edges.filter(e => nodeIds.has(e.from) || nodeIds.has(e.to));
}
if (filteredNodes.length > maxResults) {
filteredNodes = filteredNodes.slice(0, maxResults);
}
return {
success: true,
graph: {
nodes: filteredNodes,
edges: filteredEdges,
paths: queryPaths
},
metadata: {
mode: 'graph',
storage: 'json',
query_type: queryType,
total_nodes: graph.metadata.nodeCount,
total_edges: graph.metadata.edgeCount,
filtered_nodes: filteredNodes.length,
filtered_edges: filteredEdges.length,
circular_deps_detected: graph.metadata.circular_deps_detected,
cached: existsSync(cacheFile),
query
}
};
}
'''
# Find and replace executeGraphMode
pattern = r'/\*\*\s*\* Mode: graph.*?\* Analyzes code relationships.*?\*/\s*async function executeGraphMode\(params\) \{.*?error: \'Graph mode not implemented - dependency analysis pending\'\s*\};?\s*\}'
content = re.sub(pattern, graph_impl, content, flags=re.DOTALL)
# Write the file
with open('ccw/src/tools/smart-search.js', 'w', encoding='utf-8') as f:
f.write(content)
print('File updated successfully')
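As a sanity check on the DFS-based cycle detector embedded in the string above, here is a minimal sketch of its behavior (assuming detectCircularDependencies is in scope):

const edges = [
  { from: './a.js', to: './b.js', imports: [] },
  { from: './b.js', to: './c.js', imports: [] },
  { from: './c.js', to: './a.js', imports: [] }
];
console.log(detectCircularDependencies(edges));
// → [ [ './a.js', './b.js', './c.js', './a.js' ] ]
// A cycle is reported as the path from the first revisited node back to itself.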

.ccw-cache/graph-impl.js (new file, 378 lines)

@@ -0,0 +1,378 @@
/**
* Parse import statements from file content
* @param {string} fileContent - File content to parse
* @returns {Array<{source: string, specifiers: string[]}>}
*/
function parseImports(fileContent) {
const imports = [];
// Pattern 1: ES6 import statements
const es6ImportPattern = /import\s+(?:(?:(\*\s+as\s+\w+)|(\w+)|(?:\{([^}]+)\}))\s+from\s+)?['"]([^'"]+)['"]/g;
let match;
while ((match = es6ImportPattern.exec(fileContent)) !== null) {
const source = match[4];
const specifiers = [];
if (match[1]) specifiers.push(match[1]);
else if (match[2]) specifiers.push(match[2]);
else if (match[3]) {
const named = match[3].split(',').map(s => s.trim());
specifiers.push(...named);
}
imports.push({ source, specifiers });
}
// Pattern 2: CommonJS require()
const requirePattern = /require\(['"]([^'"]+)['"]\)/g;
while ((match = requirePattern.exec(fileContent)) !== null) {
imports.push({ source: match[1], specifiers: [] });
}
// Pattern 3: Dynamic import()
const dynamicImportPattern = /import\(['"]([^'"]+)['"]\)/g;
while ((match = dynamicImportPattern.exec(fileContent)) !== null) {
imports.push({ source: match[1], specifiers: [] });
}
// Pattern 4: TypeScript import type
const typeImportPattern = /import\s+type\s+(?:\{([^}]+)\})\s+from\s+['"]([^'"]+)['"]/g;
while ((match = typeImportPattern.exec(fileContent)) !== null) {
const source = match[2];
const specifiers = match[1].split(',').map(s => s.trim());
imports.push({ source, specifiers });
}
return imports;
}
/**
* Parse export statements from file content
* @param {string} fileContent - File content to parse
* @returns {Array<{name: string, type: string}>}
*/
function parseExports(fileContent) {
const exports = [];
// Pattern 1: export default
const defaultExportPattern = /export\s+default\s+(?:class|function|const|let|var)?\s*(\w+)?/g;
let match;
while ((match = defaultExportPattern.exec(fileContent)) !== null) {
exports.push({ name: match[1] || 'default', type: 'default' });
}
// Pattern 2: export named declarations
const namedDeclPattern = /export\s+(?:const|let|var|function|class)\s+(\w+)/g;
while ((match = namedDeclPattern.exec(fileContent)) !== null) {
exports.push({ name: match[1], type: 'named' });
}
// Pattern 3: export { ... }
const namedExportPattern = /export\s+\{([^}]+)\}/g;
while ((match = namedExportPattern.exec(fileContent)) !== null) {
const names = match[1].split(',').map(s => {
const parts = s.trim().split(/\s+as\s+/);
return parts[parts.length - 1];
});
names.forEach(name => {
exports.push({ name: name.trim(), type: 'named' });
});
}
return exports;
}
/**
* Build dependency graph by scanning project files
* @param {string} rootPath - Root directory to scan
* @param {string[]} gitignorePatterns - Patterns to exclude
* @returns {{nodes: Array, edges: Array, metadata: Object}}
*/
function buildDependencyGraph(rootPath, gitignorePatterns = []) {
const { readFileSync, readdirSync, existsSync } = require('fs');
const { join, relative, resolve: resolvePath } = require('path');
const nodes = [];
const edges = [];
const processedFiles = new Set();
const SYSTEM_EXCLUDES = [
'.git', 'node_modules', '.npm', '.yarn', '.pnpm',
'dist', 'build', 'out', 'coverage', '.cache',
'.next', '.nuxt', '.vite', '__pycache__', 'venv'
];
function shouldExclude(name) {
if (SYSTEM_EXCLUDES.includes(name)) return true;
for (const pattern of gitignorePatterns) {
if (name === pattern) return true;
if (pattern.includes('*')) {
const regex = new RegExp('^' + pattern.replace(/\*/g, '.*') + '$');
if (regex.test(name)) return true;
}
}
return false;
}
function scanDirectory(dirPath) {
if (!existsSync(dirPath)) return;
try {
const entries = readdirSync(dirPath, { withFileTypes: true });
for (const entry of entries) {
if (shouldExclude(entry.name)) continue;
const fullPath = join(dirPath, entry.name);
if (entry.isDirectory()) {
scanDirectory(fullPath);
} else if (entry.isFile()) {
const ext = entry.name.split('.').pop();
if (['js', 'mjs', 'cjs', 'ts', 'tsx', 'jsx'].includes(ext)) {
processFile(fullPath);
}
}
}
} catch (err) {
// Skip directories we can't read
}
}
function processFile(filePath) {
if (processedFiles.has(filePath)) return;
processedFiles.add(filePath);
try {
const content = readFileSync(filePath, 'utf8');
const relativePath = './' + relative(rootPath, filePath).replace(/\\/g, '/');
const fileExports = parseExports(content);
nodes.push({
id: relativePath,
path: filePath,
exports: fileExports
});
const imports = parseImports(content);
imports.forEach(imp => {
let targetPath = imp.source;
if (!targetPath.startsWith('.') && !targetPath.startsWith('/')) {
return;
}
try {
targetPath = resolvePath(join(filePath, '..', targetPath));
const targetRelative = './' + relative(rootPath, targetPath).replace(/\\/g, '/');
edges.push({
from: relativePath,
to: targetRelative,
imports: imp.specifiers
});
} catch (err) {
// Skip invalid paths
}
});
} catch (err) {
// Skip files we can't read or parse
}
}
scanDirectory(rootPath);
const circularDeps = detectCircularDependencies(edges);
return {
nodes,
edges,
metadata: {
timestamp: Date.now(),
rootPath,
nodeCount: nodes.length,
edgeCount: edges.length,
circular_deps_detected: circularDeps.length > 0,
circular_deps: circularDeps
}
};
}
/**
* Detect circular dependencies in the graph
* @param {Array} edges - Graph edges
* @returns {Array} List of circular dependency chains
*/
function detectCircularDependencies(edges) {
const cycles = [];
const visited = new Set();
const recStack = new Set();
const graph = {};
edges.forEach(edge => {
if (!graph[edge.from]) graph[edge.from] = [];
graph[edge.from].push(edge.to);
});
function dfs(node, path = []) {
if (recStack.has(node)) {
const cycleStart = path.indexOf(node);
if (cycleStart !== -1) {
cycles.push(path.slice(cycleStart).concat(node));
}
return;
}
if (visited.has(node)) return;
visited.add(node);
recStack.add(node);
path.push(node);
const neighbors = graph[node] || [];
for (const neighbor of neighbors) {
dfs(neighbor, [...path]);
}
recStack.delete(node);
}
Object.keys(graph).forEach(node => {
if (!visited.has(node)) {
dfs(node);
}
});
return cycles;
}
/**
* Mode: graph - Dependency and relationship traversal
* Analyzes code relationships (imports, exports, dependencies)
*/
async function executeGraphMode(params) {
const { readFileSync, writeFileSync, mkdirSync, existsSync } = await import('fs');
const { join, resolve: resolvePath } = await import('path');
const { query, paths = [], maxResults = 100 } = params;
const rootPath = resolvePath(process.cwd(), paths[0] || '.');
const cacheDir = join(process.cwd(), '.ccw-cache');
const cacheFile = join(cacheDir, 'dependency-graph.json');
const CACHE_TTL = 5 * 60 * 1000;
let graph;
if (existsSync(cacheFile)) {
try {
const cached = JSON.parse(readFileSync(cacheFile, 'utf8'));
const age = Date.now() - cached.metadata.timestamp;
if (age < CACHE_TTL) {
graph = cached;
}
} catch (err) {
// Cache invalid, will rebuild
}
}
if (!graph) {
const gitignorePatterns = [];
const gitignorePath = join(rootPath, '.gitignore');
if (existsSync(gitignorePath)) {
const content = readFileSync(gitignorePath, 'utf8');
content.split('\n').forEach(line => {
line = line.trim();
if (!line || line.startsWith('#')) return;
gitignorePatterns.push(line.replace(/\/$/, ''));
});
}
graph = buildDependencyGraph(rootPath, gitignorePatterns);
try {
mkdirSync(cacheDir, { recursive: true });
writeFileSync(cacheFile, JSON.stringify(graph, null, 2), 'utf8');
} catch (err) {
// Cache write failed, continue
}
}
const queryLower = query.toLowerCase();
let queryType = 'unknown';
let filteredNodes = [];
let filteredEdges = [];
let queryPaths = [];
if (queryLower.match(/imports?\s+(\S+)/)) {
queryType = 'imports';
const target = queryLower.match(/imports?\s+(\S+)/)[1];
filteredEdges = graph.edges.filter(edge =>
edge.to.includes(target) || edge.imports.some(imp => imp.toLowerCase().includes(target))
);
const nodeIds = new Set(filteredEdges.map(e => e.from));
filteredNodes = graph.nodes.filter(n => nodeIds.has(n.id));
} else if (queryLower.match(/exports?\s+(\S+)/)) {
queryType = 'exports';
const target = queryLower.match(/exports?\s+(\S+)/)[1];
filteredNodes = graph.nodes.filter(node =>
node.exports.some(exp => exp.name.toLowerCase().includes(target))
);
const nodeIds = new Set(filteredNodes.map(n => n.id));
filteredEdges = graph.edges.filter(e => nodeIds.has(e.from) || nodeIds.has(e.to));
} else if (queryLower.includes('dependency') || queryLower.includes('chain') || queryLower.includes('depends')) {
queryType = 'dependency_chain';
filteredNodes = graph.nodes.slice(0, maxResults);
filteredEdges = graph.edges;
if (graph.metadata.circular_deps && graph.metadata.circular_deps.length > 0) {
queryPaths = graph.metadata.circular_deps.slice(0, 10);
}
} else {
queryType = 'module_search';
filteredNodes = graph.nodes.filter(node =>
node.id.toLowerCase().includes(queryLower) ||
node.path.toLowerCase().includes(queryLower)
);
const nodeIds = new Set(filteredNodes.map(n => n.id));
filteredEdges = graph.edges.filter(e => nodeIds.has(e.from) || nodeIds.has(e.to));
}
if (filteredNodes.length > maxResults) {
filteredNodes = filteredNodes.slice(0, maxResults);
}
return {
success: true,
graph: {
nodes: filteredNodes,
edges: filteredEdges,
paths: queryPaths
},
metadata: {
mode: 'graph',
storage: 'json',
query_type: queryType,
total_nodes: graph.metadata.nodeCount,
total_edges: graph.metadata.edgeCount,
filtered_nodes: filteredNodes.length,
filtered_edges: filteredEdges.length,
circular_deps_detected: graph.metadata.circular_deps_detected,
cached: existsSync(cacheFile),
query
}
};
}
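The query routing in executeGraphMode above is keyword-driven; a short sketch of the four branches (queries are illustrative, assuming the module is loaded):

await executeGraphMode({ query: 'imports smart-search' });    // query_type 'imports': edges whose target path or specifiers match
await executeGraphMode({ query: 'exports buildFuzzyRegex' }); // query_type 'exports': nodes exporting a matching name
await executeGraphMode({ query: 'dependency chain' });        // query_type 'dependency_chain': full edge set plus up to 10 circular paths
await executeGraphMode({ query: 'tools/' });                  // query_type 'module_search': substring match over node ids and paths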


@@ -0,0 +1,367 @@
/**
* Parse import statements from file content
* @param {string} fileContent - File content to parse
* @returns {Array<{source: string, specifiers: string[]}>}
*/
function parseImports(fileContent) {
const imports = [];
// Pattern 1: ES6 import statements
const es6ImportPattern = /import\s+(?:(?:(\*\s+as\s+\w+)|(\w+)|(?:\{([^}]+)\}))\s+from\s+)?['"]([^'"]+)['"]/g;
let match;
while ((match = es6ImportPattern.exec(fileContent)) !== null) {
const source = match[4];
const specifiers = [];
if (match[1]) specifiers.push(match[1]);
else if (match[2]) specifiers.push(match[2]);
else if (match[3]) {
const named = match[3].split(',').map(s => s.trim());
specifiers.push(...named);
}
imports.push({ source, specifiers });
}
// Pattern 2: CommonJS require()
const requirePattern = /require\(['"]([^'"]+)['"]\)/g;
while ((match = requirePattern.exec(fileContent)) !== null) {
imports.push({ source: match[1], specifiers: [] });
}
// Pattern 3: Dynamic import()
const dynamicImportPattern = /import\(['"]([^'"]+)['"]\)/g;
while ((match = dynamicImportPattern.exec(fileContent)) !== null) {
imports.push({ source: match[1], specifiers: [] });
}
// Pattern 4: TypeScript import type
const typeImportPattern = /import\s+type\s+(?:\{([^}]+)\})\s+from\s+['"]([^'"]+)['"]/g;
while ((match = typeImportPattern.exec(fileContent)) !== null) {
const source = match[2];
const specifiers = match[1].split(',').map(s => s.trim());
imports.push({ source, specifiers });
}
return imports;
}
/**
* Parse export statements from file content
* @param {string} fileContent - File content to parse
* @returns {Array<{name: string, type: string}>}
*/
function parseExports(fileContent) {
const exports = [];
// Pattern 1: export default
const defaultExportPattern = /export\s+default\s+(?:class|function|const|let|var)?\s*(\w+)?/g;
let match;
while ((match = defaultExportPattern.exec(fileContent)) !== null) {
exports.push({ name: match[1] || 'default', type: 'default' });
}
// Pattern 2: export named declarations
const namedDeclPattern = /export\s+(?:const|let|var|function|class)\s+(\w+)/g;
while ((match = namedDeclPattern.exec(fileContent)) !== null) {
exports.push({ name: match[1], type: 'named' });
}
// Pattern 3: export { ... }
const namedExportPattern = /export\s+\{([^}]+)\}/g;
while ((match = namedExportPattern.exec(fileContent)) !== null) {
const names = match[1].split(',').map(s => {
const parts = s.trim().split(/\s+as\s+/);
return parts[parts.length - 1];
});
names.forEach(name => {
exports.push({ name: name.trim(), type: 'named' });
});
}
return exports;
}
/**
* Build dependency graph by scanning project files
* @param {string} rootPath - Root directory to scan
* @param {string[]} gitignorePatterns - Patterns to exclude
* @returns {{nodes: Array, edges: Array, metadata: Object}}
*/
function buildDependencyGraph(rootPath, gitignorePatterns = []) {
const nodes = [];
const edges = [];
const processedFiles = new Set();
const SYSTEM_EXCLUDES = [
'.git', 'node_modules', '.npm', '.yarn', '.pnpm',
'dist', 'build', 'out', 'coverage', '.cache',
'.next', '.nuxt', '.vite', '__pycache__', 'venv'
];
function shouldExclude(name) {
if (SYSTEM_EXCLUDES.includes(name)) return true;
for (const pattern of gitignorePatterns) {
if (name === pattern) return true;
if (pattern.includes('*')) {
const regex = new RegExp('^' + pattern.replace(/\*/g, '.*') + '$');
if (regex.test(name)) return true;
}
}
return false;
}
function scanDirectory(dirPath) {
if (!existsSync(dirPath)) return;
try {
const entries = readdirSync(dirPath, { withFileTypes: true });
for (const entry of entries) {
if (shouldExclude(entry.name)) continue;
const fullPath = join(dirPath, entry.name);
if (entry.isDirectory()) {
scanDirectory(fullPath);
} else if (entry.isFile()) {
const ext = entry.name.split('.').pop();
if (['js', 'mjs', 'cjs', 'ts', 'tsx', 'jsx'].includes(ext)) {
processFile(fullPath);
}
}
}
} catch (err) {
// Skip directories we can't read
}
}
function processFile(filePath) {
if (processedFiles.has(filePath)) return;
processedFiles.add(filePath);
try {
const content = readFileSync(filePath, 'utf8');
const relativePath = './' + filePath.replace(rootPath, '').replace(/\\/g, '/').replace(/^\//, '');
const fileExports = parseExports(content);
nodes.push({
id: relativePath,
path: filePath,
exports: fileExports
});
const imports = parseImports(content);
imports.forEach(imp => {
let targetPath = imp.source;
if (!targetPath.startsWith('.') && !targetPath.startsWith('/')) {
return;
}
const targetRelative = './' + targetPath.replace(/^\.\//, '');
edges.push({
from: relativePath,
to: targetRelative,
imports: imp.specifiers
});
});
} catch (err) {
// Skip files we can't read or parse
}
}
scanDirectory(rootPath);
const circularDeps = detectCircularDependencies(edges);
return {
nodes,
edges,
metadata: {
timestamp: Date.now(),
rootPath,
nodeCount: nodes.length,
edgeCount: edges.length,
circular_deps_detected: circularDeps.length > 0,
circular_deps: circularDeps
}
};
}
/**
* Detect circular dependencies in the graph
* @param {Array} edges - Graph edges
* @returns {Array} List of circular dependency chains
*/
function detectCircularDependencies(edges) {
const cycles = [];
const visited = new Set();
const recStack = new Set();
const graph = {};
edges.forEach(edge => {
if (!graph[edge.from]) graph[edge.from] = [];
graph[edge.from].push(edge.to);
});
function dfs(node, path = []) {
if (recStack.has(node)) {
const cycleStart = path.indexOf(node);
if (cycleStart !== -1) {
cycles.push(path.slice(cycleStart).concat(node));
}
return;
}
if (visited.has(node)) return;
visited.add(node);
recStack.add(node);
path.push(node);
const neighbors = graph[node] || [];
for (const neighbor of neighbors) {
dfs(neighbor, [...path]);
}
recStack.delete(node);
}
Object.keys(graph).forEach(node => {
if (!visited.has(node)) {
dfs(node);
}
});
return cycles;
}
/**
* Mode: graph - Dependency and relationship traversal
* Analyzes code relationships (imports, exports, dependencies)
*/
async function executeGraphMode(params) {
const { query, paths = [], maxResults = 100 } = params;
const rootPath = resolve(process.cwd(), paths[0] || '.');
const cacheDir = join(process.cwd(), '.ccw-cache');
const cacheFile = join(cacheDir, 'dependency-graph.json');
const CACHE_TTL = 5 * 60 * 1000;
let graph;
if (existsSync(cacheFile)) {
try {
const cached = JSON.parse(readFileSync(cacheFile, 'utf8'));
const age = Date.now() - cached.metadata.timestamp;
if (age < CACHE_TTL) {
graph = cached;
}
} catch (err) {
// Cache invalid, will rebuild
}
}
if (!graph) {
const gitignorePatterns = [];
const gitignorePath = join(rootPath, '.gitignore');
if (existsSync(gitignorePath)) {
const content = readFileSync(gitignorePath, 'utf8');
content.split('\n').forEach(line => {
line = line.trim();
if (!line || line.startsWith('#')) return;
gitignorePatterns.push(line.replace(/\/$/, ''));
});
}
graph = buildDependencyGraph(rootPath, gitignorePatterns);
try {
mkdirSync(cacheDir, { recursive: true });
writeFileSync(cacheFile, JSON.stringify(graph, null, 2), 'utf8');
} catch (err) {
// Cache write failed, continue
}
}
const queryLower = query.toLowerCase();
let queryType = 'unknown';
let filteredNodes = [];
let filteredEdges = [];
let queryPaths = [];
if (queryLower.match(/imports?\s+(\S+)/)) {
queryType = 'imports';
const target = queryLower.match(/imports?\s+(\S+)/)[1];
filteredEdges = graph.edges.filter(edge =>
edge.to.includes(target) || edge.imports.some(imp => imp.toLowerCase().includes(target))
);
const nodeIds = new Set(filteredEdges.map(e => e.from));
filteredNodes = graph.nodes.filter(n => nodeIds.has(n.id));
} else if (queryLower.match(/exports?\s+(\S+)/)) {
queryType = 'exports';
const target = queryLower.match(/exports?\s+(\S+)/)[1];
filteredNodes = graph.nodes.filter(node =>
node.exports.some(exp => exp.name.toLowerCase().includes(target))
);
const nodeIds = new Set(filteredNodes.map(n => n.id));
filteredEdges = graph.edges.filter(e => nodeIds.has(e.from) || nodeIds.has(e.to));
} else if (queryLower.includes('dependency') || queryLower.includes('chain') || queryLower.includes('depends')) {
queryType = 'dependency_chain';
filteredNodes = graph.nodes.slice(0, maxResults);
filteredEdges = graph.edges;
if (graph.metadata.circular_deps && graph.metadata.circular_deps.length > 0) {
queryPaths = graph.metadata.circular_deps.slice(0, 10);
}
} else {
queryType = 'module_search';
filteredNodes = graph.nodes.filter(node =>
node.id.toLowerCase().includes(queryLower) ||
node.path.toLowerCase().includes(queryLower)
);
const nodeIds = new Set(filteredNodes.map(n => n.id));
filteredEdges = graph.edges.filter(e => nodeIds.has(e.from) || nodeIds.has(e.to));
}
if (filteredNodes.length > maxResults) {
filteredNodes = filteredNodes.slice(0, maxResults);
}
return {
success: true,
graph: {
nodes: filteredNodes,
edges: filteredEdges,
paths: queryPaths
},
metadata: {
mode: 'graph',
storage: 'json',
query_type: queryType,
total_nodes: graph.metadata.nodeCount,
total_edges: graph.metadata.edgeCount,
filtered_nodes: filteredNodes.length,
filtered_edges: filteredEdges.length,
circular_deps_detected: graph.metadata.circular_deps_detected,
cached: existsSync(cacheFile),
query
}
};
}
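Note the caching contract shared by the three copies of executeGraphMode: the scanned graph is persisted to .ccw-cache/dependency-graph.json and reused within CACHE_TTL (five minutes). A sketch of the observable behavior:

await executeGraphMode({ query: 'dependency chain' }); // cache miss: scans the tree, writes .ccw-cache/dependency-graph.json
await executeGraphMode({ query: 'imports utils' });    // within 5 minutes: served from the cached graph
// Deleting the cache file (or waiting out the TTL) forces a full rescan on the next call.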


@@ -0,0 +1,442 @@
import { readFileSync, writeFileSync } from 'fs';
// Read current file
let content = readFileSync('ccw/src/tools/smart-search.js', 'utf8');
// Step 1: Fix imports
content = content.replace(
"import { existsSync, readdirSync, statSync } from 'fs';",
"import { existsSync, readdirSync, statSync, readFileSync, writeFileSync, mkdirSync } from 'fs';"
);
// Step 2: Fix duplicate const { query... } lines in buildRipgrepCommand
const lines = content.split('\n');
const fixedLines = [];
let inBuildRipgrep = false;
let foundQueryDecl = false;
for (let i = 0; i < lines.length; i++) {
const line = lines[i];
if (line.includes('function buildRipgrepCommand(params)')) {
inBuildRipgrep = true;
foundQueryDecl = false;
fixedLines.push(line);
continue;
}
if (inBuildRipgrep && line.includes('const { query,')) {
if (!foundQueryDecl) {
// Keep the first (fuzzy) version
foundQueryDecl = true;
fixedLines.push(line);
}
// Skip duplicate
continue;
}
if (inBuildRipgrep && line.includes('return { command:')) {
inBuildRipgrep = false;
}
// Remove old exact-mode-only comment
if (line.includes('// Use literal/fixed string matching for exact mode')) {
continue;
}
// Skip old args.push('-F', query)
if (line.trim() === "args.push('-F', query);") {
continue;
}
// Remove errant 'n/**' line
if (line.trim() === 'n/**') {
continue;
}
fixedLines.push(line);
}
content = fixedLines.join('\n');
// Step 3: Insert helper functions before executeGraphMode
const graphHelpers = `
/**
* Parse import statements from file content
* @param {string} fileContent - File content to parse
* @returns {Array<{source: string, specifiers: string[]}>}
*/
function parseImports(fileContent) {
const imports = [];
// Pattern 1: ES6 import statements
const es6ImportPattern = /import\\s+(?:(?:(\\*\\s+as\\s+\\w+)|(\\w+)|(?:\\{([^}]+)\\}))\\s+from\\s+)?['\"]([^'\"]+)['\"]/g;
let match;
while ((match = es6ImportPattern.exec(fileContent)) !== null) {
const source = match[4];
const specifiers = [];
if (match[1]) specifiers.push(match[1]);
else if (match[2]) specifiers.push(match[2]);
else if (match[3]) {
const named = match[3].split(',').map(s => s.trim());
specifiers.push(...named);
}
imports.push({ source, specifiers });
}
// Pattern 2: CommonJS require()
const requirePattern = /require\\(['\"]([^'\"]+)['\"]\\)/g;
while ((match = requirePattern.exec(fileContent)) !== null) {
imports.push({ source: match[1], specifiers: [] });
}
// Pattern 3: Dynamic import()
const dynamicImportPattern = /import\\(['\"]([^'\"]+)['\"]\\)/g;
while ((match = dynamicImportPattern.exec(fileContent)) !== null) {
imports.push({ source: match[1], specifiers: [] });
}
// Pattern 4: TypeScript import type
const typeImportPattern = /import\\s+type\\s+(?:\\{([^}]+)\\})\\s+from\\s+['\"]([^'\"]+)['\"]/g;
while ((match = typeImportPattern.exec(fileContent)) !== null) {
const source = match[2];
const specifiers = match[1].split(',').map(s => s.trim());
imports.push({ source, specifiers });
}
return imports;
}
/**
* Parse export statements from file content
* @param {string} fileContent - File content to parse
* @returns {Array<{name: string, type: string}>}
*/
function parseExports(fileContent) {
const exports = [];
// Pattern 1: export default
const defaultExportPattern = /export\\s+default\\s+(?:class|function|const|let|var)?\\s*(\\w+)?/g;
let match;
while ((match = defaultExportPattern.exec(fileContent)) !== null) {
exports.push({ name: match[1] || 'default', type: 'default' });
}
// Pattern 2: export named declarations
const namedDeclPattern = /export\\s+(?:const|let|var|function|class)\\s+(\\w+)/g;
while ((match = namedDeclPattern.exec(fileContent)) !== null) {
exports.push({ name: match[1], type: 'named' });
}
// Pattern 3: export { ... }
const namedExportPattern = /export\\s+\\{([^}]+)\\}/g;
while ((match = namedExportPattern.exec(fileContent)) !== null) {
const names = match[1].split(',').map(s => {
const parts = s.trim().split(/\\s+as\\s+/);
return parts[parts.length - 1];
});
names.forEach(name => {
exports.push({ name: name.trim(), type: 'named' });
});
}
return exports;
}
/**
* Build dependency graph by scanning project files
* @param {string} rootPath - Root directory to scan
* @param {string[]} gitignorePatterns - Patterns to exclude
* @returns {{nodes: Array, edges: Array, metadata: Object}}
*/
function buildDependencyGraph(rootPath, gitignorePatterns = []) {
const nodes = [];
const edges = [];
const processedFiles = new Set();
const SYSTEM_EXCLUDES = [
'.git', 'node_modules', '.npm', '.yarn', '.pnpm',
'dist', 'build', 'out', 'coverage', '.cache',
'.next', '.nuxt', '.vite', '__pycache__', 'venv'
];
function shouldExclude(name) {
if (SYSTEM_EXCLUDES.includes(name)) return true;
for (const pattern of gitignorePatterns) {
if (name === pattern) return true;
if (pattern.includes('*')) {
const regex = new RegExp('^' + pattern.replace(/\\*/g, '.*') + '$');
if (regex.test(name)) return true;
}
}
return false;
}
function scanDirectory(dirPath) {
if (!existsSync(dirPath)) return;
try {
const entries = readdirSync(dirPath, { withFileTypes: true });
for (const entry of entries) {
if (shouldExclude(entry.name)) continue;
const fullPath = join(dirPath, entry.name);
if (entry.isDirectory()) {
scanDirectory(fullPath);
} else if (entry.isFile()) {
const ext = entry.name.split('.').pop();
if (['js', 'mjs', 'cjs', 'ts', 'tsx', 'jsx'].includes(ext)) {
processFile(fullPath);
}
}
}
} catch (err) {
// Skip directories we can't read
}
}
function processFile(filePath) {
if (processedFiles.has(filePath)) return;
processedFiles.add(filePath);
try {
const content = readFileSync(filePath, 'utf8');
const relativePath = './' + filePath.replace(rootPath, '').replace(/\\\\/g, '/').replace(/^\\//, '');
const fileExports = parseExports(content);
nodes.push({
id: relativePath,
path: filePath,
exports: fileExports
});
const imports = parseImports(content);
imports.forEach(imp => {
let targetPath = imp.source;
if (!targetPath.startsWith('.') && !targetPath.startsWith('/')) {
return;
}
const targetRelative = './' + targetPath.replace(/^\\.\\//, '');
edges.push({
from: relativePath,
to: targetRelative,
imports: imp.specifiers
});
});
} catch (err) {
// Skip files we can't read or parse
}
}
scanDirectory(rootPath);
const circularDeps = detectCircularDependencies(edges);
return {
nodes,
edges,
metadata: {
timestamp: Date.now(),
rootPath,
nodeCount: nodes.length,
edgeCount: edges.length,
circular_deps_detected: circularDeps.length > 0,
circular_deps: circularDeps
}
};
}
/**
* Detect circular dependencies in the graph
* @param {Array} edges - Graph edges
* @returns {Array} List of circular dependency chains
*/
function detectCircularDependencies(edges) {
const cycles = [];
const visited = new Set();
const recStack = new Set();
const graph = {};
edges.forEach(edge => {
if (!graph[edge.from]) graph[edge.from] = [];
graph[edge.from].push(edge.to);
});
function dfs(node, path = []) {
if (recStack.has(node)) {
const cycleStart = path.indexOf(node);
if (cycleStart !== -1) {
cycles.push(path.slice(cycleStart).concat(node));
}
return;
}
if (visited.has(node)) return;
visited.add(node);
recStack.add(node);
path.push(node);
const neighbors = graph[node] || [];
for (const neighbor of neighbors) {
dfs(neighbor, [...path]);
}
recStack.delete(node);
}
Object.keys(graph).forEach(node => {
if (!visited.has(node)) {
dfs(node);
}
});
return cycles;
}
`;
const newExecuteGraphMode = `/**
* Mode: graph - Dependency and relationship traversal
* Analyzes code relationships (imports, exports, dependencies)
*/
async function executeGraphMode(params) {
const { query, paths = [], maxResults = 100 } = params;
const rootPath = resolve(process.cwd(), paths[0] || '.');
const cacheDir = join(process.cwd(), '.ccw-cache');
const cacheFile = join(cacheDir, 'dependency-graph.json');
const CACHE_TTL = 5 * 60 * 1000;
let graph;
if (existsSync(cacheFile)) {
try {
const cached = JSON.parse(readFileSync(cacheFile, 'utf8'));
const age = Date.now() - cached.metadata.timestamp;
if (age < CACHE_TTL) {
graph = cached;
}
} catch (err) {
// Cache invalid, will rebuild
}
}
if (!graph) {
const gitignorePatterns = [];
const gitignorePath = join(rootPath, '.gitignore');
if (existsSync(gitignorePath)) {
const content = readFileSync(gitignorePath, 'utf8');
content.split('\\n').forEach(line => {
line = line.trim();
if (!line || line.startsWith('#')) return;
gitignorePatterns.push(line.replace(/\\/$/, ''));
});
}
graph = buildDependencyGraph(rootPath, gitignorePatterns);
try {
mkdirSync(cacheDir, { recursive: true });
writeFileSync(cacheFile, JSON.stringify(graph, null, 2), 'utf8');
} catch (err) {
// Cache write failed, continue
}
}
const queryLower = query.toLowerCase();
let queryType = 'unknown';
let filteredNodes = [];
let filteredEdges = [];
let queryPaths = [];
if (queryLower.match(/imports?\\s+(\\S+)/)) {
queryType = 'imports';
const target = queryLower.match(/imports?\\s+(\\S+)/)[1];
filteredEdges = graph.edges.filter(edge =>
edge.to.includes(target) || edge.imports.some(imp => imp.toLowerCase().includes(target))
);
const nodeIds = new Set(filteredEdges.map(e => e.from));
filteredNodes = graph.nodes.filter(n => nodeIds.has(n.id));
} else if (queryLower.match(/exports?\\s+(\\S+)/)) {
queryType = 'exports';
const target = queryLower.match(/exports?\\s+(\\S+)/)[1];
filteredNodes = graph.nodes.filter(node =>
node.exports.some(exp => exp.name.toLowerCase().includes(target))
);
const nodeIds = new Set(filteredNodes.map(n => n.id));
filteredEdges = graph.edges.filter(e => nodeIds.has(e.from) || nodeIds.has(e.to));
} else if (queryLower.includes('dependency') || queryLower.includes('chain') || queryLower.includes('depends')) {
queryType = 'dependency_chain';
filteredNodes = graph.nodes.slice(0, maxResults);
filteredEdges = graph.edges;
if (graph.metadata.circular_deps && graph.metadata.circular_deps.length > 0) {
queryPaths = graph.metadata.circular_deps.slice(0, 10);
}
} else {
queryType = 'module_search';
filteredNodes = graph.nodes.filter(node =>
node.id.toLowerCase().includes(queryLower) ||
node.path.toLowerCase().includes(queryLower)
);
const nodeIds = new Set(filteredNodes.map(n => n.id));
filteredEdges = graph.edges.filter(e => nodeIds.has(e.from) || nodeIds.has(e.to));
}
if (filteredNodes.length > maxResults) {
filteredNodes = filteredNodes.slice(0, maxResults);
}
return {
success: true,
graph: {
nodes: filteredNodes,
edges: filteredEdges,
paths: queryPaths
},
metadata: {
mode: 'graph',
storage: 'json',
query_type: queryType,
total_nodes: graph.metadata.nodeCount,
total_edges: graph.metadata.edgeCount,
filtered_nodes: filteredNodes.length,
filtered_edges: filteredEdges.length,
circular_deps_detected: graph.metadata.circular_deps_detected,
cached: existsSync(cacheFile),
query
}
};
}`;
// Replace old executeGraphMode
const oldGraphMode = /\/\*\*[\s\S]*?\* Mode: graph - Dependency and relationship traversal[\s\S]*?\*\/\s*async function executeGraphMode\(params\) \{[\s\S]*?error: 'Graph mode not implemented - dependency analysis pending'[\s\S]*?\}/;
content = content.replace(oldGraphMode, graphHelpers + newExecuteGraphMode);
// Write back
writeFileSync('ccw/src/tools/smart-search.js', content, 'utf8');
console.log('Successfully updated smart-search.js');


@@ -185,10 +185,10 @@ SlashCommand(command="/workflow:tools:conflict-resolution --session [sessionId]
**Parse Output**:
- Extract: Execution status (success/skipped/failed)
-- Verify: CONFLICT_RESOLUTION.md file path (if executed)
+- Verify: conflict-resolution.json file path (if executed)
**Validation**:
-- File `.workflow/active/[sessionId]/.process/CONFLICT_RESOLUTION.md` exists (if executed)
+- File `.workflow/active/[sessionId]/.process/conflict-resolution.json` exists (if executed)
**Skip Behavior**:
- If conflict_risk is "none" or "low", skip directly to Phase 3.5
@@ -497,7 +497,7 @@ Return summary to user
- Parse context path from Phase 2 output, store in memory
- **Extract conflict_risk from context-package.json**: Determine Phase 3 execution
- **If conflict_risk ≥ medium**: Launch Phase 3 conflict-resolution with sessionId and contextPath
-- Wait for Phase 3 to finish executing (if executed), verify CONFLICT_RESOLUTION.md created
+- Wait for Phase 3 to finish executing (if executed), verify conflict-resolution.json created
- **If conflict_risk is none/low**: Skip Phase 3, proceed directly to Phase 4
- **Build Phase 4 command**: `/workflow:tools:task-generate-agent --session [sessionId]`
- Pass session ID to Phase 4 command


@@ -164,10 +164,10 @@ SlashCommand(command="/workflow:tools:conflict-resolution --session [sessionId]
**Parse Output**:
- Extract: Execution status (success/skipped/failed)
-- Verify: CONFLICT_RESOLUTION.md file path (if executed)
+- Verify: conflict-resolution.json file path (if executed)
**Validation**:
-- File `.workflow/active/[sessionId]/.process/CONFLICT_RESOLUTION.md` exists (if executed)
+- File `.workflow/active/[sessionId]/.process/conflict-resolution.json` exists (if executed)
**Skip Behavior**:
- If conflict_risk is "none" or "low", skip directly to Phase 5
@@ -402,7 +402,7 @@ TDD Workflow Orchestrator
│ ├─ Phase 4.1: Detect conflicts with CLI
│ ├─ Phase 4.2: Present conflicts to user
│ └─ Phase 4.3: Apply resolution strategies
-│ └─ Returns: CONFLICT_RESOLUTION.md ← COLLAPSED
+│ └─ Returns: conflict-resolution.json ← COLLAPSED
│ ELSE:
│ └─ Skip to Phase 5


@@ -169,7 +169,7 @@ Task(subagent_type="cli-execution-agent", prompt=`
### 4. Return Structured Conflict Data
-⚠️ DO NOT generate CONFLICT_RESOLUTION.md file
+⚠️ Output to conflict-resolution.json (generated in Phase 4)
Return JSON format for programmatic processing:
@@ -467,14 +467,30 @@ selectedStrategies.forEach(item => {
console.log(`\nApplying ${modifications.length} modifications...`);
-// 2. Apply each modification using Edit tool
+// 2. Apply each modification using Edit tool (with fallback to context-package.json)
const appliedModifications = [];
const failedModifications = [];
+const fallbackConstraints = []; // For files that don't exist
modifications.forEach((mod, idx) => {
try {
console.log(`[${idx + 1}/${modifications.length}] Modifying ${mod.file}...`);
+    // Check if target file exists (brainstorm files may not exist in lite workflow)
+    if (!file_exists(mod.file)) {
+      console.log(`   ⚠️ File does not exist; recording it as a constraint in context-package.json`);
+      fallbackConstraints.push({
+        source: "conflict-resolution",
+        conflict_id: mod.conflict_id,
+        target_file: mod.file,
+        section: mod.section,
+        change_type: mod.change_type,
+        content: mod.new_content,
+        rationale: mod.rationale
+      });
+      return; // Skip to next modification
+    }
if (mod.change_type === "update") {
Edit({
file_path: mod.file,
@@ -502,14 +518,45 @@ modifications.forEach((mod, idx) => {
}
});
-// 3. Update context-package.json with resolution details
+// 2b. Generate conflict-resolution.json output file
+const resolutionOutput = {
+  session_id: sessionId,
+  resolved_at: new Date().toISOString(),
+  summary: {
+    total_conflicts: conflicts.length,
+    resolved_with_strategy: selectedStrategies.length,
+    custom_handling: customConflicts.length,
+    fallback_constraints: fallbackConstraints.length
+  },
+  resolved_conflicts: selectedStrategies.map(s => ({
+    conflict_id: s.conflict_id,
+    strategy_name: s.strategy.name,
+    strategy_approach: s.strategy.approach,
+    clarifications: s.clarifications || [],
+    modifications_applied: s.strategy.modifications?.filter(m =>
+      appliedModifications.some(am => am.conflict_id === s.conflict_id)
+    ) || []
+  })),
+  custom_conflicts: customConflicts.map(c => ({
+    id: c.id,
+    brief: c.brief,
+    category: c.category,
+    suggestions: c.suggestions,
+    overlap_analysis: c.overlap_analysis || null
+  })),
+  planning_constraints: fallbackConstraints, // Constraints for files that don't exist
+  failed_modifications: failedModifications
+};
+const resolutionPath = `.workflow/active/${sessionId}/.process/conflict-resolution.json`;
+Write(resolutionPath, JSON.stringify(resolutionOutput, null, 2));
+console.log(`\n📄 Conflict resolution results saved: ${resolutionPath}`);
+// 3. Update context-package.json with resolution details (reference to JSON file)
const contextPackage = JSON.parse(Read(contextPath));
contextPackage.conflict_detection.conflict_risk = "resolved";
-contextPackage.conflict_detection.resolved_conflicts = selectedStrategies.map(s => ({
-  conflict_id: s.conflict_id,
-  strategy_name: s.strategy.name,
-  clarifications: s.clarifications
-}));
+contextPackage.conflict_detection.resolution_file = resolutionPath; // Reference to detailed JSON
+contextPackage.conflict_detection.resolved_conflicts = selectedStrategies.map(s => s.conflict_id);
contextPackage.conflict_detection.custom_conflicts = customConflicts.map(c => c.id);
contextPackage.conflict_detection.resolved_at = new Date().toISOString();
Write(contextPath, JSON.stringify(contextPackage, null, 2));
@@ -582,12 +629,50 @@ return {
✓ Agent log saved to .workflow/active/{session_id}/.chat/
```
-## Output Format: Agent JSON Response
+## Output Format
+### Primary Output: conflict-resolution.json
+**Path**: `.workflow/active/{session_id}/.process/conflict-resolution.json`
+**Schema**:
+```json
+{
+  "session_id": "WFS-xxx",
+  "resolved_at": "ISO timestamp",
+  "summary": {
+    "total_conflicts": 3,
+    "resolved_with_strategy": 2,
+    "custom_handling": 1,
+    "fallback_constraints": 0
+  },
+  "resolved_conflicts": [
+    {
+      "conflict_id": "CON-001",
+      "strategy_name": "Strategy name",
+      "strategy_approach": "Implementation approach",
+      "clarifications": [],
+      "modifications_applied": []
+    }
+  ],
+  "custom_conflicts": [
+    {
+      "id": "CON-002",
+      "brief": "Conflict summary",
+      "category": "ModuleOverlap",
+      "suggestions": ["Suggestion 1", "Suggestion 2"],
+      "overlap_analysis": null
+    }
+  ],
+  "planning_constraints": [],
+  "failed_modifications": []
+}
+```
+### Secondary: Agent JSON Response (stdout)
**Focus**: Structured conflict data with actionable modifications for programmatic processing.
**Format**: JSON to stdout (NO file generation)
**Structure**: Defined in Phase 2, Step 4 (agent prompt)
### Key Requirements
@@ -635,11 +720,12 @@ If Edit tool fails mid-application:
- Requires: `conflict_risk ≥ medium`
**Output**:
-- Modified files:
+- Generated file:
+  - `.workflow/active/{session_id}/.process/conflict-resolution.json` (primary output)
+- Modified files (if exist):
- `.workflow/active/{session_id}/.brainstorm/guidance-specification.md`
- `.workflow/active/{session_id}/.brainstorm/{role}/analysis.md`
-- `.workflow/active/{session_id}/.process/context-package.json` (conflict_risk → resolved)
-- NO report file generation
+- `.workflow/active/{session_id}/.process/context-package.json` (conflict_risk → resolved, resolution_file reference)
**User Interaction**:
- **Iterative conflict processing**: One conflict at a time, not in batches
@@ -667,7 +753,7 @@ If Edit tool fails mid-application:
✓ guidance-specification.md updated with resolved conflicts
✓ Role analyses (*.md) updated with resolved conflicts
✓ context-package.json marked as "resolved" with clarification records
-No CONFLICT_RESOLUTION.md file generated
+conflict-resolution.json generated with full resolution details
✓ Modification summary includes:
- Total conflicts
- Resolved with strategy (count)


@@ -89,6 +89,14 @@ Phase 3: Integration (+1 Coordinator, Multi-Module Only)
3. **Auto Module Detection** (determines single vs parallel mode):
```javascript
function autoDetectModules(contextPackage, projectRoot) {
+  // === Complexity Gate: Only parallelize for High complexity ===
+  const complexity = contextPackage.metadata?.complexity || 'Medium';
+  if (complexity !== 'High') {
+    // Force single agent mode for Low/Medium complexity
+    // This maximizes agent context reuse for related tasks
+    return [{ name: 'main', prefix: '', paths: ['.'] }];
+  }
// Priority 1: Explicit frontend/backend separation
if (exists('src/frontend') && exists('src/backend')) {
return [
@@ -112,8 +120,9 @@ Phase 3: Integration (+1 Coordinator, Multi-Module Only)
```
**Decision Logic**:
+- `complexity !== 'High'` → Force Phase 2A (Single Agent, maximize context reuse)
- `modules.length == 1` → Phase 2A (Single Agent, original flow)
-- `modules.length >= 2` → Phase 2B + Phase 3 (N+1 Parallel)
+- `modules.length >= 2 && complexity == 'High'` → Phase 2B + Phase 3 (N+1 Parallel)
**Note**: CLI tool usage is now determined semantically by action-planning-agent based on user's task description, not by flags.
@@ -163,6 +172,13 @@ Determine CLI tool usage per-step based on user's task description:
- Use aggregated_insights.all_integration_points for precise modification locations
- Use conflict_indicators for risk-aware task sequencing
+## CONFLICT RESOLUTION CONTEXT (if exists)
+- Check context-package.conflict_detection.resolution_file for conflict-resolution.json path
+- If exists, load .process/conflict-resolution.json:
+  - Apply planning_constraints as task constraints (for brainstorm-less workflows)
+  - Reference resolved_conflicts for implementation approach alignment
+  - Handle custom_conflicts with explicit task notes
## EXPECTED DELIVERABLES
1. Task JSON Files (.task/IMPL-*.json)
- 6-field schema (id, title, status, context_package_path, meta, context, flow_control)
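A minimal sketch of the consumption logic this section describes: resolve the resolution_file reference from context-package.json, then fold planning_constraints into task constraints (addTaskConstraint is a hypothetical helper, not part of these diffs):

import { readFileSync, existsSync } from 'fs';
const ctx = JSON.parse(readFileSync(contextPath, 'utf8')); // contextPath assumed to be known
if (ctx.conflict_detection?.resolution_file && existsSync(ctx.conflict_detection.resolution_file)) {
  const resolution = JSON.parse(readFileSync(ctx.conflict_detection.resolution_file, 'utf8'));
  resolution.planning_constraints.forEach(c => addTaskConstraint(c)); // hypothetical helper
  // resolved_conflicts inform the implementation approach; custom_conflicts become explicit task notes.
}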


@@ -152,9 +152,14 @@ Phase 2: Agent Execution (Document Generation)
roleAnalysisPaths.forEach(path => Read(path));
```
-5. **Load Conflict Resolution** (from context-package.json, if exists)
+5. **Load Conflict Resolution** (from conflict-resolution.json, if exists)
```javascript
-if (contextPackage.brainstorm_artifacts.conflict_resolution?.exists) {
+// Check for new conflict-resolution.json format
+if (contextPackage.conflict_detection?.resolution_file) {
+  Read(contextPackage.conflict_detection.resolution_file) // .process/conflict-resolution.json
+}
+// Fallback: legacy brainstorm_artifacts path
+else if (contextPackage.brainstorm_artifacts?.conflict_resolution?.exists) {
Read(contextPackage.brainstorm_artifacts.conflict_resolution.path)
}
```
@@ -223,7 +228,7 @@ If conflict_risk was medium/high, modifications have been applied to:
- **guidance-specification.md**: Design decisions updated to resolve conflicts
- **Role analyses (*.md)**: Recommendations adjusted for compatibility
- **context-package.json**: Marked as "resolved" with conflict IDs
-- NO separate CONFLICT_RESOLUTION.md file (conflicts resolved in-place)
+- Conflict resolution results stored in conflict-resolution.json
### MCP Analysis Results (Optional)
**Code Structure**: {mcp_code_index_results}
@@ -373,10 +378,12 @@ const agentContext = {
.flatMap(role => role.files)
.map(file => Read(file.path)),
-// Load conflict resolution if exists (from context package)
-conflict_resolution: brainstorm_artifacts.conflict_resolution?.exists
-  ? Read(brainstorm_artifacts.conflict_resolution.path)
-  : null,
+// Load conflict resolution if exists (prefer new JSON format)
+conflict_resolution: context_package.conflict_detection?.resolution_file
+  ? Read(context_package.conflict_detection.resolution_file) // .process/conflict-resolution.json
+  : (brainstorm_artifacts?.conflict_resolution?.exists
+    ? Read(brainstorm_artifacts.conflict_resolution.path)
+    : null),
// Optional MCP enhancements
mcp_analysis: executeMcpDiscovery()
@@ -408,7 +415,7 @@ This section provides quick reference for TDD task JSON structure. For complete
│ ├── IMPL-3.2.json # Complex feature subtask (if needed)
│ └── ...
└── .process/
-├── CONFLICT_RESOLUTION.md # Conflict resolution strategies (if conflict_risk ≥ medium)
+├── conflict-resolution.json # Conflict resolution results (if conflict_risk ≥ medium)
├── test-context-package.json # Test coverage analysis
├── context-package.json # Input from context-gather
├── context_package_path # Path to smart context package

apply-fuzzy-changes.js (new file, 177 lines)

@@ -0,0 +1,177 @@
import { readFileSync, writeFileSync } from 'fs';
const filePath = 'ccw/src/tools/smart-search.js';
let content = readFileSync(filePath, 'utf8');
// 1. Add buildFuzzyRegex function after detectRelationship
const buildFuzzyRegexFunc = `
/**
* Build fuzzy regex pattern for approximate matching
* @param {string} query - Search query string
* @param {number} maxDistance - Edit distance tolerance (default: 1)
* @returns {string} - Regex pattern suitable for ripgrep -e flag
*/
function buildFuzzyRegex(query, maxDistance = 1) {
const escaped = query.replace(/[.*+?^\${}()|[\\]\\\\]/g, '\\\\$&');
let pattern;
if (maxDistance === 1) {
pattern = escaped.split('').map(c => {
const upper = c.toUpperCase();
const lower = c.toLowerCase();
if (upper !== lower) {
return \`[\${upper}\${lower}]\`;
}
return c;
}).join('');
} else if (maxDistance === 2) {
pattern = escaped.split('').map(c => \`\${c}?\`).join('.*');
} else {
pattern = escaped;
}
if (/^[a-zA-Z0-9_]+$/.test(query)) {
pattern = \`\\\\b\${pattern}\\\\b\`;
}
return pattern;
}
`;
content = content.replace(
/(function detectRelationship\(query\) \{[\s\S]*?\n\})\n\n(\/\*\*\n \* Classify query intent)/,
`$1\n${buildFuzzyRegexFunc}\n$2`
);
// 2. Add fuzzy parameter to buildRipgrepCommand
content = content.replace(
'const { query, paths = [\'.\'], contextLines = 0, maxResults = 100, includeHidden = false } = params;',
'const { query, paths = [\'.\'], contextLines = 0, maxResults = 100, includeHidden = false, fuzzy = false } = params;'
);
// 3. Replace literal matching line with fuzzy conditional
content = content.replace(
/\/\/ Use literal\/fixed string matching for exact mode\n args\.push\('-F', query\);/,
`// Use fuzzy regex or literal matching based on mode
if (fuzzy) {
args.push('-i', '-e', buildFuzzyRegex(query));
} else {
args.push('-F', query);
}`
);
// 4. Add fuzzy case in executeAutoMode
content = content.replace(
/(case 'exact':[\s\S]*?\};\n\n)( case 'fuzzy':\n case 'semantic':)/,
`$1 case 'fuzzy':
// Execute fuzzy mode and enrich result with classification metadata
const fuzzyResult = await executeFuzzyMode(params);
return {
...fuzzyResult,
metadata: {
...fuzzyResult.metadata,
classified_as: classification.mode,
confidence: classification.confidence,
reasoning: classification.reasoning
}
};
case 'semantic':`
);
// 5. Replace executeFuzzyMode implementation
const fuzzyModeImpl = `async function executeFuzzyMode(params) {
const { query, paths = [], contextLines = 0, maxResults = 100, includeHidden = false } = params;
// Check ripgrep availability
if (!checkToolAvailability('rg')) {
return {
success: false,
error: 'ripgrep not available - please install ripgrep (rg) to use fuzzy search mode'
};
}
// Build ripgrep command with fuzzy=true
const { command, args } = buildRipgrepCommand({
query,
paths: paths.length > 0 ? paths : ['.'],
contextLines,
maxResults,
includeHidden,
fuzzy: true
});
return new Promise((resolve) => {
const child = spawn(command, args, {
cwd: process.cwd(),
stdio: ['ignore', 'pipe', 'pipe']
});
let stdout = '';
let stderr = '';
child.stdout.on('data', (data) => {
stdout += data.toString();
});
child.stderr.on('data', (data) => {
stderr += data.toString();
});
child.on('close', (code) => {
const results = [];
if (code === 0 || (code === 1 && stdout.trim())) {
const lines = stdout.split('\\n').filter(line => line.trim());
for (const line of lines) {
try {
const item = JSON.parse(line);
if (item.type === 'match') {
const match = {
file: item.data.path.text,
line: item.data.line_number,
column: item.data.submatches && item.data.submatches[0] ? item.data.submatches[0].start + 1 : 1,
content: item.data.lines.text.trim()
};
results.push(match);
}
} catch (err) {
continue;
}
}
resolve({
success: true,
results,
metadata: {
mode: 'fuzzy',
backend: 'ripgrep-regex',
fuzzy_strategy: 'approximate regex',
count: results.length,
query
}
});
} else {
resolve({
success: false,
error: \`ripgrep execution failed with code \${code}: \${stderr}\`,
results: []
});
}
});
child.on('error', (error) => {
resolve({
success: false,
error: \`Failed to spawn ripgrep: \${error.message}\`,
results: []
});
});
});
}`;
content = content.replace(
/async function executeFuzzyMode\(params\) \{[\s\S]*? \}\n\}/,
fuzzyModeImpl
);
writeFileSync(filePath, content, 'utf8');
console.log('Fuzzy mode implementation applied successfully');
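Tracing the injected buildFuzzyRegex by hand, the patterns it emits look like this (values derived from the code above; with ripgrep's -i flag also passed, the character classes at distance 1 are redundant but harmless):

buildFuzzyRegex('getUser');     // maxDistance 1 (default)
// → '\b[Gg][Ee][Tt][Uu][Ss][Ee][Rr]\b'
buildFuzzyRegex('getUser', 2);
// → '\bg?.*e?.*t?.*U?.*s?.*e?.*r?\b'  (each character optional, arbitrary gaps allowed)
buildFuzzyRegex('a+b');
// → '[Aa]\+[Bb]'  (metacharacters escaped; no \b wrapping because the query contains a non-word character)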

apply-fuzzy.py (new file, 187 lines)

@@ -0,0 +1,187 @@
#!/usr/bin/env python3
import re
with open('ccw/src/tools/smart-search.js', 'r', encoding='utf-8') as f:
content = f.read()
# Step 1: Add buildFuzzyRegex after detectRelationship
fuzzy_regex_func = r'''
/**
* Build fuzzy regex pattern for approximate matching
* @param {string} query - Search query string
* @param {number} maxDistance - Edit distance tolerance (default: 1)
* @returns {string} - Regex pattern suitable for ripgrep -e flag
*/
function buildFuzzyRegex(query, maxDistance = 1) {
const escaped = query.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
let pattern;
if (maxDistance === 1) {
pattern = escaped.split('').map(c => {
const upper = c.toUpperCase();
const lower = c.toLowerCase();
if (upper !== lower) {
return `[${upper}${lower}]`;
}
return c;
}).join('');
} else if (maxDistance === 2) {
pattern = escaped.split('').map(c => `${c}?`).join('.*');
} else {
pattern = escaped;
}
if (/^[a-zA-Z0-9_]+$/.test(query)) {
pattern = `\\b${pattern}\\b`;
}
return pattern;
}
'''
content = re.sub(
r'(function detectRelationship\(query\) \{[^}]+\})\n\n(/\*\*\n \* Classify)',
r'\1' + fuzzy_regex_func + r'\n\2',
content
)
# Step 2: Add fuzzy param to buildRipgrepCommand
content = content.replace(
"const { query, paths = ['.'], contextLines = 0, maxResults = 100, includeHidden = false } = params;",
"const { query, paths = ['.'], contextLines = 0, maxResults = 100, includeHidden = false, fuzzy = false } = params;"
)
# Step 3: Replace literal matching with fuzzy conditional
content = re.sub(
r' // Use literal/fixed string matching for exact mode\n args\.push\(\'-F\', query\);',
r''' // Use fuzzy regex or literal matching based on mode
if (fuzzy) {
args.push('-i', '-e', buildFuzzyRegex(query));
} else {
args.push('-F', query);
}''',
content
)
# Step 4: Update executeAutoMode fuzzy case
fuzzy_case = ''' case 'fuzzy':
// Execute fuzzy mode and enrich result with classification metadata
const fuzzyResult = await executeFuzzyMode(params);
return {
...fuzzyResult,
metadata: {
...fuzzyResult.metadata,
classified_as: classification.mode,
confidence: classification.confidence,
reasoning: classification.reasoning
}
};
case 'semantic':'''
content = re.sub(
r" case 'fuzzy':\n case 'semantic':",
fuzzy_case,
content
)
# Step 5: Replace executeFuzzyMode
fuzzy_impl = '''async function executeFuzzyMode(params) {
const { query, paths = [], contextLines = 0, maxResults = 100, includeHidden = false } = params;
// Check ripgrep availability
if (!checkToolAvailability('rg')) {
return {
success: false,
error: 'ripgrep not available - please install ripgrep (rg) to use fuzzy search mode'
};
}
// Build ripgrep command with fuzzy=true
const { command, args } = buildRipgrepCommand({
query,
paths: paths.length > 0 ? paths : ['.'],
contextLines,
maxResults,
includeHidden,
fuzzy: true
});
return new Promise((resolve) => {
const child = spawn(command, args, {
cwd: process.cwd(),
stdio: ['ignore', 'pipe', 'pipe']
});
let stdout = '';
let stderr = '';
child.stdout.on('data', (data) => {
stdout += data.toString();
});
child.stderr.on('data', (data) => {
stderr += data.toString();
});
child.on('close', (code) => {
const results = [];
if (code === 0 || (code === 1 && stdout.trim())) {
const lines = stdout.split('\\n').filter(line => line.trim());
for (const line of lines) {
try {
const item = JSON.parse(line);
if (item.type === 'match') {
const match = {
file: item.data.path.text,
line: item.data.line_number,
column: item.data.submatches && item.data.submatches[0] ? item.data.submatches[0].start + 1 : 1,
content: item.data.lines.text.trim()
};
results.push(match);
}
} catch (err) {
continue;
}
}
resolve({
success: true,
results,
metadata: {
mode: 'fuzzy',
backend: 'ripgrep-regex',
fuzzy_strategy: 'approximate regex',
count: results.length,
query
}
});
} else {
resolve({
success: false,
error: `ripgrep execution failed with code ${code}: ${stderr}`,
results: []
});
}
});
child.on('error', (error) => {
resolve({
success: false,
error: `Failed to spawn ripgrep: ${error.message}`,
results: []
});
});
});
}'''
content = re.sub(
r'async function executeFuzzyMode\(params\) \{.*? \}\n\}',
fuzzy_impl,
content,
flags=re.DOTALL
)
with open('ccw/src/tools/smart-search.js', 'w', encoding='utf-8') as f:
f.write(content)
print('Fuzzy mode implementation applied successfully')
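For reference, the lines the close handlers above parse are ripgrep --json events (the base command is assumed to pass --json, which the JSON-lines parsing implies); one abridged match event looks like:

{"type":"match","data":{"path":{"text":"src/user.js"},"lines":{"text":"function getUser(id) {\n"},"line_number":42,"submatches":[{"match":{"text":"getUser"},"start":9,"end":16}]}}

Note that ripgrep exits with code 1 when nothing matched, which is why both scripts treat exit code 1 with non-empty stdout as a still-usable result.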