mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-03-11 17:21:03 +08:00
feat: Implement DeepWiki generator and CLI integration
- Added `deepwiki_generator.py` for generating documentation from source code. - Integrated symbol extraction and markdown generation for supported file types. - Implemented database migration for legacy timestamp formats in DeepWikiStore. - Enhanced debug logging for better traceability during conversation and store operations. - Updated dependencies in `PKG-INFO` and `requires.txt` for compatibility. - Added new tests for the DeepWiki generator and storage functionalities. - Refactored existing code for improved readability and maintainability.
This commit is contained in:
@@ -216,7 +216,7 @@ rg "export.*Component" --files-with-matches --type ts
|
||||
CONTEXT: @components/Auth.tsx @types/auth.d.ts | Memory: Previous type refactoring
|
||||
|
||||
# Step 3: Execute CLI
|
||||
ccw cli -p "..." --tool <tool-id> --mode analysis --cd src
|
||||
ccw cli -p "..." --tool <tool-id> --mode analysis --cd "src"
|
||||
```
|
||||
|
||||
### --rule Configuration
|
||||
@@ -313,12 +313,12 @@ ccw cli -p "..." --tool gemini --mode analysis --rule analysis-review-architectu
|
||||
- Description: Model override
|
||||
- Default: Tool's primaryModel from config
|
||||
|
||||
- **`--cd <path>`**
|
||||
- Description: Working directory
|
||||
- **`--cd "<path>"`**
|
||||
- Description: Working directory (quote if path contains spaces)
|
||||
- Default: current
|
||||
|
||||
- **`--includeDirs <dirs>`**
|
||||
- Description: Additional directories (comma-separated)
|
||||
- **`--includeDirs "<dirs>"`**
|
||||
- Description: Additional directories (comma-separated, quote if paths contain spaces)
|
||||
- Default: none
|
||||
|
||||
- **`--resume [id]`**
|
||||
@@ -347,10 +347,10 @@ When using `--cd`:
|
||||
|
||||
```bash
|
||||
# Single directory
|
||||
ccw cli -p "CONTEXT: @**/* @../shared/**/*" --tool <tool-id> --mode analysis --cd src/auth --includeDirs ../shared
|
||||
ccw cli -p "CONTEXT: @**/* @../shared/**/*" --tool <tool-id> --mode analysis --cd "src/auth" --includeDirs "../shared"
|
||||
|
||||
# Multiple directories
|
||||
ccw cli -p "..." --tool <tool-id> --mode analysis --cd src/auth --includeDirs ../shared,../types,../utils
|
||||
ccw cli -p "..." --tool <tool-id> --mode analysis --cd "src/auth" --includeDirs "../shared,../types,../utils"
|
||||
```
|
||||
|
||||
**Rule**: If CONTEXT contains `@../dir/**/*`, MUST include `--includeDirs ../dir`
|
||||
@@ -397,7 +397,7 @@ MODE: analysis
|
||||
CONTEXT: @src/auth/**/* @src/middleware/auth.ts | Memory: Using bcrypt for passwords, JWT for sessions
|
||||
EXPECTED: Security report with: severity matrix, file:line references, CVE mappings where applicable, remediation code snippets prioritized by risk
|
||||
CONSTRAINTS: Focus on authentication | Ignore test files
|
||||
" --tool gemini --mode analysis --rule analysis-assess-security-risks --cd src/auth
|
||||
" --tool gemini --mode analysis --rule analysis-assess-security-risks --cd "src/auth"
|
||||
```
|
||||
|
||||
**Implementation Task** (New Feature):
|
||||
@@ -419,7 +419,7 @@ MODE: analysis
|
||||
CONTEXT: @src/websocket/**/* @src/services/connection-manager.ts | Memory: Using ws library, ~5000 concurrent connections in production
|
||||
EXPECTED: Root cause analysis with: memory profile, leak source (file:line), fix recommendation with code, verification steps
|
||||
CONSTRAINTS: Focus on resource cleanup
|
||||
" --tool gemini --mode analysis --rule analysis-diagnose-bug-root-cause --cd src
|
||||
" --tool gemini --mode analysis --rule analysis-diagnose-bug-root-cause --cd "src"
|
||||
```
|
||||
|
||||
**Refactoring Task**:
|
||||
|
||||
24749
ccw/package-lock.json
generated
24749
ccw/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -201,7 +201,7 @@ export function run(argv: string[]): void {
|
||||
.option('--status <status>', 'Filter by status')
|
||||
.option('--category <category>', 'Execution category: user, internal, insight', 'user')
|
||||
.option('--resume [id]', 'Resume previous session (empty=last, or execution ID, or comma-separated IDs for merge)')
|
||||
.option('--id <id>', 'Custom execution ID (e.g., IMPL-001-step1)')
|
||||
.option('--id <id>', 'Execution ID (recommended, auto-generated if omitted)')
|
||||
.option('--no-native', 'Force prompt concatenation instead of native resume')
|
||||
.option('--cache [items]', 'Cache: comma-separated @patterns and text content')
|
||||
.option('--inject-mode <mode>', 'Inject mode: none, full, progressive (default: codex=full, others=none)')
|
||||
@@ -226,7 +226,10 @@ export function run(argv: string[]): void {
|
||||
.option('--output-type <type>', 'Output type: stdout, stderr, both', 'both')
|
||||
.option('--turn <n>', 'Turn number for cache (default: latest)')
|
||||
.option('--raw', 'Raw output only (no formatting)')
|
||||
.option('--final', 'Output final result only with usage hint')
|
||||
.option('--final', 'Output final result only (legacy, now default)')
|
||||
.option('--verbose', 'Show full metadata in output view')
|
||||
.option('--timeout <seconds>', 'Timeout for watch command')
|
||||
.option('--all', 'Show all executions in show command')
|
||||
.option('--to-file <path>', 'Save output to file')
|
||||
.action((subcommand, args, options) => cliCommand(subcommand, args, options));
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ import type { CliOutputUnit } from '../tools/cli-output-converter.js';
|
||||
import { SmartContentFormatter } from '../tools/cli-output-converter.js';
|
||||
import {
|
||||
cliExecutorTool,
|
||||
generateExecutionId,
|
||||
getCliToolsStatus,
|
||||
getExecutionHistory,
|
||||
getExecutionHistoryAsync,
|
||||
@@ -181,6 +182,7 @@ interface OutputViewOptions {
|
||||
turn?: string;
|
||||
raw?: boolean;
|
||||
final?: boolean; // Only output final result with usage hint
|
||||
verbose?: boolean; // Show full metadata (original default behavior)
|
||||
project?: string; // Optional project path for lookup
|
||||
}
|
||||
|
||||
@@ -432,56 +434,57 @@ async function outputAction(conversationId: string | undefined, options: OutputV
|
||||
return;
|
||||
}
|
||||
|
||||
if (options.final) {
|
||||
// Final result only with usage hint
|
||||
// Prefer finalOutput (agent_message only) > parsedOutput (filtered) > raw stdout
|
||||
const outputContent = result.finalOutput?.content || result.parsedOutput?.content || result.stdout?.content;
|
||||
if (outputContent) {
|
||||
console.log(outputContent);
|
||||
}
|
||||
if (options.verbose) {
|
||||
// Verbose: full metadata + output (original default behavior)
|
||||
console.log(chalk.bold.cyan('Execution Output\n'));
|
||||
console.log(` ${chalk.gray('ID:')} ${result.conversationId}`);
|
||||
console.log(` ${chalk.gray('Turn:')} ${result.turnNumber}`);
|
||||
console.log(` ${chalk.gray('Cached:')} ${result.cached ? chalk.green('Yes') : chalk.yellow('No')}`);
|
||||
console.log(` ${chalk.gray('Status:')} ${result.status}`);
|
||||
console.log(` ${chalk.gray('Time:')} ${result.timestamp}`);
|
||||
console.log(` ${chalk.gray('Project:')} ${chalk.cyan(projectPath)}`);
|
||||
console.log();
|
||||
console.log(chalk.gray('─'.repeat(60)));
|
||||
console.log(chalk.dim(`Usage: ccw cli output ${conversationId} [options]`));
|
||||
console.log(chalk.dim(' --raw Raw output (no formatting)'));
|
||||
console.log(chalk.dim(' --offset <n> Start from byte offset'));
|
||||
console.log(chalk.dim(' --limit <n> Limit output bytes'));
|
||||
console.log(chalk.dim(' --project <p> Specify project path explicitly'));
|
||||
console.log(chalk.dim(` --resume ccw cli -p "..." --resume ${conversationId}`));
|
||||
|
||||
if (result.stdout) {
|
||||
console.log(` ${chalk.gray('Stdout:')} (${result.stdout.totalBytes} bytes, offset ${result.stdout.offset})`);
|
||||
console.log(chalk.gray(' ' + '-'.repeat(60)));
|
||||
console.log(result.stdout.content);
|
||||
console.log(chalk.gray(' ' + '-'.repeat(60)));
|
||||
if (result.stdout.hasMore) {
|
||||
console.log(chalk.yellow(` ... ${result.stdout.totalBytes - result.stdout.offset - result.stdout.content.length} more bytes available`));
|
||||
console.log(chalk.gray(` Use --offset ${result.stdout.offset + result.stdout.content.length} to continue`));
|
||||
}
|
||||
console.log();
|
||||
}
|
||||
|
||||
if (result.stderr && result.stderr.content) {
|
||||
console.log(` ${chalk.gray('Stderr:')} (${result.stderr.totalBytes} bytes, offset ${result.stderr.offset})`);
|
||||
console.log(chalk.gray(' ' + '-'.repeat(60)));
|
||||
console.log(result.stderr.content);
|
||||
console.log(chalk.gray(' ' + '-'.repeat(60)));
|
||||
if (result.stderr.hasMore) {
|
||||
console.log(chalk.yellow(` ... ${result.stderr.totalBytes - result.stderr.offset - result.stderr.content.length} more bytes available`));
|
||||
}
|
||||
console.log();
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Formatted output
|
||||
console.log(chalk.bold.cyan('Execution Output\n'));
|
||||
console.log(` ${chalk.gray('ID:')} ${result.conversationId}`);
|
||||
console.log(` ${chalk.gray('Turn:')} ${result.turnNumber}`);
|
||||
console.log(` ${chalk.gray('Cached:')} ${result.cached ? chalk.green('Yes') : chalk.yellow('No')}`);
|
||||
console.log(` ${chalk.gray('Status:')} ${result.status}`);
|
||||
console.log(` ${chalk.gray('Time:')} ${result.timestamp}`);
|
||||
console.log(` ${chalk.gray('Project:')} ${chalk.cyan(projectPath)}`);
|
||||
// Default: final result only (equivalent to --final)
|
||||
// Prefer finalOutput (agent_message only) > parsedOutput (filtered) > raw stdout
|
||||
const outputContent = result.finalOutput?.content || result.parsedOutput?.content || result.stdout?.content;
|
||||
if (outputContent) {
|
||||
console.log(outputContent);
|
||||
}
|
||||
console.log();
|
||||
|
||||
if (result.stdout) {
|
||||
console.log(` ${chalk.gray('Stdout:')} (${result.stdout.totalBytes} bytes, offset ${result.stdout.offset})`);
|
||||
console.log(chalk.gray(' ' + '-'.repeat(60)));
|
||||
console.log(result.stdout.content);
|
||||
console.log(chalk.gray(' ' + '-'.repeat(60)));
|
||||
if (result.stdout.hasMore) {
|
||||
console.log(chalk.yellow(` ... ${result.stdout.totalBytes - result.stdout.offset - result.stdout.content.length} more bytes available`));
|
||||
console.log(chalk.gray(` Use --offset ${result.stdout.offset + result.stdout.content.length} to continue`));
|
||||
}
|
||||
console.log();
|
||||
}
|
||||
|
||||
if (result.stderr && result.stderr.content) {
|
||||
console.log(` ${chalk.gray('Stderr:')} (${result.stderr.totalBytes} bytes, offset ${result.stderr.offset})`);
|
||||
console.log(chalk.gray(' ' + '-'.repeat(60)));
|
||||
console.log(result.stderr.content);
|
||||
console.log(chalk.gray(' ' + '-'.repeat(60)));
|
||||
if (result.stderr.hasMore) {
|
||||
console.log(chalk.yellow(` ... ${result.stderr.totalBytes - result.stderr.offset - result.stderr.content.length} more bytes available`));
|
||||
}
|
||||
console.log();
|
||||
}
|
||||
console.log(chalk.gray('\u2500'.repeat(60)));
|
||||
console.log(chalk.dim(`Usage: ccw cli output ${conversationId} [options]`));
|
||||
console.log(chalk.dim(' --verbose Show full metadata'));
|
||||
console.log(chalk.dim(' --raw Raw output (no formatting)'));
|
||||
console.log(chalk.dim(' --offset <n> Start from byte offset'));
|
||||
console.log(chalk.dim(' --limit <n> Limit output bytes'));
|
||||
console.log(chalk.dim(' --project <p> Specify project path explicitly'));
|
||||
console.log(chalk.dim(` --resume ccw cli -p "..." --resume ${conversationId}`));
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -922,8 +925,8 @@ async function execAction(positionalPrompt: string | undefined, options: CliExec
|
||||
console.log();
|
||||
}
|
||||
|
||||
// Generate execution ID for streaming (use custom ID or timestamp-based)
|
||||
const executionId = id || `${Date.now()}-${tool}`;
|
||||
// Generate execution ID for streaming (use custom ID or auto-generated readable ID)
|
||||
const executionId = id || generateExecutionId(tool);
|
||||
const startTime = Date.now();
|
||||
const modelInfo = model ? ` @${model}` : '';
|
||||
const spinnerBaseText = `Executing ${tool}${modelInfo} (${mode} mode${resumeInfo}${nativeMode})${idInfo}...`;
|
||||
@@ -989,9 +992,9 @@ async function execAction(positionalPrompt: string | undefined, options: CliExec
|
||||
mode
|
||||
});
|
||||
|
||||
if (process.env.DEBUG) {
|
||||
console.error(`[CLI] Generated executionId: ${executionId}`);
|
||||
}
|
||||
// Always output execution ID to stderr for programmatic capture
|
||||
// Callers can: ccw cli -p "..." 2>&1 | grep CCW_EXEC_ID
|
||||
console.error(`[CCW_EXEC_ID=${executionId}]`);
|
||||
|
||||
// Buffer to accumulate output when both --stream and --to-file are specified
|
||||
let streamBuffer = '';
|
||||
@@ -1306,6 +1309,214 @@ async function execAction(positionalPrompt: string | undefined, options: CliExec
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Show all executions — active (running) + recent completed
|
||||
* Combines live dashboard state with SQLite history
|
||||
*/
|
||||
async function showAction(options: { all?: boolean }): Promise<void> {
|
||||
console.log(chalk.bold.cyan('\n CLI Executions\n'));
|
||||
|
||||
// 1. Try to fetch active executions from dashboard
|
||||
let activeExecs: Array<{
|
||||
id: string; tool: string; mode: string; status: string;
|
||||
prompt: string; startTime: number; isComplete?: boolean;
|
||||
}> = [];
|
||||
|
||||
try {
|
||||
const data = await new Promise<string>((resolve, reject) => {
|
||||
const req = http.request({
|
||||
hostname: 'localhost',
|
||||
port: Number(DASHBOARD_PORT),
|
||||
path: '/api/cli/active',
|
||||
method: 'GET',
|
||||
timeout: 2000,
|
||||
agent: false,
|
||||
headers: { 'Connection': 'close' }
|
||||
}, (res) => {
|
||||
let body = '';
|
||||
res.on('data', (chunk: Buffer) => { body += chunk.toString(); });
|
||||
res.on('end', () => resolve(body));
|
||||
});
|
||||
req.on('error', reject);
|
||||
req.on('timeout', () => { req.destroy(); reject(new Error('timeout')); });
|
||||
req.end();
|
||||
});
|
||||
const parsed = JSON.parse(data);
|
||||
activeExecs = Array.isArray(parsed) ? parsed : (parsed.executions || []);
|
||||
} catch {
|
||||
// Dashboard not available — show only history
|
||||
}
|
||||
|
||||
// 2. Get recent history from SQLite
|
||||
const historyLimit = options.all ? 100 : 20;
|
||||
const history = await getExecutionHistoryAsync(process.cwd(), { limit: historyLimit, recursive: true });
|
||||
|
||||
// 3. Build unified list: active first, then history (de-duped)
|
||||
const seenIds = new Set<string>();
|
||||
const rows: Array<{
|
||||
id: string; tool: string; mode: string; status: string;
|
||||
prompt: string; time: string; duration: string;
|
||||
}> = [];
|
||||
|
||||
// Active executions (running)
|
||||
for (const exec of activeExecs) {
|
||||
if (exec.status === 'running') {
|
||||
seenIds.add(exec.id);
|
||||
const elapsed = Math.floor((Date.now() - exec.startTime) / 1000);
|
||||
rows.push({
|
||||
id: exec.id,
|
||||
tool: exec.tool,
|
||||
mode: exec.mode,
|
||||
status: 'running',
|
||||
prompt: (exec.prompt || '').replace(/\n/g, ' ').substring(0, 50),
|
||||
time: `${elapsed}s ago`,
|
||||
duration: `${elapsed}s...`,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// History executions
|
||||
for (const exec of history.executions) {
|
||||
if (seenIds.has(exec.id)) continue;
|
||||
seenIds.add(exec.id);
|
||||
const duration = exec.duration_ms >= 1000
|
||||
? `${(exec.duration_ms / 1000).toFixed(1)}s`
|
||||
: `${exec.duration_ms}ms`;
|
||||
const timeAgo = getTimeAgo(new Date(exec.updated_at || exec.timestamp));
|
||||
rows.push({
|
||||
id: exec.id,
|
||||
tool: exec.tool,
|
||||
mode: exec.mode || 'analysis',
|
||||
status: exec.status,
|
||||
prompt: exec.prompt_preview.replace(/\n/g, ' ').substring(0, 50),
|
||||
time: timeAgo,
|
||||
duration,
|
||||
});
|
||||
}
|
||||
|
||||
if (rows.length === 0) {
|
||||
console.log(chalk.gray(' No executions found.\n'));
|
||||
return;
|
||||
}
|
||||
|
||||
// 4. Render table
|
||||
console.log(chalk.gray(' Status Tool Mode Duration Time ID'));
|
||||
console.log(chalk.gray(' ' + '\u2500'.repeat(80)));
|
||||
|
||||
for (const row of rows) {
|
||||
const statusIcon = row.status === 'running' ? chalk.blue('\u25CF') :
|
||||
row.status === 'success' || row.status === 'completed' ? chalk.green('\u25CF') :
|
||||
row.status === 'timeout' ? chalk.yellow('\u25CF') : chalk.red('\u25CF');
|
||||
console.log(` ${statusIcon} ${chalk.bold.white(row.tool.padEnd(8))} ${chalk.gray(row.mode.padEnd(9))} ${chalk.gray(row.duration.padEnd(9))} ${chalk.gray(row.time.padEnd(11))} ${chalk.dim(row.id)}`);
|
||||
if (row.prompt) {
|
||||
console.log(chalk.gray(` ${row.prompt}${row.prompt.length >= 50 ? '...' : ''}`));
|
||||
}
|
||||
}
|
||||
|
||||
console.log();
|
||||
console.log(chalk.gray(' ' + '\u2500'.repeat(80)));
|
||||
console.log(chalk.dim(' Output: ccw cli output <id>'));
|
||||
console.log(chalk.dim(' Watch: ccw cli watch <id>'));
|
||||
console.log(chalk.dim(' Detail: ccw cli detail <id>'));
|
||||
console.log();
|
||||
}
|
||||
|
||||
/**
|
||||
* Watch a running execution — stream output to stderr until completion
|
||||
* Exits with code 0 (success), 1 (error), or 2 (timeout)
|
||||
*/
|
||||
async function watchAction(watchId: string | undefined, options: { timeout?: string }): Promise<void> {
|
||||
if (!watchId) {
|
||||
console.error(chalk.red('Error: Execution ID is required'));
|
||||
console.error(chalk.gray('Usage: ccw cli watch <id> [--timeout 120]'));
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const timeoutMs = options.timeout ? parseInt(options.timeout, 10) * 1000 : 0;
|
||||
const startTime = Date.now();
|
||||
|
||||
process.stderr.write(chalk.cyan(`Watching execution: ${watchId}\n`));
|
||||
|
||||
// Track output position for incremental display
|
||||
let lastOutputLen = 0;
|
||||
|
||||
const poll = async (): Promise<number> => {
|
||||
// Check timeout
|
||||
if (timeoutMs > 0 && (Date.now() - startTime) > timeoutMs) {
|
||||
process.stderr.write(chalk.yellow('\nWatch timed out.\n'));
|
||||
return 2;
|
||||
}
|
||||
|
||||
try {
|
||||
// Fetch active execution state from dashboard
|
||||
const data = await new Promise<string>((resolve, reject) => {
|
||||
const req = http.request({
|
||||
hostname: 'localhost',
|
||||
port: Number(DASHBOARD_PORT),
|
||||
path: '/api/cli/active',
|
||||
method: 'GET',
|
||||
timeout: 3000,
|
||||
agent: false,
|
||||
headers: { 'Connection': 'close' }
|
||||
}, (res) => {
|
||||
let body = '';
|
||||
res.on('data', (chunk: Buffer) => { body += chunk.toString(); });
|
||||
res.on('end', () => resolve(body));
|
||||
});
|
||||
req.on('error', reject);
|
||||
req.on('timeout', () => { req.destroy(); reject(new Error('timeout')); });
|
||||
req.end();
|
||||
});
|
||||
|
||||
const parsed = JSON.parse(data);
|
||||
const executions = Array.isArray(parsed) ? parsed : (parsed.executions || []);
|
||||
const exec = executions.find((e: { id: string }) => e.id === watchId);
|
||||
|
||||
if (exec) {
|
||||
// Show incremental output
|
||||
const fullOutput = exec.output || '';
|
||||
if (fullOutput.length > lastOutputLen) {
|
||||
process.stderr.write(fullOutput.slice(lastOutputLen));
|
||||
lastOutputLen = fullOutput.length;
|
||||
}
|
||||
|
||||
if (exec.status === 'running') {
|
||||
// Still running — wait and poll again
|
||||
await new Promise(r => setTimeout(r, 1000));
|
||||
return poll();
|
||||
}
|
||||
|
||||
// Completed
|
||||
process.stderr.write(chalk.green(`\nExecution ${exec.status === 'completed' || exec.status === 'success' ? 'completed' : 'failed'}.\n`));
|
||||
return (exec.status === 'completed' || exec.status === 'success') ? 0 : 1;
|
||||
}
|
||||
} catch {
|
||||
// Dashboard not available
|
||||
}
|
||||
|
||||
// Not found in active — check SQLite history
|
||||
const store = getHistoryStore(process.cwd());
|
||||
const result = store.getCachedOutput(watchId);
|
||||
if (result) {
|
||||
process.stderr.write(chalk.gray(`\nExecution already completed (status: ${result.status}).\n`));
|
||||
process.stderr.write(chalk.dim(`Use: ccw cli output ${watchId}\n`));
|
||||
return result.status === 'success' ? 0 : 1;
|
||||
}
|
||||
|
||||
// Not found anywhere — may still be starting, wait and retry a few times
|
||||
if ((Date.now() - startTime) < 10000) {
|
||||
await new Promise(r => setTimeout(r, 1000));
|
||||
return poll();
|
||||
}
|
||||
|
||||
process.stderr.write(chalk.red(`\nExecution not found: ${watchId}\n`));
|
||||
return 1;
|
||||
};
|
||||
|
||||
const exitCode = await poll();
|
||||
process.exit(exitCode);
|
||||
}
|
||||
|
||||
/**
|
||||
* Show execution history
|
||||
* @param {Object} options - CLI options
|
||||
@@ -1455,6 +1666,14 @@ export async function cliCommand(
|
||||
await historyAction(options as HistoryOptions);
|
||||
break;
|
||||
|
||||
case 'show':
|
||||
await showAction(options as unknown as { all?: boolean });
|
||||
break;
|
||||
|
||||
case 'watch':
|
||||
await watchAction(argsArray[0], options as unknown as { timeout?: string });
|
||||
break;
|
||||
|
||||
case 'detail':
|
||||
await detailAction(argsArray[0]);
|
||||
break;
|
||||
@@ -1506,11 +1725,13 @@ export async function cliCommand(
|
||||
console.log(chalk.gray(' echo "prompt" | ccw cli --tool <tool> Execute from stdin (pipe)'));
|
||||
console.log();
|
||||
console.log(' Subcommands:');
|
||||
console.log(chalk.gray(' show List all executions (active + recent)'));
|
||||
console.log(chalk.gray(' watch <id> Stream execution output (stderr, exits on completion)'));
|
||||
console.log(chalk.gray(' output <id> Get final execution result'));
|
||||
console.log(chalk.gray(' status Check CLI tools availability'));
|
||||
console.log(chalk.gray(' storage [cmd] Manage CCW storage (info/clean/config)'));
|
||||
console.log(chalk.gray(' history Show execution history'));
|
||||
console.log(chalk.gray(' detail <id> Show execution detail'));
|
||||
console.log(chalk.gray(' output <id> Show execution output with pagination'));
|
||||
console.log(chalk.gray(' detail <id> Show execution detail (legacy, use show/output)'));
|
||||
console.log(chalk.gray(' test-parse [args] Debug CLI argument parsing'));
|
||||
console.log();
|
||||
console.log(' Options:');
|
||||
@@ -1523,7 +1744,7 @@ export async function cliCommand(
|
||||
console.log(chalk.gray(' --effort <level> Effort level for claude (low, medium, high)'));
|
||||
console.log(chalk.gray(' --cd <path> Working directory'));
|
||||
console.log(chalk.gray(' --includeDirs <dirs> Additional directories'));
|
||||
// --timeout removed - controlled by external caller (bash timeout)
|
||||
console.log(chalk.gray(' --id <id> Execution ID (recommended, auto-generated if omitted)'));
|
||||
console.log(chalk.gray(' --resume [id] Resume previous session'));
|
||||
console.log(chalk.gray(' --cache <items> Cache: comma-separated @patterns and text'));
|
||||
console.log(chalk.gray(' --inject-mode <m> Inject mode: none, full, progressive'));
|
||||
|
||||
@@ -18,6 +18,7 @@ import {
|
||||
deleteExecutionAsync,
|
||||
batchDeleteExecutionsAsync,
|
||||
executeCliTool,
|
||||
generateExecutionId,
|
||||
getNativeSessionContent,
|
||||
getFormattedNativeConversation,
|
||||
getEnrichedConversation,
|
||||
@@ -940,7 +941,7 @@ export async function handleCliRoutes(ctx: RouteContext): Promise<boolean> {
|
||||
}
|
||||
}
|
||||
|
||||
const executionId = `${Date.now()}-${tool}`;
|
||||
const executionId = generateExecutionId(tool);
|
||||
|
||||
// Store active execution for state recovery
|
||||
// Check map size limit before creating new execution
|
||||
|
||||
@@ -11,13 +11,22 @@
|
||||
|
||||
import type { RouteContext } from './types.js';
|
||||
import { getDeepWikiService } from '../../services/deepwiki-service.js';
|
||||
import { handleDeepWikiRoutes as handleDeepWikiPostRoutes } from '../../services/deepwiki-service.js';
|
||||
|
||||
/**
|
||||
* Handle DeepWiki routes
|
||||
* @returns true if route was handled, false otherwise
|
||||
*/
|
||||
export async function handleDeepWikiRoutes(ctx: RouteContext): Promise<boolean> {
|
||||
const { pathname, url, res } = ctx;
|
||||
const { pathname, url, res, req } = ctx;
|
||||
|
||||
// POST endpoints implemented in the DeepWiki service module.
|
||||
if (
|
||||
(pathname === '/api/deepwiki/symbols-for-paths' || pathname === '/api/deepwiki/stale-files') &&
|
||||
req.method === 'POST'
|
||||
) {
|
||||
return handleDeepWikiPostRoutes({ pathname, req, res });
|
||||
}
|
||||
|
||||
// GET /api/deepwiki/files - List all documented files
|
||||
if (pathname === '/api/deepwiki/files') {
|
||||
|
||||
@@ -17,6 +17,7 @@ import { getLaunchConfig } from './cli-launch-registry.js';
|
||||
import { assembleInstruction, type InstructionType } from './cli-instruction-assembler.js';
|
||||
import { loadEndpointSettings } from '../../config/cli-settings-manager.js';
|
||||
import { getToolConfig } from '../../tools/claude-cli-tools.js';
|
||||
import { generateExecutionId } from '../../tools/cli-executor.js';
|
||||
|
||||
export interface CliSession {
|
||||
sessionKey: string;
|
||||
@@ -532,7 +533,7 @@ export class CliSessionManager {
|
||||
|
||||
const executionId = resumeKey
|
||||
? `${resumeKey}-${Date.now()}`
|
||||
: `exec-${Date.now()}-${randomBytes(3).toString('hex')}`;
|
||||
: generateExecutionId(options.tool);
|
||||
|
||||
let command: string;
|
||||
|
||||
|
||||
@@ -35,22 +35,31 @@ import {
|
||||
saveConversation
|
||||
} from './cli-executor-state.js';
|
||||
|
||||
// Track all running child processes for cleanup on interruption (multi-process support)
|
||||
const runningChildProcesses = new Set<ChildProcess>();
|
||||
|
||||
// Debug logging for parallel execution testing
|
||||
// Debug logging for history save investigation (Iteration 4)
|
||||
const DEBUG_SESSION_ID = 'DBG-parallel-ccw-cli-test-2026-03-07';
|
||||
const DEBUG_LOG_PATH = path.join(process.cwd(), '.workflow', '.debug', DEBUG_SESSION_ID, 'debug.log');
|
||||
const DEBUG_LOG_PATH = path.join(process.cwd(), '.workflow', '.debug', DEBUG_SESSION_ID, 'debug-save.log');
|
||||
|
||||
// Ensure debug log directory exists
|
||||
try {
|
||||
const debugDir = path.dirname(DEBUG_LOG_PATH);
|
||||
if (!fs.existsSync(debugDir)) {
|
||||
fs.mkdirSync(debugDir, { recursive: true });
|
||||
}
|
||||
} catch (err) {
|
||||
// Ignore directory creation errors
|
||||
}
|
||||
|
||||
function writeDebugLog(event: string, data: Record<string, any>): void {
|
||||
try {
|
||||
const logEntry = JSON.stringify({ event, ...data, timestamp: new Date().toISOString() }) + '\n';
|
||||
fs.appendFileSync(DEBUG_LOG_PATH, logEntry, 'utf8');
|
||||
} catch (err) {
|
||||
// Silently ignore logging errors to avoid disrupting execution
|
||||
// Silently ignore logging errors
|
||||
}
|
||||
}
|
||||
|
||||
// Track all running child processes for cleanup on interruption (multi-process support)
|
||||
const runningChildProcesses = new Set<ChildProcess>();
|
||||
|
||||
/**
|
||||
* Kill all running CLI child processes
|
||||
@@ -58,16 +67,14 @@ function writeDebugLog(event: string, data: Record<string, any>): void {
|
||||
*/
|
||||
export function killAllCliProcesses(): boolean {
|
||||
if (runningChildProcesses.size === 0) return false;
|
||||
writeDebugLog('KILL_ALL_START', { initial_set_size: runningChildProcesses.size });
|
||||
|
||||
const processesToKill = Array.from(runningChildProcesses);
|
||||
debugLog('KILL', `Killing ${processesToKill.length} child process(es)`, { pids: processesToKill.map(p => p.pid) });
|
||||
writeDebugLog('KILL_ALL_COPY', { pids_to_kill: processesToKill.map(p => p.pid) });
|
||||
|
||||
// 1. SIGTERM for graceful shutdown
|
||||
for (const child of processesToKill) {
|
||||
if (!child.killed) {
|
||||
try { child.kill('SIGTERM'); } catch (e: any) { writeDebugLog('KILL_SIGTERM_ERROR', { pid: child.pid, error: e.message }); }
|
||||
try { child.kill('SIGTERM'); } catch { /* ignore */ }
|
||||
}
|
||||
}
|
||||
|
||||
@@ -75,13 +82,12 @@ export function killAllCliProcesses(): boolean {
|
||||
const killTimeout = setTimeout(() => {
|
||||
for (const child of processesToKill) {
|
||||
if (!child.killed) {
|
||||
try { child.kill('SIGKILL'); } catch (e: any) { writeDebugLog('KILL_SIGKILL_ERROR', { pid: child.pid, error: e.message }); }
|
||||
try { child.kill('SIGKILL'); } catch { /* ignore */ }
|
||||
}
|
||||
}
|
||||
}, 2000);
|
||||
killTimeout.unref();
|
||||
|
||||
writeDebugLog('KILL_ALL_CLEAR', { set_size_before: runningChildProcesses.size, pids_in_set: Array.from(runningChildProcesses).map(p => p.pid) });
|
||||
runningChildProcesses.clear();
|
||||
return true;
|
||||
}
|
||||
@@ -257,7 +263,6 @@ async function executeClaudeWithSettings(params: ClaudeWithSettingsParams): Prom
|
||||
|
||||
// Track child process for cleanup (multi-process support)
|
||||
runningChildProcesses.add(child);
|
||||
writeDebugLog('PROCESS_ADD', { pid: child.pid, set_size_after: runningChildProcesses.size, function: 'executeClaudeWithSettings' });
|
||||
|
||||
let stdout = '';
|
||||
let stderr = '';
|
||||
@@ -297,7 +302,6 @@ async function executeClaudeWithSettings(params: ClaudeWithSettingsParams): Prom
|
||||
|
||||
child.on('close', (code) => {
|
||||
runningChildProcesses.delete(child);
|
||||
writeDebugLog('PROCESS_DELETE', { pid: child.pid, exit_code: code, set_size_after: runningChildProcesses.size, function: 'executeClaudeWithSettings', handler: 'close' });
|
||||
|
||||
const endTime = Date.now();
|
||||
const duration = endTime - startTime;
|
||||
@@ -338,10 +342,8 @@ async function executeClaudeWithSettings(params: ClaudeWithSettingsParams): Prom
|
||||
|
||||
// Save to history
|
||||
try {
|
||||
writeDebugLog('SAVE_CONVERSATION_START', { conversationId: conversation.id, pid: child.pid, function: 'executeClaudeWithSettings' });
|
||||
saveConversation(workingDir, conversation);
|
||||
} catch (err) {
|
||||
writeDebugLog('SAVE_CONVERSATION_ERROR', { conversationId: conversation.id, pid: child.pid, error: (err as Error).message, stack: (err as Error).stack, function: 'executeClaudeWithSettings' });
|
||||
console.error('[CLI Executor] Failed to save CLI封装 history:', (err as Error).message);
|
||||
}
|
||||
|
||||
@@ -356,7 +358,6 @@ async function executeClaudeWithSettings(params: ClaudeWithSettingsParams): Prom
|
||||
|
||||
child.on('error', (error) => {
|
||||
runningChildProcesses.delete(child);
|
||||
writeDebugLog('PROCESS_DELETE', { pid: child.pid, set_size_after: runningChildProcesses.size, function: 'executeClaudeWithSettings', handler: 'error' });
|
||||
reject(new Error(`Failed to spawn claude: ${error.message}`));
|
||||
});
|
||||
});
|
||||
@@ -391,6 +392,30 @@ type BuiltinCliTool = typeof BUILTIN_CLI_TOOLS[number];
|
||||
*/
|
||||
export type TransactionId = string;
|
||||
|
||||
/**
|
||||
* Generate a readable execution ID for CLI executions
|
||||
* Format: {prefix}-{HHmmss}-{rand4} → e.g. gem-143022-x7k2
|
||||
* @param tool - CLI tool name (gemini, qwen, codex, claude, opencode, litellm, etc.)
|
||||
* @returns Short, human-readable execution ID
|
||||
*/
|
||||
export function generateExecutionId(tool: string): string {
|
||||
const prefixMap: Record<string, string> = {
|
||||
gemini: 'gem',
|
||||
qwen: 'qwn',
|
||||
codex: 'cdx',
|
||||
claude: 'cld',
|
||||
opencode: 'opc',
|
||||
litellm: 'llm',
|
||||
};
|
||||
const prefix = prefixMap[tool] || tool.slice(0, 3);
|
||||
const now = new Date();
|
||||
const time = [now.getHours(), now.getMinutes(), now.getSeconds()]
|
||||
.map(n => String(n).padStart(2, '0'))
|
||||
.join('');
|
||||
const rand = Math.random().toString(36).slice(2, 6);
|
||||
return `${prefix}-${time}-${rand}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate a unique transaction ID for the current execution
|
||||
* @param conversationId - CCW conversation ID
|
||||
@@ -526,7 +551,7 @@ async function executeCliTool(
|
||||
const duration = endTime - startTime;
|
||||
|
||||
const execution: ExecutionRecord = {
|
||||
id: customId || `${Date.now()}-litellm`,
|
||||
id: customId || generateExecutionId('litellm'),
|
||||
timestamp: new Date(startTime).toISOString(),
|
||||
tool: 'litellm',
|
||||
model: result.model,
|
||||
@@ -676,7 +701,7 @@ async function executeCliTool(
|
||||
const duration = endTime - startTime;
|
||||
|
||||
const execution: ExecutionRecord = {
|
||||
id: customId || `${Date.now()}-litellm`,
|
||||
id: customId || generateExecutionId('litellm'),
|
||||
timestamp: new Date(startTime).toISOString(),
|
||||
tool: toolName,
|
||||
model: result.model, // Use effective model from result (reflects any override)
|
||||
@@ -789,11 +814,11 @@ async function executeCliTool(
|
||||
existingConversation = loadConversation(workingDir, conversationId);
|
||||
} else {
|
||||
// No previous conversation, create new
|
||||
conversationId = `${Date.now()}-${tool}`;
|
||||
conversationId = generateExecutionId(tool);
|
||||
}
|
||||
} else {
|
||||
// New conversation with auto-generated ID
|
||||
conversationId = `${Date.now()}-${tool}`;
|
||||
conversationId = generateExecutionId(tool);
|
||||
}
|
||||
|
||||
// Generate transaction ID for concurrent session disambiguation
|
||||
@@ -1019,7 +1044,6 @@ async function executeCliTool(
|
||||
|
||||
// Track child process for cleanup on interruption (multi-process support)
|
||||
runningChildProcesses.add(child);
|
||||
writeDebugLog('PROCESS_ADD', { pid: child.pid, set_size_after: runningChildProcesses.size, function: 'executeCliTool', tool });
|
||||
|
||||
debugLog('SPAWN', `Process spawned`, { pid: child.pid });
|
||||
|
||||
@@ -1071,7 +1095,6 @@ async function executeCliTool(
|
||||
child.on('close', async (code) => {
|
||||
// Remove from running processes
|
||||
runningChildProcesses.delete(child);
|
||||
writeDebugLog('PROCESS_DELETE', { pid: child.pid, exit_code: code, set_size_after: runningChildProcesses.size, function: 'executeCliTool', handler: 'close', tool });
|
||||
|
||||
// Flush remaining buffer from parser
|
||||
const remainingUnits = parser.flush();
|
||||
@@ -1200,11 +1223,9 @@ async function executeCliTool(
|
||||
// Save all source conversations
|
||||
try {
|
||||
for (const conv of savedConversations) {
|
||||
writeDebugLog('SAVE_CONVERSATION_START', { conversationId: conv.id, pid: child.pid, function: 'executeCliTool', context: 'merge-loop', tool });
|
||||
saveConversation(workingDir, conv);
|
||||
}
|
||||
} catch (err) {
|
||||
writeDebugLog('SAVE_CONVERSATION_ERROR', { pid: child.pid, error: (err as Error).message, stack: (err as Error).stack, function: 'executeCliTool', context: 'merge-loop', tool });
|
||||
console.error('[CLI Executor] Failed to save merged histories:', (err as Error).message);
|
||||
}
|
||||
} else if (isMerge && mergeResult && customId) {
|
||||
@@ -1244,10 +1265,8 @@ async function executeCliTool(
|
||||
};
|
||||
// Save merged conversation
|
||||
try {
|
||||
writeDebugLog('SAVE_CONVERSATION_START', { conversationId: conversation.id, pid: child.pid, function: 'executeCliTool', context: 'merge-with-id', tool });
|
||||
saveConversation(workingDir, conversation);
|
||||
} catch (err) {
|
||||
writeDebugLog('SAVE_CONVERSATION_ERROR', { conversationId: conversation.id, pid: child.pid, error: (err as Error).message, stack: (err as Error).stack, function: 'executeCliTool', context: 'merge-with-id', tool });
|
||||
console.error('[CLI Executor] Failed to save merged conversation:', (err as Error).message);
|
||||
}
|
||||
} else {
|
||||
@@ -1277,10 +1296,11 @@ async function executeCliTool(
|
||||
};
|
||||
// Try to save conversation to history
|
||||
try {
|
||||
writeDebugLog('SAVE_CONVERSATION_START', { conversationId: conversation.id, pid: child.pid, function: 'executeCliTool', context: 'normal', tool });
|
||||
writeDebugLog('BEFORE_SAVE_CONV', { conversationId: conversation.id, workingDir, tool });
|
||||
saveConversation(workingDir, conversation);
|
||||
writeDebugLog('AFTER_SAVE_CONV', { conversationId: conversation.id, workingDir, tool });
|
||||
} catch (err) {
|
||||
writeDebugLog('SAVE_CONVERSATION_ERROR', { conversationId: conversation.id, pid: child.pid, error: (err as Error).message, stack: (err as Error).stack, function: 'executeCliTool', context: 'normal', tool });
|
||||
writeDebugLog('SAVE_CONV_OUTER_ERROR', { conversationId: conversation.id, workingDir, tool, error: (err as Error).message, stack: (err as Error).stack });
|
||||
// Non-fatal: continue even if history save fails
|
||||
console.error('[CLI Executor] Failed to save history:', (err as Error).message);
|
||||
}
|
||||
@@ -1341,7 +1361,6 @@ async function executeCliTool(
|
||||
child.on('error', (error) => {
|
||||
// Remove from running processes
|
||||
runningChildProcesses.delete(child);
|
||||
writeDebugLog('PROCESS_DELETE', { pid: child.pid, set_size_after: runningChildProcesses.size, function: 'executeCliTool', handler: 'error', tool });
|
||||
|
||||
errorLog('SPAWN', `Failed to spawn process`, error, {
|
||||
tool,
|
||||
|
||||
@@ -6,6 +6,31 @@
|
||||
import type { HistoryIndexEntry } from './cli-history-store.js';
|
||||
import { StoragePaths, ensureStorageDir } from '../config/storage-paths.js';
|
||||
import type { CliOutputUnit } from './cli-output-converter.js';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
|
||||
// Debug logging for history save investigation (Iteration 4)
|
||||
const DEBUG_SESSION_ID = 'DBG-parallel-ccw-cli-test-2026-03-07';
|
||||
const DEBUG_LOG_PATH = path.join(process.cwd(), '.workflow', '.debug', DEBUG_SESSION_ID, 'debug-save.log');
|
||||
|
||||
// Ensure debug log directory exists
|
||||
try {
|
||||
const debugDir = path.dirname(DEBUG_LOG_PATH);
|
||||
if (!fs.existsSync(debugDir)) {
|
||||
fs.mkdirSync(debugDir, { recursive: true });
|
||||
}
|
||||
} catch (err) {
|
||||
// Ignore directory creation errors
|
||||
}
|
||||
|
||||
function writeDebugLog(event: string, data: Record<string, any>): void {
|
||||
try {
|
||||
const logEntry = JSON.stringify({ event, ...data, timestamp: new Date().toISOString() }) + '\n';
|
||||
fs.appendFileSync(DEBUG_LOG_PATH, logEntry, 'utf8');
|
||||
} catch (err) {
|
||||
// Silently ignore logging errors
|
||||
}
|
||||
}
|
||||
|
||||
// Lazy-loaded SQLite store module
|
||||
let sqliteStoreModule: typeof import('./cli-history-store.js') | null = null;
|
||||
@@ -14,8 +39,10 @@ let sqliteStoreModule: typeof import('./cli-history-store.js') | null = null;
|
||||
* Get or initialize SQLite store (async)
|
||||
*/
|
||||
export async function getSqliteStore(baseDir: string) {
|
||||
writeDebugLog('GET_STORE', { baseDir, baseDirType: typeof baseDir, moduleInitialized: sqliteStoreModule !== null });
|
||||
if (!sqliteStoreModule) {
|
||||
sqliteStoreModule = await import('./cli-history-store.js');
|
||||
writeDebugLog('MODULE_LOADED', { baseDir });
|
||||
}
|
||||
return sqliteStoreModule.getHistoryStore(baseDir);
|
||||
}
|
||||
@@ -136,15 +163,20 @@ async function saveConversationAsync(baseDir: string, conversation: Conversation
|
||||
* @param baseDir - Project base directory (NOT historyDir)
|
||||
*/
|
||||
export function saveConversation(baseDir: string, conversation: ConversationRecord): void {
|
||||
writeDebugLog('SAVE_CONV_START', { baseDir, conversationId: conversation.id, moduleInitialized: sqliteStoreModule !== null });
|
||||
try {
|
||||
const store = getSqliteStoreSync(baseDir);
|
||||
writeDebugLog('SAVE_CONV_SYNC', { baseDir, conversationId: conversation.id });
|
||||
// Fire and forget - don't block on async save in sync context
|
||||
store.saveConversation(conversation).catch(err => {
|
||||
writeDebugLog('SAVE_CONV_ERROR', { baseDir, conversationId: conversation.id, error: err.message, stack: err.stack });
|
||||
console.error('[CLI Executor] Failed to save conversation:', err.message);
|
||||
});
|
||||
} catch {
|
||||
} catch (err) {
|
||||
writeDebugLog('SAVE_CONV_FALLBACK_ASYNC', { baseDir, conversationId: conversation.id, error: (err as Error).message });
|
||||
// If sync not available, queue for async save
|
||||
saveConversationAsync(baseDir, conversation).catch(err => {
|
||||
writeDebugLog('SAVE_CONV_ASYNC_ERROR', { baseDir, conversationId: conversation.id, error: err.message, stack: err.stack });
|
||||
console.error('[CLI Executor] Failed to save conversation:', err.message);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -4,13 +4,36 @@
|
||||
*/
|
||||
|
||||
import Database from 'better-sqlite3';
|
||||
import { existsSync, mkdirSync, readdirSync, readFileSync, statSync, unlinkSync, rmdirSync } from 'fs';
|
||||
import { existsSync, mkdirSync, readdirSync, readFileSync, statSync, unlinkSync, rmdirSync, appendFileSync } from 'fs';
|
||||
import { join, dirname, resolve } from 'path';
|
||||
import { parseSessionFile, formatConversation, extractConversationPairs, type ParsedSession, type ParsedTurn } from './session-content-parser.js';
|
||||
import { getDiscoverer, getNativeSessions } from './native-session-discovery.js';
|
||||
import { StoragePaths, ensureStorageDir, getProjectId, getCCWHome } from '../config/storage-paths.js';
|
||||
import type { CliOutputUnit } from './cli-output-converter.js';
|
||||
|
||||
// Debug logging for history save investigation (Iteration 4)
|
||||
const DEBUG_SESSION_ID = 'DBG-parallel-ccw-cli-test-2026-03-07';
|
||||
const DEBUG_LOG_PATH = join(process.cwd(), '.workflow', '.debug', DEBUG_SESSION_ID, 'debug-save.log');
|
||||
|
||||
// Ensure debug log directory exists
|
||||
try {
|
||||
const debugDir = dirname(DEBUG_LOG_PATH);
|
||||
if (!existsSync(debugDir)) {
|
||||
mkdirSync(debugDir, { recursive: true });
|
||||
}
|
||||
} catch (err) {
|
||||
// Ignore directory creation errors
|
||||
}
|
||||
|
||||
function writeDebugLog(event: string, data: Record<string, any>): void {
|
||||
try {
|
||||
const logEntry = JSON.stringify({ event, ...data, timestamp: new Date().toISOString() }) + '\n';
|
||||
appendFileSync(DEBUG_LOG_PATH, logEntry, 'utf8');
|
||||
} catch (err) {
|
||||
// Silently ignore logging errors
|
||||
}
|
||||
}
|
||||
|
||||
// Types
|
||||
export interface ConversationTurn {
|
||||
turn: number;
|
||||
@@ -110,22 +133,29 @@ export class CliHistoryStore {
|
||||
private projectPath: string;
|
||||
|
||||
constructor(baseDir: string) {
|
||||
writeDebugLog('STORE_CONSTRUCT_START', { baseDir });
|
||||
this.projectPath = baseDir;
|
||||
|
||||
// Use centralized storage path
|
||||
const paths = StoragePaths.project(baseDir);
|
||||
const historyDir = paths.cliHistory;
|
||||
writeDebugLog('STORAGE_PATHS', { baseDir, historyDir, historyDb: paths.historyDb });
|
||||
ensureStorageDir(historyDir);
|
||||
|
||||
this.dbPath = paths.historyDb;
|
||||
writeDebugLog('DB_INSTANCE_CREATE', { dbPath: this.dbPath });
|
||||
this.db = new Database(this.dbPath);
|
||||
writeDebugLog('DB_INSTANCE_CREATED', { dbPath: this.dbPath });
|
||||
this.db.pragma('journal_mode = WAL');
|
||||
this.db.pragma('synchronous = NORMAL');
|
||||
this.db.pragma('busy_timeout = 10000'); // Wait up to 10 seconds for locks (increased for write-heavy scenarios)
|
||||
this.db.pragma('wal_autocheckpoint = 1000'); // Optimize WAL checkpointing
|
||||
|
||||
writeDebugLog('INIT_SCHEMA_START', { dbPath: this.dbPath });
|
||||
this.initSchema();
|
||||
writeDebugLog('INIT_SCHEMA_COMPLETE', { dbPath: this.dbPath });
|
||||
this.migrateFromJson(historyDir);
|
||||
writeDebugLog('STORE_CONSTRUCT_COMPLETE', { baseDir, dbPath: this.dbPath });
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -936,11 +966,7 @@ export class CliHistoryStore {
|
||||
const stmt = this.db.prepare(`
|
||||
INSERT INTO native_session_mapping (ccw_id, tool, native_session_id, native_session_path, project_hash, transaction_id, created_at)
|
||||
VALUES (@ccw_id, @tool, @native_session_id, @native_session_path, @project_hash, @transaction_id, @created_at)
|
||||
ON CONFLICT(ccw_id) DO UPDATE SET
|
||||
native_session_id = @native_session_id,
|
||||
native_session_path = @native_session_path,
|
||||
project_hash = @project_hash,
|
||||
transaction_id = @transaction_id
|
||||
ON CONFLICT(tool, native_session_id) DO NOTHING
|
||||
`);
|
||||
|
||||
await this.withRetry(() => stmt.run({
|
||||
|
||||
@@ -9,8 +9,9 @@ Requires-Python: >=3.10
|
||||
Description-Content-Type: text/markdown
|
||||
License-File: LICENSE
|
||||
Requires-Dist: typer~=0.9.0
|
||||
Requires-Dist: click<9,>=8.0.0
|
||||
Requires-Dist: rich~=13.0.0
|
||||
Requires-Dist: pydantic~=2.0.0
|
||||
Requires-Dist: pydantic>=2.5.0
|
||||
Requires-Dist: tree-sitter~=0.20.0
|
||||
Requires-Dist: tree-sitter-python~=0.25.0
|
||||
Requires-Dist: tree-sitter-javascript~=0.25.0
|
||||
@@ -20,16 +21,16 @@ Requires-Dist: watchdog~=3.0.0
|
||||
Requires-Dist: ast-grep-py~=0.40.0
|
||||
Provides-Extra: semantic
|
||||
Requires-Dist: numpy~=1.26.0; extra == "semantic"
|
||||
Requires-Dist: fastembed~=0.2.0; extra == "semantic"
|
||||
Requires-Dist: fastembed~=0.2.1; extra == "semantic"
|
||||
Requires-Dist: hnswlib~=0.8.0; extra == "semantic"
|
||||
Provides-Extra: semantic-gpu
|
||||
Requires-Dist: numpy~=1.26.0; extra == "semantic-gpu"
|
||||
Requires-Dist: fastembed~=0.2.0; extra == "semantic-gpu"
|
||||
Requires-Dist: fastembed~=0.2.1; extra == "semantic-gpu"
|
||||
Requires-Dist: hnswlib~=0.8.0; extra == "semantic-gpu"
|
||||
Requires-Dist: onnxruntime-gpu~=1.15.0; extra == "semantic-gpu"
|
||||
Provides-Extra: semantic-directml
|
||||
Requires-Dist: numpy~=1.26.0; extra == "semantic-directml"
|
||||
Requires-Dist: fastembed~=0.2.0; extra == "semantic-directml"
|
||||
Requires-Dist: fastembed~=0.2.1; extra == "semantic-directml"
|
||||
Requires-Dist: hnswlib~=0.8.0; extra == "semantic-directml"
|
||||
Requires-Dist: onnxruntime-directml~=1.15.0; extra == "semantic-directml"
|
||||
Provides-Extra: reranker-onnx
|
||||
|
||||
@@ -36,6 +36,7 @@ src/codexlens/indexing/symbol_extractor.py
|
||||
src/codexlens/lsp/__init__.py
|
||||
src/codexlens/lsp/handlers.py
|
||||
src/codexlens/lsp/keepalive_bridge.py
|
||||
src/codexlens/lsp/lsp-servers.json
|
||||
src/codexlens/lsp/lsp_bridge.py
|
||||
src/codexlens/lsp/lsp_graph_builder.py
|
||||
src/codexlens/lsp/providers.py
|
||||
@@ -97,6 +98,8 @@ src/codexlens/semantic/reranker/legacy.py
|
||||
src/codexlens/semantic/reranker/litellm_reranker.py
|
||||
src/codexlens/semantic/reranker/onnx_reranker.py
|
||||
src/codexlens/storage/__init__.py
|
||||
src/codexlens/storage/deepwiki_models.py
|
||||
src/codexlens/storage/deepwiki_store.py
|
||||
src/codexlens/storage/dir_index.py
|
||||
src/codexlens/storage/file_cache.py
|
||||
src/codexlens/storage/global_index.py
|
||||
@@ -117,6 +120,8 @@ src/codexlens/storage/migrations/migration_006_enhance_relationships.py
|
||||
src/codexlens/storage/migrations/migration_007_add_graph_neighbors.py
|
||||
src/codexlens/storage/migrations/migration_008_add_merkle_hashes.py
|
||||
src/codexlens/storage/migrations/migration_010_add_multi_vector_chunks.py
|
||||
src/codexlens/tools/__init__.py
|
||||
src/codexlens/tools/deepwiki_generator.py
|
||||
src/codexlens/watcher/__init__.py
|
||||
src/codexlens/watcher/events.py
|
||||
src/codexlens/watcher/file_watcher.py
|
||||
@@ -129,6 +134,7 @@ tests/test_astgrep_binding.py
|
||||
tests/test_binary_searcher.py
|
||||
tests/test_cascade_strategies.py
|
||||
tests/test_chain_search.py
|
||||
tests/test_cli_help.py
|
||||
tests/test_cli_hybrid_search.py
|
||||
tests/test_cli_output.py
|
||||
tests/test_clustering_strategies.py
|
||||
@@ -136,6 +142,8 @@ tests/test_code_extractor.py
|
||||
tests/test_config.py
|
||||
tests/test_config_cascade.py
|
||||
tests/test_config_staged_env_overrides.py
|
||||
tests/test_deepwiki_store.py
|
||||
tests/test_deepwiki_types.py
|
||||
tests/test_dual_fts.py
|
||||
tests/test_embedder.py
|
||||
tests/test_embedding_backend_availability.py
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
[console_scripts]
|
||||
codexlens-lsp = codexlens.lsp:main
|
||||
codexlens-lsp = codexlens.lsp.server:main
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
typer~=0.9.0
|
||||
click<9,>=8.0.0
|
||||
rich~=13.0.0
|
||||
pydantic~=2.0.0
|
||||
pydantic>=2.5.0
|
||||
tree-sitter~=0.20.0
|
||||
tree-sitter-python~=0.25.0
|
||||
tree-sitter-javascript~=0.25.0
|
||||
@@ -43,17 +44,17 @@ transformers~=4.36.0
|
||||
|
||||
[semantic]
|
||||
numpy~=1.26.0
|
||||
fastembed~=0.2.0
|
||||
fastembed~=0.2.1
|
||||
hnswlib~=0.8.0
|
||||
|
||||
[semantic-directml]
|
||||
numpy~=1.26.0
|
||||
fastembed~=0.2.0
|
||||
fastembed~=0.2.1
|
||||
hnswlib~=0.8.0
|
||||
onnxruntime-directml~=1.15.0
|
||||
|
||||
[semantic-gpu]
|
||||
numpy~=1.26.0
|
||||
fastembed~=0.2.0
|
||||
fastembed~=0.2.1
|
||||
hnswlib~=0.8.0
|
||||
onnxruntime-gpu~=1.15.0
|
||||
|
||||
@@ -251,6 +251,11 @@ class DeepWikiStore:
|
||||
except sqlite3.OperationalError:
|
||||
pass # Column already exists
|
||||
|
||||
# Legacy migration: some earlier DeepWiki DBs stored timestamps as TEXT (ISO strings).
|
||||
# better-sqlite3 + JS code expects numeric (REAL) seconds, so ensure timestamp columns
|
||||
# have REAL affinity by rebuilding affected tables when needed.
|
||||
self._migrate_text_timestamps_to_real(conn)
|
||||
|
||||
conn.commit()
|
||||
except sqlite3.DatabaseError as exc:
|
||||
raise StorageError(
|
||||
@@ -270,6 +275,193 @@ class DeepWikiStore:
|
||||
"""
|
||||
return str(Path(path).resolve()).replace("\\", "/")
|
||||
|
||||
def _migrate_text_timestamps_to_real(self, conn: sqlite3.Connection) -> None:
|
||||
"""Migrate legacy TEXT timestamp columns to REAL affinity.
|
||||
|
||||
SQLite's type system is dynamic, but column affinity influences how values are stored and
|
||||
returned. Older DeepWiki databases used TEXT timestamps (often ISO strings). The current
|
||||
schema uses REAL epoch seconds. When we detect TEXT affinity on timestamp columns, we
|
||||
rebuild the table with REAL columns and convert existing values during copy.
|
||||
"""
|
||||
|
||||
self._rebuild_table_with_timestamp_conversion(
|
||||
conn,
|
||||
table="deepwiki_files",
|
||||
create_sql="""
|
||||
CREATE TABLE deepwiki_files (
|
||||
id INTEGER PRIMARY KEY,
|
||||
path TEXT UNIQUE NOT NULL,
|
||||
content_hash TEXT NOT NULL,
|
||||
last_indexed REAL NOT NULL,
|
||||
symbols_count INTEGER DEFAULT 0,
|
||||
docs_generated INTEGER DEFAULT 0,
|
||||
staleness_score REAL DEFAULT 0.0,
|
||||
last_checked_commit TEXT,
|
||||
last_checked_at REAL,
|
||||
staleness_factors TEXT
|
||||
)
|
||||
""",
|
||||
timestamp_columns={"last_indexed", "last_checked_at"},
|
||||
required_timestamp_columns={"last_indexed"},
|
||||
)
|
||||
conn.execute(
|
||||
"CREATE INDEX IF NOT EXISTS idx_deepwiki_files_path ON deepwiki_files(path)"
|
||||
)
|
||||
conn.execute(
|
||||
"CREATE INDEX IF NOT EXISTS idx_deepwiki_files_hash ON deepwiki_files(content_hash)"
|
||||
)
|
||||
|
||||
self._rebuild_table_with_timestamp_conversion(
|
||||
conn,
|
||||
table="deepwiki_docs",
|
||||
create_sql="""
|
||||
CREATE TABLE deepwiki_docs (
|
||||
id INTEGER PRIMARY KEY,
|
||||
path TEXT UNIQUE NOT NULL,
|
||||
content_hash TEXT NOT NULL,
|
||||
symbols TEXT DEFAULT '[]',
|
||||
generated_at REAL NOT NULL,
|
||||
llm_tool TEXT
|
||||
)
|
||||
""",
|
||||
timestamp_columns={"generated_at"},
|
||||
required_timestamp_columns={"generated_at"},
|
||||
)
|
||||
conn.execute("CREATE INDEX IF NOT EXISTS idx_deepwiki_docs_path ON deepwiki_docs(path)")
|
||||
|
||||
self._rebuild_table_with_timestamp_conversion(
|
||||
conn,
|
||||
table="deepwiki_symbols",
|
||||
create_sql="""
|
||||
CREATE TABLE deepwiki_symbols (
|
||||
id INTEGER PRIMARY KEY,
|
||||
name TEXT NOT NULL,
|
||||
type TEXT NOT NULL,
|
||||
source_file TEXT NOT NULL,
|
||||
doc_file TEXT NOT NULL,
|
||||
anchor TEXT NOT NULL,
|
||||
start_line INTEGER NOT NULL,
|
||||
end_line INTEGER NOT NULL,
|
||||
created_at REAL,
|
||||
updated_at REAL,
|
||||
staleness_score REAL DEFAULT 0.0,
|
||||
last_checked_commit TEXT,
|
||||
last_checked_at REAL,
|
||||
staleness_factors TEXT,
|
||||
UNIQUE(name, source_file)
|
||||
)
|
||||
""",
|
||||
timestamp_columns={"created_at", "updated_at", "last_checked_at"},
|
||||
required_timestamp_columns=set(),
|
||||
)
|
||||
conn.execute(
|
||||
"CREATE INDEX IF NOT EXISTS idx_deepwiki_symbols_name ON deepwiki_symbols(name)"
|
||||
)
|
||||
conn.execute(
|
||||
"CREATE INDEX IF NOT EXISTS idx_deepwiki_symbols_source ON deepwiki_symbols(source_file)"
|
||||
)
|
||||
conn.execute(
|
||||
"CREATE INDEX IF NOT EXISTS idx_deepwiki_symbols_doc ON deepwiki_symbols(doc_file)"
|
||||
)
|
||||
|
||||
self._rebuild_table_with_timestamp_conversion(
|
||||
conn,
|
||||
table="generation_progress",
|
||||
create_sql="""
|
||||
CREATE TABLE generation_progress (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
symbol_key TEXT NOT NULL UNIQUE,
|
||||
file_path TEXT NOT NULL,
|
||||
symbol_name TEXT NOT NULL,
|
||||
symbol_type TEXT NOT NULL,
|
||||
layer INTEGER NOT NULL,
|
||||
source_hash TEXT NOT NULL,
|
||||
status TEXT NOT NULL DEFAULT 'pending',
|
||||
attempts INTEGER DEFAULT 0,
|
||||
last_tool TEXT,
|
||||
last_error TEXT,
|
||||
generated_at REAL,
|
||||
created_at REAL,
|
||||
updated_at REAL
|
||||
)
|
||||
""",
|
||||
timestamp_columns={"generated_at", "created_at", "updated_at"},
|
||||
required_timestamp_columns=set(),
|
||||
)
|
||||
conn.execute(
|
||||
"CREATE INDEX IF NOT EXISTS idx_progress_status ON generation_progress(status)"
|
||||
)
|
||||
conn.execute(
|
||||
"CREATE INDEX IF NOT EXISTS idx_progress_file ON generation_progress(file_path)"
|
||||
)
|
||||
conn.execute(
|
||||
"CREATE INDEX IF NOT EXISTS idx_progress_hash ON generation_progress(source_hash)"
|
||||
)
|
||||
|
||||
def _rebuild_table_with_timestamp_conversion(
|
||||
self,
|
||||
conn: sqlite3.Connection,
|
||||
*,
|
||||
table: str,
|
||||
create_sql: str,
|
||||
timestamp_columns: set[str],
|
||||
required_timestamp_columns: set[str],
|
||||
) -> None:
|
||||
info = conn.execute(f"PRAGMA table_info({table})").fetchall()
|
||||
if not info:
|
||||
return
|
||||
|
||||
declared_types = {
|
||||
row["name"]: str(row["type"] or "").strip().upper() for row in info
|
||||
}
|
||||
needs_migration = any(
|
||||
declared_types.get(col) == "TEXT" for col in timestamp_columns if col in declared_types
|
||||
)
|
||||
if not needs_migration:
|
||||
return
|
||||
|
||||
old_table = f"{table}__old_ts"
|
||||
conn.execute(f"ALTER TABLE {table} RENAME TO {old_table}")
|
||||
conn.execute(create_sql)
|
||||
|
||||
old_cols = [
|
||||
r["name"]
|
||||
for r in conn.execute(f"PRAGMA table_info({old_table})").fetchall()
|
||||
]
|
||||
new_cols = [r["name"] for r in conn.execute(f"PRAGMA table_info({table})").fetchall()]
|
||||
common_cols = [c for c in new_cols if c in old_cols]
|
||||
|
||||
select_exprs: list[str] = []
|
||||
for col in common_cols:
|
||||
if col in timestamp_columns:
|
||||
expr = self._sql_timestamp_to_real(col)
|
||||
if col in required_timestamp_columns:
|
||||
expr = f"COALESCE({expr}, CAST(strftime('%s','now') AS REAL))"
|
||||
select_exprs.append(f"{expr} AS {col}")
|
||||
else:
|
||||
select_exprs.append(col)
|
||||
|
||||
cols_sql = ", ".join(common_cols)
|
||||
select_sql = ", ".join(select_exprs)
|
||||
conn.execute(
|
||||
f"INSERT INTO {table} ({cols_sql}) SELECT {select_sql} FROM {old_table}"
|
||||
)
|
||||
conn.execute(f"DROP TABLE {old_table}")
|
||||
|
||||
def _sql_timestamp_to_real(self, col: str) -> str:
|
||||
# Convert various timestamp representations to epoch seconds (REAL).
|
||||
# - numeric types: keep as REAL
|
||||
# - numeric strings: CAST to REAL
|
||||
# - ISO datetime strings: strftime('%s', ...) to epoch seconds
|
||||
return f"""(
|
||||
CASE
|
||||
WHEN {col} IS NULL THEN NULL
|
||||
WHEN typeof({col}) IN ('integer', 'real') THEN CAST({col} AS REAL)
|
||||
WHEN trim({col}) GLOB '[0-9]*' THEN CAST({col} AS REAL)
|
||||
ELSE CAST(strftime('%s', replace(substr({col}, 1, 19), 'T', ' ')) AS REAL)
|
||||
END
|
||||
)"""
|
||||
|
||||
# === File Operations ===
|
||||
|
||||
def add_file(
|
||||
|
||||
@@ -1,22 +1,21 @@
|
||||
"""DeepWiki document generation tools.
|
||||
|
||||
|
||||
This module provides tools for generating documentation from source code.
|
||||
"""
|
||||
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
|
||||
import hashlib
|
||||
import logging
|
||||
import re
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Optional, Protocol
|
||||
from typing import Dict, List, Optional, Protocol, Any
|
||||
|
||||
from codexlens.storage.deepwiki_store import DeepWikiStore
|
||||
from codexlens.storage.deepwiki_models import DeepWikiSymbol
|
||||
from codexlens.errors import StorageError
|
||||
from codexlens.indexing.symbol_extractor import SymbolExtractor
|
||||
from codexlens.parsers.factory import ParserFactory
|
||||
from codexlens.errors import StorageError
|
||||
from codexlens.storage.deepwiki_models import DeepWikiSymbol
|
||||
from codexlens.storage.deepwiki_store import DeepWikiStore
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -24,7 +23,7 @@ logger = logging.getLogger(__name__)
|
||||
# Default timeout for AI generation (30 seconds)
|
||||
AI_TIMEOUT = 30
|
||||
# HTML metadata markers for documentation
|
||||
SYMBOL_START_MARKER = "<!-- deepwiki-symbol-start name=\"symbol_name}\" -->"
|
||||
SYMBOL_START_MARKER = '<!-- deepwiki-symbol-start name="{symbol_name}" -->'
|
||||
SYMBOL_END_MARKER = "<!-- deepwiki-symbol-end -->"
|
||||
|
||||
|
||||
@@ -48,8 +47,8 @@ class MockMarkdownGenerator(MarkdownGenerator):
|
||||
"""Mock Markdown generator for testing."""
|
||||
|
||||
def generate(self, symbol: DeepWikiSymbol, source_code: str) -> str:
|
||||
"""Generate mock Markdown documentation."""
|
||||
return f"# {symbol.name}\n\n## {symbol.type}\n\n{source_code}\n```\n```
|
||||
"""Generate mock Markdown documentation."""
|
||||
return f"# {symbol.name}\\n\\n## {symbol.type}\\n\\n```\\n{source_code}\\n```"
|
||||
|
||||
|
||||
class DeepWikiGenerator:
|
||||
@@ -60,382 +59,168 @@ class DeepWikiGenerator:
|
||||
"""
|
||||
|
||||
DEFAULT_DB_PATH = DeepWikiStore.DEFAULT_DB_PATH
|
||||
SUPPORT_extensions = [".py", ".ts", ".tsx", ".js", ".jsx", ".java", ".go", ".rs", ".swift"]
|
||||
SUPPORTED_EXTENSIONS = [
|
||||
".py",
|
||||
".ts",
|
||||
".tsx",
|
||||
".js",
|
||||
".jsx",
|
||||
".java",
|
||||
".go",
|
||||
".rs",
|
||||
".swift",
|
||||
]
|
||||
AI_TIMEOUT: int = 30 # Timeout for AI generation
|
||||
MAX_SYMBOLS_PER_FILE: int = 100 # Batch size for processing large files
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
db_path: Path | None = None,
|
||||
store: DeepWikiStore = markdown_generator: MarkdownGenerator | None, None,
|
||||
store: DeepWikiStore | None = None,
|
||||
markdown_generator: MarkdownGenerator | None = None,
|
||||
max_symbols_per_file: int = 100,
|
||||
ai_timeout: int = 30,
|
||||
) -> None:
|
||||
self.markdown_generator = MockMarkdownGenerator()
|
||||
self.store = store
|
||||
self._extractor = Symbol_extractor()
|
||||
|
||||
else:
|
||||
self._extractor = SymbolExtractor()
|
||||
if file_path not in _should_process_file:
|
||||
self._extractor.extract_symbols(file_path)
|
||||
if symbols:
|
||||
logger.debug(f"Found {len(symbols)} symbols in {file_path}")
|
||||
else:
|
||||
logger.debug(f"No symbols found in {file_path}")
|
||||
return []
|
||||
# Extract symbols from the file
|
||||
for symbol in symbols:
|
||||
try:
|
||||
file_type = Parser_factory.get_parser(file_path.suffix)
|
||||
if file_type is None:
|
||||
logger.warning(f"Unsupported file type: {file_path}")
|
||||
continue
|
||||
symbols.append(symbols)
|
||||
doc_path = self._generate_docs(symbol)
|
||||
doc_path.mkdir(doc_path, exist_ok=True)
|
||||
for symbol in symbols:
|
||||
doc_path = self._generate_markdown(symbol, source_code)
|
||||
doc.write(doc(doc_id)
|
||||
logger.debug(f"Generated docs for {len(symbols)} symbols in {file_path}")
|
||||
self._store.save_symbol(symbol, doc_path, doc_content, doc_path)
|
||||
self._store.update_file_stats(existing_file.path, symbols_count)
|
||||
self._store.update_file_stats(
|
||||
existing_file.path,
|
||||
symbols_count=len(existing_file.symbols),
|
||||
new_symbols_count=len(symbols),
|
||||
docs_generated += 1
|
||||
)
|
||||
else:
|
||||
# Skip unchanged files (skip update)
|
||||
logger.debug(f"Skipped {len(unchanged_files)} unchanged symbols")
|
||||
logger.debug(f"No symbols found in {file_path}, skipping update")
|
||||
except Exception as e:
|
||||
logger.error(f"Error extracting symbols from {file_path}: {e}")
|
||||
raise StorageError(f"Failed to extract symbols from {file_path}")
|
||||
try:
|
||||
symbol_extractor = SymbolExtractor()
|
||||
symbols = []
|
||||
continue
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to initialize symbol extractor: {e}")
|
||||
raise StorageError(f"Failed to initialize symbol extractor for {file_path}")
|
||||
# Return empty list
|
||||
doc_paths = []
|
||||
for doc_path in doc_paths:
|
||||
try:
|
||||
doc_path.mkdir(doc_path, parents=True, exist_ok=True)
|
||||
for file in files:
|
||||
if not file_path.endswith in support_extensions:
|
||||
continue
|
||||
source_file = file_path
|
||||
source_content = file_path.read_bytes()
|
||||
content_hash = self._calculate_file_hash(file_path)
|
||||
return hash_obj.hexdigest()
|
||||
file_hash = existing_hash
|
||||
if existing_hash == new_hash:
|
||||
logger.debug(
|
||||
f"File unchanged: {file_path}. Skipping (hash match)"
|
||||
)
|
||||
return existing_file
|
||||
# Get language from file path
|
||||
language = self._get_language(file_path)
|
||||
if language is None:
|
||||
language = file_path.suffix
|
||||
# Default to Python if it is other extension
|
||||
language_map = {
|
||||
".ts": "TypeScript",
|
||||
".tsx": "TypeScript React",
|
||||
".js": "JavaScript",
|
||||
".jsx": "JavaScript React",
|
||||
".java": "Java",
|
||||
".go": "Go",
|
||||
".rs": "Rust",
|
||||
".swift": "Swift",
|
||||
}
|
||||
return language
|
||||
file_type = None
|
||||
except ValueError("Unsupported file type: {file_path}")
|
||||
logger.warning(f"Unsupported file type: {file_path}, skipping")
|
||||
continue
|
||||
source_file = file_path
|
||||
source_code = file.read_text()
|
||||
if source_code:
|
||||
try:
|
||||
source_code = file.read_bytes(). hash_obj = hashlib.sha256(source_code.encode("utf-8")
|
||||
return hash_obj.hexdigest()
|
||||
else:
|
||||
return ""
|
||||
# Determine language from file extension
|
||||
file_ext = file_extension.lower().find(f".py, ..ts, .tsx)
|
||||
if file_ext in SUPPORT_extensions:
|
||||
for ext in self.Suffix_lower():
|
||||
logger.debug(f"Unsupported file extension: {file_path}, skipping file")
|
||||
return None
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"Error determining language for {file_path}: {e}")
|
||||
return None, else:
|
||||
return self.suffix_lower() if ext == SUPPORT_extensions:
|
||||
else:
|
||||
return None
|
||||
else:
|
||||
# Check if it is markdown generator exists
|
||||
if markdown_generator:
|
||||
logger.debug("No markdown generator provided, using mock")
|
||||
return None
|
||||
# Check if tool exists
|
||||
if tool:
|
||||
logger.debug(f"Tool not available for {tool}")
|
||||
return None
|
||||
# Extract symbols using regex for tree-sitter
|
||||
language_map = self.Language_map
|
||||
return language_map
|
||||
|
||||
# Read all symbols from the database file
|
||||
file_path = path
|
||||
# Get parser factory
|
||||
if file_path not in support_extensions:
|
||||
logger.debug(f"Unsupported file type: {file_path}, skipping")
|
||||
return []
|
||||
else:
|
||||
logger.debug(f"Extracted {len(symbols)} symbols from {file_path}")
|
||||
return symbols
|
||||
|
||||
def _generate_markdown(self, symbol: DeepWikiSymbol, source_code: str) -> str:
|
||||
"""Generate Markdown documentation for a symbol.
|
||||
|
||||
Args:
|
||||
symbol: The symbol information
|
||||
source_code: The source code content
|
||||
|
||||
Returns:
|
||||
Generated Markdown documentation
|
||||
"""
|
||||
def _generate_markdown(
|
||||
self, symbol: DeepWikiSymbol, source_code: str
|
||||
) -> str:
|
||||
"""Generate mock Markdown documentation."""
|
||||
return f"# {symbol.name}\n\n## {symbol.type}\n\n{source_code}\n```\n```
|
||||
def __init__(
    self,
    db_path: Optional[Path] = None,
    store: Optional["DeepWikiStore"] = None,
    markdown_generator: Optional[Any] = None,
    max_symbols_per_file: int = 100,
    ai_timeout: int = AI_TIMEOUT,
) -> None:
    """Initializes the DeepWikiGenerator.

    NOTE(review): the original ``def`` line was destroyed by a corrupted
    merge; this signature is reconstructed from the body's usage — confirm
    parameter names/defaults against callers.

    Args:
        db_path: Database path used when no ``store`` is supplied;
            falls back to ``self.DEFAULT_DB_PATH``.
        store: Pre-built DeepWikiStore instance; takes precedence over db_path.
        markdown_generator: Generator for symbol markdown. When None, a
            MockMarkdownGenerator is used.
        max_symbols_per_file: Cap on symbols processed per file.
        ai_timeout: Timeout (seconds) for AI-backed generation.
    """
    # Prefer an injected store; otherwise open one at the given/default path.
    if store:
        self.store = store
    else:
        self.store = DeepWikiStore(db_path or self.DEFAULT_DB_PATH)

    if markdown_generator:
        self.markdown_generator = markdown_generator
    else:
        logger.debug("No markdown generator provided, using mock")
        self.markdown_generator = MockMarkdownGenerator()

    self._extractor = SymbolExtractor()
    self.max_symbols_per_file = max_symbols_per_file
    self.ai_timeout = ai_timeout
    self._docs_dir = Path("docs")  # Default docs directory
|
||||
|
||||
doc_path.mkdir(self.docs_dir, parents=True, exist_ok=True)
|
||||
for file in files:
|
||||
if not file_path.endswith in support_extensions:
|
||||
continue
|
||||
source_content = file.read_bytes()
|
||||
doc_content = f.read_text()
|
||||
# Add content to markdown
|
||||
markdown = f"<!-- deepwiki-symbol-start name=\"{symbol.name}\" -->\n{markdown_content}\n{markdown}
|
||||
|
||||
# Calculate anchor ( generate a_anchor(symbol)
|
||||
anchor_line = symbol.line_range[0]
|
||||
doc_path = self._docs_dir / docs_path
|
||||
source_file = os.path.join(source_file, relative_path,)
|
||||
return line_range
|
||||
elif markdown is None:
|
||||
anchor = ""
|
||||
|
||||
{markdown}
|
||||
|
||||
{markdown}
|
||||
# Add anchor link to the from doc file
|
||||
# Calculate doc file hash
|
||||
file_hash = hashlib.sha256(file_content.encode("utf-8")
|
||||
content_hash = existing_hash
|
||||
file_path = source_file
|
||||
if existing_file is None:
|
||||
return None
|
||||
source_file = source_file
|
||||
file_path = str(source_file)
|
||||
for f in symbols:
|
||||
if file_changed
|
||||
logger.info(
|
||||
f"Generated docs for {len(symbols)} symbols in {file_path}"
|
||||
)
|
||||
logger.debug(
|
||||
f"Updated {len(changed_files)} files - {len(changed_symbols)} "
|
||||
)
|
||||
logger.debug(
|
||||
f"Updated {len(unchanged_files)} files: {len(unchanged_symbols)} "
|
||||
)
|
||||
logger.debug(
|
||||
f"unchanged files: {len(unchanged_files)} (unchanged)"
|
||||
)
|
||||
else:
|
||||
logger.debug(
|
||||
f"Processed {len(files)} files, {len(files)} changed symbols, {len(changed_symbols)}"
|
||||
)
|
||||
logger.debug(f"Processed {len(files)} files in {len(files)} changes:")
|
||||
f"Total files changed: {len(changed_files)}, "
|
||||
f" file changes: {len(changed_files)}", "len(changed_symbols)} symbols, {len(changed_symbols)}, new_docs_generated: {len(changed_symbols)}"
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
# Save stats
|
||||
stats["total_files"] = total_files
|
||||
stats["total_symbols"] = total_symbols
|
||||
stats["total_changed_symbols"] = changed_symbols_count
|
||||
stats["unchanged_files"] = unchanged_files_count
|
||||
stats["total_changed_files"] = changed_files
|
||||
logger.info(
|
||||
f"Generation complete - {len(files)} files, {len(symbols)} symbols, {len(changed_files)} changed symbols: files_changed}"
|
||||
f" file changes ({len(changed_files)} changed symbols count} symbols"
|
||||
}
|
||||
f"unchanged files: {len(unchanged_files)} (unchanged_files_count}")
|
||||
stats["unchanged_files"] = unchanged_files
|
||||
stats["unchanged_files"] = unchanged_files
|
||||
logger.info(
|
||||
f"generation complete - {len(files)} files, {len(symbols)} symbols, {len(changed_files)} changed symbols, {len(changed_symbols)} docs generated"
|
||||
}
|
||||
else:
|
||||
stats["unchanged_files"] = len(unchanged_files)
|
||||
stats["unchanged_symbols"] = len(unchanged_symbols)
|
||||
stats["total_symbols"] = total_symbols
|
||||
stats["total_docs_generated"] = total_docs_generated
|
||||
stats["total_changed_files"] = changed_files_count
|
||||
stats["total_changed_files"] = unchanged_files_count
|
||||
return stats
|
||||
def _calculate_file_hash(self, file_path: Path) -> str:
|
||||
"""Calculate SHA256 hash of file content."""
|
||||
try:
|
||||
content = file_path.read_bytes()
|
||||
hash_obj = hashlib.sha256(content)
|
||||
return hash_obj.hexdigest()
|
||||
except IOError as e:
|
||||
logger.error(f"Error reading file for hash calculation: {file_path}: {e}")
|
||||
return ""
|
||||
|
||||
def _get_language(self, file_path: Path) -> str | None:
|
||||
"""Determine language from file extension."""
|
||||
ext = file_path.suffix.lower()
|
||||
if ext not in self.SUPPORTED_EXTENSIONS:
|
||||
logger.debug(f"Unsupported file extension: {file_path}, skipping file")
|
||||
return None
|
||||
|
||||
language_map = {
|
||||
".py": "Python",
|
||||
".ts": "TypeScript",
|
||||
".tsx": "TypeScript React",
|
||||
".js": "JavaScript",
|
||||
".jsx": "JavaScript React",
|
||||
".java": "Java",
|
||||
".go": "Go",
|
||||
".rs": "Rust",
|
||||
".swift": "Swift",
|
||||
}
|
||||
finally:
|
||||
return self.close()
|
||||
def run(self, path: str, output_dir: Optional[str] = None, db_path: Optional[Path] = None, force: bool = False,
|
||||
max_symbols_per_file: int = 100,
|
||||
ai_timeout: int = AI_TIMEOUT,
|
||||
backend: str = "fastembed",
|
||||
model: str = "code",
|
||||
max_workers: int = 1,
|
||||
json_mode: bool = False,
|
||||
verbose: bool = False,
|
||||
) -> None:
|
||||
return language_map.get(ext)
|
||||
|
||||
def _should_process_file(self, file_path: Path, force: bool) -> bool:
    """Decide whether a file needs (re)processing.

    A file is processed when ``force`` is set, or when its current content
    hash differs from the one recorded in the store. An unreadable file
    (empty hash) is never processed.
    """
    if force:
        return True

    current_hash = self._calculate_file_hash(file_path)
    if not current_hash:
        # Hash failed (read error) — nothing sensible to compare against.
        return False

    record = self.store.get_file(str(file_path))
    if record is not None and record.content_hash == current_hash:
        logger.debug(f"File unchanged: {file_path}. Skipping (hash match)")
        return False
    return True
|
||||
|
||||
def _generate_markdown_for_symbol(self, symbol: DeepWikiSymbol, source_code: str) -> str:
    """Generate markdown and wrap it with markers.

    Args:
        symbol: Symbol whose name is interpolated into the start marker.
        source_code: Source passed through to the markdown generator.

    Returns:
        Generated markdown wrapped in start/end marker lines.
    """
    markdown_content = self.markdown_generator.generate(symbol, source_code)
    # BUG FIX: the original used "\\n" (literal backslash-n) so markers and
    # content ended up on one line; real newlines are intended here.
    return f"{SYMBOL_START_MARKER.format(symbol_name=symbol.name)}\n{markdown_content}\n{SYMBOL_END_MARKER}"
|
||||
|
||||
def run(self, path: str, output_dir: Optional[str] = None, force: bool = False) -> Dict[str, Any]:
    """Scan a source tree and generate symbol documentation.

    NOTE(review): the original body was corrupted by interleaved merge/paste
    residue; reconstructed from the surviving coherent implementation. The
    docstring previously described parameters (db_path, backend, model, …)
    that are not in this signature — confirm which signature is intended.

    Args:
        path: Path to the source directory to scan.
        output_dir: Optional docs output directory; overrides the default.
        force: Force full reindex, ignoring stored file hashes.

    Returns:
        Stats dict with total/changed/unchanged file and symbol counts.

    Raises:
        StorageError: If processing any file fails.
    """
    source_root = Path(path)
    if output_dir:
        self._docs_dir = Path(output_dir)

    stats = {
        "total_files": 0,
        "total_symbols": 0,
        "total_changed_files": 0,
        "total_changed_symbols": 0,
        "total_docs_generated": 0,
        "total_unchanged_files": 0,
    }

    # Only regular files with a supported extension are candidates.
    files_to_process = [
        p for p in source_root.rglob("*")
        if p.is_file() and p.suffix in self.SUPPORTED_EXTENSIONS
    ]
    stats["total_files"] = len(files_to_process)

    changed_files_count = 0
    unchanged_files_count = 0

    for file_path in files_to_process:
        # Hash-based incremental check: skip files whose content is unchanged.
        if not self._should_process_file(file_path, force):
            unchanged_files_count += 1
            continue

        changed_files_count += 1
        try:
            source_code = file_path.read_text("utf-8")
            symbols = self._extractor.extract_symbols(source_code, file_path.suffix, str(file_path))

            if not symbols:
                logger.debug(f"No symbols found in {file_path}")
                continue

            logger.debug(f"Found {len(symbols)} symbols in {file_path}")
            stats["total_symbols"] += len(symbols)
            docs_generated_count = 0

            for symbol in symbols:
                # Generate documentation wrapped in symbol markers.
                doc_content = self._generate_markdown_for_symbol(symbol, source_code)

                # Doc path mirrors the source tree under the docs dir, as .md.
                relative_path = file_path.relative_to(source_root)
                doc_path = (self._docs_dir / relative_path).with_suffix(".md")
                doc_path.parent.mkdir(parents=True, exist_ok=True)

                # Persist symbol and its rendered doc.
                self.store.save_symbol(symbol, str(doc_path), doc_content)
                docs_generated_count += 1

            stats["total_docs_generated"] += docs_generated_count
            stats["total_changed_symbols"] += len(symbols)

            # Record the new content hash so the next run can skip this file.
            content_hash = self._calculate_file_hash(file_path)
            self.store.update_file_stats(str(file_path), len(symbols), content_hash)
            logger.debug(f"Generated docs for {len(symbols)} symbols in {file_path}")

        except Exception as e:
            logger.error(f"Error processing file {file_path}: {e}")
            raise StorageError(f"Failed to process {file_path}") from e

    stats["total_changed_files"] = changed_files_count
    stats["total_unchanged_files"] = unchanged_files_count

    logger.info(f"Generation complete. Stats: {stats}")
    return stats
|
||||
|
||||
def close(self) -> None:
    """Release resources by closing the underlying store connection."""
    self.store.close()
|
||||
|
||||
@@ -40,12 +40,13 @@ class MockMarkdownGenerator:
|
||||
|
||||
def generate(self, symbol: DeepWikiSymbol, source_code: str) -> str:
|
||||
"""Generate mock Markdown documentation."""
|
||||
return f"""{SYMBOL_START_TEMPLATE.format(name=symbol.name, type=symbol.symbol_type)}
|
||||
start_line, end_line = symbol.line_range
|
||||
return f"""{SYMBOL_START_TEMPLATE.format(name=symbol.name, type=symbol.type)}
|
||||
|
||||
## `{symbol.name}`
|
||||
|
||||
**Type**: {symbol.symbol_type}
|
||||
**Location**: `{symbol.source_file}:{symbol.line_start}-{symbol.line_end}`
|
||||
**Type**: {symbol.type}
|
||||
**Location**: `{symbol.source_file}:{start_line}-{end_line}`
|
||||
|
||||
```{symbol.source_file.split('.')[-1] if '.' in symbol.source_file else 'text'}
|
||||
{source_code}
|
||||
@@ -190,12 +191,11 @@ class DeepWikiGenerator:
|
||||
# Create symbol record
|
||||
symbol = DeepWikiSymbol(
|
||||
name=sym["name"],
|
||||
symbol_type=sym["type"],
|
||||
type=sym["type"],
|
||||
source_file=str(file_path),
|
||||
doc_file=f".deepwiki/{file_path.stem}.md",
|
||||
anchor=f"#{sym['name'].lower()}",
|
||||
line_start=sym["line_start"],
|
||||
line_end=sym["line_end"],
|
||||
line_range=(sym["line_start"], sym["line_end"]),
|
||||
)
|
||||
|
||||
# Generate markdown
|
||||
@@ -205,8 +205,13 @@ class DeepWikiGenerator:
|
||||
self.store.add_symbol(symbol)
|
||||
docs_generated += 1
|
||||
|
||||
# Update file hash
|
||||
self.store.update_file_hash(str(file_path), current_hash)
|
||||
# Track file hash + metadata for incremental updates and staleness checks.
|
||||
self.store.add_file(
|
||||
file_path=str(file_path),
|
||||
content_hash=current_hash,
|
||||
symbols_count=len(raw_symbols),
|
||||
docs_generated=docs_generated > 0,
|
||||
)
|
||||
|
||||
logger.info(f"Generated docs for {docs_generated} symbols in {file_path}")
|
||||
return {
|
||||
|
||||
Reference in New Issue
Block a user