mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-02-05 01:50:27 +08:00
* feat(security): Secure dashboard server by default ## Solution Summary - Solution-ID: SOL-DSC-002-1 - Issue-ID: DSC-002 ## Tasks Completed - [T1] JWT token manager (24h expiry, persisted secret/token) - [T2] API auth middleware + localhost token endpoint - [T3] Default bind 127.0.0.1, add --host with warning - [T4] Localhost-only CORS with credentials + Vary - [T5] SECURITY.md documentation + README link ## Verification - npm run build - npm test -- ccw/tests/token-manager.test.ts ccw/tests/middleware.test.ts ccw/tests/server-auth.integration.test.ts ccw/tests/server.test.ts ccw/tests/cors.test.ts * fix(security): Prevent command injection in Windows spawn() ## Solution Summary - **Solution-ID**: SOL-DSC-001-1 - **Issue-ID**: DSC-001 - **Risk/Impact/Complexity**: high/high/medium ## Tasks Completed - [T1] Create Windows shell escape utility - [T2] Escape cli-executor spawn() args on Windows - [T3] Add command injection regression tests ## Files Modified - ccw/src/utils/shell-escape.ts - ccw/src/tools/cli-executor.ts - ccw/tests/shell-escape.test.ts - ccw/tests/security/command-injection.test.ts ## Verification - npm run build - npm test -- ccw/tests/shell-escape.test.ts ccw/tests/security/command-injection.test.ts * fix(security): Harden path validation (DSC-005) ## Solution Summary - Solution-ID: SOL-DSC-005-1 - Issue-ID: DSC-005 ## Tasks Completed - T1: Refactor path validation to pre-resolution checking - T2: Implement allowlist-based path validation - T3: Add path validation to API routes - T4: Add path security regression tests ## Files Modified - ccw/src/utils/path-resolver.ts - ccw/src/utils/path-validator.ts - ccw/src/core/routes/graph-routes.ts - ccw/src/core/routes/files-routes.ts - ccw/src/core/routes/skills-routes.ts - ccw/tests/path-resolver.test.ts - ccw/tests/graph-routes.test.ts - ccw/tests/files-routes.test.ts - ccw/tests/skills-routes.test.ts - ccw/tests/security/path-traversal.test.ts ## Verification - npm run build - npm test -- 
path-resolver.test.ts - npm test -- path-validator.test.ts - npm test -- graph-routes.test.ts - npm test -- files-routes.test.ts - npm test -- skills-routes.test.ts - npm test -- ccw/tests/security/path-traversal.test.ts * fix(security): Prevent credential leakage (DSC-004) ## Solution Summary - Solution-ID: SOL-DSC-004-1 - Issue-ID: DSC-004 ## Tasks Completed - T1: Create credential handling security tests - T2: Add log sanitization tests - T3: Add env var leakage prevention tests - T4: Add secure storage tests ## Files Modified - ccw/src/config/litellm-api-config-manager.ts - ccw/src/core/routes/litellm-api-routes.ts - ccw/tests/security/credential-handling.test.ts ## Verification - npm run build - node --experimental-strip-types --test ccw/tests/security/credential-handling.test.ts * test(ranking): expand normalize_weights edge case coverage (ISS-1766920108814-0) ## Solution Summary - Solution-ID: SOL-20251228113607 - Issue-ID: ISS-1766920108814-0 ## Tasks Completed - T1: Fix NaN and invalid total handling in normalize_weights - T2: Add unit tests for NaN edge cases in normalize_weights ## Files Modified - codex-lens/tests/test_rrf_fusion.py ## Verification - python -m pytest codex-lens/tests/test_rrf_fusion.py::TestNormalizeBM25Score -v - python -m pytest codex-lens/tests/test_rrf_fusion.py -v -k normalize - python -m pytest codex-lens/tests/test_rrf_fusion.py::TestReciprocalRankFusion::test_weight_normalization codex-lens/tests/test_cli_hybrid_search.py::TestCLIHybridSearch::test_weights_normalization -v * feat(security): Add CSRF protection and tighten CORS (DSC-006) ## Solution Summary - Solution-ID: SOL-DSC-006-1 - Issue-ID: DSC-006 - Risk/Impact/Complexity: high/high/medium ## Tasks Completed - T1: Create CSRF token generation system - T2: Add CSRF token endpoints - T3: Implement CSRF validation middleware - T4: Restrict CORS to trusted origins - T5: Add CSRF security tests ## Files Modified - ccw/src/core/auth/csrf-manager.ts - 
ccw/src/core/auth/csrf-middleware.ts - ccw/src/core/routes/auth-routes.ts - ccw/src/core/server.ts - ccw/tests/csrf-manager.test.ts - ccw/tests/auth-routes.test.ts - ccw/tests/csrf-middleware.test.ts - ccw/tests/security/csrf.test.ts ## Verification - npm run build - node --experimental-strip-types --test ccw/tests/csrf-manager.test.ts - node --experimental-strip-types --test ccw/tests/auth-routes.test.ts - node --experimental-strip-types --test ccw/tests/csrf-middleware.test.ts - node --experimental-strip-types --test ccw/tests/cors.test.ts - node --experimental-strip-types --test ccw/tests/security/csrf.test.ts * fix(cli-executor): prevent stale SIGKILL timeouts ## Solution Summary - Solution-ID: SOL-DSC-007-1 - Issue-ID: DSC-007 - Risk/Impact/Complexity: low/low/low ## Tasks Completed - [T1] Store timeout handle in killCurrentCliProcess ## Files Modified - ccw/src/tools/cli-executor.ts - ccw/tests/cli-executor-kill.test.ts ## Verification - node --experimental-strip-types --test ccw/tests/cli-executor-kill.test.ts * fix(cli-executor): enhance merge validation guards ## Solution Summary - Solution-ID: SOL-DSC-008-1 - Issue-ID: DSC-008 - Risk/Impact/Complexity: low/low/low ## Tasks Completed - [T1] Enhance sourceConversations array validation ## Files Modified - ccw/src/tools/cli-executor.ts - ccw/tests/cli-executor-merge-validation.test.ts ## Verification - node --experimental-strip-types --test ccw/tests/cli-executor-merge-validation.test.ts * refactor(core): remove @ts-nocheck from core routes ## Solution Summary - Solution-ID: SOL-DSC-003-1 - Issue-ID: DSC-003 - Queue-ID: QUE-20260106-164500 - Item-ID: S-9 ## Tasks Completed - T1: Create shared RouteContext type definition - T2: Remove @ts-nocheck from small route files - T3: Remove @ts-nocheck from medium route files - T4: Remove @ts-nocheck from large route files - T5: Remove @ts-nocheck from remaining core files ## Files Modified - ccw/src/core/dashboard-generator-patch.ts - 
ccw/src/core/dashboard-generator.ts - ccw/src/core/routes/ccw-routes.ts - ccw/src/core/routes/claude-routes.ts - ccw/src/core/routes/cli-routes.ts - ccw/src/core/routes/codexlens-routes.ts - ccw/src/core/routes/discovery-routes.ts - ccw/src/core/routes/files-routes.ts - ccw/src/core/routes/graph-routes.ts - ccw/src/core/routes/help-routes.ts - ccw/src/core/routes/hooks-routes.ts - ccw/src/core/routes/issue-routes.ts - ccw/src/core/routes/litellm-api-routes.ts - ccw/src/core/routes/litellm-routes.ts - ccw/src/core/routes/mcp-routes.ts - ccw/src/core/routes/mcp-routes.ts.backup - ccw/src/core/routes/mcp-templates-db.ts - ccw/src/core/routes/nav-status-routes.ts - ccw/src/core/routes/rules-routes.ts - ccw/src/core/routes/session-routes.ts - ccw/src/core/routes/skills-routes.ts - ccw/src/core/routes/status-routes.ts - ccw/src/core/routes/system-routes.ts - ccw/src/core/routes/types.ts - ccw/src/core/server.ts - ccw/src/core/websocket.ts ## Verification - npm run build - npm test * refactor: split cli-executor and codexlens routes into modules ## Solution Summary - Solution-ID: SOL-DSC-012-1 - Issue-ID: DSC-012 - Risk/Impact/Complexity: medium/medium/high ## Tasks Completed - [T1] Extract execution orchestration from cli-executor.ts (Refactor ccw/src/tools) - [T2] Extract route handlers from codexlens-routes.ts (Refactor ccw/src/core/routes) - [T3] Extract prompt concatenation logic from cli-executor (Refactor ccw/src/tools) - [T4] Document refactored module architecture (Docs) ## Files Modified - ccw/src/tools/cli-executor.ts - ccw/src/tools/cli-executor-core.ts - ccw/src/tools/cli-executor-utils.ts - ccw/src/tools/cli-executor-state.ts - ccw/src/tools/cli-prompt-builder.ts - ccw/src/tools/README.md - ccw/src/core/routes/codexlens-routes.ts - ccw/src/core/routes/codexlens/config-handlers.ts - ccw/src/core/routes/codexlens/index-handlers.ts - ccw/src/core/routes/codexlens/semantic-handlers.ts - ccw/src/core/routes/codexlens/watcher-handlers.ts - 
ccw/src/core/routes/codexlens/utils.ts - ccw/src/core/routes/codexlens/README.md ## Verification - npm run build - npm test * test(issue): Add comprehensive issue command tests ## Solution Summary - **Solution-ID**: SOL-DSC-009-1 - **Issue-ID**: DSC-009 - **Risk/Impact/Complexity**: low/high/medium ## Tasks Completed - [T1] Create issue command test file structure: Create isolated test harness - [T2] Add JSONL read/write operation tests: Verify JSONL correctness and errors - [T3] Add issue lifecycle tests: Verify status transitions and timestamps - [T4] Add solution binding tests: Verify binding flows and error cases - [T5] Add queue formation tests: Verify queue creation, IDs, and DAG behavior - [T6] Add queue execution tests: Verify next/done/retry and status sync ## Files Modified - ccw/src/commands/issue.ts - ccw/tests/issue-command.test.ts ## Verification - node --experimental-strip-types --test ccw/tests/issue-command.test.ts * test(routes): Add integration tests for route modules ## Solution Summary - Solution-ID: SOL-DSC-010-1 - Issue-ID: DSC-010 - Queue-ID: QUE-20260106-164500 ## Tasks Completed - [T1] Add tests for ccw-routes.ts - [T2] Add tests for files-routes.ts - [T3] Add tests for claude-routes.ts (includes Windows path fix for create) - [T4] Add tests for issue-routes.ts - [T5] Add tests for help-routes.ts (avoid hanging watchers) - [T6] Add tests for nav-status-routes.ts - [T7] Add tests for hooks/graph/rules/skills/litellm-api routes ## Files Modified - ccw/src/core/routes/claude-routes.ts - ccw/src/core/routes/help-routes.ts - ccw/tests/integration/ccw-routes.test.ts - ccw/tests/integration/claude-routes.test.ts - ccw/tests/integration/files-routes.test.ts - ccw/tests/integration/issue-routes.test.ts - ccw/tests/integration/help-routes.test.ts - ccw/tests/integration/nav-status-routes.test.ts - ccw/tests/integration/hooks-routes.test.ts - ccw/tests/integration/graph-routes.test.ts - ccw/tests/integration/rules-routes.test.ts - 
ccw/tests/integration/skills-routes.test.ts - ccw/tests/integration/litellm-api-routes.test.ts ## Verification - node --experimental-strip-types --test ccw/tests/integration/ccw-routes.test.ts - node --experimental-strip-types --test ccw/tests/integration/files-routes.test.ts - node --experimental-strip-types --test ccw/tests/integration/claude-routes.test.ts - node --experimental-strip-types --test ccw/tests/integration/issue-routes.test.ts - node --experimental-strip-types --test ccw/tests/integration/help-routes.test.ts - node --experimental-strip-types --test ccw/tests/integration/nav-status-routes.test.ts - node --experimental-strip-types --test ccw/tests/integration/hooks-routes.test.ts - node --experimental-strip-types --test ccw/tests/integration/graph-routes.test.ts - node --experimental-strip-types --test ccw/tests/integration/rules-routes.test.ts - node --experimental-strip-types --test ccw/tests/integration/skills-routes.test.ts - node --experimental-strip-types --test ccw/tests/integration/litellm-api-routes.test.ts * refactor(core): Switch cache and lite scanning to async fs ## Solution Summary - Solution-ID: SOL-DSC-013-1 - Issue-ID: DSC-013 - Queue-ID: QUE-20260106-164500 ## Tasks Completed - [T1] Convert cache-manager.ts to async file operations - [T2] Convert lite-scanner.ts to async file operations - [T3] Update cache-manager call sites to await async API - [T4] Update lite-scanner call sites to await async API ## Files Modified - ccw/src/core/cache-manager.ts - ccw/src/core/lite-scanner.ts - ccw/src/core/data-aggregator.ts ## Verification - npm run build - npm test * fix(exec): Add timeout protection for execSync ## Solution Summary - Solution-ID: SOL-DSC-014-1 - Issue-ID: DSC-014 - Queue-ID: QUE-20260106-164500 ## Tasks Completed - [T1] Add timeout to execSync calls in python-utils.ts - [T2] Add timeout to execSync calls in detect-changed-modules.ts - [T3] Add timeout to execSync calls in claude-freshness.ts - [T4] Add timeout to execSync calls 
in issue.ts - [T5] Consolidate execSync timeout constants and audit coverage ## Files Modified - ccw/src/utils/exec-constants.ts - ccw/src/utils/python-utils.ts - ccw/src/tools/detect-changed-modules.ts - ccw/src/core/claude-freshness.ts - ccw/src/commands/issue.ts - ccw/src/tools/smart-search.ts - ccw/src/tools/codex-lens.ts - ccw/src/core/routes/codexlens/config-handlers.ts ## Verification - npm run build - npm test - node --experimental-strip-types --test ccw/tests/issue-command.test.ts * feat(cli): Add progress spinner with elapsed time for long-running operations ## Solution Summary - Solution-ID: SOL-DSC-015-1 - Issue-ID: DSC-015 - Queue-Item: S-15 - Risk/Impact/Complexity: low/medium/low ## Tasks Completed - [T1] Add progress spinner to CLI execution: Update ccw/src/commands/cli.ts ## Files Modified - ccw/src/commands/cli.ts - ccw/tests/cli-command.test.ts ## Verification - node --experimental-strip-types --test ccw/tests/cli-command.test.ts - node --experimental-strip-types --test ccw/tests/cli-executor-kill.test.ts - node --experimental-strip-types --test ccw/tests/cli-executor-merge-validation.test.ts * fix(cli): Move full output hint immediately after truncation notice ## Solution Summary - Solution-ID: SOL-DSC-016-1 - Issue-ID: DSC-016 - Queue-Item: S-16 - Risk/Impact/Complexity: low/high/low ## Tasks Completed - [T1] Relocate output hint after truncation: Update ccw/src/commands/cli.ts ## Files Modified - ccw/src/commands/cli.ts - ccw/tests/cli-command.test.ts ## Verification - npm run build - node --experimental-strip-types --test ccw/tests/cli-command.test.ts * feat(cli): Add confirmation prompts for destructive operations ## Solution Summary - Solution-ID: SOL-DSC-017-1 - Issue-ID: DSC-017 - Queue-Item: S-17 - Risk/Impact/Complexity: low/high/low ## Tasks Completed - [T1] Add confirmation to storage clean operations: Update ccw/src/commands/cli.ts - [T2] Add confirmation to issue queue delete: Update ccw/src/commands/issue.ts ## Files Modified - 
ccw/src/commands/cli.ts - ccw/src/commands/issue.ts - ccw/tests/cli-command.test.ts - ccw/tests/issue-command.test.ts ## Verification - npm run build - node --experimental-strip-types --test ccw/tests/cli-command.test.ts - node --experimental-strip-types --test ccw/tests/issue-command.test.ts * feat(cli): Improve multi-line prompt guidance ## Solution Summary - Solution-ID: SOL-DSC-018-1 - Issue-ID: DSC-018 - Queue-Item: S-18 - Risk/Impact/Complexity: low/medium/low ## Tasks Completed - [T1] Update CLI help to emphasize --file option: Update ccw/src/commands/cli.ts - [T2] Add inline hint for multi-line detection: Update ccw/src/commands/cli.ts ## Files Modified - ccw/src/commands/cli.ts - ccw/tests/cli-command.test.ts ## Verification - npm run build - node --experimental-strip-types --test ccw/tests/cli-command.test.ts --------- Co-authored-by: catlog22 <catlog22@github.com>
668 lines
22 KiB
TypeScript
import { glob } from 'glob';
|
|
import { readFileSync, existsSync } from 'fs';
|
|
import { join, basename } from 'path';
|
|
import { scanLiteTasks } from './lite-scanner.js';
|
|
import { createDashboardCache } from './cache-manager.js';
|
|
|
|
/**
 * Fully processed session record as rendered on the dashboard,
 * produced by processSession() from a raw SessionInput.
 */
interface SessionData {
  session_id: string;
  /** Display name; falls back to description/session id when no project is set. */
  project: string;
  /** e.g. 'active' or 'archived' (defaulted from the isActive flag). */
  status: string;
  /** Session type: 'workflow', 'review', 'test', 'docs' (defaults to 'workflow'). */
  type: string;
  /** Original workflow_type, kept for reference; null when absent. */
  workflow_type: string | null;
  /** Raw ISO timestamp string; the frontend handles formatting. */
  created_at: string | null;
  /** Raw ISO timestamp string; null for non-archived sessions. */
  archived_at: string | null;
  /** Filesystem path of the session directory. */
  path: string;
  /** Tasks parsed from IMPL-*.json files under <path>/.task. */
  tasks: TaskData[];
  taskCount: number;
  /** True when a <path>/.review directory exists. */
  hasReview: boolean;
  reviewSummary: ReviewSummary | null;
  /** Populated only for sessions of type 'review'. */
  reviewDimensions: DimensionData[];
}
|
|
|
|
/** A single task parsed from an IMPL-*.json file in a session's .task directory. */
interface TaskData {
  task_id: string;
  title: string;
  /** Defaults to 'pending' for active sessions, 'completed' for archived ones. */
  status: string;
  /** Taken from meta.type in the task file; defaults to 'task'. */
  type: string;
  // The three fields below are only populated for active sessions.
  meta?: Record<string, unknown>;
  context?: Record<string, unknown>;
  flow_control?: Record<string, unknown>;
}
|
|
|
|
/** Condensed review state loaded from .review/review-state.json. */
interface ReviewSummary {
  /** Defaults to 'unknown' when the state file omits it. */
  phase: string;
  /** Map of severity name -> count, as stored in the state file. */
  severityDistribution: Record<string, number>;
  /** At most the first three critical files from the state file. */
  criticalFiles: string[];
  /** Defaults to 'in_progress' when the state file omits it. */
  status: string;
}
|
|
|
|
/** One review dimension, loaded from a .review/dimensions/*.json file. */
interface DimensionData {
  /** File basename without the .json extension. */
  name: string;
  findings: Finding[];
  summary: unknown | null;
  /** Defaults to 'completed' when the file does not specify one. */
  status: string;
}
|
|
|
|
/** A single review finding; the shape is file-defined beyond the severity. */
interface Finding {
  /** Expected values: critical/high/medium/low; treated as 'low' when absent. */
  severity?: string;
  [key: string]: unknown;
}
|
|
|
|
/**
 * Raw session record produced by the session scanner. Field names vary
 * between legacy data and SessionMetadata, hence the many optionals.
 */
interface SessionInput {
  session_id?: string;
  id?: string; // alternative identifier field, used as a fallback
  project?: string;
  description?: string; // used as a project-name fallback
  status?: string;
  type?: string;
  workflow_type?: string | null;
  created_at?: string | null; // For backward compatibility
  created?: string; // From SessionMetadata
  updated?: string; // From SessionMetadata
  archived_at?: string | null;
  /** Filesystem path of the session directory (always present). */
  path: string;
}
|
|
|
|
/** Output of the session scanner consumed by aggregateData(). */
interface ScanSessionsResult {
  active: SessionInput[];
  archived: SessionInput[];
  /** True when at least one session has review data worth aggregating. */
  hasReviewData: boolean;
}
|
|
|
|
/** Complete payload consumed by the dashboard frontend. */
interface DashboardData {
  /** ISO timestamp of when this payload was generated. */
  generatedAt: string;
  activeSessions: SessionData[];
  archivedSessions: SessionData[];
  /** Lite-mode tasks from scanLiteTasks(). */
  liteTasks: {
    litePlan: unknown[];
    liteFix: unknown[];
  };
  /** Aggregated review findings; null when no session has review data. */
  reviewData: ReviewData | null;
  /** Loaded from project-tech/guidelines files (or legacy project.json); null if missing. */
  projectOverview: ProjectOverview | null;
  /** Roll-up counters computed while processing sessions. */
  statistics: {
    totalSessions: number;
    activeSessions: number;
    totalTasks: number;
    completedTasks: number;
    reviewFindings: number;
    litePlanCount: number;
    liteFixCount: number;
  };
}
|
|
|
|
/** Review findings aggregated across all active sessions. */
interface ReviewData {
  totalFindings: number;
  /** Counts per known severity; unrecognized severities are not counted here. */
  severityDistribution: {
    critical: number;
    high: number;
    medium: number;
    low: number;
  };
  /** Per-dimension finding count plus the session ids contributing to it. */
  dimensionSummary: Record<string, { count: number; sessions: string[] }>;
  sessions: SessionReviewData[];
}
|
|
|
|
/** Review detail for a single session. */
interface SessionReviewData {
  session_id: string;
  /** Parsed review-progress.json, or null when absent/invalid. */
  progress: unknown | null;
  dimensions: DimensionData[];
  /** All findings across dimensions, each tagged with its dimension name. */
  findings: Array<Finding & { dimension: string }>;
}
|
|
|
|
/** Contents of project-guidelines.json. */
interface ProjectGuidelines {
  /** Project conventions, grouped by area. */
  conventions: {
    coding_style: string[];
    naming_patterns: string[];
    file_structure: string[];
    documentation: string[];
  };
  /** Hard constraints the project must respect. */
  constraints: {
    architecture: string[];
    tech_stack: string[];
    performance: string[];
    security: string[];
  };
  quality_rules: Array<{ rule: string; scope: string; enforced_by?: string }>;
  /** Dated insights accumulated from past sessions. */
  learnings: Array<{
    date: string;
    session_id?: string;
    insight: string;
    context?: string;
    category?: string;
  }>;
  /** Bookkeeping written by whatever tool maintains the file. */
  _metadata?: {
    created_at: string;
    updated_at?: string;
    version: string;
  };
}
|
|
|
|
/**
 * Project overview assembled by loadProjectOverview() from
 * project-tech.json (or legacy project.json) plus project-guidelines.json.
 */
interface ProjectOverview {
  projectName: string;
  description: string;
  /** Raw timestamp string; null when not recorded. */
  initializedAt: string | null;
  technologyStack: {
    languages: string[];
    frameworks: string[];
    build_tools: string[];
    test_frameworks: string[];
  };
  architecture: {
    style: string;
    layers: string[];
    patterns: string[];
  };
  keyComponents: string[];
  features: unknown[];
  /** Development entries bucketed by change category. */
  developmentIndex: {
    feature: unknown[];
    enhancement: unknown[];
    bugfix: unknown[];
    refactor: unknown[];
    docs: unknown[];
  };
  statistics: {
    total_features: number;
    total_sessions: number;
    last_updated: string | null;
  };
  metadata: {
    initialized_by: string;
    analysis_timestamp: string | null;
    analysis_mode: string;
  };
  /** Parsed project-guidelines.json; null when unavailable. */
  guidelines: ProjectGuidelines | null;
}
|
|
|
|
/**
|
|
* Aggregate all data for dashboard rendering (with caching)
|
|
* @param sessions - Scanned sessions from session-scanner
|
|
* @param workflowDir - Path to .workflow directory
|
|
* @returns Aggregated dashboard data
|
|
*/
|
|
export async function aggregateData(sessions: ScanSessionsResult, workflowDir: string): Promise<DashboardData> {
|
|
// Initialize cache manager
|
|
const cache = createDashboardCache(workflowDir);
|
|
|
|
// Prepare paths to watch for changes (includes both new dual files and legacy)
|
|
const watchPaths = [
|
|
join(workflowDir, 'active'),
|
|
join(workflowDir, 'archives'),
|
|
join(workflowDir, 'project-tech.json'),
|
|
join(workflowDir, 'project-guidelines.json'),
|
|
join(workflowDir, 'project.json'), // Legacy support
|
|
...sessions.active.map(s => s.path),
|
|
...sessions.archived.map(s => s.path)
|
|
];
|
|
|
|
// Check cache first
|
|
const cachedData = await cache.get(watchPaths);
|
|
if (cachedData !== null) {
|
|
console.log('Using cached dashboard data');
|
|
return cachedData;
|
|
}
|
|
|
|
console.log('Cache miss - regenerating dashboard data');
|
|
|
|
const data: DashboardData = {
|
|
generatedAt: new Date().toISOString(),
|
|
activeSessions: [],
|
|
archivedSessions: [],
|
|
liteTasks: {
|
|
litePlan: [],
|
|
liteFix: []
|
|
},
|
|
reviewData: null,
|
|
projectOverview: null,
|
|
statistics: {
|
|
totalSessions: 0,
|
|
activeSessions: 0,
|
|
totalTasks: 0,
|
|
completedTasks: 0,
|
|
reviewFindings: 0,
|
|
litePlanCount: 0,
|
|
liteFixCount: 0
|
|
}
|
|
};
|
|
|
|
// Process active sessions
|
|
for (const session of sessions.active) {
|
|
const sessionData = await processSession(session, true);
|
|
data.activeSessions.push(sessionData);
|
|
data.statistics.totalTasks += sessionData.tasks.length;
|
|
data.statistics.completedTasks += sessionData.tasks.filter(t => t.status === 'completed').length;
|
|
}
|
|
|
|
// Process archived sessions
|
|
for (const session of sessions.archived) {
|
|
const sessionData = await processSession(session, false);
|
|
data.archivedSessions.push(sessionData);
|
|
data.statistics.totalTasks += sessionData.taskCount || 0;
|
|
data.statistics.completedTasks += sessionData.taskCount || 0;
|
|
}
|
|
|
|
// Aggregate review data if present
|
|
if (sessions.hasReviewData) {
|
|
data.reviewData = await aggregateReviewData(sessions.active);
|
|
data.statistics.reviewFindings = data.reviewData.totalFindings;
|
|
}
|
|
|
|
data.statistics.totalSessions = sessions.active.length + sessions.archived.length;
|
|
data.statistics.activeSessions = sessions.active.length;
|
|
|
|
// Scan and include lite tasks
|
|
try {
|
|
const liteTasks = await scanLiteTasks(workflowDir);
|
|
data.liteTasks = liteTasks;
|
|
data.statistics.litePlanCount = liteTasks.litePlan.length;
|
|
data.statistics.liteFixCount = liteTasks.liteFix.length;
|
|
} catch (err) {
|
|
console.error('Error scanning lite tasks:', (err as Error).message);
|
|
}
|
|
|
|
// Load project overview from project.json
|
|
try {
|
|
data.projectOverview = loadProjectOverview(workflowDir);
|
|
} catch (err) {
|
|
console.error('Error loading project overview:', (err as Error).message);
|
|
}
|
|
|
|
// Store in cache before returning
|
|
await cache.set(data, watchPaths);
|
|
|
|
return data;
|
|
}
|
|
|
|
/**
|
|
* Process a single session, loading tasks and review info
|
|
* @param session - Session object from scanner
|
|
* @param isActive - Whether session is active
|
|
* @returns Processed session data
|
|
*/
|
|
async function processSession(session: SessionInput, isActive: boolean): Promise<SessionData> {
|
|
const result: SessionData = {
|
|
session_id: session.session_id || session.id || '',
|
|
project: session.project || session.description || session.session_id || session.id || '',
|
|
status: session.status || (isActive ? 'active' : 'archived'),
|
|
type: session.type || 'workflow', // Session type (workflow, review, test, docs)
|
|
workflow_type: session.workflow_type || null, // Original workflow_type for reference
|
|
created_at: session.created || session.created_at || null, // Prefer 'created' from SessionMetadata, fallback to 'created_at'
|
|
archived_at: session.archived_at || null, // Raw ISO string - let frontend format
|
|
path: session.path,
|
|
tasks: [],
|
|
taskCount: 0,
|
|
hasReview: false,
|
|
reviewSummary: null,
|
|
reviewDimensions: []
|
|
};
|
|
|
|
// Load tasks for active sessions (full details)
|
|
if (isActive) {
|
|
const taskDir = join(session.path, '.task');
|
|
if (existsSync(taskDir)) {
|
|
const taskFiles = await safeGlob('IMPL-*.json', taskDir);
|
|
for (const taskFile of taskFiles) {
|
|
try {
|
|
const taskData = JSON.parse(readFileSync(join(taskDir, taskFile), 'utf8')) as Record<string, unknown>;
|
|
result.tasks.push({
|
|
task_id: (taskData.id as string) || basename(taskFile, '.json'),
|
|
title: (taskData.title as string) || 'Untitled Task',
|
|
status: (taskData.status as string) || 'pending',
|
|
type: ((taskData.meta as Record<string, unknown>)?.type as string) || 'task',
|
|
meta: (taskData.meta as Record<string, unknown>) || {},
|
|
context: (taskData.context as Record<string, unknown>) || {},
|
|
flow_control: (taskData.flow_control as Record<string, unknown>) || {}
|
|
});
|
|
} catch {
|
|
// Skip invalid task files
|
|
}
|
|
}
|
|
// Sort tasks by ID
|
|
result.tasks.sort((a, b) => sortTaskIds(a.task_id, b.task_id));
|
|
}
|
|
result.taskCount = result.tasks.length;
|
|
|
|
// Check for review data
|
|
const reviewDir = join(session.path, '.review');
|
|
if (existsSync(reviewDir)) {
|
|
result.hasReview = true;
|
|
result.reviewSummary = loadReviewSummary(reviewDir);
|
|
// Load dimension data for review sessions
|
|
if (session.type === 'review') {
|
|
result.reviewDimensions = await loadDimensionData(reviewDir);
|
|
}
|
|
}
|
|
} else {
|
|
// For archived, also load tasks (same as active)
|
|
const taskDir = join(session.path, '.task');
|
|
if (existsSync(taskDir)) {
|
|
const taskFiles = await safeGlob('IMPL-*.json', taskDir);
|
|
for (const taskFile of taskFiles) {
|
|
try {
|
|
const taskData = JSON.parse(readFileSync(join(taskDir, taskFile), 'utf8')) as Record<string, unknown>;
|
|
result.tasks.push({
|
|
task_id: (taskData.id as string) || basename(taskFile, '.json'),
|
|
title: (taskData.title as string) || 'Untitled Task',
|
|
status: (taskData.status as string) || 'completed', // Archived tasks are usually completed
|
|
type: ((taskData.meta as Record<string, unknown>)?.type as string) || 'task'
|
|
});
|
|
} catch {
|
|
// Skip invalid task files
|
|
}
|
|
}
|
|
// Sort tasks by ID
|
|
result.tasks.sort((a, b) => sortTaskIds(a.task_id, b.task_id));
|
|
result.taskCount = result.tasks.length;
|
|
}
|
|
|
|
// Check for review data in archived sessions too
|
|
const reviewDir = join(session.path, '.review');
|
|
if (existsSync(reviewDir)) {
|
|
result.hasReview = true;
|
|
result.reviewSummary = loadReviewSummary(reviewDir);
|
|
// Load dimension data for review sessions
|
|
if (session.type === 'review') {
|
|
result.reviewDimensions = await loadDimensionData(reviewDir);
|
|
}
|
|
}
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
/**
|
|
* Aggregate review data from all active sessions with reviews
|
|
* @param activeSessions - Active session objects
|
|
* @returns Aggregated review data
|
|
*/
|
|
async function aggregateReviewData(activeSessions: SessionInput[]): Promise<ReviewData> {
|
|
const reviewData: ReviewData = {
|
|
totalFindings: 0,
|
|
severityDistribution: { critical: 0, high: 0, medium: 0, low: 0 },
|
|
dimensionSummary: {},
|
|
sessions: []
|
|
};
|
|
|
|
for (const session of activeSessions) {
|
|
const reviewDir = join(session.path, '.review');
|
|
if (!existsSync(reviewDir)) continue;
|
|
|
|
const reviewProgress = loadReviewProgress(reviewDir);
|
|
const dimensionData = await loadDimensionData(reviewDir);
|
|
|
|
if (reviewProgress || dimensionData.length > 0) {
|
|
const sessionReview: SessionReviewData = {
|
|
session_id: session.session_id || session.id || '',
|
|
progress: reviewProgress,
|
|
dimensions: dimensionData,
|
|
findings: []
|
|
};
|
|
|
|
// Collect and count findings
|
|
for (const dim of dimensionData) {
|
|
if (dim.findings && Array.isArray(dim.findings)) {
|
|
for (const finding of dim.findings) {
|
|
const severity = (finding.severity || 'low').toLowerCase();
|
|
if (reviewData.severityDistribution.hasOwnProperty(severity)) {
|
|
reviewData.severityDistribution[severity as keyof typeof reviewData.severityDistribution]++;
|
|
}
|
|
reviewData.totalFindings++;
|
|
sessionReview.findings.push({
|
|
...finding,
|
|
dimension: dim.name
|
|
});
|
|
}
|
|
}
|
|
|
|
// Track dimension summary
|
|
if (!reviewData.dimensionSummary[dim.name]) {
|
|
reviewData.dimensionSummary[dim.name] = { count: 0, sessions: [] };
|
|
}
|
|
reviewData.dimensionSummary[dim.name].count += dim.findings?.length || 0;
|
|
reviewData.dimensionSummary[dim.name].sessions.push(session.session_id || session.id || '');
|
|
}
|
|
|
|
reviewData.sessions.push(sessionReview);
|
|
}
|
|
}
|
|
|
|
return reviewData;
|
|
}
|
|
|
|
/**
|
|
* Load review progress from review-progress.json
|
|
* @param reviewDir - Path to .review directory
|
|
* @returns Review progress data or null
|
|
*/
|
|
function loadReviewProgress(reviewDir: string): unknown | null {
|
|
const progressFile = join(reviewDir, 'review-progress.json');
|
|
if (!existsSync(progressFile)) return null;
|
|
try {
|
|
return JSON.parse(readFileSync(progressFile, 'utf8'));
|
|
} catch {
|
|
return null;
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Load review summary from review-state.json
|
|
* @param reviewDir - Path to .review directory
|
|
* @returns Review summary or null
|
|
*/
|
|
function loadReviewSummary(reviewDir: string): ReviewSummary | null {
|
|
const stateFile = join(reviewDir, 'review-state.json');
|
|
if (!existsSync(stateFile)) return null;
|
|
try {
|
|
const state = JSON.parse(readFileSync(stateFile, 'utf8')) as Record<string, unknown>;
|
|
return {
|
|
phase: (state.phase as string) || 'unknown',
|
|
severityDistribution: (state.severity_distribution as Record<string, number>) || {},
|
|
criticalFiles: ((state.critical_files as string[]) || []).slice(0, 3),
|
|
status: (state.status as string) || 'in_progress'
|
|
};
|
|
} catch {
|
|
return null;
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Load dimension data from .review/dimensions/
|
|
* @param reviewDir - Path to .review directory
|
|
* @returns Array of dimension data
|
|
*/
|
|
async function loadDimensionData(reviewDir: string): Promise<DimensionData[]> {
|
|
const dimensionsDir = join(reviewDir, 'dimensions');
|
|
if (!existsSync(dimensionsDir)) return [];
|
|
|
|
const dimensions: DimensionData[] = [];
|
|
const dimFiles = await safeGlob('*.json', dimensionsDir);
|
|
|
|
for (const file of dimFiles) {
|
|
try {
|
|
const data = JSON.parse(readFileSync(join(dimensionsDir, file), 'utf8'));
|
|
// Handle array structure: [ { findings: [...], summary: {...} } ]
|
|
let findings: Finding[] = [];
|
|
let summary: unknown | null = null;
|
|
let status = 'completed';
|
|
|
|
if (Array.isArray(data) && data.length > 0) {
|
|
const dimData = data[0] as Record<string, unknown>;
|
|
findings = (dimData.findings as Finding[]) || [];
|
|
summary = dimData.summary || null;
|
|
status = (dimData.status as string) || 'completed';
|
|
} else if ((data as Record<string, unknown>).findings) {
|
|
const dataObj = data as Record<string, unknown>;
|
|
findings = (dataObj.findings as Finding[]) || [];
|
|
summary = dataObj.summary || null;
|
|
status = (dataObj.status as string) || 'completed';
|
|
}
|
|
|
|
dimensions.push({
|
|
name: basename(file, '.json'),
|
|
findings: findings,
|
|
summary: summary,
|
|
status: status
|
|
});
|
|
} catch {
|
|
// Skip invalid dimension files
|
|
}
|
|
}
|
|
|
|
return dimensions;
|
|
}
|
|
|
|
/**
|
|
* Safe glob wrapper that returns empty array on error
|
|
* @param pattern - Glob pattern
|
|
* @param cwd - Current working directory
|
|
* @returns Array of matching file names
|
|
*/
|
|
async function safeGlob(pattern: string, cwd: string): Promise<string[]> {
|
|
try {
|
|
return await glob(pattern, { cwd, absolute: false });
|
|
} catch {
|
|
return [];
|
|
}
|
|
}
|
|
|
|
// formatDate removed - dates are now passed as raw ISO strings
|
|
// Frontend (dashboard.js) handles all date formatting
|
|
|
|
/**
|
|
* Sort task IDs numerically (IMPL-1, IMPL-2, IMPL-1.1, etc.)
|
|
* @param a - First task ID
|
|
* @param b - Second task ID
|
|
* @returns Comparison result
|
|
*/
|
|
function sortTaskIds(a: string, b: string): number {
|
|
const parseId = (id: string): [number, number] => {
|
|
const match = id.match(/IMPL-(\d+)(?:\.(\d+))?/);
|
|
if (!match) return [0, 0];
|
|
return [parseInt(match[1]), parseInt(match[2] || '0')];
|
|
};
|
|
const [a1, a2] = parseId(a);
|
|
const [b1, b2] = parseId(b);
|
|
return a1 - b1 || a2 - b2;
|
|
}
|
|
|
|
/**
 * Load project overview from project-tech.json and project-guidelines.json.
 * Supports the dual-file structure with backward compatibility for legacy
 * project.json: the legacy file is used only when project-tech.json is absent.
 * Missing or malformed optional fields degrade to empty/'Unknown' defaults
 * rather than failing the whole load; a malformed guidelines file is logged
 * and skipped while the overview itself still loads.
 * @param workflowDir - Path to .workflow directory
 * @returns Project overview data, or null if the project file is missing
 *   or cannot be parsed
 */
function loadProjectOverview(workflowDir: string): ProjectOverview | null {
  const techFile = join(workflowDir, 'project-tech.json');
  const guidelinesFile = join(workflowDir, 'project-guidelines.json');
  const legacyFile = join(workflowDir, 'project.json');

  // Check for new dual file structure first, fallback to legacy
  const useLegacy = !existsSync(techFile) && existsSync(legacyFile);
  const projectFile = useLegacy ? legacyFile : techFile;

  if (!existsSync(projectFile)) {
    console.log(`Project file not found at: ${projectFile}`);
    return null;
  }

  try {
    const fileContent = readFileSync(projectFile, 'utf8');
    // JSON.parse result is treated as an untyped bag; every field read
    // below is individually defaulted, so schema drift can't throw here.
    const projectData = JSON.parse(fileContent) as Record<string, unknown>;

    console.log(`Successfully loaded project overview: ${projectData.project_name || 'Unknown'} (${useLegacy ? 'legacy' : 'tech'})`);

    // Parse tech data (compatible with both legacy and new structure)
    const overview = projectData.overview as Record<string, unknown> | undefined;
    const technologyAnalysis = projectData.technology_analysis as Record<string, unknown> | undefined;
    const developmentStatus = projectData.development_status as Record<string, unknown> | undefined;

    // Support both old and new schema field names: the new schema nests
    // these under `overview`/top level; the legacy schema nests them under
    // `technology_analysis`/`development_status`.
    const technologyStack = (overview?.technology_stack || technologyAnalysis?.technology_stack) as Record<string, unknown[]> | undefined;
    const architecture = (overview?.architecture || technologyAnalysis?.architecture) as Record<string, unknown> | undefined;
    const developmentIndex = (projectData.development_index || developmentStatus?.development_index) as Record<string, unknown[]> | undefined;
    const statistics = (projectData.statistics || developmentStatus?.statistics) as Record<string, unknown> | undefined;
    const metadata = projectData._metadata as Record<string, unknown> | undefined;

    // Load guidelines from separate file if exists (new structure only;
    // absent for legacy projects, in which case `guidelines` stays null)
    let guidelines: ProjectGuidelines | null = null;
    if (existsSync(guidelinesFile)) {
      try {
        const guidelinesContent = readFileSync(guidelinesFile, 'utf8');
        const guidelinesData = JSON.parse(guidelinesContent) as Record<string, unknown>;

        const conventions = guidelinesData.conventions as Record<string, string[]> | undefined;
        const constraints = guidelinesData.constraints as Record<string, string[]> | undefined;

        // Normalize to a fully-populated ProjectGuidelines: every list
        // field defaults to [] so consumers never see undefined.
        guidelines = {
          conventions: {
            coding_style: conventions?.coding_style || [],
            naming_patterns: conventions?.naming_patterns || [],
            file_structure: conventions?.file_structure || [],
            documentation: conventions?.documentation || []
          },
          constraints: {
            architecture: constraints?.architecture || [],
            tech_stack: constraints?.tech_stack || [],
            performance: constraints?.performance || [],
            security: constraints?.security || []
          },
          quality_rules: (guidelinesData.quality_rules as Array<{ rule: string; scope: string; enforced_by?: string }>) || [],
          learnings: (guidelinesData.learnings as Array<{
            date: string;
            session_id?: string;
            insight: string;
            context?: string;
            category?: string;
          }>) || [],
          _metadata: guidelinesData._metadata as ProjectGuidelines['_metadata'] | undefined
        };
        console.log(`Successfully loaded project guidelines`);
      } catch (guidelinesErr) {
        // A broken guidelines file must not sink the overview load —
        // log and continue with guidelines = null.
        console.error(`Failed to parse project-guidelines.json:`, (guidelinesErr as Error).message);
      }
    }

    // Assemble the overview. NOTE(review): the `as` casts below are
    // unchecked — a field of the wrong JSON type (e.g. a string where an
    // array is expected) passes through unvalidated when truthy.
    return {
      projectName: (projectData.project_name as string) || 'Unknown',
      description: (overview?.description as string) || '',
      initializedAt: (projectData.initialized_at as string) || null,
      technologyStack: {
        languages: (technologyStack?.languages as string[]) || [],
        frameworks: (technologyStack?.frameworks as string[]) || [],
        build_tools: (technologyStack?.build_tools as string[]) || [],
        test_frameworks: (technologyStack?.test_frameworks as string[]) || []
      },
      architecture: {
        style: (architecture?.style as string) || 'Unknown',
        layers: (architecture?.layers as string[]) || [],
        patterns: (architecture?.patterns as string[]) || []
      },
      keyComponents: (overview?.key_components as string[]) || [],
      features: (projectData.features as unknown[]) || [],
      developmentIndex: {
        feature: (developmentIndex?.feature as unknown[]) || [],
        enhancement: (developmentIndex?.enhancement as unknown[]) || [],
        bugfix: (developmentIndex?.bugfix as unknown[]) || [],
        refactor: (developmentIndex?.refactor as unknown[]) || [],
        docs: (developmentIndex?.docs as unknown[]) || []
      },
      statistics: {
        total_features: (statistics?.total_features as number) || 0,
        total_sessions: (statistics?.total_sessions as number) || 0,
        last_updated: (statistics?.last_updated as string) || null
      },
      metadata: {
        initialized_by: (metadata?.initialized_by as string) || 'unknown',
        analysis_timestamp: (metadata?.analysis_timestamp as string) || null,
        analysis_mode: (metadata?.analysis_mode as string) || 'unknown'
      },
      guidelines
    };
  } catch (err) {
    // Covers read failures and invalid JSON in the main project file.
    console.error(`Failed to parse project file at ${projectFile}:`, (err as Error).message);
    console.error('Error stack:', (err as Error).stack);
    return null;
  }
}
|