feat: add Discuss and Explore subagents for dynamic critique and code exploration

- Implement Discuss Subagent for multi-perspective critique with dynamic perspectives.
- Create Explore Subagent for shared codebase exploration with centralized caching.
- Add tests for CcwToolsMcpCard component to ensure enabled tools are preserved on config save.
- Introduce SessionPreviewPanel component for previewing and selecting sessions for Memory V2 extraction.
- Develop CommandCreateDialog component for creating/importing commands with import and CLI generate modes.
This commit is contained in:
catlog22
2026-02-27 17:25:52 +08:00
parent 3db74cc7b0
commit 3b92bfae8c
45 changed files with 6508 additions and 128 deletions

View File

@@ -252,6 +252,11 @@ export function run(argv: string[]): void {
.option('--batch-size <n>', 'Batch size for embedding', '8')
.option('--top-k <n>', 'Number of semantic search results', '10')
.option('--min-score <f>', 'Minimum similarity score for semantic search', '0.5')
// Pipeline V2 options
.option('--include-native', 'Include native sessions (preview)')
.option('--path <path>', 'Project path (pipeline commands)')
.option('--max-sessions <n>', 'Max sessions to extract (extract)')
.option('--session-ids <ids>', 'Comma-separated session IDs (extract)')
.action((subcommand, args, options) => memoryCommand(subcommand, args, options));
// Core Memory command

View File

@@ -20,6 +20,9 @@ import {
} from '../core/memory-embedder-bridge.js';
import { getCoreMemoryStore } from '../core/core-memory-store.js';
import { CliHistoryStore } from '../tools/cli-history-store.js';
import { MemoryExtractionPipeline, type PreviewResult, type SessionPreviewItem } from '../core/memory-extraction-pipeline.js';
import { MemoryConsolidationPipeline } from '../core/memory-consolidation-pipeline.js';
import { MemoryJobScheduler } from '../core/memory-job-scheduler.js';
interface TrackOptions {
type?: string;
@@ -74,6 +77,28 @@ interface EmbedStatusOptions {
json?: boolean;
}
// Memory Pipeline V2 subcommand options
/** Options for `ccw memory preview`. */
interface PipelinePreviewOptions {
  /** Also include native CLI tool sessions in the preview. */
  includeNative?: boolean;
  /** Project path; defaults to the current working directory. */
  path?: string;
  /** Emit machine-readable JSON instead of the formatted table. */
  json?: boolean;
}
/** Options for `ccw memory extract`. */
interface PipelineExtractOptions {
  /** Max sessions to batch-extract (raw CLI string; parsed by the action). */
  maxSessions?: string;
  /** Comma-separated session IDs for selective extraction. */
  sessionIds?: string;
  /** Project path; defaults to the current working directory. */
  path?: string;
}
/** Options for `ccw memory consolidate`. */
interface PipelineConsolidateOptions {
  /** Project path; defaults to the current working directory. */
  path?: string;
}
/** Options for `ccw memory status`. */
interface PipelineStatusOptions {
  /** Project path; defaults to the current working directory. */
  path?: string;
  /** Emit machine-readable JSON instead of the formatted report. */
  json?: boolean;
}
/**
* Read JSON data from stdin (for Claude Code hooks)
*/
@@ -967,9 +992,388 @@ async function embedStatusAction(options: EmbedStatusOptions): Promise<void> {
}
}
// ============================================================
// Memory Pipeline V2 Subcommands
// ============================================================
/**
 * Preview the queue of sessions eligible for memory extraction.
 *
 * Prints a summary plus a per-session table (or raw JSON with --json).
 * Exits with code 1 if the preview cannot be produced.
 *
 * @param options - Parsed CLI options for the preview subcommand
 */
async function pipelinePreviewAction(options: PipelinePreviewOptions): Promise<void> {
  const basePath = options.path || process.cwd();
  const asJson = options.json;
  try {
    const pipeline = new MemoryExtractionPipeline(basePath);
    const preview = pipeline.previewEligibleSessions({
      includeNative: Boolean(options.includeNative),
    });
    if (asJson) {
      console.log(JSON.stringify(preview, null, 2));
      return;
    }
    console.log(chalk.bold.cyan('\n Extraction Queue Preview\n'));
    console.log(chalk.gray(` Project: ${basePath}`));
    console.log(chalk.gray(` Include Native: ${options.includeNative ? 'Yes' : 'No'}\n`));
    // Aggregate counts first.
    const summary = preview.summary;
    console.log(chalk.bold.white(' Summary:'));
    console.log(chalk.white(` Total Sessions: ${summary.total}`));
    console.log(chalk.white(` Eligible: ${summary.eligible}`));
    console.log(chalk.white(` Already Extracted: ${summary.alreadyExtracted}`));
    console.log(chalk.green(` Ready for Extraction: ${summary.readyForExtraction}`));
    if (!preview.sessions.length) {
      console.log(chalk.yellow('\n No eligible sessions found.\n'));
      return;
    }
    // Fixed-width session table.
    console.log(chalk.bold.white('\n Sessions:\n'));
    console.log(chalk.gray(' ID Source Tool Turns Bytes Status'));
    console.log(chalk.gray(' ' + '-'.repeat(76)));
    for (const session of preview.sessions) {
      let status: string;
      if (session.extracted) {
        status = chalk.green('extracted');
      } else if (session.eligible) {
        status = chalk.cyan('ready');
      } else {
        status = chalk.gray('skipped');
      }
      const cells = [
        chalk.dim(session.sessionId.padEnd(20)),
        session.source.padEnd(11),
        (session.tool || '-').padEnd(11),
        String(session.turns).padStart(5),
        String(session.bytes).padStart(9),
        status,
      ];
      console.log(` ${cells.join(' ')}`);
    }
    console.log(chalk.gray('\n ' + '-'.repeat(76)));
    console.log(chalk.gray(` Showing ${preview.sessions.length} sessions\n`));
  } catch (error) {
    const message = (error as Error).message;
    if (asJson) {
      console.log(JSON.stringify({ error: message }, null, 2));
    } else {
      console.error(chalk.red(`\n Error: ${message}\n`));
    }
    process.exit(1);
  }
}
/**
 * Trigger extraction for sessions.
 *
 * Two modes:
 *  - Selective: --session-ids runs extraction for explicitly listed sessions.
 *  - Batch: extracts up to --max-sessions eligible sessions (default 10).
 *
 * Exits with code 1 on invalid input or pipeline errors.
 *
 * @param options - Parsed CLI options for the extract subcommand
 */
async function pipelineExtractAction(options: PipelineExtractOptions): Promise<void> {
  const { maxSessions, sessionIds, path: projectPath } = options;
  const basePath = projectPath || process.cwd();
  try {
    const store = getCoreMemoryStore(basePath);
    const scheduler = new MemoryJobScheduler(store.getDb());
    const pipeline = new MemoryExtractionPipeline(basePath);
    // Selective extraction with specific session IDs
    if (sessionIds) {
      const ids = sessionIds.split(',').map(id => id.trim()).filter(Boolean);
      if (ids.length === 0) {
        console.error(chalk.red('Error: No valid session IDs provided'));
        process.exit(1);
      }
      console.log(chalk.bold.cyan('\n Selective Extraction\n'));
      console.log(chalk.gray(` Project: ${basePath}`));
      console.log(chalk.gray(` Session IDs: ${ids.join(', ')}\n`));
      // Validate requested IDs against the set of known sessions.
      const preview = pipeline.previewEligibleSessions({ includeNative: false });
      const validSessionIds = new Set(preview.sessions.map(s => s.sessionId));
      const queued: string[] = [];
      const skipped: string[] = [];
      const invalid: string[] = [];
      for (const sessionId of ids) {
        if (!validSessionIds.has(sessionId)) {
          invalid.push(sessionId);
          continue;
        }
        // Skip sessions that already have Stage 1 output.
        const existingOutput = store.getStage1Output(sessionId);
        if (existingOutput) {
          skipped.push(sessionId);
          continue;
        }
        // Enqueue job
        scheduler.enqueueJob('phase1_extraction', sessionId, Math.floor(Date.now() / 1000));
        queued.push(sessionId);
      }
      console.log(chalk.green(` Queued: ${queued.length} sessions`));
      console.log(chalk.yellow(` Skipped (already extracted): ${skipped.length}`));
      if (invalid.length > 0) {
        console.log(chalk.red(` Invalid: ${invalid.length}`));
        console.log(chalk.gray(` ${invalid.join(', ')}`));
      }
      // Process queued sessions
      if (queued.length > 0) {
        console.log(chalk.cyan('\n Processing extraction jobs...\n'));
        await runQueuedExtractionJobs(pipeline, queued);
      } else {
        console.log();
      }
      return;
    }
    // Batch extraction.
    // BUGFIX: previously a non-numeric --max-sessions produced NaN from
    // parseInt, which was silently passed to scanEligibleSessions.
    // Validate and fail fast instead.
    let max = 10;
    if (maxSessions !== undefined) {
      const parsed = Number.parseInt(maxSessions, 10);
      if (!Number.isInteger(parsed) || parsed <= 0) {
        console.error(chalk.red(`Error: --max-sessions must be a positive integer (got '${maxSessions}')`));
        process.exit(1);
      }
      max = parsed;
    }
    console.log(chalk.bold.cyan('\n Batch Extraction\n'));
    console.log(chalk.gray(` Project: ${basePath}`));
    console.log(chalk.gray(` Max Sessions: ${max}\n`));
    // Get eligible sessions
    const eligible = pipeline.scanEligibleSessions(max);
    const preview = pipeline.previewEligibleSessions({ maxSessions: max });
    console.log(chalk.white(` Found ${eligible.length} eligible sessions`));
    console.log(chalk.white(` Ready for extraction: ${preview.summary.readyForExtraction}\n`));
    if (eligible.length === 0) {
      console.log(chalk.yellow(' No eligible sessions to extract.\n'));
      return;
    }
    // Queue jobs for sessions that do not yet have Stage 1 output.
    const jobId = `batch-${Date.now()}`;
    const queued: string[] = [];
    for (const session of eligible) {
      const existingOutput = store.getStage1Output(session.id);
      if (!existingOutput) {
        // Watermark is the session's last-update time in epoch seconds.
        const watermark = Math.floor(new Date(session.updated_at).getTime() / 1000);
        scheduler.enqueueJob('phase1_extraction', session.id, watermark);
        queued.push(session.id);
      }
    }
    console.log(chalk.cyan(` Job ID: ${jobId}`));
    console.log(chalk.cyan(` Queued: ${queued.length} sessions\n`));
    // Process queued sessions
    if (queued.length > 0) {
      console.log(chalk.cyan(' Processing extraction jobs...\n'));
      await runQueuedExtractionJobs(pipeline, queued);
    } else {
      console.log(chalk.yellow(' No new sessions to extract.\n'));
    }
  } catch (error) {
    console.error(chalk.red(`\n Error: ${(error as Error).message}\n`));
    process.exit(1);
  }
}
/**
 * Run queued extraction jobs sequentially, logging a per-session [OK]/[FAIL]
 * line and a final succeeded/failed summary. Shared by selective and batch
 * extraction (the loop was previously duplicated in both branches).
 *
 * @param pipeline - Extraction pipeline to run jobs on
 * @param queued - Session IDs to process, in order
 */
async function runQueuedExtractionJobs(pipeline: MemoryExtractionPipeline, queued: string[]): Promise<void> {
  let succeeded = 0;
  let failed = 0;
  for (const sessionId of queued) {
    try {
      await pipeline.runExtractionJob(sessionId);
      succeeded++;
      console.log(chalk.green(` [OK] ${sessionId}`));
    } catch (err) {
      failed++;
      console.log(chalk.red(` [FAIL] ${sessionId}: ${(err as Error).message}`));
    }
  }
  console.log(chalk.bold.white(`\n Completed: ${succeeded} succeeded, ${failed} failed\n`));
}
/**
 * Trigger the memory consolidation pipeline and print a short preview of the
 * resulting Memory.md content.
 *
 * Exits with code 1 if consolidation fails.
 *
 * @param options - Parsed CLI options for the consolidate subcommand
 */
async function pipelineConsolidateAction(options: PipelineConsolidateOptions): Promise<void> {
  const basePath = options.path || process.cwd();
  try {
    const consolidation = new MemoryConsolidationPipeline(basePath);
    console.log(chalk.bold.cyan('\n Memory Consolidation\n'));
    console.log(chalk.gray(` Project: ${basePath}\n`));
    // Report the pipeline's state before kicking off a run.
    const currentStatus = consolidation.getStatus();
    if (currentStatus) {
      console.log(chalk.white(` Current Status: ${currentStatus.status}`));
    }
    console.log(chalk.cyan('\n Triggering consolidation...\n'));
    await consolidation.runConsolidation();
    console.log(chalk.green(' Consolidation completed successfully.\n'));
    // Show the first 500 characters of the generated Memory.md, if any.
    const memoryMd = consolidation.getMemoryMdContent();
    if (!memoryMd) {
      return;
    }
    const divider = chalk.gray(' ' + '-'.repeat(60));
    console.log(chalk.white(' Memory.md Preview:'));
    console.log(divider);
    const head = memoryMd.slice(0, 500);
    const indented = head.split('\n').map(line => ' ' + line).join('\n');
    console.log(chalk.dim(indented));
    if (memoryMd.length > 500) {
      console.log(chalk.gray(' ...'));
    }
    console.log(divider);
    console.log(chalk.gray(` (${memoryMd.length} bytes total)\n`));
  } catch (error) {
    console.error(chalk.red(`\n Error: ${(error as Error).message}\n`));
    process.exit(1);
  }
}
/**
 * Show pipeline status.
 *
 * Reports Stage 1 extraction counts, job counts by status, the most recent
 * extraction jobs, and the consolidation pipeline's state (or the same data
 * as JSON with --json). Exits with code 1 on failure.
 *
 * @param options - Parsed CLI options for the status subcommand
 */
async function pipelineStatusAction(options: PipelineStatusOptions): Promise<void> {
  const { path: projectPath, json } = options;
  const basePath = projectPath || process.cwd();
  try {
    const store = getCoreMemoryStore(basePath);
    const scheduler = new MemoryJobScheduler(store.getDb());
    // Extraction status
    const stage1Count = store.countStage1Outputs();
    const extractionJobs = scheduler.listJobs('phase1_extraction');
    // Consolidation status
    let consolidationStatus = 'unavailable';
    let memoryMdAvailable = false;
    try {
      const consolidationPipeline = new MemoryConsolidationPipeline(basePath);
      const status = consolidationPipeline.getStatus();
      consolidationStatus = status?.status || 'unknown';
      memoryMdAvailable = !!consolidationPipeline.getMemoryMdContent();
    } catch {
      // Consolidation pipeline may not be initialized
    }
    // Job counts by status
    const jobCounts: Record<string, number> = {};
    for (const job of extractionJobs) {
      jobCounts[job.status] = (jobCounts[job.status] || 0) + 1;
    }
    // BUGFIX: the display paths below previously disagreed on the success
    // status literal ('completed' in the count coloring vs 'done' in the job
    // list). Accept both here — TODO confirm the scheduler's canonical value.
    const isSuccess = (status: string): boolean => status === 'completed' || status === 'done';
    const result = {
      extraction: {
        stage1Count,
        totalJobs: extractionJobs.length,
        jobCounts,
        recentJobs: extractionJobs.slice(0, 10).map(j => ({
          job_key: j.job_key,
          status: j.status,
          started_at: j.started_at,
          finished_at: j.finished_at,
          last_error: j.last_error,
        })),
      },
      consolidation: {
        status: consolidationStatus,
        memoryMdAvailable,
      },
    };
    if (json) {
      console.log(JSON.stringify(result, null, 2));
      return;
    }
    console.log(chalk.bold.cyan('\n Memory Pipeline Status\n'));
    console.log(chalk.gray(` Project: ${basePath}\n`));
    // Extraction status
    console.log(chalk.bold.white(' Extraction Pipeline:'));
    console.log(chalk.white(` Stage 1 Outputs: ${stage1Count}`));
    console.log(chalk.white(` Total Jobs: ${extractionJobs.length}`));
    if (Object.keys(jobCounts).length > 0) {
      console.log(chalk.white(' Job Status:'));
      for (const [status, count] of Object.entries(jobCounts)) {
        const statusColor = isSuccess(status) ? chalk.green :
          status === 'running' ? chalk.yellow : chalk.gray;
        console.log(` ${statusColor(status)}: ${count}`);
      }
    }
    // Consolidation status
    console.log(chalk.bold.white('\n Consolidation Pipeline:'));
    console.log(chalk.white(` Status: ${consolidationStatus}`));
    console.log(chalk.white(` Memory.md Available: ${memoryMdAvailable ? 'Yes' : 'No'}`));
    // Recent jobs
    if (extractionJobs.length > 0) {
      console.log(chalk.bold.white('\n Recent Extraction Jobs:\n'));
      console.log(chalk.gray(' Status Job Key'));
      console.log(chalk.gray(' ' + '-'.repeat(60)));
      for (const job of extractionJobs.slice(0, 10)) {
        const statusIcon = isSuccess(job.status) ? chalk.green('done ') :
          job.status === 'running' ? chalk.yellow('running ') :
          job.status === 'pending' ? chalk.gray('pending ') :
          chalk.red('error ');
        console.log(` ${statusIcon} ${chalk.dim(job.job_key)}`);
      }
      if (extractionJobs.length > 10) {
        console.log(chalk.gray(` ... and ${extractionJobs.length - 10} more`));
      }
    }
    console.log();
  } catch (error) {
    if (json) {
      console.log(JSON.stringify({ error: (error as Error).message }, null, 2));
    } else {
      console.error(chalk.red(`\n Error: ${(error as Error).message}\n`));
    }
    process.exit(1);
  }
}
/**
* Memory command entry point
* @param {string} subcommand - Subcommand (track, import, stats, search, suggest, prune, embed, embed-status)
* @param {string} subcommand - Subcommand (track, import, stats, search, suggest, prune, embed, embed-status, preview, extract, consolidate, status)
* @param {string|string[]} args - Arguments array
* @param {Object} options - CLI options
*/
@@ -1018,6 +1422,23 @@ export async function memoryCommand(
await embedStatusAction(options as EmbedStatusOptions);
break;
// Memory Pipeline V2 subcommands
case 'preview':
await pipelinePreviewAction(options as PipelinePreviewOptions);
break;
case 'extract':
await pipelineExtractAction(options as PipelineExtractOptions);
break;
case 'consolidate':
await pipelineConsolidateAction(options as PipelineConsolidateOptions);
break;
case 'status':
await pipelineStatusAction(options as PipelineStatusOptions);
break;
default:
console.log(chalk.bold.cyan('\n CCW Memory Module\n'));
console.log(' Context tracking and prompt optimization.\n');
@@ -1031,6 +1452,12 @@ export async function memoryCommand(
console.log(chalk.gray(' embed Generate embeddings for semantic search'));
console.log(chalk.gray(' embed-status Show embedding generation status'));
console.log();
console.log(chalk.bold.cyan(' Memory Pipeline V2:'));
console.log(chalk.gray(' preview Preview eligible sessions for extraction'));
console.log(chalk.gray(' extract Trigger extraction for sessions'));
console.log(chalk.gray(' consolidate Trigger consolidation pipeline'));
console.log(chalk.gray(' status Show pipeline status'));
console.log();
console.log(' Track Options:');
console.log(chalk.gray(' --type <type> Entity type: file, module, topic'));
console.log(chalk.gray(' --action <action> Action: read, write, mention'));
@@ -1074,6 +1501,25 @@ export async function memoryCommand(
console.log(chalk.gray(' --older-than <age> Age threshold (default: 30d)'));
console.log(chalk.gray(' --dry-run Preview without deleting'));
console.log();
console.log(chalk.bold.cyan(' Pipeline V2 Options:'));
console.log();
console.log(' Preview Options:');
console.log(chalk.gray(' --include-native Include native sessions in preview'));
console.log(chalk.gray(' --path <path> Project path (default: current directory)'));
console.log(chalk.gray(' --json Output as JSON'));
console.log();
console.log(' Extract Options:');
console.log(chalk.gray(' --max-sessions <n> Max sessions to extract (default: 10)'));
console.log(chalk.gray(' --session-ids <ids> Comma-separated session IDs for selective extraction'));
console.log(chalk.gray(' --path <path> Project path (default: current directory)'));
console.log();
console.log(' Consolidate Options:');
console.log(chalk.gray(' --path <path> Project path (default: current directory)'));
console.log();
console.log(' Pipeline Status Options:');
console.log(chalk.gray(' --path <path> Project path (default: current directory)'));
console.log(chalk.gray(' --json Output as JSON'));
console.log();
console.log(' Examples:');
console.log(chalk.gray(' ccw memory track --type file --action read --value "src/auth.ts"'));
console.log(chalk.gray(' ccw memory import --source history --project "my-app"'));
@@ -1086,5 +1532,13 @@ export async function memoryCommand(
console.log(chalk.gray(' ccw memory suggest --context "implementing JWT auth"'));
console.log(chalk.gray(' ccw memory prune --older-than 60d --dry-run'));
console.log();
console.log(chalk.cyan(' Pipeline V2 Examples:'));
console.log(chalk.gray(' ccw memory preview # Preview extraction queue'));
console.log(chalk.gray(' ccw memory preview --include-native # Include native sessions'));
console.log(chalk.gray(' ccw memory extract --max-sessions 10 # Batch extract up to 10'));
console.log(chalk.gray(' ccw memory extract --session-ids sess-1,sess-2 # Selective extraction'));
console.log(chalk.gray(' ccw memory consolidate # Run consolidation'));
console.log(chalk.gray(' ccw memory status # Check pipeline status'));
console.log();
}
}

View File

@@ -3,12 +3,12 @@
* Delegates to team-msg.ts handler for JSONL-based persistent messaging
*
* Commands:
* ccw team log --team <name> --from <role> --to <role> --type <type> --summary "..."
* ccw team read --team <name> --id <MSG-NNN>
* ccw team list --team <name> [--from <role>] [--to <role>] [--type <type>] [--last <n>]
* ccw team status --team <name>
* ccw team delete --team <name> --id <MSG-NNN>
* ccw team clear --team <name>
* ccw team log --team <session-id> --from <role> --to <role> --type <type> --summary "..."
* ccw team read --team <session-id> --id <MSG-NNN>
* ccw team list --team <session-id> [--from <role>] [--to <role>] [--type <type>] [--last <n>]
* ccw team status --team <session-id>
* ccw team delete --team <session-id> --id <MSG-NNN>
* ccw team clear --team <session-id>
*/
import chalk from 'chalk';
@@ -145,7 +145,7 @@ function printHelp(): void {
console.log(chalk.gray(' clear Clear all messages for a team'));
console.log();
console.log(' Required:');
console.log(chalk.gray(' --team <name> Team name'));
console.log(chalk.gray(' --team <session-id> Session ID (e.g., TLS-my-project-2026-02-27), NOT team name'));
console.log();
console.log(' Log Options:');
console.log(chalk.gray(' --from <role> Sender role name'));
@@ -168,12 +168,12 @@ function printHelp(): void {
console.log(chalk.gray(' --json Output as JSON'));
console.log();
console.log(' Examples:');
console.log(chalk.gray(' ccw team log --team my-team --from executor --to coordinator --type impl_complete --summary "Task done"'));
console.log(chalk.gray(' ccw team list --team my-team --last 5'));
console.log(chalk.gray(' ccw team read --team my-team --id MSG-003'));
console.log(chalk.gray(' ccw team status --team my-team'));
console.log(chalk.gray(' ccw team delete --team my-team --id MSG-003'));
console.log(chalk.gray(' ccw team clear --team my-team'));
console.log(chalk.gray(' ccw team log --team my-team --from planner --to coordinator --type plan_ready --summary "Plan ready" --json'));
console.log(chalk.gray(' ccw team log --team TLS-my-project-2026-02-27 --from executor --to coordinator --type impl_complete --summary "Task done"'));
console.log(chalk.gray(' ccw team list --team TLS-my-project-2026-02-27 --last 5'));
console.log(chalk.gray(' ccw team read --team TLS-my-project-2026-02-27 --id MSG-003'));
console.log(chalk.gray(' ccw team status --team TLS-my-project-2026-02-27'));
console.log(chalk.gray(' ccw team delete --team TLS-my-project-2026-02-27 --id MSG-003'));
console.log(chalk.gray(' ccw team clear --team TLS-my-project-2026-02-27'));
console.log(chalk.gray(' ccw team log --team TLS-my-project-2026-02-27 --from planner --to coordinator --type plan_ready --summary "Plan ready" --json'));
console.log();
}

View File

@@ -28,6 +28,8 @@ import {
} from './memory-v2-config.js';
import { EXTRACTION_SYSTEM_PROMPT, buildExtractionUserPrompt } from './memory-extraction-prompts.js';
import { redactSecrets } from '../utils/secret-redactor.js';
import { getNativeSessions, type NativeSession } from '../tools/native-session-discovery.js';
import { existsSync, readFileSync, statSync } from 'fs';
// -- Types --
@@ -58,6 +60,27 @@ export interface BatchExtractionResult {
errors: Array<{ sessionId: string; error: string }>;
}
/** One session row in an extraction-queue preview (CCW or native source). */
export interface SessionPreviewItem {
  /** Unique session identifier. */
  sessionId: string;
  /** Origin: CCW's own history store, or a native CLI tool's session files. */
  source: 'ccw' | 'native';
  /** Tool that produced the session (e.g. 'gemini', 'claude'). */
  tool: string;
  /** Session timestamp in ms since epoch (created_at for CCW sessions, updatedAt for native ones). */
  timestamp: number;
  /** Whether the session passed the eligibility scan. */
  eligible: boolean;
  /** Whether Stage 1 extraction output already exists for this session. */
  extracted: boolean;
  /** Size of the transcript (CCW) or session file (native) in bytes. */
  bytes: number;
  /** Number of user turns detected in the session. */
  turns: number;
}
/** Result of previewEligibleSessions: per-session rows plus aggregate counts. */
export interface PreviewResult {
  sessions: SessionPreviewItem[];
  summary: {
    /** Total sessions inspected. */
    total: number;
    /** Sessions passing eligibility checks (including already-extracted ones). */
    eligible: number;
    /** Sessions that already have Stage 1 output. */
    alreadyExtracted: number;
    /** Eligible sessions with no Stage 1 output yet — the actual work queue. */
    readyForExtraction: number;
  };
}
// -- Turn type bitmask constants --
/** All turn types included */
@@ -77,6 +100,15 @@ const TRUNCATION_MARKER = '\n\n[... CONTENT TRUNCATED ...]\n\n';
const JOB_KIND_EXTRACTION = 'phase1_extraction';
// -- Authorization error for session access --
/**
 * Error thrown when a caller requests a session that does not belong to the
 * current project. Guards against cross-project session access.
 */
export class SessionAccessDeniedError extends Error {
  constructor(sessionId: string, projectPath: string) {
    super(`Session '${sessionId}' does not belong to project '${projectPath}'`);
    // Stable name so callers can identify this error without instanceof.
    this.name = 'SessionAccessDeniedError';
  }
}
// -- Pipeline --
export class MemoryExtractionPipeline {
@@ -92,6 +124,58 @@ export class MemoryExtractionPipeline {
this.currentSessionId = options?.currentSessionId;
}
// ========================================================================
// Authorization
// ========================================================================
/**
 * Verify that a session belongs to the current project path.
 *
 * Security-critical authorization check preventing cross-project session
 * access: sessions are scoped to projects, and a session from another
 * project must be denied.
 *
 * @param sessionId - The session ID to verify
 * @returns true if the session belongs to this project, false otherwise
 */
verifySessionBelongsToProject(sessionId: string): boolean {
  // A session recorded in this project's own history store is authorized.
  const historyStore = getHistoryStore(this.projectPath);
  if (historyStore.getConversation(sessionId)) {
    return true;
  }
  // Otherwise accept the session only if a native CLI tool discovered it
  // under this project's working directory.
  const nativeTools = ['gemini', 'qwen', 'codex', 'claude', 'opencode'] as const;
  return nativeTools.some(tool => {
    try {
      const discovered = getNativeSessions(tool, { workingDir: this.projectPath });
      return discovered.some(s => s.sessionId === sessionId);
    } catch {
      // A discovery failure for one tool is not a denial; try the rest.
      return false;
    }
  });
}
/**
 * Verify session access and throw if unauthorized.
 *
 * Thin fail-fast wrapper around verifySessionBelongsToProject for call
 * sites that want an exception rather than a boolean.
 *
 * @param sessionId - The session ID to verify
 * @throws SessionAccessDeniedError if session doesn't belong to project
 */
private ensureSessionAccess(sessionId: string): void {
  if (!this.verifySessionBelongsToProject(sessionId)) {
    throw new SessionAccessDeniedError(sessionId, this.projectPath);
  }
}
// ========================================================================
// Eligibility scanning
// ========================================================================
@@ -148,6 +232,122 @@ export class MemoryExtractionPipeline {
return eligible;
}
/**
 * Preview eligible sessions with detailed information for selective extraction.
 *
 * Returns session metadata including byte size, turn count, and extraction
 * status for CCW history sessions, and — when `includeNative` is set — for
 * native tool sessions (gemini/qwen/codex/claude/opencode) that pass the
 * same age/idle eligibility checks applied below.
 *
 * @param options - Preview options
 * @param options.includeNative - Whether to also scan native tool sessions
 * @param options.maxSessions - Maximum number of CCW sessions to return
 * @returns PreviewResult with sessions and summary counts
 */
previewEligibleSessions(options?: { includeNative?: boolean; maxSessions?: number }): PreviewResult {
  const store = getCoreMemoryStore(this.projectPath);
  const maxSessions = options?.maxSessions || MAX_SESSIONS_PER_STARTUP;
  // Scan CCW sessions using existing logic
  const ccwSessions = this.scanEligibleSessions(maxSessions);
  const sessions: SessionPreviewItem[] = [];
  // Process CCW sessions: byte size comes from the filtered transcript,
  // turn count from the stored turns array.
  for (const session of ccwSessions) {
    const transcript = this.filterTranscript(session);
    const bytes = Buffer.byteLength(transcript, 'utf-8');
    const turns = session.turns?.length || 0;
    const timestamp = new Date(session.created_at).getTime();
    // Check if already extracted
    const existingOutput = store.getStage1Output(session.id);
    const extracted = existingOutput !== null;
    sessions.push({
      sessionId: session.id,
      source: 'ccw',
      tool: session.tool || 'unknown',
      timestamp,
      eligible: true,
      extracted,
      bytes,
      turns,
    });
  }
  // Native sessions: apply the same age/idle/current-session checks that
  // govern CCW eligibility.
  if (options?.includeNative) {
    const nativeTools = ['gemini', 'qwen', 'codex', 'claude', 'opencode'] as const;
    const now = Date.now();
    const maxAgeMs = MAX_SESSION_AGE_DAYS * 24 * 60 * 60 * 1000;
    const minIdleMs = MIN_IDLE_HOURS * 60 * 60 * 1000;
    for (const tool of nativeTools) {
      try {
        const nativeSessions = getNativeSessions(tool, { workingDir: this.projectPath });
        for (const session of nativeSessions) {
          // Age check: created within MAX_SESSION_AGE_DAYS
          if (now - session.createdAt.getTime() > maxAgeMs) continue;
          // Idle check: last updated at least MIN_IDLE_HOURS ago
          if (now - session.updatedAt.getTime() < minIdleMs) continue;
          // Skip the session this pipeline is currently running inside of.
          if (this.currentSessionId && session.sessionId === this.currentSessionId) continue;
          // Get file stats for bytes
          let bytes = 0;
          let turns = 0;
          try {
            if (existsSync(session.filePath)) {
              const stats = statSync(session.filePath);
              bytes = stats.size;
              // Parse session file to count turns
              turns = this.countNativeSessionTurns(session);
            }
          } catch {
            // Skip sessions with file access errors
            continue;
          }
          // Check if already extracted
          const existingOutput = store.getStage1Output(session.sessionId);
          const extracted = existingOutput !== null;
          sessions.push({
            sessionId: session.sessionId,
            source: 'native',
            tool: session.tool,
            timestamp: session.updatedAt.getTime(),
            eligible: true,
            extracted,
            bytes,
            turns,
          });
        }
      } catch {
        // Skip tools with discovery errors
      }
    }
  }
  // Compute summary counts from the combined session list.
  const eligible = sessions.filter(s => s.eligible && !s.extracted);
  const alreadyExtracted = sessions.filter(s => s.extracted);
  return {
    sessions,
    summary: {
      total: sessions.length,
      eligible: sessions.filter(s => s.eligible).length,
      alreadyExtracted: alreadyExtracted.length,
      readyForExtraction: eligible.length,
    },
  };
}
// ========================================================================
// Transcript filtering
// ========================================================================
@@ -202,6 +402,291 @@ export class MemoryExtractionPipeline {
return parts.join('\n\n');
}
// ========================================================================
// Native session handling
// ========================================================================
/**
 * Count the number of user turns in a native session file.
 *
 * Tool-specific parsing:
 *  - gemini: one JSON document with a `messages` array
 *  - qwen / codex / claude: JSONL, one event per line
 *  - opencode: turns are estimated from file size (messages live in
 *    separate files that are not read here)
 *
 * Returns 0 when the file cannot be read or parsed, or the tool is unknown.
 *
 * @param session - The native session to count turns for
 * @returns Number of turns (user/assistant exchanges)
 */
countNativeSessionTurns(session: NativeSession): number {
  try {
    const raw = readFileSync(session.filePath, 'utf8');
    // Shared JSONL helper: count non-blank lines whose parsed entry is a
    // user turn per the given predicate; malformed lines are ignored.
    const countJsonl = (
      isUserTurn: (entry: {
        type?: string;
        role?: string;
        payload?: { type?: string };
        message?: { role?: string };
        isMeta?: boolean;
      }) => boolean,
    ): number => {
      let total = 0;
      for (const line of raw.split('\n')) {
        if (!line.trim()) continue;
        try {
          if (isUserTurn(JSON.parse(line))) total++;
        } catch {
          // Skip invalid lines
        }
      }
      return total;
    };
    switch (session.tool) {
      case 'gemini': {
        // Whole-file JSON: { messages: [{ type, content }] }
        const doc = JSON.parse(raw);
        if (!Array.isArray(doc.messages)) return 0;
        return doc.messages.filter((m: { type: string }) => m.type === 'user').length;
      }
      case 'qwen':
        // Both the current ({ type }) and legacy ({ role }) shapes count.
        return countJsonl(e => e.type === 'user' || e.role === 'user');
      case 'codex':
        // Session events: user input arrives as a user_message payload.
        return countJsonl(e => e.type === 'event_msg' && e.payload?.type === 'user_message');
      case 'claude':
        // Skip meta entries; count genuine user messages only.
        return countJsonl(e => e.type === 'user' && e.message?.role === 'user' && !e.isMeta);
      case 'opencode': {
        // Rough estimate: 1 turn per 2KB of session file, minimum 1.
        const stats = statSync(session.filePath);
        return Math.max(1, Math.floor(stats.size / 2048));
      }
      default:
        return 0;
    }
  } catch {
    return 0;
  }
}
/**
 * Load and format transcript from a native session file.
 *
 * Emits the same `--- Turn N ---` / [USER] / [ASSISTANT] layout used for CCW
 * session transcripts so downstream extraction sees a uniform format.
 * Note: turnNum only advances after an assistant reply, so consecutive user
 * messages share a turn number.
 *
 * Returns '' when the file cannot be read or yields no usable text.
 *
 * @param session - The native session to load
 * @returns Formatted transcript string
 */
loadNativeSessionTranscript(session: NativeSession): string {
  try {
    const content = readFileSync(session.filePath, 'utf8');
    const parts: string[] = [];
    let turnNum = 1;
    switch (session.tool) {
      case 'gemini': {
        // Gemini format: { messages: [{ type, content }] }
        const data = JSON.parse(content);
        if (data.messages && Array.isArray(data.messages)) {
          for (const msg of data.messages) {
            if (msg.type === 'user' && msg.content) {
              parts.push(`--- Turn ${turnNum} ---\n[USER] ${msg.content}`);
            } else if (msg.type === 'assistant' && msg.content) {
              parts.push(`[ASSISTANT] ${msg.content}`);
              turnNum++;
            }
          }
        }
        break;
      }
      case 'qwen': {
        // Qwen format: JSONL with { type, message: { parts: [{ text }] } }
        const lines = content.split('\n').filter(l => l.trim());
        for (const line of lines) {
          try {
            const entry = JSON.parse(line);
            // User message: concatenate all text parts.
            if (entry.type === 'user' && entry.message?.parts) {
              const text = entry.message.parts
                .filter((p: { text?: string }) => p.text)
                .map((p: { text?: string }) => p.text)
                .join('\n');
              if (text) {
                parts.push(`--- Turn ${turnNum} ---\n[USER] ${text}`);
              }
            }
            // Assistant response: same part-joining, advances the turn.
            else if (entry.type === 'assistant' && entry.message?.parts) {
              const text = entry.message.parts
                .filter((p: { text?: string }) => p.text)
                .map((p: { text?: string }) => p.text)
                .join('\n');
              if (text) {
                parts.push(`[ASSISTANT] ${text}`);
                turnNum++;
              }
            }
            // Legacy format: flat { role, content } entries.
            else if (entry.role === 'user' && entry.content) {
              parts.push(`--- Turn ${turnNum} ---\n[USER] ${entry.content}`);
            } else if (entry.role === 'assistant' && entry.content) {
              parts.push(`[ASSISTANT] ${entry.content}`);
              turnNum++;
            }
          } catch {
            // Skip invalid lines
          }
        }
        break;
      }
      case 'codex': {
        // Codex format: JSONL with { type, payload }
        const lines = content.split('\n').filter(l => l.trim());
        for (const line of lines) {
          try {
            const entry = JSON.parse(line);
            // User message
            if (entry.type === 'event_msg' &&
                entry.payload?.type === 'user_message' &&
                entry.payload.message) {
              parts.push(`--- Turn ${turnNum} ---\n[USER] ${entry.payload.message}`);
            }
            // Assistant response
            else if (entry.type === 'event_msg' &&
                entry.payload?.type === 'assistant_message' &&
                entry.payload.message) {
              parts.push(`[ASSISTANT] ${entry.payload.message}`);
              turnNum++;
            }
          } catch {
            // Skip invalid lines
          }
        }
        break;
      }
      case 'claude': {
        // Claude format: JSONL with { type, message }
        const lines = content.split('\n').filter(l => l.trim());
        for (const line of lines) {
          try {
            const entry = JSON.parse(line);
            if (entry.type === 'user' && entry.message?.role === 'user' && !entry.isMeta) {
              const msgContent = entry.message.content;
              // String content: drop slash-command/local-command wrappers.
              if (typeof msgContent === 'string' &&
                  !msgContent.startsWith('<command-') &&
                  !msgContent.includes('<local-command')) {
                parts.push(`--- Turn ${turnNum} ---\n[USER] ${msgContent}`);
              }
              // Array content: only the FIRST text item is kept (break below).
              else if (Array.isArray(msgContent)) {
                for (const item of msgContent) {
                  if (item.type === 'text' && item.text) {
                    parts.push(`--- Turn ${turnNum} ---\n[USER] ${item.text}`);
                    break;
                  }
                }
              }
            }
            // Assistant response: string content directly, or all text items
            // of array content joined with newlines.
            else if (entry.type === 'assistant' && entry.message?.content) {
              const msgContent = entry.message.content;
              if (typeof msgContent === 'string') {
                parts.push(`[ASSISTANT] ${msgContent}`);
                turnNum++;
              } else if (Array.isArray(msgContent)) {
                const textParts = msgContent
                  .filter((item: { type?: string; text?: string }) => item.type === 'text' && item.text)
                  .map((item: { text?: string }) => item.text)
                  .join('\n');
                if (textParts) {
                  parts.push(`[ASSISTANT] ${textParts}`);
                  turnNum++;
                }
              }
            }
          } catch {
            // Skip invalid lines
          }
        }
        break;
      }
      case 'opencode': {
        // OpenCode stores messages in separate files; only the session's
        // title and summary are extracted here. A full implementation would
        // traverse the message/part directories.
        try {
          const sessionData = JSON.parse(content);
          if (sessionData.title) {
            parts.push(`--- Session ---\n[SESSION] ${sessionData.title}`);
          }
          if (sessionData.summary) {
            parts.push(`[SUMMARY] ${sessionData.summary}`);
          }
        } catch {
          // Return empty if parsing fails
        }
        break;
      }
      default:
        break;
    }
    return parts.join('\n\n');
  } catch {
    return '';
  }
}
// ========================================================================
// Truncation
// ========================================================================
@@ -354,20 +839,55 @@ export class MemoryExtractionPipeline {
/**
* Run the full extraction pipeline for a single session.
*
* Pipeline stages: Filter -> Truncate -> LLM Extract -> PostProcess -> Store
* Pipeline stages: Authorize -> Filter -> Truncate -> LLM Extract -> PostProcess -> Store
*
* SECURITY: This method includes authorization verification to ensure the session
* belongs to the current project path before processing.
*
* @param sessionId - The session to extract from
* @param options - Optional configuration
* @param options.source - 'ccw' for CCW history or 'native' for native CLI sessions
* @param options.nativeSession - Native session data (required when source is 'native')
* @param options.skipAuthorization - Internal use only: skip authorization (already validated)
* @returns The stored Stage1Output, or null if extraction failed
* @throws SessionAccessDeniedError if session doesn't belong to the project
*/
async runExtractionJob(sessionId: string): Promise<Stage1Output | null> {
const historyStore = getHistoryStore(this.projectPath);
const record = historyStore.getConversation(sessionId);
if (!record) {
throw new Error(`Session not found: ${sessionId}`);
async runExtractionJob(
sessionId: string,
options?: {
source?: 'ccw' | 'native';
nativeSession?: NativeSession;
skipAuthorization?: boolean;
}
): Promise<Stage1Output | null> {
// SECURITY: Authorization check - verify session belongs to this project
// Skip only if explicitly requested (for internal batch processing where already validated)
if (!options?.skipAuthorization) {
this.ensureSessionAccess(sessionId);
}
const source = options?.source || 'ccw';
let transcript: string;
let sourceUpdatedAt: number;
if (source === 'native' && options?.nativeSession) {
// Native session extraction
const nativeSession = options.nativeSession;
transcript = this.loadNativeSessionTranscript(nativeSession);
sourceUpdatedAt = Math.floor(nativeSession.updatedAt.getTime() / 1000);
} else {
// CCW session extraction (default)
const historyStore = getHistoryStore(this.projectPath);
const record = historyStore.getConversation(sessionId);
if (!record) {
throw new Error(`Session not found: ${sessionId}`);
}
// Stage 1: Filter transcript
transcript = this.filterTranscript(record);
sourceUpdatedAt = Math.floor(new Date(record.updated_at).getTime() / 1000);
}
// Stage 1: Filter transcript
const transcript = this.filterTranscript(record);
if (!transcript.trim()) {
return null; // Empty transcript, nothing to extract
}
@@ -385,7 +905,6 @@ export class MemoryExtractionPipeline {
const extracted = this.postProcess(llmOutput);
// Stage 5: Store result
const sourceUpdatedAt = Math.floor(new Date(record.updated_at).getTime() / 1000);
const generatedAt = Math.floor(Date.now() / 1000);
const output: Stage1Output = {
@@ -492,7 +1011,8 @@ export class MemoryExtractionPipeline {
const token = claim.ownership_token!;
try {
const output = await this.runExtractionJob(session.id);
// Batch extraction: sessions already validated by scanEligibleSessions(), skip auth check
const output = await this.runExtractionJob(session.id, { skipAuthorization: true });
if (output) {
const watermark = output.source_updated_at;
scheduler.markSucceeded(JOB_KIND_EXTRACTION, session.id, token, watermark);

View File

@@ -7,10 +7,13 @@
* - POST /api/commands/:name/toggle - Enable/disable single command
* - POST /api/commands/group/:groupName/toggle - Batch toggle commands by group
*/
import { existsSync, readdirSync, readFileSync, mkdirSync, renameSync } from 'fs';
import { existsSync, readdirSync, readFileSync, mkdirSync, renameSync, copyFileSync } from 'fs';
import { promises as fsPromises } from 'fs';
import { join, relative, dirname, basename } from 'path';
import { homedir } from 'os';
import { validatePath as validateAllowedPath } from '../../utils/path-validator.js';
import { executeCliTool } from '../../tools/cli-executor.js';
import { SmartContentFormatter } from '../../tools/cli-output-converter.js';
import type { RouteContext } from './types.js';
// ========== Types ==========
@@ -62,6 +65,38 @@ interface CommandGroupsConfig {
assignments: Record<string, string>; // commandName -> groupId mapping
}
/**
* Command creation mode type
*/
type CommandCreationMode = 'upload' | 'generate';
/**
* Parameters for creating a command
*/
interface CreateCommandParams {
mode: CommandCreationMode;
location: CommandLocation;
sourcePath?: string; // Required for 'upload' mode - path to uploaded file
skillName?: string; // Required for 'generate' mode - skill to generate from
description?: string; // Optional description for generated commands
projectPath: string;
cliType?: string; // CLI tool type for generation
}
/**
* Result of command creation operation
*/
interface CommandCreationResult extends CommandOperationResult {
commandInfo?: CommandMetadata | null;
}
/**
* Validation result for command file
*/
type CommandFileValidation =
| { valid: true; errors: string[]; commandInfo: CommandMetadata }
| { valid: false; errors: string[]; commandInfo: null };
// ========== Helper Functions ==========
function isRecord(value: unknown): value is Record<string, unknown> {
@@ -126,6 +161,388 @@ function parseCommandFrontmatter(content: string): CommandMetadata {
return result;
}
/**
 * Validate a command file before creation.
 *
 * Checks, in order: file existence, `.md` extension, readability,
 * frontmatter parseability, required `name` field, and YAML frontmatter
 * delimiters (`---` ... `---`).
 *
 * @param filePath - Absolute path to the candidate command file
 * @returns A discriminated validation result; `commandInfo` is only
 *          populated when `valid` is true.
 */
function validateCommandFile(filePath: string): CommandFileValidation {
  // Hard gates: missing file or wrong extension short-circuit immediately.
  if (!existsSync(filePath)) {
    return { valid: false, errors: ['Command file does not exist'], commandInfo: null };
  }
  if (!filePath.endsWith('.md')) {
    return { valid: false, errors: ['Command file must be a .md file'], commandInfo: null };
  }

  let content: string;
  try {
    content = readFileSync(filePath, 'utf8');
  } catch (err) {
    return { valid: false, errors: [`Failed to read file: ${(err as Error).message}`], commandInfo: null };
  }

  // Parse frontmatter, then accumulate soft validation errors.
  const commandInfo = parseCommandFrontmatter(content);
  const errors: string[] = [];

  if (!commandInfo.name || commandInfo.name.trim() === '') {
    errors.push('Command name is required in frontmatter');
  }

  // Structural check: frontmatter must open with `---` and close with a second `---`.
  if (!content.startsWith('---')) {
    errors.push('Command file must have YAML frontmatter (starting with ---)');
  } else if (content.indexOf('---', 3) < 0) {
    errors.push('Command file has invalid frontmatter (missing closing ---)');
  }

  return errors.length > 0
    ? { valid: false, errors, commandInfo: null }
    : { valid: true, errors: [], commandInfo };
}
/**
 * Upload (copy) a command file to the commands directory.
 * Handles group subdirectory creation and path security validation.
 *
 * @param sourcePath - Source command file path
 * @param targetGroup - Target group subdirectory (e.g., 'workflow/review')
 * @param location - 'project' or 'user'
 * @param projectPath - Project root path
 * @param customName - Optional custom filename (without .md extension)
 * @returns CommandCreationResult with success status and command info
 */
async function uploadCommand(
  sourcePath: string,
  targetGroup: string,
  location: CommandLocation,
  projectPath: string,
  customName?: string
): Promise<CommandCreationResult> {
  try {
    // Validate source file exists and is a markdown command file.
    if (!existsSync(sourcePath)) {
      return { success: false, message: 'Source command file does not exist', status: 404 };
    }
    if (!sourcePath.endsWith('.md')) {
      return { success: false, message: 'Source file must be a .md file', status: 400 };
    }

    // Validate source file content before touching the filesystem.
    const validation = validateCommandFile(sourcePath);
    if (!validation.valid) {
      return { success: false, message: validation.errors.join(', '), status: 400 };
    }

    // Get target commands directory.
    const commandsDir = getCommandsDir(location, projectPath);

    // Build target path with optional group subdirectory.
    let targetDir = commandsDir;
    if (targetGroup && targetGroup.trim() !== '') {
      // Sanitize group path - prevent path traversal.
      // Backslashes are normalized to forward slashes FIRST so Windows-style
      // separators cannot smuggle extra path segments past the later rules.
      const sanitizedGroup = targetGroup
        .replace(/\\/g, '/')       // Normalize Windows separators
        .replace(/\.\./g, '')      // Remove path traversal attempts
        .replace(/[<>:"|?*]/g, '') // Remove invalid characters
        .replace(/\/+/g, '/')      // Collapse multiple slashes
        .replace(/^\/|\/$/g, ''); // Remove leading/trailing slashes
      if (sanitizedGroup) {
        targetDir = join(commandsDir, sanitizedGroup);
      }
    }

    // Create target directory if needed.
    if (!existsSync(targetDir)) {
      mkdirSync(targetDir, { recursive: true });
    }

    // Determine target filename (custom name wins when provided).
    const sourceBasename = basename(sourcePath, '.md');
    const targetFilename = (customName && customName.trim() !== '')
      ? `${customName.replace(/\.md$/, '')}.md`
      : `${sourceBasename}.md`;

    // Sanitize filename - strip traversal sequences and BOTH separator styles
    // (the original only stripped forward slashes, leaving backslashes).
    const sanitizedFilename = targetFilename
      .replace(/\.\./g, '')
      .replace(/[<>:"|?*]/g, '')
      .replace(/[\\/]/g, '');

    const targetPath = join(targetDir, sanitizedFilename);

    // Security check: ensure the target path is CONTAINED in the commands
    // directory. A separator-aware comparison avoids the classic prefix
    // pitfall where e.g. "/cmds-evil" passes a bare startsWith("/cmds") test.
    const isContained =
      targetPath === commandsDir ||
      targetPath.startsWith(commandsDir + '/') ||
      targetPath.startsWith(commandsDir + '\\');
    if (!isContained) {
      return { success: false, message: 'Invalid target path - path traversal detected', status: 400 };
    }

    // Never overwrite an existing command.
    if (existsSync(targetPath)) {
      return { success: false, message: `Command '${sanitizedFilename}' already exists in target location`, status: 409 };
    }

    // Copy file to target path.
    copyFileSync(sourcePath, targetPath);

    return {
      success: true,
      message: 'Command uploaded successfully',
      commandName: validation.commandInfo.name,
      location,
      commandInfo: {
        name: validation.commandInfo.name,
        description: validation.commandInfo.description,
        group: targetGroup || 'other'
      }
    };
  } catch (error) {
    return {
      success: false,
      message: (error as Error).message,
      status: 500
    };
  }
}
/**
 * Generation parameters for command generation via CLI.
 *
 * Consumed by generateCommandViaCLI; optional fields fall back to
 * sensible defaults (no group, no hint, no streaming, 'claude' CLI).
 */
interface CommandGenerationParams {
  commandName: string;                       // Command/file name (no path characters allowed)
  description: string;                       // Human-readable description fed to the generator skill
  location: CommandLocation;                 // 'project' or 'user' commands directory
  projectPath: string;                       // Project root; also the CLI working directory
  group?: string;                            // Optional group subdirectory (e.g. 'workflow/review')
  argumentHint?: string;                     // Optional argument-hint frontmatter value
  broadcastToClients?: (data: unknown) => void; // Optional websocket broadcast for streaming progress
  cliType?: string;                          // CLI tool used for generation (defaults to 'claude')
}
/**
 * Generate a command via a CLI tool using the command-generator skill.
 * Follows the pattern from skills-routes.ts generateSkillViaCLI.
 *
 * Flow: validate inputs -> resolve target path -> build skill prompt ->
 * execute CLI in write mode (optionally streaming output to clients) ->
 * verify the generated file exists and is valid.
 *
 * @param params - Generation parameters including name, description, location, etc.
 * @returns CommandCreationResult with success status and generated command info
 */
async function generateCommandViaCLI({
  commandName,
  description,
  location,
  projectPath,
  group,
  argumentHint,
  broadcastToClients,
  cliType = 'claude'
}: CommandGenerationParams): Promise<CommandCreationResult> {
  // Unique execution ID so clients can correlate streamed output events.
  const executionId = `cmd-gen-${commandName}-${Date.now()}`;

  try {
    // Validate required inputs.
    if (!commandName || commandName.trim() === '') {
      return { success: false, message: 'Command name is required', status: 400 };
    }
    if (!description || description.trim() === '') {
      return { success: false, message: 'Description is required for command generation', status: 400 };
    }
    // The name becomes a filename: reject path characters outright.
    if (commandName.includes('..') || commandName.includes('/') || commandName.includes('\\')) {
      return { success: false, message: 'Invalid command name - path characters not allowed', status: 400 };
    }

    // Get target commands directory.
    const commandsDir = getCommandsDir(location, projectPath);

    // Build target path with optional group subdirectory (sanitized against traversal).
    let targetDir = commandsDir;
    if (group && group.trim() !== '') {
      const sanitizedGroup = group
        .replace(/\\/g, '/')       // Normalize Windows separators first
        .replace(/\.\./g, '')
        .replace(/[<>:"|?*]/g, '')
        .replace(/\/+/g, '/')
        .replace(/^\/|\/$/g, '');
      if (sanitizedGroup) {
        targetDir = join(commandsDir, sanitizedGroup);
      }
    }

    const targetPath = join(targetDir, `${commandName}.md`);

    // Refuse to overwrite an existing command.
    if (existsSync(targetPath)) {
      return {
        success: false,
        message: `Command '${commandName}' already exists in ${location} location${group ? ` (group: ${group})` : ''}`,
        status: 409
      };
    }

    // Ensure target directory exists.
    if (!existsSync(targetDir)) {
      await fsPromises.mkdir(targetDir, { recursive: true });
    }

    // Build target location display for the prompt.
    const targetLocationDisplay = location === 'project'
      ? '.claude/commands/'
      : '~/.claude/commands/';

    // Structured command parameters consumed by the /command-generator skill.
    const commandParams = {
      skillName: commandName,
      description,
      location,
      group: group || '',
      argumentHint: argumentHint || ''
    };

    // Build the instruction list dynamically so numbering stays sequential
    // regardless of which optional items (group, argument hint) are present.
    // (The previous template produced duplicate/out-of-order numbers.)
    const instructions = [
      'Use the command-generator skill to create a command file with proper YAML frontmatter',
      'Include name, description in frontmatter',
      ...(group ? ['Include group in frontmatter'] : []),
      ...(argumentHint ? ['Include argument-hint in frontmatter'] : []),
      'Generate useful command content and usage examples',
      `Output the file to: ${targetPath}`
    ].map((line, idx) => `${idx + 1}. ${line}`).join('\n');

    // Prompt that invokes /command-generator skill with structured parameters.
    const prompt = `/command-generator
## Command Parameters (Structured Input)
\`\`\`json
${JSON.stringify(commandParams, null, 2)}
\`\`\`
## User Request
Create a new Claude Code command with the following specifications:
- **Command Name**: ${commandName}
- **Description**: ${description}
- **Target Location**: ${targetLocationDisplay}${group ? `${group}/` : ''}${commandName}.md
- **Location Type**: ${location === 'project' ? 'Project-level (.claude/commands/)' : 'User-level (~/.claude/commands/)'}
${group ? `- **Group**: ${group}` : ''}
${argumentHint ? `- **Argument Hint**: ${argumentHint}` : ''}
## Instructions
${instructions}`;

    // Broadcast CLI_EXECUTION_STARTED event.
    if (broadcastToClients) {
      broadcastToClients({
        type: 'CLI_EXECUTION_STARTED',
        payload: {
          executionId,
          tool: cliType,
          mode: 'write',
          category: 'internal',
          context: 'command-generation',
          commandName
        }
      });
    }

    // Create onOutput callback for real-time streaming of CLI output units.
    const onOutput = broadcastToClients
      ? (unit: import('../../tools/cli-output-converter.js').CliOutputUnit) => {
          const content = SmartContentFormatter.format(unit.content, unit.type);
          broadcastToClients({
            type: 'CLI_OUTPUT',
            payload: {
              executionId,
              chunkType: unit.type,
              data: content
            }
          });
        }
      : undefined;

    // Execute CLI tool with write mode (the CLI creates the file itself).
    const startTime = Date.now();
    const result = await executeCliTool({
      tool: cliType,
      prompt,
      mode: 'write',
      cd: projectPath,
      timeout: 600000, // 10 minutes
      category: 'internal',
      id: executionId
    }, onOutput);

    // Broadcast CLI_EXECUTION_COMPLETED event.
    if (broadcastToClients) {
      broadcastToClients({
        type: 'CLI_EXECUTION_COMPLETED',
        payload: {
          executionId,
          success: result.success,
          status: result.execution?.status || (result.success ? 'success' : 'error'),
          duration_ms: Date.now() - startTime
        }
      });
    }

    // Check if execution was successful.
    if (!result.success) {
      return {
        success: false,
        message: `CLI generation failed: ${result.stderr || 'Unknown error'}`,
        status: 500
      };
    }

    // The CLI is expected to have written the file; verify it exists...
    if (!existsSync(targetPath)) {
      return {
        success: false,
        message: 'Generated command file not found at expected location',
        status: 500
      };
    }

    // ...and that its content passes the same validation as uploads.
    const validation = validateCommandFile(targetPath);
    if (!validation.valid) {
      return {
        success: false,
        message: `Generated command is invalid: ${validation.errors.join(', ')}`,
        status: 500
      };
    }

    return {
      success: true,
      message: 'Command generated successfully',
      commandName: validation.commandInfo.name,
      location,
      commandInfo: {
        name: validation.commandInfo.name,
        description: validation.commandInfo.description,
        group: validation.commandInfo.group
      }
    };
  } catch (error) {
    return {
      success: false,
      message: (error as Error).message,
      status: 500
    };
  }
}
/**
* Get command groups config file path
*/
@@ -616,5 +1033,103 @@ export async function handleCommandsRoutes(ctx: RouteContext): Promise<boolean>
return true;
}
// POST /api/commands/create - Create command (upload or generate)
if (pathname === '/api/commands/create' && req.method === 'POST') {
handlePostRequest(req, res, async (body) => {
if (!isRecord(body)) {
return { success: false, message: 'Invalid request body', status: 400 };
}
const mode = body.mode;
const locationValue = body.location;
const sourcePath = typeof body.sourcePath === 'string' ? body.sourcePath : undefined;
const skillName = typeof body.skillName === 'string' ? body.skillName : undefined;
const description = typeof body.description === 'string' ? body.description : undefined;
const group = typeof body.group === 'string' ? body.group : undefined;
const argumentHint = typeof body.argumentHint === 'string' ? body.argumentHint : undefined;
const projectPathParam = typeof body.projectPath === 'string' ? body.projectPath : undefined;
const cliType = typeof body.cliType === 'string' ? body.cliType : 'claude';
// Validate mode
if (mode !== 'upload' && mode !== 'generate') {
return { success: false, message: 'Mode is required and must be "upload" or "generate"', status: 400 };
}
// Validate location
if (locationValue !== 'project' && locationValue !== 'user') {
return { success: false, message: 'Location is required (project or user)', status: 400 };
}
const location: CommandLocation = locationValue;
const projectPath = projectPathParam || initialPath;
// Validate project path for security
let validatedProjectPath = projectPath;
if (location === 'project') {
try {
validatedProjectPath = await validateAllowedPath(projectPath, { mustExist: true, allowedDirectories: [initialPath] });
} catch (err) {
const message = err instanceof Error ? err.message : String(err);
const status = message.includes('Access denied') ? 403 : 400;
console.error(`[Commands] Project path validation failed: ${message}`);
return { success: false, message: status === 403 ? 'Access denied' : 'Invalid path', status };
}
}
if (mode === 'upload') {
// Upload mode: copy existing command file
if (!sourcePath) {
return { success: false, message: 'Source path is required for upload mode', status: 400 };
}
// Validate source path for security
let validatedSourcePath: string;
try {
validatedSourcePath = await validateAllowedPath(sourcePath, { mustExist: true });
} catch (err) {
const message = err instanceof Error ? err.message : String(err);
const status = message.includes('Access denied') ? 403 : 400;
console.error(`[Commands] Source path validation failed: ${message}`);
return { success: false, message: status === 403 ? 'Access denied' : 'Invalid source path', status };
}
return await uploadCommand(
validatedSourcePath,
group || '',
location,
validatedProjectPath
);
} else if (mode === 'generate') {
// Generate mode: use CLI to generate command
if (!skillName) {
return { success: false, message: 'Skill name is required for generate mode', status: 400 };
}
if (!description) {
return { success: false, message: 'Description is required for generate mode', status: 400 };
}
// Validate skill name for security
if (skillName.includes('..') || skillName.includes('/') || skillName.includes('\\')) {
return { success: false, message: 'Invalid skill name - path characters not allowed', status: 400 };
}
return await generateCommandViaCLI({
commandName: skillName,
description,
location,
projectPath: validatedProjectPath,
group,
argumentHint,
broadcastToClients: ctx.broadcastToClients,
cliType
});
}
// This should never be reached due to mode validation above
return { success: false, message: 'Invalid mode', status: 400 };
});
return true;
}
return false;
}

View File

@@ -10,6 +10,54 @@ import { StoragePaths } from '../../config/storage-paths.js';
import { join } from 'path';
import { getDefaultTool } from '../../tools/claude-cli-tools.js';
// ========================================
// Error Handling Utilities
// ========================================
/**
 * Sanitize an error for a client-facing response.
 *
 * The full error is logged server-side when DEBUG or development mode is
 * active; the caller receives only a generic, user-friendly message so
 * internal details (paths, SQL, stack fragments) never leak to clients.
 *
 * @param error - The caught error (any shape)
 * @param context - Short label identifying where the error occurred
 * @returns A safe, generic message suitable for the HTTP response body
 */
function sanitizeErrorMessage(error: unknown, context: string): string {
  const rawMessage = error instanceof Error ? error.message : String(error);

  // Full details stay server-side, and only in debug/dev environments.
  if (process.env.DEBUG || process.env.NODE_ENV === 'development') {
    console.error(`[CoreMemoryRoutes] ${context}:`, error);
  }

  // First matching rule wins; order mirrors the original checks.
  const rules: Array<[needles: string[], response: string]> = [
    [['enoent', 'no such file'], 'Resource not found'],
    [['eacces', 'permission denied'], 'Access denied'],
    [['sqlite', 'database'], 'Database operation failed'],
    [['json', 'parse'], 'Invalid data format'],
  ];

  const lowered = rawMessage.toLowerCase();
  for (const [needles, response] of rules) {
    if (needles.some((needle) => lowered.includes(needle))) {
      return response;
    }
  }

  // Unknown errors get a generic message (don't expose internals).
  return 'An unexpected error occurred';
}
/**
 * Send a JSON error response of the form `{ "error": <message> }`
 * with the given HTTP status code, then end the response.
 */
function writeErrorResponse(
  res: http.ServerResponse,
  statusCode: number,
  message: string
): void {
  const body = JSON.stringify({ error: message });
  res.writeHead(statusCode, { 'Content-Type': 'application/json' });
  res.end(body);
}
/**
* Route context interface
*/
@@ -303,6 +351,190 @@ export async function handleCoreMemoryRoutes(ctx: RouteContext): Promise<boolean
return true;
}
// API: Preview eligible sessions for selective extraction
if (pathname === '/api/core-memory/extract/preview' && req.method === 'GET') {
const projectPath = url.searchParams.get('path') || initialPath;
const includeNative = url.searchParams.get('includeNative') === 'true';
const maxSessionsParam = url.searchParams.get('maxSessions');
const maxSessions = maxSessionsParam ? parseInt(maxSessionsParam, 10) : undefined;
// Validate maxSessions parameter
if (maxSessionsParam && (isNaN(maxSessions as number) || (maxSessions as number) < 1)) {
writeErrorResponse(res, 400, 'Invalid maxSessions parameter: must be a positive integer');
return true;
}
try {
const { MemoryExtractionPipeline } = await import('../memory-extraction-pipeline.js');
const pipeline = new MemoryExtractionPipeline(projectPath);
const preview = pipeline.previewEligibleSessions({
includeNative,
maxSessions,
});
res.writeHead(200, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({
success: true,
sessions: preview.sessions,
summary: preview.summary,
}));
} catch (error: unknown) {
// Log full error server-side, return sanitized message to client
writeErrorResponse(res, 500, sanitizeErrorMessage(error, 'extract/preview'));
}
return true;
}
// API: Selective extraction for specific sessions
if (pathname === '/api/core-memory/extract/selected' && req.method === 'POST') {
handlePostRequest(req, res, async (body) => {
const { sessionIds, includeNative, path: projectPath } = body;
const basePath = projectPath || initialPath;
// Validate sessionIds - return 400 for invalid input
if (!Array.isArray(sessionIds)) {
return { error: 'sessionIds must be an array', status: 400 };
}
if (sessionIds.length === 0) {
return { error: 'sessionIds cannot be empty', status: 400 };
}
// Validate each sessionId is a non-empty string
for (const id of sessionIds) {
if (typeof id !== 'string' || id.trim() === '') {
return { error: 'Each sessionId must be a non-empty string', status: 400 };
}
}
try {
const store = getCoreMemoryStore(basePath);
const scheduler = new MemoryJobScheduler(store.getDb());
const { MemoryExtractionPipeline, SessionAccessDeniedError } = await import('../memory-extraction-pipeline.js');
const pipeline = new MemoryExtractionPipeline(basePath);
// Get preview to validate sessions (project-scoped)
const preview = pipeline.previewEligibleSessions({ includeNative });
const validSessionIds = new Set(preview.sessions.map(s => s.sessionId));
// Return 404 if no eligible sessions exist at all
if (validSessionIds.size === 0) {
return { error: 'No eligible sessions found for extraction', status: 404 };
}
const queued: string[] = [];
const skipped: string[] = [];
const invalidIds: string[] = [];
const unauthorizedIds: string[] = [];
for (const sessionId of sessionIds) {
// SECURITY: Verify session belongs to this project
// This double-checks that the sessionId is from the project-scoped preview
if (!validSessionIds.has(sessionId)) {
// Check if it's unauthorized (exists but not in this project)
if (!pipeline.verifySessionBelongsToProject(sessionId)) {
unauthorizedIds.push(sessionId);
} else {
invalidIds.push(sessionId);
}
continue;
}
// Check if already extracted
const existingOutput = store.getStage1Output(sessionId);
if (existingOutput) {
skipped.push(sessionId);
continue;
}
// Get session info for watermark
const historyStore = (await import('../../tools/cli-history-store.js')).getHistoryStore(basePath);
const session = historyStore.getConversation(sessionId);
if (!session) {
invalidIds.push(sessionId);
continue;
}
// Enqueue job
const watermark = Math.floor(new Date(session.updated_at).getTime() / 1000);
scheduler.enqueueJob('phase1_extraction', sessionId, watermark);
queued.push(sessionId);
}
// Return 409 Conflict if all sessions were already extracted
if (queued.length === 0 && skipped.length === sessionIds.length) {
return {
error: 'All specified sessions have already been extracted',
status: 409,
skipped
};
}
// Return 404 if no valid sessions were found (all were invalid or unauthorized)
if (queued.length === 0 && skipped.length === 0) {
return { error: 'No valid sessions found among the provided IDs', status: 404 };
}
// Generate batch job ID
const jobId = `batch-${Date.now()}`;
// Broadcast start event
broadcastToClients({
type: 'MEMORY_EXTRACTION_STARTED',
payload: {
timestamp: new Date().toISOString(),
jobId,
queuedCount: queued.length,
selective: true,
}
});
// Fire-and-forget: process queued sessions
// Sessions already validated above, skip auth check for efficiency
(async () => {
try {
for (const sessionId of queued) {
try {
await pipeline.runExtractionJob(sessionId, { skipAuthorization: true });
} catch (err) {
if (process.env.DEBUG) {
console.warn(`[SelectiveExtraction] Failed for ${sessionId}:`, (err as Error).message);
}
}
}
broadcastToClients({
type: 'MEMORY_EXTRACTION_COMPLETED',
payload: { timestamp: new Date().toISOString(), jobId }
});
} catch (err) {
broadcastToClients({
type: 'MEMORY_EXTRACTION_FAILED',
payload: {
timestamp: new Date().toISOString(),
jobId,
error: (err as Error).message,
}
});
}
})();
// Include unauthorizedIds in response for security transparency
return {
success: true,
jobId,
queued: queued.length,
skipped: skipped.length,
invalidIds,
...(unauthorizedIds.length > 0 && { unauthorizedIds }),
};
} catch (error: unknown) {
// Log full error server-side, return sanitized message to client
return { error: sanitizeErrorMessage(error, 'extract/selected'), status: 500 };
}
});
return true;
}
// API: Get extraction pipeline status
if (pathname === '/api/core-memory/extract/status' && req.method === 'GET') {
const projectPath = url.searchParams.get('path') || initialPath;

View File

@@ -7,6 +7,7 @@ import Database from 'better-sqlite3';
import { existsSync, mkdirSync, readdirSync, readFileSync, statSync, unlinkSync, rmdirSync } from 'fs';
import { join, dirname, resolve } from 'path';
import { parseSessionFile, formatConversation, extractConversationPairs, type ParsedSession, type ParsedTurn } from './session-content-parser.js';
import { getDiscoverer, getNativeSessions } from './native-session-discovery.js';
import { StoragePaths, ensureStorageDir, getProjectId, getCCWHome } from '../config/storage-paths.js';
import type { CliOutputUnit } from './cli-output-converter.js';
@@ -1065,11 +1066,94 @@ export class CliHistoryStore {
*/
async getNativeSessionContent(ccwId: string): Promise<ParsedSession | null> {
const mapping = this.getNativeSessionMapping(ccwId);
if (!mapping || !mapping.native_session_path) {
return null;
if (mapping?.native_session_path) {
const parsed = await parseSessionFile(mapping.native_session_path, mapping.tool);
if (parsed) {
return parsed;
}
// If mapping exists but file is missing/invalid, fall through to re-discovery.
}
return parseSessionFile(mapping.native_session_path, mapping.tool);
// On-demand discovery/backfill: attempt to locate native session file from conversation metadata.
try {
const conversation = this.getConversation(ccwId);
if (!conversation) return null;
const tool = conversation.tool;
const discoverer = getDiscoverer(tool);
if (!discoverer) return null;
const createdMs = Date.parse(conversation.created_at);
const updatedMs = Date.parse(conversation.updated_at || conversation.created_at);
const durationMs = conversation.total_duration_ms || 0;
const endMs = Number.isFinite(updatedMs)
? updatedMs
: (Number.isFinite(createdMs) ? createdMs + durationMs : NaN);
if (!Number.isFinite(endMs)) return null;
const afterTimestamp = Number.isFinite(createdMs) ? new Date(createdMs - 60_000) : undefined;
const sessions = getNativeSessions(tool, { workingDir: this.projectPath, afterTimestamp });
if (sessions.length === 0) return null;
// Prefer sessions whose updatedAt is close to execution end time.
const timeWindowMs = Math.max(5 * 60_000, durationMs + 2 * 60_000);
const timeCandidates = sessions.filter(s => Math.abs(s.updatedAt.getTime() - endMs) <= timeWindowMs);
const candidates = timeCandidates.length > 0
? timeCandidates
: sessions
.map(session => ({ session, timeDiffMs: Math.abs(session.updatedAt.getTime() - endMs) }))
.sort((a, b) => a.timeDiffMs - b.timeDiffMs)
.slice(0, 50)
.map(x => x.session);
const prompt = conversation.turns[0]?.prompt || '';
const promptPrefix = prompt.substring(0, 200).trim();
const scored = candidates
.map(session => {
let promptMatch = false;
if (promptPrefix) {
try {
const firstUserMessage = discoverer.extractFirstUserMessage(session.filePath);
promptMatch = !!firstUserMessage && firstUserMessage.includes(promptPrefix);
} catch {
// Ignore extraction errors (still allow time-based match)
}
}
return {
session,
promptMatch,
timeDiffMs: Math.abs(session.updatedAt.getTime() - endMs)
};
})
.sort((a, b) => {
if (a.promptMatch !== b.promptMatch) return a.promptMatch ? -1 : 1;
return a.timeDiffMs - b.timeDiffMs;
});
const best = scored[0]?.session;
if (!best) return null;
// Persist mapping for future loads (best-effort).
try {
this.saveNativeSessionMapping({
ccw_id: ccwId,
tool,
native_session_id: best.sessionId,
native_session_path: best.filePath,
project_hash: best.projectHash,
created_at: new Date().toISOString()
});
} catch {
// Ignore persistence errors; still attempt to return content.
}
return await parseSessionFile(best.filePath, tool);
} catch {
return null;
}
}
/**

View File

@@ -4,7 +4,7 @@
*/
import { existsSync, readdirSync, readFileSync, statSync } from 'fs';
import { join, basename, resolve } from 'path';
import { join, basename, dirname, resolve } from 'path';
// basename is used for extracting session ID from filename
import { createHash } from 'crypto';
import { homedir } from 'os';
@@ -43,6 +43,48 @@ function getHomePath(): string {
return homedir().replace(/\\/g, '/');
}
/**
 * Canonicalize a project root path so it can be compared against the keys in
 * Gemini's `projects.json` and the contents of `.project_root` marker files.
 *
 * Gemini records Windows roots as lowercased absolute paths that use
 * backslashes, so the same transformation is applied here on win32; on every
 * other platform the resolved absolute path is returned untouched.
 */
function normalizeGeminiProjectRootPath(projectDir: string): string {
  const absolute = resolve(projectDir);
  return process.platform === 'win32'
    ? absolute.replace(/\//g, '\\').toLowerCase()
    : absolute;
}
// Cache for the parsed `projects.json`, invalidated when the file's mtime changes.
let geminiProjectsCache:
  | { configPath: string; mtimeMs: number; projects: Record<string, string> }
  | null = null;

/**
 * Load the Gemini project mapping from `~/.gemini/projects.json` (best-effort).
 *
 * Expected format: { "projects": { "<projectRoot>": "<projectName>" } }.
 * The parsed mapping is cached and reused until the file's mtime changes.
 * Returns null when the file is missing, unreadable, malformed, or lacks a
 * valid `projects` object.
 */
function getGeminiProjectsMap(): Record<string, string> | null {
  const configPath = join(getHomePath(), '.gemini', 'projects.json');
  try {
    const { mtimeMs } = statSync(configPath);
    const cached = geminiProjectsCache;
    if (cached && cached.configPath === configPath && cached.mtimeMs === mtimeMs) {
      return cached.projects;
    }
    const parsed = JSON.parse(readFileSync(configPath, 'utf8')) as { projects?: Record<string, string> };
    const projects = parsed.projects;
    if (!projects || typeof projects !== 'object') {
      return null;
    }
    geminiProjectsCache = { configPath, mtimeMs, projects };
    return projects;
  } catch {
    return null;
  }
}
/**
* Base session discoverer interface
*/
@@ -177,12 +219,76 @@ abstract class SessionDiscoverer {
/**
* Gemini Session Discoverer
* Path: ~/.gemini/tmp/<projectHash>/chats/session-*.json
* Legacy path: ~/.gemini/tmp/<projectHash>/chats/session-*.json
* Current path (Gemini CLI): ~/.gemini/tmp/<projectName>/chats/session-*.json
*/
class GeminiSessionDiscoverer extends SessionDiscoverer {
tool = 'gemini';
basePath = join(getHomePath(), '.gemini', 'tmp');
/**
 * Resolve the folder name(s) under `this.basePath` (~/.gemini/tmp) that may
 * contain sessions for `workingDir`.
 *
 * Three strategies are combined, in order:
 *  1. Legacy layout: a folder named by the hash of the working directory.
 *  2. Current layout: a project-name folder resolved via ~/.gemini/projects.json,
 *     walking up parent directories (Gemini can map a nested root).
 *  3. Fallback: scan basePath for folders whose `.project_root` marker file
 *     points back at `workingDir`.
 *
 * Returns a de-duplicated list of folder names; may be empty when nothing matches.
 */
private getProjectFoldersForWorkingDir(workingDir: string): string[] {
  // Set de-duplicates: legacy and current layouts could resolve to the same folder.
  const folders = new Set<string>();
  // Legacy: hashed folder
  const projectHash = calculateProjectHash(workingDir);
  if (existsSync(join(this.basePath, projectHash))) {
    folders.add(projectHash);
  }
  // Current: project-name folder resolved via ~/.gemini/projects.json
  let hasProjectNameFolder = false;
  const projectsMap = getGeminiProjectsMap();
  if (projectsMap) {
    const normalized = normalizeGeminiProjectRootPath(workingDir);
    // Prefer exact match first, then walk up parents (Gemini can map nested roots)
    let cursor: string | null = normalized;
    while (cursor) {
      const mapped = projectsMap[cursor];
      if (mapped) {
        const mappedPath = join(this.basePath, mapped);
        // Only count the mapping as usable when the folder actually exists;
        // otherwise hasProjectNameFolder stays false and the marker-scan
        // fallback below still runs.
        if (existsSync(mappedPath)) {
          folders.add(mapped);
          hasProjectNameFolder = true;
        }
        // Stop at the first (nearest) mapped ancestor, found or not.
        break;
      }
      const parent = dirname(cursor);
      // dirname of a filesystem root returns itself; treat that as the end of the walk.
      cursor = parent !== cursor ? parent : null;
    }
  }
  // Fallback: scan for `.project_root` marker (best-effort; avoids missing mappings)
  if (!hasProjectNameFolder) {
    const normalized = normalizeGeminiProjectRootPath(workingDir);
    try {
      if (existsSync(this.basePath)) {
        for (const dirName of readdirSync(this.basePath)) {
          const fullPath = join(this.basePath, dirName);
          try {
            if (!statSync(fullPath).isDirectory()) continue;
            const markerPath = join(fullPath, '.project_root');
            if (!existsSync(markerPath)) continue;
            // Marker content is a project root path; compare after normalization.
            const marker = readFileSync(markerPath, 'utf8').trim();
            if (normalizeGeminiProjectRootPath(marker) === normalized) {
              folders.add(dirName);
              // First matching marker wins; stop scanning.
              break;
            }
          } catch {
            // Ignore invalid entries
          }
        }
      }
    } catch {
      // Ignore scan failures
    }
  }
  return Array.from(folders);
}
getSessions(options: SessionDiscoveryOptions = {}): NativeSession[] {
const { workingDir, limit, afterTimestamp } = options;
const sessions: NativeSession[] = [];
@@ -193,9 +299,7 @@ class GeminiSessionDiscoverer extends SessionDiscoverer {
// If workingDir provided, only look in that project's folder
let projectDirs: string[];
if (workingDir) {
const projectHash = calculateProjectHash(workingDir);
const projectPath = join(this.basePath, projectHash);
projectDirs = existsSync(projectPath) ? [projectHash] : [];
projectDirs = this.getProjectFoldersForWorkingDir(workingDir);
} else {
projectDirs = readdirSync(this.basePath).filter(d => {
const fullPath = join(this.basePath, d);
@@ -203,8 +307,8 @@ class GeminiSessionDiscoverer extends SessionDiscoverer {
});
}
for (const projectHash of projectDirs) {
const chatsDir = join(this.basePath, projectHash, 'chats');
for (const projectFolder of projectDirs) {
const chatsDir = join(this.basePath, projectFolder, 'chats');
if (!existsSync(chatsDir)) continue;
const sessionFiles = readdirSync(chatsDir)
@@ -217,7 +321,10 @@ class GeminiSessionDiscoverer extends SessionDiscoverer {
.sort((a, b) => b.stat.mtimeMs - a.stat.mtimeMs);
for (const file of sessionFiles) {
if (afterTimestamp && file.stat.mtime <= afterTimestamp) continue;
if (afterTimestamp && file.stat.mtime <= afterTimestamp) {
// sessionFiles are sorted descending by mtime, we can stop early
break;
}
try {
const content = JSON.parse(readFileSync(file.path, 'utf8'));
@@ -225,7 +332,7 @@ class GeminiSessionDiscoverer extends SessionDiscoverer {
sessionId: content.sessionId,
tool: this.tool,
filePath: file.path,
projectHash,
projectHash: content.projectHash,
createdAt: new Date(content.startTime || file.stat.birthtime),
updatedAt: new Date(content.lastUpdated || file.stat.mtime)
});
@@ -238,7 +345,14 @@ class GeminiSessionDiscoverer extends SessionDiscoverer {
// Sort by updatedAt descending
sessions.sort((a, b) => b.updatedAt.getTime() - a.updatedAt.getTime());
return limit ? sessions.slice(0, limit) : sessions;
const seen = new Set<string>();
const uniqueSessions = sessions.filter(s => {
if (seen.has(s.sessionId)) return false;
seen.add(s.sessionId);
return true;
});
return limit ? uniqueSessions.slice(0, limit) : uniqueSessions;
} catch {
return [];
}

View File

@@ -162,7 +162,7 @@ Message types: plan_ready, plan_approved, plan_revision, task_unblocked, impl_co
},
team: {
type: 'string',
description: 'Team name',
description: 'Session ID (e.g., TLS-my-project-2026-02-27). Maps to .workflow/.team/{session-id}/.msg/. Use session ID, NOT team name.',
},
from: { type: 'string', description: '[log/list] Sender role' },
to: { type: 'string', description: '[log/list] Recipient role' },