feat: Add comprehensive tests for contentPattern and glob pattern matching

- Implemented final verification tests for contentPattern to validate behavior with empty strings, dangerous patterns, and normal patterns.
- Created glob pattern matching tests to verify regex conversion and matching functionality.
- Developed infinite loop risk tests using Worker threads to isolate potential blocking operations.
- Introduced optimized contentPattern tests to validate improvements in the findMatches function.
- Added verification tests to assess the effectiveness of contentPattern optimizations.
- Conducted safety tests for contentPattern to identify edge cases and potential vulnerabilities.
- Implemented unrestricted loop tests to analyze infinite loop risks without match limits.
- Developed tests for zero-width pattern detection logic to ensure proper handling of dangerous regex patterns.
This commit is contained in:
catlog22
2026-02-09 11:13:01 +08:00
parent dfe153778c
commit 964292ebdb
62 changed files with 7588 additions and 374 deletions

View File

@@ -23,10 +23,12 @@ import { executeInitWithProgress } from './smart-search.js';
import * as codexLensLspMod from './codex-lens-lsp.js';
import * as vscodeLspMod from './vscode-lsp.js';
import * as readFileMod from './read-file.js';
import * as readManyFilesMod from './read-many-files.js';
import * as coreMemoryMod from './core-memory.js';
import * as contextCacheMod from './context-cache.js';
import * as skillContextLoaderMod from './skill-context-loader.js';
import * as askQuestionMod from './ask-question.js';
import * as teamMsgMod from './team-msg.js';
import type { ProgressInfo } from './codex-lens.js';
// Import legacy JS tools
@@ -364,10 +366,12 @@ registerTool(toLegacyTool(smartSearchMod));
registerTool(toLegacyTool(codexLensLspMod));
registerTool(toLegacyTool(vscodeLspMod));
registerTool(toLegacyTool(readFileMod));
registerTool(toLegacyTool(readManyFilesMod));
registerTool(toLegacyTool(coreMemoryMod));
registerTool(toLegacyTool(contextCacheMod));
registerTool(toLegacyTool(skillContextLoaderMod));
registerTool(toLegacyTool(askQuestionMod));
registerTool(toLegacyTool(teamMsgMod));
// Register legacy JS tools
registerTool(uiGeneratePreviewTool);

View File

@@ -1,417 +1,108 @@
/**
* Read File Tool - Read files with multi-file, directory, and regex support
* Read File Tool - Single file precise reading with optional line pagination
*
* Features:
* - Read single or multiple files
* - Read all files in a directory (with depth control)
* - Filter files by glob/regex pattern
* - Content search with regex
* - Compact output format
* - Read a single file with full content
* - Line-based pagination with offset/limit
* - Binary file detection
*/
import { z } from 'zod';
import type { ToolSchema, ToolResult } from '../types/tool.js';
import { readFileSync, readdirSync, statSync, existsSync } from 'fs';
import { resolve, isAbsolute, join, relative, extname } from 'path';
import { existsSync, statSync } from 'fs';
import { relative } from 'path';
import { validatePath, getProjectRoot } from '../utils/path-validator.js';
import {
MAX_CONTENT_LENGTH,
readFileContent,
type FileEntry,
type ReadResult,
} from '../utils/file-reader.js';
// Max content per file in characters (content beyond this is truncated)
const MAX_CONTENT_LENGTH = 5000;
// Max number of files returned by a single call
const MAX_FILES = 50;
// Max combined content length across all returned files, in characters
const MAX_TOTAL_CONTENT = 50000;
// Define Zod schema for validation
const ParamsSchema = z.object({
paths: z.union([z.string(), z.array(z.string())]).describe('File path(s) or directory'),
pattern: z.string().optional().describe('Glob pattern to filter files (e.g., "*.ts", "**/*.js")'),
contentPattern: z.string().optional().describe('Regex to search within file content'),
maxDepth: z.number().default(3).describe('Max directory depth to traverse'),
includeContent: z.boolean().default(true).describe('Include file content in result'),
maxFiles: z.number().default(MAX_FILES).describe('Max number of files to return'),
offset: z.number().min(0).optional().describe('Line offset to start reading from (0-based, for single file only)'),
limit: z.number().min(1).optional().describe('Number of lines to read (for single file only)'),
}).refine((data) => {
// Validate: offset/limit only allowed for single file mode
const hasPagination = data.offset !== undefined || data.limit !== undefined;
const isMultiple = Array.isArray(data.paths) && data.paths.length > 1;
return !(hasPagination && isMultiple);
}, {
message: 'offset/limit parameters are only supported for single file mode. Cannot use with multiple paths.',
path: ['offset', 'limit', 'paths'],
path: z.string().describe('Single file path to read'),
offset: z.number().min(0).optional().describe('Line offset to start reading from (0-based)'),
limit: z.number().min(1).optional().describe('Number of lines to read'),
});
type Params = z.infer<typeof ParamsSchema>;
// One file's entry in a read result.
interface FileEntry {
  path: string;        // path relative to project root (falls back to raw input path)
  size: number;        // file size in bytes (from stat)
  content?: string;    // file text, present when includeContent is on
  truncated?: boolean; // true if content was cut short
  matches?: string[];  // matching lines when a contentPattern was given
  totalLines?: number; // total line count of the file
  lineRange?: { start: number; end: number }; // 0-based inclusive line range returned
}
// Aggregate result returned by the tool handler.
interface ReadResult {
  files: FileEntry[];  // entries actually included in the response
  totalFiles: number;  // total files discovered before the maxFiles limit
  message: string;     // human-readable summary line
}
// File extensions treated as binary content; such files are never read as text.
const BINARY_EXTENSIONS = new Set([
  '.png', '.jpg', '.jpeg', '.gif', '.bmp', '.ico', '.webp', '.svg',
  '.pdf', '.doc', '.docx', '.xls', '.xlsx', '.ppt', '.pptx',
  '.zip', '.tar', '.gz', '.rar', '.7z',
  '.exe', '.dll', '.so', '.dylib',
  '.mp3', '.mp4', '.wav', '.avi', '.mov',
  '.woff', '.woff2', '.ttf', '.eot', '.otf',
  '.pyc', '.class', '.o', '.obj',
]);
/**
 * Heuristic binary detection based on the file extension alone
 * (case-insensitive; no content sniffing is performed).
 */
function isBinaryFile(filePath: string): boolean {
  return BINARY_EXTENSIONS.has(extname(filePath).toLowerCase());
}
/**
 * Convert a glob pattern to an anchored, case-insensitive RegExp.
 *
 * Supports `*` (any run of characters), `?` (any single character), and
 * brace alternation `{a,b}` — e.g. "*.{js,ts}" — matching the usage
 * documented in the tool schema. All other regex metacharacters are
 * matched literally.
 */
function globToRegex(pattern: string): RegExp {
  const escaped = pattern
    // Escape regex metacharacters, keeping glob tokens (*, ?, {}) for translation.
    .replace(/[.+^$()|[\]\\]/g, '\\$&')
    .replace(/\*/g, '.*')
    .replace(/\?/g, '.')
    // {js,ts} -> (js|ts). Previously braces were escaped as literals, which
    // silently broke the documented "*.{js,ts}" form.
    .replace(/\{([^{}]*)\}/g, (_match, body: string) => `(${body.split(',').join('|')})`);
  return new RegExp(`^${escaped}$`, 'i');
}
/**
 * True when `filename` matches the glob `pattern` (full, case-insensitive match).
 */
function matchesPattern(filename: string, pattern: string): boolean {
  const regex = globToRegex(pattern);
  return regex.test(filename);
}
/**
 * Recursively gather file paths under `dir`, applying an optional glob filter
 * and honoring a maximum recursion depth. Hidden entries and node_modules are
 * skipped; unreadable directories contribute nothing.
 */
function collectFiles(
  dir: string,
  pattern: string | undefined,
  maxDepth: number,
  currentDepth: number = 0
): string[] {
  if (currentDepth > maxDepth) return [];
  const collected: string[] = [];
  try {
    for (const dirent of readdirSync(dir, { withFileTypes: true })) {
      // Ignore dotfiles/dot-dirs and dependency trees.
      if (dirent.name.startsWith('.') || dirent.name === 'node_modules') continue;
      const childPath = join(dir, dirent.name);
      if (dirent.isDirectory()) {
        collected.push(...collectFiles(childPath, pattern, maxDepth, currentDepth + 1));
      } else if (dirent.isFile() && (!pattern || matchesPattern(dirent.name, pattern))) {
        collected.push(childPath);
      }
    }
  } catch {
    // Permission or IO error: treat this directory as empty.
  }
  return collected;
}
// Options controlling how a file's content is read and truncated.
interface ReadContentOptions {
  maxLength: number; // hard cap on returned content length, in characters
  offset?: number;   // 0-based start line for line pagination
  limit?: number;    // number of lines to read starting at offset
}
// Result of a content read, with truncation/pagination metadata.
interface ReadContentResult {
  content: string;      // file text (possibly truncated, or a placeholder/error string)
  truncated: boolean;   // true when content was cut short or more lines remain
  totalLines?: number;  // total number of lines in the file
  lineRange?: { start: number; end: number }; // 0-based inclusive range actually returned
}
/**
 * Read file content with truncation and optional line-based pagination.
 *
 * Binary files (by extension) return a "[Binary file]" placeholder; read
 * errors return an "[Error: ...]" placeholder rather than throwing.
 * When offset/limit is present, content is sliced by lines first and then
 * character-truncated to maxLength; `truncated` is true when either more
 * lines remain past the slice or the slice itself was cut.
 */
function readFileContent(filePath: string, options: ReadContentOptions): ReadContentResult {
  const { maxLength, offset, limit } = options;
  if (isBinaryFile(filePath)) {
    return { content: '[Binary file]', truncated: false };
  }
  try {
    const content = readFileSync(filePath, 'utf8');
    const lines = content.split('\n');
    const totalLines = lines.length;
    // If offset/limit specified, use line-based pagination
    if (offset !== undefined || limit !== undefined) {
      // Clamp the start so an out-of-range offset yields an empty slice, not an error.
      const startLine = Math.min(offset ?? 0, totalLines);
      const endLine = limit !== undefined ? Math.min(startLine + limit, totalLines) : totalLines;
      const selectedLines = lines.slice(startLine, endLine);
      const selectedContent = selectedLines.join('\n');
      const actualEnd = endLine;
      const hasMore = actualEnd < totalLines;
      let finalContent = selectedContent;
      // Even a line slice is still bounded by the character budget.
      if (selectedContent.length > maxLength) {
        finalContent = selectedContent.substring(0, maxLength) + `\n... (+${selectedContent.length - maxLength} chars)`;
      }
      // Calculate actual line range (handle empty selection)
      const actualLineEnd = selectedLines.length > 0 ? startLine + selectedLines.length - 1 : startLine;
      return {
        content: finalContent,
        truncated: hasMore || selectedContent.length > maxLength,
        totalLines,
        lineRange: { start: startLine, end: actualLineEnd },
      };
    }
    // Default behavior: truncate by character length
    if (content.length > maxLength) {
      return {
        content: content.substring(0, maxLength) + `\n... (+${content.length - maxLength} chars)`,
        truncated: true,
        totalLines,
      };
    }
    return { content, truncated: false, totalLines };
  } catch (error) {
    // Surface the failure inline instead of throwing; callers treat this as content.
    return { content: `[Error: ${(error as Error).message}]`, truncated: false };
  }
}
/**
 * Find up to 10 regex matches in `content`, returning for each match the
 * trimmed line that contains it (truncated to 200 chars).
 *
 * Patterns that fail to compile return [].
 */
function findMatches(content: string, pattern: string): string[] {
  try {
    const regex = new RegExp(pattern, 'gm');
    const matches: string[] = [];
    let match;
    while ((match = regex.exec(content)) !== null && matches.length < 10) {
      // Get line containing match
      const lineStart = content.lastIndexOf('\n', match.index) + 1;
      const lineEnd = content.indexOf('\n', match.index);
      const line = content.substring(lineStart, lineEnd === -1 ? undefined : lineEnd).trim();
      matches.push(line.substring(0, 200)); // Truncate long lines
      // Zero-width matches (e.g. empty pattern, "a*") do not advance
      // lastIndex, which previously made this loop spin forever.
      // Bump it manually so exec() always makes progress.
      if (match.index === regex.lastIndex) {
        regex.lastIndex++;
      }
    }
    return matches;
  } catch {
    return [];
  }
}
// Tool schema for MCP
export const schema: ToolSchema = {
name: 'read_file',
description: `Read files with multi-file, directory, regex support, and line-based pagination.
description: `Read a single file with optional line-based pagination.
Usage:
read_file(paths="file.ts") # Single file (full content)
read_file(paths="file.ts", offset=100, limit=50) # Lines 100-149 (0-based)
read_file(paths=["a.ts", "b.ts"]) # Multiple files
read_file(paths="src/", pattern="*.ts") # Directory with pattern
read_file(paths="src/", contentPattern="TODO") # Search content
read_file(path="file.ts") # Full content
read_file(path="file.ts", offset=100, limit=50) # Lines 100-149 (0-based)
Supports both absolute and relative paths. Relative paths are resolved from project root.
Returns compact file list with optional content. Use offset/limit for large file pagination.`,
Use offset/limit for large file pagination.`,
inputSchema: {
type: 'object',
properties: {
paths: {
oneOf: [
{ type: 'string', description: 'Single file or directory path' },
{ type: 'array', items: { type: 'string' }, description: 'Array of file paths' }
],
description: 'File path(s) or directory to read',
},
pattern: {
path: {
type: 'string',
description: 'Glob pattern to filter files (e.g., "*.ts", "*.{js,ts}")',
},
contentPattern: {
type: 'string',
description: 'Regex pattern to search within file content',
},
maxDepth: {
type: 'number',
description: 'Max directory depth to traverse (default: 3)',
default: 3,
},
includeContent: {
type: 'boolean',
description: 'Include file content in result (default: true)',
default: true,
},
maxFiles: {
type: 'number',
description: `Max number of files to return (default: ${MAX_FILES})`,
default: MAX_FILES,
description: 'Single file path to read',
},
offset: {
type: 'number',
description: 'Line offset to start reading from (0-based). **Only for single file mode** - validation error if used with multiple paths.',
description: 'Line offset to start reading from (0-based)',
minimum: 0,
},
limit: {
type: 'number',
description: 'Number of lines to read. **Only for single file mode** - validation error if used with multiple paths.',
description: 'Number of lines to read',
minimum: 1,
},
},
required: ['paths'],
required: ['path'],
},
};
// Handler function
export async function handler(params: Record<string, unknown>): Promise<ToolResult<ReadResult>> {
const parsed = ParamsSchema.safeParse(params);
if (!parsed.success) {
return { success: false, error: `Invalid params: ${parsed.error.message}` };
}
const {
paths,
pattern,
contentPattern,
maxDepth,
includeContent,
maxFiles,
const { path: filePath, offset, limit } = parsed.data;
const cwd = getProjectRoot();
const resolvedPath = await validatePath(filePath);
if (!existsSync(resolvedPath)) {
return { success: false, error: `File not found: ${filePath}` };
}
const stat = statSync(resolvedPath);
if (!stat.isFile()) {
return { success: false, error: `Not a file: ${filePath}. Use read_many_files for directories.` };
}
const { content, truncated, totalLines, lineRange } = readFileContent(resolvedPath, {
maxLength: MAX_CONTENT_LENGTH,
offset,
limit,
} = parsed.data;
});
const cwd = getProjectRoot();
const entry: FileEntry = {
path: relative(cwd, resolvedPath) || filePath,
size: stat.size,
content,
truncated,
totalLines,
lineRange,
};
// Normalize paths to array
const inputPaths = Array.isArray(paths) ? paths : [paths];
// Collect all files to read
const allFiles: string[] = [];
for (const inputPath of inputPaths) {
const resolvedPath = await validatePath(inputPath);
if (!existsSync(resolvedPath)) {
continue; // Skip non-existent paths
}
const stat = statSync(resolvedPath);
if (stat.isDirectory()) {
// Collect files from directory
const dirFiles = collectFiles(resolvedPath, pattern, maxDepth);
allFiles.push(...dirFiles);
} else if (stat.isFile()) {
// Add single file (check pattern if provided)
if (!pattern || matchesPattern(relative(cwd, resolvedPath), pattern)) {
allFiles.push(resolvedPath);
}
}
}
// Limit files
const limitedFiles = allFiles.slice(0, maxFiles);
const totalFiles = allFiles.length;
// Process files
const files: FileEntry[] = [];
let totalContent = 0;
// Only apply offset/limit for single file mode
const isSingleFile = limitedFiles.length === 1;
const useLinePagination = isSingleFile && (offset !== undefined || limit !== undefined);
for (const filePath of limitedFiles) {
if (totalContent >= MAX_TOTAL_CONTENT) break;
const stat = statSync(filePath);
const entry: FileEntry = {
path: relative(cwd, filePath) || filePath,
size: stat.size,
};
if (includeContent) {
const remainingSpace = MAX_TOTAL_CONTENT - totalContent;
const maxLen = Math.min(MAX_CONTENT_LENGTH, remainingSpace);
// Pass offset/limit only for single file mode
const readOptions: ReadContentOptions = { maxLength: maxLen };
if (useLinePagination) {
if (offset !== undefined) readOptions.offset = offset;
if (limit !== undefined) readOptions.limit = limit;
}
const { content, truncated, totalLines, lineRange } = readFileContent(filePath, readOptions);
// If contentPattern provided, only include files with matches
if (contentPattern) {
const matches = findMatches(content, contentPattern);
if (matches.length > 0) {
entry.matches = matches;
entry.content = content;
entry.truncated = truncated;
entry.totalLines = totalLines;
entry.lineRange = lineRange;
totalContent += content.length;
} else {
continue; // Skip files without matches
}
} else {
entry.content = content;
entry.truncated = truncated;
entry.totalLines = totalLines;
entry.lineRange = lineRange;
totalContent += content.length;
}
}
files.push(entry);
}
// Build message
let message = `Read ${files.length} file(s)`;
if (totalFiles > maxFiles) {
message += ` (showing ${maxFiles} of ${totalFiles})`;
}
if (useLinePagination && files.length > 0 && files[0].lineRange) {
const { start, end } = files[0].lineRange;
message += ` [lines ${start}-${end} of ${files[0].totalLines}]`;
}
if (contentPattern) {
message += ` matching "${contentPattern}"`;
let message = `Read 1 file`;
if (lineRange) {
message += ` [lines ${lineRange.start}-${lineRange.end} of ${totalLines}]`;
}
return {
success: true,
result: {
files,
totalFiles,
files: [entry],
totalFiles: 1,
message,
},
};

View File

@@ -0,0 +1,195 @@
/**
* Read Many Files Tool - Multi-file batch reading with directory traversal and content search
*
* Features:
* - Read multiple files at once
* - Read all files in a directory (with depth control)
* - Filter files by glob pattern
* - Content search with regex
* - Compact output format
*/
import { z } from 'zod';
import type { ToolSchema, ToolResult } from '../types/tool.js';
import { existsSync, statSync } from 'fs';
import { relative } from 'path';
import { validatePath, getProjectRoot } from '../utils/path-validator.js';
import {
MAX_CONTENT_LENGTH,
MAX_FILES,
MAX_TOTAL_CONTENT,
collectFiles,
matchesPattern,
readFileContent,
findMatches,
type FileEntry,
type ReadResult,
} from '../utils/file-reader.js';
// Zod schema used for runtime validation of the raw tool parameters.
const ParamsSchema = z.object({
  paths: z.union([z.string(), z.array(z.string())]).describe('File path(s) or directory'),
  pattern: z.string().optional().describe('Glob pattern to filter files (e.g., "*.ts", "**/*.js")'),
  contentPattern: z.string().optional().describe('Regex to search within file content'),
  maxDepth: z.number().default(3).describe('Max directory depth to traverse'),
  includeContent: z.boolean().default(true).describe('Include file content in result'),
  maxFiles: z.number().default(MAX_FILES).describe('Max number of files to return'),
});
// Inferred parameter type (post-validation, with defaults applied).
type Params = z.infer<typeof ParamsSchema>;
// MCP-facing tool schema: name, usage docs, and JSON-Schema input definition.
// Runtime validation of incoming params is performed by ParamsSchema.
export const schema: ToolSchema = {
  name: 'read_many_files',
  description: `Read multiple files, directories, or search file content with regex.
Usage:
  read_many_files(paths=["a.ts", "b.ts"]) # Multiple files
  read_many_files(paths="src/", pattern="*.ts") # Directory with glob filter
  read_many_files(paths="src/", contentPattern="TODO") # Search content with regex
  read_many_files(paths="src/", pattern="*.ts", includeContent=false) # List files only
Supports both absolute and relative paths. Relative paths are resolved from project root.`,
  inputSchema: {
    type: 'object',
    properties: {
      paths: {
        oneOf: [
          { type: 'string', description: 'Single file or directory path' },
          { type: 'array', items: { type: 'string' }, description: 'Array of file paths' },
        ],
        description: 'File path(s) or directory to read',
      },
      pattern: {
        type: 'string',
        description: 'Glob pattern to filter files (e.g., "*.ts", "*.{js,ts}")',
      },
      contentPattern: {
        type: 'string',
        description: 'Regex pattern to search within file content. Empty string "" returns all content. Dangerous patterns automatically fall back to returning all content for safety.',
      },
      maxDepth: {
        type: 'number',
        description: 'Max directory depth to traverse (default: 3)',
        default: 3,
      },
      includeContent: {
        type: 'boolean',
        description: 'Include file content in result (default: true)',
        default: true,
      },
      maxFiles: {
        type: 'number',
        description: `Max number of files to return (default: ${MAX_FILES})`,
        default: MAX_FILES,
      },
    },
    required: ['paths'],
  },
};
export async function handler(params: Record<string, unknown>): Promise<ToolResult<ReadResult>> {
const parsed = ParamsSchema.safeParse(params);
if (!parsed.success) {
return { success: false, error: `Invalid params: ${parsed.error.message}` };
}
const { paths, pattern, contentPattern, maxDepth, includeContent, maxFiles } = parsed.data;
const cwd = getProjectRoot();
// Normalize paths to array
const inputPaths = Array.isArray(paths) ? paths : [paths];
// Collect all files to read
const allFiles: string[] = [];
for (const inputPath of inputPaths) {
const resolvedPath = await validatePath(inputPath);
if (!existsSync(resolvedPath)) {
continue;
}
const stat = statSync(resolvedPath);
if (stat.isDirectory()) {
const dirFiles = collectFiles(resolvedPath, pattern, maxDepth);
allFiles.push(...dirFiles);
} else if (stat.isFile()) {
if (!pattern || matchesPattern(relative(cwd, resolvedPath), pattern)) {
allFiles.push(resolvedPath);
}
}
}
// Limit files
const limitedFiles = allFiles.slice(0, maxFiles);
const totalFiles = allFiles.length;
// Process files
const files: FileEntry[] = [];
let totalContent = 0;
for (const filePath of limitedFiles) {
if (totalContent >= MAX_TOTAL_CONTENT) break;
const stat = statSync(filePath);
const entry: FileEntry = {
path: relative(cwd, filePath) || filePath,
size: stat.size,
};
if (includeContent) {
const remainingSpace = MAX_TOTAL_CONTENT - totalContent;
const maxLen = Math.min(MAX_CONTENT_LENGTH, remainingSpace);
const { content, truncated, totalLines, lineRange } = readFileContent(filePath, { maxLength: maxLen });
if (contentPattern) {
const matches = findMatches(content, contentPattern);
if (matches === null) {
// Empty/dangerous pattern: include all content
entry.content = content;
entry.truncated = truncated;
entry.totalLines = totalLines;
entry.lineRange = lineRange;
totalContent += content.length;
} else if (matches.length > 0) {
entry.matches = matches;
entry.content = content;
entry.truncated = truncated;
entry.totalLines = totalLines;
entry.lineRange = lineRange;
totalContent += content.length;
} else {
// No matches: skip file
continue;
}
} else {
entry.content = content;
entry.truncated = truncated;
entry.totalLines = totalLines;
entry.lineRange = lineRange;
totalContent += content.length;
}
}
files.push(entry);
}
let message = `Read ${files.length} file(s)`;
if (totalFiles > maxFiles) {
message += ` (showing ${maxFiles} of ${totalFiles})`;
}
if (contentPattern) {
message += ` matching "${contentPattern}"`;
}
return {
success: true,
result: {
files,
totalFiles,
message,
},
};
}

271
ccw/src/tools/team-msg.ts Normal file
View File

@@ -0,0 +1,271 @@
/**
* Team Message Bus - JSONL-based persistent message log for Agent Teams
*
* Operations:
* - log: Append a message, returns auto-incremented ID
* - read: Read message(s) by ID
* - list: List recent messages with optional filters (from/to/type/last N)
* - status: Summarize team member activity from message history
*/
import { z } from 'zod';
import type { ToolSchema, ToolResult } from '../types/tool.js';
import { existsSync, mkdirSync, readFileSync, appendFileSync } from 'fs';
import { join, dirname } from 'path';
import { getProjectRoot } from '../utils/path-validator.js';
// --- Types ---
// One persisted message in a team's JSONL log.
export interface TeamMessage {
  id: string;      // auto-incremented ID, "MSG-NNN" format
  ts: string;      // ISO-8601 timestamp with +00:00 offset
  from: string;    // sender role name
  to: string;      // recipient role name
  type: string;    // message type (plan_ready, impl_complete, ...); defaults to "message"
  summary: string; // one-line human-readable summary
  ref?: string;    // optional file path reference for large content
  data?: Record<string, unknown>; // optional structured payload
}
// Per-member activity summary produced by the status operation.
export interface StatusEntry {
  member: string;       // role name
  lastSeen: string;     // timestamp of the member's latest activity
  lastAction: string;   // description of the most recent sent message
  messageCount: number; // number of messages this member has sent
}
// --- Zod Schema ---
// Single flat schema shared by all four operations; per-operation required
// fields are enforced inside each op handler, not here.
const ParamsSchema = z.object({
  operation: z.enum(['log', 'read', 'list', 'status']).describe('Operation to perform'),
  team: z.string().describe('Team name (maps to .workflow/.team-msg/{team}/messages.jsonl)'),
  // log params
  from: z.string().optional().describe('[log/list] Sender role name'),
  to: z.string().optional().describe('[log/list] Recipient role name'),
  type: z.string().optional().describe('[log/list] Message type (plan_ready, impl_complete, test_result, etc.)'),
  summary: z.string().optional().describe('[log] One-line human-readable summary'),
  ref: z.string().optional().describe('[log] File path reference for large content'),
  data: z.record(z.string(), z.unknown()).optional().describe('[log] Optional structured data'),
  // read params
  id: z.string().optional().describe('[read] Message ID to read (e.g. MSG-003)'),
  // list params
  last: z.number().min(1).max(100).optional().describe('[list] Return last N messages (default: 20)'),
});
// Inferred parameter type (post-validation).
type Params = z.infer<typeof ParamsSchema>;
// --- Tool Schema ---
// MCP-facing schema: operation docs and the JSON-Schema input definition.
// Runtime validation is performed by ParamsSchema.
export const schema: ToolSchema = {
  name: 'team_msg',
  description: `Team message bus - persistent JSONL log for Agent Team communication.
Operations:
  team_msg(operation="log", team="my-team", from="planner", to="coordinator", type="plan_ready", summary="Plan ready: 3 tasks", ref=".workflow/.team-plan/my-team/plan.json")
  team_msg(operation="read", team="my-team", id="MSG-003")
  team_msg(operation="list", team="my-team")
  team_msg(operation="list", team="my-team", from="tester", last=5)
  team_msg(operation="status", team="my-team")
Message types: plan_ready, plan_approved, plan_revision, task_unblocked, impl_complete, impl_progress, test_result, review_result, fix_required, error, shutdown`,
  inputSchema: {
    type: 'object',
    properties: {
      operation: {
        type: 'string',
        enum: ['log', 'read', 'list', 'status'],
        description: 'Operation: log | read | list | status',
      },
      team: {
        type: 'string',
        description: 'Team name',
      },
      from: { type: 'string', description: '[log/list] Sender role' },
      to: { type: 'string', description: '[log/list] Recipient role' },
      type: { type: 'string', description: '[log/list] Message type' },
      summary: { type: 'string', description: '[log] One-line summary' },
      ref: { type: 'string', description: '[log] File path for large content' },
      data: { type: 'object', description: '[log] Optional structured data' },
      id: { type: 'string', description: '[read] Message ID (e.g. MSG-003)' },
      last: { type: 'number', description: '[list] Last N messages (default 20)', minimum: 1, maximum: 100 },
    },
    required: ['operation', 'team'],
  },
};
// --- Helpers ---
/** Directory holding a team's message log: {root}/.workflow/.team-msg/{team}. */
export function getLogDir(team: string): string {
  return join(getProjectRoot(), '.workflow', '.team-msg', team);
}
/** Full path of a team's JSONL message log file. */
function getLogPath(team: string): string {
  return join(getLogDir(team), 'messages.jsonl');
}
/** Create the log directory and an empty log file on first use; returns the log path. */
function ensureLogFile(team: string): string {
  const logPath = getLogPath(team);
  const parent = dirname(logPath);
  if (!existsSync(parent)) {
    mkdirSync(parent, { recursive: true });
  }
  if (!existsSync(logPath)) {
    // Append of an empty string creates the file without clobbering races.
    appendFileSync(logPath, '', 'utf-8');
  }
  return logPath;
}
/** Parse every valid JSONL line from the team's log; malformed lines are dropped. */
export function readAllMessages(team: string): TeamMessage[] {
  const logPath = getLogPath(team);
  if (!existsSync(logPath)) return [];
  const raw = readFileSync(logPath, 'utf-8').trim();
  if (!raw) return [];
  const parsedMessages: TeamMessage[] = [];
  for (const line of raw.split('\n')) {
    try {
      parsedMessages.push(JSON.parse(line) as TeamMessage);
    } catch {
      // Skip malformed lines rather than failing the whole read.
    }
  }
  return parsedMessages;
}
/** Next sequential message ID, zero-padded to 3 digits (MSG-001, MSG-002, ...). */
function getNextId(messages: TeamMessage[]): string {
  let highest = 0;
  for (const message of messages) {
    const idMatch = message.id.match(/^MSG-(\d+)$/);
    if (idMatch) {
      highest = Math.max(highest, parseInt(idMatch[1], 10));
    }
  }
  return `MSG-${String(highest + 1).padStart(3, '0')}`;
}
/** Current UTC timestamp in ISO-8601 form with an explicit +00:00 offset (instead of Z). */
function nowISO(): string {
  const iso = new Date().toISOString();
  return iso.replace('Z', '+00:00');
}
// --- Operations ---
/** Append a message to the team log after validating required fields; returns the new ID. */
function opLog(params: Params): ToolResult {
  if (!params.from) return { success: false, error: 'log requires "from"' };
  if (!params.to) return { success: false, error: 'log requires "to"' };
  if (!params.summary) return { success: false, error: 'log requires "summary"' };
  const logPath = ensureLogFile(params.team);
  const existing = readAllMessages(params.team);
  const id = getNextId(existing);
  const msg: TeamMessage = {
    id,
    ts: nowISO(),
    from: params.from,
    to: params.to,
    type: params.type || 'message',
    summary: params.summary,
    // Optional fields are appended after summary, preserving JSONL key order.
    ...(params.ref ? { ref: params.ref } : {}),
    ...(params.data ? { data: params.data } : {}),
  };
  appendFileSync(logPath, JSON.stringify(msg) + '\n', 'utf-8');
  return { success: true, result: { id, message: `Logged ${id}: [${msg.from}${msg.to}] ${msg.summary}` } };
}
/** Look up a single message by its ID. */
function opRead(params: Params): ToolResult {
  if (!params.id) return { success: false, error: 'read requires "id"' };
  const found = readAllMessages(params.team).find((m) => m.id === params.id);
  return found
    ? { success: true, result: found }
    : { success: false, error: `Message ${params.id} not found in team "${params.team}"` };
}
function opList(params: Params): ToolResult {
let messages = readAllMessages(params.team);
// Apply filters
if (params.from) messages = messages.filter(m => m.from === params.from);
if (params.to) messages = messages.filter(m => m.to === params.to);
if (params.type) messages = messages.filter(m => m.type === params.type);
// Take last N
const last = params.last || 20;
const sliced = messages.slice(-last);
const lines = sliced.map(m => `${m.id} [${m.ts.substring(11, 19)}] ${m.from}${m.to} (${m.type}) ${m.summary}`);
return {
success: true,
result: {
total: messages.length,
showing: sliced.length,
messages: sliced,
formatted: lines.join('\n'),
},
};
}
/**
 * Summarize per-member activity from the message history.
 *
 * NOTE(review): only senders get lastSeen/lastAction/messageCount updates;
 * a member that only ever receives keeps the ts of the first message that
 * mentioned it, an empty lastAction, and a count of 0 — confirm intended.
 */
function opStatus(params: Params): ToolResult {
  const messages = readAllMessages(params.team);
  if (messages.length === 0) {
    return { success: true, result: { members: [], summary: 'No messages recorded yet.' } };
  }
  // Aggregate per-member stats
  const memberMap = new Map<string, StatusEntry>();
  for (const msg of messages) {
    // Ensure both participants appear in the map, including pure recipients.
    for (const role of [msg.from, msg.to]) {
      if (!memberMap.has(role)) {
        memberMap.set(role, { member: role, lastSeen: msg.ts, lastAction: '', messageCount: 0 });
      }
    }
    // Only the sender's stats advance per message.
    const fromEntry = memberMap.get(msg.from)!;
    fromEntry.lastSeen = msg.ts;
    fromEntry.lastAction = `sent ${msg.type}${msg.to}`;
    fromEntry.messageCount++;
  }
  // Most recently active members first.
  const members = Array.from(memberMap.values()).sort((a, b) => b.lastSeen.localeCompare(a.lastSeen));
  const formatted = members.map(m =>
    `${m.member.padEnd(12)} | last: ${m.lastSeen.substring(11, 19)} | msgs: ${m.messageCount} | ${m.lastAction}`
  ).join('\n');
  return {
    success: true,
    result: {
      members,
      total_messages: messages.length,
      formatted,
    },
  };
}
// --- Handler ---
export async function handler(params: Record<string, unknown>): Promise<ToolResult> {
const parsed = ParamsSchema.safeParse(params);
if (!parsed.success) {
return { success: false, error: `Invalid params: ${parsed.error.message}` };
}
const p = parsed.data;
switch (p.operation) {
case 'log': return opLog(p);
case 'read': return opRead(p);
case 'list': return opList(p);
case 'status': return opStatus(p);
default:
return { success: false, error: `Unknown operation: ${p.operation}` };
}
}