Mirror of https://github.com/catlog22/Claude-Code-Workflow.git (synced 2026-02-14 02:42:04 +08:00)
feat: Add global relationships management to GlobalSymbolIndex
- Introduced a new schema version (v2) with a global_relationships table.
- Implemented CRUD operations for file relationships, including update and delete functionality.
- Added query capabilities for relationships by target and by symbols.
- Created migration logic from the v1 to the v2 schema.
- Enhanced tests for global relationships, covering insertion, querying, and deletion scenarios.

docs: Add update-single command for generating module documentation

- Created a new command to generate manual-style documentation (CLAUDE.md) for a single module.
- Detailed the execution process and implementation phases for the command.
- Included usage examples and error-handling guidelines.

feat: Implement team command for CLI interface

- Added a new team command for logging and retrieving messages in a team message bus.
- Supported subcommands for logging, reading, listing, and checking message status.
- Included error handling and JSON output options.

test: Add comprehensive tests for global relationships

- Developed extensive tests for the global_relationships table in GlobalSymbolIndex.
- Covered schema creation, migration, CRUD operations, and performance benchmarks.
- Ensured project isolation and validated query functionality for relationships.
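The headline feature is the v2 schema with a global_relationships table plus the v1→v2 migration. A minimal sketch of what that migration implies (assuming a SQLite-backed index via better-sqlite3; the actual DDL, column set, and index names live in GlobalSymbolIndex):

```javascript
// Hedged sketch only: table/column names are illustrative, not the shipped schema.
const Database = require('better-sqlite3');

function migrateV1toV2(db) {
  const version = db.pragma('user_version', { simple: true });
  if (version >= 2) return; // already migrated
  db.exec(`
    CREATE TABLE IF NOT EXISTS global_relationships (
      id          INTEGER PRIMARY KEY,
      project     TEXT NOT NULL,   -- preserves project isolation
      source_file TEXT NOT NULL,
      target      TEXT NOT NULL,   -- supports query-by-target
      symbols     TEXT,            -- supports query-by-symbols
      kind        TEXT
    );
    CREATE INDEX IF NOT EXISTS idx_grel_target ON global_relationships(project, target);
  `);
  db.pragma('user_version = 2'); // bump schema version
}

migrateV1toV2(new Database('global-symbol-index.db'));
```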
@@ -68,14 +68,15 @@ const task = plan.tasks.find(t => t.id === taskId)
 const context = {
   // Base context
   scope: task.scope,
-  modification_points: task.modification_points,
+  files: task.files, // File-level changes (each has .change)
   implementation: task.implementation,

-  // Medium/High complexity: WHY + HOW to verify
+  // Medium/High complexity: WHY + HOW to verify (PLANNING section)
   reference: task.reference, // Reference patterns/files
   rationale: task.rationale?.chosen_approach, // Why this approach
-  verification: task.verification?.success_metrics, // How to verify success
+  success_metrics: task.test?.success_metrics, // How to verify success

-  // High complexity: risks + code skeleton
+  // High complexity: risks + code skeleton (PLANNING section)
   risks: task.risks?.map(r => r.mitigation), // Risk mitigations to follow
   code_skeleton: task.code_skeleton, // Interface/function signatures
@@ -165,8 +166,8 @@ TASK: {task.implementation.join('\n')}
 Key functions: {task.code_skeleton.key_functions.map(f => f.signature)}

 # Include verification in EXPECTED
-EXPECTED: {task.acceptance.join(', ')}
-Success metrics: {task.verification.success_metrics.join(', ')}
+EXPECTED: {task.convergence.criteria.join(', ')}
+Success metrics: {task.test.success_metrics.join(', ')}

 # Include risk mitigations in CONSTRAINTS (High)
 CONSTRAINTS: {constraints}
@@ -268,8 +269,8 @@ find .workflow/active/ -name 'WFS-*' -type d

 ## Phase 5: Log {path} | Summary {summary_path}
 [Medium/High] Verification Checklist:
-- Unit Tests: {task.verification.unit_tests.join(', ')}
-- Success Metrics: {task.verification.success_metrics.join(', ')}
+- Unit Tests: {task.test.unit.join(', ')}
+- Success Metrics: {task.test.success_metrics.join(', ')}

 ## Next Steps: {actions}
 ```
@@ -77,7 +77,7 @@ You are a specialized roadmap planning agent that decomposes requirements into s
 verification: string, // How to verify (command, script, or explicit steps)
 definition_of_done: string // Business-language completion definition
 },
-risk_items: [string], // Risk items for this layer
+risks: [{description: string, probability: "Low"|"Medium"|"High", impact: "Low"|"Medium"|"High", mitigation: string}], // Structured risk items for this layer
 effort: "small" | "medium" | "large", // Effort estimate
 depends_on: ["L{n}"] // Preceding layers
 }
@@ -297,8 +297,9 @@ function parseProgressiveLayers(cliOutput) {
 scope: scopeMatch?.[1].split(/[,,]/).map(s => s.trim()).filter(Boolean) || [],
 excludes: excludesMatch?.[1].split(/[,,]/).map(s => s.trim()).filter(Boolean) || [],
 convergence,
-risk_items: riskMatch
+risks: riskMatch
   ? riskMatch[1].split('\n').map(s => s.replace(/^- /, '').trim()).filter(Boolean)
+      .map(desc => ({description: desc, probability: "Medium", impact: "Medium", mitigation: "N/A"}))
   : [],
 effort: normalizeEffort(effortMatch?.[1].trim()),
 depends_on: parseDependsOn(dependsMatch?.[1], 'L')
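With the rename, parsed bullet-list risks are now lifted into structured objects with default probability/impact. For instance, the new mapping above produces:

```javascript
// What the risks mapping yields for a parsed "- item" list
const captured = '- JWT library selection needs validation\n- Rate limits unknown';
const risks = captured.split('\n')
  .map(s => s.replace(/^- /, '').trim())
  .filter(Boolean)
  .map(desc => ({ description: desc, probability: "Medium", impact: "Medium", mitigation: "N/A" }));
// risks[0] → { description: "JWT library selection needs validation",
//              probability: "Medium", impact: "Medium", mitigation: "N/A" }
```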
@@ -600,14 +601,14 @@ ${l.convergence.criteria.map(c => `- ✅ ${c}`).join('\n')}
 - 🔍 **验证方法**: ${l.convergence.verification}
 - 🎯 **完成定义**: ${l.convergence.definition_of_done}

-**风险项**: ${l.risk_items.length ? l.risk_items.map(r => `\n- ⚠️ ${r}`).join('') : '无'}
+**风险项**: ${l.risks.length ? l.risks.map(r => `\n- ⚠️ ${r.description} (概率: ${r.probability}, 影响: ${r.impact}, 缓解: ${r.mitigation})`).join('') : '无'}

 **工作量**: ${l.effort}
 `).join('\n---\n\n')}

 ## 风险汇总

-${layers.flatMap(l => l.risk_items.map(r => `- **${l.id}**: ${r}`)).join('\n') || '无已识别风险'}
+${layers.flatMap(l => l.risks.map(r => `- **${l.id}**: ${r.description} (概率: ${r.probability}, 影响: ${r.impact})`)).join('\n') || '无已识别风险'}

 ## 下一步
@@ -683,7 +684,7 @@ function manualProgressiveDecomposition(requirement, context) {
 verification: "手动测试核心流程",
 definition_of_done: "用户可完成一次核心操作的完整流程"
 },
-risk_items: ["技术选型待验证"], effort: "medium", depends_on: []
+risks: [{description: "技术选型待验证", probability: "Medium", impact: "Medium", mitigation: "待评估"}], effort: "medium", depends_on: []
 },
 {
 id: "L1", name: "可用", goal: "关键用户路径完善",
@@ -693,7 +694,7 @@ function manualProgressiveDecomposition(requirement, context) {
 verification: "单元测试 + 手动测试错误场景",
 definition_of_done: "用户遇到问题时有清晰的引导和恢复路径"
 },
-risk_items: [], effort: "medium", depends_on: ["L0"]
+risks: [], effort: "medium", depends_on: ["L0"]
 }
 ]
 }
@@ -81,10 +81,10 @@ interface Task {
 scope: string; // Required: module path or feature area
 action: Action; // Required: Create|Update|Implement|...
 description?: string;
-modification_points?: Array<{file, target, change}>;
+files?: Array<{path, target, change}>;
 implementation: string[]; // Required: step-by-step guide
 test?: { unit?, integration?, commands?, coverage_target? };
-acceptance: { criteria: string[], verification: string[] }; // Required
+convergence: { criteria: string[], verification: string[] }; // Required
 commit?: { type, scope, message_template, breaking? };
 depends_on?: string[];
 priority?: number; // 1-5 (default: 3)
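For reference, a minimal object satisfying the updated interface's required fields might look like this (values are illustrative, not from the repo):

```javascript
// Illustrative Task instance for the interface above
const task = {
  scope: "src/auth",
  action: "Implement",
  implementation: ["Add POST /login route", "Issue session cookie on success"],
  convergence: {
    criteria: ["POST /login returns 200 with a session cookie"],
    verification: ["npm test"]
  },
  files: [{ path: "src/auth/login.ts", target: "handler", change: "add login route" }]
};
```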
@@ -202,14 +202,14 @@ function extractFromLitePlan(folderPath) {
 scope: t.scope || '',
 action: t.action || 'Implement',
 description: t.description || t.title,
-modification_points: t.modification_points || [],
+files: (t.modification_points || []).map(mp => ({ path: mp.file, target: mp.target, change: mp.change })),
 implementation: Array.isArray(t.implementation) ? t.implementation : [t.implementation || ''],
 test: t.verification ? {
   unit: t.verification.unit_tests,
   integration: t.verification.integration_tests,
   commands: t.verification.manual_checks
 } : {},
-acceptance: {
+convergence: {
   criteria: Array.isArray(t.acceptance) ? t.acceptance : [t.acceptance || ''],
   verification: t.verification?.manual_checks || []
 },
@@ -258,10 +258,10 @@ function extractFromWorkflowSession(sessionPath) {
 scope: task.scope || inferScopeFromTask(task),
 action: capitalizeAction(task.type) || 'Implement',
 description: task.description,
-modification_points: task.implementation?.modification_points || [],
+files: (task.implementation?.modification_points || []).map(mp => ({ path: mp.file, target: mp.target, change: mp.change })),
 implementation: task.implementation?.steps || [],
 test: task.implementation?.test || {},
-acceptance: {
+convergence: {
   criteria: task.acceptance_criteria || [],
   verification: task.verification_steps || []
 },
@@ -286,10 +286,10 @@ function extractFromWorkflowSession(sessionPath) {
 }

 function inferScopeFromTask(task) {
-  if (task.implementation?.modification_points?.length) {
-    const files = task.implementation.modification_points.map(m => m.file);
+  if (task.files?.length) {
+    const paths = task.files.map(f => f.path);
     // Find common directory prefix
-    const dirs = files.map(f => f.split('/').slice(0, -1).join('/'));
+    const dirs = paths.map(p => p.split('/').slice(0, -1).join('/'));
     return [...new Set(dirs)][0] || '';
   }
   return '';
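Worth noting: the rewritten helper returns the first unique parent directory rather than computing a true common prefix, which works when all files share one directory:

```javascript
// Behavior of the new inferScopeFromTask path logic (illustrative)
const paths = ['src/auth/login.ts', 'src/auth/session.ts'];
const dirs = paths.map(p => p.split('/').slice(0, -1).join('/')); // ['src/auth', 'src/auth']
console.log([...new Set(dirs)][0]); // "src/auth" (first unique dir, not a common prefix)
```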
@@ -354,10 +354,10 @@ ${fileContent}`;
 scope: t.scope || '',
 action: validateAction(t.action) || 'Implement',
 description: t.description || t.title,
-modification_points: t.modification_points || [],
+files: (t.modification_points || []).map(mp => ({ path: mp.file, target: mp.target, change: mp.change })),
 implementation: Array.isArray(t.implementation) ? t.implementation : [t.implementation || ''],
 test: t.test || {},
-acceptance: {
+convergence: {
   criteria: Array.isArray(t.acceptance) ? t.acceptance : [t.acceptance || ''],
   verification: t.verification || []
 },
@@ -406,10 +406,10 @@ function extractFromJsonFile(filePath) {
 scope: t.scope || '',
 action: t.action || 'Implement',
 description: t.description || t.title,
-modification_points: t.modification_points || [],
+files: (t.modification_points || []).map(mp => ({ path: mp.file, target: mp.target, change: mp.change })),
 implementation: Array.isArray(t.implementation) ? t.implementation : [t.implementation || ''],
 test: t.test || t.verification || {},
-acceptance: normalizeAcceptance(t.acceptance),
+convergence: normalizeConvergence(t.acceptance, t.convergence),
 depends_on: t.depends_on || [],
 priority: t.priority || 3
 }));
@@ -431,11 +431,13 @@ function extractFromJsonFile(filePath) {
 throw new Error('E002: JSON file does not contain valid plan structure (missing tasks array)');
 }

-function normalizeAcceptance(acceptance) {
-  if (!acceptance) return { criteria: [], verification: [] };
-  if (typeof acceptance === 'object' && acceptance.criteria) return acceptance;
-  if (Array.isArray(acceptance)) return { criteria: acceptance, verification: [] };
-  return { criteria: [String(acceptance)], verification: [] };
+function normalizeConvergence(acceptance, convergence) {
+  // Prefer new convergence field; fall back to legacy acceptance
+  const source = convergence || acceptance;
+  if (!source) return { criteria: [], verification: [] };
+  if (typeof source === 'object' && source.criteria) return source;
+  if (Array.isArray(source)) return { criteria: source, verification: [] };
+  return { criteria: [String(source)], verification: [] };
 }
```
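The legacy-to-new fallback in normalizeConvergence is easiest to verify with concrete inputs; a quick sketch using the function exactly as defined above:

```javascript
// Quick checks of normalizeConvergence's precedence and coercion (illustrative)
const assert = require('node:assert');

// New convergence object wins over legacy acceptance
assert.deepStrictEqual(
  normalizeConvergence(['legacy'], { criteria: ['new'], verification: ['npm test'] }),
  { criteria: ['new'], verification: ['npm test'] }
);
// Legacy array-style acceptance is wrapped
assert.deepStrictEqual(
  normalizeConvergence(['builds cleanly'], undefined),
  { criteria: ['builds cleanly'], verification: [] }
);
// Bare strings are coerced into a one-item criteria list
assert.deepStrictEqual(
  normalizeConvergence('works', undefined),
  { criteria: ['works'], verification: [] }
);
```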
.claude/commands/memory/update-single.md (new file, 347 lines)
@@ -0,0 +1,347 @@
---
name: update-single
description: Update single module CLAUDE.md using Explore agent for deep codebase understanding, producing manual-style documentation (handbook, not API reference)
argument-hint: "<path> [--tool gemini|qwen|codex]"
allowed-tools: Task(*), Bash(*), AskUserQuestion(*)
---

# Single Module Documentation Update (/memory:update-single)

## Overview

Generates a manual-style CLAUDE.md for a single target directory using Explore agent for deep semantic codebase understanding. The output reads like a module handbook — explaining what it does, how to use it, and how it integrates — rather than dry API documentation.

**Core capabilities:**
- Explore agent for semantic codebase exploration (not just file scanning)
- Manual/handbook-style output (usage guide, not reference docs)
- Interactive confirmation with exploration summary preview
- Tool fallback (gemini→qwen→codex)

## Usage

```bash
/memory:update-single <path> [--tool gemini|qwen|codex]

# Arguments
<path>    Target directory path (required)

# Options
--tool <gemini|qwen|codex>    Primary CLI tool (default: gemini)

# Examples
/memory:update-single src/auth
/memory:update-single .claude/commands --tool qwen
/memory:update-single ccw/frontend/src/components/issue
```

## Output Artifacts

| Artifact | Description |
|----------|-------------|
| `<path>/CLAUDE.md` | Manual-style module handbook |

**CLAUDE.md Style** — "说明书" not "文档":
- What this module does (purpose & responsibility)
- How to use it (patterns, conventions, examples)
- How it integrates (dependencies, exports, data flow)
- Important constraints (gotchas, rules, limitations)
## Execution Process

```
Phase 1: Target Validation & Scan
├─ Parse arguments (path, --tool)
├─ Validate target directory exists
└─ Quick structure scan (file count, types, depth)

Phase 2: Deep Exploration (Explore Agent)
├─ Launch Explore agent with "very thorough" level
├─ Analyze purpose, structure, patterns, exports, dependencies
└─ Build comprehensive module understanding

Phase 3: Confirmation
├─ Display exploration summary (key findings)
└─ AskUserQuestion: Generate / Cancel

Phase 4: Generate CLAUDE.md (CLI Tool)
├─ Construct manual-style prompt from exploration results
├─ Execute ccw cli with --mode write
├─ Tool fallback on failure
└─ Write to <path>/CLAUDE.md

Phase 5: Verification
└─ Display generated CLAUDE.md preview + stats
```

## Implementation

### Phase 1: Target Validation & Scan

```javascript
// Parse arguments
const args = $ARGUMENTS.trim()
const parts = args.split(/\s+/)
const toolFlagIdx = parts.indexOf('--tool')
const primaryTool = toolFlagIdx !== -1 ? parts[toolFlagIdx + 1] : 'gemini'
const targetPath = parts.find(p => !p.startsWith('--') && p !== primaryTool)

if (!targetPath) {
  console.log('ERROR: <path> is required. Usage: /memory:update-single <path> [--tool gemini|qwen|codex]')
  return
}

// Validate path exists
Bash({ command: `test -d "${targetPath}" && echo "EXISTS" || echo "NOT_FOUND"`, run_in_background: false })
// → NOT_FOUND: abort with error

// Quick structure scan
Bash({ command: `find "${targetPath}" -maxdepth 3 -type f -not -path "*/node_modules/*" -not -path "*/.git/*" | wc -l`, run_in_background: false })
Bash({ command: `ls "${targetPath}"`, run_in_background: false })

// Check existing CLAUDE.md
const hasExisting = file_exists(`${targetPath}/CLAUDE.md`)

console.log(`
## Target: ${targetPath}

Files: ${fileCount}
Existing CLAUDE.md: ${hasExisting ? 'Yes (will be overwritten)' : 'No (new)'}
Tool: ${primaryTool}

Launching deep exploration...
`)
```
### Phase 2: Deep Exploration (Explore Agent)

**⚠️ CRITICAL**: Use `run_in_background: false` — exploration results are REQUIRED before generation.

```javascript
const explorationResult = Task(
  subagent_type="Explore",
  run_in_background=false,
  description=`Explore: ${targetPath}`,
  prompt=`
Thoroughly explore the module at "${targetPath}" with "very thorough" level. I need comprehensive understanding for generating a manual-style CLAUDE.md (说明书).

## Exploration Focus

Analyze from these 7 dimensions:

1. **Purpose & Responsibility**
   - What problem does this module solve?
   - What is its core responsibility in the larger system?
   - One-sentence summary a developer would use to describe it

2. **Directory Structure & Key Files**
   - Map directory layout and file organization
   - Identify entry points, core logic files, utilities, types
   - Note any naming conventions or organizational patterns

3. **Code Patterns & Conventions**
   - Common patterns used (factory, observer, middleware, hooks, etc.)
   - Import/export conventions
   - Error handling patterns
   - State management approach (if applicable)

4. **Public API / Exports**
   - What does this module expose to the outside?
   - Key functions, classes, components, types exported
   - How do consumers typically import from this module?

5. **Dependencies & Integration**
   - External packages this module depends on
   - Internal modules it imports from
   - Modules that depend on this one (reverse dependencies)
   - Data flow: how data enters and exits this module

6. **Constraints & Gotchas**
   - Non-obvious rules a developer must follow
   - Performance considerations
   - Security-sensitive areas
   - Common pitfalls or mistakes

7. **Development Workflow**
   - How to add new functionality to this module
   - Testing approach used
   - Build/compilation specifics (if any)

## Output Format

Return a structured summary covering all 7 dimensions above. Include specific file:line references where relevant. Focus on **actionable knowledge** — what a developer needs to know to work with this module effectively.
`
)
```
### Phase 3: Confirmation

```javascript
console.log(`
## Exploration Summary

${explorationResult}

---

**Will generate**: ${targetPath}/CLAUDE.md
**Style**: Manual/handbook (说明书)
**Tool**: ${primaryTool}
`)

AskUserQuestion({
  questions: [{
    question: `Generate manual-style CLAUDE.md for "${targetPath}"?`,
    header: "Confirm",
    multiSelect: false,
    options: [
      { label: "Generate", description: "Write CLAUDE.md based on exploration" },
      { label: "Cancel", description: "Abort without changes" }
    ]
  }]
})

// Cancel → abort
```
### Phase 4: Generate CLAUDE.md (CLI Tool)

**Tool fallback hierarchy**:
```javascript
const toolOrder = {
  'gemini': ['gemini', 'qwen', 'codex'],
  'qwen': ['qwen', 'gemini', 'codex'],
  'codex': ['codex', 'gemini', 'qwen']
}[primaryTool]
```
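The hardcoded map is equivalent to rotating the preferred tool to the front of a fixed pool; an illustrative one-liner (not in the command file):

```javascript
// Same fallback order, derived instead of enumerated (illustrative)
const pool = ['gemini', 'qwen', 'codex'];
const toolOrder = [primaryTool, ...pool.filter(t => t !== primaryTool)];
// 'qwen'  → ['qwen', 'gemini', 'codex']
// 'codex' → ['codex', 'gemini', 'qwen']
```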
**Generation via ccw cli**:
```javascript
for (let tool of toolOrder) {
  Bash({
    command: `ccw cli -p "PURPOSE: Generate a manual-style CLAUDE.md (说明书) for the module at current directory.
This CLAUDE.md should read like a developer handbook — practical, actionable, concise.

## Exploration Context (use as primary source)

${explorationResult}

## CLAUDE.md Structure Requirements

Generate CLAUDE.md following this exact structure:

### 1. Title & Summary
\`# <Module Name>\`
> One-line description of purpose

### 2. Responsibilities
- Bullet list of what this module owns
- Keep to 3-7 items, each one sentence

### 3. Structure
\`\`\`
directory-tree/
├── key-files-only
└── with-brief-annotations
\`\`\`

### 4. Key Patterns
- Code conventions specific to THIS module
- Import patterns, naming rules, style decisions
- NOT generic best practices — only module-specific patterns

### 5. Usage
- How other modules use this one
- Common import/usage examples (real code, not pseudo-code)

### 6. Integration Points
- **Depends on**: modules/packages this uses (with purpose)
- **Used by**: modules that import from here

### 7. Constraints & Gotchas
- Non-obvious rules developers MUST follow
- Common mistakes to avoid
- Performance or security notes

## Style Rules
- Be CONCISE: each section 3-10 lines max
- Be PRACTICAL: actionable knowledge only, no boilerplate
- Be SPECIFIC: reference actual files and patterns, not generic advice
- No API reference listings — this is a handbook, not a reference doc
- Total length: 50-150 lines of markdown
- Language: Match the project's primary language (check existing CLAUDE.md files)

MODE: write
CONTEXT: @**/*
EXPECTED: Single CLAUDE.md file at ./CLAUDE.md following the structure above
CONSTRAINTS: Only write CLAUDE.md, no other files" --tool ${tool} --mode write --cd "${targetPath}"`,
    run_in_background: false
  })

  if (exit_code === 0) {
    console.log(`✅ ${targetPath}/CLAUDE.md generated with ${tool}`)
    break
  }
  console.log(`⚠️ ${tool} failed, trying next...`)
}
```
### Phase 5: Verification

```javascript
// Check file was created/updated
Bash({ command: `test -f "${targetPath}/CLAUDE.md" && echo "EXISTS" || echo "MISSING"`, run_in_background: false })

// Show stats
Bash({ command: `wc -l "${targetPath}/CLAUDE.md"`, run_in_background: false })

// Preview first 30 lines
Read(`${targetPath}/CLAUDE.md`, { limit: 30 })

console.log(`
## Result

✅ Generated: ${targetPath}/CLAUDE.md
Lines: ${lineCount}
Style: Manual/handbook format
Tool: ${usedTool}
`)
```
## CLAUDE.md Output Style Guide

The generated CLAUDE.md is a **说明书 (handbook)**, NOT a reference doc:

| Aspect | Handbook Style ✅ | Reference Doc Style ❌ |
|--------|-------------------|----------------------|
| Purpose | "This module handles user auth" | "Authentication module" |
| Content | How to work with it | What every function does |
| Patterns | "Always use `createAuthMiddleware()`" | "List of all exports" |
| Constraints | "Never store tokens in localStorage" | "Token storage API" |
| Length | 50-150 lines | 300+ lines |
| Audience | Developer joining the team | API consumer |

## Error Handling

| Error | Resolution |
|-------|------------|
| Path not found | Abort with clear error message |
| Explore agent failure | Fall back to a basic `ls` + `head` file scan, continue |
| All CLI tools fail | Report failure with last error, suggest `--tool` override |
| Empty directory | Abort — nothing to document |
| Existing CLAUDE.md | Overwrite entirely (full regeneration) |

## Usage Examples

```bash
# Generate handbook for a module
/memory:update-single src/auth

# Use specific tool
/memory:update-single .claude/commands --tool qwen

# Deep nested module
/memory:update-single ccw/frontend/src/components/issue

# Root-level documentation
/memory:update-single .
```
@@ -34,6 +34,26 @@ mcp__ccw-tools__team_msg({ operation: "read", team: teamName, id: "MSG-003" })
 **日志位置**: `.workflow/.team-msg/{team-name}/messages.jsonl`
 **消息类型**: `plan_ready | plan_approved | plan_revision | task_unblocked | impl_complete | impl_progress | test_result | review_result | fix_required | error | shutdown`

+### CLI 回退
+
+当 `mcp__ccw-tools__team_msg` MCP 不可用时,使用 `ccw team` CLI 作为等效回退:
+
+```javascript
+// 回退: 将 MCP 调用替换为 Bash CLI(参数一一对应)
+// log
+Bash(`ccw team log --team "${teamName}" --from "coordinator" --to "planner" --type "plan_approved" --summary "Plan已批准" --json`)
+// list
+Bash(`ccw team list --team "${teamName}" --last 10 --json`)
+// list (带过滤)
+Bash(`ccw team list --team "${teamName}" --from "tester" --last 5 --json`)
+// status
+Bash(`ccw team status --team "${teamName}" --json`)
+// read
+Bash(`ccw team read --team "${teamName}" --id "MSG-003" --json`)
+```
+
+**参数映射**: `team_msg(params)` → `ccw team <operation> --team <team> [--from/--to/--type/--summary/--ref/--data/--id/--last] [--json]`
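Since the mapping is mechanical, the fallback can be wrapped in a small helper; a hedged sketch (the helper name and flag handling are illustrative, not part of ccw):

```javascript
// Illustrative only: build a ccw team command line from team_msg-style params
function buildTeamCmd({ operation, team, ...rest }) {
  const flags = Object.entries(rest)
    .filter(([, v]) => v !== undefined)
    .map(([k, v]) => `--${k} "${v}"`) // e.g. { from: "tester", last: 5 } → --from "tester" --last "5"
    .join(' ');
  return `ccw team ${operation} --team "${team}" ${flags} --json`.replace(/\s+/g, ' ');
}

// buildTeamCmd({ operation: 'list', team: 'alpha', from: 'tester', last: 5 })
// → 'ccw team list --team "alpha" --from "tester" --last "5" --json'
```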
 ## Usage

 ```bash
@@ -15,7 +15,7 @@ Team executor role command. Operates as a teammate within an Agent Team, respons
 **Core capabilities:**
 - Task discovery from shared team task list (IMPL-* tasks)
 - Plan loading and task decomposition
-- Code implementation following plan modification points
+- Code implementation following plan files list
 - Self-validation: syntax checks, acceptance criteria verification
 - Progress reporting to coordinator
 - Sub-agent delegation for complex tasks
@@ -71,7 +71,7 @@ Phase 2: Task Grouping

 Phase 3: Code Implementation
 ├─ For each task in plan:
-│  ├─ Read modification points
+│  ├─ Read files list
 │  ├─ Read reference patterns
 │  ├─ Implement changes (Edit/Write)
 │  ├─ Complex tasks → code-developer sub-agent
@@ -186,8 +186,8 @@ function buildExecutionPrompt(planTask) {

 **Scope**: \`${planTask.scope}\` | **Action**: ${planTask.action || 'implement'}

-### Modification Points
-${(planTask.modification_points || []).map(p => `- **${p.file}** → \`${p.target}\`: ${p.change}`).join('\n')}
+### Files
+${(planTask.files || []).map(f => `- **${f.path}** → \`${f.target}\`: ${f.change}`).join('\n')}

 ### How to do it
 ${planTask.description}
@@ -199,7 +199,7 @@ ${(planTask.implementation || []).map(step => `- ${step}`).join('\n')}
 - Files: ${planTask.reference?.files?.join(', ') || 'N/A'}

 ### Done when
-${(planTask.acceptance || []).map(c => `- [ ] ${c}`).join('\n')}
+${(planTask.convergence?.criteria || []).map(c => `- [ ] ${c}`).join('\n')}
 `
 }
@@ -212,11 +212,11 @@ for (const batch of batches) {
 // Simple task: direct implementation
 const t = batch.tasks[0]
 // Read target files, apply modifications using Edit/Write
-for (const mp of (t.modification_points || [])) {
-  const content = Read(mp.file)
-  // Apply change based on modification point description
-  Edit({ file_path: mp.file, old_string: "...", new_string: "..." })
-  changedFiles.push(mp.file)
+for (const f of (t.files || [])) {
+  const content = Read(f.path)
+  // Apply change based on file entry description
+  Edit({ file_path: f.path, old_string: "...", new_string: "..." })
+  changedFiles.push(f.path)
 }
 } else {
 // Complex task(s): delegate to code-developer sub-agent
@@ -241,7 +241,7 @@ Complete each task according to its "Done when" checklist.`

 // Collect changed files from sub-agent results
 batch.tasks.forEach(t => {
-  (t.modification_points || []).forEach(mp => changedFiles.push(mp.file))
+  (t.files || []).forEach(f => changedFiles.push(f.path))
 })
 }
@@ -269,7 +269,7 @@ if (hasSyntaxErrors) {
 // Step 2: Verify acceptance criteria
 const acceptanceStatus = plan.tasks.map(t => ({
   title: t.title,
-  criteria: (t.acceptance || []).map(c => ({
+  criteria: (t.convergence?.criteria || []).map(c => ({
     criterion: c,
     met: true // Evaluate based on implementation
   }))
@@ -341,7 +341,7 @@ if (nextTasks.length > 0) {

 ```javascript
 function isSimpleTask(task) {
-  return (task.modification_points || []).length <= 2 &&
+  return (task.files || []).length <= 2 &&
          !task.code_skeleton &&
          (task.risks || []).length === 0
 }
@@ -41,7 +41,8 @@ When `--yes` or `-y`: Auto-approve splits, skip confirmations.
 | Artifact | Description |
 |----------|-------------|
 | `planning-context.md` | Evidence paths + synthesized understanding |
-| `plan.json` | Complete agent plan (detailed implementation) |
+| `plan.json` | Plan overview with task_ids[] (NO embedded tasks[]) |
+| `.task/TASK-*.json` | Independent task files following task-schema.json |
 | Updates to `plan-note.md` | Agent fills pre-allocated sections |

 ### Phase 3: Final Output
@@ -96,12 +97,15 @@ Unified collaborative planning workflow using **Plan Note** architecture:

 ```
 .workflow/.planning/{CPLAN-slug-YYYY-MM-DD}/
-├── plan-note.md                # ⭐ Core: Requirements + Tasks + Conflicts
+├── plan-note.md                # Core: Requirements + Tasks + Conflicts
 ├── requirement-analysis.json   # Phase 1: Sub-domain assignments
 ├── agents/                     # Phase 2: Per-agent detailed plans
 │   ├── {focus-area-1}/
 │   │   ├── planning-context.md # Evidence + understanding
-│   │   └── plan.json           # Complete agent plan
+│   │   ├── plan.json           # Plan overview with task_ids[] (NO embedded tasks[])
+│   │   └── .task/              # Independent task files
+│   │       ├── TASK-{ID}.json  # Task file following task-schema.json
+│   │       └── ...
 │   ├── {focus-area-2}/
 │   │   └── ...
 │   └── {focus-area-N}/
@@ -280,8 +284,8 @@ Structure Requirements:

 | Task | Output | Description |
 |------|--------|-------------|
-| Generate plan.json | `{sessionFolder}/agents/{focus-area}/plan.json` | Complete detailed plan following schema |
-| Update plan-note.md | Sync to shared file | Fill pre-allocated "任务池" and "上下文证据" sections |
+| Generate plan.json + .task/*.json | `{sessionFolder}/agents/{focus-area}/plan.json` + `.task/` | Two-layer output: plan overview + independent task files |
+| Update plan-note.md | Sync to shared file | Fill pre-allocated task pool and evidence sections |

 **Task Summary Format** (for plan-note.md):
 - Task header: `### TASK-{ID}: {Title} [{focus-area}]`
@@ -347,9 +351,18 @@ subDomains.map(sub =>

 ## Dual Output Tasks

-### Task 1: Generate Complete plan.json
-Output: ${sessionFolder}/agents/${sub.focus_area}/plan.json
-Schema: ~/.ccw/workflows/cli-templates/schemas/plan-json-schema.json
+### Task 1: Generate Two-Layer Plan Output
+Output: ${sessionFolder}/agents/${sub.focus_area}/plan.json (overview with task_ids[])
+Output: ${sessionFolder}/agents/${sub.focus_area}/.task/TASK-*.json (independent task files)
+Schema (plan): ~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json
+Schema (tasks): ~/.ccw/workflows/cli-templates/schemas/task-schema.json
+
+**Two-Layer Output Format**:
+- plan.json: Overview with task_ids[] referencing .task/ files (NO tasks[] array)
+- .task/TASK-*.json: Independent task files following task-schema.json
+- plan.json required: summary, approach, task_ids, task_count, _metadata (with plan_type)
+- Task files required: id, title, description, depends_on, convergence (with criteria[])
+- Task fields: files[].change (not modification_points), convergence.criteria (not acceptance), test (not verification)

 ### Task 2: Sync Summary to plan-note.md
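To make the two-layer contract concrete, here is a minimal shape an agent might emit (IDs and values illustrative; required fields as listed above):

```javascript
// Illustrative two-layer output: overview + one task file
const plan = {
  summary: "Auth MVP",
  approach: "Session-based login first, hardening later",
  task_ids: ["TASK-001"],          // references .task/ files; NO embedded tasks[]
  task_count: 1,
  _metadata: { plan_type: "agent" }
};

const task = {                      // written to .task/TASK-001.json
  id: "TASK-001",
  title: "Add login endpoint",
  description: "Implement POST /login with session issuance",
  depends_on: [],
  files: [{ path: "src/auth/login.ts", target: "handler", change: "add POST /login route" }],
  convergence: { criteria: ["POST /login returns 200 with session cookie"] },
  test: { unit: ["login.spec.ts"], success_metrics: ["auth suite green"] }
};
```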
@@ -365,12 +378,14 @@ Schema: ~/.ccw/workflows/cli-templates/schemas/plan-json-schema.json
 - 相关文件, 现有模式, 约束

 ## Execution Steps
-1. Generate complete plan.json
-2. Extract summary from plan.json
-3. Read ${sessionFolder}/plan-note.md
-4. Locate and replace your task pool section
-5. Locate and replace your evidence section
-6. Write back plan-note.md
+1. Create .task/ directory: mkdir -p ${sessionFolder}/agents/${sub.focus_area}/.task
+2. Generate individual task files in .task/TASK-*.json following task-schema.json
+3. Generate plan.json with task_ids[] referencing .task/ files (NO embedded tasks[])
+4. Extract summary from .task/*.json files
+5. Read ${sessionFolder}/plan-note.md
+6. Locate and replace your task pool section
+7. Locate and replace your evidence section
+8. Write back plan-note.md

 ## Important
 - Only modify your pre-allocated sections
@@ -122,11 +122,15 @@ fileContent = Read(filePath)
 try {
   jsonData = JSON.parse(fileContent)

-  // Check if plan.json from lite-plan session
-  if (jsonData.summary && jsonData.approach && jsonData.tasks) {
+  // Check if plan.json from lite-plan session (two-layer format: task_ids[])
+  if (jsonData.summary && jsonData.approach && jsonData.task_ids) {
     planObject = jsonData
     originalUserInput = jsonData.summary
     isPlanJson = true
+
+    // Load tasks from .task/*.json files
+    const planDir = filePath.replace(/[/\\][^/\\]+$/, '') // parent directory
+    planObject._loadedTasks = loadTaskFiles(planDir, jsonData.task_ids)
   } else {
     // Valid JSON but not plan.json - treat as plain text
     originalUserInput = fileContent
@@ -155,6 +159,23 @@ If `isPlanJson === false`:
 - AskUserQuestion: Select code review tool
 - Proceed to execution with full context

+## Helper Functions
+
+```javascript
+// Load task files from .task/ directory (two-layer format)
+function loadTaskFiles(planDir, taskIds) {
+  return taskIds.map(id => {
+    const taskPath = `${planDir}/.task/${id}.json`
+    return JSON.parse(Read(taskPath))
+  })
+}
+
+// Get tasks array from loaded .task/*.json files
+function getTasks(planObject) {
+  return planObject._loadedTasks || []
+}
+```
+
 ## Execution Process

 ```
@@ -202,7 +223,7 @@ if (executionContext) {
 📋 Execution Strategy (from lite-plan):
   Method: ${executionContext.executionMethod}
   Review: ${executionContext.codeReviewTool}
-  Tasks: ${executionContext.planObject.tasks.length}
+  Tasks: ${getTasks(executionContext.planObject).length}
   Complexity: ${executionContext.planObject.complexity}
 ${executionContext.executorAssignments ? `  Assignments: ${JSON.stringify(executionContext.executorAssignments)}` : ''}
 `)
@@ -277,7 +298,7 @@ function createExecutionCalls(tasks, executionMethod) {
   return calls
 }

-executionCalls = createExecutionCalls(planObject.tasks, executionMethod).map(c => ({ ...c, id: `[${c.groupId}]` }))
+executionCalls = createExecutionCalls(getTasks(planObject), executionMethod).map(c => ({ ...c, id: `[${c.groupId}]` }))

 TodoWrite({
   todos: executionCalls.map(c => ({
@@ -345,14 +366,14 @@ for (const call of sequential) {

 ```javascript
 function buildExecutionPrompt(batch) {
-  // Task template (6 parts: Modification Points → Why → How → Reference → Risks → Done)
+  // Task template (6 parts: Files → Why → How → Reference → Risks → Done)
   const formatTask = (t) => `
 ## ${t.title}

 **Scope**: \`${t.scope}\` | **Action**: ${t.action}

-### Modification Points
-${t.modification_points.map(p => `- **${p.file}** → \`${p.target}\`: ${p.change}`).join('\n')}
+### Files
+${(t.files || []).map(f => `- **${f.path}** → \`${f.target || ''}\`: ${f.change || (f.changes || []).join(', ') || ''}`).join('\n')}

 ${t.rationale ? `
 ### Why this approach (Medium/High)
@@ -384,8 +405,8 @@ ${t.risks.map(r => `- ${r.description} → **${r.mitigation}**`).join('\n')}
 ` : ''}

 ### Done when
-${t.acceptance.map(c => `- [ ] ${c}`).join('\n')}
-${t.verification?.success_metrics?.length > 0 ? `\n**Success metrics**: ${t.verification.success_metrics.join(', ')}` : ''}`
+${(t.convergence?.criteria || []).map(c => `- [ ] ${c}`).join('\n')}
+${(t.test?.success_metrics || []).length > 0 ? `\n**Success metrics**: ${t.test.success_metrics.join(', ')}` : ''}`

 // Build prompt
 const sections = []
@@ -505,11 +526,11 @@ Progress tracked at batch level (not individual task level). Icons: ⚡ (paralle

 **Skip Condition**: Only run if `codeReviewTool ≠ "Skip"`

-**Review Focus**: Verify implementation against plan acceptance criteria and verification requirements
-- Read plan.json for task acceptance criteria and verification checklist
-- Check each acceptance criterion is fulfilled
-- Verify success metrics from verification field (Medium/High complexity)
-- Run unit/integration tests specified in verification field
+**Review Focus**: Verify implementation against plan convergence criteria and test requirements
+- Read plan.json + .task/*.json for task convergence criteria and test checklist
+- Check each convergence criterion is fulfilled
+- Verify success metrics from test field (Medium/High complexity)
+- Run unit/integration tests specified in test field
 - Validate code quality and identify issues
 - Ensure alignment with planned approach and risk mitigations
@@ -522,24 +543,24 @@ Progress tracked at batch level (not individual task level). Icons: ⚡ (paralle
 **Unified Review Template** (All tools use same standard):

 **Review Criteria**:
-- **Acceptance Criteria**: Verify each criterion from plan.tasks[].acceptance
-- **Verification Checklist** (Medium/High): Check unit_tests, integration_tests, success_metrics from plan.tasks[].verification
+- **Convergence Criteria**: Verify each criterion from task convergence.criteria
+- **Test Checklist** (Medium/High): Check unit, integration, success_metrics from task test
 - **Code Quality**: Analyze quality, identify issues, suggest improvements
 - **Plan Alignment**: Validate implementation matches planned approach and risk mitigations

 **Shared Prompt Template** (used by all CLI tools):
 ```
-PURPOSE: Code review for implemented changes against plan acceptance criteria and verification requirements
-TASK: • Verify plan acceptance criteria fulfillment • Check verification requirements (unit tests, success metrics) • Analyze code quality • Identify issues • Suggest improvements • Validate plan adherence and risk mitigations
+PURPOSE: Code review for implemented changes against plan convergence criteria and test requirements
+TASK: • Verify plan convergence criteria fulfillment • Check test requirements (unit, integration, success_metrics) • Analyze code quality • Identify issues • Suggest improvements • Validate plan adherence and risk mitigations
 MODE: analysis
-CONTEXT: @**/* @{plan.json} [@{exploration.json}] | Memory: Review lite-execute changes against plan requirements including verification checklist
+CONTEXT: @**/* @{plan.json} @{.task/*.json} [@{exploration.json}] | Memory: Review lite-execute changes against plan requirements including test checklist
 EXPECTED: Quality assessment with:
-- Acceptance criteria verification (all tasks)
-- Verification checklist validation (Medium/High: unit_tests, integration_tests, success_metrics)
+- Convergence criteria verification (all tasks from .task/*.json)
+- Test checklist validation (Medium/High: unit, integration, success_metrics)
 - Issue identification
 - Recommendations
-Explicitly check each acceptance criterion and verification item from plan.json tasks.
-CONSTRAINTS: Focus on plan acceptance criteria, verification requirements, and plan adherence | analysis=READ-ONLY
+Explicitly check each convergence criterion and test item from .task/*.json files.
+CONSTRAINTS: Focus on plan convergence criteria, test requirements, and plan adherence | analysis=READ-ONLY
 ```
 **Tool-Specific Execution** (Apply shared prompt template above):

@@ -628,7 +649,7 @@ function detectSubFeature(tasks) {
 const category = detectCategory(`${planObject.summary} ${planObject.approach}`)
 const entry = {
   title: planObject.summary.slice(0, 60),
-  sub_feature: detectSubFeature(planObject.tasks),
+  sub_feature: detectSubFeature(getTasks(planObject)),
   date: new Date().toISOString().split('T')[0],
   description: planObject.approach.slice(0, 100),
   status: previousExecutionResults.every(r => r.status === 'completed') ? 'completed' : 'partial',
@@ -673,11 +694,15 @@ Passed from lite-plan via global variable:
 planObject: {
   summary: string,
   approach: string,
-  tasks: [...],
+  task_ids: string[],    // Task IDs referencing .task/*.json files
+  task_count: number,    // Number of tasks
+  _loadedTasks: [...],   // Populated at runtime from .task/*.json files
   estimated_time: string,
   recommended_execution: string,
   complexity: string
 },
+// Task file paths (populated for two-layer format)
+taskFiles: [{id: string, path: string}] | null,
 explorationsContext: {...} | null, // Multi-angle explorations
 explorationAngles: string[], // List of exploration angles
 explorationManifest: {...} | null, // Exploration manifest
@@ -82,8 +82,8 @@ Phase 4: User Decision
 └─ Cancel → Save session

 Phase 5: Plan Generation & Execution Handoff
-├─ Generate plan.json (via @cli-lite-planning-agent)
-├─ Build executionContext with user selections
+├─ Generate plan.json + .task/*.json (via @cli-lite-planning-agent, two-layer output)
+├─ Build executionContext with user selections and taskFiles
 └─ Execute to /workflow:lite-execute --in-memory
 ```
@@ -93,7 +93,7 @@ Phase 5: Plan Generation & Execution Handoff
 |-------|---------------|
 | **Orchestrator** | Session management, ACE context, user decisions, phase transitions, executionContext assembly |
 | **@cli-discuss-agent** | Multi-CLI execution (Gemini/Codex/Claude), cross-verification, solution synthesis, synthesis.json output |
-| **@cli-lite-planning-agent** | Task decomposition, plan.json generation following schema |
+| **@cli-lite-planning-agent** | Task decomposition, two-layer output: plan.json (overview with task_ids[]) + .task/*.json (task files) |

 ## Core Responsibilities
@@ -360,13 +360,22 @@ Task({
 description: "Generate implementation plan",
 prompt: `
 ## Schema Reference
-Execute: cat ~/.ccw/workflows/cli-templates/schemas/plan-json-schema.json
+Execute: cat ~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json
+Execute: cat ~/.ccw/workflows/cli-templates/schemas/task-schema.json
+
+## Output Format: Two-Layer Structure
+- plan.json: Overview with task_ids[] referencing .task/ files (NO tasks[] array)
+- .task/TASK-*.json: Independent task files following task-schema.json
+
+plan.json required: summary, approach, task_ids, task_count, _metadata (with plan_type)
+Task files required: id, title, description, depends_on, convergence (with criteria[])
+Task fields: files[].change (not modification_points), convergence.criteria (not acceptance), test (not verification)

 ## Context-Package (from orchestrator)
 ${JSON.stringify(contextPackage, null, 2)}

 ## Execution Process
-1. Read plan-json-schema.json for output structure
+1. Read plan-overview-base-schema.json + task-schema.json for output structure
 2. Read project-tech.json and project-guidelines.json
 3. Parse context-package fields:
    - solution: name, feasibility, summary
@@ -377,19 +386,23 @@ ${JSON.stringify(contextPackage, null, 2)}
    - constraints: user requirements
 4. Use implementation_plan.tasks[] as task foundation
 5. Preserve task dependencies (depends_on) and execution_flow
-6. Expand tasks with detailed acceptance criteria
-7. Generate plan.json following schema exactly
+6. Expand tasks with convergence.criteria (testable completion conditions)
+7. Create .task/ directory and write individual TASK-*.json files
+8. Generate plan.json with task_ids[] referencing .task/ files

 ## Output
-- ${sessionFolder}/plan.json
+- ${sessionFolder}/plan.json (overview with task_ids[])
+- ${sessionFolder}/.task/TASK-*.json (independent task files)

 ## Completion Checklist
-- [ ] plan.json preserves task dependencies from implementation_plan
+- [ ] plan.json has task_ids[] and task_count (NO embedded tasks[])
+- [ ] .task/*.json files preserve task dependencies from implementation_plan
 - [ ] Task execution order follows execution_flow
 - [ ] Key_points reflected in task descriptions
 - [ ] User constraints applied to implementation
-- [ ] Acceptance criteria are testable
-- [ ] Schema fields match plan-json-schema.json exactly
+- [ ] convergence.criteria are testable
+- [ ] plan.json follows plan-overview-base-schema.json
+- [ ] Task files follow task-schema.json
 `
 })
 ```
@@ -399,9 +412,13 @@ ${JSON.stringify(contextPackage, null, 2)}
 // After plan.json is generated by cli-lite-planning-agent
 const plan = JSON.parse(Read(`${sessionFolder}/plan.json`))

+// Load task files from .task/ directory (two-layer format)
+const taskFiles = plan.task_ids.map(id => `${sessionFolder}/.task/${id}.json`)
+
 // Build executionContext (same structure as lite-plan)
 executionContext = {
   planObject: plan,
+  taskFiles: taskFiles, // Paths to .task/*.json files (two-layer format)
   explorationsContext: null, // Multi-CLI doesn't use exploration files
   explorationAngles: [], // No exploration angles
   explorationManifest: null, // No manifest
@@ -420,6 +437,7 @@ executionContext = {
   explorations: [], // No explorations in multi-CLI workflow
   explorations_manifest: null,
   plan: `${sessionFolder}/plan.json`,
+  task_dir: plan.task_ids ? `${sessionFolder}/.task/` : null,
   synthesis_rounds: Array.from({length: currentRound}, (_, i) =>
     `${sessionFolder}/rounds/${i+1}/synthesis.json`
   ),
@@ -445,7 +463,11 @@ Skill(skill="workflow:lite-execute", args="--in-memory")
 │   ├── 2/synthesis.json        # Round 2 analysis (cli-discuss-agent)
 │   └── .../
 ├── context-package.json        # Extracted context for planning (orchestrator)
-└── plan.json                   # Structured plan (cli-lite-planning-agent)
+├── plan.json                   # Plan overview with task_ids[] (NO embedded tasks[])
+└── .task/                      # Independent task files
+    ├── TASK-001.json           # Task file following task-schema.json
+    ├── TASK-002.json
+    └── ...
 ```
 **File Producers**:

@@ -455,7 +477,8 @@ Skill(skill="workflow:lite-execute", args="--in-memory")
 | `session-state.json` | Orchestrator | Session metadata, rounds, decisions |
 | `rounds/*/synthesis.json` | cli-discuss-agent | Solutions, convergence, cross-verification |
 | `context-package.json` | Orchestrator | Extracted solution, dependencies, consensus for planning |
-| `plan.json` | cli-lite-planning-agent | Structured tasks for lite-execute |
+| `plan.json` | cli-lite-planning-agent | Plan overview with task_ids[] referencing .task/ files |
+| `.task/*.json` | cli-lite-planning-agent | Independent task files following task-schema.json |

 ## synthesis.json Schema
@@ -115,8 +115,8 @@ Requirement-level layered roadmap planning command. Decomposes a requirement int
 ## Convergence Criteria Details
 {Expanded convergence for each layer/task}

-## Risk Items
-{Aggregated risk_items}
+## Risks
+{Aggregated risks}

 ## Next Steps
 {Execution guidance}
@@ -158,11 +158,11 @@ Each line = one layer. Layer naming convention:
 | L2 | Refined | Edge case handling, performance optimization, security hardening |
 | L3 | Optimized | Advanced features, observability, operations support |

-**Schema**: `id, name, goal, scope[], excludes[], convergence{}, risk_items[], effort, depends_on[]`
+**Schema**: `id, name, goal, scope[], excludes[], convergence{}, risks[], effort, depends_on[]`

 ```jsonl
-{"id":"L0","name":"MVP","goal":"Minimum viable closed loop","scope":["User registration and login","Basic CRUD"],"excludes":["OAuth","2FA"],"convergence":{"criteria":["End-to-end register→login→operate flow works","Core API returns correct responses"],"verification":"curl/Postman manual testing or smoke test script","definition_of_done":"New user can complete the full flow of register→login→perform one core operation"},"risk_items":["JWT library selection needs validation"],"effort":"medium","depends_on":[]}
-{"id":"L1","name":"Usable","goal":"Complete key user paths","scope":["Password reset","Input validation","Error messages"],"excludes":["Audit logs","Rate limiting"],"convergence":{"criteria":["All form fields have frontend+backend validation","Password reset email can be sent and reset completed","Error scenarios show user-friendly messages"],"verification":"Unit tests cover validation logic + manual test of reset flow","definition_of_done":"Users have a clear recovery path when encountering input errors or forgotten passwords"},"risk_items":[],"effort":"medium","depends_on":["L0"]}
+{"id":"L0","name":"MVP","goal":"Minimum viable closed loop","scope":["User registration and login","Basic CRUD"],"excludes":["OAuth","2FA"],"convergence":{"criteria":["End-to-end register→login→operate flow works","Core API returns correct responses"],"verification":"curl/Postman manual testing or smoke test script","definition_of_done":"New user can complete the full flow of register→login→perform one core operation"},"risks":[{"description":"JWT library selection needs validation","probability":"Medium","impact":"Medium","mitigation":"N/A"}],"effort":"medium","depends_on":[]}
+{"id":"L1","name":"Usable","goal":"Complete key user paths","scope":["Password reset","Input validation","Error messages"],"excludes":["Audit logs","Rate limiting"],"convergence":{"criteria":["All form fields have frontend+backend validation","Password reset email can be sent and reset completed","Error scenarios show user-friendly messages"],"verification":"Unit tests cover validation logic + manual test of reset flow","definition_of_done":"Users have a clear recovery path when encountering input errors or forgotten passwords"},"risks":[],"effort":"medium","depends_on":["L0"]}
 ```

 **Constraints**: 2-4 layers, L0 must be a self-contained closed loop with no dependencies, each feature belongs to exactly ONE layer (no scope overlap).
@@ -467,7 +467,7 @@ Bash(`mkdir -p ${sessionFolder}`)

 ${selectedMode === 'progressive' ? `**Progressive Mode**:
 - 2-4 layers from MVP to full implementation
-- Each layer: id (L0-L3), name, goal, scope, excludes, convergence, risk_items, effort, depends_on
+- Each layer: id (L0-L3), name, goal, scope, excludes, convergence, risks, effort, depends_on
 - L0 (MVP) must be a self-contained closed loop with no dependencies
 - Scope: each feature belongs to exactly ONE layer (no overlap)
 - Layer names: MVP / Usable / Refined / Optimized` :
@@ -142,12 +142,12 @@ export function ObservabilityPanel() {
 <label className="block text-xs font-medium text-muted-foreground mb-1">
   {formatMessage({ id: 'issues.observability.filters.type' })}
 </label>
-<Select value={type} onValueChange={(v) => setType(v)}>
+<Select value={type || '__all__'} onValueChange={(v) => setType(v === '__all__' ? '' : v)}>
   <SelectTrigger>
     <SelectValue placeholder={formatMessage({ id: 'issues.observability.filters.typeAll' })} />
   </SelectTrigger>
   <SelectContent>
-    <SelectItem value="">{formatMessage({ id: 'issues.observability.filters.typeAll' })}</SelectItem>
+    <SelectItem value="__all__">{formatMessage({ id: 'issues.observability.filters.typeAll' })}</SelectItem>
     {EVENT_TYPES.map((t) => (
       <SelectItem key={t} value={t}>
         {t}
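The `__all__` sentinel works around a Radix/shadcn constraint: a SelectItem's value may not be an empty string (the empty string is reserved for clearing the selection). The handler therefore round-trips between '' in state and the sentinel in the widget; distilled (illustrative):

```javascript
// Sentinel round-trip used in the change above (illustrative)
const SENTINEL = '__all__';
const toSelectValue = (state) => state || SENTINEL;       // '' in state → '__all__' for <Select>
const fromSelectValue = (v) => (v === SENTINEL ? '' : v); // '__all__' from <Select> → '' in state
```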
@@ -209,7 +209,7 @@ export function QueuePanel() {
 </SelectTrigger>
 <SelectContent>
   {(historyIndex.queues || []).length === 0 ? (
-    <SelectItem value="" disabled>
+    <SelectItem value="__none__" disabled>
       {formatMessage({ id: 'issues.queue.history.empty' })}
     </SelectItem>
   ) : (
@@ -215,7 +215,7 @@ export function QueueExecuteInSession({ item, className }: { item: QueueItem; cl
 </SelectTrigger>
 <SelectContent>
   {sessions.length === 0 ? (
-    <SelectItem value="" disabled>
+    <SelectItem value="__none__" disabled>
       {formatMessage({ id: 'issues.terminal.session.none' })}
     </SelectItem>
   ) : (
@@ -267,7 +267,7 @@ export function QueueSendToOrchestrator({ item, className }: { item: QueueItem;
 </SelectTrigger>
 <SelectContent>
   {sessions.length === 0 ? (
-    <SelectItem value="" disabled>
+    <SelectItem value="__none__" disabled>
       {formatMessage({ id: 'issues.terminal.session.none' })}
     </SelectItem>
   ) : (
@@ -248,7 +248,7 @@ function ContextContent({
 <div className="space-y-0.5 pl-2 max-h-32 overflow-y-auto">
   {ctx.relevant_files.map((f, i) => {
     const filePath = typeof f === 'string' ? f : f.path;
-    const reason = typeof f === 'string' ? undefined : (f.rationale || f.reason);
+    const reason = typeof f === 'string' ? undefined : f.reason;
     return (
       <div key={i} className="group flex items-start gap-1 text-muted-foreground hover:bg-muted/30 rounded px-1 py-0.5">
         <span className="text-primary/50 shrink-0">{i + 1}.</span>
@@ -6,7 +6,7 @@

 import { useState } from 'react';
 import { useIntl } from 'react-intl';
-import { useQuery } from '@tanstack/react-query';
+import { useQuery, useQueryClient } from '@tanstack/react-query';
 import {
   Server,
   Plus,
@@ -226,6 +226,7 @@ export function McpManagerPage() {
 const [saveTemplateDialogOpen, setSaveTemplateDialogOpen] = useState(false);
 const [serverToSaveAsTemplate, setServerToSaveAsTemplate] = useState<McpServer | undefined>(undefined);

+const queryClient = useQueryClient();
 const notifications = useNotifications();

 const {
@@ -352,15 +353,47 @@ export function McpManagerPage() {
   };

   const handleToggleCcwTool = async (tool: string, enabled: boolean) => {
+    // Read latest from cache to avoid stale closures
+    const currentConfig = queryClient.getQueryData<CcwMcpConfig>(['ccwMcpConfig']) ?? ccwConfig;
+    const currentTools = currentConfig.enabledTools;
+    const previousConfig = queryClient.getQueryData<CcwMcpConfig>(['ccwMcpConfig']);
+
     const updatedTools = enabled
-      ? [...ccwConfig.enabledTools, tool]
-      : ccwConfig.enabledTools.filter((t) => t !== tool);
-    await updateCcwConfig({ enabledTools: updatedTools });
+      ? (currentTools.includes(tool) ? currentTools : [...currentTools, tool])
+      : currentTools.filter((t) => t !== tool);
+
+    // Optimistic cache update for immediate UI response
+    queryClient.setQueryData(['ccwMcpConfig'], (old: CcwMcpConfig | undefined) => {
+      if (!old) return old;
+      return { ...old, enabledTools: updatedTools };
+    });
+
+    try {
+      await updateCcwConfig({ ...currentConfig, enabledTools: updatedTools });
+    } catch (error) {
+      console.error('Failed to toggle CCW tool:', error);
+      queryClient.setQueryData(['ccwMcpConfig'], previousConfig);
+    }
     ccwMcpQuery.refetch();
   };

   const handleUpdateCcwConfig = async (config: Partial<CcwMcpConfig>) => {
-    await updateCcwConfig(config);
+    // Read BEFORE optimistic update to capture actual server state
+    const currentConfig = queryClient.getQueryData<CcwMcpConfig>(['ccwMcpConfig']) ?? ccwConfig;
+    const previousConfig = queryClient.getQueryData<CcwMcpConfig>(['ccwMcpConfig']);
+
+    // Optimistic cache update for immediate UI response
+    queryClient.setQueryData(['ccwMcpConfig'], (old: CcwMcpConfig | undefined) => {
+      if (!old) return old;
+      return { ...old, ...config };
+    });
+
+    try {
+      await updateCcwConfig({ ...currentConfig, ...config });
+    } catch (error) {
+      console.error('Failed to update CCW config:', error);
+      queryClient.setQueryData(['ccwMcpConfig'], previousConfig);
+    }
     ccwMcpQuery.refetch();
   };
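Both handlers above follow the same optimistic-update recipe: snapshot the cached value, write the expected result into the cache immediately so the UI reacts at once, then roll back to the snapshot if the server call fails. The recipe in isolation, sketched in Python for clarity (the cache dict and update_fn are stand-ins, not ccw or react-query APIs):

from typing import Any, Callable, Dict, Optional

def optimistic_update(
    cache: Dict[str, Dict[str, Any]],
    key: str,
    patch: Dict[str, Any],
    update_fn: Callable[[Dict[str, Any]], None],
) -> None:
    """Apply patch to the cache first, then persist; roll back on failure."""
    previous: Optional[Dict[str, Any]] = cache.get(key)
    merged = {**(previous or {}), **patch}
    cache[key] = merged          # optimistic: readers see the change immediately
    try:
        update_fn(merged)        # persist to the backing store
    except Exception:
        if previous is None:
            cache.pop(key, None)  # rollback: key did not exist before
        else:
            cache[key] = previous  # rollback to the snapshot
        raise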
@@ -378,15 +411,48 @@ export function McpManagerPage() {
   };

   const handleToggleCcwToolCodex = async (tool: string, enabled: boolean) => {
+    const currentConfig = queryClient.getQueryData<CcwMcpConfig>(['ccwMcpConfigCodex']) ?? ccwCodexConfig;
+    const currentTools = currentConfig.enabledTools;
+
     const updatedTools = enabled
-      ? [...ccwCodexConfig.enabledTools, tool]
-      : ccwCodexConfig.enabledTools.filter((t) => t !== tool);
-    await updateCcwConfigForCodex({ enabledTools: updatedTools });
+      ? [...currentTools, tool]
+      : currentTools.filter((t) => t !== tool);
+
+    queryClient.setQueryData(['ccwMcpConfigCodex'], (old: CcwMcpConfig | undefined) => {
+      if (!old) return old;
+      return { ...old, enabledTools: updatedTools };
+    });
+
+    try {
+      await updateCcwConfigForCodex({
+        enabledTools: updatedTools,
+        projectRoot: currentConfig.projectRoot,
+        allowedDirs: currentConfig.allowedDirs,
+        disableSandbox: currentConfig.disableSandbox,
+      });
+    } catch (error) {
+      console.error('Failed to toggle CCW tool (Codex):', error);
+    }
     ccwMcpCodexQuery.refetch();
   };

   const handleUpdateCcwConfigCodex = async (config: Partial<CcwMcpConfig>) => {
-    await updateCcwConfigForCodex(config);
+    queryClient.setQueryData(['ccwMcpConfigCodex'], (old: CcwMcpConfig | undefined) => {
+      if (!old) return old;
+      return { ...old, ...config };
+    });
+
+    try {
+      const currentConfig = queryClient.getQueryData<CcwMcpConfig>(['ccwMcpConfigCodex']) ?? ccwCodexConfig;
+      await updateCcwConfigForCodex({
+        enabledTools: config.enabledTools ?? currentConfig.enabledTools,
+        projectRoot: config.projectRoot ?? currentConfig.projectRoot,
+        allowedDirs: config.allowedDirs ?? currentConfig.allowedDirs,
+        disableSandbox: config.disableSandbox ?? currentConfig.disableSandbox,
+      });
+    } catch (error) {
+      console.error('Failed to update CCW config (Codex):', error);
+    }
     ccwMcpCodexQuery.refetch();
   };
@@ -15,6 +15,7 @@ import { hookCommand } from './commands/hook.js';
 import { issueCommand } from './commands/issue.js';
 import { workflowCommand } from './commands/workflow.js';
 import { loopCommand } from './commands/loop.js';
+import { teamCommand } from './commands/team.js';
 import { readFileSync, existsSync } from 'fs';
 import { fileURLToPath } from 'url';
 import { dirname, join } from 'path';
@@ -318,6 +319,22 @@ export function run(argv: string[]): void {
     .option('--session <name>', 'Specify workflow session')
     .action((subcommand, args, options) => loopCommand(subcommand, args, options));

+  // Team command - Team Message Bus CLI interface
+  program
+    .command('team [subcommand] [args...]')
+    .description('Team message bus for Agent Team communication')
+    .option('--team <name>', 'Team name')
+    .option('--from <role>', 'Sender role name')
+    .option('--to <role>', 'Recipient role name')
+    .option('--type <type>', 'Message type')
+    .option('--summary <text>', 'One-line summary')
+    .option('--ref <path>', 'File path reference')
+    .option('--data <json>', 'JSON structured data')
+    .option('--id <id>', 'Message ID (for read)')
+    .option('--last <n>', 'Last N messages (for list)')
+    .option('--json', 'Output as JSON')
+    .action((subcommand, args, options) => teamCommand(subcommand, args, options));
+
   // Workflow command - Workflow installation and management
   program
     .command('workflow [subcommand] [args...]')
ccw/src/commands/team.ts (new file, 179 lines)
@@ -0,0 +1,179 @@
/**
 * Team Command - CLI interface for Team Message Bus
 * Delegates to team-msg.ts handler for JSONL-based persistent messaging
 *
 * Commands:
 *   ccw team log --team <name> --from <role> --to <role> --type <type> --summary "..."
 *   ccw team read --team <name> --id <MSG-NNN>
 *   ccw team list --team <name> [--from <role>] [--to <role>] [--type <type>] [--last <n>]
 *   ccw team status --team <name>
 *   ccw team delete --team <name> --id <MSG-NNN>
 *   ccw team clear --team <name>
 */

import chalk from 'chalk';
import { handler } from '../tools/team-msg.js';

interface TeamOptions {
  team?: string;
  from?: string;
  to?: string;
  type?: string;
  summary?: string;
  ref?: string;
  data?: string;
  id?: string;
  last?: string;
  json?: boolean;
}

export async function teamCommand(
  subcommand: string,
  args: string | string[],
  options: TeamOptions
): Promise<void> {
  if (!subcommand) {
    printHelp();
    return;
  }

  if (!options.team) {
    console.error(chalk.red('Error: --team is required'));
    process.exit(1);
  }

  // Build params for handler
  const params: Record<string, unknown> = {
    operation: subcommand,
    team: options.team,
  };

  if (options.from) params.from = options.from;
  if (options.to) params.to = options.to;
  if (options.type) params.type = options.type;
  if (options.summary) params.summary = options.summary;
  if (options.ref) params.ref = options.ref;
  if (options.id) params.id = options.id;
  if (options.last) params.last = parseInt(options.last, 10);

  // Parse --data as JSON
  if (options.data) {
    try {
      params.data = JSON.parse(options.data);
    } catch {
      console.error(chalk.red('Error: --data must be valid JSON'));
      process.exit(1);
    }
  }

  try {
    const result = await handler(params);

    if (!result.success) {
      console.error(chalk.red(`Error: ${result.error}`));
      process.exit(1);
    }

    // JSON output mode
    if (options.json) {
      console.log(JSON.stringify(result.result, null, 2));
      return;
    }

    // Formatted output by operation
    switch (subcommand) {
      case 'log': {
        const r = result.result as { id: string; message: string };
        console.log(chalk.green(`✓ ${r.message}`));
        break;
      }
      case 'read': {
        const msg = result.result as { id: string; ts: string; from: string; to: string; type: string; summary: string; ref?: string; data?: unknown };
        console.log(chalk.bold(`${msg.id} [${msg.ts}]`));
        console.log(`  ${chalk.cyan(msg.from)} → ${chalk.yellow(msg.to)} (${msg.type})`);
        console.log(`  ${msg.summary}`);
        if (msg.ref) console.log(chalk.gray(`  ref: ${msg.ref}`));
        if (msg.data) console.log(chalk.gray(`  data: ${JSON.stringify(msg.data)}`));
        break;
      }
      case 'list': {
        const r = result.result as { formatted: string; total: number; showing: number };
        console.log(chalk.gray(`Showing ${r.showing} of ${r.total} messages\n`));
        console.log(r.formatted);
        break;
      }
      case 'status': {
        const r = result.result as { formatted?: string; summary?: string; total_messages?: number };
        if (r.summary) {
          console.log(chalk.yellow(r.summary));
        } else {
          console.log(chalk.gray(`Total messages: ${r.total_messages}\n`));
          console.log(r.formatted);
        }
        break;
      }
      case 'delete': {
        const r = result.result as { message: string };
        console.log(chalk.green(`✓ ${r.message}`));
        break;
      }
      case 'clear': {
        const r = result.result as { message: string };
        console.log(chalk.green(`✓ ${r.message}`));
        break;
      }
      default:
        console.error(chalk.red(`Unknown subcommand: ${subcommand}`));
        printHelp();
        process.exit(1);
    }
  } catch (error) {
    console.error(chalk.red(`Error: ${(error as Error).message}`));
    process.exit(1);
  }
}

function printHelp(): void {
  console.log(chalk.bold.cyan('\n  CCW Team Message Bus\n'));
  console.log('  CLI interface for team message logging and retrieval.\n');
  console.log('  Subcommands:');
  console.log(chalk.gray('    log      Log a team message'));
  console.log(chalk.gray('    read     Read a specific message by ID'));
  console.log(chalk.gray('    list     List recent messages with filters'));
  console.log(chalk.gray('    status   Show team member activity summary'));
  console.log(chalk.gray('    delete   Delete a specific message by ID'));
  console.log(chalk.gray('    clear    Clear all messages for a team'));
  console.log();
  console.log('  Required:');
  console.log(chalk.gray('    --team <name>     Team name'));
  console.log();
  console.log('  Log Options:');
  console.log(chalk.gray('    --from <role>     Sender role name'));
  console.log(chalk.gray('    --to <role>       Recipient role name'));
  console.log(chalk.gray('    --type <type>     Message type (plan_ready, impl_complete, etc.)'));
  console.log(chalk.gray('    --summary <text>  One-line summary'));
  console.log(chalk.gray('    --ref <path>      File path reference'));
  console.log(chalk.gray('    --data <json>     JSON structured data'));
  console.log();
  console.log('  Read/Delete Options:');
  console.log(chalk.gray('    --id <MSG-NNN>    Message ID'));
  console.log();
  console.log('  List Options:');
  console.log(chalk.gray('    --from <role>     Filter by sender'));
  console.log(chalk.gray('    --to <role>       Filter by recipient'));
  console.log(chalk.gray('    --type <type>     Filter by message type'));
  console.log(chalk.gray('    --last <n>        Number of messages (default: 20)'));
  console.log();
  console.log('  General:');
  console.log(chalk.gray('    --json            Output as JSON'));
  console.log();
  console.log('  Examples:');
  console.log(chalk.gray('    ccw team log --team my-team --from executor --to coordinator --type impl_complete --summary "Task done"'));
  console.log(chalk.gray('    ccw team list --team my-team --last 5'));
  console.log(chalk.gray('    ccw team read --team my-team --id MSG-003'));
  console.log(chalk.gray('    ccw team status --team my-team'));
  console.log(chalk.gray('    ccw team delete --team my-team --id MSG-003'));
  console.log(chalk.gray('    ccw team clear --team my-team'));
  console.log(chalk.gray('    ccw team log --team my-team --from planner --to coordinator --type plan_ready --summary "Plan ready" --json'));
  console.log();
}
@@ -6,11 +6,13 @@
  * - read: Read message(s) by ID
  * - list: List recent messages with optional filters (from/to/type/last N)
  * - status: Summarize team member activity from message history
+ * - delete: Delete a specific message by ID
+ * - clear: Clear all messages for a team
  */

 import { z } from 'zod';
 import type { ToolSchema, ToolResult } from '../types/tool.js';
-import { existsSync, mkdirSync, readFileSync, appendFileSync } from 'fs';
+import { existsSync, mkdirSync, readFileSync, appendFileSync, writeFileSync, rmSync } from 'fs';
 import { join, dirname } from 'path';
 import { getProjectRoot } from '../utils/path-validator.js';

@@ -37,7 +39,7 @@ export interface StatusEntry {
 // --- Zod Schema ---

 const ParamsSchema = z.object({
-  operation: z.enum(['log', 'read', 'list', 'status']).describe('Operation to perform'),
+  operation: z.enum(['log', 'read', 'list', 'status', 'delete', 'clear']).describe('Operation to perform'),
   team: z.string().describe('Team name (maps to .workflow/.team-msg/{team}/messages.jsonl)'),

   // log params
@@ -69,6 +71,8 @@ Operations:
   team_msg(operation="list", team="my-team")
   team_msg(operation="list", team="my-team", from="tester", last=5)
   team_msg(operation="status", team="my-team")
+  team_msg(operation="delete", team="my-team", id="MSG-003")
+  team_msg(operation="clear", team="my-team")

 Message types: plan_ready, plan_approved, plan_revision, task_unblocked, impl_complete, impl_progress, test_result, review_result, fix_required, error, shutdown`,
   inputSchema: {
@@ -76,8 +80,8 @@ Message types: plan_ready, plan_approved, plan_revision, task_unblocked, impl_co
     properties: {
       operation: {
         type: 'string',
-        enum: ['log', 'read', 'list', 'status'],
-        description: 'Operation: log | read | list | status',
+        enum: ['log', 'read', 'list', 'status', 'delete', 'clear'],
+        description: 'Operation: log | read | list | status | delete | clear',
       },
       team: {
         type: 'string',
@@ -250,6 +254,37 @@ function opStatus(params: Params): ToolResult {
   };
 }

+function opDelete(params: Params): ToolResult {
+  if (!params.id) return { success: false, error: 'delete requires "id"' };
+
+  const messages = readAllMessages(params.team);
+  const idx = messages.findIndex(m => m.id === params.id);
+
+  if (idx === -1) {
+    return { success: false, error: `Message ${params.id} not found in team "${params.team}"` };
+  }
+
+  const removed = messages.splice(idx, 1)[0];
+  const logPath = ensureLogFile(params.team);
+  writeFileSync(logPath, messages.map(m => JSON.stringify(m)).join('\n') + (messages.length > 0 ? '\n' : ''), 'utf-8');
+
+  return { success: true, result: { deleted: removed.id, message: `Deleted ${removed.id}: [${removed.from} → ${removed.to}] ${removed.summary}` } };
+}
+
+function opClear(params: Params): ToolResult {
+  const logPath = getLogPath(params.team);
+  const dir = getLogDir(params.team);
+
+  if (!existsSync(logPath)) {
+    return { success: true, result: { message: `Team "${params.team}" has no messages to clear.` } };
+  }
+
+  const count = readAllMessages(params.team).length;
+  rmSync(dir, { recursive: true, force: true });
+
+  return { success: true, result: { cleared: count, message: `Cleared ${count} messages for team "${params.team}".` } };
+}
+
 // --- Handler ---

 export async function handler(params: Record<string, unknown>): Promise<ToolResult> {
@@ -265,6 +300,8 @@ export async function handler(params: Record<string, unknown>): Promise<ToolResu
     case 'read': return opRead(p);
     case 'list': return opList(p);
     case 'status': return opStatus(p);
+    case 'delete': return opDelete(p);
+    case 'clear': return opClear(p);
     default:
       return { success: false, error: `Unknown operation: ${p.operation}` };
   }
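The delete path above has to rewrite the whole JSONL log, since an append-only file offers no in-place deletion. The same read-filter-rewrite pattern in isolation, sketched in Python and independent of the ccw codebase (the file layout and message shape are assumptions):

import json
from pathlib import Path

def delete_message(log_path: Path, msg_id: str) -> bool:
    """Remove one message from a JSONL log by rewriting the file."""
    lines = log_path.read_text(encoding="utf-8").splitlines()
    messages = [json.loads(line) for line in lines if line.strip()]
    kept = [m for m in messages if m.get("id") != msg_id]
    if len(kept) == len(messages):
        return False  # id not found; leave the log untouched
    body = "\n".join(json.dumps(m) for m in kept)
    log_path.write_text(body + "\n" if kept else "", encoding="utf-8")
    return True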
@@ -15,14 +15,14 @@ import threading
 from pathlib import Path
 from typing import List, Optional, Tuple

-from codexlens.entities import Symbol
+from codexlens.entities import CodeRelationship, Symbol
 from codexlens.errors import StorageError


 class GlobalSymbolIndex:
     """Project-wide symbol index with incremental updates."""

-    SCHEMA_VERSION = 1
+    SCHEMA_VERSION = 2
     DEFAULT_DB_NAME = "_global_symbols.db"

     def __init__(self, db_path: str | Path, project_id: int) -> None:
@@ -303,6 +303,186 @@ class GlobalSymbolIndex:
             for row in rows
         ]

+    # ------------------------------------------------------------------
+    # Relationship CRUD
+    # ------------------------------------------------------------------
+
+    def update_file_relationships(
+        self,
+        file_path: str | Path,
+        relationships: List[CodeRelationship],
+    ) -> None:
+        """Replace all relationships for a file atomically (delete + insert).
+
+        Uses the same delete-then-insert pattern as ``update_file_symbols``.
+        The *target_qualified_name* stored in the DB is built from
+        ``target_file`` (when available) and ``target_symbol`` so that
+        cross-directory lookups work correctly.
+        """
+        file_path_str = str(Path(file_path).resolve())
+
+        with self._lock:
+            conn = self._get_connection()
+            try:
+                conn.execute("BEGIN")
+                conn.execute(
+                    "DELETE FROM global_relationships WHERE project_id=? AND source_file=?",
+                    (self.project_id, file_path_str),
+                )
+
+                if relationships:
+                    rows = [
+                        (
+                            self.project_id,
+                            file_path_str,
+                            rel.source_symbol,
+                            self._build_qualified_name(rel),
+                            rel.relationship_type.value,
+                            rel.source_line,
+                        )
+                        for rel in relationships
+                    ]
+                    conn.executemany(
+                        """
+                        INSERT INTO global_relationships(
+                            project_id, source_file, source_symbol,
+                            target_qualified_name, relationship_type, source_line
+                        )
+                        VALUES(?, ?, ?, ?, ?, ?)
+                        """,
+                        rows,
+                    )
+
+                conn.commit()
+            except sqlite3.DatabaseError as exc:
+                conn.rollback()
+                raise StorageError(
+                    f"Failed to update relationships for {file_path_str}: {exc}",
+                    db_path=str(self.db_path),
+                    operation="update_file_relationships",
+                ) from exc
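A minimal usage sketch for the new entry point (paths and symbol names are illustrative; it assumes the CodeRelationship constructor and context-manager behavior exercised in the tests below):

from codexlens.entities import CodeRelationship, RelationshipType
from codexlens.storage.global_index import GlobalSymbolIndex

with GlobalSymbolIndex("indexes/_global_symbols.db", project_id=1) as store:
    store.update_file_relationships(
        "src/auth.py",
        [
            CodeRelationship(
                source_symbol="login",
                target_symbol="validate_token",
                relationship_type=RelationshipType.CALL,
                source_file="src/auth.py",
                target_file=None,
                source_line=10,
            )
        ],
    )
    # Calling again with a new list replaces the file's rows atomically.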
+    def query_by_target(
+        self,
+        target_name: str,
+        limit: int = 50,
+        prefix_mode: bool = True,
+    ) -> List[Tuple[str, str, str, int]]:
+        """Query relationships by target_qualified_name.
+
+        Returns list of ``(source_file, source_symbol, relationship_type, source_line)``.
+        When *prefix_mode* is True the target_name is matched as a prefix;
+        otherwise an exact match is required.
+        """
+        if prefix_mode:
+            pattern = f"{target_name}%"
+        else:
+            pattern = target_name
+
+        with self._lock:
+            conn = self._get_connection()
+            if prefix_mode:
+                rows = conn.execute(
+                    """
+                    SELECT source_file, source_symbol, relationship_type, source_line
+                    FROM global_relationships
+                    WHERE project_id=? AND target_qualified_name LIKE ?
+                    ORDER BY source_file, source_line
+                    LIMIT ?
+                    """,
+                    (self.project_id, pattern, limit),
+                ).fetchall()
+            else:
+                rows = conn.execute(
+                    """
+                    SELECT source_file, source_symbol, relationship_type, source_line
+                    FROM global_relationships
+                    WHERE project_id=? AND target_qualified_name=?
+                    ORDER BY source_file, source_line
+                    LIMIT ?
+                    """,
+                    (self.project_id, pattern, limit),
+                ).fetchall()
+
+        return [
+            (
+                row["source_file"],
+                row["source_symbol"],
+                row["relationship_type"],
+                row["source_line"],
+            )
+            for row in rows
+        ]
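Continuing the usage sketch above, the query side under the same assumptions (the stored qualified name for a cross-file target is "<target_file>::<target_symbol>", so a prefix query on a file path finds every edge into that file):

# Exact match on an unqualified target name
rows = store.query_by_target("validate_token", prefix_mode=False)

# Prefix match: qualified names start with the target file path
rows = store.query_by_target("lib/utils.py", prefix_mode=True)

for source_file, source_symbol, relationship_type, source_line in rows:
    print(f"{source_file}:{source_line} {source_symbol} ({relationship_type})")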
+    def query_relationships_for_symbols(
+        self,
+        symbol_names: List[str],
+        limit: int = 100,
+    ) -> List[sqlite3.Row]:
+        """Query all relationships involving any of *symbol_names*.
+
+        Matches against both ``source_symbol`` and ``target_qualified_name``
+        (the target column is checked with a LIKE ``%name`` suffix pattern so
+        that qualified names like ``mod.ClassName`` still match ``ClassName``).
+        """
+        if not symbol_names:
+            return []
+
+        with self._lock:
+            conn = self._get_connection()
+            # Build WHERE clause: (source_symbol IN (...)) OR (target LIKE ...)
+            source_placeholders = ",".join("?" for _ in symbol_names)
+            target_clauses = " OR ".join(
+                "target_qualified_name LIKE ?" for _ in symbol_names
+            )
+            target_patterns = [f"%{name}" for name in symbol_names]
+
+            sql = f"""
+                SELECT id, project_id, source_file, source_symbol,
+                       target_qualified_name, relationship_type, source_line
+                FROM global_relationships
+                WHERE project_id=?
+                  AND (
+                    source_symbol IN ({source_placeholders})
+                    OR ({target_clauses})
+                  )
+                ORDER BY source_file, source_line
+                LIMIT ?
+            """
+            params: list = [self.project_id, *symbol_names, *target_patterns, limit]
+            return conn.execute(sql, params).fetchall()
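For two symbol names the string building above expands as follows (a worked example derived from the code, not captured output):

symbol_names = ["login", "BaseClass"]
source_placeholders = ",".join("?" for _ in symbol_names)   # "?,?"
target_clauses = " OR ".join(
    "target_qualified_name LIKE ?" for _ in symbol_names
)  # "target_qualified_name LIKE ? OR target_qualified_name LIKE ?"
target_patterns = [f"%{name}" for name in symbol_names]     # ["%login", "%BaseClass"]
# Final parameter order: [project_id, "login", "BaseClass", "%login", "%BaseClass", limit]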
+    def delete_file_relationships(self, file_path: str | Path) -> int:
+        """Remove all relationships for a file. Returns number of rows deleted."""
+        file_path_str = str(Path(file_path).resolve())
+        with self._lock:
+            conn = self._get_connection()
+            try:
+                cur = conn.execute(
+                    "DELETE FROM global_relationships WHERE project_id=? AND source_file=?",
+                    (self.project_id, file_path_str),
+                )
+                conn.commit()
+                return int(cur.rowcount or 0)
+            except sqlite3.DatabaseError as exc:
+                conn.rollback()
+                raise StorageError(
+                    f"Failed to delete relationships for {file_path_str}: {exc}",
+                    db_path=str(self.db_path),
+                    operation="delete_file_relationships",
+                ) from exc
+
+    @staticmethod
+    def _build_qualified_name(rel: CodeRelationship) -> str:
+        """Build a qualified name from a CodeRelationship.
+
+        Format: ``<target_file>::<target_symbol>`` when target_file is known,
+        otherwise just ``<target_symbol>``.
+        """
+        if rel.target_file:
+            return f"{rel.target_file}::{rel.target_symbol}"
+        return rel.target_symbol
     def _get_existing_index_path(self, file_path_str: str) -> Optional[str]:
         with self._lock:
             conn = self._get_connection()
@@ -328,9 +508,19 @@ class GlobalSymbolIndex:
         conn.execute(f"PRAGMA user_version = {int(version)}")

     def _apply_migrations(self, conn: sqlite3.Connection, from_version: int) -> None:
-        # No migrations yet (v1).
-        _ = (conn, from_version)
-        return
+        if from_version < 2:
+            self._migrate_v1_to_v2(conn)
+
+    def _migrate_v1_to_v2(self, conn: sqlite3.Connection) -> None:
+        """Add global_relationships table for v1 -> v2 migration."""
+        try:
+            self._create_relationships_schema(conn)
+        except sqlite3.DatabaseError as exc:
+            raise StorageError(
+                f"Failed to migrate schema from v1 to v2: {exc}",
+                db_path=str(self.db_path),
+                operation="_migrate_v1_to_v2",
+            ) from exc
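The call site that reaches _apply_migrations is not in this hunk. A plausible sketch of the version check on open (the initialize() body here is an assumption inferred from the calls and PRAGMA handling visible above, not the actual implementation):

def initialize(self) -> None:
    conn = self._get_connection()
    from_version = conn.execute("PRAGMA user_version").fetchone()[0]
    if from_version == 0:
        self._create_schema(conn)                   # fresh DB: full v2 schema
    elif from_version < self.SCHEMA_VERSION:
        self._apply_migrations(conn, from_version)  # e.g. v1 -> v2
    conn.execute(f"PRAGMA user_version = {self.SCHEMA_VERSION}")
    conn.commit()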
     def _get_connection(self) -> sqlite3.Connection:
         if self._conn is None:
@@ -389,6 +579,8 @@ class GlobalSymbolIndex:
                 ON global_symbols(project_id, index_path)
                 """
             )
+
+            self._create_relationships_schema(conn)
         except sqlite3.DatabaseError as exc:
             raise StorageError(
                 f"Failed to initialize global symbol schema: {exc}",
@@ -396,3 +588,31 @@ class GlobalSymbolIndex:
                 operation="_create_schema",
             ) from exc
+
+    def _create_relationships_schema(self, conn: sqlite3.Connection) -> None:
+        """Create the global_relationships table and indexes (idempotent)."""
+        conn.execute(
+            """
+            CREATE TABLE IF NOT EXISTS global_relationships (
+                id INTEGER PRIMARY KEY,
+                project_id INTEGER NOT NULL,
+                source_file TEXT NOT NULL,
+                source_symbol TEXT NOT NULL,
+                target_qualified_name TEXT NOT NULL,
+                relationship_type TEXT NOT NULL,
+                source_line INTEGER NOT NULL
+            )
+            """
+        )
+        conn.execute(
+            """
+            CREATE INDEX IF NOT EXISTS idx_global_rel_project_target
+            ON global_relationships(project_id, target_qualified_name)
+            """
+        )
+        conn.execute(
+            """
+            CREATE INDEX IF NOT EXISTS idx_global_rel_project_source
+            ON global_relationships(project_id, source_file)
+            """
+        )
codex-lens/tests/test_global_relationships.py (new file, 507 lines)
@@ -0,0 +1,507 @@
"""Tests for global_relationships table in GlobalSymbolIndex."""
|
||||
|
||||
import sqlite3
|
||||
import tempfile
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from codexlens.entities import CodeRelationship, RelationshipType
|
||||
from codexlens.storage.global_index import GlobalSymbolIndex
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def temp_paths():
|
||||
tmpdir = tempfile.TemporaryDirectory(ignore_cleanup_errors=True)
|
||||
root = Path(tmpdir.name)
|
||||
yield root
|
||||
try:
|
||||
tmpdir.cleanup()
|
||||
except (PermissionError, OSError):
|
||||
pass
|
||||
|
||||
|
||||
def _make_rel(
|
||||
source_symbol: str,
|
||||
target_symbol: str,
|
||||
rel_type: RelationshipType = RelationshipType.CALL,
|
||||
source_file: str = "src/a.py",
|
||||
target_file: str | None = None,
|
||||
source_line: int = 1,
|
||||
) -> CodeRelationship:
|
||||
return CodeRelationship(
|
||||
source_symbol=source_symbol,
|
||||
target_symbol=target_symbol,
|
||||
relationship_type=rel_type,
|
||||
source_file=source_file,
|
||||
target_file=target_file,
|
||||
source_line=source_line,
|
||||
)
|
||||
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Schema creation (fresh DB)
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_fresh_schema_creates_relationships_table(temp_paths: Path):
|
||||
"""New DB at SCHEMA_VERSION=2 should have global_relationships table."""
|
||||
db_path = temp_paths / "indexes" / "_global_symbols.db"
|
||||
|
||||
with GlobalSymbolIndex(db_path, project_id=1) as store:
|
||||
conn = store._get_connection()
|
||||
tables = {
|
||||
row[0]
|
||||
for row in conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table'"
|
||||
).fetchall()
|
||||
}
|
||||
assert "global_relationships" in tables
|
||||
assert "global_symbols" in tables
|
||||
|
||||
# Verify indexes exist
|
||||
indexes = {
|
||||
row[0]
|
||||
for row in conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='index'"
|
||||
).fetchall()
|
||||
}
|
||||
assert "idx_global_rel_project_target" in indexes
|
||||
assert "idx_global_rel_project_source" in indexes
|
||||
|
||||
|
||||
def test_schema_version_is_2(temp_paths: Path):
|
||||
db_path = temp_paths / "indexes" / "_global_symbols.db"
|
||||
with GlobalSymbolIndex(db_path, project_id=1) as store:
|
||||
conn = store._get_connection()
|
||||
version = conn.execute("PRAGMA user_version").fetchone()[0]
|
||||
assert version == 2
|
||||
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Migration v1 -> v2
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_migration_v1_to_v2(temp_paths: Path):
|
||||
"""A v1 database should gain the global_relationships table on upgrade."""
|
||||
db_path = temp_paths / "indexes" / "_global_symbols.db"
|
||||
db_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Simulate a v1 database: create global_symbols table + set version=1.
|
||||
conn = sqlite3.connect(str(db_path))
|
||||
conn.execute(
|
||||
"""
|
||||
CREATE TABLE IF NOT EXISTS global_symbols (
|
||||
id INTEGER PRIMARY KEY,
|
||||
project_id INTEGER NOT NULL,
|
||||
symbol_name TEXT NOT NULL,
|
||||
symbol_kind TEXT NOT NULL,
|
||||
file_path TEXT NOT NULL,
|
||||
start_line INTEGER,
|
||||
end_line INTEGER,
|
||||
index_path TEXT NOT NULL,
|
||||
UNIQUE(project_id, symbol_name, symbol_kind, file_path, start_line, end_line)
|
||||
)
|
||||
"""
|
||||
)
|
||||
conn.execute("PRAGMA user_version = 1")
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
# Now open with the new code -- migration should fire.
|
||||
with GlobalSymbolIndex(db_path, project_id=1) as store:
|
||||
conn = store._get_connection()
|
||||
version = conn.execute("PRAGMA user_version").fetchone()[0]
|
||||
assert version == 2
|
||||
|
||||
tables = {
|
||||
row[0]
|
||||
for row in conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table'"
|
||||
).fetchall()
|
||||
}
|
||||
assert "global_relationships" in tables
|
||||
|
||||
|
||||
def test_migration_idempotent(temp_paths: Path):
|
||||
"""Running migration twice should not fail (CREATE TABLE IF NOT EXISTS)."""
|
||||
db_path = temp_paths / "indexes" / "_global_symbols.db"
|
||||
|
||||
# First init
|
||||
store = GlobalSymbolIndex(db_path, project_id=1)
|
||||
store.initialize()
|
||||
store.close()
|
||||
|
||||
# Second init on same DB -- should be a no-op.
|
||||
store2 = GlobalSymbolIndex(db_path, project_id=1)
|
||||
store2.initialize()
|
||||
store2.close()
|
||||
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# update_file_relationships
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_update_file_relationships_insert(temp_paths: Path):
|
||||
db_path = temp_paths / "indexes" / "_global_symbols.db"
|
||||
file_path = temp_paths / "src" / "auth.py"
|
||||
file_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
file_path.write_text("", encoding="utf-8")
|
||||
|
||||
rels = [
|
||||
_make_rel("login", "validate_token", source_file="src/auth.py", source_line=10),
|
||||
_make_rel("login", "hash_password", source_file="src/auth.py", source_line=15),
|
||||
_make_rel("AuthManager", "BaseManager", RelationshipType.INHERITS, "src/auth.py", source_line=1),
|
||||
]
|
||||
|
||||
with GlobalSymbolIndex(db_path, project_id=1) as store:
|
||||
store.update_file_relationships(file_path, rels)
|
||||
|
||||
# Verify rows exist
|
||||
conn = store._get_connection()
|
||||
count = conn.execute(
|
||||
"SELECT COUNT(*) FROM global_relationships WHERE project_id=1"
|
||||
).fetchone()[0]
|
||||
assert count == 3
|
||||
|
||||
|
||||
def test_update_file_relationships_replaces_atomically(temp_paths: Path):
|
||||
"""Second call should delete old rows and insert new ones."""
|
||||
db_path = temp_paths / "indexes" / "_global_symbols.db"
|
||||
file_path = temp_paths / "src" / "mod.py"
|
||||
file_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
file_path.write_text("", encoding="utf-8")
|
||||
|
||||
old_rels = [_make_rel("foo", "bar", source_file="src/mod.py", source_line=5)]
|
||||
new_rels = [
|
||||
_make_rel("baz", "qux", source_file="src/mod.py", source_line=10),
|
||||
_make_rel("baz", "quux", source_file="src/mod.py", source_line=11),
|
||||
]
|
||||
|
||||
with GlobalSymbolIndex(db_path, project_id=1) as store:
|
||||
store.update_file_relationships(file_path, old_rels)
|
||||
store.update_file_relationships(file_path, new_rels)
|
||||
|
||||
conn = store._get_connection()
|
||||
rows = conn.execute(
|
||||
"SELECT source_symbol FROM global_relationships WHERE project_id=1 ORDER BY source_line"
|
||||
).fetchall()
|
||||
names = [r[0] for r in rows]
|
||||
assert "foo" not in names
|
||||
assert "baz" in names
|
||||
assert len(rows) == 2
|
||||
|
||||
|
||||
def test_update_file_relationships_empty_clears(temp_paths: Path):
|
||||
"""Passing empty list should delete all relationships for the file."""
|
||||
db_path = temp_paths / "indexes" / "_global_symbols.db"
|
||||
file_path = temp_paths / "src" / "x.py"
|
||||
file_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
file_path.write_text("", encoding="utf-8")
|
||||
|
||||
with GlobalSymbolIndex(db_path, project_id=1) as store:
|
||||
store.update_file_relationships(
|
||||
file_path,
|
||||
[_make_rel("a", "b", source_file="src/x.py")],
|
||||
)
|
||||
store.update_file_relationships(file_path, [])
|
||||
|
||||
conn = store._get_connection()
|
||||
count = conn.execute(
|
||||
"SELECT COUNT(*) FROM global_relationships WHERE project_id=1"
|
||||
).fetchone()[0]
|
||||
assert count == 0
|
||||
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# query_by_target
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_query_by_target_exact(temp_paths: Path):
|
||||
db_path = temp_paths / "indexes" / "_global_symbols.db"
|
||||
file_path = temp_paths / "src" / "a.py"
|
||||
file_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
file_path.write_text("", encoding="utf-8")
|
||||
|
||||
rels = [
|
||||
_make_rel("caller", "TargetClass", source_file="src/a.py", source_line=10),
|
||||
_make_rel("caller2", "TargetClassExtra", source_file="src/a.py", source_line=20),
|
||||
]
|
||||
|
||||
with GlobalSymbolIndex(db_path, project_id=1) as store:
|
||||
store.update_file_relationships(file_path, rels)
|
||||
|
||||
# Exact match
|
||||
results = store.query_by_target("TargetClass", prefix_mode=False)
|
||||
assert len(results) == 1
|
||||
src_file, src_sym, rel_type, line = results[0]
|
||||
assert src_sym == "caller"
|
||||
assert rel_type == "calls"
|
||||
assert line == 10
|
||||
|
||||
|
||||
def test_query_by_target_prefix(temp_paths: Path):
|
||||
db_path = temp_paths / "indexes" / "_global_symbols.db"
|
||||
file_path = temp_paths / "src" / "a.py"
|
||||
file_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
file_path.write_text("", encoding="utf-8")
|
||||
|
||||
rels = [
|
||||
_make_rel("c1", "TargetClass", source_file="src/a.py", source_line=10),
|
||||
_make_rel("c2", "TargetClassExtra", source_file="src/a.py", source_line=20),
|
||||
_make_rel("c3", "Unrelated", source_file="src/a.py", source_line=30),
|
||||
]
|
||||
|
||||
with GlobalSymbolIndex(db_path, project_id=1) as store:
|
||||
store.update_file_relationships(file_path, rels)
|
||||
|
||||
# Prefix match should return both Target* rows
|
||||
results = store.query_by_target("TargetClass", prefix_mode=True)
|
||||
assert len(results) == 2
|
||||
symbols = {r[1] for r in results}
|
||||
assert symbols == {"c1", "c2"}
|
||||
|
||||
|
||||
def test_query_by_target_cross_directory(temp_paths: Path):
|
||||
"""Relationships from different files can be queried by the same target."""
|
||||
db_path = temp_paths / "indexes" / "_global_symbols.db"
|
||||
file_a = temp_paths / "src" / "a.py"
|
||||
file_b = temp_paths / "lib" / "b.py"
|
||||
for f in (file_a, file_b):
|
||||
f.parent.mkdir(parents=True, exist_ok=True)
|
||||
f.write_text("", encoding="utf-8")
|
||||
|
||||
with GlobalSymbolIndex(db_path, project_id=1) as store:
|
||||
store.update_file_relationships(
|
||||
file_a,
|
||||
[_make_rel("funcA", "SharedTarget", source_file="src/a.py", source_line=5)],
|
||||
)
|
||||
store.update_file_relationships(
|
||||
file_b,
|
||||
[_make_rel("funcB", "SharedTarget", source_file="lib/b.py", source_line=8)],
|
||||
)
|
||||
|
||||
results = store.query_by_target("SharedTarget", prefix_mode=False)
|
||||
assert len(results) == 2
|
||||
files = {r[0] for r in results}
|
||||
assert str(file_a.resolve()) in files
|
||||
assert str(file_b.resolve()) in files
|
||||
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# query_relationships_for_symbols
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_query_relationships_for_symbols_source_match(temp_paths: Path):
|
||||
db_path = temp_paths / "indexes" / "_global_symbols.db"
|
||||
file_path = temp_paths / "src" / "mod.py"
|
||||
file_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
file_path.write_text("", encoding="utf-8")
|
||||
|
||||
rels = [
|
||||
_make_rel("MyClass", "BaseClass", RelationshipType.INHERITS, "src/mod.py", source_line=1),
|
||||
_make_rel("helper", "utils", RelationshipType.IMPORTS, "src/mod.py", source_line=2),
|
||||
]
|
||||
|
||||
with GlobalSymbolIndex(db_path, project_id=1) as store:
|
||||
store.update_file_relationships(file_path, rels)
|
||||
|
||||
# Query by source_symbol name
|
||||
rows = store.query_relationships_for_symbols(["MyClass"])
|
||||
assert len(rows) >= 1
|
||||
assert any(r["source_symbol"] == "MyClass" for r in rows)
|
||||
|
||||
|
||||
def test_query_relationships_for_symbols_target_match(temp_paths: Path):
|
||||
db_path = temp_paths / "indexes" / "_global_symbols.db"
|
||||
file_path = temp_paths / "src" / "mod.py"
|
||||
file_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
file_path.write_text("", encoding="utf-8")
|
||||
|
||||
rels = [
|
||||
_make_rel("caller", "TargetFunc", source_file="src/mod.py", source_line=5),
|
||||
]
|
||||
|
||||
with GlobalSymbolIndex(db_path, project_id=1) as store:
|
||||
store.update_file_relationships(file_path, rels)
|
||||
|
||||
# Query by target name -- should match via LIKE %TargetFunc
|
||||
rows = store.query_relationships_for_symbols(["TargetFunc"])
|
||||
assert len(rows) >= 1
|
||||
assert any(r["target_qualified_name"] == "TargetFunc" for r in rows)
|
||||
|
||||
|
||||
def test_query_relationships_for_symbols_empty_list(temp_paths: Path):
|
||||
db_path = temp_paths / "indexes" / "_global_symbols.db"
|
||||
|
||||
with GlobalSymbolIndex(db_path, project_id=1) as store:
|
||||
rows = store.query_relationships_for_symbols([])
|
||||
assert rows == []
|
||||
|
||||
|
||||
def test_query_relationships_for_symbols_qualified_target(temp_paths: Path):
|
||||
"""A qualified target like 'lib/b.py::BaseClass' should still match 'BaseClass'."""
|
||||
db_path = temp_paths / "indexes" / "_global_symbols.db"
|
||||
file_path = temp_paths / "src" / "a.py"
|
||||
file_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
file_path.write_text("", encoding="utf-8")
|
||||
|
||||
rel = CodeRelationship(
|
||||
source_symbol="Child",
|
||||
target_symbol="BaseClass",
|
||||
relationship_type=RelationshipType.INHERITS,
|
||||
source_file="src/a.py",
|
||||
target_file="lib/b.py",
|
||||
source_line=1,
|
||||
)
|
||||
|
||||
with GlobalSymbolIndex(db_path, project_id=1) as store:
|
||||
store.update_file_relationships(file_path, [rel])
|
||||
|
||||
# The qualified name is "lib/b.py::BaseClass"
|
||||
# query_relationships_for_symbols uses LIKE %BaseClass which should match
|
||||
rows = store.query_relationships_for_symbols(["BaseClass"])
|
||||
assert len(rows) == 1
|
||||
assert rows[0]["target_qualified_name"] == "lib/b.py::BaseClass"
|
||||
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# delete_file_relationships
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_delete_file_relationships(temp_paths: Path):
|
||||
db_path = temp_paths / "indexes" / "_global_symbols.db"
|
||||
file_path = temp_paths / "src" / "a.py"
|
||||
file_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
file_path.write_text("", encoding="utf-8")
|
||||
|
||||
with GlobalSymbolIndex(db_path, project_id=1) as store:
|
||||
store.update_file_relationships(
|
||||
file_path,
|
||||
[
|
||||
_make_rel("f1", "t1", source_file="src/a.py", source_line=1),
|
||||
_make_rel("f2", "t2", source_file="src/a.py", source_line=2),
|
||||
],
|
||||
)
|
||||
|
||||
deleted = store.delete_file_relationships(file_path)
|
||||
assert deleted == 2
|
||||
|
||||
conn = store._get_connection()
|
||||
count = conn.execute(
|
||||
"SELECT COUNT(*) FROM global_relationships WHERE project_id=1"
|
||||
).fetchone()[0]
|
||||
assert count == 0
|
||||
|
||||
|
||||
def test_delete_file_relationships_no_rows(temp_paths: Path):
|
||||
db_path = temp_paths / "indexes" / "_global_symbols.db"
|
||||
nonexistent = temp_paths / "src" / "nope.py"
|
||||
|
||||
with GlobalSymbolIndex(db_path, project_id=1) as store:
|
||||
deleted = store.delete_file_relationships(nonexistent)
|
||||
assert deleted == 0
|
||||
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Project isolation
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_project_isolation(temp_paths: Path):
|
||||
"""Relationships from different project_ids should not leak."""
|
||||
db_path = temp_paths / "indexes" / "_global_symbols.db"
|
||||
file_path = temp_paths / "src" / "a.py"
|
||||
file_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
file_path.write_text("", encoding="utf-8")
|
||||
|
||||
store1 = GlobalSymbolIndex(db_path, project_id=1)
|
||||
store1.initialize()
|
||||
store2 = GlobalSymbolIndex(db_path, project_id=2)
|
||||
# store2 reuses the same DB; schema already created.
|
||||
|
||||
store1.update_file_relationships(
|
||||
file_path,
|
||||
[_make_rel("a", "SharedTarget", source_file="src/a.py")],
|
||||
)
|
||||
store2.update_file_relationships(
|
||||
file_path,
|
||||
[_make_rel("b", "SharedTarget", source_file="src/a.py")],
|
||||
)
|
||||
|
||||
results1 = store1.query_by_target("SharedTarget", prefix_mode=False)
|
||||
results2 = store2.query_by_target("SharedTarget", prefix_mode=False)
|
||||
assert len(results1) == 1
|
||||
assert results1[0][1] == "a"
|
||||
assert len(results2) == 1
|
||||
assert results2[0][1] == "b"
|
||||
|
||||
store1.close()
|
||||
store2.close()
|
||||
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Performance benchmarks
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_update_file_relationships_100_rows_under_50ms(temp_paths: Path):
|
||||
"""Batch insert of 100 relationships should complete in < 50ms."""
|
||||
db_path = temp_paths / "indexes" / "_global_symbols.db"
|
||||
file_path = temp_paths / "src" / "perf.py"
|
||||
file_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
file_path.write_text("", encoding="utf-8")
|
||||
|
||||
rels = [
|
||||
_make_rel(f"src_{i}", f"tgt_{i}", source_file="src/perf.py", source_line=i + 1)
|
||||
for i in range(100)
|
||||
]
|
||||
|
||||
with GlobalSymbolIndex(db_path, project_id=1) as store:
|
||||
start = time.perf_counter()
|
||||
store.update_file_relationships(file_path, rels)
|
||||
elapsed_ms = (time.perf_counter() - start) * 1000
|
||||
assert elapsed_ms < 50.0, f"Took {elapsed_ms:.1f}ms, expected < 50ms"
|
||||
|
||||
|
||||
def test_query_by_target_exact_under_5ms(temp_paths: Path):
|
||||
"""Exact-match query should complete in < 5ms with 500 rows."""
|
||||
db_path = temp_paths / "indexes" / "_global_symbols.db"
|
||||
file_path = temp_paths / "src" / "perf.py"
|
||||
file_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
file_path.write_text("", encoding="utf-8")
|
||||
|
||||
rels = [
|
||||
_make_rel(f"src_{i}", f"Target_{i}", source_file="src/perf.py", source_line=i + 1)
|
||||
for i in range(500)
|
||||
]
|
||||
|
||||
with GlobalSymbolIndex(db_path, project_id=1) as store:
|
||||
store.update_file_relationships(file_path, rels)
|
||||
|
||||
start = time.perf_counter()
|
||||
results = store.query_by_target("Target_250", prefix_mode=False)
|
||||
elapsed_ms = (time.perf_counter() - start) * 1000
|
||||
assert elapsed_ms < 5.0, f"Took {elapsed_ms:.1f}ms, expected < 5ms"
|
||||
assert len(results) == 1
|
||||
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# _build_qualified_name
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_build_qualified_name_with_target_file():
|
||||
rel = _make_rel("src", "tgt", target_file="lib/utils.py")
|
||||
assert GlobalSymbolIndex._build_qualified_name(rel) == "lib/utils.py::tgt"
|
||||
|
||||
|
||||
def test_build_qualified_name_without_target_file():
|
||||
rel = _make_rel("src", "tgt", target_file=None)
|
||||
assert GlobalSymbolIndex._build_qualified_name(rel) == "tgt"
|
||||
Reference in New Issue
Block a user