feat(issue-plan): enhance conflict detection and resolution process with semantic grouping and user clarifications

catlog22
2025-12-29 09:26:57 +08:00
parent ef3b6b9f6e
commit eb4ba89693
4 changed files with 450 additions and 49 deletions

View File

@@ -49,12 +49,14 @@ color: green
```
Phase 1: Issue Understanding (5%)
↓ Fetch details, extract requirements, determine complexity
- Phase 2: ACE Exploration (30%)
+ Phase 2: ACE Exploration (25%)
↓ Semantic search, pattern discovery, dependency mapping
- Phase 3: Solution Planning (50%)
+ Phase 3: Solution Planning (45%)
↓ Task decomposition, 5-phase lifecycle, acceptance criteria
- Phase 4: Validation & Output (15%)
+ Phase 4: Validation & Output (10%)
↓ DAG validation, conflict detection, solution registration
+ Phase 5: Conflict Analysis (15%)
+ ↓ Gemini CLI multi-solution conflict detection
```
#### Phase 1: Issue Understanding
@@ -199,6 +201,67 @@ for (const issue of issues) {
}
```
#### Phase 5: Conflict Analysis (Gemini CLI)
**Trigger**: When batch contains 2+ solutions
**Conflict Types Analyzed**:
1. **File Conflicts**: Modified file overlaps
2. **API Conflicts**: Interface/breaking changes
3. **Data Model Conflicts**: Schema changes
4. **Dependency Conflicts**: Package version conflicts
5. **Architecture Conflicts**: Pattern violations
**Gemini CLI Call**:
```javascript
function analyzeConflictsGemini(solutions, projectRoot) {
if (solutions.length < 2) return { conflicts: [], safe_parallel: [solutions.map(s => s.id)] };
const solutionSummaries = solutions.map(sol => ({
issue_id: sol.issue_id,
solution_id: sol.id,
files_modified: extractFilesFromTasks(sol.tasks),
api_changes: extractApiChanges(sol.tasks),
data_changes: extractDataChanges(sol.tasks)
}));
const prompt = `
PURPOSE: Detect conflicts between solution implementations; identify all conflict types; provide resolution recommendations
TASK: • Analyze file overlaps • Check API breaking changes • Detect schema conflicts • Find dependency conflicts • Identify architecture violations
MODE: analysis
CONTEXT: Solution summaries
EXPECTED: JSON conflict report with type, severity, solutions_affected, resolution_strategy
RULES: $(cat ~/.claude/workflows/cli-templates/protocols/analysis-protocol.md) | Mark severity (high/medium/low) | Provide recommended_order
SOLUTIONS:
${JSON.stringify(solutionSummaries, null, 2)}
OUTPUT FORMAT:
{
"conflicts": [{
"type": "file_conflict|api_conflict|data_conflict|dependency_conflict|architecture_conflict",
"severity": "high|medium|low",
"solutions_affected": ["SOL-001", "SOL-002"],
"summary": "brief description",
"resolution_strategy": "sequential|parallel_with_coordination|refactor_merge",
"recommended_order": ["SOL-001", "SOL-002"],
"rationale": "why this order"
}],
"safe_parallel": [["SOL-003", "SOL-004"]]
}
`;
const taskId = Bash({
command: `ccw cli -p "${prompt}" --tool gemini --mode analysis --cd "${projectRoot}"`,
run_in_background: true, timeout: 900000
});
const output = TaskOutput({ task_id: taskId, block: true });
return JSON.parse(extractJsonFromMarkdown(output));
}
```
**Integration**: After Phase 4 validation, call `analyzeConflictsGemini()` and merge results into return summary.
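A minimal integration sketch (the `finalizeBatch` wrapper and the `phase4Summary` shape are illustrative assumptions, not spec names):
```javascript
// Illustrative sketch: finalizeBatch and phase4Summary are assumptions, not part of this spec.
function finalizeBatch(solutions, projectRoot, phase4Summary) {
  // Phase 5 only runs for batches with 2+ solutions
  const analysis = solutions.length >= 2
    ? analyzeConflictsGemini(solutions, projectRoot)
    : { conflicts: [], safe_parallel: [solutions.map(s => s.id)] };
  // Merge conflict findings into the summary returned to the main agent
  return { ...phase4Summary, conflicts: analysis.conflicts, safe_parallel: analysis.safe_parallel };
}
```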
---
## 2. Output Requirements
@@ -225,7 +288,16 @@ Each line is a solution JSON containing tasks. Schema: `cat .claude/workflows/cl
{
"bound": [{ "issue_id": "...", "solution_id": "...", "task_count": N }],
"pending_selection": [{ "issue_id": "...", "solutions": [{ "id": "SOL-001", "description": "...", "task_count": N }] }],
"conflicts": [{ "file": "...", "issues": [...] }]
"conflicts": [{
"type": "file_conflict|api_conflict|data_conflict|dependency_conflict|architecture_conflict",
"severity": "high|medium|low",
"solutions_affected": ["SOL-001", "SOL-002"],
"summary": "brief description",
"resolution_strategy": "sequential|parallel_with_coordination",
"recommended_order": ["SOL-001", "SOL-002"],
"recommended_resolution": "Use sequential execution: SOL-001 first",
"resolution_options": [{ "strategy": "...", "rationale": "..." }]
}]
}
```

View File

@@ -52,11 +52,13 @@ color: orange
### 1.2 Execution Flow
```
- Phase 1: Solution Analysis (20%)
+ Phase 1: Solution Analysis (15%)
| Parse solutions, collect files_touched, build DAG
- Phase 2: Conflict Detection (30%)
- | Identify file overlaps between solutions
- Phase 3: Conflict Resolution (25%)
+ Phase 2: Conflict Detection (25%)
+ | Identify all conflict types (file, API, data, dependency, architecture)
+ Phase 2.5: Clarification (15%)
+ | Surface ambiguous dependencies, BLOCK until resolved
+ Phase 3: Conflict Resolution (20%)
| Apply ordering rules, update DAG
Phase 4: Ordering & Grouping (25%)
| Topological sort, assign parallel/sequential groups
@@ -86,22 +88,106 @@ function buildDependencyGraph(solutions) {
}
```
- ### 2.2 Conflict Detection
+ ### 2.2 Conflict Detection (5 Types)
- Conflict when multiple solutions modify same file:
+ Detect all conflict types between solutions:
```javascript
- function detectConflicts(fileModifications, graph) {
-   return [...fileModifications.entries()]
-     .filter(([_, solutions]) => solutions.length > 1)
-     .map(([file, solutions]) => ({
-       type: 'file_conflict',
-       file,
-       solutions,
-       resolved: false
-     }))
- }
function detectConflicts(solutions, graph) {
const conflicts = [];
const fileModifications = buildFileModificationMap(solutions);
// 1. File conflicts (multiple solutions modify same file)
for (const [file, solIds] of fileModifications.entries()) {
if (solIds.length > 1) {
conflicts.push({
type: 'file_conflict', severity: 'medium',
file, solutions: solIds, resolved: false
});
}
}
// 2. API conflicts (breaking interface changes)
const apiChanges = extractApiChangesFromAllSolutions(solutions);
for (const [api, changes] of apiChanges.entries()) {
if (changes.some(c => c.breaking)) {
conflicts.push({
type: 'api_conflict', severity: 'high',
api, solutions: changes.map(c => c.solution_id), resolved: false
});
}
}
// 3. Data model conflicts (schema changes to same model)
const dataChanges = extractDataChangesFromAllSolutions(solutions);
for (const [model, changes] of dataChanges.entries()) {
if (changes.length > 1) {
conflicts.push({
type: 'data_conflict', severity: 'high',
model, solutions: changes.map(c => c.solution_id), resolved: false
});
}
}
// 4. Dependency conflicts (package version conflicts)
const depChanges = extractDependencyChanges(solutions);
for (const [pkg, versions] of depChanges.entries()) {
if (versions.length > 1 && !versionsCompatible(versions)) {
conflicts.push({
type: 'dependency_conflict', severity: 'medium',
package: pkg, solutions: versions.map(v => v.solution_id), resolved: false
});
}
}
// 5. Architecture conflicts (pattern violations)
const archIssues = detectArchitectureViolations(solutions);
conflicts.push(...archIssues.map(issue => ({
type: 'architecture_conflict', severity: 'low',
pattern: issue.pattern, solutions: issue.solutions, resolved: false
})));
return conflicts;
}
```
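The detection above assumes extraction helpers defined elsewhere in the agent. A minimal sketch of two of them (illustrative; the exact task and dependency schemas are assumptions):
```javascript
// Illustrative sketches: the files_touched and version field locations are assumptions.
function buildFileModificationMap(solutions) {
  const map = new Map(); // file path -> [solution ids]
  for (const sol of solutions) {
    for (const task of sol.tasks || []) {
      for (const file of task.files_touched || []) {
        if (!map.has(file)) map.set(file, []);
        if (!map.get(file).includes(sol.id)) map.get(file).push(sol.id);
      }
    }
  }
  return map;
}
function versionsCompatible(versions) {
  // Naive check: identical requested versions are compatible, anything else is flagged
  return new Set(versions.map(v => v.version)).size <= 1;
}
```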
### 2.2.5 Clarification (BLOCKING)
**Purpose**: Surface ambiguous dependencies for user/system clarification
**Trigger Conditions**:
- High severity conflicts with no clear resolution order
- Circular dependencies detected
- Multiple valid resolution strategies
**Clarification Logic**:
```javascript
function generateClarifications(conflicts, solutions) {
const clarifications = [];
for (const conflict of conflicts) {
if (conflict.severity === 'high' && !conflict.recommended_order) {
clarifications.push({
conflict_id: `CFT-${clarifications.length + 1}`,
question: `${conflict.type}: Which solution should execute first?`,
options: conflict.solutions.map(solId => ({
value: solId,
label: getSolutionSummary(solId, solutions)
})),
requires_user_input: true
});
}
}
return clarifications;
}
```
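`getSolutionSummary` is assumed above; a minimal sketch (the description/tasks field access is an assumption about the solution schema):
```javascript
// Illustrative helper: description and tasks fields are assumed, not guaranteed by the spec.
function getSolutionSummary(solId, solutions) {
  const sol = solutions.find(s => s.id === solId);
  if (!sol) return solId;
  return `${solId}: ${sol.description || sol.issue_id} (${(sol.tasks || []).length} tasks)`;
}
```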
**Blocking Behavior**: Agent BLOCKS execution until clarifications are resolved
- Return `clarifications` array in output
- Main agent presents to user via AskUserQuestion
- Agent waits for response before proceeding to Phase 3
- No best-guess fallback - explicit user decision required
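A sketch of how the main agent could surface these clarifications (illustrative; the mapping from clarification options to AskUserQuestion options is an assumption):
```javascript
// Illustrative only: one way the main agent might present blocking clarifications.
function resolveClarifications(clarifications) {
  if (clarifications.length === 0) return null;
  // Phase 3 resumes only after every clarification has an answer
  return AskUserQuestion({
    questions: clarifications.slice(0, 4).map(c => ({
      question: c.question,
      header: c.conflict_id,
      multiSelect: false,
      options: c.options.map(o => ({ label: o.value, description: o.label }))
    }))
  });
}
```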
### 2.3 Resolution Rules
| Priority | Rule | Example |
@@ -189,7 +275,9 @@ Queue Item ID format: `S-N` (S-1, S-2, S-3, ...)
}
```
- ### 3.3 Return Summary
+ ### 3.3 Return Summary (Brief)
+ Return brief summaries; full conflict details in separate files:
```json
{
@@ -197,11 +285,27 @@ Queue Item ID format: `S-N` (S-1, S-2, S-3, ...)
"total_solutions": N,
"total_tasks": N,
"execution_groups": [{ "id": "P1", "type": "parallel", "count": N }],
"conflicts_summary": [{
"id": "CFT-001",
"type": "api_conflict",
"severity": "high",
"summary": "Brief 1-line description",
"resolution": "sequential",
"details_path": ".workflow/issues/conflicts/CFT-001.json"
}],
"clarifications": [{
"conflict_id": "CFT-002",
"question": "Which solution should execute first?",
"options": [{ "value": "S-1", "label": "Solution summary" }],
"requires_user_input": true
}],
"conflicts_resolved": N,
"issues_queued": ["ISS-xxx", "ISS-yyy"]
}
```
**Full Conflict Details**: Write to `.workflow/issues/conflicts/{conflict-id}.json`
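A sketch of the write path (assumes a file-write tool such as `Write` is available to the agent; adapt to the actual mechanism):
```javascript
// Sketch: the Write tool call and CFT id padding are assumptions, not spec requirements.
function persistConflict(conflict, index) {
  const id = conflict.id || `CFT-${String(index + 1).padStart(3, '0')}`;
  const detailsPath = `.workflow/issues/conflicts/${id}.json`;
  Write({ file_path: detailsPath, content: JSON.stringify(conflict, null, 2) });
  // Only the brief entry goes into conflicts_summary in the return payload
  return {
    id,
    type: conflict.type,
    severity: conflict.severity,
    summary: conflict.summary,
    resolution: conflict.resolution_strategy,
    details_path: detailsPath
  };
}
```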
---
## 4. Quality Standards

View File

@@ -128,40 +128,46 @@ if (flags.allPending) {
}
}
- // Intelligent grouping by similarity (tags → title keywords)
- function groupBySimilarity(issues, maxSize) {
-   const batches = [];
-   const used = new Set();
-   for (const issue of issues) {
-     if (used.has(issue.id)) continue;
-     const batch = [issue];
-     used.add(issue.id);
-     const issueTags = new Set(issue.tags);
-     const issueWords = new Set(issue.title.toLowerCase().split(/\s+/));
-     // Find similar issues
-     for (const other of issues) {
-       if (used.has(other.id) || batch.length >= maxSize) continue;
-       // Similarity: shared tags or shared title keywords
-       const sharedTags = other.tags.filter(t => issueTags.has(t)).length;
-       const otherWords = other.title.toLowerCase().split(/\s+/);
-       const sharedWords = otherWords.filter(w => issueWords.has(w) && w.length > 3).length;
-       if (sharedTags > 0 || sharedWords >= 2) {
-         batch.push(other);
-         used.add(other.id);
-       }
-     }
-     batches.push(batch);
-   }
-   return batches;
- }
- const batches = groupBySimilarity(issues, batchSize);
- console.log(`Processing ${issues.length} issues in ${batches.length} batch(es) (grouped by similarity)`);
// Semantic grouping via Gemini CLI (max 6 issues per group)
async function groupBySimilarityGemini(issues) {
  const issueSummaries = issues.map(i => ({
    id: i.id, title: i.title, tags: i.tags
  }));
  const prompt = `
PURPOSE: Group similar issues by semantic similarity for batch processing; maximize within-group coherence; max 6 issues per group
TASK: • Analyze issue titles/tags semantically • Identify functional/architectural clusters • Assign each issue to one group
MODE: analysis
CONTEXT: Issue metadata only
EXPECTED: JSON with groups array, each containing max 6 issue_ids, theme, rationale
RULES: $(cat ~/.claude/workflows/cli-templates/protocols/analysis-protocol.md) | Each issue in exactly one group | Max 6 issues per group | Balance group sizes
INPUT:
${JSON.stringify(issueSummaries, null, 2)}
OUTPUT FORMAT:
{"groups":[{"group_id":1,"theme":"...","issue_ids":["..."],"rationale":"..."}],"ungrouped":[]}
`;
  const taskId = Bash({
    command: `ccw cli -p "${prompt}" --tool gemini --mode analysis`,
    run_in_background: true, timeout: 600000
  });
  const output = TaskOutput({ task_id: taskId, block: true });
  // Extract JSON from potential markdown code blocks
  function extractJsonFromMarkdown(text) {
    const jsonMatch = text.match(/```json\s*\n([\s\S]*?)\n```/) ||
      text.match(/```\s*\n([\s\S]*?)\n```/);
    return jsonMatch ? jsonMatch[1] : text;
  }
  const result = JSON.parse(extractJsonFromMarkdown(output));
  return result.groups.map(g => g.issue_ids.map(id => issues.find(i => i.id === id)));
}
const batches = await groupBySimilarityGemini(issues);
console.log(`Processing ${issues.length} issues in ${batches.length} batch(es) (Gemini semantic grouping, max 6 issues/agent)`);
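// Optional guard (sketch; not part of the original flow): verify every issue was assigned to a group
const groupedIds = new Set(batches.flat().map(i => i.id));
const ungroupedIssues = issues.filter(i => !groupedIds.has(i.id));
if (ungroupedIssues.length > 0) {
  console.log(`⚠ ${ungroupedIssues.length} issue(s) missing from grouping: ${ungroupedIssues.map(i => i.id).join(', ')}`);
}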
TodoWrite({
todos: batches.map((_, i) => ({
@@ -177,6 +183,7 @@ TodoWrite({
```javascript
Bash(`mkdir -p .workflow/issues/solutions`);
const pendingSelections = []; // Collect multi-solution issues for user selection
const agentResults = []; // Collect all agent results for conflict aggregation
// Build prompts for all batches
const agentTasks = batches.map((batch, batchIndex) => {
@@ -248,6 +255,7 @@ for (let i = 0; i < agentTasks.length; i += MAX_PARALLEL) {
for (const { taskId, batchIndex } of taskIds) {
const result = TaskOutput({ task_id: taskId, block: true });
const summary = JSON.parse(result);
agentResults.push(summary); // Store for Phase 3 conflict aggregation
for (const item of summary.bound || []) {
console.log(`${item.issue_id}: ${item.solution_id} (${item.task_count} tasks)`);
@@ -258,17 +266,55 @@ for (let i = 0; i < agentTasks.length; i += MAX_PARALLEL) {
pendingSelections.push(pending);
}
if (summary.conflicts?.length > 0) {
- console.log(`⚠ Conflicts: ${summary.conflicts.map(c => c.file).join(', ')}`);
+ console.log(`⚠ Conflicts: ${summary.conflicts.length} detected (will resolve in Phase 3)`);
}
updateTodo(`Plan batch ${batchIndex + 1}`, 'completed');
}
}
```
- ### Phase 3: Multi-Solution Selection (MANDATORY when pendingSelections > 0)
+ ### Phase 3: Conflict Resolution & Solution Selection
```javascript
- // MUST trigger user selection when multiple solutions exist
+ // Phase 3a: Aggregate and resolve conflicts from all agents
const allConflicts = [];
for (const result of agentResults) {
if (result.conflicts?.length > 0) {
allConflicts.push(...result.conflicts);
}
}
if (allConflicts.length > 0) {
console.log(`\n## Resolving ${allConflicts.length} conflict(s) detected by agents\n`);
// ALWAYS confirm high-severity conflicts (per user preference)
const highSeverity = allConflicts.filter(c => c.severity === 'high');
const lowMedium = allConflicts.filter(c => c.severity !== 'high');
// Auto-resolve low/medium severity
for (const conflict of lowMedium) {
console.log(` Auto-resolved: ${conflict.summary} → ${conflict.recommended_resolution}`);
}
// ALWAYS require user confirmation for high severity
if (highSeverity.length > 0) {
const conflictAnswer = AskUserQuestion({
questions: highSeverity.slice(0, 4).map(conflict => ({
question: `${conflict.type}: ${conflict.summary}. How to resolve?`,
header: conflict.type.replace('_conflict', ''),
multiSelect: false,
options: conflict.resolution_options.map(opt => ({
label: opt.strategy,
description: opt.rationale
}))
}))
});
// Apply user-selected resolutions
console.log('Applied user-selected conflict resolutions');
}
}
// Phase 3b: Multi-Solution Selection (MANDATORY when pendingSelections > 0)
if (pendingSelections.length > 0) {
console.log(`\n## User Selection Required: ${pendingSelections.length} issue(s) have multiple solutions\n`);

View File

@@ -0,0 +1,179 @@
/**
* Unit tests for CoreMemoryStore (core memory persistence + clustering helpers).
*
* Notes:
* - Targets the runtime implementation shipped in `ccw/dist`.
* - Uses a real temporary CCW data directory to avoid touching user state.
* - Exercises SQLite-backed CRUD behavior (better-sqlite3) in an isolated temp dir.
*/
import { after, afterEach, before, beforeEach, describe, it, mock } from 'node:test';
import assert from 'node:assert/strict';
import { existsSync, mkdtempSync, mkdirSync, rmSync } from 'node:fs';
import { tmpdir } from 'node:os';
import { join } from 'node:path';
const TEST_CCW_HOME = mkdtempSync(join(tmpdir(), 'ccw-core-memory-store-home-'));
const TEST_PROJECT_ROOT = mkdtempSync(join(tmpdir(), 'ccw-core-memory-store-project-'));
const coreMemoryStoreUrl = new URL('../dist/core/core-memory-store.js', import.meta.url);
coreMemoryStoreUrl.searchParams.set('t', String(Date.now()));
// eslint-disable-next-line @typescript-eslint/no-explicit-any
let mod: any;
const originalEnv = { CCW_DATA_DIR: process.env.CCW_DATA_DIR };
function resetDir(dirPath: string): void {
if (existsSync(dirPath)) {
rmSync(dirPath, { recursive: true, force: true });
}
mkdirSync(dirPath, { recursive: true });
}
describe('CoreMemoryStore', async () => {
before(async () => {
process.env.CCW_DATA_DIR = TEST_CCW_HOME;
mod = await import(coreMemoryStoreUrl.href);
});
beforeEach(() => {
process.env.CCW_DATA_DIR = TEST_CCW_HOME;
mock.method(console, 'warn', () => {});
try {
mod?.closeAllStores?.();
} catch {
// ignore
}
resetDir(TEST_CCW_HOME);
});
afterEach(() => {
mock.restoreAll();
});
after(() => {
try {
mod?.closeAllStores?.();
} catch {
// ignore
}
process.env.CCW_DATA_DIR = originalEnv.CCW_DATA_DIR;
rmSync(TEST_CCW_HOME, { recursive: true, force: true });
rmSync(TEST_PROJECT_ROOT, { recursive: true, force: true });
});
it('upserts, lists, archives, and deletes memories', () => {
const store = mod.getCoreMemoryStore(TEST_PROJECT_ROOT);
const created = store.upsertMemory({
id: 'CMEM-TEST-1',
content: 'Hello world',
summary: 'Greeting',
});
assert.equal(created.id, 'CMEM-TEST-1');
assert.equal(created.content, 'Hello world');
assert.equal(created.archived, false);
const fetched = store.getMemory('CMEM-TEST-1');
assert.ok(fetched);
assert.equal(fetched?.summary, 'Greeting');
// Default listing excludes archived
const active = store.getMemories({ limit: 10 });
assert.equal(active.length, 1);
assert.equal(active[0].id, 'CMEM-TEST-1');
// Update existing record (including archived flag)
const updated = store.upsertMemory({
id: 'CMEM-TEST-1',
content: 'Hello updated',
archived: true,
metadata: JSON.stringify({ source: 'test' }),
});
assert.equal(updated.content, 'Hello updated');
assert.equal(updated.archived, true);
assert.equal(updated.metadata, JSON.stringify({ source: 'test' }));
assert.equal(store.getMemories({ limit: 10 }).length, 0);
assert.equal(store.getMemories({ archived: true, limit: 10 }).length, 1);
// Delete should remove the record (no throw for missing ids)
store.deleteMemory('CMEM-TEST-1');
assert.equal(store.getMemory('CMEM-TEST-1'), null);
store.deleteMemory('CMEM-TEST-1');
});
it('lists projects with memory/cluster counts', () => {
const store = mod.getCoreMemoryStore(TEST_PROJECT_ROOT);
store.upsertMemory({ id: 'CMEM-TEST-2', content: 'Project memory' });
const cluster = store.createCluster({ name: 'Cluster A' });
assert.ok(cluster);
const projects = mod.listAllProjects();
assert.equal(projects.length, 1);
assert.equal(projects[0].memoriesCount, 1);
assert.equal(projects[0].clustersCount, 1);
assert.ok(typeof projects[0].id === 'string' && projects[0].id.length > 0);
});
it('manages cluster membership and session metadata cache', () => {
const store = mod.getCoreMemoryStore(TEST_PROJECT_ROOT);
const meta1 = store.upsertSessionMetadata({
session_id: 'WFS-TEST-1',
session_type: 'workflow',
title: 'Auth flow',
summary: 'JWT + refresh',
keywords: ['auth', 'jwt'],
token_estimate: 42,
file_patterns: ['src/auth/**'],
});
assert.ok(meta1);
assert.equal(meta1?.access_count, 1);
const meta2 = store.upsertSessionMetadata({
session_id: 'WFS-TEST-1',
session_type: 'workflow',
title: 'Auth flow (updated)',
keywords: ['auth', 'jwt', 'refresh'],
});
assert.ok(meta2);
assert.equal(meta2?.access_count, 2);
const matches = store.searchSessionsByKeyword('auth');
assert.equal(matches.length, 1);
assert.equal(matches[0].session_id, 'WFS-TEST-1');
const cluster = store.createCluster({ name: 'Cluster B', description: 'Testing clusters' });
assert.ok(cluster);
store.addClusterMember({
cluster_id: cluster.id,
session_id: 'WFS-TEST-1',
session_type: 'workflow',
sequence_order: 1,
relevance_score: 1.0,
});
const sessionClusters = store.getSessionClusters('WFS-TEST-1');
assert.equal(sessionClusters.length, 1);
assert.equal(sessionClusters[0].id, cluster.id);
assert.equal(store.removeClusterMember(cluster.id, 'WFS-TEST-1'), true);
assert.equal(store.getSessionClusters('WFS-TEST-1').length, 0);
});
it('throws clear errors for invalid cross-project access', () => {
assert.throws(
() => mod.getMemoriesFromProject('missing-project-id'),
(err: any) => err instanceof Error && err.message.includes('Project not found'),
);
assert.equal(mod.findMemoryAcrossProjects('CMEM-NOT-THERE'), null);
});
});