mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-03-10 17:11:04 +08:00
Refactor team collaboration skills and update documentation
- Renamed `team-lifecycle-v5` to `team-lifecycle` across various documentation files for consistency. - Updated references in code examples and usage sections to reflect the new skill name. - Added a new command file for the `monitor` functionality in the `team-iterdev` skill, detailing the coordinator's monitoring events and task management. - Introduced new components for dynamic pipeline visualization and session coordinates display in the frontend. - Implemented utility functions for pipeline stage detection and status derivation based on message history. - Enhanced the team role panel to map members to their respective pipeline roles with status indicators. - Updated Chinese documentation to reflect the changes in skill names and descriptions.
This commit is contained in:
@@ -1,162 +0,0 @@
|
||||
# Command: execute-fixes
|
||||
|
||||
> Applies fixes from fix-plan.json via code-developer subagents. Quick path = 1 agent; standard = 1 agent per group.
|
||||
|
||||
## When to Use
|
||||
|
||||
- Phase 3B of Fixer, after plan-fixes
|
||||
- Requires: `${sessionFolder}/fix/fix-plan.json`, `sessionFolder`, `projectRoot`
|
||||
|
||||
## Strategy
|
||||
|
||||
**Mode**: Sequential Delegation (code-developer agents via Task)
|
||||
|
||||
```
|
||||
quick_path=true -> 1 agent, all findings sequentially
|
||||
quick_path=false -> 1 agent per group, groups in execution_order
|
||||
```
|
||||
|
||||
## Execution Steps
|
||||
|
||||
### Step 1: Load Plan + Helpers
|
||||
|
||||
```javascript
|
||||
// Load the plan emitted by plan-fixes (Phase 3A) and unpack what the
// executor needs; quick_path is renamed locally to isQuickPath.
const fixPlan = JSON.parse(Read(`${sessionFolder}/fix/fix-plan.json`))
const { groups, execution_order, quick_path: isQuickPath } = fixPlan

// Shared accumulator: every finding ends up in exactly one bucket.
const results = { fixed: [], failed: [], skipped: [] }
|
||||
|
||||
// --- Agent prompt builder ---
|
||||
// Build the prompt handed to a code-developer subagent: findings in
// dependency order plus (truncated) contents of the affected files.
// `findings` come from one plan group; `files` are that group's file paths.
function buildAgentPrompt(findings, files) {
  // Snapshot each file's contents; unreadable files are simply omitted.
  const fileContents = {}
  for (const file of files) {
    try {
      fileContents[file] = Read(file)
    } catch {
      // Best-effort: the agent works from the finding text alone.
    }
  }

  // One markdown section per finding, numbered in execution order.
  const fDesc = findings.map((f, i) => {
    const fix = f.suggested_fix || f.optimization?.approach || '(no suggestion)'
    const depIds = f.fix_dependencies || []
    const deps = depIds.length ? `\nDepends on: ${depIds.join(', ')}` : ''
    return `### ${i + 1}. ${f.id} [${f.severity}]\n**File**: ${f.location?.file}:${f.location?.line}\n**Title**: ${f.title}\n**Desc**: ${f.description}\n**Strategy**: ${f.fix_strategy || 'minimal'}\n**Fix**: ${fix}${deps}`
  }).join('\n\n')

  // File bodies, capped at 8000 chars each to bound prompt size.
  const fContent = Object.entries(fileContents)
    .filter(([, c]) => c)
    .map(([f, c]) => `### ${f}\n\`\`\`\n${String(c).slice(0, 8000)}\n\`\`\``)
    .join('\n\n')

  return `You are a code fixer agent. Apply fixes to the codebase.

## CRITICAL RULES
1. Apply each fix using Edit tool, in the order given (dependency-sorted)
2. After each fix, run related tests: tests/**/(unknown).test.* or *_test.*
3. Tests PASS -> finding is "fixed"
4. Tests FAIL -> revert: Bash("git checkout -- {file}") -> mark "failed" -> continue
5. Do NOT retry failed fixes with different strategy. Rollback and move on.
6. If a finding depends on a previously failed finding, mark "skipped"

## Findings (in order)
${fDesc}

## File Contents
${fContent}

## Required Output
After ALL findings, output JSON:
\`\`\`json
{"results":[{"id":"SEC-001","status":"fixed","file":"src/a.ts"},{"id":"COR-002","status":"failed","file":"src/b.ts","error":"reason"}]}
\`\`\`
Process each finding now. Rollback on failure, never retry.`
}
|
||||
|
||||
// --- Result parser ---
|
||||
// Interpret an agent's raw output and route every finding into the shared
// outer `results` buckets (fixed / failed / skipped). Prefers the agent's
// structured JSON block; falls back to `git diff` detection when absent.
function parseAgentResults(output, findings) {
  const failedIds = new Set()

  // Try to extract the ```json ... ``` block the agent was asked to emit.
  let parsed = []
  try {
    const m = (output || '').match(/```json\s*\n?([\s\S]*?)\n?```/)
    if (m) {
      const j = JSON.parse(m[1])
      // Fix: accept either {results:[...]} or a bare array. The original
      // `j.results || j || []` could leave a non-array object here, which
      // would skip the structured branch incorrectly or break iteration.
      parsed = Array.isArray(j) ? j : (Array.isArray(j.results) ? j.results : [])
    }
  } catch {
    // Malformed/missing JSON: fall through to the git-diff fallback.
  }

  if (parsed.length > 0) {
    // Structured path: trust the agent's per-finding status report.
    for (const r of parsed) {
      const f = findings.find(x => x.id === r.id)
      if (!f) continue
      if (r.status === 'fixed') results.fixed.push({ ...f })
      else if (r.status === 'failed') { results.failed.push({ ...f, error: r.error || 'unknown' }); failedIds.add(r.id) }
      else if (r.status === 'skipped') { results.skipped.push({ ...f, error: r.error || 'dep failed' }); failedIds.add(r.id) }
    }
  } else {
    // Fallback: treat any on-disk change to the finding's file as "fixed".
    for (const f of findings) {
      const file = f.location?.file
      if (!file) { results.skipped.push({ ...f, error: 'no file' }); continue }
      const diff = Bash(`git diff --name-only -- "${file}" 2>/dev/null`).trim()
      if (diff) results.fixed.push({ ...f })
      else { results.failed.push({ ...f, error: 'no changes detected' }); failedIds.add(f.id) }
    }
  }

  // Any finding never reported on: skipped if one of its dependencies
  // failed, otherwise marked failed as "not processed".
  const done = new Set([...results.fixed, ...results.failed, ...results.skipped].map(x => x.id))
  for (const f of findings) {
    if (done.has(f.id)) continue
    if ((f.fix_dependencies || []).some(d => failedIds.has(d)))
      results.skipped.push({ ...f, error: 'dependency failed' })
    else results.failed.push({ ...f, error: 'not processed' })
  }
}
|
||||
```
|
||||
|
||||
### Step 2: Execute
|
||||
|
||||
```javascript
|
||||
// Phase 3B dispatcher: one code-developer agent for the quick path,
// otherwise one agent per group following the planner's execution_order.
if (isQuickPath) {
  // Quick path: the plan has a single group; one agent handles everything.
  const group = groups[0]
  const prompt = buildAgentPrompt(group.findings, group.files)
  const out = Task({ subagent_type: "code-developer", prompt, run_in_background: false })
  parseAgentResults(out, group.findings)
} else {
  // Standard path: one agent per group, in planner-provided order.
  const completedGroups = new Set()

  // Map each group id to the set of other groups its findings depend on.
  // NOTE(review): groupDeps is built but never consulted below -- presumably
  // intended to skip groups whose prerequisite groups failed; confirm intent.
  const groupDeps = {}
  for (const g of groups) {
    groupDeps[g.id] = new Set()
    for (const f of g.findings) {
      for (const depId of (f.fix_dependencies || [])) {
        const dg = groups.find(x => x.findings.some(fx => fx.id === depId))
        if (dg && dg.id !== g.id) groupDeps[g.id].add(dg.id)
      }
    }
  }

  for (const gid of execution_order) {
    const group = groups.find(g => g.id === gid)
    if (!group) continue

    const prompt = buildAgentPrompt(group.findings, group.files)
    const out = Task({ subagent_type: "code-developer", prompt, run_in_background: false })
    parseAgentResults(out, group.findings)
    completedGroups.add(gid)

    // Checkpoint after every group so a crash can resume mid-plan.
    Write(`${sessionFolder}/fix/fix-progress.json`, JSON.stringify({
      completed_groups: [...completedGroups],
      results_so_far: { fixed: results.fixed.length, failed: results.failed.length }
    }, null, 2))

    // Fix: original call was truncated (missing ref field and closing `)`).
    mcp__ccw-tools__team_msg({
      operation: "log", session_id: sessionId, from: "fixer",
      to: "coordinator", type: "fix_progress",
      ref: `${sessionFolder}/fix/fix-progress.json`
    })
  }
}
|
||||
```
|
||||
|
||||
### Step 3: Write Results
|
||||
|
||||
```javascript
|
||||
// Persist the per-finding outcome buckets for Phase 4/5 verification and reporting.
Write(`${sessionFolder}/fix/execution-results.json`, JSON.stringify(results, null, 2))
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Scenario | Resolution |
|
||||
|----------|------------|
|
||||
| Agent crashes | Mark group findings as failed, continue next group |
|
||||
| Test failure after fix | Rollback (`git checkout -- {file}`), mark failed, continue |
|
||||
| No structured output | Fallback to git diff detection |
|
||||
| Dependency failed | Skip dependent findings automatically |
|
||||
| fix-plan.json missing | Report error, write empty results |
|
||||
@@ -1,186 +0,0 @@
|
||||
# Command: plan-fixes
|
||||
|
||||
> Deterministic grouping algorithm. Groups findings by file, merges dependent groups, topological sorts within groups, writes fix-plan.json.
|
||||
|
||||
## When to Use
|
||||
|
||||
- Phase 3A of Fixer, after context resolution
|
||||
- Requires: `fixableFindings[]`, `sessionFolder`, `quickPath` from Phase 2
|
||||
|
||||
**Trigger conditions**:
|
||||
- FIX-* task in Phase 3 with at least 1 fixable finding
|
||||
|
||||
## Strategy
|
||||
|
||||
**Mode**: Direct (inline execution, deterministic algorithm, no CLI needed)
|
||||
|
||||
## Execution Steps
|
||||
|
||||
### Step 1: Group Findings by Primary File
|
||||
|
||||
```javascript
|
||||
// Bucket findings by their primary file; findings with no location land
// in the sentinel '_unknown' bucket so none are dropped.
const fileGroups = {}
for (const f of fixableFindings) {
  const file = f.location?.file || '_unknown'
  if (!fileGroups[file]) fileGroups[file] = []
  fileGroups[file].push(f)
}
|
||||
```
|
||||
|
||||
### Step 2: Merge Groups with Cross-File Dependencies
|
||||
|
||||
```javascript
|
||||
// Merge file groups linked by cross-file fix dependencies: if finding A
// (file X) depends on finding B (file Y), X and Y must be fixed together.
const findingFileMap = {}
for (const f of fixableFindings) {
  findingFileMap[f.id] = f.location?.file || '_unknown'
}

// Union-Find over file names: path-compressing find, union by assignment.
const parent = {}
const find = (x) => parent[x] === x ? x : (parent[x] = find(parent[x]))
const union = (a, b) => { parent[find(a)] = find(b) }

const allFiles = Object.keys(fileGroups)
for (const file of allFiles) parent[file] = file

// Union any two files connected by a cross-file dependency edge.
for (const f of fixableFindings) {
  const myFile = f.location?.file || '_unknown'
  for (const depId of (f.fix_dependencies || [])) {
    const depFile = findingFileMap[depId]
    // Unknown dependency ids resolve to undefined and the edge is ignored.
    if (depFile && depFile !== myFile) {
      union(myFile, depFile)
    }
  }
}

// Collect files and findings under each union-find root.
const mergedGroupMap = {}
for (const file of allFiles) {
  const root = find(file)
  if (!mergedGroupMap[root]) mergedGroupMap[root] = { files: [], findings: [] }
  mergedGroupMap[root].files.push(file)
  mergedGroupMap[root].findings.push(...fileGroups[file])
}

// Deduplicate file lists within each merged group.
for (const g of Object.values(mergedGroupMap)) {
  g.files = [...new Set(g.files)]
}
|
||||
```
|
||||
|
||||
### Step 3: Topological Sort Within Each Group
|
||||
|
||||
```javascript
|
||||
// Kahn's-algorithm topological sort restricted to dependency edges whose
// target exists inside this group; members of a cycle are appended at the
// end in their original order so no finding is silently dropped.
function topoSort(findings) {
  const idSet = new Set(findings.map(f => f.id))

  // In-degree and adjacency over intra-group dependency edges only.
  const inDegree = {}
  const adj = {}
  for (const f of findings) {
    inDegree[f.id] = 0
    adj[f.id] = []
  }
  for (const f of findings) {
    for (const depId of (f.fix_dependencies || [])) {
      if (idSet.has(depId)) {
        adj[depId].push(f.id)
        inDegree[f.id]++
      }
    }
  }

  // BFS: repeatedly emit nodes whose dependencies are all satisfied.
  const queue = findings.filter(f => inDegree[f.id] === 0).map(f => f.id)
  const sorted = []
  while (queue.length > 0) {
    const id = queue.shift()
    sorted.push(id)
    for (const next of adj[id]) {
      inDegree[next]--
      if (inDegree[next] === 0) queue.push(next)
    }
  }

  // Cycle handling: append any finding the BFS never reached.
  const sortedSet = new Set(sorted)
  for (const f of findings) {
    if (!sortedSet.has(f.id)) sorted.push(f.id)
  }

  // Map ids back to the original finding objects.
  const findingMap = Object.fromEntries(findings.map(f => [f.id, f]))
  return sorted.map(id => findingMap[id])
}
|
||||
|
||||
// Turn each merged group into a plan group: findings in dependency order
// plus the group's highest severity. The rank table is hoisted -- the
// original rebuilt it on every reduce iteration.
const SEVERITY_RANK = { critical: 0, high: 1, medium: 2, low: 3 }
const groups = Object.entries(mergedGroupMap).map(([root, g], i) => {
  const sorted = topoSort(g.findings)
  // Lower rank = more severe; unknown severities rank 4 (never win).
  const maxSev = sorted.reduce(
    (max, f) => (SEVERITY_RANK[f.severity] ?? 4) < (SEVERITY_RANK[max] ?? 4) ? f.severity : max,
    'low'
  )
  return {
    id: `G${i + 1}`,
    files: g.files,
    findings: sorted,
    max_severity: maxSev
  }
})
|
||||
```
|
||||
|
||||
### Step 4: Sort Groups by Max Severity
|
||||
|
||||
```javascript
|
||||
// Order groups so the most severe work happens first; unknown severities
// sort last (rank 4).
const SEV_ORDER = { critical: 0, high: 1, medium: 2, low: 3 }
groups.sort((a, b) => (SEV_ORDER[a.max_severity] ?? 4) - (SEV_ORDER[b.max_severity] ?? 4))

// Re-assign stable ids (G1, G2, ...) to match the post-sort order.
groups.forEach((g, i) => { g.id = `G${i + 1}` })

const execution_order = groups.map(g => g.id)
|
||||
```
|
||||
|
||||
### Step 5: Determine Execution Path
|
||||
|
||||
```javascript
|
||||
// Quick path: small workload (<= 5 findings) fully contained in a single
// group after dependency merging.
// NOTE(review): the Phase 2 doc phrases this as "no cross-file
// dependencies"; a single merged group is the equivalent test -- confirm.
const totalFindings = fixableFindings.length
const totalGroups = groups.length
const isQuickPath = totalFindings <= 5 && totalGroups <= 1
|
||||
```
|
||||
|
||||
### Step 6: Write fix-plan.json
|
||||
|
||||
```javascript
|
||||
// Assemble the serializable fix plan: groups (with a trimmed view of each
// finding), execution order, and totals for execute-fixes to consume.
const fixPlan = {
  plan_id: `fix-plan-${Date.now()}`,
  quick_path: isQuickPath,
  groups: groups.map(g => ({
    id: g.id,
    files: g.files,
    // Keep only the fields the executor needs; drop everything else.
    findings: g.findings.map(f => ({
      id: f.id, severity: f.severity, dimension: f.dimension,
      title: f.title, description: f.description,
      location: f.location, suggested_fix: f.suggested_fix,
      fix_strategy: f.fix_strategy, fix_complexity: f.fix_complexity,
      fix_dependencies: f.fix_dependencies,
      root_cause: f.root_cause, optimization: f.optimization
    })),
    max_severity: g.max_severity
  })),
  execution_order: execution_order,
  total_findings: totalFindings,
  total_groups: totalGroups
}

Bash(`mkdir -p "${sessionFolder}/fix"`)
Write(`${sessionFolder}/fix/fix-plan.json`, JSON.stringify(fixPlan, null, 2))

// Fix: original call was truncated (missing ref field and closing `)`).
mcp__ccw-tools__team_msg({
  operation: "log", session_id: sessionId, from: "fixer",
  to: "coordinator", type: "fix_progress",
  ref: `${sessionFolder}/fix/fix-plan.json`
})
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Scenario | Resolution |
|
||||
|----------|------------|
|
||||
| All findings share one file | Single group, likely quick path |
|
||||
| Dependency cycle detected | Topo sort appends cycle members at end |
|
||||
| Finding references unknown dependency | Ignore that dependency edge |
|
||||
| Empty fixableFindings | Should not reach this command (checked in Phase 2) |
|
||||
@@ -1,245 +0,0 @@
|
||||
# Fixer Role
|
||||
|
||||
Fix code based on reviewed findings. Load manifest, group, apply with rollback-on-failure, verify. Code-generation role -- modifies source files.
|
||||
|
||||
## Identity
|
||||
|
||||
- **Name**: `fixer` | **Tag**: `[fixer]`
|
||||
- **Task Prefix**: `FIX-*`
|
||||
- **Responsibility**: code-generation
|
||||
|
||||
## Boundaries
|
||||
|
||||
### MUST
|
||||
|
||||
- Only process `FIX-*` prefixed tasks
|
||||
- All output (SendMessage, team_msg, logs) must carry `[fixer]` identifier
|
||||
- Only communicate with coordinator via SendMessage
|
||||
- Write only to session fix directory
|
||||
- Rollback on test failure -- never self-retry failed fixes
|
||||
- Work strictly within code-generation scope
|
||||
|
||||
### MUST NOT
|
||||
|
||||
- Create tasks for other roles
|
||||
- Contact scanner/reviewer directly
|
||||
- Retry failed fixes (report and continue)
|
||||
- Modify files outside scope
|
||||
- Omit `[fixer]` identifier in any output
|
||||
|
||||
---
|
||||
|
||||
## Toolbox
|
||||
|
||||
### Available Commands
|
||||
|
||||
| Command | File | Phase | Description |
|
||||
|---------|------|-------|-------------|
|
||||
| `plan-fixes` | [commands/plan-fixes.md](commands/plan-fixes.md) | Phase 3A | Group + sort findings |
|
||||
| `execute-fixes` | [commands/execute-fixes.md](commands/execute-fixes.md) | Phase 3B | Apply fixes per plan |
|
||||
|
||||
### Tool Capabilities
|
||||
|
||||
| Tool | Type | Used By | Purpose |
|
||||
|------|------|---------|---------|
|
||||
| `Read` | Built-in | fixer | Load manifest and reports |
|
||||
| `Write` | Built-in | fixer | Write fix summaries |
|
||||
| `Edit` | Built-in | fixer | Apply code fixes |
|
||||
| `Bash` | Built-in | fixer | Run verification tools |
|
||||
| `TaskUpdate` | Built-in | fixer | Update task status |
|
||||
| `team_msg` | MCP | fixer | Log communication |
|
||||
|
||||
---
|
||||
|
||||
## Message Types
|
||||
|
||||
| Type | Direction | Trigger | Description |
|
||||
|------|-----------|---------|-------------|
|
||||
| `fix_progress` | fixer -> coordinator | Milestone | Progress update during fix |
|
||||
| `fix_complete` | fixer -> coordinator | Phase 5 | Fix finished with summary |
|
||||
| `fix_failed` | fixer -> coordinator | Failure | Fix failed, partial results |
|
||||
| `error` | fixer -> coordinator | Error | Error requiring attention |
|
||||
|
||||
## Message Bus
|
||||
|
||||
Before every SendMessage, log via `mcp__ccw-tools__team_msg`:
|
||||
|
||||
```
|
||||
mcp__ccw-tools__team_msg({
|
||||
operation: "log",
|
||||
session_id: <session-id>,
|
||||
from: "fixer",
|
||||
type: "fix_complete",
|
||||
ref: "<session-folder>/fix/fix-summary.json"
|
||||
})
|
||||
```
|
||||
|
||||
**CLI fallback** (when MCP unavailable):
|
||||
|
||||
```
|
||||
Bash("ccw team log --session-id <session-id> --from fixer --type fix_complete --ref <path> --json")
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Execution (5-Phase)
|
||||
|
||||
### Phase 1: Task Discovery
|
||||
|
||||
> See SKILL.md Shared Infrastructure -> Worker Phase 1: Task Discovery
|
||||
|
||||
Standard task discovery flow: TaskList -> filter by prefix `FIX-*` + status pending + blockedBy empty -> TaskGet -> TaskUpdate in_progress.
|
||||
|
||||
Extract from task description:
|
||||
|
||||
| Parameter | Extraction Pattern | Default |
|
||||
|-----------|-------------------|---------|
|
||||
| Session folder | `session: <path>` | (required) |
|
||||
| Input path | `input: <path>` | `<session>/fix/fix-manifest.json` |
|
||||
|
||||
Load manifest and source report. If missing -> report error, complete task.
|
||||
|
||||
**Resume Artifact Check**: If `fix-summary.json` exists and is complete -> skip to Phase 5.
|
||||
|
||||
---
|
||||
|
||||
### Phase 2: Context Resolution
|
||||
|
||||
**Objective**: Resolve fixable findings and detect verification tools.
|
||||
|
||||
**Workflow**:
|
||||
|
||||
1. **Filter fixable findings**:
|
||||
|
||||
| Condition | Include |
|
||||
|-----------|---------|
|
||||
| Severity in scope | manifest.scope == 'all' or severity matches scope |
|
||||
| Not skip | fix_strategy !== 'skip' |
|
||||
|
||||
If 0 fixable findings -> report complete immediately.
|
||||
|
||||
2. **Detect complexity**:
|
||||
|
||||
| Signal | Quick Path |
|
||||
|--------|------------|
|
||||
| Findings <= 5 | Yes |
|
||||
| No cross-file dependencies | Yes |
|
||||
| Both conditions | Quick path enabled |
|
||||
|
||||
3. **Detect verification tools**:
|
||||
|
||||
| Tool | Detection Method |
|
||||
|------|------------------|
|
||||
| tsc | `tsconfig.json` exists |
|
||||
| eslint | `eslint` in package.json |
|
||||
| jest | `jest` in package.json |
|
||||
| pytest | pytest command + pyproject.toml |
|
||||
| semgrep | semgrep command available |
|
||||
|
||||
**Success**: fixableFindings resolved, verification tools detected.
|
||||
|
||||
---
|
||||
|
||||
### Phase 3: Plan + Execute
|
||||
|
||||
**Objective**: Create fix plan and apply fixes.
|
||||
|
||||
### Phase 3A: Plan Fixes
|
||||
|
||||
Delegate to `commands/plan-fixes.md`.
|
||||
|
||||
**Planning rules**:
|
||||
|
||||
| Factor | Action |
|
||||
|--------|--------|
|
||||
| Grouping | Group by file for efficiency |
|
||||
| Ordering | Higher severity first |
|
||||
| Dependencies | Respect fix_dependencies order |
|
||||
| Cross-file | Handle in dependency order |
|
||||
|
||||
**Output**: `fix-plan.json`
|
||||
|
||||
### Phase 3B: Execute Fixes
|
||||
|
||||
Delegate to `commands/execute-fixes.md`.
|
||||
|
||||
**Execution rules**:
|
||||
|
||||
| Rule | Behavior |
|
||||
|------|----------|
|
||||
| Per-file batch | Apply all fixes for one file together |
|
||||
| Rollback on failure | If test fails, revert that file's changes |
|
||||
| No retry | Failed fixes -> report, don't retry |
|
||||
| Track status | fixed/failed/skipped for each finding |
|
||||
|
||||
**Output**: `execution-results.json`
|
||||
|
||||
---
|
||||
|
||||
### Phase 4: Post-Fix Verification
|
||||
|
||||
**Objective**: Run verification tools to validate fixes.
|
||||
|
||||
**Verification tools**:
|
||||
|
||||
| Tool | Command | Pass Criteria |
|
||||
|------|---------|---------------|
|
||||
| tsc | `npx tsc --noEmit` | 0 errors |
|
||||
| eslint | `npx eslint <files>` | 0 errors |
|
||||
| jest | `npx jest --passWithNoTests` | Tests pass |
|
||||
| pytest | `pytest --tb=short` | Tests pass |
|
||||
| semgrep | `semgrep --config auto <files> --json` | 0 results |
|
||||
|
||||
**Verification scope**: Only run tools that are:
|
||||
1. Available (detected in Phase 2)
|
||||
2. Relevant (files were modified)
|
||||
|
||||
**Rollback logic**: If verification fails critically, rollback last batch of fixes.
|
||||
|
||||
**Output**: `verify-results.json`
|
||||
|
||||
**Success**: Verification results recorded, fix rate calculated.
|
||||
|
||||
---
|
||||
|
||||
### Phase 5: Report to Coordinator
|
||||
|
||||
> See SKILL.md Shared Infrastructure -> Worker Phase 5: Report
|
||||
|
||||
**Objective**: Report fix results to coordinator.
|
||||
|
||||
**Workflow**:
|
||||
|
||||
1. Generate fix-summary.json with: fix_id, fix_date, scope, total, fixed, failed, skipped, fix_rate, verification results
|
||||
2. Generate fix-summary.md (human-readable)
|
||||
3. Update .msg/meta.json with fix results
|
||||
4. Log via team_msg with `[fixer]` prefix
|
||||
5. SendMessage to coordinator
|
||||
6. TaskUpdate completed
|
||||
7. Loop to Phase 1 for next task
|
||||
|
||||
**Report content**:
|
||||
|
||||
| Field | Value |
|
||||
|-------|-------|
|
||||
| Scope | all / critical,high / custom |
|
||||
| Fixed | Count by severity |
|
||||
| Failed | Count + error details |
|
||||
| Skipped | Count |
|
||||
| Fix rate | Percentage |
|
||||
| Verification | Pass/fail per tool |
|
||||
|
||||
---
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Scenario | Resolution |
|
||||
|----------|------------|
|
||||
| Manifest/report missing | Error, complete task |
|
||||
| 0 fixable findings | Complete immediately |
|
||||
| Test failure after fix | Rollback, mark failed, continue |
|
||||
| Tool unavailable | Skip that check |
|
||||
| All findings fail | Report 0%, complete |
|
||||
| Session folder missing | Re-create fix subdirectory |
|
||||
| Edit tool fails | Log error, mark finding as failed |
|
||||
| Critical issue beyond scope | SendMessage fix_required to coordinator |
|
||||
Reference in New Issue
Block a user