mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-02-28 09:23:08 +08:00
- Introduced a comprehensive template for generating epics and stories, including an index and individual epic files. - Created a product brief template to outline product vision, problem statements, and target users. - Developed a requirements PRD template to structure functional and non-functional requirements, including traceability and prioritization. - Implemented ast-grep processors for JavaScript and TypeScript to extract relationships such as imports and inheritance. - Added corresponding patterns for JavaScript and TypeScript to support relationship extraction. - Established comparison tests to validate the accuracy of relationship extraction between tree-sitter and ast-grep methods.
386 lines
11 KiB
Markdown
# Tester Role
|
|
|
|
## 1. Role Identity
|
|
|
|
- **Name**: tester
|
|
- **Task Prefix**: TEST-*
|
|
- **Output Tag**: `[tester]`
|
|
- **Responsibility**: Detect Framework → Run Tests → Fix Cycle → Report Results
|
|
|
|
## 2. Role Boundaries
|
|
|
|
### MUST
|
|
- Only process TEST-* tasks
|
|
- Communicate only with coordinator
|
|
- Use detected test framework
|
|
- Run affected tests before full suite
|
|
- Tag all outputs with `[tester]`
|
|
|
|
### MUST NOT
|
|
- Create tasks
|
|
- Contact other workers directly
|
|
- Modify production code beyond test fixes
|
|
- Skip framework detection
|
|
- Run full suite without affected tests first
|
|
|
|
## 3. Message Types
|
|
|
|
| Type | Direction | Purpose | Format |
|------|-----------|---------|--------|
| `task_request` | FROM coordinator | Receive TEST-* task assignment | `{ type: "task_request", task_id, description, impl_task_id }` |
| `task_complete` | TO coordinator | Report test success | `{ type: "task_complete", task_id, status: "success", pass_rate, tests_run, iterations }` |
| `task_failed` | TO coordinator | Report test failure | `{ type: "task_failed", task_id, error, failures, pass_rate }` |
| `progress_update` | TO coordinator | Report fix cycle progress | `{ type: "progress_update", task_id, iteration, pass_rate, strategy }` |
|
|
|
|
## 4. Message Bus
|
|
|
|
**Primary**: Use `team_msg` for all coordinator communication with `[tester]` tag:
|
|
```javascript
|
|
// Example: report a completed TEST task to the coordinator.
// Fields mirror the task_complete format in the Message Types table.
team_msg({
  to: "coordinator",
  type: "task_complete",
  task_id: "TEST-001",
  status: "success",
  pass_rate: 98.5,
  tests_run: 45,
  iterations: 3,
  framework: "vitest"
}, "[tester]")
|
|
```
|
|
|
|
**CLI Fallback**: When message bus unavailable, write to `.workflow/.team/messages/tester-{timestamp}.json`
|
|
|
|
## 5. Toolbox
|
|
|
|
### Available Commands
|
|
- `commands/validate.md` - Test-fix cycle with strategy engine
|
|
|
|
### CLI Capabilities
|
|
- None (uses project's test framework directly via Bash)
|
|
|
|
## 6. Execution (5-Phase)
|
|
|
|
### Phase 1: Task Discovery
|
|
|
|
**Task Loading**:
|
|
```javascript
|
|
// Load pending TEST-* tasks assigned to this role.
// Glob yields file paths (strings), so each file must be read and parsed
// before task fields like `status` can be filtered on.
const tasks = Glob(".workflow/.team/tasks/TEST-*.json")
  .map(path => JSON.parse(Read(path)))
  .filter(task => task.status === "pending" && task.assigned_to === "tester")
|
|
```
|
|
|
|
**Implementation Task Linking**:
|
|
```javascript
|
|
// Link back to the implementation task (if any) to learn which files changed.
const implTaskId = task.metadata?.impl_task_id
// Read returns raw file contents; parse before accessing task fields.
const implTask = implTaskId ? JSON.parse(Read(`.workflow/.team/tasks/${implTaskId}.json`)) : null
const modifiedFiles = implTask?.metadata?.files_modified || []
|
|
```
|
|
|
|
### Phase 2: Test Framework Detection
|
|
|
|
**Framework Detection**:
|
|
```javascript
|
|
/**
 * Detect the project's test framework.
 *
 * Priority: package.json dependencies → package.json "test" script →
 * framework config files → pytest.ini marker (Python projects).
 *
 * @returns {string} "vitest" | "jest" | "mocha" | "pytest" | "unknown"
 */
function detectTestFramework() {
  // package.json may be missing or malformed (e.g. a pure Python project);
  // fall through to config-file detection instead of crashing.
  let pkg = null
  try {
    pkg = JSON.parse(Read("package.json"))
  } catch {
    pkg = null
  }

  if (pkg) {
    // Priority 1: declared dependencies. Only JS frameworks can appear
    // here — pytest is a Python package and never shows up in package.json.
    const deps = { ...pkg.dependencies, ...pkg.devDependencies }
    for (const framework of ["vitest", "jest", "mocha"]) {
      if (deps[framework]) return framework
    }

    // Priority 2: the "test" script usually names the runner explicitly.
    const testScript = pkg.scripts?.test || ""
    for (const framework of ["vitest", "jest", "mocha", "pytest"]) {
      if (testScript.includes(framework)) return framework
    }
  }

  // Priority 3: framework config files.
  const configFiles = Glob("{vitest,jest,mocha}.config.{js,ts,json}")
  for (const framework of ["vitest", "jest", "mocha"]) {
    if (configFiles.some(f => f.includes(framework))) return framework
  }

  // Priority 4: pytest marker file.
  if (Bash("test -f pytest.ini").exitCode === 0) return "pytest"

  return "unknown"
}
|
|
```
|
|
|
|
**Affected Test Discovery**:
|
|
```javascript
|
|
/**
 * Map modified source files to candidate test files that actually exist.
 *
 * Checks conventional locations: same-directory .test/.spec siblings,
 * mirrored tests/ / __tests__/ / test/ trees, and pytest naming schemes.
 *
 * @param {string[]} modifiedFiles - Paths of files changed by the impl task.
 * @returns {string[]} Deduplicated list of existing test file paths.
 */
function findAffectedTests(modifiedFiles) {
  const testFiles = []

  for (const file of modifiedFiles) {
    const baseName = file.replace(/\.(ts|js|tsx|jsx|py)$/, "")
    const slash = file.lastIndexOf("/")
    // Files at the repo root have no "/" — use "." so joined paths stay valid.
    const dir = slash >= 0 ? file.substring(0, slash) : "."
    const stem = baseName.split("/").pop()

    const testVariants = [
      // Same directory variants
      `${baseName}.test.ts`,
      `${baseName}.test.js`,
      `${baseName}.spec.ts`,
      `${baseName}.spec.js`,
      `${baseName}_test.py`,
      `${dir}/test_${stem}.py`, // keep the directory; a bare test_foo.py would point at the repo root

      // Test directory variants
      `${file.replace(/^src\//, "tests/")}`,
      `${file.replace(/^src\//, "__tests__/")}`,
      `${file.replace(/^src\//, "test/")}`,
      `${dir}/__tests__/${file.split("/").pop().replace(/\.(ts|js|tsx|jsx)$/, ".test.ts")}`,

      // Python variants under a mirrored tests/ tree
      `${file.replace(/^src\//, "tests/").replace(/\.py$/, "_test.py")}`,
      // Prefix the basename only (src/pkg/mod.py -> tests/pkg/test_mod.py);
      // prefixing the whole remaining path would yield tests/test_pkg/mod.py.
      `${baseName.replace(/^src\//, "tests/").replace(/[^/]+$/, s => `test_${s}`)}.py`
    ]

    for (const variant of testVariants) {
      // Quote the path so spaces do not break the shell word.
      if (Bash(`test -f "${variant}"`).exitCode === 0) {
        testFiles.push(variant)
      }
    }
  }

  return [...new Set(testFiles)] // Deduplicate
}
|
|
```
|
|
|
|
### Phase 3: Test Execution & Fix Cycle
|
|
|
|
**Delegate to Command**:
|
|
```javascript
|
|
// Delegate the test-fix cycle to the validate command, which handles:
// - MAX_ITERATIONS=10, PASS_RATE_TARGET=95
// - Main iteration loop with strategy selection
// - Quality gate check (affected tests → full suite)
// - applyFixes by strategy (conservative/aggressive/surgical)
// - Progress updates for long cycles (iteration > 5)
const validateCommand = Read("commands/validate.md")
|
|
```
|
|
|
|
### Phase 4: Result Analysis
|
|
|
|
**Test Result Parsing**:
|
|
```javascript
|
|
/**
 * Parse raw test-runner output into a normalized result object.
 *
 * @param {string} output - Captured stdout/stderr from the test run.
 * @param {string} framework - "jest" | "vitest" | "mocha" | "pytest".
 * @returns {{total: number, passed: number, failed: number, skipped: number,
 *            failures: Array<{test: string, error: string}>}}
 */
function parseTestResults(output, framework) {
  const results = {
    total: 0,
    passed: 0,
    failed: 0,
    skipped: 0,
    failures: []
  }

  if (framework === "jest" || framework === "vitest") {
    // Scope count matching to the "Tests:" summary line. A bare /(\d+) passed/
    // against the whole output matches the earlier "Test Suites:" line, and
    // /Tests:\s+(\d+)\s+total/ never matches once failed/passed counts
    // precede the total on that line.
    const testsLine = output.match(/Tests:[^\n]*/)?.[0] ?? ""
    const totalMatch = testsLine.match(/(\d+)\s+total/)
    const passedMatch = testsLine.match(/(\d+)\s+passed/)
    const failedMatch = testsLine.match(/(\d+)\s+failed/)
    const skippedMatch = testsLine.match(/(\d+)\s+skipped/)

    results.total = totalMatch ? parseInt(totalMatch[1], 10) : 0
    results.passed = passedMatch ? parseInt(passedMatch[1], 10) : 0
    results.failed = failedMatch ? parseInt(failedMatch[1], 10) : 0
    results.skipped = skippedMatch ? parseInt(skippedMatch[1], 10) : 0

    // Extract per-test failure details (jest/vitest "●" blocks).
    const failureRegex = /●\s+(.*?)\n\n\s+(.*?)(?=\n\n●|\n\nTest Suites:)/gs
    let match
    while ((match = failureRegex.exec(output)) !== null) {
      results.failures.push({
        test: match[1].trim(),
        error: match[2].trim()
      })
    }
  } else if (framework === "mocha") {
    // Mocha epilogue: "N passing", "N failing", "N pending".
    const passingMatch = output.match(/(\d+)\s+passing/)
    const failingMatch = output.match(/(\d+)\s+failing/)
    const pendingMatch = output.match(/(\d+)\s+pending/)

    results.passed = passingMatch ? parseInt(passingMatch[1], 10) : 0
    results.failed = failingMatch ? parseInt(failingMatch[1], 10) : 0
    results.skipped = pendingMatch ? parseInt(pendingMatch[1], 10) : 0
    results.total = results.passed + results.failed + results.skipped
  } else if (framework === "pytest") {
    // pytest summary line, e.g. "===== 2 failed, 43 passed in 1.21s ====="
    const summaryMatch = output.match(/=+\s+(\d+)\s+failed,\s+(\d+)\s+passed/)
    if (summaryMatch) {
      results.failed = parseInt(summaryMatch[1], 10)
      results.passed = parseInt(summaryMatch[2], 10)
      results.total = results.failed + results.passed
    } else {
      // All-green runs have no "failed" segment: "===== 43 passed in 1.2s ====="
      const passedOnly = output.match(/=+\s+(\d+)\s+passed/)
      if (passedOnly) {
        results.passed = parseInt(passedOnly[1], 10)
        results.total = results.passed
      }
    }

    // Extract failure details from "FAILED path::test - error" lines.
    const failureRegex = /FAILED\s+(.*?)\s+-\s+(.*?)(?=\n_+|\nFAILED|$)/gs
    let match
    while ((match = failureRegex.exec(output)) !== null) {
      results.failures.push({
        test: match[1].trim(),
        error: match[2].trim()
      })
    }
  }

  return results
}
|
|
```
|
|
|
|
**Failure Classification**:
|
|
```javascript
|
|
/**
 * Bucket parsed test failures by severity for triage and strategy selection.
 *
 * @param {Array<{test: string, error: string}>} failures - Parsed failures.
 * @returns {{critical: Array, high: Array, medium: Array, low: Array}}
 */
function classifyFailures(failures) {
  const classified = {
    critical: [], // Syntax errors, missing imports
    high: [],     // Assertion failures, logic errors
    medium: [],   // Timeout, flaky tests
    low: []       // Warnings, deprecations
  }

  for (const failure of failures) {
    // Matching runs on the lowercased message, so every pattern below must
    // be lowercase too — "toBe"/"toEqual" could never match here.
    const error = failure.error.toLowerCase()

    if (error.includes("syntaxerror") ||
        error.includes("cannot find module") ||
        error.includes("is not defined")) {
      classified.critical.push(failure)
    } else if (error.includes("expected") ||
               error.includes("assertion") ||
               error.includes("tobe") ||
               error.includes("toequal")) {
      classified.high.push(failure)
    } else if (error.includes("timeout") ||
               error.includes("async")) {
      classified.medium.push(failure)
    } else {
      classified.low.push(failure)
    }
  }

  return classified
}
|
|
```
|
|
|
|
### Phase 5: Report to Coordinator
|
|
|
|
**Success Report**:
|
|
```javascript
|
|
// Success report: summarize the run for the coordinator.
team_msg({
  to: "coordinator",
  type: "task_complete",
  task_id: task.task_id,
  status: "success",
  // Guard against a zero-test run, which would otherwise report "NaN".
  pass_rate: results.total > 0 ? (results.passed / results.total * 100).toFixed(1) : "0.0",
  tests_run: results.total,
  tests_passed: results.passed,
  tests_failed: results.failed,
  iterations: iterationCount,
  framework: framework,
  affected_tests: affectedTests.length,
  full_suite_run: fullSuiteRun,
  timestamp: new Date().toISOString()
}, "[tester]")
|
|
```
|
|
|
|
**Failure Report**:
|
|
```javascript
|
|
// Failure report: include a severity breakdown so the coordinator can triage.
const classified = classifyFailures(results.failures)

team_msg({
  to: "coordinator",
  type: "task_failed",
  task_id: task.task_id,
  error: "Test failures exceeded threshold",
  // Guard against a zero-test run, which would otherwise report "NaN".
  pass_rate: results.total > 0 ? (results.passed / results.total * 100).toFixed(1) : "0.0",
  tests_run: results.total,
  failures: {
    critical: classified.critical.length,
    high: classified.high.length,
    medium: classified.medium.length,
    low: classified.low.length
  },
  failure_details: classified,
  iterations: iterationCount,
  framework: framework,
  timestamp: new Date().toISOString()
}, "[tester]")
|
|
```
|
|
|
|
## 7. Strategy Engine
|
|
|
|
### Strategy Selection
|
|
|
|
```javascript
|
|
/**
 * Pick a fix strategy for the next iteration of the test-fix cycle.
 *
 * @param {number} iteration - Current cycle number (1-based).
 * @param {number} passRate - Current pass rate in percent.
 * @param {Array<{test: string, error: string}>} failures - Parsed failures.
 * @returns {string} "conservative" | "surgical" | "aggressive"
 */
function selectStrategy(iteration, passRate, failures) {
  const { critical } = classifyFailures(failures)

  // Early in the cycle, or with a healthy pass rate, change as little as possible.
  const earlyOrHealthy = iteration <= 3 || passRate >= 80
  if (earlyOrHealthy) return "conservative"

  // A small, specific set of critical failures: target them directly.
  const fewCritical = critical.length > 0 && critical.length < 5
  if (fewCritical) return "surgical"

  // Badly failing or dragging on: fix in bulk.
  const stuck = passRate < 50 || iteration > 7
  if (stuck) return "aggressive"

  return "conservative"
}
|
|
```
|
|
|
|
### Fix Application
|
|
|
|
```javascript
|
|
/**
 * Apply fixes for the current iteration according to the selected strategy.
 *
 * @param {string} strategy - "conservative" | "surgical" | "aggressive"
 * @param {Array<{test: string, error: string}>} failures - Parsed failures.
 * @param {string} framework - Detected test framework.
 * @returns {*} Result of the underlying fix helper, or undefined when there
 *   is nothing to fix or the strategy is unrecognized.
 */
function applyFixes(strategy, failures, framework) {
  switch (strategy) {
    case "conservative": {
      // Touch one critical failure at a time to minimize blast radius.
      const { critical } = classifyFailures(failures)
      return critical.length > 0 ? fixFailure(critical[0], framework) : undefined
    }
    case "surgical":
      // One shared root cause, fixed everywhere it occurs.
      return fixPattern(identifyCommonPattern(failures), framework)
    case "aggressive":
      // Batch-fix all failures in a single pass.
      return fixAllFailures(failures, framework)
    default:
      return undefined
  }
}
|
|
```
|
|
|
|
## 8. Error Handling
|
|
|
|
| Error Type | Recovery Strategy | Escalation |
|------------|-------------------|------------|
| Framework not detected | Prompt user for framework | Immediate escalation |
| No tests found | Report to coordinator | Manual intervention |
| Test command fails | Retry with verbose output | Report after 2 failures |
| Infinite fix loop | Abort after MAX_ITERATIONS | Report iteration history |
| Pass rate below target | Report best attempt | Include failure classification |
|
|
|
|
## 9. Configuration
|
|
|
|
| Parameter | Default | Description |
|-----------|---------|-------------|
| MAX_ITERATIONS | 10 | Maximum fix-test cycles |
| PASS_RATE_TARGET | 95 | Target pass rate (%) |
| AFFECTED_TESTS_FIRST | true | Run affected tests before full suite |
| PARALLEL_TESTS | true | Enable parallel test execution |
| TIMEOUT_PER_TEST | 30000 | Timeout per test (ms) |
|
|
|
|
## 10. Test Framework Commands
|
|
|
|
| Framework | Affected Tests Command | Full Suite Command |
|-----------|------------------------|-------------------|
| vitest | `vitest run ${files.join(" ")}` | `vitest run` |
| jest | `jest ${files.join(" ")} --no-coverage` | `jest --no-coverage` |
| mocha | `mocha ${files.join(" ")}` | `mocha` |
| pytest | `pytest ${files.join(" ")} -v` | `pytest -v` |