mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-03-01 15:03:57 +08:00
feat: Add templates for epics, product brief, and requirements PRD
- Introduced a comprehensive template for generating epics and stories, including an index and individual epic files. - Created a product brief template to outline product vision, problem statements, and target users. - Developed a requirements PRD template to structure functional and non-functional requirements, including traceability and prioritization. - Implemented ast-grep processors for JavaScript and TypeScript to extract relationships such as imports and inheritance. - Added corresponding patterns for JavaScript and TypeScript to support relationship extraction. - Established comparison tests to validate the accuracy of relationship extraction between tree-sitter and ast-grep methods.
This commit is contained in:
@@ -0,0 +1,689 @@
|
||||
# Code Review Command
|
||||
|
||||
## Purpose
|
||||
4-dimension code review analyzing quality, security, architecture, and requirements compliance.
|
||||
|
||||
## Review Dimensions
|
||||
|
||||
### 1. Quality Review
|
||||
|
||||
```javascript
|
||||
/**
 * Quality review: scans each file for TypeScript error suppressions without
 * justification, `any` usage, console statements, empty catch blocks,
 * magic numbers, and duplicated lines.
 *
 * @param {Array<{path: string, content: string}>} files - files under review
 * @param {string} gitDiff - unified diff of the change set (currently unused here)
 * @returns {{critical: Array, high: Array, medium: Array, low: Array}} issues by severity
 */
function reviewQuality(files, gitDiff) {
  const issues = {
    critical: [],
    high: [],
    medium: [],
    low: []
  }

  for (const file of files) {
    const content = file.content
    const lines = content.split("\n")

    // Check for @ts-ignore / @ts-expect-error without a written justification.
    lines.forEach((line, idx) => {
      const directive = line.match(/@ts-(?:ignore|expect-error)/)
      if (directive) {
        // Only the text AFTER the directive counts as justification.
        // (Splitting on "//" would include the directive itself, letting a
        // bare `// @ts-expect-error` pass because the directive alone is
        // longer than 10 characters.)
        const afterDirective = line
          .slice(line.indexOf(directive[0]) + directive[0].length)
          .replace(/^[:\s-]+/, "")
          .trim()
        const hasJustification = afterDirective.length > 10

        if (!hasJustification) {
          issues.high.push({
            file: file.path,
            line: idx + 1,
            type: "ts-ignore-without-justification",
            message: "TypeScript error suppression without explanation",
            code: line.trim()
          })
        }
      }
    })

    // Check for 'any' type usage
    const anyMatches = Grep("\\bany\\b", { path: file.path, "-n": true })
    if (anyMatches) {
      anyMatches.forEach(match => {
        // Exclude comments and type definitions that are intentionally generic
        if (!match.line.includes("//") && !match.line.includes("Generic")) {
          issues.high.push({
            file: file.path,
            line: match.lineNumber,
            type: "any-type-usage",
            message: "Using 'any' type reduces type safety",
            code: match.line.trim()
          })
        }
      })
    }

    // Check for console.log in production code (test files are exempt)
    const consoleMatches = Grep("console\\.(log|debug|info)", { path: file.path, "-n": true })
    if (consoleMatches && !file.path.includes("test")) {
      consoleMatches.forEach(match => {
        issues.high.push({
          file: file.path,
          line: match.lineNumber,
          type: "console-log",
          message: "Console statements should be removed from production code",
          code: match.line.trim()
        })
      })
    }

    // Check for empty catch blocks (silently swallowed errors)
    const emptyCatchRegex = /catch\s*\([^)]*\)\s*\{\s*\}/g
    let match
    while ((match = emptyCatchRegex.exec(content)) !== null) {
      const lineNumber = content.substring(0, match.index).split("\n").length
      issues.critical.push({
        file: file.path,
        line: lineNumber,
        type: "empty-catch",
        message: "Empty catch block silently swallows errors",
        code: match[0]
      })
    }

    // Check for magic numbers. The allow-list lookahead must be anchored
    // with (?!\d): a plain (?!0|1|2|10|100|1000) rejects ANY number whose
    // first digit matches an alternative (e.g. 15, 255, 1024 were never
    // reported because the alternation matched a prefix).
    const magicNumberRegex = /(?<![a-zA-Z0-9_])(?!(?:0|1|2|10|100|1000)(?!\d))(\d{2,})(?![a-zA-Z0-9_])/g
    while ((match = magicNumberRegex.exec(content)) !== null) {
      const lineNumber = content.substring(0, match.index).split("\n").length
      const line = lines[lineNumber - 1]

      // Exclude if in comment or constant definition
      if (!line.includes("//") && !line.includes("const") && !line.includes("=")) {
        issues.medium.push({
          file: file.path,
          line: lineNumber,
          type: "magic-number",
          message: "Magic number should be extracted to named constant",
          code: line.trim()
        })
      }
    }

    // Check for duplicate code (simple heuristic: identical non-trivial lines)
    const lineHashes = new Map()
    lines.forEach((line, idx) => {
      const trimmed = line.trim()
      if (trimmed.length > 30 && !trimmed.startsWith("//")) {
        if (!lineHashes.has(trimmed)) {
          lineHashes.set(trimmed, [])
        }
        lineHashes.get(trimmed).push(idx + 1)
      }
    })

    lineHashes.forEach((occurrences, line) => {
      if (occurrences.length > 2) {
        issues.medium.push({
          file: file.path,
          line: occurrences[0],
          type: "duplicate-code",
          message: `Duplicate code found at lines: ${occurrences.join(", ")}`,
          code: line
        })
      }
    })
  }

  return issues
}
|
||||
```
|
||||
|
||||
### 2. Security Review
|
||||
|
||||
```javascript
|
||||
/**
 * Security review: scans each file for dangerous eval-style calls, direct
 * HTML injection, hardcoded secrets, SQL built by string concatenation,
 * insecure randomness near security-sensitive code, and functions that
 * appear to skip input validation.
 *
 * @param {Array<{path: string, content: string}>} files - files under review
 * @returns {{critical: Array, high: Array, medium: Array, low: Array}} issues by severity
 */
function reviewSecurity(files) {
  const issues = {
    critical: [],
    high: [],
    medium: [],
    low: []
  }

  for (const file of files) {
    const content = file.content

    // Check for eval/exec usage
    const evalMatches = Grep("\\b(eval|exec|Function\\(|setTimeout\\(.*string|setInterval\\(.*string)\\b", {
      path: file.path,
      "-n": true
    })
    if (evalMatches) {
      evalMatches.forEach(match => {
        issues.high.push({
          file: file.path,
          line: match.lineNumber,
          type: "dangerous-eval",
          message: "eval/exec usage can lead to code injection vulnerabilities",
          code: match.line.trim()
        })
      })
    }

    // Check for innerHTML/dangerouslySetInnerHTML
    const innerHTMLMatches = Grep("(innerHTML|dangerouslySetInnerHTML)", {
      path: file.path,
      "-n": true
    })
    if (innerHTMLMatches) {
      innerHTMLMatches.forEach(match => {
        issues.high.push({
          file: file.path,
          line: match.lineNumber,
          type: "xss-risk",
          message: "Direct HTML injection can lead to XSS vulnerabilities",
          code: match.line.trim()
        })
      })
    }

    // Check for hardcoded secrets
    const secretPatterns = [
      /api[_-]?key\s*=\s*['"][^'"]{20,}['"]/i,
      /password\s*=\s*['"][^'"]+['"]/i,
      /secret\s*=\s*['"][^'"]{20,}['"]/i,
      /token\s*=\s*['"][^'"]{20,}['"]/i,
      /aws[_-]?access[_-]?key/i,
      /private[_-]?key\s*=\s*['"][^'"]+['"]/i
    ]

    secretPatterns.forEach(pattern => {
      // BUG FIX: new RegExp(pattern, "gm") REPLACES the pattern's own flags,
      // silently dropping the `i` that every pattern above relies on (so
      // e.g. "API_KEY = '...'" was never caught). Rebuild from source and
      // keep the original flags.
      const globalPattern = new RegExp(pattern.source, `gm${pattern.flags}`)
      let secretMatch
      while ((secretMatch = globalPattern.exec(content)) !== null) {
        // exec() reports the true match index, so repeated identical secrets
        // are attributed to their own lines (content.indexOf(match) always
        // pointed at the first occurrence).
        const lineNumber = content.substring(0, secretMatch.index).split("\n").length
        issues.critical.push({
          file: file.path,
          line: lineNumber,
          type: "hardcoded-secret",
          message: "Hardcoded secrets should be moved to environment variables",
          code: secretMatch[0].replace(/['"][^'"]+['"]/, "'***'") // Redact secret
        })
      }
    })

    // Check for SQL injection vectors (string concatenation in query calls)
    const sqlInjectionMatches = Grep("(query|execute)\\s*\\(.*\\+.*\\)", {
      path: file.path,
      "-n": true
    })
    if (sqlInjectionMatches) {
      sqlInjectionMatches.forEach(match => {
        if (!match.line.includes("//") && !match.line.includes("prepared")) {
          issues.critical.push({
            file: file.path,
            line: match.lineNumber,
            type: "sql-injection",
            message: "String concatenation in SQL queries can lead to SQL injection",
            code: match.line.trim()
          })
        }
      })
    }

    // Check for insecure random
    const insecureRandomMatches = Grep("Math\\.random\\(\\)", {
      path: file.path,
      "-n": true
    })
    if (insecureRandomMatches) {
      insecureRandomMatches.forEach(match => {
        // Check ~200 chars of surrounding context for security-related words.
        // NOTE(review): indexOf(match.line) finds the FIRST occurrence of the
        // line's text, so the context window may be wrong for duplicated lines.
        const context = content.substring(
          Math.max(0, content.indexOf(match.line) - 200),
          content.indexOf(match.line) + 200
        )
        if (context.match(/token|key|secret|password|session/i)) {
          issues.medium.push({
            file: file.path,
            line: match.lineNumber,
            type: "insecure-random",
            message: "Math.random() is not cryptographically secure, use crypto.randomBytes()",
            code: match.line.trim()
          })
        }
      })
    }

    // Check for missing input validation
    const functionMatches = Grep("(function|const.*=.*\\(|async.*\\()", {
      path: file.path,
      "-n": true
    })
    if (functionMatches) {
      functionMatches.forEach(match => {
        // Simple heuristic: the function takes parameters but its first few
        // lines show nothing that looks like validation.
        if (match.line.includes("(") && !match.line.includes("()")) {
          const nextLines = content.split("\n").slice(match.lineNumber, match.lineNumber + 5).join("\n")
          const hasValidation = nextLines.match(/if\s*\(|throw|assert|validate|check/)

          if (!hasValidation && !match.line.includes("test") && !match.line.includes("mock")) {
            issues.low.push({
              file: file.path,
              line: match.lineNumber,
              type: "missing-validation",
              message: "Function parameters should be validated",
              code: match.line.trim()
            })
          }
        }
      })
    }
  }

  return issues
}
|
||||
```
|
||||
|
||||
### 3. Architecture Review
|
||||
|
||||
```javascript
|
||||
/**
 * Architecture review: flags deep relative imports, oversized files,
 * circular dependencies, tight coupling to a single module, and over-long
 * functions.
 *
 * Relies on the workflow's Grep/Bash/Read tools and on the sibling helpers
 * resolveImportPath() and extractFunctionBody().
 *
 * @param {Array<{path: string, content: string}>} files - files under review
 * @returns {{critical: Array, high: Array, medium: Array, low: Array}} issues by severity
 */
function reviewArchitecture(files) {
  const issues = {
    critical: [],
    high: [],
    medium: [],
    low: []
  }

  for (const file of files) {
    const content = file.content
    const lines = content.split("\n")

    // Check for parent directory imports ("../" chains in import specifiers)
    const importMatches = Grep("from\\s+['\"](\\.\\./)+", {
      path: file.path,
      "-n": true
    })
    if (importMatches) {
      importMatches.forEach(match => {
        // Severity scales with how many directory levels the import climbs.
        const parentLevels = (match.line.match(/\.\.\//g) || []).length

        if (parentLevels > 2) {
          issues.high.push({
            file: file.path,
            line: match.lineNumber,
            type: "excessive-parent-imports",
            message: `Import traverses ${parentLevels} parent directories, consider restructuring`,
            code: match.line.trim()
          })
        } else if (parentLevels === 2) {
          issues.medium.push({
            file: file.path,
            line: match.lineNumber,
            type: "parent-imports",
            message: "Consider using absolute imports or restructuring modules",
            code: match.line.trim()
          })
        }
      })
    }

    // Check for large files (> 500 lines)
    const lineCount = lines.length
    if (lineCount > 500) {
      issues.medium.push({
        file: file.path,
        line: 1,
        type: "large-file",
        message: `File has ${lineCount} lines, consider splitting into smaller modules`,
        code: `Total lines: ${lineCount}`
      })
    }

    // Collect this file's import specifiers for the dependency checks below.
    const imports = lines
      .filter(line => line.match(/^import.*from/))
      .map(line => {
        const match = line.match(/from\s+['"](.+?)['"]/)
        return match ? match[1] : null
      })
      .filter(Boolean)

    // Check for circular dependencies: does any imported file import us back?
    // NOTE(review): the reverse check is a substring test on this file's path
    // stem, so it can false-positive when one path contains another — confirm
    // acceptable for this heuristic.
    for (const importPath of imports) {
      const resolvedPath = resolveImportPath(file.path, importPath)
      if (resolvedPath && Bash(`test -f ${resolvedPath}`).exitCode === 0) {
        const importedContent = Read(resolvedPath)
        const reverseImport = importedContent.includes(file.path.replace(/\.[jt]sx?$/, ""))

        if (reverseImport) {
          issues.critical.push({
            file: file.path,
            line: 1,
            type: "circular-dependency",
            message: `Circular dependency detected with ${resolvedPath}`,
            code: `${file.path} ↔ ${resolvedPath}`
          })
        }
      }
    }

    // Check for tight coupling (many imports from the same top-level module)
    const importCounts = {}
    imports.forEach(imp => {
      const baseModule = imp.split("/")[0]
      importCounts[baseModule] = (importCounts[baseModule] || 0) + 1
    })

    Object.entries(importCounts).forEach(([module, count]) => {
      if (count > 5) {
        issues.medium.push({
          file: file.path,
          line: 1,
          type: "tight-coupling",
          message: `File imports ${count} items from '${module}', consider facade pattern`,
          code: `Imports from ${module}: ${count}`
        })
      }
    })

    // Check for missing abstractions (functions longer than 50 lines).
    // extractFunctionBody() returns the first balanced brace region after
    // the match, so the measured length starts at the opening "{".
    const functionRegex = /(function|const.*=.*\(|async.*\()/g
    let match
    while ((match = functionRegex.exec(content)) !== null) {
      const startLine = content.substring(0, match.index).split("\n").length
      const functionBody = extractFunctionBody(content, match.index)
      const functionLines = functionBody.split("\n").length

      if (functionLines > 50) {
        issues.medium.push({
          file: file.path,
          line: startLine,
          type: "long-function",
          message: `Function has ${functionLines} lines, consider extracting smaller functions`,
          code: match[0].trim()
        })
      }
    }
  }

  return issues
}
|
||||
|
||||
/**
 * Resolves a relative import specifier against the importing file's
 * directory and probes the filesystem for a matching source file.
 *
 * @param {string} fromFile - path of the file containing the import
 * @param {string} importPath - the import specifier as written
 * @returns {string|null} the resolved path with extension, or null when the
 *   specifier is not relative or no candidate file exists
 */
function resolveImportPath(fromFile, importPath) {
  // Only relative specifiers ("./x", "../x") are resolvable here; bare
  // module names fall through to null.
  if (!importPath.startsWith(".")) {
    return null
  }

  const baseDir = fromFile.substring(0, fromFile.lastIndexOf("/"))
  const candidate = `${baseDir}/${importPath}`.replace(/\/\.\//g, "/")

  // Probe each known source extension in priority order; first hit wins.
  const extensions = [".ts", ".js", ".tsx", ".jsx"]
  for (const ext of extensions) {
    const withExt = `${candidate}${ext}`
    if (Bash(`test -f ${withExt}`).exitCode === 0) {
      return withExt
    }
  }

  return null
}
|
||||
|
||||
/**
 * Returns the text of the first balanced `{ ... }` region at or after
 * startIndex, inclusive of both braces. This is a plain brace-counting
 * scan; braces inside strings or comments are not treated specially.
 *
 * @param {string} content - full source text
 * @param {number} startIndex - index to start scanning from
 * @returns {string} the brace-delimited region, or everything collected so
 *   far if the braces never balance before end of input
 */
function extractFunctionBody(content, startIndex) {
  let depth = 0
  let started = false
  const collected = []

  for (let pos = startIndex; pos < content.length; pos++) {
    const ch = content.charAt(pos)

    if (ch === "{") {
      depth += 1
      started = true
    } else if (ch === "}") {
      depth -= 1
    }

    // Characters before the first "{" are skipped entirely.
    if (started) {
      collected.push(ch)
      if (depth === 0) {
        break
      }
    }
  }

  return collected.join("")
}
|
||||
```
|
||||
|
||||
### 4. Requirements Verification
|
||||
|
||||
```javascript
|
||||
/**
 * Requirements verification: checks the implementation against the plan's
 * acceptance criteria and against plan-mandated error handling and tests.
 *
 * Relies on the sibling helpers extractAcceptanceCriteria() and
 * verifyCriterion().
 *
 * @param {string} plan - the plan/spec document text
 * @param {Array<{path: string, content: string}>} files - implementation files
 * @param {string} gitDiff - unified diff of the change set (forwarded to verifyCriterion)
 * @returns {{critical: Array, high: Array, medium: Array, low: Array}} issues by severity
 */
function verifyRequirements(plan, files, gitDiff) {
  const issues = {
    critical: [],
    high: [],
    medium: [],
    low: []
  }

  // Extract acceptance criteria from plan
  const acceptanceCriteria = extractAcceptanceCriteria(plan)

  // Verify each criterion. BUG FIX: check `partial` before the unmet case —
  // verifyCriterion() returns partial only when the ratio is BELOW the
  // `met` threshold, so the old `if (!met) ... else if (partial)` ordering
  // made the partial branch unreachable and reported every partially-met
  // criterion as fully unmet.
  for (const criterion of acceptanceCriteria) {
    const verified = verifyCriterion(criterion, files, gitDiff)

    if (verified.met) {
      continue // fully satisfied, nothing to report
    }

    if (verified.partial) {
      issues.medium.push({
        file: "plan",
        line: criterion.lineNumber,
        type: "partial-acceptance-criteria",
        message: `Acceptance criterion partially met: ${criterion.text}`,
        code: criterion.text
      })
    } else {
      issues.high.push({
        file: "plan",
        line: criterion.lineNumber,
        type: "unmet-acceptance-criteria",
        message: `Acceptance criterion not met: ${criterion.text}`,
        code: criterion.text
      })
    }
  }

  // Check for missing error handling when the plan calls for it
  const errorHandlingRequired = plan.match(/error handling|exception|validation/i)
  if (errorHandlingRequired) {
    const hasErrorHandling = files.some(file =>
      file.content.match(/try\s*\{|catch\s*\(|throw\s+new|\.catch\(/)
    )

    if (!hasErrorHandling) {
      issues.high.push({
        file: "implementation",
        line: 1,
        type: "missing-error-handling",
        message: "Plan requires error handling but none found in implementation",
        code: "No try-catch or error handling detected"
      })
    }
  }

  // Check for missing tests when the plan calls for them
  const testingRequired = plan.match(/test|testing|coverage/i)
  if (testingRequired) {
    const hasTests = files.some(file =>
      file.path.match(/\.(test|spec)\.[jt]sx?$/)
    )

    if (!hasTests) {
      issues.medium.push({
        file: "implementation",
        line: 1,
        type: "missing-tests",
        message: "Plan requires tests but no test files found",
        code: "No test files detected"
      })
    }
  }

  return issues
}
|
||||
|
||||
/**
 * Collects bullet-list items from the plan's "Acceptance Criteria" section.
 * Collection starts at any line mentioning "acceptance criteria"
 * (case-insensitive) and stops at the next "##" heading.
 *
 * @param {string} plan - plan document text
 * @returns {Array<{text: string, lineNumber: number}>} criteria with their
 *   1-based line numbers
 */
function extractAcceptanceCriteria(plan) {
  const criteria = []
  let collecting = false

  plan.split("\n").forEach((line, idx) => {
    // A line naming the section turns collection on — even a "##" heading,
    // so this check must run before the heading check below.
    if (/acceptance criteria/i.test(line)) {
      collecting = true
      return
    }
    // Any other "##" heading ends the section.
    if (/^##/.test(line)) {
      collecting = false
      return
    }
    // Inside the section, "-" or "*" bullets become criteria.
    if (collecting && /^[-*]\s+/.test(line)) {
      criteria.push({
        text: line.replace(/^[-*]\s+/, "").trim(),
        lineNumber: idx + 1
      })
    }
  })

  return criteria
}
|
||||
|
||||
/**
 * Heuristically verifies one acceptance criterion against the implementation
 * by measuring how many of the criterion's keywords (words of 4+ chars)
 * appear anywhere in the files.
 *
 * @param {{text: string}} criterion - one acceptance criterion
 * @param {Array<{content: string}>} files - implementation files
 * @param {string} gitDiff - unified diff (currently unused)
 * @returns {{met: boolean, partial: boolean, matchRatio: number}}
 *   met at >= 70% keyword coverage, partial at 40-70%
 */
function verifyCriterion(criterion, files, gitDiff) {
  // Extract keywords from the criterion text (words of 4+ characters)
  const keywords = criterion.text.toLowerCase().match(/\b\w{4,}\b/g) || []

  // Guard: a criterion with no extractable keywords cannot be verified;
  // previously this divided by zero and produced a NaN ratio.
  if (keywords.length === 0) {
    return { met: false, partial: false, matchRatio: 0 }
  }

  // Lowercase each file once, not once per keyword.
  const loweredFiles = files.map(file => file.content.toLowerCase())

  // Count each keyword at most once, no matter how many files contain it.
  // (The previous per-file count inflated the ratio past 1.0 on multi-file
  // changes, marking criteria "met" from repeated occurrences of one word.)
  let matchCount = 0
  for (const keyword of keywords) {
    if (loweredFiles.some(content => content.includes(keyword))) {
      matchCount++
    }
  }

  const matchRatio = matchCount / keywords.length

  return {
    met: matchRatio >= 0.7,
    partial: matchRatio >= 0.4 && matchRatio < 0.7,
    matchRatio: matchRatio
  }
}
|
||||
```
|
||||
|
||||
## Verdict Determination
|
||||
|
||||
```javascript
|
||||
/**
 * Merges issues from the four review dimensions and derives the final
 * verdict: BLOCK (any critical), CONDITIONAL (any high/medium), or APPROVE.
 *
 * @param {Object} qualityIssues - issues keyed by severity
 * @param {Object} securityIssues - issues keyed by severity
 * @param {Object} architectureIssues - issues keyed by severity
 * @param {Object} requirementIssues - issues keyed by severity
 * @returns {{verdict: string, reason: string, blocking_issues: Array}}
 */
function determineVerdict(qualityIssues, securityIssues, architectureIssues, requirementIssues) {
  const dimensions = [qualityIssues, securityIssues, architectureIssues, requirementIssues]
  const merge = (severity) => dimensions.flatMap(d => d[severity])

  const allIssues = {
    critical: merge("critical"),
    high: merge("high"),
    medium: merge("medium"),
    low: merge("low")
  }

  // BLOCK: any critical issue is an automatic stop.
  if (allIssues.critical.length > 0) {
    return {
      verdict: "BLOCK",
      reason: `${allIssues.critical.length} critical issue(s) must be fixed`,
      blocking_issues: allIssues.critical
    }
  }

  // CONDITIONAL: high/medium issues should be addressed but do not block.
  if (allIssues.high.length > 0 || allIssues.medium.length > 0) {
    return {
      verdict: "CONDITIONAL",
      reason: `${allIssues.high.length} high and ${allIssues.medium.length} medium issue(s) should be addressed`,
      blocking_issues: []
    }
  }

  // APPROVE: only low-priority issues (or none at all).
  const reason = allIssues.low.length > 0
    ? `${allIssues.low.length} low-priority issue(s) noted`
    : "No issues found"

  return {
    verdict: "APPROVE",
    reason: reason,
    blocking_issues: []
  }
}
|
||||
```
|
||||
|
||||
## Report Formatting
|
||||
|
||||
```javascript
|
||||
/**
 * Renders the full code-review report as markdown: verdict, blocking
 * issues, the four per-dimension sections, and optional recommendations.
 *
 * @param {{verdict: string, dimensions: Object, recommendations: Array<string>, blocking_issues: Array}} report
 * @returns {string} markdown report
 */
function formatCodeReviewReport(report) {
  const { verdict, dimensions, recommendations, blocking_issues } = report
  const parts = []

  parts.push(`# Code Review Report\n\n`)
  parts.push(`**Verdict**: ${verdict}\n\n`)

  if (blocking_issues.length > 0) {
    parts.push(`## Blocking Issues\n\n`)
    for (const issue of blocking_issues) {
      parts.push(`- **${issue.type}** (${issue.file}:${issue.line})\n`)
      parts.push(`  ${issue.message}\n`)
      parts.push(`  \`\`\`\n  ${issue.code}\n  \`\`\`\n\n`)
    }
  }

  parts.push(`## Review Dimensions\n\n`)

  // One section per dimension, in fixed display order.
  const sections = [
    ["Quality", dimensions.quality],
    ["Security", dimensions.security],
    ["Architecture", dimensions.architecture],
    ["Requirements", dimensions.requirements]
  ]
  for (const [label, dimensionIssues] of sections) {
    parts.push(`### ${label} Issues\n`)
    parts.push(formatIssuesByDimension(dimensionIssues))
  }

  if (recommendations.length > 0) {
    parts.push(`## Recommendations\n\n`)
    recommendations.forEach((rec, i) => {
      parts.push(`${i + 1}. ${rec}\n`)
    })
  }

  return parts.join("")
}
|
||||
|
||||
/**
 * Renders one dimension's issues as markdown, grouped by severity in
 * descending order. Empty severity buckets are skipped; a dimension with
 * no issues at all renders as "No issues found.".
 *
 * @param {{critical: Array, high: Array, medium: Array, low: Array}} issues
 * @returns {string} markdown fragment
 */
function formatIssuesByDimension(issues) {
  const sections = []

  for (const severity of ["critical", "high", "medium", "low"]) {
    const list = issues[severity]
    if (list.length === 0) {
      continue
    }

    let section = `\n**${severity.toUpperCase()}** (${list.length})\n\n`
    for (const issue of list) {
      section += `- ${issue.message} (${issue.file}:${issue.line})\n`
      section += `  \`${issue.code}\`\n\n`
    }
    sections.push(section)
  }

  return sections.length > 0 ? sections.join("") : "No issues found.\n\n"
}
|
||||
```
|
||||
@@ -0,0 +1,845 @@
|
||||
# Spec Quality Command
|
||||
|
||||
## Purpose
|
||||
5-dimension spec quality check with readiness report generation and quality gate determination.
|
||||
|
||||
## Quality Dimensions
|
||||
|
||||
### 1. Completeness (Weight: 25%)
|
||||
|
||||
```javascript
|
||||
/**
 * Completeness dimension (weight 25%): each spec document is checked for
 * its phase's required sections. Half the credit is for the section header
 * being present, half for the section containing substantial content
 * (more than 100 characters beyond the header).
 *
 * @param {Array<{phase: string, content: string}>} specDocs
 * @returns {{score: number, weight: number, weighted_score: number, details: Array}}
 */
function scoreCompleteness(specDocs) {
  const requiredSections = {
    "product-brief": ["Vision Statement", "Problem Statement", "Target Audience", "Success Metrics", "Constraints"],
    "prd": ["Goals", "Requirements", "User Stories", "Acceptance Criteria", "Non-Functional Requirements"],
    "architecture": ["System Overview", "Component Design", "Data Models", "API Specifications", "Technology Stack"],
    "user-stories": ["Story List", "Acceptance Criteria", "Priority", "Estimation"],
    "implementation-plan": ["Task Breakdown", "Dependencies", "Timeline", "Resource Allocation"],
    "test-strategy": ["Test Scope", "Test Cases", "Coverage Goals", "Test Environment"]
  }

  let totalScore = 0
  let totalWeight = 0
  const details = []

  for (const doc of specDocs) {
    const expectedSections = requiredSections[doc.phase] || []
    // Documents of unknown phase are not scored at all.
    if (expectedSections.length === 0) {
      continue
    }

    let presentCount = 0
    let substantialCount = 0

    for (const section of expectedSections) {
      const headerRegex = new RegExp(`##\\s+${section}`, "i")
      const headerMatch = doc.content.match(headerRegex)
      if (!headerMatch) {
        continue
      }

      presentCount++

      // Slice out this section's text: from its header to the next "##".
      const start = doc.content.indexOf(headerMatch[0])
      const end = doc.content.indexOf("\n##", start + 1)
      const sectionText = end > -1
        ? doc.content.substring(start, end)
        : doc.content.substring(start)

      // Substantial = more than 100 chars once the header itself is removed.
      if (sectionText.replace(headerRegex, "").trim().length > 100) {
        substantialCount++
      }
    }

    // Score: 50% for presence, 50% for substance.
    const docScore =
      (presentCount / expectedSections.length) * 50 +
      (substantialCount / expectedSections.length) * 50

    totalScore += docScore
    totalWeight += 100

    details.push({
      phase: doc.phase,
      score: docScore,
      present: presentCount,
      substantial: substantialCount,
      expected: expectedSections.length,
      missing: expectedSections.filter(
        s => !doc.content.match(new RegExp(`##\\s+${s}`, "i"))
      )
    })
  }

  const overallScore = totalWeight > 0 ? (totalScore / totalWeight) * 100 : 0

  return {
    score: overallScore,
    weight: 25,
    weighted_score: overallScore * 0.25,
    details: details
  }
}
|
||||
```
|
||||
|
||||
### 2. Consistency (Weight: 20%)
|
||||
|
||||
```javascript
|
||||
/**
 * Consistency dimension (weight 20%): checks terminology, header format,
 * cross-document references, and naming conventions, then converts the
 * collected issues into a penalty-based score.
 *
 * Uses the workflow's Bash tool for reference existence checks.
 *
 * @param {Array<{phase: string, content: string}>} specDocs
 * @returns {{score: number, weight: number, weighted_score: number, issues: Array, details: Object}}
 */
function scoreConsistency(specDocs) {
  const issues = []

  // 1. Terminology consistency
  // NOTE(review): the term regex only ever captures Capitalized-Word
  // sequences, so every original that normalizes to the same lowercase key
  // appears to be the identical string — variants.size > 1 looks
  // unreachable as written. Confirm intent; a broader term regex (e.g.
  // also matching ALL-CAPS forms) may be needed for this check to fire.
  const terminologyMap = new Map()

  for (const doc of specDocs) {
    // Extract key terms (capitalized phrases, technical terms)
    const terms = doc.content.match(/\b[A-Z][a-z]+(?:\s+[A-Z][a-z]+)*\b/g) || []

    terms.forEach(term => {
      const normalized = term.toLowerCase()
      if (!terminologyMap.has(normalized)) {
        terminologyMap.set(normalized, new Set())
      }
      terminologyMap.get(normalized).add(term)
    })
  }

  // Find inconsistent terminology (same concept, different casing/spelling)
  terminologyMap.forEach((variants, normalized) => {
    if (variants.size > 1) {
      issues.push({
        type: "terminology",
        severity: "medium",
        message: `Inconsistent terminology: ${[...variants].join(", ")}`,
        suggestion: `Standardize to one variant`
      })
    }
  })

  // 2. Format consistency: record which styles ("bold"/"plain") appear at
  // each markdown header level across all documents.
  const headerStyles = new Map()
  for (const doc of specDocs) {
    const headers = doc.content.match(/^#{1,6}\s+.+$/gm) || []
    headers.forEach(header => {
      const level = header.match(/^#+/)[0].length
      const style = header.includes("**") ? "bold" : "plain"
      const key = `level-${level}`

      if (!headerStyles.has(key)) {
        headerStyles.set(key, new Set())
      }
      headerStyles.get(key).add(style)
    })
  }

  // A level used with more than one style is flagged once.
  headerStyles.forEach((styles, level) => {
    if (styles.size > 1) {
      issues.push({
        type: "format",
        severity: "low",
        message: `Inconsistent header style at ${level}: ${[...styles].join(", ")}`,
        suggestion: "Use consistent header formatting"
      })
    }
  })

  // 3. Reference consistency: collect markdown links and remember which
  // phases reference each target.
  const references = new Map()
  for (const doc of specDocs) {
    // Extract references to other documents/sections
    const refs = doc.content.match(/\[.*?\]\(.*?\)/g) || []
    refs.forEach(ref => {
      const linkMatch = ref.match(/\((.*?)\)/)
      if (linkMatch) {
        const link = linkMatch[1]
        if (!references.has(link)) {
          references.set(link, [])
        }
        references.get(link).push(doc.phase)
      }
    })
  }

  // Check for broken relative-path references (non-relative links are
  // assumed external and skipped). NOTE(review): the path is tested
  // relative to the current working directory — confirm that matches where
  // the spec documents live.
  references.forEach((sources, link) => {
    if (link.startsWith("./") || link.startsWith("../")) {
      // Check if file exists
      const exists = Bash(`test -f ${link}`).exitCode === 0
      if (!exists) {
        issues.push({
          type: "reference",
          severity: "high",
          message: `Broken reference: ${link} (referenced in ${sources.join(", ")})`,
          suggestion: "Fix or remove broken reference"
        })
      }
    }
  })

  // 4. Naming convention consistency: count identifiers of each style and
  // flag every non-dominant style with more than 10 occurrences.
  const namingPatterns = {
    camelCase: /\b[a-z]+(?:[A-Z][a-z]+)+\b/g,
    PascalCase: /\b[A-Z][a-z]+(?:[A-Z][a-z]+)+\b/g,
    snake_case: /\b[a-z]+(?:_[a-z]+)+\b/g,
    kebab_case: /\b[a-z]+(?:-[a-z]+)+\b/g
  }

  const namingCounts = {}
  for (const doc of specDocs) {
    Object.entries(namingPatterns).forEach(([pattern, regex]) => {
      const matches = doc.content.match(regex) || []
      namingCounts[pattern] = (namingCounts[pattern] || 0) + matches.length
    })
  }

  // Most frequently used convention wins; ties break by object key order.
  const dominantPattern = Object.entries(namingCounts)
    .sort((a, b) => b[1] - a[1])[0]?.[0]

  Object.entries(namingCounts).forEach(([pattern, count]) => {
    if (pattern !== dominantPattern && count > 10) {
      issues.push({
        type: "naming",
        severity: "low",
        message: `Mixed naming conventions: ${pattern} (${count} occurrences) vs ${dominantPattern}`,
        suggestion: `Standardize to ${dominantPattern}`
      })
    }
  })

  // Calculate score: each issue subtracts its severity weight; the result
  // is normalized against maxPenalty and floored at 0.
  const severityWeights = { high: 10, medium: 5, low: 2 }
  const totalPenalty = issues.reduce((sum, issue) => sum + severityWeights[issue.severity], 0)
  const maxPenalty = 100 // Arbitrary max for normalization

  const score = Math.max(0, 100 - (totalPenalty / maxPenalty) * 100)

  return {
    score: score,
    weight: 20,
    weighted_score: score * 0.20,
    issues: issues,
    details: {
      terminology_issues: issues.filter(i => i.type === "terminology").length,
      format_issues: issues.filter(i => i.type === "format").length,
      reference_issues: issues.filter(i => i.type === "reference").length,
      naming_issues: issues.filter(i => i.type === "naming").length
    }
  }
}
|
||||
```
|
||||
|
||||
### 3. Traceability (Weight: 25%)
|
||||
|
||||
```javascript
|
||||
/**
 * Traceability dimension (weight 25%): builds Goal → Requirement →
 * Component → Story chains via keyword overlap and scores the fraction of
 * goals whose chain is complete at every level.
 *
 * Relies on the sibling helpers extractElements(), extractKeywords() and
 * hasKeywordOverlap().
 *
 * @param {Array<{phase: string, content: string}>} specDocs
 * @returns {{score: number, weight: number, weighted_score: number, details: Object, chains: Array}}
 */
function scoreTraceability(specDocs) {
  const chains = []

  // Extract traceability elements. Each regex expects bullet items like
  // "- Goal: ..." / "- REQ-1: ..." in the corresponding phase document.
  const goals = extractElements(specDocs, "product-brief", /^[-*]\s+Goal:\s*(.+)$/gm)
  const requirements = extractElements(specDocs, "prd", /^[-*]\s+(?:REQ-\d+|Requirement):\s*(.+)$/gm)
  const components = extractElements(specDocs, "architecture", /^[-*]\s+(?:Component|Module):\s*(.+)$/gm)
  const stories = extractElements(specDocs, "user-stories", /^[-*]\s+(?:US-\d+|Story):\s*(.+)$/gm)

  // Build traceability chains: Goals → Requirements → Components → Stories
  for (const goal of goals) {
    const chain = {
      goal: goal.text,
      requirements: [],
      components: [],
      stories: [],
      complete: false
    }

    // Find requirements that reference this goal (>= 30% keyword overlap).
    const goalKeywords = extractKeywords(goal.text)
    for (const req of requirements) {
      if (hasKeywordOverlap(req.text, goalKeywords, 0.3)) {
        chain.requirements.push(req.text)

        // Components/stories are linked transitively via the requirement's
        // keywords. NOTE(review): the same component or story can be pushed
        // once per matching requirement, so chain arrays may contain
        // duplicates — confirm whether that is acceptable downstream.
        const reqKeywords = extractKeywords(req.text)
        for (const comp of components) {
          if (hasKeywordOverlap(comp.text, reqKeywords, 0.3)) {
            chain.components.push(comp.text)
          }
        }

        // Find stories that implement this requirement
        for (const story of stories) {
          if (hasKeywordOverlap(story.text, reqKeywords, 0.3)) {
            chain.stories.push(story.text)
          }
        }
      }
    }

    // A chain is complete only when every downstream level is non-empty.
    chain.complete = chain.requirements.length > 0 &&
      chain.components.length > 0 &&
      chain.stories.length > 0

    chains.push(chain)
  }

  // Score = fraction of complete chains (0 when there are no goals at all).
  const completeChains = chains.filter(c => c.complete).length
  const totalChains = chains.length
  const score = totalChains > 0 ? (completeChains / totalChains) * 100 : 0

  // Identify weak links for the readiness report (one entry per missing level).
  const weakLinks = []
  chains.forEach((chain, idx) => {
    if (!chain.complete) {
      if (chain.requirements.length === 0) {
        weakLinks.push(`Goal ${idx + 1} has no linked requirements`)
      }
      if (chain.components.length === 0) {
        weakLinks.push(`Goal ${idx + 1} has no linked components`)
      }
      if (chain.stories.length === 0) {
        weakLinks.push(`Goal ${idx + 1} has no linked stories`)
      }
    }
  })

  return {
    score: score,
    weight: 25,
    weighted_score: score * 0.25,
    details: {
      total_chains: totalChains,
      complete_chains: completeChains,
      weak_links: weakLinks
    },
    chains: chains
  }
}
/**
 * Extract list-item elements matching `regex` from the spec document for `phase`.
 *
 * Fix: the original ran `regex.exec` in a `while` loop directly on the
 * caller's regex object. A non-global regex never advances `lastIndex`, so
 * the loop never terminates, and a shared global regex carries `lastIndex`
 * state into the call, silently skipping matches. We now match against a
 * fresh global copy instead.
 *
 * @param {Array<{phase: string, content: string}>} specDocs - Loaded spec documents.
 * @param {string} phase - Phase name identifying which document to search.
 * @param {RegExp} regex - Pattern with one capture group for the element text.
 * @returns {Array<{text: string, phase: string}>} Captured elements; empty when the doc is missing.
 */
function extractElements(specDocs, phase, regex) {
  const doc = specDocs.find(d => d.phase === phase)
  if (!doc) return []

  // Fresh copy: guarantees the "g" flag (required by matchAll) and a clean
  // lastIndex, regardless of the state of the regex the caller handed us.
  const flags = regex.flags.includes("g") ? regex.flags : regex.flags + "g"
  const pattern = new RegExp(regex.source, flags)

  const elements = []
  for (const match of doc.content.matchAll(pattern)) {
    elements.push({
      text: match[1].trim(),
      phase: phase
    })
  }

  return elements
}
/**
 * Extract meaningful keywords from free text for overlap matching.
 *
 * Keeps lowercase words of 4+ characters that are not on the stop list;
 * duplicates and original word order are preserved.
 *
 * @param {string} text - Source text.
 * @returns {string[]} Keywords in order of appearance.
 */
function extractKeywords(text) {
  // Words too generic to signal a real match between spec elements.
  const stopWords = new Set(["that", "this", "with", "from", "have", "will", "should", "must", "can"])

  const keywords = []
  for (const word of text.toLowerCase().match(/\b\w{4,}\b/g) ?? []) {
    if (!stopWords.has(word)) keywords.push(word)
  }
  return keywords
}
/**
 * Check whether `text` contains at least `threshold` (a 0..1 ratio) of `keywords`.
 *
 * Fix: with an empty keyword list the original computed 0/0 = NaN, and
 * `NaN >= threshold` silently evaluated to false. The outcome is the same
 * (no overlap) but is now explicit, so NaN never leaks into the comparison.
 *
 * @param {string} text - Haystack, matched case-insensitively by substring.
 * @param {string[]} keywords - Lowercase keywords (e.g. from extractKeywords()).
 * @param {number} threshold - Minimum matched fraction required (0..1).
 * @returns {boolean} True when enough keywords appear in the text.
 */
function hasKeywordOverlap(text, keywords, threshold) {
  // No keywords → nothing can overlap (guards the 0/0 = NaN division below).
  if (keywords.length === 0) return false

  const textLower = text.toLowerCase()
  const matchCount = keywords.filter(kw => textLower.includes(kw)).length
  return matchCount / keywords.length >= threshold
}
```

### 4. Depth (Weight: 20%)

```javascript
/**
 * Score specification depth across up to four dimensions:
 * acceptance-criteria testability, ADR justification, user-story
 * estimability, and technical-detail sufficiency. Weight: 20%.
 *
 * Fix: when no relevant documents exist, `dimensions` is empty and the
 * original averaged over zero entries, propagating NaN into `score` and
 * `weighted_score`; that case now returns 0.
 *
 * @param {Array<{phase: string, content: string}>} specDocs - Loaded spec documents.
 * @returns {{score: number, weight: number, weighted_score: number, dimensions: Array}}
 */
function scoreDepth(specDocs) {
  const dimensions = []

  // 1. Acceptance Criteria Testability: share of AC bullets that look
  // concretely verifiable (measurable verbs, given/when/then, numbers, outcomes).
  const acDoc = specDocs.find(d => d.phase === "prd" || d.phase === "user-stories")
  if (acDoc) {
    const acMatches = acDoc.content.match(/Acceptance Criteria:[\s\S]*?(?=\n##|\n\n[-*]|$)/gi) || []

    // Hoisted out of the per-criterion loop: these patterns are loop-invariant.
    const testablePatterns = [
      /\b(should|must|will)\s+(display|show|return|validate|check|verify|calculate|send|receive)\b/i,
      /\b(when|if|given)\b.*\b(then|should|must)\b/i,
      /\b\d+\b/, // Contains numbers (measurable)
      /\b(success|error|fail|pass)\b/i
    ]

    let testableCount = 0
    let totalCount = 0
    acMatches.forEach(section => {
      const criteria = section.match(/^[-*]\s+(.+)$/gm) || []
      totalCount += criteria.length
      criteria.forEach(criterion => {
        if (testablePatterns.some(pattern => pattern.test(criterion))) testableCount++
      })
    })

    const acScore = totalCount > 0 ? (testableCount / totalCount) * 100 : 0
    dimensions.push({
      name: "Acceptance Criteria Testability",
      score: acScore,
      testable: testableCount,
      total: totalCount
    })
  }

  // 2. ADR Justification: architecture decisions should record why.
  const archDoc = specDocs.find(d => d.phase === "architecture")
  if (archDoc) {
    const adrMatches = archDoc.content.match(/##\s+(?:ADR|Decision)[\s\S]*?(?=\n##|$)/gi) || []
    let justifiedCount = 0
    const totalCount = adrMatches.length

    adrMatches.forEach(adr => {
      // Justified if it mentions rationale, alternatives, or consequences.
      if (adr.match(/\b(rationale|reason|because|alternative|consequence|trade-?off)\b/i)) {
        justifiedCount++
      }
    })

    const adrScore = totalCount > 0 ? (justifiedCount / totalCount) * 100 : 100 // Default 100 if no ADRs
    dimensions.push({
      name: "ADR Justification",
      score: adrScore,
      justified: justifiedCount,
      total: totalCount
    })
  }

  // 3. User Stories Estimability: a story needs clear scope + AC, or an estimate.
  const storiesDoc = specDocs.find(d => d.phase === "user-stories")
  if (storiesDoc) {
    const storyMatches = storiesDoc.content.match(/^[-*]\s+(?:US-\d+|Story)[\s\S]*?(?=\n[-*]|$)/gim) || []
    let estimableCount = 0
    const totalCount = storyMatches.length

    storyMatches.forEach(story => {
      const hasScope = story.match(/\b(as a|I want|so that)\b/i)
      const hasAC = story.match(/acceptance criteria/i)
      const hasEstimate = story.match(/\b(points?|hours?|days?|estimate)\b/i)

      if ((hasScope && hasAC) || hasEstimate) estimableCount++
    })

    const storiesScore = totalCount > 0 ? (estimableCount / totalCount) * 100 : 0
    dimensions.push({
      name: "User Stories Estimability",
      score: storiesScore,
      estimable: estimableCount,
      total: totalCount
    })
  }

  // 4. Technical Detail Sufficiency: heuristic presence of concrete technical
  // markers (code blocks, API/HTTP/database/security terms) per tech document.
  const techDocs = specDocs.filter(d => d.phase === "architecture" || d.phase === "implementation-plan")
  if (techDocs.length > 0) {
    const detailIndicators = [
      /```[\s\S]*?```/, // Code blocks
      /\b(API|endpoint|schema|model|interface|class|function)\b/i,
      /\b(GET|POST|PUT|DELETE|PATCH)\b/, // HTTP methods
      /\b(database|table|collection|index)\b/i,
      /\b(authentication|authorization|security)\b/i
    ]

    let indicatorCount = 0
    techDocs.forEach(doc => {
      detailIndicators.forEach(pattern => {
        if (pattern.test(doc.content)) indicatorCount++
      })
    })

    const detailScore = Math.min(100, (indicatorCount / (detailIndicators.length * techDocs.length)) * 100)
    dimensions.push({
      name: "Technical Detail Sufficiency",
      score: detailScore,
      indicators_found: indicatorCount,
      indicators_expected: detailIndicators.length * techDocs.length
    })
  }

  // Guard: with no relevant documents the original divided by zero here,
  // propagating NaN into score and weighted_score.
  const overallScore = dimensions.length > 0
    ? dimensions.reduce((sum, d) => sum + d.score, 0) / dimensions.length
    : 0

  return {
    score: overallScore,
    weight: 20,
    weighted_score: overallScore * 0.20,
    dimensions: dimensions
  }
}
```

### 5. Requirement Coverage (Weight: 10%)

```javascript
/**
 * Score how much of the original requirement list the spec set covers.
 * Weight: 10% of the overall readiness score.
 *
 * @param {Array<{phase: string, content: string}>} specDocs - Loaded spec documents.
 * @param {string[]} [originalRequirements] - Explicit requirement list; when
 *   absent, requirements are mined from the product brief.
 * @returns {{score: number, weight: number, weighted_score: number,
 *            details: {total: number, covered: number, uncovered: string[]}}}
 */
function scoreRequirementCoverage(specDocs, originalRequirements) {
  const originalReqs = originalRequirements || extractOriginalRequirements(specDocs)

  // Nothing to cover — treat as fully covered.
  if (originalReqs.length === 0) {
    return {
      score: 100,
      weight: 10,
      weighted_score: 10,
      details: {
        total: 0,
        covered: 0,
        uncovered: []
      }
    }
  }

  // Collect every requirement bullet found anywhere in the spec set.
  const specReqs = specDocs.flatMap(doc =>
    (doc.content.match(/^[-*]\s+(?:REQ-\d+|Requirement|Feature):\s*(.+)$/gm) || [])
      .map(line => line.replace(/^[-*]\s+(?:REQ-\d+|Requirement|Feature):\s*/, "").trim())
  )

  // An original requirement counts as covered when some spec requirement
  // shares at least 40% of its keywords.
  const coverage = originalReqs.map(requirement => {
    const keywords = extractKeywords(requirement)
    return {
      requirement: requirement,
      covered: specReqs.some(candidate => hasKeywordOverlap(candidate, keywords, 0.4))
    }
  })

  const coveredCount = coverage.filter(entry => entry.covered).length
  const score = (coveredCount / originalReqs.length) * 100

  return {
    score: score,
    weight: 10,
    weighted_score: score * 0.10,
    details: {
      total: originalReqs.length,
      covered: coveredCount,
      uncovered: coverage.filter(entry => !entry.covered).map(entry => entry.requirement)
    }
  }
}
/**
 * Mine the original requirement list from the product brief's
 * "Requirements" or "Objectives" section.
 *
 * @param {Array<{phase: string, content: string}>} specDocs - Loaded spec documents.
 * @returns {string[]} Bullet texts, or an empty array when the brief or section is absent.
 */
function extractOriginalRequirements(specDocs) {
  // The product brief is the only place the raw requirements live.
  const brief = specDocs.find(d => d.phase === "product-brief")
  if (!brief) return []

  // Grab the section body up to the next "##" heading (or end of document).
  const section = brief.content.match(/##\s+(?:Requirements|Objectives)[\s\S]*?(?=\n##|$)/i)
  if (!section) return []

  const bullets = section[0].match(/^[-*]\s+(.+)$/gm) ?? []
  return bullets.map(line => line.replace(/^[-*]\s+/, "").trim())
}
```

## Quality Gate Determination

```javascript
/**
 * Map the overall and coverage scores onto a three-level quality gate.
 *
 * PASS:   overall >= 80 AND coverage >= 70
 * FAIL:   overall < 60 OR coverage < 50
 * REVIEW: everything in between
 *
 * @param {number} overallScore - Weighted overall spec score (0..100).
 * @param {number} coverageScore - Requirement coverage score (0..100).
 * @returns {{gate: string, message: string, action: string}} Gate verdict.
 */
function determineQualityGate(overallScore, coverageScore) {
  const verdicts = {
    PASS: {
      gate: "PASS",
      message: "Specification meets quality standards and is ready for implementation",
      action: "Proceed to implementation phase"
    },
    FAIL: {
      gate: "FAIL",
      message: "Specification requires major revisions before implementation",
      action: "Address critical gaps and resubmit for review"
    },
    REVIEW: {
      gate: "REVIEW",
      message: "Specification needs improvements but may proceed with caution",
      action: "Address recommendations and consider re-review"
    }
  }

  if (overallScore >= 80 && coverageScore >= 70) return verdicts.PASS
  if (overallScore < 60 || coverageScore < 50) return verdicts.FAIL
  return verdicts.REVIEW
}
```

## Readiness Report Generation

```javascript
/**
 * Render the aggregated readiness report as a Markdown document.
 *
 * @param {Object} report - Aggregated scoring result. Reads overall_score
 *   (number), quality_gate ({gate, message, action}), dimensions keyed by
 *   dimension name (each with score/weight/weighted_score), and optionally
 *   phase_gates keyed by phase ({status, score, issues}).
 *   NOTE(review): this also dereferences dimensions.completeness,
 *   .consistency, .traceability, .depth and .coverage with dimension-specific
 *   `details` shapes — confirm against the scoring functions that build them
 *   (e.g. scoreTraceability, scoreDepth, scoreRequirementCoverage).
 * @param {Array} specDocs - Reviewed spec documents. Currently unused in this
 *   function body; kept for interface stability.
 * @returns {string} Complete Markdown readiness report.
 */
function formatReadinessReport(report, specDocs) {
  const { overall_score, quality_gate, dimensions, phase_gates } = report

  // Header: timestamp, overall verdict, and recommended next step.
  let markdown = `# Specification Readiness Report\n\n`
  markdown += `**Generated**: ${new Date().toISOString()}\n\n`
  markdown += `**Overall Score**: ${overall_score.toFixed(1)}%\n\n`
  markdown += `**Quality Gate**: ${quality_gate.gate} - ${quality_gate.message}\n\n`
  markdown += `**Recommended Action**: ${quality_gate.action}\n\n`

  markdown += `---\n\n`

  // Summary table: one row per scoring dimension.
  markdown += `## Dimension Scores\n\n`
  markdown += `| Dimension | Score | Weight | Weighted Score |\n`
  markdown += `|-----------|-------|--------|----------------|\n`

  Object.entries(dimensions).forEach(([name, data]) => {
    markdown += `| ${name} | ${data.score.toFixed(1)}% | ${data.weight}% | ${data.weighted_score.toFixed(1)}% |\n`
  })

  markdown += `\n---\n\n`

  // Completeness Details
  // Per-phase section presence and content-substance counts.
  markdown += `## Completeness Analysis\n\n`
  dimensions.completeness.details.forEach(detail => {
    markdown += `### ${detail.phase}\n`
    markdown += `- Score: ${detail.score.toFixed(1)}%\n`
    markdown += `- Sections Present: ${detail.present}/${detail.expected}\n`
    markdown += `- Substantial Content: ${detail.substantial}/${detail.expected}\n`
    if (detail.missing.length > 0) {
      markdown += `- Missing: ${detail.missing.join(", ")}\n`
    }
    markdown += `\n`
  })

  // Consistency Details
  // Each issue is printed with its severity and a suggested fix.
  markdown += `## Consistency Analysis\n\n`
  if (dimensions.consistency.issues.length > 0) {
    markdown += `**Issues Found**: ${dimensions.consistency.issues.length}\n\n`
    dimensions.consistency.issues.forEach(issue => {
      markdown += `- **${issue.severity.toUpperCase()}**: ${issue.message}\n`
      markdown += `  *Suggestion*: ${issue.suggestion}\n\n`
    })
  } else {
    markdown += `No consistency issues found.\n\n`
  }

  // Traceability Details
  // Chain completion ratio plus the list of broken links, if any.
  markdown += `## Traceability Analysis\n\n`
  markdown += `- Complete Chains: ${dimensions.traceability.details.complete_chains}/${dimensions.traceability.details.total_chains}\n\n`
  if (dimensions.traceability.details.weak_links.length > 0) {
    markdown += `**Weak Links**:\n`
    dimensions.traceability.details.weak_links.forEach(link => {
      markdown += `- ${link}\n`
    })
    markdown += `\n`
  }

  // Depth Details
  // Sub-dimension counters are optional; only print the ones that exist.
  markdown += `## Depth Analysis\n\n`
  dimensions.depth.dimensions.forEach(dim => {
    markdown += `### ${dim.name}\n`
    markdown += `- Score: ${dim.score.toFixed(1)}%\n`
    if (dim.testable !== undefined) {
      markdown += `- Testable: ${dim.testable}/${dim.total}\n`
    }
    if (dim.justified !== undefined) {
      markdown += `- Justified: ${dim.justified}/${dim.total}\n`
    }
    if (dim.estimable !== undefined) {
      markdown += `- Estimable: ${dim.estimable}/${dim.total}\n`
    }
    markdown += `\n`
  })

  // Coverage Details
  markdown += `## Requirement Coverage\n\n`
  markdown += `- Covered: ${dimensions.coverage.details.covered}/${dimensions.coverage.details.total}\n`
  if (dimensions.coverage.details.uncovered.length > 0) {
    markdown += `\n**Uncovered Requirements**:\n`
    dimensions.coverage.details.uncovered.forEach(req => {
      markdown += `- ${req}\n`
    })
  }
  markdown += `\n`

  // Phase Gates
  // Optional block: only rendered when phase-level gates were computed.
  if (phase_gates) {
    markdown += `---\n\n`
    markdown += `## Phase-Level Quality Gates\n\n`
    Object.entries(phase_gates).forEach(([phase, gate]) => {
      markdown += `### ${phase}\n`
      markdown += `- Gate: ${gate.status}\n`
      markdown += `- Score: ${gate.score.toFixed(1)}%\n`
      if (gate.issues.length > 0) {
        markdown += `- Issues: ${gate.issues.join(", ")}\n`
      }
      markdown += `\n`
    })
  }

  return markdown
}
```

## Spec Summary Generation

```javascript
/**
 * Render a Markdown summary of the reviewed spec set: overall verdict,
 * per-document overview, and key findings (strengths, weaknesses,
 * recommendations).
 *
 * @param {Array<{phase: string, path: string, content: string}>} specDocs - Reviewed documents.
 * @param {Object} report - Aggregated result with overall_score, quality_gate,
 *   dimensions (keyed by name, each with a numeric score) and optional recommendations.
 * @returns {string} Markdown summary.
 */
function formatSpecSummary(specDocs, report) {
  // Assemble the document as an array of fragments, joined once at the end.
  const parts = []

  parts.push(`# Specification Summary\n\n`)
  parts.push(`**Overall Quality Score**: ${report.overall_score.toFixed(1)}%\n`)
  parts.push(`**Quality Gate**: ${report.quality_gate.gate}\n\n`)
  parts.push(`---\n\n`)

  // Document Overview: phase, path, size, and detected "##" section names.
  parts.push(`## Documents Reviewed\n\n`)
  for (const doc of specDocs) {
    parts.push(`### ${doc.phase}\n`)
    parts.push(`- Path: ${doc.path}\n`)
    parts.push(`- Size: ${doc.content.length} characters\n`)

    const headings = doc.content.match(/^##\s+(.+)$/gm) || []
    if (headings.length > 0) {
      const names = headings.map(heading => heading.replace(/^##\s+/, ""))
      parts.push(`- Sections: ${names.join(", ")}\n`)
    }
    parts.push(`\n`)
  }

  parts.push(`---\n\n`)
  parts.push(`## Key Findings\n\n`)

  const scored = Object.entries(report.dimensions)

  // Strengths: dimensions scoring 80% or better.
  const strengths = scored
    .filter(([, data]) => data.score >= 80)
    .map(([name, data]) => `${name}: ${data.score.toFixed(1)}%`)
  if (strengths.length > 0) {
    parts.push(`### Strengths\n`)
    for (const entry of strengths) parts.push(`- ${entry}\n`)
    parts.push(`\n`)
  }

  // Areas for Improvement: dimensions scoring below 70%.
  const improvements = scored
    .filter(([, data]) => data.score < 70)
    .map(([name, data]) => `${name}: ${data.score.toFixed(1)}%`)
  if (improvements.length > 0) {
    parts.push(`### Areas for Improvement\n`)
    for (const entry of improvements) parts.push(`- ${entry}\n`)
    parts.push(`\n`)
  }

  // Recommendations: numbered list, only when the report supplies any.
  if (report.recommendations && report.recommendations.length > 0) {
    parts.push(`### Recommendations\n`)
    report.recommendations.forEach((rec, idx) => {
      parts.push(`${idx + 1}. ${rec}\n`)
    })
    parts.push(`\n`)
  }

  return parts.join("")
}
```

## Phase-Level Quality Gates

```javascript
/**
 * Compute a quality gate per spec document/phase.
 *
 * Starts each phase at 100, subtracts 30 for thin content (<500 chars) and
 * 15 per missing required "##" section, then maps the raw score to
 * PASS (>=80) / REVIEW (>=60) / FAIL (<60). The reported score is clamped
 * at 0, but the gate status is decided on the raw (possibly negative) score.
 *
 * @param {Array<{phase: string, content: string}>} specDocs - Loaded spec documents.
 * @returns {Object<string, {status: string, score: number, issues: string[]}>} Gates keyed by phase.
 */
function calculatePhaseGates(specDocs) {
  const gates = {}

  for (const doc of specDocs) {
    const issues = []
    let score = 100

    // Penalise documents that are too short to be substantive.
    if (doc.content.length < 500) {
      issues.push("Insufficient content")
      score -= 30
    }

    // Penalise each required section heading that is absent.
    const required = getRequiredSections(doc.phase)
    const missing = required.filter(section =>
      !doc.content.match(new RegExp(`##\\s+${section}`, "i"))
    )
    if (missing.length > 0) {
      issues.push(`Missing sections: ${missing.join(", ")}`)
      score -= missing.length * 15
    }

    let status
    if (score < 60) status = "FAIL"
    else if (score < 80) status = "REVIEW"
    else status = "PASS"

    gates[doc.phase] = {
      status: status,
      score: Math.max(0, score),
      issues: issues
    }
  }

  return gates
}
/**
 * Required top-level ("## ...") section headings for a given spec phase.
 *
 * @param {string} phase - Spec phase identifier (e.g. "prd", "architecture").
 * @returns {string[]} Expected section names; empty for unknown phases.
 */
function getRequiredSections(phase) {
  switch (phase) {
    case "product-brief":
      return ["Vision", "Problem", "Target Audience"]
    case "prd":
      return ["Goals", "Requirements", "User Stories"]
    case "architecture":
      return ["Overview", "Components", "Data Models"]
    case "user-stories":
      return ["Stories", "Acceptance Criteria"]
    case "implementation-plan":
      return ["Tasks", "Dependencies"]
    case "test-strategy":
      return ["Test Cases", "Coverage"]
    default:
      return []
  }
}
```