feat(discovery): enhance discovery index reading and issue exporting

- Improved the reading of the discovery index by adding a fallback mechanism to scan directories for discovery folders if the index.json is invalid or missing.
- Added sorting of discoveries by creation time in descending order.
- Enhanced the `appendToIssuesJsonl` function to include deduplication logic for issues based on ID and source finding ID.
- Updated the discovery route handler to reflect the number of issues added and skipped during export.
- Introduced UI elements for selecting and deselecting findings in the dashboard.
- Added CSS styles for exported findings and action buttons.
- Implemented search functionality for filtering findings based on title, file, and description.
- Added internationalization support for new UI elements.
- Created scripts for automated API extraction from various project types, including FastAPI and TypeScript.
- Documented the API extraction process and library bundling instructions.
This commit is contained in:
catlog22
2025-12-28 19:27:34 +08:00
parent 3ef1e54412
commit 169f218f7a
18 changed files with 1602 additions and 612 deletions

View File

@@ -59,19 +59,20 @@ Phase 2: Interactive Perspective Selection
Phase 3: Parallel Perspective Analysis
├─ Launch N @cli-explore-agent instances (one per perspective)
├─ Security & Best-Practices auto-trigger Exa research
├─ Generate perspective JSON + markdown reports
├─ Agent writes perspective JSON, returns summary
└─ Update discovery-progress.json
Phase 4: Aggregation & Prioritization
├─ Load all perspective JSON files
├─ Collect agent return summaries
├─ Load perspective JSON files
├─ Merge findings, deduplicate by file+line
├─ Calculate priority scores based on impact/urgency
└─ Generate candidate issue list
└─ Calculate priority scores
Phase 5: Issue Generation
Phase 5: Issue Generation & Summary
├─ Convert high-priority discoveries to issue format
├─ Write to discovery-issues.jsonl (preview)
├─ Generate summary report
├─ Write to discovery-issues.jsonl
├─ Generate single summary.md from agent returns
└─ Update discovery-state.json to complete
```
## Perspectives
@@ -96,18 +97,14 @@ When no `--perspectives` flag is provided, the command uses AskUserQuestion:
```javascript
AskUserQuestion({
questions: [{
question: "Select discovery perspectives (multi-select)",
header: "Perspectives",
multiSelect: true,
question: "Select primary discovery focus:",
header: "Focus",
multiSelect: false,
options: [
{ label: "bug", description: "Potential bugs (edge cases, null checks, resource leaks)" },
{ label: "ux", description: "User experience (error messages, loading states, accessibility)" },
{ label: "test", description: "Test coverage (missing tests, edge cases, integration gaps)" },
{ label: "quality", description: "Code quality (complexity, duplication, naming)" },
{ label: "security", description: "Security issues (auto-enables Exa research)" },
{ label: "performance", description: "Performance (N+1 queries, memory, caching)" },
{ label: "maintainability", description: "Maintainability (coupling, tech debt, extensibility)" },
{ label: "best-practices", description: "Best practices (auto-enables Exa research)" }
{ label: "Bug + Test + Quality", description: "Quick scan: potential bugs, test gaps, code quality (Recommended)" },
{ label: "Security + Performance", description: "System audit: security issues, performance bottlenecks" },
{ label: "Maintainability + Best-practices", description: "Long-term health: coupling, tech debt, conventions" },
{ label: "Full analysis", description: "All 7 perspectives (comprehensive, takes longer)" }
]
}]
})
@@ -138,37 +135,18 @@ const discoveryId = `DSC-${formatDate(new Date(), 'YYYYMMDD-HHmmss')}`;
const outputDir = `.workflow/issues/discoveries/${discoveryId}`;
await mkdir(outputDir, { recursive: true });
await mkdir(`${outputDir}/perspectives`, { recursive: true });
await mkdir(`${outputDir}/reports`, { recursive: true });
// Step 4: Initialize discovery state
// Step 4: Initialize unified discovery state (merged state+progress)
await writeJson(`${outputDir}/discovery-state.json`, {
discovery_id: discoveryId,
target_pattern: targetPattern,
metadata: {
created_at: new Date().toISOString(),
resolved_files: resolvedFiles,
perspectives: [], // filled after selection
external_research_enabled: false
},
phase: "initialization",
perspectives_completed: [],
total_findings: 0,
priority_distribution: { critical: 0, high: 0, medium: 0, low: 0 },
issues_generated: 0
});
// Step 5: Initialize progress tracking
await writeJson(`${outputDir}/discovery-progress.json`, {
discovery_id: discoveryId,
last_update: new Date().toISOString(),
phase: "initialization",
progress: {
perspective_analysis: { total: 0, completed: 0, in_progress: 0, percent_complete: 0 },
external_research: { enabled: false, completed: false },
aggregation: { completed: false },
issue_generation: { completed: false }
},
agent_status: []
created_at: new Date().toISOString(),
updated_at: new Date().toISOString(),
target: { files_count: { total: resolvedFiles.length }, project: {} },
perspectives: [], // filled after selection: [{name, status, findings}]
external_research: { enabled: false, completed: false },
results: { total_findings: 0, issues_generated: 0, priority_distribution: {} }
});
```
@@ -182,34 +160,13 @@ if (args.perspectives) {
selectedPerspectives = args.perspectives.split(',').map(p => p.trim());
} else {
// Interactive selection via AskUserQuestion
const response = await AskUserQuestion({
questions: [{
question: "Select discovery perspectives to analyze:",
header: "Perspectives",
multiSelect: true,
options: PERSPECTIVE_OPTIONS
}]
});
const response = await AskUserQuestion({...});
selectedPerspectives = parseSelectedPerspectives(response);
}
// Validate perspectives
const validPerspectives = ['bug', 'ux', 'test', 'quality', 'security', 'performance', 'maintainability', 'best-practices'];
for (const p of selectedPerspectives) {
if (!validPerspectives.includes(p)) {
throw new Error(`Invalid perspective: ${p}`);
}
}
// Determine if Exa is needed
const exaEnabled = selectedPerspectives.includes('security') ||
selectedPerspectives.includes('best-practices') ||
args.external;
// Update state
// Validate and update state
await updateDiscoveryState(outputDir, {
'metadata.perspectives': selectedPerspectives,
'metadata.external_research_enabled': exaEnabled,
phase: 'parallel'
});
```
@@ -219,40 +176,25 @@ await updateDiscoveryState(outputDir, {
Launch N agents in parallel (one per selected perspective):
```javascript
// Launch agents in parallel
// Launch agents in parallel - agents write JSON and return summary
const agentPromises = selectedPerspectives.map(perspective =>
Task({
subagent_type: "cli-explore-agent",
run_in_background: false,
description: `Discover ${perspective} issues via Deep Scan`,
description: `Discover ${perspective} issues`,
prompt: buildPerspectivePrompt(perspective, discoveryId, resolvedFiles, outputDir)
})
);
// For perspectives with Exa enabled, add external research
if (exaEnabled) {
for (const perspective of ['security', 'best-practices']) {
if (selectedPerspectives.includes(perspective)) {
agentPromises.push(
Task({
subagent_type: "cli-explore-agent",
run_in_background: false,
description: `External research for ${perspective} via Exa`,
prompt: buildExaResearchPrompt(perspective, projectTech, outputDir)
})
);
}
}
}
// Wait for all agents
// Wait for all agents - collect their return summaries
const results = await Promise.all(agentPromises);
// results contain agent summaries for final report
```
**Phase 4: Aggregation & Prioritization**
```javascript
// Load all perspective results
// Load all perspective JSON files written by agents
const allFindings = [];
for (const perspective of selectedPerspectives) {
const jsonPath = `${outputDir}/perspectives/${perspective}.json`;
@@ -262,66 +204,37 @@ for (const perspective of selectedPerspectives) {
}
}
// Deduplicate by file+line
const uniqueFindings = deduplicateFindings(allFindings);
// Deduplicate and prioritize
const prioritizedFindings = deduplicateAndPrioritize(allFindings);
// Calculate priority scores
const prioritizedFindings = uniqueFindings.map(finding => ({
...finding,
priority_score: calculatePriorityScore(finding)
})).sort((a, b) => b.priority_score - a.priority_score);
// Update state with aggregation results
// Update unified state
await updateDiscoveryState(outputDir, {
phase: 'aggregation',
total_findings: prioritizedFindings.length,
priority_distribution: countByPriority(prioritizedFindings)
'results.total_findings': prioritizedFindings.length,
'results.priority_distribution': countByPriority(prioritizedFindings)
});
```
**Phase 5: Issue Generation**
**Phase 5: Issue Generation & Summary**
```javascript
// Filter high-priority findings for issue generation
// Convert high-priority findings to issues
const issueWorthy = prioritizedFindings.filter(f =>
f.priority === 'critical' || f.priority === 'high' || f.priority_score >= 0.7
);
// Convert to issue format
const issues = issueWorthy.map((finding, idx) => ({
id: `DSC-${String(idx + 1).padStart(3, '0')}`,
title: finding.suggested_issue?.title || finding.title,
status: 'discovered',
priority: mapPriorityToNumber(finding.priority),
source: 'discovery',
source_discovery_id: discoveryId,
perspective: finding.perspective,
context: finding.description,
labels: [finding.perspective, ...(finding.labels || [])],
file: finding.file,
line: finding.line,
created_at: new Date().toISOString()
}));
// Write discovery issues (preview, not committed to main issues.jsonl)
// Write discovery-issues.jsonl
await writeJsonl(`${outputDir}/discovery-issues.jsonl`, issues);
// Generate summary report
await generateSummaryReport(outputDir, prioritizedFindings, issues);
// Generate single summary.md from agent return summaries
// Orchestrator briefly summarizes what agents returned (NO detailed reports)
await writeSummaryFromAgentReturns(outputDir, results, prioritizedFindings, issues);
// Update final state
await updateDiscoveryState(outputDir, {
phase: 'complete',
issues_generated: issues.length
});
// Update index
await updateDiscoveryIndex(outputDir, discoveryId, {
target_pattern: targetPattern,
perspectives: selectedPerspectives,
total_findings: prioritizedFindings.length,
issues_generated: issues.length,
completed_at: new Date().toISOString()
updated_at: new Date().toISOString(),
'results.issues_generated': issues.length
});
```
@@ -331,23 +244,12 @@ await updateDiscoveryIndex(outputDir, discoveryId, {
.workflow/issues/discoveries/
├── index.json # Discovery session index
└── {discovery-id}/
├── discovery-state.json # State machine
├── discovery-progress.json # Real-time progress (dashboard polling)
├── discovery-state.json # Unified state (merged state+progress)
├── perspectives/
│ ├── bug.json
│ ├── ux.json
│ ├── test.json
│ ├── quality.json
│ ├── security.json
│ ├── performance.json
│ ├── maintainability.json
│ └── best-practices.json
├── external-research.json # Exa research results
│ ├── {perspective}.json
├── external-research.json # Exa research results (if enabled)
├── discovery-issues.jsonl # Generated candidate issues
└── reports/
├── summary.md
├── bug-report.md
└── {perspective}-report.md
└── summary.md # Single summary (from agent returns)
```
### Schema References
@@ -359,12 +261,6 @@ await updateDiscoveryIndex(outputDir, discoveryId, {
| **Discovery State** | `~/.claude/workflows/cli-templates/schemas/discovery-state-schema.json` | Session state machine |
| **Discovery Finding** | `~/.claude/workflows/cli-templates/schemas/discovery-finding-schema.json` | Perspective analysis results |
**Agent Schema Loading Protocol**:
```bash
# Agent MUST read schema before generating any JSON output
cat ~/.claude/workflows/cli-templates/schemas/discovery-finding-schema.json
```
### Agent Invocation Template
**Perspective Analysis Agent**:
@@ -373,23 +269,10 @@ cat ~/.claude/workflows/cli-templates/schemas/discovery-finding-schema.json
Task({
subagent_type: "cli-explore-agent",
run_in_background: false,
description: `Discover ${perspective} issues via Deep Scan`,
description: `Discover ${perspective} issues`,
prompt: `
## Task Objective
Discover potential ${perspective} issues in specified module files using Deep Scan mode (Bash + Gemini dual-source strategy)
## Analysis Mode
Use **Deep Scan mode** for this discovery:
- Phase 1: Bash structural scan for standard patterns
- Phase 2: Gemini semantic analysis for ${perspective}-specific concerns
- Phase 3: Synthesis with attribution
## MANDATORY FIRST STEPS
1. Read discovery state: ${discoveryStateJsonPath}
2. Get target files from discovery-state.json
3. Validate file access: bash(ls -la ${targetFiles.join(' ')})
4. **CRITICAL**: Read schema FIRST: cat ~/.claude/workflows/cli-templates/schemas/discovery-finding-schema.json
5. Read: .workflow/project-tech.json (technology stack)
Discover potential ${perspective} issues in specified module files.
## Discovery Context
- Discovery ID: ${discoveryId}
@@ -398,59 +281,30 @@ Task({
- Resolved Files: ${resolvedFiles.length} files
- Output Directory: ${outputDir}
## CLI Configuration
- Tool Priority: gemini → qwen → codex
- Mode: analysis (READ-ONLY for code analysis, WRITE for output files)
- Context Pattern: ${targetFiles.map(f => `@${f}`).join(' ')}
## MANDATORY FIRST STEPS
1. Read discovery state: ${outputDir}/discovery-state.json
2. Read schema: ~/.claude/workflows/cli-templates/schemas/discovery-finding-schema.json
3. Analyze target files for ${perspective} concerns
## ⚠️ CRITICAL OUTPUT GUIDELINES
## Output Requirements
**Agent MUST write JSON files directly - DO NOT return JSON to orchestrator**:
**1. Write JSON file**: ${outputDir}/perspectives/${perspective}.json
- Follow discovery-finding-schema.json exactly
- Each finding: id, title, priority, category, description, file, line, snippet, suggested_issue, confidence
1. **Schema Compliance**: Read and strictly follow discovery-finding-schema.json
- All required fields MUST be present
- Use exact enum values (lowercase priority: critical/high/medium/low)
- ID format: dsc-{perspective}-{seq}-{uuid8}
2. **Direct File Output**: Agent writes files using Write/mcp__ccw-tools__write_file:
- JSON: ${outputDir}/perspectives/${perspective}.json
- Report: ${outputDir}/reports/${perspective}-report.md
- DO NOT return raw JSON in response - write to file
3. **Validation Before Write**:
- Validate JSON against schema structure
- Ensure all findings have required fields
- Verify file paths are relative to project root
4. **Progress Update**: After writing, update discovery-progress.json:
- Set perspective status to "completed"
- Update findings_count
- Update completed_at timestamp
## Expected Deliverables
1. Perspective Results JSON: ${outputDir}/perspectives/${perspective}.json
- Follow discovery-finding-schema.json exactly
- Root structure MUST be object with findings array
- Each finding MUST include: id, title, priority, category, description, file, line, snippet, suggested_issue, confidence
2. Discovery Report: ${outputDir}/reports/${perspective}-report.md
- Human-readable summary
- Grouped by priority
- Include file:line references
**2. Return summary** (DO NOT write report file):
- Return a brief text summary of findings
- Include: total findings, priority breakdown, key issues
- This summary will be used by orchestrator for final report
## Perspective-Specific Guidance
${getPerspectiveGuidance(perspective)}
## Success Criteria
- [ ] Schema read and understood before analysis
- [ ] All target files analyzed for ${perspective} concerns
- [ ] JSON written directly to ${outputDir}/perspectives/${perspective}.json
- [ ] Report written to ${outputDir}/reports/${perspective}-report.md
- [ ] JSON written to ${outputDir}/perspectives/${perspective}.json
- [ ] Summary returned with findings count and key issues
- [ ] Each finding includes actionable suggested_issue
- [ ] Priority assessment is accurate (lowercase enum values)
- [ ] Recommendations are specific and implementable
- [ ] discovery-progress.json updated with completion status
- [ ] Priority uses lowercase enum: critical/high/medium/low
`
})
```
@@ -464,66 +318,26 @@ Task({
description: `External research for ${perspective} via Exa`,
prompt: `
## Task Objective
Research industry best practices and common patterns for ${perspective} using Exa search
## MANDATORY FIRST STEPS
1. Read project tech stack: .workflow/project-tech.json
2. Read external research schema structure (if exists)
3. Identify key technologies (e.g., Node.js, React, Express)
Research industry best practices for ${perspective} using Exa search
## Research Steps
1. Use Exa to search for:
- "${technology} ${perspective} best practices 2025"
- "${technology} common ${perspective} issues"
- "${technology} ${perspective} checklist"
2. Synthesize findings relevant to this project
1. Read project tech stack: .workflow/project-tech.json
2. Use Exa to search for best practices
3. Synthesize findings relevant to this project
## ⚠️ CRITICAL OUTPUT GUIDELINES
## Output Requirements
**Agent MUST write files directly - DO NOT return content to orchestrator**:
**1. Write JSON file**: ${outputDir}/external-research.json
- Include sources, key_findings, gap_analysis, recommendations
1. **Direct File Output**: Agent writes files using Write/mcp__ccw-tools__write_file:
- JSON: ${outputDir}/external-research.json
- Report: ${outputDir}/reports/${perspective}-external.md
- DO NOT return raw content in response - write to file
2. **JSON Structure for external-research.json**:
\`\`\`json
{
"discovery_id": "${discoveryId}",
"perspective": "${perspective}",
"research_timestamp": "ISO8601",
"sources": [
{ "title": "...", "url": "...", "relevance": "..." }
],
"key_findings": [...],
"gap_analysis": [...],
"recommendations": [...]
}
\`\`\`
3. **Progress Update**: After writing, update discovery-progress.json:
- Set external_research.completed to true
## Expected Deliverables
1. External Research JSON: ${outputDir}/external-research.json
- Sources with URLs
- Key findings
- Relevance to current codebase
2. Comparison report in ${outputDir}/reports/${perspective}-external.md
- Industry standards vs current implementation
- Gap analysis
- Prioritized recommendations
**2. Return summary** (DO NOT write report file):
- Brief summary of external research findings
- Key recommendations for the project
## Success Criteria
- [ ] At least 3 authoritative sources consulted
- [ ] JSON written directly to ${outputDir}/external-research.json
- [ ] Report written to ${outputDir}/reports/${perspective}-external.md
- [ ] JSON written to ${outputDir}/external-research.json
- [ ] Summary returned with key recommendations
- [ ] Findings are relevant to project's tech stack
- [ ] Recommendations are actionable
- [ ] discovery-progress.json updated
`
})
```
@@ -534,127 +348,38 @@ Task({
function getPerspectiveGuidance(perspective) {
const guidance = {
bug: `
Focus Areas:
- Null/undefined checks before property access
- Edge cases in conditionals (empty arrays, 0 values, empty strings)
- Resource leaks (unclosed connections, streams, file handles)
- Race conditions in async code
- Boundary conditions (array indices, date ranges)
- Exception handling gaps (missing try-catch, swallowed errors)
Priority Criteria:
- Critical: Data corruption, security bypass, system crash
- High: Feature malfunction, data loss potential
- Medium: Unexpected behavior in edge cases
- Low: Minor inconsistencies, cosmetic issues
Focus: Null checks, edge cases, resource leaks, race conditions, boundary conditions, exception handling
Priority: Critical=data corruption/crash, High=malfunction, Medium=edge case issues, Low=minor
`,
ux: `
Focus Areas:
- Error messages (are they user-friendly and actionable?)
- Loading states (are long operations indicated?)
- Feedback (do users know their action succeeded?)
- Accessibility (keyboard navigation, screen readers, color contrast)
- Interaction patterns (consistent behavior across the app)
- Form validation (immediate feedback, clear requirements)
Priority Criteria:
- Critical: Inaccessible features, misleading feedback
- High: Confusing error messages, missing loading states
- Medium: Inconsistent patterns, minor feedback issues
- Low: Cosmetic improvements, nice-to-haves
Focus: Error messages, loading states, feedback, accessibility, interaction patterns, form validation
Priority: Critical=inaccessible, High=confusing, Medium=inconsistent, Low=cosmetic
`,
test: `
Focus Areas:
- Missing unit tests for public functions
- Edge case coverage (null, empty, boundary values)
- Integration test gaps (API endpoints, database operations)
- Coverage holes in critical paths (auth, payment, data mutation)
- Assertion quality (are tests actually verifying behavior?)
- Test isolation (do tests depend on each other?)
Priority Criteria:
- Critical: No tests for security-critical code
- High: Missing tests for core business logic
- Medium: Edge cases not covered, weak assertions
- Low: Minor coverage gaps, test organization issues
Focus: Missing unit tests, edge case coverage, integration gaps, assertion quality, test isolation
Priority: Critical=no security tests, High=no core logic tests, Medium=weak coverage, Low=minor gaps
`,
quality: `
Focus Areas:
- Cyclomatic complexity (deeply nested conditionals)
- Code duplication (copy-pasted logic)
- Naming (unclear variable/function names)
- Documentation (missing JSDoc for public APIs)
- Code smells (long functions, large files, magic numbers)
- Readability (overly clever code, unclear intent)
Priority Criteria:
- Critical: Unmaintainable complexity blocking changes
- High: Significant duplication, confusing logic
- Medium: Naming issues, missing documentation
- Low: Minor refactoring opportunities
Focus: Complexity, duplication, naming, documentation, code smells, readability
Priority: Critical=unmaintainable, High=significant issues, Medium=naming/docs, Low=minor refactoring
`,
security: `
Focus Areas:
- Input validation and sanitization
- Authentication and authorization mechanisms
- SQL/NoSQL injection vulnerabilities
- XSS, CSRF vulnerabilities
- Sensitive data exposure (logs, errors, responses)
- Access control gaps
Priority Criteria:
- Critical: Authentication bypass, injection, RCE
- High: Missing authorization, exposed secrets
- Medium: Missing input validation, weak encryption
- Low: Security headers, verbose errors
Focus: Input validation, auth/authz, injection, XSS/CSRF, data exposure, access control
Priority: Critical=auth bypass/injection, High=missing authz, Medium=weak validation, Low=headers
`,
performance: `
Focus Areas:
- N+1 query problems in ORM usage
- Memory usage patterns (large objects, memory leaks)
- Caching opportunities (repeated computations, API calls)
- Algorithm efficiency (O(n²) where O(n log n) possible)
- Blocking operations on main thread
- Resource usage (CPU, network, disk I/O)
Priority Criteria:
- Critical: Memory leaks, blocking main thread
- High: N+1 queries, inefficient algorithms in hot paths
- Medium: Missing caching, suboptimal data structures
- Low: Minor optimization opportunities
Focus: N+1 queries, memory leaks, caching, algorithm efficiency, blocking operations
Priority: Critical=memory leaks, High=N+1/inefficient, Medium=missing cache, Low=minor optimization
`,
maintainability: `
Focus Areas:
- Module coupling (tight dependencies between unrelated modules)
- Interface design (unclear contracts, leaky abstractions)
- Technical debt indicators (TODOs, FIXMEs, temporary solutions)
- Extensibility (hard to add new features without touching core)
- Module boundaries (unclear separation of responsibilities)
- Configuration management (hardcoded values, environment handling)
Priority Criteria:
- Critical: Changes require touching unrelated code
- High: Unclear module boundaries, significant tech debt
- Medium: Minor coupling issues, configuration problems
- Low: Refactoring opportunities, documentation gaps
Focus: Coupling, interface design, tech debt, extensibility, module boundaries, configuration
Priority: Critical=unrelated code changes, High=unclear boundaries, Medium=coupling, Low=refactoring
`,
'best-practices': `
Focus Areas:
- Framework conventions (are we using the framework idiomatically?)
- Language patterns (modern JS/TS features, async/await usage)
- Anti-patterns (god objects, callback hell, mutation of shared state)
- Deprecated API usage (using old APIs when new ones available)
- Industry standards (OWASP for security, WCAG for accessibility)
- Coding standards (consistent style, ESLint/Prettier compliance)
Priority Criteria:
- Critical: Anti-patterns causing bugs, deprecated security APIs
- High: Major convention violations, poor patterns
- Medium: Minor style issues, suboptimal patterns
- Low: Cosmetic improvements
Focus: Framework conventions, language patterns, anti-patterns, deprecated APIs, coding standards
Priority: Critical=anti-patterns causing bugs, High=convention violations, Medium=style, Low=cosmetic
`
};
return guidance[perspective] || 'General code discovery analysis';
}
```
@@ -674,7 +399,6 @@ Navigate to **Issues > Discovery** to:
- Filter findings by perspective and priority
- Preview finding details
- Select and export findings as issues
- Dismiss irrelevant findings
### Exporting to Issues

View File

@@ -24,7 +24,7 @@ interface Issue {
status: 'registered'; // Initial status
priority: number; // 1 (critical) to 5 (low)
context: string; // Problem description
source: 'github' | 'text'; // Input source type
source: 'github' | 'text' | 'discovery'; // Input source type
source_url?: string; // GitHub URL if applicable
labels?: string[]; // Categorization labels
@@ -35,6 +35,18 @@ interface Issue {
affected_components?: string[];// Files/modules affected
reproduction_steps?: string[]; // Steps to reproduce
// Discovery context (when source='discovery')
discovery_context?: {
discovery_id: string; // Source discovery session
perspective: string; // bug, test, quality, etc.
category: string; // Finding category
file: string; // Primary affected file
line: number; // Line number
snippet?: string; // Code snippet
confidence: number; // Agent confidence (0-1)
suggested_fix?: string; // Suggested remediation
};
// Closed-loop requirements (guide plan generation)
lifecycle_requirements: {
test_strategy: 'unit' | 'integration' | 'e2e' | 'manual' | 'auto';

View File

@@ -202,8 +202,9 @@ ${issueList}
### Steps
1. Fetch: \`ccw issue status <id> --json\`
2. Load project context (project-tech.json + project-guidelines.json)
3. Explore (ACE) → Plan solution (respecting guidelines)
4. Register & bind: \`ccw issue bind <id> --solution <file>\`
3. **If source=discovery**: Use discovery_context (file, line, snippet, suggested_fix) as planning hints
4. Explore (ACE) → Plan solution (respecting guidelines)
5. Register & bind: \`ccw issue bind <id> --solution <file>\`
### Generate Files
\`.workflow/issues/solutions/{issue-id}.jsonl\` - Solution with tasks (schema: cat .claude/workflows/cli-templates/schemas/solution-schema.json)

View File

@@ -38,9 +38,9 @@ Generate comprehensive, interactive software manuals in TiddlyWiki-style single-
1. **主 Agent 编排,子 Agent 执行**: 所有繁重计算委托给 `universal-executor` 子 Agent
2. **Brief Returns**: Agents return path + summary, not full content (avoid context overflow)
3. **System Agents**: 使用 `cli-explore-agent` (探索) 和 `universal-executor` (执行)
4. **Chrome MCP Integration**: Batch screenshot capture with Base64 embedding
4. **成熟库内嵌**: marked.js (MD 解析) + highlight.js (语法高亮),无 CDN 依赖
5. **Single-File HTML**: TiddlyWiki-style interactive document with embedded resources
6. **User-Friendly Writing**: Clear, step-by-step guides with difficulty levels
6. **动态标签**: 根据实际章节自动生成导航标签
## Execution Flow
@@ -54,6 +54,10 @@ Generate comprehensive, interactive software manuals in TiddlyWiki-style single-
│ → 并行探索: architecture, ui-routes, api-endpoints, config │
│ → Output: exploration-*.json │
├─────────────────────────────────────────────────────────────────┤
│ Phase 2.5: API Extraction (extract_apis.py) │
│ → 自动提取: FastAPI/TypeDoc/pdoc │
│ → Output: api-docs/{backend,frontend,modules}/*.md │
├─────────────────────────────────────────────────────────────────┤
│ Phase 3: Parallel Analysis (universal-executor × 6) │
│ → 6 个子 Agent 并行: overview, ui-guide, api-docs, config, │
│ troubleshooting, code-examples │
@@ -161,17 +165,20 @@ Bash(`mkdir "${dir}\\sections" && mkdir "${dir}\\screenshots" && mkdir "${dir}\\
| Document | Purpose |
|----------|---------|
| [phases/01-requirements-discovery.md](phases/01-requirements-discovery.md) | User config collection |
| [phases/02-project-exploration.md](phases/02-project-exploration.md) | Project type detection |
| [phases/03-parallel-analysis.md](phases/03-parallel-analysis.md) | 6 Agent orchestration |
| [phases/03.5-consolidation.md](phases/03.5-consolidation.md) | Cross-section synthesis |
| [phases/04-screenshot-capture.md](phases/04-screenshot-capture.md) | Chrome MCP integration |
| [phases/05-html-assembly.md](phases/05-html-assembly.md) | HTML generation |
| [phases/06-iterative-refinement.md](phases/06-iterative-refinement.md) | Quality iteration |
| [specs/quality-standards.md](specs/quality-standards.md) | Quality gates |
| [specs/writing-style.md](specs/writing-style.md) | User-friendly writing |
| [specs/html-template.md](specs/html-template.md) | HTML template spec |
| [templates/tiddlywiki-shell.html](templates/tiddlywiki-shell.html) | HTML template |
| [scripts/typedoc-runner.md](scripts/typedoc-runner.md) | TypeDoc execution |
| [scripts/swagger-runner.md](scripts/swagger-runner.md) | Swagger/OpenAPI |
| [scripts/screenshot-helper.md](scripts/screenshot-helper.md) | Chrome MCP guide |
| [phases/01-requirements-discovery.md](phases/01-requirements-discovery.md) | 用户配置收集 |
| [phases/02-project-exploration.md](phases/02-project-exploration.md) | 项目类型检测 |
| [phases/02.5-api-extraction.md](phases/02.5-api-extraction.md) | API 自动提取 |
| [phases/03-parallel-analysis.md](phases/03-parallel-analysis.md) | 6 Agent 并行分析 |
| [phases/03.5-consolidation.md](phases/03.5-consolidation.md) | 整合与质量检查 |
| [phases/04-screenshot-capture.md](phases/04-screenshot-capture.md) | Chrome MCP 截图 |
| [phases/05-html-assembly.md](phases/05-html-assembly.md) | HTML 组装 |
| [phases/06-iterative-refinement.md](phases/06-iterative-refinement.md) | 迭代优化 |
| [specs/quality-standards.md](specs/quality-standards.md) | 质量标准 |
| [specs/writing-style.md](specs/writing-style.md) | 写作风格 |
| [templates/tiddlywiki-shell.html](templates/tiddlywiki-shell.html) | HTML 模板 |
| [templates/css/wiki-base.css](templates/css/wiki-base.css) | 基础样式 |
| [templates/css/wiki-dark.css](templates/css/wiki-dark.css) | 暗色主题 |
| [scripts/bundle-libraries.md](scripts/bundle-libraries.md) | 库文件打包 |
| [scripts/api-extractor.md](scripts/api-extractor.md) | API 提取说明 |
| [scripts/extract_apis.py](scripts/extract_apis.py) | API 提取脚本 |
| [scripts/screenshot-helper.md](scripts/screenshot-helper.md) | 截图辅助 |

View File

@@ -0,0 +1,161 @@
# Phase 2.5: API Extraction
在项目探索后、并行分析前,自动提取 API 文档。
## 核心原则
**使用成熟工具提取,确保输出格式与 wiki 模板兼容。**
## 执行流程
```javascript
const config = JSON.parse(Read(`${workDir}/manual-config.json`));
// 检查项目路径配置
const apiSources = config.api_sources || detectApiSources(config.project_path);
// 执行 API 提取
Bash({
command: `python .claude/skills/software-manual/scripts/extract_apis.py -o "${workDir}" -p ${apiSources.join(' ')}`
});
// 验证输出
const apiDocsDir = `${workDir}/api-docs`;
const extractedFiles = Glob(`${apiDocsDir}/**/*.{json,md}`);
console.log(`Extracted ${extractedFiles.length} API documentation files`);
```
## 支持的项目类型
| 类型 | 检测方式 | 提取工具 | 输出格式 |
|------|----------|----------|----------|
| FastAPI | `app/main.py` + FastAPI import | OpenAPI JSON | `openapi.json` + `API_SUMMARY.md` |
| Next.js | `package.json` + next | TypeDoc | `*.md` (Markdown) |
| Python Module | `__init__.py` + setup.py/pyproject.toml | pdoc | `*.md` (Markdown) |
| Express | `package.json` + express | swagger-jsdoc | `openapi.json` |
| NestJS | `package.json` + @nestjs | @nestjs/swagger | `openapi.json` |
## 输出格式规范
### Markdown 兼容性要求
确保输出 Markdown 与 wiki CSS 样式兼容:
```markdown
# API Reference → <h1> (wiki-base.css)
## Endpoints → <h2>
| Method | Path | Summary | → <table> 蓝色表头
|--------|------|---------|
| `GET` | `/api/...` | ... | → <code> 红色高亮
### GET /api/users → <h3>
\`\`\`json → <pre><code> 深色背景
{
"id": 1,
"name": "example"
}
\`\`\`
- Parameter: `id` (required) → <ul><li> + <code>
```
### 格式验证检查
```javascript
/**
 * Validate that extracted API markdown files are wiki-template compatible.
 * Checks table shape, code-fence language labels, and presence of an H1.
 * @param {string} apiDocsDir - Root directory containing extracted *.md files.
 * @returns {string[]} Human-readable issue descriptions (empty when clean).
 */
function validateApiDocsFormat(apiDocsDir) {
  const issues = [];
  const mdFiles = Glob(`${apiDocsDir}/**/*.md`);
  for (const file of mdFiles) {
    const content = Read(file);
    // Table check: any '|' present must belong to at least one 3-cell row.
    if (content.includes('|') && !content.match(/\|.*\|.*\|/)) {
      issues.push(`${file}: 表格格式不完整`);
    }
    // Fence-label check.
    // BUGFIX: the old pattern /```(\w*)\n/g also matched every CLOSING
    // fence (```\n), so each correctly-labeled block was still counted as
    // "unlabeled". Fences alternate open/close, so only even-indexed
    // matches are opening fences — inspect those alone.
    const fences = content.match(/^```(\w*)[ \t]*$/gm) || [];
    let unlabeled = 0;
    for (let i = 0; i < fences.length; i += 2) {
      if (fences[i].replace(/`/g, '').trim() === '') {
        unlabeled += 1;
      }
    }
    if (unlabeled > 0) {
      issues.push(`${file}: ${unlabeled} 个代码块缺少语言标注`);
    }
    // Heading check: at least one level-1 title is required.
    if (!content.match(/^# /m)) {
      issues.push(`${file}: 缺少一级标题`);
    }
  }
  return issues;
}
```
## 项目配置示例
`manual-config.json` 中配置 API 源:
```json
{
"software": {
"name": "Hydro Generator Workbench",
"type": "web"
},
"api_sources": {
"backend": {
"path": "D:/dongdiankaifa9/backend",
"type": "fastapi",
"entry": "app.main:app"
},
"frontend": {
"path": "D:/dongdiankaifa9/frontend",
"type": "typescript",
"entries": ["lib", "hooks", "components"]
},
"hydro_generator_module": {
"path": "D:/dongdiankaifa9/hydro_generator_module",
"type": "python"
},
"multiphysics_network": {
"path": "D:/dongdiankaifa9/multiphysics_network",
"type": "python"
}
}
}
```
## 输出结构
```
{workDir}/api-docs/
├── backend/
│ ├── openapi.json # OpenAPI 3.0 规范
│ └── API_SUMMARY.md # Markdown 摘要wiki 兼容)
├── frontend/
│ ├── modules.md # TypeDoc 模块文档
│ ├── classes/ # 类文档
│ └── functions/ # 函数文档
├── hydro_generator/
│ ├── assembler.md # pdoc 模块文档
│ ├── blueprint.md
│ └── builders/
└── multiphysics/
├── analysis_domain.md
├── builders.md
└── compilers.md
```
## 质量门禁
- [ ] 所有配置的 API 源已提取
- [ ] Markdown 格式与 wiki CSS 兼容
- [ ] 表格正确渲染(蓝色表头)
- [ ] 代码块有语言标注
- [ ] 无空文件或错误文件
## 下一阶段
→ [Phase 3: Parallel Analysis](03-parallel-analysis.md)

View File

@@ -11,51 +11,66 @@ const AGENT_CONFIGS = {
output: 'section-overview.md',
task: '撰写产品概览、核心功能、快速入门指南',
focus: '产品定位、目标用户、5步快速入门、系统要求',
input: ['exploration-architecture.json', 'README.md', 'package.json']
input: ['exploration-architecture.json', 'README.md', 'package.json'],
tag: 'getting-started'
},
'ui-guide': {
role: 'UX Expert',
output: 'section-ui-guide.md',
task: '撰写界面操作指南,标注所有需要截图的 UI 元素',
focus: '界面布局、导航流程、功能操作、快捷键',
input: ['exploration-ui-routes.json', 'pages/**', 'views/**'],
'interface-guide': {
role: 'Product Designer',
output: 'section-interface.md',
task: '撰写界面或交互指南Web 截图、CLI 命令交互、桌面应用操作)',
focus: '视觉布局、交互流程、命令行参数、输入/输出示例',
input: ['exploration-ui-routes.json', 'src/**', 'pages/**', 'views/**', 'components/**', 'src/commands/**'],
tag: 'interface',
screenshot_rules: `
每个关键 UI 交互点必须插入截图标记:
<!-- SCREENSHOT: id="ss-{功能}-{状态}" url="{路由}" selector="{CSS选择器}" wait_for="{等待元素}" description="{描述}" -->
根据项目类型标注交互点:
示例:
- 页面全貌: <!-- SCREENSHOT: id="ss-dashboard-overview" url="/dashboard" description="仪表盘主界面" -->
- 特定组件: <!-- SCREENSHOT: id="ss-login-form" url="/login" selector=".login-form" description="登录表单" -->
- 交互状态: <!-- SCREENSHOT: id="ss-modal-open" url="/settings" selector=".modal" wait_for=".modal.show" description="设置弹窗" -->
[Web] <!-- SCREENSHOT: id="ss-{功能}" url="{路由}" selector="{CSS选择器}" description="{描述}" -->
[CLI] 使用代码块展示命令交互:
\`\`\`bash
$ command --flag value
Expected output here
\`\`\`
[Desktop] <!-- SCREENSHOT: id="ss-{功能}" description="{描述}" -->
`
},
'api-docs': {
role: 'API Architect',
output: 'section-api-reference.md',
task: '撰写 REST API 和前端 API 参考文档',
focus: 'API 概览、端点分类、请求/响应示例、错误码',
input: ['exploration-api-endpoints.json', 'controllers/**', 'routes/**']
'api-reference': {
role: 'Technical Architect',
output: 'section-reference.md',
task: '撰写接口参考文档(REST API / 函数库 / CLI 命令)',
focus: '函数签名、端点定义、参数说明、返回值、错误码',
pre_extract: 'python .claude/skills/software-manual/scripts/extract_apis.py -o ${workDir}',
input: [
'${workDir}/api-docs/backend/openapi.json', // FastAPI OpenAPI
'${workDir}/api-docs/backend/API_SUMMARY.md', // Backend summary
'${workDir}/api-docs/frontend/**/*.md', // TypeDoc output
'${workDir}/api-docs/hydro_generator/**/*.md', // Python module
'${workDir}/api-docs/multiphysics/**/*.md' // Python module
],
tag: 'api'
},
config: {
role: 'DevOps Engineer',
output: 'section-configuration.md',
task: '撰写配置指南,涵盖环境变量、配置文件、部署设置',
focus: '环境变量表格、配置文件格式、部署选项、安全设置',
input: ['exploration-config.json', '.env.example', 'config/**']
input: ['exploration-config.json', '.env.example', 'config/**', '*.config.*'],
tag: 'config'
},
troubleshooting: {
role: 'Support Engineer',
output: 'section-troubleshooting.md',
task: '撰写故障排查指南涵盖常见问题、错误码、FAQ',
focus: '常见问题与解决方案、错误码参考、FAQ、获取帮助',
input: ['all exploration files', 'error handling code']
input: ['docs/troubleshooting.md', 'src/**/errors.*', 'src/**/exceptions.*', 'TROUBLESHOOTING.md'],
tag: 'troubleshooting'
},
'code-examples': {
role: 'Developer Advocate',
output: 'section-examples.md',
task: '撰写多难度级别代码示例入门40%/进阶40%/高级20%',
focus: '完整可运行代码、分步解释、预期输出、最佳实践',
input: ['all exploration files', 'examples/**', 'tests/**']
input: ['examples/**', 'tests/**', 'demo/**', 'samples/**'],
tag: 'examples'
}
};
```
@@ -65,7 +80,16 @@ const AGENT_CONFIGS = {
```javascript
const config = JSON.parse(Read(`${workDir}/manual-config.json`));
// 并行启动 6 个 universal-executor
// 1. 预提取 API 文档(如有 pre_extract 配置)
for (const [name, cfg] of Object.entries(AGENT_CONFIGS)) {
if (cfg.pre_extract) {
const cmd = cfg.pre_extract.replace(/\$\{workDir\}/g, workDir);
console.log(`[Pre-extract] ${name}: ${cmd}`);
Bash({ command: cmd });
}
}
// 2. 并行启动 6 个 universal-executor
const tasks = Object.entries(AGENT_CONFIGS).map(([name, cfg]) =>
Task({
subagent_type: 'universal-executor',
@@ -83,35 +107,51 @@ const results = await Promise.all(tasks);
function buildAgentPrompt(name, cfg, config, workDir) {
const screenshotSection = cfg.screenshot_rules
? `\n[SCREENSHOT RULES]\n${cfg.screenshot_rules}`
: '\n[SCREENSHOT]\n截图标记: <!-- SCREENSHOT: id="ss-xxx" url="/path" description="xxx" -->';
: '';
return `
[ROLE] ${cfg.role}
[PROJECT CONTEXT]
项目类型: ${config.software.type} (web/cli/sdk/desktop)
语言: ${config.software.language || 'auto-detect'}
名称: ${config.software.name}
[TASK]
${cfg.task}
输出: ${workDir}/sections/${cfg.output}
[INPUT]
- Read: ${workDir}/manual-config.json
- Read: ${cfg.input.map(f => `${workDir}/exploration/${f}`).join(', ')}
- 配置: ${workDir}/manual-config.json
- 探索结果: ${workDir}/exploration/
- 扫描路径: ${cfg.input.join(', ')}
[STYLE]
- 用户友好语言,避免技术术语
- 步骤编号清晰
- 代码块标注语言
[CONTENT REQUIREMENTS]
- 标题层级: # ## ### (最多3级)
- 代码块: \`\`\`language ... \`\`\` (必须标注语言)
- 表格: | col1 | col2 | 格式
- 列表: 有序 1. 2. 3. / 无序 - - -
- 内联代码: \`code\`
- 链接: [text](url)
${screenshotSection}
[FOCUS]
${cfg.focus}
[OUTPUT FORMAT]
Markdown 文件,包含:
- 清晰的章节结构
- 具体的代码示例
- 参数/配置表格
- 常见用例说明
[RETURN JSON]
{
"status": "completed",
"output_file": "sections/${cfg.output}",
"summary": "<50字>",
"screenshots_needed": [{ "id": "ss-xxx", "url": "/path", "selector": ".class", "description": "..." }],
"cross_references": []
"tag": "${cfg.tag}",
"screenshots_needed": []
}
`;
}

View File

@@ -29,29 +29,40 @@ function buildAssemblyPrompt(config, workDir) {
[ROLE] HTML Assembler
[TASK]
生成 TiddlyWiki 风格的交互式 HTML 手册
生成 TiddlyWiki 风格的交互式 HTML 手册(使用成熟库,无外部 CDN 依赖)
[INPUT]
- 模板: .claude/skills/software-manual/templates/tiddlywiki-shell.html
- CSS: .claude/skills/software-manual/templates/css/wiki-base.css, wiki-dark.css
- 配置: ${workDir}/manual-config.json
- 章节: ${workDir}/sections/section-*.md
- Agent 结果: ${workDir}/agent-results.json (含 tag 信息)
- 截图: ${workDir}/screenshots/
[LIBRARIES TO EMBED]
1. marked.js (v14+) - Markdown 转 HTML
- 从 https://unpkg.com/marked/marked.min.js 获取内容内嵌
2. highlight.js (v11+) - 代码语法高亮
- 核心 + 常用语言包 (js, ts, python, bash, json, yaml, html, css)
- 使用 github-dark 主题
[STEPS]
1. 读取 HTML 模板和 CSS
2. 逐个读取 section-*.md转换为 HTML tiddlers
3. 处理 <!-- SCREENSHOT: id="..." --> 标记,嵌入 Base64 图片
4. 生成目录、搜索索引
5. 组装最终 HTML写入 ${workDir}/${config.software.name}-使用手册.html
6. 生成构建报告 ${workDir}/build-report.json
2. 内嵌 marked.js 和 highlight.js 代码
3. 读取 agent-results.json 提取各章节 tag
4. 动态生成 {{TAG_BUTTONS_HTML}} (基于实际使用的 tags)
5. 逐个读取 section-*.md使用 marked 转换为 HTML
6. 为代码块添加 data-language 属性和语法高亮
7. 处理 <!-- SCREENSHOT: id="..." --> 标记,嵌入 Base64 图片
8. 生成目录、搜索索引
9. 组装最终 HTML写入 ${workDir}/${config.software.name}-使用手册.html
[HTML FEATURES]
- 搜索: 全文检索 + 高亮
- 折叠: 章节可展开/收起
- 标签: 分类过滤
- 主题: 亮/暗模式切换
- 离线: 所有资源内嵌
[CONTENT FORMATTING]
- 代码块: 深色背景 + 语言标签 + 语法高亮
- 表格: 蓝色表头 + 边框 + 悬停效果
- 内联代码: 红色高亮
- 列表: 有序/无序样式增强
- 左侧导航: 固定侧边栏 + TOC
[RETURN JSON]
{
@@ -59,6 +70,7 @@ function buildAssemblyPrompt(config, workDir) {
"output_file": "${config.software.name}-使用手册.html",
"file_size": "<size>",
"sections_count": <n>,
"tags_generated": [],
"screenshots_embedded": <n>
}
`;

View File

@@ -0,0 +1,245 @@
# API 文档提取脚本
根据项目类型自动提取 API 文档,支持 FastAPI、Next.js、Python 模块。
## 支持的技术栈
| 类型 | 技术栈 | 工具 | 输出格式 |
|------|--------|------|----------|
| Backend | FastAPI | openapi-to-md | Markdown |
| Frontend | Next.js/TypeScript | TypeDoc | Markdown |
| Python Module | Python | pdoc | Markdown/HTML |
## 使用方法
### 1. FastAPI Backend (OpenAPI)
```bash
# 提取 OpenAPI JSON
cd D:/dongdiankaifa9/backend
python -c "
from app.main import app
import json
print(json.dumps(app.openapi(), indent=2))
" > api-docs/openapi.json
# 转换为 Markdown (使用 widdershins)
npx widdershins api-docs/openapi.json -o api-docs/API_REFERENCE.md --language_tabs 'python:Python' 'javascript:JavaScript' 'bash:cURL'
```
**备选方案 (无需启动服务)**:
```python
# scripts/extract_fastapi_openapi.py
import sys
sys.path.insert(0, 'D:/dongdiankaifa9/backend')
from app.main import app
import json
openapi_schema = app.openapi()
with open('api-docs/openapi.json', 'w', encoding='utf-8') as f:
json.dump(openapi_schema, f, indent=2, ensure_ascii=False)
print(f"Extracted {len(openapi_schema.get('paths', {}))} endpoints")
```
### 2. Next.js Frontend (TypeDoc)
```bash
cd D:/dongdiankaifa9/frontend
# 安装 TypeDoc
npm install --save-dev typedoc typedoc-plugin-markdown
# 生成文档
npx typedoc --plugin typedoc-plugin-markdown \
--out api-docs \
--entryPoints "./lib" "./hooks" "./components" \
--entryPointStrategy expand \
--exclude "**/node_modules/**" \
--exclude "**/*.test.*" \
--readme none
```
**typedoc.json 配置**:
```json
{
"$schema": "https://typedoc.org/schema.json",
"entryPoints": ["./lib", "./hooks", "./components"],
"entryPointStrategy": "expand",
"out": "api-docs",
"plugin": ["typedoc-plugin-markdown"],
"exclude": ["**/node_modules/**", "**/*.test.*", "**/*.spec.*"],
"excludePrivate": true,
"excludeInternal": true,
"readme": "none",
"name": "Frontend API Reference"
}
```
### 3. Python Module (pdoc)
```bash
# 安装 pdoc
pip install pdoc
# hydro_generator_module
cd D:/dongdiankaifa9
pdoc hydro_generator_module \
--output-dir api-docs/hydro_generator \
--format markdown \
--no-show-source
# multiphysics_network
pdoc multiphysics_network \
--output-dir api-docs/multiphysics \
--format markdown \
--no-show-source
```
**备选: Sphinx (更强大)**:
```bash
# 安装 Sphinx
pip install sphinx sphinx-markdown-builder
# 生成 API 文档
sphinx-apidoc -o docs/source hydro_generator_module
cd docs && make markdown
```
## 集成脚本
```python
#!/usr/bin/env python3
# scripts/extract_all_apis.py
import subprocess
import sys
from pathlib import Path
PROJECTS = {
'backend': {
'path': 'D:/dongdiankaifa9/backend',
'type': 'fastapi',
'output': 'api-docs/backend'
},
'frontend': {
'path': 'D:/dongdiankaifa9/frontend',
'type': 'typescript',
'output': 'api-docs/frontend'
},
'hydro_generator_module': {
'path': 'D:/dongdiankaifa9/hydro_generator_module',
'type': 'python',
'output': 'api-docs/hydro_generator'
},
'multiphysics_network': {
'path': 'D:/dongdiankaifa9/multiphysics_network',
'type': 'python',
'output': 'api-docs/multiphysics'
}
}
def extract_fastapi(config):
"""提取 FastAPI OpenAPI 文档"""
path = Path(config['path'])
sys.path.insert(0, str(path))
try:
from app.main import app
import json
output_dir = Path(config['output'])
output_dir.mkdir(parents=True, exist_ok=True)
# 导出 OpenAPI JSON
with open(output_dir / 'openapi.json', 'w', encoding='utf-8') as f:
json.dump(app.openapi(), f, indent=2, ensure_ascii=False)
print(f"✓ FastAPI: {len(app.openapi().get('paths', {}))} endpoints")
return True
except Exception as e:
print(f"✗ FastAPI error: {e}")
return False
def extract_typescript(config):
"""提取 TypeScript 文档"""
try:
subprocess.run([
'npx', 'typedoc',
'--plugin', 'typedoc-plugin-markdown',
'--out', config['output'],
'--entryPoints', './lib', './hooks',
'--entryPointStrategy', 'expand'
], cwd=config['path'], check=True)
print(f"✓ TypeDoc: {config['path']}")
return True
except Exception as e:
print(f"✗ TypeDoc error: {e}")
return False
def extract_python(config):
"""提取 Python 模块文档"""
try:
module_name = Path(config['path']).name
subprocess.run([
'pdoc', module_name,
'--output-dir', config['output'],
'--format', 'markdown'
], cwd=Path(config['path']).parent, check=True)
print(f"✓ pdoc: {module_name}")
return True
except Exception as e:
print(f"✗ pdoc error: {e}")
return False
EXTRACTORS = {
'fastapi': extract_fastapi,
'typescript': extract_typescript,
'python': extract_python
}
if __name__ == '__main__':
for name, config in PROJECTS.items():
print(f"\n[{name}]")
extractor = EXTRACTORS.get(config['type'])
if extractor:
extractor(config)
```
## Phase 3 集成
`api-reference` Agent 提示词中添加:
```
[PRE-EXTRACTION]
运行 API 提取脚本获取结构化文档:
- python scripts/extract_all_apis.py
[INPUT FILES]
- api-docs/backend/openapi.json (FastAPI endpoints)
- api-docs/frontend/*.md (TypeDoc output)
- api-docs/hydro_generator/*.md (pdoc output)
- api-docs/multiphysics/*.md (pdoc output)
```
## 输出结构
```
api-docs/
├── backend/
│ ├── openapi.json # Raw OpenAPI spec
│ └── API_REFERENCE.md # Converted Markdown
├── frontend/
│ ├── modules.md
│ ├── functions.md
│ └── classes/
├── hydro_generator/
│ ├── assembler.md
│ ├── blueprint.md
│ └── builders/
└── multiphysics/
├── analysis_domain.md
├── builders.md
└── compilers.md
```

View File

@@ -0,0 +1,85 @@
# 库文件打包说明
## 依赖库
HTML 组装阶段需要内嵌以下成熟库(无 CDN 依赖):
### 1. marked.js - Markdown 解析
```bash
# 获取最新版本
curl -o templates/libs/marked.min.js https://unpkg.com/marked/marked.min.js
```
### 2. highlight.js - 代码语法高亮
```bash
# 获取核心 + 常用语言包
curl -o templates/libs/highlight.min.js https://unpkg.com/@highlightjs/cdn-assets/highlight.min.js
# 获取 github-dark 主题
curl -o templates/libs/github-dark.min.css https://unpkg.com/@highlightjs/cdn-assets/styles/github-dark.min.css
```
## 内嵌方式
Phase 5 Agent 应:
1. 读取 `templates/libs/*.js``*.css`
2. 将内容嵌入 HTML 的 `<script>``<style>` 标签
3.`DOMContentLoaded` 后初始化:
```javascript
// 初始化 marked
marked.setOptions({
highlight: function(code, lang) {
if (lang && hljs.getLanguage(lang)) {
return hljs.highlight(code, { language: lang }).value;
}
return hljs.highlightAuto(code).value;
},
breaks: true,
gfm: true
});
// 应用高亮
document.querySelectorAll('pre code').forEach(block => {
hljs.highlightElement(block);
});
```
## 备选方案
如果无法获取外部库,使用内置的简化 Markdown 转换:
```javascript
/**
 * Minimal fallback Markdown-to-HTML converter used when marked.js is
 * unavailable. Applies the same replacement patterns, in the same order,
 * expressed as a rule table folded over the input.
 * @param {string} md - Raw markdown text.
 * @returns {string} HTML string.
 */
function simpleMarkdown(md) {
  const rules = [
    [/^### (.+)$/gm, '<h3>$1</h3>'],
    [/^## (.+)$/gm, '<h2>$1</h2>'],
    [/^# (.+)$/gm, '<h1>$1</h1>'],
    [
      /```(\w+)?\n([\s\S]*?)```/g,
      (m, lang, code) =>
        `<pre data-language="${lang || ''}"><code class="language-${lang || ''}">${escapeHtml(code)}</code></pre>`,
    ],
    [/`([^`]+)`/g, '<code>$1</code>'],
    [/\*\*(.+?)\*\*/g, '<strong>$1</strong>'],
    [/\*(.+?)\*/g, '<em>$1</em>'],
    [/\[([^\]]+)\]\(([^)]+)\)/g, '<a href="$2">$1</a>'],
    [/^\|(.+)\|$/gm, processTableRow],
    [/^- (.+)$/gm, '<li>$1</li>'],
    [/^\d+\. (.+)$/gm, '<li>$1</li>'],
  ];
  return rules.reduce((html, [pattern, repl]) => html.replace(pattern, repl), md);
}
```
## 文件结构
```
templates/
├── libs/
│ ├── marked.min.js # Markdown parser
│ ├── highlight.min.js # Syntax highlighting
│ └── github-dark.min.css # Code theme
├── tiddlywiki-shell.html
└── css/
├── wiki-base.css
└── wiki-dark.css
```

View File

@@ -0,0 +1,270 @@
#!/usr/bin/env python3
"""
API 文档提取脚本
支持 FastAPI、TypeScript、Python 模块
"""
import subprocess
import sys
import json
from pathlib import Path
from typing import Dict, Any, Optional
# Project configuration: name -> {path, type, [entry|entries], output}.
# 'output' is resolved relative to the --output base directory in main().
# NOTE(review): these absolute Windows paths are machine-specific; consider
# moving them into a config file or CLI arguments before wider reuse.
PROJECTS = {
    'backend': {
        'path': Path('D:/dongdiankaifa9/backend'),
        'type': 'fastapi',
        'entry': 'app.main:app',
        'output': 'api-docs/backend'
    },
    'frontend': {
        'path': Path('D:/dongdiankaifa9/frontend'),
        'type': 'typescript',
        'entries': ['lib', 'hooks', 'components'],
        'output': 'api-docs/frontend'
    },
    'hydro_generator_module': {
        'path': Path('D:/dongdiankaifa9/hydro_generator_module'),
        'type': 'python',
        'output': 'api-docs/hydro_generator'
    },
    'multiphysics_network': {
        'path': Path('D:/dongdiankaifa9/multiphysics_network'),
        'type': 'python',
        'output': 'api-docs/multiphysics'
    }
}
def extract_fastapi(name: str, config: Dict[str, Any], output_base: Path) -> bool:
    """Extract an OpenAPI spec plus Markdown summary from a FastAPI project.

    Imports ``app.main`` from the configured project path, dumps
    ``app.openapi()`` to ``openapi.json`` and renders ``API_SUMMARY.md``
    beside it.

    Args:
        name: Project key from PROJECTS (unused here; kept for a uniform
            extractor signature).
        config: Project entry with 'path' and 'output'.
        output_base: Base directory the relative 'output' is joined to.

    Returns:
        True on success, False on import or extraction failure.
    """
    path = config['path']
    output_dir = output_base / config['output']
    output_dir.mkdir(parents=True, exist_ok=True)
    # Make the project importable; the entry stays in sys.path afterwards.
    # NOTE(review): Python caches modules, so a second 'fastapi' project
    # would receive the first project's cached `app.main`. Fine while only
    # one fastapi entry exists in PROJECTS — revisit if more are added.
    if str(path) not in sys.path:
        sys.path.insert(0, str(path))
    try:
        # Deferred import: only resolvable once sys.path includes the project.
        from app.main import app
        # FastAPI builds (and caches) the full OpenAPI schema here.
        openapi_schema = app.openapi()
        # Raw machine-readable spec.
        json_path = output_dir / 'openapi.json'
        with open(json_path, 'w', encoding='utf-8') as f:
            json.dump(openapi_schema, f, indent=2, ensure_ascii=False)
        # Human-readable, wiki-compatible summary.
        md_path = output_dir / 'API_SUMMARY.md'
        generate_api_markdown(openapi_schema, md_path)
        endpoints = len(openapi_schema.get('paths', {}))
        print(f" ✓ Extracted {endpoints} endpoints → {output_dir}")
        return True
    except ImportError as e:
        print(f" ✗ Import error: {e}")
        return False
    except Exception as e:
        print(f" ✗ Error: {e}")
        return False
def generate_api_markdown(schema: Dict, output_path: Path):
    """Render an OpenAPI schema as a wiki-compatible Markdown summary.

    Produces an endpoints table (GET/POST/PUT/DELETE/PATCH operations only)
    followed by one property table per component schema, then writes the
    result to ``output_path`` as UTF-8.

    Args:
        schema: Parsed OpenAPI document (``info``/``paths``/``components``).
        output_path: Destination ``.md`` file path.
    """
    info = schema.get('info', {})
    doc = [
        f"# {info.get('title', 'API Reference')}",
        "",
        f"Version: {info.get('version', '1.0.0')}",
        "",
        "## Endpoints",
        "",
        "| Method | Path | Summary |",
        "|--------|------|---------|",
    ]
    # Path items may carry non-operation keys (e.g. "parameters"); only the
    # five common HTTP verbs become table rows.
    for route, operations in schema.get('paths', {}).items():
        for verb, op in operations.items():
            if verb not in ('get', 'post', 'put', 'delete', 'patch'):
                continue
            summary = op.get('summary', op.get('operationId', '-'))
            doc.append(f"| `{verb.upper()}` | `{route}` | {summary} |")
    doc += ["", "## Schemas", ""]
    for model_name, model in schema.get('components', {}).get('schemas', {}).items():
        doc.append(f"### {model_name}")
        doc.append("")
        if 'properties' in model:
            doc.append("| Property | Type | Required |")
            doc.append("|----------|------|----------|")
            mandatory = model.get('required', [])
            for field, field_def in model['properties'].items():
                # Referenced models have no 'type'; fall back to the $ref path.
                field_type = field_def.get('type', field_def.get('$ref', 'any'))
                mark = '✓' if field in mandatory else '✗'
                doc.append(f"| `{field}` | {field_type} | {mark} |")
        doc.append("")
    with open(output_path, 'w', encoding='utf-8') as f:
        f.write('\n'.join(doc))
def extract_typescript(name: str, config: Dict[str, Any], output_base: Path) -> bool:
    """Extract TypeScript API docs for one project via TypeDoc.

    Runs ``npx typedoc`` with the markdown plugin inside the project
    directory, writing output under ``output_base / config['output']``.

    Args:
        name: Project key from PROJECTS (unused; uniform extractor signature).
        config: Project entry with 'path', optional 'entries', and 'output'.
        output_base: Base directory the relative 'output' is joined to.

    Returns:
        True on success, False when tooling is missing or TypeDoc fails.
    """
    path = config['path']
    output_dir = output_base / config['output']
    # Probe for TypeDoc; a non-zero exit means it is not installed yet.
    # NOTE(review): on Windows, launching 'npx'/'npm' via subprocess may
    # require shell=True or the '.cmd' shims — confirm on the target machine.
    try:
        result = subprocess.run(
            ['npx', 'typedoc', '--version'],
            cwd=path,
            capture_output=True,
            text=True
        )
        if result.returncode != 0:
            # Side effect: installing dev dependencies mutates the target
            # project's node_modules and package.json.
            print(f" ⚠ TypeDoc not installed, installing...")
            subprocess.run(
                ['npm', 'install', '--save-dev', 'typedoc', 'typedoc-plugin-markdown'],
                cwd=path,
                check=True
            )
    except FileNotFoundError:
        # npm/npx not on PATH at all — cannot proceed.
        print(f" ✗ npm/npx not found")
        return False
    # Build and run the TypeDoc command.
    try:
        entries = config.get('entries', ['lib'])
        cmd = [
            'npx', 'typedoc',
            '--plugin', 'typedoc-plugin-markdown',
            '--out', str(output_dir),
            '--entryPointStrategy', 'expand',
            '--exclude', '**/node_modules/**',
            '--exclude', '**/*.test.*',
            '--readme', 'none'
        ]
        # Only pass entry points that actually exist on disk; missing ones
        # would make TypeDoc fail outright.
        for entry in entries:
            entry_path = path / entry
            if entry_path.exists():
                cmd.extend(['--entryPoints', str(entry_path)])
        result = subprocess.run(cmd, cwd=path, capture_output=True, text=True)
        if result.returncode == 0:
            print(f" ✓ TypeDoc generated → {output_dir}")
            return True
        else:
            # stderr is truncated to keep console output readable.
            print(f" ✗ TypeDoc error: {result.stderr[:200]}")
            return False
    except Exception as e:
        print(f" ✗ Error: {e}")
        return False
def extract_python_module(name: str, config: Dict[str, Any], output_base: Path) -> bool:
    """Extract API docs for a plain Python package via pdoc.

    Invokes ``pdoc`` on the package directory's name from its parent
    directory (so the module is importable) and writes docs under
    ``output_base / config['output']``.

    Args:
        name: Project key from PROJECTS (unused; uniform extractor signature).
        config: Project entry with 'path' and 'output'.
        output_base: Base directory the relative 'output' is joined to.

    Returns:
        True on success, False otherwise.
    """
    path = config['path']
    output_dir = output_base / config['output']
    module_name = path.name
    # Ensure pdoc is available; install it on demand when the probe fails.
    try:
        subprocess.run(['pdoc', '--version'], capture_output=True, check=True)
    except (FileNotFoundError, subprocess.CalledProcessError):
        print(f" ⚠ pdoc not installed, installing...")
        subprocess.run([sys.executable, '-m', 'pip', 'install', 'pdoc'], check=True)
    # Run pdoc from the package's parent so `module_name` resolves.
    # NOTE(review): '--format markdown' is a pdoc3-era flag; the modern
    # `pdoc` package renders HTML and rejects it — verify which tool gets
    # installed by the pip fallback above.
    try:
        result = subprocess.run(
            [
                'pdoc', module_name,
                '--output-dir', str(output_dir),
                '--format', 'markdown'
            ],
            cwd=path.parent,
            capture_output=True,
            text=True
        )
        if result.returncode == 0:
            # Count generated files for the console summary (assumes pdoc
            # created output_dir — glob on a missing dir would raise).
            md_files = list(output_dir.glob('**/*.md'))
            print(f" ✓ pdoc generated {len(md_files)} files → {output_dir}")
            return True
        else:
            print(f" ✗ pdoc error: {result.stderr[:200]}")
            return False
    except Exception as e:
        print(f" ✗ Error: {e}")
        return False
# Dispatch table: project 'type' string -> extractor implementation.
# main() reports an "Unknown type" failure for types missing from this map.
EXTRACTORS = {
    'fastapi': extract_fastapi,
    'typescript': extract_typescript,
    'python': extract_python_module
}
def main(output_base: Optional[str] = None, projects: Optional[list] = None):
    """Run the configured extractors and print a per-project summary.

    Args:
        output_base: Base directory for generated docs (default: CWD).
        projects: Optional whitelist of project names from PROJECTS; when
            given, only matching projects are extracted.

    Returns:
        True only when every attempted extraction succeeded; False when any
        extraction failed or when no project matched the filter.
    """
    base = Path(output_base) if output_base else Path.cwd()
    print("=" * 50)
    print("API Documentation Extraction")
    print("=" * 50)
    results = {}
    for name, config in PROJECTS.items():
        # Honor the optional project whitelist.
        if projects and name not in projects:
            continue
        print(f"\n[{name}] ({config['type']})")
        if not config['path'].exists():
            print(f" ✗ Path not found: {config['path']}")
            results[name] = False
            continue
        extractor = EXTRACTORS.get(config['type'])
        if extractor:
            results[name] = extractor(name, config, base)
        else:
            print(f" ✗ Unknown type: {config['type']}")
            results[name] = False
    # Summary banner.
    print("\n" + "=" * 50)
    print("Summary")
    print("=" * 50)
    # BUGFIX: all({}) is True, so an unmatched --projects filter previously
    # printed "Success: 0/0" and exited 0 — a misleading success. Treat an
    # empty result set as failure instead.
    if not results:
        print(f"No projects matched filter: {projects}")
        return False
    success = sum(1 for v in results.values() if v)
    print(f"Success: {success}/{len(results)}")
    return all(results.values())
if __name__ == '__main__':
    import argparse
    # CLI entry point: exit code 0 only when every extraction succeeds.
    parser = argparse.ArgumentParser(description='Extract API documentation')
    parser.add_argument('--output', '-o', default='.', help='Output base directory')
    parser.add_argument('--projects', '-p', nargs='+', help='Specific projects to extract')
    args = parser.parse_args()
    success = main(args.output, args.projects)
    sys.exit(0 if success else 1)

View File

@@ -449,14 +449,65 @@ body {
margin-bottom: var(--spacing-md);
}
/* Lists - Enhanced Styling */
.tiddler-content ul,
.tiddler-content ol {
margin-bottom: var(--spacing-md);
padding-left: var(--spacing-lg);
margin: var(--spacing-md) 0;
padding-left: var(--spacing-xl);
}
.tiddler-content li {
margin-bottom: var(--spacing-xs);
.tiddler-content ul {
list-style: none;
}
.tiddler-content ul > li {
position: relative;
margin-bottom: var(--spacing-sm);
padding-left: 8px;
}
.tiddler-content ul > li::before {
content: "•";
position: absolute;
left: -16px;
color: var(--accent-color);
font-weight: bold;
}
.tiddler-content ol {
list-style: none;
counter-reset: item;
}
.tiddler-content ol > li {
position: relative;
margin-bottom: var(--spacing-sm);
padding-left: 8px;
counter-increment: item;
}
.tiddler-content ol > li::before {
content: counter(item) ".";
position: absolute;
left: -24px;
color: var(--accent-color);
font-weight: 600;
}
/* Nested lists */
.tiddler-content ul ul,
.tiddler-content ol ol,
.tiddler-content ul ol,
.tiddler-content ol ul {
margin: var(--spacing-xs) 0;
}
.tiddler-content ul ul > li::before {
content: "◦";
}
.tiddler-content ul ul ul > li::before {
content: "▪";
}
.tiddler-content a {
@@ -468,70 +519,112 @@ body {
text-decoration: underline;
}
/* Code */
/* Inline Code - Red Highlight */
.tiddler-content code {
font-family: var(--font-family-mono);
font-size: 0.9em;
font-size: 0.875em;
padding: 2px 6px;
background-color: var(--bg-tertiary);
background-color: #fff5f5;
color: #c92a2a;
border-radius: 4px;
border: 1px solid #ffc9c9;
}
/* Code Blocks - Dark Background */
.tiddler-content pre {
position: relative;
margin-bottom: var(--spacing-md);
padding: var(--spacing-md);
background-color: #1e1e1e;
margin: var(--spacing-md) 0;
padding: 0;
background-color: #1e2128;
border-radius: 8px;
overflow-x: auto;
overflow: hidden;
border: 1px solid #3d4450;
}
.tiddler-content pre::before {
content: attr(data-language);
display: block;
padding: 8px 16px;
background-color: #2d333b;
color: #8b949e;
font-size: 0.75rem;
font-family: var(--font-family);
text-transform: uppercase;
letter-spacing: 0.05em;
border-bottom: 1px solid #3d4450;
}
.tiddler-content pre code {
padding: 0;
display: block;
padding: var(--spacing-md);
background: none;
color: #d4d4d4;
color: #e6edf3;
font-size: var(--font-size-sm);
line-height: 1.6;
overflow-x: auto;
border: none;
}
.copy-code-btn {
position: absolute;
top: var(--spacing-sm);
right: var(--spacing-sm);
padding: var(--spacing-xs) var(--spacing-sm);
font-size: 0.75rem;
background-color: var(--bg-tertiary);
border: none;
top: 6px;
right: 12px;
padding: 4px 10px;
font-size: 0.7rem;
background-color: #3d4450;
color: #8b949e;
border: 1px solid #4d5566;
border-radius: 4px;
cursor: pointer;
opacity: 0;
transition: opacity var(--transition-fast);
transition: all var(--transition-fast);
}
.copy-code-btn:hover {
background-color: #4d5566;
color: #e6edf3;
}
.tiddler-content pre:hover .copy-code-btn {
opacity: 1;
}
/* Tables */
/* Tables - Blue Header Style */
.tiddler-content table {
width: 100%;
margin-bottom: var(--spacing-md);
margin: var(--spacing-md) 0;
border-collapse: collapse;
}
.tiddler-content th,
.tiddler-content td {
padding: var(--spacing-sm) var(--spacing-md);
border: 1px solid var(--border-color);
text-align: left;
border: 1px solid #dee2e6;
border-radius: 8px;
overflow: hidden;
}
.tiddler-content th {
background-color: var(--bg-secondary);
padding: 12px 16px;
background: linear-gradient(135deg, #1971c2, #228be6);
color: white;
font-weight: 600;
text-align: left;
border: none;
border-bottom: 2px solid #1864ab;
}
.tiddler-content tr:nth-child(even) {
background-color: var(--bg-secondary);
.tiddler-content td {
padding: 10px 16px;
border: 1px solid #e9ecef;
text-align: left;
}
.tiddler-content tbody tr:nth-child(odd) {
background-color: #f8f9fa;
}
.tiddler-content tbody tr:nth-child(even) {
background-color: #ffffff;
}
.tiddler-content tbody tr:hover {
background-color: #e7f5ff;
}
/* Screenshots */

View File

@@ -26,15 +26,10 @@
<div id="searchResults" class="search-results" aria-live="polite"></div>
</div>
<!-- Tag Navigation -->
<!-- Tag Navigation (Dynamic) -->
<nav class="wiki-tags" aria-label="Filter by category">
<button class="tag active" data-tag="all">All</button>
<button class="tag" data-tag="getting-started">Getting Started</button>
<button class="tag" data-tag="ui-guide">UI Guide</button>
<button class="tag" data-tag="api">API</button>
<button class="tag" data-tag="config">Configuration</button>
<button class="tag" data-tag="troubleshooting">Troubleshooting</button>
<button class="tag" data-tag="examples">Examples</button>
<button class="tag active" data-tag="all">全部</button>
{{TAG_BUTTONS_HTML}}
</nav>
<!-- Table of Contents -->

View File

@@ -1,10 +1,10 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "discovery-state-schema",
"title": "Discovery State Schema",
"description": "Schema for issue discovery session state machine",
"title": "Discovery State Schema (Merged)",
"description": "Unified schema for issue discovery session (state + progress merged)",
"type": "object",
"required": ["discovery_id", "target_pattern", "metadata", "phase"],
"required": ["discovery_id", "target_pattern", "phase", "created_at"],
"properties": {
"discovery_id": {
"type": "string",
@@ -15,94 +15,111 @@
"target_pattern": {
"type": "string",
"description": "File/directory pattern being analyzed",
"examples": ["src/auth/**", "src/payment/**,src/api/**"]
},
"metadata": {
"type": "object",
"required": ["created_at", "resolved_files", "perspectives"],
"properties": {
"created_at": {
"type": "string",
"format": "date-time",
"description": "ISO 8601 timestamp of discovery creation"
},
"resolved_files": {
"type": "array",
"items": { "type": "string" },
"description": "List of resolved file paths from pattern"
},
"perspectives": {
"type": "array",
"items": {
"type": "string",
"enum": ["bug", "ux", "test", "quality", "security", "performance", "maintainability", "best-practices"]
},
"description": "Selected discovery perspectives"
},
"external_research_enabled": {
"type": "boolean",
"default": false,
"description": "Whether Exa research is enabled"
}
}
"examples": ["src/auth/**", "codex-lens/**/*.py"]
},
"phase": {
"type": "string",
"enum": ["initialization", "parallel", "external", "aggregation", "complete"],
"enum": ["initialization", "parallel", "aggregation", "complete"],
"description": "Current execution phase"
},
"perspectives_completed": {
"created_at": {
"type": "string",
"format": "date-time"
},
"updated_at": {
"type": "string",
"format": "date-time"
},
"target": {
"type": "object",
"description": "Target module information",
"properties": {
"files_count": {
"type": "object",
"properties": {
"source": { "type": "integer" },
"tests": { "type": "integer" },
"total": { "type": "integer" }
}
},
"project": {
"type": "object",
"properties": {
"name": { "type": "string" },
"version": { "type": "string" }
}
}
}
},
"perspectives": {
"type": "array",
"items": { "type": "string" },
"description": "List of completed perspective analyses"
"description": "Perspective analysis status (merged from progress)",
"items": {
"type": "object",
"required": ["name", "status"],
"properties": {
"name": {
"type": "string",
"enum": ["bug", "ux", "test", "quality", "security", "performance", "maintainability", "best-practices"]
},
"status": {
"type": "string",
"enum": ["pending", "in_progress", "completed", "failed"]
},
"findings": {
"type": "integer",
"minimum": 0
}
}
}
},
"total_findings": {
"type": "integer",
"minimum": 0,
"description": "Total number of findings across all perspectives"
},
"priority_distribution": {
"external_research": {
"type": "object",
"properties": {
"critical": { "type": "integer", "minimum": 0 },
"high": { "type": "integer", "minimum": 0 },
"medium": { "type": "integer", "minimum": 0 },
"low": { "type": "integer", "minimum": 0 }
},
"description": "Count of findings by priority level"
"enabled": { "type": "boolean", "default": false },
"completed": { "type": "boolean", "default": false }
}
},
"issues_generated": {
"type": "integer",
"minimum": 0,
"description": "Number of issues generated from discoveries"
},
"completed_at": {
"type": "string",
"format": "date-time",
"description": "ISO 8601 timestamp of discovery completion"
"results": {
"type": "object",
"description": "Aggregated results (final phase)",
"properties": {
"total_findings": { "type": "integer", "minimum": 0 },
"issues_generated": { "type": "integer", "minimum": 0 },
"priority_distribution": {
"type": "object",
"properties": {
"critical": { "type": "integer" },
"high": { "type": "integer" },
"medium": { "type": "integer" },
"low": { "type": "integer" }
}
}
}
}
},
"examples": [
{
"discovery_id": "DSC-20250128-143022",
"target_pattern": "src/auth/**",
"metadata": {
"created_at": "2025-01-28T14:30:22Z",
"resolved_files": ["src/auth/service.ts", "src/auth/validator.ts"],
"perspectives": ["bug", "ux", "test", "quality", "security"],
"external_research_enabled": true
},
"discovery_id": "DSC-20251228-182237",
"target_pattern": "codex-lens/**/*.py",
"phase": "complete",
"perspectives_completed": ["bug", "ux", "test", "quality", "security"],
"total_findings": 45,
"priority_distribution": {
"critical": 2,
"high": 8,
"medium": 20,
"low": 15
"created_at": "2025-12-28T18:22:37+08:00",
"updated_at": "2025-12-28T18:35:00+08:00",
"target": {
"files_count": { "source": 48, "tests": 44, "total": 93 },
"project": { "name": "codex-lens", "version": "0.1.0" }
},
"issues_generated": 10,
"completed_at": "2025-01-28T14:45:00Z"
"perspectives": [
{ "name": "bug", "status": "completed", "findings": 15 },
{ "name": "test", "status": "completed", "findings": 11 },
{ "name": "quality", "status": "completed", "findings": 12 }
],
"external_research": { "enabled": false, "completed": false },
"results": {
"total_findings": 37,
"issues_generated": 15,
"priority_distribution": { "critical": 4, "high": 13, "medium": 16, "low": 6 }
}
}
]
}

View File

@@ -7,7 +7,7 @@
"properties": {
"id": {
"type": "string",
"description": "Issue ID (e.g., GH-123, TEXT-xxx)"
"description": "Issue ID (GH-123, ISS-xxx, DSC-001)"
},
"title": {
"type": "string"
@@ -21,24 +21,16 @@
"type": "integer",
"minimum": 1,
"maximum": 5,
"default": 3
"default": 3,
"description": "1=critical, 2=high, 3=medium, 4=low, 5=trivial"
},
"context": {
"type": "string",
"description": "Issue context/description (markdown)"
},
"bound_solution_id": {
"type": "string",
"description": "ID of the bound solution (null if none bound)"
},
"solution_count": {
"type": "integer",
"default": 0,
"description": "Number of candidate solutions in solutions/{id}.jsonl"
},
"source": {
"type": "string",
"enum": ["github", "text", "file"],
"enum": ["github", "text", "discovery"],
"description": "Source of the issue"
},
"source_url": {
@@ -50,6 +42,81 @@
"items": { "type": "string" },
"description": "Issue labels/tags"
},
"discovery_context": {
"type": "object",
"description": "Enriched context from issue:discover (only when source=discovery)",
"properties": {
"discovery_id": {
"type": "string",
"description": "Source discovery session ID"
},
"perspective": {
"type": "string",
"enum": ["bug", "ux", "test", "quality", "security", "performance", "maintainability", "best-practices"]
},
"category": {
"type": "string",
"description": "Finding category (e.g., edge-case, race-condition)"
},
"file": {
"type": "string",
"description": "Primary affected file"
},
"line": {
"type": "integer",
"description": "Line number in primary file"
},
"snippet": {
"type": "string",
"description": "Code snippet showing the issue"
},
"confidence": {
"type": "number",
"minimum": 0,
"maximum": 1,
"description": "Agent confidence score"
},
"suggested_fix": {
"type": "string",
"description": "Suggested remediation from discovery"
}
}
},
"affected_components": {
"type": "array",
"items": { "type": "string" },
"description": "Files/modules affected"
},
"lifecycle_requirements": {
"type": "object",
"properties": {
"test_strategy": {
"type": "string",
"enum": ["unit", "integration", "e2e", "manual", "auto"]
},
"regression_scope": {
"type": "string",
"enum": ["affected", "related", "full"]
},
"acceptance_type": {
"type": "string",
"enum": ["automated", "manual", "both"]
},
"commit_strategy": {
"type": "string",
"enum": ["per-task", "squash", "atomic"]
}
}
},
"bound_solution_id": {
"type": "string",
"description": "ID of the bound solution (null if none bound)"
},
"solution_count": {
"type": "integer",
"default": 0,
"description": "Number of candidate solutions"
},
"created_at": {
"type": "string",
"format": "date-time"
@@ -62,13 +129,40 @@
"type": "string",
"format": "date-time"
},
"queued_at": {
"type": "string",
"format": "date-time"
},
"completed_at": {
"type": "string",
"format": "date-time"
}
}
},
"examples": [
{
"id": "DSC-001",
"title": "Fix: SQLite connection pool memory leak",
"status": "registered",
"priority": 1,
"context": "Connection pool cleanup only happens when MAX_POOL_SIZE is reached...",
"source": "discovery",
"labels": ["bug", "resource-leak", "critical"],
"discovery_context": {
"discovery_id": "DSC-20251228-182237",
"perspective": "bug",
"category": "resource-leak",
"file": "storage/sqlite_store.py",
"line": 59,
"snippet": "if len(self._pool) >= self.MAX_POOL_SIZE:\n self._cleanup_stale_connections()",
"confidence": 0.85,
"suggested_fix": "Implement periodic cleanup or weak references"
},
"affected_components": ["storage/sqlite_store.py"],
"lifecycle_requirements": {
"test_strategy": "unit",
"regression_scope": "affected",
"acceptance_type": "automated",
"commit_strategy": "per-task"
},
"bound_solution_id": null,
"solution_count": 0,
"created_at": "2025-12-28T18:22:37Z"
}
]
}

View File

@@ -46,11 +46,53 @@ function getDiscoveriesDir(projectPath: string): string {
function readDiscoveryIndex(discoveriesDir: string): { discoveries: any[]; total: number } {
const indexPath = join(discoveriesDir, 'index.json');
if (!existsSync(indexPath)) {
// Try to read index.json first
if (existsSync(indexPath)) {
try {
return JSON.parse(readFileSync(indexPath, 'utf8'));
} catch {
// Fall through to scan
}
}
// Fallback: scan directory for discovery folders
if (!existsSync(discoveriesDir)) {
return { discoveries: [], total: 0 };
}
try {
return JSON.parse(readFileSync(indexPath, 'utf8'));
const entries = readdirSync(discoveriesDir, { withFileTypes: true });
const discoveries: any[] = [];
for (const entry of entries) {
if (entry.isDirectory() && entry.name.startsWith('DSC-')) {
const statePath = join(discoveriesDir, entry.name, 'discovery-state.json');
if (existsSync(statePath)) {
try {
const state = JSON.parse(readFileSync(statePath, 'utf8'));
discoveries.push({
discovery_id: entry.name,
target_pattern: state.target_pattern,
perspectives: state.metadata?.perspectives || [],
created_at: state.metadata?.created_at,
completed_at: state.completed_at
});
} catch {
// Skip invalid entries
}
}
}
}
// Sort by creation time descending
discoveries.sort((a, b) => {
const timeA = new Date(a.created_at || 0).getTime();
const timeB = new Date(b.created_at || 0).getTime();
return timeB - timeA;
});
return { discoveries, total: discoveries.length };
} catch {
return { discoveries: [], total: 0 };
}
@@ -139,7 +181,7 @@ function flattenFindings(perspectiveResults: any[]): any[] {
return allFindings;
}
function appendToIssuesJsonl(projectPath: string, issues: any[]) {
function appendToIssuesJsonl(projectPath: string, issues: any[]): { added: number; skipped: number; skippedIds: string[] } {
const issuesDir = join(projectPath, '.workflow', 'issues');
const issuesPath = join(issuesDir, 'issues.jsonl');
@@ -158,24 +200,56 @@ function appendToIssuesJsonl(projectPath: string, issues: any[]) {
}
}
// Convert discovery issues to standard format and append
const newIssues = issues.map(di => ({
id: di.id,
title: di.title,
status: 'registered',
priority: di.priority || 3,
context: di.context || di.description || '',
source: 'discovery',
source_discovery_id: di.source_discovery_id,
labels: di.labels || [],
created_at: new Date().toISOString(),
updated_at: new Date().toISOString()
}));
// Build set of existing IDs and source_finding combinations for deduplication
const existingIds = new Set(existingIssues.map(i => i.id));
const existingSourceFindings = new Set(
existingIssues
.filter(i => i.source === 'discovery' && i.source_finding_id)
.map(i => `${i.source_discovery_id}:${i.source_finding_id}`)
);
const allIssues = [...existingIssues, ...newIssues];
writeFileSync(issuesPath, allIssues.map(i => JSON.stringify(i)).join('\n'));
// Convert and filter duplicates
const skippedIds: string[] = [];
const newIssues: any[] = [];
return newIssues.length;
for (const di of issues) {
// Check for duplicate by ID
if (existingIds.has(di.id)) {
skippedIds.push(di.id);
continue;
}
// Check for duplicate by source_discovery_id + source_finding_id
const sourceKey = `${di.source_discovery_id}:${di.source_finding_id}`;
if (di.source_finding_id && existingSourceFindings.has(sourceKey)) {
skippedIds.push(di.id);
continue;
}
newIssues.push({
id: di.id,
title: di.title,
status: 'registered',
priority: di.priority || 3,
context: di.context || di.description || '',
source: 'discovery',
source_discovery_id: di.source_discovery_id,
source_finding_id: di.source_finding_id,
perspective: di.perspective,
file: di.file,
line: di.line,
labels: di.labels || [],
created_at: new Date().toISOString(),
updated_at: new Date().toISOString()
});
}
if (newIssues.length > 0) {
const allIssues = [...existingIssues, ...newIssues];
writeFileSync(issuesPath, allIssues.map(i => JSON.stringify(i)).join('\n'));
}
return { added: newIssues.length, skipped: skippedIds.length, skippedIds };
}
// ========== Route Handler ==========
@@ -340,6 +414,7 @@ export async function handleDiscoveryRoutes(ctx: RouteContext): Promise<boolean>
context: f.description || '',
source: 'discovery',
source_discovery_id: discoveryId,
source_finding_id: f.id, // Track original finding ID for deduplication
perspective: f.perspective,
file: f.file,
line: f.line,
@@ -347,13 +422,49 @@ export async function handleDiscoveryRoutes(ctx: RouteContext): Promise<boolean>
};
});
// Append to main issues.jsonl
const exportedCount = appendToIssuesJsonl(projectPath, issuesToExport);
// Append to main issues.jsonl (with deduplication)
const result = appendToIssuesJsonl(projectPath, issuesToExport);
// Mark exported findings in perspective files
if (result.added > 0) {
const exportedFindingIds = new Set(
issuesToExport
.filter((_, idx) => !result.skippedIds.includes(issuesToExport[idx].id))
.map(i => i.source_finding_id)
);
// Update each perspective file to mark findings as exported
const perspectivesDir = join(discoveriesDir, discoveryId, 'perspectives');
if (existsSync(perspectivesDir)) {
const files = readdirSync(perspectivesDir).filter(f => f.endsWith('.json'));
for (const file of files) {
const filePath = join(perspectivesDir, file);
try {
const content = JSON.parse(readFileSync(filePath, 'utf8'));
if (content.findings) {
let modified = false;
for (const finding of content.findings) {
if (exportedFindingIds.has(finding.id) && !finding.exported) {
finding.exported = true;
finding.exported_at = new Date().toISOString();
modified = true;
}
}
if (modified) {
writeFileSync(filePath, JSON.stringify(content, null, 2));
}
}
} catch {
// Skip invalid files
}
}
}
}
// Update discovery state
const state = readDiscoveryState(discoveriesDir, discoveryId);
if (state) {
state.issues_generated = (state.issues_generated || 0) + exportedCount;
state.issues_generated = (state.issues_generated || 0) + result.added;
writeFileSync(
join(discoveriesDir, discoveryId, 'discovery-state.json'),
JSON.stringify(state, null, 2)
@@ -362,8 +473,12 @@ export async function handleDiscoveryRoutes(ctx: RouteContext): Promise<boolean>
return {
success: true,
exported_count: exportedCount,
issue_ids: issuesToExport.map(i => i.id)
exported_count: result.added,
skipped_count: result.skipped,
skipped_ids: result.skippedIds,
message: result.skipped > 0
? `Exported ${result.added} issues, skipped ${result.skipped} duplicates`
: `Exported ${result.added} issues`
};
});
return true;

View File

@@ -358,17 +358,52 @@
}
.findings-count {
display: flex;
align-items: center;
justify-content: space-between;
padding: 0.5rem 1rem;
font-size: 0.75rem;
color: hsl(var(--muted-foreground));
border-bottom: 1px solid hsl(var(--border));
}
.findings-count-left {
display: flex;
align-items: center;
gap: 0.5rem;
}
.findings-count .selected-count {
color: hsl(var(--primary));
font-weight: 500;
}
.findings-count-actions {
display: flex;
gap: 0.5rem;
}
.select-action-btn {
display: inline-flex;
align-items: center;
gap: 0.25rem;
padding: 0.25rem 0.5rem;
border-radius: 0.25rem;
font-size: 0.625rem;
font-weight: 500;
color: hsl(var(--muted-foreground));
background: transparent;
border: 1px solid hsl(var(--border));
cursor: pointer;
transition: all 0.15s ease;
}
.select-action-btn:hover {
color: hsl(var(--foreground));
background: hsl(var(--muted));
border-color: hsl(var(--primary) / 0.3);
}
/* Findings List */
.findings-list {
flex: 1;
@@ -413,6 +448,35 @@
opacity: 0.5;
}
.finding-item.exported {
opacity: 0.6;
background: hsl(var(--success) / 0.05);
border: 1px solid hsl(var(--success) / 0.2);
}
.finding-item.exported:hover {
background: hsl(var(--success) / 0.08);
}
/* Exported Badge */
.exported-badge {
display: inline-flex;
align-items: center;
gap: 0.25rem;
padding: 0.125rem 0.5rem;
border-radius: 9999px;
font-size: 0.625rem;
font-weight: 600;
text-transform: uppercase;
background: hsl(var(--success) / 0.1);
color: hsl(var(--success));
}
.finding-checkbox input:disabled {
opacity: 0.5;
cursor: not-allowed;
}
.finding-checkbox {
display: flex;
align-items: flex-start;

View File

@@ -28,6 +28,8 @@ const i18n = {
'common.deleteFailed': 'Delete failed',
'common.retry': 'Retry',
'common.refresh': 'Refresh',
'common.back': 'Back',
'common.search': 'Search...',
'common.minutes': 'minutes',
'common.enabled': 'Enabled',
'common.disabled': 'Disabled',
@@ -1783,6 +1785,8 @@ const i18n = {
'discovery.impact': 'Impact',
'discovery.recommendation': 'Recommendation',
'discovery.exportAsIssues': 'Export as Issues',
'discovery.selectAll': 'Select All',
'discovery.deselectAll': 'Deselect All',
'discovery.deleteSession': 'Delete Session',
'discovery.confirmDelete': 'Are you sure you want to delete this discovery session?',
'discovery.deleted': 'Discovery session deleted',
@@ -1944,6 +1948,8 @@ const i18n = {
'common.deleteFailed': '删除失败',
'common.retry': '重试',
'common.refresh': '刷新',
'common.back': '返回',
'common.search': '搜索...',
'common.minutes': '分钟',
'common.enabled': '已启用',
'common.disabled': '已禁用',
@@ -3527,6 +3533,8 @@ const i18n = {
'common.edit': '编辑',
'common.close': '关闭',
'common.refresh': '刷新',
'common.back': '返回',
'common.search': '搜索...',
'common.refreshed': '已刷新',
'common.refreshing': '刷新中...',
'common.loading': '加载中...',
@@ -3708,6 +3716,8 @@ const i18n = {
'discovery.impact': '影响',
'discovery.recommendation': '建议',
'discovery.exportAsIssues': '导出为议题',
'discovery.selectAll': '全选',
'discovery.deselectAll': '取消全选',
'discovery.deleteSession': '删除会话',
'discovery.confirmDelete': '确定要删除此发现会话吗?',
'discovery.deleted': '发现会话已删除',

View File

@@ -18,6 +18,28 @@ var discoveryData = {
var discoveryLoading = false;
var discoveryPollingInterval = null;
// ========== Helper Functions ==========
function getFilteredFindings() {
    // Return the findings that pass every active filter in discoveryData:
    // perspective ('all' disables), priority ('all' disables), and a
    // case-insensitive text search over title, file and description.
    const { perspectiveFilter, priorityFilter, searchQuery } = discoveryData;
    const query = searchQuery ? searchQuery.toLowerCase() : '';
    // True when `value` is present and contains the lowercased query.
    const matchesText = (value) => Boolean(value) && value.toLowerCase().includes(query);
    return (discoveryData.findings || []).filter((finding) => {
        if (perspectiveFilter !== 'all' && finding.perspective !== perspectiveFilter) {
            return false;
        }
        if (priorityFilter !== 'all' && finding.priority !== priorityFilter) {
            return false;
        }
        if (!query) {
            return true; // no search text: perspective/priority filters alone decide
        }
        return matchesText(finding.title) || matchesText(finding.file) || matchesText(finding.description);
    });
}
// ========== Main Render Function ==========
async function renderIssueDiscovery() {
const container = document.getElementById('mainContent');
@@ -258,23 +280,7 @@ function renderDiscoveryDetailSection() {
const findings = discoveryData.findings || [];
const perspectives = [...new Set(findings.map(f => f.perspective))];
// Filter findings
let filteredFindings = findings;
if (discoveryData.perspectiveFilter !== 'all') {
filteredFindings = filteredFindings.filter(f => f.perspective === discoveryData.perspectiveFilter);
}
if (discoveryData.priorityFilter !== 'all') {
filteredFindings = filteredFindings.filter(f => f.priority === discoveryData.priorityFilter);
}
if (discoveryData.searchQuery) {
const q = discoveryData.searchQuery.toLowerCase();
filteredFindings = filteredFindings.filter(f =>
(f.title && f.title.toLowerCase().includes(q)) ||
(f.file && f.file.toLowerCase().includes(q)) ||
(f.description && f.description.toLowerCase().includes(q))
);
}
const filteredFindings = getFilteredFindings();
return `
<div class="discovery-detail-container">
@@ -305,10 +311,22 @@ function renderDiscoveryDetailSection() {
<!-- Findings Count -->
<div class="findings-count">
<span>${filteredFindings.length} ${t('discovery.findings') || 'findings'}</span>
${discoveryData.selectedFindings.size > 0 ? `
<span class="selected-count">(${discoveryData.selectedFindings.size} selected)</span>
` : ''}
<div class="findings-count-left">
<span>${filteredFindings.length} ${t('discovery.findings') || 'findings'}</span>
${discoveryData.selectedFindings.size > 0 ? `
<span class="selected-count">(${discoveryData.selectedFindings.size} selected)</span>
` : ''}
</div>
<div class="findings-count-actions">
<button class="select-action-btn" onclick="selectAllFindings()">
<i data-lucide="check-square" class="w-3 h-3"></i>
<span>${t('discovery.selectAll') || 'Select All'}</span>
</button>
<button class="select-action-btn" onclick="deselectAllFindings()">
<i data-lucide="square" class="w-3 h-3"></i>
<span>${t('discovery.deselectAll') || 'Deselect All'}</span>
</button>
</div>
</div>
<!-- Findings List -->
@@ -353,17 +371,19 @@ function renderDiscoveryDetailSection() {
function renderFindingItem(finding) {
const isSelected = discoveryData.selectedFindings.has(finding.id);
const isActive = discoveryData.selectedFinding?.id === finding.id;
const isExported = finding.exported === true;
return `
<div class="finding-item ${isActive ? 'active' : ''} ${isSelected ? 'selected' : ''} ${finding.dismissed ? 'dismissed' : ''}"
<div class="finding-item ${isActive ? 'active' : ''} ${isSelected ? 'selected' : ''} ${finding.dismissed ? 'dismissed' : ''} ${isExported ? 'exported' : ''}"
onclick="selectFinding('${finding.id}')">
<div class="finding-checkbox" onclick="event.stopPropagation(); toggleFindingSelection('${finding.id}')">
<input type="checkbox" ${isSelected ? 'checked' : ''}>
<input type="checkbox" ${isSelected ? 'checked' : ''} ${isExported ? 'disabled' : ''}>
</div>
<div class="finding-content">
<div class="finding-header">
<span class="perspective-badge ${finding.perspective}">${finding.perspective}</span>
<span class="priority-badge ${finding.priority}">${finding.priority}</span>
${isExported ? '<span class="exported-badge">' + (t('discovery.exported') || 'Exported') + '</span>' : ''}
</div>
<div class="finding-title">${finding.title || 'Untitled'}</div>
<div class="finding-location">
@@ -509,6 +529,23 @@ function toggleFindingSelection(findingId) {
renderDiscoveryView();
}
function selectAllFindings() {
    // Add every currently visible finding (i.e. one that passes the active
    // filters) to the selection set, skipping findings already exported as
    // issues, then repaint so checkboxes and the selected counter update.
    getFilteredFindings()
        .filter((finding) => !finding.exported)
        .forEach((finding) => discoveryData.selectedFindings.add(finding.id));
    renderDiscoveryView();
}
function deselectAllFindings() {
    // Empty the selection set in place (other references to the Set stay
    // valid) and repaint so every checkbox and counter resets.
    discoveryData.selectedFindings.clear();
    renderDiscoveryView();
}
function filterDiscoveryByPerspective(perspective) {
discoveryData.perspectiveFilter = perspective;
renderDiscoveryView();
@@ -539,11 +576,19 @@ async function exportSelectedFindings() {
const result = await response.json();
if (result.success) {
showNotification('success', `Exported ${result.exported_count} issues`);
// Show detailed message if duplicates were skipped
const msg = result.skipped_count > 0
? `Exported ${result.exported_count} issues, skipped ${result.skipped_count} duplicates`
: `Exported ${result.exported_count} issues`;
showNotification('success', msg);
discoveryData.selectedFindings.clear();
// Reload discovery data
// Reload discovery data to reflect exported status
await loadDiscoveryData();
renderDiscoveryView();
if (discoveryData.selectedDiscovery) {
await viewDiscoveryDetail(discoveryData.selectedDiscovery.discovery_id);
} else {
renderDiscoveryView();
}
} else {
showNotification('error', result.error || 'Export failed');
}