refactor: Update workflow plan system and template organization

- Remove --analyze|--deep parameters from plan.md, use default analysis
- Change .analysis to .process directory structure for better organization
- Create ANALYSIS_RESULTS.md template focused on verified results
- Add .process folder to workflow-architecture.md file structure
- Template emphasizes verification of files, methods, and commands
- Prevent execution errors from non-existent references

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
catlog22
2025-09-18 16:26:50 +08:00
parent 9167e4e39e
commit fc6e851230
49 changed files with 5865 additions and 174 deletions

View File

@@ -0,0 +1,13 @@
"""
Independent tool scripts for specialized analysis tasks.
Provides module analysis, tech stack detection, and workflow management tools.
"""
from .module_analyzer import ModuleAnalyzer, ModuleInfo
# NOTE(review): the loader's own usage text calls itself "tech_stack_loader.py";
# confirm the sibling module really is named tech_stack, or this import fails.
from .tech_stack import TechStackLoader

# Public API re-exported by the tools package.
__all__ = [
    'ModuleAnalyzer',
    'ModuleInfo',
    'TechStackLoader'
]

View File

@@ -0,0 +1,369 @@
#!/usr/bin/env python3
"""
Unified Module Analyzer
Combines functionality from detect_changed_modules.py and get_modules_by_depth.py
into a single, comprehensive module analysis tool.
"""
import os
import sys
import subprocess
import time
import json
from pathlib import Path
from typing import List, Dict, Optional, Set, Tuple
from dataclasses import dataclass, asdict
# Add parent directory for imports
sys.path.insert(0, str(Path(__file__).parent.parent))
from core.config import get_config
from core.gitignore_parser import GitignoreParser
@dataclass
class ModuleInfo:
    """Information about a module/directory.

    Instances are produced by ModuleAnalyzer.analyze_directory; counts and
    types describe the directory's immediate files (non-recursive).
    """
    depth: int                # path components below the analysis root (0 = root)
    path: str                 # path relative to the root, or "." for the root itself
    files: int                # number of files directly inside the directory
    types: List[str]          # sorted source-file extensions present (e.g. ['.md', '.py'])
    has_claude: bool          # True when the directory contains a CLAUDE.md (any case)
    status: str = "normal"    # changed, normal, new, deleted
    last_modified: Optional[float] = None  # latest file mtime (epoch seconds), or None

    def to_dict(self) -> Dict:
        """Return the module info as a plain, JSON-serializable dict."""
        return asdict(self)
class ModuleAnalyzer:
"""Unified module analysis tool with change detection and depth analysis."""
def __init__(self, root_path: str = ".", config_path: Optional[str] = None):
self.root_path = Path(root_path).resolve()
self.config = get_config(config_path)
# Source file extensions for analysis
self.source_extensions = {
'.md', '.js', '.ts', '.jsx', '.tsx', '.py', '.go', '.rs',
'.java', '.cpp', '.c', '.h', '.sh', '.ps1', '.json', '.yaml', '.yml',
'.php', '.rb', '.swift', '.kt', '.scala', '.dart'
}
# Initialize gitignore parser for exclusions
self.gitignore_parser = GitignoreParser(str(self.root_path))
self.exclude_patterns = self._build_exclusion_patterns()
def _build_exclusion_patterns(self) -> Set[str]:
"""Build exclusion patterns from config and gitignore."""
exclusions = {
'.git', '.history', '.vscode', '__pycache__', '.pytest_cache',
'node_modules', 'dist', 'build', '.egg-info', '.env',
'.cache', '.tmp', '.temp', '.DS_Store', 'Thumbs.db'
}
# Add patterns from config
config_patterns = self.config.get('exclude_patterns', [])
for pattern in config_patterns:
# Extract directory names from patterns
if '/' in pattern:
parts = pattern.replace('*/', '').replace('/*', '').split('/')
exclusions.update(part for part in parts if part and not part.startswith('*'))
return exclusions
def _should_exclude_directory(self, dir_path: Path) -> bool:
"""Check if directory should be excluded from analysis."""
dir_name = dir_path.name
# Check against exclusion patterns
if dir_name in self.exclude_patterns:
return True
# Check if directory starts with . (hidden directories)
if dir_name.startswith('.') and dir_name not in {'.github', '.vscode'}:
return True
return False
def get_git_changed_files(self, since: str = "HEAD") -> Set[str]:
"""Get files changed in git."""
changed_files = set()
try:
# Check if we're in a git repository
subprocess.run(['git', 'rev-parse', '--git-dir'],
check=True, capture_output=True, cwd=self.root_path)
# Get changes since specified reference
commands = [
['git', 'diff', '--name-only', since], # Changes since reference
['git', 'diff', '--name-only', '--staged'], # Staged changes
['git', 'ls-files', '--others', '--exclude-standard'] # Untracked files
]
for cmd in commands:
try:
result = subprocess.run(cmd, capture_output=True, text=True,
cwd=self.root_path, check=True)
if result.stdout.strip():
files = result.stdout.strip().split('\n')
changed_files.update(f for f in files if f)
except subprocess.CalledProcessError:
continue
except subprocess.CalledProcessError:
# Not a git repository or git not available
pass
return changed_files
def get_recently_modified_files(self, hours: int = 24) -> Set[str]:
"""Get files modified within the specified hours."""
cutoff_time = time.time() - (hours * 3600)
recent_files = set()
try:
for file_path in self.root_path.rglob('*'):
if file_path.is_file():
try:
if file_path.stat().st_mtime > cutoff_time:
rel_path = file_path.relative_to(self.root_path)
recent_files.add(str(rel_path))
except (OSError, ValueError):
continue
except Exception:
pass
return recent_files
def analyze_directory(self, dir_path: Path) -> Optional[ModuleInfo]:
"""Analyze a single directory and return module information."""
if self._should_exclude_directory(dir_path):
return None
try:
# Count files by type
file_types = set()
file_count = 0
has_claude = False
last_modified = 0
for item in dir_path.iterdir():
if item.is_file():
file_count += 1
# Track file types
if item.suffix.lower() in self.source_extensions:
file_types.add(item.suffix.lower())
# Check for CLAUDE.md
if item.name.upper() == 'CLAUDE.MD':
has_claude = True
# Track latest modification
try:
mtime = item.stat().st_mtime
last_modified = max(last_modified, mtime)
except OSError:
continue
# Calculate depth relative to root
try:
relative_path = dir_path.relative_to(self.root_path)
depth = len(relative_path.parts)
except ValueError:
depth = 0
return ModuleInfo(
depth=depth,
path=str(relative_path) if depth > 0 else ".",
files=file_count,
types=sorted(list(file_types)),
has_claude=has_claude,
last_modified=last_modified if last_modified > 0 else None
)
except (PermissionError, OSError):
return None
def detect_changed_modules(self, since: str = "HEAD") -> List[ModuleInfo]:
"""Detect modules affected by changes."""
changed_files = self.get_git_changed_files(since)
# If no git changes, fall back to recently modified files
if not changed_files:
changed_files = self.get_recently_modified_files(24)
# Get affected directories
affected_dirs = set()
for file_path in changed_files:
full_path = self.root_path / file_path
if full_path.exists():
# Add the file's directory and parent directories
current_dir = full_path.parent
while current_dir != self.root_path and current_dir.parent != current_dir:
affected_dirs.add(current_dir)
current_dir = current_dir.parent
# Analyze affected directories
modules = []
for dir_path in affected_dirs:
module_info = self.analyze_directory(dir_path)
if module_info:
module_info.status = "changed"
modules.append(module_info)
return sorted(modules, key=lambda m: (m.depth, m.path))
def analyze_by_depth(self, max_depth: Optional[int] = None) -> List[ModuleInfo]:
"""Analyze all modules organized by depth (deepest first)."""
modules = []
def scan_directory(dir_path: Path, current_depth: int = 0):
"""Recursively scan directories."""
if max_depth and current_depth > max_depth:
return
module_info = self.analyze_directory(dir_path)
if module_info and module_info.files > 0:
modules.append(module_info)
# Recurse into subdirectories
try:
for item in dir_path.iterdir():
if item.is_dir() and not self._should_exclude_directory(item):
scan_directory(item, current_depth + 1)
except (PermissionError, OSError):
pass
scan_directory(self.root_path)
# Sort by depth (deepest first), then by path
return sorted(modules, key=lambda m: (-m.depth, m.path))
def get_dependencies(self, module_path: str) -> List[str]:
"""Get module dependencies (basic implementation)."""
dependencies = []
module_dir = self.root_path / module_path
if not module_dir.exists() or not module_dir.is_dir():
return dependencies
# Look for common dependency files
dependency_files = [
'package.json', # Node.js
'requirements.txt', # Python
'Cargo.toml', # Rust
'go.mod', # Go
'pom.xml', # Java Maven
'build.gradle', # Java Gradle
]
for dep_file in dependency_files:
dep_path = module_dir / dep_file
if dep_path.exists():
dependencies.append(str(dep_path.relative_to(self.root_path)))
return dependencies
def find_modules_with_pattern(self, pattern: str) -> List[ModuleInfo]:
"""Find modules matching a specific pattern in their path or files."""
modules = self.analyze_by_depth()
matching_modules = []
for module in modules:
# Check if pattern matches path
if pattern.lower() in module.path.lower():
matching_modules.append(module)
continue
# Check if pattern matches file types
if any(pattern.lower() in ext.lower() for ext in module.types):
matching_modules.append(module)
return matching_modules
def export_analysis(self, modules: List[ModuleInfo], format: str = "json") -> str:
"""Export module analysis in specified format."""
if format == "json":
return json.dumps([module.to_dict() for module in modules], indent=2)
elif format == "list":
lines = []
for module in modules:
status = f"[{module.status}]" if module.status != "normal" else ""
claude_marker = "[CLAUDE]" if module.has_claude else ""
lines.append(f"{module.path} (depth:{module.depth}, files:{module.files}) {status} {claude_marker}")
return "\n".join(lines)
elif format == "grouped":
grouped = {}
for module in modules:
depth = module.depth
if depth not in grouped:
grouped[depth] = []
grouped[depth].append(module)
lines = []
for depth in sorted(grouped.keys()):
lines.append(f"\n=== Depth {depth} ===")
for module in grouped[depth]:
status = f"[{module.status}]" if module.status != "normal" else ""
claude_marker = "[CLAUDE]" if module.has_claude else ""
lines.append(f" {module.path} (files:{module.files}) {status} {claude_marker}")
return "\n".join(lines)
elif format == "paths":
return "\n".join(module.path for module in modules)
else:
raise ValueError(f"Unsupported format: {format}")
def main():
    """Main CLI entry point."""
    import argparse
    cli = argparse.ArgumentParser(description="Module Analysis Tool")
    cli.add_argument("command", choices=["changed", "depth", "dependencies", "find"],
                     help="Analysis command to run")
    cli.add_argument("--format", choices=["json", "list", "grouped", "paths"],
                     default="list", help="Output format")
    cli.add_argument("--since", default="HEAD~1",
                     help="Git reference for change detection (default: HEAD~1)")
    cli.add_argument("--max-depth", type=int,
                     help="Maximum directory depth to analyze")
    cli.add_argument("--pattern", help="Pattern to search for (for find command)")
    cli.add_argument("--module", help="Module path for dependency analysis")
    cli.add_argument("--config", help="Configuration file path")
    opts = cli.parse_args()

    analyzer = ModuleAnalyzer(config_path=opts.config)

    if opts.command == "changed":
        report = analyzer.export_analysis(analyzer.detect_changed_modules(opts.since), opts.format)
        print(report)
    elif opts.command == "depth":
        report = analyzer.export_analysis(analyzer.analyze_by_depth(opts.max_depth), opts.format)
        print(report)
    elif opts.command == "dependencies":
        # --module is mandatory for this sub-command.
        if not opts.module:
            print("Error: --module required for dependencies command", file=sys.stderr)
            sys.exit(1)
        deps = analyzer.get_dependencies(opts.module)
        print(json.dumps(deps, indent=2) if opts.format == "json" else "\n".join(deps))
    elif opts.command == "find":
        # --pattern is mandatory for this sub-command.
        if not opts.pattern:
            print("Error: --pattern required for find command", file=sys.stderr)
            sys.exit(1)
        report = analyzer.export_analysis(analyzer.find_modules_with_pattern(opts.pattern), opts.format)
        print(report)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,202 @@
#!/usr/bin/env python3
"""
Python equivalent of tech-stack-loader.sh
DMSFlow Tech Stack Guidelines Loader
Returns tech stack specific coding guidelines and best practices for Claude processing
Usage: python tech_stack_loader.py [command] [tech_stack]
"""
import sys
import argparse
import re
from pathlib import Path
from typing import Dict, List, Optional, Tuple
class TechStackLoader:
    """Load tech stack specific development guidelines."""

    def __init__(self, script_dir: Optional[str] = None):
        """Locate (or create) the guidelines template directory.

        Args:
            script_dir: Optional override for the base directory; defaults to
                the directory containing this file.
        """
        if script_dir:
            self.script_dir = Path(script_dir)
        else:
            self.script_dir = Path(__file__).parent
        # Look for the template directory in multiple known locations.
        possible_template_dirs = [
            self.script_dir / "../tech-stack-templates",
            self.script_dir / "../workflows/cli-templates/tech-stacks",
            self.script_dir / "tech-stack-templates",
            self.script_dir / "templates",
        ]
        self.template_dir = None
        for template_dir in possible_template_dirs:
            if template_dir.exists():
                self.template_dir = template_dir.resolve()
                break
        if not self.template_dir:
            # Fall back to creating a default template directory.
            self.template_dir = self.script_dir / "tech-stack-templates"
            self.template_dir.mkdir(exist_ok=True)

    def parse_yaml_frontmatter(self, content: str) -> Tuple[Dict[str, str], str]:
        """Parse YAML frontmatter from markdown content.

        Returns:
            (frontmatter, body): the key/value pairs from a well-formed
            frontmatter block (opening AND closing '---') and the content
            with that block removed. When no well-formed block exists the
            original content is returned unchanged with an empty dict —
            the previous implementation leaked keys from an unterminated
            block while still returning the full content.
        """
        lines = content.split('\n')
        if lines and lines[0].strip() == '---':
            pending: Dict[str, str] = {}
            for i, line in enumerate(lines[1:], 1):
                if line.strip() == '---':
                    # Well-formed block: commit the keys and strip it.
                    return pending, '\n'.join(lines[i + 1:])
                if ':' in line:
                    key, value = line.split(':', 1)
                    pending[key.strip()] = value.strip()
        # No frontmatter, or an unterminated block: content is untouched.
        return {}, content

    def list_available_guidelines(self) -> str:
        """List all available development guidelines with their descriptions."""
        output = ["Available Development Guidelines:", "=" * 33]
        if not self.template_dir.exists():
            output.append("No template directory found.")
            return '\n'.join(output)
        for file_path in self.template_dir.glob("*.md"):
            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    content = f.read()
                frontmatter, _ = self.parse_yaml_frontmatter(content)
                name = frontmatter.get('name', file_path.stem)
                description = frontmatter.get('description', 'No description available')
                output.append(f"{name:<20} - {description}")
            except Exception as e:
                # Report unreadable templates instead of aborting the listing.
                output.append(f"{file_path.stem:<20} - Error reading file: {e}")
        return '\n'.join(output)

    def load_guidelines(self, tech_stack: str) -> str:
        """Load specific development guidelines, trying common name variants.

        Raises:
            FileNotFoundError: when no template matches *tech_stack*.
            RuntimeError: when the matched template cannot be read.
        """
        template_path = self.template_dir / f"{tech_stack}.md"
        if not template_path.exists():
            # Try alternative naming conventions before giving up.
            alternatives = [
                f"{tech_stack}-dev.md",
                f"{tech_stack}_dev.md",
                f"{tech_stack.replace('-', '_')}.md",
                f"{tech_stack.replace('_', '-')}.md"
            ]
            for alt in alternatives:
                alt_path = self.template_dir / alt
                if alt_path.exists():
                    template_path = alt_path
                    break
            else:
                raise FileNotFoundError(
                    f"Error: Development guidelines '{tech_stack}' not found\n"
                    f"Use --list to see available guidelines"
                )
        try:
            with open(template_path, 'r', encoding='utf-8') as f:
                content = f.read()
            # Return the content with any YAML frontmatter stripped.
            _, content_without_yaml = self.parse_yaml_frontmatter(content)
            return content_without_yaml.strip()
        except Exception as e:
            raise RuntimeError(f"Error reading guidelines file: {e}")

    def get_version(self) -> str:
        """Get version information."""
        return "DMSFlow tech-stack-loader v2.0 (Python)\nSemantic-based development guidelines system"

    def get_help(self) -> str:
        """Get the usage/help message."""
        return """Usage:
tech_stack_loader.py --list List all available guidelines with descriptions
tech_stack_loader.py --load <name> Load specific development guidelines
tech_stack_loader.py <name> Load specific guidelines (legacy format)
tech_stack_loader.py --help Show this help message
tech_stack_loader.py --version Show version information
Examples:
tech_stack_loader.py --list
tech_stack_loader.py --load javascript-dev
tech_stack_loader.py python-dev"""
def main():
    """Command-line interface."""
    arg_parser = argparse.ArgumentParser(
        description="DMSFlow Tech Stack Guidelines Loader",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""Examples:
python tech_stack_loader.py --list
python tech_stack_loader.py --load javascript-dev
python tech_stack_loader.py python-dev"""
    )
    arg_parser.add_argument("command", nargs="?", help="Command or tech stack name")
    arg_parser.add_argument("tech_stack", nargs="?", help="Tech stack name (when using --load)")
    arg_parser.add_argument("--list", action="store_true", help="List all available guidelines")
    arg_parser.add_argument("--load", metavar="TECH_STACK", help="Load specific development guidelines")
    arg_parser.add_argument("--version", "-v", action="store_true", help="Show version information")
    arg_parser.add_argument("--template-dir", help="Override template directory path")
    opts = arg_parser.parse_args()

    try:
        loader = TechStackLoader(opts.template_dir)
        # Flags are also tolerated as literal positional tokens for
        # backward compatibility with the shell script's call style.
        if opts.version or opts.command == "--version":
            print(loader.get_version())
        elif opts.list or opts.command == "--list":
            print(loader.list_available_guidelines())
        elif opts.load:
            print(loader.load_guidelines(opts.load))
        elif opts.command == "--load" and opts.tech_stack:
            print(loader.load_guidelines(opts.tech_stack))
        elif opts.command and opts.command not in ["--help", "--list", "--load"]:
            # Legacy usage: a bare tech stack name as the only argument.
            print(loader.load_guidelines(opts.command))
        else:
            print(loader.get_help())
    except (FileNotFoundError, RuntimeError) as e:
        print(str(e), file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"Unexpected error: {e}", file=sys.stderr)
        sys.exit(1)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,241 @@
#!/usr/bin/env python3
"""
Python equivalent of update_module_claude.sh
Update CLAUDE.md for a specific module with automatic layer detection
Usage: python update_module_claude.py <module_path> [update_type]
module_path: Path to the module directory
update_type: full|related (default: full)
Script automatically detects layer depth and selects appropriate template
"""
import os
import sys
import subprocess
import time
import argparse
from pathlib import Path
from typing import Optional, Tuple, Dict
from dataclasses import dataclass
@dataclass
class LayerInfo:
    """Information about a documentation layer."""
    name: str               # human-readable layer label, e.g. "Layer 1 (Root)"
    template_path: str      # path to the prompt template file for this layer
    analysis_strategy: str  # gemini invocation strategy: "--all-files" or a CLAUDE.md glob
class ModuleClaudeUpdater:
"""Update CLAUDE.md documentation for modules with layer detection."""
def __init__(self, home_dir: Optional[str] = None):
self.home_dir = Path(home_dir) if home_dir else Path.home()
self.template_base = self.home_dir / ".claude/workflows/cli-templates/prompts/dms"
def detect_layer(self, module_path: str) -> LayerInfo:
"""Determine documentation layer based on path patterns."""
clean_path = module_path.replace('./', '') if module_path.startswith('./') else module_path
if module_path == ".":
# Root directory
return LayerInfo(
name="Layer 1 (Root)",
template_path=str(self.template_base / "claude-layer1-root.txt"),
analysis_strategy="--all-files"
)
elif '/' not in clean_path:
# Top-level directories (e.g., .claude, src, tests)
return LayerInfo(
name="Layer 2 (Domain)",
template_path=str(self.template_base / "claude-layer2-domain.txt"),
analysis_strategy="@{*/CLAUDE.md}"
)
elif clean_path.count('/') == 1:
# Second-level directories (e.g., .claude/scripts, src/components)
return LayerInfo(
name="Layer 3 (Module)",
template_path=str(self.template_base / "claude-layer3-module.txt"),
analysis_strategy="@{*/CLAUDE.md}"
)
else:
# Deeper directories (e.g., .claude/workflows/cli-templates/prompts)
return LayerInfo(
name="Layer 4 (Sub-Module)",
template_path=str(self.template_base / "claude-layer4-submodule.txt"),
analysis_strategy="--all-files"
)
def load_template(self, template_path: str) -> str:
"""Load template content from file."""
try:
with open(template_path, 'r', encoding='utf-8') as f:
return f.read()
except FileNotFoundError:
print(f" [WARN] Template not found: {template_path}, using fallback")
return "Update CLAUDE.md documentation for this module following hierarchy standards."
except Exception as e:
print(f" [WARN] Error reading template: {e}, using fallback")
return "Update CLAUDE.md documentation for this module following hierarchy standards."
def build_prompt(self, layer_info: LayerInfo, module_path: str, update_type: str) -> str:
"""Build the prompt for gemini."""
template_content = self.load_template(layer_info.template_path)
module_name = os.path.basename(module_path)
if update_type == "full":
update_context = """
Update Mode: Complete refresh
- Perform comprehensive analysis of all content
- Document patterns, architecture, and purpose
- Consider existing documentation hierarchy
- Follow template guidelines strictly"""
else:
update_context = """
Update Mode: Context-aware update
- Focus on recent changes and affected areas
- Maintain consistency with existing documentation
- Update only relevant sections
- Follow template guidelines for updated content"""
base_prompt = f"""
[CRITICAL] RULES - MUST FOLLOW:
1. ONLY modify CLAUDE.md files at any hierarchy level
2. NEVER modify source code files
3. Focus exclusively on updating documentation
4. Follow the template guidelines exactly
{template_content}
{update_context}
Module Information:
- Name: {module_name}
- Path: {module_path}
- Layer: {layer_info.name}
- Analysis Strategy: {layer_info.analysis_strategy}"""
return base_prompt
def execute_gemini_command(self, prompt: str, analysis_strategy: str, module_path: str) -> bool:
"""Execute gemini command with the appropriate strategy."""
original_dir = os.getcwd()
try:
os.chdir(module_path)
if analysis_strategy == "--all-files":
cmd = ["gemini", "--all-files", "--yolo", "-p", prompt]
else:
cmd = ["gemini", "--yolo", "-p", f"{analysis_strategy} {prompt}"]
result = subprocess.run(cmd, capture_output=True, text=True)
if result.returncode == 0:
return True
else:
print(f" [ERROR] Gemini command failed: {result.stderr}")
return False
except subprocess.CalledProcessError as e:
print(f" [ERROR] Error executing gemini: {e}")
return False
except FileNotFoundError:
print(f" [ERROR] Gemini command not found. Make sure gemini is installed and in PATH.")
return False
finally:
os.chdir(original_dir)
def update_module_claude(self, module_path: str, update_type: str = "full") -> bool:
"""Main function to update CLAUDE.md for a module."""
# Validate parameters
if not module_path:
print("[ERROR] Module path is required")
print("Usage: update_module_claude.py <module_path> [update_type]")
return False
path_obj = Path(module_path)
if not path_obj.exists() or not path_obj.is_dir():
print(f"[ERROR] Directory '{module_path}' does not exist")
return False
# Check if directory has files
files = list(path_obj.glob('*'))
file_count = len([f for f in files if f.is_file()])
if file_count == 0:
print(f"[SKIP] Skipping '{module_path}' - no files found")
return True
# Detect layer and get configuration
layer_info = self.detect_layer(module_path)
print(f"[UPDATE] Updating: {module_path}")
print(f" Layer: {layer_info.name} | Type: {update_type} | Files: {file_count}")
print(f" Template: {os.path.basename(layer_info.template_path)} | Strategy: {layer_info.analysis_strategy}")
# Build prompt
prompt = self.build_prompt(layer_info, module_path, update_type)
# Execute update
start_time = time.time()
print(" [PROGRESS] Starting update...")
success = self.execute_gemini_command(prompt, layer_info.analysis_strategy, module_path)
if success:
duration = int(time.time() - start_time)
print(f" [OK] Completed in {duration}s")
return True
else:
print(f" [ERROR] Update failed for {module_path}")
return False
def main():
    """Command-line interface."""
    arg_parser = argparse.ArgumentParser(
        description="Update CLAUDE.md for a specific module with automatic layer detection",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""Examples:
python update_module_claude.py .
python update_module_claude.py src/components full
python update_module_claude.py .claude/scripts related"""
    )
    arg_parser.add_argument("module_path", help="Path to the module directory")
    arg_parser.add_argument("update_type", nargs="?", choices=["full", "related"],
                            default="full", help="Update type (default: full)")
    arg_parser.add_argument("--home", help="Override home directory path")
    arg_parser.add_argument("--dry-run", action="store_true",
                            help="Show what would be done without executing")
    opts = arg_parser.parse_args()

    try:
        updater = ModuleClaudeUpdater(opts.home)
        if opts.dry_run:
            # Resolve the configuration and show it without running gemini.
            layer = updater.detect_layer(opts.module_path)
            preview = updater.build_prompt(layer, opts.module_path, opts.update_type)
            print("[DRY-RUN] Dry run mode - showing configuration:")
            print(f"Module Path: {opts.module_path}")
            print(f"Update Type: {opts.update_type}")
            print(f"Layer: {layer.name}")
            print(f"Template: {layer.template_path}")
            print(f"Strategy: {layer.analysis_strategy}")
            print("\nPrompt preview:")
            print("-" * 50)
            truncated = len(preview) > 500
            print(preview[:500] + "..." if truncated else preview)
            return
        ok = updater.update_module_claude(opts.module_path, opts.update_type)
        sys.exit(0 if ok else 1)
    except KeyboardInterrupt:
        print("\n[ERROR] Operation cancelled by user")
        sys.exit(1)
    except Exception as e:
        print(f"[ERROR] Unexpected error: {e}")
        sys.exit(1)
if __name__ == "__main__":
main()