mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-02-12 02:37:45 +08:00
fix: resolve CodexLens installation issues by correcting package name and improving local path detection
- Updated package name from `codexlens` to `codex-lens` in all relevant files to ensure consistency with `pyproject.toml`. - Enhanced `findLocalPackagePath()` to always search for local paths, even when running from `node_modules`. - Removed fallback logic for PyPI installation in several functions, providing clearer error messages for local installation failures. - Added detailed documentation on installation steps and error handling for local development packages. - Introduced a new summary document outlining the issues and fixes related to CodexLens installation.
This commit is contained in:
@@ -61,6 +61,29 @@ Score = 0
|
|||||||
|
|
||||||
**Extract Keywords**: domains (auth, api, database, ui), technologies (react, typescript, node), actions (implement, refactor, test)
|
**Extract Keywords**: domains (auth, api, database, ui), technologies (react, typescript, node), actions (implement, refactor, test)
|
||||||
|
|
||||||
|
**Plan Context Loading** (when executing from plan.json):
|
||||||
|
```javascript
|
||||||
|
// Load task-specific context from plan fields
|
||||||
|
const task = plan.tasks.find(t => t.id === taskId)
|
||||||
|
const context = {
|
||||||
|
// Base context
|
||||||
|
scope: task.scope,
|
||||||
|
modification_points: task.modification_points,
|
||||||
|
implementation: task.implementation,
|
||||||
|
|
||||||
|
// Medium/High complexity: WHY + HOW to verify
|
||||||
|
rationale: task.rationale?.chosen_approach, // Why this approach
|
||||||
|
verification: task.verification?.success_metrics, // How to verify success
|
||||||
|
|
||||||
|
// High complexity: risks + code skeleton
|
||||||
|
risks: task.risks?.map(r => r.mitigation), // Risk mitigations to follow
|
||||||
|
code_skeleton: task.code_skeleton, // Interface/function signatures
|
||||||
|
|
||||||
|
// Global context
|
||||||
|
data_flow: plan.data_flow?.diagram // Data flow overview
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Phase 2: Context Discovery
|
## Phase 2: Context Discovery
|
||||||
@@ -129,6 +152,30 @@ EXPECTED: {clear_output_expectations}
|
|||||||
CONSTRAINTS: {constraints}
|
CONSTRAINTS: {constraints}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**5. Plan-Aware Prompt Enhancement** (when executing from plan.json):
|
||||||
|
```bash
|
||||||
|
# Include rationale in PURPOSE (Medium/High)
|
||||||
|
PURPOSE: {task.description}
|
||||||
|
Approach: {task.rationale.chosen_approach}
|
||||||
|
Decision factors: {task.rationale.decision_factors.join(', ')}
|
||||||
|
|
||||||
|
# Include code skeleton in TASK (High)
|
||||||
|
TASK: {task.implementation.join('\n')}
|
||||||
|
Key interfaces: {task.code_skeleton.interfaces.map(i => i.signature)}
|
||||||
|
Key functions: {task.code_skeleton.key_functions.map(f => f.signature)}
|
||||||
|
|
||||||
|
# Include verification in EXPECTED
|
||||||
|
EXPECTED: {task.acceptance.join(', ')}
|
||||||
|
Success metrics: {task.verification.success_metrics.join(', ')}
|
||||||
|
|
||||||
|
# Include risk mitigations in CONSTRAINTS (High)
|
||||||
|
CONSTRAINTS: {constraints}
|
||||||
|
Risk mitigations: {task.risks.map(r => r.mitigation).join('; ')}
|
||||||
|
|
||||||
|
# Include data flow context (High)
|
||||||
|
Memory: Data flow: {plan.data_flow.diagram}
|
||||||
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Phase 4: Tool Selection & Execution
|
## Phase 4: Tool Selection & Execution
|
||||||
@@ -205,11 +252,25 @@ find .workflow/active/ -name 'WFS-*' -type d
|
|||||||
**Timestamp**: {iso_timestamp} | **Session**: {session_id} | **Task**: {task_id}
|
**Timestamp**: {iso_timestamp} | **Session**: {session_id} | **Task**: {task_id}
|
||||||
|
|
||||||
## Phase 1: Intent {intent} | Complexity {complexity} | Keywords {keywords}
|
## Phase 1: Intent {intent} | Complexity {complexity} | Keywords {keywords}
|
||||||
|
[Medium/High] Rationale: {task.rationale.chosen_approach}
|
||||||
|
[High] Risks: {task.risks.map(r => `${r.description} → ${r.mitigation}`).join('; ')}
|
||||||
|
|
||||||
## Phase 2: Files ({N}) | Patterns {patterns} | Dependencies {deps}
|
## Phase 2: Files ({N}) | Patterns {patterns} | Dependencies {deps}
|
||||||
|
[High] Data Flow: {plan.data_flow.diagram}
|
||||||
|
|
||||||
## Phase 3: Enhanced Prompt
|
## Phase 3: Enhanced Prompt
|
||||||
{full_prompt}
|
{full_prompt}
|
||||||
|
[High] Code Skeleton:
|
||||||
|
- Interfaces: {task.code_skeleton.interfaces.map(i => i.name).join(', ')}
|
||||||
|
- Functions: {task.code_skeleton.key_functions.map(f => f.signature).join('; ')}
|
||||||
|
|
||||||
## Phase 4: Tool {tool} | Command {cmd} | Result {status} | Duration {time}
|
## Phase 4: Tool {tool} | Command {cmd} | Result {status} | Duration {time}
|
||||||
|
|
||||||
## Phase 5: Log {path} | Summary {summary_path}
|
## Phase 5: Log {path} | Summary {summary_path}
|
||||||
|
[Medium/High] Verification Checklist:
|
||||||
|
- Unit Tests: {task.verification.unit_tests.join(', ')}
|
||||||
|
- Success Metrics: {task.verification.success_metrics.join(', ')}
|
||||||
|
|
||||||
## Next Steps: {actions}
|
## Next Steps: {actions}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
@@ -77,6 +77,8 @@ Phase 4: planObject Generation
|
|||||||
|
|
||||||
## CLI Command Template
|
## CLI Command Template
|
||||||
|
|
||||||
|
### Base Template (All Complexity Levels)
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
ccw cli -p "
|
ccw cli -p "
|
||||||
PURPOSE: Generate plan for {task_description}
|
PURPOSE: Generate plan for {task_description}
|
||||||
@@ -84,12 +86,18 @@ TASK:
|
|||||||
• Analyze task/bug description and context
|
• Analyze task/bug description and context
|
||||||
• Break down into tasks following schema structure
|
• Break down into tasks following schema structure
|
||||||
• Identify dependencies and execution phases
|
• Identify dependencies and execution phases
|
||||||
|
• Generate complexity-appropriate fields (rationale, verification, risks, code_skeleton, data_flow)
|
||||||
MODE: analysis
|
MODE: analysis
|
||||||
CONTEXT: @**/* | Memory: {context_summary}
|
CONTEXT: @**/* | Memory: {context_summary}
|
||||||
EXPECTED:
|
EXPECTED:
|
||||||
## Summary
|
## Summary
|
||||||
[overview]
|
[overview]
|
||||||
|
|
||||||
|
## Approach
|
||||||
|
[high-level strategy]
|
||||||
|
|
||||||
|
## Complexity: {Low|Medium|High}
|
||||||
|
|
||||||
## Task Breakdown
|
## Task Breakdown
|
||||||
### T1: [Title] (or FIX1 for fix-plan)
|
### T1: [Title] (or FIX1 for fix-plan)
|
||||||
**Scope**: [module/feature path]
|
**Scope**: [module/feature path]
|
||||||
@@ -97,17 +105,54 @@ EXPECTED:
|
|||||||
**Description**: [what]
|
**Description**: [what]
|
||||||
**Modification Points**: - [file]: [target] - [change]
|
**Modification Points**: - [file]: [target] - [change]
|
||||||
**Implementation**: 1. [step]
|
**Implementation**: 1. [step]
|
||||||
**Acceptance/Verification**: - [quantified criterion]
|
**Reference**: - Pattern: [pattern] - Files: [files] - Examples: [guidance]
|
||||||
|
**Acceptance**: - [quantified criterion]
|
||||||
**Depends On**: []
|
**Depends On**: []
|
||||||
|
|
||||||
|
[MEDIUM/HIGH COMPLEXITY ONLY]
|
||||||
|
**Rationale**:
|
||||||
|
- Chosen Approach: [why this approach]
|
||||||
|
- Alternatives Considered: [other options]
|
||||||
|
- Decision Factors: [key factors]
|
||||||
|
- Tradeoffs: [known tradeoffs]
|
||||||
|
|
||||||
|
**Verification**:
|
||||||
|
- Unit Tests: [test names]
|
||||||
|
- Integration Tests: [test names]
|
||||||
|
- Manual Checks: [specific steps]
|
||||||
|
- Success Metrics: [quantified metrics]
|
||||||
|
|
||||||
|
[HIGH COMPLEXITY ONLY]
|
||||||
|
**Risks**:
|
||||||
|
- Risk: [description] | Probability: [L/M/H] | Impact: [L/M/H] | Mitigation: [strategy] | Fallback: [alternative]
|
||||||
|
|
||||||
|
**Code Skeleton**:
|
||||||
|
- Interfaces: [name]: [definition] - [purpose]
|
||||||
|
- Functions: [signature] - [purpose] - returns [type]
|
||||||
|
- Classes: [name] - [purpose] - methods: [list]
|
||||||
|
|
||||||
|
## Data Flow (HIGH COMPLEXITY ONLY)
|
||||||
|
**Diagram**: [A → B → C]
|
||||||
|
**Stages**:
|
||||||
|
- Stage [name]: Input=[type] → Output=[type] | Component=[module] | Transforms=[list]
|
||||||
|
**Dependencies**: [external deps]
|
||||||
|
|
||||||
|
## Design Decisions (MEDIUM/HIGH)
|
||||||
|
- Decision: [what] | Rationale: [why] | Tradeoff: [what was traded]
|
||||||
|
|
||||||
## Flow Control
|
## Flow Control
|
||||||
**Execution Order**: - Phase parallel-1: [T1, T2] (independent)
|
**Execution Order**: - Phase parallel-1: [T1, T2] (independent)
|
||||||
|
**Exit Conditions**: - Success: [condition] - Failure: [condition]
|
||||||
|
|
||||||
## Time Estimate
|
## Time Estimate
|
||||||
**Total**: [time]
|
**Total**: [time]
|
||||||
|
|
||||||
CONSTRAINTS:
|
CONSTRAINTS:
|
||||||
- Follow schema structure from {schema_path}
|
- Follow schema structure from {schema_path}
|
||||||
|
- Complexity determines required fields:
|
||||||
|
* Low: base fields only
|
||||||
|
* Medium: + rationale + verification + design_decisions
|
||||||
|
* High: + risks + code_skeleton + data_flow
|
||||||
- Acceptance/verification must be quantified
|
- Acceptance/verification must be quantified
|
||||||
- Dependencies use task IDs
|
- Dependencies use task IDs
|
||||||
- analysis=READ-ONLY
|
- analysis=READ-ONLY
|
||||||
@@ -127,43 +172,80 @@ function extractSection(cliOutput, header) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Parse structured tasks from CLI output
|
// Parse structured tasks from CLI output
|
||||||
function extractStructuredTasks(cliOutput) {
|
function extractStructuredTasks(cliOutput, complexity) {
|
||||||
const tasks = []
|
const tasks = []
|
||||||
const taskPattern = /### (T\d+): (.+?)\n\*\*File\*\*: (.+?)\n\*\*Action\*\*: (.+?)\n\*\*Description\*\*: (.+?)\n\*\*Modification Points\*\*:\n((?:- .+?\n)*)\*\*Implementation\*\*:\n((?:\d+\. .+?\n)+)\*\*Reference\*\*:\n((?:- .+?\n)+)\*\*Acceptance\*\*:\n((?:- .+?\n)+)\*\*Depends On\*\*: (.+)/g
|
// Split by task headers
|
||||||
|
const taskBlocks = cliOutput.split(/### (T\d+):/).slice(1)
|
||||||
|
|
||||||
|
for (let i = 0; i < taskBlocks.length; i += 2) {
|
||||||
|
const taskId = taskBlocks[i].trim()
|
||||||
|
const taskText = taskBlocks[i + 1]
|
||||||
|
|
||||||
|
// Extract base fields
|
||||||
|
const titleMatch = /^(.+?)(?=\n)/.exec(taskText)
|
||||||
|
const scopeMatch = /\*\*Scope\*\*: (.+?)(?=\n)/.exec(taskText)
|
||||||
|
const actionMatch = /\*\*Action\*\*: (.+?)(?=\n)/.exec(taskText)
|
||||||
|
const descMatch = /\*\*Description\*\*: (.+?)(?=\n)/.exec(taskText)
|
||||||
|
const depsMatch = /\*\*Depends On\*\*: (.+?)(?=\n|$)/.exec(taskText)
|
||||||
|
|
||||||
let match
|
|
||||||
while ((match = taskPattern.exec(cliOutput)) !== null) {
|
|
||||||
// Parse modification points
|
// Parse modification points
|
||||||
const modPoints = match[6].trim().split('\n').filter(s => s.startsWith('-')).map(s => {
|
const modPointsSection = /\*\*Modification Points\*\*:\n((?:- .+?\n)*)/.exec(taskText)
|
||||||
const m = /- \[(.+?)\]: \[(.+?)\] - (.+)/.exec(s)
|
const modPoints = []
|
||||||
return m ? { file: m[1], target: m[2], change: m[3] } : null
|
if (modPointsSection) {
|
||||||
}).filter(Boolean)
|
const lines = modPointsSection[1].split('\n').filter(s => s.trim().startsWith('-'))
|
||||||
|
lines.forEach(line => {
|
||||||
// Parse reference
|
const m = /- \[(.+?)\]: \[(.+?)\] - (.+)/.exec(line)
|
||||||
const refText = match[8].trim()
|
if (m) modPoints.push({ file: m[1].trim(), target: m[2].trim(), change: m[3].trim() })
|
||||||
const reference = {
|
})
|
||||||
pattern: (/- Pattern: (.+)/m.exec(refText) || [])[1]?.trim() || "No pattern",
|
|
||||||
files: ((/- Files: (.+)/m.exec(refText) || [])[1] || "").split(',').map(f => f.trim()).filter(Boolean),
|
|
||||||
examples: (/- Examples: (.+)/m.exec(refText) || [])[1]?.trim() || "Follow general pattern"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parse depends_on
|
// Parse implementation
|
||||||
const depsText = match[10].trim()
|
const implSection = /\*\*Implementation\*\*:\n((?:\d+\. .+?\n)+)/.exec(taskText)
|
||||||
const depends_on = depsText === '[]' ? [] : depsText.replace(/[\[\]]/g, '').split(',').map(s => s.trim()).filter(Boolean)
|
const implementation = implSection
|
||||||
|
? implSection[1].split('\n').map(s => s.replace(/^\d+\. /, '').trim()).filter(Boolean)
|
||||||
|
: []
|
||||||
|
|
||||||
tasks.push({
|
// Parse reference
|
||||||
id: match[1].trim(),
|
const refSection = /\*\*Reference\*\*:\n((?:- .+?\n)+)/.exec(taskText)
|
||||||
title: match[2].trim(),
|
const reference = refSection ? {
|
||||||
file: match[3].trim(),
|
pattern: (/- Pattern: (.+)/m.exec(refSection[1]) || [])[1]?.trim() || "No pattern",
|
||||||
action: match[4].trim(),
|
files: ((/- Files: (.+)/m.exec(refSection[1]) || [])[1] || "").split(',').map(f => f.trim()).filter(Boolean),
|
||||||
description: match[5].trim(),
|
examples: (/- Examples: (.+)/m.exec(refSection[1]) || [])[1]?.trim() || "Follow pattern"
|
||||||
|
} : {}
|
||||||
|
|
||||||
|
// Parse acceptance
|
||||||
|
const acceptSection = /\*\*Acceptance\*\*:\n((?:- .+?\n)+)/.exec(taskText)
|
||||||
|
const acceptance = acceptSection
|
||||||
|
? acceptSection[1].split('\n').map(s => s.replace(/^- /, '').trim()).filter(Boolean)
|
||||||
|
: []
|
||||||
|
|
||||||
|
const task = {
|
||||||
|
id: taskId,
|
||||||
|
title: titleMatch?.[1].trim() || "Untitled",
|
||||||
|
scope: scopeMatch?.[1].trim() || "",
|
||||||
|
action: actionMatch?.[1].trim() || "Implement",
|
||||||
|
description: descMatch?.[1].trim() || "",
|
||||||
modification_points: modPoints,
|
modification_points: modPoints,
|
||||||
implementation: match[7].trim().split('\n').map(s => s.replace(/^\d+\. /, '')).filter(Boolean),
|
implementation,
|
||||||
reference,
|
reference,
|
||||||
acceptance: match[9].trim().split('\n').map(s => s.replace(/^- /, '')).filter(Boolean),
|
acceptance,
|
||||||
depends_on
|
depends_on: depsMatch?.[1] === '[]' ? [] : (depsMatch?.[1] || "").replace(/[\[\]]/g, '').split(',').map(s => s.trim()).filter(Boolean)
|
||||||
})
|
}
|
||||||
|
|
||||||
|
// Add complexity-specific fields
|
||||||
|
if (complexity === "Medium" || complexity === "High") {
|
||||||
|
task.rationale = extractRationale(taskText)
|
||||||
|
task.verification = extractVerification(taskText)
|
||||||
|
}
|
||||||
|
|
||||||
|
if (complexity === "High") {
|
||||||
|
task.risks = extractRisks(taskText)
|
||||||
|
task.code_skeleton = extractCodeSkeleton(taskText)
|
||||||
|
}
|
||||||
|
|
||||||
|
tasks.push(task)
|
||||||
}
|
}
|
||||||
|
|
||||||
return tasks
|
return tasks
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -186,14 +268,155 @@ function extractFlowControl(cliOutput) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Parse rationale section for a task
|
||||||
|
// Parse the **Rationale** block of a task (Medium/High complexity).
// Expects the four bullet lines in fixed order (Chosen Approach,
// Alternatives Considered, Decision Factors, Tradeoffs).
// Returns a structured object, or null when no rationale section exists.
function extractRationale(taskText) {
  const match = /\*\*Rationale\*\*:\n- Chosen Approach: (.+?)\n- Alternatives Considered: (.+?)\n- Decision Factors: (.+?)\n- Tradeoffs: (.+)/s.exec(taskText)
  if (match === null) return null

  // Comma-separated bullet content -> trimmed, non-empty string list
  const toList = (raw) => raw.split(',').map(s => s.trim()).filter(Boolean)
  const [, approach, alternatives, factors, tradeoffs] = match

  return {
    chosen_approach: approach.trim(),
    alternatives_considered: toList(alternatives),
    decision_factors: toList(factors),
    tradeoffs: tradeoffs.trim()
  }
}
|
||||||
|
|
||||||
|
// Parse verification section for a task
|
||||||
|
// Parse the **Verification** block of a task (Medium/High complexity).
// Expects the four bullet lines in fixed order (Unit Tests, Integration
// Tests, Manual Checks, Success Metrics), each comma-separated.
// Returns a structured object, or null when no verification section exists.
function extractVerification(taskText) {
  const match = /\*\*Verification\*\*:\n- Unit Tests: (.+?)\n- Integration Tests: (.+?)\n- Manual Checks: (.+?)\n- Success Metrics: (.+)/s.exec(taskText)
  if (match === null) return null

  // Comma-separated bullet content -> trimmed, non-empty string list
  const toList = (raw) => raw.split(',').map(s => s.trim()).filter(Boolean)

  return {
    unit_tests: toList(match[1]),
    integration_tests: toList(match[2]),
    manual_checks: toList(match[3]),
    success_metrics: toList(match[4])
  }
}
|
||||||
|
|
||||||
|
// Parse risks section for a task
|
||||||
|
// Parse every "- Risk: ..." bullet in a task (High complexity only).
// Each bullet carries description, L/M/H probability and impact, a
// mitigation, and an optional fallback. Returns the risk list, or null
// when the task declares no risks.
function extractRisks(taskText) {
  const risksPattern = /- Risk: (.+?) \| Probability: ([LMH]) \| Impact: ([LMH]) \| Mitigation: (.+?)(?: \| Fallback: (.+?))?(?=\n|$)/g
  // Expand the single-letter severity codes used in the bullet format
  const LEVELS = { L: 'Low', M: 'Medium', H: 'High' }

  const risks = [...taskText.matchAll(risksPattern)].map(m => ({
    description: m[1].trim(),
    probability: LEVELS[m[2]],
    impact: LEVELS[m[3]],
    mitigation: m[4].trim(),
    fallback: m[5]?.trim() || undefined
  }))

  return risks.length > 0 ? risks : null
}
|
||||||
|
|
||||||
|
// Parse code skeleton section for a task
|
||||||
|
// Parse the **Code Skeleton** block of a task (High complexity only).
// Collects interfaces, key functions, and classes from their bullet
// formats; returns a skeleton object containing only the non-empty
// groups, or null when the section is absent or holds none of the three.
function extractCodeSkeleton(taskText) {
  const section = /\*\*Code Skeleton\*\*:\n([\s\S]*?)(?=\n\*\*|$)/.exec(taskText)
  if (section === null) return null

  const text = section[1]
  // Run a /g pattern over the section text, building one object per match
  const collect = (pattern, build) => [...text.matchAll(pattern)].map(build)

  const interfaces = collect(
    /- Interfaces: (.+?): (.+?) - (.+?)(?=\n|$)/g,
    (m) => ({ name: m[1].trim(), definition: m[2].trim(), purpose: m[3].trim() })
  )
  const functions = collect(
    /- Functions: (.+?) - (.+?) - returns (.+?)(?=\n|$)/g,
    (m) => ({ signature: m[1].trim(), purpose: m[2].trim(), returns: m[3].trim() })
  )
  const classes = collect(
    /- Classes: (.+?) - (.+?) - methods: (.+?)(?=\n|$)/g,
    (m) => ({
      name: m[1].trim(),
      purpose: m[2].trim(),
      methods: m[3].split(',').map(s => s.trim()).filter(Boolean)
    })
  )

  // Only attach groups that actually matched something
  const skeleton = {}
  if (interfaces.length > 0) skeleton.interfaces = interfaces
  if (functions.length > 0) skeleton.key_functions = functions
  if (classes.length > 0) skeleton.classes = classes

  return Object.keys(skeleton).length > 0 ? skeleton : null
}
|
||||||
|
|
||||||
|
// Parse data flow section
|
||||||
|
// Parse the "## Data Flow" section of the CLI output (High complexity).
// Extracts the diagram line, the per-stage bullets, and the external
// dependency list. Returns { diagram, stages?, dependencies? }, or null
// when the section is absent entirely.
function extractDataFlow(cliOutput) {
  const section = /## Data Flow.*?\n([\s\S]*?)(?=\n## |$)/.exec(cliOutput)
  if (section === null) return null

  const text = section[1]
  const diagramMatch = /\*\*Diagram\*\*: (.+?)(?=\n|$)/.exec(text)
  const depsMatch = /\*\*Dependencies\*\*: (.+?)(?=\n|$)/.exec(text)

  // Stage bullets: "- Stage name: Input=X → Output=Y | Component=Z | Transforms=a, b"
  const stagesPattern = /- Stage (.+?): Input=(.+?) → Output=(.+?) \| Component=(.+?)(?: \| Transforms=(.+?))?(?=\n|$)/g
  const stages = [...text.matchAll(stagesPattern)].map(m => ({
    stage: m[1].trim(),
    input: m[2].trim(),
    output: m[3].trim(),
    component: m[4].trim(),
    transformations: m[5] ? m[5].split(',').map(s => s.trim()).filter(Boolean) : undefined
  }))

  return {
    diagram: diagramMatch?.[1].trim() || null,
    stages: stages.length > 0 ? stages : undefined,
    dependencies: depsMatch ? depsMatch[1].split(',').map(s => s.trim()).filter(Boolean) : undefined
  }
}
|
||||||
|
|
||||||
|
// Parse design decisions section
|
||||||
|
// Parse the "## Design Decisions" section (Medium/High complexity).
// Each bullet carries a decision, its rationale, and an optional
// tradeoff. Returns the decision list, or null when the section is
// absent or contains no decision bullets.
function extractDesignDecisions(cliOutput) {
  const section = /## Design Decisions.*?\n([\s\S]*?)(?=\n## |$)/.exec(cliOutput)
  if (section === null) return null

  const decisionsPattern = /- Decision: (.+?) \| Rationale: (.+?)(?: \| Tradeoff: (.+?))?(?=\n|$)/g
  const decisions = [...section[1].matchAll(decisionsPattern)].map(m => ({
    decision: m[1].trim(),
    rationale: m[2].trim(),
    tradeoff: m[3]?.trim() || undefined
  }))

  return decisions.length > 0 ? decisions : null
}
|
||||||
|
|
||||||
// Parse all sections
|
// Parse all sections
|
||||||
function parseCLIOutput(cliOutput) {
|
function parseCLIOutput(cliOutput) {
|
||||||
|
const complexity = (extractSection(cliOutput, "Complexity") || "Medium").trim()
|
||||||
return {
|
return {
|
||||||
summary: extractSection(cliOutput, "Implementation Summary"),
|
summary: extractSection(cliOutput, "Summary") || extractSection(cliOutput, "Implementation Summary"),
|
||||||
approach: extractSection(cliOutput, "High-Level Approach"),
|
approach: extractSection(cliOutput, "Approach") || extractSection(cliOutput, "High-Level Approach"),
|
||||||
raw_tasks: extractStructuredTasks(cliOutput),
|
complexity,
|
||||||
|
raw_tasks: extractStructuredTasks(cliOutput, complexity),
|
||||||
flow_control: extractFlowControl(cliOutput),
|
flow_control: extractFlowControl(cliOutput),
|
||||||
time_estimate: extractSection(cliOutput, "Time Estimate")
|
time_estimate: extractSection(cliOutput, "Time Estimate"),
|
||||||
|
// High complexity only
|
||||||
|
data_flow: complexity === "High" ? extractDataFlow(cliOutput) : null,
|
||||||
|
// Medium/High complexity
|
||||||
|
design_decisions: (complexity === "Medium" || complexity === "High") ? extractDesignDecisions(cliOutput) : null
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
@@ -326,7 +549,8 @@ function inferFlowControl(tasks) {
|
|||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
function generatePlanObject(parsed, enrichedContext, input, schemaType) {
|
function generatePlanObject(parsed, enrichedContext, input, schemaType) {
|
||||||
const tasks = validateAndEnhanceTasks(parsed.raw_tasks, enrichedContext)
|
const complexity = parsed.complexity || input.complexity || "Medium"
|
||||||
|
const tasks = validateAndEnhanceTasks(parsed.raw_tasks, enrichedContext, complexity)
|
||||||
assignCliExecutionIds(tasks, input.session.id) // MANDATORY: Assign CLI execution IDs
|
assignCliExecutionIds(tasks, input.session.id) // MANDATORY: Assign CLI execution IDs
|
||||||
const flow_control = parsed.flow_control?.execution_order?.length > 0 ? parsed.flow_control : inferFlowControl(tasks)
|
const flow_control = parsed.flow_control?.execution_order?.length > 0 ? parsed.flow_control : inferFlowControl(tasks)
|
||||||
const focus_paths = [...new Set(tasks.flatMap(t => [t.file || t.scope, ...t.modification_points.map(m => m.file)]).filter(Boolean))]
|
const focus_paths = [...new Set(tasks.flatMap(t => [t.file || t.scope, ...t.modification_points.map(m => m.file)]).filter(Boolean))]
|
||||||
@@ -338,7 +562,7 @@ function generatePlanObject(parsed, enrichedContext, input, schemaType) {
|
|||||||
flow_control,
|
flow_control,
|
||||||
focus_paths,
|
focus_paths,
|
||||||
estimated_time: parsed.time_estimate || `${tasks.length * 30} minutes`,
|
estimated_time: parsed.time_estimate || `${tasks.length * 30} minutes`,
|
||||||
recommended_execution: (input.complexity === "Low" || input.severity === "Low") ? "Agent" : "Codex",
|
recommended_execution: (complexity === "Low" || input.severity === "Low") ? "Agent" : "Codex",
|
||||||
_metadata: {
|
_metadata: {
|
||||||
timestamp: new Date().toISOString(),
|
timestamp: new Date().toISOString(),
|
||||||
source: "cli-lite-planning-agent",
|
source: "cli-lite-planning-agent",
|
||||||
@@ -348,6 +572,15 @@ function generatePlanObject(parsed, enrichedContext, input, schemaType) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Add complexity-specific top-level fields
|
||||||
|
if (complexity === "Medium" || complexity === "High") {
|
||||||
|
base.design_decisions = parsed.design_decisions || []
|
||||||
|
}
|
||||||
|
|
||||||
|
if (complexity === "High") {
|
||||||
|
base.data_flow = parsed.data_flow || null
|
||||||
|
}
|
||||||
|
|
||||||
// Schema-specific fields
|
// Schema-specific fields
|
||||||
if (schemaType === 'fix-plan') {
|
if (schemaType === 'fix-plan') {
|
||||||
return {
|
return {
|
||||||
@@ -361,10 +594,63 @@ function generatePlanObject(parsed, enrichedContext, input, schemaType) {
|
|||||||
return {
|
return {
|
||||||
...base,
|
...base,
|
||||||
approach: parsed.approach || "Step-by-step implementation",
|
approach: parsed.approach || "Step-by-step implementation",
|
||||||
complexity: input.complexity || "Medium"
|
complexity
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Enhanced task validation with complexity-specific fields
|
||||||
|
// Enhanced task validation with complexity-specific fields.
// Fills in safe defaults for any base field a raw task is missing, then
// layers on rationale/verification (Medium/High) and risks/code_skeleton
// (High). Interface unchanged: (rawTasks, enrichedContext, complexity)
// -> array of enhanced task objects.
function validateAndEnhanceTasks(rawTasks, enrichedContext, complexity) {
  return rawTasks.map((task, idx) => {
    // Resolve the id once up front so the verification default below can
    // reference it safely — the previous code called task.id.toLowerCase()
    // directly, which threw a TypeError when the raw task had no id.
    const id = task.id || `T${idx + 1}`
    const enhanced = {
      id,
      title: task.title || "Unnamed task",
      scope: task.scope || task.file || inferFile(task, enrichedContext),
      action: task.action || inferAction(task.title),
      description: task.description || task.title,
      modification_points: task.modification_points?.length > 0
        ? task.modification_points
        : [{ file: task.scope || task.file, target: "main", change: task.description }],
      implementation: task.implementation?.length >= 2
        ? task.implementation
        : [`Analyze ${task.scope || task.file}`, `Implement ${task.title}`, `Add error handling`],
      // relevant_files may be absent on a sparse context — fall back to []
      // instead of crashing on .slice of undefined.
      reference: task.reference || {
        pattern: "existing patterns",
        files: enrichedContext?.relevant_files?.slice(0, 2) ?? [],
        examples: "Follow existing structure"
      },
      acceptance: task.acceptance?.length >= 1
        ? task.acceptance
        : [`${task.title} completed`, `Follows conventions`],
      depends_on: task.depends_on || []
    }

    // Medium/High: explain why the approach was chosen and how to verify it
    if (complexity === "Medium" || complexity === "High") {
      enhanced.rationale = task.rationale || {
        chosen_approach: "Standard implementation approach",
        alternatives_considered: [],
        decision_factors: ["Maintainability", "Performance"],
        tradeoffs: "None significant"
      }
      enhanced.verification = task.verification || {
        unit_tests: [`test_${id.toLowerCase()}_basic`],
        integration_tests: [],
        manual_checks: ["Verify expected behavior"],
        success_metrics: ["All tests pass"]
      }
    }

    // High only: risk register plus optional code skeleton
    if (complexity === "High") {
      enhanced.risks = task.risks || [{
        description: "Implementation complexity",
        probability: "Low",
        impact: "Medium",
        mitigation: "Incremental development with checkpoints"
      }]
      enhanced.code_skeleton = task.code_skeleton || null
    }

    return enhanced
  })
}
|
||||||
```
|
```
|
||||||
|
|
||||||
### Error Handling
|
### Error Handling
|
||||||
|
|||||||
@@ -327,7 +327,7 @@ for (const call of sequential) {
|
|||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
function buildExecutionPrompt(batch) {
|
function buildExecutionPrompt(batch) {
|
||||||
// Task template (4 parts: Modification Points → How → Reference → Done)
|
// Task template (6 parts: Modification Points → Why → How → Reference → Risks → Done)
|
||||||
const formatTask = (t) => `
|
const formatTask = (t) => `
|
||||||
## ${t.title}
|
## ${t.title}
|
||||||
|
|
||||||
@@ -336,18 +336,38 @@ function buildExecutionPrompt(batch) {
|
|||||||
### Modification Points
|
### Modification Points
|
||||||
${t.modification_points.map(p => `- **${p.file}** → \`${p.target}\`: ${p.change}`).join('\n')}
|
${t.modification_points.map(p => `- **${p.file}** → \`${p.target}\`: ${p.change}`).join('\n')}
|
||||||
|
|
||||||
|
${t.rationale ? `
|
||||||
|
### Why this approach (Medium/High)
|
||||||
|
${t.rationale.chosen_approach}
|
||||||
|
${t.rationale.decision_factors?.length > 0 ? `\nKey factors: ${t.rationale.decision_factors.join(', ')}` : ''}
|
||||||
|
${t.rationale.tradeoffs ? `\nTradeoffs: ${t.rationale.tradeoffs}` : ''}
|
||||||
|
` : ''}
|
||||||
|
|
||||||
### How to do it
|
### How to do it
|
||||||
${t.description}
|
${t.description}
|
||||||
|
|
||||||
${t.implementation.map(step => `- ${step}`).join('\n')}
|
${t.implementation.map(step => `- ${step}`).join('\n')}
|
||||||
|
|
||||||
|
${t.code_skeleton ? `
|
||||||
|
### Code skeleton (High)
|
||||||
|
${t.code_skeleton.interfaces?.length > 0 ? `**Interfaces**: ${t.code_skeleton.interfaces.map(i => `\`${i.name}\` - ${i.purpose}`).join(', ')}` : ''}
|
||||||
|
${t.code_skeleton.key_functions?.length > 0 ? `\n**Functions**: ${t.code_skeleton.key_functions.map(f => `\`${f.signature}\` - ${f.purpose}`).join(', ')}` : ''}
|
||||||
|
${t.code_skeleton.classes?.length > 0 ? `\n**Classes**: ${t.code_skeleton.classes.map(c => `\`${c.name}\` - ${c.purpose}`).join(', ')}` : ''}
|
||||||
|
` : ''}
|
||||||
|
|
||||||
### Reference
|
### Reference
|
||||||
- Pattern: ${t.reference?.pattern || 'N/A'}
|
- Pattern: ${t.reference?.pattern || 'N/A'}
|
||||||
- Files: ${t.reference?.files?.join(', ') || 'N/A'}
|
- Files: ${t.reference?.files?.join(', ') || 'N/A'}
|
||||||
${t.reference?.examples ? `- Notes: ${t.reference.examples}` : ''}
|
${t.reference?.examples ? `- Notes: ${t.reference.examples}` : ''}
|
||||||
|
|
||||||
|
${t.risks?.length > 0 ? `
|
||||||
|
### Risk mitigations (High)
|
||||||
|
${t.risks.map(r => `- ${r.description} → **${r.mitigation}**`).join('\n')}
|
||||||
|
` : ''}
|
||||||
|
|
||||||
### Done when
|
### Done when
|
||||||
${t.acceptance.map(c => `- [ ] ${c}`).join('\n')}`
|
${t.acceptance.map(c => `- [ ] ${c}`).join('\n')}
|
||||||
|
${t.verification?.success_metrics?.length > 0 ? `\n**Success metrics**: ${t.verification.success_metrics.join(', ')}` : ''}`
|
||||||
|
|
||||||
// Build prompt
|
// Build prompt
|
||||||
const sections = []
|
const sections = []
|
||||||
@@ -364,6 +384,9 @@ ${t.acceptance.map(c => `- [ ] ${c}`).join('\n')}`
|
|||||||
if (clarificationContext) {
|
if (clarificationContext) {
|
||||||
context.push(`### Clarifications\n${Object.entries(clarificationContext).map(([q, a]) => `- ${q}: ${a}`).join('\n')}`)
|
context.push(`### Clarifications\n${Object.entries(clarificationContext).map(([q, a]) => `- ${q}: ${a}`).join('\n')}`)
|
||||||
}
|
}
|
||||||
|
if (executionContext?.planObject?.data_flow?.diagram) {
|
||||||
|
context.push(`### Data Flow\n${executionContext.planObject.data_flow.diagram}`)
|
||||||
|
}
|
||||||
if (executionContext?.session?.artifacts?.plan) {
|
if (executionContext?.session?.artifacts?.plan) {
|
||||||
context.push(`### Artifacts\nPlan: ${executionContext.session.artifacts.plan}`)
|
context.push(`### Artifacts\nPlan: ${executionContext.session.artifacts.plan}`)
|
||||||
}
|
}
|
||||||
@@ -462,11 +485,13 @@ Progress tracked at batch level (not individual task level). Icons: ⚡ (paralle
|
|||||||
|
|
||||||
**Skip Condition**: Only run if `codeReviewTool ≠ "Skip"`
|
**Skip Condition**: Only run if `codeReviewTool ≠ "Skip"`
|
||||||
|
|
||||||
**Review Focus**: Verify implementation against plan acceptance criteria
|
**Review Focus**: Verify implementation against plan acceptance criteria and verification requirements
|
||||||
- Read plan.json for task acceptance criteria
|
- Read plan.json for task acceptance criteria and verification checklist
|
||||||
- Check each acceptance criterion is fulfilled
|
- Check each acceptance criterion is fulfilled
|
||||||
|
- Verify success metrics from verification field (Medium/High complexity)
|
||||||
|
- Run unit/integration tests specified in verification field
|
||||||
- Validate code quality and identify issues
|
- Validate code quality and identify issues
|
||||||
- Ensure alignment with planned approach
|
- Ensure alignment with planned approach and risk mitigations
|
||||||
|
|
||||||
**Operations**:
|
**Operations**:
|
||||||
- Agent Review: Current agent performs direct review
|
- Agent Review: Current agent performs direct review
|
||||||
@@ -478,17 +503,23 @@ Progress tracked at batch level (not individual task level). Icons: ⚡ (paralle
|
|||||||
|
|
||||||
**Review Criteria**:
|
**Review Criteria**:
|
||||||
- **Acceptance Criteria**: Verify each criterion from plan.tasks[].acceptance
|
- **Acceptance Criteria**: Verify each criterion from plan.tasks[].acceptance
|
||||||
|
- **Verification Checklist** (Medium/High): Check unit_tests, integration_tests, success_metrics from plan.tasks[].verification
|
||||||
- **Code Quality**: Analyze quality, identify issues, suggest improvements
|
- **Code Quality**: Analyze quality, identify issues, suggest improvements
|
||||||
- **Plan Alignment**: Validate implementation matches planned approach
|
- **Plan Alignment**: Validate implementation matches planned approach and risk mitigations
|
||||||
|
|
||||||
**Shared Prompt Template** (used by all CLI tools):
|
**Shared Prompt Template** (used by all CLI tools):
|
||||||
```
|
```
|
||||||
PURPOSE: Code review for implemented changes against plan acceptance criteria
|
PURPOSE: Code review for implemented changes against plan acceptance criteria and verification requirements
|
||||||
TASK: • Verify plan acceptance criteria fulfillment • Analyze code quality • Identify issues • Suggest improvements • Validate plan adherence
|
TASK: • Verify plan acceptance criteria fulfillment • Check verification requirements (unit tests, success metrics) • Analyze code quality • Identify issues • Suggest improvements • Validate plan adherence and risk mitigations
|
||||||
MODE: analysis
|
MODE: analysis
|
||||||
CONTEXT: @**/* @{plan.json} [@{exploration.json}] | Memory: Review lite-execute changes against plan requirements
|
CONTEXT: @**/* @{plan.json} [@{exploration.json}] | Memory: Review lite-execute changes against plan requirements including verification checklist
|
||||||
EXPECTED: Quality assessment with acceptance criteria verification, issue identification, and recommendations. Explicitly check each acceptance criterion from plan.json tasks.
|
EXPECTED: Quality assessment with:
|
||||||
CONSTRAINTS: Focus on plan acceptance criteria and plan adherence | analysis=READ-ONLY
|
- Acceptance criteria verification (all tasks)
|
||||||
|
- Verification checklist validation (Medium/High: unit_tests, integration_tests, success_metrics)
|
||||||
|
- Issue identification
|
||||||
|
- Recommendations
|
||||||
|
Explicitly check each acceptance criterion and verification item from plan.json tasks.
|
||||||
|
CONSTRAINTS: Focus on plan acceptance criteria, verification requirements, and plan adherence | analysis=READ-ONLY
|
||||||
```
|
```
|
||||||
|
|
||||||
**Tool-Specific Execution** (Apply shared prompt template above):
|
**Tool-Specific Execution** (Apply shared prompt template above):
|
||||||
|
|||||||
@@ -143,11 +143,211 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"description": "CLI execution strategy based on task dependencies"
|
"description": "CLI execution strategy based on task dependencies"
|
||||||
|
},
|
||||||
|
"rationale": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"chosen_approach": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "The selected implementation approach and why it was chosen"
|
||||||
|
},
|
||||||
|
"alternatives_considered": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {"type": "string"},
|
||||||
|
"description": "Alternative approaches that were considered but not chosen"
|
||||||
|
},
|
||||||
|
"decision_factors": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {"type": "string"},
|
||||||
|
"description": "Key factors that influenced the decision (performance, maintainability, cost, etc.)"
|
||||||
|
},
|
||||||
|
"tradeoffs": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Known tradeoffs of the chosen approach"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"description": "Design rationale explaining WHY this approach was chosen (required for Medium/High complexity)"
|
||||||
|
},
|
||||||
|
"verification": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"unit_tests": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {"type": "string"},
|
||||||
|
"description": "List of unit test names/descriptions to create"
|
||||||
|
},
|
||||||
|
"integration_tests": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {"type": "string"},
|
||||||
|
"description": "List of integration test names/descriptions to create"
|
||||||
|
},
|
||||||
|
"manual_checks": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {"type": "string"},
|
||||||
|
"description": "Manual verification steps with specific actions"
|
||||||
|
},
|
||||||
|
"success_metrics": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {"type": "string"},
|
||||||
|
"description": "Quantified metrics for success (e.g., 'Response time <200ms', 'Coverage >80%')"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"description": "Detailed verification steps beyond acceptance criteria (required for Medium/High complexity)"
|
||||||
|
},
|
||||||
|
"risks": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {
|
||||||
|
"type": "object",
|
||||||
|
"required": ["description", "probability", "impact", "mitigation"],
|
||||||
|
"properties": {
|
||||||
|
"description": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Description of the risk"
|
||||||
|
},
|
||||||
|
"probability": {
|
||||||
|
"type": "string",
|
||||||
|
"enum": ["Low", "Medium", "High"],
|
||||||
|
"description": "Likelihood of the risk occurring"
|
||||||
|
},
|
||||||
|
"impact": {
|
||||||
|
"type": "string",
|
||||||
|
"enum": ["Low", "Medium", "High"],
|
||||||
|
"description": "Impact severity if the risk occurs"
|
||||||
|
},
|
||||||
|
"mitigation": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Strategy to mitigate or prevent the risk"
|
||||||
|
},
|
||||||
|
"fallback": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Alternative approach if mitigation fails"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"description": "Risk assessment and mitigation strategies (required for High complexity)"
|
||||||
|
},
|
||||||
|
"code_skeleton": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"interfaces": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"name": {"type": "string"},
|
||||||
|
"definition": {"type": "string"},
|
||||||
|
"purpose": {"type": "string"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"description": "Key interface/type definitions"
|
||||||
|
},
|
||||||
|
"key_functions": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"signature": {"type": "string"},
|
||||||
|
"purpose": {"type": "string"},
|
||||||
|
"returns": {"type": "string"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"description": "Critical function signatures"
|
||||||
|
},
|
||||||
|
"classes": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"name": {"type": "string"},
|
||||||
|
"purpose": {"type": "string"},
|
||||||
|
"methods": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {"type": "string"}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"description": "Key class structures"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"description": "Code skeleton with interface/function signatures (required for High complexity)"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"description": "Structured task breakdown (1-10 tasks)"
|
"description": "Structured task breakdown (1-10 tasks)"
|
||||||
},
|
},
|
||||||
|
"data_flow": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"diagram": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "ASCII/text representation of data flow (e.g., 'A → B → C')"
|
||||||
|
},
|
||||||
|
"stages": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {
|
||||||
|
"type": "object",
|
||||||
|
"required": ["stage", "input", "output", "component"],
|
||||||
|
"properties": {
|
||||||
|
"stage": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Stage name (e.g., 'Extraction', 'Processing', 'Storage')"
|
||||||
|
},
|
||||||
|
"input": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Input data format/type"
|
||||||
|
},
|
||||||
|
"output": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Output data format/type"
|
||||||
|
},
|
||||||
|
"component": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Component/module handling this stage"
|
||||||
|
},
|
||||||
|
"transformations": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {"type": "string"},
|
||||||
|
"description": "Data transformations applied in this stage"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"description": "Detailed data flow stages"
|
||||||
|
},
|
||||||
|
"dependencies": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {"type": "string"},
|
||||||
|
"description": "External dependencies or data sources"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"description": "Global data flow design showing how data moves through the system (required for High complexity)"
|
||||||
|
},
|
||||||
|
"design_decisions": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {
|
||||||
|
"type": "object",
|
||||||
|
"required": ["decision", "rationale"],
|
||||||
|
"properties": {
|
||||||
|
"decision": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "The design decision made"
|
||||||
|
},
|
||||||
|
"rationale": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Why this decision was made"
|
||||||
|
},
|
||||||
|
"tradeoff": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "What was traded off for this decision"
|
||||||
|
},
|
||||||
|
"alternatives": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {"type": "string"},
|
||||||
|
"description": "Alternatives that were considered"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"description": "Global design decisions that affect the entire plan"
|
||||||
|
},
|
||||||
"flow_control": {
|
"flow_control": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
|
|||||||
122
PACKAGE_NAME_FIX_SUMMARY.md
Normal file
122
PACKAGE_NAME_FIX_SUMMARY.md
Normal file
@@ -0,0 +1,122 @@
|
|||||||
|
# Package Name Fix Summary
|
||||||
|
|
||||||
|
## 问题描述
|
||||||
|
|
||||||
|
用户在使用 `ccw view` 界面安装 CodexLens 时遇到错误:
|
||||||
|
|
||||||
|
```
|
||||||
|
Error: Failed to install codexlens: Using Python 3.12.3 environment at: .codexlens/venv
|
||||||
|
× No solution found when resolving dependencies:
|
||||||
|
╰─▶ Because there are no versions of codexlens[semantic] and you require codexlens[semantic], we can conclude that your requirements are unsatisfiable.
|
||||||
|
```
|
||||||
|
|
||||||
|
## 根本原因
|
||||||
|
|
||||||
|
1. **包名不一致**:pyproject.toml 中定义的包名是 `codex-lens`(带连字符),但代码中尝试安装 `codexlens`(没有连字符)
|
||||||
|
2. **包未发布到 PyPI**:`codex-lens` 是本地开发包,没有发布到 PyPI,只能通过本地路径安装
|
||||||
|
3. **本地路径查找逻辑问题**:`findLocalPackagePath()` 函数在非开发环境(从 node_modules 运行)时会提前返回 null,导致找不到本地路径
|
||||||
|
|
||||||
|
## 修复内容
|
||||||
|
|
||||||
|
### 1. 核心文件修复 (ccw/src/tools/codex-lens.ts)
|
||||||
|
|
||||||
|
#### 1.1 修改 `findLocalPackagePath()` 函数
|
||||||
|
- **移除** `isDevEnvironment()` 早期返回逻辑
|
||||||
|
- **添加** 更多本地路径搜索位置(包括父目录)
|
||||||
|
- **总是** 尝试查找本地路径,即使从 node_modules 运行
|
||||||
|
|
||||||
|
#### 1.2 修改 `bootstrapWithUv()` 函数
|
||||||
|
- **移除** PyPI 安装的 fallback 逻辑
|
||||||
|
- **改为** 找不到本地路径时直接返回错误,提供清晰的修复指导
|
||||||
|
|
||||||
|
#### 1.3 修改 `installSemanticWithUv()` 函数
|
||||||
|
- **移除** PyPI 安装的 fallback 逻辑
|
||||||
|
- **改为** 找不到本地路径时直接返回错误
|
||||||
|
|
||||||
|
#### 1.4 修改 `bootstrapVenv()` 函数(pip fallback)
|
||||||
|
- **移除** PyPI 安装的 fallback 逻辑
|
||||||
|
- **改为** 找不到本地路径时抛出错误
|
||||||
|
|
||||||
|
#### 1.5 修复包名引用
|
||||||
|
- 将所有 `codexlens` 更改为 `codex-lens`(3 处)
|
||||||
|
|
||||||
|
### 2. 文档和脚本修复
|
||||||
|
|
||||||
|
修复以下文件中的包名引用(`codexlens` → `codex-lens`):
|
||||||
|
|
||||||
|
- ✅ `ccw/scripts/memory_embedder.py`
|
||||||
|
- ✅ `ccw/scripts/README-memory-embedder.md`
|
||||||
|
- ✅ `ccw/scripts/QUICK-REFERENCE.md`
|
||||||
|
- ✅ `ccw/scripts/IMPLEMENTATION-SUMMARY.md`
|
||||||
|
|
||||||
|
## 修复后的行为
|
||||||
|
|
||||||
|
### 安装流程
|
||||||
|
|
||||||
|
1. **查找本地路径**:
|
||||||
|
- 检查 `process.cwd()/codex-lens`
|
||||||
|
- 检查 `__dirname/../../../codex-lens`(项目根目录)
|
||||||
|
- 检查 `homedir()/codex-lens`
|
||||||
|
- 检查 `parent(cwd)/codex-lens`(新增)
|
||||||
|
|
||||||
|
2. **本地安装**(找到路径):
|
||||||
|
```bash
|
||||||
|
uv pip install -e /path/to/codex-lens[semantic]
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **失败并提示**(找不到路径):
|
||||||
|
```
|
||||||
|
Cannot find codex-lens directory for local installation.
|
||||||
|
|
||||||
|
codex-lens is a local development package (not published to PyPI) and must be installed from local files.
|
||||||
|
|
||||||
|
To fix this:
|
||||||
|
1. Ensure the 'codex-lens' directory exists in your project root
|
||||||
|
2. Verify pyproject.toml exists in codex-lens directory
|
||||||
|
3. Run ccw from the correct working directory
|
||||||
|
4. Or manually install: cd codex-lens && pip install -e .[semantic]
|
||||||
|
```
|
||||||
|
|
||||||
|
## 验证步骤
|
||||||
|
|
||||||
|
1. 确认 `codex-lens` 目录存在于项目根目录
|
||||||
|
2. 确认 `codex-lens/pyproject.toml` 存在
|
||||||
|
3. 从项目根目录运行 ccw
|
||||||
|
4. 尝试安装 CodexLens semantic 依赖
|
||||||
|
|
||||||
|
## 正确的手动安装方式
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 从项目根目录
|
||||||
|
cd D:\Claude_dms3\codex-lens
|
||||||
|
pip install -e .[semantic]
|
||||||
|
|
||||||
|
# 或者使用绝对路径
|
||||||
|
pip install -e D:\Claude_dms3\codex-lens[semantic]
|
||||||
|
|
||||||
|
# GPU 加速(CUDA)
|
||||||
|
pip install -e .[semantic-gpu]
|
||||||
|
|
||||||
|
# GPU 加速(DirectML,Windows)
|
||||||
|
pip install -e .[semantic-directml]
|
||||||
|
```
|
||||||
|
|
||||||
|
## 注意事项
|
||||||
|
|
||||||
|
- **不要** 使用 `pip install codex-lens[semantic]`(会失败,包未发布到 PyPI)
|
||||||
|
- **必须** 使用 `-e` 参数进行 editable 安装
|
||||||
|
- **必须** 从正确的工作目录运行(包含 codex-lens 目录的目录)
|
||||||
|
|
||||||
|
## 影响范围
|
||||||
|
|
||||||
|
- ✅ ccw view 界面安装
|
||||||
|
- ✅ 命令行 UV 安装
|
||||||
|
- ✅ 命令行 pip fallback 安装
|
||||||
|
- ✅ 文档和脚本中的安装说明
|
||||||
|
|
||||||
|
## 测试建议
|
||||||
|
|
||||||
|
1. 从全局安装的 ccw 运行(npm install -g)
|
||||||
|
2. 从本地开发目录运行(npm link)
|
||||||
|
3. 从不同的工作目录运行
|
||||||
|
4. 测试所有三种 GPU 模式(cpu, cuda, directml)
|
||||||
@@ -124,11 +124,11 @@ Generated automatically for each match:
|
|||||||
|
|
||||||
### Required
|
### Required
|
||||||
- `numpy`: Array operations and cosine similarity
|
- `numpy`: Array operations and cosine similarity
|
||||||
- `codexlens[semantic]`: Embedding generation
|
- `codex-lens[semantic]`: Embedding generation
|
||||||
|
|
||||||
### Installation
|
### Installation
|
||||||
```bash
|
```bash
|
||||||
pip install numpy codexlens[semantic]
|
pip install numpy codex-lens[semantic]
|
||||||
```
|
```
|
||||||
|
|
||||||
## Testing
|
## Testing
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
pip install numpy codexlens[semantic]
|
pip install numpy codex-lens[semantic]
|
||||||
```
|
```
|
||||||
|
|
||||||
## Commands
|
## Commands
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ Bridge CCW to CodexLens semantic search by generating and searching embeddings f
|
|||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
pip install numpy codexlens[semantic]
|
pip install numpy codex-lens[semantic]
|
||||||
```
|
```
|
||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
|
|||||||
@@ -30,7 +30,7 @@ try:
|
|||||||
from codexlens.semantic.factory import clear_embedder_cache
|
from codexlens.semantic.factory import clear_embedder_cache
|
||||||
from codexlens.config import Config as CodexLensConfig
|
from codexlens.config import Config as CodexLensConfig
|
||||||
except ImportError:
|
except ImportError:
|
||||||
print("Error: CodexLens not found. Install with: pip install codexlens[semantic]", file=sys.stderr)
|
print("Error: CodexLens not found. Install with: pip install codex-lens[semantic]", file=sys.stderr)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -98,9 +98,8 @@ function broadcastStreamEvent(eventType: string, payload: Record<string, unknown
|
|||||||
req.on('socket', (socket) => {
|
req.on('socket', (socket) => {
|
||||||
socket.unref();
|
socket.unref();
|
||||||
});
|
});
|
||||||
req.on('error', (err) => {
|
req.on('error', () => {
|
||||||
// Log errors for debugging - helps diagnose hook communication issues
|
// Silently ignore - dashboard may not be running
|
||||||
console.error(`[Hook] Failed to send ${eventType}:`, (err as Error).message);
|
|
||||||
});
|
});
|
||||||
req.on('timeout', () => {
|
req.on('timeout', () => {
|
||||||
req.destroy();
|
req.destroy();
|
||||||
|
|||||||
@@ -60,11 +60,9 @@ function isDevEnvironment(): boolean {
|
|||||||
* breaking any editable (-e) pip installs that reference them.
|
* breaking any editable (-e) pip installs that reference them.
|
||||||
*/
|
*/
|
||||||
function findLocalPackagePath(packageName: string): string | null {
|
function findLocalPackagePath(packageName: string): string | null {
|
||||||
// If running from node_modules, skip local paths entirely - use PyPI
|
// Always try to find local paths first, even when running from node_modules.
|
||||||
if (!isDevEnvironment()) {
|
// codex-lens is a local development package not published to PyPI,
|
||||||
console.log(`[CodexLens] Running from node_modules - will use PyPI for ${packageName}`);
|
// so we must find it locally regardless of execution context.
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
const possiblePaths = [
|
const possiblePaths = [
|
||||||
join(process.cwd(), packageName),
|
join(process.cwd(), packageName),
|
||||||
@@ -72,16 +70,28 @@ function findLocalPackagePath(packageName: string): string | null {
|
|||||||
join(homedir(), packageName),
|
join(homedir(), packageName),
|
||||||
];
|
];
|
||||||
|
|
||||||
|
// Also check common workspace locations
|
||||||
|
const cwd = process.cwd();
|
||||||
|
const cwdParent = dirname(cwd);
|
||||||
|
if (cwdParent !== cwd) {
|
||||||
|
possiblePaths.push(join(cwdParent, packageName));
|
||||||
|
}
|
||||||
|
|
||||||
for (const localPath of possiblePaths) {
|
for (const localPath of possiblePaths) {
|
||||||
// Skip paths inside node_modules
|
// Skip paths inside node_modules
|
||||||
if (isInsideNodeModules(localPath)) {
|
if (isInsideNodeModules(localPath)) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
if (existsSync(join(localPath, 'pyproject.toml'))) {
|
if (existsSync(join(localPath, 'pyproject.toml'))) {
|
||||||
|
console.log(`[CodexLens] Found local ${packageName} at: ${localPath}`);
|
||||||
return localPath;
|
return localPath;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (!isDevEnvironment()) {
|
||||||
|
console.log(`[CodexLens] Running from node_modules - will try PyPI for ${packageName}`);
|
||||||
|
}
|
||||||
|
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -662,21 +672,24 @@ async function bootstrapWithUv(gpuMode: GpuMode = 'cpu'): Promise<BootstrapResul
|
|||||||
// Determine extras based on GPU mode
|
// Determine extras based on GPU mode
|
||||||
const extras = GPU_MODE_EXTRAS[gpuMode];
|
const extras = GPU_MODE_EXTRAS[gpuMode];
|
||||||
|
|
||||||
if (codexLensPath) {
|
if (!codexLensPath) {
|
||||||
console.log(`[CodexLens] Installing from local path with UV: ${codexLensPath}`);
|
// codex-lens is a local-only package, not published to PyPI
|
||||||
console.log(`[CodexLens] Extras: ${extras.join(', ')}`);
|
const errorMsg = `Cannot find codex-lens directory for local installation.\n\n` +
|
||||||
const installResult = await uv.installFromProject(codexLensPath, extras);
|
`codex-lens is a local development package (not published to PyPI) and must be installed from local files.\n\n` +
|
||||||
if (!installResult.success) {
|
`To fix this:\n` +
|
||||||
return { success: false, error: `Failed to install codexlens: ${installResult.error}` };
|
`1. Ensure the 'codex-lens' directory exists in your project root\n` +
|
||||||
}
|
` Expected location: D:\\Claude_dms3\\codex-lens\n` +
|
||||||
} else {
|
`2. Verify pyproject.toml exists: D:\\Claude_dms3\\codex-lens\\pyproject.toml\n` +
|
||||||
// Install from PyPI with extras
|
`3. Run ccw from the correct working directory (e.g., D:\\Claude_dms3)\n` +
|
||||||
console.log('[CodexLens] Installing from PyPI with UV...');
|
`4. Or manually install: cd D:\\Claude_dms3\\codex-lens && pip install -e .[${extras.join(',')}]`;
|
||||||
const packageSpec = `codexlens[${extras.join(',')}]`;
|
return { success: false, error: errorMsg };
|
||||||
const installResult = await uv.install([packageSpec]);
|
}
|
||||||
if (!installResult.success) {
|
|
||||||
return { success: false, error: `Failed to install codexlens: ${installResult.error}` };
|
console.log(`[CodexLens] Installing from local path with UV: ${codexLensPath}`);
|
||||||
}
|
console.log(`[CodexLens] Extras: ${extras.join(', ')}`);
|
||||||
|
const installResult = await uv.installFromProject(codexLensPath, extras);
|
||||||
|
if (!installResult.success) {
|
||||||
|
return { success: false, error: `Failed to install codex-lens: ${installResult.error}` };
|
||||||
}
|
}
|
||||||
|
|
||||||
// Clear cache after successful installation
|
// Clear cache after successful installation
|
||||||
@@ -733,20 +746,22 @@ async function installSemanticWithUv(gpuMode: GpuMode = 'cpu'): Promise<Bootstra
|
|||||||
console.log(`[CodexLens] Extras: ${extras.join(', ')}`);
|
console.log(`[CodexLens] Extras: ${extras.join(', ')}`);
|
||||||
|
|
||||||
// Install with extras - UV handles dependency conflicts automatically
|
// Install with extras - UV handles dependency conflicts automatically
|
||||||
if (codexLensPath) {
|
if (!codexLensPath) {
|
||||||
console.log(`[CodexLens] Reinstalling from local path with semantic extras...`);
|
// codex-lens is a local-only package, not published to PyPI
|
||||||
const installResult = await uv.installFromProject(codexLensPath, extras);
|
const errorMsg = `Cannot find codex-lens directory for local installation.\n\n` +
|
||||||
if (!installResult.success) {
|
`codex-lens is a local development package (not published to PyPI) and must be installed from local files.\n\n` +
|
||||||
return { success: false, error: `Installation failed: ${installResult.error}` };
|
`To fix this:\n` +
|
||||||
}
|
`1. Ensure the 'codex-lens' directory exists in your project root\n` +
|
||||||
} else {
|
`2. Verify pyproject.toml exists in codex-lens directory\n` +
|
||||||
// Install from PyPI
|
`3. Run ccw from the correct working directory\n` +
|
||||||
const packageSpec = `codexlens[${extras.join(',')}]`;
|
`4. Or manually install: cd codex-lens && pip install -e .[${extras.join(',')}]`;
|
||||||
console.log(`[CodexLens] Installing ${packageSpec} from PyPI...`);
|
return { success: false, error: errorMsg };
|
||||||
const installResult = await uv.install([packageSpec]);
|
}
|
||||||
if (!installResult.success) {
|
|
||||||
return { success: false, error: `Installation failed: ${installResult.error}` };
|
console.log(`[CodexLens] Reinstalling from local path with semantic extras...`);
|
||||||
}
|
const installResult = await uv.installFromProject(codexLensPath, extras);
|
||||||
|
if (!installResult.success) {
|
||||||
|
return { success: false, error: `Installation failed: ${installResult.error}` };
|
||||||
}
|
}
|
||||||
|
|
||||||
console.log(`[CodexLens] Semantic dependencies installed successfully (${gpuMode} mode)`);
|
console.log(`[CodexLens] Semantic dependencies installed successfully (${gpuMode} mode)`);
|
||||||
@@ -933,31 +948,43 @@ async function bootstrapVenv(): Promise<BootstrapResult> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Install codexlens with semantic extras
|
// Install codex-lens
|
||||||
try {
|
try {
|
||||||
console.log('[CodexLens] Installing codexlens package...');
|
console.log('[CodexLens] Installing codex-lens package...');
|
||||||
const pipPath =
|
const pipPath =
|
||||||
process.platform === 'win32'
|
process.platform === 'win32'
|
||||||
? join(CODEXLENS_VENV, 'Scripts', 'pip.exe')
|
? join(CODEXLENS_VENV, 'Scripts', 'pip.exe')
|
||||||
: join(CODEXLENS_VENV, 'bin', 'pip');
|
: join(CODEXLENS_VENV, 'bin', 'pip');
|
||||||
|
|
||||||
// Try local path if in development (not from node_modules), then fall back to PyPI
|
// Try local path - codex-lens is local-only, not published to PyPI
|
||||||
const codexLensPath = findLocalCodexLensPath();
|
const codexLensPath = findLocalCodexLensPath();
|
||||||
|
|
||||||
if (codexLensPath) {
|
if (!codexLensPath) {
|
||||||
console.log(`[CodexLens] Installing from local path: ${codexLensPath}`);
|
// codex-lens is a local-only package, not published to PyPI
|
||||||
execSync(`"${pipPath}" install -e "${codexLensPath}"`, { stdio: 'inherit', timeout: EXEC_TIMEOUTS.PACKAGE_INSTALL });
|
const errorMsg = `Cannot find codex-lens directory for local installation.\n\n` +
|
||||||
} else {
|
`codex-lens is a local development package (not published to PyPI) and must be installed from local files.\n\n` +
|
||||||
console.log('[CodexLens] Installing from PyPI...');
|
`To fix this:\n` +
|
||||||
execSync(`"${pipPath}" install codexlens`, { stdio: 'inherit', timeout: EXEC_TIMEOUTS.PACKAGE_INSTALL });
|
`1. Ensure the 'codex-lens' directory exists in your project root\n` +
|
||||||
|
`2. Verify pyproject.toml exists in codex-lens directory\n` +
|
||||||
|
`3. Run ccw from the correct working directory\n` +
|
||||||
|
`4. Or manually install: cd codex-lens && pip install -e .`;
|
||||||
|
throw new Error(errorMsg);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
console.log(`[CodexLens] Installing from local path: ${codexLensPath}`);
|
||||||
|
execSync(`"${pipPath}" install -e "${codexLensPath}"`, { stdio: 'inherit', timeout: EXEC_TIMEOUTS.PACKAGE_INSTALL });
|
||||||
|
|
||||||
// Clear cache after successful installation
|
// Clear cache after successful installation
|
||||||
clearVenvStatusCache();
|
clearVenvStatusCache();
|
||||||
clearSemanticStatusCache();
|
clearSemanticStatusCache();
|
||||||
return { success: true };
|
return { success: true };
|
||||||
} catch (err) {
|
} catch (err) {
|
||||||
return { success: false, error: `Failed to install codexlens: ${(err as Error).message}` };
|
const errorMsg = `Failed to install codex-lens: ${(err as Error).message}\n\n` +
|
||||||
|
`codex-lens is a local development package. To fix this:\n` +
|
||||||
|
`1. Ensure the 'codex-lens' directory exists in your project root\n` +
|
||||||
|
`2. Run the installation from the correct working directory\n` +
|
||||||
|
`3. Or manually install: cd codex-lens && pip install -e .`;
|
||||||
|
return { success: false, error: errorMsg };
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user