feat: Enhance issue and solution management with new UI components and functionality

- Added internationalization support for new issue and solution-related strings in i18n.js.
- Implemented a solution detail modal in issue-manager.js to display solution information and bind/unbind actions.
- Enhanced the skill loading function to combine project and user skills in hook-manager.js.
- Improved queue rendering logic to handle empty states and display queue statistics in issue-manager.js.
- Introduced command modals for queue operations, allowing users to generate execution queues via CLI commands.
- Added functionality to auto-generate issue IDs and regenerate them in the create issue modal.
- Implemented detailed rendering of solution tasks, including acceptance criteria and modification points.
catlog22
2025-12-27 11:27:45 +08:00
parent 8f310339df
commit 4da06864f8
11 changed files with 2490 additions and 169 deletions

View File

@@ -199,7 +199,7 @@ async function ripgrepFallback(issue, projectRoot) {
## Phase 3: Solution Planning
### Task Decomposition (Closed-Loop)
```javascript
function decomposeTasks(issue, exploration) {
@@ -217,15 +217,104 @@ function decomposeTasks(issue, exploration) {
action: inferAction(group),
description: group.description,
modification_points: group.points,
// Phase 1: Implementation
implementation: generateImplementationSteps(group, exploration),
// Phase 2: Test
test: generateTestRequirements(group, exploration, issue.lifecycle_requirements),
// Phase 3: Regression
regression: generateRegressionChecks(group, issue.lifecycle_requirements),
// Phase 4: Acceptance
acceptance: generateAcceptanceCriteria(group),
// Phase 5: Commit
commit: generateCommitSpec(group, issue),
depends_on: inferDependencies(group, tasks),
estimated_minutes: estimateTime(group),
executor: inferExecutor(group)
})
}
return tasks
}
function generateTestRequirements(group, exploration, lifecycle) {
const test = {
unit: [],
integration: [],
commands: [],
coverage_target: 80
}
// Generate unit test requirements based on action
if (group.action === 'Create' || group.action === 'Implement') {
test.unit.push(`Test ${group.title} happy path`)
test.unit.push(`Test ${group.title} error cases`)
}
// Generate test commands based on project patterns
if (exploration.test_patterns?.includes('jest')) {
test.commands.push(`npm test -- --grep '${group.scope}'`)
} else if (exploration.test_patterns?.includes('vitest')) {
test.commands.push(`npx vitest run ${group.scope}`)
} else {
test.commands.push(`npm test`)
}
// Add integration tests if needed
if (lifecycle?.test_strategy === 'integration' || lifecycle?.test_strategy === 'e2e') {
test.integration.push(`Integration test for ${group.title}`)
}
return test
}
function generateRegressionChecks(group, lifecycle) {
const regression = []
switch (lifecycle?.regression_scope) {
case 'full':
regression.push('npm test')
regression.push('npm run test:integration')
break
case 'related':
regression.push(`npm test -- --grep '${group.scope}'`)
regression.push(`npm test -- --changed`)
break
case 'affected':
default:
regression.push(`npm test -- --findRelatedTests ${group.points[0]?.file}`)
break
}
return regression
}
function generateCommitSpec(group, issue) {
const typeMap = {
'Create': 'feat',
'Implement': 'feat',
'Update': 'feat',
'Fix': 'fix',
'Refactor': 'refactor',
'Test': 'test',
'Configure': 'chore',
'Delete': 'chore'
}
const scope = group.scope.split('/').pop()?.replace(/\..*$/, '') || 'core'
return {
type: typeMap[group.action] || 'feat',
scope: scope,
message_template: `${typeMap[group.action] || 'feat'}(${scope}): ${group.title.toLowerCase()}\n\n${group.description || ''}`,
breaking: false
}
}
```
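`decomposeTasks` above also calls `inferExecutor(group)`, which is not defined in this hunk. A minimal sketch of what such a helper could look like, using the executor values from the task schema (`codex` | `gemini` | `agent` | `auto`); the routing heuristics are illustrative assumptions, not part of this commit:
```javascript
// Hypothetical sketch - the real inferExecutor is not shown in this diff.
function inferExecutor(group) {
  const files = (group.points || []).map(p => p.file)
  // Documentation-only changes could go to a lighter executor.
  if (files.length > 0 && files.every(f => /\.(md|txt)$/.test(f))) return 'gemini'
  // Larger or refactoring work is routed to codex; otherwise let the queue decide.
  if (files.length > 3 || group.action === 'Refactor') return 'codex'
  return 'auto'
}
```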
### Action Type Inference
@@ -347,11 +436,15 @@ function generateImplementationSteps(group, exploration) {
}
```
### Acceptance Criteria Generation (Closed-Loop)
```javascript
function generateAcceptanceCriteria(task) {
const acceptance = {
criteria: [],
verification: [],
manual_checks: []
}
// Action-specific criteria
const actionCriteria = {
@@ -363,14 +456,41 @@ function generateAcceptanceCriteria(task) {
'Configure': [`Configuration applied correctly`]
}
acceptance.criteria.push(...(actionCriteria[task.action] || []))
// Add quantified criteria
if (task.modification_points.length > 0) {
acceptance.criteria.push(`${task.modification_points.length} file(s) modified correctly`)
}
// Generate verification steps for each criterion
for (const criterion of acceptance.criteria) {
acceptance.verification.push(generateVerificationStep(criterion, task))
}
// Limit to reasonable counts
acceptance.criteria = acceptance.criteria.slice(0, 4)
acceptance.verification = acceptance.verification.slice(0, 4)
return acceptance
}
function generateVerificationStep(criterion, task) {
// Generate executable verification for criterion
if (criterion.includes('file created')) {
return `ls -la ${task.modification_points[0]?.file} && head -20 ${task.modification_points[0]?.file}`
}
if (criterion.includes('test')) {
return `npm test -- --grep '${task.scope}'`
}
if (criterion.includes('export')) {
return `node -e "console.log(require('./${task.modification_points[0]?.file}'))"`
}
if (criterion.includes('API') || criterion.includes('endpoint')) {
return `curl -X GET http://localhost:3000/${task.scope} -v`
}
// Default: describe manual check
return `Manually verify: ${criterion}`
}
```
@@ -413,20 +533,61 @@ function validateSolution(solution) {
function validateTask(task) {
const errors = []
// Basic fields
if (!/^T\d+$/.test(task.id)) errors.push('Invalid task ID format')
if (!task.title?.trim()) errors.push('Missing title')
if (!task.scope?.trim()) errors.push('Missing scope')
if (!['Create', 'Update', 'Implement', 'Refactor', 'Configure', 'Test', 'Fix', 'Delete'].includes(task.action)) {
errors.push('Invalid action type')
}
// Phase 1: Implementation
if (!task.implementation || task.implementation.length < 2) {
errors.push('Need 2+ implementation steps')
}
// Phase 2: Test
if (!task.test) {
errors.push('Missing test phase')
} else {
if (!task.test.commands || task.test.commands.length < 1) {
errors.push('Need 1+ test commands')
}
}
// Phase 3: Regression
if (!task.regression || task.regression.length < 1) {
errors.push('Need 1+ regression checks')
}
// Phase 4: Acceptance
if (!task.acceptance) {
errors.push('Missing acceptance phase')
} else {
if (!task.acceptance.criteria || task.acceptance.criteria.length < 1) {
errors.push('Need 1+ acceptance criteria')
}
if (!task.acceptance.verification || task.acceptance.verification.length < 1) {
errors.push('Need 1+ verification steps')
}
if (task.acceptance.criteria?.some(a => /works correctly|good performance|properly/i.test(a))) {
errors.push('Vague acceptance criteria')
}
}
// Phase 5: Commit
if (!task.commit) {
errors.push('Missing commit phase')
} else {
if (!['feat', 'fix', 'refactor', 'test', 'docs', 'chore'].includes(task.commit.type)) {
errors.push('Invalid commit type')
}
if (!task.commit.scope?.trim()) {
errors.push('Missing commit scope')
}
if (!task.commit.message_template?.trim()) {
errors.push('Missing commit message template')
}
}
return errors
@@ -500,7 +661,9 @@ function generateOutput(solutions, conflicts) {
}
```
### Solution Schema (Closed-Loop Tasks)
Each task MUST include ALL 5 lifecycle phases:
```json
{
@@ -517,10 +680,62 @@ function generateOutput(solutions, conflicts) {
"modification_points": [
{ "file": "src/middleware/auth.ts", "target": "new file", "change": "Create middleware" }
],
"implementation": ["Step 1", "Step 2", "..."],
"acceptance": ["Criterion 1", "Criterion 2"],
"implementation": [
"Create auth.ts file in src/middleware/",
"Implement JWT token extraction from Authorization header",
"Add token validation using jsonwebtoken library",
"Handle error cases (missing, invalid, expired tokens)",
"Export middleware function"
],
"test": {
"unit": [
"Test valid token passes through",
"Test invalid token returns 401",
"Test expired token returns 401",
"Test missing token returns 401"
],
"integration": [
"Protected route returns 401 without token",
"Protected route returns 200 with valid token"
],
"commands": [
"npm test -- --grep 'auth middleware'",
"npm run test:coverage -- src/middleware/auth.ts"
],
"coverage_target": 80
},
"regression": [
"npm test -- --grep 'existing routes'",
"npm run test:integration"
],
"acceptance": {
"criteria": [
"Middleware validates JWT tokens successfully",
"Returns 401 with appropriate error for invalid tokens",
"Passes decoded user payload to request context"
],
"verification": [
"curl -H 'Authorization: Bearer <valid>' /api/protected → 200",
"curl /api/protected → 401 {error: 'No token'}",
"curl -H 'Authorization: Bearer invalid' /api/protected → 401"
],
"manual_checks": []
},
"commit": {
"type": "feat",
"scope": "auth",
"message_template": "feat(auth): add JWT validation middleware\n\n- Implement token extraction and validation\n- Add error handling for invalid/expired tokens\n- Export middleware for route protection",
"breaking": false
},
"depends_on": [],
"estimated_minutes": 30
"estimated_minutes": 30,
"executor": "codex"
}
],
"exploration_context": {
@@ -622,6 +837,14 @@ Before outputting solution:
6. Include file:line references in modification_points where possible
7. Detect and report cross-issue file conflicts in batch mode
8. Include exploration_context with patterns and relevant_files
9. **Generate ALL 5 lifecycle phases for each task**:
- `implementation`: 2-7 concrete steps
- `test`: unit tests, commands, coverage target
- `regression`: regression check commands
- `acceptance`: criteria + verification steps
- `commit`: type, scope, message template
10. Infer test commands from project's test framework
11. Generate commit message following conventional commits
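Items 10-11 above assume the agent can detect the project's test framework before generating commands. A minimal, hypothetical sketch of such detection from package.json follows (the function name and heuristics are assumptions, not part of this commit):
```javascript
// Hypothetical sketch: derive exploration.test_patterns from package.json.
const fs = require('fs')
const path = require('path')

function detectTestPatterns(projectRoot) {
  const pkgPath = path.join(projectRoot, 'package.json')
  if (!fs.existsSync(pkgPath)) return []
  const pkg = JSON.parse(fs.readFileSync(pkgPath, 'utf8'))
  const deps = { ...pkg.dependencies, ...pkg.devDependencies }
  const patterns = []
  if (deps.jest) patterns.push('jest')
  if (deps.vitest) patterns.push('vitest')
  if (deps.mocha) patterns.push('mocha')
  return patterns
}
```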
**NEVER**:
1. Execute implementation (return plan only)
@@ -632,3 +855,5 @@ Before outputting solution:
6. Assume file exists without verification
7. Generate more than 10 tasks per issue
8. Skip ACE search (unless fallback triggered)
9. **Omit any of the 5 lifecycle phases** (test, regression, acceptance, commit)
10. Skip verification steps in acceptance criteria

View File

@@ -148,15 +148,15 @@ TodoWrite({
});
```
### Phase 3: Codex Coordination (Single Task Mode - Full Lifecycle)
```javascript
// Execute tasks - single codex instance per task with full lifecycle
async function executeTask(queueItem) {
const codexPrompt = `
## Single Task Execution - CLOSED-LOOP LIFECYCLE
You are executing ONE task from the issue queue. Each task has 5 phases that MUST ALL complete successfully.
### Step 1: Fetch Task
Run this command to get your task:
@@ -164,35 +164,71 @@ Run this command to get your task:
ccw issue next
\`\`\`
This returns JSON with full lifecycle definition:
- task.implementation: Implementation steps
- task.test: Test requirements and commands
- task.regression: Regression check commands
- task.acceptance: Acceptance criteria and verification
- task.commit: Commit specification
### Step 2: Execute Full Lifecycle
**Phase 1: IMPLEMENT**
1. Follow task.implementation steps in order
2. Modify files specified in modification_points
3. Use context.relevant_files for reference
4. Use context.patterns for code style
**Phase 2: TEST**
1. Run test commands from task.test.commands
2. Ensure all unit tests pass (task.test.unit)
3. Run integration tests if specified (task.test.integration)
4. Verify coverage meets task.test.coverage_target if specified
5. If tests fail → fix code and re-run, do NOT proceed until tests pass
**Phase 3: REGRESSION**
1. Run all commands in task.regression
2. Ensure no existing tests are broken
3. If regression fails → fix and re-run
**Phase 4: ACCEPTANCE**
1. Verify each criterion in task.acceptance.criteria
2. Execute verification steps in task.acceptance.verification
3. Complete any manual_checks if specified
4. All criteria MUST pass before proceeding
**Phase 5: COMMIT**
1. Stage all modified files
2. Use task.commit.message_template as commit message
3. Commit with: git commit -m "$(cat <<'EOF'\n<message>\nEOF\n)"
4. If commit_strategy is 'per-task', commit now
5. If commit_strategy is 'atomic' or 'squash', stage but don't commit
### Step 3: Report Completion
When ALL phases complete successfully:
\`\`\`bash
ccw issue complete <queue_id> --result '{
"files_modified": ["path1", "path2"],
"tests_passed": true,
"regression_passed": true,
"acceptance_passed": true,
"committed": true,
"commit_hash": "<hash>",
"summary": "What was done"
}'
\`\`\`
If any phase fails and cannot be fixed:
\`\`\`bash
ccw issue fail <queue_id> --reason "Phase X failed: <details>"
\`\`\`
### Rules
- NEVER read task files directly - use ccw issue next
- Execute the FULL task before marking complete
- Do NOT loop - execute ONE task only
- Report accurate files_modified in result
- NEVER skip any lifecycle phase
- Tests MUST pass before proceeding to acceptance
- Regression MUST pass before commit
- ALL acceptance criteria MUST be verified
- Report accurate lifecycle status in result
### Start Now
Begin by running: ccw issue next

View File

@@ -15,7 +15,7 @@ Creates a new structured issue from either:
Outputs a well-formed issue entry to `.workflow/issues/issues.jsonl`.
## Issue Structure (Closed-Loop)
```typescript
interface Issue {
@@ -27,14 +27,22 @@ interface Issue {
source: 'github' | 'text'; // Input source type
source_url?: string; // GitHub URL if applicable
labels?: string[]; // Categorization labels
// Structured extraction
problem_statement: string; // What is the problem?
expected_behavior?: string; // What should happen?
actual_behavior?: string; // What actually happens?
affected_components?: string[];// Files/modules affected
reproduction_steps?: string[]; // Steps to reproduce
// Closed-loop requirements (guide plan generation)
lifecycle_requirements: {
test_strategy: 'unit' | 'integration' | 'e2e' | 'manual' | 'auto';
regression_scope: 'affected' | 'related' | 'full'; // Which tests to run
acceptance_type: 'automated' | 'manual' | 'both'; // How to verify
commit_strategy: 'per-task' | 'squash' | 'atomic'; // Commit granularity
};
// Metadata
bound_solution_id: null;
solution_count: 0;
@@ -43,6 +51,52 @@ interface Issue {
}
```
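For illustration, a single issues.jsonl line under this schema might look like the following; all values are made up, and fields elided by the diff (such as id and title) are assumed from their usage elsewhere in this commit:
```json
{"id": "GH-123", "title": "Add JWT auth middleware", "source": "github", "labels": ["auth"], "problem_statement": "Protected API routes accept requests without a token", "expected_behavior": "Requests without a valid JWT are rejected with 401", "actual_behavior": "All requests pass through", "affected_components": ["src/middleware"], "reproduction_steps": ["curl /api/protected without a token"], "lifecycle_requirements": {"test_strategy": "auto", "regression_scope": "affected", "acceptance_type": "automated", "commit_strategy": "per-task"}, "bound_solution_id": null, "solution_count": 0}
```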
## Task Lifecycle (Each Task is Closed-Loop)
When `/issue:plan` generates tasks, each task MUST include:
```typescript
interface SolutionTask {
id: string;
title: string;
scope: string;
action: string;
// Phase 1: Implementation
implementation: string[]; // Step-by-step implementation
modification_points: { file: string; target: string; change: string }[];
// Phase 2: Testing
test: {
unit?: string[]; // Unit test requirements
integration?: string[]; // Integration test requirements
commands?: string[]; // Test commands to run
coverage_target?: number; // Minimum coverage %
};
// Phase 3: Regression
regression: string[]; // Regression check commands/points
// Phase 4: Acceptance
acceptance: {
criteria: string[]; // Testable acceptance criteria
verification: string[]; // How to verify each criterion
manual_checks?: string[]; // Manual verification if needed
};
// Phase 5: Commit
commit: {
type: 'feat' | 'fix' | 'refactor' | 'test' | 'docs' | 'chore';
scope: string; // e.g., "auth", "api"
message_template: string; // Commit message template
breaking?: boolean;
};
depends_on: string[];
executor: 'codex' | 'gemini' | 'agent' | 'auto';
}
```
## Usage
```bash
@@ -206,7 +260,58 @@ async function parseTextDescription(text) {
}
```
### Phase 4: Lifecycle Configuration
```javascript
// Ask for lifecycle requirements (or use smart defaults)
const lifecycleAnswer = AskUserQuestion({
questions: [
{
question: 'Test strategy for this issue?',
header: 'Test',
multiSelect: false,
options: [
{ label: 'auto', description: 'Auto-detect based on affected files (Recommended)' },
{ label: 'unit', description: 'Unit tests only' },
{ label: 'integration', description: 'Integration tests' },
{ label: 'e2e', description: 'End-to-end tests' },
{ label: 'manual', description: 'Manual testing only' }
]
},
{
question: 'Regression scope?',
header: 'Regression',
multiSelect: false,
options: [
{ label: 'affected', description: 'Only affected module tests (Recommended)' },
{ label: 'related', description: 'Affected + dependent modules' },
{ label: 'full', description: 'Full test suite' }
]
},
{
question: 'Commit strategy?',
header: 'Commit',
multiSelect: false,
options: [
{ label: 'per-task', description: 'One commit per task (Recommended)' },
{ label: 'atomic', description: 'Single commit for entire issue' },
{ label: 'squash', description: 'Squash at the end' }
]
}
]
});
const lifecycle = {
test_strategy: lifecycleAnswer.test || 'auto',
regression_scope: lifecycleAnswer.regression || 'affected',
acceptance_type: 'automated',
commit_strategy: lifecycleAnswer.commit || 'per-task'
};
issueData.lifecycle_requirements = lifecycle;
```
### Phase 5: User Confirmation
```javascript
// Show parsed data and ask for confirmation
@@ -224,6 +329,11 @@ ${issueData.expected_behavior ? `### Expected Behavior\n${issueData.expected_beh
${issueData.actual_behavior ? `### Actual Behavior\n${issueData.actual_behavior}\n` : ''}
${issueData.affected_components?.length ? `### Affected Components\n${issueData.affected_components.map(c => `- ${c}`).join('\n')}\n` : ''}
${issueData.reproduction_steps?.length ? `### Reproduction Steps\n${issueData.reproduction_steps.map((s, i) => `${i+1}. ${s}`).join('\n')}\n` : ''}
### Lifecycle Configuration
- **Test Strategy**: ${lifecycle.test_strategy}
- **Regression Scope**: ${lifecycle.regression_scope}
- **Commit Strategy**: ${lifecycle.commit_strategy}
`);
// Ask user to confirm or edit
@@ -264,7 +374,7 @@ if (answer.includes('Edit Title')) {
}
```
### Phase 6: Write to JSONL
```javascript
// Construct final issue object
@@ -280,14 +390,22 @@ const newIssue = {
source: issueData.source,
source_url: issueData.source_url || null,
labels: [...(issueData.labels || []), ...labels],
// Structured fields
problem_statement: issueData.problem_statement,
expected_behavior: issueData.expected_behavior || null,
actual_behavior: issueData.actual_behavior || null,
affected_components: issueData.affected_components || [],
reproduction_steps: issueData.reproduction_steps || [],
// Closed-loop lifecycle requirements
lifecycle_requirements: issueData.lifecycle_requirements || {
test_strategy: 'auto',
regression_scope: 'affected',
acceptance_type: 'automated',
commit_strategy: 'per-task'
},
// Metadata
bound_solution_id: null,
solution_count: 0,

View File

@@ -133,28 +133,59 @@ TodoWrite({
for (const [batchIndex, batch] of batches.entries()) {
updateTodo(`Plan batch ${batchIndex + 1}`, 'in_progress');
// Build issue prompt for agent with lifecycle requirements
const issuePrompt = `
## Issues to Plan (Closed-Loop Tasks Required)
${batch.map((issue, i) => `
### Issue ${i + 1}: ${issue.id}
**Title**: ${issue.title}
**Context**: ${issue.context || 'No context provided'}
**Affected Components**: ${issue.affected_components?.join(', ') || 'Not specified'}
**Lifecycle Requirements**:
- Test Strategy: ${issue.lifecycle_requirements?.test_strategy || 'auto'}
- Regression Scope: ${issue.lifecycle_requirements?.regression_scope || 'affected'}
- Commit Strategy: ${issue.lifecycle_requirements?.commit_strategy || 'per-task'}
`).join('\n')}
## Project Root
${process.cwd()}
## Requirements - CLOSED-LOOP TASKS
Each task MUST include ALL lifecycle phases:
### 1. Implementation
- implementation: string[] (2-7 concrete steps)
- modification_points: { file, target, change }[]
### 2. Test
- test.unit: string[] (unit test requirements)
- test.integration: string[] (integration test requirements if needed)
- test.commands: string[] (actual test commands to run)
- test.coverage_target: number (minimum coverage %)
### 3. Regression
- regression: string[] (commands to run for regression check)
- Based on issue's regression_scope setting
### 4. Acceptance
- acceptance.criteria: string[] (testable acceptance criteria)
- acceptance.verification: string[] (how to verify each criterion)
- acceptance.manual_checks: string[] (manual checks if needed)
### 5. Commit
- commit.type: feat|fix|refactor|test|docs|chore
- commit.scope: string (module name)
- commit.message_template: string (full commit message)
- commit.breaking: boolean
## Additional Requirements
1. Use ACE semantic search (mcp__ace-tool__search_context) for exploration
2. Detect file conflicts if multiple issues
3. Generate executable test commands based on project's test framework
4. Infer commit scope from affected files
`;
// Launch issue-plan-agent (combines explore + plan)
@@ -281,7 +312,7 @@ ${issues.map(i => {
`);
```
## Solution Format (Closed-Loop Tasks)
Each solution line in `solutions/{issue-id}.jsonl`:
@@ -299,18 +330,56 @@ Each solution line in `solutions/{issue-id}.jsonl`:
"modification_points": [
{ "file": "src/middleware/auth.ts", "target": "new file", "change": "Create middleware" }
],
"implementation": [
"Create auth.ts file",
"Implement JWT validation",
"Add error handling",
"Export middleware"
"Create auth.ts file in src/middleware/",
"Implement JWT token validation using jsonwebtoken",
"Add error handling for invalid/expired tokens",
"Export middleware function"
],
"acceptance": [
"Middleware validates JWT tokens",
"Returns 401 for invalid tokens"
"test": {
"unit": [
"Test valid token passes through",
"Test invalid token returns 401",
"Test expired token returns 401",
"Test missing token returns 401"
],
"commands": [
"npm test -- --grep 'auth middleware'",
"npm run test:coverage -- src/middleware/auth.ts"
],
"coverage_target": 80
},
"regression": [
"npm test -- --grep 'protected routes'",
"npm run test:integration -- auth"
],
"acceptance": {
"criteria": [
"Middleware validates JWT tokens successfully",
"Returns 401 for invalid or missing tokens",
"Passes decoded token to request context"
],
"verification": [
"curl -H 'Authorization: Bearer valid_token' /api/protected → 200",
"curl /api/protected → 401",
"curl -H 'Authorization: Bearer invalid' /api/protected → 401"
]
},
"commit": {
"type": "feat",
"scope": "auth",
"message_template": "feat(auth): add JWT validation middleware\n\n- Implement token validation\n- Add error handling for invalid tokens\n- Export for route protection",
"breaking": false
},
"depends_on": [],
"estimated_minutes": 30
"estimated_minutes": 30,
"executor": "codex"
}
],
"exploration_context": {

View File

@@ -20,30 +20,67 @@ Queue formation command using **issue-queue-agent** that analyzes all bound solu
- Parallel/Sequential group assignment
- Output global queue.json
## Storage Structure (Queue History)
```
.workflow/issues/
├── issues.jsonl # All issues (one per line)
├── queues/ # Queue history directory
│ ├── index.json # Queue index (active + history)
│ ├── {queue-id}.json # Individual queue files
│ └── ...
└── solutions/
├── {issue-id}.jsonl # Solutions for issue
└── ...
```
### Queue Index Schema
```json
{
"active_queue_id": "QUE-20251227-143000",
"queues": [
{
"id": "QUE-20251227-143000",
"status": "active",
"issue_ids": ["GH-123", "GH-124"],
"total_tasks": 8,
"completed_tasks": 3,
"created_at": "2025-12-27T14:30:00Z"
},
{
"id": "QUE-20251226-100000",
"status": "completed",
"issue_ids": ["GH-120"],
"total_tasks": 5,
"completed_tasks": 5,
"created_at": "2025-12-26T10:00:00Z",
"completed_at": "2025-12-26T12:30:00Z"
}
]
}
```
## Usage
```bash
/issue:queue [FLAGS]
# Examples
/issue:queue # Form NEW queue from all bound solutions
/issue:queue --issue GH-123 # Form queue for specific issue only
/issue:queue --append GH-124 # Append to active queue
/issue:queue --list # List all queues (history)
/issue:queue --switch QUE-xxx # Switch active queue
/issue:queue --archive # Archive completed active queue
# Flags
--issue <id> Form queue for specific issue only
--append <id> Append issue to active queue (don't create new)
--list List all queues with status
--switch <queue-id> Switch active queue
--archive Archive current queue (mark completed)
--clear <queue-id> Delete a queue from history
```
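To make the `--switch` flag concrete, here is a minimal sketch of how it could update `queues/index.json` as described above; this is an illustrative assumption, not the actual ccw implementation:
```javascript
// Hypothetical sketch of /issue:queue --switch <queue-id>.
const fs = require('fs')
const path = require('path')

function switchActiveQueue(projectRoot, queueId) {
  const indexPath = path.join(projectRoot, '.workflow/issues/queues/index.json')
  const index = JSON.parse(fs.readFileSync(indexPath, 'utf8'))
  if (!index.queues.some(q => q.id === queueId)) {
    throw new Error(`Unknown queue: ${queueId}`)
  }
  index.active_queue_id = queueId
  fs.writeFileSync(indexPath, JSON.stringify(index, null, 2))
  return index
}
```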
## Execution Process
@@ -215,10 +252,15 @@ ${(queueOutput.execution_groups || []).map(g => {
## Queue Schema
Output `queues/{queue-id}.json`:
```json
{
"id": "QUE-20251227-143000",
"name": "Auth Feature Queue",
"status": "active",
"issue_ids": ["GH-123", "GH-124"],
"queue": [
{
"queue_id": "Q-001",
@@ -233,6 +275,7 @@ Output `queue.json`:
"queued_at": "2025-12-26T10:00:00Z"
}
],
"conflicts": [
{
"type": "file_conflict",
@@ -244,24 +287,32 @@ Output `queue.json`:
"resolved": true
}
],
"execution_groups": [
{ "id": "P1", "type": "parallel", "task_count": 3, "tasks": ["GH-123:T1", "GH-124:T1", "GH-125:T1"] },
{ "id": "S2", "type": "sequential", "task_count": 2, "tasks": ["GH-123:T2", "GH-124:T2"] }
],
"_metadata": {
"version": "2.0",
"storage": "jsonl",
"total_tasks": 5,
"total_conflicts": 1,
"resolved_conflicts": 1,
"parallel_groups": 1,
"sequential_groups": 1,
"timestamp": "2025-12-26T10:00:00Z",
"pending_count": 3,
"completed_count": 2,
"failed_count": 0,
"created_at": "2025-12-26T10:00:00Z",
"updated_at": "2025-12-26T11:00:00Z",
"source": "issue-queue-agent"
}
}
```
### Queue ID Format
```
QUE-YYYYMMDD-HHMMSS
e.g.: QUE-20251227-143052
```
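A minimal sketch of producing an ID in this format (illustrative only):
```javascript
// Build a QUE-YYYYMMDD-HHMMSS identifier from the current local time.
function generateQueueId(date = new Date()) {
  const pad = n => String(n).padStart(2, '0')
  const ymd = `${date.getFullYear()}${pad(date.getMonth() + 1)}${pad(date.getDate())}`
  const hms = `${pad(date.getHours())}${pad(date.getMinutes())}${pad(date.getSeconds())}`
  return `QUE-${ymd}-${hms}`
}
```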
## Semantic Priority Rules
| Factor | Priority Boost |