Compare commits

...

1 Commit

Author SHA1 Message Date
catlog22
1cd96b90e8 chore: bump version to 7.2.7
- Enhance smart-search with advanced MCP integration
- Add GEMINI_API_KEY configuration support in codexlens
- Update MCP server with new tool handlers
- Add tests for smart-search MCP usage
- Update documentation
2026-03-11 16:48:16 +08:00
16 changed files with 1215 additions and 111 deletions

View File

@@ -61,6 +61,7 @@ const mockConfig: CodexLensConfig = {
const mockEnv: Record<string, string> = {
CODEXLENS_EMBEDDING_BACKEND: 'local',
CODEXLENS_EMBEDDING_MODEL: 'fast',
CODEXLENS_AUTO_EMBED_MISSING: 'true',
CODEXLENS_USE_GPU: 'true',
CODEXLENS_RERANKER_ENABLED: 'true',
CODEXLENS_RERANKER_BACKEND: 'onnx',
@@ -141,6 +142,7 @@ describe('SettingsTab', () => {
expect(screen.getByText(/Concurrency/i)).toBeInTheDocument();
expect(screen.getByText(/Cascade/i)).toBeInTheDocument();
expect(screen.getByText(/Chunking/i)).toBeInTheDocument();
expect(screen.getByText(/Auto Build Missing Vectors/i)).toBeInTheDocument();
});
it('should initialize index dir from config', () => {

View File

@@ -56,6 +56,13 @@ export const envVarGroupsSchema: EnvVarGroupsSchema = {
},
],
},
CODEXLENS_AUTO_EMBED_MISSING: {
key: 'CODEXLENS_AUTO_EMBED_MISSING',
labelKey: 'codexlens.envField.autoEmbedMissing',
type: 'checkbox',
default: 'true',
settingsPath: 'embedding.auto_embed_missing',
},
CODEXLENS_USE_GPU: {
key: 'CODEXLENS_USE_GPU',
labelKey: 'codexlens.envField.useGpu',

View File

@@ -256,8 +256,22 @@ export function McpServerDialog({
// Parse JSON config and populate form
const parseJsonConfig = useCallback(() => {
try {
const config = JSON.parse(jsonInput);
let config = JSON.parse(jsonInput);
let extractedServerName = '';
// Auto-detect mcpServers wrapper format (Claude Code config format)
// Supports both: { "mcpServers": { "name": {...} } } and direct { "command": ... }
if (config.mcpServers && typeof config.mcpServers === 'object' && !Array.isArray(config.mcpServers)) {
const serverNames = Object.keys(config.mcpServers);
if (serverNames.length > 0) {
extractedServerName = serverNames[0];
const serverConfig = config.mcpServers[extractedServerName];
if (serverConfig && typeof serverConfig === 'object') {
config = serverConfig;
}
}
}
// Detect transport type based on config structure
if (config.url) {
// HTTP transport
@@ -278,6 +292,7 @@ export function McpServerDialog({
setFormData(prev => ({
...prev,
name: extractedServerName || prev.name,
url: config.url || '',
headers,
bearerTokenEnvVar: config.bearer_token_env_var || config.bearerTokenEnvVar || '',
@@ -291,6 +306,7 @@ export function McpServerDialog({
setFormData(prev => ({
...prev,
name: extractedServerName || prev.name,
command: config.command || '',
args,
env,

View File

@@ -298,6 +298,7 @@
"envField": {
"backend": "Backend",
"model": "Model",
"autoEmbedMissing": "Auto Build Missing Vectors",
"useGpu": "Use GPU",
"highAvailability": "High Availability",
"loadBalanceStrategy": "Load Balance Strategy",

View File

@@ -298,6 +298,7 @@
"envField": {
"backend": "后端",
"model": "模型",
"autoEmbedMissing": "缺失向量时自动构建",
"useGpu": "使用 GPU",
"highAvailability": "高可用",
"loadBalanceStrategy": "负载均衡策略",

View File

@@ -955,6 +955,9 @@ export async function handleCodexLensConfigRoutes(ctx: RouteContext): Promise<bo
if (settings.embedding?.use_gpu !== undefined) {
settingsDefaults['CODEXLENS_USE_GPU'] = String(settings.embedding.use_gpu);
}
if (settings.embedding?.auto_embed_missing !== undefined) {
settingsDefaults['CODEXLENS_AUTO_EMBED_MISSING'] = String(settings.embedding.auto_embed_missing);
}
if (settings.embedding?.strategy) {
settingsDefaults['CODEXLENS_EMBEDDING_STRATEGY'] = settings.embedding.strategy;
}
@@ -1219,6 +1222,7 @@ export async function handleCodexLensConfigRoutes(ctx: RouteContext): Promise<bo
'CODEXLENS_EMBEDDING_BACKEND': { path: ['embedding', 'backend'] },
'CODEXLENS_EMBEDDING_MODEL': { path: ['embedding', 'model'] },
'CODEXLENS_USE_GPU': { path: ['embedding', 'use_gpu'], transform: v => v === 'true' },
'CODEXLENS_AUTO_EMBED_MISSING': { path: ['embedding', 'auto_embed_missing'], transform: v => v === 'true' },
'CODEXLENS_EMBEDDING_STRATEGY': { path: ['embedding', 'strategy'] },
'CODEXLENS_EMBEDDING_COOLDOWN': { path: ['embedding', 'cooldown'], transform: v => parseFloat(v) },
'CODEXLENS_RERANKER_BACKEND': { path: ['reranker', 'backend'] },

View File

@@ -20,6 +20,7 @@ const SERVER_VERSION = '6.2.0';
// Environment variable names for documentation
const ENV_PROJECT_ROOT = 'CCW_PROJECT_ROOT';
const ENV_ALLOWED_DIRS = 'CCW_ALLOWED_DIRS';
const STDIO_DISCONNECT_ERROR_CODES = new Set(['EPIPE', 'ERR_STREAM_DESTROYED']);
// Default enabled tools (core set - file operations, core memory, and smart search)
const DEFAULT_TOOLS: string[] = ['write_file', 'edit_file', 'read_file', 'read_many_files', 'read_outline', 'core_memory', 'smart_search'];
@@ -67,6 +68,47 @@ function formatToolResult(result: unknown): string {
return String(result);
}
/**
 * Type guard for errors caused by a torn-down stdio pipe, so an orphaned
 * MCP process can shut down cleanly instead of crashing.
 */
function isStdioDisconnectError(error: unknown): error is NodeJS.ErrnoException {
  // First check for a known errno code (EPIPE / ERR_STREAM_DESTROYED).
  if (typeof error === 'object' && error !== null) {
    const { code } = error as NodeJS.ErrnoException;
    if (typeof code === 'string' && STDIO_DISCONNECT_ERROR_CODES.has(code)) {
      return true;
    }
  }
  // Fall back to message sniffing for wrapped "broken pipe" errors.
  return error instanceof Error && /broken pipe/i.test(error.message);
}
/**
 * Write a line to stderr on a best-effort basis; during teardown the stream
 * may already be gone, and logging must never throw from a shutdown path.
 */
function safeStderrWrite(message: string): void {
  try {
    const stderr = process.stderr;
    // Skip entirely once the stream is destroyed or no longer writable.
    if (!stderr.destroyed && stderr.writable) {
      stderr.write(`${message}\n`);
    }
  } catch {
    // stderr is mid-teardown; silently drop the message.
  }
}
/**
 * Log an arbitrary thrown value with a prefix via safeStderrWrite.
 * Error instances get their message plus stack; anything else is stringified.
 */
function safeLogError(prefix: string, error: unknown): void {
  if (!(error instanceof Error)) {
    safeStderrWrite(`${prefix}: ${String(error)}`);
    return;
  }
  safeStderrWrite(`${prefix}: ${error.message}`);
  if (error.stack) {
    safeStderrWrite(error.stack);
  }
}
/**
* Create and configure the MCP server
*/
@@ -151,28 +193,77 @@ function createServer(): Server {
async function main(): Promise<void> {
const server = createServer();
const transport = new StdioServerTransport();
let shutdownPromise: Promise<void> | null = null;
const shutdown = (reason: string, exitCode = 0, error?: unknown): Promise<void> => {
if (shutdownPromise) {
return shutdownPromise;
}
if (error && !isStdioDisconnectError(error)) {
safeLogError(`[${SERVER_NAME}] ${reason}`, error);
}
shutdownPromise = (async () => {
try {
await server.close();
} catch (closeError) {
if (!isStdioDisconnectError(closeError)) {
safeLogError(`[${SERVER_NAME}] Failed to close server`, closeError);
}
}
process.exit(exitCode);
})();
return shutdownPromise;
};
const handleStreamClose = (streamName: string) => () => {
void shutdown(`${streamName} disconnected`);
};
const handleStreamError = (streamName: string) => (error: unknown) => {
const exitCode = isStdioDisconnectError(error) ? 0 : 1;
void shutdown(`${streamName} stream error`, exitCode, error);
};
// Connect server to transport
await server.connect(transport);
// Error handling - prevent process crashes from closing transport
process.stdin.once('end', handleStreamClose('stdin'));
process.stdin.once('close', handleStreamClose('stdin'));
process.stdin.once('error', handleStreamError('stdin'));
process.stdout.once('close', handleStreamClose('stdout'));
process.stdout.once('error', handleStreamError('stdout'));
process.stderr.once('close', handleStreamClose('stderr'));
process.stderr.once('error', handleStreamError('stderr'));
// Error handling - stdio disconnects should terminate, other errors stay logged.
process.on('uncaughtException', (error) => {
console.error(`[${SERVER_NAME}] Uncaught exception:`, error.message);
console.error(error.stack);
if (isStdioDisconnectError(error)) {
void shutdown('Uncaught stdio disconnect', 0, error);
return;
}
safeLogError(`[${SERVER_NAME}] Uncaught exception`, error);
});
process.on('unhandledRejection', (reason) => {
console.error(`[${SERVER_NAME}] Unhandled rejection:`, reason);
if (isStdioDisconnectError(reason)) {
void shutdown('Unhandled stdio disconnect', 0, reason);
return;
}
safeLogError(`[${SERVER_NAME}] Unhandled rejection`, reason);
});
process.on('SIGINT', async () => {
await server.close();
process.exit(0);
await shutdown('Received SIGINT');
});
process.on('SIGTERM', async () => {
await server.close();
process.exit(0);
await shutdown('Received SIGTERM');
});
// Log server start (to stderr to not interfere with stdio protocol)

File diff suppressed because it is too large Load Diff

View File

@@ -11,26 +11,46 @@ import { dirname, join } from 'node:path';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const serverPath = join(__dirname, '../bin/ccw-mcp.js');
async function waitForServerStart(child) {
await new Promise((resolve, reject) => {
const timeout = setTimeout(() => {
reject(new Error('Server start timeout'));
}, 5000);
const onData = (data) => {
const message = data.toString();
if (message.includes('started')) {
clearTimeout(timeout);
child.stderr.off('data', onData);
child.off('exit', onExit);
resolve();
}
};
const onExit = (code, signal) => {
clearTimeout(timeout);
child.stderr.off('data', onData);
reject(new Error(`Server exited before start (code=${code}, signal=${signal})`));
};
child.stderr.on('data', onData);
child.once('exit', onExit);
});
}
describe('MCP Server', () => {
let serverProcess;
before(async () => {
// Start the MCP server
const serverPath = join(__dirname, '../bin/ccw-mcp.js');
serverProcess = spawn('node', [serverPath], {
stdio: ['pipe', 'pipe', 'pipe']
});
// Wait for server to start
await new Promise((resolve) => {
serverProcess.stderr.once('data', (data) => {
const message = data.toString();
if (message.includes('started')) {
resolve();
}
});
});
await waitForServerStart(serverProcess);
});
after(() => {
@@ -157,4 +177,43 @@ describe('MCP Server', () => {
// Error could be "not enabled" (filtered by default tools) or "not found" (all tools enabled)
assert(response.result.content[0].text.includes('not enabled') || response.result.content[0].text.includes('not found'));
});
// Regression test: the server must self-terminate with exit code 0 when its
// stdout pipe disappears mid-request, rather than lingering as an orphan.
it('should exit when stdout disconnects during a request', async () => {
// Spawn a dedicated instance so destroying its stdout cannot affect the
// shared server process used by the other tests.
const disconnectedProcess = spawn('node', [serverPath], {
stdio: ['pipe', 'pipe', 'pipe']
});
try {
await waitForServerStart(disconnectedProcess);
// Resolves with the child's exit status; force-kills and fails if the
// server is still alive 1.5s after the disconnect.
const exitPromise = new Promise((resolve, reject) => {
const timeout = setTimeout(() => {
disconnectedProcess.kill('SIGKILL');
reject(new Error('Server did not exit after stdout disconnect'));
}, 1500);
disconnectedProcess.once('exit', (code, signal) => {
clearTimeout(timeout);
resolve({ code, signal });
});
});
// Simulate the MCP client disappearing before the server sends its response.
disconnectedProcess.stdout.destroy();
disconnectedProcess.stdin.write(JSON.stringify({
jsonrpc: '2.0',
id: 4,
method: 'tools/list',
params: {}
}) + '\n');
const exitResult = await exitPromise;
// Clean shutdown exits 0 on its own; a non-null signal would mean the
// SIGKILL watchdog above had to intervene.
assert.equal(exitResult.code, 0);
assert.equal(exitResult.signal, null);
} finally {
// Safety net: never leak the child process if an assertion threw first.
if (disconnectedProcess.exitCode === null) {
disconnectedProcess.kill('SIGKILL');
}
}
});
});

View File

@@ -45,6 +45,19 @@ describe('Smart Search MCP usage defaults and path handling', async () => {
assert.match(props.apiMaxWorkers.description, /endpoint pool/i);
assert.match(schema.description, /apiMaxWorkers=8/i);
assert.match(props.path.description, /single file path/i);
assert.ok(props.output_mode.enum.includes('ace'));
assert.match(props.output_mode.description, /ACE-style/i);
assert.equal(props.output_mode.default, 'ace');
});
it('defaults auto embedding warmup to enabled unless explicitly disabled', () => {
if (!smartSearchModule) return;
const { __testables } = smartSearchModule;
assert.equal(__testables.isAutoEmbedMissingEnabled(undefined), true);
assert.equal(__testables.isAutoEmbedMissingEnabled({}), true);
assert.equal(__testables.isAutoEmbedMissingEnabled({ embedding_auto_embed_missing: true }), true);
assert.equal(__testables.isAutoEmbedMissingEnabled({ embedding_auto_embed_missing: false }), false);
});
it('honors explicit small limit values', async () => {
@@ -58,6 +71,7 @@ describe('Smart Search MCP usage defaults and path handling', async () => {
action: 'search',
query: 'hit',
path: dir,
output_mode: 'full',
limit: 1,
regex: false,
tokenize: false,
@@ -82,6 +96,7 @@ describe('Smart Search MCP usage defaults and path handling', async () => {
action: 'search',
query: 'TARGET_TOKEN',
path: target,
output_mode: 'full',
regex: false,
tokenize: false,
});
@@ -112,6 +127,7 @@ describe('Smart Search MCP usage defaults and path handling', async () => {
action: 'search',
query: wrappedQuery,
path: wrappedPath,
output_mode: 'full',
regex: false,
caseSensitive: false,
});
@@ -121,6 +137,66 @@ describe('Smart Search MCP usage defaults and path handling', async () => {
assert.ok(toolResult.result.results.length >= 1);
});
it('falls back to literal ripgrep matching for invalid regex-like code queries', async () => {
if (!smartSearchModule) return;
const dir = createWorkspace();
const target = join(dir, 'component.ts');
writeFileSync(target, 'defineExpose({ handleResize });\n');
const toolResult = await smartSearchModule.handler({
action: 'search',
query: 'defineExpose({ handleResize',
path: dir,
output_mode: 'full',
limit: 5,
});
assert.equal(toolResult.success, true, toolResult.error);
assert.equal(toolResult.result.success, true);
assert.ok(toolResult.result.results.length >= 1);
assert.match(toolResult.result.metadata.warning, /literal ripgrep matching/i);
});
it('renders grouped ace-style output by default with multi-line chunks', async () => {
if (!smartSearchModule) return;
const dir = createWorkspace();
const target = join(dir, 'ace-target.ts');
writeFileSync(target, [
'const before = 1;',
'const TARGET_TOKEN = 1;',
'const after = 2;',
'',
'function useToken() {',
' return TARGET_TOKEN;',
'}',
].join('\n'));
const toolResult = await smartSearchModule.handler({
action: 'search',
query: 'TARGET_TOKEN',
path: dir,
contextLines: 1,
regex: false,
tokenize: false,
});
assert.equal(toolResult.success, true, toolResult.error);
assert.equal(toolResult.result.success, true);
assert.equal(toolResult.result.results.format, 'ace');
assert.equal(Array.isArray(toolResult.result.results.groups), true);
assert.equal(Array.isArray(toolResult.result.results.sections), true);
assert.equal(toolResult.result.results.groups.length, 1);
assert.equal(toolResult.result.results.groups[0].sections.length, 2);
assert.match(toolResult.result.results.text, /The following code sections were retrieved:/);
assert.match(toolResult.result.results.text, /Path: .*ace-target\.ts/);
assert.match(toolResult.result.results.text, /Chunk 1: lines 1-3/);
assert.match(toolResult.result.results.text, />\s+2 \| const TARGET_TOKEN = 1;/);
assert.match(toolResult.result.results.text, /Chunk 2: lines 5-7/);
assert.equal(toolResult.result.metadata.pagination.total >= 1, true);
});
it('defaults embed selection to local-fast for bulk indexing', () => {
if (!smartSearchModule) return;
@@ -181,6 +257,50 @@ describe('Smart Search MCP usage defaults and path handling', async () => {
assert.equal(smartSearchModule.__testables.hasCentralizedVectorArtifacts(dir), true);
});
it('recognizes CodexLens CLI compatibility failures and invalid regex fallback', () => {
if (!smartSearchModule) return;
const compatibilityError = [
'UsageError: Got unexpected extra arguments (20 0 fts)',
'TypeError: TyperArgument.make_metavar() takes 1 positional argument but 2 were given',
].join('\n');
assert.equal(
smartSearchModule.__testables.isCodexLensCliCompatibilityError(compatibilityError),
true,
);
const resolution = smartSearchModule.__testables.resolveRipgrepQueryMode(
'defineExpose({ handleResize',
true,
true,
);
assert.equal(resolution.regex, false);
assert.equal(resolution.literalFallback, true);
assert.match(resolution.warning, /literal ripgrep matching/i);
});
it('builds actionable index suggestions for unhealthy index states', () => {
if (!smartSearchModule) return;
const suggestions = smartSearchModule.__testables.buildIndexSuggestions(
{
indexed: true,
has_embeddings: false,
embeddings_coverage_percent: 0,
warning: 'Index exists but no embeddings generated. Run smart_search(action="embed") to build the vector index.',
},
{
workingDirectory: 'D:/tmp/demo',
searchPaths: ['.'],
},
);
assert.equal(Array.isArray(suggestions), true);
assert.match(suggestions[0].command, /smart_search\(action="embed"/);
});
it('surfaces backend failure details when fuzzy search fully fails', async () => {
if (!smartSearchModule) return;
@@ -189,6 +309,7 @@ describe('Smart Search MCP usage defaults and path handling', async () => {
action: 'search',
query: 'TARGET_TOKEN',
path: missingPath,
output_mode: 'full',
regex: false,
tokenize: false,
});

View File

@@ -1566,6 +1566,8 @@ def config(
result["embedding_backend"] = embedding["backend"]
if embedding.get("model"):
result["embedding_model"] = embedding["model"]
if embedding.get("auto_embed_missing") is not None:
result["embedding_auto_embed_missing"] = embedding["auto_embed_missing"]
except (json.JSONDecodeError, OSError):
pass # Settings file not readable, continue with defaults
@@ -1584,6 +1586,10 @@ def config(
if env_overrides.get("EMBEDDING_BACKEND"):
result["embedding_backend"] = env_overrides["EMBEDDING_BACKEND"]
result["embedding_backend_source"] = ".env"
auto_embed_missing_override = env_overrides.get("CODEXLENS_AUTO_EMBED_MISSING") or env_overrides.get("AUTO_EMBED_MISSING")
if auto_embed_missing_override:
result["embedding_auto_embed_missing"] = auto_embed_missing_override.lower() in ("true", "1", "yes", "on")
result["embedding_auto_embed_missing_source"] = ".env"
if env_overrides.get("RERANKER_MODEL"):
result["reranker_model"] = env_overrides["RERANKER_MODEL"]
result["reranker_model_source"] = ".env"
@@ -1613,6 +1619,9 @@ def config(
model = result.get('embedding_model', 'code')
model_source = result.get('embedding_model_source', 'settings.json')
console.print(f" Model: {model} [dim]({model_source})[/dim]")
auto_embed_missing = result.get("embedding_auto_embed_missing", True)
auto_embed_missing_source = result.get("embedding_auto_embed_missing_source", "settings.json")
console.print(f" Auto Embed Missing: {auto_embed_missing} [dim]({auto_embed_missing_source})[/dim]")
# Show reranker settings
console.print(f"\n[bold]Reranker[/bold]")
@@ -1647,7 +1656,7 @@ def config(
# Handle reranker and embedding settings (stored in settings.json)
elif key in ("reranker_backend", "reranker_model", "reranker_enabled", "reranker_top_k",
"embedding_backend", "embedding_model", "reranker_api_provider"):
"embedding_backend", "embedding_model", "embedding_auto_embed_missing", "reranker_api_provider"):
settings_file = Path.home() / ".codexlens" / "settings.json"
settings_file.parent.mkdir(parents=True, exist_ok=True)
@@ -1680,6 +1689,8 @@ def config(
settings["embedding"]["backend"] = value
elif key == "embedding_model":
settings["embedding"]["model"] = value
elif key == "embedding_auto_embed_missing":
settings["embedding"]["auto_embed_missing"] = value.lower() in ("true", "1", "yes", "on")
# Save settings
settings_file.write_text(json.dumps(settings, indent=2), encoding="utf-8")

View File

@@ -110,6 +110,7 @@ class Config:
embedding_model: str = "code" # For fastembed: profile (fast/code/multilingual/balanced)
# For litellm: model name from config (e.g., "qwen3-embedding")
embedding_use_gpu: bool = True # For fastembed: whether to use GPU acceleration
embedding_auto_embed_missing: bool = True # Auto-build embeddings in background when indexed projects are searched without vectors
# Indexing/search optimizations
global_symbol_index_enabled: bool = True # Enable project-wide symbol index fast path
@@ -281,6 +282,7 @@ class Config:
"backend": self.embedding_backend,
"model": self.embedding_model,
"use_gpu": self.embedding_use_gpu,
"auto_embed_missing": self.embedding_auto_embed_missing,
"pool_enabled": self.embedding_pool_enabled,
"strategy": self.embedding_strategy,
"cooldown": self.embedding_cooldown,
@@ -376,6 +378,8 @@ class Config:
self.embedding_model = embedding["model"]
if "use_gpu" in embedding:
self.embedding_use_gpu = embedding["use_gpu"]
if "auto_embed_missing" in embedding:
self.embedding_auto_embed_missing = embedding["auto_embed_missing"]
# Load multi-endpoint configuration
if "endpoints" in embedding:
@@ -782,6 +786,14 @@ class Config:
else:
log.warning("Invalid EMBEDDING_BACKEND in .env: %r", embedding_backend)
auto_embed_missing = get_env("AUTO_EMBED_MISSING")
if auto_embed_missing:
self.embedding_auto_embed_missing = _parse_bool(auto_embed_missing)
log.debug(
"Overriding embedding_auto_embed_missing from .env: %s",
self.embedding_auto_embed_missing,
)
embedding_pool = get_env("EMBEDDING_POOL_ENABLED")
if embedding_pool:
value = embedding_pool.lower()

View File

@@ -33,6 +33,7 @@ ENV_VARS = {
# Embedding configuration (overrides settings.json)
"EMBEDDING_MODEL": "Embedding model/profile name (overrides settings.json)",
"EMBEDDING_BACKEND": "Embedding backend: fastembed, litellm",
"AUTO_EMBED_MISSING": "Auto-build embeddings in background when indexed projects are searched without vectors: true/false",
"EMBEDDING_API_KEY": "API key for embedding service",
"EMBEDDING_API_BASE": "Base URL for embedding API",
"EMBEDDING_POOL_ENABLED": "Enable embedding high availability pool: true/false",

View File

@@ -98,16 +98,18 @@ Unified search with content search, file discovery, and semantic search.
{
"name": "smart_search",
"parameters": {
"action": "search | find_files | init | status",
"action": "search | find_files | init | init_force | embed | status | update | watch",
"query": "string (for search)",
"pattern": "glob pattern (for find_files)",
"mode": "fuzzy | semantic (default: fuzzy)",
"output_mode": "full | files_only | count",
"maxResults": "number (default: 20)"
"output_mode": "ace | full | files_only | count (default: ace)",
"maxResults": "number (default: 5)"
}
}
```
`search` now defaults to `output_mode: "ace"`, which groups results by file and renders multi-line code chunks. When the index is missing, embeddings are incomplete, or status parsing looks unhealthy, the response metadata includes actionable `smart_search(...)` suggestions such as `init`, `status`, or `embed`. If an index exists but vectors are missing, `smart_search` can also start a background embedding build automatically when `CODEXLENS_AUTO_EMBED_MISSING` is enabled (default: `true`).
**Usage:**
```javascript
// Fuzzy search (default)

4
package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "claude-code-workflow",
"version": "7.2.4",
"version": "7.2.7",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "claude-code-workflow",
"version": "7.2.4",
"version": "7.2.7",
"hasInstallScript": true,
"license": "MIT",
"workspaces": [

View File

@@ -1,6 +1,6 @@
{
"name": "claude-code-workflow",
"version": "7.2.6",
"version": "7.2.7",
"description": "JSON-driven multi-agent development framework with intelligent CLI orchestration (Gemini/Qwen/Codex), context-first architecture, and automated workflow execution",
"type": "module",
"main": "ccw/dist/index.js",