feat: add orchestrator execution engine, observability panel, and LSP document caching

Wire FlowExecutor into orchestrator routes for actual flow execution with
pause/resume/stop lifecycle management. Add CLI session audit system with
audit-routes backend and Observability tab in IssueHub frontend. Introduce
cli-session-mux for cross-workspace session routing and QueueSendToOrchestrator
UI component. Normalize frontend API response handling for { data: ... }
wrapper format and propagate projectPath through flow hooks.

In codex-lens, add a per-server opened-document cache in StandaloneLspManager
so that didOpen is sent only once per document (subsequent content updates are
sent as didChange notifications instead), and skip the warmup delay in
ChainSearchEngine for LSP server instances that have already been warmed up.
This commit is contained in: (branch list omitted)
Author: catlog22
Date: 2026-02-11 15:38:33 +08:00
parent d0cdee2e68
commit 5a9e54fd70
35 changed files with 5325 additions and 77 deletions

View File

@@ -58,6 +58,10 @@ class ServerState:
restart_count: int = 0
# Queue for producer-consumer pattern - continuous reading puts messages here
message_queue: asyncio.Queue = field(default_factory=asyncio.Queue)
# Track opened documents to avoid redundant didOpen spam (and unnecessary delays).
# Key: document URI -> (version, file_mtime)
opened_documents: Dict[str, Tuple[int, float]] = field(default_factory=dict)
opened_documents_lock: asyncio.Lock = field(default_factory=asyncio.Lock)
class StandaloneLspManager:
@@ -836,28 +840,66 @@ class StandaloneLspManager:
file_path = self._normalize_file_path(file_path)
resolved_path = Path(file_path).resolve()
# Fast path: already opened and unchanged (per-server cache).
try:
content = resolved_path.read_text(encoding="utf-8")
except Exception as e:
logger.error(f"Failed to read file {file_path}: {e}")
return
uri = resolved_path.as_uri()
except Exception:
uri = ""
# Detect language ID from extension
language_id = self.get_language_id(file_path) or "plaintext"
try:
file_mtime = float(resolved_path.stat().st_mtime)
except Exception:
file_mtime = 0.0
logger.debug(f"Opening document: {resolved_path.name} ({len(content)} chars)")
await self._send_notification(state, "textDocument/didOpen", {
"textDocument": {
"uri": resolved_path.as_uri(),
"languageId": language_id,
"version": 1,
"text": content,
}
})
# Serialize open/change notifications per server to avoid races when
# multiple concurrent LSP requests target the same file.
async with state.opened_documents_lock:
existing = state.opened_documents.get(uri) if uri else None
if existing is not None and existing[1] == file_mtime:
return
# Give the language server a brief moment to process the file
# The message queue handles any server requests automatically
await asyncio.sleep(0.5)
try:
content = resolved_path.read_text(encoding="utf-8")
except Exception as e:
logger.error(f"Failed to read file {file_path}: {e}")
return
# Detect language ID from extension
language_id = self.get_language_id(file_path) or "plaintext"
# Send didOpen only once per document; subsequent changes use didChange.
if existing is None:
version = 1
logger.debug(f"Opening document: {resolved_path.name} ({len(content)} chars)")
await self._send_notification(
state,
"textDocument/didOpen",
{
"textDocument": {
"uri": uri or resolved_path.as_uri(),
"languageId": language_id,
"version": version,
"text": content,
}
},
)
else:
version = int(existing[0]) + 1
logger.debug(f"Updating document: {resolved_path.name} ({len(content)} chars)")
await self._send_notification(
state,
"textDocument/didChange",
{
"textDocument": {
"uri": uri or resolved_path.as_uri(),
"version": version,
},
"contentChanges": [{"text": content}],
},
)
if uri:
state.opened_documents[uri] = (version, file_mtime)
# ========== Public LSP Methods ==========

View File

@@ -169,6 +169,9 @@ class ChainSearchEngine:
self._realtime_lsp_keepalive_lock = threading.RLock()
self._realtime_lsp_keepalive = None
self._realtime_lsp_keepalive_key = None
# Track which (workspace_root, config_file) pairs have already been warmed up.
# This avoids paying the warmup sleep on every query when using keep-alive LSP servers.
self._realtime_lsp_warmed_ids: set[tuple[str, str | None]] = set()
def _get_executor(self, max_workers: Optional[int] = None) -> ThreadPoolExecutor:
"""Get or create the shared thread pool executor.
@@ -1609,16 +1612,18 @@ class ChainSearchEngine:
if not seed_nodes:
return coarse_results
effective_warmup_s = warmup_s
async def expand_graph(bridge: LspBridge):
# Warm up analysis: open seed docs and wait a bit so references/call hierarchy are populated.
if warmup_s > 0:
if effective_warmup_s > 0:
for seed in seed_nodes[:3]:
try:
await bridge.get_document_symbols(seed.file_path)
except Exception:
continue
try:
warmup_budget = min(warmup_s, max(0.0, timeout_s * 0.1))
warmup_budget = min(effective_warmup_s, max(0.0, timeout_s * 0.1))
await asyncio.sleep(min(warmup_budget, max(0.0, timeout_s - 0.5)))
except Exception:
pass
@@ -1659,7 +1664,10 @@ class ChainSearchEngine:
config_file=str(lsp_config_file) if lsp_config_file else None,
timeout=float(timeout_s),
)
warm_id = (key.workspace_root, key.config_file)
with self._realtime_lsp_keepalive_lock:
if warm_id in self._realtime_lsp_warmed_ids:
effective_warmup_s = 0.0
keepalive = self._realtime_lsp_keepalive
if keepalive is None or self._realtime_lsp_keepalive_key != key:
if keepalive is not None:
@@ -1676,6 +1684,8 @@ class ChainSearchEngine:
self._realtime_lsp_keepalive_key = key
graph = keepalive.run(expand_graph, timeout=timeout_s)
with self._realtime_lsp_keepalive_lock:
self._realtime_lsp_warmed_ids.add(warm_id)
except Exception as exc:
self.logger.debug("Stage 2 (realtime) expansion failed: %r", exc)
return coarse_results