Mirror of https://github.com/catlog22/Claude-Code-Workflow.git (synced 2026-02-05 01:50:27 +08:00)
feat: update the execute command's argument hints to support specifying an existing worktree path; enhance worktree management
```diff
@@ -499,6 +499,13 @@ class Config:
             except ValueError:
                 log.warning("Invalid RERANKER_COOLDOWN in .env: %r", env_vars["RERANKER_COOLDOWN"])
 
+        if "RERANKER_MAX_INPUT_TOKENS" in env_vars:
+            try:
+                self.reranker_max_input_tokens = int(env_vars["RERANKER_MAX_INPUT_TOKENS"])
+                log.debug("Overriding reranker_max_input_tokens from .env: %s", self.reranker_max_input_tokens)
+            except ValueError:
+                log.warning("Invalid RERANKER_MAX_INPUT_TOKENS in .env: %r", env_vars["RERANKER_MAX_INPUT_TOKENS"])
+
     @classmethod
     def load(cls) -> "Config":
         """Load config with settings from file."""
```
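The pattern in this hunk (look up the key in the parsed `.env` mapping, convert with `int()`, log the override at debug level, and warn while keeping the default on a bad value) is easy to factor into a helper. Below is a minimal standalone sketch, assuming `env_vars` is a plain `dict[str, str]`; the helper name and the default value are hypothetical and do not appear in the diff:

```python
import logging

log = logging.getLogger(__name__)

DEFAULT_MAX_INPUT_TOKENS = 8192  # hypothetical default; the real one isn't shown


def int_from_env(env_vars: dict[str, str], key: str, default: int) -> int:
    """Return an int override from parsed .env values, falling back on error.

    Mirrors the diff: int() guarded by `except ValueError`, with a warning
    that echoes the offending raw value via %r.
    """
    if key not in env_vars:
        return default
    try:
        value = int(env_vars[key])
        log.debug("Overriding %s from .env: %s", key, value)
        return value
    except ValueError:
        log.warning("Invalid %s in .env: %r", key, env_vars[key])
        return default


# Usage, with a hand-built mapping standing in for parsed .env contents:
env_vars = {"RERANKER_MAX_INPUT_TOKENS": "not-a-number"}
tokens = int_from_env(env_vars, "RERANKER_MAX_INPUT_TOKENS", DEFAULT_MAX_INPUT_TOKENS)
assert tokens == DEFAULT_MAX_INPUT_TOKENS  # invalid value falls back and warns
```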
```diff
@@ -294,7 +294,12 @@ class APIReranker(BaseReranker):
         return payload
 
     def _estimate_tokens(self, text: str) -> int:
-        """Estimate token count using fast heuristic (len/4)."""
+        """Estimate token count using fast heuristic.
+
+        Uses len(text) // 4 as approximation (~4 chars per token for English).
+        Not perfectly accurate for all models/languages but sufficient for
+        batch sizing decisions where exact counts aren't critical.
+        """
         return len(text) // 4
 
     def _create_token_aware_batches(
```
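The expanded docstring is worth illustrating: `len(text) // 4` is a decent rule of thumb for English but systematically undercounts languages where tokenizers emit closer to one token per character. A quick sketch (a plain function stands in for the method; the example strings are illustrative, not from the repo):

```python
def estimate_tokens(text: str) -> int:
    """Same heuristic as _estimate_tokens: ~4 characters per token."""
    return len(text) // 4


# English: the ~4 chars/token rule of thumb is roughly right.
print(estimate_tokens("The quick brown fox jumps over the lazy dog."))  # 11

# CJK text often tokenizes closer to one token per character, so the
# heuristic undercounts; this is the docstring's "not perfectly accurate
# for all models/languages" caveat. 9 characters -> estimate of 2.
print(estimate_tokens("增强工作树管理功能"))  # 2
```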
```diff
@@ -317,7 +322,15 @@ class APIReranker(BaseReranker):
         for idx, doc in enumerate(documents):
             doc_tokens = self._estimate_tokens(doc)
 
-            # If single doc + query exceeds limit, include it anyway (will be truncated by API)
+            # Warn if single document exceeds token limit (will be truncated by API)
+            if doc_tokens > max_tokens - query_tokens:
+                logger.warning(
+                    f"Document {idx} exceeds token limit: ~{doc_tokens} tokens "
+                    f"(limit: {max_tokens - query_tokens} after query overhead). "
+                    "Document will likely be truncated by the API."
+                )
+
+            # If batch would exceed limit, start new batch
             if current_tokens + doc_tokens > max_tokens and current_batch:
                 batches.append(current_batch)
                 current_batch = []
```
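The hunk ends mid-loop, before the lines that actually accumulate documents and flush the last batch. For readers without the full file, a plausible self-contained completion of the pattern is sketched below; the function signature, counter resets, and final flush are assumptions, not the repo's code:

```python
import logging

logger = logging.getLogger(__name__)


def create_token_aware_batches(
    documents: list[str],
    query_tokens: int,
    max_tokens: int,
) -> list[list[str]]:
    """Greedily pack documents into batches under an estimated token budget."""
    batches: list[list[str]] = []
    current_batch: list[str] = []
    current_tokens = query_tokens  # every request pays the query cost once

    for idx, doc in enumerate(documents):
        doc_tokens = len(doc) // 4  # same heuristic as _estimate_tokens

        # Warn-but-include behavior from the diff: an oversized document
        # still ships and is left for the API to truncate.
        if doc_tokens > max_tokens - query_tokens:
            logger.warning("Document %d exceeds token limit: ~%d tokens", idx, doc_tokens)

        # If adding this doc would blow the budget, flush and start fresh.
        if current_tokens + doc_tokens > max_tokens and current_batch:
            batches.append(current_batch)
            current_batch = []
            current_tokens = query_tokens

        current_batch.append(doc)
        current_tokens += doc_tokens

    if current_batch:  # flush the final partial batch
        batches.append(current_batch)
    return batches
```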