mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-02-04 01:40:45 +08:00
feat: 添加工具类型和用法说明,支持自定义 API 头部设置
This commit is contained in:
@@ -54,6 +54,16 @@ All tool availability, model selection, and routing are defined in this configur

| `secondaryModel` | Fallback model |
| `tags` | Capability tags for routing |
### Tool Types

| Type | Usage | Capabilities |
|------|-------|--------------|
| `builtin` | `--tool gemini` | Full (analysis + write tools) |
| `cli-wrapper` | `--tool doubao` | Full (analysis + write tools) |
| `api-endpoint` | `--tool g25` | **Analysis only** (no file write tools) |

> **Note**: `api-endpoint` tools only support analysis and code generation responses. They cannot create, modify, or delete files.
---

## Tool Selection
@@ -2,7 +2,9 @@

from __future__ import annotations

import json
import logging
import os
from typing import Any, Sequence

import litellm
@@ -132,10 +134,22 @@ class LiteLLMClient(AbstractLLMClient):

        # Merge kwargs
        completion_kwargs = {**self._litellm_kwargs, **kwargs}

        # Build extra_headers from multiple sources
        if "extra_headers" not in completion_kwargs:
            completion_kwargs["extra_headers"] = {}

        # 1. Load custom headers from environment variable (set by CCW)
        env_headers = os.environ.get("CCW_LITELLM_EXTRA_HEADERS")
        if env_headers:
            try:
                custom_headers = json.loads(env_headers)
                completion_kwargs["extra_headers"].update(custom_headers)
            except json.JSONDecodeError:
                logger.warning(f"Invalid JSON in CCW_LITELLM_EXTRA_HEADERS: {env_headers}")

        # 2. Override User-Agent to avoid being blocked by some API proxies
        # that detect and block OpenAI SDK's default User-Agent
        # This is a fallback - user can override via custom headers
        if "User-Agent" not in completion_kwargs["extra_headers"]:
            completion_kwargs["extra_headers"]["User-Agent"] = "python-httpx/0.27"
|
|||||||
@@ -198,6 +198,14 @@ export async function executeLiteLLMEndpoint(
    }
  }

  // Set custom headers from provider advanced settings
  if (provider.advancedSettings?.customHeaders) {
    process.env['CCW_LITELLM_EXTRA_HEADERS'] = JSON.stringify(provider.advancedSettings.customHeaders);
  } else {
    // Clear any previous custom headers
    delete process.env['CCW_LITELLM_EXTRA_HEADERS'];
  }

  // Use litellm-client to call chat
  const response = await callWithRetries(
    () => client.chat(finalPrompt, endpoint.model),
|||||||
Reference in New Issue
Block a user