Enhance skill generator documentation and templates

- Updated Phase 1 and Phase 2 documentation to include links to the next phase and data-flow details.
- Expanded Phase 5 documentation with comprehensive validation and README-generation steps, plus the structure of the validation report.
- Added purpose and usage-context sections to various action and script templates (e.g., autonomous-action, llm-action, script-bash).
- Improved command management by simplifying the command-scanning logic and allowing commands to be enabled or disabled by renaming their files.
- Enhanced dashboard command manager to format group names and display nested groups with appropriate icons and colors.
- Updated the LiteLLM executor to allow per-execution model overrides via the --model parameter (condensed in the sketch after this list).
- Added action reference guide and template reference sections to the skill-tuning SKILL.md for better navigation and understanding.
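
Condensed, the model-override precedence these changes implement looks like this (a standalone sketch; the helper name is illustrative, not part of the diff):

function resolveModel(
  cliModel: string | undefined,     // explicit --model parameter, if any
  primaryModel: string | undefined, // the tool config's primaryModel
  endpointModel: string,            // endpoint.model, the final fallback
): string {
  // Resolution order: --model, then primaryModel, then the endpoint's
  // configured model; empty strings count as unset and fall through.
  return cliModel || primaryModel || endpointModel;
}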
Author: catlog22
Date: 2026-01-28 20:34:03 +08:00
Parent: 29274ee943
Commit: 3998d24e32
46 changed files with 1559 additions and 7731 deletions

View File

@@ -802,13 +802,15 @@ export function getNativeResume(projectDir: string): boolean {
*/
export function addClaudeApiEndpoint(
projectDir: string,
- endpoint: { id: string; name: string; enabled: boolean }
+ endpoint: { id: string; name: string; enabled: boolean; model?: string }
): ClaudeCliToolsConfig {
const config = loadClaudeCliTools(projectDir);
// Add as a tool with type: 'api-endpoint'
config.tools[endpoint.name] = {
enabled: endpoint.enabled,
+ primaryModel: endpoint.model, // Use endpoint.model as primaryModel (can be overridden via --model)
+ secondaryModel: endpoint.model, // Same as primary for fallback
tags: [],
type: 'api-endpoint',
id: endpoint.id // Store endpoint ID for settings lookup
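
Under the new signature, registering an endpoint with a model might look like this (a sketch; the path and model value are placeholders, and the endpoint ID echoes the "g25" example used below):

const config = addClaudeApiEndpoint('/path/to/project', {
  id: 'g25',               // LiteLLM endpoint ID, stored for settings lookup
  name: 'gemini-endpoint', // becomes the key in config.tools
  enabled: true,
  model: 'gemini-2.5-pro', // optional: seeds primaryModel and secondaryModel
});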

View File

@@ -557,6 +557,11 @@ async function executeCliTool(
// id field is the LiteLLM endpoint ID (e.g., "g25")
const litellmEndpointId = toolConfig.id || toolName;
+ // Use configured primary model if no explicit model provided
+ // This allows --model parameter to override the tool's primaryModel
+ // Use undefined if primaryModel is empty string (endpoint.model will be used as fallback)
+ const apiEndpointEffectiveModel = model || (toolConfig.primaryModel || undefined);
// Find LiteLLM endpoint configuration
const litellmEndpoint = findEndpointById(workingDir, litellmEndpointId);
if (litellmEndpoint) {
@@ -568,13 +573,14 @@ async function executeCliTool(
});
}
- // Execute via LiteLLM
+ // Execute via LiteLLM with model override
const result = await executeLiteLLMEndpoint({
prompt,
endpointId: litellmEndpointId,
baseDir: workingDir,
cwd: cd || workingDir,
includeDirs: includeDirs ? includeDirs.split(',').map(d => d.trim()) : undefined,
+ model: apiEndpointEffectiveModel, // Pass effective model (--model or primaryModel)
onOutput: onOutput || undefined,
});
@@ -587,7 +593,7 @@ async function executeCliTool(
id: customId || `${Date.now()}-litellm`,
timestamp: new Date(startTime).toISOString(),
tool: toolName,
- model: litellmEndpoint.model,
+ model: result.model, // Use effective model from result (reflects any override)
mode,
prompt,
status: result.success ? 'success' : 'error',
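
The empty-string guard in the apiEndpointEffectiveModel chain above is load-bearing; a minimal standalone illustration (helper name and model values are hypothetical):

// Mirrors: model || (toolConfig.primaryModel || undefined)
// An empty-string primaryModel must yield undefined, not '', so that the
// executor's own `modelOverride || endpoint.model` fallback still fires.
function effectiveApiModel(model?: string, primaryModel?: string): string | undefined {
  return model || (primaryModel || undefined);
}

effectiveApiModel('o3-mini', 'gpt-4o'); // 'o3-mini'  (explicit --model wins)
effectiveApiModel(undefined, 'gpt-4o'); // 'gpt-4o'   (tool primaryModel applies)
effectiveApiModel(undefined, '');       // undefined  (defers to endpoint.model)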

View File

@@ -19,6 +19,7 @@ export interface LiteLLMExecutionOptions {
cwd?: string; // Working directory for file resolution
includeDirs?: string[]; // Additional directories for @patterns
enableCache?: boolean; // Override endpoint cache setting
+ model?: string; // Override model for this execution (if not specified, uses endpoint.model)
onOutput?: (unit: CliOutputUnit) => void;
/** Number of retries after the initial attempt (default: 0) */
maxRetries?: number;
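
An options object exercising the new field alongside the existing retry knobs might look like this (a sketch; all values are illustrative, and retryBaseDelayMs is inferred from its use further down in this file):

const opts: LiteLLMExecutionOptions = {
  prompt: 'Review @src/executor.ts',
  endpointId: 'g25',
  baseDir: '/path/to/project',
  model: 'gpt-4o-mini',  // new: per-execution override of endpoint.model
  maxRetries: 2,         // two retries after the initial attempt
  retryBaseDelayMs: 1000,
};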
@@ -56,7 +57,7 @@ export function extractPatterns(prompt: string): string[] {
export async function executeLiteLLMEndpoint(
options: LiteLLMExecutionOptions
): Promise<LiteLLMExecutionResult> {
- const { prompt, endpointId, baseDir, cwd, includeDirs, enableCache, onOutput } = options;
+ const { prompt, endpointId, baseDir, cwd, includeDirs, enableCache, model: modelOverride, onOutput } = options;
// 1. Find endpoint configuration
const endpoint = findEndpointById(baseDir, endpointId);
@@ -96,7 +97,10 @@ export async function executeLiteLLMEndpoint(
};
}
- // 3. Process context cache if enabled
+ // 3. Determine effective model: use override if provided, otherwise use endpoint.model
+ const effectiveModel = modelOverride || endpoint.model;
+ // 4. Process context cache if enabled
let finalPrompt = prompt;
let cacheUsed = false;
let cachedFiles: string[] = [];
@@ -168,12 +172,12 @@ export async function executeLiteLLMEndpoint(
}
}
- // 4. Call LiteLLM
+ // 5. Call LiteLLM
try {
if (onOutput) {
onOutput({
type: 'stderr',
- content: `[LiteLLM: Calling ${provider.type}/${endpoint.model}]\n`,
+ content: `[LiteLLM: Calling ${provider.type}/${effectiveModel}]\n`,
timestamp: new Date().toISOString()
});
}
@@ -206,14 +210,14 @@ export async function executeLiteLLMEndpoint(
delete process.env['CCW_LITELLM_EXTRA_HEADERS'];
}
- // Use litellm-client to call chat
+ // Use litellm-client to call chat with effective model
const response = await callWithRetries(
- () => client.chat(finalPrompt, endpoint.model),
+ () => client.chat(finalPrompt, effectiveModel),
{
maxRetries: options.maxRetries ?? 0,
baseDelayMs: options.retryBaseDelayMs ?? 1000,
onOutput,
- rateLimitKey: `${provider.type}:${endpoint.model}`,
+ rateLimitKey: `${provider.type}:${effectiveModel}`,
},
);
@@ -228,7 +232,7 @@ export async function executeLiteLLMEndpoint(
return {
success: true,
output: response,
- model: endpoint.model,
+ model: effectiveModel,
provider: provider.type,
cacheUsed,
cachedFiles,
@@ -246,7 +250,7 @@ export async function executeLiteLLMEndpoint(
return {
success: false,
output: '',
- model: endpoint.model,
+ model: effectiveModel,
provider: provider.type,
cacheUsed,
error: errorMsg,
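
End to end, a caller can now do something like this (a sketch; the endpoint ID and model are illustrative, and the import path is omitted):

const result = await executeLiteLLMEndpoint({
  prompt: 'Summarize @README.md', // @patterns resolve against cwd/includeDirs
  endpointId: 'g25',
  baseDir: '/path/to/project',
  model: 'gemini-2.5-flash',      // omit to fall back to endpoint.model
});
console.log(result.model);        // reports the model actually used, override included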