feat: Enhance embedding generation to track current index path and improve metadata retrieval

catlog22
2026-01-02 19:18:26 +08:00
parent 0b6e9db8e4
commit c268b531aa
6 changed files with 175 additions and 136 deletions


@@ -282,10 +282,27 @@ def get_optimal_providers(use_gpu: bool = True, with_device_options: bool = Fals
return ["CPUExecutionProvider"]
gpu_info = detect_gpu()
# Check if GPU was requested but not available - log warning
if not gpu_info.gpu_available:
try:
import onnxruntime as ort
available_providers = ort.get_available_providers()
except ImportError:
available_providers = []
logger.warning(
"GPU acceleration was requested, but no supported GPU provider (CUDA, DirectML) "
f"was found. Available providers: {available_providers}. Falling back to CPU."
)
else:
# Log which GPU provider is being used
gpu_providers = [p for p in gpu_info.onnx_providers if p != "CPUExecutionProvider"]
if gpu_providers:
logger.info(f"Using {gpu_providers[0]} for ONNX GPU acceleration")
if not with_device_options:
return gpu_info.onnx_providers
# Build providers with device_id options for GPU providers
device_id = get_selected_device_id()
providers = []
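
For context, the sketch below shows how a provider list returned by this function could be handed to ONNX Runtime. It is illustrative only: the importing module name and the model path are placeholders, not part of this commit, and the tuple form of the provider entries is assumed from the with_device_options branch above.

# Minimal usage sketch. "embedding_utils" and "model.onnx" are hypothetical;
# the diff does not show the file that defines get_optimal_providers.
import onnxruntime as ort
from embedding_utils import get_optimal_providers  # hypothetical import path

# With with_device_options=True the function is expected to yield entries such as
# ("CUDAExecutionProvider", {"device_id": 0}); InferenceSession accepts either
# plain provider names or (name, options) tuples.
providers = get_optimal_providers(use_gpu=True, with_device_options=True)

session = ort.InferenceSession("model.onnx", providers=providers)
print("Active providers:", session.get_providers())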