Skip to content
Open
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 14 additions & 3 deletions rapidfireai/automl/model_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -293,7 +293,8 @@ def __setattr__(self, name, value):
RFPromptManager = None


# Conditionally define evals model config classes only if dependencies are available
# RFvLLMModelConfig requires vLLM (a self-hosted local inference engine).
# vLLM is unavailable on macOS Apple Silicon and other CPU-only platforms.
if (
_VLLM_AVAILABLE
and _EVALS_MODULES_AVAILABLE
Expand Down Expand Up @@ -357,6 +358,17 @@ def sampling_params_to_dict(self) -> dict[str, Any]:
# This works across different vLLM versions
return dict(vars(self.sampling_params))

else:
RFvLLMModelConfig = None
Comment thread
cursor[bot] marked this conversation as resolved.


# RFOpenAIAPIModelConfig and RFGeminiAPIModelConfig only require the evals modules
# (LangChainRagSpec, PromptManager, OpenAIInferenceEngine / GoogleGeminiInferenceEngine).
# They do NOT require vLLM: OpenAI and Gemini are remote APIs and never run a
# local inference engine. Gating them behind _VLLM_AVAILABLE would needlessly
# make them unusable on CPU-only platforms (macOS Apple Silicon, Datahub CPU pods).
if _EVALS_MODULES_AVAILABLE and InferenceEngine is not None:

class RFOpenAIAPIModelConfig(ModelConfig):
"""OpenAI API model configuration for evals mode."""

Expand Down Expand Up @@ -514,7 +526,6 @@ def sampling_params_to_dict(self) -> dict[str, Any]:
return {k: v for k, v in self.model_config.items() if k not in _non_sampling_keys}

else:
# Define placeholder classes if dependencies are not available
RFvLLMModelConfig = None
# Evals modules unavailable: API configs cannot be defined.
RFOpenAIAPIModelConfig = None
RFGeminiAPIModelConfig = None