diff --git a/litellm/litellm_core_utils/get_llm_provider_logic.py b/litellm/litellm_core_utils/get_llm_provider_logic.py
index 275f2a63a1ba..85e868aa6178 100644
--- a/litellm/litellm_core_utils/get_llm_provider_logic.py
+++ b/litellm/litellm_core_utils/get_llm_provider_logic.py
@@ -12,10 +7,7 @@ def _is_non_openai_azure_model(model: str) -> bool:
     try:
         model_name = model.split("/", 1)[1]
-        if (
-            model_name in litellm.cohere_chat_models
-            or f"mistral/{model_name}" in litellm.mistral_chat_models
-        ):
+        if model_name in litellm.cohere_chat_models or f"mistral/{model_name}" in litellm.mistral_chat_models:
             return True
     except Exception:
         return False
@@ -42,11 +39,7 @@ def handle_cohere_chat_model_custom_llm_provider(
     if "/" in model:
         _custom_llm_provider, _model = model.split("/", 1)
-        if (
-            _custom_llm_provider
-            and _custom_llm_provider == "cohere"
-            and _model in litellm.cohere_chat_models
-        ):
+        if _custom_llm_provider and _custom_llm_provider == "cohere" and _model in litellm.cohere_chat_models:
             return _model, "cohere_chat"
 
     return model, custom_llm_provider
@@ -67,10 +60,7 @@ def handle_anthropic_text_model_custom_llm_provider(
     """
     if custom_llm_provider:
-        if (
-            custom_llm_provider == "anthropic"
-            and litellm.AnthropicTextConfig._is_anthropic_text_model(model)
-        ):
+        if custom_llm_provider == "anthropic" and litellm.AnthropicTextConfig._is_anthropic_text_model(model):
             return model, "anthropic_text"
 
     if "/" in model:
@@ -102,9 +92,7 @@ def get_llm_provider(  # noqa: PLR0915
     Return model, custom_llm_provider, dynamic_api_key, api_base
     """
     try:
-        if litellm.LiteLLMProxyChatConfig._should_use_litellm_proxy_by_default(
-            litellm_params=litellm_params
-        ):
+        if litellm.LiteLLMProxyChatConfig._should_use_litellm_proxy_by_default(litellm_params=litellm_params):
             return litellm.LiteLLMProxyChatConfig.litellm_proxy_get_custom_llm_provider_info(
                 model=model, api_base=api_base, api_key=api_key
             )
@@ -119,25 +107,24 @@ def get_llm_provider(  # noqa: PLR0915
             api_key = litellm_params.api_key
             dynamic_api_key = None
 
-        # check if llm provider provided
-        # AZURE AI-Studio Logic - Azure AI Studio supports AZURE/Cohere
-        # If User passes azure/command-r-plus -> we should send it to cohere_chat/command-r-plus
-        if model.split("/", 1)[0] == "azure":
+
+        # Avoid calling .split() multiple times by saving the result
+        split_model = model.split("/", 1)
+        model_prefix = split_model[0]
+
+        # AZURE AI-Studio Logic - handle non-openai models with prefix 'azure'
+        if model_prefix == "azure":
             if _is_non_openai_azure_model(model):
                 custom_llm_provider = "openai"
                 return model, custom_llm_provider, dynamic_api_key, api_base
 
         ### Handle cases when custom_llm_provider is set to cohere/command-r-plus but it should use cohere_chat route
-        model, custom_llm_provider = handle_cohere_chat_model_custom_llm_provider(
-            model, custom_llm_provider
-        )
+        model, custom_llm_provider = handle_cohere_chat_model_custom_llm_provider(model, custom_llm_provider)
 
-        model, custom_llm_provider = handle_anthropic_text_model_custom_llm_provider(
-            model, custom_llm_provider
-        )
+        model, custom_llm_provider = handle_anthropic_text_model_custom_llm_provider(model, custom_llm_provider)
 
         if custom_llm_provider and (
-            model.split("/")[0] != custom_llm_provider
+            model.split("/", 1)[0] != custom_llm_provider  # re-split: the handlers above may have rewritten `model`
         ):  # handle scenario where model="azure/*" and custom_llm_provider="azure"
             model = custom_llm_provider + "/" + model
 
@@ -145,10 +132,15 @@ def get_llm_provider(  # noqa: PLR0915
             dynamic_api_key = get_secret_str(api_key)
         # check if llm provider part of model name
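+        # Illustrative examples (comment only): "mistral/mistral-large-latest"
+        # splits into prefix "mistral" + "mistral-large-latest"; a bare
+        # "command-r" has no "/" and therefore no provider prefix to strip.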
+        # Re-split: `model` may have been rewritten by the handlers / prefix
+        # normalization above, so the cached values must be refreshed here.
+        split_model = model.split("/", 1)
+        model_prefix = split_model[0]
+        num_parts = len(split_model)
+        model_prefix_in_provider = model_prefix in litellm.provider_list
+        model_prefix_not_in_model_set = model_prefix not in litellm.model_list_set
         if (
-            model.split("/", 1)[0] in litellm.provider_list
-            and model.split("/", 1)[0] not in litellm.model_list_set
-            and len(model.split("/"))
+            model_prefix_in_provider
+            and model_prefix_not_in_model_set
+            and num_parts
             > 1  # handle edge case where user passes in `litellm --model mistral` https://github.com/BerriAI/litellm/issues/1351
         ):
             return _get_openai_compatible_provider_info(
@@ -157,121 +149,85 @@ def get_llm_provider(  # noqa: PLR0915
                 api_key=api_key,
                 dynamic_api_key=dynamic_api_key,
             )
-        elif model.split("/", 1)[0] in litellm.provider_list:
-            custom_llm_provider = model.split("/", 1)[0]
-            model = model.split("/", 1)[1]
+        elif model_prefix_in_provider:
+            custom_llm_provider = model_prefix
+            model = split_model[1] if num_parts > 1 else ""
             if api_base is not None and not isinstance(api_base, str):
-                raise Exception(
-                    "api base needs to be a string. api_base={}".format(api_base)
-                )
+                raise Exception("api base needs to be a string. api_base={}".format(api_base))
             if dynamic_api_key is not None and not isinstance(dynamic_api_key, str):
-                raise Exception(
-                    "dynamic_api_key needs to be a string. dynamic_api_key={}".format(
-                        dynamic_api_key
-                    )
-                )
+                raise Exception("dynamic_api_key needs to be a string. dynamic_api_key={}".format(dynamic_api_key))
             return model, custom_llm_provider, dynamic_api_key, api_base
 
         # check if api base is a known openai compatible endpoint
         if api_base:
-            for endpoint in litellm.openai_compatible_endpoints:
+            endpoint_map = {
+                "api.perplexity.ai": ("perplexity", "PERPLEXITYAI_API_KEY"),
+                "api.endpoints.anyscale.com/v1": ("anyscale", "ANYSCALE_API_KEY"),
+                "api.deepinfra.com/v1/openai": ("deepinfra", "DEEPINFRA_API_KEY"),
+                "api.mistral.ai/v1": ("mistral", "MISTRAL_API_KEY"),
+                "api.groq.com/openai/v1": ("groq", "GROQ_API_KEY"),
+                "https://integrate.api.nvidia.com/v1": ("nvidia_nim", "NVIDIA_NIM_API_KEY"),
+                "https://api.cerebras.ai/v1": ("cerebras", "CEREBRAS_API_KEY"),
+                "https://inference.baseten.co/v1": ("baseten", "BASETEN_API_KEY"),
+                "https://api.sambanova.ai/v1": ("sambanova", "SAMBANOVA_API_KEY"),
+                "https://api.ai21.com/studio/v1": ("ai21_chat", "AI21_API_KEY"),
+                "https://codestral.mistral.ai/v1": ("codestral", "CODESTRAL_API_KEY"),
+                "app.empower.dev/api/v1": ("empower", "EMPOWER_API_KEY"),
+                "api.deepseek.com/v1": ("deepseek", "DEEPSEEK_API_KEY"),
+                "https://api.friendli.ai/serverless/v1": ("friendliai", "FRIENDLIAI_API_KEY"),
+                "api.galadriel.com/v1": ("galadriel", "GALADRIEL_API_KEY"),
+                "https://api.llama.com/compat/v1": ("meta_llama", "LLAMA_API_KEY"),
+                "https://api.featherless.ai/v1": ("featherless_ai", "FEATHERLESS_AI_API_KEY"),
+                litellm.NscaleConfig.API_BASE_URL: ("nscale", None),
+                "dashscope-intl.aliyuncs.com/compatible-mode/v1": ("dashscope", "DASHSCOPE_API_KEY"),
+                "api.moonshot.ai/v1": ("moonshot", "MOONSHOT_API_KEY"),
+                "https://api.v0.dev/v1": ("v0", "V0_API_KEY"),
+                "https://api.lambda.ai/v1": ("lambda_ai", "LAMBDA_API_KEY"),
+                "https://api.hyperbolic.xyz/v1": ("hyperbolic", "HYPERBOLIC_API_KEY"),
+                "https://ai-gateway.vercel.sh/v1": ("vercel_ai_gateway", "VERCEL_AI_GATEWAY_API_KEY"),
+                "https://api.inference.wandb.ai/v1": ("wandb", "WANDB_API_KEY"),
+            }
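+            # Each value is (provider, api-key env var). A None env var means the
+            # provider resolves its key through its own config (currently nscale).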
+            for endpoint, (provider, key_env) in endpoint_map.items():
                 if endpoint in api_base:
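+                    # Illustrative match (comment only): api_base="https://api.groq.com/openai/v1"
+                    # contains the "api.groq.com/openai/v1" key, so the provider becomes
+                    # "groq" and the key is read from GROQ_API_KEY below.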
get_secret_str("VERCEL_AI_GATEWAY_API_KEY") - elif endpoint == "https://api.inference.wandb.ai/v1": - custom_llm_provider = "wandb" - dynamic_api_key = get_secret_str("WANDB_API_KEY") + else: + dynamic_api_key = (api_key or get_secret_str(key_env)) if key_env else api_key + if provider == "friendliai": + dynamic_api_key = dynamic_api_key or get_secret("FRIENDLI_TOKEN") + if provider == "meta_llama": + dynamic_api_key = api_key or get_secret_str("LLAMA_API_KEY") if api_base is not None and not isinstance(api_base, str): + raise Exception("api base needs to be a string. api_base={}".format(api_base)) + if dynamic_api_key is not None and not isinstance(dynamic_api_key, str): raise Exception( - "api base needs to be a string. api_base={}".format( - api_base - ) - ) - if dynamic_api_key is not None and not isinstance( - dynamic_api_key, str - ): - raise Exception( - "dynamic_api_key needs to be a string. dynamic_api_key={}".format( - dynamic_api_key - ) + "dynamic_api_key needs to be a string. dynamic_api_key={}".format(dynamic_api_key) ) return model, custom_llm_provider, dynamic_api_key, api_base # type: ignore + # check if model in known model provider list -> for huggingface models, raise exception as they don't have a fixed provider (can be togetherai, anyscale, baseten, runpod, et.) + ## openai - chatcompletion + text completion + + # The following block handles complex endpoints, preserving original sequential check to avoid subtle bugs + if "https://codestral.mistral.ai/v1" in api_base: + # two separately handled keys for historical reasons; preserve original logic + custom_llm_provider = "codestral" + dynamic_api_key = get_secret_str("CODESTRAL_API_KEY") + if api_base is not None and not isinstance(api_base, str): + raise Exception("api base needs to be a string. api_base={}".format(api_base)) + if dynamic_api_key is not None and not isinstance(dynamic_api_key, str): + raise Exception("dynamic_api_key needs to be a string. dynamic_api_key={}".format(dynamic_api_key)) + return model, custom_llm_provider, dynamic_api_key, api_base + if "https://codestral.mistral.ai/v1" in api_base: + custom_llm_provider = "text-completion-codestral" + dynamic_api_key = get_secret_str("CODESTRAL_API_KEY") + if api_base is not None and not isinstance(api_base, str): + raise Exception("api base needs to be a string. api_base={}".format(api_base)) + if dynamic_api_key is not None and not isinstance(dynamic_api_key, str): + raise Exception("dynamic_api_key needs to be a string. dynamic_api_key={}".format(dynamic_api_key)) + return model, custom_llm_provider, dynamic_api_key, api_base + # check if model in known model provider list -> for huggingface models, raise exception as they don't have a fixed provider (can be togetherai, anyscale, baseten, runpod, et.) ## openai - chatcompletion + text completion if ( @@ -296,13 +252,10 @@ def get_llm_provider( # noqa: PLR0915 elif model in litellm.cohere_chat_models: custom_llm_provider = "cohere_chat" ## replicate - elif model in litellm.replicate_models or ( - ":" in model and len(model) > REPLICATE_MODEL_NAME_WITH_ID_LENGTH - ): + elif model in litellm.replicate_models or (":" in model and len(model) > REPLICATE_MODEL_NAME_WITH_ID_LENGTH): model_parts = model.split(":") if ( - len(model_parts) > 1 - and len(model_parts[1]) == REPLICATE_MODEL_NAME_WITH_ID_LENGTH + len(model_parts) > 1 and len(model_parts[1]) == REPLICATE_MODEL_NAME_WITH_ID_LENGTH ): ## checks if model name has a 64 digit code - e.g. 
"meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3" custom_llm_provider = "replicate" elif model in litellm.replicate_models: @@ -329,11 +282,7 @@ def get_llm_provider( # noqa: PLR0915 ## ai21 elif model in litellm.ai21_chat_models or model in litellm.ai21_models: custom_llm_provider = "ai21_chat" - api_base = ( - api_base - or get_secret("AI21_API_BASE") - or "https://api.ai21.com/studio/v1" - ) # type: ignore + api_base = api_base or get_secret("AI21_API_BASE") or "https://api.ai21.com/studio/v1" # type: ignore dynamic_api_key = api_key or get_secret("AI21_API_KEY") ## aleph_alpha elif model in litellm.aleph_alpha_models: @@ -405,23 +354,15 @@ def get_llm_provider( # noqa: PLR0915 llm_provider="", ) if api_base is not None and not isinstance(api_base, str): - raise Exception( - "api base needs to be a string. api_base={}".format(api_base) - ) + raise Exception("api base needs to be a string. api_base={}".format(api_base)) if dynamic_api_key is not None and not isinstance(dynamic_api_key, str): - raise Exception( - "dynamic_api_key needs to be a string. dynamic_api_key={}".format( - dynamic_api_key - ) - ) + raise Exception("dynamic_api_key needs to be a string. dynamic_api_key={}".format(dynamic_api_key)) return model, custom_llm_provider, dynamic_api_key, api_base except Exception as e: if isinstance(e, litellm.exceptions.BadRequestError): raise e else: - error_str = ( - f"GetLLMProvider Exception - {str(e)}\n\noriginal model: {model}" - ) + error_str = f"GetLLMProvider Exception - {str(e)}\n\noriginal model: {model}" raise litellm.exceptions.BadRequestError( # type: ignore message=f"GetLLMProvider Exception - {str(e)}\n\noriginal model: {model}", model=model, @@ -457,9 +398,7 @@ def _get_openai_compatible_provider_info( # noqa: PLR0915 ( api_base, dynamic_api_key, - ) = litellm.PerplexityChatConfig()._get_openai_compatible_provider_info( - api_base, api_key - ) + ) = litellm.PerplexityChatConfig()._get_openai_compatible_provider_info(api_base, api_key) elif custom_llm_provider == "aiohttp_openai": return model, "aiohttp_openai", api_key, api_base elif custom_llm_provider == "anyscale": @@ -470,35 +409,21 @@ def _get_openai_compatible_provider_info( # noqa: PLR0915 ( api_base, dynamic_api_key, - ) = litellm.DeepInfraConfig()._get_openai_compatible_provider_info( - api_base, api_key - ) + ) = litellm.DeepInfraConfig()._get_openai_compatible_provider_info(api_base, api_key) elif custom_llm_provider == "empower": - api_base = ( - api_base - or get_secret("EMPOWER_API_BASE") - or "https://app.empower.dev/api/v1" - ) # type: ignore + api_base = api_base or get_secret("EMPOWER_API_BASE") or "https://app.empower.dev/api/v1" # type: ignore dynamic_api_key = api_key or get_secret_str("EMPOWER_API_KEY") elif custom_llm_provider == "groq": ( api_base, dynamic_api_key, - ) = litellm.GroqChatConfig()._get_openai_compatible_provider_info( - api_base, api_key - ) + ) = litellm.GroqChatConfig()._get_openai_compatible_provider_info(api_base, api_key) elif custom_llm_provider == "nvidia_nim": # nvidia_nim is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.endpoints.anyscale.com/v1 - api_base = ( - api_base - or get_secret("NVIDIA_NIM_API_BASE") - or "https://integrate.api.nvidia.com/v1" - ) # type: ignore + api_base = api_base or get_secret("NVIDIA_NIM_API_BASE") or "https://integrate.api.nvidia.com/v1" # type: ignore dynamic_api_key = api_key or get_secret_str("NVIDIA_NIM_API_KEY") elif custom_llm_provider == 
"cerebras": - api_base = ( - api_base or get_secret("CEREBRAS_API_BASE") or "https://api.cerebras.ai/v1" - ) # type: ignore + api_base = api_base or get_secret("CEREBRAS_API_BASE") or "https://api.cerebras.ai/v1" # type: ignore dynamic_api_key = api_key or get_secret_str("CEREBRAS_API_KEY") elif custom_llm_provider == "baseten": # Use BasetenConfig to determine the appropriate API base URL @@ -508,89 +433,53 @@ def _get_openai_compatible_provider_info( # noqa: PLR0915 api_base = api_base or get_secret_str("BASETEN_API_BASE") or "https://inference.baseten.co/v1" dynamic_api_key = api_key or get_secret_str("BASETEN_API_KEY") elif custom_llm_provider == "sambanova": - api_base = ( - api_base - or get_secret("SAMBANOVA_API_BASE") - or "https://api.sambanova.ai/v1" - ) # type: ignore + api_base = api_base or get_secret("SAMBANOVA_API_BASE") or "https://api.sambanova.ai/v1" # type: ignore dynamic_api_key = api_key or get_secret_str("SAMBANOVA_API_KEY") elif custom_llm_provider == "meta_llama": - api_base = ( - api_base - or get_secret("LLAMA_API_BASE") - or "https://api.llama.com/compat/v1" - ) # type: ignore + api_base = api_base or get_secret("LLAMA_API_BASE") or "https://api.llama.com/compat/v1" # type: ignore dynamic_api_key = api_key or get_secret_str("LLAMA_API_KEY") elif custom_llm_provider == "nebius": - api_base = ( - api_base - or get_secret("NEBIUS_API_BASE") - or "https://api.studio.nebius.ai/v1" - ) # type: ignore + api_base = api_base or get_secret("NEBIUS_API_BASE") or "https://api.studio.nebius.ai/v1" # type: ignore dynamic_api_key = api_key or get_secret_str("NEBIUS_API_KEY") - elif (custom_llm_provider == "ai21_chat") or ( - custom_llm_provider == "ai21" and model in litellm.ai21_chat_models - ): - api_base = ( - api_base or get_secret("AI21_API_BASE") or "https://api.ai21.com/studio/v1" - ) # type: ignore + elif (custom_llm_provider == "ai21_chat") or (custom_llm_provider == "ai21" and model in litellm.ai21_chat_models): + api_base = api_base or get_secret("AI21_API_BASE") or "https://api.ai21.com/studio/v1" # type: ignore dynamic_api_key = api_key or get_secret_str("AI21_API_KEY") custom_llm_provider = "ai21_chat" elif custom_llm_provider == "volcengine": # volcengine is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.endpoints.anyscale.com/v1 - api_base = ( - api_base - or get_secret("VOLCENGINE_API_BASE") - or "https://ark.cn-beijing.volces.com/api/v3" - ) # type: ignore + api_base = api_base or get_secret("VOLCENGINE_API_BASE") or "https://ark.cn-beijing.volces.com/api/v3" # type: ignore dynamic_api_key = api_key or get_secret_str("VOLCENGINE_API_KEY") elif custom_llm_provider == "codestral": # codestral is openai compatible, we just need to set this to custom_openai and have the api_base be https://codestral.mistral.ai/v1 - api_base = ( - api_base - or get_secret("CODESTRAL_API_BASE") - or "https://codestral.mistral.ai/v1" - ) # type: ignore + api_base = api_base or get_secret("CODESTRAL_API_BASE") or "https://codestral.mistral.ai/v1" # type: ignore dynamic_api_key = api_key or get_secret_str("CODESTRAL_API_KEY") elif custom_llm_provider == "hosted_vllm": # vllm is openai compatible, we just need to set this to custom_openai ( api_base, dynamic_api_key, - ) = litellm.HostedVLLMChatConfig()._get_openai_compatible_provider_info( - api_base, api_key - ) + ) = litellm.HostedVLLMChatConfig()._get_openai_compatible_provider_info(api_base, api_key) elif custom_llm_provider == "llamafile": # llamafile is OpenAI compatible. 
    elif custom_llm_provider == "llamafile":
         # llamafile is OpenAI compatible.
         (
             api_base,
             dynamic_api_key,
-        ) = litellm.LlamafileChatConfig()._get_openai_compatible_provider_info(
-            api_base, api_key
-        )
+        ) = litellm.LlamafileChatConfig()._get_openai_compatible_provider_info(api_base, api_key)
     elif custom_llm_provider == "datarobot":
         # DataRobot is OpenAI compatible.
         (
             api_base,
             dynamic_api_key,
-        ) = litellm.DataRobotConfig()._get_openai_compatible_provider_info(
-            api_base, api_key
-        )
+        ) = litellm.DataRobotConfig()._get_openai_compatible_provider_info(api_base, api_key)
     elif custom_llm_provider == "lm_studio":
         # lm_studio is openai compatible, we just need to set this to custom_openai
         (
             api_base,
             dynamic_api_key,
-        ) = litellm.LMStudioChatConfig()._get_openai_compatible_provider_info(
-            api_base, api_key
-        )
+        ) = litellm.LMStudioChatConfig()._get_openai_compatible_provider_info(api_base, api_key)
     elif custom_llm_provider == "deepseek":
         # deepseek is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.deepseek.com/v1
-        api_base = (
-            api_base
-            or get_secret("DEEPSEEK_API_BASE")
-            or "https://api.deepseek.com/beta"
-        )  # type: ignore
+        api_base = api_base or get_secret("DEEPSEEK_API_BASE") or "https://api.deepseek.com/beta"  # type: ignore
         dynamic_api_key = api_key or get_secret_str("DEEPSEEK_API_KEY")
 
     elif custom_llm_provider == "fireworks_ai":
@@ -598,9 +487,7 @@ def _get_openai_compatible_provider_info(  # noqa: PLR0915
         (
             api_base,
             dynamic_api_key,
-        ) = litellm.FireworksAIConfig()._get_openai_compatible_provider_info(
-            api_base=api_base, api_key=api_key
-        )
+        ) = litellm.FireworksAIConfig()._get_openai_compatible_provider_info(api_base=api_base, api_key=api_key)
     elif custom_llm_provider == "azure_ai":
         (
             api_base,
@@ -620,38 +507,26 @@ def _get_openai_compatible_provider_info(  # noqa: PLR0915
         (
             api_base,
             dynamic_api_key,
-        ) = litellm.LiteLLMProxyChatConfig()._get_openai_compatible_provider_info(
-            api_base=api_base, api_key=api_key
-        )
+        ) = litellm.LiteLLMProxyChatConfig()._get_openai_compatible_provider_info(api_base=api_base, api_key=api_key)
     elif custom_llm_provider == "mistral":
         (
             api_base,
             dynamic_api_key,
-        ) = litellm.MistralConfig()._get_openai_compatible_provider_info(
-            api_base, api_key
-        )
+        ) = litellm.MistralConfig()._get_openai_compatible_provider_info(api_base, api_key)
     elif custom_llm_provider == "jina_ai":
         (
             custom_llm_provider,
             api_base,
             dynamic_api_key,
-        ) = litellm.JinaAIEmbeddingConfig()._get_openai_compatible_provider_info(
-            api_base, api_key
-        )
+        ) = litellm.JinaAIEmbeddingConfig()._get_openai_compatible_provider_info(api_base, api_key)
     elif custom_llm_provider == "xai":
         (
             api_base,
             dynamic_api_key,
-        ) = litellm.XAIChatConfig()._get_openai_compatible_provider_info(
-            api_base, api_key
-        )
+        ) = litellm.XAIChatConfig()._get_openai_compatible_provider_info(api_base, api_key)
     elif custom_llm_provider == "together_ai":
-        api_base = (
-            api_base
-            or get_secret_str("TOGETHER_AI_API_BASE")
-            or "https://api.together.xyz/v1"
-        )  # type: ignore
+        api_base = api_base or get_secret_str("TOGETHER_AI_API_BASE") or "https://api.together.xyz/v1"  # type: ignore
         dynamic_api_key = api_key or (
             get_secret_str("TOGETHER_API_KEY")
             or get_secret_str("TOGETHER_AI_API_KEY")
@@ -659,22 +534,10 @@ def _get_openai_compatible_provider_info(  # noqa: PLR0915
             or get_secret_str("TOGETHER_AI_TOKEN")
         )
     elif custom_llm_provider == "friendliai":
-        api_base = (
-            api_base
-            or get_secret("FRIENDLI_API_BASE")
-            or "https://api.friendli.ai/serverless/v1"
-        )  # type: ignore
-        dynamic_api_key = (
-            api_key
-            or get_secret_str("FRIENDLIAI_API_KEY")
-            or get_secret_str("FRIENDLI_TOKEN")
-        )
+        api_base = api_base or get_secret("FRIENDLI_API_BASE") or "https://api.friendli.ai/serverless/v1"  # type: ignore
+        dynamic_api_key = api_key or get_secret_str("FRIENDLIAI_API_KEY") or get_secret_str("FRIENDLI_TOKEN")
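+    # Descriptive note: together_ai and friendliai accept multiple legacy env
+    # var names; the first non-empty value in the listed order wins.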
get_secret_str("FRIENDLIAI_API_KEY") - or get_secret_str("FRIENDLI_TOKEN") - ) + api_base = api_base or get_secret("FRIENDLI_API_BASE") or "https://api.friendli.ai/serverless/v1" # type: ignore + dynamic_api_key = api_key or get_secret_str("FRIENDLIAI_API_KEY") or get_secret_str("FRIENDLI_TOKEN") elif custom_llm_provider == "galadriel": - api_base = ( - api_base - or get_secret("GALADRIEL_API_BASE") - or "https://api.galadriel.com/v1" - ) # type: ignore + api_base = api_base or get_secret("GALADRIEL_API_BASE") or "https://api.galadriel.com/v1" # type: ignore dynamic_api_key = api_key or get_secret_str("GALADRIEL_API_KEY") elif custom_llm_provider == "github_copilot": ( @@ -685,11 +548,7 @@ def _get_openai_compatible_provider_info( # noqa: PLR0915 model, api_base, api_key, custom_llm_provider ) elif custom_llm_provider == "novita": - api_base = ( - api_base - or get_secret("NOVITA_API_BASE") - or "https://api.novita.ai/v3/openai" - ) # type: ignore + api_base = api_base or get_secret("NOVITA_API_BASE") or "https://api.novita.ai/v3/openai" # type: ignore dynamic_api_key = api_key or get_secret_str("NOVITA_API_KEY") elif custom_llm_provider == "snowflake": api_base = ( @@ -702,116 +561,80 @@ def _get_openai_compatible_provider_info( # noqa: PLR0915 ( api_base, dynamic_api_key, - ) = litellm.GradientAIConfig()._get_openai_compatible_provider_info( - api_base, api_key - ) + ) = litellm.GradientAIConfig()._get_openai_compatible_provider_info(api_base, api_key) elif custom_llm_provider == "featherless_ai": ( api_base, dynamic_api_key, - ) = litellm.FeatherlessAIConfig()._get_openai_compatible_provider_info( - api_base, api_key - ) + ) = litellm.FeatherlessAIConfig()._get_openai_compatible_provider_info(api_base, api_key) elif custom_llm_provider == "nscale": ( api_base, dynamic_api_key, - ) = litellm.NscaleConfig()._get_openai_compatible_provider_info( - api_base=api_base, api_key=api_key - ) + ) = litellm.NscaleConfig()._get_openai_compatible_provider_info(api_base=api_base, api_key=api_key) elif custom_llm_provider == "heroku": ( api_base, dynamic_api_key, - ) = litellm.HerokuChatConfig()._get_openai_compatible_provider_info( - api_base, api_key - ) + ) = litellm.HerokuChatConfig()._get_openai_compatible_provider_info(api_base, api_key) elif custom_llm_provider == "dashscope": ( api_base, dynamic_api_key, - ) = litellm.DashScopeChatConfig()._get_openai_compatible_provider_info( - api_base, api_key - ) + ) = litellm.DashScopeChatConfig()._get_openai_compatible_provider_info(api_base, api_key) elif custom_llm_provider == "moonshot": ( api_base, dynamic_api_key, - ) = litellm.MoonshotChatConfig()._get_openai_compatible_provider_info( - api_base, api_key - ) + ) = litellm.MoonshotChatConfig()._get_openai_compatible_provider_info(api_base, api_key) elif custom_llm_provider == "v0": ( api_base, dynamic_api_key, - ) = litellm.V0ChatConfig()._get_openai_compatible_provider_info( - api_base, api_key - ) + ) = litellm.V0ChatConfig()._get_openai_compatible_provider_info(api_base, api_key) elif custom_llm_provider == "morph": ( api_base, dynamic_api_key, - ) = litellm.MorphChatConfig()._get_openai_compatible_provider_info( - api_base, api_key - ) + ) = litellm.MorphChatConfig()._get_openai_compatible_provider_info(api_base, api_key) elif custom_llm_provider == "lambda_ai": ( api_base, dynamic_api_key, - ) = litellm.LambdaAIChatConfig()._get_openai_compatible_provider_info( - api_base, api_key - ) + ) = litellm.LambdaAIChatConfig()._get_openai_compatible_provider_info(api_base, api_key) elif 
    elif custom_llm_provider == "hyperbolic":
         (
             api_base,
             dynamic_api_key,
-        ) = litellm.HyperbolicChatConfig()._get_openai_compatible_provider_info(
-            api_base, api_key
-        )
+        ) = litellm.HyperbolicChatConfig()._get_openai_compatible_provider_info(api_base, api_key)
     elif custom_llm_provider == "vercel_ai_gateway":
         (
             api_base,
             dynamic_api_key,
-        ) = litellm.VercelAIGatewayConfig()._get_openai_compatible_provider_info(
-            api_base, api_key
-        )
+        ) = litellm.VercelAIGatewayConfig()._get_openai_compatible_provider_info(api_base, api_key)
     elif custom_llm_provider == "aiml":
         (
             api_base,
             dynamic_api_key,
-        ) = litellm.AIMLChatConfig()._get_openai_compatible_provider_info(
-            api_base, api_key
-        )
+        ) = litellm.AIMLChatConfig()._get_openai_compatible_provider_info(api_base, api_key)
     elif custom_llm_provider == "wandb":
-        api_base = (
-            api_base
-            or get_secret("WANDB_API_BASE")
-            or "https://api.inference.wandb.ai/v1"
-        )  # type: ignore
+        api_base = api_base or get_secret("WANDB_API_BASE") or "https://api.inference.wandb.ai/v1"  # type: ignore
         dynamic_api_key = api_key or get_secret_str("WANDB_API_KEY")
     elif custom_llm_provider == "lemonade":
         (
             api_base,
             dynamic_api_key,
-        ) = litellm.LemonadeChatConfig()._get_openai_compatible_provider_info(
-            api_base, api_key
-        )
+        ) = litellm.LemonadeChatConfig()._get_openai_compatible_provider_info(api_base, api_key)
     elif custom_llm_provider == "clarifai":
         (
             api_base,
             dynamic_api_key,
-        ) = litellm.ClarifaiConfig()._get_openai_compatible_provider_info(
-            api_base, api_key
-        )
+        ) = litellm.ClarifaiConfig()._get_openai_compatible_provider_info(api_base, api_key)
 
     if api_base is not None and not isinstance(api_base, str):
         raise Exception("api base needs to be a string. api_base={}".format(api_base))
     if dynamic_api_key is not None and not isinstance(dynamic_api_key, str):
-        raise Exception(
-            "dynamic_api_key needs to be a string. dynamic_api_key={}".format(
-                dynamic_api_key
-            )
-        )
+        raise Exception("dynamic_api_key needs to be a string. dynamic_api_key={}".format(dynamic_api_key))
     if dynamic_api_key is None and api_key is not None:
         dynamic_api_key = api_key
     return model, custom_llm_provider, dynamic_api_key, api_base