@@ -305,7 +305,7 @@ async def call_llm(
         """
         provider = self._get_provider_config(size)
         params = self._build_llm_params(provider, size)
-        routing_provider = params["routing_provider"]
+        routing_provider = params.pop("routing_provider", None)
         extra_params = {}
         if self.portkey_api_key and routing_provider != "ollama":
             # ollama + portkey is not supported currently
@@ -358,7 +358,7 @@ async def call_llm_with_structured_output(
         """
         provider = self._get_provider_config(size)
         params = self._build_llm_params(provider, size)
-        routing_provider = params["routing_provider"]
+        routing_provider = params.pop("routing_provider", None)

         extra_params = {}
         if self.portkey_api_key and routing_provider != "ollama":
@@ -406,8 +406,7 @@ def _initialize_llm(self, provider: str, size: str, agent_type: AgentProvider):
        Kept for potential future differentiated initialization.
        """
        params = self._build_llm_params(provider, size)
-        routing_provider = params["routing_provider"]
-
+        routing_provider = params.pop("routing_provider", None)
        if agent_type == AgentProvider.CREWAI:
            crewai_params = {"model": params["model"], **params}
            if "default_headers" in params: