From 9ba34778ac8fb3e94758c6d14478c5782479d4f9 Mon Sep 17 00:00:00 2001
From: teyrebaz33
Date: Wed, 8 Apr 2026 03:43:26 +0300
Subject: [PATCH] fix: add _get_model_id() helper to prevent IndexError on
 missing '/' separator

Fixes #249

Both completion() and chat() were calling model.split('/')[1] directly,
which raises IndexError when the model string has no '/' separator.

Added a _get_model_id() helper function that:
- Extracts the model value from TEE_LLM enum or plain string
- Raises a clear ValueError with a helpful message if the format is invalid
- Is used in both completion() and chat() methods

Matches the fix suggested in the issue.
---
 src/opengradient/client/llm.py | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/src/opengradient/client/llm.py b/src/opengradient/client/llm.py
index ed54fd9..622fed5 100644
--- a/src/opengradient/client/llm.py
+++ b/src/opengradient/client/llm.py
@@ -47,6 +47,17 @@ class _ChatParams:
     x402_settlement_mode: x402SettlementMode
 
 
+def _get_model_id(model) -> str:
+    """Extract model ID from provider/model-name format, raising ValueError for invalid formats."""
+    value = model.value if hasattr(model, "value") else str(model)
+    parts = value.split("/", 1)
+    if len(parts) != 2:
+        raise ValueError(
+            f"Invalid model identifier '{value}'. Expected 'provider/model-name' format."
+        )
+    return parts[1]
+
+
 class LLM:
     """
     LLM inference namespace.
@@ -252,7 +263,7 @@ async def completion(
         Raises:
             RuntimeError: If the inference fails.
         """
-        model_id = model.split("/")[1]
+        model_id = _get_model_id(model)
         payload: Dict = {
             "model": model_id,
             "prompt": prompt,
@@ -327,7 +338,7 @@ async def chat(
             RuntimeError: If the inference fails.
         """
         params = _ChatParams(
-            model=model.split("/")[1],
+            model=_get_model_id(model),
             max_tokens=max_tokens,
             temperature=temperature,
             stop_sequence=stop_sequence,