JatsTheAIGen commited on
Commit
074e6cc
·
1 Parent(s): 82b52b6

Fix IndentationError: correct else clause alignment at line 162

Browse files
Files changed (1) hide show
  1. src/llm_router.py +4 -4
src/llm_router.py CHANGED
@@ -54,7 +54,7 @@ class LLMRouter:
54
  logger.info("Novita AI API client initialized")
55
  logger.info(f"Base URL: {self.settings.novita_base_url}")
56
  logger.info(f"Model: {self.settings.novita_model}")
57
- except Exception as e:
58
  logger.error(f"Failed to initialize Novita AI client: {e}")
59
  raise RuntimeError(f"Could not initialize Novita AI API client: {e}") from e
60
 
@@ -159,14 +159,14 @@ class LLMRouter:
159
  result = self._clean_reasoning_tags(result)
160
  logger.info(f"Novita AI API generated response (length: {len(result)})")
161
  return result
162
- else:
163
  logger.error("Novita AI API returned empty response")
164
  return None
165
 
166
  except Exception as e:
167
  logger.error(f"Error calling Novita AI API: {e}", exc_info=True)
168
- raise
169
-
170
  def _calculate_safe_max_tokens(self, prompt: str, requested_max_tokens: int) -> int:
171
  """
172
  Calculate safe max_tokens based on input token count and model context window.
 
54
  logger.info("Novita AI API client initialized")
55
  logger.info(f"Base URL: {self.settings.novita_base_url}")
56
  logger.info(f"Model: {self.settings.novita_model}")
57
+ except Exception as e:
58
  logger.error(f"Failed to initialize Novita AI client: {e}")
59
  raise RuntimeError(f"Could not initialize Novita AI API client: {e}") from e
60
 
 
159
  result = self._clean_reasoning_tags(result)
160
  logger.info(f"Novita AI API generated response (length: {len(result)})")
161
  return result
162
+ else:
163
  logger.error("Novita AI API returned empty response")
164
  return None
165
 
166
  except Exception as e:
167
  logger.error(f"Error calling Novita AI API: {e}", exc_info=True)
168
+ raise
169
+
170
  def _calculate_safe_max_tokens(self, prompt: str, requested_max_tokens: int) -> int:
171
  """
172
  Calculate safe max_tokens based on input token count and model context window.