Update main.py
main.py
CHANGED
@@ -45,11 +45,13 @@ except ImportError:
try:
    import google.generativeai as genai
    from google.generativeai.types import HarmCategory, HarmBlockThreshold
    _gemini_sdk_available = True
except ImportError:
    genai = None
    HarmCategory = None
    HarmBlockThreshold = None
    _gemini_sdk_available = False

# --- Groq SDK ---
@@ -101,13 +103,16 @@ WEBHOOK_SECRET = get_secret('WEBHOOK_SECRET')

# --- Model Configurations (Specific April 2025 - Updated Order) ---
# New Model Priority:
-# 1. Gemini 2.5 Flash Preview (NEW)
# 2. Gemini 2.5 Pro Exp
# 3. Gemini 2.0 Flash
# 4. OpenRouter DeepSeek V3 Free
# 5. Groq Llama 4 Scout (Moved to Last)

-
GEMINI_PRO_EXP_MODEL = os.environ.get("GEMINI_PRO_EXP_MODEL", "gemini-2.5-pro-exp-03-25")
GEMINI_FLASH_MODEL = os.environ.get("GEMINI_FLASH_MODEL", "gemini-2.0-flash-001") # Original Flash model
OPENROUTER_DEEPSEEK_MODEL = os.environ.get("OPENROUTER_DEEPSEEK_MODEL", "deepseek/deepseek-chat-v3-0324:free") # Specific DeepSeek model
@@ -151,7 +156,7 @@ if not WEBHOOK_SECRET: logger.info("Optional secret 'WEBHOOK_SECRET' not found.

logger.info("Secret loading and configuration check finished.")
# --- Log summarizers in the NEW order ---
-logger.info(f"Summarizer 1 (Gemini Flash Preview): {GEMINI_FLASH_PREVIEW_MODEL if _gemini_api_enabled else 'DISABLED'}")
logger.info(f"Summarizer 2 (Gemini Pro Exp): {GEMINI_PRO_EXP_MODEL if _gemini_api_enabled else 'DISABLED'}")
logger.info(f"Summarizer 3 (Gemini Flash): {GEMINI_FLASH_MODEL if _gemini_api_enabled else 'DISABLED'}")
logger.info(f"Summarizer 4 (OpenRouter): {OPENROUTER_DEEPSEEK_MODEL if _openrouter_fallback_enabled else 'DISABLED'}")
@@ -844,15 +849,20 @@ async def _call_groq(text: str, summary_type: str) -> Tuple[Optional[str], Optional[str]]:

async def _call_gemini(text: str, summary_type: str, model_name: str) -> Tuple[Optional[str], Optional[str]]:
    """Internal function to call Gemini API. Returns (summary, error_message)."""
-    global _gemini_api_enabled, HarmCategory, HarmBlockThreshold
    if not _gemini_api_enabled:
        logger.error(f"[Gemini {model_name}] Called but API is disabled.");
        return None, f"Error: AI service (Gemini API) not configured/available."

-    #
-    if HarmCategory is None or HarmBlockThreshold is None:
-        logger.error(f"[Gemini {model_name}]
-        return None, f"Sorry, an internal configuration error occurred with the AI service ({model_name}).

    logger.info(f"[Gemini {model_name}] Generating {summary_type} summary using {model_name}. Input length: {len(text)}")
@@ -866,41 +876,28 @@ async def _call_gemini(text: str, summary_type: str, model_name: str) -> Tuple[Optional[str], Optional[str]]:
    full_prompt = f"{prompt}\n\n{text}"

    # Define safety_settings
-    safety_settings = {}
    try:
-        #
-        # Define settings only for categories confirmed to exist in the SDK version being used.
-        # HARM_CATEGORY_CIVIC_INTEGRITY caused an AttributeError, so it's removed.
-        # The goal is to set all *available* categories to BLOCK_NONE.
        safety_settings = {
            HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
            HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
            HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
            HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
-            # HarmCategory.HARM_CATEGORY_CIVIC_INTEGRITY is REMOVED as it's not defined in the SDK version
        }
-        # Log the actual settings being used
        settings_applied_str = ", ".join([k.name for k in safety_settings.keys()])
        logger.debug(f"[Gemini {model_name}] Applying BLOCK_NONE to available safety categories: [{settings_applied_str}]")

    except (NameError, AttributeError) as e:
-
-
-        return None, f"Sorry, an internal error occurred configuring the AI service ({model_name}). Safety settings definition failed unexpectedly."

-
-
-
-        return None, f"Sorry, an internal error occurred configuring the AI service ({model_name}). No safety settings could be defined."

-    # --- API
    try:
-        logger.debug(f"[Gemini {model_name}] Initializing model {model_name}")
-        # Make sure genai is accessible
-        if genai is None:
-            logger.error(f"[Gemini {model_name}] GenAI SDK object (genai) is None. Cannot initialize model.")
-            return None, f"Sorry, the AI service ({model_name}) SDK is not properly initialized."
-
        model = genai.GenerativeModel(model_name)
        logger.info(f"[Gemini {model_name}] Sending request to Gemini ({model_name})...")
        request_options = {"timeout": 120}
@@ -909,7 +906,7 @@ async def _call_gemini(text: str, summary_type: str, model_name: str) -> Tuple[Optional[str], Optional[str]]:
            safety_settings=safety_settings, request_options=request_options )
        logger.info(f"[Gemini {model_name}] Received response from Gemini.")

-        # Response
        if response.prompt_feedback and response.prompt_feedback.block_reason:
            block_reason_str = getattr(response.prompt_feedback.block_reason, 'name', str(response.prompt_feedback.block_reason))
            logger.warning(f"[Gemini {model_name}] Request blocked by API. Reason: {block_reason_str}");
@@ -922,7 +919,6 @@ async def _call_gemini(text: str, summary_type: str, model_name: str) -> Tuple[Optional[str], Optional[str]]:
        finish_reason_str = finish_reason_name or 'N/A'

        if finish_reason_name == 'SAFETY':
-            # Log safety ratings if available
            safety_ratings_str = "N/A"
            if hasattr(candidate, 'safety_ratings'):
                safety_ratings_str = ", ".join([f"{rating.category.name}: {rating.probability.name}" for rating in candidate.safety_ratings])
@@ -931,73 +927,65 @@ async def _call_gemini(text: str, summary_type: str, model_name: str) -> Tuple[Optional[str], Optional[str]]:
        elif finish_reason_name not in ['STOP', 'MAX_TOKENS', None]:
            logger.warning(f"[Gemini {model_name}] Candidate finished with non-standard reason: {finish_reason_str}")

-        # Check for content within the candidate
        if candidate.content and candidate.content.parts:
            summary = "".join(part.text for part in candidate.content.parts if hasattr(part, 'text'))

-        # Fallback to response.text if no summary extracted from candidate
        if summary is None:
            try:
-
-
-
-
-
-                summary = None # Ensure summary remains None
-            except ValueError as e: # This often indicates blocked content
-                logger.warning(f"[Gemini {model_name}] Error accessing response.text (likely blocked/no content): {e}"); summary = None
-            except Exception as e:
-                logger.warning(f"[Gemini {model_name}] Unexpected error accessing response.text: {e}"); summary = None
-
-        # Final check and return
        if summary:
            logger.info(f"[Gemini {model_name}] Success generating summary. Finish Reason: {finish_reason_str}. Output len: {len(summary)}");
            return summary.strip(), None
        else:
            logger.warning(f"[Gemini {model_name}] Gemini returned empty summary or content was blocked. Final Finish Reason: {finish_reason_str}");
-
-            if finish_reason_str == 'SAFETY':
-                return None, f"Sorry, the AI model ({model_name}) response was blocked by safety filters."
            return None, f"Sorry, the AI model ({model_name}) did not provide a summary (Finish Reason: {finish_reason_str})."

-    # ---
-    except AttributeError as ae:
-        logger.error(f"[Gemini {model_name}] AttributeError during Gemini response processing: {ae}. SDK
        return None, f"Sorry, there was an issue processing the response from the AI service ({model_name}). Attribute error."
-
-
-
-
-
        logger.error(f"[Gemini {model_name}] Invalid Argument error from Gemini API: {iae}", exc_info=False)
        error_detail = str(iae)
        user_message = f"Sorry, the AI service ({model_name}) reported an invalid argument."
-        if "
-
-        elif "API key not valid" in error_detail: user_message = f"Error: The API key for the AI service ({model_name}) is invalid."
-        elif "model" in error_detail and ("not found" in error_detail or "does not exist" in error_detail): user_message = f"Error: The AI model name '{model_name}' was not found by the API."
-        elif "HarmCategory" in error_detail: user_message += " (Unsupported safety category passed)." # More specific error if it happens again
        return None, user_message
-    except google.api_core.exceptions.PermissionDenied as pde:
        logger.error(f"[Gemini {model_name}] Permission Denied error from Gemini API: {pde}", exc_info=False)
        user_message = f"Error: Access denied for the AI service ({model_name}). Check API key permissions."
        return None, user_message
-    except google.api_core.exceptions.ResourceExhausted as ree:
        logger.error(f"[Gemini {model_name}] Resource Exhausted (Quota/Rate Limit) error from Gemini API: {ree}", exc_info=False)
        user_message = f"Sorry, the AI model ({model_name}) is busy or quota exceeded. Please try again later."
        return None, user_message
-    except google.api_core.exceptions.GoogleAPIError as gae:
        logger.error(f"[Gemini {model_name}] Google API error during Gemini call: {gae}", exc_info=False)
        status_code = getattr(gae, 'code', 'Unknown')
        user_message = f"Sorry, the AI service ({model_name}) encountered an API error (Code: {status_code})."
        if status_code == 500: user_message = f"Sorry, the AI service ({model_name}) had an internal server error."
-
        return None, user_message
    except Exception as e:
        logger.error(f"[Gemini {model_name}] Unexpected error during Gemini API call: {e}", exc_info=True);
        error_msg = f"Sorry, an unexpected error occurred while using the AI service ({model_name})."
-        if
-
        return None, error_msg

Updated sections of main.py after this commit (added lines are marked with +):

try:
    import google.generativeai as genai
    from google.generativeai.types import HarmCategory, HarmBlockThreshold
+    import google.api_core.exceptions # <-- ADD THIS LINE
    _gemini_sdk_available = True
except ImportError:
    genai = None
    HarmCategory = None
    HarmBlockThreshold = None
+    google = None # Set google to None if core part fails too
    _gemini_sdk_available = False

# --- Groq SDK ---
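A related pattern worth noting (not part of this commit; the names below are hypothetical): if the exception classes are bound to stand-ins at import time, later except clauses never have to dereference google.api_core.exceptions through a module object that might be None. A minimal sketch, assuming only that the same imports can fail:

# Hypothetical alternative to the fallback above: bind exception types once at import time.
try:
    from google.api_core import exceptions as gexc  # real module when the SDK is installed
except ImportError:
    class _MissingGoogleAPIError(Exception):
        """Stand-in so 'except gexc.<Name>' clauses remain valid without the SDK."""

    class gexc:  # stand-in namespace exposing the same attribute names
        NotFound = _MissingGoogleAPIError
        InvalidArgument = _MissingGoogleAPIError
        PermissionDenied = _MissingGoogleAPIError
        ResourceExhausted = _MissingGoogleAPIError
        GoogleAPIError = _MissingGoogleAPIError

# Handlers can then be written unconditionally, e.g.
#     except gexc.ResourceExhausted:
#         ...
# and they simply never fire when the SDK (and thus the real exception types) is absent.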

# --- Model Configurations (Specific April 2025 - Updated Order) ---
# New Model Priority:
+# 1. Gemini 2.5 Flash Preview (NEW - Using specific date variant)
# 2. Gemini 2.5 Pro Exp
# 3. Gemini 2.0 Flash
# 4. OpenRouter DeepSeek V3 Free
# 5. Groq Llama 4 Scout (Moved to Last)

+# --- MODIFIED LINE: Changed default model name ---
+GEMINI_FLASH_PREVIEW_MODEL = os.environ.get("GEMINI_FLASH_PREVIEW_MODEL", "gemini-2.5-flash-preview-04-17") # NEW Model with date
+# --- END MODIFIED LINE ---
+
GEMINI_PRO_EXP_MODEL = os.environ.get("GEMINI_PRO_EXP_MODEL", "gemini-2.5-pro-exp-03-25")
GEMINI_FLASH_MODEL = os.environ.get("GEMINI_FLASH_MODEL", "gemini-2.0-flash-001") # Original Flash model
OPENROUTER_DEEPSEEK_MODEL = os.environ.get("OPENROUTER_DEEPSEEK_MODEL", "deepseek/deepseek-chat-v3-0324:free") # Specific DeepSeek model
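Each of these names is only a default; a deployment can pin a different variant through an environment variable without touching the code. A small sketch of how the lookup resolves (the override value shown is purely illustrative, not a model name taken from this repo):

import os

# No override set: the in-code default wins.
model = os.environ.get("GEMINI_FLASH_PREVIEW_MODEL", "gemini-2.5-flash-preview-04-17")
print(model)  # -> gemini-2.5-flash-preview-04-17

# With the variable set (e.g. in the Space's settings), the override wins at startup.
os.environ["GEMINI_FLASH_PREVIEW_MODEL"] = "some-other-preview-variant"  # illustrative value only
model = os.environ.get("GEMINI_FLASH_PREVIEW_MODEL", "gemini-2.5-flash-preview-04-17")
print(model)  # -> some-other-preview-variant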

logger.info("Secret loading and configuration check finished.")
# --- Log summarizers in the NEW order ---
+logger.info(f"Summarizer 1 (Gemini Flash Preview): {GEMINI_FLASH_PREVIEW_MODEL if _gemini_api_enabled else 'DISABLED'} (Using specific date variant)") # Added note
logger.info(f"Summarizer 2 (Gemini Pro Exp): {GEMINI_PRO_EXP_MODEL if _gemini_api_enabled else 'DISABLED'}")
logger.info(f"Summarizer 3 (Gemini Flash): {GEMINI_FLASH_MODEL if _gemini_api_enabled else 'DISABLED'}")
logger.info(f"Summarizer 4 (OpenRouter): {OPENROUTER_DEEPSEEK_MODEL if _openrouter_fallback_enabled else 'DISABLED'}")
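The numbering in these log lines mirrors the order in which the summarizers are tried: the caller walks the list and stops at the first backend that returns text. A minimal sketch of that chain, assuming (an assumption: only _call_gemini and _call_groq appear in this diff) that the OpenRouter and Groq paths are wrapped in coroutines with the same (summary, error) return shape:

async def summarize_with_fallback(text: str, summary_type: str):
    """Try each configured summarizer in priority order; return the first success."""
    attempts = [
        lambda: _call_gemini(text, summary_type, GEMINI_FLASH_PREVIEW_MODEL),  # 1
        lambda: _call_gemini(text, summary_type, GEMINI_PRO_EXP_MODEL),        # 2
        lambda: _call_gemini(text, summary_type, GEMINI_FLASH_MODEL),          # 3
        lambda: _call_openrouter(text, summary_type),  # 4 - hypothetical wrapper name
        lambda: _call_groq(text, summary_type),        # 5 - moved to last
    ]
    last_error = None
    for attempt in attempts:
        summary, error = await attempt()
        if summary:
            return summary, None
        last_error = error  # remember why this backend failed and fall through
    return None, last_error or "All summarizers failed."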

async def _call_gemini(text: str, summary_type: str, model_name: str) -> Tuple[Optional[str], Optional[str]]:
    """Internal function to call Gemini API. Returns (summary, error_message)."""
+    global _gemini_api_enabled, HarmCategory, HarmBlockThreshold, genai, google # Add google to global if needed, though import should make it available
    if not _gemini_api_enabled:
        logger.error(f"[Gemini {model_name}] Called but API is disabled.");
        return None, f"Error: AI service (Gemini API) not configured/available."

+    # Check if SDK and necessary types are loaded
+    if genai is None or HarmCategory is None or HarmBlockThreshold is None:
+        logger.error(f"[Gemini {model_name}] SDK or safety types (HarmCategory/HarmBlockThreshold) are None/unavailable.")
+        return None, f"Sorry, an internal configuration error occurred with the AI service ({model_name}). SDK components missing."
+
+    # Also check if the google exceptions module loaded - needed for specific error handling
+    if google is None or not hasattr(google, 'api_core') or not hasattr(google.api_core, 'exceptions'):
+        logger.warning(f"[Gemini {model_name}] google.api_core.exceptions not available for specific error handling. Will use general exceptions.")
+        # We can proceed but error handling might be less specific

    logger.info(f"[Gemini {model_name}] Generating {summary_type} summary using {model_name}. Input length: {len(text)}")
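Callers rely on the contract stated in the docstring: exactly one element of the returned tuple is meaningful, and every failure path returns a user-facing message instead of raising. A minimal usage sketch, assuming the module-level configuration above has already run:

import asyncio

async def demo() -> None:
    text = "Some long article text to condense..."
    summary, error = await _call_gemini(text, "short", GEMINI_FLASH_PREVIEW_MODEL)
    if error:
        print(f"Summarization failed: {error}")  # message produced by one of the failure paths
    else:
        print(summary)

asyncio.run(demo())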
    full_prompt = f"{prompt}\n\n{text}"

    # Define safety_settings
+    safety_settings = {}
    try:
+        # Use only categories known to exist in the SDK
        safety_settings = {
            HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
            HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
            HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
            HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
        }
        settings_applied_str = ", ".join([k.name for k in safety_settings.keys()])
        logger.debug(f"[Gemini {model_name}] Applying BLOCK_NONE to available safety categories: [{settings_applied_str}]")

    except (NameError, AttributeError) as e:
+        logger.error(f"[Gemini {model_name}] Unexpected error defining safety settings ({type(e).__name__}): {e}.", exc_info=True)
+        return None, f"Sorry, an internal error occurred configuring the AI service ({model_name}). Safety settings definition failed."

+    if not safety_settings: # Should not happen if base categories exist
+        logger.error(f"[Gemini {model_name}] Failed to define any safety settings.")
+        return None, f"Sorry, an internal error occurred configuring the AI service ({model_name}). No safety settings defined."

+    # --- API Call ---
    try:
        model = genai.GenerativeModel(model_name)
        logger.info(f"[Gemini {model_name}] Sending request to Gemini ({model_name})...")
        request_options = {"timeout": 120}
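The safety_settings dict above hard-codes the four harm categories that exist across google-generativeai versions; the removed comments in the earlier revision note that HARM_CATEGORY_CIVIC_INTEGRITY raised an AttributeError on the installed SDK. A hedged alternative (a sketch, not the committed approach) is to build the mapping with getattr so any category missing from the installed SDK is skipped instead of crashing:

def build_block_none_settings(harm_category, harm_block_threshold):
    """Map every safety category available in this SDK version to BLOCK_NONE."""
    wanted = [
        "HARM_CATEGORY_HARASSMENT",
        "HARM_CATEGORY_HATE_SPEECH",
        "HARM_CATEGORY_SEXUALLY_EXPLICIT",
        "HARM_CATEGORY_DANGEROUS_CONTENT",
        "HARM_CATEGORY_CIVIC_INTEGRITY",  # silently skipped when the SDK lacks it
    ]
    return {
        getattr(harm_category, name): harm_block_threshold.BLOCK_NONE
        for name in wanted
        if hasattr(harm_category, name)
    }

# Inside _call_gemini this would replace the literal dict:
# safety_settings = build_block_none_settings(HarmCategory, HarmBlockThreshold)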
            safety_settings=safety_settings, request_options=request_options )
        logger.info(f"[Gemini {model_name}] Received response from Gemini.")

+        # --- Response Handling (same as before) ---
        if response.prompt_feedback and response.prompt_feedback.block_reason:
            block_reason_str = getattr(response.prompt_feedback.block_reason, 'name', str(response.prompt_feedback.block_reason))
            logger.warning(f"[Gemini {model_name}] Request blocked by API. Reason: {block_reason_str}");
        finish_reason_str = finish_reason_name or 'N/A'

        if finish_reason_name == 'SAFETY':
            safety_ratings_str = "N/A"
            if hasattr(candidate, 'safety_ratings'):
                safety_ratings_str = ", ".join([f"{rating.category.name}: {rating.probability.name}" for rating in candidate.safety_ratings])
        elif finish_reason_name not in ['STOP', 'MAX_TOKENS', None]:
            logger.warning(f"[Gemini {model_name}] Candidate finished with non-standard reason: {finish_reason_str}")

        if candidate.content and candidate.content.parts:
            summary = "".join(part.text for part in candidate.content.parts if hasattr(part, 'text'))

        if summary is None:
            try:
+                if hasattr(response, 'text'): summary = response.text
+                else: logger.warning(f"[Gemini {model_name}] Response object lacks 'text' attribute."); summary = None
+            except ValueError as e: logger.warning(f"[Gemini {model_name}] Error accessing response.text (likely blocked/no content): {e}"); summary = None
+            except Exception as e: logger.warning(f"[Gemini {model_name}] Unexpected error accessing response.text: {e}"); summary = None
+
        if summary:
            logger.info(f"[Gemini {model_name}] Success generating summary. Finish Reason: {finish_reason_str}. Output len: {len(summary)}");
            return summary.strip(), None
        else:
            logger.warning(f"[Gemini {model_name}] Gemini returned empty summary or content was blocked. Final Finish Reason: {finish_reason_str}");
+            if finish_reason_str == 'SAFETY': return None, f"Sorry, the AI model ({model_name}) response was blocked by safety filters."
            return None, f"Sorry, the AI model ({model_name}) did not provide a summary (Finish Reason: {finish_reason_str})."

+    # --- Exception Handling (Refined) ---
+    except AttributeError as ae: # Error within response processing
+        logger.error(f"[Gemini {model_name}] AttributeError during Gemini response processing: {ae}. SDK/response structure issue.", exc_info=True);
        return None, f"Sorry, there was an issue processing the response from the AI service ({model_name}). Attribute error."
+
+    # These specific handlers reference google.api_core.exceptions directly; the genai-is-None
+    # guard at the top of the function already ensures that import block succeeded before we get here.
+    except google.api_core.exceptions.NotFound as nfe:
+        # Specific handling for the 404 error
+        logger.error(f"[Gemini {model_name}] Model Not Found error from Gemini API: {nfe}", exc_info=False)
+        user_message = f"Sorry, the AI model '{model_name}' was not found by the API service. It might be unavailable or spelled incorrectly."
+        return None, user_message
+    except google.api_core.exceptions.InvalidArgument as iae:
        logger.error(f"[Gemini {model_name}] Invalid Argument error from Gemini API: {iae}", exc_info=False)
        error_detail = str(iae)
        user_message = f"Sorry, the AI service ({model_name}) reported an invalid argument."
+        if "API key not valid" in error_detail: user_message = f"Error: The API key for the AI service ({model_name}) is invalid."
+        # Add other specific InvalidArgument checks if needed
        return None, user_message
+    except google.api_core.exceptions.PermissionDenied as pde:
        logger.error(f"[Gemini {model_name}] Permission Denied error from Gemini API: {pde}", exc_info=False)
        user_message = f"Error: Access denied for the AI service ({model_name}). Check API key permissions."
        return None, user_message
+    except google.api_core.exceptions.ResourceExhausted as ree:
        logger.error(f"[Gemini {model_name}] Resource Exhausted (Quota/Rate Limit) error from Gemini API: {ree}", exc_info=False)
        user_message = f"Sorry, the AI model ({model_name}) is busy or quota exceeded. Please try again later."
        return None, user_message
+    except google.api_core.exceptions.GoogleAPIError as gae:
+        # Catch other Google API errors
        logger.error(f"[Gemini {model_name}] Google API error during Gemini call: {gae}", exc_info=False)
        status_code = getattr(gae, 'code', 'Unknown')
        user_message = f"Sorry, the AI service ({model_name}) encountered an API error (Code: {status_code})."
        if status_code == 500: user_message = f"Sorry, the AI service ({model_name}) had an internal server error."
+        # Add other status codes if needed
        return None, user_message
    except Exception as e:
+        # General catch-all
        logger.error(f"[Gemini {model_name}] Unexpected error during Gemini API call: {e}", exc_info=True);
        error_msg = f"Sorry, an unexpected error occurred while using the AI service ({model_name})."
+        # Check if it's the specific NameError we saw before, indicating an issue loading the exceptions module
+        if isinstance(e, NameError) and 'google' in str(e):
+            error_msg = f"Sorry, an internal configuration error occurred with the AI service ({model_name}). Exception handling module missing."
        return None, error_msg
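One caveat worth spelling out about the handlers above: Python has no "except SomeError as e if condition:" form, so the availability of google.api_core.exceptions cannot be tested on the except clause itself; the handlers instead rely on the import guard at the top of the function. If a codebase genuinely needs those handlers to work even when the module may be missing, a valid alternative is a single broad handler that dispatches on the exception's class name. A sketch under that assumption (the classify_google_error helper is hypothetical, not part of this commit):

def classify_google_error(exc: Exception, model_name: str) -> str:
    """Build a user-facing message from a Google API error without importing the SDK."""
    name = type(exc).__name__
    if name == "NotFound":
        return f"Error: The AI model '{model_name}' was not found by the API."
    if name == "InvalidArgument":
        return f"Sorry, the AI service ({model_name}) reported an invalid argument."
    if name == "PermissionDenied":
        return f"Error: Access denied for the AI service ({model_name}). Check API key permissions."
    if name == "ResourceExhausted":
        return f"Sorry, the AI model ({model_name}) is busy or quota exceeded. Please try again later."
    return f"Sorry, an unexpected error occurred while using the AI service ({model_name})."

# The guarded handlers then collapse into a single clause:
#     except Exception as e:
#         logger.error(f"[Gemini {model_name}] {type(e).__name__}: {e}", exc_info=True)
#         return None, classify_google_error(e, model_name)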