fmab777 committed
Commit 46693ee · verified · 1 Parent(s): 7e50d9a

Update main.py

Files changed (1):
  1. main.py +28 -23
main.py CHANGED
@@ -623,7 +623,7 @@ async def _call_gemini(text: str, summary_type: str) -> Tuple[Optional[str], Opt
         return None, "Error: Primary AI service (Gemini) not configured/available."
     logger.info(f"[Gemini Primary] Generating {summary_type} summary using {GEMINI_MODEL}. Input length: {len(text)}")
 
-    # Define prompts
+    # Define prompts (same as before)
     if summary_type == "paragraph":
         prompt = ("You are an AI model designed to provide concise summaries using British English spellings. Your output MUST be:\n"
                   "• Clear and simple language suitable for someone unfamiliar with the topic.\n"
@@ -653,9 +653,8 @@ async def _call_gemini(text: str, summary_type: str) -> Tuple[Optional[str], Opt
                   "• **Focus ONLY on the main content; strictly EXCLUDE information about website features, subscriptions, ads, cookie notices, or navigation elements. Do not include things like free/paid tiers; basic/premium memberships. Especially for ACS membership.**\n\n" # Added instruction
                   "Here is the text to summarise:")
 
-    # Input Length Check (Gemini-specific limits if known, otherwise use a large default)
-    # Flash model has 1M token context, Pro has 1M (standard) or 2M (preview)
-    MAX_INPUT_LENGTH_GEMINI = 900000 # Use a slightly conservative limit (~3.6M chars if 1 token ~ 4 chars)
+    # Input Length Check
+    MAX_INPUT_LENGTH_GEMINI = 900000
     if len(text) > MAX_INPUT_LENGTH_GEMINI:
         logger.warning(f"[Gemini Primary] Input length ({len(text)}) exceeds limit ({MAX_INPUT_LENGTH_GEMINI}). Truncating.");
         text = text[:MAX_INPUT_LENGTH_GEMINI] + "... (Content truncated)"
@@ -667,33 +666,29 @@ async def _call_gemini(text: str, summary_type: str) -> Tuple[Optional[str], Opt
         HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
         HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
         HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
-        # HarmCategory.HARM_CATEGORY_UNSPECIFIED: HarmBlockThreshold.BLOCK_NONE # Usually not needed
     }
-    # Add Civic Integrity if available (might not be in all SDK versions)
     if hasattr(HarmCategory, 'HARM_CATEGORY_CIVIC_INTEGRITY'):
         safety_settings[HarmCategory.HARM_CATEGORY_CIVIC_INTEGRITY] = HarmBlockThreshold.BLOCK_NONE
 
-    logger.debug(f"[Gemini Primary] Using safety settings: { {k.name: v.name for k, v in safety_settings.items()} }") # Log names
+    logger.debug(f"[Gemini Primary] Using safety settings: { {k.name: v.name for k, v in safety_settings.items()} }")
 
     try:
         logger.debug(f"[Gemini Primary] Initializing model {GEMINI_MODEL}")
         model = genai.GenerativeModel(GEMINI_MODEL)
         logger.info(f"[Gemini Primary] Sending request to Gemini ({GEMINI_MODEL})...")
-        request_options = {"timeout": 120} # Generous timeout for Gemini
+        request_options = {"timeout": 120}
         response = await model.generate_content_async(
             full_prompt,
-            generation_config=genai.types.GenerationConfig( # Optional: Control output further
-                # max_output_tokens=1024, # Example limit if needed
-                # temperature=0.7,
-            ),
+            generation_config=genai.types.GenerationConfig(), # Basic config
             safety_settings=safety_settings,
             request_options=request_options
         )
         logger.info("[Gemini Primary] Received response from Gemini.")
 
-        # Check for immediate blocking reasons (more robust check)
+        # Check for immediate blocking reasons
         if response.prompt_feedback and response.prompt_feedback.block_reason:
-            block_reason_str = response.prompt_feedback.block_reason.name
+            # Use .name for the enum value if block_reason is an enum, otherwise convert to string
+            block_reason_str = getattr(response.prompt_feedback.block_reason, 'name', str(response.prompt_feedback.block_reason))
             logger.warning(f"[Gemini Primary] Request blocked by API. Reason: {block_reason_str}");
             return None, f"Sorry, the primary AI model ({GEMINI_MODEL}) blocked the request (Reason: {block_reason_str})."
 
@@ -701,37 +696,47 @@ async def _call_gemini(text: str, summary_type: str) -> Tuple[Optional[str], Opt
         summary = None
         finish_reason_str = 'UNKNOWN'
         if response.candidates:
-            candidate = response.candidates[0] # Usually only one candidate
-            finish_reason_str = candidate.finish_reason.name if candidate.finish_reason else 'N/A'
-
-            if candidate.finish_reason == genai.types.FinishReason.SAFETY:
+            candidate = response.candidates[0]
+            # *** FIX START ***
+            # Use .name attribute of the finish_reason enum for comparison
+            finish_reason_name = getattr(candidate.finish_reason, 'name', None)
+            finish_reason_str = finish_reason_name or 'N/A' # Use name if available
+
+            if finish_reason_name == 'SAFETY':
+            # *** FIX END ***
                 safety_ratings_str = ", ".join([f"{rating.category.name}: {rating.probability.name}" for rating in candidate.safety_ratings])
                 logger.warning(f"[Gemini Primary] Candidate blocked due to SAFETY. Finish Reason: {finish_reason_str}. Ratings: [{safety_ratings_str}]")
                 return None, f"Sorry, the primary AI model ({GEMINI_MODEL}) blocked the response due to safety filters ({finish_reason_str})."
-            elif candidate.finish_reason not in [genai.types.FinishReason.STOP, genai.types.FinishReason.MAX_TOKENS]:
-                # Log other non-standard finish reasons
+            # *** FIX START ***
+            # Compare names instead of potentially non-existent enum members
+            elif finish_reason_name not in ['STOP', 'MAX_TOKENS', None]: # Also check for None
+            # *** FIX END ***
                 logger.warning(f"[Gemini Primary] Candidate finished with non-standard reason: {finish_reason_str}")
 
             # Safely access content text
             if candidate.content and candidate.content.parts:
                 summary = "".join(part.text for part in candidate.content.parts if hasattr(part, 'text'))
 
-        # Fallback check via response.text (less safe if blocked)
+        # Fallback check via response.text
        if summary is None:
             try:
                 summary = response.text
             except ValueError as e:
-                # This often indicates blocked content when accessing .text directly
                 logger.warning(f"[Gemini Primary] Error accessing response.text (likely blocked content based on previous checks): {e}");
-                summary = None # Ensure summary is None if access fails
+                summary = None
 
         if summary:
             logger.info(f"[Gemini Primary] Success generating summary. Finish Reason: {finish_reason_str}. Output len: {len(summary)}");
             return summary.strip(), None
         else:
+            # If summary is still None, report the finish reason found earlier
             logger.warning(f"[Gemini Primary] Gemini returned empty summary or content was blocked. Final Finish Reason: {finish_reason_str}");
             return None, f"Sorry, the primary AI model ({GEMINI_MODEL}) did not provide a summary (Finish Reason: {finish_reason_str})."
 
+    except AttributeError as ae:
+        # Catch potential AttributeErrors during response processing if SDK structure differs
+        logger.error(f"[Gemini Primary] AttributeError during Gemini response processing: {ae}. SDK might be incompatible or response structure unexpected.", exc_info=True);
+        return None, f"Sorry, there was an issue processing the response from the primary AI service ({GEMINI_MODEL})."
     except Exception as e:
         logger.error(f"[Gemini Primary] Unexpected error during Gemini API call: {e}", exc_info=True);
        return None, f"Sorry, an unexpected error occurred while using the primary AI service ({GEMINI_MODEL})."