sikeaditya committed on
Commit 5591e71 · verified · 1 Parent(s): aada87f

Update app.py

Files changed (1)
  1. app.py +30 -56
app.py CHANGED
@@ -1,14 +1,13 @@
 import gradio as gr
 import requests
 import os
-import json # Import json for better error message handling
+import json
 
 # --- Configuration ---
-# Using the specific Nebius/HF router URL from your snippet
+# API configuration but hidden from end users
 API_URL = "https://router.huggingface.co/nebius/v1/chat/completions"
-# Using the model from your snippet
-MODEL_ID = "google/gemma-3-27b-it-fast"
-# Get Hugging Face token from environment variable/secrets
+MODEL_ID = "google/gemma-3-27b-it-fast" # Real model hidden from users
+PUBLIC_MODEL_NAME = "AgriAssist_LLM" # What users will see
 HF_TOKEN = os.getenv("HF_TOKEN")
 
 if not HF_TOKEN:
@@ -16,96 +15,71 @@ if not HF_TOKEN:
 
 HEADERS = {"Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json"}
 
-# --- No Local Model Loading Needed ---
-print(f"Application configured to use Hugging Face Inference API.")
-print(f"Target Model: AgriAssist_LLM")
-print(f"API Endpoint: {API_URL}")
+# --- Logging that doesn't expose real model name ---
+print(f"Application configured to use {PUBLIC_MODEL_NAME}.")
+print(f"API Endpoint configured.")
 
 # --- Inference Function (Using Hugging Face API) ---
-def generate_response(prompt, max_new_tokens=512): # Using max_tokens from your snippet
+def generate_response(prompt, max_new_tokens=512):
     print(f"Received prompt: {prompt}")
-    print("Preparing payload for API...")
+    print(f"Preparing payload for API...")
 
-    # Construct the payload based on the API requirements
-    # NOTE: This version assumes text-only input matching the Gradio interface.
-    # To handle image input like your snippet, the Gradio interface
-    # and payload structure would need modification.
     payload = {
         "messages": [
             {
                 "role": "user",
                 "content": prompt
-                # Example for multimodal if Gradio input changes:
-                # "content": [
-                #     {"type": "text", "text": prompt},
-                #     {"type": "image_url", "image_url": {"url": "some_image_url.jpg"}}
-                # ]
             }
         ],
-        "model": MODEL_ID,
+        "model": MODEL_ID, # Real model used in API call
         "max_tokens": max_new_tokens,
-        # Optional parameters you might want to add:
-        # "temperature": 0.7,
-        # "top_p": 0.9,
-        # "stream": False # Set to True for streaming responses if API supports it
     }
 
-    print(f"Sending request to API for model AgriAssist_LLM...")
+    print(f"Sending request to API for {PUBLIC_MODEL_NAME}...")
     try:
-        # Make the POST request
         response = requests.post(API_URL, headers=HEADERS, json=payload)
-
-        # Raise an exception for bad status codes (like 4xx or 5xx)
         response.raise_for_status()
-
-        # Parse the JSON response
        result = response.json()
-        print("API Response Received Successfully.")
+        print(f"{PUBLIC_MODEL_NAME} Response Received Successfully.")
 
-        # Extract the generated text - Structure matches your snippet's expectation
         if "choices" in result and len(result["choices"]) > 0 and "message" in result["choices"][0] and "content" in result["choices"][0]["message"]:
             api_response_content = result["choices"][0]["message"]["content"]
-            print(f"API generated content: {api_response_content}")
+            print(f"{PUBLIC_MODEL_NAME} generated content")
             return api_response_content
         else:
-            # Handle unexpected response structure
-            print(f"Unexpected API response structure: {result}")
-            return f"Error: Unexpected API response structure. Full response: {json.dumps(result)}"
+            print(f"Unexpected API response structure")
+            return f"Error: {PUBLIC_MODEL_NAME} encountered an issue processing your request. Please try again."
 
     except requests.exceptions.RequestException as e:
-        # Handle network errors, timeout errors, invalid responses, etc.
-        error_message = f"Error calling Hugging Face API: {e}"
-        # Try to get more details from the response body if it exists
         error_detail = ""
         if e.response is not None:
             try:
-                error_detail = e.response.json() # Try parsing JSON error
+                error_detail = e.response.json()
             except json.JSONDecodeError:
-                error_detail = e.response.text # Fallback to raw text
-        print(f"{error_message}\nResponse details: {error_detail}")
-        return f"{error_message}\nDetails: {error_detail}"
+                error_detail = e.response.text
+        print(f"Error calling API: {e}")
+        return f"{PUBLIC_MODEL_NAME} is currently experiencing connectivity issues. Please try again later."
 
     except Exception as e:
-        # Handle other potential errors during processing
         print(f"An unexpected error occurred: {e}")
-        return f"An unexpected error occurred: {e}"
+        return f"{PUBLIC_MODEL_NAME} encountered an unexpected error. Please try again later."
 
 # --- Gradio Interface ---
 iface = gr.Interface(
     fn=generate_response,
     inputs=gr.Textbox(lines=5, label="Enter your prompt", placeholder="Type your question or instruction here..."),
-    outputs=gr.Textbox(lines=8, label=f"AgriAssist_LLM Says (via API):"), # Updated label
-    title=f"Chat with AgriAssist_LLM via Inference API", # Updated title
-    description=("This demo sends your text to a remote server for processing."), # Updated description
+    outputs=gr.Textbox(lines=8, label=f"{PUBLIC_MODEL_NAME} Response:"),
+    title=f"Chat with {PUBLIC_MODEL_NAME}",
+    description=(f"This demo connects you with {PUBLIC_MODEL_NAME}, a specialized agricultural assistant. "
+                 "Submit your farming, crop management, or agricultural questions below."),
     allow_flagging="never",
-    examples=[ # Examples should still be relevant
-        ["Explain the concept of cloud computing in simple terms."],
-        ["Write Python code to list files in a directory."],
-        ["What are the main benefits of using Generative AI?"],
-        ["Translate 'Cloud computing offers scalability' to German."],
+    examples=[
+        ["What are sustainable practices for improving soil health in organic farming?"],
+        ["Explain crop rotation benefits and scheduling for small vegetable farms."],
+        ["How can I identify and treat common tomato plant diseases?"],
+        ["What irrigation methods are most water-efficient for drought-prone regions?"],
     ]
 )
 
 # --- Launch the App ---
-# You can add share=True if you want to create a temporary public link (use with caution)
-iface.launch(server_name="0.0.0.0", server_port=7860) # Makes it accessible in Codespaces/docker
+iface.launch(server_name="0.0.0.0", server_port=7860)
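For reference, a minimal sketch (not part of the commit) of exercising the same router endpoint outside Gradio, which can help confirm that HF_TOKEN and the payload shape used in app.py are valid; the prompt text and max_tokens value here are illustrative assumptions only.

import os
import requests

API_URL = "https://router.huggingface.co/nebius/v1/chat/completions"
HEADERS = {"Authorization": f"Bearer {os.getenv('HF_TOKEN')}", "Content-Type": "application/json"}

# Mirrors the payload built in app.py's generate_response()
payload = {
    "model": "google/gemma-3-27b-it-fast",
    "messages": [{"role": "user", "content": "How often should tomato seedlings be watered?"}],
    "max_tokens": 128,
}

resp = requests.post(API_URL, headers=HEADERS, json=payload, timeout=60)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])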