Update app.py
app.py
CHANGED
@@ -1,71 +1,71 @@
 # Install compatible versions
-!pip install --upgrade torch torchvision torchaudio transformers speedtest-cli sentencepiece accelerate gradio
+# !pip install --upgrade torch torchvision torchaudio transformers speedtest-cli sentencepiece accelerate gradio

 import gradio as gr
 from transformers import pipeline
 import speedtest
 import torch

 # Clear GPU cache
 torch.cuda.empty_cache()

 # Load AI model
 model_name = "google/flan-t5-large"
 chatbot = pipeline("text2text-generation", model=model_name, device=0)

 # Function to check network speed and troubleshoot
 def check_network_speed(user_issue):
     if not user_issue.strip():
         return "⚠️ **Please enter a valid network issue!**"

     # Show loading indicator
     yield "⏳ **Analyzing network issue... Running speed test...**"

     # Run speed test
     st_obj = speedtest.Speedtest()
     download_speed = st_obj.download() / 1_000_000  # Convert to Mbps
     upload_speed = st_obj.upload() / 1_000_000  # Convert to Mbps
     ping_latency = st_obj.results.ping

     # **Prompt for AI model**
     prompt = f"""
     A user is experiencing a network issue: "{user_issue}"

     Network Speed Test Results:
     - Download Speed: {download_speed:.2f} Mbps
     - Upload Speed: {upload_speed:.2f} Mbps
     - Ping: {ping_latency:.2f} ms

     Provide exactly 5 different troubleshooting steps to help the user.
     Each step must be unique, actionable, and relevant to the problem.
     Avoid repeating steps or giving generic advice like "Check your internet".
     """

     # Show processing status
     yield f"⏳ **Analyzing speed test results... Generating troubleshooting steps...**"

     # Generate AI response
     response = chatbot(prompt, max_length=250, do_sample=True, temperature=0.5, num_return_sequences=1)

     # Final response
     yield (
         f"🔽 **Download Speed:** {download_speed:.2f} Mbps\n"
         f"🔼 **Upload Speed:** {upload_speed:.2f} Mbps\n"
         f"📶 **Ping Latency:** {ping_latency:.2f} ms\n\n"
         f"### 🛠 Troubleshooting Steps:\n{response[0]['generated_text']}"
     )

 # Gradio UI with compact design
 with gr.Blocks(theme=gr.themes.Base()) as iface:  # 🔹 Removes Gradio footer
     gr.Markdown("## 📡 Network Troubleshooting Chatbot")
     gr.Markdown("Enter your network issue, and the AI will diagnose the problem with a speed test and troubleshooting steps.")

     user_input = gr.Textbox(placeholder="Describe your network problem...", label="🔍 Enter Your Network Issue")
     diagnose_button = gr.Button("Diagnose", variant="primary")  # 🔹 Ensures proper button display

     output = gr.Markdown()  # 🔹 Output area

     diagnose_button.click(fn=check_network_speed, inputs=user_input, outputs=output)

 # Run Gradio app
 iface.launch()
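
The only change in this commit is commenting out the `!pip install` line: that line is IPython notebook syntax and would be a SyntaxError when app.py runs as a plain script on a Space, where dependencies are normally declared in requirements.txt instead. Two spots in the unchanged code are worth flagging, and the sketch below is only an illustrative variant, not part of the committed change: `device=0` and `torch.cuda.empty_cache()` assume a GPU is present, and the bare `return "⚠️ ..."` inside the generator means the empty-input warning is never yielded to the Gradio output. A minimal CPU-safe sketch, assuming the same model and UI wiring as app.py above:

import torch
from transformers import pipeline

model_name = "google/flan-t5-large"

# Use the GPU only when one is actually available; -1 selects the CPU.
device = 0 if torch.cuda.is_available() else -1
if torch.cuda.is_available():
    torch.cuda.empty_cache()  # clearing the CUDA cache only makes sense with a GPU

chatbot = pipeline("text2text-generation", model=model_name, device=device)

def check_network_speed(user_issue):
    # In a generator, a value after `return` is discarded rather than yielded,
    # so the original warning never reached the UI; yield it, then stop.
    if not user_issue.strip():
        yield "⚠️ **Please enter a valid network issue!**"
        return
    yield "⏳ **Analyzing network issue... Running speed test...**"
    # ... speed test and model call continue as in app.py above ...

With the original code, an empty submission leaves the output blank because the generator exits before yielding anything; the explicit yield-then-return makes the warning visible while keeping the rest of the streaming behavior unchanged.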