4 video sources - fire detection
Files changed:
- app.py (+42 -2)
- mcp_client.py (+25 -8)
- pyproject.toml (+3 -0)
app.py CHANGED

@@ -57,7 +57,7 @@ def analyze_fire_scene(frame):
     inputs = inputs.to(device)

     with torch.no_grad():
-        outputs = vqa_model.generate(**inputs, max_new_tokens=
+        outputs = vqa_model.generate(**inputs, max_new_tokens=10, do_sample=False, num_beams=1)  # Faster generation

     answer = vqa_processor.decode(outputs[0], skip_special_tokens=True)
     answer = answer.split("Assistant:")[-1].strip() if "Assistant:" in answer else answer

@@ -207,6 +207,33 @@ def respond(
 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 """
+def detect_fire_api(image_data):
+    """API endpoint for fire detection"""
+    import base64
+    from PIL import Image
+    import io
+
+    try:
+        # Decode base64 image
+        img_bytes = base64.b64decode(image_data)
+        image = Image.open(io.BytesIO(img_bytes))
+        # Keep image small for faster processing
+        if image.size[0] > 320:
+            image = image.resize((320, 240))
+        frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
+
+        # Analyze frame
+        fire_detected, smoke_detected, fire_details = analyze_fire_scene(frame)
+
+        return {
+            "fire_detected": fire_detected,
+            "smoke_detected": smoke_detected,
+            "details": fire_details,
+            "timestamp": datetime.now().isoformat()
+        }
+    except Exception as e:
+        return {"error": str(e)}
+
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[

@@ -225,4 +252,17 @@ demo = gr.ChatInterface(


 if __name__ == "__main__":
-
+    # Add API endpoint
+    from fastapi import FastAPI
+    import uvicorn
+
+    app = FastAPI()
+
+    @app.post("/detect_fire")
+    async def api_detect_fire(request: dict):
+        return detect_fire_api(request["image"])
+
+    # Mount Gradio app
+    demo_app = gr.mount_gradio_app(app, demo, path="/")
+
+    uvicorn.run(app, host="0.0.0.0", port=7860)
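For reference, a minimal client-side sketch of exercising the new endpoint. The path, payload key, response shape, and port all come from the diff above; the image filename is only a placeholder:

import base64
import requests

# Encode any JPEG as base64 (placeholder filename)
with open("frame.jpg", "rb") as f:
    img_str = base64.b64encode(f.read()).decode()

resp = requests.post(
    "http://localhost:7860/detect_fire",  # default host/port from uvicorn.run above
    json={"image": img_str},
    timeout=30,
)
print(resp.json())  # e.g. {"fire_detected": false, "smoke_detected": false, "details": ..., "timestamp": ...}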
mcp_client.py CHANGED

@@ -24,16 +24,19 @@ class FireDetectionClient:
         # Convert frame to PIL Image
         image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

+        # Resize image to reduce processing time
+        image = image.resize((320, 240))  # Smaller size for faster processing
+
         # Convert to base64
         buffer = io.BytesIO()
-        image.save(buffer, format='JPEG')
+        image.save(buffer, format='JPEG', quality=70)  # Lower quality for speed
         img_str = base64.b64encode(buffer.getvalue()).decode()

-        # Send to MCP server
+        # Send to MCP server API endpoint with longer timeout
         response = requests.post(
             f"{self.mcp_server_url}/detect_fire",
             json={"image": img_str},
-            timeout=
+            timeout=30  # Increased timeout
         )

         if response.status_code == 200:

@@ -60,13 +63,27 @@ class FireDetectionClient:

             frame_count += 1

-            # Process every
-            if frame_count %
+            # Process every 200th frame (less frequent for stability)
+            if frame_count % 200 == 0:
                 timestamp = datetime.now().strftime("%H:%M:%S")
                 print(f"[{timestamp}] Source {source_id}: Analyzing frame {frame_count}")

-
-
+                try:
+                    # Use MCP server for fire detection
+                    result = self.detect_fire_mcp(frame)
+
+                    if "error" in result:
+                        print(f"MCP Error: {result['error']}")
+                        # Fallback to simple detection
+                        fire_detected, smoke_detected = self.simple_fire_detection(frame)
+                    else:
+                        fire_detected = result.get("fire_detected", False)
+                        smoke_detected = result.get("smoke_detected", False)
+
+                except Exception as e:
+                    print(f"Connection error: {e}")
+                    # Use fallback detection
+                    fire_detected, smoke_detected = self.simple_fire_detection(frame)

                 if fire_detected or smoke_detected:
                     alert = f"🚨 ALERT - Source {source_id} at {timestamp}:\n"

@@ -79,7 +96,7 @@ class FireDetectionClient:
                     print(alert)
                     # Here you could send notifications, save alerts, etc.

-            time.sleep(0.
+            time.sleep(0.03)  # Longer delay to reduce load

         cap.release()
         print(f"Stopped monitoring source {source_id}")
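The fallback branch calls self.simple_fire_detection(frame), whose body is not part of this diff. A plausible minimal sketch, assuming a plain HSV color-threshold heuristic (the thresholds and area ratios below are illustrative assumptions, not values from the repo):

import cv2
import numpy as np

def simple_fire_detection(frame):
    # Convert BGR frame to HSV for color thresholding
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Rough "flame" band: red/orange/yellow hues, high saturation and value
    fire_mask = cv2.inRange(hsv, np.array([0, 120, 180]), np.array([35, 255, 255]))
    # Rough "smoke" band: low saturation, mid-to-high value (grayish haze)
    smoke_mask = cv2.inRange(hsv, np.array([0, 0, 120]), np.array([179, 50, 220]))
    h, w = frame.shape[:2]
    # Flag detection when the mask covers enough of the frame
    fire_detected = cv2.countNonZero(fire_mask) / (h * w) > 0.01
    smoke_detected = cv2.countNonZero(smoke_mask) / (h * w) > 0.20
    return fire_detected, smoke_detected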
pyproject.toml CHANGED

@@ -18,5 +18,8 @@ dependencies = [
     "opencv-python",
     "torch>=2.7.1",
     "accelerate>=1.10.0",
+    "fastapi",
+    "uvicorn",
+    "requests"
 ]

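Putting the pieces together, the commit title suggests four sources monitored in parallel. A hypothetical launcher sketch; the constructor argument, monitor_source method name, and source list are all assumptions, since the diff shows only fragments of FireDetectionClient:

import threading
from mcp_client import FireDetectionClient

# Assumed constructor: the diff only shows that the client stores self.mcp_server_url
client = FireDetectionClient("http://localhost:7860")

# Placeholder mix of webcam indices and RTSP URLs for the four sources
sources = [0, 1, "rtsp://cam3/stream", "rtsp://cam4/stream"]

# monitor_source is an assumed method name for the loop shown in the diff
threads = [
    threading.Thread(target=client.monitor_source, args=(sid,), daemon=True)
    for sid in sources
]
for t in threads:
    t.start()
for t in threads:
    t.join()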