Update app.py
app.py
CHANGED
@@ -1,44 +1,43 @@
from ultralytics import YOLO
import cv2
import torch
import gradio as gr
from PIL import Image
import numpy as np

# Load fine-tuned YOLOv8 model for car damage detection
model = YOLO("best.pt")  # Replace with your trained model file

def predict(input_img):
    # Convert PIL image to OpenCV format
    image = np.array(input_img)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    # Run inference
    results = model(image)

    # Draw bounding boxes
    for result in results:
        for box in result.boxes:
            x1, y1, x2, y2 = map(int, box.xyxy[0])
            conf = float(box.conf[0])
            label = f"Damage: {conf:.2f}"

            # Draw red bounding box
            cv2.rectangle(image, (x1, y1), (x2, y2), (0, 0, 255), 3)  # Red color (BGR: 0,0,255)
            cv2.putText(image, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

    # Convert back to PIL format
    output_img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    return output_img

# Gradio interface
gradio_app = gr.Interface(
    fn=predict,
    inputs=gr.Image(label="Upload a car image", sources=['upload', 'webcam'], type="pil"),
    outputs=gr.Image(label="Detected Damage"),
    title="Car Damage Detection",
    description="Upload an image of a car, and the model will detect and highlight damaged areas."
)

gradio_app.launch()
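
As a rough usage sketch (not part of the commit above), predict() can also be exercised headlessly by calling it on a PIL image from a separate script. This assumes best.pt is present next to app.py, that a sample image exists at "sample_car.jpg" (placeholder name), and that the gradio_app.launch() call is guarded behind an if __name__ == "__main__": block so importing app.py does not start the web server.

# Hypothetical headless test of predict(); assumes the launch() call in app.py
# is guarded by `if __name__ == "__main__":` so this import does not start a server.
from PIL import Image

from app import predict  # app.py as shown above

img = Image.open("sample_car.jpg").convert("RGB")  # placeholder sample image path
annotated = predict(img)                           # PIL image with red boxes and confidence labels
annotated.save("annotated_car.jpg")                # save the annotated output for inspection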