add fetch image api
- .gitignore +2 -1
- app.py +122 -154
- downloaded_image.jpg +0 -0
- requirements.txt +4 -0
- temp.py +159 -0
- templates/index.html +93 -74
.gitignore
CHANGED
@@ -1 +1,2 @@
-model_final.pth
+**/model_final.pth
+**/__pycache__
app.py
CHANGED
@@ -12,61 +12,16 @@ import gdown
 from skimage import io as skio
 from torchvision.ops import box_iou
 import torch
+from roboflow import Roboflow
+import supervision as sv
+import cv2
+import tempfile
+import os
+import numpy as np
+import requests

 # Initialize Flask app
 app = Flask(__name__)
-cfg = None
-# Google Drive file URL
-GDRIVE_MODEL_URL = "https://drive.google.com/uc?id=18aEDo-kWOBhg8mAhnbpFkuM6bmmrBH4E"  # Replace 'your-file-id' with the actual file ID from Google Drive
-LOCAL_MODEL_PATH = "model_final.pth"
-
-
-def download_file_from_google_drive(id, destination):
-    gdown.download(GDRIVE_MODEL_URL, LOCAL_MODEL_PATH, quiet=False)
-
-
-file_id = "18aEDo-kWOBhg8mAhnbpFkuM6bmmrBH4E"
-destination = "model_final.pth"
-download_file_from_google_drive(file_id, destination)
-
-
-# Download model from Google Drive if not already present locally
-def download_model():
-    if not os.path.exists(LOCAL_MODEL_PATH):
-        response = requests.get(GDRIVE_MODEL_URL, stream=True)
-        if response.status_code == 200:
-            with open(LOCAL_MODEL_PATH, "wb") as f:
-                f.write(response.content)
-        else:
-            raise Exception(
-                f"Failed to download model from Google Drive: {response.status_code}"
-            )
-
-
-# Configuration and model setup
-def setup_model(model_path):
-    global cfg
-    cfg = get_cfg()
-    cfg.merge_from_file("config.yaml")  # Update with the config file path
-    cfg.MODEL.WEIGHTS = model_path
-    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
-    cfg.MODEL.DEVICE = "cpu"  # Use "cuda" for GPU
-    return DefaultPredictor(cfg)
-
-
-# Ensure model is available
-predictor = setup_model(LOCAL_MODEL_PATH)
-
-# Define expected parts and costs
-expected_parts = ["headlamp", "rear_bumper", "door", "hood", "front_bumper"]
-cost_dict = {
-    "headlamp": 300,
-    "rear_bumper": 250,
-    "door": 200,
-    "hood": 220,
-    "front_bumper": 250,
-    "other": 150,
-}


 @app.route("/")
@@ -74,68 +29,6 @@ def home():
     return render_template("index.html")


-@app.route("/upload", methods=["POST"])
-def upload():
-    if "file" not in request.files:
-        return jsonify({"error": "No file uploaded"}), 400
-
-    file = request.files["file"]
-    if file.filename == "":
-        return jsonify({"error": "No file selected"}), 400
-
-    # Load image
-    image = skio.imread(file)
-    image_np = image
-
-    # Run model prediction
-    outputs = predictor(image_np)
-    instances = outputs["instances"].to("cpu")
-    class_names = MetadataCatalog.get(cfg.DATASETS.TEST[0]).thing_classes
-
-    # Extract bounding boxes and class IDs
-    boxes = instances.pred_boxes.tensor.numpy()
-    class_ids = instances.pred_classes.numpy()
-
-    # Filter overlapping boxes using IoU
-    iou_threshold = 0.8
-    keep_indices = []
-    merged_boxes = set()
-
-    for i in range(len(boxes)):
-        if i in merged_boxes:
-            continue
-        keep_indices.append(i)
-        for j in range(i + 1, len(boxes)):
-            if j in merged_boxes:
-                continue
-            iou = box_iou(
-                torch.tensor(boxes[i]).unsqueeze(0), torch.tensor(boxes[j]).unsqueeze(0)
-            ).item()
-            if iou > iou_threshold:
-                merged_boxes.add(j)
-
-    # Calculate total cost based on non-overlapping boxes
-    total_cost = 0
-    damage_details = []
-
-    for idx in keep_indices:
-        class_id = class_ids[idx]
-        damaged_part = (
-            class_names[class_id] if class_id < len(class_names) else "unknown"
-        )
-        if damaged_part not in expected_parts:
-            damaged_part = "other"
-
-        repair_cost = cost_dict.get(damaged_part, cost_dict["other"])
-        total_cost += repair_cost
-
-        damage_details.append({"part": damaged_part, "cost_usd": repair_cost})
-
-    response = {"damages": damage_details, "total_cost": total_cost}
-
-    return jsonify(response)
-
-
 @app.route("/fetch-image", methods=["POST"])
 def fetchImage():
     file = None
@@ -145,58 +38,133 @@ def fetchImage():
         file = io.BytesIO(response.content)
     elif "file" in request.files:
         file = request.files["file"]
+    url = "https://firebasestorage.googleapis.com/v0/b/car-damage-detector-s34rrz.firebasestorage.app/o/users%2FYMd99dt33HaktTWpYp5MM5oYeBE3%2Fuploads%2F1737454072124000.jpg?alt=media&token=9eae79fa-4c06-41a5-9f58-236c39efaac0"
+
+    # File name for saving
+    file_name = "downloaded_image.jpg"

+    # Download the image
+    response = requests.get(url)
+
+    # Save the image to the current directory
+    if response.status_code == 200:
+        with open(file_name, "wb") as file:
+            file.write(response.content)
+        print(f"Image downloaded and saved as {file_name}")
+    else:
+        print(f"Failed to download image. Status code: {response.status_code}")
     # Load image
-    image = skio.imread(file)
-    image_np = image
+    image = cv2.imread(file_name)
+
+    rf = Roboflow(api_key="LqD8Cs4OsoK8seO3CPkf")
+
+    project_parts = rf.workspace().project("car-parts-segmentation")
+    model_parts = project_parts.version(2).model
+
+    project_damage = rf.workspace().project("car-damage-detection-ha5mm")
+    model_damage = project_damage.version(1).model
+
+    # Run the damage detection model
+    result_damage = model_damage.predict(
+        file_name,
+        confidence=40,
+    ).json()
+
+    # Extract detections from the result
+    detections_damage = sv.Detections.from_inference(result_damage)
+
+    # Read the input image
+
+    # Annotate damaged areas of the car
+    mask_annotator = sv.MaskAnnotator()
+    annotated_image_damage = mask_annotator.annotate(
+        scene=image, detections=detections_damage
+    )
+
+    # Create a temporary directory to save outputs
+    temp_dir = tempfile.mkdtemp()
+
+    # Define a repair cost dictionary (per part)
+    repair_cost_dict = {
+        "wheel": 100,  # Base cost for wheel
+        "door": 200,  # Base cost for door
+        "hood": 300,  # Base cost for hood
+        "front_bumper": 250,  # Base cost for bumper
+        "trunk": 200,
+        "front_glass": 150,
+        "back_left_door": 200,
+        "left_mirror": 20,
+        "back_glass": 150,
+    }
+
+    # Initialize total cost
+    total_cost = 0

-    # Run model prediction
-    outputs = predictor(image_np)
-    instances = outputs["instances"].to("cpu")
-    class_names = MetadataCatalog.get(cfg.DATASETS.TEST[0]).thing_classes
+    # Ensure coordinate processing is done in chunks of 4
+    coordinates = list(map(int, detections_damage.xyxy.flatten()))
+    num_damages = (
+        len(coordinates) // 4
+    )  # Each damage has 4 coordinates (x1, y1, x2, y2)

-    # Extract bounding boxes and class IDs
-    boxes = instances.pred_boxes.tensor.numpy()
-    class_ids = instances.pred_classes.numpy()
+    # Iterate through damages
+    for i in range(num_damages):
+        x1, y1, x2, y2 = coordinates[i * 4 : (i + 1) * 4]

-    # Filter overlapping boxes using IoU
-    iou_threshold = 0.8
-    keep_indices = []
-    merged_boxes = set()
+        # Ensure the coordinates are within image bounds
+        x1, y1 = max(0, x1), max(0, y1)
+        x2, y2 = min(image.shape[1], x2), min(image.shape[0], y2)

-    for i in range(len(boxes)):
-        if i in merged_boxes:
+        # Crop the damaged region
+        cropped_damage = image[y1:y2, x1:x2]
+
+        # Check if the cropped region is valid
+        if cropped_damage.size == 0:
+            print(f"Skipping empty crop for damage region {i + 1}")
             continue
-        keep_indices.append(i)
-        for j in range(i + 1, len(boxes)):
-            if j in merged_boxes:
-                continue
-            iou = box_iou(
-                torch.tensor(boxes[i]).unsqueeze(0), torch.tensor(boxes[j]).unsqueeze(0)
-            ).item()
-            if iou > iou_threshold:
-                merged_boxes.add(j)
-
-    # Calculate total cost based on non-overlapping boxes
-    total_cost = 0
-    damage_details = []

-    for idx in keep_indices:
-        class_id = class_ids[idx]
-        damaged_part = (
-            class_names[class_id] if class_id < len(class_names) else "unknown"
-        )
-        if damaged_part not in expected_parts:
-            damaged_part = "other"
+        # Save the cropped damaged area
+        damage_image_path = os.path.join(temp_dir, f"damage_image_{i}.png")
+        cv2.imwrite(damage_image_path, cropped_damage)
+
+        # Run the parts detection model on the cropped damage
+        result_parts = model_parts.predict(damage_image_path, confidence=15).json()
+        detections_parts = sv.Detections.from_inference(result_parts)
+
+        # Calculate repair cost for each detected part
+        for part in result_parts["predictions"]:
+            part_name = part["class"]
+            damage_area = part["width"] * part["height"]
+            cropped_area = (x2 - x1) * (y2 - y1)
+            damage_percentage = (damage_area / cropped_area) * 100
+
+            # Lookup cost and add to total
+            base_cost = repair_cost_dict.get(
+                part_name, 0
+            )  # Default to 0 if part not in dict
+            repair_cost = (damage_percentage / 100) * base_cost
+            total_cost += repair_cost
+
+            print(
+                f"Damage {i + 1} - {part_name}: {damage_percentage:.2f}% damaged, Cost: ${repair_cost:.2f}"
+            )

-        repair_cost = cost_dict.get(damaged_part, cost_dict["other"])
-        total_cost += repair_cost
+        # Annotate and save the result
+        part_annotator = sv.LabelAnnotator()
+        annotated_parts_image = part_annotator.annotate(
+            scene=cropped_damage, detections=detections_parts
+        )
+        annotated_parts_path = os.path.join(temp_dir, f"annotated_parts_{i}.png")
+        cv2.imwrite(annotated_parts_path, annotated_parts_image)

-        damage_details.append({"part": damaged_part, "cost_usd": repair_cost})
+    # Save the overall annotated image
+    annotated_image_path = os.path.join(temp_dir, "annotated_image_damage.png")
+    cv2.imwrite(annotated_image_path, annotated_image_damage)

-    response = {"damages": damage_details, "total_cost": total_cost}
+    # Return the total cost in the specified format
+    result = {"total_cost": total_cost}
+    print(result)

-    return jsonify(response)
+    return jsonify(result)


 if __name__ == "__main__":
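For reference, the new /fetch-image handler prices each detected part by scaling a base cost by the share of the cropped damage region that the part detection covers. A standalone sketch of that arithmetic, using made-up numbers rather than values from this commit:

# Sketch of the cost formula used in the new /fetch-image handler:
# repair_cost = (damage_area / cropped_area) * base_cost for the detected part.
repair_cost_dict = {"door": 200, "hood": 300}  # subset of the dictionary in app.py

part_name = "door"                  # hypothetical detection inside the crop
part_width, part_height = 200, 150  # bounding box of the part, in pixels
cropped_area = 400 * 300            # (x2 - x1) * (y2 - y1) of the damage crop
damage_area = part_width * part_height

damage_percentage = (damage_area / cropped_area) * 100  # 25.0
base_cost = repair_cost_dict.get(part_name, 0)          # 200
repair_cost = (damage_percentage / 100) * base_cost     # 50.0
print(f"{part_name}: {damage_percentage:.2f}% damaged, Cost: ${repair_cost:.2f}")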
downloaded_image.jpg
ADDED
requirements.txt
CHANGED
@@ -13,3 +13,7 @@ Pillow
 opencv-python
 uvicorn
 scikit-image
+roboflow
+supervision
+opencv-python
+requests
temp.py
ADDED
@@ -0,0 +1,159 @@
+from flask import Flask, request, jsonify, render_template
+from detectron2.config import get_cfg
+from detectron2.engine import DefaultPredictor
+from detectron2.data import MetadataCatalog
+from detectron2.utils.visualizer import Visualizer, ColorMode
+import numpy as np
+from PIL import Image
+import io
+import os
+import requests
+import gdown
+from skimage import io as skio
+from torchvision.ops import box_iou
+import torch
+
+# Initialize Flask app
+app = Flask(__name__)
+cfg = None
+# Google Drive file URL
+GDRIVE_MODEL_URL = "https://drive.google.com/uc?id=18aEDo-kWOBhg8mAhnbpFkuM6bmmrBH4E"  # Replace 'your-file-id' with the actual file ID from Google Drive
+LOCAL_MODEL_PATH = "model_final.pth"
+
+
+def download_file_from_google_drive(id, destination):
+    gdown.download(GDRIVE_MODEL_URL, LOCAL_MODEL_PATH, quiet=False)
+
+
+file_id = "18aEDo-kWOBhg8mAhnbpFkuM6bmmrBH4E"
+destination = "model_final.pth"
+download_file_from_google_drive(file_id, destination)
+
+
+# Download model from Google Drive if not already present locally
+def download_model():
+    if not os.path.exists(LOCAL_MODEL_PATH):
+        response = requests.get(GDRIVE_MODEL_URL, stream=True)
+        if response.status_code == 200:
+            with open(LOCAL_MODEL_PATH, "wb") as f:
+                f.write(response.content)
+        else:
+            raise Exception(
+                f"Failed to download model from Google Drive: {response.status_code}"
+            )
+
+
+# Configuration and model setup
+def setup_model(model_path):
+    global cfg
+    cfg = get_cfg()
+    cfg.merge_from_file("config.yaml")  # Update with the config file path
+    cfg.MODEL.WEIGHTS = model_path
+    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
+    cfg.MODEL.DEVICE = "cpu"  # Use "cuda" for GPU
+    return DefaultPredictor(cfg)
+
+
+# Ensure model is available
+predictor = setup_model(LOCAL_MODEL_PATH)
+
+# Define expected parts and costs
+expected_parts = ["headlamp", "rear_bumper", "door", "hood", "front_bumper"]
+cost_dict = {
+    "headlamp": 300,
+    "rear_bumper": 250,
+    "door": 200,
+    "hood": 220,
+    "front_bumper": 250,
+    "other": 150,
+}
+
+
+@app.route("/")
+def home():
+    return render_template("index.html")
+
+
+@app.route("/upload", methods=["POST"])
+def upload():
+    if "file" not in request.files:
+        return jsonify({"error": "No file uploaded"}), 400
+
+    file = request.files["file"]
+    if file.filename == "":
+        return jsonify({"error": "No file selected"}), 400
+
+    # Load image
+    image = skio.imread(file)
+    image_np = image
+
+    # Run model prediction
+    outputs = predictor(image_np)
+    instances = outputs["instances"].to("cpu")
+    class_names = MetadataCatalog.get(cfg.DATASETS.TEST[0]).thing_classes
+
+    # Extract bounding boxes and class IDs
+    boxes = instances.pred_boxes.tensor.numpy()
+    class_ids = instances.pred_classes.numpy()
+
+    # Filter overlapping boxes using IoU
+    iou_threshold = 0.8
+    keep_indices = []
+    merged_boxes = set()
+
+    for i in range(len(boxes)):
+        if i in merged_boxes:
+            continue
+        keep_indices.append(i)
+        for j in range(i + 1, len(boxes)):
+            if j in merged_boxes:
+                continue
+            iou = box_iou(
+                torch.tensor(boxes[i]).unsqueeze(0), torch.tensor(boxes[j]).unsqueeze(0)
+            ).item()
+            if iou > iou_threshold:
+                merged_boxes.add(j)
+
+    # Calculate total cost based on non-overlapping boxes
+    total_cost = 0
+    damage_details = []
+
+    for idx in keep_indices:
+        class_id = class_ids[idx]
+        damaged_part = (
+            class_names[class_id] if class_id < len(class_names) else "unknown"
+        )
+        if damaged_part not in expected_parts:
+            damaged_part = "other"
+
+        repair_cost = cost_dict.get(damaged_part, cost_dict["other"])
+        total_cost += repair_cost
+
+        damage_details.append({"part": damaged_part, "cost_usd": repair_cost})
+
+    response = {"damages": damage_details, "total_cost": total_cost}
+
+    return jsonify(response)
+
+
+@app.route("/fetch-image", methods=["POST"])
+def fetchImage():
+    file = None
+    if "url" in request.form:
+        url = request.form["url"]
+        response = requests.get(url)
+        file = io.BytesIO(response.content)
+    elif "file" in request.files:
+        file = request.files["file"]
+
+    # Load image
+    image = skio.imread(file)
+    image_np = image
+
+
+
+    return jsonify(response)
+
+
+if __name__ == "__main__":
+    app.run(host="0.0.0.0", port=7860)
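The pairwise IoU loop preserved in temp.py (and removed from app.py) suppresses duplicate detections by hand. A minimal alternative sketch using torchvision's built-in NMS operator; the boxes and scores below are dummy values, whereas in the app they would come from instances.pred_boxes.tensor and instances.scores:

import torch
from torchvision.ops import nms

# Two overlapping boxes plus one unrelated box, in xyxy format
boxes = torch.tensor(
    [
        [10.0, 10.0, 110.0, 110.0],    # box A
        [12.0, 12.0, 108.0, 108.0],    # near-duplicate of box A (IoU > 0.8)
        [200.0, 200.0, 300.0, 300.0],  # separate detection
    ]
)
scores = torch.tensor([0.9, 0.8, 0.7])  # one confidence score per box
iou_threshold = 0.8                     # same threshold the hand-written loop uses

keep = nms(boxes, scores, iou_threshold)  # indices of the boxes to keep
print(keep.tolist())                      # [0, 2] -> the near-duplicate is dropped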
templates/index.html
CHANGED
@@ -1,91 +1,110 @@
 <!DOCTYPE html>
 <html lang="en">
-<head>
+  <head>
+    <meta charset="UTF-8" />
+    <meta
+      name="viewport"
+      content="width=device-width, initial-scale=1.0"
+    />
+    <title>Upload File</title>
+    <style>
+      body {
+        font-family: Arial, sans-serif;
+        text-align: center;
+        margin-top: 50px;
+      }
+      #preview {
+        margin-top: 20px;
+        max-width: 500px;
+        max-height: 500px;
+        display: none;
+      }
+    </style>
+  </head>
+  <body>
+    <h1>Vehicle Damage Detection</h1>
+    <form
+      id="uploadForm"
+      enctype="multipart/form-data"
+    >
+      <label for="file">Upload an image:</label>
+      <input
+        type="file"
+        id="file"
+        name="file"
+        accept="image/*"
+        required
+      />
+      <br /><br />
+      <img
+        id="preview"
+        alt="Image Preview"
+      />
+      <br /><br />
+      <button type="submit">Upload and Analyze</button>
+    </form>
+    <p id="response"></p>

+    <script>
+      const fileInput = document.getElementById("file");
+      const preview = document.getElementById("preview");
+      const uploadForm = document.getElementById("uploadForm");
+      const responseElement = document.getElementById("response");

+      // Preview the selected image
+      fileInput.addEventListener("change", function () {
+        const file = fileInput.files[0];
+        if (file) {
+          const reader = new FileReader();
+          reader.onload = function (e) {
+            preview.src = e.target.result;
+            preview.style.display = "block";
+          };
+          reader.readAsDataURL(file);
+        } else {
+          preview.style.display = "none";
+        }
+      });

+      // Handle form submission
+      uploadForm.addEventListener("submit", async function (event) {
+        event.preventDefault();

+        const formData = new FormData();
+        formData.append("file", fileInput.files[0]);

+        responseElement.textContent = "Uploading and analyzing...";

+        try {
+          const response = await fetch("/fetch-image", {
+            method: "POST",
+            body: formData,
+          });

+          if (response.ok) {
+            const result = await response.json();
+            responseElement.innerHTML = `
              <strong>Analysis Result:</strong><br>
              Total Cost: $${result.total_cost}<br>
              <ul>
-              ${result.damages
+                ${result.damages
+                  .map(
+                    (damage) => `
                <li>
                  Part: ${damage.part}, Area: ${damage.area_pixels} pixels, Cost: $${damage.cost_usd}
                </li>
-              `
+                  `
+                  )
+                  .join("")}
              </ul>
            `;
-</body>
-</html>
+          } else {
+            responseElement.textContent = "Error: Unable to analyze the image.";
+          }
+        } catch (error) {
+          responseElement.textContent = "Error: " + error.message;
+        }
+      });
+    </script>
+  </body>
+</html>
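The updated page posts the chosen file to /fetch-image as multipart form data. A minimal sketch of exercising the same endpoint outside the browser, assuming the app is running locally on the port from app.run() and that car.jpg is any local test image:

import requests

# POST a local image to the Flask endpoint the front-end uses
with open("car.jpg", "rb") as f:
    resp = requests.post(
        "http://localhost:7860/fetch-image",
        files={"file": f},  # same field name as the HTML form input
    )

print(resp.status_code, resp.json())  # e.g. 200 {'total_cost': ...}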