Updated Roboflow model to improve accuracy
Files changed:
- app.py  +121 -74
- requirements.txt  +1 -0
- test.ipynb  +0 -0
app.py
CHANGED
@@ -39,6 +39,8 @@ from PIL import Image
 import os
 from sklearn.model_selection import train_test_split
 from sklearn.preprocessing import MinMaxScaler
+from inference_sdk import InferenceHTTPClient
+
 # Initialize Flask app
 app = Flask(__name__)

@@ -138,88 +140,133 @@ def fetchImage():
         scene=image, detections=detections_damage
     )

-    #
-    temp_dir = tempfile.mkdtemp()
+    # temp_dir = tempfile.mkdtemp()

     # Define a repair cost dictionary (per part)
-    [old repair-cost dictionary entries; truncated in the rendered diff]
+    repair_costs = {
+        "Car-Damage-Detection-1KxY": 1000, # General damage assessment cost
+        "Bodypanel-Dent": 200,
+        "Front-Windscreen-Damage": 400,
+        "Headlight-Damage": 250,
+        "Rear-windscreen-Damage": 350,
+        "RunningBoard-Dent": 150,
+        "Sidemirror-Damage": 180,
+        "Signlight-Damage": 120,
+        "Taillight-Damage": 220,
+        "back-bumper": 500,
+        "back-glass": 400,
+        "bonnet-dent": 300,
+        "boot-dent": 350,
+        "broken_lamp": 100,
+        "crack": 250,
+        "damaged-door": 600,
+        "damaged-front-bumper": 550,
+        "damaged-head-light": 270,
+        "damaged-hood": 500,
+        "damaged-rear-bumper": 520,
+        "damaged-rear-window": 380,
+        "damaged-tail-light": 230,
+        "damaged-trunk": 600,
+        "damaged-window": 280,
+        "damaged-windscreen": 450,
+        "dent": 200,
+        "dent-or-scratch": 180,
+        "door": 700,
+        "doorouter-dent": 250,
+        "fender-dent": 220,
+        "flat_tire": 100,
+        "front-bumper": 500,
+        "front-bumper-dent": 450,
+        "front-glass": 400,
+        "headlight": 250,
+        "hood": 500,
+        "mirror": 180,
+        "pillar-dent": 220,
+        "quaterpanel-dent": 270,
+        "rear-bumper-dent": 480,
+        "roof-dent": 400,
+        "scratch": 150,
+        "shattered_glass": 500,
+        "taillight": 220,
+        "trunk": 600,
+        "wheel": 250,
+        "window": 300,
     }

-    # Initialize total cost
     total_cost = 0

-    #
-    [old coordinate-extraction lines; truncated in the rendered diff]
-    ) # Each damage has 4 coordinates (x1, y1, x2, y2)
+    # coordinates = list(map(int, detections_damage.xyxy.flatten()))
+    # num_damages = (
+    #     len(coordinates) // 4
+    # ) # Each damage has 4 coordinates (x1, y1, x2, y2)

     # Iterate through damages
-    for i in range(num_damages):
-    [old per-damage crop and cost-calculation loop; truncated in the rendered diff]
-    # Save the overall annotated image
-    annotated_image_path = os.path.join(temp_dir, "annotated_image_damage.png")
-    cv2.imwrite(annotated_image_path, annotated_image_damage)
-
-    # Return the total cost in the specified format
+    # for i in range(num_damages):
+    #     x1, y1, x2, y2 = coordinates[i * 4: (i + 1) * 4]
+
+    #     # Ensure the coordinates are within image bounds
+    #     x1, y1 = max(0, x1), max(0, y1)
+    #     x2, y2 = min(image.shape[1], x2), min(image.shape[0], y2)
+
+    #     # Crop the damaged region
+    #     cropped_damage = image[y1:y2, x1:x2]
+
+    #     # Check if the cropped region is valid
+    #     if cropped_damage.size == 0:
+    #         print(f"Skipping empty crop for damage region {i + 1}")
+    #         continue
+
+    #     # Save the cropped damaged area
+    #     damage_image_path = os.path.join(temp_dir, f"damage_image_{i}.png")
+    #     cv2.imwrite(damage_image_path, cropped_damage)
+
+    #     # Run the parts detection model on the cropped damage
+    #     result_parts = model_parts.predict(
+    #         damage_image_path, confidence=15).json()
+    #     detections_parts = sv.Detections.from_inference(result_parts)
+
+    #     # Calculate repair cost for each detected part
+    #     for part in result_parts["predictions"]:
+    #         part_name = part["class"]
+    #         damage_area = part["width"] * part["height"]
+    #         cropped_area = (x2 - x1) * (y2 - y1)
+    #         damage_percentage = (damage_area / cropped_area) * 100
+
+    #         # Lookup cost and add to total
+    #         base_cost = repair_cost_dict.get(
+    #             part_name, 0
+    #         ) # Default to 0 if part not in dict
+    #         repair_cost = (damage_percentage / 100) * 10 * base_cost
+    #         total_cost += round(repair_cost, ndigits=1)
+
+    #         print(
+    #             f"Damage {i + 1} - {part_name}: {damage_percentage:.2f}% damaged, Cost: ${repair_cost:.2f}"
+    #         )
+
+    #     # Annotate and save the result
+    #     part_annotator = sv.LabelAnnotator()
+    #     annotated_parts_image = part_annotator.annotate(
+    #         scene=cropped_damage, detections=detections_parts
+    #     )
+    #     annotated_parts_path = os.path.join(
+    #         temp_dir, f"annotated_parts_{i}.png")
+    #     cv2.imwrite(annotated_parts_path, annotated_parts_image)
+
+    # # Save the overall annotated image
+    # annotated_image_path = os.path.join(temp_dir, "annotated_image_damage.png")
+    # cv2.imwrite(annotated_image_path, annotated_image_damage)
+
+    # # Return the total cost in the specified format
+
+    CLIENT = InferenceHTTPClient(
+        api_url="https://detect.roboflow.com",
+        api_key="LqD8Cs4OsoK8seO3CPkf"
+    )
+    result = CLIENT.infer(file_name, model_id="car-damage-detection-krsix/1")
+    labels = [item["class"] for item in result["predictions"]]
+    print(labels)
+    for class_ in labels:
+        total_cost += repair_costs.get(class_, 0)
     result = {"total_cost": total_cost}
     print(result)
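In effect, the commit replaces the old per-damage crop-and-classify loop (kept only as comments) with a single call to a hosted Roboflow model through the inference-sdk client, then sums a flat per-class repair cost. The sketch below shows that new flow in isolation; it is illustrative only, and the API key, sample image path, and abbreviated cost table are placeholders rather than values from the repository (app.py defines the full dictionary and runs this inside a Flask route).

# Minimal sketch of the new Roboflow-hosted inference flow (illustrative only).
from inference_sdk import InferenceHTTPClient

# Abbreviated per-class repair costs; app.py defines the full table.
repair_costs = {
    "dent": 200,
    "scratch": 150,
    "front-bumper-dent": 450,
    "damaged-windscreen": 450,
}

client = InferenceHTTPClient(
    api_url="https://detect.roboflow.com",
    api_key="YOUR_ROBOFLOW_API_KEY",  # placeholder; supply your own key
)

# Run the hosted detection model on a local image (placeholder path).
result = client.infer("car_photo.jpg", model_id="car-damage-detection-krsix/1")

# Each prediction carries a class name; classes missing from the table cost 0.
labels = [item["class"] for item in result["predictions"]]
total_cost = sum(repair_costs.get(label, 0) for label in labels)

print({"total_cost": total_cost})

Because the lookup is keyed on the model's class names, retraining or swapping the Roboflow model only requires updating repair_costs, not the inference code itself.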
requirements.txt
CHANGED
@@ -19,6 +19,7 @@ opencv-python
 requests
 PyMuPDF
 openai
+inference-sdk
 fpdf
 cloudinary
 PyPDF2
test.ipynb
ADDED
The diff for this file is too large to render.