Upload 31 files
- .env +2 -0
- activity_detection/activity_detection.py +101 -0
- activity_detection/activity_recognition.pt +3 -0
- app.py +145 -0
- images/Back20-11-11_08-02-30-91_00003_jpg.rf.375d64c55683fd17471189250f91a224.jpg +0 -0
- images/Back20-11-11_14-43-00-20_00008_jpg.rf.9f4e0388b4b2c1efbe006af98cd273d2.jpg +0 -0
- images/Back20-11-12_09-21-03-10_00040_jpg.rf.1c7ecbc5b08b5155915b7fe58ed5a98b.jpg +0 -0
- images/Back20-11-12_10-12-53-58_00069_jpg.rf.c6d541810375f0744327c87d11a06b29.jpg +0 -0
- images/FLIR_00964_jpeg.rf.6e68146482a533c9fa1d335005845f06.jpg +0 -0
- images/FLIR_01387_jpeg.rf.ae924342476ab411eb2489931d50f4a9.jpg +0 -0
- images/FLIR_03769_jpeg.rf.25e353a9db44e33f2ea3af8b6811bd3a.jpg +0 -0
- images/FLIR_03925_jpeg.rf.8891211d21226b5337f82a40ed8d79c9.jpg +0 -0
- images/FLIR_03968_jpeg.rf.922e9db2bb5d8c7942f9db752a1b1ae7.jpg +0 -0
- images/FLIR_03974_jpeg.rf.0290023ccfb7ecec1b533260c03065ff.jpg +0 -0
- images/FLIR_03979_jpeg.rf.412e1f6bacf691a860689d12f3d5f05e.jpg +0 -0
- images/FLIR_05146_jpeg.rf.246983d26227b81d0a2b87236f55ae62.jpg +0 -0
- images/FLIR_06021_jpeg.rf.9bfab1733636c24312989573a4c97ba5.jpg +0 -0
- images/FLIR_07442_jpeg.rf.2ba18cb1170708a9a2afff1f4445ce3b.jpg +0 -0
- images/TFront-South-09-38-03-32-05012_jpg.rf.0796379e3dae98ac267c967818e8a0dd.jpg +0 -0
- images/Thermal-Front-North-2024-02-26-1312058229_jpg.rf.4731469721f7aee8700efab0aa118d1a.jpg +0 -0
- images/car_98-71294498443604_Thu-Nov-25-20-09-45-2021_jpg.rf.418e43301e2ee7ff907d3d7caf49f4f9.jpg +0 -0
- images/car_99-94168281555176_Mon-Dec-13-16-37-40-2021_jpg.rf.a8c56aba60dd3a19f2c2f159a2c9062d.jpg +0 -0
- images/default_img.jpg +0 -0
- images/image.jpg +0 -0
- object_detection/detection.pt +3 -0
- object_detection/object_detection.py +156 -0
- pipeline.py +85 -0
- requirements.txt +77 -0
- utils/distance.py +51 -0
- utils/generate_result.py +112 -0
- utils/zoom_in.py +103 -0
.env
ADDED
@@ -0,0 +1,2 @@
+OBJECT_DET_MODEL_PATH = 'object_detection/detection.pt'
+ACTIVITY_DET_MODEL_PATH = 'activity_detection/activity_recognition.pt'
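These two variables are read in pipeline.py via python-dotenv's `load_dotenv`, so the model paths can be changed without touching the code.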
activity_detection/activity_detection.py
ADDED
@@ -0,0 +1,101 @@
+# This file contains the class for activity detection.
+from ultralytics import YOLO
+import logging
+
+# Configure the logger
+logging.basicConfig(
+    level=logging.DEBUG,
+    format="%(asctime)s - %(levelname)s - %(message)s",
+    filename="logs.log",
+)
+
+# Create a logger
+logger = logging.getLogger("pipeline")
+
+class ActivityDetection:
+    """Class to detect the activity of a person in an image."""
+
+    def __init__(self):
+        self.activity_classes = ['Standing', 'Running', 'Sitting']
+        self.input_file_path = None
+        self.trained_model_path = None
+        self.trained_model = None
+        self.base_model = None
+
+    def set_input_file_path(self, input_file_path):
+        """
+        Set the path of the input files used to train a model.
+
+        Args:
+            input_file_path (str): Relative or absolute path to the input files
+        """
+        self.input_file_path = input_file_path
+        logger.info("input file path is set...")
+
+    def set_trained_model_path(self, trained_model_path):
+        """
+        Set the path of the trained model used for inference.
+
+        Args:
+            trained_model_path (str): Relative or absolute path to the trained model
+        """
+        self.trained_model_path = trained_model_path
+        logger.info("trained_model_path is set...")
+        self.trained_model = YOLO(trained_model_path)
+        logger.info("trained_model is created successfully...")
+
+    def train(self):
+        """
+        Train a model for activity detection.
+
+        Raises:
+            BaseException: raised when input_file_path has not been set
+            e: any other exception that occurs
+        """
+        self.base_model = YOLO("yolov8n-cls.pt")
+        try:
+            if self.input_file_path is None:
+                raise BaseException("Please set the path of the input files first with the set_input_file_path method.")
+
+            self.base_model.train(data=self.input_file_path, epochs=50)
+            logger.info("training of model is successfully done...")
+        except Exception as e:
+            logger.error("Something went wrong in activity detection model training")
+            logger.error(e)
+
+    def inference(self, single_object_images):
+        """Detect the activity of the person in each image.
+
+        Args:
+            single_object_images (list of numpy arrays): list of single-object images
+
+        Raises:
+            BaseException: raised when trained_model_path has not been set
+            e: any other exception that occurs
+
+        Returns:
+            activities (list of str): list of activities performed by each person
+        """
+        logger.info("inference method is called...")
+        try:
+            if self.trained_model is None:
+                raise BaseException("Please set the path of the trained model first with the set_trained_model_path method.")
+
+            activities = []
+
+            # Detect the activity in each image.
+            for img in single_object_images:
+                predictions = self.trained_model.predict(img)
+
+                for result in predictions:
+                    # probs.top1 is the index of the highest-probability class.
+                    probs = result.probs
+                    class_index = probs.top1
+
+                    activities.append(self.activity_classes[class_index])
+
+            return activities
+
+        except Exception as e:
+            logger.error("Something went wrong in activity detection model inference")
+            logger.error(e)
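A minimal usage sketch for this class, assuming the bundled activity_recognition.pt weights; the crop below is just a repo test image standing in for a real person crop:

```python
import cv2
from activity_detection.activity_detection import ActivityDetection

detector = ActivityDetection()
detector.set_trained_model_path("activity_detection/activity_recognition.pt")

# Hypothetical person crops; in the app these come from the object-detection stage.
crops = [cv2.imread("images/default_img.jpg")]
print(detector.inference(crops))  # e.g. ['Standing']
```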
activity_detection/activity_recognition.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70759328a3222fb40001170759f7ed6577acd54f3283d087f9fbe63974989ee6
+size 2968321
app.py
ADDED
@@ -0,0 +1,145 @@
+import streamlit as st
+import cv2
+import io
+import numpy as np
+from PIL import Image
+from pipeline import pipeline
+import pandas as pd
+import logging
+
+# Configure the logger
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(asctime)s - %(levelname)s - %(message)s",
+    filename="logs.log",
+)
+
+# Create a logger
+logger = logging.getLogger("streamlit_app")
+
+
+code = """
+<style>
+.block-container{
+    max-width: 100%;
+    padding: 50px;
+}
+/* disabled rules kept for reference
+[data-testid="stImage"], .e115fcil2, [data-testid="StyledFullScreenButton"], [data-testid="stFullScreenFrame"].e1vs0wn30, [data-testid="element-container"].e1f1d6gn4.element-container{
+    width: fit-content !important;
+}
+[data-testid="stVerticalBlock"].e1f1d6gn2{
+    flex-direction: row;
+    flex-wrap: wrap;
+}
+*/
+[data-testid="StyledFullScreenButton"]{
+    display: none;
+}
+[data-testid="stVerticalBlockBorderWrapper"], [data-testid="stVerticalBlock"]{
+    width: 100%;
+}
+.e115fcil2{
+    justify-content: center;
+    margin-top: 20px;
+}
+</style>
+"""
+st.html(code)
+
+st.title("Automated Surveillance System")
+
+# Main two columns
+col1, col2 = st.columns([5, 5])
+container = col2.container(height=800)
+
+# Columns to show zoomed images
+col3, col4 = container.columns([1, 1])
+
+results = None  # stays None if the pipeline fails, so the results pane is skipped
+
+# Column to take the file input and show detected objects.
+with col1:
+    image = st.file_uploader("File upload", label_visibility="hidden")
+    if image is not None:
+        logger.info("Image is uploaded successfully...")
+        image = Image.open(io.BytesIO(image.getvalue()))
+        image = np.asarray(image)
+
+        # PIL decodes to RGB, so convert from RGB (not BGR) to grayscale.
+        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
+        cv2.imwrite("images/image.jpg", image)
+        image = cv2.imread("images/image.jpg")
+        logger.info("Image is stored in image.jpg file...")
+        results = pipeline(image)
+        logger.info("getting results of image...")
+
+        for result in results:
+            image = cv2.rectangle(image, result['merged_boundries']['top_left'], result['merged_boundries']['bottom_right'], (255, 0, 0), 1)
+
+        st.image(image)
+        logger.info("image is loaded in app successfully...")
+
+    else:
+        try:
+            logger.info("default image is used...")
+            image = cv2.imread("images/default_img.jpg")
+
+            results = pipeline(image)
+            logger.info("getting results of image...")
+            for result in results:
+                image = cv2.rectangle(image, result['merged_boundries']['top_left'], result['merged_boundries']['bottom_right'], (255, 0, 0), 1)
+            st.image(image)
+        except Exception as e:
+            logger.error("Something went wrong in Streamlit application inference")
+            logger.error(e)
+
+# Column to show zoomed images
+if results is not None:
+    with col2:
+        results_1 = results[:len(results)//2]
+        results_2 = results[len(results)//2:]
+        with col4:
+            for result in results_1:
+                img = result['zoomed_img']
+
+                df = pd.DataFrame(columns=['Object Type', 'Distance', 'Activity'])
+                actual_width = result['merged_boundries']['bottom_right'][0] - result['merged_boundries']['top_left'][0]
+                actual_height = result['merged_boundries']['bottom_right'][1] - result['merged_boundries']['top_left'][1]
+
+                for box in result['actual_boxes']:
+                    # Shift each box so it is relative to the crop's origin.
+                    top_left = (box['top_left'][0] - result['merged_boundries']['top_left'][0], box['top_left'][1] - result['merged_boundries']['top_left'][1])
+                    bottom_right = (box['bottom_right'][0] - result['merged_boundries']['top_left'][0], box['bottom_right'][1] - result['merged_boundries']['top_left'][1])
+
+                    # The crop was resized uniformly to height 500, so both axes share one scale factor.
+                    bottom_right = (bottom_right[0]*img.shape[0]//actual_height, bottom_right[1]*img.shape[1]//actual_width)
+                    top_left = (top_left[0]*img.shape[0]//actual_height, top_left[1]*img.shape[1]//actual_width)
+
+                    img = cv2.rectangle(img, top_left, bottom_right, (255, 0, 0), 1)
+                    img = cv2.putText(img, "ID: "+str(len(df)), top_left, 1, 1, (255, 255, 255))
+                    df.loc[len(df)] = [box['class'], box['distance'], box['activity']]
+
+                st.image(img)
+                st.table(df)
+
+        with col3:
+            for result in results_2:
+                img = result['zoomed_img']
+
+                df = pd.DataFrame(columns=['Object Type', 'Distance', 'Activity'])
+                actual_width = result['merged_boundries']['bottom_right'][0] - result['merged_boundries']['top_left'][0]
+                actual_height = result['merged_boundries']['bottom_right'][1] - result['merged_boundries']['top_left'][1]
+
+                for box in result['actual_boxes']:
+                    top_left = (box['top_left'][0] - result['merged_boundries']['top_left'][0], box['top_left'][1] - result['merged_boundries']['top_left'][1])
+                    bottom_right = (box['bottom_right'][0] - result['merged_boundries']['top_left'][0], box['bottom_right'][1] - result['merged_boundries']['top_left'][1])
+
+                    bottom_right = (bottom_right[0]*img.shape[0]//actual_height, bottom_right[1]*img.shape[1]//actual_width)
+                    top_left = (top_left[0]*img.shape[0]//actual_height, top_left[1]*img.shape[1]//actual_width)
+
+                    img = cv2.rectangle(img, top_left, bottom_right, (255, 0, 0), 1)
+                    img = cv2.putText(img, "ID: "+str(len(df)), top_left, 1, 1, (255, 255, 255))
+                    df.loc[len(df)] = [box['class'], box['distance'], box['activity']]
+
+                st.image(img)
+                st.table(df)
+        logger.info("Results are loaded successfully...")
+        logger.info('\n')  # Add a blank line
+        logger.info('\n')  # Add a blank line
+
+else:
+    logger.error("results are not found...")
+    logger.info('\n')  # Add a blank line
+    logger.info('\n')  # Add a blank line
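With the dependencies installed, the app can be started locally with `streamlit run app.py`; it falls back to `images/default_img.jpg` when no file is uploaded.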
images/Back20-11-11_08-02-30-91_00003_jpg.rf.375d64c55683fd17471189250f91a224.jpg
ADDED
images/Back20-11-11_14-43-00-20_00008_jpg.rf.9f4e0388b4b2c1efbe006af98cd273d2.jpg
ADDED
images/Back20-11-12_09-21-03-10_00040_jpg.rf.1c7ecbc5b08b5155915b7fe58ed5a98b.jpg
ADDED
images/Back20-11-12_10-12-53-58_00069_jpg.rf.c6d541810375f0744327c87d11a06b29.jpg
ADDED
images/FLIR_00964_jpeg.rf.6e68146482a533c9fa1d335005845f06.jpg
ADDED
images/FLIR_01387_jpeg.rf.ae924342476ab411eb2489931d50f4a9.jpg
ADDED
images/FLIR_03769_jpeg.rf.25e353a9db44e33f2ea3af8b6811bd3a.jpg
ADDED
images/FLIR_03925_jpeg.rf.8891211d21226b5337f82a40ed8d79c9.jpg
ADDED
images/FLIR_03968_jpeg.rf.922e9db2bb5d8c7942f9db752a1b1ae7.jpg
ADDED
images/FLIR_03974_jpeg.rf.0290023ccfb7ecec1b533260c03065ff.jpg
ADDED
images/FLIR_03979_jpeg.rf.412e1f6bacf691a860689d12f3d5f05e.jpg
ADDED
images/FLIR_05146_jpeg.rf.246983d26227b81d0a2b87236f55ae62.jpg
ADDED
images/FLIR_06021_jpeg.rf.9bfab1733636c24312989573a4c97ba5.jpg
ADDED
images/FLIR_07442_jpeg.rf.2ba18cb1170708a9a2afff1f4445ce3b.jpg
ADDED
images/TFront-South-09-38-03-32-05012_jpg.rf.0796379e3dae98ac267c967818e8a0dd.jpg
ADDED
images/Thermal-Front-North-2024-02-26-1312058229_jpg.rf.4731469721f7aee8700efab0aa118d1a.jpg
ADDED
images/car_98-71294498443604_Thu-Nov-25-20-09-45-2021_jpg.rf.418e43301e2ee7ff907d3d7caf49f4f9.jpg
ADDED
images/car_99-94168281555176_Mon-Dec-13-16-37-40-2021_jpg.rf.a8c56aba60dd3a19f2c2f159a2c9062d.jpg
ADDED
images/default_img.jpg
ADDED
images/image.jpg
ADDED
object_detection/detection.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64b1203bd0c8e4fb317eb0b11816a9e2a95ab887f708d9ede3c4ef40f1daf94c
+size 52026625
object_detection/object_detection.py
ADDED
@@ -0,0 +1,156 @@
+# This file contains the class for object detection from an image.
+from ultralytics import YOLO
+import math
+import logging
+
+# Configure the logger
+logging.basicConfig(
+    level=logging.DEBUG,
+    format="%(asctime)s - %(levelname)s - %(message)s",
+    filename="logs.log",
+)
+
+# Create a logger
+logger = logging.getLogger("pipeline")
+
+
+class ObjectDetection:
+    """Class to detect objects in an image"""
+
+    def __init__(self):
+        self.input_file_path = None
+        self.trained_model_path = None
+        self.trained_model = None
+        self.base_model = None
+
+    def set_input_file_path(self, input_file_path):
+        """
+        Set the path of the input files used to train a model.
+
+        Args:
+            input_file_path (str): Relative or absolute path to the input files
+        """
+        self.input_file_path = input_file_path
+        logger.info("input file path is set...")
+
+    def set_trained_model_path(self, trained_model_path):
+        """
+        Set the path of the trained model used for inference.
+
+        Args:
+            trained_model_path (str): Relative or absolute path to the trained model
+        """
+        self.trained_model_path = trained_model_path
+        self.trained_model = YOLO(trained_model_path)
+        logger.info("trained model path is set...")
+
+    def train(self):
+        """
+        Train a model for object detection in images.
+
+        Raises:
+            BaseException: raised when input_file_path has not been set
+            e: any other exception that occurs
+        """
+        self.base_model = YOLO("yolov8m.pt")
+        try:
+            if self.input_file_path is None:
+                raise BaseException("Please set the path of the input files first with the set_input_file_path method.")
+
+            self.base_model.train(data=self.input_file_path, epochs=100)
+        except Exception as e:
+            logger.error("Something went wrong in object detection model training")
+            logger.error(e)
+
+    def inference(self, image):
+        """Detect objects in an image.
+
+        Args:
+            image (numpy array): Numpy array of the image
+
+        Raises:
+            BaseException: raised when trained_model_path has not been set
+            e: any other exception that occurs
+
+        Returns:
+            json array: list of all detected objects in the format:
+            [{
+                'actual_boundries': [{
+                    'top_left': tuple(x, y),
+                    'bottom_right': tuple(x, y),
+                    'class': str
+                }],
+                'merged_boundries': {
+                    'top_left': tuple(x, y),
+                    'bottom_right': tuple(x, y),
+                    'person_count': int,
+                    'vehical_count': int,
+                    'animal_count': int
+                }
+            }]
+        """
+        try:
+            if self.trained_model is None:
+                raise BaseException("Please set the path of the trained model first with the set_trained_model_path method.")
+
+            # Detect objects in the image.
+            results = self.trained_model(image)
+            detected_boundary_box = results[0].boxes.xyxy.tolist()
+            classes = results[0].boxes.cls.tolist()
+            class_names = results[0].names
+            confidences = results[0].boxes.conf.tolist()
+            number_of_objects = 0
+            boundary_boxes_with_margin = []  # ((x1, y1), (x2, y2), person_count, vehical_count, animal_count)
+
+            # Add a margin of half the box size on each side of the boundary box.
+            for box, cls, conf in zip(detected_boundary_box, classes, confidences):
+                x1, y1, x2, y2 = box
+                name = class_names[int(cls)]
+                merged_boundry_object = {"actual_boundries": [{"top_left": (int(x1), int(y1)),
+                                                               "bottom_right": (int(x2), int(y2)),
+                                                               "class": name}]}
+                # Keep the original box size so the x2/y2 margins are not computed
+                # from the already-shifted x1/y1.
+                width, height = x2 - x1, y2 - y1
+                x1 = max(0, x1 - width/2)
+                y1 = max(0, y1 - height/2)
+                x2 = min(len(image[0])-1, x2 + width/2)
+                y2 = min(len(image)-1, y2 + height/2)
+                x1, y1, x2, y2 = math.floor(x1), math.floor(y1), math.ceil(x2), math.ceil(y2)
+                merged_boundry_object["merged_boundries"] = {"top_left": (x1, y1),
+                                                             "bottom_right": (x2, y2),
+                                                             "person_count": 1 if name == 'person' else 0,
+                                                             "vehical_count": 1 if name == 'vehical' else 0,
+                                                             "animal_count": 1 if name == 'animal' else 0}
+                boundary_boxes_with_margin.append(merged_boundry_object)
+                number_of_objects += 1
+            boundary_boxes_with_margin.sort(key=lambda x: (x['merged_boundries']['top_left'], x['merged_boundries']['bottom_right']))
+
+            merged_boundary_boxes = []
+            if len(boundary_boxes_with_margin) > 0:
+                merged_boundary_boxes.append(boundary_boxes_with_margin[0])
+
+            # Greedily merge overlapping boundary boxes after sorting by position.
+            for indx, box in enumerate(boundary_boxes_with_margin):
+                if indx != 0:
+                    top_left_last = merged_boundary_boxes[-1]['merged_boundries']['top_left']
+                    bottom_right_last = merged_boundary_boxes[-1]['merged_boundries']['bottom_right']
+                    top_left_curr = box['merged_boundries']['top_left']
+                    bottom_right_curr = box['merged_boundries']['bottom_right']
+
+                    if bottom_right_last[0] >= top_left_curr[0] and bottom_right_last[1] >= top_left_curr[1]:
+                        new_x1 = min(top_left_last[0], top_left_curr[0])
+                        new_y1 = min(top_left_last[1], top_left_curr[1])
+                        new_x2 = max(bottom_right_last[0], bottom_right_curr[0])
+                        new_y2 = max(bottom_right_last[1], bottom_right_curr[1])
+
+                        merged_boundary_boxes[-1]['actual_boundries'] += box['actual_boundries']
+                        merged_boundary_boxes[-1]['merged_boundries'] = {"top_left": (new_x1, new_y1),
+                                                                         "bottom_right": (new_x2, new_y2),
+                                                                         "person_count": merged_boundary_boxes[-1]['merged_boundries']['person_count'] + box['merged_boundries']['person_count'],
+                                                                         "vehical_count": merged_boundary_boxes[-1]['merged_boundries']['vehical_count'] + box['merged_boundries']['vehical_count'],
+                                                                         "animal_count": merged_boundary_boxes[-1]['merged_boundries']['animal_count'] + box['merged_boundries']['animal_count']}
+                    else:
+                        merged_boundary_boxes.append(box)
+            logger.info("inference is done successfully...")
+            return merged_boundary_boxes
+
+        except Exception as e:
+            logger.error("Something went wrong in object detection model inference")
+            logger.error(e)
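A minimal sketch of the detect-and-merge behavior, assuming the bundled detection.pt weights and one of the repo's test images:

```python
import cv2
from object_detection.object_detection import ObjectDetection

detector = ObjectDetection()
detector.set_trained_model_path("object_detection/detection.pt")

image = cv2.imread("images/default_img.jpg")
for group in detector.inference(image):
    merged = group['merged_boundries']
    # Each group is one merged region plus the per-object boxes inside it.
    print(merged['top_left'], merged['bottom_right'],
          [b['class'] for b in group['actual_boundries']])
```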
pipeline.py
ADDED
@@ -0,0 +1,85 @@
+# This file wires together all classes and functions of the project into one flow.
+import os
+
+from utils.zoom_in import croped_images, image_enhancements
+from utils.distance import get_distances
+from utils.generate_result import get_json_data
+
+from object_detection.object_detection import ObjectDetection
+from activity_detection.activity_detection import ActivityDetection
+from dotenv import load_dotenv
+from pathlib import Path
+import logging
+
+env_path = Path('.') / '.env'
+load_dotenv(dotenv_path=env_path)
+
+path = {
+    'ACTIVITY_DET_MODEL_PATH': str(os.getenv('ACTIVITY_DET_MODEL_PATH')),
+    'OBJECT_DET_MODEL_PATH': str(os.getenv('OBJECT_DET_MODEL_PATH')),
+}
+
+# Configure the logger
+logging.basicConfig(
+    level=logging.DEBUG,
+    format="%(asctime)s - %(levelname)s - %(message)s",
+    filename="logs.log",
+)
+
+# Create a logger
+logger = logging.getLogger("pipeline")
+
+def pipeline(image):
+    """Take an image from the Streamlit application, then perform object detection,
+    image cropping, image enhancement, activity detection and distance estimation,
+    and return the final results in JSON format to the Streamlit application.
+
+    Args:
+        image (numpy array): numpy array of an image with 3 channels
+
+    Returns:
+        final_results: JSON array whose objects have the shape
+        {
+            'zoomed_img': np.array([]),
+            'actual_boxes': [],
+            'merged_boundries': {},
+        }
+    """
+    # Detect the objects in the given image using YOLO and get json_data for each object.
+    try:
+        # Detect objects in the image.
+        object_detection = ObjectDetection()
+        logger.info("object detection object is created...")
+        object_detection.set_trained_model_path(path['OBJECT_DET_MODEL_PATH'])
+        logger.info("object detection model path is set...")
+        object_json_data = object_detection.inference(image)
+        logger.info("object detection done successfully...")
+
+        # Get the crops of the merged (overlapping) boundary boxes and the single-object crops.
+        croped_images_list, single_object_images = croped_images(image, object_json_data)
+        logger.info("cropping of image is done successfully...")
+
+        # Enhance both the merged crops and the single-object crops.
+        enhanced_images, single_object_images = image_enhancements(croped_images_list, single_object_images)
+        logger.info("enhancement of image is done successfully...")
+
+        # Detect the activity of each person object using image classification.
+        activity_detection = ActivityDetection()
+        logger.info('activity detection object is created successfully...')
+        activity_detection.set_trained_model_path(path['ACTIVITY_DET_MODEL_PATH'])
+        logger.info('activity detection model is set')
+        detected_activity = activity_detection.inference(single_object_images)
+        logger.info("detection of activity is done successfully...")
+
+        # Calculate the distance of every object.
+        distances_list = get_distances(object_json_data)
+        logger.info("distance of object is calculated successfully...")
+
+        # Build the final JSON array.
+        final_results = get_json_data(object_json_data, enhanced_images, detected_activity, distances_list)
+        logger.info('final result of given image is created successfully...')
+
+        return final_results
+    except Exception as e:
+        # Log instead of silently swallowing the error; the caller checks for None.
+        logger.error("Something went wrong in the pipeline")
+        logger.error(e)
+        return None
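A minimal end-to-end sketch, assuming both model paths in .env resolve and using a repo test image:

```python
import cv2
from pipeline import pipeline

image = cv2.imread("images/default_img.jpg")
results = pipeline(image)
if results is not None:
    for result in results:
        for box in result['actual_boxes']:
            print(box['class'], box['distance'], box['activity'])
```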
requirements.txt
ADDED
@@ -0,0 +1,77 @@
+altair==5.3.0
+attrs==23.2.0
+blinker==1.8.2
+cachetools==5.3.3
+certifi==2024.2.2
+charset-normalizer==3.3.2
+click==8.1.7
+contourpy==1.2.1
+cycler==0.12.1
+filelock==3.14.0
+fonttools==4.51.0
+fsspec==2024.3.1
+gitdb==4.0.11
+GitPython==3.1.43
+idna==3.7
+Jinja2==3.1.4
+jsonschema==4.22.0
+jsonschema-specifications==2023.12.1
+kiwisolver==1.4.5
+markdown-it-py==3.0.0
+MarkupSafe==2.1.5
+matplotlib==3.8.4
+mdurl==0.1.2
+mpmath==1.3.0
+networkx==3.3
+numpy==1.26.4
+nvidia-cublas-cu12==12.1.3.1
+nvidia-cuda-cupti-cu12==12.1.105
+nvidia-cuda-nvrtc-cu12==12.1.105
+nvidia-cuda-runtime-cu12==12.1.105
+nvidia-cudnn-cu12==8.9.2.26
+nvidia-cufft-cu12==11.0.2.54
+nvidia-curand-cu12==10.3.2.106
+nvidia-cusolver-cu12==11.4.5.107
+nvidia-cusparse-cu12==12.1.0.106
+nvidia-nccl-cu12==2.20.5
+nvidia-nvjitlink-cu12==12.4.127
+nvidia-nvtx-cu12==12.1.105
+opencv-python==4.9.0.80
+packaging==24.0
+pandas==2.2.2
+pillow==10.3.0
+protobuf==4.25.3
+psutil==5.9.8
+py-cpuinfo==9.0.0
+pyarrow==16.0.0
+pydeck==0.9.0
+Pygments==2.18.0
+pyparsing==3.1.2
+python-dateutil==2.9.0.post0
+python-dotenv==1.0.1
+pytz==2024.1
+PyYAML==6.0.1
+referencing==0.35.1
+requests==2.31.0
+rich==13.7.1
+rpds-py==0.18.1
+scipy==1.13.0
+seaborn==0.13.2
+six==1.16.0
+smmap==5.0.1
+streamlit==1.34.0
+sympy==1.12
+tenacity==8.3.0
+thop==0.1.1.post2209072238
+toml==0.10.2
+toolz==0.12.1
+torch==2.3.0
+torchvision==0.18.0
+tornado==6.4
+tqdm==4.66.4
+triton==2.3.0
+typing_extensions==4.11.0
+tzdata==2024.1
+ultralytics==8.2.10
+urllib3==2.2.1
+watchdog==4.0.0
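The pinned set installs with `pip install -r requirements.txt`; note that it pulls in the CUDA 12 builds of torch 2.3.0, so a CPU-only environment may prefer the CPU wheels instead.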
utils/distance.py
ADDED
@@ -0,0 +1,51 @@
+# File to estimate the distance of each object in the image.
+
+# Configure the logger
+import logging
+logging.basicConfig(
+    level=logging.DEBUG,
+    format="%(asctime)s - %(levelname)s - %(message)s",
+    filename="logs.log",
+)
+
+# Create a logger
+logger = logging.getLogger("pipeline")
+
+# Constants: assumed real-world heights (in meters) and focal length (in pixels).
+PERSON_HEIGHT = 1.5
+VEHICAL_HEIGHT = 1.35
+ANIMAL_HEIGHT = 0.6
+FOCAL_LENGTH = 6400
+
+def get_distances(merged_boundary_boxes):
+    """Get the distance of each object from the camera.
+
+    Args:
+        merged_boundary_boxes (json array): json array of the detected image's data
+
+    Returns:
+        distance_list: list of distances, one per object
+    """
+    logger.info("get_distances function is called...")
+
+    try:
+        distance_list = []
+        for box in merged_boundary_boxes:
+            for actual_box in box['actual_boundries']:
+                height = actual_box['bottom_right'][1] - actual_box['top_left'][1]
+
+                # Pinhole model: distance = focal_length * real_height / pixel_height,
+                # using a class-specific real-world height.
+                if actual_box['class'] == "person":
+                    distance = FOCAL_LENGTH*PERSON_HEIGHT/height
+
+                elif actual_box['class'] == "vehical":
+                    distance = FOCAL_LENGTH*VEHICAL_HEIGHT/height
+
+                else:
+                    distance = FOCAL_LENGTH*ANIMAL_HEIGHT/height
+
+                distance_list.append(str(round(distance)) + "m")
+        return distance_list
+    except Exception as e:
+        logger.error("Something went wrong in distance estimation function...")
+        logger.error(e)
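A worked instance of the formula: with FOCAL_LENGTH = 6400 and PERSON_HEIGHT = 1.5 m, a person whose bounding box is 100 px tall is estimated at 6400 * 1.5 / 100 = 96 m.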
utils/generate_result.py
ADDED
@@ -0,0 +1,112 @@
+# File to build the JSON data of the results.
+import numpy as np
+import logging
+
+# Configure the logger
+logging.basicConfig(
+    level=logging.DEBUG,
+    format="%(asctime)s - %(levelname)s - %(message)s",
+    filename="logs.log",
+)
+
+# Create a logger
+logger = logging.getLogger("pipeline")
+
+def get_json_data(json_data, enhanced_images, detected_activity, distances_list):
+    """
+    Args:
+        json_data (json array): json data of the image
+        enhanced_images (list of numpy arrays): list of enhanced images
+        detected_activity (list of strings): list of activities, one per person
+        distances_list (list of strings): list of distances, one per object
+
+    Returns:
+        results (json array): contains all the information the frontend needs:
+        {'zoomed_img': np.array([]),
+         'actual_boxes': [],
+         'merged_boundries': {},
+        }
+    """
+    logger.info("get_json_data function is called... ")
+    try:
+        results = []
+        object_count = 0
+        activity_count = 0
+        for idx, box in enumerate(json_data):
+            final_json_output = {'zoomed_img': np.array([]),
+                                 'actual_boxes': [],
+                                 'merged_boundries': {},
+                                 }
+
+            final_json_output['zoomed_img'] = enhanced_images[idx]
+            final_json_output['merged_boundries'] = {"top_left": box['merged_boundries']['top_left'],
+                                                     "bottom_right": box['merged_boundries']['bottom_right']}
+
+            for actual_box in box['actual_boundries']:
+
+                single_object_details = {"top_left": actual_box['top_left'],
+                                         "bottom_right": actual_box['bottom_right'],
+                                         "class": actual_box['class'],
+                                         "distance": distances_list[object_count],
+                                         "activity": 'none'}
+                object_count += 1
+
+                # Only person objects carry an activity label.
+                if single_object_details['class'] == 'person':
+                    single_object_details['activity'] = detected_activity[activity_count]
+                    activity_count += 1
+
+                final_json_output['actual_boxes'].append(single_object_details)
+            final_json_output = fix_distance(final_json_output)
+
+            results.append(final_json_output)
+
+        return results
+    except Exception as e:
+        logger.error("Something went wrong in the generate results function")
+        logger.error(e)
+
+
+def fix_distance(final_json_output):
+    """Smooth the per-object distances by averaging objects whose distances lie
+    within DIFF meters of each other.
+
+    Args:
+        final_json_output (json object): one result object
+
+    Returns:
+        final_json_output (json object): the same object with smoothed distances
+    """
+    logger.info("fix distance function is called... ")
+    try:
+        distances = []
+        DIFF = 90
+
+        for idx, box in enumerate(final_json_output['actual_boxes']):
+            distances.append({'idx': idx, 'distance': int(box['distance'][:-1])})
+
+        # Cluster the sorted distances: consecutive values within DIFF of each
+        # other fall into the same group.
+        sorted_dist = sorted(distances, key=lambda d: d['distance'])
+        sum_dist = []
+        idx = 0
+        sum_dist.append({'sum': sorted_dist[0]['distance'], 'idxes': [sorted_dist[0]['idx']]})
+
+        for i in range(1, len(sorted_dist)):
+            if abs(sorted_dist[i]['distance'] - sorted_dist[i-1]['distance']) <= DIFF:
+                sum_dist[idx]['sum'] += sorted_dist[i]['distance']
+                sum_dist[idx]['idxes'].append(sorted_dist[i]['idx'])
+
+            else:
+                sum_dist.append({'sum': sorted_dist[i]['distance'], 'idxes': [sorted_dist[i]['idx']]})
+                idx += 1
+
+        # Replace each object's distance with its group mean.
+        for data in sum_dist:
+            count = len(data['idxes'])
+            mean = data['sum']//count
+            for i in data['idxes']:
+                final_json_output['actual_boxes'][i]['distance'] = str(mean)+'m'
+
+        return final_json_output
+
+    except Exception as e:
+        logger.error("Something went wrong in the fix_distance function...")
+        logger.error(e)
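For example, with DIFF = 90 and raw distances of 100 m, 150 m and 400 m, the first two differ by 50 m and are grouped, so both become (100 + 150) // 2 = 125m, while the 400 m object forms its own group and stays 400m.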
utils/zoom_in.py
ADDED
@@ -0,0 +1,103 @@
+# File to crop each object from the image and enhance it.
+import cv2
+import numpy as np
+import logging
+
+# Configure the logger
+logging.basicConfig(
+    level=logging.DEBUG,
+    format="%(asctime)s - %(levelname)s - %(message)s",
+    filename="logs.log",
+)
+
+# Create a logger
+logger = logging.getLogger("pipeline")
+
+def croped_images(image, merged_boundary_boxes):
+    """Crop each object from the original image.
+    Args:
+        image (numpy array): numpy array of an image with 3 channels
+        merged_boundary_boxes (json array): json array of detection results
+
+    Returns:
+        croped_images_list (list of numpy arrays): crops of the merged regions
+        single_object_images (list of numpy arrays): crops of single person objects
+    """
+    logger.info('croped images function is called...')
+    try:
+        croped_images_list = []
+        single_object_images = []
+
+        for data in merged_boundary_boxes:
+            # Slice out the merged region: rows are y, columns are x.
+            crop_image = image[data['merged_boundries']['top_left'][1]:data['merged_boundries']['bottom_right'][1], data['merged_boundries']['top_left'][0]:data['merged_boundries']['bottom_right'][0]]
+            croped_images_list.append(crop_image)
+
+            for obj in data['actual_boundries']:
+                if obj['class'] == 'person':
+                    crop_object = image[obj['top_left'][1]:obj['bottom_right'][1], obj['top_left'][0]:obj['bottom_right'][0]]
+                    single_object_images.append(crop_object)
+
+        return croped_images_list, single_object_images
+
+    except Exception as e:
+        logger.error("Something went wrong in croped image function...")
+        logger.error(e)
+
+
+def image_enhancements(croped_images_list, single_object_images):
+    """Enhance the cropped images using OpenCV techniques.
+    Args:
+        croped_images_list (list of numpy arrays): cropped images
+        single_object_images (list of numpy arrays): single-object images
+
+    Returns:
+        enhanced_images: enhanced merged-region crops
+        enhanced_single_object_images: enhanced single-object crops
+    """
+    logger.info("image enhance function is called...")
+    try:
+        enhanced_images = []
+        enhanced_single_object_images = []
+
+        for image in croped_images_list:
+
+            # Resize to height 500 while preserving the aspect ratio.
+            res = cv2.resize(image, (500*image.shape[1]//image.shape[0], 500), interpolation=cv2.INTER_CUBIC)
+
+            # Adjust brightness and contrast.
+            brightness = 16
+            contrast = 0.95
+            res2 = cv2.addWeighted(res, contrast, np.zeros(res.shape, res.dtype), 0, brightness)
+
+            # Sharpen the image with a 3x3 sharpening kernel.
+            kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
+            sharpened_image = cv2.filter2D(res2, -1, kernel)
+
+            # Append to the list.
+            enhanced_images.append(sharpened_image)
+
+        for image in single_object_images:
+
+            # Resize to height 500 while preserving the aspect ratio.
+            res = cv2.resize(image, (500*image.shape[1]//image.shape[0], 500), interpolation=cv2.INTER_CUBIC)
+
+            # Adjust brightness and contrast.
+            brightness = 16
+            contrast = 0.95
+            res2 = cv2.addWeighted(res, contrast, np.zeros(res.shape, res.dtype), 0, brightness)
+
+            # Sharpen the image with a 3x3 sharpening kernel.
+            kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
+            sharpened_image = cv2.filter2D(res2, -1, kernel)
+
+            # Append the enhanced single-object image.
+            enhanced_single_object_images.append(sharpened_image)
+
+        return enhanced_images, enhanced_single_object_images
+
+    except Exception as e:
+        logger.error("Something went wrong in image enhancements function...")
+        logger.error(e)
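A minimal sketch of the enhancement step on a single crop, using one of the repo's test images in place of a detection crop:

```python
import cv2
import numpy as np

img = cv2.imread("images/default_img.jpg")

# Same three steps as image_enhancements: upscale, brighten, sharpen.
res = cv2.resize(img, (500*img.shape[1]//img.shape[0], 500), interpolation=cv2.INTER_CUBIC)
res = cv2.addWeighted(res, 0.95, np.zeros(res.shape, res.dtype), 0, 16)
kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
sharpened = cv2.filter2D(res, -1, kernel)
cv2.imwrite("enhanced.jpg", sharpened)
```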