Upload entire local dataset folder
(This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.)
- .gitignore +16 -0
- .gitmodules +6 -0
- LICENSE +21 -0
- README.md +235 -0
- coco_classes.json +82 -0
- configs/deep_sort.yaml +10 -0
- configs/fastreid.yaml +3 -0
- configs/mask_rcnn.yaml +6 -0
- configs/mmdet.yaml +5 -0
- configs/yolov3.yaml +7 -0
- configs/yolov3_tiny.yaml +7 -0
- configs/yolov5l.yaml +9 -0
- configs/yolov5m.yaml +9 -0
- configs/yolov5n.yaml +9 -0
- configs/yolov5s.yaml +9 -0
- configs/yolov5x.yaml +9 -0
- deep_sort/README.md +3 -0
- deep_sort/__init__.py +19 -0
- deep_sort/deep/GETTING_STARTED.md +82 -0
- deep_sort/deep/__init__.py +0 -0
- deep_sort/deep/checkpoint/.gitkeep +0 -0
- deep_sort/deep/datasets.py +92 -0
- deep_sort/deep/evaluate.py +15 -0
- deep_sort/deep/feature_extractor.py +93 -0
- deep_sort/deep/model.py +105 -0
- deep_sort/deep/multi_train_utils/distributed_utils.py +67 -0
- deep_sort/deep/multi_train_utils/train_eval_utils.py +90 -0
- deep_sort/deep/resnet.py +173 -0
- deep_sort/deep/test.py +77 -0
- deep_sort/deep/train.jpg +3 -0
- deep_sort/deep/train.py +151 -0
- deep_sort/deep/train_multiGPU.py +189 -0
- deep_sort/deep_sort.py +121 -0
- deep_sort/sort/__init__.py +0 -0
- deep_sort/sort/detection.py +51 -0
- deep_sort/sort/iou_matching.py +81 -0
- deep_sort/sort/kalman_filter.py +231 -0
- deep_sort/sort/linear_assignment.py +192 -0
- deep_sort/sort/nn_matching.py +176 -0
- deep_sort/sort/preprocessing.py +73 -0
- deep_sort/sort/track.py +169 -0
- deep_sort/sort/tracker.py +138 -0
- deepsort.py +189 -0
- deepsort_new.py +229 -0
- demo/1.jpg +3 -0
- demo/2.jpg +3 -0
- demo/demo.gif +3 -0
- demo/demo2.gif +3 -0
- detector/MMDet/__init__.py +2 -0
- detector/MMDet/detector.py +52 -0
.gitignore
ADDED
@@ -0,0 +1,16 @@
+# Folders
+__pycache__/
+build/
+*.egg-info
+.idea/
+
+
+# Files
+*.weights
+*.pth
+*.pt
+*.t7
+*.mp4
+*.avi
+*.so
+*.txt
.gitmodules
ADDED
@@ -0,0 +1,6 @@
+[submodule "thirdparty/fast-reid"]
+	path = thirdparty/fast-reid
+	url = https://github.com/JDAI-CV/fast-reid.git
+[submodule "thirdparty/mmdetection"]
+	path = thirdparty/mmdetection
+	url = https://github.com/open-mmlab/mmdetection.git
LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Ziqiang
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
README.md
ADDED
@@ -0,0 +1,235 @@
+# Deep Sort with PyTorch
+
+
+
+## Update(1-1-2020)
+Changes
+- fix bugs
+- refactor code
+- accelerate detection by adding nms on gpu
+
+## Update(07-22)
+Changes
+- bug fixes (thanks @JieChen91 and @yingsen1 for reporting).
+- use batched feature extraction for each frame, which leads to a small speed-up.
+- code improvements.
+
+Further improvement directions
+- Train the detector on a task-specific dataset rather than the official one.
+- Retrain the ReID model on a pedestrian dataset for better performance.
+- Replace the YOLOv3 detector with more advanced ones.
+
+## Update(23-05-2024)
+
+### tracking
+
+- Added a resnet network as an appearance feature extraction network in the deep folder.
+
+- Fixed the NMS bug in `preprocessing.py` and the covariance calculation bug in `kalman_filter.py` in the sort folder.
+
+### detecting
+
+- Added the YOLOv5 detector with an aligned interface, plus the YOLOv5-related yaml configuration files. The code references this repo: [YOLOv5-v6.1](https://github.com/ultralytics/yolov5/tree/v6.1).
+
+- `train.py`, `val.py` and `detect.py` from the original YOLOv5 were deleted. This repo only needs **yolov5x.pt**.
+
+### deepsort
+
+- Added the tracking target category, so both category and tracking ID are displayed simultaneously.
+
+## Update(28-05-2024)
+
+### segmentation
+
+* Added the Mask RCNN instance segmentation model. The code references this repo: [mask_rcnn](https://github.com/WZMIAOMIAO/deep-learning-for-image-processing/tree/master/pytorch_object_detection/mask_rcnn). A visual result is saved in `demo/demo2.gif`.
+* Similar to YOLOv5, `train.py`, `validation.py` and `predict.py` were deleted. This repo only needs **maskrcnn_resnet50_fpn_coco.pth**.
+
+### deepsort
+
+- Added the tracking target mask, so category, tracking ID and target mask are displayed simultaneously.
+
+## Latest Update(09-06-2024)
+
+### feature extraction network
+
+* Use `nn.parallel.DistributedDataParallel` in PyTorch to support multi-GPU training.
+* Added [GETTING_STARTED.md](deep_sort/deep/GETTING_STARTED.md) to make `train.py` and `train_multiGPU.py` easier to use.
+
+Updated `README.md` for the previously updated content (#Update(23-05-2024) and #Update(28-05-2024)).
+
+**Any contribution to this repository is welcome!**
+
+
+## Introduction
+This is an implementation of the MOT tracking algorithm deep sort. Deep sort is basically the same as sort, but it adds a CNN model to extract features from the image patch of each person bounded by a detector. This CNN model is in fact a ReID model; the detector used in the [PAPER](https://arxiv.org/abs/1703.07402) is FasterRCNN, and the original source code is [HERE](https://github.com/nwojke/deep_sort).
+However, in the original code the CNN model is implemented with tensorflow, which I'm not familiar with. So I re-implemented the CNN feature extraction model with PyTorch and changed the CNN model a little bit. Also, I use **YOLOv3** to generate bboxes instead of FasterRCNN.
+
+## Dependencies
+- python 3 **(python2 not sure)**
+- numpy
+- scipy
+- opencv-python
+- sklearn
+- torch >= 1.9
+- torchvision >= 0.13
+- pillow
+- vizer
+- edict
+- matplotlib
+- pycocotools
+- tqdm
+
+## Quick Start
+0. Check that all dependencies are installed
+```bash
+pip install -r requirements.txt
+```
+For users in China, you can specify a pypi mirror to accelerate installation, for example:
+```bash
+pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
+```
+
+1. Clone this repository
+```bash
+git clone [email protected]:ZQPei/deep_sort_pytorch.git
+```
+
+2. Download detector parameters
+```bash
+# if you use YOLOv3 as the detector in this repo
+cd detector/YOLOv3/weight/
+wget https://pjreddie.com/media/files/yolov3.weights
+wget https://pjreddie.com/media/files/yolov3-tiny.weights
+cd ../../../
+
+# if you use YOLOv5 as the detector in this repo
+cd detector/YOLOv5
+wget https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s.pt
+# or
+wget https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5m.pt
+cd ../../
+
+# if you use Mask RCNN as the detector in this repo
+cd detector/Mask_RCNN/save_weights
+wget https://download.pytorch.org/models/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth
+cd ../../../
+```
+
+3. Download the deepsort feature extraction network weights
+```bash
+# if you use the original model from the PAPER
+cd deep_sort/deep/checkpoint
+# download ckpt.t7 from
+# https://drive.google.com/drive/folders/1xhG0kRH1EX5B9_Iz8gQJb7UNnn_riXi6 to this folder
+cd ../../../
+
+# if you use resnet18 in this repo
+cd deep_sort/deep/checkpoint
+wget https://download.pytorch.org/models/resnet18-5c106cde.pth
+cd ../../../
+```
+
+4. **(Optional)** Compile the nms module if you use YOLOv3 as the detector in this repo
+```bash
+cd detector/YOLOv3/nms
+sh build.sh
+cd ../../..
+```
+
+Notice:
+If compiling fails, the simplest fix is to **upgrade your pytorch >= 1.1 and torchvision >= 0.3**, which avoids the troublesome compiling problems that are most likely caused by either a too-old `gcc` version or missing libraries.
+
+5. **(Optional)** Prepare third-party submodules
+
+[fast-reid](https://github.com/JDAI-CV/fast-reid)
+
+This library supports bagtricks, AGW and other mainstream ReID methods through a fast-reid adapter.
+
+To prepare our bundled fast-reid, follow the instructions in its README to install it.
+
+Please refer to `configs/fastreid.yaml` for a sample of using fast-reid. See the [Model Zoo](https://github.com/JDAI-CV/fast-reid/blob/master/docs/MODEL_ZOO.md) for available methods and trained models.
+
+[MMDetection](https://github.com/open-mmlab/mmdetection)
+
+This library supports Faster R-CNN and other mainstream detection methods through an MMDetection adapter.
+
+To prepare our bundled MMDetection, follow the instructions in its README to install it.
+
+Please refer to `configs/mmdet.yaml` for a sample of using MMDetection. See the [Model Zoo](https://github.com/open-mmlab/mmdetection/blob/master/docs/model_zoo.md) for available methods and trained models.
+
+Run
+
+```
+git submodule update --init --recursive
+```
+
+
+6. Run the demo
+```bash
+usage: deepsort.py [-h]
+                   [--fastreid]
+                   [--config_fastreid CONFIG_FASTREID]
+                   [--mmdet]
+                   [--config_mmdetection CONFIG_MMDETECTION]
+                   [--config_detection CONFIG_DETECTION]
+                   [--config_deepsort CONFIG_DEEPSORT] [--display]
+                   [--frame_interval FRAME_INTERVAL]
+                   [--display_width DISPLAY_WIDTH]
+                   [--display_height DISPLAY_HEIGHT] [--save_path SAVE_PATH]
+                   [--cpu] [--camera CAM]
+                   VIDEO_PATH
+
+# yolov3 + deepsort
+python deepsort.py [VIDEO_PATH] --config_detection ./configs/yolov3.yaml
+
+# yolov3_tiny + deepsort
+python deepsort.py [VIDEO_PATH] --config_detection ./configs/yolov3_tiny.yaml
+
+# yolov3 + deepsort on webcam
+python3 deepsort.py /dev/video0 --camera 0
+
+# yolov3_tiny + deepsort on webcam
+python3 deepsort.py /dev/video0 --config_detection ./configs/yolov3_tiny.yaml --camera 0
+
+# yolov5s + deepsort
+python deepsort.py [VIDEO_PATH] --config_detection ./configs/yolov5s.yaml
+
+# yolov5m + deepsort
+python deepsort.py [VIDEO_PATH] --config_detection ./configs/yolov5m.yaml
+
+# mask_rcnn + deepsort
+python deepsort.py [VIDEO_PATH] --config_detection ./configs/mask_rcnn.yaml --segment
+
+# fast-reid + deepsort
+python deepsort.py [VIDEO_PATH] --fastreid [--config_fastreid ./configs/fastreid.yaml]
+
+# MMDetection + deepsort
+python deepsort.py [VIDEO_PATH] --mmdet [--config_mmdetection ./configs/mmdet.yaml]
+```
+Use `--display` to show each frame while tracking.
+Results will be saved to `./output/results.avi` and `./output/results.txt`.
+
+All files above can also be accessed from BaiduDisk!
+link: [BaiduDisk](https://pan.baidu.com/s/1YJ1iPpdFTlUyLFoonYvozg)
+password: fbuw
+
+## Training the RE-ID model
+Check [GETTING_STARTED.md](deep_sort/deep/GETTING_STARTED.md) to start training on a standard benchmark or a **customized dataset**.
+
+## Demo videos and images
+[demo.avi](https://drive.google.com/drive/folders/1xhG0kRH1EX5B9_Iz8gQJb7UNnn_riXi6)
+[demo2.avi](https://drive.google.com/drive/folders/1xhG0kRH1EX5B9_Iz8gQJb7UNnn_riXi6)
+
+
+
+
+
+## References
+- paper: [Simple Online and Realtime Tracking with a Deep Association Metric](https://arxiv.org/abs/1703.07402)
+- code: [nwojke/deep_sort](https://github.com/nwojke/deep_sort)
+- paper: [YOLOv3: An Incremental Improvement](https://pjreddie.com/media/files/papers/YOLOv3.pdf)
+- code: [Joseph Redmon/yolov3](https://pjreddie.com/darknet/yolo/)
+- paper: [Mask R-CNN](https://arxiv.org/pdf/1703.06870)
+- code: [WZMIAOMIAO/Mask R-CNN](https://github.com/WZMIAOMIAO/deep-learning-for-image-processing/tree/master/pytorch_object_detection/mask_rcnn)
+- paper: [YOLOv5](https://github.com/ultralytics/yolov5)
+- code: [ultralytics/yolov5](https://github.com/ultralytics/yolov5/tree/v6.1)
coco_classes.json
ADDED
@@ -0,0 +1,82 @@
+{
+  "0": "person",
+  "1": "bicycle",
+  "10": "fire hydrant",
+  "11": "stop sign",
+  "12": "parking meter",
+  "13": "bench",
+  "14": "bird",
+  "15": "cat",
+  "16": "dog",
+  "17": "horse",
+  "18": "sheep",
+  "19": "cow",
+  "2": "car",
+  "20": "elephant",
+  "21": "bear",
+  "22": "zebra",
+  "23": "giraffe",
+  "24": "backpack",
+  "25": "umbrella",
+  "26": "handbag",
+  "27": "tie",
+  "28": "suitcase",
+  "29": "frisbee",
+  "3": "motorcycle",
+  "30": "skis",
+  "31": "snowboard",
+  "32": "sports ball",
+  "33": "kite",
+  "34": "baseball bat",
+  "35": "baseball glove",
+  "36": "skateboard",
+  "37": "surfboard",
+  "38": "tennis racket",
+  "39": "bottle",
+  "4": "airplane",
+  "40": "wine glass",
+  "41": "cup",
+  "42": "fork",
+  "43": "knife",
+  "44": "spoon",
+  "45": "bowl",
+  "46": "banana",
+  "47": "apple",
+  "48": "sandwich",
+  "49": "orange",
+  "5": "bus",
+  "50": "broccoli",
+  "51": "carrot",
+  "52": "hot dog",
+  "53": "pizza",
+  "54": "donut",
+  "55": "cake",
+  "56": "chair",
+  "57": "couch",
+  "58": "potted plant",
+  "59": "bed",
+  "6": "train",
+  "60": "dining table",
+  "61": "toilet",
+  "62": "tv",
+  "63": "laptop",
+  "64": "mouse",
+  "65": "remote",
+  "66": "keyboard",
+  "67": "cell phone",
+  "68": "microwave",
+  "69": "oven",
+  "7": "truck",
+  "70": "toaster",
+  "71": "sink",
+  "72": "refrigerator",
+  "73": "book",
+  "74": "clock",
+  "75": "vase",
+  "76": "scissors",
+  "77": "teddy bear",
+  "78": "hair drier",
+  "79": "toothbrush",
+  "8": "boat",
+  "9": "traffic light"
+}
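For reference, a minimal sketch of how a detector's integer class ids can be looked up in this file; plain `json` is enough, and the variable names are illustrative only:

```python
import json

# Load the id -> name mapping shipped as coco_classes.json.
with open("coco_classes.json") as f:
    coco_classes = json.load(f)

# Keys are strings, so an integer class id from the detector must be
# converted before lookup.
class_id = 0
print(coco_classes[str(class_id)])  # -> "person"
```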
configs/deep_sort.yaml
ADDED
@@ -0,0 +1,10 @@
+DEEPSORT:
+  REID_CKPT: "./deep_sort/deep/checkpoint/ckpt.t7"
+  MAX_DIST: 0.2
+  MIN_CONFIDENCE: 0.5
+  NMS_MAX_OVERLAP: 0.5
+  MAX_IOU_DISTANCE: 0.7
+  MAX_AGE: 70
+  N_INIT: 3
+  NN_BUDGET: 100
+
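A minimal sketch of how a config like the one above might be loaded for attribute-style access, assuming PyYAML and `easydict` (listed as `edict` in the README dependencies); the repo's own config parser is not part of this diff, so this is an illustration rather than its actual loader:

```python
import yaml
from easydict import EasyDict

# Parse configs/deep_sort.yaml and expose nested keys as attributes,
# e.g. cfg.DEEPSORT.MAX_DIST or cfg.DEEPSORT.REID_CKPT.
with open("configs/deep_sort.yaml") as f:
    cfg = EasyDict(yaml.safe_load(f))

print(cfg.DEEPSORT.MAX_DIST)   # 0.2
print(cfg.DEEPSORT.REID_CKPT)  # ./deep_sort/deep/checkpoint/ckpt.t7
```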
configs/fastreid.yaml
ADDED
@@ -0,0 +1,3 @@
+FASTREID:
+  CFG: "thirdparty/fast-reid/configs/Market1501/bagtricks_R50.yml"
+  CHECKPOINT: "deep_sort/deep/checkpoint/market_bot_R50.pth"
configs/mask_rcnn.yaml
ADDED
@@ -0,0 +1,6 @@
+MASKRCNN:
+  LABEL: "./coco_classes.json"
+  WEIGHT: "./detector/Mask_RCNN/save_weights/maskrcnn_resnet50_fpn_coco.pth"
+
+  NUM_CLASSES: 90
+  BOX_THRESH: 0.5
configs/mmdet.yaml
ADDED
@@ -0,0 +1,5 @@
+MMDET:
+  CFG: "thirdparty/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py"
+  CHECKPOINT: "detector/MMDet/weight/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth"
+
+  SCORE_THRESH: 0.5
configs/yolov3.yaml
ADDED
@@ -0,0 +1,7 @@
+YOLOV3:
+  CFG: "./detector/YOLOv3/cfg/yolo_v3.cfg"
+  WEIGHT: "./detector/YOLOv3/weight/yolov3.weights"
+  CLASS_NAMES: "./detector/YOLOv3/cfg/coco.names"
+
+  SCORE_THRESH: 0.5
+  NMS_THRESH: 0.4
configs/yolov3_tiny.yaml
ADDED
@@ -0,0 +1,7 @@
+YOLOV3:
+  CFG: "./detector/YOLOv3/cfg/yolov3-tiny.cfg"
+  WEIGHT: "./detector/YOLOv3/weight/yolov3-tiny.weights"
+  CLASS_NAMES: "./detector/YOLOv3/cfg/coco.names"
+
+  SCORE_THRESH: 0.5
+  NMS_THRESH: 0.4
configs/yolov5l.yaml
ADDED
@@ -0,0 +1,9 @@
+YOLOV5:
+  CFG: "./detector/YOLOv5/models/yolov5l.yaml"
+  WEIGHT: "./detector/YOLOv5/yolov5l.pt"
+  DATA: './detector/YOLOv5/data/coco128.yaml'
+
+  IMGSZ: [640, 640]
+  SCORE_THRESH: 0.25
+  NMS_THRESH: 0.45
+  MAX_DET: 100
configs/yolov5m.yaml
ADDED
@@ -0,0 +1,9 @@
+YOLOV5:
+  CFG: "./detector/YOLOv5/models/yolov5m.yaml"
+  WEIGHT: "./detector/YOLOv5/yolov5m.pt"
+  DATA: './detector/YOLOv5/data/coco128.yaml'
+
+  IMGSZ: [640, 640]
+  SCORE_THRESH: 0.25
+  NMS_THRESH: 0.45
+  MAX_DET: 100
configs/yolov5n.yaml
ADDED
@@ -0,0 +1,9 @@
+YOLOV5:
+  CFG: "./detector/YOLOv5/models/yolov5n.yaml"
+  WEIGHT: "./detector/YOLOv5/yolov5n.pt"
+  DATA: './detector/YOLOv5/data/coco128.yaml'
+
+  IMGSZ: [640, 640]
+  SCORE_THRESH: 0.25
+  NMS_THRESH: 0.45
+  MAX_DET: 100
configs/yolov5s.yaml
ADDED
@@ -0,0 +1,9 @@
+YOLOV5:
+  CFG: "./detector/YOLOv5/models/yolov5s.yaml"
+  WEIGHT: "./detector/YOLOv5/yolov5s.pt"
+  DATA: './detector/YOLOv5/data/coco128.yaml'
+
+  IMGSZ: [640, 640]
+  SCORE_THRESH: 0.25
+  NMS_THRESH: 0.45
+  MAX_DET: 100
configs/yolov5x.yaml
ADDED
@@ -0,0 +1,9 @@
+YOLOV5:
+  CFG: "./detector/YOLOv5/models/yolov5x.yaml"
+  WEIGHT: "./detector/YOLOv5/yolov5x.pt"
+  DATA: './detector/YOLOv5/data/coco128.yaml'
+
+  IMGSZ: [640, 640]
+  SCORE_THRESH: 0.25
+  NMS_THRESH: 0.45
+  MAX_DET: 100
deep_sort/README.md
ADDED
@@ -0,0 +1,3 @@
+# Deep Sort
+
+This is the implementation of deep sort with pytorch.
deep_sort/__init__.py
ADDED
@@ -0,0 +1,19 @@
+from .deep_sort import DeepSort
+
+__all__ = ['DeepSort', 'build_tracker']
+
+
+def build_tracker(cfg, use_cuda):
+    if cfg.USE_FASTREID:
+        return DeepSort(model_path=cfg.FASTREID.CHECKPOINT, model_config=cfg.FASTREID.CFG,
+                        max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
+                        nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
+                        max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET,
+                        use_cuda=use_cuda)
+
+    else:
+        return DeepSort(model_path=cfg.DEEPSORT.REID_CKPT,
+                        max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
+                        nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
+                        max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET,
+                        use_cuda=use_cuda)
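A hedged usage sketch for `build_tracker`: it expects a merged config object carrying `USE_FASTREID` plus the `DEEPSORT` (and optionally `FASTREID`) sections shown in the configs above. How the returned tracker is fed per frame lives in `deepsort.py`, which is outside the first 50 files, so the update call mentioned in the comment is only an assumption:

```python
import yaml
from easydict import EasyDict

from deep_sort import build_tracker

# Merge the deep_sort config; USE_FASTREID switches between the bundled
# ReID checkpoint and a fast-reid model (flag name taken from build_tracker above).
with open("configs/deep_sort.yaml") as f:
    cfg = EasyDict(yaml.safe_load(f))
cfg.USE_FASTREID = False

tracker = build_tracker(cfg, use_cuda=True)
# Per-frame usage would then pass detections and the frame to the tracker,
# e.g. something like tracker.update(bbox_xywh, confidences, image) in the
# demo scripts (signature assumed, not shown in this diff).
```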
deep_sort/deep/GETTING_STARTED.md
ADDED
@@ -0,0 +1,82 @@
+In the deepsort algorithm, the appearance feature extraction network is used to extract features from **image_crops** for matching purposes. The original model used in the paper is in `model.py`, and its parameters are here: [ckpt.t7](https://drive.google.com/drive/folders/1xhG0kRH1EX5B9_Iz8gQJb7UNnn_riXi6). This repository also provides a `resnet.py` script and its ImageNet pre-trained weights here:
+
+```
+# resnet18
+https://download.pytorch.org/models/resnet18-5c106cde.pth
+# resnet34
+https://download.pytorch.org/models/resnet34-333f7ec4.pth
+# resnet50
+https://download.pytorch.org/models/resnet50-19c8e357.pth
+# resnext50_32x4d
+https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth
+```
+
+## Dataset Preparation
+
+To train the model, first download the [Market1501](http://www.liangzheng.com.cn/Project/project_reid.html) dataset or the [Mars](http://www.liangzheng.com.cn/Project/project_mars.html) dataset.
+
+If you want to train on your **own dataset**, assuming you have already downloaded it, the dataset should be arranged in the following way.
+
+```
+├── dataset_root: the root dir of the dataset.
+    ├── class1: category 1 folder.
+        ├── xxx1.jpg: image belonging to category 1.
+        ├── xxx2.jpg: image belonging to category 1.
+    ├── class2: category 2 folder.
+        ├── xxx3.jpg: image belonging to category 2.
+        ├── xxx4.jpg: image belonging to category 2.
+    ├── class3: category 3 folder.
+    ...
+    ...
+```
+
+## Training the RE-ID model
+
+Assuming you have already prepared the dataset, you can use the following commands to start training.
+
+#### training on a single GPU
+
+```bash
+usage: train.py [--data-dir]
+                [--epochs]
+                [--batch_size]
+                [--lr]
+                [--lrf]
+                [--weights]
+                [--freeze-layers]
+                [--gpu_id]
+
+# defaults to cuda:0 and uses Net from `model.py`
+python train.py --data-dir [dataset/root/path] --weights [(optional)pre-train/weight/path]
+# you can use the `--freeze-layers` option to freeze all convolutional layer parameters except the fc layer parameters
+python train.py --data-dir [dataset/root/path] --weights [(optional)pre-train/weight/path] --freeze-layers
+```
+
+#### training on multiple GPUs
+
+```bash
+usage: train_multiGPU.py [--data-dir]
+                         [--epochs]
+                         [--batch_size]
+                         [--lr]
+                         [--lrf]
+                         [--syncBN]
+                         [--weights]
+                         [--freeze-layers]
+                         # do not change the following parameters; they are assigned automatically
+                         [--device]
+                         [--world_size]
+                         [--dist_url]
+
+# defaults to cuda:0, cuda:1, cuda:2, cuda:3 and uses resnet18 from `resnet.py`
+CUDA_VISIBLE_DEVICES=0,1,2,3 torchrun --nproc_per_node=4 train_multiGPU.py --data-dir [dataset/root/path] --weights [(optional)pre-train/weight/path]
+# you can use the `--freeze-layers` option to freeze all convolutional layer parameters except the fc layer parameters
+CUDA_VISIBLE_DEVICES=0,1,2,3 torchrun --nproc_per_node=4 train_multiGPU.py --data-dir [dataset/root/path] --weights [(optional)pre-train/weight/path] --freeze-layers
+```
+
+An example of training progress is shown below:
+
+
+
+Finally, you can evaluate the model using [test.py](deep_sort/deep/test.py) and [evaluate.py](deep_sort/deep/evaluate.py).
+
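The `--freeze-layers` flag described above presumably freezes everything except the classification head; a small sketch of that idea against `Net` from `model.py` (the `classifier` prefix comes from that file, the rest is illustrative):

```python
# Run from inside deep_sort/deep/, as train.py does.
from model import Net

net = Net(num_classes=751)

# Freeze backbone parameters and leave only the classifier head trainable.
for name, param in net.named_parameters():
    if not name.startswith("classifier"):
        param.requires_grad_(False)

trainable = [n for n, p in net.named_parameters() if p.requires_grad]
print(trainable)  # only classifier.* parameters remain trainable
```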
deep_sort/deep/__init__.py
ADDED
File without changes
deep_sort/deep/checkpoint/.gitkeep
ADDED
File without changes
deep_sort/deep/datasets.py
ADDED
@@ -0,0 +1,92 @@
+import json
+import os
+import random
+
+import cv2
+from PIL import Image
+import torch
+from torch.utils.data import Dataset
+import matplotlib.pyplot as plt
+
+
+class ClsDataset(Dataset):
+    def __init__(self, images_path, images_labels, transform=None):
+        self.images_path = images_path
+        self.images_labels = images_labels
+        self.transform = transform
+
+    def __len__(self):
+        return len(self.images_path)
+
+    def __getitem__(self, idx):
+        img = cv2.imread(self.images_path[idx])
+        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+        img = Image.fromarray(img)
+        label = self.images_labels[idx]
+
+        if self.transform is not None:
+            img = self.transform(img)
+        return img, label
+
+    @staticmethod
+    def collate_fn(batch):
+        images, labels = tuple(zip(*batch))
+        images = torch.stack(images, dim=0)
+        labels = torch.as_tensor(labels)
+        return images, labels
+
+
+def read_split_data(root, valid_rate=0.2):
+    assert os.path.exists(root), 'dataset root: {} does not exist.'.format(root)
+
+    class_names = [cls for cls in os.listdir(root) if os.path.isdir(os.path.join(root, cls))]
+    class_names.sort()
+
+    class_indices = {name: i for i, name in enumerate(class_names)}
+    json_str = json.dumps({v: k for k, v in class_indices.items()}, indent=4)
+    with open('class_indices.json', 'w') as f:
+        f.write(json_str)
+
+    train_images_path = []
+    train_labels = []
+    val_images_path = []
+    val_labels = []
+    per_class_num = []
+
+    supported = ['.jpg', '.JPG', '.png', '.PNG']
+    for cls in class_names:
+        cls_path = os.path.join(root, cls)
+        images_path = [os.path.join(cls_path, i) for i in os.listdir(cls_path)
+                       if os.path.splitext(i)[-1] in supported]
+        images_label = class_indices[cls]
+        per_class_num.append(len(images_path))
+
+        val_path = random.sample(images_path, int(len(images_path) * valid_rate))
+        for img_path in images_path:
+            if img_path in val_path:
+                val_images_path.append(img_path)
+                val_labels.append(images_label)
+            else:
+                train_images_path.append(img_path)
+                train_labels.append(images_label)
+
+    print("{} images were found in the dataset.".format(sum(per_class_num)))
+    print("{} images for training.".format(len(train_images_path)))
+    print("{} images for validation.".format(len(val_images_path)))
+
+    assert len(train_images_path) > 0, "number of training images must be greater than zero"
+    assert len(val_images_path) > 0, "number of validation images must be greater than zero"
+
+    plot_distribution = False
+    if plot_distribution:
+        plt.bar(range(len(class_names)), per_class_num, align='center')
+        plt.xticks(range(len(class_names)), class_names)
+
+        for i, v in enumerate(per_class_num):
+            plt.text(x=i, y=v + 5, s=str(v), ha='center')
+
+        plt.xlabel('classes')
+        plt.ylabel('numbers')
+        plt.title('the distribution of dataset')
+        plt.show()
+    return [train_images_path, train_labels], [val_images_path, val_labels], len(class_names)
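A short usage sketch of `read_split_data` and `ClsDataset` wired into a `DataLoader`, mirroring what `train.py` does further down; the dataset path is a placeholder:

```python
import torchvision
from torch.utils.data import DataLoader

# Run from inside deep_sort/deep/ so the repo-local import resolves.
from datasets import ClsDataset, read_split_data

# Split <dataset_root>/<class_x>/*.jpg into train/val lists (80/20 by default).
(train_paths, train_labels), (val_paths, val_labels), num_classes = \
    read_split_data("path/to/dataset_root", valid_rate=0.2)

transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize((128, 64)),
    torchvision.transforms.ToTensor(),
])

train_set = ClsDataset(train_paths, train_labels, transform=transform)
loader = DataLoader(train_set, batch_size=32, shuffle=True,
                    collate_fn=ClsDataset.collate_fn)

images, labels = next(iter(loader))
print(images.shape, labels.shape)  # e.g. torch.Size([32, 3, 128, 64]) torch.Size([32])
```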
deep_sort/deep/evaluate.py
ADDED
@@ -0,0 +1,15 @@
+import torch
+
+features = torch.load("features.pth")
+qf = features["qf"]
+ql = features["ql"]
+gf = features["gf"]
+gl = features["gl"]
+
+scores = qf.mm(gf.t())
+res = scores.topk(5, dim=1)[1][:, 0]
+top1correct = gl[res].eq(ql).sum().item()
+
+print("Acc top1:{:.3f}".format(top1correct / ql.size(0)))
+
+
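Since the ReID networks L2-normalize their output (see `model.py` and `resnet.py`), the inner product `qf.mm(gf.t())` used above is exactly the cosine similarity between query and gallery features; a small self-contained check of that equivalence:

```python
import torch
import torch.nn.functional as F

# Two random feature batches, L2-normalized the same way the ReID nets do.
qf = F.normalize(torch.randn(4, 512), p=2, dim=1)
gf = F.normalize(torch.randn(10, 512), p=2, dim=1)

inner = qf.mm(gf.t())  # what evaluate.py computes
cosine = F.cosine_similarity(qf.unsqueeze(1), gf.unsqueeze(0), dim=2)

print(torch.allclose(inner, cosine, atol=1e-6))  # True
# Top-1 gallery match per query, as in evaluate.py:
print(inner.topk(1, dim=1)[1].squeeze(1))
```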
deep_sort/deep/feature_extractor.py
ADDED
@@ -0,0 +1,93 @@
+import torch
+import torchvision.transforms as transforms
+import numpy as np
+import cv2
+import logging
+
+from .model import Net
+from .resnet import resnet18
+# from fastreid.config import get_cfg
+# from fastreid.engine import DefaultTrainer
+# from fastreid.utils.checkpoint import Checkpointer
+
+
+class Extractor(object):
+    def __init__(self, model_path, use_cuda=True):
+        self.net = Net(reid=True)
+        # self.net = resnet18(reid=True)
+        self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
+        state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)
+        self.net.load_state_dict(state_dict if 'net_dict' not in state_dict else state_dict['net_dict'], strict=False)
+        logger = logging.getLogger("root.tracker")
+        logger.info("Loading weights from {}... Done!".format(model_path))
+        self.net.to(self.device)
+        self.size = (64, 128)
+        self.norm = transforms.Compose([
+            transforms.ToTensor(),
+            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+        ])
+
+    def _preprocess(self, im_crops):
+        """
+        TODO:
+        1. to float with scale from 0 to 1
+        2. resize to (64, 128) as the Market1501 dataset did
+        3. concatenate to a numpy array
+        4. to torch Tensor
+        5. normalize
+        """
+
+        def _resize(im, size):
+            return cv2.resize(im.astype(np.float32) / 255., size)
+
+        im_batch = torch.cat([self.norm(_resize(im, self.size)).unsqueeze(0) for im in im_crops], dim=0).float()
+        return im_batch
+
+    def __call__(self, im_crops):
+        im_batch = self._preprocess(im_crops)
+        with torch.no_grad():
+            im_batch = im_batch.to(self.device)
+            features = self.net(im_batch)
+        return features.cpu().numpy()
+
+
+class FastReIDExtractor(object):
+    def __init__(self, model_config, model_path, use_cuda=True):
+        cfg = get_cfg()
+        cfg.merge_from_file(model_config)
+        cfg.MODEL.BACKBONE.PRETRAIN = False
+        self.net = DefaultTrainer.build_model(cfg)
+        self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
+
+        Checkpointer(self.net).load(model_path)
+        logger = logging.getLogger("root.tracker")
+        logger.info("Loading weights from {}... Done!".format(model_path))
+        self.net.to(self.device)
+        self.net.eval()
+        height, width = cfg.INPUT.SIZE_TEST
+        self.size = (width, height)
+        self.norm = transforms.Compose([
+            transforms.ToTensor(),
+            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+        ])
+
+    def _preprocess(self, im_crops):
+        def _resize(im, size):
+            return cv2.resize(im.astype(np.float32) / 255., size)
+
+        im_batch = torch.cat([self.norm(_resize(im, self.size)).unsqueeze(0) for im in im_crops], dim=0).float()
+        return im_batch
+
+    def __call__(self, im_crops):
+        im_batch = self._preprocess(im_crops)
+        with torch.no_grad():
+            im_batch = im_batch.to(self.device)
+            features = self.net(im_batch)
+        return features.cpu().numpy()
+
+
+if __name__ == '__main__':
+    img = cv2.imread("demo.jpg")[:, :, (2, 1, 0)]
+    extr = Extractor("checkpoint/ckpt.t7")
+    # The extractor expects a list of crops, so wrap the single image.
+    feature = extr([img])
+    print(feature.shape)
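A usage sketch of `Extractor`: it takes a list of BGR crops (as produced by `cv2.imread` plus detector boxes) and returns one feature vector per crop. Paths and crop coordinates are placeholders; run it from the repo root so the package-relative imports resolve:

```python
import cv2

from deep_sort.deep.feature_extractor import Extractor

extractor = Extractor("deep_sort/deep/checkpoint/ckpt.t7", use_cuda=True)

frame = cv2.imread("some_frame.jpg")
# Crops would normally come from detector bounding boxes (y1:y2, x1:x2 slices).
crops = [frame[50:250, 100:180], frame[60:260, 300:380]]

features = extractor(crops)  # numpy array, one row per crop
print(features.shape)        # e.g. (2, 512) for the Net defined in model.py
```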
deep_sort/deep/model.py
ADDED
@@ -0,0 +1,105 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class BasicBlock(nn.Module):
+    def __init__(self, c_in, c_out, is_downsample=False):
+        super(BasicBlock, self).__init__()
+        self.is_downsample = is_downsample
+        if is_downsample:
+            self.conv1 = nn.Conv2d(c_in, c_out, 3, stride=2, padding=1, bias=False)
+        else:
+            self.conv1 = nn.Conv2d(c_in, c_out, 3, stride=1, padding=1, bias=False)
+        self.bn1 = nn.BatchNorm2d(c_out)
+        self.relu = nn.ReLU(True)
+        self.conv2 = nn.Conv2d(c_out, c_out, 3, stride=1, padding=1, bias=False)
+        self.bn2 = nn.BatchNorm2d(c_out)
+        if is_downsample:
+            self.downsample = nn.Sequential(
+                nn.Conv2d(c_in, c_out, 1, stride=2, bias=False),
+                nn.BatchNorm2d(c_out)
+            )
+        elif c_in != c_out:
+            self.downsample = nn.Sequential(
+                nn.Conv2d(c_in, c_out, 1, stride=1, bias=False),
+                nn.BatchNorm2d(c_out)
+            )
+            self.is_downsample = True
+
+    def forward(self, x):
+        y = self.conv1(x)
+        y = self.bn1(y)
+        y = self.relu(y)
+        y = self.conv2(y)
+        y = self.bn2(y)
+        if self.is_downsample:
+            x = self.downsample(x)
+        return F.relu(x.add(y), True)
+
+
+def make_layers(c_in, c_out, repeat_times, is_downsample=False):
+    blocks = []
+    for i in range(repeat_times):
+        if i == 0:
+            blocks += [BasicBlock(c_in, c_out, is_downsample=is_downsample), ]
+        else:
+            blocks += [BasicBlock(c_out, c_out), ]
+    return nn.Sequential(*blocks)
+
+
+class Net(nn.Module):
+    def __init__(self, num_classes=751, reid=False):
+        super(Net, self).__init__()
+        # 3 128 64
+        self.conv = nn.Sequential(
+            nn.Conv2d(3, 64, 3, stride=1, padding=1),
+            nn.BatchNorm2d(64),
+            nn.ReLU(inplace=True),
+            # nn.Conv2d(32,32,3,stride=1,padding=1),
+            # nn.BatchNorm2d(32),
+            # nn.ReLU(inplace=True),
+            nn.MaxPool2d(3, 2, padding=1),
+        )
+        # 32 64 32
+        self.layer1 = make_layers(64, 64, 2, False)
+        # 32 64 32
+        self.layer2 = make_layers(64, 128, 2, True)
+        # 64 32 16
+        self.layer3 = make_layers(128, 256, 2, True)
+        # 128 16 8
+        self.layer4 = make_layers(256, 512, 2, True)
+        # 256 8 4
+        self.avgpool = nn.AdaptiveAvgPool2d(1)
+        # 256 1 1
+        self.reid = reid
+        self.classifier = nn.Sequential(
+            nn.Linear(512, 256),
+            nn.BatchNorm1d(256),
+            nn.ReLU(inplace=True),
+            nn.Dropout(),
+            nn.Linear(256, num_classes),
+        )
+
+    def forward(self, x):
+        x = self.conv(x)
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        x = self.layer4(x)
+        x = self.avgpool(x)
+        x = x.view(x.size(0), -1)
+        # B x 128
+        if self.reid:
+            x = x.div(x.norm(p=2, dim=1, keepdim=True))
+            return x
+        # classifier
+        x = self.classifier(x)
+        return x
+
+
+if __name__ == '__main__':
+    net = Net()
+    x = torch.randn(4, 3, 128, 64)
+    y = net(x)
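A quick shape check illustrating the two modes of `Net`: with `reid=True` the forward pass returns L2-normalized feature vectors, otherwise it returns class logits (751 Market1501 identities by default):

```python
import torch

from deep_sort.deep.model import Net  # run from the repo root

x = torch.randn(4, 3, 128, 64)  # batch of 128x64 RGB crops

reid_net = Net(reid=True).eval()
with torch.no_grad():
    feats = reid_net(x)
print(feats.shape)       # torch.Size([4, 512])
print(feats.norm(dim=1))  # ~1.0 each: features are L2-normalized

cls_net = Net(num_classes=751).eval()
with torch.no_grad():
    logits = cls_net(x)
print(logits.shape)      # torch.Size([4, 751])
```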
deep_sort/deep/multi_train_utils/distributed_utils.py
ADDED
@@ -0,0 +1,67 @@
+import os
+
+import torch
+import torch.distributed as dist
+
+
+def init_distributed_mode(args):
+    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
+        args.rank = int(os.environ['RANK'])
+        args.world_size = int(os.environ['WORLD_SIZE'])
+        args.gpu = int(os.environ['LOCAL_RANK'])
+    elif 'SLURM_PROCID' in os.environ:
+        args.rank = int(os.environ['SLURM_PROCID'])
+        args.gpu = args.rank % torch.cuda.device_count()
+    else:
+        print("Not using distributed mode")
+        args.distributed = False
+        return
+
+    args.distributed = True
+
+    torch.cuda.set_device(args.gpu)
+    args.dist_backend = 'nccl'
+    print('| distributed init (rank {}): {}'.format(args.rank, args.dist_url), flush=True)
+    dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
+                            world_size=args.world_size, rank=args.rank)
+    dist.barrier()
+
+
+def cleanup():
+    dist.destroy_process_group()
+
+
+def is_dist_avail_and_initialized():
+    if not dist.is_available():
+        return False
+    if not dist.is_initialized():
+        return False
+    return True
+
+
+def get_world_size():
+    if not is_dist_avail_and_initialized():
+        return 1
+    return dist.get_world_size()
+
+
+def get_rank():
+    if not is_dist_avail_and_initialized():
+        return 0
+    return dist.get_rank()
+
+
+def is_main_process():
+    return get_rank() == 0
+
+
+def reduce_value(value, average=True):
+    world_size = get_world_size()
+    if world_size < 2:
+        return value
+    with torch.no_grad():
+        dist.all_reduce(value)
+        if average:
+            value /= world_size
+
+        return value
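`reduce_value` is a no-op on a single process and an all-reduce (optionally averaged) across ranks otherwise; a minimal sketch of how a per-rank metric would pass through it, using the same import style as `train.py` (run from inside `deep_sort/deep/`):

```python
import torch

from multi_train_utils.distributed_utils import reduce_value, get_world_size

# Each rank accumulates its own correct-prediction count...
local_correct = torch.tensor([17.0])

# ...and reduce_value sums it over all ranks when torch.distributed has been
# initialized (e.g. under torchrun); with world_size == 1 it is returned as-is.
total_correct = reduce_value(local_correct, average=False)
print(get_world_size(), total_correct.item())
```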
deep_sort/deep/multi_train_utils/train_eval_utils.py
ADDED
@@ -0,0 +1,90 @@
+import sys
+
+from tqdm import tqdm
+import torch
+
+from .distributed_utils import reduce_value, is_main_process
+
+
+def load_model(state_dict, model_state_dict, model):
+    for k in state_dict:
+        if k in model_state_dict:
+            if state_dict[k].shape != model_state_dict[k].shape:
+                print('Skip loading parameter {}, required shape {}, ' \
+                      'loaded shape {}.'.format(
+                    k, model_state_dict[k].shape, state_dict[k].shape))
+                state_dict[k] = model_state_dict[k]
+        else:
+            print('Drop parameter {}.'.format(k))
+    for k in model_state_dict:
+        if not (k in state_dict):
+            print('No param {}.'.format(k))
+            state_dict[k] = model_state_dict[k]
+    model.load_state_dict(state_dict, strict=False)
+    return model
+
+
+def train_one_epoch(model, optimizer, data_loader, device, epoch):
+    model.train()
+    criterion = torch.nn.CrossEntropyLoss()
+    mean_loss = torch.zeros(1).to(device)
+    sum_num = torch.zeros(1).to(device)
+    optimizer.zero_grad()
+
+    if is_main_process():
+        data_loader = tqdm(data_loader, file=sys.stdout)
+
+    for idx, (images, labels) in enumerate(data_loader):
+        # forward
+        images, labels = images.to(device), labels.to(device)
+        outputs = model(images)
+        loss = criterion(outputs, labels)
+
+        # backward
+        loss.backward()
+        loss = reduce_value(loss, average=True)
+        mean_loss = (mean_loss * idx + loss.detach()) / (idx + 1)
+        pred = torch.max(outputs, dim=1)[1]
+        sum_num += torch.eq(pred, labels).sum()
+
+        if is_main_process():
+            data_loader.desc = '[epoch {}] mean loss {}'.format(epoch, mean_loss.item())
+
+        if not torch.isfinite(loss):
+            print('loss is infinite, ending training')
+            sys.exit(1)
+
+        optimizer.step()
+        optimizer.zero_grad()
+    if device != torch.device('cpu'):
+        torch.cuda.synchronize(device)
+    sum_num = reduce_value(sum_num, average=False)
+
+    return sum_num.item(), mean_loss.item()
+
+
+@torch.no_grad()
+def evaluate(model, data_loader, device):
+    model.eval()
+    criterion = torch.nn.CrossEntropyLoss()
+    test_loss = torch.zeros(1).to(device)
+    sum_num = torch.zeros(1).to(device)
+    if is_main_process():
+        data_loader = tqdm(data_loader, file=sys.stdout)
+
+    for idx, (inputs, labels) in enumerate(data_loader):
+        inputs, labels = inputs.to(device), labels.to(device)
+        outputs = model(inputs)
+        loss = criterion(outputs, labels)
+        loss = reduce_value(loss, average=True)
+
+        test_loss = (test_loss * idx + loss.detach()) / (idx + 1)
+        pred = torch.max(outputs, dim=1)[1]
+        sum_num += torch.eq(pred, labels).sum()
+
+    if device != torch.device('cpu'):
+        torch.cuda.synchronize(device)
+
+    sum_num = reduce_value(sum_num, average=False)
+
+    return sum_num.item(), test_loss.item()
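A condensed sketch of the epoch loop these helpers are designed for, the same pattern `train.py` follows below; the tiny random dataset and the optimizer settings are stand-ins, not the repo's defaults:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

# Run from inside deep_sort/deep/ so the repo-local imports resolve.
from model import Net
from multi_train_utils.train_eval_utils import train_one_epoch, evaluate

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Tiny stand-in dataset: 64 random 128x64 crops over 10 classes.
images = torch.randn(64, 3, 128, 64)
labels = torch.randint(0, 10, (64,))
train_loader = DataLoader(TensorDataset(images, labels), batch_size=16, shuffle=True)
val_loader = DataLoader(TensorDataset(images, labels), batch_size=16)

net = Net(num_classes=10).to(device)
optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)

for epoch in range(2):
    train_correct, train_loss = train_one_epoch(net, optimizer, train_loader, device, epoch)
    val_correct, val_loss = evaluate(net, val_loader, device)
    print(epoch, train_loss, train_correct / 64, val_loss, val_correct / 64)
```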
deep_sort/deep/resnet.py
ADDED
@@ -0,0 +1,173 @@
+import torch.nn as nn
+import torch
+
+
+class BasicBlock(nn.Module):
+    expansion = 1
+
+    def __init__(self, in_channel, out_channel, stride=1, downsample=None, **kwargs):
+        super(BasicBlock, self).__init__()
+        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=3,
+                               stride=stride, padding=1, bias=False)
+        self.bn1 = nn.BatchNorm2d(out_channel)
+        self.relu = nn.ReLU()
+        self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel, kernel_size=3,
+                               stride=1, padding=1, bias=False)
+        self.bn2 = nn.BatchNorm2d(out_channel)
+        self.downsample = downsample
+
+    def forward(self, x):
+        identity = x
+        if self.downsample is not None:
+            identity = self.downsample(x)
+
+        out = self.conv1(x)
+        out = self.bn1(out)
+        out = self.relu(out)
+
+        out = self.conv2(out)
+        out = self.bn2(out)
+
+        out += identity
+        out = self.relu(out)
+        return out
+
+
+class Bottleneck(nn.Module):
+    expansion = 4
+
+    def __init__(self, in_channel, out_channel, stride=1, downsample=None,
+                 groups=1, width_per_group=64):
+        super(Bottleneck, self).__init__()
+        width = int(out_channel * (width_per_group / 64.)) * groups
+
+        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=width, kernel_size=1,
+                               stride=1, bias=False)
+        self.bn1 = nn.BatchNorm2d(width)
+        self.conv2 = nn.Conv2d(in_channels=width, out_channels=width, kernel_size=3,
+                               stride=stride, padding=1, bias=False, groups=groups)
+        self.bn2 = nn.BatchNorm2d(width)
+        self.conv3 = nn.Conv2d(in_channels=width, out_channels=out_channel * self.expansion,
+                               kernel_size=1, stride=1, bias=False)
+        self.bn3 = nn.BatchNorm2d(out_channel * self.expansion)
+        self.relu = nn.ReLU(inplace=True)
+        self.downsample = downsample
+
+    def forward(self, x):
+        identity = x
+        if self.downsample is not None:
+            identity = self.downsample(x)
+
+        out = self.conv1(x)
+        out = self.bn1(out)
+        out = self.relu(out)
+
+        out = self.conv2(out)
+        out = self.bn2(out)
+        out = self.relu(out)
+
+        out = self.conv3(out)
+        out = self.bn3(out)
+
+        out += identity
+        out = self.relu(out)
+
+        return out
+
+
+class ResNet(nn.Module):
+
+    def __init__(self, block, blocks_num, reid=False, num_classes=1000, groups=1, width_per_group=64):
+        super(ResNet, self).__init__()
+        self.reid = reid
+        self.in_channel = 64
+
+        self.groups = groups
+        self.width_per_group = width_per_group
+
+        self.conv1 = nn.Conv2d(3, self.in_channel, kernel_size=7, stride=2,
+                               padding=3, bias=False)
+        self.bn1 = nn.BatchNorm2d(self.in_channel)
+        self.relu = nn.ReLU(inplace=True)
+        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+        self.layer1 = self._make_layers(block, 64, blocks_num[0])
+        self.layer2 = self._make_layers(block, 128, blocks_num[1], stride=2)
+        self.layer3 = self._make_layers(block, 256, blocks_num[2], stride=2)
+        # self.layer4 = self._make_layers(block, 512, blocks_num[3], stride=1)
+
+        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+        self.fc = nn.Linear(256 * block.expansion, num_classes)
+
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+            elif isinstance(m, nn.BatchNorm2d):
+                nn.init.constant_(m.weight, 1)
+                nn.init.constant_(m.bias, 0)
+
+    def _make_layers(self, block, channel, block_num, stride=1):
+        downsample = None
+        if stride != 1 or self.in_channel != channel * block.expansion:
+            downsample = nn.Sequential(
+                nn.Conv2d(self.in_channel, channel * block.expansion, kernel_size=1, stride=stride, bias=False),
+                nn.BatchNorm2d(channel * block.expansion)
+            )
+        layers = []
+        layers.append(block(self.in_channel, channel, downsample=downsample, stride=stride,
+                            groups=self.groups, width_per_group=self.width_per_group))
+        self.in_channel = channel * block.expansion
+
+        for _ in range(1, block_num):
+            layers.append(block(self.in_channel, channel, groups=self.groups, width_per_group=self.width_per_group))
+
+        return nn.Sequential(*layers)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.bn1(x)
+        x = self.relu(x)
+        x = self.maxpool(x)
+
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        # x = self.layer4(x)
+        x = self.avgpool(x)
+        x = torch.flatten(x, 1)
+
+        # B x 512
+        if self.reid:
+            x = x.div(x.norm(p=2, dim=1, keepdim=True))
+            return x
+        # classifier
+        x = self.fc(x)
+        return x
+
+
+def resnet18(num_classes=1000, reid=False):
+    # https://download.pytorch.org/models/resnet18-5c106cde.pth
+    return ResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_classes, reid=reid)
+
+
+def resnet34(num_classes=1000, reid=False):
+    # https://download.pytorch.org/models/resnet34-333f7ec4.pth
+    return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes, reid=reid)
+
+
+def resnet50(num_classes=1000, reid=False):
+    # https://download.pytorch.org/models/resnet50-19c8e357.pth
+    return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, reid=reid)
+
+
+def resnext50_32x4d(num_classes=1000, reid=False):
+    # https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth
+    groups = 32
+    width_per_group = 4
+    return ResNet(Bottleneck, [3, 4, 6, 3], reid=reid,
+                  num_classes=num_classes, groups=groups, width_per_group=width_per_group)
+
+
+if __name__ == '__main__':
+    net = resnet18(reid=True)
+    x = torch.randn(4, 3, 128, 64)
+    y = net(x)
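A sketch of pairing this truncated `resnet18` with the ImageNet checkpoint linked in GETTING_STARTED.md. Because `layer4` is commented out and the `fc` head has a different shape, the ImageNet head has to be dropped and loading must be non-strict (the `load_model` helper in `train_eval_utils.py` handles the same situation during training); the checkpoint path assumes the file was downloaded into the checkpoint folder:

```python
import torch

# Run from inside deep_sort/deep/ so the repo-local import resolves.
from resnet import resnet18

net = resnet18(num_classes=751, reid=False)

# ImageNet weights from https://download.pytorch.org/models/resnet18-5c106cde.pth.
state_dict = torch.load("checkpoint/resnet18-5c106cde.pth", map_location="cpu")
# Drop the 1000-class ImageNet head; this model's fc has a different shape.
state_dict.pop("fc.weight", None)
state_dict.pop("fc.bias", None)
# strict=False skips the layer4.* weights, which this truncated ResNet lacks.
missing, unexpected = net.load_state_dict(state_dict, strict=False)
print("unexpected (skipped) keys:", unexpected)

x = torch.randn(2, 3, 128, 64)
print(net(x).shape)  # torch.Size([2, 751])
```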
deep_sort/deep/test.py
ADDED
@@ -0,0 +1,77 @@
+import torch
+import torch.backends.cudnn as cudnn
+import torchvision
+
+import argparse
+import os
+
+from model import Net
+
+parser = argparse.ArgumentParser(description="Train on market1501")
+parser.add_argument("--data-dir", default='data', type=str)
+parser.add_argument("--no-cuda", action="store_true")
+parser.add_argument("--gpu-id", default=0, type=int)
+args = parser.parse_args()
+
+# device
+device = "cuda:{}".format(args.gpu_id) if torch.cuda.is_available() and not args.no_cuda else "cpu"
+if torch.cuda.is_available() and not args.no_cuda:
+    cudnn.benchmark = True
+
+# data loader
+root = args.data_dir
+query_dir = os.path.join(root, "query")
+gallery_dir = os.path.join(root, "gallery")
+transform = torchvision.transforms.Compose([
+    torchvision.transforms.Resize((128, 64)),
+    torchvision.transforms.ToTensor(),
+    torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
+])
+queryloader = torch.utils.data.DataLoader(
+    torchvision.datasets.ImageFolder(query_dir, transform=transform),
+    batch_size=64, shuffle=False
+)
+galleryloader = torch.utils.data.DataLoader(
+    torchvision.datasets.ImageFolder(gallery_dir, transform=transform),
+    batch_size=64, shuffle=False
+)
+
+# net definition
+net = Net(reid=True)
+assert os.path.isfile("./checkpoint/ckpt.t7"), "Error: no checkpoint file found!"
+print('Loading from checkpoint/ckpt.t7')
+checkpoint = torch.load("./checkpoint/ckpt.t7")
+net_dict = checkpoint['net_dict']
+net.load_state_dict(net_dict, strict=False)
+net.eval()
+net.to(device)
+
+# compute features
+query_features = torch.tensor([]).float()
+query_labels = torch.tensor([]).long()
+gallery_features = torch.tensor([]).float()
+gallery_labels = torch.tensor([]).long()
+
+with torch.no_grad():
+    for idx, (inputs, labels) in enumerate(queryloader):
+        inputs = inputs.to(device)
+        features = net(inputs).cpu()
+        query_features = torch.cat((query_features, features), dim=0)
+        query_labels = torch.cat((query_labels, labels))
+
+    for idx, (inputs, labels) in enumerate(galleryloader):
+        inputs = inputs.to(device)
+        features = net(inputs).cpu()
+        gallery_features = torch.cat((gallery_features, features), dim=0)
+        gallery_labels = torch.cat((gallery_labels, labels))
+
+gallery_labels -= 2
+
+# save features
+features = {
+    "qf": query_features,
+    "ql": query_labels,
+    "gf": gallery_features,
+    "gl": gallery_labels
+}
+torch.save(features, "features.pth")
deep_sort/deep/train.jpg
ADDED
(binary image file, stored with Git LFS)
deep_sort/deep/train.py
ADDED
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import argparse
|
2 |
+
import os
|
3 |
+
import tempfile
|
4 |
+
|
5 |
+
import math
|
6 |
+
import warnings
|
7 |
+
import matplotlib.pyplot as plt
|
8 |
+
import torch
|
9 |
+
import torchvision
|
10 |
+
from torch.optim import lr_scheduler
|
11 |
+
|
12 |
+
from multi_train_utils.distributed_utils import init_distributed_mode, cleanup
|
13 |
+
from multi_train_utils.train_eval_utils import train_one_epoch, evaluate, load_model
|
14 |
+
import torch.distributed as dist
|
15 |
+
from datasets import ClsDataset, read_split_data
|
16 |
+
|
17 |
+
from model import Net
|
18 |
+
from resnet import resnet18
|
19 |
+
|
20 |
+
# plot figure
|
21 |
+
x_epoch = []
|
22 |
+
record = {'train_loss': [], 'train_err': [], 'test_loss': [], 'test_err': []}
|
23 |
+
fig = plt.figure()
|
24 |
+
ax0 = fig.add_subplot(121, title="loss")
|
25 |
+
ax1 = fig.add_subplot(122, title="top1_err")
|
26 |
+
|
27 |
+
|
28 |
+
def draw_curve(epoch, train_loss, train_err, test_loss, test_err):
|
29 |
+
global record
|
30 |
+
record['train_loss'].append(train_loss)
|
31 |
+
record['train_err'].append(train_err)
|
32 |
+
record['test_loss'].append(test_loss)
|
33 |
+
record['test_err'].append(test_err)
|
34 |
+
|
35 |
+
x_epoch.append(epoch)
|
36 |
+
ax0.plot(x_epoch, record['train_loss'], 'bo-', label='train')
|
37 |
+
ax0.plot(x_epoch, record['test_loss'], 'ro-', label='val')
|
38 |
+
ax1.plot(x_epoch, record['train_err'], 'bo-', label='train')
|
39 |
+
ax1.plot(x_epoch, record['test_err'], 'ro-', label='val')
|
40 |
+
if epoch == 0:
|
41 |
+
ax0.legend()
|
42 |
+
ax1.legend()
|
43 |
+
fig.savefig("train.jpg")
|
44 |
+
|
45 |
+
|
46 |
+
def main(args):
|
47 |
+
batch_size = args.batch_size
|
48 |
+
device = 'cuda:{}'.format(args.gpu_id) if torch.cuda.is_available() else 'cpu'
|
49 |
+
|
50 |
+
train_info, val_info, num_classes = read_split_data(args.data_dir, valid_rate=0.2)
|
51 |
+
train_images_path, train_labels = train_info
|
52 |
+
val_images_path, val_labels = val_info
|
53 |
+
|
54 |
+
transform_train = torchvision.transforms.Compose([
|
55 |
+
torchvision.transforms.RandomCrop((128, 64), padding=4),
|
56 |
+
torchvision.transforms.RandomHorizontalFlip(),
|
57 |
+
torchvision.transforms.ToTensor(),
|
58 |
+
torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
|
59 |
+
])
|
60 |
+
transform_val = torchvision.transforms.Compose([
|
61 |
+
torchvision.transforms.Resize((128, 64)),
|
62 |
+
torchvision.transforms.ToTensor(),
|
63 |
+
torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
|
64 |
+
])
|
65 |
+
|
66 |
+
train_dataset = ClsDataset(
|
67 |
+
images_path=train_images_path,
|
68 |
+
images_labels=train_labels,
|
69 |
+
transform=transform_train
|
70 |
+
)
|
71 |
+
val_dataset = ClsDataset(
|
72 |
+
images_path=val_images_path,
|
73 |
+
images_labels=val_labels,
|
74 |
+
transform=transform_val
|
75 |
+
)
|
76 |
+
|
77 |
+
number_workers = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])
|
78 |
+
print('Using {} dataloader workers per process'.format(number_workers))
|
79 |
+
|
80 |
+
train_loader = torch.utils.data.DataLoader(
|
81 |
+
train_dataset,
|
82 |
+
batch_size=batch_size,
|
83 |
+
shuffle=True,
|
84 |
+
pin_memory=True,
|
85 |
+
num_workers=number_workers
|
86 |
+
)
|
87 |
+
val_loader = torch.utils.data.DataLoader(
|
88 |
+
val_dataset,
|
89 |
+
batch_size=batch_size,
|
90 |
+
shuffle=False,
|
91 |
+
pin_memory=True,
|
92 |
+
num_workers=number_workers,
|
93 |
+
)
|
94 |
+
|
95 |
+
# net definition
|
96 |
+
start_epoch = 0
|
97 |
+
net = Net(num_classes=num_classes)
|
98 |
+
if args.weights:
|
99 |
+
print('Loading from ', args.weights)
|
100 |
+
checkpoint = torch.load(args.weights, map_location='cpu')
|
101 |
+
net_dict = checkpoint if 'net_dict' not in checkpoint else checkpoint['net_dict']
|
102 |
+
start_epoch = checkpoint['epoch'] if 'epoch' in checkpoint else start_epoch
|
103 |
+
net = load_model(net_dict, net.state_dict(), net)
|
104 |
+
|
105 |
+
if args.freeze_layers:
|
106 |
+
for name, param in net.named_parameters():
|
107 |
+
if 'classifier' not in name:
|
108 |
+
param.requires_grad = False
|
109 |
+
|
110 |
+
net.to(device)
|
111 |
+
|
112 |
+
# loss and optimizer
|
113 |
+
pg = [p for p in net.parameters() if p.requires_grad]
|
114 |
+
optimizer = torch.optim.SGD(pg, args.lr, momentum=0.9, weight_decay=5e-4)
|
115 |
+
|
116 |
+
lr = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf
|
117 |
+
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lr)
|
118 |
+
for epoch in range(start_epoch, start_epoch + args.epochs):
|
119 |
+
train_positive, train_loss = train_one_epoch(net, optimizer, train_loader, device, epoch)
|
120 |
+
train_acc = train_positive / len(train_dataset)
|
121 |
+
scheduler.step()
|
122 |
+
|
123 |
+
test_positive, test_loss = evaluate(net, val_loader, device)
|
124 |
+
test_acc = test_positive / len(val_dataset)
|
125 |
+
|
126 |
+
print('[epoch {}] accuracy: {}'.format(epoch, test_acc))
|
127 |
+
|
128 |
+
state_dict = {
|
129 |
+
'net_dict': net.state_dict(),
|
130 |
+
'acc': test_acc,
|
131 |
+
'epoch': epoch
|
132 |
+
}
|
133 |
+
torch.save(state_dict, './checkpoint/model_{}.pth'.format(epoch))
|
134 |
+
draw_curve(epoch, train_loss, 1 - train_acc, test_loss, 1 - test_acc)
|
135 |
+
|
136 |
+
|
137 |
+
if __name__ == '__main__':
|
138 |
+
parser = argparse.ArgumentParser(description="Train on market1501")
|
139 |
+
parser.add_argument("--data-dir", default='data', type=str)
|
140 |
+
parser.add_argument('--epochs', type=int, default=40)
|
141 |
+
parser.add_argument('--batch_size', type=int, default=32)
|
142 |
+
parser.add_argument("--lr", default=0.001, type=float)
|
143 |
+
parser.add_argument('--lrf', default=0.1, type=float)
|
144 |
+
|
145 |
+
parser.add_argument('--weights', type=str, default='./checkpoint/resnet18.pth')
|
146 |
+
parser.add_argument('--freeze-layers', action='store_true')
|
147 |
+
|
148 |
+
parser.add_argument('--gpu_id', default='0', help='gpu id')
|
149 |
+
args = parser.parse_args()
|
150 |
+
|
151 |
+
main(args)
|
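The LambdaLR scheduler above anneals the learning rate with a cosine schedule from args.lr down to roughly args.lr * args.lrf over args.epochs epochs. A small self-contained sketch of the same factor, using the argparse defaults above (lr=0.001, lrf=0.1, epochs=40), prints the effective learning rate per epoch:

import math

lr, lrf, epochs = 0.001, 0.1, 40   # defaults from the argument parser above
for epoch in range(epochs):
    factor = ((1 + math.cos(epoch * math.pi / epochs)) / 2) * (1 - lrf) + lrf
    print(f"epoch {epoch:2d}: lr = {lr * factor:.6f}")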
deep_sort/deep/train_multiGPU.py
ADDED
@@ -0,0 +1,189 @@
1 |
+
import argparse
|
2 |
+
import os
|
3 |
+
import tempfile
|
4 |
+
|
5 |
+
import math
|
6 |
+
import warnings
|
7 |
+
import matplotlib.pyplot as plt
|
8 |
+
import torch
|
9 |
+
import torchvision
|
10 |
+
from torch.optim import lr_scheduler
|
11 |
+
|
12 |
+
from multi_train_utils.distributed_utils import init_distributed_mode, cleanup
|
13 |
+
from multi_train_utils.train_eval_utils import train_one_epoch, evaluate, load_model
|
14 |
+
import torch.distributed as dist
|
15 |
+
from datasets import ClsDataset, read_split_data
|
16 |
+
|
17 |
+
from resnet import resnet18
|
18 |
+
|
19 |
+
|
20 |
+
# plot figure
|
21 |
+
x_epoch = []
|
22 |
+
record = {'train_loss': [], 'train_err': [], 'test_loss': [], 'test_err': []}
|
23 |
+
fig = plt.figure()
|
24 |
+
ax0 = fig.add_subplot(121, title="loss")
|
25 |
+
ax1 = fig.add_subplot(122, title="top1_err")
|
26 |
+
|
27 |
+
|
28 |
+
def draw_curve(epoch, train_loss, train_err, test_loss, test_err):
|
29 |
+
global record
|
30 |
+
record['train_loss'].append(train_loss)
|
31 |
+
record['train_err'].append(train_err)
|
32 |
+
record['test_loss'].append(test_loss)
|
33 |
+
record['test_err'].append(test_err)
|
34 |
+
|
35 |
+
x_epoch.append(epoch)
|
36 |
+
ax0.plot(x_epoch, record['train_loss'], 'bo-', label='train')
|
37 |
+
ax0.plot(x_epoch, record['test_loss'], 'ro-', label='val')
|
38 |
+
ax1.plot(x_epoch, record['train_err'], 'bo-', label='train')
|
39 |
+
ax1.plot(x_epoch, record['test_err'], 'ro-', label='val')
|
40 |
+
if epoch == 0:
|
41 |
+
ax0.legend()
|
42 |
+
ax1.legend()
|
43 |
+
fig.savefig("train.jpg")
|
44 |
+
|
45 |
+
|
46 |
+
def main(args):
|
47 |
+
init_distributed_mode(args)
|
48 |
+
|
49 |
+
rank = args.rank
|
50 |
+
device = torch.device(args.device)
|
51 |
+
batch_size = args.batch_size
|
52 |
+
weights_path = args.weights
|
53 |
+
args.lr *= args.world_size
|
54 |
+
checkpoint_path = ''
|
55 |
+
|
56 |
+
if rank == 0:
|
57 |
+
print(args)
|
58 |
+
if os.path.exists('./checkpoint') is False:
|
59 |
+
os.mkdir('./checkpoint')
|
60 |
+
|
61 |
+
train_info, val_info, num_classes = read_split_data(args.data_dir, valid_rate=0.2)
|
62 |
+
train_images_path, train_labels = train_info
|
63 |
+
val_images_path, val_labels = val_info
|
64 |
+
|
65 |
+
transform_train = torchvision.transforms.Compose([
|
66 |
+
torchvision.transforms.RandomCrop((128, 64), padding=4),
|
67 |
+
torchvision.transforms.RandomHorizontalFlip(),
|
68 |
+
torchvision.transforms.ToTensor(),
|
69 |
+
torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
|
70 |
+
])
|
71 |
+
transform_val = torchvision.transforms.Compose([
|
72 |
+
torchvision.transforms.Resize((128, 64)),
|
73 |
+
torchvision.transforms.ToTensor(),
|
74 |
+
torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
|
75 |
+
])
|
76 |
+
|
77 |
+
train_dataset = ClsDataset(
|
78 |
+
images_path=train_images_path,
|
79 |
+
images_labels=train_labels,
|
80 |
+
transform=transform_train
|
81 |
+
)
|
82 |
+
val_dataset = ClsDataset(
|
83 |
+
images_path=val_images_path,
|
84 |
+
images_labels=val_labels,
|
85 |
+
transform=transform_val
|
86 |
+
)
|
87 |
+
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
|
88 |
+
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
|
89 |
+
|
90 |
+
train_batch_sampler = torch.utils.data.BatchSampler(train_sampler, batch_size, drop_last=True)
|
91 |
+
|
92 |
+
number_workers = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])
|
93 |
+
|
94 |
+
if rank == 0:
|
95 |
+
print('Using {} dataloader workers per process'.format(number_workers))
|
96 |
+
|
97 |
+
train_loader = torch.utils.data.DataLoader(
|
98 |
+
train_dataset,
|
99 |
+
batch_sampler=train_batch_sampler,
|
100 |
+
pin_memory=True,
|
101 |
+
num_workers=number_workers
|
102 |
+
)
|
103 |
+
val_loader = torch.utils.data.DataLoader(
|
104 |
+
val_dataset,
|
105 |
+
sampler=val_sampler,
|
106 |
+
batch_size=batch_size,
|
107 |
+
pin_memory=True,
|
108 |
+
num_workers=number_workers,
|
109 |
+
)
|
110 |
+
|
111 |
+
# net definition
|
112 |
+
start_epoch = 0
|
113 |
+
net = resnet18(num_classes=num_classes)
|
114 |
+
if args.weights:
|
115 |
+
print('Loading from ', args.weights)
|
116 |
+
checkpoint = torch.load(args.weights, map_location='cpu')
|
117 |
+
net_dict = checkpoint if 'net_dict' not in checkpoint else checkpoint['net_dict']
|
118 |
+
start_epoch = checkpoint['epoch'] if 'epoch' in checkpoint else start_epoch
|
119 |
+
net = load_model(net_dict, net.state_dict(), net)
|
120 |
+
else:
|
121 |
+
warnings.warn("consider providing pretrained weights")
|
122 |
+
checkpoint_path = os.path.join(tempfile.gettempdir(), 'initial_weights.pth')
|
123 |
+
if rank == 0:
|
124 |
+
torch.save(net.state_dict(), checkpoint_path)
|
125 |
+
|
126 |
+
dist.barrier()
|
127 |
+
net.load_state_dict(torch.load(checkpoint_path, map_location='cpu'))
|
128 |
+
|
129 |
+
if args.freeze_layers:
|
130 |
+
for name, param in net.named_parameters():
|
131 |
+
if 'fc' not in name:
|
132 |
+
param.requires_grad = False
|
133 |
+
else:
|
134 |
+
if args.syncBN:
|
135 |
+
net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net)
|
136 |
+
net.to(device)
|
137 |
+
|
138 |
+
net = torch.nn.parallel.DistributedDataParallel(net, device_ids=[args.gpu])
|
139 |
+
|
140 |
+
# loss and optimizer
|
141 |
+
pg = [p for p in net.parameters() if p.requires_grad]
|
142 |
+
optimizer = torch.optim.SGD(pg, args.lr, momentum=0.9, weight_decay=5e-4)
|
143 |
+
|
144 |
+
lr = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf
|
145 |
+
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lr)
|
146 |
+
for epoch in range(start_epoch, start_epoch + args.epochs):
|
147 |
+
train_positive, train_loss = train_one_epoch(net, optimizer, train_loader, device, epoch)
|
148 |
+
train_acc = train_positive / len(train_dataset)
|
149 |
+
scheduler.step()
|
150 |
+
|
151 |
+
test_positive, test_loss = evaluate(net, val_loader, device)
|
152 |
+
test_acc = test_positive / len(val_dataset)
|
153 |
+
|
154 |
+
if rank == 0:
|
155 |
+
print('[epoch {}] accuracy: {}'.format(epoch, test_acc))
|
156 |
+
|
157 |
+
state_dict = {
|
158 |
+
'net_dict': net.module.state_dict(),
|
159 |
+
'acc': test_acc,
|
160 |
+
'epoch': epoch
|
161 |
+
}
|
162 |
+
torch.save(state_dict, './checkpoint/model_{}.pth'.format(epoch))
|
163 |
+
draw_curve(epoch, train_loss, 1 - train_acc, test_loss, 1 - test_acc)
|
164 |
+
|
165 |
+
if rank == 0:
|
166 |
+
if os.path.exists(checkpoint_path) is True:
|
167 |
+
os.remove(checkpoint_path)
|
168 |
+
cleanup()
|
169 |
+
|
170 |
+
|
171 |
+
if __name__ == '__main__':
|
172 |
+
parser = argparse.ArgumentParser(description="Train on market1501")
|
173 |
+
parser.add_argument("--data-dir", default='data', type=str)
|
174 |
+
parser.add_argument('--epochs', type=int, default=40)
|
175 |
+
parser.add_argument('--batch_size', type=int, default=32)
|
176 |
+
parser.add_argument("--lr", default=0.001, type=float)
|
177 |
+
parser.add_argument('--lrf', default=0.1, type=float)
|
178 |
+
parser.add_argument('--syncBN', type=bool, default=True)
|
179 |
+
|
180 |
+
parser.add_argument('--weights', type=str, default='./checkpoint/resnet18.pth')
|
181 |
+
parser.add_argument('--freeze-layers', action='store_true')
|
182 |
+
|
183 |
+
# do not change the following parameters; they are assigned automatically by the distributed launcher
|
184 |
+
parser.add_argument('--device', default='cuda', help='device id (i.e. 0 or 0, 1 or cpu)')
|
185 |
+
parser.add_argument('--world_size', default=4, type=int, help='number of distributed processes')
|
186 |
+
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
|
187 |
+
args = parser.parse_args()
|
188 |
+
|
189 |
+
main(args)
|
deep_sort/deep_sort.py
ADDED
@@ -0,0 +1,121 @@
1 |
+
import numpy as np
|
2 |
+
import torch
|
3 |
+
|
4 |
+
from .deep.feature_extractor import Extractor, FastReIDExtractor
|
5 |
+
from .sort.nn_matching import NearestNeighborDistanceMetric
|
6 |
+
from .sort.preprocessing import non_max_suppression
|
7 |
+
from .sort.detection import Detection
|
8 |
+
from .sort.tracker import Tracker
|
9 |
+
|
10 |
+
__all__ = ['DeepSort']
|
11 |
+
|
12 |
+
|
13 |
+
class DeepSort(object):
|
14 |
+
def __init__(self, model_path, model_config=None, max_dist=0.2, min_confidence=0.3, nms_max_overlap=1.0,
|
15 |
+
max_iou_distance=0.7, max_age=70, n_init=3, nn_budget=100, use_cuda=True):
|
16 |
+
self.min_confidence = min_confidence
|
17 |
+
self.nms_max_overlap = nms_max_overlap
|
18 |
+
|
19 |
+
if model_config is None:
|
20 |
+
self.extractor = Extractor(model_path, use_cuda=use_cuda)
|
21 |
+
else:
|
22 |
+
self.extractor = FastReIDExtractor(model_config, model_path, use_cuda=use_cuda)
|
23 |
+
|
24 |
+
max_cosine_distance = max_dist
|
25 |
+
metric = NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
|
26 |
+
self.tracker = Tracker(metric, max_iou_distance=max_iou_distance, max_age=max_age, n_init=n_init)
|
27 |
+
|
28 |
+
def update(self, bbox_xywh, confidences, classes, ori_img, masks=None):
|
29 |
+
self.height, self.width = ori_img.shape[:2]
|
30 |
+
# generate detections
|
31 |
+
features = self._get_features(bbox_xywh, ori_img)
|
32 |
+
bbox_tlwh = self._xywh_to_tlwh(bbox_xywh)
|
33 |
+
detections = [Detection(bbox_tlwh[i], conf, label, features[i], None if masks is None else masks[i])
|
34 |
+
for i, (conf, label) in enumerate(zip(confidences, classes))
|
35 |
+
if conf > self.min_confidence]
|
36 |
+
|
37 |
+
# run non-maximum suppression
|
38 |
+
boxes = np.array([d.tlwh for d in detections])
|
39 |
+
scores = np.array([d.confidence for d in detections])
|
40 |
+
indices = non_max_suppression(boxes, self.nms_max_overlap, scores)
|
41 |
+
detections = [detections[i] for i in indices]
|
42 |
+
|
43 |
+
# update tracker
|
44 |
+
self.tracker.predict()
|
45 |
+
self.tracker.update(detections)
|
46 |
+
|
47 |
+
# output bbox identities
|
48 |
+
outputs = []
|
49 |
+
mask_outputs = []
|
50 |
+
for track in self.tracker.tracks:
|
51 |
+
if not track.is_confirmed() or track.time_since_update > 1:
|
52 |
+
continue
|
53 |
+
box = track.to_tlwh()
|
54 |
+
x1, y1, x2, y2 = self._tlwh_to_xyxy(box)
|
55 |
+
track_id = track.track_id
|
56 |
+
track_cls = track.cls
|
57 |
+
outputs.append(np.array([x1, y1, x2, y2, track_cls, track_id], dtype=np.int32))
|
58 |
+
if track.mask is not None:
|
59 |
+
mask_outputs.append(track.mask)
|
60 |
+
if len(outputs) > 0:
|
61 |
+
outputs = np.stack(outputs, axis=0)
|
62 |
+
return outputs, mask_outputs
|
63 |
+
|
64 |
+
"""
|
65 |
+
TODO:
|
66 |
+
Convert bbox from xc_yc_w_h to xtl_ytl_w_h
|
67 |
+
Thanks [email protected] for reporting this bug!
|
68 |
+
"""
|
69 |
+
|
70 |
+
@staticmethod
|
71 |
+
def _xywh_to_tlwh(bbox_xywh):
|
72 |
+
if isinstance(bbox_xywh, np.ndarray):
|
73 |
+
bbox_tlwh = bbox_xywh.copy()
|
74 |
+
elif isinstance(bbox_xywh, torch.Tensor):
|
75 |
+
bbox_tlwh = bbox_xywh.clone()
|
76 |
+
bbox_tlwh[:, 0] = bbox_xywh[:, 0] - bbox_xywh[:, 2] / 2.
|
77 |
+
bbox_tlwh[:, 1] = bbox_xywh[:, 1] - bbox_xywh[:, 3] / 2.
|
78 |
+
return bbox_tlwh
|
79 |
+
|
80 |
+
def _xywh_to_xyxy(self, bbox_xywh):
|
81 |
+
x, y, w, h = bbox_xywh
|
82 |
+
x1 = max(int(x - w / 2), 0)
|
83 |
+
x2 = min(int(x + w / 2), self.width - 1)
|
84 |
+
y1 = max(int(y - h / 2), 0)
|
85 |
+
y2 = min(int(y + h / 2), self.height - 1)
|
86 |
+
return x1, y1, x2, y2
|
87 |
+
|
88 |
+
def _tlwh_to_xyxy(self, bbox_tlwh):
|
89 |
+
"""
|
90 |
+
TODO:
|
91 |
+
Convert bbox from xtl_ytl_w_h to xmin_ymin_xmax_ymax
|
92 |
+
Thanks [email protected] for reporting this bug!
|
93 |
+
"""
|
94 |
+
x, y, w, h = bbox_tlwh
|
95 |
+
x1 = max(int(x), 0)
|
96 |
+
x2 = min(int(x + w), self.width - 1)
|
97 |
+
y1 = max(int(y), 0)
|
98 |
+
y2 = min(int(y + h), self.height - 1)
|
99 |
+
return x1, y1, x2, y2
|
100 |
+
|
101 |
+
@staticmethod
|
102 |
+
def _xyxy_to_tlwh(bbox_xyxy):
|
103 |
+
x1, y1, x2, y2 = bbox_xyxy
|
104 |
+
|
105 |
+
t = x1
|
106 |
+
l = y1
|
107 |
+
w = int(x2 - x1)
|
108 |
+
h = int(y2 - y1)
|
109 |
+
return t, l, w, h
|
110 |
+
|
111 |
+
def _get_features(self, bbox_xywh, ori_img):
|
112 |
+
im_crops = []
|
113 |
+
for box in bbox_xywh:
|
114 |
+
x1, y1, x2, y2 = self._xywh_to_xyxy(box)
|
115 |
+
im = ori_img[y1:y2, x1:x2]
|
116 |
+
im_crops.append(im)
|
117 |
+
if im_crops:
|
118 |
+
features = self.extractor(im_crops)
|
119 |
+
else:
|
120 |
+
features = np.array([])
|
121 |
+
return features
|
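A minimal usage sketch of the DeepSort class above, assuming the package root is on PYTHONPATH and the re-ID checkpoint sits at deep_sort/deep/checkpoint/ckpt.t7 (both paths are assumptions, and the frame and boxes are made up):

import numpy as np
from deep_sort.deep_sort import DeepSort

frame = np.zeros((480, 640, 3), dtype=np.uint8)   # dummy frame
bbox_xywh = np.array([[320, 240, 80, 160], [100, 120, 60, 120]], dtype=np.float32)  # (xc, yc, w, h)
confidences = np.array([0.9, 0.7])
classes = np.array([0, 0])                        # e.g. both "person"

tracker = DeepSort("deep_sort/deep/checkpoint/ckpt.t7", use_cuda=False)
outputs, masks = tracker.update(bbox_xywh, confidences, classes, frame)
# each row of outputs is (x1, y1, x2, y2, class, track_id); note that a track only
# becomes confirmed after n_init associated detections, so outputs may be empty at first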
deep_sort/sort/__init__.py
ADDED
File without changes
|
deep_sort/sort/detection.py
ADDED
@@ -0,0 +1,51 @@
1 |
+
# vim: expandtab:ts=4:sw=4
|
2 |
+
import numpy as np
|
3 |
+
|
4 |
+
|
5 |
+
class Detection(object):
|
6 |
+
"""
|
7 |
+
This class represents a bounding box detection in a single image.
|
8 |
+
|
9 |
+
Parameters
|
10 |
+
----------
|
11 |
+
tlwh : array_like
|
12 |
+
Bounding box in format `(x, y, w, h)`.
|
13 |
+
confidence : float
|
14 |
+
Detector confidence score.
|
15 |
+
feature : array_like
|
16 |
+
A feature vector that describes the object contained in this image.
|
17 |
+
|
18 |
+
Attributes
|
19 |
+
----------
|
20 |
+
tlwh : ndarray
|
21 |
+
Bounding box in format `(top left x, top left y, width, height)`.
|
22 |
+
confidence : ndarray
|
23 |
+
Detector confidence score.
|
24 |
+
feature : ndarray | NoneType
|
25 |
+
A feature vector that describes the object contained in this image.
|
26 |
+
|
27 |
+
"""
|
28 |
+
|
29 |
+
def __init__(self, tlwh, confidence, label, feature, mask=None):
|
30 |
+
self.tlwh = np.asarray(tlwh, dtype=np.float32)
|
31 |
+
self.confidence = float(confidence)
|
32 |
+
self.cls = int(label)
|
33 |
+
self.feature = np.asarray(feature, dtype=np.float32)
|
34 |
+
self.mask = mask
|
35 |
+
|
36 |
+
def to_tlbr(self):
|
37 |
+
"""Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
|
38 |
+
`(top left, bottom right)`.
|
39 |
+
"""
|
40 |
+
ret = self.tlwh.copy()
|
41 |
+
ret[2:] += ret[:2]
|
42 |
+
return ret
|
43 |
+
|
44 |
+
def to_xyah(self):
|
45 |
+
"""Convert bounding box to format `(center x, center y, aspect ratio,
|
46 |
+
height)`, where the aspect ratio is `width / height`.
|
47 |
+
"""
|
48 |
+
ret = self.tlwh.copy()
|
49 |
+
ret[:2] += ret[2:] / 2
|
50 |
+
ret[2] /= ret[3]
|
51 |
+
return ret
|
deep_sort/sort/iou_matching.py
ADDED
@@ -0,0 +1,81 @@
1 |
+
# vim: expandtab:ts=4:sw=4
|
2 |
+
from __future__ import absolute_import
|
3 |
+
import numpy as np
|
4 |
+
from . import linear_assignment
|
5 |
+
|
6 |
+
|
7 |
+
def iou(bbox, candidates):
|
8 |
+
"""Computer intersection over union.
|
9 |
+
|
10 |
+
Parameters
|
11 |
+
----------
|
12 |
+
bbox : ndarray
|
13 |
+
A bounding box in format `(top left x, top left y, width, height)`.
|
14 |
+
candidates : ndarray
|
15 |
+
A matrix of candidate bounding boxes (one per row) in the same format
|
16 |
+
as `bbox`.
|
17 |
+
|
18 |
+
Returns
|
19 |
+
-------
|
20 |
+
ndarray
|
21 |
+
The intersection over union in [0, 1] between the `bbox` and each
|
22 |
+
candidate. A higher score means a larger fraction of the `bbox` is
|
23 |
+
occluded by the candidate.
|
24 |
+
|
25 |
+
"""
|
26 |
+
bbox_tl, bbox_br = bbox[:2], bbox[:2] + bbox[2:]
|
27 |
+
candidates_tl = candidates[:, :2]
|
28 |
+
candidates_br = candidates[:, :2] + candidates[:, 2:]
|
29 |
+
|
30 |
+
tl = np.c_[np.maximum(bbox_tl[0], candidates_tl[:, 0])[:, np.newaxis],
|
31 |
+
np.maximum(bbox_tl[1], candidates_tl[:, 1])[:, np.newaxis]]
|
32 |
+
br = np.c_[np.minimum(bbox_br[0], candidates_br[:, 0])[:, np.newaxis],
|
33 |
+
np.minimum(bbox_br[1], candidates_br[:, 1])[:, np.newaxis]]
|
34 |
+
wh = np.maximum(0., br - tl)
|
35 |
+
|
36 |
+
area_intersection = wh.prod(axis=1)
|
37 |
+
area_bbox = bbox[2:].prod()
|
38 |
+
area_candidates = candidates[:, 2:].prod(axis=1)
|
39 |
+
return area_intersection / (area_bbox + area_candidates - area_intersection)
|
40 |
+
|
41 |
+
|
42 |
+
def iou_cost(tracks, detections, track_indices=None,
|
43 |
+
detection_indices=None):
|
44 |
+
"""An intersection over union distance metric.
|
45 |
+
|
46 |
+
Parameters
|
47 |
+
----------
|
48 |
+
tracks : List[deep_sort.track.Track]
|
49 |
+
A list of tracks.
|
50 |
+
detections : List[deep_sort.detection.Detection]
|
51 |
+
A list of detections.
|
52 |
+
track_indices : Optional[List[int]]
|
53 |
+
A list of indices to tracks that should be matched. Defaults to
|
54 |
+
all `tracks`.
|
55 |
+
detection_indices : Optional[List[int]]
|
56 |
+
A list of indices to detections that should be matched. Defaults
|
57 |
+
to all `detections`.
|
58 |
+
|
59 |
+
Returns
|
60 |
+
-------
|
61 |
+
ndarray
|
62 |
+
Returns a cost matrix of shape
|
63 |
+
len(track_indices), len(detection_indices) where entry (i, j) is
|
64 |
+
`1 - iou(tracks[track_indices[i]], detections[detection_indices[j]])`.
|
65 |
+
|
66 |
+
"""
|
67 |
+
if track_indices is None:
|
68 |
+
track_indices = np.arange(len(tracks))
|
69 |
+
if detection_indices is None:
|
70 |
+
detection_indices = np.arange(len(detections))
|
71 |
+
|
72 |
+
cost_matrix = np.zeros((len(track_indices), len(detection_indices)))
|
73 |
+
for row, track_idx in enumerate(track_indices):
|
74 |
+
if tracks[track_idx].time_since_update > 1:
|
75 |
+
cost_matrix[row, :] = linear_assignment.INFTY_COST
|
76 |
+
continue
|
77 |
+
|
78 |
+
bbox = tracks[track_idx].to_tlwh()
|
79 |
+
candidates = np.asarray([detections[i].tlwh for i in detection_indices])
|
80 |
+
cost_matrix[row, :] = 1. - iou(bbox, candidates)
|
81 |
+
return cost_matrix
|
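A worked example of iou() above on hand-picked boxes; the import path is an assumption:

import numpy as np
from deep_sort.sort.iou_matching import iou

bbox = np.array([0., 0., 10., 10.])            # (top left x, top left y, w, h)
candidates = np.array([[5., 5., 10., 10.],     # partial overlap
                       [0., 0., 10., 10.],     # identical box
                       [20., 20., 5., 5.]])    # disjoint box
print(iou(bbox, candidates))                   # approx. [0.1429, 1.0, 0.0]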
deep_sort/sort/kalman_filter.py
ADDED
@@ -0,0 +1,231 @@
1 |
+
# vim: expandtab:ts=4:sw=4
|
2 |
+
import numpy as np
|
3 |
+
import scipy.linalg
|
4 |
+
|
5 |
+
|
6 |
+
"""
|
7 |
+
Table for the 0.95 quantile of the chi-square distribution with N degrees of
|
8 |
+
freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv
|
9 |
+
function and used as Mahalanobis gating threshold.
|
10 |
+
"""
|
11 |
+
chi2inv95 = {
|
12 |
+
1: 3.8415,
|
13 |
+
2: 5.9915,
|
14 |
+
3: 7.8147,
|
15 |
+
4: 9.4877,
|
16 |
+
5: 11.070,
|
17 |
+
6: 12.592,
|
18 |
+
7: 14.067,
|
19 |
+
8: 15.507,
|
20 |
+
9: 16.919}
|
21 |
+
|
22 |
+
|
23 |
+
class KalmanFilter(object):
|
24 |
+
"""
|
25 |
+
A simple Kalman filter for tracking bounding boxes in image space.
|
26 |
+
|
27 |
+
The 8-dimensional state space
|
28 |
+
|
29 |
+
x, y, a, h, vx, vy, va, vh
|
30 |
+
|
31 |
+
contains the bounding box center position (x, y), aspect ratio a, height h,
|
32 |
+
and their respective velocities.
|
33 |
+
|
34 |
+
Object motion follows a constant velocity model. The bounding box location
|
35 |
+
(x, y, a, h) is taken as direct observation of the state space (linear
|
36 |
+
observation model).
|
37 |
+
|
38 |
+
"""
|
39 |
+
|
40 |
+
def __init__(self):
|
41 |
+
ndim, dt = 4, 1.
|
42 |
+
|
43 |
+
# Create Kalman filter model matrices.
|
44 |
+
self._motion_mat = np.eye(2 * ndim, 2 * ndim)
|
45 |
+
for i in range(ndim):
|
46 |
+
self._motion_mat[i, ndim + i] = dt
|
47 |
+
self._update_mat = np.eye(ndim, 2 * ndim)
|
48 |
+
|
49 |
+
# Motion and observation uncertainty are chosen relative to the current
|
50 |
+
# state estimate. These weights control the amount of uncertainty in
|
51 |
+
# the model. This is a bit hacky.
|
52 |
+
self._std_weight_position = 1. / 20
|
53 |
+
self._std_weight_velocity = 1. / 160
|
54 |
+
|
55 |
+
def initiate(self, measurement):
|
56 |
+
"""Create track from unassociated measurement.
|
57 |
+
|
58 |
+
Parameters
|
59 |
+
----------
|
60 |
+
measurement : ndarray
|
61 |
+
Bounding box coordinates (x, y, a, h) with center position (x, y),
|
62 |
+
aspect ratio a, and height h.
|
63 |
+
|
64 |
+
Returns
|
65 |
+
-------
|
66 |
+
(ndarray, ndarray)
|
67 |
+
Returns the mean vector (8 dimensional) and covariance matrix (8x8
|
68 |
+
dimensional) of the new track. Unobserved velocities are initialized
|
69 |
+
to 0 mean.
|
70 |
+
|
71 |
+
"""
|
72 |
+
mean_pos = measurement
|
73 |
+
mean_vel = np.zeros_like(mean_pos)
|
74 |
+
mean = np.r_[mean_pos, mean_vel]
|
75 |
+
|
76 |
+
std = [
|
77 |
+
2 * self._std_weight_position * measurement[3],
|
78 |
+
2 * self._std_weight_position * measurement[3],
|
79 |
+
1e-2,
|
80 |
+
2 * self._std_weight_position * measurement[3],
|
81 |
+
10 * self._std_weight_velocity * measurement[3],
|
82 |
+
10 * self._std_weight_velocity * measurement[3],
|
83 |
+
1e-5,
|
84 |
+
10 * self._std_weight_velocity * measurement[3]]
|
85 |
+
covariance = np.diag(np.square(std))
|
86 |
+
return mean, covariance
|
87 |
+
|
88 |
+
def predict(self, mean, covariance):
|
89 |
+
"""Run Kalman filter prediction step.
|
90 |
+
|
91 |
+
Parameters
|
92 |
+
----------
|
93 |
+
mean : ndarray
|
94 |
+
The 8 dimensional mean vector of the object state at the previous
|
95 |
+
time step.
|
96 |
+
covariance : ndarray
|
97 |
+
The 8x8 dimensional covariance matrix of the object state at the
|
98 |
+
previous time step.
|
99 |
+
|
100 |
+
Returns
|
101 |
+
-------
|
102 |
+
(ndarray, ndarray)
|
103 |
+
Returns the mean vector and covariance matrix of the predicted
|
104 |
+
state. Unobserved velocities are initialized to 0 mean.
|
105 |
+
|
106 |
+
"""
|
107 |
+
std_pos = [
|
108 |
+
self._std_weight_position * mean[3],
|
109 |
+
self._std_weight_position * mean[3],
|
110 |
+
1e-2,
|
111 |
+
self._std_weight_position * mean[3]]
|
112 |
+
std_vel = [
|
113 |
+
self._std_weight_velocity * mean[3],
|
114 |
+
self._std_weight_velocity * mean[3],
|
115 |
+
1e-5,
|
116 |
+
self._std_weight_velocity * mean[3]]
|
117 |
+
motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))
|
118 |
+
|
119 |
+
mean = np.dot(self._motion_mat, mean)
|
120 |
+
covariance = np.linalg.multi_dot((
|
121 |
+
self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
|
122 |
+
|
123 |
+
return mean, covariance
|
124 |
+
|
125 |
+
def project(self, mean, covariance):
|
126 |
+
"""Project state distribution to measurement space.
|
127 |
+
|
128 |
+
Parameters
|
129 |
+
----------
|
130 |
+
mean : ndarray
|
131 |
+
The state's mean vector (8 dimensional array).
|
132 |
+
covariance : ndarray
|
133 |
+
The state's covariance matrix (8x8 dimensional).
|
134 |
+
|
135 |
+
Returns
|
136 |
+
-------
|
137 |
+
(ndarray, ndarray)
|
138 |
+
Returns the projected mean and covariance matrix of the given state
|
139 |
+
estimate.
|
140 |
+
|
141 |
+
"""
|
142 |
+
std = [
|
143 |
+
self._std_weight_position * mean[3],
|
144 |
+
self._std_weight_position * mean[3],
|
145 |
+
1e-1,
|
146 |
+
self._std_weight_position * mean[3]]
|
147 |
+
innovation_cov = np.diag(np.square(std))
|
148 |
+
|
149 |
+
mean = np.dot(self._update_mat, mean)
|
150 |
+
covariance = np.linalg.multi_dot((
|
151 |
+
self._update_mat, covariance, self._update_mat.T))
|
152 |
+
return mean, covariance + innovation_cov
|
153 |
+
|
154 |
+
def update(self, mean, covariance, measurement):
|
155 |
+
"""Run Kalman filter correction step.
|
156 |
+
|
157 |
+
Parameters
|
158 |
+
----------
|
159 |
+
mean : ndarray
|
160 |
+
The predicted state's mean vector (8 dimensional).
|
161 |
+
covariance : ndarray
|
162 |
+
The state's covariance matrix (8x8 dimensional).
|
163 |
+
measurement : ndarray
|
164 |
+
The 4 dimensional measurement vector (x, y, a, h), where (x, y)
|
165 |
+
is the center position, a the aspect ratio, and h the height of the
|
166 |
+
bounding box.
|
167 |
+
|
168 |
+
Returns
|
169 |
+
-------
|
170 |
+
(ndarray, ndarray)
|
171 |
+
Returns the measurement-corrected state distribution.
|
172 |
+
|
173 |
+
"""
|
174 |
+
projected_mean, projected_cov = self.project(mean, covariance)
|
175 |
+
|
176 |
+
chol_factor, lower = scipy.linalg.cho_factor(
|
177 |
+
projected_cov, lower=True, check_finite=False)
|
178 |
+
kalman_gain = scipy.linalg.cho_solve(
|
179 |
+
(chol_factor, lower), np.dot(covariance, self._update_mat.T).T,
|
180 |
+
check_finite=False).T
|
181 |
+
innovation = measurement - projected_mean
|
182 |
+
|
183 |
+
new_mean = mean + np.dot(innovation, kalman_gain.T)
|
184 |
+
# new_covariance = covariance - np.linalg.multi_dot((
|
185 |
+
# kalman_gain, projected_cov, kalman_gain.T))
|
186 |
+
new_covariance = covariance - np.linalg.multi_dot((
|
187 |
+
kalman_gain, self._update_mat, covariance))
|
188 |
+
return new_mean, new_covariance
|
189 |
+
|
190 |
+
def gating_distance(self, mean, covariance, measurements,
|
191 |
+
only_position=False):
|
192 |
+
"""Compute gating distance between state distribution and measurements.
|
193 |
+
|
194 |
+
A suitable distance threshold can be obtained from `chi2inv95`. If
|
195 |
+
`only_position` is False, the chi-square distribution has 4 degrees of
|
196 |
+
freedom, otherwise 2.
|
197 |
+
|
198 |
+
Parameters
|
199 |
+
----------
|
200 |
+
mean : ndarray
|
201 |
+
Mean vector over the state distribution (8 dimensional).
|
202 |
+
covariance : ndarray
|
203 |
+
Covariance of the state distribution (8x8 dimensional).
|
204 |
+
measurements : ndarray
|
205 |
+
An Nx4 dimensional matrix of N measurements, each in
|
206 |
+
format (x, y, a, h) where (x, y) is the bounding box center
|
207 |
+
position, a the aspect ratio, and h the height.
|
208 |
+
only_position : Optional[bool]
|
209 |
+
If True, distance computation is done with respect to the bounding
|
210 |
+
box center position only.
|
211 |
+
|
212 |
+
Returns
|
213 |
+
-------
|
214 |
+
ndarray
|
215 |
+
Returns an array of length N, where the i-th element contains the
|
216 |
+
squared Mahalanobis distance between (mean, covariance) and
|
217 |
+
`measurements[i]`.
|
218 |
+
|
219 |
+
"""
|
220 |
+
mean, covariance = self.project(mean, covariance)
|
221 |
+
if only_position:
|
222 |
+
mean, covariance = mean[:2], covariance[:2, :2]
|
223 |
+
measurements = measurements[:, :2]
|
224 |
+
|
225 |
+
cholesky_factor = np.linalg.cholesky(covariance)
|
226 |
+
d = measurements - mean
|
227 |
+
z = scipy.linalg.solve_triangular(
|
228 |
+
cholesky_factor, d.T, lower=True, check_finite=False,
|
229 |
+
overwrite_b=True)
|
230 |
+
squared_maha = np.sum(z * z, axis=0)
|
231 |
+
return squared_maha
|
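A short sketch of the initiate / predict / update cycle of the KalmanFilter above on a single measurement in (x, y, a, h) form; the measurement values and import path are assumptions:

import numpy as np
from deep_sort.sort.kalman_filter import KalmanFilter

kf = KalmanFilter()
measurement = np.array([320., 240., 0.5, 160.])    # (center x, center y, aspect ratio, height)
mean, cov = kf.initiate(measurement)               # 8-dim state, 8x8 covariance

mean, cov = kf.predict(mean, cov)                  # propagate one frame (constant velocity)
new_measurement = np.array([324., 242., 0.5, 162.])
mean, cov = kf.update(mean, cov, new_measurement)  # correct with the newly observed box
print(mean[:4])                                    # filtered (x, y, a, h)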
deep_sort/sort/linear_assignment.py
ADDED
@@ -0,0 +1,192 @@
1 |
+
# vim: expandtab:ts=4:sw=4
|
2 |
+
from __future__ import absolute_import
|
3 |
+
import numpy as np
|
4 |
+
# from sklearn.utils.linear_assignment_ import linear_assignment
|
5 |
+
from scipy.optimize import linear_sum_assignment as linear_assignment
|
6 |
+
from . import kalman_filter
|
7 |
+
|
8 |
+
|
9 |
+
INFTY_COST = 1e+5
|
10 |
+
|
11 |
+
|
12 |
+
def min_cost_matching(
|
13 |
+
distance_metric, max_distance, tracks, detections, track_indices=None,
|
14 |
+
detection_indices=None):
|
15 |
+
"""Solve linear assignment problem.
|
16 |
+
|
17 |
+
Parameters
|
18 |
+
----------
|
19 |
+
distance_metric : Callable[[List[Track], List[Detection], List[int], List[int]], ndarray]
|
20 |
+
The distance metric is given a list of tracks and detections as well as
|
21 |
+
a list of N track indices and M detection indices. The metric should
|
22 |
+
return the NxM dimensional cost matrix, where element (i, j) is the
|
23 |
+
association cost between the i-th track in the given track indices and
|
24 |
+
the j-th detection in the given detection_indices.
|
25 |
+
max_distance : float
|
26 |
+
Gating threshold. Associations with cost larger than this value are
|
27 |
+
disregarded.
|
28 |
+
tracks : List[track.Track]
|
29 |
+
A list of predicted tracks at the current time step.
|
30 |
+
detections : List[detection.Detection]
|
31 |
+
A list of detections at the current time step.
|
32 |
+
track_indices : List[int]
|
33 |
+
List of track indices that maps rows in `cost_matrix` to tracks in
|
34 |
+
`tracks` (see description above).
|
35 |
+
detection_indices : List[int]
|
36 |
+
List of detection indices that maps columns in `cost_matrix` to
|
37 |
+
detections in `detections` (see description above).
|
38 |
+
|
39 |
+
Returns
|
40 |
+
-------
|
41 |
+
(List[(int, int)], List[int], List[int])
|
42 |
+
Returns a tuple with the following three entries:
|
43 |
+
* A list of matched track and detection indices.
|
44 |
+
* A list of unmatched track indices.
|
45 |
+
* A list of unmatched detection indices.
|
46 |
+
|
47 |
+
"""
|
48 |
+
if track_indices is None:
|
49 |
+
track_indices = np.arange(len(tracks))
|
50 |
+
if detection_indices is None:
|
51 |
+
detection_indices = np.arange(len(detections))
|
52 |
+
|
53 |
+
if len(detection_indices) == 0 or len(track_indices) == 0:
|
54 |
+
return [], track_indices, detection_indices # Nothing to match.
|
55 |
+
|
56 |
+
cost_matrix = distance_metric(
|
57 |
+
tracks, detections, track_indices, detection_indices)
|
58 |
+
cost_matrix[cost_matrix > max_distance] = max_distance + 1e-5
|
59 |
+
|
60 |
+
row_indices, col_indices = linear_assignment(cost_matrix)
|
61 |
+
|
62 |
+
matches, unmatched_tracks, unmatched_detections = [], [], []
|
63 |
+
for col, detection_idx in enumerate(detection_indices):
|
64 |
+
if col not in col_indices:
|
65 |
+
unmatched_detections.append(detection_idx)
|
66 |
+
for row, track_idx in enumerate(track_indices):
|
67 |
+
if row not in row_indices:
|
68 |
+
unmatched_tracks.append(track_idx)
|
69 |
+
for row, col in zip(row_indices, col_indices):
|
70 |
+
track_idx = track_indices[row]
|
71 |
+
detection_idx = detection_indices[col]
|
72 |
+
if cost_matrix[row, col] > max_distance:
|
73 |
+
unmatched_tracks.append(track_idx)
|
74 |
+
unmatched_detections.append(detection_idx)
|
75 |
+
else:
|
76 |
+
matches.append((track_idx, detection_idx))
|
77 |
+
return matches, unmatched_tracks, unmatched_detections
|
78 |
+
|
79 |
+
|
80 |
+
def matching_cascade(
|
81 |
+
distance_metric, max_distance, cascade_depth, tracks, detections,
|
82 |
+
track_indices=None, detection_indices=None):
|
83 |
+
"""Run matching cascade.
|
84 |
+
|
85 |
+
Parameters
|
86 |
+
----------
|
87 |
+
distance_metric : Callable[[List[Track], List[Detection], List[int], List[int]], ndarray]
|
88 |
+
The distance metric is given a list of tracks and detections as well as
|
89 |
+
a list of N track indices and M detection indices. The metric should
|
90 |
+
return the NxM dimensional cost matrix, where element (i, j) is the
|
91 |
+
association cost between the i-th track in the given track indices and
|
92 |
+
the j-th detection in the given detection indices.
|
93 |
+
max_distance : float
|
94 |
+
Gating threshold. Associations with cost larger than this value are
|
95 |
+
disregarded.
|
96 |
+
cascade_depth: int
|
97 |
+
The cascade depth; should be set to the maximum track age.
|
98 |
+
tracks : List[track.Track]
|
99 |
+
A list of predicted tracks at the current time step.
|
100 |
+
detections : List[detection.Detection]
|
101 |
+
A list of detections at the current time step.
|
102 |
+
track_indices : Optional[List[int]]
|
103 |
+
List of track indices that maps rows in `cost_matrix` to tracks in
|
104 |
+
`tracks` (see description above). Defaults to all tracks.
|
105 |
+
detection_indices : Optional[List[int]]
|
106 |
+
List of detection indices that maps columns in `cost_matrix` to
|
107 |
+
detections in `detections` (see description above). Defaults to all
|
108 |
+
detections.
|
109 |
+
|
110 |
+
Returns
|
111 |
+
-------
|
112 |
+
(List[(int, int)], List[int], List[int])
|
113 |
+
Returns a tuple with the following three entries:
|
114 |
+
* A list of matched track and detection indices.
|
115 |
+
* A list of unmatched track indices.
|
116 |
+
* A list of unmatched detection indices.
|
117 |
+
|
118 |
+
"""
|
119 |
+
if track_indices is None:
|
120 |
+
track_indices = list(range(len(tracks)))
|
121 |
+
if detection_indices is None:
|
122 |
+
detection_indices = list(range(len(detections)))
|
123 |
+
|
124 |
+
unmatched_detections = detection_indices
|
125 |
+
matches = []
|
126 |
+
for level in range(cascade_depth):
|
127 |
+
if len(unmatched_detections) == 0: # No detections left
|
128 |
+
break
|
129 |
+
|
130 |
+
track_indices_l = [
|
131 |
+
k for k in track_indices
|
132 |
+
if tracks[k].time_since_update == 1 + level
|
133 |
+
]
|
134 |
+
if len(track_indices_l) == 0: # Nothing to match at this level
|
135 |
+
continue
|
136 |
+
|
137 |
+
matches_l, _, unmatched_detections = \
|
138 |
+
min_cost_matching(
|
139 |
+
distance_metric, max_distance, tracks, detections,
|
140 |
+
track_indices_l, unmatched_detections)
|
141 |
+
matches += matches_l
|
142 |
+
unmatched_tracks = list(set(track_indices) - set(k for k, _ in matches))
|
143 |
+
return matches, unmatched_tracks, unmatched_detections
|
144 |
+
|
145 |
+
|
146 |
+
def gate_cost_matrix(
|
147 |
+
kf, cost_matrix, tracks, detections, track_indices, detection_indices,
|
148 |
+
gated_cost=INFTY_COST, only_position=False):
|
149 |
+
"""Invalidate infeasible entries in cost matrix based on the state
|
150 |
+
distributions obtained by Kalman filtering.
|
151 |
+
|
152 |
+
Parameters
|
153 |
+
----------
|
154 |
+
kf : The Kalman filter.
|
155 |
+
cost_matrix : ndarray
|
156 |
+
The NxM dimensional cost matrix, where N is the number of track indices
|
157 |
+
and M is the number of detection indices, such that entry (i, j) is the
|
158 |
+
association cost between `tracks[track_indices[i]]` and
|
159 |
+
`detections[detection_indices[j]]`.
|
160 |
+
tracks : List[track.Track]
|
161 |
+
A list of predicted tracks at the current time step.
|
162 |
+
detections : List[detection.Detection]
|
163 |
+
A list of detections at the current time step.
|
164 |
+
track_indices : List[int]
|
165 |
+
List of track indices that maps rows in `cost_matrix` to tracks in
|
166 |
+
`tracks` (see description above).
|
167 |
+
detection_indices : List[int]
|
168 |
+
List of detection indices that maps columns in `cost_matrix` to
|
169 |
+
detections in `detections` (see description above).
|
170 |
+
gated_cost : Optional[float]
|
171 |
+
Entries in the cost matrix corresponding to infeasible associations are
|
172 |
+
set this value. Defaults to a very large value.
|
173 |
+
only_position : Optional[bool]
|
174 |
+
If True, only the x, y position of the state distribution is considered
|
175 |
+
during gating. Defaults to False.
|
176 |
+
|
177 |
+
Returns
|
178 |
+
-------
|
179 |
+
ndarray
|
180 |
+
Returns the modified cost matrix.
|
181 |
+
|
182 |
+
"""
|
183 |
+
gating_dim = 2 if only_position else 4
|
184 |
+
gating_threshold = kalman_filter.chi2inv95[gating_dim]
|
185 |
+
measurements = np.asarray(
|
186 |
+
[detections[i].to_xyah() for i in detection_indices])
|
187 |
+
for row, track_idx in enumerate(track_indices):
|
188 |
+
track = tracks[track_idx]
|
189 |
+
gating_distance = kf.gating_distance(
|
190 |
+
track.mean, track.covariance, measurements, only_position)
|
191 |
+
cost_matrix[row, gating_distance > gating_threshold] = gated_cost
|
192 |
+
return cost_matrix
|
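min_cost_matching above delegates the assignment itself to scipy's linear_sum_assignment (the Hungarian algorithm) and then discards pairs whose cost exceeds max_distance. A tiny sketch of what the solver returns for a hand-written cost matrix:

import numpy as np
from scipy.optimize import linear_sum_assignment

cost = np.array([[0.1, 0.9, 0.8],    # rows: tracks, columns: detections
                 [0.7, 0.2, 0.6]])
rows, cols = linear_sum_assignment(cost)
print([(int(r), int(c)) for r, c in zip(rows, cols)])   # [(0, 0), (1, 1)] -> minimal total cost 0.3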
deep_sort/sort/nn_matching.py
ADDED
@@ -0,0 +1,176 @@
1 |
+
# vim: expandtab:ts=4:sw=4
|
2 |
+
import numpy as np
|
3 |
+
|
4 |
+
|
5 |
+
def _pdist(a, b):
|
6 |
+
"""Compute pair-wise squared distance between points in `a` and `b`.
|
7 |
+
|
8 |
+
Parameters
|
9 |
+
----------
|
10 |
+
a : array_like
|
11 |
+
An NxM matrix of N samples of dimensionality M.
|
12 |
+
b : array_like
|
13 |
+
An LxM matrix of L samples of dimensionality M.
|
14 |
+
|
15 |
+
Returns
|
16 |
+
-------
|
17 |
+
ndarray
|
18 |
+
Returns a matrix of size len(a), len(b) such that element (i, j)
|
19 |
+
contains the squared distance between `a[i]` and `b[j]`.
|
20 |
+
|
21 |
+
"""
|
22 |
+
a, b = np.asarray(a), np.asarray(b)
|
23 |
+
if len(a) == 0 or len(b) == 0:
|
24 |
+
return np.zeros((len(a), len(b)))
|
25 |
+
a2, b2 = np.square(a).sum(axis=1), np.square(b).sum(axis=1)
|
26 |
+
r2 = -2. * np.dot(a, b.T) + a2[:, None] + b2[None, :]
|
27 |
+
r2 = np.clip(r2, 0., float(np.inf))
|
28 |
+
return r2
|
29 |
+
|
30 |
+
|
31 |
+
def _cosine_distance(a, b, data_is_normalized=False):
|
32 |
+
"""Compute pair-wise cosine distance between points in `a` and `b`.
|
33 |
+
|
34 |
+
Parameters
|
35 |
+
----------
|
36 |
+
a : array_like
|
37 |
+
An NxM matrix of N samples of dimensionality M.
|
38 |
+
b : array_like
|
39 |
+
An LxM matrix of L samples of dimensionality M.
|
40 |
+
data_is_normalized : Optional[bool]
|
41 |
+
If True, assumes rows in a and b are unit length vectors.
|
42 |
+
Otherwise, a and b are explicitly normalized to length 1.
|
43 |
+
|
44 |
+
Returns
|
45 |
+
-------
|
46 |
+
ndarray
|
47 |
+
Returns a matrix of size len(a), len(b) such that element (i, j)
|
48 |
+
contains the cosine distance between `a[i]` and `b[j]`.
|
49 |
+
|
50 |
+
"""
|
51 |
+
if not data_is_normalized:
|
52 |
+
a = np.asarray(a) / np.linalg.norm(a, axis=1, keepdims=True)
|
53 |
+
b = np.asarray(b) / np.linalg.norm(b, axis=1, keepdims=True)
|
54 |
+
return 1. - np.dot(a, b.T)
|
55 |
+
|
56 |
+
|
57 |
+
def _nn_euclidean_distance(x, y):
|
58 |
+
""" Helper function for nearest neighbor distance metric (Euclidean).
|
59 |
+
|
60 |
+
Parameters
|
61 |
+
----------
|
62 |
+
x : ndarray
|
63 |
+
A matrix of N row-vectors (sample points).
|
64 |
+
y : ndarray
|
65 |
+
A matrix of M row-vectors (query points).
|
66 |
+
|
67 |
+
Returns
|
68 |
+
-------
|
69 |
+
ndarray
|
70 |
+
A vector of length M that contains for each entry in `y` the
|
71 |
+
smallest Euclidean distance to a sample in `x`.
|
72 |
+
|
73 |
+
"""
|
74 |
+
distances = _pdist(x, y)
|
75 |
+
return np.maximum(0.0, distances.min(axis=0))
|
76 |
+
|
77 |
+
|
78 |
+
def _nn_cosine_distance(x, y):
|
79 |
+
""" Helper function for nearest neighbor distance metric (cosine).
|
80 |
+
|
81 |
+
Parameters
|
82 |
+
----------
|
83 |
+
x : ndarray
|
84 |
+
A matrix of N row-vectors (sample points).
|
85 |
+
y : ndarray
|
86 |
+
A matrix of M row-vectors (query points).
|
87 |
+
|
88 |
+
Returns
|
89 |
+
-------
|
90 |
+
ndarray
|
91 |
+
A vector of length M that contains for each entry in `y` the
|
92 |
+
smallest cosine distance to a sample in `x`.
|
93 |
+
|
94 |
+
"""
|
95 |
+
distances = _cosine_distance(x, y)
|
96 |
+
return distances.min(axis=0)
|
97 |
+
|
98 |
+
|
99 |
+
class NearestNeighborDistanceMetric(object):
|
100 |
+
"""
|
101 |
+
A nearest neighbor distance metric that, for each target, returns
|
102 |
+
the closest distance to any sample that has been observed so far.
|
103 |
+
|
104 |
+
Parameters
|
105 |
+
----------
|
106 |
+
metric : str
|
107 |
+
Either "euclidean" or "cosine".
|
108 |
+
matching_threshold: float
|
109 |
+
The matching threshold. Samples with larger distance are considered an
|
110 |
+
invalid match.
|
111 |
+
budget : Optional[int]
|
112 |
+
If not None, fix samples per class to at most this number. Removes
|
113 |
+
the oldest samples when the budget is reached.
|
114 |
+
|
115 |
+
Attributes
|
116 |
+
----------
|
117 |
+
samples : Dict[int -> List[ndarray]]
|
118 |
+
A dictionary that maps from target identities to the list of samples
|
119 |
+
that have been observed so far.
|
120 |
+
|
121 |
+
"""
|
122 |
+
|
123 |
+
def __init__(self, metric, matching_threshold, budget=None):
|
124 |
+
|
125 |
+
if metric == "euclidean":
|
126 |
+
self._metric = _nn_euclidean_distance
|
127 |
+
elif metric == "cosine":
|
128 |
+
self._metric = _nn_cosine_distance
|
129 |
+
else:
|
130 |
+
raise ValueError(
|
131 |
+
"Invalid metric; must be either 'euclidean' or 'cosine'")
|
132 |
+
self.matching_threshold = matching_threshold
|
133 |
+
self.budget = budget
|
134 |
+
self.samples = {}
|
135 |
+
|
136 |
+
def partial_fit(self, features, targets, active_targets):
|
137 |
+
"""Update the distance metric with new data.
|
138 |
+
|
139 |
+
Parameters
|
140 |
+
----------
|
141 |
+
features : ndarray
|
142 |
+
An NxM matrix of N features of dimensionality M.
|
143 |
+
targets : ndarray
|
144 |
+
An integer array of associated target identities.
|
145 |
+
active_targets : List[int]
|
146 |
+
A list of targets that are currently present in the scene.
|
147 |
+
|
148 |
+
"""
|
149 |
+
for feature, target in zip(features, targets):
|
150 |
+
self.samples.setdefault(target, []).append(feature)
|
151 |
+
if self.budget is not None:
|
152 |
+
self.samples[target] = self.samples[target][-self.budget:]
|
153 |
+
self.samples = {k: self.samples[k] for k in active_targets}
|
154 |
+
|
155 |
+
def distance(self, features, targets):
|
156 |
+
"""Compute distance between features and targets.
|
157 |
+
|
158 |
+
Parameters
|
159 |
+
----------
|
160 |
+
features : ndarray
|
161 |
+
An NxM matrix of N features of dimensionality M.
|
162 |
+
targets : List[int]
|
163 |
+
A list of targets to match the given `features` against.
|
164 |
+
|
165 |
+
Returns
|
166 |
+
-------
|
167 |
+
ndarray
|
168 |
+
Returns a cost matrix of shape len(targets), len(features), where
|
169 |
+
element (i, j) contains the closest squared distance between
|
170 |
+
`targets[i]` and `features[j]`.
|
171 |
+
|
172 |
+
"""
|
173 |
+
cost_matrix = np.zeros((len(targets), len(features)))
|
174 |
+
for i, target in enumerate(targets):
|
175 |
+
cost_matrix[i, :] = self._metric(self.samples[target], features)
|
176 |
+
return cost_matrix
|
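A usage sketch of NearestNeighborDistanceMetric above: features are stored per track identity with partial_fit, and distance then returns the cost matrix used for appearance matching. The feature values and import path are made up for illustration:

import numpy as np
from deep_sort.sort.nn_matching import NearestNeighborDistanceMetric

metric = NearestNeighborDistanceMetric("cosine", matching_threshold=0.2, budget=100)

# store one appearance feature for each of two track identities
stored = np.random.rand(2, 128).astype(np.float32)
metric.partial_fit(stored, targets=np.array([1, 2]), active_targets=[1, 2])

# cost of matching two new detection features against those identities
detections = np.random.rand(2, 128).astype(np.float32)
cost = metric.distance(detections, targets=[1, 2])
print(cost.shape)   # (2, 2): rows are identities, columns are detections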
deep_sort/sort/preprocessing.py
ADDED
@@ -0,0 +1,73 @@
1 |
+
# vim: expandtab:ts=4:sw=4
|
2 |
+
import numpy as np
|
3 |
+
import cv2
|
4 |
+
|
5 |
+
|
6 |
+
def non_max_suppression(boxes, max_bbox_overlap, scores=None):
|
7 |
+
"""Suppress overlapping detections.
|
8 |
+
|
9 |
+
Original code from [1]_ has been adapted to include confidence score.
|
10 |
+
|
11 |
+
.. [1] http://www.pyimagesearch.com/2015/02/16/
|
12 |
+
faster-non-maximum-suppression-python/
|
13 |
+
|
14 |
+
Examples
|
15 |
+
--------
|
16 |
+
|
17 |
+
>>> boxes = [d.roi for d in detections]
|
18 |
+
>>> scores = [d.confidence for d in detections]
|
19 |
+
>>> indices = non_max_suppression(boxes, max_bbox_overlap, scores)
|
20 |
+
>>> detections = [detections[i] for i in indices]
|
21 |
+
|
22 |
+
Parameters
|
23 |
+
----------
|
24 |
+
boxes : ndarray
|
25 |
+
Array of ROIs (x, y, width, height).
|
26 |
+
max_bbox_overlap : float
|
27 |
+
ROIs that overlap more than this value are suppressed.
|
28 |
+
scores : Optional[array_like]
|
29 |
+
Detector confidence score.
|
30 |
+
|
31 |
+
Returns
|
32 |
+
-------
|
33 |
+
List[int]
|
34 |
+
Returns indices of detections that have survived non-maxima suppression.
|
35 |
+
|
36 |
+
"""
|
37 |
+
if len(boxes) == 0:
|
38 |
+
return []
|
39 |
+
|
40 |
+
boxes = boxes.astype(np.float32)
|
41 |
+
pick = []
|
42 |
+
|
43 |
+
x1 = boxes[:, 0]
|
44 |
+
y1 = boxes[:, 1]
|
45 |
+
x2 = boxes[:, 2] + boxes[:, 0]
|
46 |
+
y2 = boxes[:, 3] + boxes[:, 1]
|
47 |
+
|
48 |
+
area = (x2 - x1 + 1) * (y2 - y1 + 1)
|
49 |
+
if scores is not None:
|
50 |
+
idxs = np.argsort(scores)
|
51 |
+
else:
|
52 |
+
idxs = np.argsort(y2)
|
53 |
+
|
54 |
+
while len(idxs) > 0:
|
55 |
+
last = len(idxs) - 1
|
56 |
+
i = idxs[last]
|
57 |
+
pick.append(i)
|
58 |
+
|
59 |
+
xx1 = np.maximum(x1[i], x1[idxs[:last]])
|
60 |
+
yy1 = np.maximum(y1[i], y1[idxs[:last]])
|
61 |
+
xx2 = np.minimum(x2[i], x2[idxs[:last]])
|
62 |
+
yy2 = np.minimum(y2[i], y2[idxs[:last]])
|
63 |
+
|
64 |
+
w = np.maximum(0, xx2 - xx1 + 1)
|
65 |
+
h = np.maximum(0, yy2 - yy1 + 1)
|
66 |
+
|
67 |
+
overlap = (w * h) / (area[idxs[:last]] + area[idxs[last]] - w * h)
|
68 |
+
|
69 |
+
idxs = np.delete(
|
70 |
+
idxs, np.concatenate(
|
71 |
+
([last], np.where(overlap > max_bbox_overlap)[0])))
|
72 |
+
|
73 |
+
return pick
|
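A concrete example of non_max_suppression above: the second box is a near-duplicate of the first and gets suppressed because its overlap exceeds max_bbox_overlap. The import path is assumed:

import numpy as np
from deep_sort.sort.preprocessing import non_max_suppression

boxes = np.array([[10, 10, 50, 80],     # (x, y, w, h)
                  [12, 12, 50, 80],     # near-duplicate of the first box
                  [200, 200, 40, 60]])  # separate object
scores = np.array([0.9, 0.6, 0.8])
keep = non_max_suppression(boxes, max_bbox_overlap=0.5, scores=scores)
print(keep)                             # [0, 2]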
deep_sort/sort/track.py
ADDED
@@ -0,0 +1,169 @@
1 |
+
# vim: expandtab:ts=4:sw=4
|
2 |
+
|
3 |
+
|
4 |
+
class TrackState:
|
5 |
+
"""
|
6 |
+
Enumeration type for the single target track state. Newly created tracks are
|
7 |
+
classified as `tentative` until enough evidence has been collected. Then,
|
8 |
+
the track state is changed to `confirmed`. Tracks that are no longer alive
|
9 |
+
are classified as `deleted` to mark them for removal from the set of active
|
10 |
+
tracks.
|
11 |
+
|
12 |
+
"""
|
13 |
+
|
14 |
+
Tentative = 1
|
15 |
+
Confirmed = 2
|
16 |
+
Deleted = 3
|
17 |
+
|
18 |
+
|
19 |
+
class Track:
|
20 |
+
"""
|
21 |
+
A single target track with state space `(x, y, a, h)` and associated
|
22 |
+
velocities, where `(x, y)` is the center of the bounding box, `a` is the
|
23 |
+
aspect ratio and `h` is the height.
|
24 |
+
|
25 |
+
Parameters
|
26 |
+
----------
|
27 |
+
mean : ndarray
|
28 |
+
Mean vector of the initial state distribution.
|
29 |
+
covariance : ndarray
|
30 |
+
Covariance matrix of the initial state distribution.
|
31 |
+
track_id : int
|
32 |
+
A unique track identifier.
|
33 |
+
n_init : int
|
34 |
+
Number of consecutive detections before the track is confirmed. The
|
35 |
+
track state is set to `Deleted` if a miss occurs within the first
|
36 |
+
`n_init` frames.
|
37 |
+
max_age : int
|
38 |
+
The maximum number of consecutive misses before the track state is
|
39 |
+
set to `Deleted`.
|
40 |
+
feature : Optional[ndarray]
|
41 |
+
Feature vector of the detection this track originates from. If not None,
|
42 |
+
this feature is added to the `features` cache.
|
43 |
+
|
44 |
+
Attributes
|
45 |
+
----------
|
46 |
+
mean : ndarray
|
47 |
+
Mean vector of the initial state distribution.
|
48 |
+
covariance : ndarray
|
49 |
+
Covariance matrix of the initial state distribution.
|
50 |
+
track_id : int
|
51 |
+
A unique track identifier.
|
52 |
+
hits : int
|
53 |
+
Total number of measurement updates.
|
54 |
+
age : int
|
55 |
+
Total number of frames since first occurrence.
|
56 |
+
time_since_update : int
|
57 |
+
Total number of frames since last measurement update.
|
58 |
+
state : TrackState
|
59 |
+
The current track state.
|
60 |
+
features : List[ndarray]
|
61 |
+
A cache of features. On each measurement update, the associated feature
|
62 |
+
vector is added to this list.
|
63 |
+
|
64 |
+
"""
|
65 |
+
|
66 |
+
def __init__(self, mean, covariance, track_id, n_init, max_age,
|
67 |
+
feature=None, cls=None, mask=None):
|
68 |
+
self.mean = mean
|
69 |
+
self.covariance = covariance
|
70 |
+
self.track_id = track_id
|
71 |
+
self.hits = 1
|
72 |
+
self.age = 1
|
73 |
+
self.time_since_update = 0
|
74 |
+
|
75 |
+
self.state = TrackState.Tentative
|
76 |
+
self.cls = cls
|
77 |
+
self.mask = mask
|
78 |
+
self.features = []
|
79 |
+
if feature is not None:
|
80 |
+
self.features.append(feature)
|
81 |
+
|
82 |
+
self._n_init = n_init
|
83 |
+
self._max_age = max_age
|
84 |
+
|
85 |
+
def to_tlwh(self):
|
86 |
+
"""Get current position in bounding box format `(top left x, top left y,
|
87 |
+
width, height)`.
|
88 |
+
|
89 |
+
Returns
|
90 |
+
-------
|
91 |
+
ndarray
|
92 |
+
The bounding box.
|
93 |
+
|
94 |
+
"""
|
95 |
+
ret = self.mean[:4].copy()
|
96 |
+
ret[2] *= ret[3]
|
97 |
+
ret[:2] -= ret[2:] / 2
|
98 |
+
return ret
|
99 |
+
|
100 |
+
def to_tlbr(self):
|
101 |
+
"""Get current position in bounding box format `(min x, miny, max x,
|
102 |
+
max y)`.
|
103 |
+
|
104 |
+
Returns
|
105 |
+
-------
|
106 |
+
ndarray
|
107 |
+
The bounding box.
|
108 |
+
|
109 |
+
"""
|
110 |
+
ret = self.to_tlwh()
|
111 |
+
ret[2:] = ret[:2] + ret[2:]
|
112 |
+
return ret
|
113 |
+
|
114 |
+
def predict(self, kf):
|
115 |
+
"""Propagate the state distribution to the current time step using a
|
116 |
+
Kalman filter prediction step.
|
117 |
+
|
118 |
+
Parameters
|
119 |
+
----------
|
120 |
+
kf : kalman_filter.KalmanFilter
|
121 |
+
The Kalman filter.
|
122 |
+
|
123 |
+
"""
|
124 |
+
self.mean, self.covariance = kf.predict(self.mean, self.covariance)
|
125 |
+
self.age += 1
|
126 |
+
self.time_since_update += 1
|
127 |
+
|
128 |
+
def update(self, kf, detection):
|
129 |
+
"""Perform Kalman filter measurement update step and update the feature
|
130 |
+
cache.
|
131 |
+
|
132 |
+
Parameters
|
133 |
+
----------
|
134 |
+
kf : kalman_filter.KalmanFilter
|
135 |
+
The Kalman filter.
|
136 |
+
detection : Detection
|
137 |
+
The associated detection.
|
138 |
+
|
139 |
+
"""
|
140 |
+
self.mask = detection.mask
|
141 |
+
self.mean, self.covariance = kf.update(
|
142 |
+
self.mean, self.covariance, detection.to_xyah())
|
143 |
+
self.features.append(detection.feature)
|
144 |
+
|
145 |
+
self.hits += 1
|
146 |
+
self.time_since_update = 0
|
147 |
+
if self.state == TrackState.Tentative and self.hits >= self._n_init:
|
148 |
+
self.state = TrackState.Confirmed
|
149 |
+
|
150 |
+
def mark_missed(self):
|
151 |
+
"""Mark this track as missed (no association at the current time step).
|
152 |
+
"""
|
153 |
+
if self.state == TrackState.Tentative:
|
154 |
+
self.state = TrackState.Deleted
|
155 |
+
elif self.time_since_update > self._max_age:
|
156 |
+
self.state = TrackState.Deleted
|
157 |
+
|
158 |
+
def is_tentative(self):
|
159 |
+
"""Returns True if this track is tentative (unconfirmed).
|
160 |
+
"""
|
161 |
+
return self.state == TrackState.Tentative
|
162 |
+
|
163 |
+
def is_confirmed(self):
|
164 |
+
"""Returns True if this track is confirmed."""
|
165 |
+
return self.state == TrackState.Confirmed
|
166 |
+
|
167 |
+
def is_deleted(self):
|
168 |
+
"""Returns True if this track is dead and should be deleted."""
|
169 |
+
return self.state == TrackState.Deleted
|
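A small sketch of the Tentative to Confirmed lifecycle defined above. It assumes the KalmanFilter API from deep_sort/sort/kalman_filter.py (initiate/predict/update on (x, y, a, h) measurements); the _FakeDetection stand-in is invented here purely to provide the attributes Track.update() reads.

# Sketch only: exercises Track with a hand-rolled detection stand-in.
import numpy as np
from deep_sort.sort import kalman_filter
from deep_sort.sort.track import Track


class _FakeDetection:
    """Minimal stand-in exposing the attributes Track.update() reads."""
    def __init__(self, xyah):
        self._xyah = np.asarray(xyah, dtype=float)
        self.feature = np.random.rand(128)   # placeholder appearance feature
        self.mask = None

    def to_xyah(self):
        return self._xyah


kf = kalman_filter.KalmanFilter()
mean, cov = kf.initiate(np.array([320., 240., 0.5, 160.]))   # (x, y, a, h)
track = Track(mean, cov, track_id=1, n_init=3, max_age=30)

assert track.is_tentative()
for _ in range(3):                                            # n_init consecutive hits
    track.predict(kf)
    track.update(kf, _FakeDetection([322., 241., 0.5, 161.]))
assert track.is_confirmed()                                   # promoted once hits >= n_init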
deep_sort/sort/tracker.py
ADDED
@@ -0,0 +1,138 @@
# vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from . import kalman_filter
from . import linear_assignment
from . import iou_matching
from .track import Track


class Tracker:
    """
    This is the multi-target tracker.

    Parameters
    ----------
    metric : nn_matching.NearestNeighborDistanceMetric
        A distance metric for measurement-to-track association.
    max_age : int
        Maximum number of consecutive misses before a track is deleted.
    n_init : int
        Number of consecutive detections before the track is confirmed. The
        track state is set to `Deleted` if a miss occurs within the first
        `n_init` frames.

    Attributes
    ----------
    metric : nn_matching.NearestNeighborDistanceMetric
        The distance metric used for measurement to track association.
    max_age : int
        Maximum number of consecutive misses before a track is deleted.
    n_init : int
        Number of frames that a track remains in initialization phase.
    kf : kalman_filter.KalmanFilter
        A Kalman filter to filter target trajectories in image space.
    tracks : List[Track]
        The list of active tracks at the current time step.

    """

    def __init__(self, metric, max_iou_distance=0.7, max_age=70, n_init=3):
        self.metric = metric
        self.max_iou_distance = max_iou_distance
        self.max_age = max_age
        self.n_init = n_init

        self.kf = kalman_filter.KalmanFilter()
        self.tracks = []
        self._next_id = 1

    def predict(self):
        """Propagate track state distributions one time step forward.

        This function should be called once every time step, before `update`.
        """
        for track in self.tracks:
            track.predict(self.kf)

    def update(self, detections):
        """Perform measurement update and track management.

        Parameters
        ----------
        detections : List[deep_sort.detection.Detection]
            A list of detections at the current time step.

        """
        # Run matching cascade.
        matches, unmatched_tracks, unmatched_detections = \
            self._match(detections)

        # Update track set.
        for track_idx, detection_idx in matches:
            self.tracks[track_idx].update(
                self.kf, detections[detection_idx])
        for track_idx in unmatched_tracks:
            self.tracks[track_idx].mark_missed()
        for detection_idx in unmatched_detections:
            self._initiate_track(detections[detection_idx])
        self.tracks = [t for t in self.tracks if not t.is_deleted()]

        # Update distance metric.
        active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]
        features, targets = [], []
        for track in self.tracks:
            if not track.is_confirmed():
                continue
            features += track.features
            targets += [track.track_id for _ in track.features]
            track.features = []
        self.metric.partial_fit(
            np.asarray(features), np.asarray(targets), active_targets)

    def _match(self, detections):

        def gated_metric(tracks, dets, track_indices, detection_indices):
            features = np.array([dets[i].feature for i in detection_indices])
            targets = np.array([tracks[i].track_id for i in track_indices])
            cost_matrix = self.metric.distance(features, targets)
            cost_matrix = linear_assignment.gate_cost_matrix(
                self.kf, cost_matrix, tracks, dets, track_indices,
                detection_indices)

            return cost_matrix

        # Split track set into confirmed and unconfirmed tracks.
        confirmed_tracks = [
            i for i, t in enumerate(self.tracks) if t.is_confirmed()]
        unconfirmed_tracks = [
            i for i, t in enumerate(self.tracks) if not t.is_confirmed()]

        # Associate confirmed tracks using appearance features.
        matches_a, unmatched_tracks_a, unmatched_detections = \
            linear_assignment.matching_cascade(
                gated_metric, self.metric.matching_threshold, self.max_age,
                self.tracks, detections, confirmed_tracks)

        # Associate remaining tracks together with unconfirmed tracks using IOU.
        iou_track_candidates = unconfirmed_tracks + [
            k for k in unmatched_tracks_a if
            self.tracks[k].time_since_update == 1]
        unmatched_tracks_a = [
            k for k in unmatched_tracks_a if
            self.tracks[k].time_since_update != 1]
        matches_b, unmatched_tracks_b, unmatched_detections = \
            linear_assignment.min_cost_matching(
                iou_matching.iou_cost, self.max_iou_distance, self.tracks,
                detections, iou_track_candidates, unmatched_detections)

        matches = matches_a + matches_b
        unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))
        return matches, unmatched_tracks, unmatched_detections

    def _initiate_track(self, detection):
        mean, covariance = self.kf.initiate(detection.to_xyah())
        self.tracks.append(Track(
            mean, covariance, self._next_id, self.n_init, self.max_age,
            detection.feature, detection.cls, detection.mask))
        self._next_id += 1
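A minimal sketch of how this tracker is driven each frame. The metric construction follows NearestNeighborDistanceMetric in nn_matching.py; the threshold and budget values are example numbers, and a real caller would pass a List[Detection] built from detector output instead of the empty list used here.

# Sketch only: constructs the tracker and runs one (empty) update step.
from deep_sort.sort import nn_matching
from deep_sort.sort.tracker import Tracker

metric = nn_matching.NearestNeighborDistanceMetric("cosine", matching_threshold=0.2, budget=100)
tracker = Tracker(metric, max_iou_distance=0.7, max_age=70, n_init=3)

# Per frame: propagate Kalman states first, then associate the new detections.
tracker.predict()
tracker.update([])            # a real caller passes List[Detection] from the detector
confirmed = [t for t in tracker.tracks if t.is_confirmed()]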
deepsort.py
ADDED
@@ -0,0 +1,189 @@
import os
import cv2
import time
import argparse
import torch
import warnings
import json
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), 'thirdparty/fast-reid'))

from detector import build_detector
from deep_sort import build_tracker
from utils.draw import draw_boxes
from utils.parser import get_config
from utils.log import get_logger
from utils.io import write_results


class VideoTracker(object):
    def __init__(self, cfg, args, video_path):
        self.cfg = cfg
        self.args = args
        self.video_path = video_path
        self.logger = get_logger("root")

        use_cuda = args.use_cuda and torch.cuda.is_available()
        if not use_cuda:
            warnings.warn("Running in cpu mode, which may be very slow!", UserWarning)

        if args.display:
            cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("test", args.display_width, args.display_height)

        if args.cam != -1:
            print("Using webcam " + str(args.cam))
            self.vdo = cv2.VideoCapture(args.cam)
        else:
            self.vdo = cv2.VideoCapture()
        self.detector = build_detector(cfg, use_cuda=use_cuda, segment=self.args.segment)
        self.deepsort = build_tracker(cfg, use_cuda=use_cuda)
        self.class_names = self.detector.class_names

    def __enter__(self):
        if self.args.cam != -1:
            ret, frame = self.vdo.read()
            assert ret, "Error: Camera error"
            # frame.shape is (height, width, channels)
            self.im_height = frame.shape[0]
            self.im_width = frame.shape[1]

        else:
            assert os.path.isfile(self.video_path), "Path error"
            self.vdo.open(self.video_path)
            self.im_width = int(self.vdo.get(cv2.CAP_PROP_FRAME_WIDTH))
            self.im_height = int(self.vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))
            assert self.vdo.isOpened()

        if self.args.save_path:
            os.makedirs(self.args.save_path, exist_ok=True)
            # TODO save masks

            # paths of saved video and results
            self.save_video_path = os.path.join(self.args.save_path, "results.avi")
            self.save_results_path = os.path.join(self.args.save_path, "results.txt")

            # create video writer
            fourcc = cv2.VideoWriter_fourcc(*'MJPG')
            self.writer = cv2.VideoWriter(self.save_video_path, fourcc, 20, (self.im_width, self.im_height))

            # logging
            self.logger.info("Save results to {}".format(self.args.save_path))

        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        if exc_type:
            print(exc_type, exc_value, exc_traceback)

    def run(self):
        results = []
        idx_frame = 0
        with open('coco_classes.json', 'r') as f:
            idx_to_class = json.load(f)
        while self.vdo.grab():
            idx_frame += 1
            if idx_frame % self.args.frame_interval:
                continue

            start = time.time()
            _, ori_im = self.vdo.retrieve()
            im = cv2.cvtColor(ori_im, cv2.COLOR_BGR2RGB)

            # do detection
            if self.args.segment:
                bbox_xywh, cls_conf, cls_ids, seg_masks = self.detector(im)
            else:
                bbox_xywh, cls_conf, cls_ids = self.detector(im)

            # select person class
            mask = cls_ids == 0

            bbox_xywh = bbox_xywh[mask]
            # bbox dilation just in case bbox too small; delete this line if using a better pedestrian detector
            bbox_xywh[:, 2:] *= 1.2
            cls_conf = cls_conf[mask]
            cls_ids = cls_ids[mask]

            # do tracking
            if self.args.segment:
                seg_masks = seg_masks[mask]
                outputs, mask_outputs = self.deepsort.update(bbox_xywh, cls_conf, cls_ids, im, seg_masks)
            else:
                outputs, _ = self.deepsort.update(bbox_xywh, cls_conf, cls_ids, im)

            # draw boxes for visualization
            if len(outputs) > 0:
                bbox_tlwh = []
                bbox_xyxy = outputs[:, :4]
                identities = outputs[:, -1]
                cls = outputs[:, -2]
                names = [idx_to_class[str(label)] for label in cls]

                ori_im = draw_boxes(ori_im, bbox_xyxy, names, identities, None if not self.args.segment else mask_outputs)

                for bb_xyxy in bbox_xyxy:
                    bbox_tlwh.append(self.deepsort._xyxy_to_tlwh(bb_xyxy))

                results.append((idx_frame - 1, bbox_tlwh, identities, cls))

            end = time.time()

            if self.args.display:
                cv2.imshow("test", ori_im)
                cv2.waitKey(1)

            if self.args.save_path:
                self.writer.write(ori_im)

            # save results
            write_results(self.save_results_path, results, 'mot')

            # logging
            self.logger.info("time: {:.03f}s, fps: {:.03f}, detection numbers: {}, tracking numbers: {}" \
                             .format(end - start, 1 / (end - start), bbox_xywh.shape[0], len(outputs)))


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--VIDEO_PATH", type=str, default='demo.avi')
    parser.add_argument("--config_mmdetection", type=str, default="./configs/mmdet.yaml")
    parser.add_argument("--config_detection", type=str, default="./configs/mask_rcnn.yaml")
    parser.add_argument("--config_deepsort", type=str, default="./configs/deep_sort.yaml")
    parser.add_argument("--config_fastreid", type=str, default="./configs/fastreid.yaml")
    parser.add_argument("--fastreid", action="store_true")
    parser.add_argument("--mmdet", action="store_true")
    parser.add_argument("--segment", action="store_true")
    # parser.add_argument("--ignore_display", dest="display", action="store_false", default=True)
    parser.add_argument("--display", action="store_true")
    parser.add_argument("--frame_interval", type=int, default=1)
    parser.add_argument("--display_width", type=int, default=800)
    parser.add_argument("--display_height", type=int, default=600)
    parser.add_argument("--save_path", type=str, default="./output/")
    parser.add_argument("--cpu", dest="use_cuda", action="store_false", default=True)
    parser.add_argument("--camera", action="store", dest="cam", type=int, default=-1)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    cfg = get_config()
    if args.segment:
        cfg.USE_SEGMENT = True
    else:
        cfg.USE_SEGMENT = False
    if args.mmdet:
        cfg.merge_from_file(args.config_mmdetection)
        cfg.USE_MMDET = True
    else:
        cfg.merge_from_file(args.config_detection)
        cfg.USE_MMDET = False
    cfg.merge_from_file(args.config_deepsort)
    if args.fastreid:
        cfg.merge_from_file(args.config_fastreid)
        cfg.USE_FASTREID = True
    else:
        cfg.USE_FASTREID = False

    with VideoTracker(cfg, args, video_path=args.VIDEO_PATH) as vdo_trk:
        vdo_trk.run()
deepsort_new.py
ADDED
@@ -0,0 +1,229 @@
# Modified deepsort.py: target ID recovery with an IOU threshold and smoothed tracking
import os
import cv2
import time
import argparse
import torch
import warnings
import json
import sys
import numpy as np

sys.path.append(os.path.join(os.path.dirname(__file__), 'thirdparty/fast-reid'))

from detector import build_detector
from deep_sort import build_tracker
from utils.draw import draw_boxes
from utils.parser import get_config
from utils.log import get_logger
from utils.io import write_results


def compute_iou(box1, box2):
    if box1 is None or box2 is None:
        return 0.0
    xi1, yi1 = max(box1[0], box2[0]), max(box1[1], box2[1])
    xi2, yi2 = min(box1[2], box2[2]), min(box1[3], box2[3])
    inter_area = max(0, xi2 - xi1) * max(0, yi2 - yi1)
    box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1])
    box2_area = (box2[2] - box2[0]) * (box2[3] - box2[1])
    union = box1_area + box2_area - inter_area
    return inter_area / union if union > 0 else 0.0


def get_best_iou_track(outputs, target_bbox, return_iou=False):
    if target_bbox is None:
        return (None, 0.0) if return_iou else None
    best_iou = 0
    best_id = None
    for det in outputs:
        x1, y1, x2, y2 = det[:4]
        track_id = int(det[-1])
        iou = compute_iou([x1, y1, x2, y2], target_bbox)
        if iou > best_iou:
            best_iou = iou
            best_id = track_id
    if return_iou:
        return best_id, best_iou
    return best_id


class VideoTracker:
    def __init__(self, cfg, args, video_path):
        self.cfg = cfg
        self.args = args
        self.video_path = video_path
        self.logger = get_logger("root")
        self.first_frame_flag = True
        self.target_id = None
        self.last_known_bbox = None

        use_cuda = args.use_cuda and torch.cuda.is_available()
        if not use_cuda:
            warnings.warn("Running in cpu mode, which may be very slow!", UserWarning)

        if args.display:
            cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("test", args.display_width, args.display_height)

        if args.cam != -1:
            self.vdo = cv2.VideoCapture(args.cam)
        else:
            self.vdo = cv2.VideoCapture(video_path)

        self.detector = build_detector(cfg, use_cuda=use_cuda, segment=args.segment)
        self.deepsort = build_tracker(cfg, use_cuda=use_cuda)

    def run(self):
        results = []
        idx_frame = 0
        with open('coco_classes.json', 'r') as f:
            idx_to_class = json.load(f)

        if not self.vdo.isOpened():
            raise IOError("Failed to open video")

        im_width = int(self.vdo.get(cv2.CAP_PROP_FRAME_WIDTH))
        im_height = int(self.vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))

        if self.args.save_path:
            os.makedirs(self.args.save_path, exist_ok=True)
            self.writer = cv2.VideoWriter(
                os.path.join(self.args.save_path, "results.avi"),
                cv2.VideoWriter_fourcc(*'MJPG'),
                20, (im_width, im_height))

        while self.vdo.grab():
            idx_frame += 1
            if idx_frame % self.args.frame_interval:
                continue

            _, ori_im = self.vdo.retrieve()
            im = cv2.cvtColor(ori_im, cv2.COLOR_BGR2RGB)

            if self.args.segment:
                bbox_xywh, cls_conf, cls_ids, seg_masks = self.detector(im)
            else:
                bbox_xywh, cls_conf, cls_ids = self.detector(im)

            mask = cls_ids == 0  # person class
            bbox_xywh = bbox_xywh[mask]
            cls_conf = cls_conf[mask]
            cls_ids = cls_ids[mask]
            if bbox_xywh.shape[0] == 0:
                continue

            bbox_xywh[:, 2:] *= 1.2

            if self.args.segment:
                seg_masks = seg_masks[mask]
                outputs, mask_outputs = self.deepsort.update(bbox_xywh, cls_conf, cls_ids, im, seg_masks)
            else:
                outputs, _ = self.deepsort.update(bbox_xywh, cls_conf, cls_ids, im)

            if self.first_frame_flag and len(outputs) > 0:
                cv2.imshow("Select target", ori_im)
                cv2.waitKey(1)
                target_roi = cv2.selectROI("Select target", ori_im, False, False)
                cv2.destroyWindow("Select target")
                target_bbox = [target_roi[0], target_roi[1], target_roi[0] + target_roi[2], target_roi[1] + target_roi[3]]
                self.target_id = get_best_iou_track(outputs, target_bbox)
                self.last_known_bbox = target_bbox
                print(f"[INFO] Selected target ID: {self.target_id}")
                self.first_frame_flag = False
                continue

            bbox_tlwh = []
            filtered_outputs = []
            for det in outputs:
                if int(det[-1]) == self.target_id:
                    filtered_outputs.append(det)
                    self.last_known_bbox = det[:4]

            if len(filtered_outputs) == 0 and self.last_known_bbox is not None:
                new_id, best_iou = get_best_iou_track(outputs, self.last_known_bbox, return_iou=True)
                if best_iou > 0.4:
                    self.target_id = new_id
                    print(f"[INFO] Target temporarily lost. Reassigned to ID {self.target_id} (IOU={best_iou:.2f})")
                    for det in outputs:
                        if int(det[-1]) == self.target_id:
                            filtered_outputs.append(det)
                            self.last_known_bbox = det[:4]
                else:
                    print("[INFO] IOU too low to reassign. Skipping reassignment.")

            if len(filtered_outputs) > 0:
                def box_center(box):
                    return np.array([(box[0] + box[2]) / 2, (box[1] + box[3]) / 2])

                smoothed_outputs = []
                for det in filtered_outputs:
                    if self.last_known_bbox is None:
                        smoothed_outputs.append(det)
                        continue
                    dist = np.linalg.norm(box_center(det[:4]) - box_center(self.last_known_bbox))
                    if dist < 300:
                        smoothed_outputs.append(det)
                    else:
                        print(f"[INFO] Skipped jumpy box with dist={dist:.2f}")

                if len(smoothed_outputs) > 0:
                    bbox_xyxy = np.array([det[:4] for det in smoothed_outputs])
                    identities = [int(det[-1]) for det in smoothed_outputs]
                    cls = [int(det[-2]) for det in smoothed_outputs]
                    names = [idx_to_class[str(label)] for label in cls]

                    ori_im = draw_boxes(ori_im, bbox_xyxy, names, identities)

                    for box in bbox_xyxy:
                        bbox_tlwh.append(self.deepsort._xyxy_to_tlwh(box))

                    results.append((idx_frame - 1, bbox_tlwh, identities, cls))

            if self.args.display:
                cv2.imshow("test", ori_im)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            if self.args.save_path:
                self.writer.write(ori_im)

        if self.args.save_path:
            write_results(os.path.join(self.args.save_path, "results.txt"), results, 'mot')

        self.vdo.release()
        if self.args.display:
            cv2.destroyAllWindows()


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--VIDEO_PATH", type=str, default="demo.avi")
    parser.add_argument("--config_mmdetection", type=str, default="./configs/mmdet.yaml")
    parser.add_argument("--config_detection", type=str, default="./configs/mask_rcnn.yaml")
    parser.add_argument("--config_deepsort", type=str, default="./configs/deep_sort.yaml")
    parser.add_argument("--config_fastreid", type=str, default="./configs/fastreid.yaml")
    parser.add_argument("--fastreid", action="store_true")
    parser.add_argument("--mmdet", action="store_true")
    parser.add_argument("--segment", action="store_true")
    parser.add_argument("--display", action="store_true")
    parser.add_argument("--frame_interval", type=int, default=1)
    parser.add_argument("--display_width", type=int, default=800)
    parser.add_argument("--display_height", type=int, default=600)
    parser.add_argument("--save_path", type=str, default="./output/")
    parser.add_argument("--cpu", dest="use_cuda", action="store_false", default=True)
    parser.add_argument("--camera", action="store", dest="cam", type=int, default=-1)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    cfg = get_config()

    cfg.USE_SEGMENT = args.segment
    cfg.USE_MMDET = args.mmdet
    cfg.USE_FASTREID = args.fastreid

    cfg.merge_from_file(args.config_mmdetection if args.mmdet else args.config_detection)
    cfg.merge_from_file(args.config_deepsort)
    if args.fastreid:
        cfg.merge_from_file(args.config_fastreid)

    tracker = VideoTracker(cfg, args, video_path=args.VIDEO_PATH)
    tracker.run()
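A toy example of the ID-recovery helpers defined at the top of this script, assuming the same output row layout (x1, y1, x2, y2, cls, track_id) produced by the tracker above and the 0.4 IOU threshold used in run(); the box coordinates are made up, and get_best_iou_track is taken from this file.

# Sketch only: illustrative boxes in the tracker's output row layout.
outputs = [
    [100, 100, 200, 300, 0, 7],    # x1, y1, x2, y2, cls, track_id
    [400, 120, 480, 320, 0, 9],
]
last_known_bbox = [105, 110, 205, 310]   # where the target was last seen

new_id, best_iou = get_best_iou_track(outputs, last_known_bbox, return_iou=True)
if best_iou > 0.4:                       # same threshold as in run()
    target_id = new_id                   # re-acquire the target under its new track ID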
demo/1.jpg
ADDED
(binary image, stored via Git LFS)
demo/2.jpg
ADDED
(binary image, stored via Git LFS)
demo/demo.gif
ADDED
(binary image, stored via Git LFS)
demo/demo2.gif
ADDED
(binary image, stored via Git LFS)
detector/MMDet/__init__.py
ADDED
@@ -0,0 +1,2 @@
from .detector import MMDet
__all__ = ['MMDet']
detector/MMDet/detector.py
ADDED
@@ -0,0 +1,52 @@
import logging
import numpy as np
import torch

from mmdet.apis import init_detector, inference_detector
from .mmdet_utils import xyxy_to_xywh


class MMDet(object):
    def __init__(self, cfg_file, checkpoint_file, score_thresh=0.7,
                 is_xywh=False, use_cuda=True):
        # net definition
        self.device = "cuda" if use_cuda else "cpu"
        self.net = init_detector(cfg_file, checkpoint_file, device=self.device)
        logger = logging.getLogger("root.detector")
        logger.info('Loading weights from %s... Done!' % (checkpoint_file))

        # constants
        self.score_thresh = score_thresh
        self.use_cuda = use_cuda
        self.is_xywh = is_xywh
        self.class_names = self.net.CLASSES
        self.num_classes = len(self.class_names)

    def __call__(self, ori_img):
        # forward
        bbox_result = inference_detector(self.net, ori_img)
        bboxes = np.vstack(bbox_result)

        if len(bboxes) == 0:
            bbox = np.array([]).reshape([0, 4])
            cls_conf = np.array([])
            cls_ids = np.array([])
            return bbox, cls_conf, cls_ids

        bbox = bboxes[:, :4]
        cls_conf = bboxes[:, 4]
        cls_ids = [
            np.full(bbox.shape[0], i, dtype=np.int32)
            for i, bbox in enumerate(bbox_result)
        ]
        cls_ids = np.concatenate(cls_ids)

        selected_idx = cls_conf > self.score_thresh
        bbox = bbox[selected_idx, :]
        cls_conf = cls_conf[selected_idx]
        cls_ids = cls_ids[selected_idx]

        if self.is_xywh:
            bbox = xyxy_to_xywh(bbox)

        return bbox, cls_conf, cls_ids
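A hedged usage sketch of the MMDet wrapper above. The config, checkpoint and image paths are placeholders, and a COCO-trained model is assumed so that class id 0 corresponds to "person" (matching the cls_ids == 0 filter used in deepsort.py).

# Sketch only: all file paths below are placeholders.
import cv2
from detector.MMDet import MMDet

detector = MMDet(cfg_file="path/to/mmdet_config.py",
                 checkpoint_file="path/to/checkpoint.pth",
                 score_thresh=0.5, is_xywh=True, use_cuda=True)

img = cv2.imread("path/to/frame.jpg")          # BGR image, as OpenCV loads it
bbox_xywh, cls_conf, cls_ids = detector(img)   # boxes converted by xyxy_to_xywh when is_xywh=True
person_boxes = bbox_xywh[cls_ids == 0]         # keep only the person class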