Merge branch 'pipeline-setup' of OperationSavta/SavtaDepth into master
- .dvc/.gitignore +3 -0
- .dvc/config +5 -0
- .dvc/plots/confusion.json +30 -0
- .dvc/plots/default.json +29 -0
- .dvc/plots/scatter.json +27 -0
- .dvc/plots/smooth.json +39 -0
- .dvcignore +3 -0
- .gitignore +9 -0
- Dockerfile +0 -9
- Makefile +37 -0
- Notebooks/SavtaDepth_Colab.ipynb +0 -0
- Notebooks/SavtaDepth_sanity_check.ipynb +0 -0
- README.md +112 -13
- dvc.lock +71 -0
- dvc.yaml +36 -0
- logs/test_metrics.csv +10 -0
- logs/train_metrics.csv +0 -0
- logs/train_params.yml +25 -0
- requirements.txt +10 -0
- run_dev_env.sh +7 -10
- src/.gitignore +2 -0
- src/code/custom_data_loading.py +51 -0
- src/code/eval.py +52 -0
- src/code/eval_metric_calculation.py +79 -0
- src/code/make_dataset.py +121 -0
- src/code/params.yml +13 -0
- src/code/training.py +44 -0
- src/data/.gitignore +1 -0
- src/data/raw/.gitignore +2 -0
- src/data/raw/nyu_depth_v2_labeled.mat.dvc +9 -0
- src/data/raw/splits.mat.dvc +9 -0
.dvc/.gitignore
ADDED
@@ -0,0 +1,3 @@
+/config.local
+/tmp
+/cache
.dvc/config
ADDED
@@ -0,0 +1,5 @@
+[core]
+    analytics = false
+    remote = origin
+['remote "origin"']
+    url = https://dagshub.com/OperationSavta/SavtaDepth.dvc
.dvc/plots/confusion.json
ADDED
@@ -0,0 +1,30 @@
+{
+    "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
+    "data": {
+        "values": "<DVC_METRIC_DATA>"
+    },
+    "title": "<DVC_METRIC_TITLE>",
+    "mark": "rect",
+    "encoding": {
+        "x": {
+            "field": "<DVC_METRIC_X>",
+            "type": "nominal",
+            "sort": "ascending",
+            "title": "<DVC_METRIC_X_LABEL>"
+        },
+        "y": {
+            "field": "<DVC_METRIC_Y>",
+            "type": "nominal",
+            "sort": "ascending",
+            "title": "<DVC_METRIC_Y_LABEL>"
+        },
+        "color": {
+            "aggregate": "count",
+            "type": "quantitative"
+        },
+        "facet": {
+            "field": "rev",
+            "type": "nominal"
+        }
+    }
+}
.dvc/plots/default.json
ADDED
@@ -0,0 +1,29 @@
+{
+    "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
+    "data": {
+        "values": "<DVC_METRIC_DATA>"
+    },
+    "title": "<DVC_METRIC_TITLE>",
+    "mark": {
+        "type": "line"
+    },
+    "encoding": {
+        "x": {
+            "field": "<DVC_METRIC_X>",
+            "type": "quantitative",
+            "title": "<DVC_METRIC_X_LABEL>"
+        },
+        "y": {
+            "field": "<DVC_METRIC_Y>",
+            "type": "quantitative",
+            "title": "<DVC_METRIC_Y_LABEL>",
+            "scale": {
+                "zero": false
+            }
+        },
+        "color": {
+            "field": "rev",
+            "type": "nominal"
+        }
+    }
+}
.dvc/plots/scatter.json
ADDED
@@ -0,0 +1,27 @@
+{
+    "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
+    "data": {
+        "values": "<DVC_METRIC_DATA>"
+    },
+    "title": "<DVC_METRIC_TITLE>",
+    "mark": "point",
+    "encoding": {
+        "x": {
+            "field": "<DVC_METRIC_X>",
+            "type": "quantitative",
+            "title": "<DVC_METRIC_X_LABEL>"
+        },
+        "y": {
+            "field": "<DVC_METRIC_Y>",
+            "type": "quantitative",
+            "title": "<DVC_METRIC_Y_LABEL>",
+            "scale": {
+                "zero": false
+            }
+        },
+        "color": {
+            "field": "rev",
+            "type": "nominal"
+        }
+    }
+}
.dvc/plots/smooth.json
ADDED
@@ -0,0 +1,39 @@
+{
+    "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
+    "data": {
+        "values": "<DVC_METRIC_DATA>"
+    },
+    "title": "<DVC_METRIC_TITLE>",
+    "mark": {
+        "type": "line"
+    },
+    "encoding": {
+        "x": {
+            "field": "<DVC_METRIC_X>",
+            "type": "quantitative",
+            "title": "<DVC_METRIC_X_LABEL>"
+        },
+        "y": {
+            "field": "<DVC_METRIC_Y>",
+            "type": "quantitative",
+            "title": "<DVC_METRIC_Y_LABEL>",
+            "scale": {
+                "zero": false
+            }
+        },
+        "color": {
+            "field": "rev",
+            "type": "nominal"
+        }
+    },
+    "transform": [
+        {
+            "loess": "<DVC_METRIC_Y>",
+            "on": "<DVC_METRIC_X>",
+            "groupby": [
+                "rev"
+            ],
+            "bandwidth": 0.3
+        }
+    ]
+}
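These four Vega-Lite specs are only templates: DVC substitutes `<DVC_METRIC_DATA>` with rows from a metrics file and the `<DVC_METRIC_X>`/`<DVC_METRIC_Y>` placeholders with column names when plots are rendered. Below is a minimal sketch of that substitution using only the standard library, producing a concrete spec from the committed `default.json` and `logs/train_metrics.csv`; it assumes the training CSV follows the same `Name,Value,Timestamp,Step` layout as `logs/test_metrics.csv`, that a metric literally named `rmse` is logged, and the output filename is arbitrary.

```python
# Sketch: fill a DVC plot template with concrete data, roughly what `dvc plots` does.
import csv
import json

with open(".dvc/plots/default.json") as f:
    spec = json.load(f)

# Keep only the rmse rows and tag them with a revision label for the color encoding.
with open("logs/train_metrics.csv") as f:
    rows = [{"Step": int(r["Step"]), "Value": float(r["Value"]), "rev": "HEAD"}
            for r in csv.DictReader(f) if r["Name"] == "rmse"]

spec["data"]["values"] = rows                      # replaces "<DVC_METRIC_DATA>"
for axis, (field, label) in {"x": ("Step", "step"), "y": ("Value", "rmse")}.items():
    spec["encoding"][axis]["field"] = field        # replaces "<DVC_METRIC_X>"/"<DVC_METRIC_Y>"
    spec["encoding"][axis]["title"] = label
spec["title"] = "train rmse"

with open("rmse_plot.vl.json", "w") as f:          # any Vega-Lite viewer can render this file
    json.dump(spec, f, indent=2)
```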
.dvcignore
ADDED
@@ -0,0 +1,3 @@
+# Add patterns of files dvc should ignore, which could improve
+# the performance. Learn more at
+# https://dvc.org/doc/user-guide/dvcignore
.gitignore
ADDED
@@ -0,0 +1,9 @@
+.vscode/
+.DS_Store
+.idea/
+.ipynb_checkpoints/
+.workspace/
+aws/
+google-cloud-sdk
+__pycache__/
+env/
Dockerfile
DELETED
@@ -1,9 +0,0 @@
-FROM pytorch/pytorch
-
-RUN apt-get update && apt-get install -y software-properties-common && apt-get update
-RUN add-apt-repository -y ppa:git-core/ppa && apt-get update && apt-get install -y git libglib2.0-dev
-
-COPY requirements.txt ./
-RUN pip install -r requirements.txt
-
-RUN pip install jupyterlab
Makefile
ADDED
@@ -0,0 +1,37 @@
+#################################################################################
+# GLOBALS                                                                       #
+#################################################################################
+
+PROJECT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
+PROJECT_NAME = savta_depth
+PYTHON_INTERPRETER = python3
+
+ifeq (,$(shell which conda))
+HAS_CONDA=False
+else
+HAS_CONDA=True
+endif
+
+#################################################################################
+# COMMANDS                                                                      #
+#################################################################################
+
+env:
+ifeq (True,$(HAS_CONDA))
+	@echo ">>> Detected conda, creating conda environment."
+	conda create -y --name $(PROJECT_NAME) python=3.7.6
+	@echo ">>> New conda env created. Activate with:\nconda activate $(PROJECT_NAME)"
+else
+	@echo ">>> No conda detected, creating venv environment."
+	$(PYTHON_INTERPRETER) -m venv env
+	@echo ">>> New virtual env created. Activate with:\nsource env/bin/activate ."
+endif
+
+load_requirements:
+	@echo ">>> Installing requirements. Make sure your virtual environment is activated."
+	$(PYTHON_INTERPRETER) -m pip install -U pip setuptools wheel
+	$(PYTHON_INTERPRETER) -m pip install -r requirements.txt
+
+save_requirements:
+	@echo ">>> Saving requirements."
+	pip list --format=freeze > requirements.txt
Notebooks/SavtaDepth_Colab.ipynb
ADDED
The diff for this file is too large to render. See raw diff.
Notebooks/SavtaDepth_sanity_check.ipynb
ADDED
The diff for this file is too large to render. See raw diff.
README.md
CHANGED
@@ -4,6 +4,12 @@ Savta Depth is a collaborative *O*pen *S*ource *D*ata *S*cience project for monocular depth estimation
 Here you will find the code for the project, but also the data, models, pipelines and experiments. This means that the project is easily reproducible on any machine, but also that you can contribute to it as a data scientist.
 
 Have a great idea for how to improve the model? Want to add data and metrics to make it more explainable/fair? We'd love to get your help.
+## Demo
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1XU4DgQ217_hUMU1dllppeQNw3pTRlHy1?usp=sharing)
+
+**You can use [this notebook](https://colab.research.google.com/drive/1XU4DgQ217_hUMU1dllppeQNw3pTRlHy1?usp=sharing) to load a model from the project and run it on an image you uploaded, to get the depth map. Once it has been saved, you can download it to use on platforms that support it (e.g. Facebook) to create 3d photos.**
+
+![](https://i.ibb.co/r20HKpQ/savtadepthex.png)
 
 ## Contributing Guide
 Here we'll list things we want to work on in the project as well as ways to start contributing.
@@ -11,23 +17,116 @@ If you'd like to take part, please follow the guide.
 
 ### Setting up your environment to contribute
 * To get started, fork the repository on DAGsHub
-*
-
-
-
-
-
-
-
-
+* Now you have 3 ways to set up your environment: Google Colab, local, or Docker. If you're not sure which one to go with, we recommend using Colab.
+
+#### Google Colab
+Google Colab can be looked at as your web-connected, GPU-powered IDE. Below is a link to a well-documented Colab notebook that will load the code and data from this repository, enabling you to modify the code and re-run training. Notice that you still need to modify the code within the `src/code/` folder; adding cells should be used only for testing things out.
+
+**You can also use this notebook to load a model from the project and run it on an image you uploaded, to get the depth map. Once it has been saved, you can download it to use on platforms that support it (e.g. Facebook) to create 3d photos.**
+
+
+In order to edit code files, you must save the notebook to your drive. You can do this by typing `ctrl+s` or `cmd+s` on a Mac.
+
+\>\> **[SavtaDepth Colab Environment](https://colab.research.google.com/drive/1XU4DgQ217_hUMU1dllppeQNw3pTRlHy1?usp=sharing)** \<\<
+
+**_NOTE: The downside of this method (if you are not familiar with Colab) is that Google Colab will limit the amount of time an instance can be live, so you might be limited in your ability to train models for longer periods of time._**
+
+This notebook is also part of this project, in the `Notebooks` folder, in case it needs modification. You should not commit your version unless your contribution is an improvement to the environment.
+
+#### Local
+* Clone the repository you just forked by typing the following command in your terminal:
+
+```bash
+$ git clone https://dagshub.com/<your-dagshub-username>/SavtaDepth.git
+```
+
+* Create a virtual environment or Conda environment and activate it
+```bash
+# Create the virtual environment
+$ make env
+
+# Activate the virtual environment
+# VENV
+$ source env/bin/activate .
+
+# or Conda
+$ source activate savta_depth
+```
+* Install the required libraries
+```bash
+$ make load_requirements
+```
+**_NOTE: Here I assume a setup without a GPU. Otherwise, you might need to modify requirements, which is outside the scope of this readme (feel free to contribute to this)._**
+* Pull the dvc files to your workspace by typing:
+
+```bash
+$ dvc pull -r origin
+$ dvc checkout #use this to get the data, models etc
+```
+
+* After you have finished your modifications, make sure to do the following:
+    * If you modified packages, make sure to update the `requirements.txt` file accordingly.
+
+    * Push your code to DAGsHub, and your dvc managed files to your dvc remote. For reference on the commands needed, please refer to the Google Colab notebook section – [Committing Your Work and Pushing Back to DAGsHub](https://colab.research.google.com/drive/1XU4DgQ217_hUMU1dllppeQNw3pTRlHy1?authuser=1#scrollTo=PAxz-29WhN12&line=1&uniqifier=1).
+
+#### Docker
+* Clone the repository you just forked by typing the following command in your terminal:
+
 ```bash
-$
-$ ./run_dev_env.sh
+$ git clone https://dagshub.com/<your-dagshub-username>/SavtaDepth.git
 ```
-
-*
+
+* To get your environment up and running, Docker is the best way to go. We use an instance of [MLWorkspace](https://github.com/ml-tooling/ml-workspace).
+* You can just run the following commands to get it started.
+
+```bash
+$ chmod +x run_dev_env.sh
+$ ./run_dev_env.sh
+```
+
+* Open localhost:8080 to see the workspace you have created. You will be asked for a token – enter `dagshub_savta`
+* In the top right you have a menu called `Open Tool`. Click that button and choose terminal (alternatively open VSCode and open terminal there) and type in the following commands to install a virtualenv and dependencies:
+
+```bash
+$ make env
+$ source activate savta_depth
+```
+
+Now that we have an environment, let's install all of the required libraries.
+
+**Note**: If you don't have a GPU you will need to install pytorch separately and then run `make load_requirements`. You can install pytorch for computers without a GPU with the following command:
+
+```bash
+$ conda install pytorch torchvision cpuonly -c pytorch
+```
+
+To install the required libraries run the following command:
+
+```bash
+$ make load_requirements
+```
+
+
+* Pull the dvc files to your workspace by typing:
+
+```bash
+$ dvc pull -r dvc-remote
+$ dvc checkout #use this to get the data, models etc
+```
+
+* After you have finished your modifications, make sure to do the following:
+    * If you modified packages, make sure to update the `requirements.txt` file accordingly.
+
+    * Push your code to DAGsHub, and your dvc managed files to your dvc remote. For reference on the commands needed, please refer to the Google Colab notebook section – [Committing Your Work and Pushing Back to DAGsHub](https://colab.research.google.com/drive/1XU4DgQ217_hUMU1dllppeQNw3pTRlHy1?authuser=1#scrollTo=PAxz-29WhN12&line=1&uniqifier=1).
+
+---
+### After pushing code and data to DAGsHub
 * Create a Pull Request on DAGsHub!
+    * Explain what changes you are making.
+    * If your changes affect data or models, make sure they are pushed to your DAGsHub dvc remote, and are included in the PR.
+    * We will review your contribution ASAP, and merge it or start a discussion if needed.
 * 🐶
+
 ### TODO:
 - [ ] Web UI
 - [ ] Testing various datasets as basis for training
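The Demo section above points at the Colab notebook for running the model on a single image. For reference, here is a minimal local sketch of the same flow, mirroring `src/code/eval.py`; it assumes you run it from the repository root with `src/code/` on `sys.path`, that the checkpoint `src/models/model.pth` has already been fetched with `dvc pull`, and the input image name is a placeholder.

```python
# Sketch: predict a depth map for one image with the committed checkpoint.
import torch
from torchvision import transforms
from fastai.vision.all import unet_learner, Path, resnet34, MSELossFlat

from custom_data_loading import create_data  # lives in src/code/

# Any folder of NNNNN.jpg / NNNNN_depth.png pairs works for building the dataloaders.
data = create_data(Path("src/data/processed/test"))
learner = unet_learner(data, resnet34, n_out=3, loss_func=MSELossFlat(),
                       path='src/', model_dir='models')
learner = learner.load('model')                # loads src/models/model.pth

pred = learner.predict(Path("my_photo.jpg"))[0]             # placeholder input image
depth_map = transforms.ToPILImage()(pred.type(torch.FloatTensor)).convert('L')
depth_map.save("my_photo_depth.png")                        # grayscale depth map
```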
dvc.lock
ADDED
@@ -0,0 +1,71 @@
+process_data:
+  cmd: python3 src/code/make_dataset.py src/data/raw/nyu_depth_v2_labeled.mat src/data/raw/splits.mat
+    src/data/processed
+  deps:
+  - path: src/code/make_dataset.py
+    md5: e069c7323c9be16baedd8f988375e145
+    size: 5256
+  - path: src/data/raw/nyu_depth_v2_labeled.mat
+    md5: 520609c519fba3ba5ac58c8fefcc3530
+    size: 2972037809
+  - path: src/data/raw/splits.mat
+    md5: 08e3c3aea27130ac7c01ffd739a4535f
+    size: 2626
+  outs:
+  - path: src/data/processed/
+    md5: 9a1f43f46e8b1c387532e994e721d57d.dir
+    size: 197717291
+    nfiles: 2898
+train:
+  cmd: python3 src/code/training.py src/data/processed/train
+  deps:
+  - path: src/code/custom_data_loading.py
+    md5: c94ea029ed76ca94bb1ad4c1655e5e68
+    size: 1916
+  - path: src/code/params.yml
+    md5: 2263ca2167c1bb4b0f53a9aedb5f238e
+    size: 217
+  - path: src/code/training.py
+    md5: e3dff7f4b59e4ebf818d7631d3e6803a
+    size: 1683
+  - path: src/data/processed/train
+    md5: 9956d748dcadc3abadd1ff966a6e2b92.dir
+    size: 109120921
+    nfiles: 1590
+  outs:
+  - path: logs/train_metrics.csv
+    md5: 437a06e6c6c5b4f6eec5e546c1ce6930
+    size: 103916
+  - path: logs/train_params.yml
+    md5: e06e92ac0f3ac1d367c22a10c28cccf9
+    size: 886
+  - path: src/models/
+    md5: fab42526c433987e0e6370db31a1869d.dir
+    size: 494927196
+    nfiles: 1
+eval:
+  cmd: python3 src/code/eval.py src/data/processed/test
+  deps:
+  - path: src/code/custom_data_loading.py
+    md5: c94ea029ed76ca94bb1ad4c1655e5e68
+    size: 1916
+  - path: src/code/eval.py
+    md5: fcc66ed80bb4466ab0438f556acd125c
+    size: 1775
+  - path: src/code/eval_metric_calculation.py
+    md5: 2fc866e1107042a996087d5716d44bf0
+    size: 2999
+  - path: src/code/params.yml
+    md5: 2263ca2167c1bb4b0f53a9aedb5f238e
+    size: 217
+  - path: src/data/processed/test
+    md5: bcccd66f3f561b53ba97c89a558c08a0.dir
+    size: 88596370
+    nfiles: 1308
+  - path: src/models/model.pth
+    md5: 2fd77305fd779eefd11e307ee3f201d7
+    size: 494927196
+  outs:
+  - path: logs/test_metrics.csv
+    md5: 0add355c58eb4dfa1ae7e28e47750d33
+    size: 340
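The `md5`/`size` pairs in `dvc.lock` are what `dvc repro` compares against to decide whether a stage is up to date. A quick sketch for checking a single tracked file against its recorded hash follows; note that entries ending in `.dir` are manifest hashes computed by DVC itself, so this only applies to plain files such as `src/models/model.pth` or `src/data/raw/splits.mat`.

```python
# Sketch: recompute a file's md5 and compare it with the value pinned in dvc.lock.
import hashlib

def file_md5(path, chunk_size=1 << 20):
    digest = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

print(file_md5("src/data/raw/splits.mat"))
# expected, from dvc.lock / splits.mat.dvc: 08e3c3aea27130ac7c01ffd739a4535f
```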
dvc.yaml
ADDED
@@ -0,0 +1,36 @@
+stages:
+  process_data:
+    cmd: python3 src/code/make_dataset.py src/data/raw/nyu_depth_v2_labeled.mat src/data/raw/splits.mat
+      src/data/processed
+    deps:
+    - src/code/make_dataset.py
+    - src/data/raw/nyu_depth_v2_labeled.mat
+    - src/data/raw/splits.mat
+    outs:
+    - src/data/processed/
+  train:
+    cmd: python3 src/code/training.py src/data/processed/train
+    deps:
+    - src/code/custom_data_loading.py
+    - src/code/params.yml
+    - src/code/training.py
+    - src/data/processed/train
+    outs:
+    - src/models/
+    - logs/train_params.yml:
+        cache: false
+    metrics:
+    - logs/train_metrics.csv:
+        cache: false
+  eval:
+    cmd: python3 src/code/eval.py src/data/processed/test
+    deps:
+    - src/code/params.yml
+    - src/code/custom_data_loading.py
+    - src/code/eval_metric_calculation.py
+    - src/code/eval.py
+    - src/models/model.pth
+    - src/data/processed/test
+    metrics:
+    - logs/test_metrics.csv:
+        cache: false
logs/test_metrics.csv
ADDED
@@ -0,0 +1,10 @@
+Name,Value,Timestamp,Step
+"a1",0.056999333,1613824849186,1
+"a2",0.118539445,1613824849186,1
+"a3",0.19929159,1613824849186,1
+"abs_rel",2.5860002,1613824849186,1
+"sq_rel",15.912783,1613824849186,1
+"rmse",5.257741,1613824849186,1
+"rmse_log",1.2291939,1613824849186,1
+"log10",0.49469143,1613824849186,1
+"silog",43.5198,1613824849186,1
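The CSV follows the DAGsHub logger's `Name,Value,Timestamp,Step` layout, one row per metric. A small sketch for pulling the values back into a dictionary:

```python
# Sketch: load the logged test metrics into {name: value}.
import csv

with open("logs/test_metrics.csv") as f:
    metrics = {row["Name"]: float(row["Value"]) for row in csv.DictReader(f)}

print(metrics["rmse"], metrics["abs_rel"])   # 5.257741 2.5860002
```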
logs/train_metrics.csv
ADDED
The diff for this file is too large to render. See raw diff.
logs/train_params.yml
ADDED
@@ -0,0 +1,25 @@
+DAGsHubLogger: true
+Learner: <fastai.learner.Learner object at 0x7f051ecfcac8>
+ParamScheduler: true
+ProgressCallback: true
+Recorder: {add_time: true, train_metrics: false, valid_metrics: true}
+TrainEvalCallback: true
+batch per epoch: 159
+batch size: 4
+dataset.tfms: '[Pipeline: PILBase.create, Pipeline: get_y_fn -> PILBase.create]'
+device: cuda
+dls.after_batch: "Pipeline: IntToFloatTensor -- {'div': 255.0, 'div_mask': 1} -> Normalize\
+  \ -- {'mean': tensor([[[[0.4850]],\n\n [[0.4560]],\n\n [[0.4060]]]],\
+  \ device='cuda:0'), 'std': tensor([[[[0.2290]],\n\n [[0.2240]],\n\n \
+  \ [[0.2250]]]], device='cuda:0'), 'axes': (0, 2, 3)}"
+dls.after_item: 'Pipeline: ToTensor'
+dls.before_batch: 'Pipeline: '
+frozen: true
+frozen idx: 2
+input 1 dim 1: 4
+input 1 dim 2: 3
+input 1 dim 3: 480
+input 1 dim 4: 640
+model parameters: 41221268
+n_inp: 1
+success: true
requirements.txt
CHANGED
@@ -0,0 +1,10 @@
+dvc==1.11.15
+fastai==2.2.5
+torch==1.7.0
+h5py==2.10.0
+opencv-python==4.4.0.42
+tqdm==4.52.0
+numpy==1.19.4
+scikit-learn==0.23.2
+dagshub==0.1.5
+tables==3.6.1
run_dev_env.sh
CHANGED
@@ -1,10 +1,7 @@
-docker run
-
-
-
---
---
-
---no-browser \
---NotebookApp.token='' \
---NotebookApp.password=''
+docker run -d \
+    -p 8080:8080 \
+    --name "dags-ml-workspace" -v "/${PWD}:/workspace" \
+    --env AUTHENTICATE_VIA_JUPYTER="dagshub_savta" \
+    --shm-size 2G \
+    --restart always \
+    dagshub/ml-workspace-minimal:latest
src/.gitignore
ADDED
@@ -0,0 +1,2 @@
+/models
+/eval
src/code/custom_data_loading.py
ADDED
@@ -0,0 +1,51 @@
+import yaml
+from fastai.vision.all import \
+    DataLoaders, \
+    delegates, \
+    DataBlock, \
+    ImageBlock, \
+    PILImage, \
+    PILImageBW, \
+    RandomSplitter, \
+    Path, \
+    get_files
+
+
+class ImageImageDataLoaders(DataLoaders):
+    """Basic wrapper around several `DataLoader`s with factory methods for Image to Image problems"""
+    @classmethod
+    @delegates(DataLoaders.from_dblock)
+    def from_label_func(cls, path, filenames, label_func, valid_pct=0.2, seed=None, item_transforms=None,
+                        batch_transforms=None, **kwargs):
+        """Create from list of `fnames` in `path`s with `label_func`."""
+        datablock = DataBlock(blocks=(ImageBlock(cls=PILImage), ImageBlock(cls=PILImageBW)),
+                              get_y=label_func,
+                              splitter=RandomSplitter(valid_pct, seed=seed),
+                              item_tfms=item_transforms,
+                              batch_tfms=batch_transforms)
+        res = cls.from_dblock(datablock, filenames, path=path, **kwargs)
+        return res
+
+
+def get_y_fn(x):
+    y = str(x.absolute()).replace('.jpg', '_depth.png')
+    y = Path(y)
+
+    return y
+
+
+def create_data(data_path):
+    with open(r"./src/code/params.yml") as f:
+        params = yaml.safe_load(f)
+
+    filenames = get_files(data_path, extensions='.jpg')
+    if len(filenames) == 0:
+        raise ValueError("Could not find any files in the given path")
+    dataset = ImageImageDataLoaders.from_label_func(data_path,
+                                                    seed=int(params['seed']),
+                                                    bs=int(params['batch_size']),
+                                                    num_workers=int(params['num_workers']),
+                                                    filenames=filenames,
+                                                    label_func=get_y_fn)
+
+    return dataset
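A short usage sketch for the loader above. The folder and scene names are illustrative; `create_data` only needs a directory of `*.jpg` images whose depth targets sit next to them as `*_depth.png`, which is the naming convention `get_y_fn` encodes, and it expects to be run from the repository root because it reads `./src/code/params.yml`.

```python
# Sketch: build the DataLoaders from make_dataset.py's output and peek at a batch.
from fastai.vision.all import Path
from custom_data_loading import create_data, get_y_fn

# Maps an RGB frame to its depth target purely by filename convention.
print(get_y_fn(Path("src/data/processed/train/office/00001.jpg")))
# -> .../src/data/processed/train/office/00001_depth.png (absolute path)

data = create_data(Path("src/data/processed/train"))
xb, yb = data.one_batch()   # xb: RGB batch [bs, 3, H, W], yb: depth batch [bs, 1, H, W]
print(xb.shape, yb.shape)
```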
src/code/eval.py
ADDED
@@ -0,0 +1,52 @@
+import sys
+import yaml
+import torch
+from torchvision import transforms
+from fastai.vision.all import unet_learner, Path, resnet34, MSELossFlat, get_files, L, tuplify
+from custom_data_loading import create_data
+from eval_metric_calculation import compute_eval_metrics
+from dagshub import dagshub_logger
+from tqdm import tqdm
+
+
+if __name__ == "__main__":
+    if len(sys.argv) < 2:
+        print("usage: %s <test_data_path>" % sys.argv[0], file=sys.stderr)
+        sys.exit(0)
+
+    with open(r"./src/code/params.yml") as f:
+        params = yaml.safe_load(f)
+
+    data_path = Path(sys.argv[1])
+    data = create_data(data_path)
+
+    arch = {'resnet34': resnet34}
+    loss = {'MSELossFlat': MSELossFlat()}
+
+    learner = unet_learner(data,
+                           arch.get(params['architecture']),
+                           n_out=int(params['num_outs']),
+                           loss_func=loss.get(params['loss_func']),
+                           path='src/',
+                           model_dir='models')
+    learner = learner.load('model')
+
+    filenames = get_files(Path(data_path), extensions='.jpg')
+    test_files = L([Path(i) for i in filenames])
+
+    for sample in tqdm(test_files.items, desc="Predicting on test images", total=len(test_files.items)):
+        pred = learner.predict(sample)[0]
+        pred = transforms.ToPILImage()(pred[:, :, :].type(torch.FloatTensor)).convert('L')
+        pred.save("src/eval/" + str(sample.stem) + "_pred.png")
+
+    print("Calculating metrics...")
+    metrics = compute_eval_metrics(test_files)
+
+    with dagshub_logger(
+            metrics_path="logs/test_metrics.csv",
+            should_log_hparams=False
+    ) as logger:
+        # Metric logging
+        logger.log_metrics(metrics)
+
+    print("Evaluation Done!")
src/code/eval_metric_calculation.py
ADDED
@@ -0,0 +1,79 @@
+import numpy as np
+from PIL import Image
+from tqdm import tqdm
+
+def compute_errors(target, prediction):
+    thresh = np.maximum((target / prediction), (prediction / target))
+    a1 = (thresh < 1.25).mean()
+    a2 = (thresh < 1.25 ** 2).mean()
+    a3 = (thresh < 1.25 ** 3).mean()
+
+    abs_rel = np.mean(np.abs(target - prediction) / target)
+    sq_rel = np.mean(((target - prediction) ** 2) / target)
+
+    rmse = (target - prediction) ** 2
+    rmse = np.sqrt(rmse.mean())
+
+    rmse_log = (np.log(target) - np.log(prediction)) ** 2
+    rmse_log = np.sqrt(rmse_log.mean())
+
+    err = np.log(prediction) - np.log(target)
+    silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
+
+    log_10 = (np.abs(np.log10(target) - np.log10(prediction))).mean()
+
+    return a1, a2, a3, abs_rel, sq_rel, rmse, rmse_log, silog, log_10
+
+
+def compute_eval_metrics(test_files):
+    min_depth_eval = 1e-3
+    max_depth_eval = 10
+
+    num_samples = len(test_files)
+
+    a1 = np.zeros(num_samples, np.float32)
+    a2 = np.zeros(num_samples, np.float32)
+    a3 = np.zeros(num_samples, np.float32)
+    abs_rel = np.zeros(num_samples, np.float32)
+    sq_rel = np.zeros(num_samples, np.float32)
+    rmse = np.zeros(num_samples, np.float32)
+    rmse_log = np.zeros(num_samples, np.float32)
+    silog = np.zeros(num_samples, np.float32)
+    log10 = np.zeros(num_samples, np.float32)
+
+    for i in tqdm(range(num_samples), desc="Calculating metrics for test data", total=num_samples):
+        sample_path = test_files[i]
+        target_path = str(sample_path.parent/(sample_path.stem + "_depth.png"))
+        pred_path = "src/eval/" + str(sample_path.stem) + "_pred.png"
+
+        target_image = Image.open(target_path)
+        pred_image = Image.open(pred_path)
+
+        target = np.asarray(target_image)
+        pred = np.asarray(pred_image)
+
+        target = target / 25.0
+        pred = pred / 25.0
+
+        pred[pred < min_depth_eval] = min_depth_eval
+        pred[pred > max_depth_eval] = max_depth_eval
+        pred[np.isinf(pred)] = max_depth_eval
+
+        target[np.isinf(target)] = 0
+        target[np.isnan(target)] = 0
+
+        valid_mask = np.logical_and(target > min_depth_eval, target < max_depth_eval)
+
+        a1[i], a2[i], a3[i], abs_rel[i], sq_rel[i], rmse[i], rmse_log[i], silog[i], log10[i] = \
+            compute_errors(target[valid_mask], pred[valid_mask])
+
+    print("{:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}".format(
+        'd1', 'd2', 'd3', 'AbsRel', 'SqRel', 'RMSE', 'RMSElog', 'SILog', 'log10'))
+    print("{:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}".format(
+        a1.mean(), a2.mean(), a3.mean(),
+        abs_rel.mean(), sq_rel.mean(), rmse.mean(), rmse_log.mean(), silog.mean(), log10.mean()))
+
+    return dict(a1=a1.mean(), a2=a2.mean(), a3=a3.mean(),
+                abs_rel=abs_rel.mean(), sq_rel=sq_rel.mean(),
+                rmse=rmse.mean(), rmse_log=rmse_log.mean(),
+                log10=log10.mean(), silog=silog.mean())
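A tiny sketch of `compute_errors` on synthetic values makes the interpretation concrete. Both arrays are depths in metres, which is what `compute_eval_metrics` passes in after dividing the 8-bit PNG values by 25.0 and masking to the (0.001, 10) range; the numbers below are made up, and it assumes `src/code/` is on `sys.path`.

```python
# Sketch: the nine error terms on a toy target/prediction pair.
import numpy as np
from eval_metric_calculation import compute_errors

target = np.array([1.0, 2.0, 4.0], dtype=np.float32)      # ground-truth depth (m)
prediction = np.array([1.1, 1.8, 5.0], dtype=np.float32)  # predicted depth (m)

a1, a2, a3, abs_rel, sq_rel, rmse, rmse_log, silog, log10 = compute_errors(target, prediction)
print(f"d1={a1:.3f}  AbsRel={abs_rel:.3f}  RMSE={rmse:.3f}")
# d1 is the fraction of pixels where max(t/p, p/t) < 1.25; here 2 of the 3 values pass.
```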
src/code/make_dataset.py
ADDED
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#######################################################################################
+# The MIT License
+
+# Copyright (c) 2014 Hannes Schulz, University of Bonn <[email protected]>
+# Copyright (c) 2013 Benedikt Waldvogel, University of Bonn <[email protected]>
+# Copyright (c) 2008-2009 Sebastian Nowozin <[email protected]>
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#######################################################################################
+#
+# See https://github.com/deeplearningais/curfil/wiki/Training-and-Prediction-with-the-NYU-Depth-v2-Dataset
+
+
+"""Helper script to convert the NYU Depth v2 dataset Matlab file into a set of PNG and JPEG images.
+Receives 3 files from argparse:
+    <h5_file> - Contains the original images, depth maps, and scene types
+    <train_test_split> - Contains two numpy arrays with the indexes of the
+                         images based on the split to train and test sets.
+    <out_folder> - Name of the folder to save the original and depth images.
+
+Every image in the DB will have its twin B&W image that indicates the depth
+in the image. The images will be read, converted by the convert_image function
+and finally saved to a path based on the train/test split and scene types.
+"""
+
+from __future__ import print_function
+
+import h5py
+import numpy as np
+import os
+import scipy.io
+import sys
+import cv2
+from tqdm import tqdm
+
+
+def convert_image(index, depth_map, img, output_folder):
+    """Processes data images and depth maps
+    :param index: int, image index
+    :param depth_map: numpy array, image depth - 2D array.
+    :param img: numpy array, the original RGB image - 3D array.
+    :param output_folder: path to save the image in.
+
+    Receives an image with its relevant depth map.
+    Normalizes the depth map, and adds a 7 px boundary to the original image.
+    Saves both image and depth map to the appropriate processed data folder.
+    """
+
+    # Normalize the depth image
+    # normalized_depth = cv2.normalize(depth_map, None, 0, 255, cv2.NORM_MINMAX)
+    img_depth = depth_map * 25.0
+    cv2.imwrite("%s/%05d_depth.png" % (output_folder, index), img_depth)
+
+    # Adding black frame to original image
+    img = img[:, :, ::-1]  # Flipping the image from RGB to BGR for opencv
+    image_black_boundary = np.zeros(img.shape, dtype=np.uint8)
+    image_black_boundary[7:image_black_boundary.shape[0] - 6, 7:image_black_boundary.shape[1] - 6, :] = \
+        img[7:img.shape[0] - 6, 7:img.shape[1] - 6, :]
+    cv2.imwrite("%s/%05d.jpg" % (output_folder, index), image_black_boundary)
+
+
+if __name__ == "__main__":
+
+    # Check if got all needed input for argparse
+    if len(sys.argv) != 4:
+        print("usage: %s <h5_file> <train_test_split> <out_folder>" % sys.argv[0], file=sys.stderr)
+        sys.exit(0)
+
+    # load arguments to variables
+    h5_file = h5py.File(sys.argv[1], "r")
+    train_test = scipy.io.loadmat(sys.argv[2])  # h5py is not able to open that file, but scipy is
+    out_folder = sys.argv[3]
+
+    # Extract images *indexes* for train and test data sets
+    test_images = set([int(x) for x in train_test["testNdxs"]])
+    train_images = set([int(x) for x in train_test["trainNdxs"]])
+    print("%d training images" % len(train_images))
+    print("%d test images" % len(test_images))
+
+    # Grayscale
+    depth = h5_file['depths']
+    print("Reading", sys.argv[1])
+    images = h5_file['images']  # (num_channels, height, width)
+
+    # Extract all sceneTypes per image - "office", "classroom", etc.
+    scenes = [u''.join(chr(c[0]) for c in h5_file[obj_ref]) for obj_ref in h5_file['sceneTypes'][0]]
+
+    for i, image in tqdm(enumerate(images), desc="Processing images", total=len(images)):
+        idx = int(i) + 1
+        if idx in train_images:
+            train_test = "train"
+        else:
+            assert idx in test_images, "index %d neither found in training set nor in test set" % idx
+            train_test = "test"
+
+        # Create path to save image in
+        folder = "%s/%s/%s" % (out_folder, train_test, scenes[i])
+        if not os.path.exists(folder):
+            os.makedirs(folder)
+
+        convert_image(i, depth[i, :, :].T, image.T, folder)
+
+    print("Finished")
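One detail worth calling out: the depth maps are written here as 8-bit PNGs scaled by 25.0, and `eval_metric_calculation.py` divides by 25.0 when reading them back, which caps the representable depth at 255/25 = 10.2 m (matching `max_depth_eval = 10`) with roughly 4 cm quantisation. A small sketch of that round trip, using made-up values and an arbitrary filename:

```python
# Sketch: the fixed-point depth encoding shared by make_dataset.py and the eval code.
import numpy as np
import cv2

depth_m = np.array([[0.71, 3.20], [5.04, 9.99]], dtype=np.float32)  # toy depths in metres

cv2.imwrite("toy_depth.png", depth_m * 25.0)   # stored as 8-bit, values saturate at 255
restored = cv2.imread("toy_depth.png", cv2.IMREAD_GRAYSCALE) / 25.0

print(restored)   # each value now quantised to multiples of 0.04 m
```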
src/code/params.yml
ADDED
@@ -0,0 +1,13 @@
+seed: 42
+data: nyu_depth_v2
+batch_size: 4
+num_workers: 0
+weight_decay: 1e-2
+learning_rate: 1e-3
+epochs: 1
+num_outs: 3
+source_dir: src
+model_dir: models
+architecture: resnet34
+loss_func: MSELossFlat
+train_metric: rmse
src/code/training.py
ADDED
@@ -0,0 +1,44 @@
+"""Trains or fine-tunes a model for the task of monocular depth estimation
+Receives 1 argument from argparse:
+    <data_path> - Path to the dataset which is split into 2 folders - train and test.
+"""
+import sys
+import yaml
+from fastai.vision.all import unet_learner, Path, resnet34, rmse, MSELossFlat
+from custom_data_loading import create_data
+from dagshub.fastai import DAGsHubLogger
+
+
+if __name__ == "__main__":
+    # Check if got all needed input for argparse
+    if len(sys.argv) != 2:
+        print("usage: %s <data_path>" % sys.argv[0], file=sys.stderr)
+        sys.exit(0)
+
+    with open(r"./src/code/params.yml") as f:
+        params = yaml.safe_load(f)
+
+    data = create_data(Path(sys.argv[1]))
+
+    metrics = {'rmse': rmse}
+    arch = {'resnet34': resnet34}
+    loss = {'MSELossFlat': MSELossFlat()}
+
+    learner = unet_learner(data,
+                           arch.get(params['architecture']),
+                           metrics=metrics.get(params['train_metric']),
+                           wd=float(params['weight_decay']),
+                           n_out=int(params['num_outs']),
+                           loss_func=loss.get(params['loss_func']),
+                           path=params['source_dir'],
+                           model_dir=params['model_dir'],
+                           cbs=DAGsHubLogger(
+                               metrics_path="logs/train_metrics.csv",
+                               hparams_path="logs/train_params.yml"))
+
+    print("Training model...")
+    learner.fine_tune(epochs=int(params['epochs']),
+                      base_lr=float(params['learning_rate']))
+    print("Saving model...")
+    learner.save('model')
+    print("Done!")
src/data/.gitignore
ADDED
@@ -0,0 +1 @@
+/processed
src/data/raw/.gitignore
ADDED
@@ -0,0 +1,2 @@
+/nyu_depth_v2_labeled.mat
+/splits.mat
src/data/raw/nyu_depth_v2_labeled.mat.dvc
ADDED
@@ -0,0 +1,9 @@
+md5: d27a0ba6c898f981797a3388c26c2d0f
+frozen: true
+deps:
+- etag: '"b125b2b1-5aa5b95864fc7"'
+  path: http://horatio.cs.nyu.edu/mit/silberman/nyu_depth_v2/nyu_depth_v2_labeled.mat
+outs:
+- md5: 520609c519fba3ba5ac58c8fefcc3530
+  path: nyu_depth_v2_labeled.mat
+  size: 2972037809
src/data/raw/splits.mat.dvc
ADDED
@@ -0,0 +1,9 @@
+md5: 26011289311c18b92781de66654223a4
+frozen: true
+deps:
+- etag: '"a42-4cb6a5fad2fc0"'
+  path: http://horatio.cs.nyu.edu/mit/silberman/indoor_seg_sup/splits.mat
+outs:
+- md5: 08e3c3aea27130ac7c01ffd739a4535f
+  path: splits.mat
+  size: 2626