jacopoteneggi committed
Commit 0455608 · 1 Parent(s): 4fb80ed
.gitignore CHANGED
@@ -0,0 +1,164 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+ .pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ data
.pre-commit-config.yaml ADDED
@@ -0,0 +1,14 @@
+ repos:
+   - repo: https://github.com/pycqa/isort
+     rev: 5.12.0
+     hooks:
+       - id: isort
+         args: ["--profile", "black", "--filter-files"]
+   - repo: https://github.com/psf/black
+     rev: 22.6.0
+     hooks:
+       - id: black-jupyter
+   - repo: https://github.com/kynan/nbstripout
+     rev: 0.5.0
+     hooks:
+       - id: nbstripout
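These hooks sort imports (isort with the black profile), format code and notebooks (black-jupyter), and strip notebook outputs (nbstripout) on each commit. To use them locally one would typically install pre-commit (`pip install pre-commit`) and run `pre-commit install` in the clone; that setup step is assumed here and is not part of this commit.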
README.md CHANGED
@@ -12,4 +12,5 @@ Supported datasets:
 
 Supported models:
 
- * [open_clip](https://github.com/mlfoundations/open_clip): [[ViT-B/32]](https://huggingface.co/laion/CLIP-ViT-B-32-laion2B-s34B-b79K)
+ * [CLIP](https://github.com/openai/CLIP): ViT-B/32, ViT-L/14
+ * [OpenCLIP](https://github.com/mlfoundations/open_clip): [ViT-B-32](https://huggingface.co/laion/CLIP-ViT-B-32-laion2B-s34B-b79K), [ViT-L-14](https://huggingface.co/laion/CLIP-ViT-L-14-laion2B-s32B-b82K)
dataset_lib/__init__.py ADDED
File without changes
dataset_lib/datasets.py ADDED
@@ -0,0 +1,62 @@
+ import os
+
+ from PIL import Image
+ from torchvision.datasets import VisionDataset
+ from torchvision.datasets.folder import find_classes, make_dataset
+
+ SUPPORTED_DATASETS = []
+ workdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ with open(os.path.join(workdir, "supported_datasets.txt"), "r") as f:
+     for line in f:
+         SUPPORTED_DATASETS.append(line.strip())
+
+ data_dir = os.path.join(workdir, "data")
+
+
+ def get_dataset(dataset, transform=None):
+     if dataset == "imagenette":
+         root_dir = os.path.join(data_dir, "imagenette2")
+         train_dataset = Imagenette(root_dir, train=True, transform=transform)
+         val_dataset = Imagenette(root_dir, train=False, transform=transform)
+         return {"train": train_dataset, "val": val_dataset}
+
+
+ class Imagenette(VisionDataset):
+     _WNID_TO_CLASS = {
+         "n01440764": ("tench", "Tinca tinca"),
+         "n02102040": ("English springer", "English springer spaniel"),
+         "n02979186": ("cassette player",),
+         "n03000684": ("chainsaw", "chain saw"),
+         "n03028079": ("church", "church building"),
+         "n03394916": ("French horn", "horn"),
+         "n03417042": ("garbage truck", "dustcart"),
+         "n03425413": ("gas pump", "gasoline pump", "petrol pump", "island dispenser"),
+         "n03445777": ("golf ball",),
+         "n03888257": ("parachute", "chute"),
+     }
+
+     def __init__(self, root, train=True, transform=None):
+         super().__init__(root, transform=transform)
+         self.train = train
+
+         self._split = "train" if train else "val"
+         self._image_root = os.path.join(root, self._split)
+
+         self.wnids, self.wnid_to_idx = find_classes(self._image_root)
+         self.classes = [self._WNID_TO_CLASS[wnid][0] for wnid in self.wnids]
+
+         self._samples = make_dataset(
+             self._image_root, self.wnid_to_idx, extensions=".jpeg"
+         )
+
+     def __len__(self):
+         return len(self._samples)
+
+     def __getitem__(self, idx):
+         path, label = self._samples[idx]
+         image = Image.open(path).convert("RGB")
+
+         if self.transform is not None:
+             image = self.transform(image)
+
+         return image, label
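A minimal usage sketch for the wrapper above, assuming the Imagenette v2 archive has been extracted to data/imagenette2 (the standard split has 9,469 train and 3,925 val images):

    from dataset_lib.datasets import get_dataset

    # without a transform, __getitem__ yields (PIL.Image, int) pairs
    datasets = get_dataset("imagenette")
    print(len(datasets["train"]), len(datasets["val"]))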
dataset_lib/encode.py ADDED
@@ -0,0 +1,89 @@
+ import argparse
+ import os
+ import pickle
+
+ import torch
+ from torch.utils.data import DataLoader
+ from tqdm import tqdm
+
+ from dataset_lib.datasets import SUPPORTED_DATASETS, get_dataset
+ from dataset_lib.models import SUPPORTED_MODELS, get_transform_and_encoding_fn
+
+ curr_dir = os.path.dirname(os.path.abspath(__file__))
+ workdir = os.path.dirname(curr_dir)
+
+
+ def model_type(model):
+     if model not in SUPPORTED_MODELS:
+         raise NotImplementedError(f"Model {model} not supported")
+     return model
+
+
+ def dataset_type(dataset):
+     if dataset not in SUPPORTED_DATASETS:
+         raise NotImplementedError(f"Dataset {dataset} not supported")
+     return dataset
+
+
+ def config():
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "--model",
+         type=model_type,
+         help="Model to use for encoding dataset",
+         default="open_clip:ViT-B-32",
+     )
+     parser.add_argument(
+         "--dataset",
+         type=dataset_type,
+         help="Dataset to encode",
+         default="imagenette",
+     )
+
+     return parser.parse_args()
+
+
+ def encode(
+     model_name,
+     dataset_name,
+     device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
+ ):
+     preprocess, encode_fn = get_transform_and_encoding_fn(model_name, device=device)
+     datasets = get_dataset(dataset_name, transform=preprocess)
+
+     prefix = os.path.join(workdir, f"{dataset_name}_{model_name.replace('/', '-')}")
+
+     print(f"Encoding {dataset_name} dataset with {model_name} model")
+     for op, dataset in datasets.items():
+         loader = DataLoader(dataset, batch_size=64, num_workers=4)
+
+         data = []
+         # the loop variable must not shadow the `data` accumulator,
+         # or only the last batch would survive to the pickle below
+         for batch in tqdm(loader, desc=f"({op}) encoding: "):
+             image, label = batch
+
+             image = image.to(device)
+
+             h = encode_fn(image)
+
+             h = h.cpu().numpy()
+             label = label.numpy()
+
+             data.extend(list(zip(h, label)))
+
+         data_name = f"{prefix}_{op}.pkl"
+         with open(data_name, "wb") as f:
+             pickle.dump(data, f)
+
+
+ def main(args):
+     model_name = args.model
+     dataset_name = args.dataset
+
+     encode(model_name, dataset_name)
+
+
+ if __name__ == "__main__":
+     args = config()
+     main(args)
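Besides the CLI (python -m dataset_lib.encode --model ... --dataset ... from the repository root), encode can be called directly, as make_dataset.py below does. A sketch with one of the models from supported_models.txt:

    from dataset_lib.encode import encode

    # "/" in the model name becomes "-" in the output prefix, yielding
    # imagenette_clip:ViT-B-32_train.pkl and imagenette_clip:ViT-B-32_val.pkl
    encode("clip:ViT-B/32", "imagenette")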
dataset_lib/models.py ADDED
@@ -0,0 +1,49 @@
+ import os
+
+ import clip
+ import open_clip
+ import torch
+ import torch.amp
+
+ SUPPORTED_MODELS = {}
+ workdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ with open(os.path.join(workdir, "supported_models.txt"), "r") as f:
+     for line in f:
+         model, model_path = line.strip().split(",")
+         SUPPORTED_MODELS[model] = model_path
+
+
+ def get_transform_and_encoding_fn(
+     model_name, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ ):
+     # check "open_clip" first, since "clip" is a substring of "open_clip"
+     if "open_clip" in model_name:
+         return open_clip_get_transform_and_encoding_fn(model_name, device=device)
+     if "clip" in model_name:
+         return clip_get_transform_and_encoding_fn(model_name, device=device)
+
+
+ def open_clip_get_transform_and_encoding_fn(model_name, device):
+     model, _, preprocess = open_clip.create_model_and_transforms(
+         SUPPORTED_MODELS[model_name], device=device
+     )
+
+     @torch.no_grad()
+     @torch.cuda.amp.autocast()
+     def encode_fn(image):
+         image_features = model.encode_image(image)
+         return image_features / torch.linalg.norm(image_features, dim=-1, keepdim=True)
+
+     return preprocess, encode_fn
+
+
+ def clip_get_transform_and_encoding_fn(model_name, device):
+     backbone_name = model_name.split(":")[-1]
+     model, preprocess = clip.load(backbone_name, device=device)
+
+     @torch.no_grad()
+     @torch.cuda.amp.autocast()
+     def encode_fn(image):
+         image_features = model.encode_image(image)
+         return image_features / torch.linalg.norm(image_features, dim=-1, keepdim=True)
+
+     return preprocess, encode_fn
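A sketch of how the returned pair composes; the input resolution (224) and embedding width (512 for ViT-B/32) are properties of the named checkpoint, and "example.jpg" is a placeholder:

    import torch
    from PIL import Image
    from dataset_lib.models import get_transform_and_encoding_fn

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    preprocess, encode_fn = get_transform_and_encoding_fn("clip:ViT-B/32", device=device)

    image = preprocess(Image.open("example.jpg")).unsqueeze(0).to(device)  # (1, 3, 224, 224)
    h = encode_fn(image)  # (1, 512); rows are L2-normalized, so cosine similarity is a dot product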
imagenette_clip:ViT-B-32_train.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19f3c3f597d1aaf7579be3285f9bc83337b9eff2275c411fbfbd8ebd6efa7746
+ size 36858462
imagenette_clip:ViT-B-32_val.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13c1c0e3e22f3446e6bcbcb33164ee290a23dbfedd5b30cc82148e4db6cc3d6a
+ size 12689531
imagenette_clip:ViT-L-14_train.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5adb5c53898f50e957028da4314af87506165f4aadd74b062d271a9dcf5f2f4e
+ size 36920933
imagenette_clip:ViT-L-14_val.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0c31fb9e428bf448f2c85e1596887e74448e48e656530289db5d020bd9327d6
+ size 12711046
imagenette_open_clip:ViT-B-32_train.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c946ac593a1681cbae7fe475803183999b9085270e6c8555cfd8092caeba518
+ size 36858458
imagenette_open_clip:ViT-B-32_val.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:42f3a6c1e00468e22b65736ae9ce17db3299860454ca81c51c831c4147f55928
+ size 12689529
imagenette_open_clip:ViT-L-14_train.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d8c47412b0cfbbd74175d695e997b7c8f2d032988c8466c625ee33d3c15e3f4
+ size 36920931
imagenette_open_clip:ViT-L-14_val.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23b659cd74dfb48e5b79d633833ac3799734c4b653b0640dc2c3e5aea287cca0
+ size 12711042
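These are Git LFS pointers; each pickle stores the output of encode above, i.e. a list of (embedding, label) pairs. A minimal loading sketch (file name from this commit; shapes assume the 3,925-image val split and 512-dim ViT-B-32 embeddings), usable after git lfs pull fetches the blobs:

    import pickle
    import numpy as np

    with open("imagenette_open_clip:ViT-B-32_val.pkl", "rb") as f:
        pairs = pickle.load(f)

    h = np.stack([e for e, _ in pairs])       # (3925, 512) embeddings
    labels = np.array([y for _, y in pairs])  # (3925,) class indices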
make_dataset.py ADDED
@@ -0,0 +1,9 @@
+ from itertools import product
+
+ from dataset_lib.datasets import SUPPORTED_DATASETS
+ from dataset_lib.encode import encode
+ from dataset_lib.models import SUPPORTED_MODELS
+
+ if __name__ == "__main__":
+     for model, dataset in product(SUPPORTED_MODELS.keys(), SUPPORTED_DATASETS):
+         encode(model, dataset)
supported_datasets.txt ADDED
@@ -0,0 +1 @@
+ imagenette
supported_models.txt ADDED
@@ -0,0 +1,4 @@
+ open_clip:ViT-B-32,hf-hub:laion/CLIP-ViT-B-32-laion2B-s34B-b79K
+ open_clip:ViT-L-14,hf-hub:laion/CLIP-ViT-L-14-laion2B-s32B-b82K
+ clip:ViT-B/32,
+ clip:ViT-L/14,