jacopoteneggi committed (verified)
Commit e582574 · 1 Parent(s): 8f42ccc

Update datasets

README.md CHANGED
@@ -2,17 +2,17 @@
 license: mit
 ---
 
-### Pre-computed CLIP embeddings
+### Pre-computed vision-language model image embeddings
 
-Embeddings are stored as HDF5 datasets with the following structure:
+Embeddings are stored as [Parquet](https://parquet.apache.org/) files with the following structure:
 
 ```python
-<DATASET_NAME>_<MODEL_NAME>_<OP>.hdf5
+<DATASET_NAME>_<OP>_<MODEL_NAME>.parquet
 
 """
 DATASET_NAME: name of the dataset, e.g. "imagenette".
-MODEL_NAME: name of the model, e.g. "open_clip:ViT-B-32".
-OP: split of the dataset (either "train" or "val").
+OP: split of the dataset (either "train" or "test").
+MODEL_NAME: name of the model, e.g. "clip_vit-l_14".
 """
 
 dataset["embedding"] contains the embeddings
@@ -22,12 +22,7 @@ dataset["label"] contains the labels
 To generate the dataset, run
 
 ```bash
-$ python make_dataset.py -h
-usage: make_dataset.py [-h] [--dataset DATASET [DATASET ...]] [--model MODEL [MODEL ...]]
-
-options:
-  --dataset DATASET [DATASET ...]  List of datasets to encode.
-  --model MODEL [MODEL ...]        List of models to use.
+$ python make_dataset.py
 ```
 
 Supported dataset names (see [supported_datasets.txt](supported_datasets.txt)):
@@ -36,19 +31,21 @@ Supported dataset names (see [supported_datasets.txt](supported_datasets.txt)):
 
 Supported model names (see [supported_models.txt](supported_models.txt)):
 
-* `open_clip:ViT-B-32` [[model](https://huggingface.co/laion/CLIP-ViT-B-32-laion2B-s34B-b79K)]
-* `open_clip:ViT-L-14` [[model](https://huggingface.co/laion/CLIP-ViT-L-14-laion2B-s32B-b82K)]
+* `clip:RN50` [[model](https://github.com/openai/CLIP)]
 * `clip:ViT-B/32` [[model](https://github.com/openai/CLIP)]
 * `clip:ViT-L/14` [[model](https://github.com/openai/CLIP)]
-
+* `open_clip:ViT-B-32` [[model](https://huggingface.co/laion/CLIP-ViT-B-32-laion2B-s34B-b79K)]
+* `open_clip:ViT-L-14` [[model](https://huggingface.co/laion/CLIP-ViT-L-14-laion2B-s32B-b82K)]
+* `FLAVA` [[model](https://huggingface.co/facebook/flava-full)]
+* `ALIGN` [[model](https://huggingface.co/kakaobrain/align-base)]
+* `BLIP` [[model](https://huggingface.co/Salesforce/blip-itm-base-coco)]
+
 **References**
 ```
-@misc{teneggi2024ibetdidmean,
-    title={I Bet You Did Not Mean That: Testing Semantic Importance via Betting},
-    author={Jacopo Teneggi and Jeremias Sulam},
+@inproceedings{teneggi24testing,
+    title={Testing Semantic Importance via Betting},
+    author={Teneggi, Jacopo and Sulam, Jeremias},
+    booktitle={The Thirty-eighth Annual Conference on Neural Information Processing Systems},
     year={2024},
-    eprint={2405.19146},
-    archivePrefix={arXiv},
-    primaryClass={stat.ML},
 }
 ```
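For convenience, a minimal sketch (not part of the commit) of how one of the committed Parquet files can be read back, assuming pandas with a Parquet engine such as pyarrow is installed:

```python
import numpy as np
import pandas as pd

# Load the pre-computed CLIP ViT-L/14 embeddings for the imagenette test split.
df = pd.read_parquet("imagenette_test_clip_vit-l_14.parquet")

# "embedding" holds one L2-normalized image embedding per row, "label" the class index.
embedding = np.stack(df["embedding"].to_numpy())
label = df["label"].to_numpy()
print(embedding.shape, label.shape)
```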
dataset_lib/config.py ADDED
@@ -0,0 +1,159 @@
+# SOURCE: https://github.com/Sulam-Group/IBYDMT/blob/main/ibydmt/utils/config.py
+
+import os
+from dataclasses import dataclass
+from enum import Enum
+from itertools import product
+from typing import Any, Iterable, Mapping, Optional, Union
+
+import torch
+from ml_collections import ConfigDict
+from numpy import ndarray
+
+Array = Union[ndarray, torch.Tensor]
+
+
+class TestType(Enum):
+    GLOBAL = "global"
+    GLOBAL_COND = "global_cond"
+    LOCAL_COND = "local_cond"
+
+
+class ConceptType(Enum):
+    DATASET = "dataset"
+    CLASS = "class"
+    IMAGE = "image"
+
+
+@dataclass
+class Constants:
+    WORKDIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+
+class DataConfig(ConfigDict):
+    def __init__(self, config_dict: Optional[Mapping[str, Any]] = None):
+        super().__init__()
+        if config_dict is None:
+            config_dict = {}
+
+        self.dataset: str = config_dict.get("dataset", None)
+        self.backbone: str = config_dict.get("backbone", None)
+        self.bottleneck: str = config_dict.get("bottleneck", None)
+        self.classifier: str = config_dict.get("classifier", None)
+        self.sampler: str = config_dict.get("sampler", None)
+        self.num_concepts: int = config_dict.get("num_concepts", None)
+
+
+class SpliceConfig(ConfigDict):
+    def __init__(self, config_dict: Optional[Mapping[str, Any]] = None):
+        super().__init__()
+        if config_dict is None:
+            config_dict = {}
+
+        self.vocab: str = config_dict.get("vocab", None)
+        self.vocab_size: int = config_dict.get("vocab_size", None)
+        self.l1_penalty: float = config_dict.get("l1_penalty", None)
+
+
+class PCBMConfig(ConfigDict):
+    def __init__(self, config_dict: Optional[Mapping[str, Any]] = None):
+        super().__init__()
+        if config_dict is None:
+            config_dict = {}
+
+        self.alpha: float = config_dict.get("alpha", None)
+        self.l1_ratio: float = config_dict.get("l1_ratio", None)
+
+
+class cKDEConfig(ConfigDict):
+    def __init__(self, config_dict: Optional[Mapping[str, Any]] = None):
+        super().__init__()
+        if config_dict is None:
+            config_dict = {}
+
+        self.metric: str = config_dict.get("metric", None)
+        self.scale_method: str = config_dict.get("scale_method", None)
+        self.scale: float = config_dict.get("scale", None)
+
+
+class TestingConfig(ConfigDict):
+    def __init__(self, config_dict: Optional[Mapping[str, Any]] = None):
+        super().__init__()
+        if config_dict is None:
+            config_dict = {}
+
+        self.significance_level: float = config_dict.get("significance_level", None)
+        self.wealth: str = config_dict.get("wealth", None)
+        self.bet: str = config_dict.get("bet", None)
+        self.kernel: str = config_dict.get("kernel", None)
+        self.kernel_scale_method: str = config_dict.get("kernel_scale_method", None)
+        self.kernel_scale: float = config_dict.get("kernel_scale", None)
+        self.tau_max: int = config_dict.get("tau_max", None)
+        self.images_per_class: int = config_dict.get("images_per_class", None)
+        self.r: int = config_dict.get("r", None)
+        self.cardinality: Iterable[int] = config_dict.get("cardinality", None)
+
+
+class Config(ConfigDict):
+    def __init__(self, config_dict: Optional[Mapping[str, Any]] = None):
+        super().__init__()
+        if config_dict is None:
+            config_dict = {}
+
+        self.name: str = config_dict.get("name", None)
+        self.data = DataConfig(config_dict.get("data", None))
+        self.splice = SpliceConfig(config_dict.get("splice", None))
+        self.pcbm = PCBMConfig(config_dict.get("pcbm", None))
+        self.ckde = cKDEConfig(config_dict.get("ckde", None))
+        self.testing = TestingConfig(config_dict.get("testing", None))
+
+    def backbone_name(self):
+        backbone = self.data.backbone.strip().lower()
+        return backbone.replace("/", "_").replace(":", "_")
+
+    def sweep(self, keys: Iterable[str]):
+        def _get(dict, key):
+            keys = key.split(".")
+            if len(keys) == 1:
+                return dict[keys[0]]
+            else:
+                return _get(dict[keys[0]], ".".join(keys[1:]))
+
+        def _set(dict, key, value):
+            keys = key.split(".")
+            if len(keys) == 1:
+                dict[keys[0]] = value
+            else:
+                _set(dict[keys[0]], ".".join(keys[1:]), value)
+
+        to_iterable = lambda v: v if isinstance(v, list) else [v]
+
+        config_dict = self.to_dict()
+        sweep_values = [_get(config_dict, key) for key in keys]
+        sweep = list(product(*map(to_iterable, sweep_values)))
+
+        configs: Iterable[Config] = []
+        for _sweep in sweep:
+            _config_dict = config_dict.copy()
+            for key, value in zip(keys, _sweep):
+                _set(_config_dict, key, value)
+
+            configs.append(Config(_config_dict))
+        return configs
+
+
+def register_config(name: str):
+    def register(cls: Config):
+        if name in configs:
+            raise ValueError(f"Config {name} is already registered")
+        configs[name] = cls
+
+    return register
+
+
+def get_config(name: str) -> Config:
+    return configs[name]()
+
+
+configs: Mapping[str, Config] = {}
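As a brief usage sketch (not part of the commit), the `sweep` helper expands any list-valued field into one `Config` per value; this is how `make_dataset.py` below iterates over all supported backbones. The values here are illustrative:

```python
from dataset_lib.config import Config

# Illustrative values: sweep() expands a list-valued field into one Config per value.
config = Config()
config.data.dataset = "imagenette"
config.data.backbone = ["clip:ViT-B/32", "open_clip:ViT-B-32", "flava"]

for _config in config.sweep(["data.backbone"]):
    print(_config.data.dataset, _config.data.backbone)
# imagenette clip:ViT-B/32
# imagenette open_clip:ViT-B-32
# imagenette flava
```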
dataset_lib/datasets.py CHANGED
@@ -4,21 +4,17 @@ from PIL import Image
 from torchvision.datasets import VisionDataset
 from torchvision.datasets.folder import find_classes, make_dataset
 
-SUPPORTED_DATASETS = []
-workdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-with open(os.path.join(workdir, "supported_datasets.txt"), "r") as f:
-    for line in f:
-        SUPPORTED_DATASETS.append(line.strip())
-
-data_dir = os.path.join(workdir, "data")
+from dataset_lib.config import Constants as c
 
 
-def get_dataset(dataset, transform=None):
+def get_dataset(dataset, transform=None, workdir=c.WORKDIR):
+    data_dir = os.path.join(workdir, "data")
+
     if dataset == "imagenette":
         root_dir = os.path.join(data_dir, "imagenette2")
         train_dataset = Imagenette(root_dir, train=True, transform=transform)
-        val_dataset = Imagenette(root_dir, train=False, transform=transform)
-        return {"train": train_dataset, "val": val_dataset}
+        test_dataset = Imagenette(root_dir, train=False, transform=transform)
+        return {"train": train_dataset, "test": test_dataset}
 
 
 class Imagenette(VisionDataset):
@@ -39,7 +35,7 @@ class Imagenette(VisionDataset):
         super().__init__(root, transform=transform)
         self.train = train
 
-        self._split = "train" if train else "val"
+        self._split = "train" if train else "test"
        self._image_root = os.path.join(root, self._split)
 
         self.wnids, self.wnid_to_idx = find_classes(self._image_root)
dataset_lib/encode.py CHANGED
@@ -1,48 +1,42 @@
+import logging
 import os
 
-import h5py
-import numpy as np
+import pandas as pd
 import torch
 from torch.utils.data import DataLoader
 from tqdm import tqdm
 
+import dataset_lib.multimodal as multimodal
+from dataset_lib.config import Config
+from dataset_lib.config import Constants as c
 from dataset_lib.datasets import get_dataset
-from dataset_lib.models import get_transform_and_encoding_fn
 
-curr_dir = os.path.dirname(os.path.abspath(__file__))
-workdir = os.path.dirname(curr_dir)
+logger = logging.getLogger(__name__)
 
 
-def encode(
-    dataset_name,
-    model_name,
-    device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
-):
-    preprocess, encode_fn = get_transform_and_encoding_fn(model_name, device=device)
-    datasets = get_dataset(dataset_name, transform=preprocess)
+@torch.no_grad()
+def encode(config: Config, device=c.DEVICE, workdir=c.WORKDIR):
+    logger.info(
+        f"Encoding dataset {config.data.dataset.lower()} with"
+        f" backbone = {config.data.backbone}"
+    )
 
-    prefix = os.path.join(workdir, f"{dataset_name}_{model_name.replace('/', '-')}")
+    datasets = get_dataset(config.data.dataset)
+    encode_image = multimodal.get_image_encoder(config, device=device)
 
-    print(f"Encoding {dataset_name} dataset with {model_name} model")
     for op, dataset in datasets.items():
-        loader = DataLoader(dataset, batch_size=64, num_workers=4)
-
-        embedding, label = [], []
-        for _, data in enumerate(tqdm(loader, desc=f"({op}) encoding: ")):
-            image, _label = data
-
-            image = image.to(device)
-
-            _embedding = encode_fn(image).cpu().numpy().tolist()
-            _label = _label.numpy().tolist()
-
-            embedding.extend(_embedding)
-            label.extend(_label)
-
-        embedding = np.array(embedding)
-        label = np.array(label)
-
-        filename = f"{prefix}_{op}.h5"
-        with h5py.File(filename, "w") as f:
-            f.create_dataset("embedding", data=embedding)
-            f.create_dataset("label", data=label)
+        data = {"embedding": [], "label": []}
+        for image, label in tqdm(dataset, desc=f"Encoding {op}"):
+            embedding = encode_image(image).float()
+            embedding /= torch.linalg.norm(embedding, dim=-1, keepdim=True)
+            embedding = embedding.cpu().numpy()
+
+            data["embedding"].extend(embedding)
+            data["label"].append(label)
+
+        df = pd.DataFrame(data)
+
+        data_path = os.path.join(
+            f"{config.data.dataset.lower()}_{op}_{config.backbone_name()}.parquet"
+        )
+        df.to_parquet(data_path, index=False)
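Because `encode` L2-normalizes every embedding before writing it, cosine similarity between stored rows reduces to a dot product. As an illustrative sketch (not part of the commit, assuming pandas with pyarrow and numpy are available), a simple nearest-neighbor readout over the committed files could look like:

```python
import numpy as np
import pandas as pd

# The stored embeddings are already unit-norm, so X @ Y.T is cosine similarity.
train = pd.read_parquet("imagenette_train_clip_vit-b_32.parquet")
test = pd.read_parquet("imagenette_test_clip_vit-b_32.parquet")

train_z = np.stack(train["embedding"].to_numpy())
test_z = np.stack(test["embedding"].to_numpy())

# 1-nearest-neighbor classification of the test split against the train split.
similarity = test_z @ train_z.T
pred = train["label"].to_numpy()[similarity.argmax(axis=1)]
accuracy = (pred == test["label"].to_numpy()).mean()
print(f"1-NN accuracy: {accuracy:.3f}")
```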
dataset_lib/models.py DELETED
@@ -1,50 +0,0 @@
-import os
-
-import clip
-import open_clip
-import torch
-import torch.amp
-
-SUPPORTED_MODELS = {}
-workdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-with open(os.path.join(workdir, "supported_models.txt"), "r") as f:
-    for line in f:
-        model, model_path = line.strip().split(",")
-        SUPPORTED_MODELS[model] = model_path
-
-
-def get_transform_and_encoding_fn(
-    model_name, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
-):
-    if "open_clip" in model_name:
-        return open_clip_get_transform_and_encoding_fn(model_name, device=device)
-    if "clip" in model_name:
-        return clip_get_transform_and_encoding_fn(model_name, device=device)
-
-
-def open_clip_get_transform_and_encoding_fn(model_name, device):
-    model, _, preprocess = open_clip.create_model_and_transforms(
-        SUPPORTED_MODELS[model_name], device=device
-    )
-    model.eval()
-
-    @torch.no_grad()
-    @torch.cuda.amp.autocast()
-    def encode_fn(image):
-        image_features = model.encode_image(image)
-        return image_features / torch.linalg.norm(image_features, dim=-1, keepdim=True)
-
-    return preprocess, encode_fn
-
-
-def clip_get_transform_and_encoding_fn(model_name, device):
-    backbone_name = model_name.split(":")[-1]
-    model, preprocess = clip.load(backbone_name, device=device)
-
-    @torch.no_grad()
-    @torch.cuda.amp.autocast()
-    def encode_fn(image):
-        image_features = model.encode_image(image)
-        return image_features / torch.linalg.norm(image_features, dim=-1, keepdim=True)
-
-    return preprocess, encode_fn
dataset_lib/multimodal.py ADDED
@@ -0,0 +1,186 @@
+# SOURCE: https://github.com/Sulam-Group/IBYDMT/blob/main/ibydmt/multimodal.py
+
+from abc import abstractmethod
+from typing import Mapping, Optional
+
+import clip
+import open_clip
+from transformers import (
+    AlignModel,
+    AlignProcessor,
+    BlipForImageTextRetrieval,
+    BlipProcessor,
+    FlavaModel,
+    FlavaProcessor,
+)
+
+from dataset_lib.config import Config
+from dataset_lib.config import Constants as c
+
+
+class VisionLanguageModel:
+    def __init__(self, backbone: Optional[str] = None, device=c.DEVICE):
+        pass
+
+    @abstractmethod
+    def encode_text(self, text):
+        pass
+
+    @abstractmethod
+    def encode_image(self, image):
+        pass
+
+
+models: Mapping[str, VisionLanguageModel] = {}
+
+
+def register_model(name):
+    def register(cls: VisionLanguageModel):
+        if name in models:
+            raise ValueError(f"Model {name} is already registered")
+        models[name] = cls
+
+    return register
+
+
+def get_model_name_and_backbone(config: Config):
+    backbone = config.data.backbone.split(":")
+    if len(backbone) == 1:
+        backbone.append(None)
+    return backbone
+
+
+def get_model(config: Config, device=c.DEVICE) -> VisionLanguageModel:
+    model_name, backbone = get_model_name_and_backbone(config)
+    return models[model_name](backbone, device=device)
+
+
+def get_text_encoder(config: Config, device=c.DEVICE):
+    model = get_model(config, device=device)
+    return model.encode_text
+
+
+def get_image_encoder(config: Config, device=c.DEVICE):
+    model = get_model(config, device=device)
+    return model.encode_image
+
+
+@register_model(name="clip")
+class CLIPModel(VisionLanguageModel):
+    def __init__(self, backbone: str, device=c.DEVICE):
+        self.model, self.preprocess = clip.load(backbone, device=device)
+        self.tokenize = clip.tokenize
+
+        self.device = device
+
+    def encode_text(self, text):
+        text = self.tokenize(text).to(self.device)
+        return self.model.encode_text(text)
+
+    def encode_image(self, image):
+        image = self.preprocess(image).unsqueeze(0).to(self.device)
+        return self.model.encode_image(image)
+
+
+@register_model(name="open_clip")
+class OpenClipModel(VisionLanguageModel):
+    OPENCLIP_WEIGHTS = {
+        "ViT-B-32": "laion2b_s34b_b79k",
+        "ViT-L-14": "laion2b_s32b_b82k",
+    }
+
+    def __init__(self, backbone: str, device=c.DEVICE):
+        self.model, _, self.preprocess = open_clip.create_model_and_transforms(
+            backbone, pretrained=self.OPENCLIP_WEIGHTS[backbone], device=device
+        )
+        self.tokenize = open_clip.get_tokenizer(backbone)
+
+        self.device = device
+
+    def encode_text(self, text):
+        text = self.tokenize(text).to(self.device)
+        return self.model.encode_text(text)
+
+    def encode_image(self, image):
+        image = self.preprocess(image).unsqueeze(0).to(self.device)
+        return self.model.encode_image(image)
+
+
+@register_model(name="flava")
+class FLAVAModel(VisionLanguageModel):
+    HF_MODEL = "facebook/flava-full"
+
+    def __init__(self, backbone: Optional[str] = None, device=c.DEVICE):
+        if backbone is None:
+            backbone = self.HF_MODEL
+
+        self.model = FlavaModel.from_pretrained(backbone).to(device)
+        self.processor = FlavaProcessor.from_pretrained(backbone)
+
+        self.device = device
+
+    def encode_text(self, text):
+        text_inputs = self.processor(
+            text=text, return_tensors="pt", padding="max_length", max_length=77
+        )
+        text_inputs = {k: v.to(self.device) for k, v in text_inputs.items()}
+        return self.model.get_text_features(**text_inputs)[:, 0, :]
+
+    def encode_image(self, image):
+        image_inputs = self.processor(images=image, return_tensors="pt")
+        image_inputs = {k: v.to(self.device) for k, v in image_inputs.items()}
+        return self.model.get_image_features(**image_inputs)[:, 0, :]
+
+
+@register_model(name="align")
+class ALIGNModel(VisionLanguageModel):
+    HF_MODEL = "kakaobrain/align-base"
+
+    def __init__(self, backbone: Optional[str] = None, device=c.DEVICE):
+        if backbone is None:
+            backbone = self.HF_MODEL
+
+        self.model = AlignModel.from_pretrained(backbone).to(device)
+        self.processor = AlignProcessor.from_pretrained(backbone)
+
+        self.device = device
+
+    def encode_text(self, text):
+        text_inputs = self.processor(
+            text=text, return_tensors="pt", padding="max_length", max_length=77
+        )
+        text_inputs = {k: v.to(self.device) for k, v in text_inputs.items()}
+        return self.model.get_text_features(**text_inputs)
+
+    def encode_image(self, image):
+        image_inputs = self.processor(images=image, return_tensors="pt")
+        image_inputs = {k: v.to(self.device) for k, v in image_inputs.items()}
+        return self.model.get_image_features(**image_inputs)
+
+
+@register_model(name="blip")
+class BLIPModel(VisionLanguageModel):
+    HF_MODEL = "Salesforce/blip-itm-base-coco"
+
+    def __init__(self, backbone: Optional[str] = None, device=c.DEVICE):
+        if backbone is None:
+            backbone = self.HF_MODEL
+
+        self.model = BlipForImageTextRetrieval.from_pretrained(backbone).to(device)
+        self.processor = BlipProcessor.from_pretrained(backbone)
+
+        self.device = device
+
+    def encode_text(self, text):
+        text_inputs = self.processor(
+            text=text, return_tensors="pt", padding="max_length", max_length=77
+        )
+        text_inputs = {k: v.to(self.device) for k, v in text_inputs.items()}
+        question_embeds = self.model.text_encoder(**text_inputs)[0]
+        return self.model.text_proj(question_embeds[:, 0, :])
+
+    def encode_image(self, image):
+        image_inputs = self.processor(images=image, return_tensors="pt")
+        image_inputs = {k: v.to(self.device) for k, v in image_inputs.items()}
+        image_embeds = self.model.vision_model(**image_inputs)[0]
+        return self.model.vision_proj(image_embeds[:, 0, :])
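A short usage sketch of the registry above (not part of the commit): the backbone string `"<registry name>:<variant>"` selects the model class, and `"example.jpg"` below is a placeholder path, not a file in this repository.

```python
import torch
from PIL import Image

import dataset_lib.multimodal as multimodal
from dataset_lib.config import Config

# Illustrative config: "clip" selects CLIPModel, "ViT-B/32" is passed as its backbone.
config = Config()
config.data.backbone = "clip:ViT-B/32"

model = multimodal.get_model(config)
with torch.no_grad():
    # "example.jpg" is a placeholder path.
    image_embedding = model.encode_image(Image.open("example.jpg"))
    text_embedding = model.encode_text(["a photo of a tench"])
print(image_embedding.shape, text_embedding.shape)
```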
imagenette_open_clip:ViT-B-32_train.h5 DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:46b95fe30a90c980d4af8a150e983de327de80fc7a06e17c2e63e7c4ad7559d1
-size 38862824
imagenette_open_clip:ViT-B-32_val.h5 DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9c19c97aea008a9ccb07fc2a75b877404058de937270cc1c6d151c10926d5cce
-size 16110248
imagenette_open_clip:ViT-L-14_train.h5 DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:10c758c9aa4d7345d04ffdfe16c3832197137c41ff6e218c2fa69a023b7cc399
-size 58255336
imagenette_open_clip:ViT-L-14_val.h5 DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9ce512d7495006818a5c32c5770eeda1ab0c20e1891ac19df33c23de4d52497c
-size 24148648
imagenette_clip:ViT-L-14_val.h5 → imagenette_test_align.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:13f5f737a9305bc6b89fedd6624e143c4decdb0b47761f9603415275cc41878f
-size 24148648
+oid sha256:265db09eb55e5cbef235fa204020fd5ad2badf200038d9e0126374521affada6
+size 10675850
imagenette_clip:ViT-B-32_train.h5 → imagenette_test_blip.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:71ceaca38ea7430915ea046e0212d906c7d2927a7e5d3c6bd479c5ca2ab8ef5a
-size 38862824
+oid sha256:f66d559b9440a59932c811636f5a721e2bdf5bde59a8f632ec6bf2f3d13fae1f
+size 4646191
imagenette_clip:ViT-B-32_val.h5 → imagenette_test_clip_rn50.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1179b7dae485a821eff53c4509eca4326022f6feb5779e3365c01795d2690310
-size 16110248
+oid sha256:67d7d1db195dd85e756b057aa0135b603b462cc9ca9ccfe895b5696c9ba2fada
+size 16684784
imagenette_clip:ViT-L-14_train.h5 → imagenette_test_clip_vit-b_32.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cc1e525af84214c040db67abee6c8db934dda16d85afe6154629667a939411b8
-size 58255336
+oid sha256:3346b56e3d339872ff50f3a183581a781f367fb8077f9910f93492aaec3f729d
+size 8654915
imagenette_test_clip_vit-l_14.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2bf060d1aaeaa225f775be69b42ff73e4d08416f35ffb57e4d18c7c5feefbf0
+size 12668923
imagenette_test_flava.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21df42fc4e7730ceae8c9310e231124106a5e1fb35bacc799b516a933e83ee74
+size 12685761
imagenette_test_open_clip_vit-b-32.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc2283f0f9e3cbedb581143d06dda53a014cea1b4f4ce54a9ef34746c9520fe0
+size 8666033
imagenette_test_open_clip_vit-l-14.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4e4d1337006b6a9333654f3066b1b77844bd1341d3094a3818e1fbea85ff5c1
+size 12685737
imagenette_train_align.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:264aade88e2ec1490d31d2368dfcdeb7a17351a51d92562f9a4904effceac83e
+size 24871704
imagenette_train_blip.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:717255e31ed7afc8019001e068243d7755428430757d249d314f725bb821fe41
+size 10325500
imagenette_train_clip_rn50.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0928498b881718f20350c7632f9846c69f5b1095d8786e2bc8676cf82c7294c1
+size 39397651
imagenette_train_clip_vit-b_32.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23e319b3c1ebb785410417e1aa06276ef3b9b9f1d7d1ea220be69c36e878251a
+size 20012432
imagenette_train_clip_vit-l_14.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5f88fe23098887b34b21b7dc39183d3fb3024f6d0f96a0312af1f0124a00b27
+size 29701980
imagenette_train_flava.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e608ac7a1e915da8800f1b97551e4d94df1d61bd1ae645adca4fd2814e85bb7
+size 29720268
imagenette_train_open_clip_vit-b-32.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ceab9e99ced509a0658ef4557ec87b4cb8a8535b7cb3a1a4c0ff994651994651
+size 20023012
imagenette_train_open_clip_vit-l-14.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7a31312b34936eb226e93ae94a67f18ba4ec6edf4d21196784ef035b99889e1
+size 29720534
make_dataset.py CHANGED
@@ -1,53 +1,65 @@
 import argparse
+import os
 from itertools import product
 
-from dataset_lib.datasets import SUPPORTED_DATASETS
+from dataset_lib.config import Config
+from dataset_lib.config import Constants as c
 from dataset_lib.encode import encode
-from dataset_lib.models import SUPPORTED_MODELS
 
 
-def _dataset_type(dataset):
-    if isinstance(dataset, str):
-        dataset = [dataset]
+def setup_logging(level):
+    import logging
 
-    for d in dataset:
-        assert d in SUPPORTED_DATASETS, ValueError(f"Dataset {d} not supported.")
-    return dataset
-
-
-def _model_type(model):
-    if isinstance(model, str):
-        model = [model]
+    formatter = logging.Formatter(
+        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+    )
+    logging.root.setLevel(level)
+    loggers = [
+        logging.getLogger(name)
+        for name in logging.root.manager.loggerDict
+        if "ibydmt" in name
+    ]
+    for logger in loggers:
+        logger.setLevel(level)
 
-    for m in model:
-        assert m in SUPPORTED_MODELS, ValueError(f"Model {m} not supported.")
-    return model
+    stream_handler = logging.StreamHandler()
+    stream_handler.setFormatter(formatter)
+    logging.root.addHandler(stream_handler)
 
 
 def config():
     parser = argparse.ArgumentParser()
     parser.add_argument(
-        "--dataset",
-        type=_dataset_type,
-        nargs="+",
-        default=SUPPORTED_DATASETS,
-        help="List of datasets to encode.",
+        "--workdir", type=str, default=c.WORKDIR, help="Working directory"
     )
-    parser.add_argument(
-        "--model",
-        type=_model_type,
-        nargs="+",
-        default=list(SUPPORTED_MODELS.keys()),
-        help="List of models to use.",
-    )
-
+    parser.add_argument("--log_level", type=str, default="INFO", help="Logging level")
     return parser.parse_args()
 
 
+def get_support_datasets(workdir: str):
+    with open(os.path.join(workdir, "supported_datasets.txt"), "r") as f:
+        return f.read().splitlines()
+
+
+def get_supported_models(workdir: str):
+    with open(os.path.join(workdir, "supported_models.txt"), "r") as f:
+        return f.read().splitlines()
+
+
 def main(args):
-    dataset, model = args.dataset, args.model
-    for d, m in product(dataset, model):
-        encode(d, m)
+    workdir = args.workdir
+    log_level = args.log_level
+
+    setup_logging(log_level)
+
+    datasets = get_support_datasets(workdir)
+    models = get_supported_models(workdir)
+    for dataset in datasets:
+        config = Config()
+        config.data.dataset = dataset
+        config.data.backbone = models
+        for _config in config.sweep(["data.backbone"]):
+            encode(_config)
 
 
 if __name__ == "__main__":
supported_models.txt CHANGED
@@ -1,4 +1,8 @@
-open_clip:ViT-B-32,hf-hub:laion/CLIP-ViT-B-32-laion2B-s34B-b79K
-open_clip:ViT-L-14,hf-hub:laion/CLIP-ViT-L-14-laion2B-s32B-b82K
-clip:ViT-B/32,
-clip:ViT-L/14,
+clip:RN50
+clip:ViT-B/32
+clip:ViT-L/14
+open_clip:ViT-B-32
+open_clip:ViT-L-14
+flava
+align
+blip
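For reference, a sketch (not part of the commit) of how each entry in `supported_models.txt` maps to the suffix of the Parquet file names through `Config.backbone_name()`:

```python
from dataset_lib.config import Config

# Each supported model string becomes the file-name suffix via Config.backbone_name().
for backbone in ["clip:RN50", "clip:ViT-B/32", "open_clip:ViT-L-14", "flava"]:
    config = Config()
    config.data.backbone = backbone
    print(f"{backbone} -> imagenette_test_{config.backbone_name()}.parquet")
# clip:RN50 -> imagenette_test_clip_rn50.parquet
# clip:ViT-B/32 -> imagenette_test_clip_vit-b_32.parquet
# open_clip:ViT-L-14 -> imagenette_test_open_clip_vit-l-14.parquet
# flava -> imagenette_test_flava.parquet
```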