afiliot committed (verified)
Commit 489cfa6 · 1 Parent(s): 818c368

Update README.md

Files changed (1): README.md (+2 −116)
README.md CHANGED
@@ -76,127 +76,13 @@ For each tile, we provide the original slide id (`slide_id`), tile id (`tile_id`
 
 # How to extract features
 
- The following code snippet allows you to extract features with your own feature extractor.
- 91 folders will be created, each named by the `slide_id` and containing a `features.npy` file.
- This feature file is a numpy array of shape (16278, 3+d), where d is the output dimension of your model and 3 corresponds to `(deepzoom_level, x_coordinate, y_coordinate)`.
-
 > [!IMPORTANT]
- > Tile coordinates are in the same order for each slide in the dataset. No additional sorting is required to compare feature matrices between different slides (the first element of each matrix corresponds to the same tile location).
+ > 🎉 Check [plismbench](https://github.com/owkin/plism-benchmark) to perform the feature extraction of the PLISM dataset and run our robustness benchmark 🎉
 >
+ >
 
 2h30 and roughly 10 GB of storage are necessary to extract all features with a ViT-B model, 16 CPUs and one Nvidia T4 (16 GB).
 
- > [!IMPORTANT]
- > 🎉 We plan to release a dedicated GitHub repository to properly extract features and compute metrics as done in (Filiot et al., 2025).
- >
- >
-
- ```python
- # Generic libraries
- from __future__ import annotations
- from math import ceil
- from pathlib import Path
- from PIL import Image
- from loguru import logger
- from tqdm import tqdm
-
- # Tensor-related libraries
- import numpy as np
- import torch
- from torch.utils.data import DataLoader
- import datasets
-
- # You first need to log in with your HF token
- # from huggingface_hub import login
- # login()
-
- # Set your PIL.Image transform and embedding model
- # transform = ...  # torchvision transform converting a PIL image into a tensor
- # model = ...      # torch.nn.Module outputting a tensor of features of shape (batch_size, features_dimension)
-
- # You can tweak the batch size depending on your hardware or model
- batch_size = 32
- num_slides = 91
- num_tiles = 16278
- device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
- # Set your export directory
- export_dir = Path("/path/to/your/export/directory/")
-
-
- def collate(batch: list[dict[str, str | Image.Image]]) -> tuple[list[str], list[str], torch.Tensor]:
-     """Return slide ids, tile ids and transformed images."""
-     slide_ids = [b["slide_id"] for b in batch]
-     tile_ids = [b["tile_id"] for b in batch]
-     imgs = torch.stack([transform(b["png"]) for b in batch], dim=0)
-     return (slide_ids, tile_ids, imgs)
-
-
- def process_imgs(imgs: torch.Tensor, tile_ids: list[str]) -> np.ndarray:
-     """Perform inference on input (already transformed) images."""
-     with torch.inference_mode():
-         batch_features = model(imgs.to(device)).squeeze().cpu().numpy()  # (N_tiles, d) numpy array
-     # Each tile id encodes (deepzoom_level, x_coordinate, y_coordinate)
-     batch_tiles_coordinates = np.array([tile_id.split("_")[1:] for tile_id in tile_ids]).astype(int)  # (N_tiles, 3) numpy array
-     batch_stack = np.concatenate([batch_tiles_coordinates, batch_features], axis=1)
-     return batch_stack
-
-
- def save_features(slide_features: list[np.ndarray], slide_id: str):
-     """Save features to disk."""
-     slide_features_export_dir = export_dir / slide_id
-     slide_features_export_path = slide_features_export_dir / "features.npy"
-     slide_features_export_dir.mkdir(exist_ok=True, parents=True)
-     output_slide_features = np.concatenate(slide_features, axis=0).astype(np.float32)
-     slide_num_tiles = output_slide_features.shape[0]
-     assert slide_num_tiles == num_tiles, f"Output features for slide {slide_id} contain {slide_num_tiles} tiles instead of {num_tiles}."
-     np.save(slide_features_export_path, output_slide_features)
-     logger.success(f"Successfully saved features for slide: {slide_id}")
-
-
- # Create the dataset and dataloader without downloading the whole dataset to disk (`streaming=True`).
- # The dataset is sorted by slide_id, meaning that the first 16278 indexes belong to the first slide,
- # then 16278:32556 to the second slide, etc.
- dataset = datasets.load_dataset("owkin/plism-dataset-tiles", split="train", streaming=True)
- dataloader = DataLoader(
-     dataset, batch_size=batch_size, collate_fn=collate, num_workers=0, pin_memory=True, shuffle=False
- )
-
- # Iterate over the full dataset and export features each time 16278 input images have been processed
- slide_features = []
- current_num_tiles = 0
-
- for (slide_ids, tile_ids, imgs) in tqdm(
-     dataloader,
-     total=ceil(num_slides * num_tiles / batch_size),
-     desc="Extracting features",
- ):
-     reference_slide_id = slide_ids[0]
-
-     # If all tiles in the batch come from the same slide, add the batch features to the running list
-     if all(slide_id == reference_slide_id for slide_id in slide_ids):
-         batch_stack = process_imgs(imgs, tile_ids)
-         slide_features.append(batch_stack)
-         # For the very last slide, the last batch may be of size < `batch_size`
-         current_num_tiles += batch_stack.shape[0]
-         # If the current batch contains exactly the last tile features for the slide,
-         # export the slide features and reset `slide_features` and `current_num_tiles`
-         if current_num_tiles == num_tiles:
-             save_features(slide_features, slide_id=reference_slide_id)
-             slide_features = []
-             current_num_tiles = 0
-     # Otherwise, the current batch contains tiles from slide N (`reference_slide_id`) and slide N+1
-     else:
-         # Retrieve the first index at which tiles stop belonging to slide N
-         mask = (np.array(slide_ids) != reference_slide_id)
-         idx = mask.argmax()
-         # Only process the tiles from slide N, then export that slide's features
-         batch_stack = process_imgs(imgs[:idx], tile_ids[:idx])
-         slide_features.append(batch_stack)
-         save_features(slide_features, slide_id=reference_slide_id)
-         # Re-initialize `slide_features` and `current_num_tiles` with the tile features from slide N+1
-         slide_features = [process_imgs(imgs[idx:], tile_ids[idx:])]
-         current_num_tiles = len(tile_ids) - idx
- ```
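The removed snippet saves one `features.npy` per slide, with rows aligned across slides as noted above. Below is a minimal sketch of how such files could be loaded and compared between two slides; `SLIDE_A`, `SLIDE_B` and `export_dir` are placeholder names matching the snippet, and the per-tile cosine similarity is only an illustration, not the benchmark metric implemented in plismbench.

```python
import numpy as np

# Placeholder paths: reuse the `export_dir` from the snippet above and two
# hypothetical slide ids taken from the 91 available slides.
export_dir = "/path/to/your/export/directory"
slide_a = np.load(f"{export_dir}/SLIDE_A/features.npy")  # (16278, 3 + d)
slide_b = np.load(f"{export_dir}/SLIDE_B/features.npy")  # (16278, 3 + d)

# First three columns are (deepzoom_level, x_coordinate, y_coordinate);
# the remaining d columns are the model's features.
coords_a, feats_a = slide_a[:, :3], slide_a[:, 3:]
coords_b, feats_b = slide_b[:, :3], slide_b[:, 3:]

# Rows are aligned across slides (see the note above): row i of both
# matrices refers to the same tile location, so no sorting is needed.
assert np.array_equal(coords_a, coords_b)

# Illustrative per-tile cosine similarity between the two slides.
norm_a = feats_a / np.linalg.norm(feats_a, axis=1, keepdims=True)
norm_b = feats_b / np.linalg.norm(feats_b, axis=1, keepdims=True)
cosine_per_tile = (norm_a * norm_b).sum(axis=1)  # shape (16278,)
print(f"Median per-tile cosine similarity: {np.median(cosine_per_tile):.3f}")
```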
 
 # License
 