Update README.md
README.md
@@ -136,8 +136,8 @@ def collate(batch: list[dict[str, str | Image]]) -> tuple[list[str], list[str],
 def process_imgs(imgs: torch.Tensor, tile_ids: list[str]) -> torch.Tensor:
     """Perform inference on input (already transformed) images."""
     with torch.inference_mode():
-        batch_features = model(imgs.to(device)).squeeze()
-        batch_tiles_coordinates = np.array([tile_id.split("_")[1:] for tile_id in tile_ids]).astype(int)
+        batch_features = model(imgs.to(device)).squeeze().cpu().numpy()  # (N_tiles, d) numpy array
+        batch_tiles_coordinates = np.array([tile_id.split("_")[1:] for tile_id in tile_ids]).astype(int)  # (N_tiles, 3) numpy array
         batch_stack = np.concatenate([batch_tiles_coordinates, batch_features], axis=1)
         return batch_stack

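To make the shape bookkeeping in the updated `process_imgs` concrete, here is a quick standalone check with dummy values; the tile id format and the 1536-dimensional embedding are illustrative assumptions, not values taken from the repository. Each row of the returned stack is the three integer tile coordinates followed by the feature vector.

```python
import numpy as np

# Dummy batch of 4 tiles; "slide-A_0_1024_2048" etc. are made-up tile ids whose
# last three underscore-separated fields are the integer tile coordinates.
tile_ids = [
    "slide-A_0_1024_2048",
    "slide-A_0_1024_2304",
    "slide-A_0_1280_2048",
    "slide-A_0_1280_2304",
]
coords = np.array([tile_id.split("_")[1:] for tile_id in tile_ids]).astype(int)  # (4, 3)
features = np.random.rand(4, 1536)  # stand-in for the model output, (4, 1536)

batch_stack = np.concatenate([coords, features], axis=1)
print(batch_stack.shape)  # (4, 1539): 3 coordinates + 1536 feature dimensions
```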
@@ -159,38 +159,41 @@ dataloader = DataLoader(
 )
 
 # Iterate over the full dataset and store features each time 16278 input images have been processed
+
 slide_features = []
+current_num_tiles = 0
 
 for i, (slide_ids, tile_ids, imgs) in tqdm(
     enumerate(dataloader),
     total=ceil(num_slides * num_tiles / batch_size),
     desc="Extracting features"
 ):
-
-    current_slide_id = slide_ids[0]
+    reference_slide_id = slide_ids[0]
 
-    # If
-
-    if all(slide_id == current_slide_id for slide_id in slide_ids):
+    # If we're on the same slide, we just add the batch features to the running list
+    if all(slide_id == reference_slide_id for slide_id in slide_ids):
         batch_stack = process_imgs(imgs, tile_ids)
         slide_features.append(batch_stack)
-
-
-
-
+        current_num_tiles += batch_size
+        # If the current batch contains exactly the last `batch_size` tile features for the slide,
+        # export the slide features and reset `slide_features` and `current_num_tiles`
+        if current_num_tiles == num_tiles:
+            save_features(slide_features, slide_id=reference_slide_id)
             slide_features = []
-
-    # The current batch contains tiles from
+            current_num_tiles = 0
+    # The current batch contains tiles from slide N (`reference_slide_id`) and slide N+1
     else:
-        # We
-        mask = (np.array(slide_ids) !=
+        # We retrieve the first index at which a tile no longer comes from slide N
+        mask = (np.array(slide_ids) != reference_slide_id)
         idx = mask.argmax()
-        #
-        batch_stack = process_imgs(imgs[:idx], tile_ids[:idx]
+        # We only process the tiles up to that index, then export the slide features
+        batch_stack = process_imgs(imgs[:idx], tile_ids[:idx])
         slide_features.append(batch_stack)
-        save_features(slide_features, slide_id=
-        #
+        save_features(slide_features, slide_id=reference_slide_id)
+        # We re-initialize `slide_features` and `current_num_tiles` with, respectively,
+        # the tile features from slide N+1 and the number of tiles already processed for it
         slide_features = [process_imgs(imgs[idx:], tile_ids[idx:])]
+        current_num_tiles = batch_size - idx
 ```
 
 # License
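A note on the boundary handling in the `else` branch above: `mask.argmax()` returns the index of the first tile belonging to the next slide, which is what lets the loop split a mixed batch in two (the `save_features` helper called in the snippet is not shown in this diff and is assumed to be defined elsewhere in the README). A minimal illustration with made-up slide ids:

```python
import numpy as np

# Hypothetical batch that straddles two slides (ids are made up for illustration)
slide_ids = ["slide-A", "slide-A", "slide-A", "slide-B", "slide-B"]
reference_slide_id = slide_ids[0]

mask = (np.array(slide_ids) != reference_slide_id)
idx = mask.argmax()  # index of the first tile from the next slide
print(idx)  # 3 -> tiles [:3] finish slide-A, tiles [3:] start slide-B
```

This works because the `else` branch is only reached when the batch actually contains a second slide; for an all-`slide-A` batch, `argmax()` would return a misleading 0.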