{
"cells": [
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"from batchgenerators.utilities.file_and_folder_operations import join\n",
"from nnunetv2.inference.predict_from_raw_data import nnUNetPredictor\n",
"from nnunetv2.imageio.simpleitk_reader_writer import SimpleITKIO"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# instantiate the nnUNetPredictor\n",
"predictor = nnUNetPredictor(\n",
" tile_step_size=0.5, # 50% overlap between adjacent tiles\n",
" use_gaussian=True, # Apply Gaussian weighting to smooth tile edges\n",
" use_mirroring=True, # Enable test-time augmentation via flipping\n",
" perform_everything_on_device=True, # Perform all steps (preprocessing, prediction) on GPU\n",
" device=torch.device('cuda', 0), # Use the first GPU (cuda:0) for computations\n",
" verbose=False, # Disable detailed output logs during prediction\n",
" verbose_preprocessing=False, # Disable logs during preprocessing\n",
" allow_tqdm=True # Show progress bar during long tasks\n",
")"
]
},
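{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional sketch (not part of the original example): pick the device dynamically so the\n",
"# notebook also runs on machines without a GPU. The predictor name is illustrative.\n",
"device = torch.device('cuda', 0) if torch.cuda.is_available() else torch.device('cpu')\n",
"predictor_auto = nnUNetPredictor(\n",
"    tile_step_size=0.5,\n",
"    use_gaussian=True,\n",
"    use_mirroring=True,\n",
"    perform_everything_on_device=(device.type == 'cuda'),\n",
"    device=device,\n",
"    verbose=False,\n",
"    verbose_preprocessing=False,\n",
"    allow_tqdm=True\n",
")"
]
},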
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# initializes the network architecture, loads the checkpoint\n",
"predictor.initialize_from_trained_model_folder(\n",
" \"./nnUNet_weights\", # Path to the model weights\n",
" use_folds=(0,1,2,3,4), # Use all 5 folds (for cross-validation)\n",
" checkpoint_name='checkpoint_best.pth', # File name of model checkpoints (all must be equal)\n",
")"
]
},
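{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional sanity check (a sketch, assuming the standard nnU-Net export layout): the model\n",
"# folder is expected to contain dataset.json, plans.json and one fold_X subfolder per fold,\n",
"# each holding the checkpoint file named above.\n",
"import os\n",
"print(sorted(os.listdir(\"./nnUNet_weights\")))"
]
},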
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# variant 1: give input and output folders\n",
"# Note: if specific file path is provided, no need for \"_0000.nii.gz\" file ending;\n",
"# Note: if input folder path is provided, the input files MUST include \"_0000.nii.gz\" ending.\n",
"predictor.predict_from_files(\n",
" \"./input_images\", # Input folder with image files\n",
" \"./output_images\", # Output folder for predictions\n",
" save_probabilities=False, # Do not save the predicted probabilities, just the segmentation\n",
" overwrite=False, # Do not overwrite existing results in the output folder\n",
" num_processes_preprocessing=2, # Number of processes for preprocessing\n",
" num_processes_segmentation_export=2, # Number of processes for exporting the segmentation\n",
" folder_with_segs_from_prev_stage=None, # No previous stage segmentations used\n",
" num_parts=1, # Number of parts to divide the prediction task into\n",
" part_id=0 # ID of the current part (only one part in this case)\n",
")"
]
},
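{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch of num_parts / part_id: they shard one prediction job across workers. In practice\n",
"# each part would run in its own process (often one per GPU); the sequential loop here\n",
"# only demonstrates that the parts together cover all input files.\n",
"for part in range(2):\n",
"    predictor.predict_from_files(\n",
"        \"./input_images\",\n",
"        \"./output_images\",\n",
"        save_probabilities=False,\n",
"        overwrite=False,\n",
"        num_processes_preprocessing=2,\n",
"        num_processes_segmentation_export=2,\n",
"        folder_with_segs_from_prev_stage=None,\n",
"        num_parts=2,\n",
"        part_id=part\n",
"    )"
]
},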
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# variant 2.1, use list of files as inputs. Note how we use nested lists!!!\n",
"indir = \"./input_images\" # Input folder with image files\n",
"outdir = \"./output_images\" # Output folder for predictions\n",
"predictor.predict_from_files(\n",
" [[join(indir, 'img0027_0000.nii.gz')]],\n",
" [join(outdir, 'img0027_pred.nii.gz')],\n",
" save_probabilities=False,\n",
" overwrite=False,\n",
" num_processes_preprocessing=2,\n",
" num_processes_segmentation_export=2,\n",
" folder_with_segs_from_prev_stage=None,\n",
" num_parts=1,\n",
" part_id=0\n",
")"
]
},
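{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Why nested lists: one case can consist of several input channels (modalities), and each\n",
"# inner list holds all channel files of one case. The file names below ('_0000'/'_0001')\n",
"# are hypothetical and assume a model trained on two input channels.\n",
"predictor.predict_from_files(\n",
"    [[join(indir, 'caseA_0000.nii.gz'), join(indir, 'caseA_0001.nii.gz')]],\n",
"    [join(outdir, 'caseA_pred.nii.gz')],\n",
"    save_probabilities=False,\n",
"    overwrite=False,\n",
"    num_processes_preprocessing=2,\n",
"    num_processes_segmentation_export=2,\n",
"    folder_with_segs_from_prev_stage=None,\n",
"    num_parts=1,\n",
"    part_id=0\n",
")"
]
},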
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# variant 2.2, returns segmentations (The predicted segmentations will be returned if the output_files are not specified)\n",
"indir = \"./input_images\"\n",
"predicted_segmentations = predictor.predict_from_files(\n",
" [[join(indir, 'img0027_0000.nii.gz')], [join(indir, 'img0027_0000.nii.gz')]], # Example of several input images with repeated sample\n",
" None,\n",
" save_probabilities=False,\n",
" overwrite=True,\n",
" num_processes_preprocessing=2,\n",
" num_processes_segmentation_export=2,\n",
" folder_with_segs_from_prev_stage=None,\n",
" num_parts=1,\n",
" part_id=0)"
]
},
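{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Quick inspection of the returned predictions (a sketch): each entry should be a numpy\n",
"# array of integer class labels, one per input case.\n",
"import numpy as np\n",
"seg = predicted_segmentations[0]\n",
"print(seg.shape, seg.dtype, np.unique(seg))"
]
},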
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# variant 3.1, predict a list of numpy arrays\n",
"indir = \"./input_images\"\n",
"img, props = SimpleITKIO().read_images([join(indir, 'img0027_0000.nii.gz')])\n",
"\n",
"# we do not set output files so that the segmentations will be returned. You can of course also specify output\n",
"# files instead (no return value on that case)\n",
"ret = predictor.predict_from_list_of_npy_arrays(\n",
" [img,],\n",
" None,\n",
" [props,],\n",
" None,\n",
" 2,\n",
" save_probabilities=False,\n",
" num_processes_segmentation_export=2)"
]
},
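{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: returned arrays can be written back to disk with the same IO class used for\n",
"# reading; write_seg reapplies the geometry (spacing, origin, direction) stored in props.\n",
"SimpleITKIO().write_seg(ret[0], \"./output_images/img0027_pred.nii.gz\", props)"
]
},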
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# variant 3.2, predict a single numpy array\n",
"img, props = SimpleITKIO().read_images([\"./input_images/img0027_0000.nii.gz\"])\n",
"ret = predictor.predict_single_npy_array(img, props, None, None, False)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.13"
}
},
"nbformat": 4,
"nbformat_minor": 4
}