gauthamk committed
Commit: 32ba8e8
Parent(s): 1a0cbb4

initial commit

.gitattributes CHANGED
@@ -32,3 +32,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+
+ *.png filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,4 @@
+ __pycache__
+ flagged
+ weights/water_bodies_model.pth
+ venv
app.py ADDED
@@ -0,0 +1,11 @@
+ import gradio as gr
+ from functions import *
+
+ title = "Water Body Segmentation - Image Segmentation PyTorch"
+ examples = ['samples/image1.png', 'samples/image2.png', 'samples/image3.png', 'samples/image4.png', 'samples/image5.png']
+
+ interface = gr.Interface(fn=predict, inputs=gr.Image(type='numpy').style(height=256),
+                          outputs=gr.Image(type="numpy").style(height=256),
+                          examples=examples, title=title, css='.gr-box {background-color: rgb(230 230 230);}')
+
+ interface.launch()
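Note: the Interface above passes each uploaded image to the predict callback imported from functions.py as a numpy array and displays the numpy mask it returns. A minimal sketch of running the demo locally, assuming the pinned dependencies in requirements.txt (notably gradio==3.10.0, whose Image component still exposes the .style() API used here):

    pip install -r requirements.txt
    python app.py    # interface.launch() serves the demo, by default at http://127.0.0.1:7860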
functions.py ADDED
@@ -0,0 +1,36 @@
+ import cv2
+ import numpy as np
+ import onnxruntime as rt
+
+ def resize_preserve_aspect_ratio(image, size):
+     h, w = image.shape[:2]
+     if h > w:
+         image = cv2.resize(image, (size * w // h, size))
+     else:
+         image = cv2.resize(image, (size, size * h // w))
+     return image
+
+ def predict(inp_image):
+
+     model_path = 'weights/model.onnx'
+     inp_dim = inp_image.shape[:2]
+
+     image = cv2.resize(inp_image, (256, 256))
+     image = np.array(image, dtype=np.float32) / 255.0
+     image = np.transpose(image, (2, 0, 1))
+     image = np.expand_dims(image, axis=0)
+
+     session = rt.InferenceSession(model_path)
+     input_name = session.get_inputs()[0].name
+     output_name = session.get_outputs()[0].name
+
+     pred_onx = session.run([output_name], {input_name: image.astype(np.float32)})[0]
+     pred_onx = pred_onx > 0.5
+     pred_onx = pred_onx * 255
+
+     pred_onx = cv2.resize(pred_onx[0, 0].astype(np.uint8), (inp_dim[1], inp_dim[0]))
+     pred_onx = np.expand_dims(pred_onx, axis=2)
+     pred_onx = np.concatenate((pred_onx, pred_onx, pred_onx), axis=2)
+
+     output = resize_preserve_aspect_ratio(pred_onx, 256)
+     return output
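Note: predict runs the ONNX model at a fixed 256x256 input resolution, thresholds the sigmoid output at 0.5, scales the mask to 0/255, and returns a 3-channel uint8 mask whose longest side ends up at 256 px. A minimal sketch of calling it directly, outside Gradio; it assumes the script is run from the repository root so that weights/model.onnx and the samples/ images resolve (cv2.imread yields BGR rather than the RGB array Gradio passes, which does not matter for a binary water mask):

    import cv2
    from functions import predict

    image = cv2.imread('samples/image1.png')   # HxWx3 uint8 array, as predict expects
    mask = predict(image)                      # 3-channel 0/255 uint8 mask, longest side 256 px
    cv2.imwrite('mask.png', mask)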
notebooks/onnx-testing.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
notebooks/torch-to-onnx.ipynb ADDED
@@ -0,0 +1,389 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Converting PyTorch to ONNX"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/home/gautham/.local/lib/python3.10/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+ "  from .autonotebook import tqdm as notebook_tqdm\n"
+ ]
+ }
+ ],
+ "source": [
+ "import torch\n",
+ "from torch import nn\n",
+ "from torch.nn import functional as F"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Defining the model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class DoubleConv(nn.Module):\n",
+ "    def __init__(self, in_channels, out_channels):\n",
+ "        super().__init__()\n",
+ "        self.conv = nn.Sequential(\n",
+ "            nn.Conv2d(in_channels, out_channels, 3, padding=1),\n",
+ "            nn.BatchNorm2d(out_channels),\n",
+ "            nn.ReLU(inplace=True),\n",
+ "            nn.Conv2d(out_channels, out_channels, 3, padding=1),\n",
+ "            nn.BatchNorm2d(out_channels),\n",
+ "            nn.ReLU(inplace=True)\n",
+ "        )\n",
+ "    def forward(self, x):\n",
+ "        return self.conv(x)\n",
+ "\n",
+ "class Down(nn.Module):\n",
+ "    def __init__(self, in_channels, out_channels):\n",
+ "        super().__init__()\n",
+ "        self.down = nn.Sequential(\n",
+ "            nn.MaxPool2d(2),\n",
+ "            DoubleConv(in_channels, out_channels)\n",
+ "        )\n",
+ "    def forward(self, x):\n",
+ "        return self.down(x)\n",
+ "\n",
+ "class Up(nn.Module):\n",
+ "    def __init__(self, in_channels, out_channels, bilinear=False):\n",
+ "        super().__init__()\n",
+ "        if bilinear:\n",
+ "            self.up = nn.Sequential(nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),\n",
+ "                                    nn.Conv2d(in_channels, in_channels // 2, 1))\n",
+ "        else:\n",
+ "            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, 2, stride=2)\n",
+ "        \n",
+ "        self.conv = DoubleConv(in_channels, out_channels)\n",
+ "\n",
+ "    def forward(self, x1, x2):\n",
+ "        x1 = self.up(x1)\n",
+ "        diffY = x2.size()[2] - x1.size()[2]\n",
+ "        diffX = x2.size()[3] - x1.size()[3]\n",
+ "        x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2])\n",
+ "        x = torch.cat([x2, x1], dim=1)\n",
+ "        return self.conv(x)\n",
+ "\n",
+ "class OutConv(nn.Module):\n",
+ "    def __init__(self, in_channels, out_channels):\n",
+ "        super().__init__()\n",
+ "        self.conv = nn.Conv2d(in_channels, out_channels, 1)\n",
+ "        self.sigmoid = nn.Sigmoid()\n",
+ "\n",
+ "    def forward(self, x):\n",
+ "        return self.sigmoid(self.conv(x))\n",
+ "\n",
+ "class UNet(nn.Module):\n",
+ "    def __init__(self, n_channels, n_classes):\n",
+ "        super().__init__()\n",
+ "        self.inc = DoubleConv(n_channels, 64)\n",
+ "        self.down1 = Down(64, 128)\n",
+ "        self.down2 = Down(128, 256)\n",
+ "        self.down3 = Down(256, 512)\n",
+ "        self.down4 = Down(512, 1024)\n",
+ "        self.up1 = Up(1024, 512)\n",
+ "        self.up2 = Up(512, 256)\n",
+ "        self.up3 = Up(256, 128)\n",
+ "        self.up4 = Up(128, 64)\n",
+ "        self.outc = OutConv(64, n_classes)\n",
+ "\n",
+ "    def forward(self, x):\n",
+ "        x1 = self.inc(x)\n",
+ "        x2 = self.down1(x1)\n",
+ "        x3 = self.down2(x2)\n",
+ "        x4 = self.down3(x3)\n",
+ "        x5 = self.down4(x4)\n",
+ "        x = self.up1(x5, x4)\n",
+ "        x = self.up2(x4, x3)\n",
+ "        x = self.up3(x3, x2)\n",
+ "        x = self.up4(x2, x1)\n",
+ "        logits = self.outc(x)\n",
+ "        return logits"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Loading the model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "ename": "RuntimeError",
+ "evalue": "Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU.",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)",
+ "Cell \u001b[0;32mIn [3], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m model \u001b[39m=\u001b[39m UNet(n_channels\u001b[39m=\u001b[39m\u001b[39m3\u001b[39m, n_classes\u001b[39m=\u001b[39m\u001b[39m1\u001b[39m)\n\u001b[0;32m----> 2\u001b[0m model\u001b[39m.\u001b[39mload_state_dict(torch\u001b[39m.\u001b[39;49mload(\u001b[39m'\u001b[39;49m\u001b[39m../weights/water_bodies_model.pth\u001b[39;49m\u001b[39m'\u001b[39;49m))\n\u001b[1;32m 3\u001b[0m model\u001b[39m.\u001b[39mto(\u001b[39m'\u001b[39m\u001b[39mcpu\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[1;32m 5\u001b[0m dummy_input \u001b[39m=\u001b[39m torch\u001b[39m.\u001b[39mrandn(\u001b[39m1\u001b[39m, \u001b[39m3\u001b[39m, \u001b[39m256\u001b[39m, \u001b[39m256\u001b[39m)\n",
+ "File \u001b[0;32m~/.local/lib/python3.10/site-packages/torch/serialization.py:789\u001b[0m, in \u001b[0;36mload\u001b[0;34m(f, map_location, pickle_module, weights_only, **pickle_load_args)\u001b[0m\n\u001b[1;32m 787\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mRuntimeError\u001b[39;00m \u001b[39mas\u001b[39;00m e:\n\u001b[1;32m 788\u001b[0m \u001b[39mraise\u001b[39;00m pickle\u001b[39m.\u001b[39mUnpicklingError(UNSAFE_MESSAGE \u001b[39m+\u001b[39m \u001b[39mstr\u001b[39m(e)) \u001b[39mfrom\u001b[39;00m \u001b[39mNone\u001b[39m\n\u001b[0;32m--> 789\u001b[0m \u001b[39mreturn\u001b[39;00m _load(opened_zipfile, map_location, pickle_module, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mpickle_load_args)\n\u001b[1;32m 790\u001b[0m \u001b[39mif\u001b[39;00m weights_only:\n\u001b[1;32m 791\u001b[0m \u001b[39mtry\u001b[39;00m:\n",
+ "File \u001b[0;32m~/.local/lib/python3.10/site-packages/torch/serialization.py:1131\u001b[0m, in \u001b[0;36m_load\u001b[0;34m(zip_file, map_location, pickle_module, pickle_file, **pickle_load_args)\u001b[0m\n\u001b[1;32m 1129\u001b[0m unpickler \u001b[39m=\u001b[39m UnpicklerWrapper(data_file, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mpickle_load_args)\n\u001b[1;32m 1130\u001b[0m unpickler\u001b[39m.\u001b[39mpersistent_load \u001b[39m=\u001b[39m persistent_load\n\u001b[0;32m-> 1131\u001b[0m result \u001b[39m=\u001b[39m unpickler\u001b[39m.\u001b[39;49mload()\n\u001b[1;32m 1133\u001b[0m torch\u001b[39m.\u001b[39m_utils\u001b[39m.\u001b[39m_validate_loaded_sparse_tensors()\n\u001b[1;32m 1135\u001b[0m \u001b[39mreturn\u001b[39;00m result\n",
+ "File \u001b[0;32m~/.local/lib/python3.10/site-packages/torch/serialization.py:1101\u001b[0m, in \u001b[0;36m_load.<locals>.persistent_load\u001b[0;34m(saved_id)\u001b[0m\n\u001b[1;32m 1099\u001b[0m \u001b[39mif\u001b[39;00m key \u001b[39mnot\u001b[39;00m \u001b[39min\u001b[39;00m loaded_storages:\n\u001b[1;32m 1100\u001b[0m nbytes \u001b[39m=\u001b[39m numel \u001b[39m*\u001b[39m torch\u001b[39m.\u001b[39m_utils\u001b[39m.\u001b[39m_element_size(dtype)\n\u001b[0;32m-> 1101\u001b[0m load_tensor(dtype, nbytes, key, _maybe_decode_ascii(location))\n\u001b[1;32m 1103\u001b[0m \u001b[39mreturn\u001b[39;00m loaded_storages[key]\n",
+ "File \u001b[0;32m~/.local/lib/python3.10/site-packages/torch/serialization.py:1083\u001b[0m, in \u001b[0;36m_load.<locals>.load_tensor\u001b[0;34m(dtype, numel, key, location)\u001b[0m\n\u001b[1;32m 1079\u001b[0m storage \u001b[39m=\u001b[39m zip_file\u001b[39m.\u001b[39mget_storage_from_record(name, numel, torch\u001b[39m.\u001b[39mUntypedStorage)\u001b[39m.\u001b[39mstorage()\u001b[39m.\u001b[39muntyped()\n\u001b[1;32m 1080\u001b[0m \u001b[39m# TODO: Once we decide to break serialization FC, we can\u001b[39;00m\n\u001b[1;32m 1081\u001b[0m \u001b[39m# stop wrapping with TypedStorage\u001b[39;00m\n\u001b[1;32m 1082\u001b[0m loaded_storages[key] \u001b[39m=\u001b[39m torch\u001b[39m.\u001b[39mstorage\u001b[39m.\u001b[39mTypedStorage(\n\u001b[0;32m-> 1083\u001b[0m wrap_storage\u001b[39m=\u001b[39mrestore_location(storage, location),\n\u001b[1;32m 1084\u001b[0m dtype\u001b[39m=\u001b[39mdtype)\n",
+ "File \u001b[0;32m~/.local/lib/python3.10/site-packages/torch/serialization.py:215\u001b[0m, in \u001b[0;36mdefault_restore_location\u001b[0;34m(storage, location)\u001b[0m\n\u001b[1;32m 213\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mdefault_restore_location\u001b[39m(storage, location):\n\u001b[1;32m 214\u001b[0m \u001b[39mfor\u001b[39;00m _, _, fn \u001b[39min\u001b[39;00m _package_registry:\n\u001b[0;32m--> 215\u001b[0m result \u001b[39m=\u001b[39m fn(storage, location)\n\u001b[1;32m 216\u001b[0m \u001b[39mif\u001b[39;00m result \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[1;32m 217\u001b[0m \u001b[39mreturn\u001b[39;00m result\n",
+ "File \u001b[0;32m~/.local/lib/python3.10/site-packages/torch/serialization.py:182\u001b[0m, in \u001b[0;36m_cuda_deserialize\u001b[0;34m(obj, location)\u001b[0m\n\u001b[1;32m 180\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m_cuda_deserialize\u001b[39m(obj, location):\n\u001b[1;32m 181\u001b[0m \u001b[39mif\u001b[39;00m location\u001b[39m.\u001b[39mstartswith(\u001b[39m'\u001b[39m\u001b[39mcuda\u001b[39m\u001b[39m'\u001b[39m):\n\u001b[0;32m--> 182\u001b[0m device \u001b[39m=\u001b[39m validate_cuda_device(location)\n\u001b[1;32m 183\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mgetattr\u001b[39m(obj, \u001b[39m\"\u001b[39m\u001b[39m_torch_load_uninitialized\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39mFalse\u001b[39;00m):\n\u001b[1;32m 184\u001b[0m \u001b[39mwith\u001b[39;00m torch\u001b[39m.\u001b[39mcuda\u001b[39m.\u001b[39mdevice(device):\n",
+ "File \u001b[0;32m~/.local/lib/python3.10/site-packages/torch/serialization.py:166\u001b[0m, in \u001b[0;36mvalidate_cuda_device\u001b[0;34m(location)\u001b[0m\n\u001b[1;32m 163\u001b[0m device \u001b[39m=\u001b[39m torch\u001b[39m.\u001b[39mcuda\u001b[39m.\u001b[39m_utils\u001b[39m.\u001b[39m_get_device_index(location, \u001b[39mTrue\u001b[39;00m)\n\u001b[1;32m 165\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m torch\u001b[39m.\u001b[39mcuda\u001b[39m.\u001b[39mis_available():\n\u001b[0;32m--> 166\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mRuntimeError\u001b[39;00m(\u001b[39m'\u001b[39m\u001b[39mAttempting to deserialize object on a CUDA \u001b[39m\u001b[39m'\u001b[39m\n\u001b[1;32m 167\u001b[0m \u001b[39m'\u001b[39m\u001b[39mdevice but torch.cuda.is_available() is False. \u001b[39m\u001b[39m'\u001b[39m\n\u001b[1;32m 168\u001b[0m \u001b[39m'\u001b[39m\u001b[39mIf you are running on a CPU-only machine, \u001b[39m\u001b[39m'\u001b[39m\n\u001b[1;32m 169\u001b[0m \u001b[39m'\u001b[39m\u001b[39mplease use torch.load with map_location=torch.device(\u001b[39m\u001b[39m\\'\u001b[39;00m\u001b[39mcpu\u001b[39m\u001b[39m\\'\u001b[39;00m\u001b[39m) \u001b[39m\u001b[39m'\u001b[39m\n\u001b[1;32m 170\u001b[0m \u001b[39m'\u001b[39m\u001b[39mto map your storages to the CPU.\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[1;32m 171\u001b[0m device_count \u001b[39m=\u001b[39m torch\u001b[39m.\u001b[39mcuda\u001b[39m.\u001b[39mdevice_count()\n\u001b[1;32m 172\u001b[0m \u001b[39mif\u001b[39;00m device \u001b[39m>\u001b[39m\u001b[39m=\u001b[39m device_count:\n",
+ "\u001b[0;31mRuntimeError\u001b[0m: Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU."
+ ]
+ }
+ ],
+ "source": [
+ "model = UNet(n_channels=3, n_classes=1)\n",
+ "model.load_state_dict(torch.load('../weights/water_bodies_model.pth'))\n",
+ "\n",
+ "dummy_input = torch.randn(1, 3, 256, 256)\n",
+ "\n",
+ "model.eval()\n",
+ "torch_out = model(dummy_input)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Converting to ONNX"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Exported graph: graph(%input : Float(*, 3, 256, 256, strides=[196608, 65536, 256, 1], requires_grad=0, device=cpu),\n",
+ " %up4.up.weight : Float(128, 64, 2, 2, strides=[256, 4, 2, 1], requires_grad=1, device=cpu),\n",
+ " %up4.up.bias : Float(64, strides=[1], requires_grad=1, device=cpu),\n",
+ " %outc.conv.weight : Float(1, 64, 1, 1, strides=[64, 1, 1, 1], requires_grad=1, device=cpu),\n",
+ " %outc.conv.bias : Float(1, strides=[1], requires_grad=1, device=cpu),\n",
+ " %onnx::Conv_226 : Float(64, 3, 3, 3, strides=[27, 9, 3, 1], requires_grad=0, device=cpu),\n",
+ " %onnx::Conv_227 : Float(64, strides=[1], requires_grad=0, device=cpu),\n",
+ " %onnx::Conv_229 : Float(64, 64, 3, 3, strides=[576, 9, 3, 1], requires_grad=0, device=cpu),\n",
+ " %onnx::Conv_230 : Float(64, strides=[1], requires_grad=0, device=cpu),\n",
+ " %onnx::Conv_232 : Float(128, 64, 3, 3, strides=[576, 9, 3, 1], requires_grad=0, device=cpu),\n",
+ " %onnx::Conv_233 : Float(128, strides=[1], requires_grad=0, device=cpu),\n",
+ " %onnx::Conv_235 : Float(128, 128, 3, 3, strides=[1152, 9, 3, 1], requires_grad=0, device=cpu),\n",
+ " %onnx::Conv_236 : Float(128, strides=[1], requires_grad=0, device=cpu),\n",
+ " %onnx::Conv_238 : Float(64, 128, 3, 3, strides=[1152, 9, 3, 1], requires_grad=0, device=cpu),\n",
+ " %onnx::Conv_239 : Float(64, strides=[1], requires_grad=0, device=cpu),\n",
+ " %onnx::Conv_241 : Float(64, 64, 3, 3, strides=[576, 9, 3, 1], requires_grad=0, device=cpu),\n",
+ " %onnx::Conv_242 : Float(64, strides=[1], requires_grad=0, device=cpu)):\n",
+ " %/inc/conv/conv.0/Conv_output_0 : Float(*, 64, 256, 256, strides=[4194304, 65536, 256, 1], requires_grad=1, device=cpu) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[3, 3], pads=[1, 1, 1, 1], strides=[1, 1], onnx_name=\"/inc/conv/conv.0/Conv\"](%input, %onnx::Conv_226, %onnx::Conv_227), scope: __main__.UNet::/__main__.DoubleConv::inc/torch.nn.modules.container.Sequential::conv/torch.nn.modules.conv.Conv2d::conv.0 # /home/gautham/.local/lib/python3.10/site-packages/torch/nn/modules/conv.py:458:0\n",
+ " %/inc/conv/conv.2/Relu_output_0 : Float(*, 64, 256, 256, strides=[4194304, 65536, 256, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name=\"/inc/conv/conv.2/Relu\"](%/inc/conv/conv.0/Conv_output_0), scope: __main__.UNet::/__main__.DoubleConv::inc/torch.nn.modules.container.Sequential::conv/torch.nn.modules.activation.ReLU::conv.2 # /home/gautham/.local/lib/python3.10/site-packages/torch/nn/functional.py:1453:0\n",
+ " %/inc/conv/conv.3/Conv_output_0 : Float(*, 64, 256, 256, strides=[4194304, 65536, 256, 1], requires_grad=1, device=cpu) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[3, 3], pads=[1, 1, 1, 1], strides=[1, 1], onnx_name=\"/inc/conv/conv.3/Conv\"](%/inc/conv/conv.2/Relu_output_0, %onnx::Conv_229, %onnx::Conv_230), scope: __main__.UNet::/__main__.DoubleConv::inc/torch.nn.modules.container.Sequential::conv/torch.nn.modules.conv.Conv2d::conv.3 # /home/gautham/.local/lib/python3.10/site-packages/torch/nn/modules/conv.py:458:0\n",
+ " %/inc/conv/conv.5/Relu_output_0 : Float(*, 64, 256, 256, strides=[4194304, 65536, 256, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name=\"/inc/conv/conv.5/Relu\"](%/inc/conv/conv.3/Conv_output_0), scope: __main__.UNet::/__main__.DoubleConv::inc/torch.nn.modules.container.Sequential::conv/torch.nn.modules.activation.ReLU::conv.5 # /home/gautham/.local/lib/python3.10/site-packages/torch/nn/functional.py:1453:0\n",
+ " %/down1/down/down.0/MaxPool_output_0 : Float(*, 64, 128, 128, strides=[1048576, 16384, 128, 1], requires_grad=1, device=cpu) = onnx::MaxPool[ceil_mode=0, kernel_shape=[2, 2], pads=[0, 0, 0, 0], strides=[2, 2], onnx_name=\"/down1/down/down.0/MaxPool\"](%/inc/conv/conv.5/Relu_output_0), scope: __main__.UNet::/__main__.Down::down1/torch.nn.modules.container.Sequential::down/torch.nn.modules.pooling.MaxPool2d::down.0 # /home/gautham/.local/lib/python3.10/site-packages/torch/nn/functional.py:780:0\n",
+ " %/down1/down/down.1/conv/conv.0/Conv_output_0 : Float(*, 128, 128, 128, strides=[2097152, 16384, 128, 1], requires_grad=1, device=cpu) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[3, 3], pads=[1, 1, 1, 1], strides=[1, 1], onnx_name=\"/down1/down/down.1/conv/conv.0/Conv\"](%/down1/down/down.0/MaxPool_output_0, %onnx::Conv_232, %onnx::Conv_233), scope: __main__.UNet::/__main__.Down::down1/torch.nn.modules.container.Sequential::down/__main__.DoubleConv::down.1/torch.nn.modules.container.Sequential::conv/torch.nn.modules.conv.Conv2d::conv.0 # /home/gautham/.local/lib/python3.10/site-packages/torch/nn/modules/conv.py:458:0\n",
+ " %/down1/down/down.1/conv/conv.2/Relu_output_0 : Float(*, 128, 128, 128, strides=[2097152, 16384, 128, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name=\"/down1/down/down.1/conv/conv.2/Relu\"](%/down1/down/down.1/conv/conv.0/Conv_output_0), scope: __main__.UNet::/__main__.Down::down1/torch.nn.modules.container.Sequential::down/__main__.DoubleConv::down.1/torch.nn.modules.container.Sequential::conv/torch.nn.modules.activation.ReLU::conv.2 # /home/gautham/.local/lib/python3.10/site-packages/torch/nn/functional.py:1453:0\n",
+ " %/down1/down/down.1/conv/conv.3/Conv_output_0 : Float(*, 128, 128, 128, strides=[2097152, 16384, 128, 1], requires_grad=1, device=cpu) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[3, 3], pads=[1, 1, 1, 1], strides=[1, 1], onnx_name=\"/down1/down/down.1/conv/conv.3/Conv\"](%/down1/down/down.1/conv/conv.2/Relu_output_0, %onnx::Conv_235, %onnx::Conv_236), scope: __main__.UNet::/__main__.Down::down1/torch.nn.modules.container.Sequential::down/__main__.DoubleConv::down.1/torch.nn.modules.container.Sequential::conv/torch.nn.modules.conv.Conv2d::conv.3 # /home/gautham/.local/lib/python3.10/site-packages/torch/nn/modules/conv.py:458:0\n",
+ " %/down1/down/down.1/conv/conv.5/Relu_output_0 : Float(*, 128, 128, 128, strides=[2097152, 16384, 128, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name=\"/down1/down/down.1/conv/conv.5/Relu\"](%/down1/down/down.1/conv/conv.3/Conv_output_0), scope: __main__.UNet::/__main__.Down::down1/torch.nn.modules.container.Sequential::down/__main__.DoubleConv::down.1/torch.nn.modules.container.Sequential::conv/torch.nn.modules.activation.ReLU::conv.5 # /home/gautham/.local/lib/python3.10/site-packages/torch/nn/functional.py:1453:0\n",
+ " %/up4/up/ConvTranspose_output_0 : Float(*, 64, 256, 256, strides=[4194304, 65536, 256, 1], requires_grad=0, device=cpu) = onnx::ConvTranspose[dilations=[1, 1], group=1, kernel_shape=[2, 2], pads=[0, 0, 0, 0], strides=[2, 2], onnx_name=\"/up4/up/ConvTranspose\"](%/down1/down/down.1/conv/conv.5/Relu_output_0, %up4.up.weight, %up4.up.bias), scope: __main__.UNet::/__main__.Up::up4/torch.nn.modules.conv.ConvTranspose2d::up # /home/gautham/.local/lib/python3.10/site-packages/torch/nn/modules/conv.py:953:0\n",
+ " %/up4/Shape_output_0 : Long(4, strides=[1], device=cpu) = onnx::Shape[onnx_name=\"/up4/Shape\"](%/inc/conv/conv.5/Relu_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:37:0\n",
+ " %/up4/Constant_output_0 : Long(device=cpu) = onnx::Constant[value={2}, onnx_name=\"/up4/Constant\"](), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:37:0\n",
+ " %/up4/Gather_output_0 : Long(device=cpu) = onnx::Gather[axis=0, onnx_name=\"/up4/Gather\"](%/up4/Shape_output_0, %/up4/Constant_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:37:0\n",
+ " %/up4/Shape_1_output_0 : Long(4, strides=[1], device=cpu) = onnx::Shape[onnx_name=\"/up4/Shape_1\"](%/up4/up/ConvTranspose_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:38:0\n",
+ " %/up4/Constant_1_output_0 : Long(device=cpu) = onnx::Constant[value={2}, onnx_name=\"/up4/Constant_1\"](), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:38:0\n",
+ " %/up4/Gather_1_output_0 : Long(device=cpu) = onnx::Gather[axis=0, onnx_name=\"/up4/Gather_1\"](%/up4/Shape_1_output_0, %/up4/Constant_1_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:38:0\n",
+ " %/up4/Sub_output_0 : Long(requires_grad=0, device=cpu) = onnx::Sub[onnx_name=\"/up4/Sub\"](%/up4/Gather_output_0, %/up4/Gather_1_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:38:0\n",
+ " %/up4/Shape_2_output_0 : Long(4, strides=[1], device=cpu) = onnx::Shape[onnx_name=\"/up4/Shape_2\"](%/inc/conv/conv.5/Relu_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:38:0\n",
+ " %/up4/Constant_2_output_0 : Long(device=cpu) = onnx::Constant[value={3}, onnx_name=\"/up4/Constant_2\"](), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:38:0\n",
+ " %/up4/Gather_2_output_0 : Long(device=cpu) = onnx::Gather[axis=0, onnx_name=\"/up4/Gather_2\"](%/up4/Shape_2_output_0, %/up4/Constant_2_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:38:0\n",
+ " %/up4/Shape_3_output_0 : Long(4, strides=[1], device=cpu) = onnx::Shape[onnx_name=\"/up4/Shape_3\"](%/up4/up/ConvTranspose_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:38:0\n",
+ " %/up4/Constant_3_output_0 : Long(device=cpu) = onnx::Constant[value={3}, onnx_name=\"/up4/Constant_3\"](), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:38:0\n",
+ " %/up4/Gather_3_output_0 : Long(device=cpu) = onnx::Gather[axis=0, onnx_name=\"/up4/Gather_3\"](%/up4/Shape_3_output_0, %/up4/Constant_3_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:38:0\n",
+ " %/up4/Sub_1_output_0 : Long(requires_grad=0, device=cpu) = onnx::Sub[onnx_name=\"/up4/Sub_1\"](%/up4/Gather_2_output_0, %/up4/Gather_3_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:38:0\n",
+ " %/up4/Constant_4_output_0 : Long(requires_grad=0, device=cpu) = onnx::Constant[value={2}, onnx_name=\"/up4/Constant_4\"](), scope: __main__.UNet::/__main__.Up::up4 # /home/gautham/.local/lib/python3.10/site-packages/torch/_tensor.py:867:0\n",
+ " %/up4/Div_output_0 : Long(device=cpu) = onnx::Div[onnx_name=\"/up4/Div\"](%/up4/Sub_1_output_0, %/up4/Constant_4_output_0), scope: __main__.UNet::/__main__.Up::up4 # /home/gautham/.local/lib/python3.10/site-packages/torch/_tensor.py:867:0\n",
+ " %/up4/Cast_output_0 : Long(device=cpu) = onnx::Cast[to=7, onnx_name=\"/up4/Cast\"](%/up4/Div_output_0), scope: __main__.UNet::/__main__.Up::up4 # /home/gautham/.local/lib/python3.10/site-packages/torch/_tensor.py:867:0\n",
+ " %/up4/Cast_1_output_0 : Long(requires_grad=0, device=cpu) = onnx::Cast[to=7, onnx_name=\"/up4/Cast_1\"](%/up4/Cast_output_0), scope: __main__.UNet::/__main__.Up::up4 # /home/gautham/.local/lib/python3.10/site-packages/torch/_tensor.py:867:0\n",
+ " %/up4/Sub_2_output_0 : Long(requires_grad=0, device=cpu) = onnx::Sub[onnx_name=\"/up4/Sub_2\"](%/up4/Sub_1_output_0, %/up4/Cast_1_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/Constant_5_output_0 : Long(requires_grad=0, device=cpu) = onnx::Constant[value={2}, onnx_name=\"/up4/Constant_5\"](), scope: __main__.UNet::/__main__.Up::up4 # /home/gautham/.local/lib/python3.10/site-packages/torch/_tensor.py:867:0\n",
+ " %/up4/Div_1_output_0 : Long(device=cpu) = onnx::Div[onnx_name=\"/up4/Div_1\"](%/up4/Sub_output_0, %/up4/Constant_5_output_0), scope: __main__.UNet::/__main__.Up::up4 # /home/gautham/.local/lib/python3.10/site-packages/torch/_tensor.py:867:0\n",
+ " %/up4/Cast_2_output_0 : Long(device=cpu) = onnx::Cast[to=7, onnx_name=\"/up4/Cast_2\"](%/up4/Div_1_output_0), scope: __main__.UNet::/__main__.Up::up4 # /home/gautham/.local/lib/python3.10/site-packages/torch/_tensor.py:867:0\n",
+ " %/up4/Cast_3_output_0 : Long(requires_grad=0, device=cpu) = onnx::Cast[to=7, onnx_name=\"/up4/Cast_3\"](%/up4/Cast_2_output_0), scope: __main__.UNet::/__main__.Up::up4 # /home/gautham/.local/lib/python3.10/site-packages/torch/_tensor.py:867:0\n",
+ " %/up4/Sub_3_output_0 : Long(requires_grad=0, device=cpu) = onnx::Sub[onnx_name=\"/up4/Sub_3\"](%/up4/Sub_output_0, %/up4/Cast_3_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %onnx::Unsqueeze_175 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={0}]()\n",
+ " %/up4/Unsqueeze_output_0 : Long(1, strides=[1], device=cpu) = onnx::Unsqueeze[onnx_name=\"/up4/Unsqueeze\"](%/up4/Cast_1_output_0, %onnx::Unsqueeze_175), scope: __main__.UNet::/__main__.Up::up4\n",
+ " %onnx::Unsqueeze_177 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={0}]()\n",
+ " %/up4/Unsqueeze_1_output_0 : Long(1, strides=[1], device=cpu) = onnx::Unsqueeze[onnx_name=\"/up4/Unsqueeze_1\"](%/up4/Sub_2_output_0, %onnx::Unsqueeze_177), scope: __main__.UNet::/__main__.Up::up4\n",
+ " %onnx::Unsqueeze_179 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={0}]()\n",
+ " %/up4/Unsqueeze_2_output_0 : Long(1, strides=[1], device=cpu) = onnx::Unsqueeze[onnx_name=\"/up4/Unsqueeze_2\"](%/up4/Cast_3_output_0, %onnx::Unsqueeze_179), scope: __main__.UNet::/__main__.Up::up4\n",
+ " %onnx::Unsqueeze_181 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={0}]()\n",
+ " %/up4/Unsqueeze_3_output_0 : Long(1, strides=[1], device=cpu) = onnx::Unsqueeze[onnx_name=\"/up4/Unsqueeze_3\"](%/up4/Sub_3_output_0, %onnx::Unsqueeze_181), scope: __main__.UNet::/__main__.Up::up4\n",
+ " %/up4/Concat_output_0 : Long(4, strides=[1], device=cpu) = onnx::Concat[axis=0, onnx_name=\"/up4/Concat\"](%/up4/Unsqueeze_output_0, %/up4/Unsqueeze_1_output_0, %/up4/Unsqueeze_2_output_0, %/up4/Unsqueeze_3_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %onnx::Unsqueeze_184 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={0}]()\n",
+ " %/up4/Unsqueeze_4_output_0 : Long(1, strides=[1], device=cpu) = onnx::Unsqueeze[onnx_name=\"/up4/Unsqueeze_4\"](%/up4/Cast_1_output_0, %onnx::Unsqueeze_184), scope: __main__.UNet::/__main__.Up::up4\n",
+ " %onnx::Unsqueeze_186 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={0}]()\n",
+ " %/up4/Unsqueeze_5_output_0 : Long(1, strides=[1], device=cpu) = onnx::Unsqueeze[onnx_name=\"/up4/Unsqueeze_5\"](%/up4/Sub_2_output_0, %onnx::Unsqueeze_186), scope: __main__.UNet::/__main__.Up::up4\n",
+ " %onnx::Unsqueeze_188 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={0}]()\n",
+ " %/up4/Unsqueeze_6_output_0 : Long(1, strides=[1], device=cpu) = onnx::Unsqueeze[onnx_name=\"/up4/Unsqueeze_6\"](%/up4/Cast_3_output_0, %onnx::Unsqueeze_188), scope: __main__.UNet::/__main__.Up::up4\n",
+ " %onnx::Unsqueeze_190 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={0}]()\n",
+ " %/up4/Unsqueeze_7_output_0 : Long(1, strides=[1], device=cpu) = onnx::Unsqueeze[onnx_name=\"/up4/Unsqueeze_7\"](%/up4/Sub_3_output_0, %onnx::Unsqueeze_190), scope: __main__.UNet::/__main__.Up::up4\n",
+ " %/up4/Concat_1_output_0 : Long(4, strides=[1], device=cpu) = onnx::Concat[axis=0, onnx_name=\"/up4/Concat_1\"](%/up4/Unsqueeze_4_output_0, %/up4/Unsqueeze_5_output_0, %/up4/Unsqueeze_6_output_0, %/up4/Unsqueeze_7_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %onnx::Pad_193 : NoneType = prim::Constant(), scope: __main__.UNet::/__main__.Up::up4\n",
+ " %/up4/Constant_6_output_0 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={0}, onnx_name=\"/up4/Constant_6\"](), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/Shape_4_output_0 : Long(1, strides=[1], device=cpu) = onnx::Shape[onnx_name=\"/up4/Shape_4\"](%/up4/Concat_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/Gather_4_output_0 : Long(1, strides=[1], device=cpu) = onnx::Gather[axis=0, onnx_name=\"/up4/Gather_4\"](%/up4/Shape_4_output_0, %/up4/Constant_6_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/Constant_7_output_0 : Long(requires_grad=0, device=cpu) = onnx::Constant[value={8}, onnx_name=\"/up4/Constant_7\"](), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/Sub_4_output_0 : Long(1, strides=[1], device=cpu) = onnx::Sub[onnx_name=\"/up4/Sub_4\"](%/up4/Constant_7_output_0, %/up4/Gather_4_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/Cast_4_output_0 : Long(4, strides=[1], device=cpu) = onnx::Cast[to=7, onnx_name=\"/up4/Cast_4\"](%/up4/Concat_1_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/ConstantOfShape_output_0 : Long(4, strides=[1], device=cpu) = onnx::ConstantOfShape[value={0}, onnx_name=\"/up4/ConstantOfShape\"](%/up4/Sub_4_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/Concat_2_output_0 : Long(8, strides=[1], device=cpu) = onnx::Concat[axis=0, onnx_name=\"/up4/Concat_2\"](%/up4/Cast_4_output_0, %/up4/ConstantOfShape_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/Constant_8_output_0 : Long(2, strides=[1], device=cpu) = onnx::Constant[value=-1 2 [ CPULongType{2} ], onnx_name=\"/up4/Constant_8\"](), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/Reshape_output_0 : Long(4, 2, strides=[2, 1], device=cpu) = onnx::Reshape[allowzero=0, onnx_name=\"/up4/Reshape\"](%/up4/Concat_2_output_0, %/up4/Constant_8_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/Constant_9_output_0 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={0}, onnx_name=\"/up4/Constant_9\"](), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/Constant_10_output_0 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={-1}, onnx_name=\"/up4/Constant_10\"](), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/Constant_11_output_0 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={-9223372036854775807}, onnx_name=\"/up4/Constant_11\"](), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/Constant_12_output_0 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={-1}, onnx_name=\"/up4/Constant_12\"](), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/Slice_output_0 : Long(4, 2, strides=[2, 1], device=cpu) = onnx::Slice[onnx_name=\"/up4/Slice\"](%/up4/Reshape_output_0, %/up4/Constant_10_output_0, %/up4/Constant_11_output_0, %/up4/Constant_9_output_0, %/up4/Constant_12_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/Transpose_output_0 : Long(2, 4, strides=[4, 1], device=cpu) = onnx::Transpose[perm=[1, 0], onnx_name=\"/up4/Transpose\"](%/up4/Slice_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/Constant_13_output_0 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={-1}, onnx_name=\"/up4/Constant_13\"](), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/Reshape_1_output_0 : Long(8, strides=[1], device=cpu) = onnx::Reshape[allowzero=0, onnx_name=\"/up4/Reshape_1\"](%/up4/Transpose_output_0, %/up4/Constant_13_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/Cast_5_output_0 : Long(8, strides=[1], device=cpu) = onnx::Cast[to=7, onnx_name=\"/up4/Cast_5\"](%/up4/Reshape_1_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/Pad_output_0 : Float(*, *, *, *, strides=[4194304, 65536, 256, 1], requires_grad=1, device=cpu) = onnx::Pad[mode=\"constant\", onnx_name=\"/up4/Pad\"](%/up4/up/ConvTranspose_output_0, %/up4/Cast_5_output_0, %onnx::Pad_193), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:39:0\n",
+ " %/up4/Concat_3_output_0 : Float(*, *, 256, 256, strides=[8388608, 65536, 256, 1], requires_grad=1, device=cpu) = onnx::Concat[axis=1, onnx_name=\"/up4/Concat_3\"](%/inc/conv/conv.5/Relu_output_0, %/up4/Pad_output_0), scope: __main__.UNet::/__main__.Up::up4 # /tmp/ipykernel_19565/1651252862.py:40:0\n",
+ " %/up4/conv/conv/conv.0/Conv_output_0 : Float(*, 64, 256, 256, strides=[4194304, 65536, 256, 1], requires_grad=1, device=cpu) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[3, 3], pads=[1, 1, 1, 1], strides=[1, 1], onnx_name=\"/up4/conv/conv/conv.0/Conv\"](%/up4/Concat_3_output_0, %onnx::Conv_238, %onnx::Conv_239), scope: __main__.UNet::/__main__.Up::up4/__main__.DoubleConv::conv/torch.nn.modules.container.Sequential::conv/torch.nn.modules.conv.Conv2d::conv.0 # /home/gautham/.local/lib/python3.10/site-packages/torch/nn/modules/conv.py:458:0\n",
+ " %/up4/conv/conv/conv.2/Relu_output_0 : Float(*, 64, 256, 256, strides=[4194304, 65536, 256, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name=\"/up4/conv/conv/conv.2/Relu\"](%/up4/conv/conv/conv.0/Conv_output_0), scope: __main__.UNet::/__main__.Up::up4/__main__.DoubleConv::conv/torch.nn.modules.container.Sequential::conv/torch.nn.modules.activation.ReLU::conv.2 # /home/gautham/.local/lib/python3.10/site-packages/torch/nn/functional.py:1453:0\n",
+ " %/up4/conv/conv/conv.3/Conv_output_0 : Float(*, 64, 256, 256, strides=[4194304, 65536, 256, 1], requires_grad=1, device=cpu) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[3, 3], pads=[1, 1, 1, 1], strides=[1, 1], onnx_name=\"/up4/conv/conv/conv.3/Conv\"](%/up4/conv/conv/conv.2/Relu_output_0, %onnx::Conv_241, %onnx::Conv_242), scope: __main__.UNet::/__main__.Up::up4/__main__.DoubleConv::conv/torch.nn.modules.container.Sequential::conv/torch.nn.modules.conv.Conv2d::conv.3 # /home/gautham/.local/lib/python3.10/site-packages/torch/nn/modules/conv.py:458:0\n",
+ " %/up4/conv/conv/conv.5/Relu_output_0 : Float(*, 64, 256, 256, strides=[4194304, 65536, 256, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name=\"/up4/conv/conv/conv.5/Relu\"](%/up4/conv/conv/conv.3/Conv_output_0), scope: __main__.UNet::/__main__.Up::up4/__main__.DoubleConv::conv/torch.nn.modules.container.Sequential::conv/torch.nn.modules.activation.ReLU::conv.5 # /home/gautham/.local/lib/python3.10/site-packages/torch/nn/functional.py:1453:0\n",
+ " %/outc/conv/Conv_output_0 : Float(*, 1, 256, 256, strides=[65536, 65536, 256, 1], requires_grad=0, device=cpu) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[1, 1], pads=[0, 0, 0, 0], strides=[1, 1], onnx_name=\"/outc/conv/Conv\"](%/up4/conv/conv/conv.5/Relu_output_0, %outc.conv.weight, %outc.conv.bias), scope: __main__.UNet::/__main__.OutConv::outc/torch.nn.modules.conv.Conv2d::conv # /home/gautham/.local/lib/python3.10/site-packages/torch/nn/modules/conv.py:458:0\n",
+ " %output : Float(*, 1, 256, 256, strides=[65536, 65536, 256, 1], requires_grad=1, device=cpu) = onnx::Sigmoid[onnx_name=\"/outc/sigmoid/Sigmoid\"](%/outc/conv/Conv_output_0), scope: __main__.UNet::/__main__.OutConv::outc/torch.nn.modules.activation.Sigmoid::sigmoid # /home/gautham/.local/lib/python3.10/site-packages/torch/nn/modules/activation.py:294:0\n",
+ " return (%output)\n",
+ "\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/home/gautham/.local/lib/python3.10/site-packages/torch/onnx/utils.py:687: UserWarning: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function. (Triggered internally at ../torch/csrc/jit/passes/onnx/shape_type_inference.cpp:1884.)\n",
+ "  _C._jit_pass_onnx_graph_shape_type_inference(\n",
+ "/home/gautham/.local/lib/python3.10/site-packages/torch/onnx/utils.py:1178: UserWarning: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function. (Triggered internally at ../torch/csrc/jit/passes/onnx/shape_type_inference.cpp:1884.)\n",
+ "  _C._jit_pass_onnx_graph_shape_type_inference(\n"
+ ]
+ }
+ ],
+ "source": [
+ "onnx_path = '../weights/model.onnx'\n",
+ "\n",
+ "torch.onnx.export(model,\n",
+ "                  dummy_input,\n",
+ "                  onnx_path,\n",
+ "                  verbose=True,\n",
+ "                  input_names = ['input'],   # the model's input names\n",
+ "                  output_names = ['output'], # the model's output names\n",
+ "                  dynamic_axes={'input' : {0 : 'batch_size'},    # variable length axes\n",
+ "                                'output' : {0 : 'batch_size'}})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Verifying the ONNX model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import onnx\n",
+ "\n",
+ "onnx_model = onnx.load(onnx_path)\n",
+ "onnx.checker.check_model(onnx_model)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Comparing ONNX Runtime and PyTorch results"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Exported model has been tested with ONNXRuntime, and the result looks good!\n"
+ ]
+ }
+ ],
+ "source": [
+ "import onnxruntime\n",
+ "import numpy as np\n",
+ "\n",
+ "ort_session = onnxruntime.InferenceSession(onnx_path)\n",
+ "\n",
+ "def to_numpy(tensor):\n",
+ "    return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()\n",
+ "\n",
+ "ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(dummy_input)}\n",
+ "ort_outs = ort_session.run(None, ort_inputs)\n",
+ "\n",
+ "np.testing.assert_allclose(to_numpy(torch_out), ort_outs[0], rtol=1e-03, atol=1e-05)\n",
+ "\n",
+ "print(\"Exported model has been tested with ONNXRuntime, and the result looks good!\")\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3.10.6 64-bit",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.6"
+ },
+ "orig_nbformat": 4,
+ "vscode": {
+ "interpreter": {
+ "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1"
+ }
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
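Note: the "Loading the model" cell above fails with the RuntimeError captured in its output because the checkpoint ../weights/water_bodies_model.pth stores CUDA tensors and the notebook is being run on a CPU-only machine. A minimal sketch of the CPU-safe load suggested by that error message, using the same model class and path as the notebook:

    model = UNet(n_channels=3, n_classes=1)
    model.load_state_dict(torch.load('../weights/water_bodies_model.pth', map_location=torch.device('cpu')))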
notebooks/water-body-segmentation-pytorch.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt ADDED
@@ -0,0 +1,67 @@
+ aiohttp==3.8.3
+ aiosignal==1.3.1
+ anyio==3.6.2
+ async-timeout==4.0.2
+ attrs==22.1.0
+ bcrypt==4.0.1
+ certifi==2022.9.24
+ cffi==1.15.1
+ charset-normalizer==2.1.1
+ click==8.1.3
+ coloredlogs==15.0.1
+ contourpy==1.0.6
+ cryptography==38.0.3
+ cycler==0.11.0
+ fastapi==0.87.0
+ ffmpy==0.3.0
+ flatbuffers==22.10.26
+ fonttools==4.38.0
+ frozenlist==1.3.3
+ fsspec==2022.11.0
+ gradio==3.10.0
+ h11==0.12.0
+ httpcore==0.15.0
+ httpx==0.23.0
+ humanfriendly==10.0
+ idna==3.4
+ Jinja2==3.1.2
+ kiwisolver==1.4.4
+ linkify-it-py==1.0.3
+ markdown-it-py==2.1.0
+ MarkupSafe==2.1.1
+ matplotlib==3.6.2
+ mdit-py-plugins==0.3.1
+ mdurl==0.1.2
+ mpmath==1.2.1
+ multidict==6.0.2
+ numpy==1.23.4
+ onnxruntime==1.13.1
+ opencv-python==4.6.0.66
+ orjson==3.8.1
+ packaging==21.3
+ pandas==1.5.1
+ paramiko==2.12.0
+ Pillow==9.3.0
+ protobuf==4.21.9
+ pycparser==2.21
+ pycryptodome==3.15.0
+ pydantic==1.10.2
+ pydub==0.25.1
+ PyNaCl==1.5.0
+ pyparsing==3.0.9
+ python-dateutil==2.8.2
+ python-multipart==0.0.5
+ pytz==2022.6
+ PyYAML==6.0
+ requests==2.28.1
+ rfc3986==1.5.0
+ six==1.16.0
+ sniffio==1.3.0
+ starlette==0.21.0
+ sympy==1.11.1
+ typing_extensions==4.4.0
+ uc-micro-py==1.0.1
+ urllib3==1.26.12
+ uvicorn==0.19.0
+ websockets==10.4
+ yarl==1.8.1
samples/image1.png ADDED

Git LFS Details

  • SHA256: 129b0410890e2a09e89e532a49eec31176889b9d36c99831810107ea848413e1
  • Pointer size: 132 Bytes
  • Size of remote file: 1.11 MB
samples/image2.png ADDED

Git LFS Details

  • SHA256: b1bcb0a9d7a9a59ec5031727d3fbbb9f4951d3fea5f4b593488f37166798ba85
  • Pointer size: 131 Bytes
  • Size of remote file: 673 kB
samples/image3.png ADDED

Git LFS Details

  • SHA256: 7b2fd0aed2111ba97ac142496b1bfd2612cb38c7ec3e418ff754916c91494674
  • Pointer size: 131 Bytes
  • Size of remote file: 107 kB
samples/image4.png ADDED

Git LFS Details

  • SHA256: 5d2472c5908687940ca3864ba123c27a10c6a928bbbb78f92f00050c8792fe56
  • Pointer size: 131 Bytes
  • Size of remote file: 350 kB
samples/image5.png ADDED

Git LFS Details

  • SHA256: bdbcb7388d3f544a48399bf548c8967670da029606ee9fc18722ccc110dc06ab
  • Pointer size: 131 Bytes
  • Size of remote file: 352 kB
weights/model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7e5833c8c9f26b18a074c313c11c4db5e569cdbf65357ae057caaa895e2bfbb
+ size 1837091