Harry2687 committed
Commit 6ffdc93 · 1 Parent(s): c9e4b13

Added files

Files changed (5)
  1. .gitignore +2 -0
  2. images/Female/freya.png +0 -0
  3. images/Male/kratos.png +0 -0
  4. main.py +143 -0
  5. requirements.txt +3 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
+ model_parameters.pt
+ .DS_Store
images/Female/freya.png ADDED
images/Male/kratos.png ADDED
main.py ADDED
@@ -0,0 +1,143 @@
+ import os
+ import gdown
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import torchvision.datasets as datasets
+ import torchvision.transforms as transforms
+ from torch.utils.data import DataLoader
+ import time
+
+ # Download model if not available
+ modelsave_name = 'model_parameters.pt'
+ if not os.path.isfile(modelsave_name):
+     url = 'https://drive.google.com/file/d/1_mYn2LrhG080Xvt26tWBtJ8U_0F2E1-s/view?usp=sharing'
+     gdown.download(url, output=modelsave_name, fuzzy=True)
+
+ # Set device
+ if torch.backends.mps.is_available():
+     device = torch.device('mps')
+     device_name = 'Apple Silicon GPU'
+ elif torch.cuda.is_available():
+     device = torch.device('cuda')
+     device_name = 'CUDA'
+ else:
+     device = torch.device('cpu')
+     device_name = 'CPU'
+
+ torch.set_default_device(device)
+
+ print(f'\nDevice: {device_name}')
+
+ # Define model
+ def conv_block(in_channels, out_channels, pool=False):
+     layers = [
+         nn.Conv2d(
+             in_channels,
+             out_channels,
+             kernel_size=3,
+             padding=1
+         ),
+         nn.BatchNorm2d(out_channels),
+         nn.ReLU()
+     ]
+     if pool:
+         layers.append(
+             nn.MaxPool2d(4)
+         )
+     return nn.Sequential(*layers)
+
+ class resnetModel_128(nn.Module):
+     def __init__(self):
+         super().__init__()
+         self.model_name = 'resnetModel_128'
+
+         self.conv_1 = conv_block(1, 64)
+         self.res_1 = nn.Sequential(
+             conv_block(64, 64),
+             conv_block(64, 64)
+         )
+         self.conv_2 = conv_block(64, 256, pool=True)
+         self.res_2 = nn.Sequential(
+             conv_block(256, 256),
+             conv_block(256, 256)
+         )
+         self.conv_3 = conv_block(256, 512, pool=True)
+         self.res_3 = nn.Sequential(
+             conv_block(512, 512),
+             conv_block(512, 512)
+         )
+         self.conv_4 = conv_block(512, 1024, pool=True)
+         self.res_4 = nn.Sequential(
+             conv_block(1024, 1024),
+             conv_block(1024, 1024)
+         )
+         self.classifier = nn.Sequential(
+             nn.Flatten(),
+             nn.Linear(2*2*1024, 2048),
+             nn.Dropout(0.5),
+             nn.ReLU(),
+             nn.Linear(2048, 1024),
+             nn.Dropout(0.5),
+             nn.ReLU(),
+             nn.Linear(1024, 2)
+         )
+
+     def forward(self, x):
+         x = self.conv_1(x)
+         x = self.res_1(x) + x
+         x = self.conv_2(x)
+         x = self.res_2(x) + x
+         x = self.conv_3(x)
+         x = self.res_3(x) + x
+         x = self.conv_4(x)
+         x = self.res_4(x) + x
+         x = self.classifier(x)
+         x = F.softmax(x, dim=1)
+         return x
+
+ # Make model and load parameters
+ resnet = resnetModel_128()
+ resnet.load_state_dict(torch.load(modelsave_name, map_location=device))
+ resnet.eval()
+
+ imsize = 128
+ classes = ('Female', 'Male')
+
+ loader = transforms.Compose([
+     transforms.Resize([imsize, imsize]),
+     transforms.Grayscale(1),
+     transforms.ToTensor(),
+     transforms.Normalize(0, 1)
+ ])
+
+ my_dataset = datasets.ImageFolder(
+     root='images/',
+     transform=loader
+ )
+
+ my_dataset_loader = DataLoader(
+     my_dataset,
+     batch_size=len(my_dataset),
+     generator=torch.Generator(device=device)
+ )
+
+ # Make predictions
+ start_time = time.time()
+ with torch.no_grad():
+     for i, (X, y) in enumerate(my_dataset_loader):
+         X = X.to(device)
+         y_pred = resnet(X)
+         predicted = torch.max(y_pred.data, 1)[1]
+
+         for j in range(len(X)):
+             print(f'\nImage: {my_dataset.imgs[j][0]}')
+             print(f'Prediction: {classes[predicted[j]]}')
+             print(f'Actual: {classes[y[j]]}')
+             print(f'{classes[0]} weight: {y_pred[j][0]}')
+             print(f'{classes[1]} weight: {y_pred[j][1]}')
+
+ end_time = time.time()
+
+ avg_inference_time = (end_time - start_time)/len(my_dataset)
+ print(f'\nAverage inference time: {avg_inference_time} seconds per image\n')
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ gdown
+ torch
+ torchvision
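
Note: main.py above classifies every image under images/ in a single batch. A minimal sketch of single-image inference, reusing the resnet, loader, classes and device objects defined in main.py (the PIL import and the sample path are illustrative assumptions, not part of this commit):

from PIL import Image
import torch

img = Image.open('images/Male/kratos.png').convert('RGB')  # illustrative path; convert('RGB') matches ImageFolder's default loader
x = loader(img).unsqueeze(0).to(device)                     # same Resize/Grayscale/ToTensor pipeline as main.py

with torch.no_grad():
    probs = resnet(x)                                        # softmax scores over ('Female', 'Male')
    pred = probs.argmax(dim=1).item()

print(f'Prediction: {classes[pred]} ({probs[0, pred].item():.3f})')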