DineshKumar1329 committed (verified)
Commit 924a4f6 · Parent(s): ae49adc

Update README.md

Files changed (1): README.md (+125 -21)

README.md CHANGED
@@ -41,31 +41,135 @@ This repository contains a ResNet-based convolutional neural network trained to
  ### Inference:
  ```python
  import torch
- from torchvision import transforms
  from PIL import Image
- from transformers import pipeline
-
- # Define the image transformation
  transform = transforms.Compose([
      transforms.Resize((128, 128)),
      transforms.ToTensor(),
      transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
  ])

- # Load the model from Hugging Face
- pipe = pipeline("image-classification", model="DineshKumar1329/DogCat_Classifier")
-
- # Load and preprocess an image
- image_path = 'path/to/your/image.jpg'
- image = Image.open(image_path)
- image = transform(image)
- image = image.unsqueeze(0)  # Add batch dimension
-
- # Make a prediction
- result = classifier(image_path)
-
- # Extract the predicted label
- predicted_label = result[0]['label']
-
- # Output the prediction
- print(f'The predicted class for the image is: {predicted_label}')
  ### Inference:
  ```python
  import torch
+ from torchvision.models import resnet18
  from PIL import Image
+ import torchvision.transforms as transforms
+ import matplotlib.pyplot as plt
+ model = resnet18(pretrained=False)
+ num_ftrs = model.fc.in_features
+ model.fc = torch.nn.Linear(num_ftrs, 2)
+
+ # Load the trained model state_dict
+ model_path = 'cat_dog_classifier.pth'
+ model.load_state_dict(torch.load(model_path))
+ model.eval()
+
+ # Model architecture for reference:
+ # ResNet(
+ #   (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
+ #   (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ #   (relu): ReLU(inplace=True)
+ #   (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
+ #   (layer1): Sequential(
+ #     (0): BasicBlock(
+ #       (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
+ #       (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ #       (relu): ReLU(inplace=True)
+ #       (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
+ #       (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ #     )
+ #     (1): BasicBlock(
+ #       (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
+ #       (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ #       (relu): ReLU(inplace=True)
+ #       (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
+ #       (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ #     )
+ #   )
+ #   (layer2): Sequential(
+ #     (0): BasicBlock(
+ #       (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
+ #       (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ #       (relu): ReLU(inplace=True)
+ #       (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
+ #       (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ #       (downsample): Sequential(
+ #         (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
+ #         (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ #       )
+ #     )
+ #     (1): BasicBlock(
+ #       (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
+ #       (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ #       (relu): ReLU(inplace=True)
+ #       (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
+ #       (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ #     )
+ #   )
+ #   (layer3): Sequential(
+ #     (0): BasicBlock(
+ #       (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
+ #       (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ #       (relu): ReLU(inplace=True)
+ #       (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
+ #       (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ #       (downsample): Sequential(
+ #         (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)
+ #         (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ #       )
+ #     )
+ #     (1): BasicBlock(
+ #       (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
+ #       (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ #       (relu): ReLU(inplace=True)
+ #       (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
+ #       (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ #     )
+ #   )
+ #   (layer4): Sequential(
+ #     (0): BasicBlock(
+ #       (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
+ #       (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ #       (relu): ReLU(inplace=True)
+ #       (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
+ #       (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ #       (downsample): Sequential(
+ #         (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
+ #         (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ #       )
+ #     )
+ #     (1): BasicBlock(
+ #       (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
+ #       (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ #       (relu): ReLU(inplace=True)
+ #       (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
+ #       (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ #     )
+ #   )
+ #   (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))
+ #   (fc): Linear(in_features=512, out_features=2, bias=True)
+ # )
+ # Define the transformation (ensure it matches the training preprocessing)
  transform = transforms.Compose([
      transforms.Resize((128, 128)),
      transforms.ToTensor(),
      transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
  ])

+ def load_image(image_path):
+     image = Image.open(image_path).convert("RGB")  # ensure 3 channels to match the Normalize stats
+     image = transform(image)
+     image = image.unsqueeze(0)  # Add batch dimension
+     return image
+
+ def predict_image(model, image_path):
+     image = load_image(image_path)
+     model.eval()
+     with torch.no_grad():
+         outputs = model(image)
+         _, predicted = torch.max(outputs, 1)
+     return "Cat" if predicted.item() == 0 else "Dog"
+
+ def plot_image(image_path, prediction):
+     image = Image.open(image_path)
+     plt.imshow(image)
+     plt.title(f'Predicted: {prediction}')
+     plt.axis('off')
+     plt.show()
+
+ # Example usage
+ image_path = "path/to/your/image.jpg"
+ prediction = predict_image(model, image_path)
+ print(f'The predicted class for the image is: {prediction}')
+ plot_image(image_path, prediction)
+ # Output: The predicted class for the image is: Cat
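The snippet above loads `cat_dog_classifier.pth` from the local working directory. If the checkpoint is hosted in the `DineshKumar1329/DogCat_Classifier` repository on the Hugging Face Hub under that filename (an assumption based on the names used in this README, so check them against the repository's file list), it could be fetched first with `hf_hub_download`, roughly as in this sketch:

```python
from huggingface_hub import hf_hub_download
import torch
from torchvision.models import resnet18

# Assumed repo id and checkpoint filename, taken from the snippets above;
# verify they match the files actually hosted in the repository.
model_path = hf_hub_download(
    repo_id="DineshKumar1329/DogCat_Classifier",
    filename="cat_dog_classifier.pth",
)

# Rebuild the two-class ResNet-18 head and load the downloaded weights
model = resnet18(pretrained=False)
model.fc = torch.nn.Linear(model.fc.in_features, 2)
model.load_state_dict(torch.load(model_path, map_location="cpu"))
model.eval()
```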