rohithk-03 committed
Commit bcc0393 · 1 Parent(s): 045e469

update return msg

Files changed (5)
  1. app.py +47 -0
  2. model.py +157 -0
  3. requirements.txt +3 -0
  4. temp.py +6 -6
  5. test.py +0 -0
app.py CHANGED
@@ -7,16 +7,44 @@ from roboflow import Roboflow
 import supervision as sv
 import cv2
 import tempfile
+import gdown
 import os
 import requests
 import requests
 import cloudinary
+import model
 import cloudinary.uploader
+from PIL import Image
 from a import main

 # Initialize Flask app
 app = Flask(__name__)

+GDRIVE_MODEL_URL = "https://drive.google.com/uc?id=1fzKneepaRt_--dzamTcDBM-9d3_dLX7z"
+LOCAL_MODEL_PATH = "checkpoint32.pth"
+
+
+def download_file_from_google_drive():
+    # Fetch the checkpoint with gdown only if it is not already on disk.
+    if not os.path.exists(LOCAL_MODEL_PATH):
+        gdown.download(GDRIVE_MODEL_URL, LOCAL_MODEL_PATH, quiet=False)
+
+
+def download_model():
+    # Fallback: plain HTTP download of the same checkpoint via requests.
+    if not os.path.exists(LOCAL_MODEL_PATH):
+        response = requests.get(GDRIVE_MODEL_URL, stream=True)
+        if response.status_code == 200:
+            with open(LOCAL_MODEL_PATH, "wb") as f:
+                f.write(response.content)
+        else:
+            raise Exception(
+                f"Failed to download model from Google Drive: {response.status_code}"
+            )
+
+
+download_file_from_google_drive()
+

 @app.route("/")
 def home():
@@ -186,5 +214,24 @@ def generate_report():
     return jsonify({"message": "Something happened!."}), 404


+@app.route("/ms-detection", methods=["POST"])
+def predict():
+    if "file" not in request.files:
+        return jsonify({"error": "file not uploaded"}), 400
+
+    file = request.files["file"]
+    # Save file temporarily
+    temp_path = os.path.join(tempfile.gettempdir(), file.filename)
+    file.save(temp_path)
+    image_save_path = None
+    if file.filename.lower().endswith((".png", ".jpg", ".jpeg")):
+        image = Image.open(temp_path)
+        image_save_path = os.path.join(
+            tempfile.gettempdir(), "saved_image.jpg")
+        image.save(image_save_path)
+
+    return jsonify({"message": model.check_file(temp_path), "saved_path": image_save_path})
+
+
 if __name__ == "__main__":
     app.run(host="0.0.0.0", port=7860)
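For a quick manual check of the new /ms-detection route, a minimal client sketch is below. It assumes the app is running locally on port 7860 (as in app.run above); "sample.png" is a placeholder path, not a file from this repo.

    import requests

    # POST one image to the new endpoint and print the JSON response,
    # which carries the prediction from model.check_file and the saved path.
    with open("sample.png", "rb") as fh:
        resp = requests.post(
            "http://localhost:7860/ms-detection",
            files={"file": ("sample.png", fh, "image/png")},
        )
    print(resp.status_code, resp.json())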
model.py ADDED
@@ -0,0 +1,157 @@
+
+from torch.utils.data import DataLoader
+from torchvision import transforms
+import numpy as np
+import pandas as pd
+import os
+import cv2
+from sklearn.utils import shuffle
+from sklearn.model_selection import train_test_split
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torchvision.transforms as transforms
+import torch
+import torch.nn as nn
+
+
+class HybridCNNViT(nn.Module):
+    def __init__(self, in_channels: int, num_classes: int):
+        super(HybridCNNViT, self).__init__()
+
+        self.conv1 = nn.Conv2d(
+            in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
+        self.bn1 = nn.BatchNorm2d(64)
+        self.relu = nn.ReLU(inplace=True)
+
+        self.conv2 = nn.Conv2d(64, 128, kernel_size=3,
+                               stride=1, padding=1, bias=False)
+        self.bn2 = nn.BatchNorm2d(128)
+
+        self.conv3 = nn.Conv2d(128, 128, kernel_size=3,
+                               stride=1, padding=1, bias=False)
+        self.bn3 = nn.BatchNorm2d(128)
+
+        self.conv4 = nn.Conv2d(128, 256, kernel_size=3,
+                               stride=2, padding=1, bias=False)
+        self.bn4 = nn.BatchNorm2d(256)
+
+        self.conv5 = nn.Conv2d(256, 256, kernel_size=3,
+                               stride=1, padding=1, bias=False)
+        self.bn5 = nn.BatchNorm2d(256)
+
+        self.conv6 = nn.Conv2d(256, 512, kernel_size=3,
+                               stride=1, padding=1, bias=False)
+        self.bn6 = nn.BatchNorm2d(512)
+
+        self.conv7 = nn.Conv2d(512, 512, kernel_size=3,
+                               stride=2, padding=1, bias=False)
+        self.bn7 = nn.BatchNorm2d(512)
+
+        # Optional MaxPooling (can be removed if strictly no max pooling)
+        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
+
+        self.classifier_conv = nn.Conv2d(
+            512, num_classes, kernel_size=1, stride=1, padding=0, bias=False)
+
+        self.classifier = nn.Sequential(
+            nn.AdaptiveAvgPool2d((1, 1)),
+            nn.Flatten(),
+            nn.Dropout(0.5)
+        )
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = self.relu(self.bn1(self.conv1(x)))
+        x = self.relu(self.bn2(self.conv2(x)))
+        x = self.relu(self.bn3(self.conv3(x)))
+        x = self.relu(self.bn4(self.conv4(x)))
+        x = self.relu(self.bn5(self.conv5(x)))
+        x = self.relu(self.bn6(self.conv6(x)))
+        x = self.relu(self.bn7(self.conv7(x)))
+
+        x = self.maxpool(x)  # Comment this line if no max pooling is needed
+
+        x = self.classifier_conv(x)
+        x = self.classifier(x)
+
+        return x
+
+
+def load_and_pad_single_image(image_path, img_size=(224, 224)):
+    img = cv2.imread(image_path)
+    if img is None:
+        raise ValueError(f"Could not read image: {image_path}")
+    img = cv2.resize(img, img_size)
+    return np.array(img)
+
+
+def check_file(image_path):
+    # image_path = "d/Control-Axial/C-A (2).png"
+
+    # Load and preprocess the single image
+    image = load_and_pad_single_image(image_path)
+    image = np.expand_dims(image, axis=0)  # Convert to batch format
+
+    # Duplicate the image 10 times
+    data = np.repeat(image, 10, axis=0)
+
+    # Normalize and transform the image
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    transform = transforms.Compose([
+        transforms.ToTensor(),
+        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[
+                             0.229, 0.224, 0.225])
+    ])
+
+    data = torch.tensor(data, dtype=torch.float32).permute(
+        0, 3, 1, 2).to(device)
+    # Placeholder labels for 10 images
+    labels = torch.tensor([0] * 10, dtype=torch.long).to(device)
+
+    data, labels = shuffle(data, labels, random_state=42)
+
+    train_data, test_data, train_labels, test_labels = train_test_split(
+        data, labels, test_size=0.2, random_state=42
+    )
+
+    train_labels = torch.tensor(train_labels, dtype=torch.long)
+    test_labels = torch.tensor(test_labels, dtype=torch.long)
+
+    batch_size = 1  # Since we are working with a single image
+    train_dataset = list(zip(train_data, train_labels))
+    test_dataset = list(zip(test_data, test_labels))
+    test_loader = DataLoader(
+        test_dataset, batch_size=batch_size, shuffle=False)
+
+    # Simple test with a model
+    output = ""
+
+    def test_model(model, test_loader, device):
+        model.to(device)
+        model.eval()
+        with torch.no_grad():
+            for images, labels in test_loader:
+                images, labels = images.to(device), labels.to(device)
+                outputs = model(images)
+                _, predicted = torch.max(outputs.data, 1)
+                return int(predicted.item())  # plain int, so jsonify can serialise it
+
+    def remove_module_from_checkpoint(checkpoint):
+        new_state_dict = {}
+        for key, value in checkpoint["model_state_dict"].items():
+            new_key = key.replace("module.", "")
+            new_state_dict[new_key] = value
+        checkpoint["model_state_dict"] = new_state_dict
+        return checkpoint
+
+    model = HybridCNNViT(3, 2)
+    checkpoint = torch.load('checkpoint32.pth', map_location=device)
+    checkpoint = remove_module_from_checkpoint(checkpoint)
+    model.load_state_dict(checkpoint['model_state_dict'])
+    # optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
+    # scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
+    model.eval()
+    model.to(device)
+    model = nn.DataParallel(model)
+    output = test_model(model, test_loader, device)
+    return output
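model.check_file can also be smoke-tested outside Flask. A rough sketch, assuming checkpoint32.pth has already been downloaded into the working directory; "sample.png" is again a placeholder image path.

    import model

    # Runs the whole pipeline (read image, load checkpoint, predict)
    # and prints the predicted class index.
    prediction = model.check_file("sample.png")
    print("predicted class:", prediction)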
requirements.txt CHANGED
@@ -22,3 +22,6 @@ openai
 fpdf
 cloudinary
 PyPDF2
+scikit-learn
+pandas
+gdown
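The new gdown dependency is what app.py (and temp.py) use to pull the checkpoint from Google Drive. A standalone sketch of that pattern, using the file id and output name that appear elsewhere in this diff:

    import gdown

    # Download a publicly shared Google Drive file by id to a local path.
    url = "https://drive.google.com/uc?id=1fzKneepaRt_--dzamTcDBM-9d3_dLX7z"
    gdown.download(url, "checkpoint32.pth", quiet=False)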
temp.py CHANGED
@@ -17,7 +17,8 @@ import torch
 app = Flask(__name__)
 cfg = None
 # Google Drive file URL
-GDRIVE_MODEL_URL = "https://drive.google.com/uc?id=18aEDo-kWOBhg8mAhnbpFkuM6bmmrBH4E" # Replace 'your-file-id' with the actual file ID from Google Drive
+# Replace 'your-file-id' with the actual file ID from Google Drive
+GDRIVE_MODEL_URL = "https://drive.google.com/uc?id=1fzKneepaRt_--dzamTcDBM-9d3_dLX7z"
 LOCAL_MODEL_PATH = "model_final.pth"


@@ -25,8 +26,8 @@ def download_file_from_google_drive(id, destination):
     gdown.download(GDRIVE_MODEL_URL, LOCAL_MODEL_PATH, quiet=False)


-file_id = "18aEDo-kWOBhg8mAhnbpFkuM6bmmrBH4E"
-destination = "model_final.pth"
+file_id = "1fzKneepaRt_--dzamTcDBM-9d3_dLX7z"
+destination = "checkpoint32.pth"
 download_file_from_google_drive(file_id, destination)


@@ -109,7 +110,8 @@ def upload():
             if j in merged_boxes:
                 continue
             iou = box_iou(
-                torch.tensor(boxes[i]).unsqueeze(0), torch.tensor(boxes[j]).unsqueeze(0)
+                torch.tensor(boxes[i]).unsqueeze(
+                    0), torch.tensor(boxes[j]).unsqueeze(0)
             ).item()
             if iou > iou_threshold:
                 merged_boxes.add(j)
@@ -150,8 +152,6 @@ def fetchImage():
     image = skio.imread(file)
     image_np = image

-
-
     return jsonify(response)


test.py ADDED
File without changes