Commit 38a3c61 · Parent(s): 2bb800d
first commit for API
Files changed:
- .gitignore  +27 -0
- Dockerfile  +20 -0
- architecture/resnet.py  +98 -0
- docker-compose.yml  +11 -0
- main.py  +107 -0
- requirements.txt  +9 -0
- steps/preprocess.py  +33 -0
.gitignore
ADDED
@@ -0,0 +1,27 @@
+# Ignore environment files
+.env
+
+# Ignore Python cache files
+__pycache__/
+*.py[cod]
+
+# Ignore log and temp files
+*.log
+*.tmp
+
+# Ignore system files
+.DS_Store
+Thumbs.db
+
+# Ignore IDE/editor specific files
+.vscode/
+.idea/
+*.sublime-workspace
+*.sublime-project
+
+# Ignore node_modules if using Node.js
+node_modules/
+
+# Ignore build directories
+dist/
+build/
Dockerfile
ADDED
@@ -0,0 +1,20 @@
+# Use a Python base image
+FROM python:3.9-slim
+
+# Set the working directory
+WORKDIR /app
+
+# Copy the dependency file
+COPY requirements.txt .
+
+# Install the dependencies
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy the rest of the application
+COPY . .
+
+# Expose the port the application will run on
+EXPOSE 8000
+
+# Command to launch the application
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
architecture/resnet.py
ADDED
@@ -0,0 +1,98 @@
+import torch.nn as nn
+import torch.nn.functional as F
+import torchvision.models as models
+
+
+class ResNet(nn.Module):
+    def __init__(
+        self, resnet_type="resnet18", trainable_layers=3, num_output_neurons=2
+    ):
+        super(ResNet, self).__init__()
+
+        # Dictionary to map resnet_type to the corresponding torchvision model and weights
+        resnet_dict = {
+            "resnet18": (models.resnet18, models.ResNet18_Weights.IMAGENET1K_V1),
+            "resnet34": (models.resnet34, models.ResNet34_Weights.IMAGENET1K_V1),
+            "resnet50": (models.resnet50, models.ResNet50_Weights.IMAGENET1K_V2),
+            "resnet101": (models.resnet101, models.ResNet101_Weights.IMAGENET1K_V2),
+            "resnet152": (models.resnet152, models.ResNet152_Weights.IMAGENET1K_V2),
+        }
+
+        # Ensure the provided resnet_type is valid
+        if resnet_type not in resnet_dict:
+            raise ValueError(
+                f"Invalid resnet_type. Expected one of: {list(resnet_dict.keys())}"
+            )
+
+        # Load the specified ResNet model with pre-trained weights
+        model_func, weights = resnet_dict[resnet_type]
+        self.resnet = model_func(weights=weights)
+
+        # Remove the last fully connected layer
+        self.resnet = nn.Sequential(*list(self.resnet.children())[:-2])
+
+        # Additional pooling to reduce dimensionality further
+        self.pool = nn.AdaptiveAvgPool2d((1, 1))  # Global average pooling
+
+        # Number of input features to the first fully connected layer
+        if resnet_type in ["resnet18", "resnet34"]:
+            fc_in_features = 512
+        else:
+            fc_in_features = 2048
+
+        # Simplified fully connected layers with Batch Normalization and Dropout
+        self.fc1 = nn.Linear(
+            fc_in_features, 128
+        )  # Input features depend on the resnet type
+        self.bn1 = nn.BatchNorm1d(128)  # Batch Normalization
+        self.dropout1 = nn.Dropout(0.5)  # Helps prevent overfitting
+
+        self.fc2 = nn.Linear(128, 64)
+        self.bn2 = nn.BatchNorm1d(64)  # Batch Normalization
+        self.dropout2 = nn.Dropout(0.5)  # Helps prevent overfitting
+
+        self.fc3 = nn.Linear(
+            64, num_output_neurons
+        )  # Output layer for binary classification
+
+        # Set the requires_grad attribute based on the number of trainable layers
+        self.set_trainable_layers(trainable_layers)
+
+    def set_trainable_layers(self, trainable_layers):
+        # If trainable_layers is 0, freeze all layers
+        if trainable_layers == 0:
+            for param in self.resnet.parameters():
+                param.requires_grad = False
+        else:
+            # Get the total number of layers in resnet
+            total_layers = len(list(self.resnet.children()))
+            # Make the last `trainable_layers` layers trainable
+            for i, layer in enumerate(self.resnet.children()):
+                if i < total_layers - trainable_layers:
+                    for param in layer.parameters():
+                        param.requires_grad = False
+                else:
+                    for param in layer.parameters():
+                        param.requires_grad = True
+
+    def forward(self, x):
+        # Use the ResNet backbone
+        x = self.resnet(x)
+
+        # Global average pooling
+        x = self.pool(x)
+
+        # Flattening the output for the dense layer
+        x = x.view(x.size(0), -1)  # Adjust this based on the actual output size
+
+        x = F.relu(self.fc1(x))
+        x = self.bn1(x)
+        x = self.dropout1(x)
+
+        x = F.relu(self.fc2(x))
+        x = self.bn2(x)
+        x = self.dropout2(x)
+
+        x = self.fc3(x)
+
+        return x
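
As a quick sanity check, here is a minimal sketch (not part of the commit) of exercising this class on a dummy input; the 256-pixel size mirrors image_size in main.py, and the final assertion reflects the trainable_layers=0 freezing behavior implemented above:

    import torch
    from architecture.resnet import ResNet

    # Hypothetical smoke test: a frozen-backbone resnet18 on one fake RGB image.
    model = ResNet("resnet18", trainable_layers=0, num_output_neurons=2)
    model.eval()  # put BatchNorm/Dropout in inference mode

    dummy = torch.randn(1, 3, 256, 256)
    with torch.no_grad():
        logits = model(dummy)
    print(logits.shape)  # torch.Size([1, 2])

    # With trainable_layers=0, every backbone parameter should be frozen.
    assert all(not p.requires_grad for p in model.resnet.parameters())
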
docker-compose.yml
ADDED
@@ -0,0 +1,11 @@
+version: '3.8'
+
+services:
+  inference-lamp-api:
+    build: .
+    ports:
+      - "8000:8000"
+    volumes:
+      - .:/app
+    env_file:
+      - .env
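
Note that env_file points at the same .env that the .gitignore above keeps out of version control; main.py expects it to define api_read, the Hugging Face token passed to hf_hub_download.
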
main.py
ADDED
@@ -0,0 +1,107 @@
+import os
+from fastapi import FastAPI, HTTPException
+from fastapi.responses import JSONResponse
+from pydantic import BaseModel
+from transformers import pipeline
+from torchvision import transforms
+from PIL import Image
+import requests
+from io import BytesIO
+from steps.preprocess import process_image
+from huggingface_hub import hf_hub_download
+from architecture.resnet import ResNet
+import torch
+
+app = FastAPI()
+
+image_size = 256
+hf_token = os.getenv("api_read")
+
+models_locations = [
+    {
+        "repo_id": "TamisAI/category-lamp",
+        "subfolder": "maison-jansen/palmtree-152-0005-32-256",
+        "filename": "palmtree-jansen.pth",
+    },
+    {
+        "repo_id": "TamisAI/category-lamp",
+        "subfolder": "maison-charles/corail-152-0001-32-256",
+        "filename": "maison-charles-corail.pth",
+    },
+]
+
+device = torch.device("cpu")
+
+
+# Data model for requests
+class PredictRequest(BaseModel):
+    imageUrl: str
+    modelName: str
+
+
+# Dictionary to store the model pipelines
+model_pipelines = {}
+
+# Create a single instance of the ResNet model
+base_model = ResNet("resnet152", num_output_neurons=2).to(device)
+
+
+@app.on_event("startup")
+async def load_models():
+    # Load the models at startup
+    print(f"Loading models...{len(models_locations)}")
+
+    for model_location in models_locations:
+        try:
+            print(f"Loading model: {model_location['filename']}")
+            model_weight = hf_hub_download(
+                repo_id=model_location["repo_id"],
+                subfolder=model_location["subfolder"],
+                filename=model_location["filename"],
+                token=hf_token,
+                cache_dir="/tmp/cache",
+            )
+            model = base_model.__class__("resnet152", num_output_neurons=2).to(device)
+            model.load_state_dict(
+                torch.load(model_weight, weights_only=True, map_location=device)
+            )
+            model.eval()
+            model_pipelines[model_location["filename"]] = model
+        except Exception as e:
+            print(f"Error loading model {model_location['filename']}: {e}")
+    print(f"Models loaded. {len(model_pipelines)}")
+
+
+@app.post("/predict")
+async def predict(request: PredictRequest):
+    image_url = request.imageUrl
+    model_name = request.modelName
+
+    # Download the image from the URL
+    try:
+        response = requests.get(image_url)
+        image = Image.open(BytesIO(response.content))
+    except Exception as e:
+        raise HTTPException(status_code=400, detail="Invalid image URL")
+
+    # Check that the model is loaded
+    if model_name not in model_pipelines:
+        raise HTTPException(status_code=404, detail="Model not found")
+
+    # Preprocess the image
+    processed_image = process_image(image, size=image_size)
+
+    # Convert to tensor
+    image_tensor = transforms.ToTensor()(processed_image).unsqueeze(0)
+
+    model = model_pipelines[model_name]
+
+    # Perform inference
+    with torch.no_grad():
+        outputs = model(image_tensor)
+        probabilities = torch.nn.functional.softmax(outputs, dim=1)
+        predicted_probabilities = probabilities.numpy().tolist()
+        confidence = round(predicted_probabilities[0][1], 2)
+
+    # Return the probabilities as JSON
+    return JSONResponse(content={"confidence": confidence})
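
With the container running, the /predict endpoint can be exercised as sketched below; the image URL is a placeholder, and modelName must match one of the filename keys loaded at startup:

    import requests

    payload = {
        "imageUrl": "https://example.com/lamp.jpg",  # placeholder URL
        "modelName": "palmtree-jansen.pth",  # key the model was stored under
    }
    resp = requests.post("http://localhost:8000/predict", json=payload)
    print(resp.json())  # e.g. {"confidence": 0.87}
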
requirements.txt
ADDED
@@ -0,0 +1,9 @@
+fastapi
+uvicorn
+transformers
+Pillow
+requests
+torchvision
+huggingface_hub
+torch
+numpy
steps/preprocess.py
ADDED
@@ -0,0 +1,33 @@
+import os
+from PIL import Image
+import numpy as np
+
+
+def resize_and_pad(img, desired_size):
+    """Resize an image and pad it to the desired size while maintaining the aspect ratio."""
+    # Ensure image is in RGB
+    if img.mode != "RGB":
+        img = img.convert("RGB")
+
+    # Compute the new size to maintain aspect ratio
+    ratio = float(desired_size) / max(img.size)
+    new_size = tuple([int(x * ratio) for x in img.size])
+    img = img.resize(new_size, Image.Resampling.LANCZOS)
+
+    # Create a new image with mean color padding
+    new_im = Image.new("RGB", (desired_size, desired_size))
+    pixel_values = np.array(img)
+    mean_color = tuple(np.mean(np.mean(pixel_values, axis=0), axis=0).astype(int))
+    new_im.paste(Image.new("RGB", new_im.size, mean_color), (0, 0))
+
+    # Paste resized image onto the background
+    new_im.paste(
+        img, ((desired_size - new_size[0]) // 2, (desired_size - new_size[1]) // 2)
+    )
+
+    return new_im
+
+
+def process_image(img, size):
+    processed_img = resize_and_pad(img, size)
+    return processed_img
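
For illustration, a small sketch (sizes chosen arbitrarily) of how process_image reshapes a non-square input:

    from PIL import Image
    from steps.preprocess import process_image

    # A 400x200 dummy image: the longer side is scaled to 256, giving a
    # 256x128 result centered on a 256x256 mean-color background.
    img = Image.new("RGB", (400, 200), (200, 120, 40))
    out = process_image(img, size=256)
    print(out.size)  # (256, 256)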