HBlackwood committed
Commit 9b33e2d · verified · 1 Parent(s): c4d1018

Upload 9 files

.gitattributes CHANGED
@@ -1,35 +1,38 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.pad filter=lfs diff=lfs merge=lfs -text
+ 09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth filter=lfs diff=lfs merge=lfs -text
+ .pth filter=lfs diff=lfs merge=lfs -text
09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eff07ee6a9faf1b1cbaf25837bd5990025f46ac083ea629919de57c82a86c157
+ size 31314554
README.md CHANGED
@@ -1,14 +1,14 @@
- ---
- title: Food Vision Mini
- emoji: 📊
- colorFrom: yellow
- colorTo: green
- sdk: gradio
- sdk_version: 5.8.0
- app_file: app.py
- pinned: false
- license: mit
- short_description: Food Vision Mini Project
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

+ ---
+ title: Food Vision Mini
+ emoji: 📊
+ colorFrom: yellow
+ colorTo: green
+ sdk: gradio
+ sdk_version: 5.8.0
+ app_file: app.py
+ pinned: false
+ license: mit
+ short_description: Food Vision Mini Project
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,7 +1,69 @@
  import gradio as gr

- def greet(name):
-     return "Hello " + name + "!"

- demo = gr.Interface(fn=greet, inputs="text", outputs="text")
- demo.launch()
+ ### 1. Imports and class names setup
  import gradio as gr
+ import os
+ import torch

+ from model import create_effnetb2_model
+ from timeit import default_timer as timer
+ from typing import Tuple, Dict

+ # Setup class names
+ class_names = ['pizza', 'steak', 'sushi']
+
+ ### 2. Model and transforms preparation
+ effnetb2, effnetb2_transforms = create_effnetb2_model(num_classes=3)
+
+ # Load saved weights
+ effnetb2.load_state_dict(
+     torch.load(
+         f="09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth",
+         map_location=torch.device("cpu")  # load the model to the CPU
+     )
+ )
+
+ ### 3. Predict function
+ def predict(img) -> Tuple[Dict, float]:
+     # Start a timer
+     start_time = timer()
+
+     # Transform the input image for use with EffNetB2
+     img = effnetb2_transforms(img).unsqueeze(0)  # unsqueeze = add batch dimension on 0th index
+
+     # Put model into eval mode, make predictions
+     effnetb2.eval()
+     with torch.inference_mode():
+         # Pass transformed image through model and turn the prediction logits into probabilities
+         pred_probs = torch.softmax(effnetb2(img), dim=1)
+
+     # Create a prediction label and prediction probability dictionary
+     pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
+
+     # Calculate pred time
+     end_time = timer()
+     pred_time = round(end_time - start_time, 4)
+
+     # Return pred dict and pred time
+     return pred_labels_and_probs, pred_time
+
+ ### 4. Gradio app
+
+ # Create title, description and article
+ title = "FoodVision Mini 🍕🥩🍣"
+ description = "An [EfficientNetB2 feature extractor](https://pytorch.org/vision/stable/models/generated/torchvision.models.efficientnet_b2.html#torchvision.models.efficientnet_b2) computer vision model to classify images as pizza, steak or sushi."
+ article = "Created at [09. PyTorch Model Deployment](https://www.learnpytorch.io/09_pytorch_model_deployment/#74-building-a-gradio-interface)."
+
+ # Create example list
+ example_list = [["examples/" + example] for example in os.listdir("examples")]
+
+ # Create the Gradio demo
+ demo = gr.Interface(fn=predict,  # maps inputs to outputs
+                     inputs=gr.Image(type="pil"),
+                     outputs=[gr.Label(num_top_classes=3, label="Predictions"),
+                              gr.Number(label="Prediction time (s)")],
+                     examples=example_list,
+                     title=title,
+                     description=description,
+                     article=article)
+
+ # Launch the demo!
+ demo.launch()
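A quick local sanity check of predict() before launching the interface (a sketch; assumes the script runs from the repo root so the committed example images are present):

# Sketch: call predict() directly on one committed example image.
from PIL import Image

img = Image.open("examples/592799.jpg")          # any of the three example photos works
pred_labels_and_probs, pred_time = predict(img)  # predict() is defined above in app.py
print(pred_labels_and_probs, f"({pred_time}s)")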
examples/2582289.jpg ADDED
examples/3622237.jpg ADDED
examples/592799.jpg ADDED
model.py ADDED
@@ -0,0 +1,24 @@
+ import torch
+ import torchvision
+
+ from torch import nn
+
+ def create_effnetb2_model(num_classes: int = 3,  # default number of output classes = 3
+                           seed: int = 42):
+     # 1, 2, 3. Create EffNetB2 pretrained weights, transforms and model
+     weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
+     transforms = weights.transforms()
+     model = torchvision.models.efficientnet_b2(weights=weights)
+
+     # 4. Freeze all layers of the base model
+     for param in model.parameters():
+         param.requires_grad = False
+
+     # 5. Change the classifier head, with a random seed for reproducibility
+     torch.manual_seed(seed)
+     model.classifier = nn.Sequential(
+         nn.Dropout(p=0.3, inplace=True),
+         nn.Linear(in_features=1408, out_features=num_classes)
+     )
+
+     return model, transforms
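Because every backbone parameter is frozen, only the replacement classifier head should remain trainable. A quick check of that invariant (a sketch):

# Sketch: verify the freeze; only the new head's parameters require grad.
model, transforms = create_effnetb2_model(num_classes=3)
trainable = [name for name, p in model.named_parameters() if p.requires_grad]
print(trainable)  # expected: ['classifier.1.weight', 'classifier.1.bias']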
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ torch==1.12.0
+ torchvision==0.13.0
+ gradio==3.1.4