Upload 7 files
Browse files- .gitattributes +1 -0
- Stop_sign.png +0 -0
- app.py +95 -0
- car_on_road.png +3 -0
- no_parking.png +0 -0
- police_van.png +0 -0
- requrements.txt +5 -0
- slow_sign.png +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
car_on_road.png filter=lfs diff=lfs merge=lfs -text
|
Stop_sign.png
ADDED
![]() |
app.py
ADDED
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn.functional as F
from PIL import Image
import streamlit as st
from pathlib import Path
import requests

# Load pre-trained ResNet model.
# NOTE(review): `pretrained=True` is deprecated in recent torchvision; the
# equivalent modern call is
# `torchvision.models.resnet50(weights=torchvision.models.ResNet50_Weights.IMAGENET1K_V1)`.
model = torchvision.models.resnet50(pretrained=True)
model.eval()  # inference mode: freezes dropout / batch-norm statistics

# Load ImageNet class labels (one human-readable label per line).
LABELS_URL = "https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt"
# timeout added so a hung network call cannot block app start-up forever
labels = requests.get(LABELS_URL, timeout=10).text.strip().split("\n")

# Image transform: ResNet expects 224x224 inputs; ToTensor scales to [0, 1].
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
])

# Load predefined signs from 'signs' folder.
# BUG FIX: the original pattern `glob("*.")` only matches names ending in a
# literal dot, so `presets` was always empty and the selectbox offered
# nothing. Match actual image files instead (sorted for a stable UI order).
signs_dir = Path("signs")
presets = {
    f.stem.replace("_", " "): f
    for f in sorted(signs_dir.glob("*"))
    if f.suffix.lower() in {".png", ".jpg", ".jpeg"}
}
|
27 |
+
|
28 |
+
def preprocess_image(image):
    """Turn a PIL image into a batched model input tensor of shape (1, C, 224, 224)."""
    rgb_image = image.convert("RGB")
    return transform(rgb_image).unsqueeze(0)
|
32 |
+
|
33 |
+
def predict_class(tensor):
    """Classify a preprocessed (1, C, H, W) tensor and return its ImageNet label string."""
    with torch.no_grad():
        logits = model(tensor)
    top_index = torch.argmax(logits, dim=1).item()
    return labels[top_index]
|
38 |
+
|
39 |
+
def generate_adversarial_example(tensor, epsilon, target_class=400):
    """Build an FGSM-style adversarial example from a preprocessed input.

    Args:
        tensor: 4-D input batch (1, C, H, W) with values in [0, 1].
        epsilon: perturbation magnitude; 0 returns the (clamped) input unchanged.
        target_class: ImageNet class index used as the cross-entropy target.

    Returns:
        A detached tensor of the same shape, clamped to [0, 1].

    NOTE(review): adding ``epsilon * grad.sign()`` performs gradient *ascent*
    on the loss w.r.t. ``target_class`` — it pushes the prediction away from
    that class. A classic *targeted* FGSM attack subtracts the gradient sign
    instead; confirm which semantics are intended.
    """
    # Fix: the original set `tensor.requires_grad = True` on the caller's
    # tensor, mutating the argument and raising if the input was a non-leaf.
    # Work on a detached leaf copy so `.grad` is always populated and the
    # caller's tensor is never touched.
    adv = tensor.detach().clone().requires_grad_(True)
    outputs = model(adv)
    loss = F.cross_entropy(outputs, torch.tensor([target_class]))
    loss.backward()
    perturbation = epsilon * adv.grad.sign()
    # Keep the result a valid [0, 1] image tensor and drop the graph.
    return torch.clamp(adv + perturbation, 0, 1).detach()
|
47 |
+
|
48 |
+
def main():
    """Streamlit UI: pick a traffic-sign image, choose epsilon, and compare the
    model's prediction before and after the adversarial perturbation."""
    st.title("Adversarial Attack on Traffic Signs")
    st.write("Upload a traffic sign image or select a predefined one, then apply perturbation.")

    input_left, input_right = st.columns(2)
    with input_left:
        uploaded_file = st.file_uploader("Upload a traffic sign image", type=["png", "jpg", "jpeg"])
    with input_right:
        selected_preset = st.selectbox("Or choose a predefined sign", [None] + list(presets.keys()))

    # User-defined perturbation strength limit; the slider range follows it.
    max_epsilon = st.number_input("Set Maximum Perturbation Strength (ε)", min_value=0.01, max_value=1.0, value=0.1, step=0.01)
    epsilon = st.slider("Select Perturbation Strength (ε)", 0.0, float(max_epsilon), 0.01, step=0.01)

    # Uploaded file wins over the preset dropdown, matching the original flow.
    if uploaded_file:
        image = Image.open(uploaded_file)
    elif selected_preset:
        image = Image.open(presets[selected_preset])
    else:
        image = None

    if not image:
        st.warning("Please upload image.")
        return

    st.image(image, caption="Original Image", use_container_width=True)
    tensor = preprocess_image(image)
    original_label = predict_class(tensor)

    perturbed_tensor = generate_adversarial_example(tensor.clone(), epsilon)
    perturbed_image = transforms.ToPILImage()(perturbed_tensor.squeeze())
    adversarial_label = predict_class(perturbed_tensor)

    result_left, result_right = st.columns(2)
    with result_left:
        st.markdown("### Original Prediction")
        st.success(original_label)
    with result_right:
        st.markdown("### Adversarial Prediction")
        st.error(adversarial_label)

    st.image(perturbed_image, caption="Perturbed Image", use_container_width=True)
|
93 |
+
|
94 |
+
# Script entry point: launch the Streamlit UI when run directly.
if __name__ == "__main__":
    main()
|
car_on_road.png
ADDED
![]() |
Git LFS Details
|
no_parking.png
ADDED
![]() |
police_van.png
ADDED
![]() |
requrements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
torch
|
2 |
+
torchvision
|
3 |
+
Pillow
|
4 |
+
streamlit
|
5 |
+
requests
|
slow_sign.png
ADDED
![]() |