Francesco Pochetti committed
Commit: f61f295
Parent(s): 4a130a3

moving to kornia

Files changed:
- app.py +47 -11
- face_rec_benchmark.py +27 -0
- crowd.jpeg → images/crowd.jpeg +0 -0
- crowd1.jpeg → images/crowd1.jpeg +0 -0
- family.jpeg → images/family.jpeg +0 -0
- kornia_benchmark.py +63 -0
- requirements.txt +1 -1
app.py
CHANGED
@@ -3,27 +3,63 @@ import cv2
 import gradio as gr
 from PIL import Image
 import numpy as np
+import torch
+import kornia as K
+from kornia.contrib import FaceDetector, FaceDetectorResult
+
+device = torch.device('cpu')
+face_detection = FaceDetector().to(device)
+
+def scale_image(img: np.ndarray, size: int) -> np.ndarray:
+    h, w = img.shape[:2]
+    scale = 1. * size / w
+    return cv2.resize(img, (int(w * scale), int(h * scale)))
+
+
+def apply_blur_face(img: torch.Tensor, img_vis: np.ndarray, det: FaceDetectorResult):
+    # crop the face
+    x1, y1 = det.xmin.int(), det.ymin.int()
+    x2, y2 = det.xmax.int(), det.ymax.int()
+    roi = img[..., y1:y2, x1:x2]
+    if roi.shape[-1]==0 or roi.shape[-2]==0:
+        return
+
+    # apply blurring and put back to the visualisation image
+    roi = K.filters.gaussian_blur2d(roi, (21, 21), (100., 100.))
+    roi = K.color.rgb_to_bgr(roi)
+    img_vis[y1:y2, x1:x2] = K.tensor_to_image(roi)
+
 
 def run(image):
     image.thumbnail((1280, 1280))
-
-
+    img_raw = np.array(image)
+
+    # preprocess
+    img = K.image_to_tensor(img_raw, keepdim=False).to(device)
+    img = K.color.bgr_to_rgb(img.float())
+
+    with torch.no_grad():
+        dets = face_detection(img)
+    dets = [FaceDetectorResult(o) for o in dets]
+
+    img_vis = img_raw.copy()
+
+    for b in dets:
+        if b.score < 0.5:
+            continue
+
+        apply_blur_face(img, img_vis, b)
 
-
-        face_image = image[top:bottom, left:right]
-        face_image = cv2.GaussianBlur(face_image, (99, 99), 30)
-        image[top:bottom, left:right] = face_image
-
-    return Image.fromarray(image)
+    return Image.fromarray(img_vis)
 
 content_image_input = gr.inputs.Image(label="Content Image", type="pil")
 
 description="Privacy first! Upload an image of a group of people and blur their faces automatically."
 article="""
-Demo built
-<a href='https://github.com/
+Demo built on top of kornia and opencv, based on
+<a href='https://github.com/kornia/kornia/blob/master/examples/face_detection/main.py' target='_blank'>this</a> example.
 """
-examples = [["family.jpeg"], ["crowd.jpeg"], ["crowd1.jpeg"]]
+examples = [["./images/family.jpeg"], ["./images/crowd.jpeg"], ["./images/crowd1.jpeg"]]
 
 app_interface = gr.Interface(fn=run,
                              inputs=[content_image_input],
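The app.py diff is cut off right after inputs=[content_image_input], so the rest of the gr.Interface(...) call is not visible in this view. As a minimal sketch only (the output component and the keyword wiring below are assumptions, not part of the commit), the call is typically completed and launched like this with the Gradio 2.x API used here:

content_image_output = gr.outputs.Image(label="Output Image")  # assumed output component, not shown in the diff

app_interface = gr.Interface(fn=run,
                             inputs=[content_image_input],
                             outputs=content_image_output,
                             description=description,
                             article=article,
                             examples=examples)

app_interface.launch()  # Spaces runs app.py and serves this interface
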
face_rec_benchmark.py
ADDED
@@ -0,0 +1,27 @@
+import face_recognition
+import cv2
+import gradio as gr
+from PIL import Image
+import numpy as np
+import time
+
+def run(image):
+    image.thumbnail((1280, 1280))
+    image = np.array(image)
+    face_locations = face_recognition.face_locations(image, model="cnn")
+
+    for top, right, bottom, left in face_locations:
+        face_image = image[top:bottom, left:right]
+        face_image = cv2.GaussianBlur(face_image, (99, 99), 30)
+        image[top:bottom, left:right] = face_image
+
+    return Image.fromarray(image)
+
+if __name__ == "__main__":
+
+    start = time.time()
+    for _ in range(100):
+        image = Image.open("./images/crowd.jpeg")
+        _ = run(image)
+
+    print('It took', (time.time()-start)/100, 'seconds.')
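Note that the timing loop in face_rec_benchmark.py re-opens and re-thumbnails ./images/crowd.jpeg on every iteration, so disk I/O and resizing are part of the reported average. A small sketch, not part of the commit, of timing only the detection-plus-blur step by loading the image once up front (it reuses the imports and run() defined in the script above):

# Hypothetical variant: load and resize once, then time run() alone.
image = Image.open("./images/crowd.jpeg")
image.thumbnail((1280, 1280))

start = time.time()
for _ in range(100):
    _ = run(image)  # the thumbnail call inside run() is now a no-op
print('Detection + blur only:', (time.time() - start) / 100, 'seconds per run.')
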
crowd.jpeg → images/crowd.jpeg
RENAMED
File without changes

crowd1.jpeg → images/crowd1.jpeg
RENAMED
File without changes

family.jpeg → images/family.jpeg
RENAMED
File without changes

kornia_benchmark.py
ADDED
@@ -0,0 +1,63 @@
+import cv2
+import gradio as gr
+from PIL import Image
+import numpy as np
+import torch
+import kornia as K
+from kornia.contrib import FaceDetector, FaceDetectorResult
+import time
+
+device = torch.device('cpu')
+face_detection = FaceDetector().to(device)
+
+def scale_image(img: np.ndarray, size: int) -> np.ndarray:
+    h, w = img.shape[:2]
+    scale = 1. * size / w
+    return cv2.resize(img, (int(w * scale), int(h * scale)))
+
+
+def apply_blur_face(img: torch.Tensor, img_vis: np.ndarray, det: FaceDetectorResult):
+    # crop the face
+    x1, y1 = det.xmin.int(), det.ymin.int()
+    x2, y2 = det.xmax.int(), det.ymax.int()
+    roi = img[..., y1:y2, x1:x2]
+    #print(roi.shape)
+    if roi.shape[-1]==0 or roi.shape[-2]==0:
+        return
+
+    # apply blurring and put back to the visualisation image
+    roi = K.filters.gaussian_blur2d(roi, (21, 21), (100., 100.))
+    roi = K.color.rgb_to_bgr(roi)
+    img_vis[y1:y2, x1:x2] = K.tensor_to_image(roi)
+
+
+def run(image):
+    image.thumbnail((1280, 1280))
+    img_raw = np.array(image)
+
+    # preprocess
+    img = K.image_to_tensor(img_raw, keepdim=False).to(device)
+    img = K.color.bgr_to_rgb(img.float())
+
+    with torch.no_grad():
+        dets = face_detection(img)
+    dets = [FaceDetectorResult(o) for o in dets]
+
+    img_vis = img_raw.copy()
+
+    for b in dets:
+        if b.score < 0.5:
+            continue
+
+        apply_blur_face(img, img_vis, b)
+
+    return Image.fromarray(img_vis)
+
+if __name__ == "__main__":
+
+    start = time.time()
+    for _ in range(100):
+        image = Image.open("./images/crowd.jpeg")
+        _ = run(image)
+
+    print('It took', (time.time()-start)/100, 'seconds.')
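kornia_benchmark.py pins everything to the CPU through device = torch.device('cpu'). A small sketch, not part of the commit, of selecting the device at runtime so the same benchmark exercises a GPU when one is available; run() already moves the input onto the chosen device via .to(device):

# Hypothetical device selection (the commit hardcodes the CPU).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
face_detection = FaceDetector().to(device)
# The rest of the script is unchanged: K.image_to_tensor(...).to(device) in run()
# places the image on the same device before detection and blurring.
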
requirements.txt
CHANGED
@@ -1,2 +1,2 @@
 opencv-python==4.5.5.62
-
+kornia==0.6.3