Update app.py
app.py CHANGED
@@ -4,31 +4,116 @@ import numpy as np
 import cv2
 import gradio as gr
 
-#
+# Download face encoder
 snapshot_download(
     "fal/AuraFace-v1",
     local_dir="models/auraface",
 )
 
-
-face_app = FaceAnalysis(
+app = FaceAnalysis(
     name="auraface",
     providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
     root=".",
 )
 
+app.prepare(ctx_id=0, det_size=(640, 640))
+
+def process_image_by_bbox_larger(input_image, bbox_xyxy, min_bbox_ratio=0.2):
+    """
+    Process an image based on a bounding box, cropping and resizing as necessary.
+    Parameters:
+    - input_image: PIL Image object.
+    - bbox_xyxy: Tuple (x1, y1, x2, y2) representing the bounding box coordinates.
+    Returns:
+    - A processed image cropped and resized to 1024x1024 if the bounding box is valid,
+      or None if the bounding box does not meet the required size criteria.
+    """
+    # Constants
+    target_size = 1024
+    # min_bbox_ratio = 0.2  # Bounding box should be at least 20% of the crop
+
+    # Extract bounding box coordinates
+    x1, y1, x2, y2 = bbox_xyxy
+    bbox_w = x2 - x1
+    bbox_h = y2 - y1
+
+    # Calculate the area of the bounding box
+    bbox_area = bbox_w * bbox_h
+
+    # Start with the smallest square crop that allows bbox to be at least 20% of the crop area
+    crop_size = max(bbox_w, bbox_h)
+    initial_crop_area = crop_size * crop_size
+    while (bbox_area / initial_crop_area) < min_bbox_ratio:
+        crop_size += 10  # Gradually increase until bbox is at least 20% of the area
+        initial_crop_area = crop_size * crop_size
+
+    # Once the minimum condition is satisfied, try to expand the crop further
+    max_possible_crop_size = min(input_image.width, input_image.height)
+    while crop_size < max_possible_crop_size:
+        # Calculate a potential new area
+        new_crop_size = crop_size + 10
+        new_crop_area = new_crop_size * new_crop_size
+        if (bbox_area / new_crop_area) < min_bbox_ratio:
+            break  # Stop if expanding further violates the 20% rule
+        crop_size = new_crop_size
+
+    # Determine the center of the bounding box
+    center_x = (x1 + x2) // 2
+    center_y = (y1 + y2) // 2
+
+    # Calculate the crop coordinates centered around the bounding box
+    crop_x1 = max(0, center_x - crop_size // 2)
+    crop_y1 = max(0, center_y - crop_size // 2)
+    crop_x2 = min(input_image.width, crop_x1 + crop_size)
+    crop_y2 = min(input_image.height, crop_y1 + crop_size)
+
+    # Ensure the crop is square, adjust if it goes out of image bounds
+    if crop_x2 - crop_x1 != crop_y2 - crop_y1:
+        side_length = min(crop_x2 - crop_x1, crop_y2 - crop_y1)
+        crop_x2 = crop_x1 + side_length
+        crop_y2 = crop_y1 + side_length
+
+    # Crop the image
+    cropped_image = input_image.crop((crop_x1, crop_y1, crop_x2, crop_y2))
+
+    # Resize the cropped image to 1024x1024
+    resized_image = cropped_image.resize((target_size, target_size), Image.LANCZOS)
+
+    return resized_image
+
+def calc_emb_cropped(image, app, min_bbox_ratio=0.2):
+    face_image = image.copy()
+
+    face_info = app.get(cv2.cvtColor(np.array(face_image), cv2.COLOR_RGB2BGR))
+
+    face_info = face_info[0]
+
+    #print(face_info)
+
+    cropped_face_image = process_image_by_bbox_larger(face_image, face_info["bbox"], min_bbox_ratio=min_bbox_ratio)
+
+    return cropped_face_image
+
 def get_embedding(image):
-
-
-
+    face_image = image.copy()
+
+    face_info = app.get(cv2.cvtColor(np.array(face_image), cv2.COLOR_RGB2BGR))
 
     # Get the face embedding
-
-    if len(
-        return
+    #face_info = app.get(cv2_image)
+    if len(face_info) > 0:
+        return face_info[0].normed_embedding
     else:
         return None
 
+'''
+from PIL import Image
+im0 = Image.open("Unreal_5_render_of_a_handsome_man_gentle_snowfall_at_dusk_a_bustling_marketplace_in_the_background.png")
+calc_emb_cropped(im0, app)
+get_embedding(im0)
+'''
+
+
 def calculate_similarity(image1, image2):
     # Get the embeddings of the two images
     embedding1 = get_embedding(image1)
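The hunk ends on the first line of calculate_similarity, so the rest of the comparison is not shown here. A minimal sketch of how it presumably continues, assuming cosine similarity over the already L2-normalised embeddings; the handling of images with no detected face is an assumption, not something visible in this diff:

def calculate_similarity(image1, image2):
    # Get the embeddings of the two images
    embedding1 = get_embedding(image1)
    embedding2 = get_embedding(image2)
    # Assumption: bail out if no face was detected in either image
    if embedding1 is None or embedding2 is None:
        return None
    # normed_embedding is unit-length, so cosine similarity reduces to a dot product
    return float(np.dot(embedding1, embedding2))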
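For the two crop-growth loops in process_image_by_bbox_larger, a quick worked example may help; the 800x600 canvas and 200x100 box below are made-up numbers, not taken from the Space:

from PIL import Image

canvas = Image.new("RGB", (800, 600))
# For a 200x100 box, crop_size starts at 200 (ratio 20000 / 200**2 = 0.5) and then
# grows in steps of 10 while the ratio stays >= 0.2: 20000 / 310**2 ≈ 0.208 passes,
# 20000 / 320**2 ≈ 0.195 fails, so the crop settles at 310x310 around the box centre
# before being resized to 1024x1024.
crop = process_image_by_bbox_larger(canvas, (100, 100, 300, 200))
print(crop.size)  # (1024, 1024)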