import gradio as gr
import cv2
import numpy as np
import mediapipe as mp
import time
import tempfile
import os
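
# The face-swap implementation is not bundled in this file: its source is read
# from the "faceflux_model" environment variable (e.g. a Space secret), written
# to faceflux.py at startup, and then imported as a regular module below.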

faceflux_model = os.environ.get("faceflux_model")
if faceflux_model is None:
    raise RuntimeError("The 'faceflux_model' environment variable is not set.")

try:
    with open("faceflux.py", "w") as file_object:
        file_object.write(faceflux_model)
    print("Successfully wrote code to faceflux.py")
except OSError as e:
    print(f"Error writing to file: {e}")

import faceflux as ff


def process_video(
    video_path, ref_img, trans, res, step, feather_pct, 
    progress=gr.Progress()
):
    # --- Initialization ---
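    # The Quality slider (1-4) is inverted into the morph step size (higher
    # quality -> smaller step); the detected face box gets a fixed 24% padding.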
    step = 5 - step
    padding_pct = 0.24
    
    # Gradio passes None when an input is missing; bail out early in that case.
    if video_path is None or ref_img is None:
        return None

    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 24
    total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    progress(0.0, desc="Initializing")

    # --- Prepare masked reference ---
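    # Detect the face in the reference image once, cut it out with its feathered
    # mask, and pre-resize it to the chosen working resolution.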
    ref_bgr = cv2.cvtColor(ref_img, cv2.COLOR_RGB2BGR)
    mask_ref, ref_box = ff.get_face_mask_box(ref_bgr, feather_pct, padding_pct)
    if mask_ref is None:
        # No face detected in the reference image; abort and return no video.
        cap.release()
        return None
    x_r, y_r, w_r, h_r = ref_box
    ref_cut = ref_bgr[y_r:y_r+h_r, x_r:x_r+w_r]
    mask_ref_norm = mask_ref.astype(np.float32)[..., None] / 255.0
    ref_masked = (ref_cut.astype(np.float32) * mask_ref_norm).astype(np.uint8)
    ref_morph = cv2.resize(ref_masked, (res, res))
    progress(0.1, desc="Reference ready")

    # --- Output setup ---
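    # The output keeps the source resolution and frame rate and is written to a
    # temporary .mp4 file.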
    w_o = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h_o = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    tmp_vid = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4').name
    out_vid = cv2.VideoWriter(tmp_vid, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w_o, h_o))

    # --- Frame-by-frame processing ---
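    # For each frame: locate the face, crop and resize it to the working
    # resolution, morph it toward the reference, then feather-blend the morphed
    # face back into the original frame. Frames with no detected face pass
    # through unchanged.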
    for i in range(total):
        ret, frame = cap.read()
        if not ret:
            break
        progress(0.1 + 0.8 * (i / total), desc=f"Processing frame {i+1}/{total}")

        mask_roi, box = ff.get_face_mask_box(frame, feather_pct, padding_pct)
        if mask_roi is None:
            out_vid.write(frame)
            continue
        x, y, w, h = box
        crop = frame[y:y+h, x:x+w]
        crop_resized = cv2.resize(crop, (res, res))
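        # Remap the Strength slider (UI range [-0.70, -0.15]) to a morph weight
        # between 0.15 and 0.425.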
        alpha = float(np.clip((trans+1)/2, 0, 1))
        mor = ff.morph_faces(crop_resized, ref_morph, alpha, res, step)

        mor_back = cv2.resize(mor, (w, h))
        mask_n = (mask_roi.astype(np.float32)[..., None] / 255.0)
        region = frame[y:y+h, x:x+w].astype(np.float32)
        blended = region * (1-mask_n) + mor_back.astype(np.float32) * mask_n
        frame[y:y+h, x:x+w] = blended.astype(np.uint8)
        out_vid.write(frame)

    cap.release()
    out_vid.release()

    progress(1.0, desc="Done")
    return tmp_vid


# --- Gradio App ---

css = """
@import url('https://fonts.googleapis.com/css2?family=Sulphur+Point:wght@700&display=swap');

video, img { object-fit: contain !important; }

.sulphur-point-title {
    font-family: 'Sulphur Point', sans-serif;
    font-weight: 700;
    font-size: 2.5em;
    margin-bottom: 0.2em;
}

.description-text {
    font-size: 1em;
    line-height: 1.6;
    margin-bottom: 1em;
}

.socials {
    font-size: 0.9em;
    color: #555;
    margin-bottom: 1.5em;
}

.socials li {
    margin-bottom: 0.2em;
}

.example-section {
    margin: 1em 0;
}
"""

with gr.Blocks(css=css) as iface:
    # Title
    gr.HTML('<div class="sulphur-point-title">FaceFlux</div>')

    # Description
    gr.Markdown(
        """
<div class="description-text">
<b><i>Super Fast Face Swap</i></b> using a CPU-friendly algorithm.
<br><br>
<i><b>How to Use:</b></i>
<ul>
  <li>Upload a video (ideal for small or mid-distance faces).</li>
  <li>Upload a clear reference image.</li>
  <li><b>Strength:</b> How much of the reference face to blend in.</li>
  <li><b>Quality:</b> Higher = better alignment, but slower.</li>
  <li><b>Feather:</b> Smooths edges for natural blending.</li>
  <li><b>Face Resolution:</b> Higher = sharper face but longer processing.</li>
</ul>
</div>
<br><br>
        """
    )

    # Socials (vertical)
    gr.Markdown(
        """
        
<ul class="socials">
  <li>📷 Instagram: <a href="https://instagram.com/nihal_gazi_io" target="_blank">@nihal_gazi_io</a></li>
  <li>🐦 X (Twitter): <a href="https://x.com/NihalGazi_" target="_blank">@NihalGazi_</a></li>
  <li>💬 Discord: nihal_gazi_io</li>
  <li>📧 Email: [email protected]</li>
</ul>
        """
    )

    # Inputs
    with gr.Row():
        vid = gr.Video(label='Input Video')
        ref = gr.Image(type='numpy', label='Reference Image')

    with gr.Row():
        res = gr.Dropdown([256, 384, 512, 768], value=512, label='Face Resolution')
        quality = gr.Slider(1, 4, value=1, step=1, label='Quality')
        feather = gr.Slider(0.12, 0.24, value=0.12, step=0.01, label='Feather (%)')
        strength = gr.Slider(-0.70, -0.15, value=-0.35, step=0.05, label='Strength')

    btn = gr.Button('Generate Morph 🚀')
    out_vid = gr.Video(label='Morphed Video')

    # Example (after component definitions)
    gr.Examples(
        examples=[
            ["examples/input.mp4", "examples/reference.jpg", -0.35, 512, 1, 0.12]
        ],
        inputs=[vid, ref, strength, res, quality, feather],
        outputs=[out_vid],
        examples_per_page=1,
        cache_examples=False
    )

    btn.click(
        fn=process_video,
        inputs=[vid, ref, strength, res, quality, feather],
        outputs=[out_vid],
        show_progress=True,
        concurrency_limit=30
    )

    iface.queue().launch(debug=True)