import gradio as gr
import cv2
import numpy as np
import mediapipe as mp
import tempfile
import os
# The FaceFlux implementation is stored in a Space secret; write it to disk at
# startup so it can be imported like a regular module.
faceflux_model = os.environ.get("faceflux_model")
if faceflux_model is None:
    raise RuntimeError("The 'faceflux_model' environment variable is not set")
try:
    with open("faceflux.py", "w") as file_object:
        file_object.write(faceflux_model)
    print("Successfully wrote code to faceflux.py")
except IOError as e:
    print(f"Error writing to file: {e}")
import faceflux as ff
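# Interface assumed from the calls below (the implementation lives in the secret):
#   ff.get_face_mask_box(bgr, feather_pct, padding_pct) -> (feathered uint8 mask,
#       (x, y, w, h) face box); the mask is None when no face is detected
#   ff.morph_faces(src_bgr, ref_bgr, alpha, res, step) -> morphed BGR face image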
def process_video(
    video_path, ref_img, trans, res, step, feather_pct,
    progress=gr.Progress()
):
    """Swap the reference face onto the face detected in each frame of the video."""
    # --- Initialization ---
    step = 5 - step  # invert the Quality slider (1-4) into the step passed to morph_faces
    padding_pct = 0.24  # extra margin around the detected face box
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 24  # fall back to 24 fps if the header is unreadable
    total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
progress(0.0, desc="Initializing")
# --- Prepare masked reference ---
ref_bgr = cv2.cvtColor(ref_img, cv2.COLOR_RGB2BGR)
mask_ref, ref_box = ff.get_face_mask_box(ref_bgr, feather_pct, padding_pct)
    if mask_ref is None:
        cap.release()
        progress(1.0, desc="No face found in reference image")
        return None  # single video output, so return a single None on failure
x_r, y_r, w_r, h_r = ref_box
ref_cut = ref_bgr[y_r:y_r+h_r, x_r:x_r+w_r]
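    # Normalize the feathered mask to [0, 1] and zero out everything outside the face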
mask_ref_norm = mask_ref.astype(np.float32)[..., None] / 255.0
ref_masked = (ref_cut.astype(np.float32) * mask_ref_norm).astype(np.uint8)
ref_morph = cv2.resize(ref_masked, (res, res))
progress(0.1, desc="Reference ready")
# --- Output setup ---
w_o = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h_o = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
tmp_vid = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4').name
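    # 'mp4v' (MPEG-4 Part 2) keeps encoding CPU-only, but some browsers cannot
    # play it inline and may force a download of the result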
out_vid = cv2.VideoWriter(tmp_vid, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w_o, h_o))
# --- Frame-by-frame processing ---
for i in range(total):
ret, frame = cap.read()
if not ret:
break
progress(0.1 + 0.8 * (i / total), desc=f"Processing frame {i+1}/{total}")
mask_roi, box = ff.get_face_mask_box(frame, feather_pct, padding_pct)
        if mask_roi is None:
            out_vid.write(frame)  # no face in this frame; pass it through unchanged
            continue
x, y, w, h = box
crop = frame[y:y+h, x:x+w]
crop_resized = cv2.resize(crop, (res, res))
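        # Map the Strength slider value from [-1, 1] onto a morph alpha in [0, 1]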
alpha = float(np.clip((trans+1)/2, 0, 1))
mor = ff.morph_faces(crop_resized, ref_morph, alpha, res, step)
mor_back = cv2.resize(mor, (w, h))
mask_n = (mask_roi.astype(np.float32)[..., None] / 255.0)
region = frame[y:y+h, x:x+w].astype(np.float32)
blended = region * (1-mask_n) + mor_back.astype(np.float32) * mask_n
frame[y:y+h, x:x+w] = blended.astype(np.uint8)
out_vid.write(frame)
cap.release()
out_vid.release()
progress(1.0, desc="Done")
return tmp_vid
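
# A minimal sketch, not part of the original app and not wired into the UI:
# the same mask -> morph -> feathered-blend pipeline applied to one BGR image
# instead of a video. `ref_morph` is a reference face prepared exactly as in
# process_video above; all other names mirror that function.
def swap_single_frame(frame, ref_morph, res, step, alpha,
                      feather_pct=0.12, padding_pct=0.24):
    mask_roi, box = ff.get_face_mask_box(frame, feather_pct, padding_pct)
    if mask_roi is None:
        return frame  # no detectable face; leave the frame untouched
    x, y, w, h = box
    crop_resized = cv2.resize(frame[y:y+h, x:x+w], (res, res))
    mor_back = cv2.resize(ff.morph_faces(crop_resized, ref_morph, alpha, res, step), (w, h))
    mask_n = mask_roi.astype(np.float32)[..., None] / 255.0
    region = frame[y:y+h, x:x+w].astype(np.float32)
    # Feathered alpha-blend of the morphed face back into the frame (in place)
    frame[y:y+h, x:x+w] = (region * (1 - mask_n) + mor_back.astype(np.float32) * mask_n).astype(np.uint8)
    return frame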
# --- Gradio App ---
css = """
@import url('https://fonts.googleapis.com/css2?family=Sulphur+Point:wght@700&display=swap');
video, img { object-fit: contain !important; }
.sulphur-point-title {
font-family: 'Sulphur Point', sans-serif;
font-weight: 700;
font-size: 2.5em;
margin-bottom: 0.2em;
}
.description-text {
font-size: 1em;
line-height: 1.6;
margin-bottom: 1em;
}
.socials {
font-size: 0.9em;
color: #555;
margin-bottom: 1.5em;
}
.socials li {
margin-bottom: 0.2em;
}
.example-section {
margin: 1em 0;
}
"""
with gr.Blocks(css=css) as iface:
# Title
gr.HTML('<div class="sulphur-point-title">FaceFlux</div>')
# Description
gr.Markdown(
"""
<div class="description-text">
<b><i>Super Fast Face Swap</i></b> using a CPU-friendly algorithm.
<br><br>
<i><b>How to Use:</b></i>
<ul>
<li>Upload a video (ideal for small or mid-distance faces).</li>
<li>Upload a clear reference image.</li>
<li><b>Strength:</b> How much of the reference face to blend in.</li>
<li><b>Quality:</b> Higher = better alignment, but slower.</li>
<li><b>Feather:</b> Smooths edges for natural blending.</li>
<li><b>Face Resolution:</b> Higher = sharper face but longer processing.</li>
</ul>
</div>
<br><br>
"""
)
# Socials (vertical)
gr.Markdown(
"""
<ul class="socials">
<li>📷 Instagram: <a href="https://instagram.com/nihal_gazi_io" target="_blank">@nihal_gazi_io</a></li>
<li>🐦 X (Twitter): <a href="https://x.com/NihalGazi_" target="_blank">@NihalGazi_</a></li>
<li>💬 Discord: nihal_gazi_io</li>
<li>📧 Email: [email protected]</li>
</ul>
"""
)
# Inputs
with gr.Row():
vid = gr.Video(label='Input Video')
ref = gr.Image(type='numpy', label='Reference Image')
with gr.Row():
res = gr.Dropdown([256, 384, 512, 768], value=512, label='Face Resolution')
quality = gr.Slider(1, 4, value=1, step=1, label='Quality')
        feather = gr.Slider(0.12, 0.24, value=0.12, step=0.01, label='Feather')
strength = gr.Slider(-0.70, -0.15, value=-0.35, step=0.05, label='Strength')
btn = gr.Button('Generate Morph 🚀')
out_vid = gr.Video(label='Morphed Video')
# Example (after component definitions)
gr.Examples(
examples=[
["examples/input.mp4", "examples/reference.jpg", -0.35, 512, 1, 0.12]
],
inputs=[vid, ref, strength, res, quality, feather],
outputs=[out_vid],
examples_per_page=1,
cache_examples=False
)
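    # Assumes examples/input.mp4 and examples/reference.jpg exist in the repo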
btn.click(
fn=process_video,
inputs=[vid, ref, strength, res, quality, feather],
outputs=[out_vid],
        show_progress="full",
concurrency_limit=30
)
iface.queue().launch(debug=True)