import gradio as gr
import cv2
import numpy as np
import mediapipe as mp
import time
import tempfile
import os

# The faceflux module source is supplied via an environment variable and
# written to faceflux.py on disk before it is imported below.
faceflux_model = os.environ.get("faceflux_model")
if faceflux_model is None:
    raise RuntimeError("Environment variable 'faceflux_model' is not set; cannot create faceflux.py")
try:
    with open("faceflux.py", "w") as file_object:
        file_object.write(faceflux_model)
    print("Successfully wrote code to faceflux.py")
    print(faceflux_model)
except IOError as e:
    print(f"Error writing to file: {e}")

import faceflux as ff


def process_video(
    video_path, ref_img, trans, res, step, feather_pct, progress=gr.Progress()
):
    # --- Initialization ---
    step = 5 - step  # remap the UI step slider for ff.morph_faces
    padding_pct = 0.24
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 24
    total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    progress(0.0, desc="Initializing")

    # --- Prepare masked reference ---
    # Cut out the reference face, apply its feathered mask, and resize it
    # to the working resolution used for morphing.
    ref_bgr = cv2.cvtColor(ref_img, cv2.COLOR_RGB2BGR)
    mask_ref, ref_box = ff.get_face_mask_box(ref_bgr, feather_pct, padding_pct)
    if mask_ref is None:
        progress(None)  # hide on error
        return None  # no face found in the reference image
    x_r, y_r, w_r, h_r = ref_box
    ref_cut = ref_bgr[y_r:y_r + h_r, x_r:x_r + w_r]
    mask_ref_norm = mask_ref.astype(np.float32)[..., None] / 255.0
    ref_masked = (ref_cut.astype(np.float32) * mask_ref_norm).astype(np.uint8)
    ref_morph = cv2.resize(ref_masked, (res, res))
    progress(0.1, desc="Reference ready")

    # --- Output setup ---
    w_o = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h_o = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    tmp_vid = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4').name
    out_vid = cv2.VideoWriter(tmp_vid, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w_o, h_o))

    # --- Frame-by-frame processing ---
    for i in range(total):
        ret, frame = cap.read()
        if not ret:
            break
        progress(0.1 + 0.8 * (i / total), desc=f"Processing frame {i+1}/{total}")
        mask_roi, box = ff.get_face_mask_box(frame, feather_pct, padding_pct)
        if mask_roi is None:
            out_vid.write(frame)  # no face detected: pass the frame through unchanged
            continue
        x, y, w, h = box
        crop = frame[y:y + h, x:x + w]
        crop_resized = cv2.resize(crop, (res, res))
        # Map the slider value from [-1, 1] to a morph weight in [0, 1].
        alpha = float(np.clip((trans + 1) / 2, 0, 1))
        mor = ff.morph_faces(crop_resized, ref_morph, alpha, res, step)
        mor_back = cv2.resize(mor, (w, h))
        # Feathered alpha blend of the morphed face back into the original frame.
        mask_n = mask_roi.astype(np.float32)[..., None] / 255.0
        region = frame[y:y + h, x:x + w].astype(np.float32)
        blended = region * (1 - mask_n) + mor_back.astype(np.float32) * mask_n
        frame[y:y + h, x:x + w] = blended.astype(np.uint8)
        out_vid.write(frame)

    cap.release()
    out_vid.release()
    progress(1.0, desc="Done")
    return tmp_vid


# --- Gradio App ---
css = """
@import url('https://fonts.googleapis.com/css2?family=Sulphur+Point:wght@700&display=swap');
video, img { object-fit: contain !important; }
.sulphur-point-title { font-family: 'Sulphur Point', sans-serif; font-weight: 700; font-size: 2.5em; margin-bottom: 0.2em; }
.description-text { font-size: 1em; line-height: 1.6; margin-bottom: 1em; }
.socials { font-size: 0.9em; color: #555; margin-bottom: 1.5em; }
.socials li { margin-bottom: 0.2em; }
.example-section { margin: 1em 0; }
"""

with gr.Blocks(css=css) as iface:
    # Title
    gr.HTML('