face / app.py
from fastapi import FastAPI
import gradio as gr
from PIL import Image
import numpy as np
import torch
from transformers import pipeline

app = FastAPI()

# Load the deep learning model (Depth Anything).
# The model is loaded once at app startup and kept as a module-level global.
print("Loading Depth Anything model...")
try:
    # "LiheYoung/depth-anything-small-hf" is a known Depth Anything checkpoint that
    # works with the transformers depth-estimation pipeline.
    depth_estimator = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-small-hf")
    print("Depth Anything model loaded successfully.")
except Exception as e:
    print(f"Error loading Depth Anything model: {e}")
    depth_estimator = None  # Fall back to None if the model fails to load

def process_image_for_depth(image_path_or_pil_image):
    if depth_estimator is None:
        return None, "Error: Depth Anything model not loaded."

    # Gradio passes images as PIL Image objects; file paths are also accepted.
    if isinstance(image_path_or_pil_image, str):
        image = Image.open(image_path_or_pil_image).convert("RGB")
    else:
        image = image_path_or_pil_image.convert("RGB")
    try:
        # Run Depth Anything inference. The depth-estimation pipeline returns a dict
        # with "predicted_depth" (a torch tensor) and "depth" (a PIL Image).
        result = depth_estimator(image)

        # Depth map as a numpy array, normalized to 0-255 for visualization
        depth_np = result["predicted_depth"].squeeze().cpu().numpy()
        normalized_depth_np = (depth_np - depth_np.min()) / (depth_np.max() - depth_np.min()) * 255
        normalized_depth_np = normalized_depth_np.astype(np.uint8)

        # Convert to a grayscale PIL image
        depth_grayscale_pil = Image.fromarray(normalized_depth_np)
        return depth_grayscale_pil, None
    except Exception as e:
        return None, f"Error processing image for depth: {e}"
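
# Example usage sketch (illustrative; "face.jpg" is a placeholder path):
#   depth_pil, err = process_image_for_depth("face.jpg")
#   if err is None:
#       depth_pil.save("face_depth.png")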

# Gradio interface definition
with gr.Blocks() as demo:
    gr.Markdown("# 🧑‍💻 Face Depth Map Extractor")
    gr.Markdown("Upload one or more face photos and a depth map (per-pixel depth information) is extracted from each photo with a deep learning model.")

    with gr.Row():
        input_images = gr.File(label="Upload face photos (up to 10 recommended)", file_count="multiple", type="filepath")
        output_gallery = gr.Gallery(label="Original images and depth maps", columns=[2], rows=[1], object_fit="contain", height="auto")

    process_button = gr.Button("Extract depth maps")

    def process_all_images(image_paths):
        if not image_paths:
            # Surface a message in the UI instead of sending an empty entry to the gallery
            raise gr.Error("Please upload at least one image.")

        results = []
        for i, path in enumerate(image_paths):
            original_image = Image.open(path).convert("RGB")
            depth_map_pil, error = process_image_for_depth(original_image)
            if error:
                print(f"Error processing image {i+1}: {error}")
                results.append((original_image, f"Error: {error}"))
            else:
                results.append((original_image, f"Original image {i+1}"))
                results.append((depth_map_pil, f"Depth map {i+1}"))
        return results

    process_button.click(
        fn=process_all_images,
        inputs=input_images,
        outputs=output_gallery,
    )

# Basic FastAPI endpoint (optional); registered before mounting Gradio at "/" so the
# catch-all mount does not shadow it.
@app.get("/api")
def read_root():
    return {"message": "Welcome to the Face Depth Map Extractor! Visit / for the UI."}

# Mount the Gradio app onto FastAPI at the root path
app = gr.mount_gradio_app(app, demo, path="/")
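
# Local run sketch (assumption: uvicorn is installed; Hugging Face Spaces commonly
# serves on port 7860, used here as an illustrative default).
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)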