from fastapi import FastAPI
import gradio as gr
from PIL import Image
import numpy as np
import torch
from transformers import pipeline
import cv2
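# Note: these imports assume the Space's requirements.txt provides fastapi,
# gradio, transformers, torch, pillow, numpy, and opencv-python
# (cv2 is imported here but not used below).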
app = FastAPI()

# Load the deep learning model (Depth Anything).
# The model is loaded once at app startup and kept as a global variable.
print("Loading Depth Anything model...")
try:
    depth_estimator = pipeline(task="depth-estimation", model="LiangNX/depth-anything-hf")
    print("Depth Anything model loaded successfully.")
except Exception as e:
    print(f"Error loading Depth Anything model: {e}")
    depth_estimator = None  # Set to None if model loading fails


def process_image_for_depth(image_path_or_pil_image):
    if depth_estimator is None:
        return None, "Error: Depth Anything model not loaded."

    # Gradio may pass either a file path or a PIL Image object.
    if isinstance(image_path_or_pil_image, str):
        image = Image.open(image_path_or_pil_image).convert("RGB")
    else:
        image = image_path_or_pil_image.convert("RGB")
    try:
        # Run Depth Anything inference.
        # The depth-estimation pipeline returns a dict with "predicted_depth"
        # (a torch.Tensor) and "depth" (a PIL Image).
        result = depth_estimator(image)

        # Depth map (PIL Image)
        depth_image_pil = result["depth"]

        # Depth map (numpy array), normalized to 0-255 for visualization
        depth_np = result["predicted_depth"].squeeze().cpu().numpy()
        normalized_depth_np = (depth_np - depth_np.min()) / (depth_np.max() - depth_np.min()) * 255
        normalized_depth_np = normalized_depth_np.astype(np.uint8)

        # Convert to a grayscale image (PIL Image)
        depth_grayscale_pil = Image.fromarray(normalized_depth_np)

        return depth_grayscale_pil, None
    except Exception as e:
        return None, f"Error processing image for depth: {e}"


# Define the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# 🧑‍💻 Face Depth Map Extractor")
    gr.Markdown("Upload several face photos and a depth map (depth information) is extracted from each one using deep learning.")

    with gr.Row():
        input_images = gr.File(label="Upload face photos (up to 10 recommended)", file_count="multiple", type="filepath")
        output_gallery = gr.Gallery(label="Original images and depth maps", columns=[2], rows=[1], object_fit="contain", height="auto")

    process_button = gr.Button("Extract depth maps")
    def process_all_images(image_paths):
        if not image_paths:
            # Raising gr.Error surfaces the message in the UI; returning a
            # gallery entry with a None image would fail to render.
            raise gr.Error("Please upload at least one image.")
        results = []
        for i, path in enumerate(image_paths):
            original_image = Image.open(path).convert("RGB")
            depth_map_pil, error = process_image_for_depth(original_image)
            if error:
                print(f"Error processing image {i+1}: {error}")
                results.append((original_image, f"Error: {error}"))
            else:
                results.append((original_image, f"Original image {i+1}"))
                results.append((depth_map_pil, f"Depth map {i+1}"))
        return results
    process_button.click(
        fn=process_all_images,
        inputs=input_images,
        outputs=output_gallery
    )

# Mount the Gradio app on the FastAPI app
app = gr.mount_gradio_app(app, demo, path="/")


# Plain FastAPI endpoint (optional; the Gradio UI is served at the root path)
@app.get("/api")
def read_root():
    return {"message": "Welcome to the Face Depth Map Extractor! Visit / for the UI."}