kimhyunwoo commited on
Commit
cc4f6e6
·
verified ·
1 Parent(s): 3423f45

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +92 -0
app.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI
2
+ import gradio as gr
3
+ from PIL import Image
4
+ import numpy as np
5
+ import torch
6
+ from transformers import pipeline
7
+ import cv2
8
+
9
app = FastAPI()

# Load the depth-estimation model (Depth Anything) once at process start so
# every request reuses the same pipeline instead of reloading per call.
print("Loading Depth Anything model...")
try:
    depth_estimator = pipeline(task="depth-estimation", model="LiangNX/depth-anything-hf")
    print("Depth Anything model loaded successfully.")
except Exception as e:
    print(f"Error loading Depth Anything model: {e}")
    depth_estimator = None  # sentinel: request handlers check for None and report the load failure
20
+
21
def process_image_for_depth(image_path_or_pil_image):
    """Estimate a depth map for a single image.

    Args:
        image_path_or_pil_image: a filesystem path (str) or a PIL Image.
            Gradio hands callbacks PIL Images; the str branch supports
            direct path use.

    Returns:
        (depth_map, error): on success, a grayscale ``PIL.Image`` and
        ``None``; on failure, ``None`` and an error-message string.
    """
    if depth_estimator is None:
        return None, "Error: Depth Anything model not loaded."

    if isinstance(image_path_or_pil_image, str):
        image = Image.open(image_path_or_pil_image).convert("RGB")
    else:
        image = image_path_or_pil_image.convert("RGB")

    try:
        result = depth_estimator(image)

        # BUGFIX: the transformers depth-estimation pipeline returns
        # "depth" (PIL image) and "predicted_depth" (torch tensor); there
        # is no "depth_npy" key, so the original lookup always raised
        # KeyError. Prefer the raw tensor, fall back to the rendered map.
        if "predicted_depth" in result:
            depth_np = result["predicted_depth"].squeeze().float().cpu().numpy()
        else:
            depth_np = np.asarray(result["depth"], dtype=np.float32)

        # Normalize to 0-255 for display; guard the constant-depth case
        # (max == min) which would otherwise divide by zero.
        d_min, d_max = depth_np.min(), depth_np.max()
        if d_max > d_min:
            normalized_depth_np = (depth_np - d_min) / (d_max - d_min) * 255
        else:
            normalized_depth_np = np.zeros_like(depth_np)

        depth_grayscale_pil = Image.fromarray(normalized_depth_np.astype(np.uint8))
        return depth_grayscale_pil, None
    except Exception as e:
        return None, f"Error processing image for depth: {e}"
50
+
51
# Gradio interface definition.
with gr.Blocks() as demo:
    gr.Markdown("# ๐Ÿง‘โ€๐Ÿ’ป ์–ผ๊ตด ๋ށ์Šค ๋งต ์ถ”์ถœ๊ธฐ")
    gr.Markdown("์—ฌ๋Ÿฌ ์žฅ์˜ ์–ผ๊ตด ์‚ฌ์ง„์„ ์—…๋กœ๋“œํ•˜๋ฉด ๊ฐ ์‚ฌ์ง„์—์„œ ๋”ฅ๋Ÿฌ๋‹์„ ํ†ตํ•ด ๋ށ์Šค ๋งต(๊นŠ์ด ์ •๋ณด)์„ ์ถ”์ถœํ•ฉ๋‹ˆ๋‹ค.")

    with gr.Row():
        input_images = gr.File(label="์–ผ๊ตด ์‚ฌ์ง„ ์—…๋กœ๋“œ (์ตœ๋Œ€ 10์žฅ ๊ถŒ์žฅ)", file_count="multiple", type="filepath")

    output_gallery = gr.Gallery(label="์›๋ณธ ์ด๋ฏธ์ง€ ๋ฐ ๋ށ์Šค ๋งต", columns=[2], rows=[1], object_fit="contain", height="auto")

    process_button = gr.Button("๋ށ์Šค ๋งต ์ถ”์ถœ ์‹œ์ž‘")

    def process_all_images(image_paths):
        """Run depth extraction on every uploaded file.

        Returns a list of (PIL.Image, caption) tuples for the gallery:
        each original image followed by its depth map, or the original
        with an error caption when extraction failed.
        """
        # BUGFIX: the original returned [(None, msg)] when nothing was
        # uploaded; None is not a valid gallery image and breaks gallery
        # rendering. gr.Error surfaces the same message to the user.
        if not image_paths:
            raise gr.Error("์ด๋ฏธ์ง€๋ฅผ ์—…๋กœ๋“œํ•ด์ฃผ์„ธ์š”.")

        results = []
        for i, path in enumerate(image_paths):
            original_image = Image.open(path).convert("RGB")
            depth_map_pil, error = process_image_for_depth(original_image)

            if error:
                print(f"Error processing image {i+1}: {error}")
                # Keep the original visible, with the error as its caption.
                results.append((original_image, f"Error: {error}"))
            else:
                results.append((original_image, f"์›๋ณธ ์ด๋ฏธ์ง€ {i+1}"))
                results.append((depth_map_pil, f"๋ށ์Šค ๋งต {i+1}"))
        return results

    process_button.click(
        fn=process_all_images,
        inputs=input_images,
        outputs=output_gallery,
    )
85
+
86
# Mount the Gradio app onto FastAPI at the root path (mount_gradio_app
# returns the wrapped FastAPI application).
app = gr.mount_gradio_app(app, demo, path="/")

# Plain FastAPI endpoint (optional; the Gradio UI occupies the root path).
@app.get("/api")
def read_root():
    """Simple JSON info endpoint served alongside the Gradio UI."""
    return {"message": "Welcome to the Face Depth Map Extractor! Visit / for the UI."}