Upload 6 files
- README.md +10 -8
- app.py +238 -0
- gitattributes +40 -0
- gitignore +1 -0
- requirements-detail.txt +27 -0
- requirements.txt +28 -0
README.md
CHANGED
@@ -1,13 +1,15 @@
 ---
-title:
-emoji:
+title: 3D LLAMA
+emoji: ⚡
 colorFrom: red
-colorTo:
-sdk:
-
+colorTo: purple
+sdk: gradio
+python_version: 3.10.8
+sdk_version: 5.15.0
 app_file: app.py
-pinned:
-short_description:
+pinned: true
+short_description: Powered by Ginigen 3D Style Image, Unique3D
+license: mit
 ---

-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,238 @@
import shlex
import subprocess
import os

subprocess.run(shlex.split("pip install pip==24.0"), check=True)
subprocess.run(
    shlex.split(
        "pip install package/onnxruntime_gpu-1.17.0-cp310-cp310-manylinux_2_28_x86_64.whl --force-reinstall --no-deps"
    ), check=True
)
subprocess.run(
    shlex.split(
        "pip install package/nvdiffrast-0.3.1.torch-cp310-cp310-linux_x86_64.whl --force-reinstall --no-deps"
    ), check=True
)

# Download the model checkpoints and configure torch
if __name__ == "__main__":
    from huggingface_hub import snapshot_download

    snapshot_download("public-data/Unique3D", repo_type="model", local_dir="./ckpt")

import os
import sys
sys.path.append(os.curdir)
import torch
torch.set_float32_matmul_precision('medium')
torch.backends.cuda.matmul.allow_tf32 = True
torch.set_grad_enabled(False)

import fire
import gradio as gr
from gradio_app.gradio_3dgen import create_ui as create_3d_ui
from gradio_app.all_models import model_zoo

# ===============================
# Text-to-image API helper functions
# ===============================
def text_to_image(height, width, steps, scales, prompt, seed):
    """
    Call the external API's /process_and_save_image endpoint with the given
    parameters and return the generated image.
    """
    from gradio_client import Client
    client = Client(os.getenv("CLIENT_API"))  # backend API address from the CLIENT_API environment variable
    result = client.predict(
        height,
        width,
        steps,
        scales,
        prompt,
        seed,
        api_name="/process_and_save_image"
    )
    if isinstance(result, dict):
        return result.get("url", None)
    else:
        return result

def update_random_seed():
    """
    Call the external API's /update_random_seed endpoint and return a new random seed.
    """
    from gradio_client import Client
    client = Client(os.getenv("CLIENT_API"))  # backend API address from the CLIENT_API environment variable
    return client.predict(api_name="/update_random_seed")


_TITLE = '''✨ 3D LLAMA Studio'''
_DESCRIPTION = '''
### Welcome to 3D Llama Studio - Your Advanced 3D Generation Platform

This platform offers two powerful features:
1. **Text/Image to 3D**: Generate detailed 3D models from text descriptions or reference images
2. **Text to Styled Image**: Create artistic images that can be used for 3D generation

*Note: Both English and Korean prompts are supported (영어와 한글 프롬프트 모두 지원됩니다)*
'''

# Custom CSS for a light theme
custom_css = """
.gradio-container {
    background-color: #ffffff;
    color: #333333;
}
.tabs {
    background-color: #f8f9fa;
    border-radius: 10px;
    padding: 10px;
    margin: 10px 0;
    box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
.input-box {
    background-color: #ffffff;
    border: 1px solid #e0e0e0;
    border-radius: 8px;
    padding: 15px;
    margin: 10px 0;
    box-shadow: 0 1px 3px rgba(0,0,0,0.05);
}
.button-primary {
    background-color: #4a90e2 !important;
    border: none !important;
    color: white !important;
    transition: all 0.3s ease;
}
.button-primary:hover {
    background-color: #357abd !important;
    transform: translateY(-1px);
}
.button-secondary {
    background-color: #f0f0f0 !important;
    border: 1px solid #e0e0e0 !important;
    color: #333333 !important;
    transition: all 0.3s ease;
}
.button-secondary:hover {
    background-color: #e0e0e0 !important;
}
.main-title {
    color: #2c3e50;
    font-weight: bold;
    margin-bottom: 20px;
}
.slider-label {
    color: #2c3e50;
    font-weight: 500;
}
.textbox-input {
    border: 1px solid #e0e0e0 !important;
    background-color: #ffffff !important;
}
"""

# Gradio theme configuration
def launch():
    model_zoo.init_models()

    with gr.Blocks(
        title=_TITLE,
        css=custom_css,
        theme=gr.themes.Soft(
            primary_hue="blue",
            secondary_hue="slate",
            neutral_hue="slate",
            font=["Inter", "Arial", "sans-serif"]
        )
    ) as demo:

        with gr.Row():
            gr.Markdown('# ' + _TITLE, elem_classes="main-title")
            gr.Markdown(_DESCRIPTION)

        with gr.Tabs() as tabs:
            with gr.Tab("🎨 Text to Styled Image", elem_classes="tab"):
                with gr.Group(elem_classes="input-box"):
                    gr.Markdown("### Image Generation Settings")
                    with gr.Row():
                        with gr.Column():
                            height_slider = gr.Slider(
                                label="Image Height",
                                minimum=256,
                                maximum=2048,
                                step=64,
                                value=1024,
                                info="Select image height (pixels)"
                            )
                            width_slider = gr.Slider(
                                label="Image Width",
                                minimum=256,
                                maximum=2048,
                                step=64,
                                value=1024,
                                info="Select image width (pixels)"
                            )
                        with gr.Column():
                            steps_slider = gr.Slider(
                                label="Generation Steps",
                                minimum=1,
                                maximum=100,
                                step=1,
                                value=8,
                                info="More steps = higher quality but slower"
                            )
                            scales_slider = gr.Slider(
                                label="Guidance Scale",
                                minimum=1.0,
                                maximum=10.0,
                                step=0.1,
                                value=3.5,
                                info="How closely to follow the prompt"
                            )

                    prompt_text = gr.Textbox(
                        label="Image Description",
                        placeholder="Enter your prompt here (English or Korean)",
                        lines=3,
                        elem_classes="input-box"
                    )

                    with gr.Row():
                        seed_number = gr.Number(
                            label="Seed (Empty = Random)",
                            value=None,
                            elem_classes="input-box"
                        )
                        update_seed_button = gr.Button(
                            "🎲 Random Seed",
                            elem_classes="button-secondary"
                        )

                    generate_button = gr.Button(
                        "🚀 Generate Image",
                        elem_classes="button-primary"
                    )

                with gr.Group(elem_classes="input-box"):
                    gr.Markdown("### Generated Result")
                    image_output = gr.Image(label="Output Image")

                update_seed_button.click(
                    fn=update_random_seed,
                    inputs=[],
                    outputs=seed_number
                )

                generate_button.click(
                    fn=text_to_image,
                    inputs=[height_slider, width_slider, steps_slider, scales_slider, prompt_text, seed_number],
                    outputs=image_output
                )

            with gr.Tab("🎯 Image to 3D", elem_classes="tab"):
                create_3d_ui("wkl")

    demo.queue().launch(share=True)

if __name__ == '__main__':
    fire.Fire(launch)
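For context, the two wrapper functions in app.py proxy a separate image-generation Space through gradio_client. Below is a minimal sketch of calling those endpoints directly from a local script; the endpoint names and argument order come from app.py, while the CLIENT_API target, the sample prompt, and the chosen values are illustrative assumptions, not part of the repository.

import os
from gradio_client import Client

# Assumes CLIENT_API holds the URL or "user/space" id of the backend image Space.
client = Client(os.getenv("CLIENT_API"))

# Ask the backend for a fresh random seed.
seed = client.predict(api_name="/update_random_seed")

# Request a 1024x1024 image; the positional order mirrors text_to_image() in app.py.
result = client.predict(
    1024,   # height
    1024,   # width
    8,      # steps
    3.5,    # guidance scale
    "a ceramic llama figurine, studio lighting",  # hypothetical prompt
    seed,
    api_name="/process_and_save_image",
)

# As in app.py, the backend may return a dict with a "url" key or a plain path/URL.
image_ref = result.get("url") if isinstance(result, dict) else result
print(image_ref)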
gitattributes
ADDED
@@ -0,0 +1,40 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.o filter=lfs diff=lfs merge=lfs -text
*.ninja_deps filter=lfs diff=lfs merge=lfs -text
*.so filter=lfs diff=lfs merge=lfs -text
*.whl filter=lfs diff=lfs merge=lfs -text
gitignore
ADDED
@@ -0,0 +1 @@
*.pyc
requirements-detail.txt
ADDED
@@ -0,0 +1,27 @@
accelerate==0.29.2
datasets==2.18.0
diffusers==0.27.2
fire==0.6.0
gradio==4.32.0
jaxtyping==0.2.29
numba==0.59.1
numpy==1.26.4
nvdiffrast==0.3.1
omegaconf==2.3.0
onnxruntime_gpu==1.17.0
opencv_python==4.9.0.80
opencv_python_headless==4.9.0.80
ort_nightly_gpu==1.17.0.dev20240118002
peft==0.10.0
Pillow==10.3.0
pygltflib==1.16.2
pymeshlab==2023.12.post1
pytorch3d==0.7.5
rembg==2.0.56
torch==2.1.0+cu121
torch_scatter==2.1.2
tqdm==4.64.1
transformers==4.39.3
trimesh==4.3.0
typeguard==2.13.3
wandb==0.16.6
requirements.txt
ADDED
@@ -0,0 +1,28 @@
pytorch3d @ https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py310_cu121_pyt221/pytorch3d-0.7.6-cp310-cp310-linux_x86_64.whl
ort_nightly_gpu @ https://aiinfra.pkgs.visualstudio.com/2692857e-05ef-43b4-ba9c-ccf1c22c437c/_packaging/d3daa2b0-aa56-45ac-8145-2c3dc0661c87/pypi/download/ort-nightly-gpu/1.17.dev20240118002/ort_nightly_gpu-1.17.0.dev20240118002-cp310-cp310-manylinux_2_28_x86_64.whl
onnxruntime_gpu @ https://pkgs.dev.azure.com/onnxruntime/2a773b67-e88b-4c7f-9fc0-87d31fea8ef2/_packaging/7fa31e42-5da1-4e84-a664-f2b4129c7d45/pypi/download/onnxruntime-gpu/1.17/onnxruntime_gpu-1.17.0-cp310-cp310-manylinux_2_28_x86_64.whl
torch==2.2.0
accelerate
datasets
diffusers>=0.26.3
fire
gradio
jaxtyping
numba
numpy<2
omegaconf>=2.3.0
opencv_python
opencv_python_headless
peft
Pillow
pygltflib
pymeshlab>=2023.12
rembg[gpu]
torch_scatter @ https://data.pyg.org/whl/torch-2.2.0%2Bcu121/torch_scatter-2.1.2%2Bpt22cu121-cp310-cp310-linux_x86_64.whl
tqdm
transformers
trimesh
typeguard
wandb
xformers
ninja
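Because requirements.txt pins Python 3.10 / CUDA 12.1 wheels (torch 2.2.0, pytorch3d 0.7.6, onnxruntime-gpu 1.17.0), a quick environment check can catch a mismatched runtime before the Space build fails. This is a minimal sketch under the assumption that the packages above are already installed on a GPU runtime; it is not part of the repository.

import sys
import torch
import onnxruntime as ort

# The pinned wheels target CPython 3.10, CUDA 12.1 and torch 2.2.x.
print("python :", sys.version.split()[0])                               # expect 3.10.x
print("torch  :", torch.__version__, "| cuda:", torch.version.cuda)     # expect 2.2.0 / 12.1
print("gpu ok :", torch.cuda.is_available())

# onnxruntime-gpu 1.17.0 should expose the CUDA execution provider on a GPU Space.
print("ort    :", ort.__version__, ort.get_available_providers())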