salomonsky committed on
Commit 72cf865 · verified · 1 Parent(s): b6a33f6

Update app.py

Files changed (1)
  1. app.py +25 -73
app.py CHANGED
@@ -10,10 +10,6 @@ from huggingface_hub import hf_hub_download
 from safetensors.torch import load_file
 from PIL import Image
 
-MORE = """ ## TRY Other Demos
-### Instant Image: 4k images in 5 Second -> https://huggingface.co/spaces/KingNish/Instant-Image
-"""
-
 # Constants
 bases = {
     "Cartoon": "frankjoshua/toonyou_beta6",
@@ -25,23 +21,14 @@ step_loaded = None
 base_loaded = "Realistic"
 motion_loaded = None
 
-# Ensure model and scheduler are initialized in GPU-enabled function
-if not torch.cuda.is_available():
-    raise NotImplementedError("No GPU detected!")
-
-device = "cuda"
-dtype = torch.float16
+# Set device to CPU
+device = "cpu"
+dtype = torch.float32
 pipe = AnimateDiffPipeline.from_pretrained(bases[base_loaded], torch_dtype=dtype).to(device)
 pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")
 
-# Safety checkers
-from transformers import CLIPFeatureExtractor
-
-feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")
-
-# Function
-@spaces.GPU(duration=30,queue=False)
-def generate_image(prompt, base="Realistic", motion="", step=8, progress=gr.Progress()):
+@spaces.CPU(duration=30, queue=False)
+def generate_image(prompt, base="Realistic", motion="", step=8, duration=5, progress=gr.Progress()):
     global step_loaded
     global base_loaded
     global motion_loaded
@@ -72,30 +59,19 @@ def generate_image(prompt, base="Realistic", motion="", step=8, progress=gr.Prog
 
     name = str(uuid.uuid4()).replace("-", "")
     path = f"/tmp/{name}.mp4"
-    export_to_video(output.frames[0], path, fps=10)
+    export_to_video(output.frames[0], path, fps=10, duration=duration)
     return path
 
-
 # Gradio Interface
-with gr.Blocks(css="style.css") as demo:
-    gr.HTML(
-        "<h1><center>Mochi 1</center></h1>"
-
-    )
+with gr.Blocks() as demo:
+    gr.HTML("<h1><center>AnimateDiff on CPU</center></h1>")
     with gr.Group():
         with gr.Row():
-            prompt = gr.Textbox(
-                label='Prompt'
-            )
+            prompt = gr.Textbox(label='Prompt')
        with gr.Row():
            select_base = gr.Dropdown(
                label='Base model',
-                choices=[
-                    "Cartoon",
-                    "Realistic",
-                    "3d",
-                    "Anime",
-                ],
+                choices=["Cartoon", "Realistic", "3d", "Anime"],
                value=base_loaded,
                interactive=True
            )
@@ -112,60 +88,36 @@ with gr.Blocks(css="style.css") as demo:
                    ("Roll left", "guoyww/animatediff-motion-lora-rolling-anticlockwise"),
                    ("Roll right", "guoyww/animatediff-motion-lora-rolling-clockwise"),
                ],
-                value="guoyww/animatediff-motion-lora-zoom-in",
+                value="",
                interactive=True
            )
            select_step = gr.Dropdown(
                label='Inference steps',
-                choices=[
-                    ('1-Step', 1),
-                    ('2-Step', 2),
-                    ('4-Step', 4),
-                    ('8-Step', 8),
-                ],
+                choices=[('1-Step', 1), ('2-Step', 2), ('4-Step', 4), ('8-Step', 8)],
                value=4,
                interactive=True
            )
-            submit = gr.Button(
-                scale=1,
-                variant='primary'
+            slider_duration = gr.Slider(
+                label='Video Duration (seconds)',
+                minimum=1,
+                maximum=10,
+                value=5,
+                step=1,
+                interactive=True
            )
+            submit = gr.Button(scale=1, variant='primary')
    video = gr.Video(
-        label='AnimateDiff-Lightning',
+        label='Generated Video',
        autoplay=True,
        height=512,
        width=512,
        elem_id="video_output"
    )
 
-    gr.on(triggers=[
-        submit.click,
-        prompt.submit
-    ],
-    fn = generate_image,
-    inputs = [prompt, select_base, select_motion, select_step],
-    outputs = [video],
-    api_name = "instant_video",
-    queue = False
-    )
-
-    gr.Examples(
-        examples=[
-            ["Focus: Eiffel Tower (Animate: Clouds moving)"], #Atmosphere Movement Example
-            ["Focus: Trees In forest (Animate: Lion running)"], #Object Movement Example
-            ["Focus: Astronaut in Space"], #Normal
-            ["Focus: Group of Birds in sky (Animate: Birds Moving) (Shot From distance)"], #Camera distance
-            ["Focus: Statue of liberty (Shot from Drone) (Animate: Drone coming toward statue)"], #Camera Movement
-            ["Focus: Panda in Forest (Animate: Drinking Tea)"], #Doing Something
-            ["Focus: Kids Playing (Season: Winter)"], #Atmosphere or Season
-            {"Focus: Cars in Street (Season: Rain, Daytime) (Shot from Distance) (Movement: Cars running)"} #Mixture
-        ],
+    submit.click(
        fn=generate_image,
-        inputs=[prompt],
-        outputs=[video],
-        cache_examples="lazy",
-    )
+        inputs=[prompt, select_base, select_motion, select_step, slider_duration],
+        outputs=[video]
+    )
 
    demo.queue().launch()
-
-Translate
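Note: for reference, a minimal sketch of the CPU generation path this commit sets up. The Lightning checkpoint repo and filename below are assumptions (they sit outside this diff, which only shows the "Cartoon" base), and stock diffusers' export_to_video takes a frame list and fps, so clip length falls out of frame count rather than a duration kwarg:

import torch
from diffusers import AnimateDiffPipeline, EulerDiscreteScheduler, MotionAdapter
from diffusers.utils import export_to_video
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

device = "cpu"
dtype = torch.float32  # half precision is poorly supported on CPU, hence float32

# Empty motion adapter; distilled motion weights are loaded into the UNet below.
adapter = MotionAdapter().to(device, dtype)
pipe = AnimateDiffPipeline.from_pretrained(
    "frankjoshua/toonyou_beta6",  # the Space's "Cartoon" base from the constants above
    motion_adapter=adapter,
    torch_dtype=dtype,
).to(device)
pipe.scheduler = EulerDiscreteScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear"
)
# Assumed checkpoint, matching the upstream AnimateDiff-Lightning demo.
pipe.unet.load_state_dict(
    load_file(
        hf_hub_download("ByteDance/AnimateDiff-Lightning",
                        "animatediff_lightning_4step_diffusers.safetensors"),
        device=device,
    ),
    strict=False,
)

output = pipe(prompt="Focus: Panda in Forest (Animate: Drinking Tea)",
              guidance_scale=1.0, num_inference_steps=4)
# Clip length = frame count / fps; no duration kwarg in stock diffusers.
export_to_video(output.frames[0], "/tmp/sample.mp4", fps=10)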
 
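Note: the globals kept as context (step_loaded, base_loaded, motion_loaded) cache what is currently loaded so repeat requests skip reloads. The body of generate_image sits between hunks and is not shown; a hedged sketch of the motion-LoRA swap such caching typically implies, using diffusers' LoRA API (the adapter name and weight are assumptions):

def switch_motion(pipe, motion_loaded, motion_id):
    """Swap the active motion LoRA only when the selection changed (sketch only;
    the real body of generate_image is not part of this diff)."""
    if motion_loaded == motion_id:
        return motion_loaded
    pipe.unload_lora_weights()
    if motion_id:  # "" means no motion LoRA, matching the dropdown's new default
        pipe.load_lora_weights(motion_id, adapter_name="motion")
        pipe.set_adapters(["motion"], [0.7])
    return motion_id

With value="" as the new dropdown default, the first request runs the base motion module untouched and only loads a LoRA once the user picks one.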