Commit · 8429ccf
Parent(s): 581f70e
Added GPU duration

Files changed:
- app.py +1 -1
- aria/aria.py +2 -2
app.py
```diff
@@ -168,7 +168,7 @@ def convert_midi_to_wav(midi_path):
         print(f"Error converting MIDI to WAV: {str(e)}")
         return None
 
-@spaces.GPU  # Set duration to 60 seconds for music generation
+@spaces.GPU(duration=120)  # Set duration to 120 seconds for music generation
 def generate_music(image, conditioning_type, gen_len, temperature, top_p, min_instruments):
     """Generate music from input image"""
     model = get_model(conditioning_type)
```
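On ZeroGPU Spaces, `@spaces.GPU(duration=...)` sets the per-call GPU time budget in seconds; without an explicit value the decorator falls back to the default allotment (60 seconds), which is too short for long generation runs. A minimal sketch of the pattern, assuming a Hugging Face Space with the `spaces` package installed; `heavy_generation` is a hypothetical stand-in for any GPU-bound function:

```python
# Minimal sketch of the ZeroGPU decorator pattern; assumes this runs
# inside a Hugging Face Space where the `spaces` package is available.
import spaces

@spaces.GPU(duration=120)  # hold a GPU for up to 120 s per call
def heavy_generation(prompt: str):
    # GPU work (model forward pass, sampling, MIDI decoding, ...) goes here;
    # `heavy_generation` and its body are placeholders for illustration.
    ...
```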
aria/aria.py
```diff
@@ -15,7 +15,7 @@ sys.path.append(MIDI_EMOTION_PATH)
 class ARIA:
     """ARIA model that generates music from images based on emotional content."""
 
-    @spaces.GPU
+    @spaces.GPU(duration=20)  # Model loading should be quick
     def __init__(
         self,
         image_model_checkpoint: str,
@@ -60,7 +60,7 @@ class ARIA:
         self.midi_model.load_state_dict(torch.load(model_fp, map_location=self.device, weights_only=True))
         self.midi_model.eval()
 
-    @spaces.GPU
+    @spaces.GPU(duration=120)
     @torch.inference_mode()  # More efficient than no_grad for inference
     def generate(
         self,
```
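The `@torch.inference_mode()` decorator kept on `generate` goes further than `torch.no_grad()`: it also skips view tracking and version-counter updates, so tensors created inside it can never be used in autograd later. A self-contained sketch, independent of the ARIA classes above; the `Linear` model is illustrative only:

```python
# Self-contained sketch of torch.inference_mode() used as a decorator;
# the tiny Linear model is illustrative, not part of the ARIA code.
import torch

@torch.inference_mode()  # disables autograd plus view/version tracking
def predict(model: torch.nn.Module, x: torch.Tensor) -> torch.Tensor:
    return model(x)

model = torch.nn.Linear(4, 2)
y = predict(model, torch.randn(1, 4))
assert not y.requires_grad  # output is an inference tensor with no grad history
```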