# This code is based on Sanchit Gandhi's MusicGen-Streaming: https://huggingface.co/spaces/sanchit-gandhi/musicgen-streaming
from queue import Queue
from threading import Thread

import numpy as np
import torch
from transformers import MusicgenForConditionalGeneration, MusicgenProcessor, set_seed
import gradio as gr
import spaces

model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")

title = "AI Radio"


class MusicgenStreamer:
    def __init__(self, model, device=None, play_steps=10, stride=None, timeout=None):
        self.decoder = model.decoder
        self.audio_encoder = model.audio_encoder
        self.generation_config = model.generation_config
        self.device = device or model.device

        # Number of decoding steps between consecutive audio chunks.
        self.play_steps = play_steps
        # Overlap (in audio samples) held back at each chunk boundary to smooth the
        # transition into the next chunk; defaults to roughly one sixth of a chunk.
        if stride is not None:
            self.stride = stride
        else:
            hop_length = np.prod(self.audio_encoder.config.upsampling_ratios)
            self.stride = hop_length * (play_steps - self.decoder.num_codebooks) // 6

        self.token_cache = None
        self.to_yield = 0

        # Queue that hands finished audio chunks from the generation thread to the consumer.
        self.audio_queue = Queue()
        self.stop_signal = object()
        self.timeout = timeout

    def apply_delay_pattern_mask(self, input_ids):
        # Revert MusicGen's codebook delay pattern, strip the padding tokens, and
        # decode the remaining codec tokens to a waveform.
        _, delay_pattern_mask = self.decoder.build_delay_pattern_mask(
            input_ids[:, :1],
            pad_token_id=self.generation_config.decoder_start_token_id,
            max_length=input_ids.shape[-1],
        )
        input_ids = self.decoder.apply_delay_pattern_mask(input_ids, delay_pattern_mask)
        input_ids = input_ids[input_ids != self.generation_config.pad_token_id].reshape(
            1, self.decoder.num_codebooks, -1
        )[None, ...]
        output_values = self.audio_encoder.decode(
            input_ids.to(self.audio_encoder.device), audio_scales=[None]
        )
        return output_values.audio_values[0, 0].cpu().float().numpy()

    def put(self, value):
        # Called by `model.generate` once per decoding step with the newly sampled tokens.
        if value.shape[0] // self.decoder.num_codebooks > 1:
            raise ValueError("MusicgenStreamer only supports batch size 1")

        # Note: `is None` rather than a truthiness check, since `if tensor:` raises
        # for tensors with more than one element.
        if self.token_cache is None:
            self.token_cache = value
        else:
            self.token_cache = torch.cat([self.token_cache, value[:, None]], dim=-1)

        # Every `play_steps` tokens, decode the cache and emit the new audio, keeping
        # the last `stride` samples back as overlap with the next chunk.
        if self.token_cache.shape[-1] % self.play_steps == 0:
            audio_values = self.apply_delay_pattern_mask(self.token_cache)
            self.on_finalized_audio(audio_values[self.to_yield : -self.stride])
            self.to_yield += len(audio_values) - self.to_yield - self.stride

    def end(self):
        # Flush whatever audio remains once generation has finished.
        if self.token_cache is not None:
            audio_values = self.apply_delay_pattern_mask(self.token_cache)
        else:
            audio_values = np.zeros(self.to_yield)
        self.on_finalized_audio(audio_values[self.to_yield :], stream_end=True)

    def on_finalized_audio(self, audio, stream_end=False):
        self.audio_queue.put(audio, timeout=self.timeout)
        if stream_end:
            self.audio_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.audio_queue.get(timeout=self.timeout)
        if value is self.stop_signal:
            raise StopIteration()
        return value


@spaces.GPU()
def generate_audio(text_prompt, audio_length_in_s=10.0, play_steps_in_s=2.0, seed=0):
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    if device != model.device:
        model.to(device)
        if device == "cuda:0":
            model.half()

    # MusicGen emits one codec frame per decoding step, so both the token budget and
    # the chunk size scale with the codec frame rate.
    max_new_tokens = int(model.audio_encoder.config.frame_rate * audio_length_in_s)
    play_steps = int(model.audio_encoder.config.frame_rate * play_steps_in_s)

    inputs = processor(text=text_prompt, padding=True, return_tensors="pt")
    streamer = MusicgenStreamer(model, device=device, play_steps=play_steps)

    # Seed before launching the generation thread so that sampling is reproducible.
    set_seed(seed)
    generation_kwargs = dict(**inputs.to(device), streamer=streamer, max_new_tokens=max_new_tokens)
    Thread(target=model.generate, kwargs=generation_kwargs).start()

    for new_audio in streamer:
        print(f"Sample of length: {round(new_audio.shape[0] / model.audio_encoder.config.sampling_rate, 2)} seconds")
        yield model.audio_encoder.config.sampling_rate, new_audio
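

# A minimal standalone sketch (an addition, not part of the original Space): the
# helper below shows how `generate_audio` can be consumed outside Gradio, e.g. to
# save the streamed chunks to a WAV file. The `soundfile` dependency is an
# assumption of this example only and is not needed by the app itself.
def save_prompt_to_wav(text_prompt, path="out.wav", audio_length_in_s=10.0):
    import soundfile as sf  # assumed installed via `pip install soundfile`

    # `generate_audio` yields (sampling_rate, chunk) pairs; collect the chunks
    # and concatenate them into one waveform.
    chunks = [chunk for _, chunk in generate_audio(text_prompt, audio_length_in_s)]
    sf.write(path, np.concatenate(chunks), model.audio_encoder.config.sampling_rate)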

demo = gr.Interface(
    fn=generate_audio,
    inputs=[
        gr.Text(label="Prompt", value="80s pop track with synth and instrumentals"),
        gr.Slider(10, 30, value=15, step=5, label="Audio length in seconds"),
        gr.Slider(
            0.5,
            2.5,
            value=1.5,
            step=0.5,
            label="Streaming interval in seconds",
            info="Lower = shorter chunks, lower latency, more codec steps",
        ),
        gr.Slider(0, 10, value=5, step=1, label="Seed for random generations"),
    ],
    outputs=[gr.Audio(label="Generated Music", streaming=True, autoplay=True)],
    title=title,
    cache_examples=False,
)

demo.queue().launch()
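
# Note (an addition): `demo.queue()` is what lets the generator output of
# `generate_audio` stream chunk-by-chunk into the `gr.Audio(streaming=True)`
# component. Outside Hugging Face Spaces the `spaces.GPU` decorator is a no-op,
# so the app should also run locally, falling back to CPU when no GPU is available.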