import streamlit as st
import numpy as np
import time
import pickle
import tensorflow as tf
from music21 import note, chord, instrument, stream
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout, Activation, BatchNormalization
import pretty_midi
import soundfile as sf
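
# Streamlit app for LSTM-based piano music generation.
# Pipeline: load the pickled training notes ('final_notes'), rebuild the LSTM
# network and load its pre-trained weights ('best2.h5'), predict a user-chosen
# number of notes, write them to a MIDI file, then render that MIDI to WAV for
# playback in the browser.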
def midi_to_audio(midi_file, output_file):
    """Render a MIDI file to a WAV file using pretty_midi's built-in synthesizer."""
    # Load the MIDI file
    midi_data = pretty_midi.PrettyMIDI(midi_file)
    # Synthesize the audio from the MIDI data
    audio_data = midi_data.synthesize()
    # Save to a WAV file at 44.1 kHz
    sf.write(output_file, audio_data, 44100)
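
# Example usage (the same paths used further down in this app):
#   midi_to_audio('test_output2.mid', 'output.wav')
# Note: PrettyMIDI.synthesize() renders with simple sine waves by default; a
# SoundFont-based renderer such as FluidSynth could give a more realistic
# piano sound, but is not used here.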
####################### Music Generation Functions #######################
def generate(seq_len, x):
    """ Generate a piano MIDI file of x notes, seeded with sequences of length seq_len """
    # load the notes used to train the model
    with open('final_notes', 'rb') as filepath:
        notes = pickle.load(filepath)
    # get all pitch names
    pitchnames = sorted(set(item for item in notes))
    n_vocab = len(set(notes))
    network_input, normalized_input = prepare_sequences(notes, pitchnames, n_vocab, seq_length=seq_len)
    model = create_network(normalized_input, n_vocab)
    prediction_output = generate_notes(model, network_input, pitchnames, n_vocab, x)
    create_midi(prediction_output)
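
# generate() is called below as generate(10, len_notes), so a 10-note window
# seeds the model. LSTM weights are independent of sequence length, so loading
# 'best2.h5' works either way, but output quality is usually best when this
# window matches the training sequence length (an assumption about how the
# weights were trained).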
def prepare_sequences(notes, pitchnames, n_vocab, seq_length):
    """ Prepare the sequences used by the Neural Network """
    # map between notes and integers and back
    note_to_int = dict((note, number) for number, note in enumerate(pitchnames))
    sequence_length = seq_length
    network_input = []
    output = []
    for i in range(0, len(notes) - sequence_length, 1):
        sequence_in = notes[i:i + sequence_length]
        sequence_out = notes[i + sequence_length]
        network_input.append([note_to_int[char] for char in sequence_in])
        output.append(note_to_int[sequence_out])
    n_patterns = len(network_input)
    # reshape the input into a format compatible with LSTM layers
    normalized_input = np.reshape(network_input, (n_patterns, sequence_length, 1))
    # normalize input
    normalized_input = normalized_input / float(n_vocab)
    return (network_input, normalized_input)
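
# prepare_sequences returns two views of the corpus:
#   network_input    - list of integer-encoded windows, each seq_length notes long
#   normalized_input - the same data as a float array of shape
#                      (n_patterns, seq_length, 1), scaled into [0, 1) for the LSTM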
def create_network(network_input, n_vocab):
    """ create the structure of the neural network """
    adam = tf.keras.optimizers.Adam(0.001)
    model = Sequential()
    model.add(LSTM(
        512,
        input_shape=(network_input.shape[1], network_input.shape[2]),
        recurrent_dropout=0.3,
        return_sequences=True
    ))
    model.add(LSTM(512, return_sequences=True, recurrent_dropout=0.3))
    model.add(LSTM(256))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    # Load the pre-trained weights into the network
    model.load_weights('best2.h5')
    return model
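
# The layer sizes above (512 -> 512 -> 256 LSTM units, a 256-unit Dense layer,
# and an n_vocab-wide softmax output) must match the architecture that produced
# 'best2.h5', otherwise load_weights() will fail. The model is compiled for
# completeness; only predict() is used at inference time.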
def generate_notes(model, network_input, pitchnames, n_vocab, x):
    """ Generate notes from the neural network based on a sequence of notes """
    # pick a random sequence from the input as a starting point for the prediction
    start = np.random.randint(0, len(network_input) - 1)
    int_to_note = dict((number, note) for number, note in enumerate(pitchnames))
    pattern = network_input[start]
    prediction_output = []
    # generate x notes (x entered by user)
    for note_index in range(x):
        prediction_input = np.reshape(pattern, (1, len(pattern), 1))
        prediction_input = prediction_input / float(n_vocab)
        prediction = model.predict(prediction_input, verbose=0)
        # greedily take the most probable next note
        index = np.argmax(prediction)
        result = int_to_note[index]
        prediction_output.append(result)
        # slide the window forward by one note
        pattern.append(index)
        pattern = pattern[1:]
    return prediction_output
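
# generate_notes() decodes greedily: np.argmax always picks the single most
# probable next token, so a given seed pattern always produces the same melody.
# A common alternative (not used in this app) is temperature sampling, with
# `temperature` being a tunable parameter around 1.0, e.g.:
#
#   probs = np.log(prediction[0] + 1e-9) / temperature
#   probs = np.exp(probs) / np.sum(np.exp(probs))
#   index = np.random.choice(len(probs), p=probs)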
def create_midi(prediction_output):
    """ convert the output from the prediction to notes and create a midi file from the notes """
    offset = 0
    output_notes = []
    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        # pattern is a chord (dot-separated pitch integers)
        if ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        # pattern is a rest
        elif pattern == 'r':
            new_note = note.Rest(quarterLength=1.0)  # rest with a quarter-note duration
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)
        # pattern is a single note
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)
        # increase offset each iteration so that notes do not stack
        offset += 0.5
    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp='test_output2.mid')
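
# The model's vocabulary encodes three kinds of tokens, mirrored above:
#   a pitch name            -> a single note
#   dot-separated integers  -> a chord
#   'r'                     -> a rest
# Each element is placed 0.5 quarter-notes after the previous one, and the
# resulting stream is written to 'test_output2.mid'.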
# Set page config
st.set_page_config(page_title="Music Generation", page_icon=":tada:", layout="wide")
# Header section
with st.container():
    left_column, right_column = st.columns(2)
    with left_column:
        st.subheader("Music Generation :musical_keyboard:")
        st.write(
            "Our website generates piano music: listen to new musical notes composed by an LSTM "
            "artificial neural network, a deep learning model widely used in AI. Let's get started :notes:"
        )
    with right_column:
        # Display a GIF instead of a Lottie animation
        st.image("im.gif", use_column_width=True)
# Sidebar for user input
with st.sidebar:
    # Default number of notes to generate
    default_len_notes = 100
    len_notes = st.slider('Please Choose The Notes Length', 20, 750, default_len_notes, 4)
    st.write("Notes Length = ", len_notes)
# Music generation functionality
if st.sidebar.button('Generate My Music'):
    # Use the slider value if it was set, otherwise fall back to the default
    if len_notes is not None:
        with st.container():
            st.write("---")
            with st.spinner('✨ Your music is now under processing... ✨'):
                time.sleep(10)  # Simulate processing time
                generate(10, len_notes)
                midi_to_audio('test_output2.mid', 'output.wav')
            st.audio('output.wav')
            st.markdown("Here you are! You can download your music by right-clicking on the media player.")
    else:
        # Fall back to the default value if no selection was made
        with st.container():
            st.write("---")
            st.warning("No notes length selected. Using the default value of 100.")
            with st.spinner('✨ Your music is now under processing... ✨'):
                time.sleep(10)  # Simulate processing time
                generate(10, default_len_notes)
                midi_to_audio('test_output2.mid', 'output.wav')
            st.audio('output.wav')
            st.markdown("Here you are! You can download your music by right-clicking on the media player.")
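
# To run this app locally (assuming the pickled notes 'final_notes', the weights
# 'best2.h5', and the GIF 'im.gif' sit next to this script; adjust the filename
# if it is not app.py):
#   streamlit run app.py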