josuelmet committed
Commit 923fa3c · 1 Parent(s): c7042e3

Upload _Generation.py

Files changed (1)
  1. _Generation.py +661 -0
_Generation.py ADDED
@@ -0,0 +1,661 @@
import guitarpro
from guitarpro import *
from matplotlib import pyplot as plt
import mgzip
import numpy as np
import os
import pickle
from tqdm import tqdm

import tensorflow as tf
from tensorflow import keras
from keras.callbacks import ModelCheckpoint
from keras.models import Sequential
from keras.layers import Activation, Dense, LSTM, Dropout, Flatten

from _Decompressor import SongWriter


# Define some constants:

# PITCH[i] = the pitch associated with MIDI note number i.
# For example, PITCH[69] = 'A4'.
PITCH = {val: str(GuitarString(number=0, value=val)) for val in range(128)}
# MIDI[string] = the MIDI number associated with the note described by string.
# For example, MIDI['A4'] = 69.
MIDI = {str(GuitarString(number=0, value=val)): val for val in range(128)}
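
# For example: PITCH and MIDI are inverse lookup tables, so
#   PITCH[69] == 'A4', MIDI['A4'] == 69, and MIDI[PITCH[40]] == 40 ('E2').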


# Generation helper methods:
def thirty_seconds_to_duration(count):
    if count % 3 == 0:
        # The note is dotted: duration = 32 / (count * 2/3) = 48 / count, isDotted = True.
        return (48//count, True)
    else:
        # The note is not dotted: duration = 32 / count, isDotted = False.
        return (32//count, False)
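
# For example: 12 thirty-seconds is a dotted quarter and 8 thirty-seconds is
# a plain quarter:
#   thirty_seconds_to_duration(12) == (4, True)    # 48 // 12, dotted
#   thirty_seconds_to_duration(8)  == (4, False)   # 32 // 8, plain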


def quantize_thirty_seconds(value):

    # 32nd-note values of each fundamental type of note (not including 64th notes, of course).
    vals = np.array([32,  # whole
                     24,  # dotted half
                     16,  # half
                     12,  # dotted quarter
                     8,   # quarter
                     6,   # dotted eighth
                     4,   # eighth
                     3,   # dotted sixteenth
                     2,   # sixteenth
                     1])  # thirty-second

    list_out = []

    # Greedily break the value down into the largest note durations that fit.
    for v in vals:
        if v <= value:
            list_out.append(thirty_seconds_to_duration(v))
            value -= v

    return np.array(list_out)
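
# For example: 20 thirty-seconds decomposes greedily into a half note (16)
# plus an eighth note (4). Note that np.array coerces the (duration, isDotted)
# tuples to ints, so the dotted flag comes back as 0/1:
#   quantize_thirty_seconds(20)  ->  array([[2, 0], [8, 0]])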


def adjust_to_4_4(prediction_output):
    '''
    Adjust prediction output to be in 4/4 time.
    Then, separate the beats into measures.
    '''

    # This will be the adjusted prediction output.
    new_prediction_output = []

    time = 0
    for beat in prediction_output:

        # Calculate the fraction of a measure encompassed by the current beat / chord.
        beat_time = (1 / beat[1]) * (1 + 0.5 * beat[2])

        # Advance a running total of measure time, and compute the residual
        # that spills past the barline (in 4/4 time, one measure = 1.0).
        measure_time = time + beat_time
        leftover_time = measure_time % 1

        # If the measure index has changed and there is significant leftover beat time:
        if (int(measure_time) > int(time)) and (leftover_time > 1/128):

            # Calculate the initial 32nd notes encompassed by this beat in the current measure.
            this_measure_thirty_seconds = int(32 * (1 - time % 1))
            # Calculate the remaining 32nd notes encompassed by this beat in the next measure.
            next_measure_thirty_seconds = int(32 * leftover_time)

            # Get the Duration object parameters for this measure and the next measure.
            this_measure_durations = quantize_thirty_seconds(this_measure_thirty_seconds)
            next_measure_durations = quantize_thirty_seconds(next_measure_thirty_seconds)

            # The first quantized duration keeps the original chord; every
            # subsequent piece of the split beat becomes a tied continuation.
            for duration_idx, duration in enumerate(this_measure_durations):
                time += (1 / duration[0]) * (1 + 0.5 * duration[1])

                chord = beat[0] if duration_idx == 0 else 'tied'

                new_prediction_output.append((chord, duration[0], duration[1], beat[3]))

            # Everything that spills into the next measure is tied.
            for duration in next_measure_durations:
                time += (1 / duration[0]) * (1 + 0.5 * duration[1])

                new_prediction_output.append(('tied', duration[0], duration[1], beat[3]))

            continue

        # Otherwise, the beat fits entirely within the current measure; keep it as-is.
        time += beat_time
        new_prediction_output.append((beat[0], beat[1], beat[2], beat[3]))

    # Use the final cumulative time as the number of measures in the new 4/4 song.
    num_measures = int(np.ceil(time))

    song = np.empty(num_measures, dtype=object)

    time = 0
    m_idx = 0

    timestamps = []

    # Bucket each beat into the measure containing its start time.
    for beat in new_prediction_output:
        timestamps.append(time)

        m_idx = int(time)

        if song[m_idx] is None:
            song[m_idx] = [beat]
        else:
            song[m_idx].append(beat)

        time += (1 / beat[1]) * (1 + 0.5 * beat[2])

    # Sanity check: every measure index should appear as some beat's start time.
    print(f'4/4 adjusted correctly: {set(range(num_measures)).issubset(set(timestamps))}')

    return song
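
# A worked example (hypothetical beat tuples of the form
# (chord, duration, isDotted, extra)): a half note starting at time 0.75
# crosses the barline, so it is split into a quarter note in the current
# measure plus a tied quarter note in the next measure. Because
# quantize_thirty_seconds() returns an integer array, the isDotted flag of
# split beats comes back as 0/1:
#   input beat:  ('C3_0', 2, 0, 0) at time 0.75
#   output:      ('C3_0', 4, 0, 0), ('tied', 4, 0, 0)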


class Generator:
    def __init__(self, num_tracks_to_generate=5, as_fingerings=True, sequence_length=100):
        # Load the note vocabulary and its integer encodings.
        with mgzip.open(os.path.join('data', 'notes_data.pickle.gz'), 'rb') as filepath:
            self.notes = pickle.load(filepath)
            self.note_to_int = pickle.load(filepath)
            self.int_to_note = pickle.load(filepath)
            self.n_vocab = pickle.load(filepath)
        self.NUM_TRACKS_TO_GENERATE = num_tracks_to_generate
        self.as_fingerings = as_fingerings
        self.sequence_length = sequence_length

        # Load per-track metadata (artist, tempo, tuning, note offsets, ...).
        with mgzip.open(os.path.join('data', 'track_data.pickle.gz'), 'rb') as filepath:
            self.track_data = pickle.load(filepath)

        self.model = keras.models.load_model('minigpt')

        self.ints = np.array([self.note_to_int[x] for x in self.notes])

    def generate_track(self, track_idx=None):

        if track_idx is None:
            # Choose a random track.
            track_idx = np.random.choice(len(self.track_data))

        # Get the note indices corresponding to the beginning and ending of the track.
        song_note_idx_first = self.track_data.loc[track_idx]['noteStartIdx']
        song_note_idx_last = self.track_data.loc[track_idx+1]['noteStartIdx']

        # Choose a random starting point within the track.
        start_idx = np.random.randint(low=song_note_idx_first,
                                      high=song_note_idx_last)

        # Choose a number of initial notes to select from the track, at most 100.
        num_initial_notes = np.random.choice(min(100, song_note_idx_last - start_idx))

        # Select the initial notes (tokens) used to seed the model.
        start_tokens = [_ for _ in self.ints[start_idx:start_idx+num_initial_notes]]

        max_tokens = 100

        def sample_from(logits, top_k=10):
            # Keep the top_k largest logits, softmax them into probabilities,
            # and sample one of the surviving token indices.
            logits, indices = tf.math.top_k(logits, k=top_k, sorted=True)
            indices = np.asarray(indices).astype("int32")
            preds = keras.activations.softmax(tf.expand_dims(logits, 0))[0]
            preds = np.asarray(preds).astype("float32")
            return np.random.choice(indices, p=preds)

        num_tokens_generated = 0
        tokens_generated = []

        # Autoregressive sampling loop: pad (or trim) the context to
        # sequence_length, predict logits, sample the next token, and append
        # it to the context.
        while num_tokens_generated <= max_tokens:
            pad_len = self.sequence_length - len(start_tokens)
            sample_index = len(start_tokens) - 1
            if pad_len < 0:
                x = start_tokens[:self.sequence_length]
                sample_index = self.sequence_length - 1
            elif pad_len > 0:
                x = start_tokens + [0] * pad_len
            else:
                x = start_tokens
            x = np.array([x])
            y, _ = self.model.predict(x)
            sample_token = sample_from(y[0][sample_index])
            tokens_generated.append(sample_token)
            start_tokens.append(sample_token)
            num_tokens_generated = len(tokens_generated)

        # start_tokens now holds the seed notes plus everything generated, so
        # convert it directly; concatenating tokens_generated onto it again
        # would duplicate the generated notes.
        generated_notes = [self.int_to_note[num] for num in start_tokens]

        return track_idx, generated_notes
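
    # A quick illustration of the top-k filter in sample_from (a sketch with
    # made-up logits): with top_k=2, only the two largest logits survive.
    #   logits = [0.1, 2.0, 0.5, 1.5]  ->  keeps values [2.0, 1.5] at indices [1, 3]
    #   softmax([2.0, 1.5]) ~= [0.62, 0.38], so index 1 is sampled ~62% of the time.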


    def generate_track_batch(self, artist=None):

        # Integer dtype matters here: these values are later used as .loc
        # labels into track_data.
        self.track_indices = np.zeros(self.NUM_TRACKS_TO_GENERATE, dtype=int)
        self.tracks = np.zeros(self.NUM_TRACKS_TO_GENERATE, dtype=object)

        for i in tqdm(range(self.NUM_TRACKS_TO_GENERATE)):
            if artist is None:
                idx, t = self.generate_track()
            else:
                # Seed generation only from tracks by the requested artist.
                idx, t = self.generate_track(track_idx=np.random.choice(list(self.track_data[self.track_data.artist == artist].index)))
            self.track_indices[i] = idx
            self.tracks[i] = t


    def save_tracks(self, filepath='_generation.gp5'):

        songWriter = SongWriter(initialTempo=self.track_data.loc[self.track_indices[0]]['tempo'])

        for idx in range(len(self.tracks)):
            new_track = adjust_to_4_4(self.tracks[idx])

            # Get the tempo and tuning (lowest string note) of the song:
            tempo = self.track_data.loc[self.track_indices[idx]]['tempo']
            instrument = self.track_data.loc[self.track_indices[idx]]['instrument']
            name = self.track_data.loc[self.track_indices[idx]]['song']
            lowest_string = self.track_data.loc[self.track_indices[idx]]['tuning']

            if not self.as_fingerings:
                # Get all the unique pitch values from the new track.
                pitchnames = set.union(*[set([beat[0].split('_')[0] for beat in measure]) for measure in new_track])
                pitchnames.discard('rest')  # Ignore rests
                pitchnames.discard('tied')  # Ignore tied notes
                pitchnames.discard('dead')  # Ignore dead/ghost notes
                lowest_string = min([MIDI[pitch] for pitch in pitchnames])  # Get the lowest MIDI value / pitch
                lowest_string = min(lowest_string, MIDI['E2'])  # Don't allow any tunings higher than standard.

            # Standard tuning
            tuning = {1: MIDI['E4'],
                      2: MIDI['B3'],
                      3: MIDI['G3'],
                      4: MIDI['D3'],
                      5: MIDI['A2'],
                      6: MIDI['E2']}

            if lowest_string <= MIDI['B1']:
                # 7-string guitar case: add a low B string and downtune from B1.
                tuning[7] = MIDI['B1']
                downtune = MIDI['B1'] - lowest_string
            else:
                # Downtune a 6-string guitar by however much is necessary.
                downtune = MIDI['E2'] - lowest_string

            tuning = {k: v - downtune for k, v in tuning.items()}  # Adjust to the new tuning
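
            # For example (a sketch): a track whose lowest note is D2 stays on
            # six strings (D2 > B1), so downtune = MIDI['E2'] - MIDI['D2'] = 2
            # semitones, turning standard tuning into D standard
            # (D4 A3 F3 C3 G2 D2, high string to low).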

            # Write the track to the song writer.
            songWriter.decompress_track(new_track, tuning, tempo=tempo, instrument=instrument, name=name, as_fingerings=self.as_fingerings)

        songWriter.write(filepath)
        print('Finished')
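

# A minimal usage sketch (assuming the pickled data files, the 'minigpt'
# model directory, and _Decompressor.SongWriter are present; the artist name
# below is hypothetical):
#
#   generator = Generator(num_tracks_to_generate=5)
#   generator.generate_track_batch(artist='Some Artist')
#   generator.save_tracks(filepath='_generation.gp5')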