asigalov61 committed
Commit: 10c9409
Parent(s): 0004d49
Upload 2 files
Imagen_MIDI_Images_Solo_Piano_Model_Maker.ipynb
ADDED
@@ -0,0 +1,490 @@
{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "private_outputs": true,
      "provenance": [],
      "gpuType": "T4"
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "source": [
        "# Imagen MIDI Images Solo Piano Model Maker (ver. 1.0)\n",
        "\n",
        "***\n",
        "\n",
        "Powered by tegridy-tools: https://github.com/asigalov61/tegridy-tools\n",
        "\n",
        "***\n",
        "\n",
        "WARNING: This complete implementation is a functioning model of Artificial Intelligence. Please exercise great humility, care, and respect. https://www.nscai.gov/\n",
        "\n",
        "***\n",
        "\n",
        "#### Project Los Angeles\n",
        "\n",
        "#### Tegridy Code 2024\n",
        "\n",
        "***"
      ],
      "metadata": {
        "id": "ipXP5fe65oQQ"
      }
    },
    {
      "cell_type": "markdown",
      "source": [
        "# (SETUP ENVIRONMENT)"
      ],
      "metadata": {
        "id": "ZLJbLL226y2m"
      }
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "pxNxlyfZ8hCg",
        "cellView": "form"
      },
      "outputs": [],
      "source": [
        "# @title Install dependencies\n",
        "!git clone --depth 1 https://github.com/asigalov61/tegridy-tools\n",
        "\n",
        "!pip install -U imagen-pytorch\n",
        "\n",
        "!pip install -U huggingface_hub"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "#@title Import all needed modules\n",
        "\n",
        "print('=' * 70)\n",
        "print('Loading core modules...')\n",
        "\n",
        "import os\n",
        "\n",
        "import numpy as np\n",
        "from tqdm import tqdm\n",
        "\n",
        "from huggingface_hub import snapshot_download\n",
        "\n",
        "print('Done!')\n",
        "print('=' * 70)\n",
        "print('Creating I/O dirs...')\n",
        "\n",
        "if not os.path.exists('/content/Dataset'):\n",
        "    os.makedirs('/content/Dataset')\n",
        "\n",
        "print('Done!')\n",
        "print('=' * 70)\n",
        "print('Loading tegridy-tools modules...')\n",
        "print('=' * 70)\n",
        "\n",
        "%cd /content/tegridy-tools/tegridy-tools\n",
        "\n",
        "import TMIDIX\n",
        "import TPLOTS\n",
        "\n",
        "%cd /content/\n",
        "\n",
        "print('=' * 70)\n",
        "print('Done!')\n",
        "print('=' * 70)\n",
        "print('Loading Imagen...')\n",
        "\n",
        "import torch\n",
        "from imagen_pytorch import Unet, Imagen, ImagenTrainer\n",
        "from imagen_pytorch.data import Dataset\n",
        "\n",
        "print('Done!')\n",
        "print('=' * 70)\n",
        "print('Torch version:', torch.__version__)\n",
        "print('=' * 70)\n",
        "print('Done!')\n",
        "print('=' * 70)"
      ],
      "metadata": {
        "id": "OblKfMMT8rfM",
        "cellView": "form"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "# (DOWNLOAD DATASET)"
      ],
      "metadata": {
        "id": "iDdMYg4haGFn"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "# @title Download and unzip MIDI Images POP909 Solo Piano dataset\n",
        "\n",
        "print('=' * 70)\n",
        "print('Downloading MIDI Images dataset repo...')\n",
        "print('=' * 70)\n",
        "\n",
        "repo_id = \"asigalov61/MIDI-Images\"\n",
        "repo_type = 'dataset'\n",
        "\n",
        "local_dir = \"./MIDI-Images\"\n",
        "\n",
        "snapshot_download(repo_id, repo_type=repo_type, local_dir=local_dir)\n",
        "\n",
        "print('=' * 70)\n",
        "print('Done!')\n",
        "print('=' * 70)\n",
        "\n",
        "print('Unzipping POP909 MIDI Images dataset...')\n",
        "print('=' * 70)\n",
        "%cd /content/Dataset/\n",
        "!unzip /content/MIDI-Images/POP909_MIDI_Dataset_Solo_Piano_MIDI_Images_128_128_32_BW_Ver_1_CC_BY_NC_SA.zip > /dev/null\n",
        "%cd /content/\n",
        "print('=' * 70)\n",
        "print('Done!')\n",
        "print('=' * 70)"
      ],
      "metadata": {
        "cellView": "form",
        "id": "ydi1B-KD7oCC"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "# (INIT MODEL)"
      ],
      "metadata": {
        "id": "llSBItTq9LaP"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "# @title Init Imagen model\n",
        "\n",
        "print('=' * 70)\n",
        "print('Instantiating Imagen model...')\n",
        "print('=' * 70)\n",
        "\n",
        "# unet for the unconditional Imagen\n",
        "\n",
        "unet = Unet(\n",
        "    dim = 64,\n",
        "    dim_mults = (1, 2, 4, 8),\n",
        "    num_resnet_blocks = 1,\n",
        "    channels=1,\n",
        "    layer_attns = (False, False, False, True),\n",
        "    layer_cross_attns = False\n",
        ")\n",
        "\n",
        "# imagen, which contains the unet above\n",
        "\n",
        "imagen = Imagen(\n",
        "    condition_on_text = False, # this must be set to False for unconditional Imagen\n",
        "    unets = unet,\n",
        "    channels=1,\n",
        "    image_sizes = 128,\n",
        "    timesteps = 1000\n",
        ")\n",
        "\n",
        "trainer = ImagenTrainer(\n",
        "    imagen = imagen,\n",
        "    split_valid_from_train = True # whether to split the validation dataset from the training one\n",
        ").cuda()\n",
        "\n",
        "print('=' * 70)\n",
        "print('Done!')\n",
        "print('=' * 70)"
      ],
      "metadata": {
        "id": "m_1uKB_I5ctR",
        "cellView": "form"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "# (INIT DATASET)"
      ],
      "metadata": {
        "id": "vAJkOmUD-Bzb"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "# @title Prep and init dataset\n",
        "batch_size = 16 # @param {\"type\":\"slider\",\"min\":4,\"max\":64,\"step\":4}\n",
        "\n",
        "print('=' * 70)\n",
        "print('Instantiating dataloader...')\n",
        "print('=' * 70)\n",
        "\n",
        "# Instantiate the dataloader, which returns the necessary inputs to the DDPM as a tuple in the order of images, text embeddings, then text masks. In this case only images are returned since this is unconditional training.\n",
        "\n",
        "dataset = Dataset('/content/Dataset', image_size = 128)\n",
        "\n",
        "try:\n",
        "    trainer.add_train_dataset(dataset, batch_size = batch_size)\n",
        "\n",
        "except:\n",
        "    print('Dataset is ready!')\n",
        "    pass\n",
        "\n",
        "print('=' * 70)\n",
        "print('Done!')\n",
        "print('=' * 70)"
      ],
      "metadata": {
        "cellView": "form",
        "id": "nC4H4bKE-GuQ"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "# (TRAIN MODEL)"
      ],
      "metadata": {
        "id": "9-R2bCz1_M4Z"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "# @title Train Imagen model\n",
        "\n",
        "NUM_EPOCHS = 10\n",
        "\n",
        "print('=' * 70)\n",
        "print('Training...')\n",
        "print('=' * 70)\n",
        "\n",
        "NUM_STEPS = NUM_EPOCHS * len(dataset)\n",
        "\n",
        "# working training loop\n",
        "\n",
        "epoch = 1\n",
        "\n",
        "print('=' * 70)\n",
        "print('Epoch #', epoch)\n",
        "print('=' * 70)\n",
        "\n",
        "for i in range(NUM_STEPS):\n",
        "\n",
        "    try:\n",
        "\n",
        "        loss = trainer.train_step(unet_number = 1, max_batch_size = batch_size)\n",
        "        print(f'loss: {loss}', '===', i)\n",
        "\n",
        "        if not (i % 50):\n",
        "            valid_loss = trainer.valid_step(unet_number = 1, max_batch_size = batch_size)\n",
        "            print('=' * 70)\n",
        "            print(f'valid loss: {valid_loss}')\n",
        "            print('=' * 70)\n",
        "\n",
        "        if not (i % 1000) and trainer.is_main: # is_main makes sure this can run in distributed\n",
        "            print('=' * 70)\n",
        "            images = trainer.sample(batch_size = batch_size // 4, return_pil_images = True) # returns List[Image]\n",
        "            images[0].save(f'./sample-{i // 100}.png')\n",
        "            print('=' * 70)\n",
        "\n",
        "        if i and not (i % len(dataset)):\n",
        "            epoch += 1\n",
        "            print('=' * 70)\n",
        "            print('Epoch #', epoch)\n",
        "            print('=' * 70)\n",
        "\n",
        "    except KeyboardInterrupt:\n",
        "        print('=' * 70)\n",
        "        print('Stopping training...')\n",
        "        break\n",
        "\n",
        "print('=' * 70)\n",
        "print('Done!')\n",
        "print('=' * 70)"
      ],
      "metadata": {
        "cellView": "form",
        "id": "PPH5xRwl9kB7"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "# (SAVE/LOAD MODEL)"
      ],
      "metadata": {
        "id": "mA-Of_2-BF7n"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "# @title Save trained model\n",
        "\n",
        "print('=' * 70)\n",
        "print('Saving model...')\n",
        "print('=' * 70)\n",
        "\n",
        "trainer.save('./Imagen_POP909_64_dim_'+str(i)+'_steps_'+str(loss)+'_loss.ckpt')\n",
        "\n",
        "print('=' * 70)\n",
        "print('Done!')\n",
        "print('=' * 70)"
      ],
      "metadata": {
        "id": "3GmoHTGW2_h0",
        "cellView": "form"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# @title Load/reload trained model\n",
        "full_path_to_model_checkpoint = \"./Imagen_POP909_64_dim_10000_steps_0.01_loss.ckpt\" # @param {\"type\":\"string\"}\n",
        "\n",
        "print('=' * 70)\n",
        "print('Loading model...')\n",
        "print('=' * 70)\n",
        "\n",
        "unet = Unet(\n",
        "    dim = 64,\n",
        "    dim_mults = (1, 2, 4, 8),\n",
        "    num_resnet_blocks = 1,\n",
        "    channels=1,\n",
        "    layer_attns = (False, False, False, True),\n",
        "    layer_cross_attns = False\n",
        ")\n",
        "\n",
        "imagen = Imagen(\n",
        "    condition_on_text = False, # this must be set to False for unconditional Imagen\n",
        "    unets = unet,\n",
        "    channels=1,\n",
        "    image_sizes = 128,\n",
        "    timesteps = 1000\n",
        ")\n",
        "\n",
        "trainer = ImagenTrainer(\n",
        "    imagen = imagen,\n",
        "    split_valid_from_train = True # whether to split the validation dataset from the training one\n",
        ").cuda()\n",
        "\n",
        "trainer.load(full_path_to_model_checkpoint)\n",
        "\n",
        "print('=' * 70)\n",
        "print('Done!')\n",
        "print('=' * 70)"
      ],
      "metadata": {
        "id": "lBK05tTeBCIz",
        "cellView": "form"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "# (GENERATE)"
      ],
      "metadata": {
        "id": "uBB8c3sGFQ78"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "# @title Generate music\n",
        "\n",
        "number_of_compositions_to_generate = 8 # @param {\"type\":\"slider\",\"min\":1,\"max\":64,\"step\":1}\n",
        "noise_threshold = 128 # @param {\"type\":\"slider\",\"min\":0,\"max\":255,\"step\":1}\n",
        "\n",
        "print('=' * 70)\n",
        "print('Imagen Model Generator')\n",
        "print('=' * 70)\n",
        "print('Generating', number_of_compositions_to_generate, 'compositions...')\n",
        "print('=' * 70)\n",
        "\n",
        "images = trainer.sample(batch_size = number_of_compositions_to_generate, return_pil_images = True)\n",
        "\n",
        "print('Done!')\n",
        "print('=' * 70)\n",
        "print('Converting to MIDIs...')\n",
        "\n",
        "imgs_array = []\n",
        "\n",
        "for idx, image in enumerate(images):\n",
        "\n",
        "    print('=' * 70)\n",
        "    print('Converting image #', idx)\n",
        "    print('=' * 70)\n",
        "\n",
        "    # binarize the generated grayscale image: pixels below noise_threshold become 0, all others 1\n",
        "    arr = np.array(image)\n",
        "    farr = np.where(arr < noise_threshold, 0, 1)\n",
        "\n",
        "    bmatrix = TPLOTS.images_to_binary_matrix([farr])\n",
        "\n",
        "    score = TMIDIX.binary_matrix_to_original_escore_notes(bmatrix)\n",
        "\n",
        "    output_score, patches, overflow_patches = TMIDIX.patch_enhanced_score_notes(score)\n",
        "\n",
        "    detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(\n",
        "        output_score,\n",
        "        output_signature = 'MIDI Images',\n",
        "        output_file_name = '/content/MIDI-Images-Composition_'+str(idx),\n",
        "        track_name='Project Los Angeles',\n",
        "        list_of_MIDI_patches=patches,\n",
        "        timings_multiplier=256\n",
        "    )\n",
        "\n",
        "print('=' * 70)\n",
        "print('Done!')\n",
        "print('=' * 70)"
      ],
      "metadata": {
        "id": "c-WFUfLvE_lm",
        "cellView": "form"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "# Congrats! You did it! :)"
      ],
      "metadata": {
        "id": "F9_DkzNzHWZq"
      }
    }
  ]
}
imagen_midi_images_solo_piano_model_maker.py
ADDED
@@ -0,0 +1,326 @@
# -*- coding: utf-8 -*-
"""Imagen_MIDI_Images_Solo_Piano_Model_Maker.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/189FJfPRxZ8zrwi44fAR_ywKnMeb73XJJ

# Imagen MIDI Images Solo Piano Model Maker (ver. 1.0)

***

Powered by tegridy-tools: https://github.com/asigalov61/tegridy-tools

***

WARNING: This complete implementation is a functioning model of Artificial Intelligence. Please exercise great humility, care, and respect. https://www.nscai.gov/

***

#### Project Los Angeles

#### Tegridy Code 2024

***

# (SETUP ENVIRONMENT)
"""

# @title Install dependencies
!git clone --depth 1 https://github.com/asigalov61/tegridy-tools

!pip install -U imagen-pytorch

!pip install -U huggingface_hub

# Commented out IPython magic to ensure Python compatibility.
#@title Import all needed modules

print('=' * 70)
print('Loading core modules...')

import os

import numpy as np
from tqdm import tqdm

from huggingface_hub import snapshot_download

print('Done!')
print('=' * 70)
print('Creating I/O dirs...')

if not os.path.exists('/content/Dataset'):
    os.makedirs('/content/Dataset')

print('Done!')
print('=' * 70)
print('Loading tegridy-tools modules...')
print('=' * 70)

# %cd /content/tegridy-tools/tegridy-tools

import TMIDIX
import TPLOTS

# %cd /content/

print('=' * 70)
print('Done!')
print('=' * 70)
print('Loading Imagen...')

import torch
from imagen_pytorch import Unet, Imagen, ImagenTrainer
from imagen_pytorch.data import Dataset

print('Done!')
print('=' * 70)
print('Torch version:', torch.__version__)
print('=' * 70)
print('Done!')
print('=' * 70)

"""# (DOWNLOAD DATASET)"""

# Commented out IPython magic to ensure Python compatibility.
# @title Download and unzip MIDI Images POP909 Solo Piano dataset

print('=' * 70)
print('Downloading MIDI Images dataset repo...')
print('=' * 70)

repo_id = "asigalov61/MIDI-Images"
repo_type = 'dataset'

local_dir = "./MIDI-Images"

snapshot_download(repo_id, repo_type=repo_type, local_dir=local_dir)

print('=' * 70)
print('Done!')
print('=' * 70)

print('Unzipping POP909 MIDI Images dataset...')
print('=' * 70)
# %cd /content/Dataset/
!unzip /content/MIDI-Images/POP909_MIDI_Dataset_Solo_Piano_MIDI_Images_128_128_32_BW_Ver_1_CC_BY_NC_SA.zip > /dev/null
# %cd /content/
print('=' * 70)
print('Done!')
print('=' * 70)

"""# (INIT MODEL)"""

# @title Init Imagen model

print('=' * 70)
print('Instantiating Imagen model...')
print('=' * 70)

# unet for the unconditional Imagen

unet = Unet(
    dim = 64,
    dim_mults = (1, 2, 4, 8),
    num_resnet_blocks = 1,
    channels=1,
    layer_attns = (False, False, False, True),
    layer_cross_attns = False
)

# imagen, which contains the unet above

imagen = Imagen(
    condition_on_text = False, # this must be set to False for unconditional Imagen
    unets = unet,
    channels=1,
    image_sizes = 128,
    timesteps = 1000
)

trainer = ImagenTrainer(
    imagen = imagen,
    split_valid_from_train = True # whether to split the validation dataset from the training one
).cuda()

print('=' * 70)
print('Done!')
print('=' * 70)

"""# (INIT DATASET)"""

# @title Prep and init dataset
batch_size = 16 # @param {"type":"slider","min":4,"max":64,"step":4}

print('=' * 70)
print('Instantiating dataloader...')
print('=' * 70)

# Instantiate the dataloader, which returns the necessary inputs to the DDPM as a tuple in the order of images, text embeddings, then text masks. In this case only images are returned since this is unconditional training.

dataset = Dataset('/content/Dataset', image_size = 128)

try:
    trainer.add_train_dataset(dataset, batch_size = batch_size)

except:
    print('Dataset is ready!')
    pass

print('=' * 70)
print('Done!')
print('=' * 70)

"""# (TRAIN MODEL)"""

# @title Train Imagen model

NUM_EPOCHS = 10

print('=' * 70)
print('Training...')
print('=' * 70)

NUM_STEPS = NUM_EPOCHS * len(dataset)

# working training loop

epoch = 1

print('=' * 70)
print('Epoch #', epoch)
print('=' * 70)

for i in range(NUM_STEPS):

    try:

        loss = trainer.train_step(unet_number = 1, max_batch_size = batch_size)
        print(f'loss: {loss}', '===', i)

        if not (i % 50):
            valid_loss = trainer.valid_step(unet_number = 1, max_batch_size = batch_size)
            print('=' * 70)
            print(f'valid loss: {valid_loss}')
            print('=' * 70)

        if not (i % 1000) and trainer.is_main: # is_main makes sure this can run in distributed
            print('=' * 70)
            images = trainer.sample(batch_size = batch_size // 4, return_pil_images = True) # returns List[Image]
            images[0].save(f'./sample-{i // 100}.png')
            print('=' * 70)

        if i and not (i % len(dataset)):
            epoch += 1
            print('=' * 70)
            print('Epoch #', epoch)
            print('=' * 70)

    except KeyboardInterrupt:
        print('=' * 70)
        print('Stopping training...')
        break

print('=' * 70)
print('Done!')
print('=' * 70)

"""# (SAVE/LOAD MODEL)"""

# @title Save trained model

print('=' * 70)
print('Saving model...')
print('=' * 70)

trainer.save('./Imagen_POP909_64_dim_'+str(i)+'_steps_'+str(loss)+'_loss.ckpt')

print('=' * 70)
print('Done!')
print('=' * 70)

# @title Load/reload trained model
full_path_to_model_checkpoint = "./Imagen_POP909_64_dim_10000_steps_0.01_loss.ckpt" # @param {"type":"string"}

print('=' * 70)
print('Loading model...')
print('=' * 70)

unet = Unet(
    dim = 64,
    dim_mults = (1, 2, 4, 8),
    num_resnet_blocks = 1,
    channels=1,
    layer_attns = (False, False, False, True),
    layer_cross_attns = False
)

imagen = Imagen(
    condition_on_text = False, # this must be set to False for unconditional Imagen
    unets = unet,
    channels=1,
    image_sizes = 128,
    timesteps = 1000
)

trainer = ImagenTrainer(
    imagen = imagen,
    split_valid_from_train = True # whether to split the validation dataset from the training one
).cuda()

trainer.load(full_path_to_model_checkpoint)

print('=' * 70)
print('Done!')
print('=' * 70)

"""# (GENERATE)"""

# @title Generate music

number_of_compositions_to_generate = 8 # @param {"type":"slider","min":1,"max":64,"step":1}
noise_threshold = 128 # @param {"type":"slider","min":0,"max":255,"step":1}

print('=' * 70)
print('Imagen Model Generator')
print('=' * 70)
print('Generating', number_of_compositions_to_generate, 'compositions...')
print('=' * 70)

images = trainer.sample(batch_size = number_of_compositions_to_generate, return_pil_images = True)

print('Done!')
print('=' * 70)
print('Converting to MIDIs...')

imgs_array = []

for idx, image in enumerate(images):

    print('=' * 70)
    print('Converting image #', idx)
    print('=' * 70)

    # binarize the generated grayscale image: pixels below noise_threshold become 0, all others 1
    arr = np.array(image)
    farr = np.where(arr < noise_threshold, 0, 1)

    bmatrix = TPLOTS.images_to_binary_matrix([farr])

    score = TMIDIX.binary_matrix_to_original_escore_notes(bmatrix)

    output_score, patches, overflow_patches = TMIDIX.patch_enhanced_score_notes(score)

    detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(
        output_score,
        output_signature = 'MIDI Images',
        output_file_name = '/content/MIDI-Images-Composition_'+str(idx),
        track_name='Project Los Angeles',
        list_of_MIDI_patches=patches,
        timings_multiplier=256
    )

print('=' * 70)
print('Done!')
print('=' * 70)

"""# Congrats! You did it! :)"""
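
Editor's note on the data representation: both files above treat each 128x128 black-and-white image as a binary piano-roll matrix, and the conversion between such matrices and note events is handled by TPLOTS/TMIDIX (TPLOTS.images_to_binary_matrix and TMIDIX.binary_matrix_to_original_escore_notes). The sketch below is only a rough, hypothetical illustration of that piano-roll idea, not part of the committed files: the axis order (rows as time steps, columns as MIDI pitches), the time units, and the helper name binary_matrix_to_notes are assumptions for illustration and do not reproduce the actual TMIDIX layout.

import numpy as np

def binary_matrix_to_notes(roll, time_step=1):
    """Decode a 0/1 piano-roll matrix into (start, duration, pitch) triples.

    ASSUMPTION: rows index time steps, columns index MIDI pitches,
    and times are in arbitrary units of `time_step`. The real decoding
    in this commit is done by TMIDIX, not by this sketch.
    """
    notes = []
    n_steps, n_pitches = roll.shape
    for pitch in range(n_pitches):
        t = 0
        while t < n_steps:
            if roll[t, pitch]:
                start = t
                # run of consecutive 1s = one held note
                while t < n_steps and roll[t, pitch]:
                    t += 1
                notes.append((start * time_step, (t - start) * time_step, pitch))
            else:
                t += 1
    return sorted(notes)

# Tiny example: two held notes on an otherwise empty 128x128 roll
roll = np.zeros((128, 128), dtype=np.uint8)
roll[0:8, 60] = 1    # middle C held for 8 time steps
roll[8:16, 64] = 1   # E above it held for the next 8 steps
print(binary_matrix_to_notes(roll))  # [(0, 8, 60), (8, 8, 64)]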