micro
- micro/config.json +3 -0
- micro/diffusion_pytorch_model.safetensors +3 -0
- samples/micro_192x384_0.jpg +3 -0
- samples/micro_256x384_0.jpg +3 -0
- samples/micro_320x384_0.jpg +3 -0
- samples/micro_384x192_0.jpg +3 -0
- samples/micro_384x256_0.jpg +3 -0
- samples/micro_384x320_0.jpg +3 -0
- samples/micro_384x384_0.jpg +3 -0
- sdxl/config.json +3 -0
- sdxl/diffusion_pytorch_model.safetensors +3 -0
- src/micro_create.ipynb +3 -0
- src/sdxl_create.ipynb +3 -0
- src/sdxs_create_unet-2b.ipynb +1 -1
- train.py +10 -9
micro/config.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31acced0f5ecc3a484fd3c358dabaf3e5a7fce777b0472846ef774f4a734a839
+size 1893
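Each of these added entries is a Git LFS pointer rather than the file itself: it records only the spec version, the SHA-256 of the real blob, and its size in bytes. A minimal sketch for checking a locally pulled artifact against such a pointer, using only hashlib (the local path is a placeholder, and chunked hashing is just a precaution for the large safetensors files):

import hashlib
import os

def verify_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a locally pulled file against the oid/size from its LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            h.update(chunk)
    return h.hexdigest() == expected_oid

# Example with the values recorded in micro/config.json above
# (assumes the real file has been fetched to this path)
verify_lfs_pointer(
    "micro/config.json",
    "31acced0f5ecc3a484fd3c358dabaf3e5a7fce777b0472846ef774f4a734a839",
    1893,
)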
micro/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:24bc2a898a6829689cc3fcd3d2c1d4e4a757aa157243d127316ca0d8e3267372
+size 2078471360
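The config.json plus diffusion_pytorch_model.safetensors pair matches the usual diffusers model-component layout, so the new micro weights can presumably be loaded by subfolder. A sketch under two assumptions not stated in this commit: the repository id is a placeholder, and the component is a UNet2DConditionModel.

import torch
from diffusers import UNet2DConditionModel

# "your-namespace/your-repo" is a placeholder; the model class is an assumption.
unet = UNet2DConditionModel.from_pretrained(
    "your-namespace/your-repo",
    subfolder="micro",
    torch_dtype=torch.float32,
)
print(sum(p.numel() for p in unet.parameters()))  # rough sanity check against the ~2 GB weight file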
samples/micro_192x384_0.jpg
ADDED
(image preview; stored via Git LFS)
samples/micro_256x384_0.jpg
ADDED
(image preview; stored via Git LFS)
samples/micro_320x384_0.jpg
ADDED
(image preview; stored via Git LFS)
samples/micro_384x192_0.jpg
ADDED
(image preview; stored via Git LFS)
samples/micro_384x256_0.jpg
ADDED
(image preview; stored via Git LFS)
samples/micro_384x320_0.jpg
ADDED
(image preview; stored via Git LFS)
samples/micro_384x384_0.jpg
ADDED
(image preview; stored via Git LFS)
sdxl/config.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fc6f3f339d56f6e5fc44b2b62cd0ceb46616137961a105f2579298631328f21
+size 1768
sdxl/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f14e079e3164d58155294204b475f1782d48e4364a1487000a28b0a16e6a281f
+size 3944400016
src/micro_create.ipynb
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eeb3c833ffba08a44cf7dce894a67310ad1b7570d7401259e71bc6c770785e75
+size 33978
src/sdxl_create.ipynb
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11a356e99139cd42df2110a02820b7d2c8cdd1fbe86011be16b27fa862efb4a6
+size 27389
src/sdxs_create_unet-2b.ipynb
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:1d2d6a5077aad9a1e416c69627411951a897959b0bfb026caba89634be0ec158
 size 146107
train.py
CHANGED
@@ -26,13 +26,13 @@ import torch.nn.functional as F
 
 # --------------------------- Параметры ---------------------------
 ds_path = "datasets/384"
-project = "
-batch_size =
-base_learning_rate =
-min_learning_rate =
-num_epochs =
+project = "micro"
+batch_size = 5
+base_learning_rate = 1e-4
+min_learning_rate = 5e-5
+num_epochs = 5
 # samples/save per epoch
-sample_interval_share =
+sample_interval_share = 40
 use_wandb = True
 save_model = True
 use_decay = True
@@ -49,13 +49,14 @@ torch.backends.cudnn.allow_tf32 = True
 torch.backends.cuda.enable_mem_efficient_sdp(False)
 dtype = torch.float32
 save_barrier = 1.03
+warmup_percent = 0.01
 dispersive_temperature=0.5
 dispersive_weight= 0.05
 percentile_clipping = 95 # 8bit optim
 betta2 = 0.97
 eps = 1e-5
-clip_grad_norm = .
-steps_offset =
+clip_grad_norm = 1.0
+steps_offset = 0 # Scheduler
 limit = 0
 checkpoints_folder = ""
 mixed_precision = "bf16" #"fp16"
@@ -510,7 +511,7 @@ else:
 
 def lr_schedule(step):
     x = step / (total_training_steps * world_size)
-    warmup =
+    warmup = warmup_percent
 
     if not use_decay:
         return base_learning_rate
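The last hunk only shows that the warmup fraction now comes from the new warmup_percent constant; the decay branch itself lies outside the diff. A sketch of one plausible shape for the full schedule, written with the module-level constants from the hunks above passed in explicitly (the linear-warmup / cosine-decay form is an assumption, not taken from train.py):

import math

def lr_schedule_sketch(step, total_training_steps, world_size,
                       base_learning_rate=1e-4, min_learning_rate=5e-5,
                       warmup_percent=0.01, use_decay=True):
    x = step / (total_training_steps * world_size)  # fraction of training completed
    warmup = warmup_percent

    if not use_decay:
        return base_learning_rate
    if x < warmup:
        # linear ramp from 0 up to base_learning_rate (assumed shape)
        return base_learning_rate * x / warmup
    # cosine decay from base_learning_rate down to min_learning_rate (assumed shape)
    progress = (x - warmup) / max(1e-8, 1.0 - warmup)
    return min_learning_rate + 0.5 * (base_learning_rate - min_learning_rate) * (1.0 + math.cos(math.pi * progress))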