recoilme committed on
Commit
816cc93
·
1 Parent(s): 04aaac7
micro/config.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:31acced0f5ecc3a484fd3c358dabaf3e5a7fce777b0472846ef774f4a734a839
3
+ size 1893
micro/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:24bc2a898a6829689cc3fcd3d2c1d4e4a757aa157243d127316ca0d8e3267372
3
+ size 2078471360
samples/micro_192x384_0.jpg ADDED

Git LFS Details

  • SHA256: dabd8a7de9a454d52811de321df17007fc6c382e612c294c6a33b6d2d35ee9d3
  • Pointer size: 130 Bytes
  • Size of remote file: 57.5 kB
samples/micro_256x384_0.jpg ADDED

Git LFS Details

  • SHA256: 0b97c920e541d7cf0ed1c1c9bb8f1af4a38107d3e4350f816d6f105577d97a41
  • Pointer size: 130 Bytes
  • Size of remote file: 70.8 kB
samples/micro_320x384_0.jpg ADDED

Git LFS Details

  • SHA256: 63a90f62498135ffa026d819aba2d6e3b1114432d5c10dc8f4975659e2b6e6b5
  • Pointer size: 131 Bytes
  • Size of remote file: 143 kB
samples/micro_384x192_0.jpg ADDED

Git LFS Details

  • SHA256: d56bbce9b88414084ffe305de0cd8995cb1aabeb2e54958bc34cac9ccffd300c
  • Pointer size: 130 Bytes
  • Size of remote file: 83.4 kB
samples/micro_384x256_0.jpg ADDED

Git LFS Details

  • SHA256: 9609ae72ac21f4bd11cc7091989abb53d8e3cec57560f0f9ece28221e37d7305
  • Pointer size: 130 Bytes
  • Size of remote file: 30.8 kB
samples/micro_384x320_0.jpg ADDED

Git LFS Details

  • SHA256: e5cfbc0159cc1e8d42abe920cf65083e4ae2edcb52c00e20909e8f2a83b9b557
  • Pointer size: 131 Bytes
  • Size of remote file: 115 kB
samples/micro_384x384_0.jpg ADDED

Git LFS Details

  • SHA256: d864e9749adb3e6b0448177e7ef43fc3e009494d2c3078d423900129a9d831e8
  • Pointer size: 130 Bytes
  • Size of remote file: 41.3 kB
sdxl/config.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1fc6f3f339d56f6e5fc44b2b62cd0ceb46616137961a105f2579298631328f21
3
+ size 1768
sdxl/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f14e079e3164d58155294204b475f1782d48e4364a1487000a28b0a16e6a281f
3
+ size 3944400016
src/micro_create.ipynb ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eeb3c833ffba08a44cf7dce894a67310ad1b7570d7401259e71bc6c770785e75
3
+ size 33978
src/sdxl_create.ipynb ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11a356e99139cd42df2110a02820b7d2c8cdd1fbe86011be16b27fa862efb4a6
3
+ size 27389
src/sdxs_create_unet-2b.ipynb CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:11f42016cce7ee2d56f5bcff4649008491222a23a70d144dbb87babccc6beaa7
3
  size 146107
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d2d6a5077aad9a1e416c69627411951a897959b0bfb026caba89634be0ec158
3
  size 146107
train.py CHANGED
@@ -26,13 +26,13 @@ import torch.nn.functional as F
26
 
27
  # --------------------------- Параметры ---------------------------
28
  ds_path = "datasets/384"
29
- project = "2b"
30
- batch_size = 64
31
- base_learning_rate = 8e-5
32
- min_learning_rate = 4e-5
33
- num_epochs = 25
34
  # samples/save per epoch
35
- sample_interval_share = 5
36
  use_wandb = True
37
  save_model = True
38
  use_decay = True
@@ -49,13 +49,14 @@ torch.backends.cudnn.allow_tf32 = True
49
  torch.backends.cuda.enable_mem_efficient_sdp(False)
50
  dtype = torch.float32
51
  save_barrier = 1.03
 
52
  dispersive_temperature=0.5
53
  dispersive_weight= 0.05
54
  percentile_clipping = 95 # 8bit optim
55
  betta2 = 0.97
56
  eps = 1e-5
57
- clip_grad_norm = .75
58
- steps_offset = 1 # Scheduler
59
  limit = 0
60
  checkpoints_folder = ""
61
  mixed_precision = "bf16" #"fp16"
@@ -510,7 +511,7 @@ else:
510
 
511
  def lr_schedule(step):
512
  x = step / (total_training_steps * world_size)
513
- warmup = 0.05
514
 
515
  if not use_decay:
516
  return base_learning_rate
 
26
 
27
  # --------------------------- Параметры ---------------------------
28
  ds_path = "datasets/384"
29
+ project = "micro"
30
+ batch_size = 5
31
+ base_learning_rate = 1e-4
32
+ min_learning_rate = 5e-5
33
+ num_epochs = 5
34
  # samples/save per epoch
35
+ sample_interval_share = 40
36
  use_wandb = True
37
  save_model = True
38
  use_decay = True
 
49
  torch.backends.cuda.enable_mem_efficient_sdp(False)
50
  dtype = torch.float32
51
  save_barrier = 1.03
52
+ warmup_percent = 0.01
53
  dispersive_temperature=0.5
54
  dispersive_weight= 0.05
55
  percentile_clipping = 95 # 8bit optim
56
  betta2 = 0.97
57
  eps = 1e-5
58
+ clip_grad_norm = 1.0
59
+ steps_offset = 0 # Scheduler
60
  limit = 0
61
  checkpoints_folder = ""
62
  mixed_precision = "bf16" #"fp16"
 
511
 
512
  def lr_schedule(step):
513
  x = step / (total_training_steps * world_size)
514
+ warmup = warmup_percent
515
 
516
  if not use_decay:
517
  return base_learning_rate