malcolmrey commited on
Commit
3b8d50d
·
1 Parent(s): 1cbd3bb

training scripts

Browse files
training-scripts/aitoolkit/fk9_template.yaml ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ job: "extension"
3
+ config:
4
+ name: "fk9_[dataset_name]_v1"
5
+ process:
6
+ - type: "diffusion_trainer"
7
+ training_folder: "c:\\Development\\ai-toolkit\\output"
8
+ sqlite_db_path: "./aitk_db.db"
9
+ device: "cuda"
10
+ trigger_word: "sks woman"
11
+ performance_log_every: 10
12
+ network:
13
+ type: "lora"
14
+ linear: 32
15
+ linear_alpha: 32
16
+ conv: 16
17
+ conv_alpha: 16
18
+ lokr_full_rank: true
19
+ lokr_factor: -1
20
+ network_kwargs:
21
+ ignore_if_contains: []
22
+ save:
23
+ dtype: "bf16"
24
+ save_every: 2500
25
+ max_step_saves_to_keep: 5
26
+ save_format: "diffusers"
27
+ push_to_hub: false
28
+ datasets:
29
+ - folder_path: "c:\\Development\\ai-toolkit\\datasets/[dataset_name]"
30
+ mask_path: null
31
+ mask_min_value: 0.1
32
+ default_caption: "photo of a woman"
33
+ caption_ext: "txt"
34
+ caption_dropout_rate: 0.05
35
+ cache_latents_to_disk: false
36
+ is_reg: false
37
+ network_weight: 1
38
+ resolution:
39
+ - 512
40
+ controls: []
41
+ shrink_video_to_frames: true
42
+ num_frames: 1
43
+ flip_x: false
44
+ flip_y: false
45
+ num_repeats: 1
46
+ control_path_1: null
47
+ control_path_2: null
48
+ control_path_3: null
49
+ train:
50
+ batch_size: 1
51
+ bypass_guidance_embedding: false
52
+ steps: 2500
53
+ gradient_accumulation: 1
54
+ train_unet: true
55
+ train_text_encoder: false
56
+ gradient_checkpointing: true
57
+ noise_scheduler: "flowmatch"
58
+ optimizer: "adamw8bit"
59
+ timestep_type: "weighted"
60
+ content_or_style: "balanced"
61
+ optimizer_params:
62
+ weight_decay: 0.0001
63
+ unload_text_encoder: true
64
+ cache_text_embeddings: false
65
+ lr: 0.0001
66
+ ema_config:
67
+ use_ema: false
68
+ ema_decay: 0.99
69
+ skip_first_sample: true
70
+ force_first_sample: false
71
+ disable_sampling: true
72
+ dtype: "bf16"
73
+ diff_output_preservation: false
74
+ diff_output_preservation_multiplier: 1
75
+ diff_output_preservation_class: "person"
76
+ switch_boundary_every: 1
77
+ loss_type: "mse"
78
+ do_differential_guidance: true
79
+ differential_guidance_scale: 3
80
+ logging:
81
+ log_every: 1
82
+ use_ui_logger: true
83
+ model:
84
+ name_or_path: "black-forest-labs/FLUX.2-klein-base-9B"
85
+ quantize: true
86
+ qtype: "qfloat8"
87
+ quantize_te: true
88
+ qtype_te: "qfloat8"
89
+ arch: "flux2_klein_9b"
90
+ low_vram: true
91
+ model_kwargs:
92
+ match_target_res: false
93
+ layer_offloading: false
94
+ layer_offloading_text_encoder_percent: 1
95
+ layer_offloading_transformer_percent: 1
96
+ sample:
97
+ sampler: "flowmatch"
98
+ sample_every: 250
99
+ width: 512
100
+ height: 512
101
+ samples:
102
+ - prompt: "woman with red hair, playing chess at the park, bomb going off in the background"
103
+ - prompt: "a woman holding a coffee cup, in a beanie, sitting at a cafe"
104
+ - prompt: "a woman showing off her cool new t shirt at the beach, a shark is jumping out of the water in the background"
105
+ - prompt: "woman playing the guitar, on stage, singing a song, laser lights, punk rocker"
106
+ - prompt: "photo of a woman, white background, medium shot, modeling clothing, studio lighting, white backdrop"
107
+ neg: ""
108
+ seed: 42
109
+ walk_seed: true
110
+ guidance_scale: 4
111
+ sample_steps: 25
112
+ num_frames: 1
113
+ fps: 1
114
+ meta:
115
+ name: "[name]"
116
+ version: "1.0"
training-scripts/aitoolkit/fk9r_template.yaml ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ job: "extension"
3
+ config:
4
+ name: "fk9r_[dataset_name]_v1"
5
+ process:
6
+ - type: "diffusion_trainer"
7
+ training_folder: "c:\\Development\\ai-toolkit\\output"
8
+ sqlite_db_path: "./aitk_db.db"
9
+ device: "cuda"
10
+ trigger_word: "sks woman"
11
+ performance_log_every: 10
12
+ network:
13
+ type: "lokr"
14
+ linear: 32
15
+ linear_alpha: 32
16
+ conv: 16
17
+ conv_alpha: 16
18
+ lokr_full_rank: true
19
+ lokr_factor: 4
20
+ network_kwargs:
21
+ ignore_if_contains: []
22
+ save:
23
+ dtype: "bf16"
24
+ save_every: 100
25
+ max_step_saves_to_keep: 12
26
+ save_format: "diffusers"
27
+ push_to_hub: false
28
+ datasets:
29
+ - folder_path: "c:\\Development\\ai-toolkit\\datasets/[dataset_name]"
30
+ mask_path: null
31
+ mask_min_value: 0.1
32
+ default_caption: "photo of a woman"
33
+ caption_ext: "txt"
34
+ caption_dropout_rate: 0.05
35
+ cache_latents_to_disk: false
36
+ is_reg: false
37
+ network_weight: 1
38
+ resolution:
39
+ - 512
40
+ controls: []
41
+ shrink_video_to_frames: true
42
+ num_frames: 1
43
+ flip_x: false
44
+ flip_y: false
45
+ num_repeats: 1
46
+ control_path_1: null
47
+ control_path_2: null
48
+ control_path_3: null
49
+ train:
50
+ batch_size: 1
51
+ bypass_guidance_embedding: false
52
+ steps: 1000
53
+ gradient_accumulation: 1
54
+ train_unet: true
55
+ train_text_encoder: false
56
+ gradient_checkpointing: true
57
+ noise_scheduler: "flowmatch"
58
+ optimizer: "adamw8bit"
59
+ timestep_type: "weighted"
60
+ content_or_style: "balanced"
61
+ optimizer_params:
62
+ weight_decay: 0.0001
63
+ unload_text_encoder: false
64
+ cache_text_embeddings: false
65
+ lr: 0.0001
66
+ ema_config:
67
+ use_ema: false
68
+ ema_decay: 0.99
69
+ skip_first_sample: true
70
+ force_first_sample: false
71
+ disable_sampling: false
72
+ dtype: "bf16"
73
+ diff_output_preservation: false
74
+ diff_output_preservation_multiplier: 1
75
+ diff_output_preservation_class: "person"
76
+ switch_boundary_every: 1
77
+ loss_type: "mse"
78
+ do_differential_guidance: true
79
+ differential_guidance_scale: 3
80
+ logging:
81
+ log_every: 1
82
+ use_ui_logger: true
83
+ model:
84
+ name_or_path: "black-forest-labs/FLUX.2-klein-base-9B"
85
+ quantize: true
86
+ qtype: "qfloat8"
87
+ quantize_te: true
88
+ qtype_te: "qfloat8"
89
+ arch: "flux2_klein_9b"
90
+ low_vram: true
91
+ model_kwargs:
92
+ match_target_res: false
93
+ layer_offloading: false
94
+ layer_offloading_text_encoder_percent: 1
95
+ layer_offloading_transformer_percent: 1
96
+ sample:
97
+ sampler: "flowmatch"
98
+ sample_every: 250
99
+ width: 512
100
+ height: 512
101
+ samples:
102
+ - prompt: "woman with red hair, playing chess at the park, bomb going off in the background"
103
+ - prompt: "a woman holding a coffee cup, in a beanie, sitting at a cafe"
104
+ - prompt: "a womman showing off her cool new t shirt at the beach, a shark is jumping out of the water in the background"
105
+ - prompt: "woman playing the guitar, on stage, singing a song, laser lights, punk rocker"
106
+ - prompt: "photo of a woman, white background, medium shot, modeling clothing, studio lighting, white backdrop"
107
+ neg: ""
108
+ seed: 42
109
+ walk_seed: true
110
+ guidance_scale: 4
111
+ sample_steps: 25
112
+ num_frames: 1
113
+ fps: 1
114
+ meta:
115
+ name: "[name]"
116
+ version: "1.0"
training-scripts/aitoolkit/wan_template.yaml ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ job: "extension"
3
+ config:
4
+ name: "wan_[dataset_name]_v1"
5
+ process:
6
+ - type: "diffusion_trainer"
7
+ training_folder: "c:\\Development\\ai-toolkit\\output"
8
+ sqlite_db_path: "./aitk_db.db"
9
+ device: "cuda"
10
+ trigger_word: "sks woman"
11
+ performance_log_every: 10
12
+ network:
13
+ type: "lora"
14
+ linear: 32
15
+ linear_alpha: 32
16
+ conv: 16
17
+ conv_alpha: 16
18
+ lokr_full_rank: true
19
+ lokr_factor: -1
20
+ network_kwargs:
21
+ ignore_if_contains: []
22
+ save:
23
+ dtype: "bf16"
24
+ save_every: 2500
25
+ max_step_saves_to_keep: 8
26
+ save_format: "diffusers"
27
+ push_to_hub: false
28
+ datasets:
29
+ - folder_path: "c:\\Development\\ai-toolkit\\datasets/[dataset_name]"
30
+ control_path: null
31
+ mask_path: null
32
+ mask_min_value: 0.1
33
+ default_caption: "photo of a woman"
34
+ caption_ext: "txt"
35
+ caption_dropout_rate: 0.05
36
+ cache_latents_to_disk: false
37
+ is_reg: false
38
+ network_weight: 1
39
+ resolution:
40
+ - 512
41
+ controls: []
42
+ shrink_video_to_frames: true
43
+ num_frames: 1
44
+ do_i2v: true
45
+ flip_x: false
46
+ flip_y: false
47
+ train:
48
+ batch_size: 1
49
+ bypass_guidance_embedding: false
50
+ steps: 2500
51
+ gradient_accumulation: 1
52
+ train_unet: true
53
+ train_text_encoder: false
54
+ gradient_checkpointing: true
55
+ noise_scheduler: "flowmatch"
56
+ optimizer: "adamw8bit"
57
+ timestep_type: "sigmoid"
58
+ content_or_style: "balanced"
59
+ optimizer_params:
60
+ weight_decay: 0.0001
61
+ unload_text_encoder: true
62
+ cache_text_embeddings: false
63
+ lr: 0.0001
64
+ ema_config:
65
+ use_ema: false
66
+ ema_decay: 0.99
67
+ skip_first_sample: true
68
+ disable_sampling: true
69
+ dtype: "bf16"
70
+ diff_output_preservation: false
71
+ diff_output_preservation_multiplier: 1
72
+ diff_output_preservation_class: "person"
73
+ switch_boundary_every: 1
74
+ model:
75
+ name_or_path: "Wan-AI/Wan2.1-T2V-14B-Diffusers"
76
+ quantize: true
77
+ qtype: "qfloat8"
78
+ quantize_te: true
79
+ qtype_te: "qfloat8"
80
+ arch: "wan21:14b"
81
+ low_vram: true
82
+ model_kwargs: {}
83
+ sample:
84
+ sampler: "flowmatch"
85
+ sample_every: 500
86
+ width: 512
87
+ height: 512
88
+ samples:
89
+ - prompt: "woman with red hair, playing chess at the park, bomb going off in the background"
90
+ - prompt: "a woman holding a coffee cup, in a beanie, sitting at a cafe"
91
+ - prompt: "a woman showing off her cool new t shirt at the beach, a shark is jumping out of the water in the background"
92
+ - prompt: "woman playing the guitar, on stage, singing a song, laser lights, punk rocker"
93
+ - prompt: "photo of a woman, white background, medium shot, modeling clothing, studio lighting, white backdrop"
94
+ neg: ""
95
+ seed: 42
96
+ walk_seed: true
97
+ guidance_scale: 4
98
+ sample_steps: 25
99
+ num_frames: 1
100
+ fps: 1
101
+ meta:
102
+ name: "[name]"
103
+ version: "1.0"
training-scripts/aitoolkit/zbase_template.yaml ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ job: "extension"
3
+ config:
4
+ name: "zbase_[dataset_name]_v1"
5
+ process:
6
+ - type: "diffusion_trainer"
7
+ training_folder: "c:\\Development\\ai-toolkit\\output"
8
+ sqlite_db_path: "./aitk_db.db"
9
+ device: "cuda"
10
+ trigger_word: "sks woman"
11
+ performance_log_every: 10
12
+ network:
13
+ type: "lora"
14
+ linear: 32
15
+ linear_alpha: 32
16
+ conv: 16
17
+ conv_alpha: 16
18
+ lokr_full_rank: true
19
+ lokr_factor: -1
20
+ network_kwargs:
21
+ ignore_if_contains: []
22
+ save:
23
+ dtype: "bf16"
24
+ save_every: 2500
25
+ max_step_saves_to_keep: 8
26
+ save_format: "diffusers"
27
+ push_to_hub: false
28
+ datasets:
29
+ - folder_path: "c:\\Development\\ai-toolkit\\datasets/[dataset_name]"
30
+ mask_path: null
31
+ mask_min_value: 0.1
32
+ default_caption: "photo of a woman"
33
+ caption_ext: "txt"
34
+ caption_dropout_rate: 0.05
35
+ cache_latents_to_disk: false
36
+ is_reg: false
37
+ network_weight: 1
38
+ resolution:
39
+ - 512
40
+ controls: []
41
+ shrink_video_to_frames: true
42
+ num_frames: 1
43
+ do_i2v: true
44
+ flip_x: false
45
+ flip_y: false
46
+ train:
47
+ batch_size: 1
48
+ bypass_guidance_embedding: false
49
+ steps: 2500
50
+ gradient_accumulation: 1
51
+ train_unet: true
52
+ train_text_encoder: false
53
+ gradient_checkpointing: true
54
+ noise_scheduler: "flowmatch"
55
+ optimizer: "adamw8bit"
56
+ timestep_type: "weighted"
57
+ content_or_style: "balanced"
58
+ optimizer_params:
59
+ weight_decay: 0.0001
60
+ unload_text_encoder: true
61
+ cache_text_embeddings: false
62
+ lr: 0.0001
63
+ ema_config:
64
+ use_ema: false
65
+ ema_decay: 0.99
66
+ skip_first_sample: false
67
+ force_first_sample: false
68
+ disable_sampling: true
69
+ dtype: "bf16"
70
+ diff_output_preservation: false
71
+ diff_output_preservation_multiplier: 1
72
+ diff_output_preservation_class: "person"
73
+ switch_boundary_every: 1
74
+ loss_type: "mse"
75
+ do_differential_guidance: true
76
+ differential_guidance_scale: 3
77
+ model:
78
+ name_or_path: "Tongyi-MAI/Z-Image"
79
+ quantize: true
80
+ qtype: "qfloat8"
81
+ quantize_te: true
82
+ qtype_te: "qfloat8"
83
+ arch: "zimage"
84
+ low_vram: true
85
+ model_kwargs: {}
86
+ layer_offloading: false
87
+ layer_offloading_text_encoder_percent: 1
88
+ layer_offloading_transformer_percent: 1
89
+ sample:
90
+ sampler: "flowmatch"
91
+ sample_every: 2500
92
+ width: 512
93
+ height: 512
94
+ samples:
95
+ - prompt: "woman with red hair, playing chess at the park, bomb going off in the background"
96
+ - prompt: "a woman holding a coffee cup, in a beanie, sitting at a cafe"
97
+ - prompt: "a woman showing off her cool new t shirt at the beach, a shark is jumping out of the water in the background"
98
+ - prompt: "woman playing the guitar, on stage, singing a song, laser lights, punk rocker"
99
+ - prompt: "photo of a woman, white background, medium shot, modeling clothing, studio lighting, white backdrop"
100
+ neg: ""
101
+ seed: 42
102
+ walk_seed: true
103
+ guidance_scale: 4
104
+ sample_steps: 8
105
+ num_frames: 1
106
+ fps: 1
107
+ meta:
108
+ name: "[name]"
109
+ version: "1.0"
training-scripts/aitoolkit/zimage_template.yaml ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ job: "extension"
3
+ config:
4
+ name: "zimage_[dataset_name]_v1"
5
+ process:
6
+ - type: "diffusion_trainer"
7
+ training_folder: "c:\\Development\\ai-toolkit\\output"
8
+ sqlite_db_path: "./aitk_db.db"
9
+ device: "cuda"
10
+ trigger_word: "sks woman"
11
+ performance_log_every: 10
12
+ network:
13
+ type: "lora"
14
+ linear: 32
15
+ linear_alpha: 32
16
+ conv: 16
17
+ conv_alpha: 16
18
+ lokr_full_rank: true
19
+ lokr_factor: -1
20
+ network_kwargs:
21
+ ignore_if_contains: []
22
+ save:
23
+ dtype: "bf16"
24
+ save_every: 2500
25
+ max_step_saves_to_keep: 8
26
+ save_format: "diffusers"
27
+ push_to_hub: false
28
+ datasets:
29
+ - folder_path: "c:\\Development\\ai-toolkit\\datasets/[dataset_name]"
30
+ mask_path: null
31
+ mask_min_value: 0.1
32
+ default_caption: "photo of a woman"
33
+ caption_ext: "txt"
34
+ caption_dropout_rate: 0.05
35
+ cache_latents_to_disk: false
36
+ is_reg: false
37
+ network_weight: 1
38
+ resolution:
39
+ - 512
40
+ controls: []
41
+ shrink_video_to_frames: true
42
+ num_frames: 1
43
+ do_i2v: true
44
+ flip_x: false
45
+ flip_y: false
46
+ train:
47
+ batch_size: 1
48
+ bypass_guidance_embedding: false
49
+ steps: 2500
50
+ gradient_accumulation: 1
51
+ train_unet: true
52
+ train_text_encoder: false
53
+ gradient_checkpointing: true
54
+ noise_scheduler: "flowmatch"
55
+ optimizer: "adamw8bit"
56
+ timestep_type: "weighted"
57
+ content_or_style: "balanced"
58
+ optimizer_params:
59
+ weight_decay: 0.0001
60
+ unload_text_encoder: true
61
+ cache_text_embeddings: false
62
+ lr: 0.0001
63
+ ema_config:
64
+ use_ema: false
65
+ ema_decay: 0.99
66
+ skip_first_sample: false
67
+ force_first_sample: false
68
+ disable_sampling: true
69
+ dtype: "bf16"
70
+ diff_output_preservation: false
71
+ diff_output_preservation_multiplier: 1
72
+ diff_output_preservation_class: "person"
73
+ switch_boundary_every: 1
74
+ loss_type: "mse"
75
+ do_differential_guidance: true
76
+ differential_guidance_scale: 3
77
+ model:
78
+ name_or_path: "Tongyi-MAI/Z-Image-Turbo"
79
+ quantize: true
80
+ qtype: "qfloat8"
81
+ quantize_te: true
82
+ qtype_te: "qfloat8"
83
+ arch: "zimage:turbo"
84
+ low_vram: true
85
+ model_kwargs: {}
86
+ layer_offloading: false
87
+ layer_offloading_text_encoder_percent: 1
88
+ layer_offloading_transformer_percent: 1
89
+ assistant_lora_path: "ostris/zimage_turbo_training_adapter/zimage_turbo_training_adapter_v1.safetensors"
90
+ sample:
91
+ sampler: "flowmatch"
92
+ sample_every: 2500
93
+ width: 512
94
+ height: 512
95
+ samples:
96
+ - prompt: "woman with red hair, playing chess at the park, bomb going off in the background"
97
+ - prompt: "a woman holding a coffee cup, in a beanie, sitting at a cafe"
98
+ - prompt: "a woman showing off her cool new t shirt at the beach, a shark is jumping out of the water in the background"
99
+ - prompt: "woman playing the guitar, on stage, singing a song, laser lights, punk rocker"
100
+ - prompt: "photo of a woman, white background, medium shot, modeling clothing, studio lighting, white backdrop"
101
+ neg: ""
102
+ seed: 42
103
+ walk_seed: true
104
+ guidance_scale: 1
105
+ sample_steps: 8
106
+ num_frames: 1
107
+ fps: 1
108
+ meta:
109
+ name: "[name]"
110
+ version: "1.0"
training-scripts/maltrainer/zimage_base_template.yaml ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Z-Image LoRA Training Configuration Example
2
+
3
+ # Model configuration
4
+ model:
5
+ name_or_path: "Tongyi-MAI/Z-Image" # or path to local model
6
+
7
+ # LoRA configuration
8
+ lora:
9
+ prefix: "diffusion_model."
10
+ rank: 32
11
+ alpha: 32
12
+ target_modules:
13
+ - "to_q"
14
+ - "to_k"
15
+ - "to_v"
16
+ - "to_out.0"
17
+ - "feed_forward.w1"
18
+ - "feed_forward.w2"
19
+ - "feed_forward.w3"
20
+ - "adaLN_modulation.0"
21
+
22
+ # Dataset configuration
23
+ dataset:
24
+ path: "[dataset_location]/[person_to_train]"
25
+ trigger: "[instance_token] [class_token]"
26
+ default_caption: "photo of a [class_token]"
27
+ repeats: 1
28
+ resolution: 512
29
+ center_crop: true
30
+ random_flip: false
31
+ num_workers: 0
32
+
33
+ # Training configuration
34
+ train:
35
+ batch_size: 1
36
+ gradient_accumulation_steps: 1
37
+ num_epochs: 100
38
+ optimizer: "adamw8bit"
39
+ learning_rate: 1e-4
40
+ adam_beta1: 0.9
41
+ adam_beta2: 0.999
42
+ adam_epsilon: 1e-8
43
+ weight_decay: 0.01
44
+ weight_decay_exclude_lora: true
45
+ timestep_weighting: "none"
46
+ do_differential_guidance: true
47
+ differential_guidance_scale: 3.0
48
+ unconditional_prompt: ""
49
+ dynamic_noise_offset: true
50
+ noise_multiplier: 1.0
51
+ random_noise_multiplier: 0.0
52
+ random_noise_shift: 0.0
53
+ latent_multiplier: 1.0
54
+ noisy_latent_multiplier: 1.0
55
+ max_grad_norm: 1.0
56
+ save_every: 500
57
+
58
+ # Output configuration
59
+ output:
60
+ path: "./output/[person_to_train]"
61
+
62
+ # Logging configuration
63
+ logging:
64
+ level: "INFO"
65
+
66
+ # Training settings
67
+ dtype: "bfloat16"
68
+ mixed_precision: "bf16"
69
+ seed: 42
training-scripts/onetrainer/zimage_base_template.json ADDED
@@ -0,0 +1,529 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "__version": 10,
3
+ "training_method": "LORA",
4
+ "model_type": "Z_IMAGE",
5
+ "debug_mode": false,
6
+ "debug_dir": "debug",
7
+ "workspace_dir": "workspace/run",
8
+ "cache_dir": "workspace-cache/run",
9
+ "tensorboard": true,
10
+ "tensorboard_expose": false,
11
+ "tensorboard_always_on": false,
12
+ "tensorboard_port": 6006,
13
+ "validation": false,
14
+ "validate_after": 1,
15
+ "validate_after_unit": "EPOCH",
16
+ "continue_last_backup": false,
17
+ "include_train_config": "NONE",
18
+ "multi_gpu": false,
19
+ "device_indexes": "",
20
+ "gradient_reduce_precision": "FLOAT_32_STOCHASTIC",
21
+ "fused_gradient_reduce": true,
22
+ "async_gradient_reduce": true,
23
+ "async_gradient_reduce_buffer": 100,
24
+ "base_model_name": "Tongyi-MAI/Z-Image",
25
+ "output_dtype": "BFLOAT_16",
26
+ "output_model_format": "SAFETENSORS",
27
+ "output_model_destination": "models/zbase_[person_to_train]_v1.safetensors",
28
+ "gradient_checkpointing": "ON",
29
+ "enable_async_offloading": true,
30
+ "enable_activation_offloading": true,
31
+ "layer_offload_fraction": 0.0,
32
+ "force_circular_padding": false,
33
+ "compile": true,
34
+ "concept_file_name": "training_concepts/concepts.json",
35
+ "concepts": [
36
+ {
37
+ "__version": 2,
38
+ "image": {
39
+ "__version": 0,
40
+ "enable_crop_jitter": true,
41
+ "enable_random_flip": false,
42
+ "enable_fixed_flip": false,
43
+ "enable_random_rotate": false,
44
+ "enable_fixed_rotate": false,
45
+ "random_rotate_max_angle": 0.0,
46
+ "enable_random_brightness": false,
47
+ "enable_fixed_brightness": false,
48
+ "random_brightness_max_strength": 0.0,
49
+ "enable_random_contrast": false,
50
+ "enable_fixed_contrast": false,
51
+ "random_contrast_max_strength": 0.0,
52
+ "enable_random_saturation": false,
53
+ "enable_fixed_saturation": false,
54
+ "random_saturation_max_strength": 0.0,
55
+ "enable_random_hue": false,
56
+ "enable_fixed_hue": false,
57
+ "random_hue_max_strength": 0.0,
58
+ "enable_resolution_override": false,
59
+ "resolution_override": "512",
60
+ "enable_random_circular_mask_shrink": false,
61
+ "enable_random_mask_rotate_crop": false
62
+ },
63
+ "text": {
64
+ "__version": 0,
65
+ "prompt_source": "sample",
66
+ "prompt_path": "",
67
+ "enable_tag_shuffling": false,
68
+ "tag_delimiter": ",",
69
+ "keep_tags_count": 1,
70
+ "tag_dropout_enable": false,
71
+ "tag_dropout_mode": "FULL",
72
+ "tag_dropout_probability": 0.0,
73
+ "tag_dropout_special_tags_mode": "NONE",
74
+ "tag_dropout_special_tags": "",
75
+ "tag_dropout_special_tags_regex": false,
76
+ "caps_randomize_enable": false,
77
+ "caps_randomize_mode": "capslock, title, first, random",
78
+ "caps_randomize_probability": 0.0,
79
+ "caps_randomize_lowercase": false
80
+ },
81
+ "name": "",
82
+ "path": "C:/Development/ai-toolkit/datasets/[person_to_train]",
83
+ "seed": -469069486,
84
+ "enabled": true,
85
+ "type": "STANDARD",
86
+ "include_subdirectories": false,
87
+ "image_variations": 1,
88
+ "text_variations": 1,
89
+ "balancing": 1.0,
90
+ "balancing_strategy": "REPEATS",
91
+ "loss_weight": 1.0,
92
+ "concept_stats": {}
93
+ }
94
+ ],
95
+ "aspect_ratio_bucketing": true,
96
+ "latent_caching": true,
97
+ "clear_cache_before_training": true,
98
+ "learning_rate_scheduler": "CONSTANT",
99
+ "custom_learning_rate_scheduler": null,
100
+ "scheduler_params": [],
101
+ "learning_rate": 1.0,
102
+ "learning_rate_warmup_steps": 200.0,
103
+ "learning_rate_cycles": 1.0,
104
+ "learning_rate_min_factor": 0.0,
105
+ "epochs": 100,
106
+ "batch_size": 2,
107
+ "gradient_accumulation_steps": 1,
108
+ "ema": "OFF",
109
+ "ema_decay": 0.999,
110
+ "ema_update_step_interval": 5,
111
+ "dataloader_threads": 1,
112
+ "train_device": "cuda",
113
+ "temp_device": "cpu",
114
+ "train_dtype": "BFLOAT_16",
115
+ "fallback_train_dtype": "BFLOAT_16",
116
+ "enable_autocast_cache": true,
117
+ "only_cache": false,
118
+ "resolution": "512",
119
+ "frames": "25",
120
+ "mse_strength": 1.0,
121
+ "mae_strength": 0.0,
122
+ "log_cosh_strength": 0.0,
123
+ "huber_strength": 0.0,
124
+ "huber_delta": 1.0,
125
+ "vb_loss_strength": 1.0,
126
+ "loss_weight_fn": "CONSTANT",
127
+ "loss_weight_strength": 5.0,
128
+ "dropout_probability": 0.0,
129
+ "loss_scaler": "NONE",
130
+ "learning_rate_scaler": "NONE",
131
+ "clip_grad_norm": 1.0,
132
+ "offset_noise_weight": 0.0,
133
+ "generalized_offset_noise": false,
134
+ "perturbation_noise_weight": 0.0,
135
+ "rescale_noise_scheduler_to_zero_terminal_snr": false,
136
+ "force_v_prediction": false,
137
+ "force_epsilon_prediction": false,
138
+ "min_noising_strength": 0.0,
139
+ "max_noising_strength": 1.0,
140
+ "timestep_distribution": "LOGIT_NORMAL",
141
+ "noising_weight": 0.0,
142
+ "noising_bias": 0.0,
143
+ "timestep_shift": 1.0,
144
+ "dynamic_timestep_shifting": false,
145
+ "unet": {
146
+ "__version": 0,
147
+ "model_name": "",
148
+ "include": true,
149
+ "train": true,
150
+ "stop_training_after": 0,
151
+ "stop_training_after_unit": "NEVER",
152
+ "learning_rate": null,
153
+ "weight_dtype": "FLOAT_32",
154
+ "dropout_probability": 0.0,
155
+ "train_embedding": true,
156
+ "attention_mask": false,
157
+ "guidance_scale": 1.0
158
+ },
159
+ "prior": {
160
+ "__version": 0,
161
+ "model_name": "",
162
+ "include": true,
163
+ "train": true,
164
+ "stop_training_after": 0,
165
+ "stop_training_after_unit": "NEVER",
166
+ "learning_rate": null,
167
+ "weight_dtype": "FLOAT_32",
168
+ "dropout_probability": 0.0,
169
+ "train_embedding": true,
170
+ "attention_mask": false,
171
+ "guidance_scale": 1.0
172
+ },
173
+ "transformer": {
174
+ "__version": 0,
175
+ "model_name": "",
176
+ "include": true,
177
+ "train": true,
178
+ "stop_training_after": 0,
179
+ "stop_training_after_unit": "NEVER",
180
+ "learning_rate": null,
181
+ "weight_dtype": "FLOAT_8",
182
+ "dropout_probability": 0.0,
183
+ "train_embedding": true,
184
+ "attention_mask": false,
185
+ "guidance_scale": 1.0
186
+ },
187
+ "quantization": {
188
+ "__version": 0,
189
+ "layer_filter": "layers",
190
+ "layer_filter_preset": "blocks",
191
+ "layer_filter_regex": false,
192
+ "svd_dtype": "NONE",
193
+ "svd_rank": 16,
194
+ "cache_dir": null
195
+ },
196
+ "text_encoder": {
197
+ "__version": 0,
198
+ "model_name": "",
199
+ "include": true,
200
+ "train": false,
201
+ "stop_training_after": 30,
202
+ "stop_training_after_unit": "EPOCH",
203
+ "learning_rate": null,
204
+ "weight_dtype": "FLOAT_8",
205
+ "dropout_probability": 0.0,
206
+ "train_embedding": true,
207
+ "attention_mask": false,
208
+ "guidance_scale": 1.0
209
+ },
210
+ "text_encoder_layer_skip": 0,
211
+ "text_encoder_sequence_length": 512,
212
+ "text_encoder_2": {
213
+ "__version": 0,
214
+ "model_name": "",
215
+ "include": true,
216
+ "train": true,
217
+ "stop_training_after": 30,
218
+ "stop_training_after_unit": "EPOCH",
219
+ "learning_rate": null,
220
+ "weight_dtype": "FLOAT_32",
221
+ "dropout_probability": 0.0,
222
+ "train_embedding": true,
223
+ "attention_mask": false,
224
+ "guidance_scale": 1.0
225
+ },
226
+ "text_encoder_2_layer_skip": 0,
227
+ "text_encoder_2_sequence_length": 77,
228
+ "text_encoder_3": {
229
+ "__version": 0,
230
+ "model_name": "",
231
+ "include": true,
232
+ "train": true,
233
+ "stop_training_after": 30,
234
+ "stop_training_after_unit": "EPOCH",
235
+ "learning_rate": null,
236
+ "weight_dtype": "FLOAT_32",
237
+ "dropout_probability": 0.0,
238
+ "train_embedding": true,
239
+ "attention_mask": false,
240
+ "guidance_scale": 1.0
241
+ },
242
+ "text_encoder_3_layer_skip": 0,
243
+ "text_encoder_4": {
244
+ "__version": 0,
245
+ "model_name": "",
246
+ "include": true,
247
+ "train": true,
248
+ "stop_training_after": 30,
249
+ "stop_training_after_unit": "EPOCH",
250
+ "learning_rate": null,
251
+ "weight_dtype": "FLOAT_32",
252
+ "dropout_probability": 0.0,
253
+ "train_embedding": true,
254
+ "attention_mask": false,
255
+ "guidance_scale": 1.0
256
+ },
257
+ "text_encoder_4_layer_skip": 0,
258
+ "vae": {
259
+ "__version": 0,
260
+ "model_name": "",
261
+ "include": true,
262
+ "train": true,
263
+ "stop_training_after": null,
264
+ "stop_training_after_unit": "NEVER",
265
+ "learning_rate": null,
266
+ "weight_dtype": "FLOAT_32",
267
+ "dropout_probability": 0.0,
268
+ "train_embedding": true,
269
+ "attention_mask": false,
270
+ "guidance_scale": 1.0
271
+ },
272
+ "effnet_encoder": {
273
+ "__version": 0,
274
+ "model_name": "",
275
+ "include": true,
276
+ "train": true,
277
+ "stop_training_after": null,
278
+ "stop_training_after_unit": "NEVER",
279
+ "learning_rate": null,
280
+ "weight_dtype": "FLOAT_32",
281
+ "dropout_probability": 0.0,
282
+ "train_embedding": true,
283
+ "attention_mask": false,
284
+ "guidance_scale": 1.0
285
+ },
286
+ "decoder": {
287
+ "__version": 0,
288
+ "model_name": "",
289
+ "include": true,
290
+ "train": true,
291
+ "stop_training_after": null,
292
+ "stop_training_after_unit": "NEVER",
293
+ "learning_rate": null,
294
+ "weight_dtype": "FLOAT_32",
295
+ "dropout_probability": 0.0,
296
+ "train_embedding": true,
297
+ "attention_mask": false,
298
+ "guidance_scale": 1.0
299
+ },
300
+ "decoder_text_encoder": {
301
+ "__version": 0,
302
+ "model_name": "",
303
+ "include": true,
304
+ "train": true,
305
+ "stop_training_after": null,
306
+ "stop_training_after_unit": "NEVER",
307
+ "learning_rate": null,
308
+ "weight_dtype": "FLOAT_32",
309
+ "dropout_probability": 0.0,
310
+ "train_embedding": true,
311
+ "attention_mask": false,
312
+ "guidance_scale": 1.0
313
+ },
314
+ "decoder_vqgan": {
315
+ "__version": 0,
316
+ "model_name": "",
317
+ "include": true,
318
+ "train": true,
319
+ "stop_training_after": null,
320
+ "stop_training_after_unit": "NEVER",
321
+ "learning_rate": null,
322
+ "weight_dtype": "FLOAT_32",
323
+ "dropout_probability": 0.0,
324
+ "train_embedding": true,
325
+ "attention_mask": false,
326
+ "guidance_scale": 1.0
327
+ },
328
+ "masked_training": false,
329
+ "unmasked_probability": 0.1,
330
+ "unmasked_weight": 0.1,
331
+ "normalize_masked_area_loss": false,
332
+ "masked_prior_preservation_weight": 0.0,
333
+ "custom_conditioning_image": false,
334
+ "layer_filter": "^(?=.*attention)(?!.*refiner).*,^(?=.*feed_forward)(?!.*refiner).*",
335
+ "layer_filter_preset": "attn-mlp",
336
+ "layer_filter_regex": true,
337
+ "embedding_learning_rate": null,
338
+ "preserve_embedding_norm": false,
339
+ "embedding": {
340
+ "__version": 0,
341
+ "uuid": "496f9260-d6c0-45ec-8248-94d8b340e2f4",
342
+ "model_name": "",
343
+ "placeholder": "<embedding>",
344
+ "train": true,
345
+ "stop_training_after": null,
346
+ "stop_training_after_unit": "NEVER",
347
+ "token_count": 1,
348
+ "initial_embedding_text": "*",
349
+ "is_output_embedding": false
350
+ },
351
+ "additional_embeddings": [],
352
+ "embedding_weight_dtype": "FLOAT_32",
353
+ "cloud": {
354
+ "__version": 0,
355
+ "enabled": false,
356
+ "type": "RUNPOD",
357
+ "file_sync": "NATIVE_SCP",
358
+ "create": true,
359
+ "name": "OneTrainer",
360
+ "tensorboard_tunnel": true,
361
+ "sub_type": "",
362
+ "gpu_type": "",
363
+ "volume_size": 100,
364
+ "min_download": 0,
365
+ "remote_dir": "/workspace",
366
+ "huggingface_cache_dir": "/workspace/huggingface_cache",
367
+ "onetrainer_dir": "/workspace/OneTrainer",
368
+ "install_cmd": "git clone https://github.com/Nerogar/OneTrainer",
369
+ "install_onetrainer": true,
370
+ "update_onetrainer": true,
371
+ "detach_trainer": false,
372
+ "run_id": "job1",
373
+ "download_samples": true,
374
+ "download_output_model": true,
375
+ "download_saves": true,
376
+ "download_backups": false,
377
+ "download_tensorboard": false,
378
+ "delete_workspace": false,
379
+ "on_finish": "NONE",
380
+ "on_error": "NONE",
381
+ "on_detached_finish": "NONE",
382
+ "on_detached_error": "NONE"
383
+ },
384
+ "peft_type": "LORA",
385
+ "lora_model_name": "",
386
+ "lora_rank": 16,
387
+ "lora_alpha": 1.0,
388
+ "lora_decompose": false,
389
+ "lora_decompose_norm_epsilon": true,
390
+ "lora_decompose_output_axis": false,
391
+ "lora_weight_dtype": "FLOAT_32",
392
+ "bundle_additional_embeddings": true,
393
+ "oft_block_size": 32,
394
+ "oft_coft": false,
395
+ "coft_eps": 0.0001,
396
+ "oft_block_share": false,
397
+ "optimizer": {
398
+ "__version": 0,
399
+ "optimizer": "PRODIGY_ADV",
400
+ "adam_w_mode": false,
401
+ "alpha": 5.0,
402
+ "amsgrad": false,
403
+ "beta1": 0.9,
404
+ "beta2": 0.99,
405
+ "beta3": null,
406
+ "bias_correction": false,
407
+ "block_wise": false,
408
+ "capturable": false,
409
+ "centered": false,
410
+ "clip_threshold": null,
411
+ "d0": 1e-06,
412
+ "d_coef": 1.0,
413
+ "dampening": null,
414
+ "decay_rate": null,
415
+ "decouple": false,
416
+ "differentiable": false,
417
+ "eps": 1e-08,
418
+ "eps2": null,
419
+ "foreach": false,
420
+ "fsdp_in_use": false,
421
+ "fused": false,
422
+ "fused_back_pass": false,
423
+ "growth_rate": "inf",
424
+ "initial_accumulator_value": null,
425
+ "initial_accumulator": null,
426
+ "is_paged": false,
427
+ "log_every": null,
428
+ "lr_decay": null,
429
+ "max_unorm": null,
430
+ "maximize": false,
431
+ "min_8bit_size": null,
432
+ "quant_block_size": null,
433
+ "momentum": null,
434
+ "nesterov": false,
435
+ "no_prox": false,
436
+ "optim_bits": null,
437
+ "percentile_clipping": null,
438
+ "r": null,
439
+ "relative_step": false,
440
+ "safeguard_warmup": false,
441
+ "scale_parameter": false,
442
+ "stochastic_rounding": true,
443
+ "use_bias_correction": false,
444
+ "use_triton": false,
445
+ "warmup_init": false,
446
+ "weight_decay": 0.0,
447
+ "weight_lr_power": null,
448
+ "decoupled_decay": false,
449
+ "fixed_decay": false,
450
+ "rectify": false,
451
+ "degenerated_to_sgd": false,
452
+ "k": null,
453
+ "xi": null,
454
+ "n_sma_threshold": null,
455
+ "ams_bound": false,
456
+ "adanorm": false,
457
+ "adam_debias": false,
458
+ "slice_p": 11,
459
+ "cautious": false,
460
+ "weight_decay_by_lr": true,
461
+ "prodigy_steps": 0,
462
+ "use_speed": false,
463
+ "split_groups": true,
464
+ "split_groups_mean": true,
465
+ "factored": true,
466
+ "factored_fp32": true,
467
+ "use_stableadamw": true,
468
+ "use_cautious": false,
469
+ "use_grams": false,
470
+ "use_adopt": false,
471
+ "d_limiter": false,
472
+ "use_schedulefree": true,
473
+ "use_orthograd": false,
474
+ "nnmf_factor": false,
475
+ "orthogonal_gradient": false,
476
+ "use_atan2": false,
477
+ "use_AdEMAMix": false,
478
+ "beta3_ema": 0.9999,
479
+ "alpha_grad": 100.0,
480
+ "beta1_warmup": null,
481
+ "min_beta1": null,
482
+ "Simplified_AdEMAMix": false,
483
+ "cautious_mask": false,
484
+ "grams_moment": false,
485
+ "kourkoutas_beta": false,
486
+ "k_warmup_steps": null,
487
+ "schedulefree_c": null,
488
+ "ns_steps": null,
489
+ "MuonWithAuxAdam": false,
490
+ "muon_hidden_layers": null,
491
+ "muon_adam_regex": false,
492
+ "muon_adam_lr": null,
493
+ "muon_te1_adam_lr": null,
494
+ "muon_te2_adam_lr": null,
495
+ "muon_adam_config": null,
496
+ "rms_rescaling": true,
497
+ "normuon_variant": false,
498
+ "beta2_normuon": null,
499
+ "normuon_eps": null,
500
+ "low_rank_ortho": false,
501
+ "ortho_rank": null,
502
+ "accelerated_ns": false,
503
+ "cautious_wd": false,
504
+ "approx_mars": false,
505
+ "kappa_p": null,
506
+ "auto_kappa_p": false,
507
+ "compile": false
508
+ },
509
+ "optimizer_defaults": {},
510
+ "sample_definition_file_name": "training_samples/samples.json",
511
+ "samples": [],
512
+ "sample_after": 10,
513
+ "sample_after_unit": "MINUTE",
514
+ "sample_skip_first": 0,
515
+ "sample_image_format": "JPG",
516
+ "sample_video_format": "MP4",
517
+ "sample_audio_format": "MP3",
518
+ "samples_to_tensorboard": true,
519
+ "non_ema_sampling": true,
520
+ "backup_after": 30,
521
+ "backup_after_unit": "MINUTE",
522
+ "rolling_backup": true,
523
+ "rolling_backup_count": 2,
524
+ "backup_before_save": true,
525
+ "save_every": 0,
526
+ "save_every_unit": "NEVER",
527
+ "save_skip_first": 0,
528
+ "save_filename_prefix": ""
529
+ }
training-scripts/onetrainer/zimage_turbo_template.json ADDED
@@ -0,0 +1,529 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "__version": 10,
3
+ "training_method": "LORA",
4
+ "model_type": "Z_IMAGE",
5
+ "debug_mode": false,
6
+ "debug_dir": "debug",
7
+ "workspace_dir": "workspace/run",
8
+ "cache_dir": "workspace-cache/run",
9
+ "tensorboard": true,
10
+ "tensorboard_expose": false,
11
+ "tensorboard_always_on": false,
12
+ "tensorboard_port": 6006,
13
+ "validation": false,
14
+ "validate_after": 1,
15
+ "validate_after_unit": "EPOCH",
16
+ "continue_last_backup": false,
17
+ "include_train_config": "NONE",
18
+ "multi_gpu": false,
19
+ "device_indexes": "",
20
+ "gradient_reduce_precision": "FLOAT_32_STOCHASTIC",
21
+ "fused_gradient_reduce": true,
22
+ "async_gradient_reduce": true,
23
+ "async_gradient_reduce_buffer": 100,
24
+ "base_model_name": "Tongyi-MAI/Z-Image-Turbo",
25
+ "output_dtype": "BFLOAT_16",
26
+ "output_model_format": "SAFETENSORS",
27
+ "output_model_destination": "models/zimage_[person_to_train]_v2_onetrainer.safetensors",
28
+ "gradient_checkpointing": "ON",
29
+ "enable_async_offloading": true,
30
+ "enable_activation_offloading": true,
31
+ "layer_offload_fraction": 0.0,
32
+ "force_circular_padding": false,
33
+ "compile": true,
34
+ "concept_file_name": "training_concepts/concepts.json",
35
+ "concepts": [
36
+ {
37
+ "__version": 2,
38
+ "image": {
39
+ "__version": 0,
40
+ "enable_crop_jitter": true,
41
+ "enable_random_flip": false,
42
+ "enable_fixed_flip": false,
43
+ "enable_random_rotate": false,
44
+ "enable_fixed_rotate": false,
45
+ "random_rotate_max_angle": 0.0,
46
+ "enable_random_brightness": false,
47
+ "enable_fixed_brightness": false,
48
+ "random_brightness_max_strength": 0.0,
49
+ "enable_random_contrast": false,
50
+ "enable_fixed_contrast": false,
51
+ "random_contrast_max_strength": 0.0,
52
+ "enable_random_saturation": false,
53
+ "enable_fixed_saturation": false,
54
+ "random_saturation_max_strength": 0.0,
55
+ "enable_random_hue": false,
56
+ "enable_fixed_hue": false,
57
+ "random_hue_max_strength": 0.0,
58
+ "enable_resolution_override": false,
59
+ "resolution_override": "512",
60
+ "enable_random_circular_mask_shrink": false,
61
+ "enable_random_mask_rotate_crop": false
62
+ },
63
+ "text": {
64
+ "__version": 0,
65
+ "prompt_source": "sample",
66
+ "prompt_path": "",
67
+ "enable_tag_shuffling": false,
68
+ "tag_delimiter": ",",
69
+ "keep_tags_count": 1,
70
+ "tag_dropout_enable": false,
71
+ "tag_dropout_mode": "FULL",
72
+ "tag_dropout_probability": 0.0,
73
+ "tag_dropout_special_tags_mode": "NONE",
74
+ "tag_dropout_special_tags": "",
75
+ "tag_dropout_special_tags_regex": false,
76
+ "caps_randomize_enable": false,
77
+ "caps_randomize_mode": "capslock, title, first, random",
78
+ "caps_randomize_probability": 0.0,
79
+ "caps_randomize_lowercase": false
80
+ },
81
+ "name": "",
82
+ "path": "C:/Development/ai-toolkit/datasets/[person_to_train]",
83
+ "seed": -469069486,
84
+ "enabled": true,
85
+ "type": "STANDARD",
86
+ "include_subdirectories": false,
87
+ "image_variations": 1,
88
+ "text_variations": 1,
89
+ "balancing": 1.0,
90
+ "balancing_strategy": "REPEATS",
91
+ "loss_weight": 1.0,
92
+ "concept_stats": {}
93
+ }
94
+ ],
95
+ "aspect_ratio_bucketing": true,
96
+ "latent_caching": true,
97
+ "clear_cache_before_training": true,
98
+ "learning_rate_scheduler": "CONSTANT",
99
+ "custom_learning_rate_scheduler": null,
100
+ "scheduler_params": [],
101
+ "learning_rate": 0.0003,
102
+ "learning_rate_warmup_steps": 200.0,
103
+ "learning_rate_cycles": 1.0,
104
+ "learning_rate_min_factor": 0.0,
105
+ "epochs": 100,
106
+ "batch_size": 2,
107
+ "gradient_accumulation_steps": 1,
108
+ "ema": "OFF",
109
+ "ema_decay": 0.999,
110
+ "ema_update_step_interval": 5,
111
+ "dataloader_threads": 1,
112
+ "train_device": "cuda",
113
+ "temp_device": "cpu",
114
+ "train_dtype": "BFLOAT_16",
115
+ "fallback_train_dtype": "BFLOAT_16",
116
+ "enable_autocast_cache": true,
117
+ "only_cache": false,
118
+ "resolution": "512",
119
+ "frames": "25",
120
+ "mse_strength": 1.0,
121
+ "mae_strength": 0.0,
122
+ "log_cosh_strength": 0.0,
123
+ "huber_strength": 0.0,
124
+ "huber_delta": 1.0,
125
+ "vb_loss_strength": 1.0,
126
+ "loss_weight_fn": "CONSTANT",
127
+ "loss_weight_strength": 5.0,
128
+ "dropout_probability": 0.0,
129
+ "loss_scaler": "NONE",
130
+ "learning_rate_scaler": "NONE",
131
+ "clip_grad_norm": 1.0,
132
+ "offset_noise_weight": 0.0,
133
+ "generalized_offset_noise": false,
134
+ "perturbation_noise_weight": 0.0,
135
+ "rescale_noise_scheduler_to_zero_terminal_snr": false,
136
+ "force_v_prediction": false,
137
+ "force_epsilon_prediction": false,
138
+ "min_noising_strength": 0.0,
139
+ "max_noising_strength": 1.0,
140
+ "timestep_distribution": "LOGIT_NORMAL",
141
+ "noising_weight": 0.0,
142
+ "noising_bias": 0.0,
143
+ "timestep_shift": 1.0,
144
+ "dynamic_timestep_shifting": false,
145
+ "unet": {
146
+ "__version": 0,
147
+ "model_name": "",
148
+ "include": true,
149
+ "train": true,
150
+ "stop_training_after": 0,
151
+ "stop_training_after_unit": "NEVER",
152
+ "learning_rate": null,
153
+ "weight_dtype": "FLOAT_32",
154
+ "dropout_probability": 0.0,
155
+ "train_embedding": true,
156
+ "attention_mask": false,
157
+ "guidance_scale": 1.0
158
+ },
159
+ "prior": {
160
+ "__version": 0,
161
+ "model_name": "",
162
+ "include": true,
163
+ "train": true,
164
+ "stop_training_after": 0,
165
+ "stop_training_after_unit": "NEVER",
166
+ "learning_rate": null,
167
+ "weight_dtype": "FLOAT_32",
168
+ "dropout_probability": 0.0,
169
+ "train_embedding": true,
170
+ "attention_mask": false,
171
+ "guidance_scale": 1.0
172
+ },
173
+ "transformer": {
174
+ "__version": 0,
175
+ "model_name": "https://huggingface.co/ostris/Z-Image-De-Turbo/blob/main/z_image_de_turbo_v1_bf16.safetensors",
176
+ "include": true,
177
+ "train": true,
178
+ "stop_training_after": 0,
179
+ "stop_training_after_unit": "NEVER",
180
+ "learning_rate": null,
181
+ "weight_dtype": "INT_W8A8",
182
+ "dropout_probability": 0.0,
183
+ "train_embedding": true,
184
+ "attention_mask": false,
185
+ "guidance_scale": 1.0
186
+ },
187
+ "quantization": {
188
+ "__version": 0,
189
+ "layer_filter": "layers",
190
+ "layer_filter_preset": "blocks",
191
+ "layer_filter_regex": false,
192
+ "svd_dtype": "NONE",
193
+ "svd_rank": 16,
194
+ "cache_dir": null
195
+ },
196
+ "text_encoder": {
197
+ "__version": 0,
198
+ "model_name": "",
199
+ "include": true,
200
+ "train": false,
201
+ "stop_training_after": 30,
202
+ "stop_training_after_unit": "EPOCH",
203
+ "learning_rate": null,
204
+ "weight_dtype": "FLOAT_8",
205
+ "dropout_probability": 0.0,
206
+ "train_embedding": true,
207
+ "attention_mask": false,
208
+ "guidance_scale": 1.0
209
+ },
210
+ "text_encoder_layer_skip": 0,
211
+ "text_encoder_sequence_length": 512,
212
+ "text_encoder_2": {
213
+ "__version": 0,
214
+ "model_name": "",
215
+ "include": true,
216
+ "train": true,
217
+ "stop_training_after": 30,
218
+ "stop_training_after_unit": "EPOCH",
219
+ "learning_rate": null,
220
+ "weight_dtype": "FLOAT_32",
221
+ "dropout_probability": 0.0,
222
+ "train_embedding": true,
223
+ "attention_mask": false,
224
+ "guidance_scale": 1.0
225
+ },
226
+ "text_encoder_2_layer_skip": 0,
227
+ "text_encoder_2_sequence_length": 77,
228
+ "text_encoder_3": {
229
+ "__version": 0,
230
+ "model_name": "",
231
+ "include": true,
232
+ "train": true,
233
+ "stop_training_after": 30,
234
+ "stop_training_after_unit": "EPOCH",
235
+ "learning_rate": null,
236
+ "weight_dtype": "FLOAT_32",
237
+ "dropout_probability": 0.0,
238
+ "train_embedding": true,
239
+ "attention_mask": false,
240
+ "guidance_scale": 1.0
241
+ },
242
+ "text_encoder_3_layer_skip": 0,
243
+ "text_encoder_4": {
244
+ "__version": 0,
245
+ "model_name": "",
246
+ "include": true,
247
+ "train": true,
248
+ "stop_training_after": 30,
249
+ "stop_training_after_unit": "EPOCH",
250
+ "learning_rate": null,
251
+ "weight_dtype": "FLOAT_32",
252
+ "dropout_probability": 0.0,
253
+ "train_embedding": true,
254
+ "attention_mask": false,
255
+ "guidance_scale": 1.0
256
+ },
257
+ "text_encoder_4_layer_skip": 0,
258
+ "vae": {
259
+ "__version": 0,
260
+ "model_name": "",
261
+ "include": true,
262
+ "train": true,
263
+ "stop_training_after": null,
264
+ "stop_training_after_unit": "NEVER",
265
+ "learning_rate": null,
266
+ "weight_dtype": "FLOAT_32",
267
+ "dropout_probability": 0.0,
268
+ "train_embedding": true,
269
+ "attention_mask": false,
270
+ "guidance_scale": 1.0
271
+ },
272
+ "effnet_encoder": {
273
+ "__version": 0,
274
+ "model_name": "",
275
+ "include": true,
276
+ "train": true,
277
+ "stop_training_after": null,
278
+ "stop_training_after_unit": "NEVER",
279
+ "learning_rate": null,
280
+ "weight_dtype": "FLOAT_32",
281
+ "dropout_probability": 0.0,
282
+ "train_embedding": true,
283
+ "attention_mask": false,
284
+ "guidance_scale": 1.0
285
+ },
286
+ "decoder": {
287
+ "__version": 0,
288
+ "model_name": "",
289
+ "include": true,
290
+ "train": true,
291
+ "stop_training_after": null,
292
+ "stop_training_after_unit": "NEVER",
293
+ "learning_rate": null,
294
+ "weight_dtype": "FLOAT_32",
295
+ "dropout_probability": 0.0,
296
+ "train_embedding": true,
297
+ "attention_mask": false,
298
+ "guidance_scale": 1.0
299
+ },
300
+ "decoder_text_encoder": {
301
+ "__version": 0,
302
+ "model_name": "",
303
+ "include": true,
304
+ "train": true,
305
+ "stop_training_after": null,
306
+ "stop_training_after_unit": "NEVER",
307
+ "learning_rate": null,
308
+ "weight_dtype": "FLOAT_32",
309
+ "dropout_probability": 0.0,
310
+ "train_embedding": true,
311
+ "attention_mask": false,
312
+ "guidance_scale": 1.0
313
+ },
314
+ "decoder_vqgan": {
315
+ "__version": 0,
316
+ "model_name": "",
317
+ "include": true,
318
+ "train": true,
319
+ "stop_training_after": null,
320
+ "stop_training_after_unit": "NEVER",
321
+ "learning_rate": null,
322
+ "weight_dtype": "FLOAT_32",
323
+ "dropout_probability": 0.0,
324
+ "train_embedding": true,
325
+ "attention_mask": false,
326
+ "guidance_scale": 1.0
327
+ },
328
+ "masked_training": false,
329
+ "unmasked_probability": 0.1,
330
+ "unmasked_weight": 0.1,
331
+ "normalize_masked_area_loss": false,
332
+ "masked_prior_preservation_weight": 0.0,
333
+ "custom_conditioning_image": false,
334
+ "layer_filter": "^(?=.*attention)(?!.*refiner).*,^(?=.*feed_forward)(?!.*refiner).*",
335
+ "layer_filter_preset": "attn-mlp",
336
+ "layer_filter_regex": true,
337
+ "embedding_learning_rate": null,
338
+ "preserve_embedding_norm": false,
339
+ "embedding": {
340
+ "__version": 0,
341
+ "uuid": "2a318115-2b61-473b-8ddd-5f7a2cc5cb2b",
342
+ "model_name": "",
343
+ "placeholder": "<embedding>",
344
+ "train": true,
345
+ "stop_training_after": null,
346
+ "stop_training_after_unit": "NEVER",
347
+ "token_count": 1,
348
+ "initial_embedding_text": "*",
349
+ "is_output_embedding": false
350
+ },
351
+ "additional_embeddings": [],
352
+ "embedding_weight_dtype": "FLOAT_32",
353
+ "cloud": {
354
+ "__version": 0,
355
+ "enabled": false,
356
+ "type": "RUNPOD",
357
+ "file_sync": "NATIVE_SCP",
358
+ "create": true,
359
+ "name": "OneTrainer",
360
+ "tensorboard_tunnel": true,
361
+ "sub_type": "",
362
+ "gpu_type": "",
363
+ "volume_size": 100,
364
+ "min_download": 0,
365
+ "remote_dir": "/workspace",
366
+ "huggingface_cache_dir": "/workspace/huggingface_cache",
367
+ "onetrainer_dir": "/workspace/OneTrainer",
368
+ "install_cmd": "git clone https://github.com/Nerogar/OneTrainer",
369
+ "install_onetrainer": true,
370
+ "update_onetrainer": true,
371
+ "detach_trainer": false,
372
+ "run_id": "job1",
373
+ "download_samples": true,
374
+ "download_output_model": true,
375
+ "download_saves": true,
376
+ "download_backups": false,
377
+ "download_tensorboard": false,
378
+ "delete_workspace": false,
379
+ "on_finish": "NONE",
380
+ "on_error": "NONE",
381
+ "on_detached_finish": "NONE",
382
+ "on_detached_error": "NONE"
383
+ },
384
+ "peft_type": "LORA",
385
+ "lora_model_name": "",
386
+ "lora_rank": 16,
387
+ "lora_alpha": 1.0,
388
+ "lora_decompose": false,
389
+ "lora_decompose_norm_epsilon": true,
390
+ "lora_decompose_output_axis": false,
391
+ "lora_weight_dtype": "FLOAT_32",
392
+ "bundle_additional_embeddings": true,
393
+ "oft_block_size": 32,
394
+ "oft_coft": false,
395
+ "coft_eps": 0.0001,
396
+ "oft_block_share": false,
397
+ "optimizer": {
398
+ "__version": 0,
399
+ "optimizer": "ADAMW",
400
+ "adam_w_mode": false,
401
+ "alpha": null,
402
+ "amsgrad": false,
403
+ "beta1": 0.9,
404
+ "beta2": 0.999,
405
+ "beta3": null,
406
+ "bias_correction": false,
407
+ "block_wise": false,
408
+ "capturable": false,
409
+ "centered": false,
410
+ "clip_threshold": null,
411
+ "d0": null,
412
+ "d_coef": null,
413
+ "dampening": null,
414
+ "decay_rate": null,
415
+ "decouple": false,
416
+ "differentiable": false,
417
+ "eps": 1e-08,
418
+ "eps2": null,
419
+ "foreach": false,
420
+ "fsdp_in_use": false,
421
+ "fused": true,
422
+ "fused_back_pass": false,
423
+ "growth_rate": null,
424
+ "initial_accumulator_value": null,
425
+ "initial_accumulator": null,
426
+ "is_paged": false,
427
+ "log_every": null,
428
+ "lr_decay": null,
429
+ "max_unorm": null,
430
+ "maximize": false,
431
+ "min_8bit_size": null,
432
+ "quant_block_size": null,
433
+ "momentum": null,
434
+ "nesterov": false,
435
+ "no_prox": false,
436
+ "optim_bits": null,
437
+ "percentile_clipping": null,
438
+ "r": null,
439
+ "relative_step": false,
440
+ "safeguard_warmup": false,
441
+ "scale_parameter": false,
442
+ "stochastic_rounding": false,
443
+ "use_bias_correction": false,
444
+ "use_triton": false,
445
+ "warmup_init": false,
446
+ "weight_decay": 0.01,
447
+ "weight_lr_power": null,
448
+ "decoupled_decay": false,
449
+ "fixed_decay": false,
450
+ "rectify": false,
451
+ "degenerated_to_sgd": false,
452
+ "k": null,
453
+ "xi": null,
454
+ "n_sma_threshold": null,
455
+ "ams_bound": false,
456
+ "adanorm": false,
457
+ "adam_debias": false,
458
+ "slice_p": null,
459
+ "cautious": false,
460
+ "weight_decay_by_lr": true,
461
+ "prodigy_steps": null,
462
+ "use_speed": false,
463
+ "split_groups": true,
464
+ "split_groups_mean": true,
465
+ "factored": true,
466
+ "factored_fp32": true,
467
+ "use_stableadamw": true,
468
+ "use_cautious": false,
469
+ "use_grams": false,
470
+ "use_adopt": false,
471
+ "d_limiter": true,
472
+ "use_schedulefree": true,
473
+ "use_orthograd": false,
474
+ "nnmf_factor": false,
475
+ "orthogonal_gradient": false,
476
+ "use_atan2": false,
477
+ "use_AdEMAMix": false,
478
+ "beta3_ema": null,
479
+ "alpha_grad": null,
480
+ "beta1_warmup": null,
481
+ "min_beta1": null,
482
+ "Simplified_AdEMAMix": false,
483
+ "cautious_mask": false,
484
+ "grams_moment": false,
485
+ "kourkoutas_beta": false,
486
+ "k_warmup_steps": null,
487
+ "schedulefree_c": null,
488
+ "ns_steps": null,
489
+ "MuonWithAuxAdam": false,
490
+ "muon_hidden_layers": null,
491
+ "muon_adam_regex": false,
492
+ "muon_adam_lr": null,
493
+ "muon_te1_adam_lr": null,
494
+ "muon_te2_adam_lr": null,
495
+ "muon_adam_config": null,
496
+ "rms_rescaling": true,
497
+ "normuon_variant": false,
498
+ "beta2_normuon": null,
499
+ "normuon_eps": null,
500
+ "low_rank_ortho": false,
501
+ "ortho_rank": null,
502
+ "accelerated_ns": false,
503
+ "cautious_wd": false,
504
+ "approx_mars": false,
505
+ "kappa_p": null,
506
+ "auto_kappa_p": false,
507
+ "compile": false
508
+ },
509
+ "optimizer_defaults": {},
510
+ "sample_definition_file_name": "training_samples/samples.json",
511
+ "samples": [],
512
+ "sample_after": 10,
513
+ "sample_after_unit": "MINUTE",
514
+ "sample_skip_first": 0,
515
+ "sample_image_format": "JPG",
516
+ "sample_video_format": "MP4",
517
+ "sample_audio_format": "MP3",
518
+ "samples_to_tensorboard": true,
519
+ "non_ema_sampling": true,
520
+ "backup_after": 30,
521
+ "backup_after_unit": "MINUTE",
522
+ "rolling_backup": true,
523
+ "rolling_backup_count": 2,
524
+ "backup_before_save": true,
525
+ "save_every": 0,
526
+ "save_every_unit": "NEVER",
527
+ "save_skip_first": 0,
528
+ "save_filename_prefix": ""
529
+ }