Shitao committed (verified)
Commit b3f0a26 · 1 Parent(s): cc7caea

Upload folder using huggingface_hub

Files changed (2)
  1. exp/final.yml +129 -0
  2. exp/pytorch_model_fsdp.bin +3 -0
exp/final.yml ADDED
@@ -0,0 +1,129 @@
+ name: final
+
+ seed: 998244353
+ device_specific_seed: true
+ workder_specific_seed: true
+
+
+ data:
+   data_path: train_data/chenyuan/final.yml
+   image_size: 512
+   apply_chat_template: true
+   max_token_len: 4000
+   img_scale_num: 16
+   use_text_loss: false
+   interleave: true
+   use_input_image_for_vlm: true
+
+
+ model:
+   # pretrained_model_path: ~
+   pretrained_vae_model_name_or_path: /share_2/luoxin/modelscope/hub/models/FLUX.1-dev
+   pretrained_text_encoder_model_name_or_path: /share/shitao/models/Qwen2.5-VL-3B-Instruct
+   pretrained_diffusion_model_name_or_path: /share_2/luoxin/projects/Ominigenv2/experiments/0605_mix5/checkpoint-1000/pytorch_model_fsdp.bin
+   use_vae_input: false
+   use_all_condition: true
+   use_image_tokens: false
+   use_hw_tokens: false
+   use_only_text_token_from_mllm: true
+   use_only_prev_text_token_from_mllm: false
+
+   arch_type: Lumina2Transformer_separate_ref_process
+   arch_opt:
+     patch_size: 2
+     in_channels: 16
+     hidden_size: 2520
+     num_layers: 32
+     num_refiner_layers: 2
+     num_attention_heads: 21
+     num_kv_heads: 7
+     multiple_of: 256
+     norm_eps: !!float 1e-05
+     axes_dim_rope: [40, 40, 40]
+     axes_lens: [12000, 12000, 12000]
+     text_feat_dim: 2048
+     timestep_scale: !!float 1000
+     position_id_version: ominicontrol
+     separate_ref_image_patch_embed: true
+     separate_ref_image_refiner: true
+     use_fused_rms_norm: true
+     use_fused_swiglu: true
+     proj_feat_depth: 8
+     projector_dim: 2048
+     proj_feat_dim: 1024
+
+ transport:
+   snr_type: lognorm
+   do_shift: true
+   dynamic_time_shift: true
+
+ train:
+   # Dataloader
+   global_batch_size: 128
+   batch_size: 2
+   gradient_accumulation_steps: 1
+
+   num_train_epochs: 5
+
+   dataloader_num_workers: 6
+
+   # Optimizer
+   learning_rate: !!float 1e-6
+   mllm_lr: !!float 3e-7
+   mllm_projector_lr: !!float 1e-7
+   mllm_vision_lr: !!float 1e-7
+   mllm_image_token_lr: !!float 1e-4
+
+   scale_lr: false
+   lr_scheduler: timm_cosine
+   t_initial: 200000
+   lr_min: 0
+   cycle_decay: 0.5
+   warmup_t: 500
+   warmup_lr_init: 0
+   warmup_prefix: true
+   t_in_epochs: false
+
+   # resume_from_checkpoint:
+
+   use_8bit_adam: false
+   adam_beta1: 0.9
+   adam_beta2: 0.95
+   adam_weight_decay: !!float 0.01
+   adam_epsilon: !!float 1e-08
+   max_grad_norm: 1
+
+   gradient_checkpointing: true
+
+   set_grads_to_none: true
+
+   # Misc
+   allow_tf32: false
+   mixed_precision: 'bf16'
+
+   ema_decay: 0.0
+
+   repa_loss_weight: !!float 0.0
+   repa_encoder: dinov2_vitl14_reg
+   repa_batch_infer: true
+
+   text_loss_weight: !!float 1.0
+
+   train_diffusion: true
+   train_mllm: true
+   train_diffusion_context_refiner: true
+
+ val:
+   validation_steps: 200
+   train_visualization_steps: 200
+
+ logger:
+   log_with: [wandb, tensorboard]
+   # log_with: ~
+
+   checkpointing_steps: 1000
+   checkpoints_total_limit: ~
+
+   cache_dir:
+   resume_from_checkpoint: latest
+   previous_stage_ckpt:
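
A note on reading this config: the `!!float` tags are not decorative. PyYAML's YAML 1.1 resolver parses scientific notation without a decimal point (e.g. `1e-6`) as a string, so the tags guarantee the learning rates load as actual floats. The keys next to `lr_scheduler: timm_cosine` (`t_initial`, `lr_min`, `cycle_decay`, `warmup_t`, `warmup_lr_init`, `warmup_prefix`, `t_in_epochs`) line up with the constructor arguments of timm's CosineLRScheduler. Below is a minimal sketch of loading this file and building the optimizer/scheduler pair it describes; the actual training entry point is not part of this commit, so the wiring here is illustrative, not the repo's own code.

import yaml
import torch
from timm.scheduler.cosine_lr import CosineLRScheduler

with open("exp/final.yml") as f:
    cfg = yaml.safe_load(f)

train = cfg["train"]
# The explicit `!!float` tags matter: without them, PyYAML would load
# values like `1e-6` as strings rather than floats.
assert isinstance(train["learning_rate"], float)

params = [torch.nn.Parameter(torch.zeros(1))]  # stand-in for model parameters
optimizer = torch.optim.AdamW(
    params,
    lr=train["learning_rate"],
    betas=(train["adam_beta1"], train["adam_beta2"]),
    weight_decay=train["adam_weight_decay"],
    eps=train["adam_epsilon"],
)

# `lr_scheduler: timm_cosine` with the keys above maps onto timm's
# CosineLRScheduler arguments (assumed mapping, not the repo's code).
scheduler = CosineLRScheduler(
    optimizer,
    t_initial=train["t_initial"],
    lr_min=train["lr_min"],
    cycle_decay=train["cycle_decay"],
    warmup_t=train["warmup_t"],
    warmup_lr_init=train["warmup_lr_init"],
    warmup_prefix=train["warmup_prefix"],
    t_in_epochs=train["t_in_epochs"],
)
scheduler.step_update(num_updates=0)

With `t_in_epochs: false`, the scheduler is advanced once per optimizer update via `step_update(num_updates=global_step)` rather than once per epoch, which matches the step-based `t_initial: 200000` and `warmup_t: 500`.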
exp/pytorch_model_fsdp.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a50591eb71539cf8dca17b85e1fa43f5a0494aae155de1b1e2c5b63eb089a2b6
+ size 32211884086
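
The checkpoint itself is stored under Git LFS: the three lines above are only the pointer file, and the real payload (about 30 GiB) is fetched on checkout. The `oid` and `size` fields are enough to verify a completed download; a minimal sketch, with the path and expected values copied from the pointer above:

import hashlib
import os

path = "exp/pytorch_model_fsdp.bin"
expected_oid = "a50591eb71539cf8dca17b85e1fa43f5a0494aae155de1b1e2c5b63eb089a2b6"
expected_size = 32211884086  # bytes, ~30 GiB

# Cheap check first: a truncated download fails here immediately.
assert os.path.getsize(path) == expected_size, "size mismatch"

h = hashlib.sha256()
with open(path, "rb") as f:
    # Stream in 1 MiB chunks; the file is far too large to read at once.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("LFS object verified")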