panopstor committed · verified
Commit 00914ff · Parent: d7bc7a5

Upload wan_video.toml

Files changed (1):
1. wan_video.toml (+91, -0)
wan_video.toml ADDED
@@ -0,0 +1,91 @@
# Output path for training runs. Each training run makes a new directory in here.
output_dir = '/mnt/lcl/nvme/diffusion-pipe/wan_video_test'

# Dataset config file.
dataset = 'examples/input.toml'
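# For reference, the dataset config referenced above might look roughly like the commented sketch
# below. This is only an illustration and the key names are assumptions; check examples/dataset.toml
# in the diffusion-pipe repo for the actual schema.
# resolutions = [512]
# frame_buckets = [1, 33]
# [[directory]]
# path = '/path/to/your/videos'
# num_repeats = 1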
# You can have separate eval datasets. Give them a name for Tensorboard metrics.
# eval_datasets = [
#   {name = 'something', config = 'path/to/eval_dataset.toml'},
# ]

# training settings

# I usually set this to a really high value because I don't know how long I want to train.
epochs = 100
# Batch size of a single forward/backward pass for one GPU.
micro_batch_size_per_gpu = 1
# Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
pipeline_stages = 1
# Number of micro-batches sent through the pipeline for each training step.
# If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (idle periods where GPUs aren't overlapping computation).
gradient_accumulation_steps = 4
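# Note: the effective (global) batch size follows DeepSpeed's usual definition,
# micro_batch_size_per_gpu * gradient_accumulation_steps * (number of data-parallel GPUs).
# With the values above that is 1 * 4 * N, e.g. a global batch of 4 on a single GPU.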
# Grad norm clipping.
gradient_clipping = 1.0
# Learning rate warmup.
warmup_steps = 100

# eval settings

eval_every_n_epochs = 2
eval_before_first_step = true
# Might want to set these lower for eval so that fewer images get dropped (the eval dataset is usually much smaller than the training set).
# Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so a higher global batch size means
# more dropped images. That usually doesn't matter for training, but the eval set is much smaller, so it can matter there.
eval_micro_batch_size_per_gpu = 1
eval_gradient_accumulation_steps = 1
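# Worked example with illustrative numbers: at a global batch size of 8, a size bucket holding 13 eval
# samples is rounded down to 8 and 5 samples are dropped; with the eval settings above on a single GPU
# (global batch size 1), nothing is dropped.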

# misc settings

# Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
save_every_n_epochs = 2
# Can checkpoint the training state every n epochs or every n minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
#checkpoint_every_n_epochs = 1
checkpoint_every_n_minutes = 120
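# Resuming might look roughly like the command below. The launch command itself is an assumption based
# on the diffusion-pipe README (only the --resume_from_checkpoint flag is documented here), so check
# the README for the exact invocation:
#   deepspeed --num_gpus=1 train.py --deepspeed --config examples/wan_video.toml --resume_from_checkpoint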
# Always set to true unless you have a huge amount of VRAM.
activation_checkpointing = true
# Controls how DeepSpeed divides layers across GPUs. Probably don't change this.
partition_method = 'parameters'
# dtype for saving the LoRA or model, if different from the training dtype.
save_dtype = 'bfloat16'
# Batch size for caching latents and text embeddings. Increasing it can lead to higher GPU utilization during the caching phase but uses more memory.
caching_batch_size = 2
# How often DeepSpeed logs to the console.
steps_per_print = 1
# How to extract video clips for training from a single input video file.
# The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
# number of frames for that bucket.
# single_beginning: one clip starting at the beginning of the video
# single_middle: one clip from the middle of the video (cutting off the start and end equally)
# multiple_overlapping: extract the minimum number of clips to cover the full range of the video. They might overlap somewhat.
# default is single_middle
video_clip_mode = 'single_middle'
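# Worked example with illustrative numbers: for a 100-frame video assigned to a 33-frame bucket,
# single_beginning takes frames 0-32, single_middle takes a 33-frame clip centered in the video
# (roughly frames 33-66), and multiple_overlapping takes ceil(100 / 33) = 4 overlapping 33-frame clips
# so the whole video is covered.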

[model]
type = 'wan'
# Clone https://huggingface.co/Wan-AI/Wan2.1-T2V-14B or https://huggingface.co/Wan-AI/Wan2.1-T2V-1.3B
ckpt_path = '/mnt/lcl/nvme/Wan2.1/Wan2.1-T2V-14B'

# Base dtype used for all models.
dtype = 'bfloat16'
# Dtype for the transformer. Some models (e.g. Hunyuan Video) support fp8 here when training a LoRA.
transformer_dtype = 'bfloat16'
# How to sample timesteps to train on. Can be logit_normal or uniform.
timestep_sample_method = 'logit_normal'

# For models that support full fine-tuning, simply delete or comment out the [adapter] table to do a full fine-tune instead of a LoRA.
[adapter]
type = 'lora'
rank = 32
# Dtype for the LoRA weights you are training.
dtype = 'bfloat16'
# You can initialize the LoRA weights from a previously trained LoRA.
#init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'

[optimizer]
# AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
# This config uses an 8-bit AdamW variant instead, which stores optimizer state in 8 bits to save memory.
# Look at train.py for other options. You could also easily edit the file and add your own.
type = 'adamw8bit'
lr = 2e-5
betas = [0.9, 0.99]
weight_decay = 0.01
eps = 1e-8
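# If you want the optimi AdamW mentioned above instead, the upstream diffusion-pipe examples use a
# type value roughly like the commented one below. The exact string is an assumption, so check
# train.py for the names it actually accepts:
# type = 'adamw_optimi'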