panopstor committed
Commit 2321cc1 · verified · 1 Parent(s): 00914ff

Delete hunyuan_video.toml

Files changed (1)
  1. hunyuan_video.toml +0 -95
hunyuan_video.toml DELETED
@@ -1,95 +0,0 @@
- # Output path for training runs. Each training run makes a new directory in here.
- output_dir = '/data/diffusion_pipe_training_runs/hunyuan_video_test'
-
- # Dataset config file.
- dataset = 'examples/dataset.toml'
- # You can have separate eval datasets. Give them a name for Tensorboard metrics.
- # eval_datasets = [
- # {name = 'something', config = 'path/to/eval_dataset.toml'},
- # ]
-
- # training settings
-
- # I usually set this to a really high value because I don't know how long I want to train.
- epochs = 1000
- # Batch size of a single forward/backward pass for one GPU.
- micro_batch_size_per_gpu = 1
- # Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
- pipeline_stages = 1
- # Number of micro-batches sent through the pipeline for each training step.
- # If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
- gradient_accumulation_steps = 4
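- # As a rough guide (standard data-parallel arithmetic, not spelled out in this file): effective global
- # batch size = micro_batch_size_per_gpu * gradient_accumulation_steps * number of data-parallel GPUs.
- # With the values here on a single GPU, that is 1 * 4 * 1 = 4 examples per optimizer step.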
- # Grad norm clipping.
- gradient_clipping = 1.0
- # Learning rate warmup.
- warmup_steps = 100
-
- # eval settings
-
- eval_every_n_epochs = 1
- eval_before_first_step = true
- # You might want to set these lower for eval so that fewer images get dropped (the eval dataset is usually much smaller than the training set).
- # Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so a higher global batch size means
- # more dropped images. This usually doesn't matter for training, but since the eval set is much smaller, it can matter there.
- eval_micro_batch_size_per_gpu = 1
- eval_gradient_accumulation_steps = 1
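- # Worked example (illustrative numbers): with a global batch size of 4, a size bucket holding 10 eval
- # images is rounded down to 8, so 2 images are dropped from every eval pass.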
-
- # misc settings
-
- # You probably want to set this a bit higher if you have a smaller dataset, so you don't end up with a million saved models.
- save_every_n_epochs = 2
- # You can checkpoint the training state every n epochs or every n minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
- #checkpoint_every_n_epochs = 1
- checkpoint_every_n_minutes = 120
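- # For example, resuming might look like the following (launch command assumed from the project README;
- # adjust the config path and GPU count to your setup):
- #   deepspeed --num_gpus=1 train.py --deepspeed --config examples/hunyuan_video.toml --resume_from_checkpoint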
- # Always set this to true unless you have a huge amount of VRAM.
- activation_checkpointing = true
- # Controls how Deepspeed decides how to divide layers across GPUs. You probably shouldn't change this.
- partition_method = 'parameters'
- # dtype for saving the LoRA or model, if different from the training dtype.
- save_dtype = 'bfloat16'
- # Batch size for caching latents and text embeddings. Increasing it can lead to higher GPU utilization during the caching phase but uses more memory.
- caching_batch_size = 2
- # How often Deepspeed logs to the console.
- steps_per_print = 1
- # How to extract video clips for training from a single input video file.
- # The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
- # number of frames for that bucket.
- # single_beginning: one clip starting at the beginning of the video
- # single_middle: one clip from the middle of the video (cutting off the start and end equally)
- # multiple_overlapping: extract the minimum number of clips needed to cover the full range of the video. They might overlap somewhat.
- # The default is single_middle.
- video_clip_mode = 'single_middle'
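- # Worked example (illustrative numbers): a 120-frame video assigned to a 33-frame bucket with
- # single_middle drops (120 - 33) / 2 ≈ 43 frames from each end and keeps roughly frames 44-76.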
-
- [model]
- type = 'hunyuan-video'
- # Can load Hunyuan Video entirely from the ckpt path set up for the official inference scripts.
- #ckpt_path = '/home/anon/HunyuanVideo/ckpts'
- # Or you can load it by pointing to all the ComfyUI files.
- transformer_path = '/data2/imagegen_models/hunyuan_video_comfyui/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors'
- vae_path = '/data2/imagegen_models/hunyuan_video_comfyui/hunyuan_video_vae_bf16.safetensors'
- llm_path = '/data2/imagegen_models/hunyuan_video_comfyui/llava-llama-3-8b-text-encoder-tokenizer'
- clip_path = '/data2/imagegen_models/hunyuan_video_comfyui/clip-vit-large-patch14'
- # Base dtype used for all models.
- dtype = 'bfloat16'
- # Hunyuan Video supports fp8 for the transformer when training a LoRA.
- transformer_dtype = 'float8'
- # How to sample timesteps to train on. Can be logit_normal or uniform.
- timestep_sample_method = 'logit_normal'
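- # Background (standard rectified-flow practice, not stated in this file): logit_normal draws a timestep
- # as t = sigmoid(z) with z ~ N(0, 1), concentrating training around the middle of the noise schedule,
- # while uniform spreads timesteps evenly over [0, 1].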
-
- # For models that support full fine tuning, simply delete or comment out the [adapter] table to do a full fine tune (FFT).
- [adapter]
- type = 'lora'
- rank = 32
- # Dtype for the LoRA weights you are training.
- dtype = 'bfloat16'
- # You can initialize the LoRA weights from a previously trained LoRA.
- #init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'
-
- [optimizer]
- # AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
- # Look at train.py for other options. You could also easily edit the file and add your own.
- type = 'adamw_optimi'
- lr = 2e-5
- betas = [0.9, 0.99]
- weight_decay = 0.01
- eps = 1e-8
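- # Why Kahan summation helps here (general numerical background, not specific to this repo): bfloat16 has
- # only a 7-bit mantissa, so tiny updates like lr * grad can round away entirely when added to the weights;
- # Kahan summation keeps a per-parameter compensation buffer that carries that rounding error forward.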