panopstor committed
Commit d7bc7a5 · verified · 1 parent: 828aba0

Upload 3 files

events.out.tfevents.1740782093.panopstor-nv6k-001.2603219.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1dd25b1b34ab98b6b372044d64861f824b07e0ef6da7b07d4d969a36b48cfa3
+ size 537131
hunyuan_video.toml ADDED
@@ -0,0 +1,95 @@
+ # Output path for training runs. Each training run makes a new directory in here.
+ output_dir = '/data/diffusion_pipe_training_runs/hunyuan_video_test'
+
+ # Dataset config file.
+ dataset = 'examples/dataset.toml'
+ # You can have separate eval datasets. Give them a name for Tensorboard metrics.
+ # eval_datasets = [
+ #   {name = 'something', config = 'path/to/eval_dataset.toml'},
+ # ]
+
+ # training settings
+
+ # I usually set this to a really high value because I don't know how long I want to train.
+ epochs = 1000
+ # Batch size of a single forward/backward pass for one GPU.
+ micro_batch_size_per_gpu = 1
+ # Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+ pipeline_stages = 1
+ # Number of micro-batches sent through the pipeline for each training step.
+ # If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+ gradient_accumulation_steps = 4
+ # Grad norm clipping.
+ gradient_clipping = 1.0
+ # Learning rate warmup.
+ warmup_steps = 100
+
+ # eval settings
+
+ eval_every_n_epochs = 1
+ eval_before_first_step = true
+ # Might want to set these lower for eval so that fewer images get dropped (the eval dataset is usually much smaller than the training set).
+ # Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so a higher global batch size means
+ # more dropped images. This usually doesn't matter for training, but the eval set is much smaller, so it can matter there.
+ eval_micro_batch_size_per_gpu = 1
+ eval_gradient_accumulation_steps = 1
+
+ # misc settings
+
+ # Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
+ save_every_n_epochs = 2
+ # Can checkpoint the training state every n epochs or every n minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
+ #checkpoint_every_n_epochs = 1
+ checkpoint_every_n_minutes = 120
+ # Always set to true unless you have a huge amount of VRAM.
+ activation_checkpointing = true
+ # Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
+ partition_method = 'parameters'
+ # dtype for saving the LoRA or model, if different from training dtype
+ save_dtype = 'bfloat16'
+ # Batch size for caching latents and text embeddings. Increasing it can lead to higher GPU utilization during the caching phase but uses more memory.
+ caching_batch_size = 2
+ # How often Deepspeed logs to the console.
+ steps_per_print = 1
+ # How to extract video clips for training from a single input video file.
+ # The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
+ # number of frames for that bucket.
+ # single_beginning: one clip starting at the beginning of the video
+ # single_middle: one clip from the middle of the video (cutting off the start and end equally)
+ # multiple_overlapping: extract the minimum number of clips to cover the full range of the video. They might overlap some.
+ # default is single_middle
+ video_clip_mode = 'single_middle'
+
+ [model]
+ type = 'hunyuan-video'
+ # Can load Hunyuan Video entirely from the ckpt path set up for the official inference scripts.
+ #ckpt_path = '/home/anon/HunyuanVideo/ckpts'
+ # Or you can load it by pointing to all the ComfyUI files.
+ transformer_path = '/data2/imagegen_models/hunyuan_video_comfyui/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors'
+ vae_path = '/data2/imagegen_models/hunyuan_video_comfyui/hunyuan_video_vae_bf16.safetensors'
+ llm_path = '/data2/imagegen_models/hunyuan_video_comfyui/llava-llama-3-8b-text-encoder-tokenizer'
+ clip_path = '/data2/imagegen_models/hunyuan_video_comfyui/clip-vit-large-patch14'
+ # Base dtype used for all models.
+ dtype = 'bfloat16'
+ # Hunyuan Video supports fp8 for the transformer when training LoRA.
+ transformer_dtype = 'float8'
+ # How to sample timesteps to train on. Can be logit_normal or uniform.
+ timestep_sample_method = 'logit_normal'
+
+ # For models that support full fine tuning, simply delete or comment out the [adapter] table to do a full fine tune (FFT).
+ [adapter]
+ type = 'lora'
+ rank = 32
+ # Dtype for the LoRA weights you are training.
+ dtype = 'bfloat16'
+ # You can initialize the LoRA weights from a previously trained LoRA.
+ #init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'
+
+ [optimizer]
+ # AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+ # Look at train.py for other options. You could also easily edit the file and add your own.
+ type = 'adamw_optimi'
+ lr = 2e-5
+ betas = [0.9, 0.99]
+ weight_decay = 0.01
+ eps = 1e-8
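
Note: the main config above keeps dataset = 'examples/dataset.toml', while the dataset config uploaded in this commit is input.toml (below). A minimal sketch of how the two could be wired together, assuming both files sit at paths that train.py can resolve; the eval entry name and path are placeholders, not part of this commit:

    dataset = 'input.toml'
    # Optional named eval set for Tensorboard metrics (hypothetical path):
    # eval_datasets = [
    #   {name = 'holdout', config = 'input.toml'},
    # ]

If this follows the usual diffusion-pipe layout, training would then be launched by pointing train.py at this config, and a run could be resumed with the --resume_from_checkpoint flag mentioned above; the exact launch command is not part of this commit.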
input.toml ADDED
@@ -0,0 +1,57 @@
+ # Resolutions to train on, given as the side length of a square image. You can have multiple sizes here.
+ # !!!WARNING!!!: this might work differently from how you think it does. Images are first grouped into aspect ratio
+ # buckets, then each image is resized to ALL of the areas specified by the resolutions list. This is a way to do
+ # multi-resolution training, i.e. training on multiple total pixel areas at once. Your dataset is effectively duplicated
+ # as many times as the length of this list.
+ # If you just want to use predetermined (width, height, frames) size buckets, see the example cosmos_dataset.toml
+ # file for how you can do that.
+ resolutions = [512]
+
+ # You can also give resolutions as (width, height) pairs. This doesn't do anything different, it's just
+ # another way of specifying the area(s) (i.e. total number of pixels) you want to train on.
+ # resolutions = [[1280, 720]]
+
+ # Enable aspect ratio bucketing. For the different AR buckets, the final size will be such that
+ # the areas match the resolutions you configured above.
+ enable_ar_bucket = true
+
+ # The aspect ratio and frame bucket settings may be specified for each [[directory]] entry as well.
+ # Directory-level settings will override top-level settings.
+
+ # Min and max aspect ratios, given as width/height ratio.
+ min_ar = 0.5
+ max_ar = 2.0
+ # Total number of aspect ratio buckets, evenly spaced (in log space) between min_ar and max_ar.
+ num_ar_buckets = 7
+
+ # Can manually specify ar_buckets instead of using the range-style config above.
+ # Each entry can be a width/height ratio, or a (width, height) pair. But you can't mix them, because of TOML.
+ # ar_buckets = [[512, 512], [448, 576]]
+ # ar_buckets = [1.0, 1.5]
+
+ # For video training, you need to configure frame buckets (similar to aspect ratio buckets). There will always
+ # be a frame bucket of 1 for images. Videos are assigned to the first frame bucket whose length the video meets or exceeds.
+ # But videos are never assigned to the image frame bucket (1); if the video is very short it is just dropped.
+ frame_buckets = [1, 33]
+ # If you have >24GB VRAM, or multiple GPUs with pipeline parallelism, or you lower the spatial resolution, you could maybe train with longer frame buckets.
+ # frame_buckets = [1, 33, 65, 97]
+
+
+ [[directory]]
+ # Path to a directory of images/videos and corresponding caption files. Each caption file should match its media file's name, but with a .txt extension.
+ # A missing caption file will log a warning, but then just train using an empty caption.
+ path = 'input'
+ # How many repeats for 1 epoch. The dataset will act like it is duplicated this many times.
+ # The semantics of this are the same as sd-scripts: num_repeats=1 means one epoch is a single pass over all examples (no duplication).
+ num_repeats = 10
+ # Example of overriding some settings, and using ar_buckets to directly specify ARs.
+ # ar_buckets = [[448, 576]]
+ # resolutions = [[448, 576]]
+ # frame_buckets = [1]
+
+
+ # You can list multiple directories.
+
+ # [[directory]]
+ # path = '/home/anon/data/images/something_else'
+ # num_repeats = 5
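
For reference, a minimal sketch of how multiple [[directory]] entries with per-directory overrides could look, assembled from the commented examples above; the second directory's path and settings are placeholders, not part of this commit:

    [[directory]]
    path = 'input'
    num_repeats = 10

    [[directory]]
    path = '/home/anon/data/images/something_else'
    num_repeats = 5
    # Directory-level settings override the top-level ones above.
    ar_buckets = [[448, 576]]
    resolutions = [[448, 576]]
    frame_buckets = [1]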