# HunyuanVideo LoRA training configuration.
# NOTE(review): leading scrape artifacts (file-size line, commit hash, and a
# copied line-number gutter) were removed; they made the file invalid TOML.
# Root-table training settings. Key names (pipeline_stages, partition_method,
# caching_batch_size, video_clip_mode) match the diffusion-pipe trainer's
# schema — TODO confirm the consuming application.
output_dir = "/workspace/outputs/Complete"
dataset = "/workspace/configs/Complete/dataset.toml"
epochs = 10
micro_batch_size_per_gpu = 1
pipeline_stages = 1
# Effective batch per GPU = micro_batch_size_per_gpu * gradient_accumulation_steps = 2.
gradient_accumulation_steps = 2
gradient_clipping = 1.0
warmup_steps = 0  # no LR warmup
eval_every_n_epochs = 1
eval_before_first_step = true
eval_micro_batch_size_per_gpu = 1
eval_gradient_accumulation_steps = 1
save_every_n_epochs = 1
checkpoint_every_n_minutes = 120  # time-based checkpointing in addition to per-epoch saves
activation_checkpointing = true  # trades recompute for lower activation memory
partition_method = "parameters"  # presumably how layers are split across pipeline stages — verify
save_dtype = "bfloat16"
caching_batch_size = 4
steps_per_print = 1
video_clip_mode = "single_beginning"  # presumably one clip taken from the start of each video — verify
# Model components — paths reference HunyuanVideo checkpoints under /workspace/models.
[model]
type = "hunyuan-video"
dtype = "bfloat16"
# Filename indicates fp8 e4m3fn transformer weights, consistent with
# transformer_dtype = "float8" below.
transformer_path = "/workspace/models/hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors"
transformer_dtype = "float8"
vae_path = "/workspace/models/hunyuan/hunyuan_video_vae_bf16.safetensors"
llm_path = "/workspace/models/hunyuan/llava-llama-3-8b-text-encoder-tokenizer"  # LLaVA-LLaMA-3 text encoder + tokenizer
clip_path = "/workspace/models/hunyuan/clip-vit-large-patch14"
timestep_sample_method = "logit_normal"
# Parameter-efficient adapter settings.
[adapter]
type = "lora"
rank = 16  # LoRA rank; higher = more capacity, larger adapter file
dtype = "bfloat16"
[optimizer]
# Name suggests 8-bit AdamW with Kahan summation — confirm against the
# trainer's supported optimizer list.
type = "adamw8bitKahan"
lr = 0.0002
weight_decay = 0.01
eps = 1e-08
betas = [0.9, 0.99]
[monitoring]
enable_wandb = false  # Weights & Biases logging disabled
log_dir = "/workspace/outputs/Complete"  # same directory as top-level output_dir