svjack committed
Commit a8bb393 · verified · 1 Parent(s): b697243

Upload folder using huggingface_hub

Files changed (35)
  1. .job_config.json +152 -0
  2. Xiang_wan_2_2_lora_000000250_high_noise.safetensors +3 -0
  3. Xiang_wan_2_2_lora_000000250_low_noise.safetensors +3 -0
  4. Xiang_wan_2_2_lora_000000500_high_noise.safetensors +3 -0
  5. Xiang_wan_2_2_lora_000000500_low_noise.safetensors +3 -0
  6. Xiang_wan_2_2_lora_000000750_high_noise.safetensors +3 -0
  7. Xiang_wan_2_2_lora_000000750_low_noise.safetensors +3 -0
  8. Xiang_wan_2_2_lora_000001000_high_noise.safetensors +3 -0
  9. Xiang_wan_2_2_lora_000001000_low_noise.safetensors +3 -0
  10. Xiang_wan_2_2_lora_000001250_high_noise.safetensors +3 -0
  11. Xiang_wan_2_2_lora_000001250_low_noise.safetensors +3 -0
  12. Xiang_wan_2_2_lora_000001500_high_noise.safetensors +3 -0
  13. Xiang_wan_2_2_lora_000001500_low_noise.safetensors +3 -0
  14. Xiang_wan_2_2_lora_000001750_high_noise.safetensors +3 -0
  15. Xiang_wan_2_2_lora_000001750_low_noise.safetensors +3 -0
  16. Xiang_wan_2_2_lora_000002000_high_noise.safetensors +3 -0
  17. Xiang_wan_2_2_lora_000002000_low_noise.safetensors +3 -0
  18. Xiang_wan_2_2_lora_000002250_high_noise.safetensors +3 -0
  19. Xiang_wan_2_2_lora_000002250_low_noise.safetensors +3 -0
  20. Xiang_wan_2_2_lora_000002500_high_noise.safetensors +3 -0
  21. Xiang_wan_2_2_lora_000002500_low_noise.safetensors +3 -0
  22. Xiang_wan_2_2_lora_000002750_high_noise.safetensors +3 -0
  23. Xiang_wan_2_2_lora_000002750_low_noise.safetensors +3 -0
  24. Xiang_wan_2_2_lora_000003000_high_noise.safetensors +3 -0
  25. Xiang_wan_2_2_lora_000003000_low_noise.safetensors +3 -0
  26. Xiang_wan_2_2_lora_000003250_high_noise.safetensors +3 -0
  27. Xiang_wan_2_2_lora_000003250_low_noise.safetensors +3 -0
  28. Xiang_wan_2_2_lora_000003500_high_noise.safetensors +3 -0
  29. Xiang_wan_2_2_lora_000003500_low_noise.safetensors +3 -0
  30. Xiang_wan_2_2_lora_000003750_high_noise.safetensors +3 -0
  31. Xiang_wan_2_2_lora_000003750_low_noise.safetensors +3 -0
  32. config.yaml +117 -0
  33. log.txt +0 -0
  34. logs/0_log.txt +103 -0
  35. optimizer.pt +3 -0
.job_config.json ADDED
@@ -0,0 +1,152 @@
+ {
+   "job": "extension",
+   "config": {
+     "name": "Xiang_wan_2_2_lora",
+     "process": [
+       {
+         "type": "ui_trainer",
+         "training_folder": "/home/featurize/ai-toolkit/output",
+         "sqlite_db_path": "/home/featurize/ai-toolkit/aitk_db.db",
+         "device": "cuda",
+         "trigger_word": null,
+         "performance_log_every": 10,
+         "network": {
+           "type": "lora",
+           "linear": 32,
+           "linear_alpha": 32,
+           "conv": 16,
+           "conv_alpha": 16,
+           "lokr_full_rank": true,
+           "lokr_factor": -1,
+           "network_kwargs": {
+             "ignore_if_contains": []
+           }
+         },
+         "save": {
+           "dtype": "bf16",
+           "save_every": 250,
+           "max_step_saves_to_keep": 4000,
+           "save_format": "diffusers",
+           "push_to_hub": false
+         },
+         "datasets": [
+           {
+             "folder_path": "/home/featurize/ai-toolkit/datasets/Xiang_Handsome_Flux_SRPO_Keye_Captioned",
+             "control_path": null,
+             "mask_path": null,
+             "mask_min_value": 0.1,
+             "default_caption": "",
+             "caption_ext": "txt",
+             "caption_dropout_rate": 0.05,
+             "cache_latents_to_disk": false,
+             "is_reg": false,
+             "network_weight": 1,
+             "resolution": [
+               512,
+               768
+             ],
+             "controls": [],
+             "shrink_video_to_frames": true,
+             "num_frames": 1,
+             "do_i2v": true,
+             "flip_x": false,
+             "flip_y": false
+           }
+         ],
+         "train": {
+           "batch_size": 1,
+           "bypass_guidance_embedding": false,
+           "steps": 30000,
+           "gradient_accumulation": 1,
+           "train_unet": true,
+           "train_text_encoder": false,
+           "gradient_checkpointing": true,
+           "noise_scheduler": "flowmatch",
+           "optimizer": "adamw8bit",
+           "timestep_type": "linear",
+           "content_or_style": "balanced",
+           "optimizer_params": {
+             "weight_decay": 0.0001
+           },
+           "unload_text_encoder": false,
+           "cache_text_embeddings": true,
+           "lr": 0.0001,
+           "ema_config": {
+             "use_ema": false,
+             "ema_decay": 0.99
+           },
+           "skip_first_sample": true,
+           "force_first_sample": false,
+           "disable_sampling": true,
+           "dtype": "bf16",
+           "diff_output_preservation": false,
+           "diff_output_preservation_multiplier": 1,
+           "diff_output_preservation_class": "person",
+           "switch_boundary_every": 1
+         },
+         "model": {
+           "name_or_path": "/home/featurize/ai-toolkit/Wan2.2-T2V-A14B-Diffusers-bf16",
+           "quantize": true,
+           "qtype": "uint4|ostris/accuracy_recovery_adapters/wan22_14b_t2i_torchao_uint4.safetensors",
+           "quantize_te": true,
+           "qtype_te": "qfloat8",
+           "arch": "wan22_14b:t2v",
+           "low_vram": true,
+           "model_kwargs": {
+             "train_high_noise": true,
+             "train_low_noise": true
+           }
+         },
+         "sample": {
+           "sampler": "flowmatch",
+           "sample_every": 250,
+           "width": 1024,
+           "height": 1024,
+           "samples": [
+             {
+               "prompt": "woman with red hair, playing chess at the park, bomb going off in the background"
+             },
+             {
+               "prompt": "a woman holding a coffee cup, in a beanie, sitting at a cafe"
+             },
+             {
+               "prompt": "a horse is a DJ at a night club, fish eye lens, smoke machine, lazer lights, holding a martini"
+             },
+             {
+               "prompt": "a man showing off his cool new t shirt at the beach, a shark is jumping out of the water in the background"
+             },
+             {
+               "prompt": "a bear building a log cabin in the snow covered mountains"
+             },
+             {
+               "prompt": "woman playing the guitar, on stage, singing a song, laser lights, punk rocker"
+             },
+             {
+               "prompt": "hipster man with a beard, building a chair, in a wood shop"
+             },
+             {
+               "prompt": "photo of a man, white background, medium shot, modeling clothing, studio lighting, white backdrop"
+             },
+             {
+               "prompt": "a man holding a sign that says, 'this is a sign'"
+             },
+             {
+               "prompt": "a bulldog, in a post apocalyptic world, with a shotgun, in a leather jacket, in a desert, with a motorcycle"
+             }
+           ],
+           "neg": "",
+           "seed": 42,
+           "walk_seed": true,
+           "guidance_scale": 4,
+           "sample_steps": 25,
+           "num_frames": 41,
+           "fps": 16
+         }
+       }
+     ]
+   },
+   "meta": {
+     "name": "[name]",
+     "version": "1.0"
+   }
+ }
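The job config above fully specifies the run: a rank-32 LoRA (alpha 32, conv rank 16) trained on Wan2.2-T2V-A14B with the base transformer quantized to uint4 plus an accuracy-recovery adapter, a qfloat8 text encoder, bf16 weights, adamw8bit at lr 1e-4, checkpoints every 250 steps, and sampling disabled. A minimal sketch, assuming the repository has been downloaded locally, for reading those fields back out of the JSON (the relative path is a placeholder):

import json

# Placeholder path: adjust to wherever this repository was downloaded.
CONFIG_PATH = ".job_config.json"

with open(CONFIG_PATH, "r", encoding="utf-8") as f:
    job_config = json.load(f)

# The trainer settings live under config.process[0].
process = job_config["config"]["process"][0]
network = process["network"]
train = process["train"]
model = process["model"]
save = process["save"]

print(f"LoRA rank/alpha:    {network['linear']}/{network['linear_alpha']}")
print(f"Conv rank/alpha:    {network['conv']}/{network['conv_alpha']}")
print(f"Base model:         {model['name_or_path']} ({model['arch']})")
print(f"Steps / save every: {train['steps']} / {save['save_every']}")
print(f"Optimizer / lr:     {train['optimizer']} @ {train['lr']}")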
Xiang_wan_2_2_lora_000000250_high_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de9fbe1802c300b04ef3a6c8ecfce26bcb5e267e15717bcdd84b0c018b23dcc1
+ size 306808376
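The checkpoint entries in this commit are Git LFS pointers (spec version, sha256 oid, byte size) rather than the weights themselves. A minimal sketch for checking a downloaded file against the pointer above; the local path is a placeholder, and the expected values are copied from the pointer:

import hashlib
import os

# Placeholder path to the locally downloaded checkpoint.
path = "Xiang_wan_2_2_lora_000000250_high_noise.safetensors"

# Values from the LFS pointer shown above.
expected_oid = "de9fbe1802c300b04ef3a6c8ecfce26bcb5e267e15717bcdd84b0c018b23dcc1"
expected_size = 306808376

# Hash the file in 1 MiB chunks to avoid loading ~300 MB into memory at once.
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert digest.hexdigest() == expected_oid, "sha256 mismatch"
print("downloaded file matches its LFS pointer")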
Xiang_wan_2_2_lora_000000250_low_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b013a468d95a58ca7865248d25ec36b33d5c827484216da6ccbf1a96a68ae39
+ size 306808376
Xiang_wan_2_2_lora_000000500_high_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbc456c37204c3b36b1633cdb1c8701aefb879241086e658b55226bbbf918eb3
+ size 306808376
Xiang_wan_2_2_lora_000000500_low_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:daded9ed7d3f44ddabfda621045f2bf8752ab7c5bd66fb6be90f8faf106e232d
+ size 306808376
Xiang_wan_2_2_lora_000000750_high_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2cc0b2f106f1982941a78245c263d6390426722185ec31109a333de0b5b388b3
+ size 306808376
Xiang_wan_2_2_lora_000000750_low_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8616262843f4b5f7fe3897ac8aabbedffaf8290c80aaf648ada9bf5b54367631
+ size 306808376
Xiang_wan_2_2_lora_000001000_high_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c0b139cdaf864e3400fefbd732dd294ec156038c86d9b1410ef524dfa5f23c4
+ size 306808376
Xiang_wan_2_2_lora_000001000_low_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15127f928c8b773b8ea1d6c89997aeb1ad4a31d9106d4120433246ccb9e03499
+ size 306808376
Xiang_wan_2_2_lora_000001250_high_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:793b809be5c44dabbe1115db8c34dd7cdace07f202263d6edaed54bdb25c9c21
+ size 306808376
Xiang_wan_2_2_lora_000001250_low_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb3fd5dcc3ce08d8420959c8e47d0dd226fb7ff96aa5764fdc4e1870f62f5802
+ size 306808376
Xiang_wan_2_2_lora_000001500_high_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5197a9dd0fc9b50f8360894147167162acc871bc66f1ea9503971dd72f219513
+ size 306808376
Xiang_wan_2_2_lora_000001500_low_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e148a2047c488ae24fc83f300f018e81f5f4b967cadb3e20ad2fbd7e6d4af324
+ size 306808376
Xiang_wan_2_2_lora_000001750_high_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3fa4aa2b448bdf0aa4d92e1ff632d705695b2c09903a2a77dc8829c48316aca
+ size 306808376
Xiang_wan_2_2_lora_000001750_low_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d5f23edbc782984fd5d82d81931a10ce50b67e6c51a9c78f013ac09c67a03b9
+ size 306808376
Xiang_wan_2_2_lora_000002000_high_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd46313e292d833c31c5d8f92e2dc34d64ede68eaa44bef5f1a46eb0fe38005b
+ size 306808376
Xiang_wan_2_2_lora_000002000_low_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f1835761a4c2796093a0d32f78b2e6269472a9d58708ec37c0ee11088cd415a
+ size 306808376
Xiang_wan_2_2_lora_000002250_high_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3edfaedcd963ea3d43eb91e0ea02a3383319b44b27c6170408dacae4f66c2bfb
+ size 306808376
Xiang_wan_2_2_lora_000002250_low_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:976001a470a1d8f4452118e24cb349954d2ab018e5ac6b72c97271b1f1287a44
+ size 306808376
Xiang_wan_2_2_lora_000002500_high_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e8566f8b1adab4d3ba876494961fba744ecd4beece065d5db7e8454b1797fbb
+ size 306808376
Xiang_wan_2_2_lora_000002500_low_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:152c2f291a3e9c4cd2fee05c12593094a4d967d02c4bc86f70c64516de71d7b8
+ size 306808376
Xiang_wan_2_2_lora_000002750_high_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b21a51795b81d9f8bc0df9a72f2e3cf1fb68f8b755d2796f747c0e552c43b36
+ size 306808376
Xiang_wan_2_2_lora_000002750_low_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e1e73612ef56af5782fb6efb08a9bddc3d3cd1692e40966aec76b56d38fd09a
+ size 306808376
Xiang_wan_2_2_lora_000003000_high_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d780cd0107b1b3ddf07babe1c5749a14bac8929ba93b95fb02c3295d14657b9e
+ size 306808376
Xiang_wan_2_2_lora_000003000_low_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:69af0fce3f269d17b9cf6ee1a58cf823cb3c6a77378a7ceed13de70b45d598ee
+ size 306808376
Xiang_wan_2_2_lora_000003250_high_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e85f9e5c4c814ac98b8622f0b2ff0bc5e3e70901446cb4a7461498914f7d02f4
+ size 306808376
Xiang_wan_2_2_lora_000003250_low_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e34f1b7c3ad62ce939378131b3e50c4e63159b6a106ccb8bfe7666612a56f320
+ size 306808376
Xiang_wan_2_2_lora_000003500_high_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0209d0d9aa302a64b476ab73d0f4bdc8ddad28f87849123bba6253cc42d8cce
+ size 306808376
Xiang_wan_2_2_lora_000003500_low_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49aa0d2392cc88dec9b983e6b19569a62abbcfd92c487e4f82bfd91874ab1765
+ size 306808376
Xiang_wan_2_2_lora_000003750_high_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13932d97075265fd4c042c361a882d7589f331cde7528b071c0f48f04dd6dd5b
+ size 306808376
Xiang_wan_2_2_lora_000003750_low_noise.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7cefa112c15845f0bbb6be7309e3912a31d9cb124b411874e23a44dd4f0e404
+ size 306808376
config.yaml ADDED
@@ -0,0 +1,117 @@
+ job: extension
+ config:
+   name: Xiang_wan_2_2_lora
+   process:
+   - type: ui_trainer
+     training_folder: /home/featurize/ai-toolkit/output
+     sqlite_db_path: /home/featurize/ai-toolkit/aitk_db.db
+     device: cuda
+     trigger_word: null
+     performance_log_every: 10
+     network:
+       type: lora
+       linear: 32
+       linear_alpha: 32
+       conv: 16
+       conv_alpha: 16
+       lokr_full_rank: true
+       lokr_factor: -1
+       network_kwargs:
+         ignore_if_contains: []
+     save:
+       dtype: bf16
+       save_every: 250
+       max_step_saves_to_keep: 4000
+       save_format: diffusers
+       push_to_hub: false
+     datasets:
+     - folder_path: /home/featurize/ai-toolkit/datasets/Xiang_Handsome_Flux_SRPO_Keye_Captioned
+       control_path: null
+       mask_path: null
+       mask_min_value: 0.1
+       default_caption: ''
+       caption_ext: txt
+       caption_dropout_rate: 0.05
+       cache_latents_to_disk: false
+       is_reg: false
+       network_weight: 1
+       resolution:
+       - 512
+       - 768
+       controls: []
+       shrink_video_to_frames: true
+       num_frames: 1
+       do_i2v: true
+       flip_x: false
+       flip_y: false
+     train:
+       batch_size: 1
+       bypass_guidance_embedding: false
+       steps: 30000
+       gradient_accumulation: 1
+       train_unet: true
+       train_text_encoder: false
+       gradient_checkpointing: true
+       noise_scheduler: flowmatch
+       optimizer: adamw8bit
+       timestep_type: linear
+       content_or_style: balanced
+       optimizer_params:
+         weight_decay: 0.0001
+       unload_text_encoder: false
+       cache_text_embeddings: true
+       lr: 0.0001
+       ema_config:
+         use_ema: false
+         ema_decay: 0.99
+       skip_first_sample: true
+       force_first_sample: false
+       disable_sampling: true
+       dtype: bf16
+       diff_output_preservation: false
+       diff_output_preservation_multiplier: 1
+       diff_output_preservation_class: person
+       switch_boundary_every: 1
+     model:
+       name_or_path: /home/featurize/ai-toolkit/Wan2.2-T2V-A14B-Diffusers-bf16
+       quantize: true
+       qtype: uint4|ostris/accuracy_recovery_adapters/wan22_14b_t2i_torchao_uint4.safetensors
+       quantize_te: true
+       qtype_te: qfloat8
+       arch: wan22_14b:t2v
+       low_vram: true
+       model_kwargs:
+         train_high_noise: true
+         train_low_noise: true
+     sample:
+       sampler: flowmatch
+       sample_every: 250
+       width: 1024
+       height: 1024
+       samples:
+       - prompt: woman with red hair, playing chess at the park, bomb going off in
+           the background
+       - prompt: a woman holding a coffee cup, in a beanie, sitting at a cafe
+       - prompt: a horse is a DJ at a night club, fish eye lens, smoke machine, lazer
+           lights, holding a martini
+       - prompt: a man showing off his cool new t shirt at the beach, a shark is jumping
+           out of the water in the background
+       - prompt: a bear building a log cabin in the snow covered mountains
+       - prompt: woman playing the guitar, on stage, singing a song, laser lights,
+           punk rocker
+       - prompt: hipster man with a beard, building a chair, in a wood shop
+       - prompt: photo of a man, white background, medium shot, modeling clothing,
+           studio lighting, white backdrop
+       - prompt: a man holding a sign that says, 'this is a sign'
+       - prompt: a bulldog, in a post apocalyptic world, with a shotgun, in a leather
+           jacket, in a desert, with a motorcycle
+       neg: ''
+       seed: 42
+       walk_seed: true
+       guidance_scale: 4
+       sample_steps: 25
+       num_frames: 41
+       fps: 16
+ meta:
+   name: Xiang_wan_2_2_lora
+   version: '1.0'
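config.yaml mirrors .job_config.json with the [name] template resolved; with save_format: diffusers and save_every: 250 it yields the paired *_high_noise / *_low_noise LoRA files listed above, one pair per 250-step checkpoint. A minimal sketch for peeking inside one of those checkpoints with the safetensors library (the filename is a placeholder; any of the uploaded .safetensors files should work, assuming torch is installed):

from safetensors import safe_open

# Placeholder path to one of the uploaded LoRA checkpoints.
path = "Xiang_wan_2_2_lora_000003750_low_noise.safetensors"

with safe_open(path, framework="pt", device="cpu") as f:
    keys = list(f.keys())
    print(f"{len(keys)} tensors in {path}")
    # Show a few tensor names, shapes, and dtypes to confirm the LoRA layout
    # and the bf16 save dtype from the config.
    for name in keys[:5]:
        t = f.get_tensor(name)
        print(name, tuple(t.shape), t.dtype)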
log.txt ADDED
The diff for this file is too large to render.
 
logs/0_log.txt ADDED
@@ -0,0 +1,103 @@
+ Running 1 job
+ Error running job: /environment/miniconda3/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEab
+ Error running on_error: cannot access local variable 'job' where it is not associated with a value
+
+ ========================================
+ Result:
+  - 0 completed jobs
+  - 1 failure
+ ========================================
+ Traceback (most recent call last):
+   File "/home/featurize/ai-toolkit/run.py", line 120, in <module>
+     main()
+   File "/home/featurize/ai-toolkit/run.py", line 108, in main
+     raise e
+   File "/home/featurize/ai-toolkit/run.py", line 95, in main
+     job = get_job(config_file, args.name)
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   File "/home/featurize/ai-toolkit/toolkit/job.py", line 28, in get_job
+     from jobs import ExtensionJob
+   File "/home/featurize/ai-toolkit/jobs/__init__.py", line 1, in <module>
+     from .BaseJob import BaseJob
+   File "/home/featurize/ai-toolkit/jobs/BaseJob.py", line 5, in <module>
+     from jobs.process import BaseProcess
+   File "/home/featurize/ai-toolkit/jobs/process/__init__.py", line 1, in <module>
+     from .BaseExtractProcess import BaseExtractProcess
+   File "/home/featurize/ai-toolkit/jobs/process/BaseExtractProcess.py", line 7, in <module>
+     from toolkit.metadata import get_meta_for_safetensors
+   File "/home/featurize/ai-toolkit/toolkit/metadata.py", line 9, in <module>
+     from toolkit.train_tools import addnet_hash_legacy
+   File "/home/featurize/ai-toolkit/toolkit/train_tools.py", line 25, in <module>
+     from transformers import T5Tokenizer, T5EncoderModel, UMT5EncoderModel
+   File "/environment/miniconda3/lib/python3.11/site-packages/transformers/utils/import_utils.py", line 2045, in __getattr__
+     module = self._get_module(self._class_to_module[name])
+              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   File "/environment/miniconda3/lib/python3.11/site-packages/transformers/utils/import_utils.py", line 2075, in _get_module
+     raise e
+   File "/environment/miniconda3/lib/python3.11/site-packages/transformers/utils/import_utils.py", line 2073, in _get_module
+     return importlib.import_module("." + module_name, self.__name__)
+            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   File "/environment/miniconda3/lib/python3.11/importlib/__init__.py", line 126, in import_module
+     return _bootstrap._gcd_import(name[level:], package, level)
+            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   File "/environment/miniconda3/lib/python3.11/site-packages/transformers/models/t5/modeling_t5.py", line 40, in <module>
+     from ...modeling_utils import PreTrainedModel
+   File "/environment/miniconda3/lib/python3.11/site-packages/transformers/modeling_utils.py", line 61, in <module>
+     from .integrations.flash_attention import flash_attention_forward
+   File "/environment/miniconda3/lib/python3.11/site-packages/transformers/integrations/flash_attention.py", line 5, in <module>
+     from ..modeling_flash_attention_utils import _flash_attention_forward, flash_attn_supports_top_left_mask
+   File "/environment/miniconda3/lib/python3.11/site-packages/transformers/modeling_flash_attention_utils.py", line 36, in <module>
+     from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa
+   File "/environment/miniconda3/lib/python3.11/site-packages/flash_attn/__init__.py", line 3, in <module>
+     from flash_attn.flash_attn_interface import (
+   File "/environment/miniconda3/lib/python3.11/site-packages/flash_attn/flash_attn_interface.py", line 15, in <module>
+     import flash_attn_2_cuda as flash_attn_gpu
+ ImportError: /environment/miniconda3/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so: undefined symbol: _ZN3c104cuda9SetDeviceEab
+
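The failure recorded in logs/0_log.txt is an ABI mismatch: the installed flash_attn_2_cuda extension was built against a different torch/CUDA build, so the c10::cuda::SetDevice symbol it expects is unresolved, and the import chain run.py → toolkit → transformers → flash_attn aborts before a job object is created (which also triggers the secondary "cannot access local variable 'job'" error). A small diagnostic sketch for confirming the mismatch; the usual remedies are reinstalling a flash-attn wheel built for the installed torch release, rebuilding it from source, or removing it so transformers falls back to its default attention implementation:

import importlib

import torch

# The flash-attn binary must match this torch build (version, CUDA, C++ ABI).
print("torch:", torch.__version__, "built with CUDA:", torch.version.cuda)

try:
    flash_attn = importlib.import_module("flash_attn")
    print("flash_attn:", flash_attn.__version__)
except ImportError as exc:
    # The undefined-symbol error from the log surfaces here when the wheel
    # was compiled against a different torch release.
    print("flash_attn import failed:", exc)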
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1018e7a50001489d791c5df1b08ff97f47207af9fe1b492247dca56333bcd02e
+ size 624884069