prithivMLmods committed
Commit 4f47677 · verified · 1 Parent(s): 3a09493

Upload folder using huggingface_hub
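The commit message indicates this folder was pushed with huggingface_hub's folder-upload API. A minimal sketch of how such a commit is typically created (the repo id below is a hypothetical placeholder; the folder name matches the output directory recorded in trainer_state.json):

```python
from huggingface_hub import HfApi

api = HfApi()

# Push a local training output directory to the Hub as a single commit.
api.upload_folder(
    folder_path="siglip2-finetune-full",    # local Trainer output dir
    repo_id="prithivMLmods/example-model",  # hypothetical repo id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```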
checkpoint-1378/config.json ADDED
@@ -0,0 +1,56 @@
+{
+  "architectures": [
+    "SiglipForImageClassification"
+  ],
+  "id2label": {
+    "0": "Accessories",
+    "1": "Apparel",
+    "2": "Footwear",
+    "3": "Free Items",
+    "4": "Home",
+    "5": "Personal Care",
+    "6": "Sporting Goods"
+  },
+  "initializer_factor": 1.0,
+  "label2id": {
+    "Accessories": 0,
+    "Apparel": 1,
+    "Footwear": 2,
+    "Free Items": 3,
+    "Home": 4,
+    "Personal Care": 5,
+    "Sporting Goods": 6
+  },
+  "model_type": "siglip",
+  "problem_type": "single_label_classification",
+  "text_config": {
+    "attention_dropout": 0.0,
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 768,
+    "intermediate_size": 3072,
+    "layer_norm_eps": 1e-06,
+    "max_position_embeddings": 64,
+    "model_type": "siglip_text_model",
+    "num_attention_heads": 12,
+    "num_hidden_layers": 12,
+    "projection_size": 768,
+    "torch_dtype": "float32",
+    "vocab_size": 256000
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.50.3",
+  "vision_config": {
+    "attention_dropout": 0.0,
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 768,
+    "image_size": 224,
+    "intermediate_size": 3072,
+    "layer_norm_eps": 1e-06,
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 12,
+    "num_channels": 3,
+    "num_hidden_layers": 12,
+    "patch_size": 16,
+    "torch_dtype": "float32"
+  }
+}
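The config above declares a `SiglipForImageClassification` head over seven product-category labels. A minimal sketch of loading this checkpoint and mapping the argmax logit back through `id2label` (the local path and image file are assumptions for illustration):

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

ckpt = "siglip2-finetune-full/checkpoint-1378"  # hypothetical local path

processor = AutoImageProcessor.from_pretrained(ckpt)
model = AutoModelForImageClassification.from_pretrained(ckpt)

image = Image.open("example.jpg").convert("RGB")  # placeholder image
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 7)

pred = logits.argmax(-1).item()
print(model.config.id2label[pred])  # e.g. "Apparel"
```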
checkpoint-1378/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aba920d3badce764848b3e7f20eb6a0a0458653e77f438293b153c13d196d99b
+size 371583372
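The .safetensors, .pt, .pth, and .bin entries in this commit are Git LFS pointer files: the repo tracks only the spec version, the sha256 oid of the real blob, and its byte size. A small sketch of checking a downloaded blob against a pointer, using the values from this one:

```python
import hashlib
import os

def verify_lfs_blob(path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a downloaded file against the oid/size from its LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_oid

print(verify_lfs_blob(
    "checkpoint-1378/model.safetensors",
    "aba920d3badce764848b3e7f20eb6a0a0458653e77f438293b153c13d196d99b",
    371583372,
))
```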
checkpoint-1378/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75154bb7e7ed9b661ae9157262f8656c1cce439c9beac9e8c8b20bbe0534b580
+size 686586490
checkpoint-1378/preprocessor_config.json ADDED
@@ -0,0 +1,24 @@
+{
+  "do_convert_rgb": null,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "SiglipImageProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "processor_class": "SiglipProcessor",
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 224,
+    "width": 224
+  }
+}
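This preprocessor resizes to 224×224 with bilinear resampling (`resample: 2`), rescales pixels by 1/255 (`0.00392…`), and normalizes each channel with mean and std 0.5, mapping values into [-1, 1]. A sketch of the equivalent manual transform, useful for sanity-checking against `SiglipImageProcessor`:

```python
import numpy as np
from PIL import Image

def preprocess(image: Image.Image) -> np.ndarray:
    """Manual mirror of the preprocessor_config above."""
    image = image.convert("RGB").resize((224, 224), Image.BILINEAR)  # resample=2
    x = np.asarray(image, dtype=np.float32) * (1.0 / 255.0)          # rescale_factor
    x = (x - 0.5) / 0.5                                              # mean=std=0.5 -> [-1, 1]
    return x.transpose(2, 0, 1)[None]                                # (1, 3, 224, 224)
```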
checkpoint-1378/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be3800323adfbabbfee073f7c32c3d86bc0c9fd7937c41a132afa461c6c74cbe
+size 14244
checkpoint-1378/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c2f9dcade45b8d8773de525e316b5580ad7e0268f4b204751a07ebc271c81d0
+size 1064
checkpoint-1378/trainer_state.json ADDED
@@ -0,0 +1,58 @@
+{
+  "best_global_step": 1378,
+  "best_metric": 0.17906464636325836,
+  "best_model_checkpoint": "siglip2-finetune-full/checkpoint-1378",
+  "epoch": 1.0,
+  "eval_steps": 500,
+  "global_step": 1378,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.36284470246734396,
+      "grad_norm": 6.49093770980835,
+      "learning_rate": 0.00016674057649667407,
+      "loss": 0.4567,
+      "step": 500
+    },
+    {
+      "epoch": 0.7256894049346879,
+      "grad_norm": 2.3314664363861084,
+      "learning_rate": 0.00012978566149297858,
+      "loss": 0.2743,
+      "step": 1000
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.9472000363042294,
+      "eval_loss": 0.17906464636325836,
+      "eval_model_preparation_time": 0.0024,
+      "eval_runtime": 528.8752,
+      "eval_samples_per_second": 83.332,
+      "eval_steps_per_second": 10.416,
+      "step": 1378
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 2756,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 2,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 3.6914252598321316e+18,
+  "train_batch_size": 32,
+  "trial_name": null,
+  "trial_params": null
+}
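`log_history` keeps one row per logging step plus one per evaluation; rows containing `eval_*` keys are the epoch-end evaluations. A short sketch of reading the eval rows back out of this file:

```python
import json

with open("checkpoint-1378/trainer_state.json") as f:
    state = json.load(f)

for row in state["log_history"]:
    if "eval_loss" in row:
        print(f"step {row['step']}: "
              f"accuracy={row['eval_accuracy']:.4f}, loss={row['eval_loss']:.4f}")
# step 1378: accuracy=0.9472, loss=0.1791
```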
checkpoint-1378/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12dd34e788f5eb534c4b6ac8f57ace245fefa2a536daa2fe035c88d0385d96e8
+size 5304
checkpoint-2756/config.json ADDED
@@ -0,0 +1,56 @@
+{
+  "architectures": [
+    "SiglipForImageClassification"
+  ],
+  "id2label": {
+    "0": "Accessories",
+    "1": "Apparel",
+    "2": "Footwear",
+    "3": "Free Items",
+    "4": "Home",
+    "5": "Personal Care",
+    "6": "Sporting Goods"
+  },
+  "initializer_factor": 1.0,
+  "label2id": {
+    "Accessories": 0,
+    "Apparel": 1,
+    "Footwear": 2,
+    "Free Items": 3,
+    "Home": 4,
+    "Personal Care": 5,
+    "Sporting Goods": 6
+  },
+  "model_type": "siglip",
+  "problem_type": "single_label_classification",
+  "text_config": {
+    "attention_dropout": 0.0,
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 768,
+    "intermediate_size": 3072,
+    "layer_norm_eps": 1e-06,
+    "max_position_embeddings": 64,
+    "model_type": "siglip_text_model",
+    "num_attention_heads": 12,
+    "num_hidden_layers": 12,
+    "projection_size": 768,
+    "torch_dtype": "float32",
+    "vocab_size": 256000
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.50.3",
+  "vision_config": {
+    "attention_dropout": 0.0,
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 768,
+    "image_size": 224,
+    "intermediate_size": 3072,
+    "layer_norm_eps": 1e-06,
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 12,
+    "num_channels": 3,
+    "num_hidden_layers": 12,
+    "patch_size": 16,
+    "torch_dtype": "float32"
+  }
+}
checkpoint-2756/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2ec341c1eb231410b520a2179e11e337775aba761c2484c17b056ed5b2ee382
+size 371583372
checkpoint-2756/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7f8b91f1228ac1cb238009ab9d05d09208bb142cf02c5e9a2442232f1df37b9
+size 686586490
checkpoint-2756/preprocessor_config.json ADDED
@@ -0,0 +1,24 @@
+{
+  "do_convert_rgb": null,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "SiglipImageProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "processor_class": "SiglipProcessor",
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 224,
+    "width": 224
+  }
+}
checkpoint-2756/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6895266ce7775b656647ddf97f9b07ff3ff78add76255d92106d9353ec64e09
+size 14244
checkpoint-2756/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d105dad7507619cb227e4051cae60fdd5d2447df00dfe640987ab94bee865e58
+size 1064
checkpoint-2756/trainer_state.json ADDED
@@ -0,0 +1,89 @@
+{
+  "best_global_step": 2756,
+  "best_metric": 0.07285120338201523,
+  "best_model_checkpoint": "siglip2-finetune-full/checkpoint-2756",
+  "epoch": 2.0,
+  "eval_steps": 500,
+  "global_step": 2756,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.36284470246734396,
+      "grad_norm": 6.49093770980835,
+      "learning_rate": 0.00016674057649667407,
+      "loss": 0.4567,
+      "step": 500
+    },
+    {
+      "epoch": 0.7256894049346879,
+      "grad_norm": 2.3314664363861084,
+      "learning_rate": 0.00012978566149297858,
+      "loss": 0.2743,
+      "step": 1000
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.9472000363042294,
+      "eval_loss": 0.17906464636325836,
+      "eval_model_preparation_time": 0.0024,
+      "eval_runtime": 528.8752,
+      "eval_samples_per_second": 83.332,
+      "eval_steps_per_second": 10.416,
+      "step": 1378
+    },
+    {
+      "epoch": 1.0885341074020318,
+      "grad_norm": 6.498136043548584,
+      "learning_rate": 9.283074648928308e-05,
+      "loss": 0.191,
+      "step": 1500
+    },
+    {
+      "epoch": 1.4513788098693758,
+      "grad_norm": 3.643498659133911,
+      "learning_rate": 5.587583148558758e-05,
+      "loss": 0.1366,
+      "step": 2000
+    },
+    {
+      "epoch": 1.8142235123367199,
+      "grad_norm": 3.603039026260376,
+      "learning_rate": 1.892091648189209e-05,
+      "loss": 0.1034,
+      "step": 2500
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.9802595752405155,
+      "eval_loss": 0.07285120338201523,
+      "eval_model_preparation_time": 0.0024,
+      "eval_runtime": 525.5082,
+      "eval_samples_per_second": 83.865,
+      "eval_steps_per_second": 10.483,
+      "step": 2756
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 2756,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 2,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 7.382850519664263e+18,
+  "train_batch_size": 32,
+  "trial_name": null,
+  "trial_params": null
+}
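Epoch 2 improves eval accuracy from 0.9472 to 0.9803 and eval loss from 0.1791 to 0.0729, which is why `best_model_checkpoint` now points at checkpoint-2756. The logged learning rates also fall by a constant ~7.39e-8 per step and extrapolate to zero exactly at `max_steps` 2756, consistent with a linear decay schedule. A sketch verifying both directly from the state file:

```python
import json

with open("checkpoint-2756/trainer_state.json") as f:
    state = json.load(f)

# Best checkpoint = eval row with the lowest loss.
evals = [r for r in state["log_history"] if "eval_loss" in r]
best = min(evals, key=lambda r: r["eval_loss"])
assert best["eval_loss"] == state["best_metric"]  # 0.0729 at step 2756

# Linear-decay check: the slope between logged LRs hits zero at max_steps.
lrs = [(r["step"], r["learning_rate"])
       for r in state["log_history"] if "learning_rate" in r]
slope = (lrs[0][1] - lrs[1][1]) / (lrs[1][0] - lrs[0][0])  # ~7.39e-8 per step
zero_step = lrs[-1][0] + lrs[-1][1] / slope
print(round(zero_step), state["max_steps"])  # 2756 2756
```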
checkpoint-2756/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12dd34e788f5eb534c4b6ac8f57ace245fefa2a536daa2fe035c88d0385d96e8
+size 5304
config.json ADDED
@@ -0,0 +1,56 @@
+{
+  "architectures": [
+    "SiglipForImageClassification"
+  ],
+  "id2label": {
+    "0": "Accessories",
+    "1": "Apparel",
+    "2": "Footwear",
+    "3": "Free Items",
+    "4": "Home",
+    "5": "Personal Care",
+    "6": "Sporting Goods"
+  },
+  "initializer_factor": 1.0,
+  "label2id": {
+    "Accessories": 0,
+    "Apparel": 1,
+    "Footwear": 2,
+    "Free Items": 3,
+    "Home": 4,
+    "Personal Care": 5,
+    "Sporting Goods": 6
+  },
+  "model_type": "siglip",
+  "problem_type": "single_label_classification",
+  "text_config": {
+    "attention_dropout": 0.0,
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 768,
+    "intermediate_size": 3072,
+    "layer_norm_eps": 1e-06,
+    "max_position_embeddings": 64,
+    "model_type": "siglip_text_model",
+    "num_attention_heads": 12,
+    "num_hidden_layers": 12,
+    "projection_size": 768,
+    "torch_dtype": "float32",
+    "vocab_size": 256000
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.50.3",
+  "vision_config": {
+    "attention_dropout": 0.0,
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 768,
+    "image_size": 224,
+    "intermediate_size": 3072,
+    "layer_norm_eps": 1e-06,
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 12,
+    "num_channels": 3,
+    "num_hidden_layers": 12,
+    "patch_size": 16,
+    "torch_dtype": "float32"
+  }
+}
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2ec341c1eb231410b520a2179e11e337775aba761c2484c17b056ed5b2ee382
+size 371583372
preprocessor_config.json ADDED
@@ -0,0 +1,24 @@
+{
+  "do_convert_rgb": null,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "SiglipImageProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "processor_class": "SiglipProcessor",
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 224,
+    "width": 224
+  }
+}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12dd34e788f5eb534c4b6ac8f57ace245fefa2a536daa2fe035c88d0385d96e8
+size 5304
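The root-level files mirror the best checkpoint (the root `model.safetensors` oid matches checkpoint-2756's), so the repo loads directly as a classifier. A minimal inference sketch with the `pipeline` API (the repo id is a hypothetical placeholder):

```python
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="prithivMLmods/example-model",  # hypothetical repo id
)

print(classifier("example.jpg"))  # e.g. [{'label': 'Footwear', 'score': ...}, ...]
```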