prithivMLmods committed on
Commit 94fe90b · verified · 1 Parent(s): 533e324

Upload folder using huggingface_hub

checkpoint-4805/config.json ADDED
@@ -0,0 +1,80 @@
+{
+  "architectures": [
+    "SiglipForImageClassification"
+  ],
+  "id2label": {
+    "0": "call",
+    "1": "dislike",
+    "2": "fist",
+    "3": "four",
+    "4": "like",
+    "5": "mute",
+    "6": "no_gesture",
+    "7": "ok",
+    "8": "one",
+    "9": "palm",
+    "10": "peace",
+    "11": "peace_inverted",
+    "12": "rock",
+    "13": "stop",
+    "14": "stop_inverted",
+    "15": "three",
+    "16": "three2",
+    "17": "two_up",
+    "18": "two_up_inverted"
+  },
+  "initializer_factor": 1.0,
+  "label2id": {
+    "call": 0,
+    "dislike": 1,
+    "fist": 2,
+    "four": 3,
+    "like": 4,
+    "mute": 5,
+    "no_gesture": 6,
+    "ok": 7,
+    "one": 8,
+    "palm": 9,
+    "peace": 10,
+    "peace_inverted": 11,
+    "rock": 12,
+    "stop": 13,
+    "stop_inverted": 14,
+    "three": 15,
+    "three2": 16,
+    "two_up": 17,
+    "two_up_inverted": 18
+  },
+  "model_type": "siglip",
+  "problem_type": "single_label_classification",
+  "text_config": {
+    "attention_dropout": 0.0,
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 768,
+    "intermediate_size": 3072,
+    "layer_norm_eps": 1e-06,
+    "max_position_embeddings": 64,
+    "model_type": "siglip_text_model",
+    "num_attention_heads": 12,
+    "num_hidden_layers": 12,
+    "projection_size": 768,
+    "torch_dtype": "float32",
+    "vocab_size": 256000
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.50.3",
+  "vision_config": {
+    "attention_dropout": 0.0,
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 768,
+    "image_size": 224,
+    "intermediate_size": 3072,
+    "layer_norm_eps": 1e-06,
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 12,
+    "num_channels": 3,
+    "num_hidden_layers": 12,
+    "patch_size": 16,
+    "torch_dtype": "float32"
+  }
+}
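
The config above declares a `SiglipForImageClassification` head with 19 gesture classes. A minimal sketch of loading it, assuming the checkpoint folder has been downloaded locally (the path below is a placeholder):

```python
from transformers import SiglipForImageClassification

# Placeholder local path; point this at wherever the checkpoint was downloaded.
model = SiglipForImageClassification.from_pretrained("./checkpoint-4805")

# id2label from config.json maps the 19 logit indices back to gesture names.
print(model.config.id2label[7])  # -> "ok"
```
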
checkpoint-4805/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61fc8474b3a285fb4785021a72d008931359f6ee7fad02b22fa66865d8d436fb
+size 371620292
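
This is a Git LFS pointer file, not the weights themselves: `oid` records the SHA-256 of the actual object and `size` its length in bytes. A minimal sketch of checking a downloaded object against its pointer (the path is a placeholder):

```python
import hashlib

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded file against the oid/size from its LFS pointer."""
    h = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest() == expected_oid and size == expected_size

ok = verify_lfs_object(
    "model.safetensors",  # placeholder path to the downloaded object
    "61fc8474b3a285fb4785021a72d008931359f6ee7fad02b22fa66865d8d436fb",
    371620292,
)
```
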
checkpoint-4805/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7529e204c78d9830c24e0ef1e4594ba7b1ce5b51af6ea9a898ecf327aaa2866
+size 686660346
checkpoint-4805/preprocessor_config.json ADDED
@@ -0,0 +1,24 @@
+{
+  "do_convert_rgb": null,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "SiglipImageProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "processor_class": "SiglipProcessor",
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 224,
+    "width": 224
+  }
+}
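
Here `rescale_factor` is 1/255 (0.00392156862745098), and with `image_mean` and `image_std` both at 0.5 the pixel values end up in [-1, 1]; `resample` 2 is PIL's bilinear filter. A sketch of the same arithmetic in NumPy, with a dummy image standing in for real input:

```python
import numpy as np

# Dummy stand-in for an RGB image already resized to 224x224 (H, W, 3), uint8.
image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)

pixels = image.astype(np.float32) * (1.0 / 255.0)  # do_rescale: rescale_factor = 1/255
pixels = (pixels - 0.5) / 0.5                      # do_normalize: mean = std = 0.5 -> [-1, 1]
assert pixels.min() >= -1.0 and pixels.max() <= 1.0
```
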
checkpoint-4805/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e13324d285e34158f16334e5bb4925f926c6c55d469c9669497dbfa239cf2abc
+size 14244
checkpoint-4805/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f155484f38b2e9eb064f5ae8ba48c07e67082b1ddbb1bbce9396d85c82fca94
+size 1064
checkpoint-4805/trainer_state.json ADDED
@@ -0,0 +1,107 @@
+{
+  "best_global_step": 4805,
+  "best_metric": 0.05839185044169426,
+  "best_model_checkpoint": "siglip2-finetune-full/checkpoint-4805",
+  "epoch": 1.0,
+  "eval_steps": 500,
+  "global_step": 4805,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.1040582726326743,
+      "grad_norm": 44.86325454711914,
+      "learning_rate": 9.053627760252366e-07,
+      "loss": 1.8108,
+      "step": 500
+    },
+    {
+      "epoch": 0.2081165452653486,
+      "grad_norm": 35.94168472290039,
+      "learning_rate": 8.002103049421662e-07,
+      "loss": 0.4382,
+      "step": 1000
+    },
+    {
+      "epoch": 0.31217481789802287,
+      "grad_norm": 15.327913284301758,
+      "learning_rate": 6.950578338590957e-07,
+      "loss": 0.2398,
+      "step": 1500
+    },
+    {
+      "epoch": 0.4162330905306972,
+      "grad_norm": 42.22140884399414,
+      "learning_rate": 5.899053627760253e-07,
+      "loss": 0.1731,
+      "step": 2000
+    },
+    {
+      "epoch": 0.5202913631633714,
+      "grad_norm": 52.25971603393555,
+      "learning_rate": 4.847528916929548e-07,
+      "loss": 0.144,
+      "step": 2500
+    },
+    {
+      "epoch": 0.6243496357960457,
+      "grad_norm": 29.09841537475586,
+      "learning_rate": 3.7960042060988434e-07,
+      "loss": 0.1267,
+      "step": 3000
+    },
+    {
+      "epoch": 0.72840790842872,
+      "grad_norm": 56.122318267822266,
+      "learning_rate": 2.744479495268139e-07,
+      "loss": 0.1061,
+      "step": 3500
+    },
+    {
+      "epoch": 0.8324661810613944,
+      "grad_norm": 36.10615158081055,
+      "learning_rate": 1.6929547844374343e-07,
+      "loss": 0.0977,
+      "step": 4000
+    },
+    {
+      "epoch": 0.9365244536940687,
+      "grad_norm": 41.66591262817383,
+      "learning_rate": 6.414300736067297e-08,
+      "loss": 0.0934,
+      "step": 4500
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.9832959313103717,
+      "eval_loss": 0.05839185044169426,
+      "eval_model_preparation_time": 0.004,
+      "eval_runtime": 2181.6249,
+      "eval_samples_per_second": 70.468,
+      "eval_steps_per_second": 8.809,
+      "step": 4805
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 4805,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 1,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 1.2877966316954511e+19,
+  "train_batch_size": 32,
+  "trial_name": null,
+  "trial_params": null
+}
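
The log shows training loss falling from 1.81 at step 500 to 0.093 at step 4500 while the learning rate decays roughly linearly toward zero, with a single end-of-epoch evaluation reporting about 98.3% accuracy. A sketch of extracting that curve from the file (the path is a placeholder):

```python
import json

with open("checkpoint-4805/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; the final evaluation entry carries "eval_loss".
for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>5}: loss={entry['loss']:.4f} "
              f"lr={entry['learning_rate']:.3e}")
print("best eval loss:", state["best_metric"])
```
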
checkpoint-4805/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ece2f315761755eaa1563a5a7e3c0364fb0faa58002113aa00850f59d1176766
+size 5304
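
`training_args.bin` is a pickled `transformers.TrainingArguments` object rather than a tensor file. A sketch of inspecting it, assuming a compatible `transformers` install (unpickling executes code, so only do this for files you trust):

```python
import torch

# weights_only=False is needed because this is an arbitrary pickled object,
# not a tensor checkpoint; it requires trusting the file and having a
# compatible transformers version importable.
args = torch.load("checkpoint-4805/training_args.bin", weights_only=False)
print(args.num_train_epochs, args.per_device_train_batch_size)
```
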
config.json ADDED
@@ -0,0 +1,80 @@
+{
+  "architectures": [
+    "SiglipForImageClassification"
+  ],
+  "id2label": {
+    "0": "call",
+    "1": "dislike",
+    "2": "fist",
+    "3": "four",
+    "4": "like",
+    "5": "mute",
+    "6": "no_gesture",
+    "7": "ok",
+    "8": "one",
+    "9": "palm",
+    "10": "peace",
+    "11": "peace_inverted",
+    "12": "rock",
+    "13": "stop",
+    "14": "stop_inverted",
+    "15": "three",
+    "16": "three2",
+    "17": "two_up",
+    "18": "two_up_inverted"
+  },
+  "initializer_factor": 1.0,
+  "label2id": {
+    "call": 0,
+    "dislike": 1,
+    "fist": 2,
+    "four": 3,
+    "like": 4,
+    "mute": 5,
+    "no_gesture": 6,
+    "ok": 7,
+    "one": 8,
+    "palm": 9,
+    "peace": 10,
+    "peace_inverted": 11,
+    "rock": 12,
+    "stop": 13,
+    "stop_inverted": 14,
+    "three": 15,
+    "three2": 16,
+    "two_up": 17,
+    "two_up_inverted": 18
+  },
+  "model_type": "siglip",
+  "problem_type": "single_label_classification",
+  "text_config": {
+    "attention_dropout": 0.0,
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 768,
+    "intermediate_size": 3072,
+    "layer_norm_eps": 1e-06,
+    "max_position_embeddings": 64,
+    "model_type": "siglip_text_model",
+    "num_attention_heads": 12,
+    "num_hidden_layers": 12,
+    "projection_size": 768,
+    "torch_dtype": "float32",
+    "vocab_size": 256000
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.50.3",
+  "vision_config": {
+    "attention_dropout": 0.0,
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 768,
+    "image_size": 224,
+    "intermediate_size": 3072,
+    "layer_norm_eps": 1e-06,
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 12,
+    "num_channels": 3,
+    "num_hidden_layers": 12,
+    "patch_size": 16,
+    "torch_dtype": "float32"
+  }
+}
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61fc8474b3a285fb4785021a72d008931359f6ee7fad02b22fa66865d8d436fb
+size 371620292
preprocessor_config.json ADDED
@@ -0,0 +1,24 @@
+{
+  "do_convert_rgb": null,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "SiglipImageProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "processor_class": "SiglipProcessor",
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 224,
+    "width": 224
+  }
+}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ece2f315761755eaa1563a5a7e3c0364fb0faa58002113aa00850f59d1176766
+size 5304