yaswanthgali committed
Commit e862541 · Parent: d6d09b0

Update model

Files changed (3)
  1. config.json +10 -5
  2. model.safetensors +2 -2
  3. preprocessor_config.json +7 -1
config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "architectures": [
-    "AIMv2Model"
+    "Aimv2VisionModel"
   ],
   "attention_dropout": 0.0,
   "auto_map": {
@@ -8,10 +8,14 @@
     "AutoModel": "modeling_aimv2.AIMv2Model",
     "FlaxAutoModel": "modeling_flax_aimv2.FlaxAIMv2Model"
   },
+  "hidden_act": "silu",
   "hidden_size": 1024,
   "image_size": 224,
+  "initializer_range": 0.02,
   "intermediate_size": 2816,
-  "model_type": "aimv2",
+  "is_native": false,
+  "mlp_bias": false,
+  "model_type": "aimv2_vision_model",
   "num_attention_heads": 8,
   "num_channels": 3,
   "num_hidden_layers": 24,
@@ -20,6 +24,7 @@
   "qkv_bias": false,
   "rms_norm_eps": 1e-05,
   "torch_dtype": "float32",
-  "transformers_version": "4.46.3",
-  "use_bias": false
-}
+  "transformers_version": "4.54.0.dev0",
+  "use_bias": false,
+  "use_head": false
+}
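
For context, a minimal sketch of how the renamed class and model_type would be picked up on the consumer side, assuming a transformers build that ships the native AIMv2 port (the config above was saved by 4.54.0.dev0). The repo id is a placeholder, not part of the commit:

from transformers import AutoConfig, AutoModel

repo_id = "<this-model-repo>"  # hypothetical placeholder; use the actual Hub repo id

# The new model_type "aimv2_vision_model" should map to the in-library
# Aimv2VisionModel class on a recent enough transformers build
# (this config was written by 4.54.0.dev0).
config = AutoConfig.from_pretrained(repo_id)
print(config.model_type)      # "aimv2_vision_model"
print(config.architectures)   # ["Aimv2VisionModel"]

model = AutoModel.from_pretrained(repo_id)
print(type(model).__name__)   # expected: "Aimv2VisionModel"
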
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c0736a439b17ce0405b52473e25d0b2b482eace1aa2c909160f64a132223ba7e
-size 1236809392
+oid sha256:8ff60adc9c00d936fef310a9f8bffbbc3032aee468976f677711ece79121a715
+size 1236816248
preprocessor_config.json CHANGED
@@ -3,6 +3,10 @@
     "height": 224,
     "width": 224
   },
+  "data_format": "channels_first",
+  "default_to_square": false,
+  "device": null,
+  "disable_grouping": null,
   "do_center_crop": true,
   "do_convert_rgb": true,
   "do_normalize": true,
@@ -13,14 +17,16 @@
     0.4578275,
     0.40821073
   ],
-  "image_processor_type": "CLIPImageProcessor",
+  "image_processor_type": "CLIPImageProcessorFast",
   "image_std": [
     0.26862954,
     0.26130258,
     0.27577711
   ],
+  "input_data_format": null,
   "resample": 3,
   "rescale_factor": 0.00392156862745098,
+  "return_tensors": null,
   "size": {
     "shortest_edge": 224
   }
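
Likewise, a minimal sketch of exercising the switch to the fast image processor, assuming torchvision is installed (required by CLIPImageProcessorFast). The repo id is again a placeholder:

from PIL import Image
from transformers import AutoImageProcessor

repo_id = "<this-model-repo>"  # hypothetical placeholder; use the actual Hub repo id

# The updated preprocessor_config.json selects CLIPImageProcessorFast
# (the torchvision-backed processor) in place of the PIL-based CLIPImageProcessor;
# use_fast=True asks AutoImageProcessor for that fast variant.
processor = AutoImageProcessor.from_pretrained(repo_id, use_fast=True)

image = Image.new("RGB", (640, 480))  # stand-in for a real input image
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)   # expected: torch.Size([1, 3, 224, 224])
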