{
  "architectures": [
    "SiglipForImageClassification"
  ],
  "id2label": {
    "0": "Child 0-12",
    "1": "Teenager 13-20",
    "2": "Adult 21-44",
    "3": "Middle Age 45-64",
    "4": "Aged 65+"
  },
  "initializer_factor": 1.0,
  "label2id": {
    "Adult 21-44": 2,
    "Aged 65+": 4,
    "Child 0-12": 0,
    "Middle Age 45-64": 3,
    "Teenager 13-20": 1
  },
  "model_type": "siglip",
  "problem_type": "single_label_classification",
  "text_config": {
    "attention_dropout": 0.0,
    "hidden_act": "gelu_pytorch_tanh",
    "hidden_size": 768,
    "intermediate_size": 3072,
    "layer_norm_eps": 1e-06,
    "max_position_embeddings": 64,
    "model_type": "siglip_text_model",
    "num_attention_heads": 12,
    "num_hidden_layers": 12,
    "projection_size": 768,
    "torch_dtype": "float32",
    "vocab_size": 256000
  },
  "torch_dtype": "float32",
  "transformers_version": "4.50.0",
  "vision_config": {
    "attention_dropout": 0.0,
    "hidden_act": "gelu_pytorch_tanh",
    "hidden_size": 768,
    "image_size": 224,
    "intermediate_size": 3072,
    "layer_norm_eps": 1e-06,
    "model_type": "siglip_vision_model",
    "num_attention_heads": 12,
    "num_channels": 3,
    "num_hidden_layers": 12,
    "patch_size": 16,
    "torch_dtype": "float32"
  }
}
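
This config.json describes a SigLIP checkpoint fine-tuned for single-label image classification over five age brackets (see id2label). Below is a minimal sketch of loading such a checkpoint with the Hugging Face transformers library; the repository id "your-org/siglip-age-classifier" and the image path "face.jpg" are placeholder assumptions, not values taken from this file.

from PIL import Image
from transformers import AutoImageProcessor, SiglipForImageClassification

# Hypothetical repo id -- replace with the actual checkpoint this config belongs to.
model_id = "your-org/siglip-age-classifier"
processor = AutoImageProcessor.from_pretrained(model_id)
model = SiglipForImageClassification.from_pretrained(model_id)

# Preprocess a single RGB image to the 224x224 input size given in vision_config.
image = Image.open("face.jpg").convert("RGB")  # placeholder path
inputs = processor(images=image, return_tensors="pt")

# Forward pass; the classification head returns one logit per entry in id2label.
outputs = model(**inputs)
predicted_id = outputs.logits.argmax(-1).item()
print(model.config.id2label[predicted_id])  # e.g. "Adult 21-44"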