ayyuce committed
Commit f70bb53 · verified · 1 Parent(s): 10c1344
ayyuce/blip-vqa-rad

Files changed (5):
  1. README.md +17 -9
  2. config.json +4 -29
  3. generation_config.json +1 -1
  4. model.safetensors +1 -1
  5. training_args.bin +2 -2
README.md CHANGED
@@ -16,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
  
  This model is a fine-tuned version of [Salesforce/blip-vqa-base](https://huggingface.co/Salesforce/blip-vqa-base) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.2797
+ - Loss: 0.3083
  
  ## Model description
  
@@ -39,21 +39,29 @@ The following hyperparameters were used during training:
  - train_batch_size: 2
  - eval_batch_size: 2
  - seed: 42
- - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
+ - optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
  - lr_scheduler_type: linear
- - num_epochs: 2
+ - num_epochs: 10
  
  ### Training results
  
  | Training Loss | Epoch | Step | Validation Loss |
  |:-------------:|:-----:|:----:|:---------------:|
- | 0.3462 | 1.0 | 897 | 0.2955 |
- | 0.3099 | 2.0 | 1794 | 0.2797 |
+ | 0.3482 | 1.0 | 897 | 0.3006 |
+ | 0.3143 | 2.0 | 1794 | 0.2856 |
+ | 0.2695 | 3.0 | 2691 | 0.2854 |
+ | 0.2053 | 4.0 | 3588 | 0.2910 |
+ | 0.2152 | 5.0 | 4485 | 0.2987 |
+ | 0.2841 | 6.0 | 5382 | 0.2927 |
+ | 0.2106 | 7.0 | 6279 | 0.3007 |
+ | 0.1746 | 8.0 | 7176 | 0.3027 |
+ | 0.2473 | 9.0 | 8073 | 0.3073 |
+ | 0.2259 | 10.0 | 8970 | 0.3083 |
  
  
  ### Framework versions
  
- - Transformers 4.50.0
- - Pytorch 2.6.0+cu124
- - Datasets 3.5.0
- - Tokenizers 0.21.1
+ - Transformers 4.47.0
+ - Pytorch 2.5.1+cu121
+ - Datasets 3.3.1
+ - Tokenizers 0.21.0
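For orientation, here is a minimal usage sketch of querying the fine-tuned checkpoint with the `transformers` BLIP API. The repo id comes from the commit header; the config.json below saves `BlipForConditionalGeneration` as the architecture, so that class is used. The image path and question are hypothetical, and the processor files are assumed to be published alongside the weights (otherwise the base model's processor could be used instead).

```python
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

# Repo id taken from the commit header; processor assumed to live in the same repo.
processor = BlipProcessor.from_pretrained("ayyuce/blip-vqa-rad")
model = BlipForConditionalGeneration.from_pretrained("ayyuce/blip-vqa-rad")

# Hypothetical inputs: any RGB image plus a natural-language question.
image = Image.open("example_scan.png").convert("RGB")
question = "is there an abnormality in this image?"

inputs = processor(images=image, text=question, return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=20)
print(processor.decode(output_ids[0], skip_special_tokens=True))
```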
config.json CHANGED
@@ -1,4 +1,5 @@
  {
+ "_name_or_path": "Salesforce/blip-vqa-base",
  "architectures": [
  "BlipForConditionalGeneration"
  ],
@@ -11,44 +12,18 @@
  "projection_dim": 512,
  "text_config": {
  "_attn_implementation_autoset": true,
- "attention_probs_dropout_prob": 0.0,
- "encoder_hidden_size": 768,
- "hidden_act": "gelu",
- "hidden_dropout_prob": 0.0,
- "hidden_size": 768,
  "initializer_factor": 1.0,
- "initializer_range": 0.02,
- "intermediate_size": 3072,
- "label_smoothing": 0.0,
- "layer_norm_eps": 1e-12,
- "max_position_embeddings": 512,
  "model_type": "blip_text_model",
- "num_attention_heads": 12,
- "num_hidden_layers": 12,
- "projection_dim": 768,
- "torch_dtype": "float32",
- "use_cache": true,
- "vocab_size": 30524
+ "num_attention_heads": 12
  },
  "torch_dtype": "float32",
- "transformers_version": "4.50.0",
+ "transformers_version": "4.47.0",
  "vision_config": {
  "_attn_implementation_autoset": true,
- "attention_dropout": 0.0,
  "dropout": 0.0,
- "hidden_act": "gelu",
- "hidden_size": 768,
- "image_size": 384,
  "initializer_factor": 1.0,
  "initializer_range": 0.02,
- "intermediate_size": 3072,
- "layer_norm_eps": 1e-05,
  "model_type": "blip_vision_model",
- "num_attention_heads": 12,
- "num_channels": 3,
- "num_hidden_layers": 12,
- "patch_size": 16,
- "projection_dim": 512,
- "torch_dtype": "float32"
+ "num_channels": 3
  }
  }
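Most of the keys removed from text_config and vision_config appear to match the library defaults for BLIP, so the trimmed file should resolve to the same model shape when loaded. A small sketch (repo id from the commit header) of checking values that are no longer stored explicitly:

```python
from transformers import BlipConfig

# Repo id from the commit header; keys removed in this commit are expected to
# resolve to the same values via the library defaults.
config = BlipConfig.from_pretrained("ayyuce/blip-vqa-rad")

print(config.text_config.hidden_size)           # 768, per the removed key above
print(config.text_config.num_attention_heads)   # 12, kept explicitly in the diff
print(config.vision_config.image_size)          # 384, per the removed key above
```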
generation_config.json CHANGED
@@ -3,5 +3,5 @@
  "bos_token_id": 30522,
  "eos_token_id": 2,
  "pad_token_id": 0,
- "transformers_version": "4.50.0"
+ "transformers_version": "4.47.0"
  }
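These special-token ids are the fallbacks used at generation time when no overrides are passed. A quick sketch (repo id from the commit header) of inspecting them:

```python
from transformers import GenerationConfig

# Repo id taken from the commit header.
gen = GenerationConfig.from_pretrained("ayyuce/blip-vqa-rad")
print(gen.bos_token_id, gen.eos_token_id, gen.pad_token_id)  # 30522 2 0
```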
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8a55e1daa92809f44d7e759af4f36b7b81f7b82a528d4f5a802a66b17f4d6ef3
+ oid sha256:a4c5a7f2b692b3b08bd9194d5897fa37cbff309d1e36841be618b2dff5e829ef
  size 989717056
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3054e42f2f5aec2419b9f6a06a1905c05dae237f192e7e6c194eaca41eea1be5
- size 5368
+ oid sha256:694457f2f177f35a1c7b78ae2c8dcc4a0ac46d9534b37dfd1b78e54ab8af48bd
+ size 5304
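Both binary files are stored as Git LFS pointers, which record only the blob's sha256 and byte size. A hedged sketch of checking a locally downloaded copy against its pointer (local filename assumed):

```python
import hashlib
from pathlib import Path

# Hypothetical local path to the downloaded blob (e.g. after `git lfs pull`).
path = Path("model.safetensors")

sha = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

print(path.stat().st_size)  # should equal the pointer's "size" (989717056)
print(sha.hexdigest())      # should equal the pointer's "oid sha256:..." value
```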