---
license: mit
tags:
- generated_from_trainer
datasets:
- squad_v2
- quoref
- adversarial_qa
- duorc
model-index:
- name: rob-base-superqa2
  results:
  - task:
      type: question-answering
      name: Question Answering
    dataset:
      name: squad_v2
      type: squad_v2
      config: squad_v2
      split: validation
    metrics:
    - type: exact_match
      value: 79.2365
      name: Exact Match
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiZmUxNmNjZTg2YzcxMTBkYjE4YTVmODM3YWY2NTFkMmY3NWNiMGYyZTkxZDlkMTJiMjBkZjVhY2NlMGRjYzAyMCIsInZlcnNpb24iOjF9.dn0e2EJ8ImOWODCoRH_d7v10vtxv2ZfRkeWKF-R62sz2ufMjyC9brQihULMw5ZhnSQq5bBFFvauhG6KpLgzgDA
    - type: f1
      value: 82.3326
      name: F1
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiYWQyZGNhY2Y3OWE5MTg0ZDM4MmQ0NDk5MjRmODA2OTVmY2ExYTc1MjM2ZTcyNzUxMGI2OWIwMzY5NzMzNzU3NyIsInZlcnNpb24iOjF9.j2Y2sfHayDV5Qfka1bVSbkBIAyMa-rpUxMNICTFvsDVTCnzRPkNVrtrE9dBiDKQa2vVOZNiKrbJEDfXE1xOIBQ
  - task:
      type: question-answering
      name: Question Answering
    dataset:
      name: adversarial_qa
      type: adversarial_qa
      config: adversarialQA
      split: test
    metrics:
    - type: exact_match
      value: 12.4
      name: Exact Match
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiZDExM2Q2NjIxODU4N2FiOWY3NjVjZjNmNzYwYWNhODllMzQxZGVkMzZlMWUzMzJhYTQwMDZhMGQyZjZjNWMyNiIsInZlcnNpb24iOjF9.rMzb2Cna0y3MljrLCRZ8r8SwPFTtwr4OG1mD6gdn3zcVgqX3Td0Q04n_O7RoGuH1788xqEvdeGltd-_TRrC0Aw
    - type: f1
      value: 12.4
      name: F1
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMTNlYjgzMzZlNmYzNjkyYjg4MzYxNTZjZjNkYzA0NTZmNjk5MjI5YzM5YWQ0Y2VlMjVkYWIxNTk2YWJkMTQzYyIsInZlcnNpb24iOjF9.Du89uCtL2-mlV-tewIVKqwG9O53JE3B4Jflpzv4nptpa0MtYiGUAMCunMuWRdBXMe_YdqKDjr7_alJ0-XNRVDQ
  - task:
      type: question-answering
      name: Question Answering
    dataset:
      name: adversarial_qa
      type: adversarial_qa
      config: adversarialQA
      split: validation
    metrics:
    - type: exact_match
      value: 42.3667
      name: Exact Match
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiNmJmYmU5OGFmNjBjNzM1YmRhNDViNDI3MTMwZjg2N2EwYmM2ZDkyZmI4MmY5OGE4NmU5YTA1NThjZDdkYjkyYyIsInZlcnNpb24iOjF9.Iah8AxkCHsDHRTK8SLXzo4qd0C3Ku7bqGVJIJHiPxC2VO3bfz6c5emSSVZk5fACbKYMFoKTeLr6_XTwfTNjGAg
    - type: f1
      value: 53.3255
      name: F1
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiYmVlMGFiNjhhOWVjZWUzZTkwOWYzNWQzNjVhNzFhMjRkOGRmNmYwNWIyN2VmZWQ0MDZlNzk0NWM5MzgxMzdhYyIsInZlcnNpb24iOjF9._lW0JTSbvQ-pUBG0cZwpKzCvXMXmjcLrAaxk-bhou_Hf5R3Sw4AEAEy0Vx5qvxk3e30E73mYTInrtBqH8JA3BQ
  - task:
      type: question-answering
      name: Question Answering
    dataset:
      name: squad
      type: squad
      config: plain_text
      split: validation
    metrics:
    - type: exact_match
      value: 86.1925
      name: Exact Match
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiOWMxYmE4MWRjZDRlMWY3NWMxMjI0YTI5MmVkZTI5OGZiNzM2ZWMyNzlhOGRlNjMzZTBmYWM2OGU0MWM1NDc0MiIsInZlcnNpb24iOjF9.3h6zsLHiqX9ScqvvKhirv4n3S2bP1qpAmnGQrz2eoY0vps2UAN4afyXA_PLMlbk_osgN8Wvnefx74VIgELh6DQ
    - type: f1
      value: 92.4306
      name: F1
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMTVhMWIzYWJhMWNkNzg5NDNlZTdmY2UwNDM3ZWIzZmZjNTA5NDNiMjk2NjI0MTVmNDRjOTYyYjU0YWNmYWE4ZSIsInZlcnNpb24iOjF9.pMkm1BJU1PAsWruRIWhNFrF4DD-nXaE5Gq6sR5mZfQUoeESNWiYGU2GppAvnIEVah0PmdRQHd-DBRK0hDJ2PBg
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# rob-base-superqa2
This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) for question answering, trained on the squad_v2, quoref, adversarial_qa, and duorc datasets listed in the metadata above.
## Model description
More information needed
## Intended uses & limitations
More information needed
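
The model targets extractive question answering. Below is a minimal usage sketch with the `transformers` question-answering pipeline; the Hub model id is assumed from the card name and may need an owner/namespace prefix for the actual checkpoint.

```python
from transformers import pipeline

# Hypothetical Hub id based on the card name; replace with the actual
# owner/rob-base-superqa2 path of this checkpoint.
qa = pipeline("question-answering", model="rob-base-superqa2")

result = qa(
    question="What base model was fine-tuned?",
    context="rob-base-superqa2 is a fine-tuned version of roberta-base "
            "for extractive question answering.",
)
print(result["answer"], result["score"])
```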
## Training and evaluation data
According to the metadata above, the model was fine-tuned on the squad_v2, quoref, adversarial_qa, and duorc question-answering datasets, and the model index reports evaluation results on squad_v2, adversarial_qa, and squad splits.
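
As a sketch of how these corpora can be loaded with the `datasets` library (the `adversarialQA` config matches the model index, while the duorc config is an assumption; the card does not state which one was used):

```python
from datasets import load_dataset

# Datasets named in the card metadata.
squad_v2 = load_dataset("squad_v2")
quoref = load_dataset("quoref")
adversarial = load_dataset("adversarial_qa", "adversarialQA")
duorc = load_dataset("duorc", "SelfRC")  # config is an assumption

print(squad_v2)  # DatasetDict with train/validation splits
```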
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- distributed_type: multi-GPU
- num_devices: 8
- total_train_batch_size: 256
- total_eval_batch_size: 256
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-06
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 2.0
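
These settings correspond roughly to the following `TrainingArguments`; this is a reconstruction rather than the original training script, and the output directory plus the 8-GPU distributed launch (which yields the total batch size of 256) are assumptions.

```python
from transformers import TrainingArguments

# Sketch of the listed hyperparameters as TrainingArguments. With 8 devices,
# a per-device batch size of 32 gives the reported total batch size of 256.
training_args = TrainingArguments(
    output_dir="rob-base-superqa2",   # placeholder output path
    learning_rate=1e-4,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    seed=42,
    num_train_epochs=2.0,
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-6,
)
```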
### Training results
### Framework versions
- Transformers 4.21.1
- Pytorch 1.11.0a0+gita4c10ee
- Datasets 2.4.0
- Tokenizers 0.12.1