Updated Readme
README.md CHANGED
@@ -219,7 +219,7 @@ from datasets import load_dataset
 
 skill = "sequence_filling" # "sequence_filling", "char_coherence", "visual_closure", "text_closure", "caption_relevance"
 split = "val" # "val", "test"
-dataset = load_dataset("VLR-CVC/
+dataset = load_dataset("VLR-CVC/ComicsPAP", skill, split=split)
 ```
 
 <details>
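For reference, the corrected loading call from this hunk can be exercised roughly as follows. This is a minimal sketch: the skill names and split values come from the README comments above, and the inspection lines at the end only assume the standard `datasets.Dataset` API, not any particular column layout.

```python
from datasets import load_dataset

# Pick one of the five skills listed in the README comment; each skill is a
# separate configuration of the dataset.
skill = "sequence_filling"
split = "val"  # or "test"

dataset = load_dataset("VLR-CVC/ComicsPAP", skill, split=split)

# Basic inspection; the exact column set depends on the skill, but 'context'
# and 'options' are referenced later in the README.
print(len(dataset))
print(dataset.column_names)
```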
@@ -405,7 +405,7 @@ from datasets import load_dataset
 
 skill = "sequence_filling" # "sequence_filling", "char_coherence", "visual_closure", "text_closure", "caption_relevance"
 split = "val" # "val", "test"
-dataset = load_dataset("VLR-CVC/
+dataset = load_dataset("VLR-CVC/ComicsPAP", skill, split=split)
 
 processor = SingleImagePickAPanel()
 dataset = dataset.map(
@@ -414,7 +414,7 @@ dataset = dataset.map(
     batch_size=32,
     remove_columns=['context', 'options']
 )
-dataset.save_to_disk(f"
+dataset.save_to_disk(f"ComicsPAP_{skill}_{split}_single_images")
 ```
 
 </details>
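Taken together, the two hunks above restore the preprocessing snippet: the dataset is loaded, mapped through `SingleImagePickAPanel` in batches of 32, and written to disk. A minimal sketch is below; note that `SingleImagePickAPanel` is defined elsewhere in the README (outside this diff), so the method handed to `map()` and the `batched=True` flag are assumptions about that part of the file.

```python
from datasets import load_dataset

skill = "sequence_filling"
split = "val"

dataset = load_dataset("VLR-CVC/ComicsPAP", skill, split=split)

# SingleImagePickAPanel is defined earlier in the README; it is not part of
# this diff, so the mapped method name below is an assumption.
processor = SingleImagePickAPanel()
dataset = dataset.map(
    processor.map_to_single_image,  # assumed: the real callable sits on README lines outside these hunks
    batched=True,                   # assumed, since batch_size is only used in batched mode
    batch_size=32,
    remove_columns=['context', 'options']
)
dataset.save_to_disk(f"ComicsPAP_{skill}_{split}_single_images")
```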
@@ -441,7 +441,6 @@ Where `sample_id` is the id of the sample, `correct_panel_id` is the prediction
 <summary>Pseudocode for the evaluation on val set, adapt for your model:</summary>
 
 ```python
-split = "val"
 skills = {
     "sequence_filling": {
         "num_examples": 262
@@ -461,7 +460,7 @@ skills = {
 }
 
 for skill in skills:
-    dataset = load_dataset("VLR-CVC/
+    dataset = load_dataset("VLR-CVC/ComicsPAP", skill, split="val")
     correct = 0
     total = 0
     for example in dataset:
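Putting the last two hunks together, the updated pseudocode iterates over every skill, loads its validation configuration, and tallies accuracy against the expected example counts. The sketch below fills the parts that fall outside the diff with clearly marked placeholders: the full `skills` dictionary is truncated here, and both `your_model_predict` and the `solution_index` label column are hypothetical names to be replaced with your model and the dataset's real schema.

```python
from datasets import load_dataset

# Only the first entry is visible in this diff; the remaining skills and their
# example counts live on README lines that are not part of these hunks.
skills = {
    "sequence_filling": {"num_examples": 262},
    # ...
}

def your_model_predict(example):
    # Placeholder: replace with your model's choice among the candidate panels.
    return 0

for skill in skills:
    dataset = load_dataset("VLR-CVC/ComicsPAP", skill, split="val")
    correct = 0
    total = 0
    for example in dataset:
        prediction = your_model_predict(example)
        if prediction == example["solution_index"]:  # hypothetical label column
            correct += 1
        total += 1
    accuracy = correct / total if total else 0.0
    print(f"{skill}: accuracy={accuracy:.3f} "
          f"({total}/{skills[skill]['num_examples']} expected examples)")
```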