datasetId
large_stringlengths 6
116
| author
large_stringlengths 2
42
| last_modified
large_stringdate 2021-04-29 15:34:29
2025-06-25 02:40:10
| downloads
int64 0
3.97M
| likes
int64 0
7.74k
| tags
large listlengths 1
7.92k
| task_categories
large listlengths 0
48
| createdAt
large_stringdate 2022-03-02 23:29:22
2025-06-25 00:32:52
| trending_score
float64 0
64
| card
large_stringlengths 31
1.01M
|
---|---|---|---|---|---|---|---|---|---|
abhinav302019/olympiad_data_320 | abhinav302019 | 2025-03-05T16:53:53Z | 14 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-05T16:53:50Z | 0 | ---
dataset_info:
features:
- name: problem
dtype: string
- name: Known_Solution
dtype: string
- name: Known_Answer
dtype: string
- name: Generated_Solution
dtype: string
- name: Generated_Answer
dtype: string
- name: Judge_Evaluation
dtype: string
- name: Judge_Rating
dtype: string
- name: Judge_Justification
dtype: string
splits:
- name: train
num_bytes: 46504
num_examples: 10
download_size: 46114
dataset_size: 46504
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
RLHF-And-Friends/tldr-thematic | RLHF-And-Friends | 2025-05-19T23:44:12Z | 351 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-28T18:38:31Z | 0 | ---
dataset_info:
- config_name: Advice
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 3248777.2880519526
num_examples: 2088
- name: validation
num_bytes: 361482.16246153845
num_examples: 232
download_size: 2218375
dataset_size: 3610259.450513491
- config_name: AskDocs
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 440327.5730453556
num_examples: 283
- name: validation
num_bytes: 62324.51076923077
num_examples: 40
download_size: 312880
dataset_size: 502652.0838145864
- config_name: AskReddit
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 24023525.540001027
num_examples: 15440
- name: validation
num_bytes: 2779673.1803076924
num_examples: 1784
download_size: 15633065
dataset_size: 26803198.72030872
- config_name: BreakUps
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 1303867.513116636
num_examples: 838
- name: validation
num_bytes: 168276.1790769231
num_examples: 108
download_size: 883490
dataset_size: 1472143.6921935591
- config_name: Cooking
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 177375.77147410085
num_examples: 114
- name: validation
num_bytes: 10906.789384615384
num_examples: 7
download_size: 110570
dataset_size: 188282.56085871623
- config_name: Dogtraining
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 563245.8708212676
num_examples: 362
- name: validation
num_bytes: 65440.73630769231
num_examples: 42
download_size: 397400
dataset_size: 628686.6071289598
- config_name: GetMotivated
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 262951.8015712548
num_examples: 169
- name: validation
num_bytes: 37394.70646153846
num_examples: 24
download_size: 196817
dataset_size: 300346.50803279324
- config_name: Parenting
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 676828.60167749
num_examples: 435
- name: validation
num_bytes: 74789.41292307692
num_examples: 48
download_size: 480742
dataset_size: 751618.0146005669
- config_name: Pets
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 569469.5821010607
num_examples: 366
- name: validation
num_bytes: 68556.96184615385
num_examples: 44
download_size: 394199
dataset_size: 638026.5439472145
- config_name: askwomenadvice
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 1070478.3401243982
num_examples: 688
- name: validation
num_bytes: 115300.34492307692
num_examples: 74
download_size: 686910
dataset_size: 1185778.6850474752
- config_name: books
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 250504.37901166876
num_examples: 161
- name: validation
num_bytes: 34278.480923076924
num_examples: 22
download_size: 173603
dataset_size: 284782.8599347457
- config_name: cats
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 504120.613663234
num_examples: 324
- name: validation
num_bytes: 62324.51076923077
num_examples: 40
download_size: 338338
dataset_size: 566445.1244324647
- config_name: college
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 410764.94446633884
num_examples: 264
- name: validation
num_bytes: 70115.07461538461
num_examples: 45
download_size: 275869
dataset_size: 480880.01908172347
- config_name: dating_advice
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 4432838.359032573
num_examples: 2849
- name: validation
num_bytes: 500154.1989230769
num_examples: 321
download_size: 2859130
dataset_size: 4932992.55795565
- config_name: dogs
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 992681.9491269855
num_examples: 638
- name: validation
num_bytes: 104393.55553846154
num_examples: 67
download_size: 686858
dataset_size: 1097075.504665447
- config_name: jobs
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 1686625.7568239064
num_examples: 1084
- name: validation
num_bytes: 166718.0663076923
num_examples: 107
download_size: 1115205
dataset_size: 1853343.8231315988
- config_name: legaladvice
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 3107187.8564366614
num_examples: 1997
- name: validation
num_bytes: 316296.89215384616
num_examples: 203
download_size: 2148216
dataset_size: 3423484.7485905075
- config_name: loseit
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 2259207.1945648636
num_examples: 1452
- name: validation
num_bytes: 267995.3963076923
num_examples: 172
download_size: 1502741
dataset_size: 2527202.590872556
- config_name: needadvice
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 821529.8889326777
num_examples: 528
- name: validation
num_bytes: 81021.864
num_examples: 52
download_size: 566990
dataset_size: 902551.7529326777
- config_name: offmychest
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 2461477.8111581365
num_examples: 1582
- name: validation
num_bytes: 232158.80261538463
num_examples: 149
download_size: 1752857
dataset_size: 2693636.613773521
- config_name: personalfinance
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 3597305.119720361
num_examples: 2312
- name: validation
num_bytes: 422248.56046153844
num_examples: 271
download_size: 2364890
dataset_size: 4019553.6801818996
- config_name: pettyrevenge
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 852648.4453316428
num_examples: 548
- name: validation
num_bytes: 96602.9916923077
num_examples: 62
download_size: 639836
dataset_size: 949251.4370239505
- config_name: relationship_advice
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 13522568.683170268
num_examples: 8691
- name: validation
num_bytes: 1480207.1307692307
num_examples: 950
download_size: 9038096
dataset_size: 15002775.813939499
- config_name: relationships
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 98527573.27040318
num_examples: 63324
- name: validation
num_bytes: 10908347.497384615
num_examples: 7001
download_size: 65934652
dataset_size: 109435920.76778778
- config_name: running
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 882211.0739106595
num_examples: 567
- name: validation
num_bytes: 110626.00661538461
num_examples: 71
download_size: 542869
dataset_size: 992837.0805260441
- config_name: self
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 1630612.3553057692
num_examples: 1048
- name: validation
num_bytes: 204112.77276923077
num_examples: 131
download_size: 1156745
dataset_size: 1834725.128075
- config_name: tifu
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 11957305.296302326
num_examples: 7685
- name: validation
num_bytes: 1305698.5006153847
num_examples: 838
download_size: 8213592
dataset_size: 13263003.79691771
- config_name: travel
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 703279.3746166104
num_examples: 452
- name: validation
num_bytes: 81021.864
num_examples: 52
download_size: 450229
dataset_size: 784301.2386166104
- config_name: weddingplanning
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 673716.7460375936
num_examples: 433
- name: validation
num_bytes: 66998.84907692307
num_examples: 43
download_size: 471160
dataset_size: 740715.5951145167
configs:
- config_name: Advice
data_files:
- split: train
path: Advice/train-*
- split: validation
path: Advice/validation-*
- config_name: AskDocs
data_files:
- split: train
path: AskDocs/train-*
- split: validation
path: AskDocs/validation-*
- config_name: AskReddit
data_files:
- split: train
path: AskReddit/train-*
- split: validation
path: AskReddit/validation-*
- config_name: BreakUps
data_files:
- split: train
path: BreakUps/train-*
- split: validation
path: BreakUps/validation-*
- config_name: Cooking
data_files:
- split: train
path: Cooking/train-*
- split: validation
path: Cooking/validation-*
- config_name: Dogtraining
data_files:
- split: train
path: Dogtraining/train-*
- split: validation
path: Dogtraining/validation-*
- config_name: GetMotivated
data_files:
- split: train
path: GetMotivated/train-*
- split: validation
path: GetMotivated/validation-*
- config_name: Parenting
data_files:
- split: train
path: Parenting/train-*
- split: validation
path: Parenting/validation-*
- config_name: Pets
data_files:
- split: train
path: Pets/train-*
- split: validation
path: Pets/validation-*
- config_name: askwomenadvice
data_files:
- split: train
path: askwomenadvice/train-*
- split: validation
path: askwomenadvice/validation-*
- config_name: books
data_files:
- split: train
path: books/train-*
- split: validation
path: books/validation-*
- config_name: cats
data_files:
- split: train
path: cats/train-*
- split: validation
path: cats/validation-*
- config_name: college
data_files:
- split: train
path: college/train-*
- split: validation
path: college/validation-*
- config_name: dating_advice
data_files:
- split: train
path: dating_advice/train-*
- split: validation
path: dating_advice/validation-*
- config_name: dogs
data_files:
- split: train
path: dogs/train-*
- split: validation
path: dogs/validation-*
- config_name: jobs
data_files:
- split: train
path: jobs/train-*
- split: validation
path: jobs/validation-*
- config_name: legaladvice
data_files:
- split: train
path: legaladvice/train-*
- split: validation
path: legaladvice/validation-*
- config_name: loseit
data_files:
- split: train
path: loseit/train-*
- split: validation
path: loseit/validation-*
- config_name: needadvice
data_files:
- split: train
path: needadvice/train-*
- split: validation
path: needadvice/validation-*
- config_name: offmychest
data_files:
- split: train
path: offmychest/train-*
- split: validation
path: offmychest/validation-*
- config_name: personalfinance
data_files:
- split: train
path: personalfinance/train-*
- split: validation
path: personalfinance/validation-*
- config_name: pettyrevenge
data_files:
- split: train
path: pettyrevenge/train-*
- split: validation
path: pettyrevenge/validation-*
- config_name: relationship_advice
data_files:
- split: train
path: relationship_advice/train-*
- split: validation
path: relationship_advice/validation-*
- config_name: relationships
data_files:
- split: train
path: relationships/train-*
- split: validation
path: relationships/validation-*
- config_name: running
data_files:
- split: train
path: running/train-*
- split: validation
path: running/validation-*
- config_name: self
data_files:
- split: train
path: self/train-*
- split: validation
path: self/validation-*
- config_name: tifu
data_files:
- split: train
path: tifu/train-*
- split: validation
path: tifu/validation-*
- config_name: travel
data_files:
- split: train
path: travel/train-*
- split: validation
path: travel/validation-*
- config_name: weddingplanning
data_files:
- split: train
path: weddingplanning/train-*
- split: validation
path: weddingplanning/validation-*
---
|
ziyu3141/rf_newtrain_1_48 | ziyu3141 | 2025-02-07T03:57:05Z | 14 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-07T03:57:03Z | 0 | ---
dataset_info:
features:
- name: Filename
dtype: string
- name: Aesthetics score
dtype: float64
- name: Artifact score
dtype: float64
- name: Misalignment score
dtype: float64
- name: Overall score
dtype: float64
- name: Artifact heatmap
sequence:
sequence:
sequence: int64
- name: Misalignment heatmap
sequence:
sequence:
sequence: int64
- name: Misalignment token label
dtype: string
- name: is_uneven
dtype: bool
- name: preferred_image
dtype: binary
- name: unpreferred_image
dtype: binary
- name: revised_image
dtype: binary
- name: unrevised_id
dtype: string
- name: is_preferred
dtype: bool
splits:
- name: train
num_bytes: 135379387
num_examples: 20
download_size: 9833035
dataset_size: 135379387
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Hennara/Recap-DataComp-1B_split_4 | Hennara | 2024-11-28T20:08:01Z | 46 | 0 | [
"size_categories:100M<n<1B",
"format:parquet",
"modality:image",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-11-28T19:00:44Z | 0 | ---
dataset_info:
features:
- name: url
dtype: string
- name: re_caption
dtype: string
- name: org_caption
dtype: string
- name: sha256
dtype: string
- name: key
dtype: string
- name: re_clip_score
dtype: float64
- name: org_clip_score
dtype: float64
- name: re_length
dtype: int64
- name: org_length
dtype: int64
- name: re_gpt4v_score
dtype: int64
- name: org_gpt4v_score
dtype: int64
splits:
- name: train
num_bytes: 67854510316
num_examples: 117611282
download_size: 41259522279
dataset_size: 67854510316
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
fdschmidt93/synthetic-llama-8b | fdschmidt93 | 2025-03-21T18:55:21Z | 17 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-21T18:55:10Z | 0 | ---
dataset_info:
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 657960740
num_examples: 478247
download_size: 353896912
dataset_size: 657960740
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ChaosAiVision/Medical_reasoning | ChaosAiVision | 2025-06-22T13:04:27Z | 31 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-20T11:02:46Z | 0 | ---
dataset_info:
features:
- name: question
dtype: string
- name: anwser
dtype: string
- name: chain_of_though
dtype: string
splits:
- name: train
num_bytes: 6323164
num_examples: 2144
download_size: 2322394
dataset_size: 6323164
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
sucharush/rag_mcqa_with_doc | sucharush | 2025-06-08T03:03:07Z | 0 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-08T02:43:40Z | 0 | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: response
dtype: string
- name: source
dtype: string
splits:
- name: train
num_bytes: 240267317
num_examples: 122868
download_size: 122928694
dataset_size: 240267317
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
philschmid/open-orca-10k-guidellm | philschmid | 2024-10-09T11:45:44Z | 33 | 1 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-10-09T11:45:42Z | 0 | ---
dataset_info:
features:
- name: id
dtype: string
- name: text
dtype: string
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 55855617.39201845
num_examples: 10000
download_size: 34695180
dataset_size: 55855617.39201845
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
french-datasets/bismarck91_frA-enA-tokenised-qwen-part1 | french-datasets | 2025-06-21T14:19:03Z | 0 | 0 | [
"task_categories:audio-to-audio",
"language:fra",
"language:eng",
"region:us"
] | [
"audio-to-audio"
] | 2025-06-21T14:18:28Z | 0 | ---
language:
- fra
- eng
viewer: false
task_categories:
- audio-to-audio
---
Ce répertoire est vide, il a été créé pour améliorer le référencement du jeu de données [bismarck91/frA-enA-tokenised-qwen-part1](https://huggingface.co/datasets/bismarck91/frA-enA-tokenised-qwen-part1). |
mlfoundations-dev/multiple_samples_sympy_numina_aime | mlfoundations-dev | 2025-02-04T05:33:24Z | 14 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-03T23:22:34Z | 0 | ---
dataset_info:
features:
- name: problem
dtype: string
- name: solution
dtype: string
- name: r1_distill_70b_response
sequence: string
- name: r1_distill_70b_extracted_answer
sequence: string
- name: sympy_code
dtype: string
- name: correct
dtype: bool
- name: execution_output
dtype: string
splits:
- name: train
num_bytes: 177622
num_examples: 3
download_size: 101520
dataset_size: 177622
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
woshityj/xsum_dataset | woshityj | 2024-12-11T08:49:23Z | 15 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-12-11T08:49:14Z | 0 | ---
dataset_info:
features:
- name: document
dtype: string
- name: summary
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 69746054
num_examples: 30000
- name: test
num_bytes: 8864589
num_examples: 3750
- name: validation
num_bytes: 8637471
num_examples: 3750
download_size: 55382887
dataset_size: 87248114
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
- split: validation
path: data/validation-*
---
|
Tobius/9e77d18e-3e9a-46af-ac54-7342711a98b6 | Tobius | 2024-11-29T11:14:06Z | 13 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-11-29T11:14:05Z | 0 | ---
dataset_info:
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 108838.4
num_examples: 800
- name: test
num_bytes: 27209.6
num_examples: 200
download_size: 11524
dataset_size: 136048.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
lighteval/RULER-131072-Lamma3-Instruct | lighteval | 2025-06-18T12:10:44Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-18T12:03:29Z | 0 | ---
dataset_info:
features:
- name: index
dtype: int64
- name: input
dtype: string
- name: outputs
sequence: string
- name: length
dtype: int64
splits:
- name: vt
num_bytes: 245243500
num_examples: 500
- name: fwe
num_bytes: 160767200
num_examples: 500
- name: niah_single_1
num_bytes: 245447719
num_examples: 500
- name: qa_2
num_bytes: 278549005
num_examples: 500
- name: niah_multikey_1
num_bytes: 306810494
num_examples: 500
- name: niah_multivalue
num_bytes: 306833986
num_examples: 500
- name: niah_multikey_3
num_bytes: 128503000
num_examples: 500
- name: niah_single_3
num_bytes: 306735516
num_examples: 500
- name: niah_single_2
num_bytes: 306711618
num_examples: 500
- name: qa_1
num_bytes: 312918473
num_examples: 500
- name: niah_multikey_2
num_bytes: 242897246
num_examples: 500
- name: niah_multiquery
num_bytes: 306890263
num_examples: 500
- name: cwe
num_bytes: 177649224
num_examples: 500
download_size: 1617169541
dataset_size: 3325957244
configs:
- config_name: default
data_files:
- split: vt
path: data/vt-*
- split: fwe
path: data/fwe-*
- split: niah_single_1
path: data/niah_single_1-*
- split: qa_2
path: data/qa_2-*
- split: niah_multikey_1
path: data/niah_multikey_1-*
- split: niah_multivalue
path: data/niah_multivalue-*
- split: niah_multikey_3
path: data/niah_multikey_3-*
- split: niah_single_3
path: data/niah_single_3-*
- split: niah_single_2
path: data/niah_single_2-*
- split: qa_1
path: data/qa_1-*
- split: niah_multikey_2
path: data/niah_multikey_2-*
- split: niah_multiquery
path: data/niah_multiquery-*
- split: cwe
path: data/cwe-*
---
|
RyanYr/reflect_collegemath-test_t4 | RyanYr | 2025-01-19T04:11:17Z | 15 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-01-17T17:09:46Z | 0 | ---
dataset_info:
features:
- name: data_source
dtype: string
- name: question_number
dtype: string
- name: problem
dtype: string
- name: answer
dtype: string
- name: license
dtype: string
- name: data_topic
dtype: string
- name: response@0
sequence: string
- name: response@1
sequence: string
- name: response@2
sequence: string
- name: response@3
sequence: string
- name: response@4
sequence: string
- name: response@5
sequence: string
- name: response@6
sequence: string
- name: response@7
sequence: string
- name: response@8
sequence: string
splits:
- name: train
num_bytes: 46205198
num_examples: 2818
download_size: 16907418
dataset_size: 46205198
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
nvidia/Scoring-Verifiers | nvidia | 2025-04-01T16:35:02Z | 33 | 6 | [
"task_categories:text-ranking",
"license:other",
"size_categories:1K<n<10K",
"arxiv:2502.13820",
"region:us",
"code",
"synthetic",
"nvidia",
"reasoning",
"llms",
"verifiers"
] | [
"text-ranking"
] | 2025-04-01T15:55:46Z | 0 | ---
license: other
license_name: nsclv1
license_link: https://github.com/aleksficek/Scoring-Verifiers/blob/main/LICENSE
task_categories:
- text-ranking
tags:
- code
- synthetic
- nvidia
- reasoning
- llms
- verifiers
pretty_name: Scoring Verifiers
size_categories:
- 1K<n<10K
---
# Scoring Verifiers
Scoring Verifiers is a set of 4 benchmarks that evaluate the scoring and ranking capabilities of synthetic verifiers such as test case generation and reward modelling. You can find our paper [Scoring Verifiers: Evaluating Synthetic Verification for Code and Reasoning](https://www.arxiv.org/abs/2502.13820) which explains in more detail our methodology, benchmark details and findings.

## Datasets
In this repository, we include 4 benchmarks that are code scoring and ranking versions of HumanEval and MBPP:
- HE-R
- HE-R+
- MBPP-R
- MBPP-R+
Each dataset sample contains a question from HumanEval or MBPP followed by several `gpt-4o` solutions and their rankings based on pre-defined test case execution scores. Alongside the keys found in the original benchmarks each sample contains the following keys:
- `task_id`
- `prompt`
- `canonical_solution`
- `all_solutions` (each solution contains the following)
- `rank`
- `average_test_score`
- `average_time_taken`
- `solution`
For example, the following is a distribution of the test case scores for all solutions in HE-R+ and MBPP-R+ respectively.
 
## Paper
Overall our paper's contributions can be summarized as follows:
1. We provide a recipe to transform any coding benchmark with predefined test cases into a code scoring and ranking benchmark.
2. We certify our recipe by creating code scoring and ranking versions of HumanEval and MBPP datasets: HE-R, HE-R+, MBPP-R, MBPP-R+.
3. We use our benchmark to evaluate synthetic verification methods such as test case generation in standard, reward and reasoning LLM’s.

We also open-source the [code used to generate these benchmarks](https://github.com/aleksficek/Scoring-Verifiers).
## Citation
```
@misc{ficek2025scoringverifiersevaluatingsynthetic,
title={Scoring Verifiers: Evaluating Synthetic Verification in Code and Reasoning},
author={Aleksander Ficek and Somshubra Majumdar and Vahid Noroozi and Boris Ginsburg},
year={2025},
eprint={2502.13820},
archivePrefix={arXiv},
primaryClass={cs.AI},
url={https://arxiv.org/abs/2502.13820},
}
```
|
ssktora/train-jqara-for-tevatron-1-all | ssktora | 2025-04-16T06:17:45Z | 16 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-16T06:17:41Z | 0 | ---
dataset_info:
features:
- name: query_id
dtype: string
- name: query
dtype: string
- name: positive_passages
list:
- name: docid
dtype: string
- name: text
dtype: string
- name: title
dtype: string
- name: negative_passages
list:
- name: docid
dtype: string
- name: text
dtype: string
- name: title
dtype: string
- name: __index_level_0__
dtype: int64
splits:
- name: train
num_bytes: 643740
num_examples: 18
download_size: 406196
dataset_size: 643740
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
french-datasets/ProfessorBob-text-embedding-dataset | french-datasets | 2025-03-30T17:00:19Z | 16 | 0 | [
"language:fra",
"region:us"
] | [] | 2025-03-29T20:42:45Z | 0 | ---
language: "fra"
viewer: false
---
Ce répertoire est vide, il a été créé pour améliorer le référencement du jeu de données [ProfessorBob/text-embedding-dataset](https://huggingface.co/datasets/ProfessorBob/text-embedding-dataset).
|
jed351/Chinese-Common-Crawl-Filtered | jed351 | 2025-06-02T05:32:00Z | 264 | 15 | [
"language:zh",
"size_categories:10M<n<100M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2023-07-20T21:23:06Z | 0 | ---
language:
- zh
---
# Traditional Chinese C4
### Dataset Summary
Data obtained from 2025-18 and 2025-13 Common Crawl.
Downloaded and processed using [code](https://github.com/jedcheng/c4-dataset-script) based on another [project](https://github.com/shjwudp/c4-dataset-script) attempting to recreate the C4 dataset.
The resultant dataset contains both simplified and traditional Chinese.
It was then filtered using a [modified list](https://github.com/jedcheng/c4-dataset-script/blob/master/SC_filter/SC_list.txt) of simplified Chinese characters to obtain [another traditional Chinese dataset](https://huggingface.co/datasets/jed351/Traditional-Chinese-Common-Crawl-Filtered).
I am still ironing out the process of filtering.
The 2025-13 dataset was deduplicated without splitting into shards resulting in a smaller dataset than the 2025-18 dataset.
Unfortunately, this process takes 2 CPU nodes for 4 hours.
In addition, other people would have done a better job at cleaning the common crawl.
To preserve time, money and resources, I will not work on this dataset anymore but instead shift my focus to the [Traditional Chinese Common Crawl dataset](https://huggingface.co/datasets/jed351/Traditional-Chinese-Common-Crawl).
|
sergiov2000/eval_wm_act_yellow3 | sergiov2000 | 2025-05-30T10:17:50Z | 114 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"tutorial"
] | [
"robotics"
] | 2025-05-30T10:17:34Z | 0 | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so100",
"total_episodes": 2,
"total_frames": 1728,
"total_tasks": 1,
"total_videos": 4,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:2"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.above": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.side": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
mhr2004/NevIR-val | mhr2004 | 2025-04-25T05:29:58Z | 30 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-25T05:29:56Z | 0 | ---
dataset_info:
features:
- name: id
dtype: string
- name: version
dtype: string
- name: text
dtype: string
- name: label
dtype: int64
splits:
- name: train
num_bytes: 689464
num_examples: 450
download_size: 125405
dataset_size: 689464
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
michsethowusu/dyula-kamba_sentence-pairs | michsethowusu | 2025-04-02T13:14:12Z | 11 | 0 | [
"size_categories:10K<n<100K",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-02T13:14:08Z | 0 |
---
dataset_info:
features:
- name: score
dtype: float32
- name: Dyula
dtype: string
- name: Kamba
dtype: string
splits:
- name: train
num_bytes: 2691554
num_examples: 22901
download_size: 2691554
dataset_size: 2691554
configs:
- config_name: default
data_files:
- split: train
path: Dyula-Kamba_Sentence-Pairs.csv
---
# Dyula-Kamba_Sentence-Pairs Dataset
This dataset contains sentence pairs for African languages along with similarity scores. It can be used for machine translation, sentence alignment, or other natural language processing tasks.
This dataset is based on the NLLBv1 dataset, published on OPUS under an open-source initiative led by META. You can find more information here: [OPUS - NLLB-v1](https://opus.nlpl.eu/legacy/NLLB-v1.php)
## Metadata
- **File Name**: Dyula-Kamba_Sentence-Pairs
- **Number of Rows**: 22901
- **Number of Columns**: 3
- **Columns**: score, Dyula, Kamba
## Dataset Description
The dataset contains sentence pairs in African languages with an associated similarity score. Each row consists of three columns:
1. `score`: The similarity score between the two sentences (range from 0 to 1).
2. `Dyula`: The first sentence in the pair (language 1).
3. `Kamba`: The second sentence in the pair (language 2).
This dataset is intended for use in training and evaluating machine learning models for tasks like translation, sentence similarity, and cross-lingual transfer learning.
## References
Below are papers related to how the data was collected and used in various multilingual and cross-lingual applications:
[1] Holger Schwenk and Matthijs Douze, Learning Joint Multilingual Sentence Representations with Neural Machine Translation, ACL workshop on Representation Learning for NLP, 2017
[2] Holger Schwenk and Xian Li, A Corpus for Multilingual Document Classification in Eight Languages, LREC, pages 3548-3551, 2018.
[3] Holger Schwenk, Filtering and Mining Parallel Data in a Joint Multilingual Space ACL, July 2018
[4] Alexis Conneau, Guillaume Lample, Ruty Rinott, Adina Williams, Samuel R. Bowman, Holger Schwenk and Veselin Stoyanov, XNLI: Cross-lingual Sentence Understanding through Inference, EMNLP, 2018.
[5] Mikel Artetxe and Holger Schwenk, Margin-based Parallel Corpus Mining with Multilingual Sentence Embeddings arXiv, Nov 3 2018.
[6] Mikel Artetxe and Holger Schwenk, Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond arXiv, Dec 26 2018.
[7] Holger Schwenk, Vishrav Chaudhary, Shuo Sun, Hongyu Gong and Paco Guzman, WikiMatrix: Mining 135M Parallel Sentences in 1620 Language Pairs from Wikipedia arXiv, July 11 2019.
[8] Holger Schwenk, Guillaume Wenzek, Sergey Edunov, Edouard Grave and Armand Joulin CCMatrix: Mining Billions of High-Quality Parallel Sentences on the WEB
[9] Paul-Ambroise Duquenne, Hongyu Gong, Holger Schwenk, Multimodal and Multilingual Embeddings for Large-Scale Speech Mining, NeurIPS 2021, pages 15748-15761.
[10] Kevin Heffernan, Onur Celebi, and Holger Schwenk, Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages
|
SciFy/annotation_demo | SciFy | 2025-03-10T16:39:57Z | 17 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T16:39:55Z | 0 | ---
dataset_info:
features:
- name: id
dtype: string
- name: metadata
struct:
- name: ocr_model
dtype: string
- name: python_version
dtype: string
splits:
- name: annotations
num_bytes: 89
num_examples: 2
download_size: 1735
dataset_size: 89
configs:
- config_name: default
data_files:
- split: annotations
path: data/annotations-*
---
|
taeyoon12321421/final_datasets_gorani | taeyoon12321421 | 2025-02-12T03:01:45Z | 14 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-12T03:01:22Z | 0 | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: output
dtype: string
- name: target_language
dtype: string
- name: metadata
struct:
- name: ENG
dtype: string
- name: JPN
dtype: string
- name: KO
dtype: string
splits:
- name: train
num_bytes: 10250793
num_examples: 21662
- name: test
num_bytes: 2558493
num_examples: 5416
download_size: 4724881
dataset_size: 12809286
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sl-revised_NoQuant_32_32_0.05_64_BestF1 | ferrazzipietro | 2024-11-25T14:10:59Z | 20 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-11-25T11:24:26Z | 0 | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: offsets
sequence: int64
- name: text
dtype: string
- name: type
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 149854
num_examples: 101
- name: test
num_bytes: 1063090
num_examples: 654
download_size: 241896
dataset_size: 1212944
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
severo/fix-401 | severo | 2022-06-24T11:45:48Z | 12 | 0 | [
"region:us"
] | [] | 2022-06-20T16:04:10Z | 0 | ---
viewer: false
---
# Try to include an iframe
from observable:
<iframe width="100%" height="635" frameborder="0"
src="https://observablehq.com/embed/@d3/sortable-bar-chart?cell=viewof+order&cell=chart"></iframe>
from an HF space:
<iframe src="https://hf.space/embed/YoannLemesle/CLIPictionary/+?__theme=system" data-src="https://hf.space/embed/YoannLemesle/CLIPictionary/+" data-sdk="gradio" title="Gradio app" class="container p-0 flex-grow overflow-hidden space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads" scrolling="no" id="iFrameResizer0" style="overflow: hidden; height: 725px;"></iframe> |
yunjae-won/mp_gemma9b_sft_ogd_rms_epoch4_multisample_2.5k | yunjae-won | 2025-05-11T20:41:37Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-11T20:41:32Z | 0 | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: output
dtype: string
- name: output_logps
dtype: float64
splits:
- name: train
num_bytes: 58064068
num_examples: 20000
download_size: 20871352
dataset_size: 58064068
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
a1031737/flickr8k_geometry3k_style | a1031737 | 2025-05-09T10:12:01Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-09T07:31:56Z | 0 | ---
dataset_info:
features:
- name: images
sequence: image
- name: problem
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 12950265.6
num_examples: 135
- name: validation
num_bytes: 671495.2533333333
num_examples: 7
- name: test
num_bytes: 767423.1466666666
num_examples: 8
download_size: 14477291
dataset_size: 14389184.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
Rapidata/text-2-image-Rich-Human-Feedback-32k | Rapidata | 2025-04-29T11:28:30Z | 159 | 12 | [
"language:en",
"license:apache-2.0",
"size_categories:10K<n<100K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"arxiv:2312.10240",
"region:us",
"heatmap",
"t2i",
"human",
"feedback",
"rich",
"annotation",
"open-image-preferences"
] | [] | 2025-04-24T15:55:37Z | 12 | ---
dataset_info:
features:
- name: image_name
dtype: image
- name: sentence
dtype: string
- name: word_scores
dtype: string
- name: alignment_score_norm
dtype: float32
- name: coherence_score_norm
dtype: float32
- name: style_score_norm
dtype: float32
- name: alignment_heatmap
dtype:
array2_d:
shape:
- 1024
- 1024
dtype: float32
- name: coherence_heatmap
dtype:
array2_d:
shape:
- 1024
- 1024
dtype: float32
- name: alignment_score
dtype: float32
- name: coherence_score
dtype: float32
- name: style_score
dtype: float32
splits:
- name: train
num_bytes: 116617124714.976
num_examples: 32528
download_size: 91216762385
dataset_size: 116617124714.976
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
tags:
- heatmap
- t2i
- human
- feedback
- rich
- annotation
- open-image-preferences
license: apache-2.0
language:
- en
pretty_name: Text to image - Rich Annotation
size_categories:
- 10K<n<100K
---
<a href="https://www.rapidata.ai">
<img src="https://cdn-uploads.huggingface.co/production/uploads/66f5624c42b853e73e0738eb/jfxR79bOztqaC6_yNNnGU.jpeg" width="250" alt="Rapidata Logo">
</a>
Building upon Google's research [Rich Human Feedback for Text-to-Image Generation](https://arxiv.org/abs/2312.10240), and the
[smaller, previous version of this dataset](https://huggingface.co/datasets/Rapidata/text-2-image-Rich-Human-Feedback), we have collected over 3.7 million responses from 307'415 individual humans for the [open-image-preference-v1](https://huggingface.co/datasets/data-is-better-together/open-image-preferences-v1) dataset using Rapidata via the [Python API](https://docs.rapidata.ai/). Collection took less than 2 weeks.
If you get value from this dataset and would like to see more in the future, please consider liking it ♥️
# Overview
We asked humans to evaluate AI-generated images in style, coherence and prompt alignment. For images that contained flaws, participants were asked to identify specific problematic areas. Additionally, for all images, participants identified words from the prompts that were not accurately represented in the generated images.
If you want to replicate the annotation setup, the steps are outlined at the [bottom](#replicating-the-annotation-setup).
This dataset and the annotation process is described in further detail in our blog post [Beyond Image Preferences](https://huggingface.co/blog/RapidataAI/beyond-image-preferences).
# Usage Examples
Accessing this data is easy with the Huggingface `dataset` library. For quick demos or previews, we recommend setting `streaming=True` as downloading the whole dataset can take a while.
```python
from datasets import load_dataset
ds = load_dataset("Rapidata/text-2-image-Rich-Human-Feedback-32k", split="train", streaming=True)
```
As an example, below we show how to replicate the figures below.
<details>
<summary>Click to expand Select Words example</summary>
The methods below can be used to produce figures similar to the ones shown below.
Note, however, that the figures below were created with `matplotlib`, whereas we use `opencv` here because it makes calculating the text spacing much easier.
**Methods**
```python
from PIL import Image
from datasets import load_dataset
import cv2
import numpy as np
def get_colors(words):
    """Map a list of word scores to BGR colors on the AUTUMN colormap.

    Higher scores map to hotter (more red) colors. Returns a list of
    (B, G, R) int tuples, one per input score.
    """
    # Hoist the loop-invariant maximum: O(n) instead of O(n^2).
    max_score = max(words) if words else 0
    # Guard against division by zero when every score is 0.
    if max_score == 0:
        max_score = 1
    colors = []
    for item in words:
        intensity = item / max_score
        value = np.uint8((1 - intensity) * 255)
        color = tuple(map(int, cv2.applyColorMap(np.array([[value]]), cv2.COLORMAP_AUTUMN)[0][0]))
        colors.append(color)
    return colors
def get_wrapped_text(text_color_pairs, font, font_scale, thickness, word_spacing, max_width):
    """Greedily wrap (text, color) pairs into lines no wider than max_width.

    Each returned line is a list of (text, color, text_size) triples, where
    text_size is the (width, height) tuple reported by cv2.getTextSize.
    """
    lines = []
    current_line = []
    used_width = 0
    for text, color in text_color_pairs:
        size = cv2.getTextSize(text, font, font_scale, thickness)[0]
        word_width = size[0]
        # Start a fresh line when this word would overflow the available width.
        if used_width + word_width > max_width:
            lines.append(current_line)
            current_line = []
            used_width = 0
        current_line.append((text, color, size))
        used_width += word_width + word_spacing
    lines.append(current_line)
    return lines
def add_multicolor_text(input, text_color_pairs, font_scale=1, thickness=2, word_spacing=20):
    """Render word-wrapped, individually colored text onto the top of an image.

    Draws a semi-transparent grey banner sized to the wrapped text, then writes
    each (text, color) pair in its own color. Returns a new PIL.Image; `input`
    is expected to be an RGB PIL image (or array-like) and is not modified.
    """
    # OpenCV works in BGR, so convert the (RGB) input first.
    image = cv2.cvtColor(np.array(input), cv2.COLOR_RGB2BGR)
    image_height, image_width, _ = image.shape
    font = cv2.FONT_HERSHEY_SIMPLEX
    # Wrap to 95% of the image width, leaving a 2.5% margin on each side.
    wrapped_text = get_wrapped_text(text_color_pairs, font, font_scale, thickness, word_spacing, int(image_width*0.95))
    position = (int(0.025*image_width), int(word_spacing*2))
    # Darken the banner area: blend a grey rectangle (75%) over the image (25%).
    overlay = image.copy()
    cv2.rectangle(overlay, (0, 0), (image_width, int((len(wrapped_text)+1)*word_spacing*2)), (100,100,100), -1)
    out_img = cv2.addWeighted(overlay, 0.75, image, 0.25, 0)
    # Each wrapped line advances y by 2*word_spacing; each word advances x by
    # its measured width plus word_spacing.
    for idx, text_line in enumerate(wrapped_text):
        current_x, current_y = position[0], position[1] + int(idx*word_spacing*2)
        for text, color, text_size in text_line:
            cv2.putText(out_img, text, (current_x, current_y), font, font_scale, color, thickness)
            current_x += text_size[0] + word_spacing
    return Image.fromarray(cv2.cvtColor(out_img, cv2.COLOR_BGR2RGB))
```
**Create figures**
```python
from ast import literal_eval

# Column names per the dataset schema above: "image_name" holds the image,
# "sentence" holds the prompt.
ds_words = ds.select_columns(["image_name", "sentence", "word_scores"])
for example in ds_words.take(5):
    image = example["image_name"]
    prompt = example["sentence"]
    # word_scores is stored as a stringified list of (word, score) pairs;
    # literal_eval parses it safely (unlike eval) and only once.
    scored_words = literal_eval(example["word_scores"])
    words = [word for word, _ in scored_words]
    word_scores = [score for _, score in scored_words]
    colors = get_colors(word_scores)
    display(add_multicolor_text(image, list(zip(words, colors)), font_scale=1, thickness=2, word_spacing=20))
```
</details>
<details>
<summary>Click to expand Heatmap example</summary>
**Methods**
```python
import cv2
import numpy as np
from PIL import Image
def overlay_heatmap(image, heatmap, alpha=0.3):
    """Blend a heatmap on top of an RGB PIL image.

    Parameters:
        image: RGB PIL.Image (or array-like) to annotate.
        heatmap: 2-D numeric np.ndarray of heat values (any range).
        alpha: opacity of the heatmap layer (0 = invisible, 1 = opaque).

    Returns a new PIL.Image; the input image is not modified.
    """
    cv2_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    heat_range = heatmap.max() - heatmap.min()
    # Guard against division by zero for a constant (flat) heatmap.
    if heat_range == 0:
        heatmap_normalized = np.zeros_like(heatmap, dtype=np.float64)
    else:
        heatmap_normalized = (heatmap - heatmap.min()) / heat_range
    heatmap_normalized = np.uint8(255 * heatmap_normalized)
    heatmap_colored = cv2.applyColorMap(heatmap_normalized, cv2.COLORMAP_HOT)
    overlaid_image = cv2.addWeighted(cv2_image, 1 - alpha, heatmap_colored, alpha, 0)
    return Image.fromarray(cv2.cvtColor(overlaid_image, cv2.COLOR_BGR2RGB))
```
**Create figures**
```python
# Column names per the dataset schema above: "image_name" holds the image.
# The unused prompt column is no longer selected.
ds_heatmap = ds.select_columns(["image_name", "alignment_heatmap"])
for example in ds_heatmap.take(5):
    image = example["image_name"]
    heatmap = example["alignment_heatmap"]
    # Heatmaps only exist for low-alignment images; skip examples without one.
    if heatmap:
        display(overlay_heatmap(image, np.asarray(heatmap)))
```
</details>
</br>
# Data Summary
## Word Scores
Users identified words from the prompts that were NOT accurately depicted in the generated images. Higher word scores indicate poorer representation in the image. Participants also had the option to select "[No_mistakes]" for prompts where all elements were accurately depicted.
### Examples Results:
| <img src="https://cdn-uploads.huggingface.co/production/uploads/672b7d79fd1e92e3c3567435/lzlWHmLKBvBJhjGWP8xZZ.png" width="500"> | <img src="https://cdn-uploads.huggingface.co/production/uploads/672b7d79fd1e92e3c3567435/b38uskYWaGEgfeJQtKiaO.png" width="500"> |
|---|---|
| <img src="https://cdn-uploads.huggingface.co/production/uploads/672b7d79fd1e92e3c3567435/4uWKVjZBA5aX2YDUYNpdV.png" width="500"> | <img src="https://cdn-uploads.huggingface.co/production/uploads/672b7d79fd1e92e3c3567435/f9JIuwDoNohy7EkDYILFm.png" width="500"> |
## Coherence
The coherence score measures whether the generated image is logically consistent and free from artifacts or visual glitches. Without seeing the original prompt, users were asked: "Look closely, does this image have weird errors, like senseless or malformed objects, incomprehensible details, or visual glitches?" Each image received at least 21 responses indicating the level of coherence on a scale of 1-5, which were then averaged to produce the final scores where 5 indicates the highest coherence.
Images scoring below 3.5 in coherence were further evaluated, with participants marking specific errors in the image.
### Example Results:
| <img src="https://cdn-uploads.huggingface.co/production/uploads/672b7d79fd1e92e3c3567435/sc-4ls9X0yO-hGN0VCDSX.png" width="500"> | <img src="https://cdn-uploads.huggingface.co/production/uploads/672b7d79fd1e92e3c3567435/J77EmYp4oyRRakkcRnaF9.png" width="500"> |
|---|---|
| <img src="https://cdn-uploads.huggingface.co/production/uploads/672b7d79fd1e92e3c3567435/mRDdoQdc4_iy2JcLhdI7J.png" width="500"> | <img src="https://cdn-uploads.huggingface.co/production/uploads/672b7d79fd1e92e3c3567435/2N2KJyz4YOGT6N6tuUX8M.png" width="500"> |
## Alignment
The alignment score quantifies how well an image matches its prompt. Users were asked: "How well does the image match the description?". Again, each image received at least 21 responses indicating the level of alignment on a scale of 1-5 (5 being the highest), which were then averaged.
For images with an alignment score below 3.2, additional users were asked to highlight areas where the image did not align with the prompt. These responses were then compiled into a heatmap.
As mentioned in the Google paper, alignment is harder to annotate consistently: if, for example, an object is missing, it is unclear to the annotators what they need to highlight.
### Example Results:
<style>
.example-results-grid {
display: grid;
grid-template-columns: repeat(2, 450px);
gap: 20px;
margin: 20px 0;
justify-content: left;
}
.result-card {
background-color: #fff;
border-radius: 8px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
padding: 15px;
width: 450px;
}
.prompt {
margin-bottom: 10px;
font-size: 18px;
line-height: 1.4;
color: #333;
background-color: #f8f8f8;
padding: 10px;
border-radius: 5px;
}
.image-container img {
width: 450px;
height: auto;
border-radius: 4px;
}
@media (max-width: 1050px) {
.example-results-grid {
grid-template-columns: 450px;
}
}
</style>
<div class="example-results-grid">
<div class="result-card">
<div class="prompt">
<strong>Prompt:</strong> Three cats and one dog sitting on the grass.
</div>
<div class="image-container">
<img src="https://cdn-uploads.huggingface.co/production/uploads/672b7d79fd1e92e3c3567435/qCNWVSNjPsp8XQ3zliLcp.png" alt="Three cats and one dog">
</div>
</div>
<div class="result-card">
<div class="prompt">
<strong>Prompt:</strong> A brown toilet with a white wooden seat.
</div>
<div class="image-container">
<img src="https://cdn-uploads.huggingface.co/production/uploads/672b7d79fd1e92e3c3567435/M3buzP-5k4pRCxOi_ijxM.png" alt="Brown toilet">
</div>
</div>
<div class="result-card">
<div class="prompt">
<strong>Prompt:</strong> Photograph of a pale Asian woman, wearing an oriental costume, sitting in a luxurious white chair. Her head is floating off the chair, with the chin on the table and chin on her knees, her chin on her knees. Closeup
</div>
<div class="image-container">
<img src="https://cdn-uploads.huggingface.co/production/uploads/672b7d79fd1e92e3c3567435/ggYXUEbGppiTeL84pG-DP.png" alt="Asian woman in costume">
</div>
</div>
<div class="result-card">
<div class="prompt">
<strong>Prompt:</strong> A tennis racket underneath a traffic light.
</div>
<div class="image-container">
<img src="https://cdn-uploads.huggingface.co/production/uploads/672b7d79fd1e92e3c3567435/mT7sAbnO-w6ySXaeEqEki.png" alt="Racket under traffic light">
</div>
</div>
</div>
## Style
The style score reflects how visually appealing participants found each image, independent of the prompt. Users were asked: "How much do you like the way this image looks?" Each image received 21 responses grading on a scale of 1-5, which were then averaged.
In contrast to other preference collection methods, such as the Hugging Face image arena, the preferences were collected from people around the world (156 different countries) and from all walks of life, creating a more representative score.
# About Rapidata
Rapidata's technology makes collecting human feedback at scale faster and more accessible than ever before. Visit [rapidata.ai](https://www.rapidata.ai/) to learn more about how we're revolutionizing human feedback collection for AI development.
# Other Datasets
We run a benchmark of the major image generation models, the results can be found on our [website](https://www.rapidata.ai/leaderboard/image-models). We rank the models according to their coherence/plausibility, their alignment with the given prompt, and style preference. The underlying 2M+ annotations can be found here:
- Link to the [Coherence dataset](https://huggingface.co/datasets/Rapidata/Flux_SD3_MJ_Dalle_Human_Coherence_Dataset)
- Link to the [Text-2-Image Alignment dataset](https://huggingface.co/datasets/Rapidata/Flux_SD3_MJ_Dalle_Human_Alignment_Dataset)
- Link to the [Preference dataset](https://huggingface.co/datasets/Rapidata/700k_Human_Preference_Dataset_FLUX_SD3_MJ_DALLE3)
We have also started to run a [video generation benchmark](https://www.rapidata.ai/leaderboard/video-models); it is still a work in progress and currently only covers 2 models. They are also analysed for coherence/plausibility, alignment, and style preference.
# Replicating the Annotation Setup
For researchers interested in producing their own rich preference dataset, you can directly use the Rapidata API through python. The code snippets below show how to replicate the modalities used in the dataset. Additional information is available through the [documentation](https://docs.rapidata.ai/)
<details>
<summary>Creating the Rapidata Client and Downloading the Dataset</summary>
First install the `rapidata` package, then create the `RapidataClient()`; this will be used to create and launch the annotation setup.
```bash
pip install rapidata
```
```python
from rapidata import RapidataClient, LabelingSelection, ValidationSelection
client = RapidataClient()
```
As example data we will just use images from the dataset. Make sure to set `streaming=True` as downloading the whole dataset might take a significant amount of time.
```python
from datasets import load_dataset

ds = load_dataset("Rapidata/text-2-image-Rich-Human-Feedback-32k", split="train", streaming=True)
# Column names per the dataset schema above: "image_name" is the image,
# "sentence" is the prompt.
ds = ds.select_columns(["image_name", "sentence"])
```
Since we use streaming, we can extract the prompts and download the images we need like this:
```python
import os

tmp_folder = "demo_images"
# exist_ok avoids a separate existence check.
os.makedirs(tmp_folder, exist_ok=True)
prompts = []
image_paths = []
for i, row in enumerate(ds.take(10)):
    # Column names per the dataset schema above: "sentence" is the prompt,
    # "image_name" is the image itself.
    prompts.append(row["sentence"])
    # save each image to disk so it can be submitted to the annotation order
    save_path = os.path.join(tmp_folder, f"{i}.jpg")
    row["image_name"].save(save_path)
    image_paths.append(save_path)
```
</details>
<details>
<summary>Likert Scale Alignment Score</summary>
To launch a likert scale annotation order, we make use of the classification annotation modality. Below we show the setup for the alignment criteria.
The structure is the same for style and coherence, however arguments have to be adjusted of course. I.e. different instructions, options and validation set.
```python
# Alignment Example
# Each annotator grades prompt/image alignment on a 1-5 Likert scale; the
# per-image alignment scores in this dataset are averages of such responses.
instruction = "How well does the image match the description?"
answer_options = [
    "1: Not at all",
    "2: A little",
    "3: Moderately",
    "4: Very well",
    "5: Perfectly"
]
order = client.order.create_classification_order(
    name="Alignment Example",
    instruction=instruction,
    answer_options=answer_options,
    datapoints=image_paths,
    contexts=prompts, # for alignment, prompts are required as context for the annotators.
    responses_per_datapoint=10,
    selections=[ValidationSelection("676199a5ef7af86285630ea6"), LabelingSelection(1)] # here we use a pre-defined validation set. See https://docs.rapidata.ai/improve_order_quality/ for details
)
order.run() # This starts the order. Follow the printed link to see progress.
```
</details>
<details>
<summary>Alignment Heatmap</summary>
To produce heatmaps, we use the locate annotation modality. Below is the setup used for creating the alignment heatmaps.
```python
# alignment heatmap
# Note that the selected images may not actually have severely misaligned elements, but this is just for demonstration purposes.
# Annotators tap the image region that does not match the prompt; those taps
# are what get compiled into the alignment heatmaps shipped with this dataset.
order = client.order.create_locate_order(
    name="Alignment Heatmap Example",
    instruction="What part of the image does not match with the description? Tap to select.",
    datapoints=image_paths,
    contexts=prompts, # for alignment, prompts are required as context for the annotators.
    responses_per_datapoint=10,
    selections=[ValidationSelection("67689e58026456ec851f51f8"), LabelingSelection(1)] # here we use a pre-defined validation set for alignment. See https://docs.rapidata.ai/improve_order_quality/ for details
)
order.run() # This starts the order. Follow the printed link to see progress.
```
</details>
<details>
<summary>Select Misaligned Words</summary>
To launch the annotation setup for selection of misaligned words, we used the following setup
```python
# Select words example
from rapidata import LanguageFilter
# Append a no-error option annotators can pick when the image fully matches the prompt.
# NOTE(review): the dataset description above uses "[No_mistakes]" while this
# snippet appends "[No_Mistake]" — confirm which spelling the labeling setup expects.
select_words_prompts = [p + " [No_Mistake]" for p in prompts]
order = client.order.create_select_words_order(
    name="Select Words Example",
    instruction = "The image is based on the text below. Select mistakes, i.e., words that are not aligned with the image.",
    datapoints=image_paths,
    sentences=select_words_prompts,
    responses_per_datapoint=10,
    filters=[LanguageFilter(["en"])], # here we add a filter to ensure only english speaking annotators are selected
    selections=[ValidationSelection("6761a86eef7af86285630ea8"), LabelingSelection(1)] # here we use a pre-defined validation set. See https://docs.rapidata.ai/improve_order_quality/ for details
)
order.run()
```
</details> |
shreyas231219/new_dataset_1 | shreyas231219 | 2025-02-09T03:06:24Z | 17 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-09T03:06:20Z | 0 | ---
dataset_info:
features:
- name: What
dtype: string
- name: When
dtype: string
- name: Where
dtype: string
- name: Who
dtype: string
- name: Category
dtype: string
splits:
- name: train
num_bytes: 118713
num_examples: 1500
download_size: 35429
dataset_size: 118713
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
selfcorrexp2/llama31_first_wrong_and_20kfirst_corr_regular_norr_20k | selfcorrexp2 | 2024-12-25T21:08:39Z | 13 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-12-25T21:08:10Z | 0 | ---
dataset_info:
features:
- name: idx
dtype: int64
- name: prompt
dtype: string
- name: answers
sequence: string
- name: first_round
dtype: bool
- name: gt
dtype: string
- name: rewards
sequence: bool
- name: my_solu
sequence: string
- name: flag
dtype: bool
- name: turn
dtype: int64
- name: conversations
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 338968859.6649252
num_examples: 20000
download_size: 152955034
dataset_size: 338968859.6649252
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
RobotisSW/eval_ffw_test_pc_8 | RobotisSW | 2025-04-28T14:37:43Z | 28 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"tutorial"
] | [
"robotics"
] | 2025-04-28T14:37:36Z | 0 | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "ffw",
"total_episodes": 2,
"total_frames": 327,
"total_tasks": 1,
"total_videos": 6,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:2"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
16
],
"names": [
"arm_right_waist",
"arm_right_shoulder",
"arm_right_shoulder_shadow",
"arm_right_elbow",
"arm_right_elbow_shadow",
"arm_right_forearm_roll",
"arm_right_wrist_angle",
"arm_right_gripper",
"arm_left_waist",
"arm_left_shoulder",
"arm_left_shoulder_shadow",
"arm_left_elbow",
"arm_left_elbow_shadow",
"arm_left_forearm_roll",
"arm_left_wrist_angle",
"arm_left_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
16
],
"names": [
"arm_right_waist",
"arm_right_shoulder",
"arm_right_shoulder_shadow",
"arm_right_elbow",
"arm_right_elbow_shadow",
"arm_right_forearm_roll",
"arm_right_wrist_angle",
"arm_right_gripper",
"arm_left_waist",
"arm_left_shoulder",
"arm_left_shoulder_shadow",
"arm_left_elbow",
"arm_left_elbow_shadow",
"arm_left_forearm_roll",
"arm_left_wrist_angle",
"arm_left_gripper"
]
},
"observation.images.cam_head": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.cam_wrist_1": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.cam_wrist_2": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
UniDataPro/portuguese-speech-recognition-dataset | UniDataPro | 2025-05-22T09:06:31Z | 75 | 0 | [
"license:cc-by-nc-nd-4.0",
"size_categories:n<1K",
"format:audiofolder",
"modality:audio",
"modality:text",
"library:datasets",
"library:mlcroissant",
"region:us",
"speech recognition",
"machine learning",
"NLP",
"ASR",
"audio",
"speech",
"portuguese"
] | [] | 2025-03-17T21:47:11Z | 0 | ---
license: cc-by-nc-nd-4.0
tags:
- speech recognition
- machine learning
- NLP
- ASR
- audio
- speech
- portuguese
size_categories:
- 1K<n<10K
---
# Portuguese Speech Dataset for recognition task
Dataset comprises **406** hours of telephone dialogues in Portuguese, collected from **590** native speakers across various topics and domains. This dataset boasts an impressive **98%** word accuracy rate, making it a valuable resource for advancing **speech recognition technology**.
By utilizing this dataset, researchers and developers can advance their understanding and capabilities in **automatic speech recognition** (ASR) systems, **transcribing audio**, and **natural language processing** (NLP). - **[Get the data](https://unidata.pro/datasets/portuguese-speech-recognition-dataset/?utm_source=huggingface&utm_medium=referral&utm_campaign=portuguese-speech-recognition-dataset)**
The dataset includes high-quality audio recordings with text transcriptions, making it ideal for training and evaluating speech recognition models.
# 💵 Buy the Dataset: This is a limited preview of the data. To access the full dataset, please contact us at [https://unidata.pro](https://unidata.pro/datasets/portuguese-speech-recognition-dataset/?utm_source=huggingface&utm_medium=referral&utm_campaign=portuguese-speech-recognition-dataset) to discuss your requirements and pricing options.
## Metadata for the dataset

- **Audio files:** High-quality recordings in **WAV** format
- **Text transcriptions:** Accurate and detailed **transcripts** for each audio segment
- **Speaker information:** Metadata on **native speakers**, including **gender** and etc
- **Topics:** Diverse domains such as **general conversations**, **business** and etc
This dataset is a valuable resource for researchers and developers working on speech recognition, language models, and speech technology.
# 🌐 [UniData](https://unidata.pro/datasets/portuguese-speech-recognition-dataset/?utm_source=huggingface&utm_medium=referral&utm_campaign=portuguese-speech-recognition-dataset) provides high-quality datasets, content moderation, data collection and annotation for your AI/ML projects |
r2e-edits/14b_swebv_temp08_10_patch_verifier | r2e-edits | 2025-02-26T03:54:00Z | 19 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-26T03:53:54Z | 0 | ---
dataset_info:
features:
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: docker_images
dtype: string
- name: rewards
dtype: float64
splits:
- name: train
num_bytes: 356479621
num_examples: 4801
download_size: 106826836
dataset_size: 356479621
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
justus27/lcbv5-test | justus27 | 2025-06-25T00:22:26Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-23T23:42:25Z | 0 | ---
dataset_info:
features:
- name: problem_id
dtype: string
- name: task_type
dtype: string
- name: prompt
dtype: string
- name: verification_info
dtype: string
- name: responses
sequence: string
- name: response_lens
sequence: int64
- name: avg_reward
dtype: float64
splits:
- name: train
num_bytes: 3921596209
num_examples: 279
download_size: 2415217496
dataset_size: 3921596209
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Iess/chinese_modern_poetry | Iess | 2023-06-25T16:39:13Z | 281 | 25 | [
"language:zh",
"license:mit",
"size_categories:100K<n<1M",
"format:json",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"region:us",
"poetry",
"chinese poetry",
"modern poetry",
"chinese modern poetry"
] | [] | 2023-06-25T15:59:49Z | 1 | ---
license: mit
language:
- zh
tags:
- poetry
- chinese poetry
- modern poetry
- chinese modern poetry
---
### 简介
1. 数据集包括了近现代的中国诗人及外国诗人(中译版)作品,所有作品著作权归原作者所有,侵删请联系[email protected]
2. chinese_poems.jsonl为原数据,training_imagery2-5_maxlen256.json 分别是根据2-5个关键意象生成诗歌的相关数据集
3. 数据来源于网络,包括但不限于
+ https://github.com/sheepzh/poetry
+ https://bedtimepoem.com/
+ https://poemwiki.org/
+ baidu、google、zhihu等
### 一些作品
使用此数据集训练ChatGLM、LLaMA7b模型生成的诗歌,更多诗歌查看poems目录



|
test-gen/code_humaneval_qwen2.5-3b_t0.1_n8_tests_humaneval_qwen3-8b_t0.7_n1 | test-gen | 2025-05-16T15:32:56Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-16T15:32:55Z | 0 | ---
dataset_info:
features:
- name: task_id
dtype: string
- name: prompt
dtype: string
- name: canonical_solution
dtype: string
- name: test
dtype: string
- name: entry_point
dtype: string
- name: generated_code
sequence: string
- name: gt_rewards
sequence: float64
- name: rewards
sequence: float64
- name: verification_info
struct:
- name: language
dtype: string
- name: test_cases
sequence: string
splits:
- name: test
num_bytes: 2299064
num_examples: 164
download_size: 396095
dataset_size: 2299064
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
chiyuanhsiao/text_L2-regular-ASR_trivia_qa-audio | chiyuanhsiao | 2025-04-28T16:02:06Z | 13 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-28T15:57:00Z | 0 | ---
dataset_info:
features:
- name: question
dtype: string
- name: question_id
dtype: string
- name: question_source
dtype: string
- name: entity_pages
sequence:
- name: doc_source
dtype: string
- name: filename
dtype: string
- name: title
dtype: string
- name: wiki_context
dtype: string
- name: search_results
sequence:
- name: description
dtype: string
- name: filename
dtype: string
- name: rank
dtype: int32
- name: title
dtype: string
- name: url
dtype: string
- name: search_context
dtype: string
- name: answer
struct:
- name: aliases
sequence: string
- name: normalized_aliases
sequence: string
- name: matched_wiki_entity_name
dtype: string
- name: normalized_matched_wiki_entity_name
dtype: string
- name: normalized_value
dtype: string
- name: type
dtype: string
- name: value
dtype: string
- name: my_prediction_text
dtype: string
splits:
- name: validation
num_bytes: 51583519
num_examples: 1000
download_size: 30132540
dataset_size: 51583519
configs:
- config_name: default
data_files:
- split: validation
path: data/validation-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_41d6cd9b-6ceb-4990-a7dc-c6f7e62b549a | argilla-internal-testing | 2024-12-18T10:23:25Z | 16 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-12-18T10:23:24Z | 0 | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
alea-institute/kl3m-filter-data-dotgov-www.usa.gov | alea-institute | 2025-02-04T21:20:16Z | 15 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-04T21:20:15Z | 0 | ---
dataset_info:
features:
- name: identifier
dtype: string
- name: dataset
dtype: string
- name: mime_type
dtype: string
- name: score
dtype: float64
- name: tokens
sequence: int64
splits:
- name: train
num_bytes: 4093672
num_examples: 615
download_size: 602873
dataset_size: 4093672
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
BoooomNing/Faces_Dataset | BoooomNing | 2025-01-09T12:12:34Z | 29 | 1 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-01-09T12:12:16Z | 0 | ---
dataset_info:
features:
- name: face_images
dtype: image
- name: target_images
dtype: image
- name: captions
dtype: string
splits:
- name: train
num_bytes: 144501219.0
num_examples: 470
download_size: 106181649
dataset_size: 144501219.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
serEzioAuditore/turkceVeriset3 | serEzioAuditore | 2025-03-16T19:51:48Z | 15 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-16T19:51:34Z | 0 | ---
dataset_info:
features:
- name: id
dtype: int64
- name: text
dtype: string
splits:
- name: train
num_bytes: 245519628.3
num_examples: 90000
- name: validation
num_bytes: 27279958.7
num_examples: 10000
download_size: 160223582
dataset_size: 272799587.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
JavisGPT-dev/InstUnd-Audio | JavisGPT-dev | 2025-03-17T08:42:18Z | 152 | 1 | [
"size_categories:1M<n<10M",
"format:json",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"region:us"
] | [] | 2025-02-24T02:55:19Z | 0 | ---
size_categories:
- 100K<n<1M
---
- After downloading the dataset, you need to concatenate the parts and extract the resulting archive.
```shell
cat data_part_* > data.tar.gz
tar -xzvf data.tar.gz
``` |
ZhengGuangze/TAP-Vid | ZhengGuangze | 2025-05-28T01:32:13Z | 0 | 0 | [
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:text",
"modality:text",
"library:datasets",
"library:mlcroissant",
"region:us"
] | [] | 2025-05-28T01:03:50Z | 0 | ---
license: apache-2.0
---
|
thainq107/c4-small | thainq107 | 2025-05-23T02:32:41Z | 0 | 0 | [
"region:us"
] | [] | 2025-05-23T02:32:24Z | 0 | ---
dataset_info:
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 194484214.8
num_examples: 90000
- name: test
num_bytes: 21609357.2
num_examples: 10000
download_size: 131631260
dataset_size: 216093572.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
eliasfiz/emilia-snac-with-spk-emb-DE | eliasfiz | 2025-05-06T23:08:35Z | 0 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-06T22:57:00Z | 0 | ---
dataset_info:
features:
- name: codes_list
sequence: int64
- name: speaker_embedding
sequence: float32
- name: text
dtype: string
splits:
- name: part_82
num_bytes: 3004706
num_examples: 378
- name: part_33
num_bytes: 26900546
num_examples: 3853
- name: part_72
num_bytes: 11162071
num_examples: 1474
- name: part_73
num_bytes: 11241726
num_examples: 1485
- name: part_69
num_bytes: 16655292
num_examples: 2172
- name: part_36
num_bytes: 32814113
num_examples: 4402
- name: part_66
num_bytes: 16919071
num_examples: 2222
- name: part_77
num_bytes: 11260986
num_examples: 1461
- name: part_50
num_bytes: 25353469
num_examples: 3319
- name: part_59
num_bytes: 23748838
num_examples: 3179
- name: part_55
num_bytes: 24708418
num_examples: 3278
- name: part_47
num_bytes: 37466034
num_examples: 5054
- name: part_54
num_bytes: 24892316
num_examples: 3332
- name: part_57
num_bytes: 24722307
num_examples: 3336
- name: part_11
num_bytes: 86811637
num_examples: 14069
- name: part_12
num_bytes: 86629026
num_examples: 13529
- name: part_14
num_bytes: 89289674
num_examples: 14227
- name: part_8
num_bytes: 104772590
num_examples: 15144
- name: part_27
num_bytes: 92785970
num_examples: 15702
- name: part_16
num_bytes: 104976420
num_examples: 16303
- name: part_21
num_bytes: 103246091
num_examples: 16788
- name: part_13
num_bytes: 111827449
num_examples: 17857
- name: part_20
num_bytes: 103331829
num_examples: 16560
- name: part_5
num_bytes: 128579654
num_examples: 18538
download_size: 541210774
dataset_size: 1303100233
configs:
- config_name: default
data_files:
- split: part_82
path: data/part_82-*
- split: part_33
path: data/part_33-*
- split: part_72
path: data/part_72-*
- split: part_73
path: data/part_73-*
- split: part_69
path: data/part_69-*
- split: part_36
path: data/part_36-*
- split: part_66
path: data/part_66-*
- split: part_77
path: data/part_77-*
- split: part_50
path: data/part_50-*
- split: part_59
path: data/part_59-*
- split: part_55
path: data/part_55-*
- split: part_47
path: data/part_47-*
- split: part_54
path: data/part_54-*
- split: part_57
path: data/part_57-*
- split: part_11
path: data/part_11-*
- split: part_12
path: data/part_12-*
- split: part_14
path: data/part_14-*
- split: part_8
path: data/part_8-*
- split: part_27
path: data/part_27-*
- split: part_16
path: data/part_16-*
- split: part_21
path: data/part_21-*
- split: part_13
path: data/part_13-*
- split: part_20
path: data/part_20-*
- split: part_5
path: data/part_5-*
---
|
tthoma909/math-basics | tthoma909 | 2025-02-15T21:44:07Z | 17 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-15T21:35:12Z | 0 | ---
dataset_info:
features:
- name: nums
sequence: int64
- name: expression
dtype: string
- name: target
dtype: int64
splits:
- name: train
num_bytes: 5246324
num_examples: 100000
download_size: 2559611
dataset_size: 5246324
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
junnystateofmind/conversational_ai_5_turns_only_ckp_3 | junnystateofmind | 2024-11-22T17:06:21Z | 15 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-11-22T16:02:54Z | 0 | ---
dataset_info:
features:
- name: narrative
dtype: string
- name: trajectory
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 7488
num_examples: 20
download_size: 7108
dataset_size: 7488
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
laochengzi/stanford_df_rectified | laochengzi | 2024-11-05T06:37:00Z | 18 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-11-05T06:36:47Z | 0 | ---
dataset_info:
features:
- name: Image_name
dtype: int64
- name: Paragraph
dtype: string
- name: train
dtype: bool
- name: test
dtype: bool
- name: url
dtype: string
- name: val
dtype: bool
splits:
- name: train
num_bytes: 7510209
num_examples: 19561
download_size: 3505452
dataset_size: 7510209
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Ahmed-ibn-Harun/BrainHermorrhage | Ahmed-ibn-Harun | 2024-10-23T11:12:58Z | 18 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:image",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-10-23T11:03:46Z | 0 | ---
dataset_info:
features:
- name: image
dtype: image
- name: label
dtype:
class_label:
names:
'0': 0_no_hermorrhage
'1': 1_hermorrhage
splits:
- name: train
num_bytes: 286881574.338
num_examples: 7298
- name: validation
num_bytes: 72656887.733
num_examples: 2029
- name: test
num_bytes: 35044806.0
num_examples: 811
download_size: 433700277
dataset_size: 394583268.071
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
YYT-t/code_opencoder_edu-deepseek-coder-6.7b-instruct-iter1_sample_4000_tp | YYT-t | 2025-04-24T13:23:47Z | 19 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-24T13:23:46Z | 0 | ---
dataset_info:
features:
- name: question
dtype: string
- name: answer
dtype: string
- name: rational_answer
dtype: string
splits:
- name: train
num_bytes: 5920468
num_examples: 4000
download_size: 2067106
dataset_size: 5920468
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
magnifi/Phi3_intent_v56_2_w_unknown_remove_68_intents_upper_lower | magnifi | 2025-03-10T01:02:42Z | 14 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T01:02:40Z | 0 | ---
dataset_info:
features:
- name: Query
dtype: string
- name: true_intent
dtype: string
splits:
- name: train
num_bytes: 1505486
num_examples: 20664
- name: validation
num_bytes: 8109
num_examples: 113
download_size: 430246
dataset_size: 1513595
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
andreuka18/DeepSeek-R1-Distill-Llama-8B-OpenThoughts-114k-tokenized | andreuka18 | 2025-03-06T12:21:55Z | 111 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-06T12:20:29Z | 0 | ---
dataset_info:
features:
- name: tokens
sequence: int64
splits:
- name: train
num_bytes: 6166867104.0
num_examples: 752424
download_size: 1238722898
dataset_size: 6166867104.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
MisterEScholar/s50K_part_24 | MisterEScholar | 2025-02-09T06:07:00Z | 16 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-09T06:06:56Z | 0 | ---
dataset_info:
features:
- name: solution
dtype: string
- name: question
dtype: string
- name: cot_type
dtype: string
- name: source_type
dtype: string
- name: metadata
dtype: string
- name: cot
dtype: 'null'
splits:
- name: train
num_bytes: 1639387
num_examples: 1000
download_size: 894444
dataset_size: 1639387
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
griffinpinney/Sort-4to6 | griffinpinney | 2025-02-24T03:53:42Z | 15 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-23T01:22:14Z | 0 | ---
dataset_info:
features:
- name: target
sequence: int64
- name: original
sequence: int64
splits:
- name: train
num_bytes: 44009696
num_examples: 500000
download_size: 4058605
dataset_size: 44009696
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
eagle0504/MedQuad-MedicalQnADataset-1024-synth-aug-1024-synth-aug-1024-synth-aug | eagle0504 | 2025-04-29T09:10:07Z | 27 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-29T09:10:06Z | 0 | ---
dataset_info:
features:
- name: problem
dtype: string
- name: solution
dtype: string
splits:
- name: train
num_bytes: 165392
num_examples: 85
download_size: 82738
dataset_size: 165392
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
WaltonFuture/GEOQA_R1V_Train_8K | WaltonFuture | 2025-04-24T12:01:25Z | 39 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-24T12:01:11Z | 0 | ---
dataset_info:
features:
- name: images
sequence: image
- name: problem
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 28815465.18
num_examples: 8030
download_size: 35537104
dataset_size: 28815465.18
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
AFZAL0008/mal-eng-translation | AFZAL0008 | 2024-12-20T05:08:01Z | 70 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-12-20T04:22:38Z | 0 | ---
dataset_info:
features:
- name: 'Unnamed: 0.1'
dtype: int64
- name: 'Unnamed: 0'
dtype: int64
- name: English
dtype: string
- name: Malayalam
dtype: string
splits:
- name: train
num_bytes: 84825396
num_examples: 361847
download_size: 33788639
dataset_size: 84825396
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/s1_ablation_diversity_sampling_1k | mlfoundations-dev | 2025-02-12T16:02:43Z | 16 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-10T15:52:35Z | 0 | ---
dataset_info:
features:
- name: problem
dtype: string
- name: reasoning
dtype: string
- name: deepseek_solution
dtype: string
- name: response
dtype: string
- name: math_class
dtype: string
- name: system
dtype: string
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 40463615
num_examples: 1000
download_size: 17218343
dataset_size: 40463615
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
gaotang/sky_v02_processed_qwen | gaotang | 2025-03-20T16:11:04Z | 16 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-20T16:10:56Z | 0 | ---
dataset_info:
features:
- name: context_messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: winner
dtype: string
splits:
- name: train
num_bytes: 503664912
num_examples: 77016
download_size: 192509566
dataset_size: 503664912
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Alex-xu/secalign-dbg-haiku-javascript-all | Alex-xu | 2025-01-22T03:44:18Z | 16 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-01-22T03:44:17Z | 0 | ---
dataset_info:
features:
- name: lang
dtype: string
- name: cwe
dtype: string
- name: original_instruction
dtype: string
- name: original_code
dtype: string
- name: empty
dtype: string
- name: fixed_code
dtype: string
- name: benign
dtype: bool
splits:
- name: train
num_bytes: 60992724
num_examples: 17892
download_size: 27563981
dataset_size: 60992724
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
chocckaka/CriticTool-Dataset | chocckaka | 2025-06-24T02:53:49Z | 0 | 0 | [
"license:apache-2.0",
"region:us"
] | [] | 2025-06-24T02:49:53Z | 0 | ---
license: apache-2.0
---
|
DanqingZ/tic_tac_toe_5_raw_2 | DanqingZ | 2025-06-15T07:40:19Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"so100",
"tutorial"
] | [
"robotics"
] | 2025-06-15T07:24:55Z | 0 | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- so100
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so100",
"total_episodes": 2,
"total_frames": 715,
"total_tasks": 1,
"total_videos": 6,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:2"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.on_robot": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"observation.images.side_view": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"observation.images.phone": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
Pisethan/plp-ai-dataset | Pisethan | 2025-05-26T09:08:01Z | 0 | 0 | [
"license:apache-2.0",
"size_categories:n<1K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-26T07:49:28Z | 0 | ---
license: apache-2.0
---
|
qrk-labs/QRK-Islam-Basic-Weak | qrk-labs | 2025-05-12T14:12:43Z | 73 | 0 | [
"license:mit",
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-09T21:42:54Z | 0 | ---
license: mit
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
dataset_info:
features:
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 1334778
num_examples: 7099
download_size: 475144
dataset_size: 1334778
---
|
Asap7772/omnimath-hint-v6-r1distill15b-respgen__1534_1705 | Asap7772 | 2025-04-07T00:12:53Z | 7 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-07T00:12:06Z | 0 | ---
dataset_info:
features:
- name: hint_chosen
dtype: string
- name: hint_completion
sequence: string
- name: hint_completion_answer
sequence: string
- name: hint_completion_correct
sequence: bool
- name: hint_completion_succ_rate
dtype: float64
- name: domain
dtype: string
- name: difficulty
dtype: float64
- name: problem
dtype: string
- name: solution
dtype: string
- name: answer
dtype: string
- name: source
dtype: string
- name: completion
sequence: string
- name: completion_answer
sequence: string
- name: completion_correct
sequence: bool
- name: completion_succ_rate
dtype: float64
- name: context
dtype: string
- name: hint1
dtype: string
- name: hint2
dtype: string
- name: hint3
dtype: string
- name: hint4
dtype: string
- name: hint5
dtype: string
splits:
- name: train
num_bytes: 1311018061
num_examples: 1026
download_size: 466969384
dataset_size: 1311018061
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
datasets-CNRS/CIENSFO | datasets-CNRS | 2025-03-29T21:41:48Z | 22 | 0 | [
"language:fra",
"license:cc-by-4.0",
"region:us"
] | [] | 2024-10-22T20:23:17Z | 0 | ---
language:
- fra
viewer: false
license: cc-by-4.0
---
> [!NOTE]
> Dataset origin: https://www.ortolang.fr/market/corpora/ciensfo
> # CIENSFO
Corpus of Non-Standard Spoken French Subordinated Interrogatives (Corpus d'Interrogatives Enchâssées Non-Standards du Français Oral)
## Corpus content
This corpus contains transcriptions of spoken French sentences which exhibit non-standard subordinated interrogatives.
ex. ma façon de voir les choses c'est de faire le bilan de [pause] c'est quoi notre expertise
More precisely, five types of subordinated clauses are present:
1. Interrogatives **dependent on a noun**
* ex. moi je pouvais observer les différences de comment on était éduqués
2. Interrogatives dependent on a verb and **introduced by a preposition**
* ex. ça a pratiquement tout de suite reposé sur qu'est-ce qu'on va inventer comme femme [...]
3. Interrogatives being **verbal adjunct**
* ex. [...] ça peut être paronyme ou homonyme suivant comment vous le prononcez
4. Subordinated interrogatives using a **non-standard form** (ex. a marker unexpected in subordination)
* ex. avant ça je me pose jamais la question de est-ce que j'aime faire ça
5. Interrogatives used **in a nominal context**
* ex. avant de s'attaquer au à quoi ça sert commençons par le à quoi ça ne sert pas
**TW:** Some sources mention sensitive questions (sex, sexism, etc.). Thus, some sentences may use explicit words.
## Corpus elaboration
This corpus consists of transcribed sentences personally observed by the authors. There are four kinds of sources.
1. heard in **personal conversations**: sentences uttered by someone else, sometimes with the author as an interlocutor, sometimes not
2. from an **online** (often free) **material** (podcast, YouTube video, series, etc.)
3. seen written on an online forum or in a text message
4. sentences taken from the CEFC (see `cefc.tsv`)
Sentences of type 2. are provided with a complete description of their source (title, author, publisher, URL, time code), so that it is possible to check the transcription and obtain the actual prosody.
Sentences of type 1. were transcribed on the fly from a spontaneous conversation. There was no recording. Therefore, it is not possible to check the accuracy of the transcription nor to obtain the actual prosody. As a consequence, sentences of type 1. and 3. are less trustworthy. Please take that into account in your analyses.
Type 1. and 3. sentences were taken from persons of various ages (but mostly French young adults) between June 2022 and November 2023. Type 2 sentences are mostly extracted from materials put online between 2018 and November 2023 by persons of various ages (but mostly French adults).
## Transcription choices
Sentence segmentation is based on locutory units. A `NAME` token was substituted for proper names present in personal conversations.
Transcription is performed using standard lexical spelling forms. In particular, silent `e`'s are not removed (ex. `tu as` instead of `t'as` for `/ta/`). However, "missing words" are not added back (ex. `y a` instead of `y'a` or `il y a` for `/ja/`). Punctuation and capital letters starting sentences are not considered. The transcription follows the 1990 French spelling reform.
A `[pause]` symbol is added when there is a long enough pause between the predicate and the interrogative (only for type 2 sentences). Similarly, `euh`'s are not transcribed, except the ones close to the interrogative borders.
ex. [...] un de mes objectifs c'est de partager avec vous mes réflexions sur [pause] comment vous pouvez vous créer votre propre mindset
A lot of extracts mentioned a list of interrogations following a first embedded interrogative. To avoid cumbersome lines, most of these additional interrogations were omitted.
## Structure
The main document `ciensfo.json` is a json file. It contains a list of records. Each record contains fields:
* **id** (mandatory): unique sentence identifier
* **source** (mandatory): json description of the source of the sentence
* **time** (mandatory, except for type 1. and 2. sentences): time code of the online material `(hours:)minutes:seconds`
* **text** (mandatory): transcription
* **subtitles** (optional): official subtitles given by the publisher / author(s), they may differ from the transcription
* **modality** (optional): `written` or `spoken`, when absent, `spoken` is default
* **note** (optional): e.g. `ungrammatical, humoristic`, `maybe subrodinated exclamative`, `maybe free relative`, `maybe reported`
* **variant** (optional): variant of French, e.g. `Québec`, `Belgium`, when absent, default is European French
The source field has the following fields:
* **id** (mandatory): unique material identifier
* **title** (mandatory)
* **type** (mandatory): among:
* type 1.: `conversation`, `scientific_conference`
* type 2.: `online_podcast`, `series_epidose`, `radio_programme`, `recorded_speech`, `comedy_video`, `interview_video`, `newspaper_video`, `popularization_video`, `position_video`, `documentary_video`, `music`, `FAQ_video`, `tv_programme`
* type 3.: `online_forum`, `comics`, `text_message`
* **date** (mandatory): online publication date, vector date format ``[[year:month:day]]`` (may be underspecified)
* **duration** (mandatory): `(hours:)minutes:seconds`
* **publisher** (optional)
* **catalog** (optional)
* **authors** (mandatory): the authors on the list may be identified by their given name, family name, literal name (ex. YouTube channel) or a mix of them
* **URL** (optional)
* **accessed** (mandatory, except for written): vector date format
* **page** (optional)
* **pages** (optional)
* **booktitle** (optional)
* **series** (optional)
* **volume** (optional)
* **ISSN** (optional)
Type 1. and 3. sentences only have source fields `id` and `type`.
Note that the person saying the extracted sentence may not always be one of the authors, but e.g. an interviewed person.
When some sentences are extracted from the same material, the source field of subsequent sentences may only contain the `id` field. Therefore, the couple "source" "id" + "time" (or just "source" "id" for type 1 sentences) constitute another possible unique sentence identifier.
## Annotation
The file `classification_ciensfo.csv` contains, for each occurrence of a non-standard subordinated interrogative, annotations about syntactic features. These labels have been added by hand.
Columns:
1. **sentence id**, if a sentence has several such patterns, we add a dot and a second id. (e.g `14.1`, `14.2`). The id of CEFC sentences begins with a `c`
2. **sentence type**: 1, 2 or 3
3. **dependent on a noun**: if so, the field contains the lemma of the noun
4. **dependent on an adjective**: if so, the field contains the lemma of the adjective
5. **dependent on a verb (includes semi-fixed verbal expressions)**: if so, the field contains the lemma of the predicate (or `CONJ` if it is conjuncted with the previous interrogative in the same sentence)
6. **negated**: if dependent on a verb or attribute adjective, 1 if the predicate is negated or 0 if not
7. **adverbial adjunct clause**: if the interrogative is an adverbial modifier clause, the field contains the preposition(al locution) introducing it
8. **introducing preposition**: ("/" if no preposition)
9. **graft**: if the interrogative is a graft, the field contains the preceding word, typically a preposition or a determiner
10. **non-standard type**: if applicable:
* `qecq`: occurrence of *qu'est-ce que/qui*
* `ecq`: *est-ce que* instead of *si* or *WH + est-ce que* other than *qu'est-ce que/qui*
* `in-situ`: e.g *c'est quoi*
  * `spp`: suffixed personal pronoun (aka. subject-verb inversion)
11. **WH**: interrogative word lemma
12. **WH 2**: second interrogative word lemma, if applicable
13. **marker**: interrogative marker
14. **marker**: additional morphosyntactic phenomenon which can hint at interrogativeness, e.g. *oui ou non*, *ou pas*, *ou non*
15. **class**: class according to [Coveney 2011]
16. **additional note**, e.g. `ungrammatical, humoristic`, `maybe subrodinated exclamative`, `maybe free relative`, `maybe reported`
Note: columns 3, 4, 5 and 7 may contain the token `CONJ` to indicate that the interrogative is conjuncted with the previous line, under the same governor.
### Extended Coveney classification
The classification `type` (direct interrogatives only) is based on:
> Aidan Coveney. 2011. L’interrogation directe. Travaux de linguistique, 63(2):112–145. De Boeck Supérieur.
We extend it to account for infinitival interrogatives, subordinated interrogatives, nominal and elliptical interrogatives.
**Note:** Contrary to [Coveney 2011], `stats.py` considers expression *qu'est-ce que/qui* as an interrogative word, and not as `Q` + *est-ce que* pattern.
The list of categories is:
* yes-no interrogatives:
* `ESV`: 'est-ce que', e.g. *Est-ce que les autres / ils sont partis ?*
* `V-CL`: clitic inversion, e.g. *Sont-ils partis ?*
* `GN V-CL`: complex inversion, e.g. *Les autres sont-ils partis ?*
* `SV-ti`: '-ti' marker, *C'est-ti pas fini ?*
* constituent (fr. partielle):
* `SVQ`: in situ, e.g. *Ils sont partis où ?*
* `QSV`: fronting (fr. antéposition), e.g. *Où ils sont partis ?*
* `QV-CL`: qu + clitic inversion, e.g. *Où sont-ils partis ?*
* `Q GN V-CL`: qu + complex inversion, e.g. *Où les autres sont-ils partis ?*
* `QV GN`: qu + stylistic inversion, e.g. *Où sont partis les autres ?*
* `seQkSV`: cleft, e.g. *C’est où qu’ils sont partis ?*
* `QESV`: qu + 'est-ce que', e.g. *Où est-ce qu’ils sont partis ?*
* `QsekSV`: qu + cleft variant, e.g. *Où c’est qu’ils sont partis ?*
* `QkSV`: qu + complementizer, e.g. *Où qu’ils sont partis ?*
* `Q=S V`: subject qu, e.g. *Lesquels sont partis ?*
* hybrid (non-standard)
  * `QEV GN`: qu + 'est-ce que' + stylistic inversion, e.g. *Avec qui est-ce que travaille Nicole Dupont ?*
* `Q=S V-CL`: subject qu + clitic inversion, e.g. *De ces fillettes, lesquelles sont-elles les tiennes ?*
* `E GN V-CL`: 'est-ce que' + complex inversion, e.g. *Est-ce que demain les sauveteurs pourront-ils s’approcher des alpinistes en détresse ?*
* `QE GN V-CL`: qu + 'est-ce que' + complex inversion e.g. *Qu’est-ce que le rédacteur de la rubrique des chats écrasés entend-il par un pachyderme ?*
Our extension includes:
* other case
* `Q=S sekV`: subject qu + cleft *c'est qui* + verb, e.g. *Qui c'est qui diffuse ça ?*
* infinitival
* `QVinf`: qu + infinitival verb, e.g. *Où partir ?*
* `Vinf Q`: infinitival verb + in-situ qu, e.g. *Pour partir où ?*
* `QsekVinf`: qu + cleft variant + infinitival verb, e.g. *Qu'est-ce que c'est qu'être une fille ?*
* multiple qu-words
* `Q=S VQ`: double qu-interrogative with one qu subject, e.g. *Qui veut intervenir dans quoi ?*
* `QSVQ`: double qu-interrogative, e.g. *Combien d'infanteries tu envoies sur quelle planète ?*
* `QVinf QQ`: triple qu-interrogative + infinitival verb, e.g. *Qui inviter à quel endroit sur quel sujet ?*
* Nominal or elliptical
* `Q GN`: qu + noun phrase, e.g. *Pourquoi Angiox ?*
* `Qsek GN`: qu + cleft variant + noun phrase, e.g. *Qu'est-ce que c'est que l'énergie ?*
* `Q`: elliptical qu (interrogative phrase alone), e.g. *Où ?*
* embedded yes-no
* `si SV`: 'si', e.g. *Je sais s'ils sont partis.*
* other regional variants
* `SV-tu`: '-tu' marker, *C'est-tu vraiment si pire que ça ?*
## Corpus searches
The patterns created to search for interrogative adverbial modifier clauses use [Grew](https://grew.fr/) (v. >= 1.14). Request files and found sentences are in the `searches` folder.
* `fib_comp_prep.req` used on the [FIB](https://github.com/Valentin-D-Richard/UD_French-FIB) (on the enriched version)
* `orfeo_prep_int.req` used on the [CEFC corpus](https://orfeo.grew.fr/?corpus=cefc-gold)
The raw results can be found in the json files for the FIB and in the `CEFC_found` folder for the CEFC. The files with the `_0` ending contain the pattern with the preposition immediately preceding the QU-word or morphosyntactic marking; the files with the `_1` ending contain the pattern where there is one word in between.
## License
The data is collected under the Right to quote. It is distributed under the Creative Common [CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/) license.
## Publication
Please cite this publication when mentioning CIENSFO.
Richard, V. D. (2024). “selon coment vous vous positionnez” : Étude des circonstancielles à interrogative. to appear in 9e Congrès Mondial de Linguistique Française, Lausanne.
## Citation
```
@misc{11403/ciensfo/v1,
title = {CIENSFO (Corpus d'Interrogatives Enchâssées Non-Standards du Fran\c{c}ais Oral)},
author = {Valentin D. Richard},
url = {https://hdl.handle.net/11403/ciensfo/v1},
note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr},
copyright = {Licence Creative Commons - Attribution 4.0 International},
year = {2024}
}
``` |
pragsri8/WildBenchGenv2-hard-matched | pragsri8 | 2025-05-11T00:11:53Z | 0 | 0 | [
"region:us"
] | [] | 2025-05-10T23:56:08Z | 0 | ---
dataset_info:
- config_name: Test_Gemma9B_SFT_BoN_K16_reward_eval16._bon_sampling_CARMA-no_neutrals_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 726930
num_examples: 254
download_size: 390600
dataset_size: 726930
- config_name: Test_Gemma9B_SFT_BoN_K16_reward_eval16._bon_sampling_CARMA_qrandomized_neutrals_our_improve_degrade_data_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 726930
num_examples: 254
download_size: 390600
dataset_size: 726930
- config_name: Test_Gemma9B_SFT_BoN_K16_reward_eval16._bon_sampling_RM_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 726930
num_examples: 254
download_size: 390600
dataset_size: 726930
- config_name: Test_Gemma9B_SFT_BoN_K16_reward_eval16._bon_sampling_RRM_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 726930
num_examples: 254
download_size: 390600
dataset_size: 726930
- config_name: Test_Gemma9B_SFT_BoN_K1_reward_eval1._bon_sampling_CARMA-no_neutrals_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 744190
num_examples: 256
download_size: 399797
dataset_size: 744190
- config_name: Test_Gemma9B_SFT_BoN_K1_reward_eval1._bon_sampling_CARMA-pragyas_neutrals_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 744190
num_examples: 256
download_size: 399797
dataset_size: 744190
- config_name: Test_Gemma9B_SFT_BoN_K1_reward_eval1._bon_sampling_CARMA_qrandomized_neutrals_our_improve_degrade_data_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 744190
num_examples: 256
download_size: 399797
dataset_size: 744190
- config_name: Test_Gemma9B_SFT_BoN_K1_reward_eval1._bon_sampling_RM_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 744190
num_examples: 256
download_size: 399797
dataset_size: 744190
- config_name: Test_Gemma9B_SFT_BoN_K1_reward_eval1._bon_sampling_RRM_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 744190
num_examples: 256
download_size: 399797
dataset_size: 744190
- config_name: Test_Gemma9B_SFT_BoN_K2_reward_eval2._bon_sampling_CARMA-no_neutrals_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 726930
num_examples: 254
download_size: 390148
dataset_size: 726930
- config_name: Test_Gemma9B_SFT_BoN_K2_reward_eval2._bon_sampling_CARMA-pragyas_neutrals_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 726930
num_examples: 254
download_size: 390148
dataset_size: 726930
- config_name: Test_Gemma9B_SFT_BoN_K2_reward_eval2._bon_sampling_CARMA_qrandomized_neutrals_our_improve_degrade_data_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 726930
num_examples: 254
download_size: 390148
dataset_size: 726930
- config_name: Test_Gemma9B_SFT_BoN_K2_reward_eval2._bon_sampling_RM_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 726930
num_examples: 254
download_size: 390148
dataset_size: 726930
- config_name: Test_Gemma9B_SFT_BoN_K2_reward_eval2._bon_sampling_RRM_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 726930
num_examples: 254
download_size: 390148
dataset_size: 726930
- config_name: Test_Gemma9B_SFT_BoN_K32_reward_eval32._bon_sampling_CARMA-no_neutrals_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 726930
num_examples: 254
download_size: 390184
dataset_size: 726930
- config_name: Test_Gemma9B_SFT_BoN_K32_reward_eval32._bon_sampling_CARMA_qrandomized_neutrals_our_improve_degrade_data_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 726930
num_examples: 254
download_size: 390184
dataset_size: 726930
- config_name: Test_Gemma9B_SFT_BoN_K32_reward_eval32._bon_sampling_RM_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 726930
num_examples: 254
download_size: 390184
dataset_size: 726930
- config_name: Test_Gemma9B_SFT_BoN_K32_reward_eval32._bon_sampling_RRM_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 726930
num_examples: 254
download_size: 390184
dataset_size: 726930
- config_name: Test_Gemma9B_SFT_BoN_K4_reward_eval4._bon_sampling_CARMA-no_neutrals_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 726930
num_examples: 254
download_size: 390848
dataset_size: 726930
- config_name: Test_Gemma9B_SFT_BoN_K4_reward_eval4._bon_sampling_CARMA_qrandomized_neutrals_our_improve_degrade_data_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 726930
num_examples: 254
download_size: 390848
dataset_size: 726930
- config_name: Test_Gemma9B_SFT_BoN_K4_reward_eval4._bon_sampling_RM_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 726930
num_examples: 254
download_size: 390848
dataset_size: 726930
- config_name: Test_Gemma9B_SFT_BoN_K4_reward_eval4._bon_sampling_RRM_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 726930
num_examples: 254
download_size: 390848
dataset_size: 726930
- config_name: Test_Gemma9B_SFT_BoN_K8_reward_eval8._bon_sampling_CARMA-no_neutrals_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 726930
num_examples: 254
download_size: 389845
dataset_size: 726930
- config_name: Test_Gemma9B_SFT_BoN_K8_reward_eval8._bon_sampling_RM_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 726930
num_examples: 254
download_size: 389845
dataset_size: 726930
- config_name: Test_Gemma9B_SFT_BoN_K8_reward_eval8._bon_sampling_RRM_pairpm_wildbench
features:
- name: id
dtype: string
- name: session_id
dtype: string
- name: conversation_input
list:
- name: content
dtype: string
- name: language
dtype: string
- name: redacted
dtype: bool
- name: role
dtype: string
- name: timestamp
dtype: 'null'
- name: toxic
dtype: bool
- name: length
dtype: int64
- name: checklist
sequence: string
- name: intent
dtype: string
- name: primary_tag
dtype: string
- name: secondary_tags
sequence: string
- name: avg_score
dtype: float64
- name: var_score
dtype: float64
splits:
- name: train
num_bytes: 726930
num_examples: 254
download_size: 389845
dataset_size: 726930
configs:
- config_name: Test_Gemma9B_SFT_BoN_K16_reward_eval16._bon_sampling_CARMA-no_neutrals_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K16_reward_eval16._bon_sampling_CARMA-no_neutrals_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K16_reward_eval16._bon_sampling_CARMA_qrandomized_neutrals_our_improve_degrade_data_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K16_reward_eval16._bon_sampling_CARMA_qrandomized_neutrals_our_improve_degrade_data_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K16_reward_eval16._bon_sampling_RM_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K16_reward_eval16._bon_sampling_RM_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K16_reward_eval16._bon_sampling_RRM_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K16_reward_eval16._bon_sampling_RRM_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K1_reward_eval1._bon_sampling_CARMA-no_neutrals_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K1_reward_eval1._bon_sampling_CARMA-no_neutrals_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K1_reward_eval1._bon_sampling_CARMA-pragyas_neutrals_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K1_reward_eval1._bon_sampling_CARMA-pragyas_neutrals_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K1_reward_eval1._bon_sampling_CARMA_qrandomized_neutrals_our_improve_degrade_data_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K1_reward_eval1._bon_sampling_CARMA_qrandomized_neutrals_our_improve_degrade_data_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K1_reward_eval1._bon_sampling_RM_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K1_reward_eval1._bon_sampling_RM_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K1_reward_eval1._bon_sampling_RRM_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K1_reward_eval1._bon_sampling_RRM_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K2_reward_eval2._bon_sampling_CARMA-no_neutrals_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K2_reward_eval2._bon_sampling_CARMA-no_neutrals_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K2_reward_eval2._bon_sampling_CARMA-pragyas_neutrals_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K2_reward_eval2._bon_sampling_CARMA-pragyas_neutrals_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K2_reward_eval2._bon_sampling_CARMA_qrandomized_neutrals_our_improve_degrade_data_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K2_reward_eval2._bon_sampling_CARMA_qrandomized_neutrals_our_improve_degrade_data_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K2_reward_eval2._bon_sampling_RM_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K2_reward_eval2._bon_sampling_RM_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K2_reward_eval2._bon_sampling_RRM_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K2_reward_eval2._bon_sampling_RRM_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K32_reward_eval32._bon_sampling_CARMA-no_neutrals_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K32_reward_eval32._bon_sampling_CARMA-no_neutrals_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K32_reward_eval32._bon_sampling_CARMA_qrandomized_neutrals_our_improve_degrade_data_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K32_reward_eval32._bon_sampling_CARMA_qrandomized_neutrals_our_improve_degrade_data_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K32_reward_eval32._bon_sampling_RM_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K32_reward_eval32._bon_sampling_RM_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K32_reward_eval32._bon_sampling_RRM_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K32_reward_eval32._bon_sampling_RRM_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K4_reward_eval4._bon_sampling_CARMA-no_neutrals_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K4_reward_eval4._bon_sampling_CARMA-no_neutrals_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K4_reward_eval4._bon_sampling_CARMA_qrandomized_neutrals_our_improve_degrade_data_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K4_reward_eval4._bon_sampling_CARMA_qrandomized_neutrals_our_improve_degrade_data_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K4_reward_eval4._bon_sampling_RM_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K4_reward_eval4._bon_sampling_RM_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K4_reward_eval4._bon_sampling_RRM_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K4_reward_eval4._bon_sampling_RRM_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K8_reward_eval8._bon_sampling_CARMA-no_neutrals_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K8_reward_eval8._bon_sampling_CARMA-no_neutrals_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K8_reward_eval8._bon_sampling_RM_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K8_reward_eval8._bon_sampling_RM_pairpm_wildbench/train-*
- config_name: Test_Gemma9B_SFT_BoN_K8_reward_eval8._bon_sampling_RRM_pairpm_wildbench
data_files:
- split: train
path: Test_Gemma9B_SFT_BoN_K8_reward_eval8._bon_sampling_RRM_pairpm_wildbench/train-*
---
|
vlm-reasoning-cot/ciphers | vlm-reasoning-cot | 2025-05-15T18:11:15Z | 128 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-13T04:58:14Z | 0 | ---
dataset_info:
- config_name: chunk_0001
features:
- name: question
dtype: string
- name: reasoning
dtype: string
- name: answer
dtype: string
- name: source_folder
dtype: string
- name: problem_image_1
dtype: image
- name: problem_image_1_base64
dtype: string
- name: reasoning_image_1
dtype: image
- name: reasoning_image_1_base64
dtype: string
- name: reasoning_image_10
dtype: image
- name: reasoning_image_10_base64
dtype: string
- name: reasoning_image_11
dtype: image
- name: reasoning_image_11_base64
dtype: string
- name: reasoning_image_12
dtype: image
- name: reasoning_image_12_base64
dtype: string
- name: reasoning_image_2
dtype: image
- name: reasoning_image_2_base64
dtype: string
- name: reasoning_image_3
dtype: image
- name: reasoning_image_3_base64
dtype: string
- name: reasoning_image_4
dtype: image
- name: reasoning_image_4_base64
dtype: string
- name: reasoning_image_5
dtype: image
- name: reasoning_image_5_base64
dtype: string
- name: reasoning_image_6
dtype: image
- name: reasoning_image_6_base64
dtype: string
- name: reasoning_image_7
dtype: image
- name: reasoning_image_7_base64
dtype: string
- name: reasoning_image_8
dtype: image
- name: reasoning_image_8_base64
dtype: string
- name: reasoning_image_9
dtype: image
- name: reasoning_image_9_base64
dtype: string
splits:
- name: train
num_bytes: 707261228.0
num_examples: 1000
download_size: 578886758
dataset_size: 707261228.0
- config_name: chunk_0002
features:
- name: question
dtype: string
- name: reasoning
dtype: string
- name: answer
dtype: string
- name: source_folder
dtype: string
- name: problem_image_1
dtype: image
- name: problem_image_1_base64
dtype: string
- name: reasoning_image_1
dtype: image
- name: reasoning_image_1_base64
dtype: string
- name: reasoning_image_10
dtype: image
- name: reasoning_image_10_base64
dtype: string
- name: reasoning_image_11
dtype: image
- name: reasoning_image_11_base64
dtype: string
- name: reasoning_image_12
dtype: image
- name: reasoning_image_12_base64
dtype: string
- name: reasoning_image_2
dtype: image
- name: reasoning_image_2_base64
dtype: string
- name: reasoning_image_3
dtype: image
- name: reasoning_image_3_base64
dtype: string
- name: reasoning_image_4
dtype: image
- name: reasoning_image_4_base64
dtype: string
- name: reasoning_image_5
dtype: image
- name: reasoning_image_5_base64
dtype: string
- name: reasoning_image_6
dtype: image
- name: reasoning_image_6_base64
dtype: string
- name: reasoning_image_7
dtype: image
- name: reasoning_image_7_base64
dtype: string
- name: reasoning_image_8
dtype: image
- name: reasoning_image_8_base64
dtype: string
- name: reasoning_image_9
dtype: image
- name: reasoning_image_9_base64
dtype: string
splits:
- name: train
num_bytes: 714895362.0
num_examples: 1000
download_size: 586241826
dataset_size: 714895362.0
- config_name: chunk_0003
features:
- name: question
dtype: string
- name: reasoning
dtype: string
- name: answer
dtype: string
- name: source_folder
dtype: string
- name: problem_image_1
dtype: image
- name: problem_image_1_base64
dtype: string
- name: reasoning_image_1
dtype: image
- name: reasoning_image_1_base64
dtype: string
- name: reasoning_image_10
dtype: image
- name: reasoning_image_10_base64
dtype: string
- name: reasoning_image_11
dtype: image
- name: reasoning_image_11_base64
dtype: string
- name: reasoning_image_12
dtype: image
- name: reasoning_image_12_base64
dtype: string
- name: reasoning_image_2
dtype: image
- name: reasoning_image_2_base64
dtype: string
- name: reasoning_image_3
dtype: image
- name: reasoning_image_3_base64
dtype: string
- name: reasoning_image_4
dtype: image
- name: reasoning_image_4_base64
dtype: string
- name: reasoning_image_5
dtype: image
- name: reasoning_image_5_base64
dtype: string
- name: reasoning_image_6
dtype: image
- name: reasoning_image_6_base64
dtype: string
- name: reasoning_image_7
dtype: image
- name: reasoning_image_7_base64
dtype: string
- name: reasoning_image_8
dtype: image
- name: reasoning_image_8_base64
dtype: string
- name: reasoning_image_9
dtype: image
- name: reasoning_image_9_base64
dtype: string
splits:
- name: train
num_bytes: 718190062.0
num_examples: 1000
download_size: 589335104
dataset_size: 718190062.0
- config_name: chunk_0004
features:
- name: question
dtype: string
- name: reasoning
dtype: string
- name: answer
dtype: string
- name: source_folder
dtype: string
- name: problem_image_1
dtype: image
- name: problem_image_1_base64
dtype: string
- name: reasoning_image_1
dtype: image
- name: reasoning_image_1_base64
dtype: string
- name: reasoning_image_10
dtype: image
- name: reasoning_image_10_base64
dtype: string
- name: reasoning_image_11
dtype: image
- name: reasoning_image_11_base64
dtype: string
- name: reasoning_image_12
dtype: image
- name: reasoning_image_12_base64
dtype: string
- name: reasoning_image_2
dtype: image
- name: reasoning_image_2_base64
dtype: string
- name: reasoning_image_3
dtype: image
- name: reasoning_image_3_base64
dtype: string
- name: reasoning_image_4
dtype: image
- name: reasoning_image_4_base64
dtype: string
- name: reasoning_image_5
dtype: image
- name: reasoning_image_5_base64
dtype: string
- name: reasoning_image_6
dtype: image
- name: reasoning_image_6_base64
dtype: string
- name: reasoning_image_7
dtype: image
- name: reasoning_image_7_base64
dtype: string
- name: reasoning_image_8
dtype: image
- name: reasoning_image_8_base64
dtype: string
- name: reasoning_image_9
dtype: image
- name: reasoning_image_9_base64
dtype: string
splits:
- name: train
num_bytes: 704665360.0
num_examples: 1000
download_size: 577484472
dataset_size: 704665360.0
- config_name: chunk_0005
features:
- name: question
dtype: string
- name: reasoning
dtype: string
- name: answer
dtype: string
- name: source_folder
dtype: string
- name: problem_image_1
dtype: image
- name: problem_image_1_base64
dtype: string
- name: reasoning_image_1
dtype: image
- name: reasoning_image_1_base64
dtype: string
- name: reasoning_image_10
dtype: image
- name: reasoning_image_10_base64
dtype: string
- name: reasoning_image_11
dtype: image
- name: reasoning_image_11_base64
dtype: string
- name: reasoning_image_12
dtype: image
- name: reasoning_image_12_base64
dtype: string
- name: reasoning_image_2
dtype: image
- name: reasoning_image_2_base64
dtype: string
- name: reasoning_image_3
dtype: image
- name: reasoning_image_3_base64
dtype: string
- name: reasoning_image_4
dtype: image
- name: reasoning_image_4_base64
dtype: string
- name: reasoning_image_5
dtype: image
- name: reasoning_image_5_base64
dtype: string
- name: reasoning_image_6
dtype: image
- name: reasoning_image_6_base64
dtype: string
- name: reasoning_image_7
dtype: image
- name: reasoning_image_7_base64
dtype: string
- name: reasoning_image_8
dtype: image
- name: reasoning_image_8_base64
dtype: string
- name: reasoning_image_9
dtype: image
- name: reasoning_image_9_base64
dtype: string
splits:
- name: train
num_bytes: 716389846.0
num_examples: 1000
download_size: 593459805
dataset_size: 716389846.0
- config_name: chunk_0006
features:
- name: question
dtype: string
- name: reasoning
dtype: string
- name: answer
dtype: string
- name: source_folder
dtype: string
- name: problem_image_1
dtype: image
- name: problem_image_1_base64
dtype: string
- name: reasoning_image_1
dtype: image
- name: reasoning_image_1_base64
dtype: string
- name: reasoning_image_10
dtype: image
- name: reasoning_image_10_base64
dtype: string
- name: reasoning_image_11
dtype: image
- name: reasoning_image_11_base64
dtype: string
- name: reasoning_image_12
dtype: image
- name: reasoning_image_12_base64
dtype: string
- name: reasoning_image_2
dtype: image
- name: reasoning_image_2_base64
dtype: string
- name: reasoning_image_3
dtype: image
- name: reasoning_image_3_base64
dtype: string
- name: reasoning_image_4
dtype: image
- name: reasoning_image_4_base64
dtype: string
- name: reasoning_image_5
dtype: image
- name: reasoning_image_5_base64
dtype: string
- name: reasoning_image_6
dtype: image
- name: reasoning_image_6_base64
dtype: string
- name: reasoning_image_7
dtype: image
- name: reasoning_image_7_base64
dtype: string
- name: reasoning_image_8
dtype: image
- name: reasoning_image_8_base64
dtype: string
- name: reasoning_image_9
dtype: image
- name: reasoning_image_9_base64
dtype: string
splits:
- name: train
num_bytes: 705398322.0
num_examples: 1000
download_size: 583947568
dataset_size: 705398322.0
- config_name: chunk_0007
features:
- name: question
dtype: string
- name: reasoning
dtype: string
- name: answer
dtype: string
- name: source_folder
dtype: string
- name: problem_image_1
dtype: image
- name: problem_image_1_base64
dtype: string
- name: reasoning_image_1
dtype: image
- name: reasoning_image_1_base64
dtype: string
- name: reasoning_image_10
dtype: image
- name: reasoning_image_10_base64
dtype: string
- name: reasoning_image_11
dtype: image
- name: reasoning_image_11_base64
dtype: string
- name: reasoning_image_12
dtype: image
- name: reasoning_image_12_base64
dtype: string
- name: reasoning_image_2
dtype: image
- name: reasoning_image_2_base64
dtype: string
- name: reasoning_image_3
dtype: image
- name: reasoning_image_3_base64
dtype: string
- name: reasoning_image_4
dtype: image
- name: reasoning_image_4_base64
dtype: string
- name: reasoning_image_5
dtype: image
- name: reasoning_image_5_base64
dtype: string
- name: reasoning_image_6
dtype: image
- name: reasoning_image_6_base64
dtype: string
- name: reasoning_image_7
dtype: image
- name: reasoning_image_7_base64
dtype: string
- name: reasoning_image_8
dtype: image
- name: reasoning_image_8_base64
dtype: string
- name: reasoning_image_9
dtype: image
- name: reasoning_image_9_base64
dtype: string
splits:
- name: train
num_bytes: 415705968.0
num_examples: 593
download_size: 340850442
dataset_size: 415705968.0
configs:
- config_name: chunk_0001
data_files:
- split: train
path: chunk_0001/train-*
- config_name: chunk_0002
data_files:
- split: train
path: chunk_0002/train-*
- config_name: chunk_0003
data_files:
- split: train
path: chunk_0003/train-*
- config_name: chunk_0004
data_files:
- split: train
path: chunk_0004/train-*
- config_name: chunk_0005
data_files:
- split: train
path: chunk_0005/train-*
- config_name: chunk_0006
data_files:
- split: train
path: chunk_0006/train-*
- config_name: chunk_0007
data_files:
- split: train
path: chunk_0007/train-*
---
|
SarraChab/MNLP_M2_mcqa_dataset | SarraChab | 2025-05-26T09:39:13Z | 85 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-18T09:37:53Z | 0 | ---
dataset_info:
features:
- name: id
dtype: string
- name: dataset
dtype: string
- name: question
dtype: string
- name: options
sequence: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 2268293
num_examples: 10687
download_size: 1254741
dataset_size: 2268293
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
This dataset was constructed as part of the EPFL Modern NLP (MNLP) course project to train and evaluate large language models on **multiple-choice question answering (MCQA)** tasks focused on scientific reasoning.
It aggregates and reformats **10,687 unique examples** from five high-quality academic and biomedical QA datasets, applying consistent structure, question normalization, and cross-source deduplication.
### 📊 Dataset Composition
| Source Dataset | Link | Questions Used | Description |
|----------------|------|----------------|-------------|
| ARC-Challenge | [ai2_arc](https://huggingface.co/datasets/ai2_arc) | 1,119 | Harder science exam questions requiring multi-step reasoning |
| ARC-Easy | [ai2_arc](https://huggingface.co/datasets/ai2_arc) | 2,251 | Simpler science questions at the elementary/middle school level |
| QASC | [qasc](https://huggingface.co/datasets/qasc) | 3,000 (subset) | A filtered and deduplicated subset of the QASC dataset, which was originally larger (~8,000+ examples). Only 3,000 unique and diverse questions were selected for balance |
| OpenBookQA | [openbookqa](https://huggingface.co/datasets/openbookqa) | 3,317 | 4-option science questions, filtered to keep `humanScore ≥ 1` |
| PubMedQA | [pubmed_qa](https://huggingface.co/datasets/pubmed_qa) | 1,000 | Biomedical questions with Yes/No/Maybe answers based on PubMed abstracts |
### 🧪 Preprocessing Pipeline
- **Normalization**: All questions were lowercased and stripped of whitespace for consistency.
- **Deduplication**: Each question was hashed (`md5(lowercase question)`) to detect and eliminate duplicates across datasets.
- **Filtering**:
- OpenBookQA was filtered to retain only questions with `humanScore ≥ 1`.
- PubMedQA was filtered to retain only labeled questions with answers in {yes, no, maybe}.
- QASC was **sampled and capped** at 3,000 unique questions to ensure dataset balance.
- **Unified formatting**: All entries follow the same JSON schema across sources.
### 📦 Format
Each sample follows this structure:
```json
{
"id": "qasc_481",
"dataset": "qasc",
"question": "What do bees use to make honey?",
"options": ["nectar", "pollen", "water", "leaves"],
"answer": "A"
}
|
extralit-dev/test_import_dataset_from_hub_with_classlabel_fc75f806-a0ee-4fad-a969-29e01309ac96 | extralit-dev | 2025-06-19T05:05:44Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-19T05:05:44Z | 0 | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1264
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
TianHongZXY/MATH-test-Tulu-3-8B-SFT-beam_search-completions-temp_0.8-range_3800_to_3900 | TianHongZXY | 2025-01-19T21:15:18Z | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-01-18T00:46:18Z | 0 | ---
dataset_info:
config_name: TianHongZXY_MATH--T-0.8--top_p-1.0--n-32--m-4--iters-20--look-1--seed-42--agg_strategy--last
features:
- name: problem
dtype: string
- name: level
dtype: string
- name: type
dtype: string
- name: solution
dtype: string
- name: completions
sequence: string
- name: pred
dtype: string
- name: completion_tokens
sequence: int64
- name: scores
sequence:
sequence: float64
- name: agg_scores
sequence: float64
- name: pred_weighted@1
dtype: string
- name: pred_maj@1
dtype: string
- name: pred_naive@1
dtype: string
- name: pred_weighted@2
dtype: string
- name: pred_maj@2
dtype: string
- name: pred_naive@2
dtype: string
- name: pred_weighted@4
dtype: string
- name: pred_maj@4
dtype: string
- name: pred_naive@4
dtype: string
- name: pred_weighted@8
dtype: string
- name: pred_maj@8
dtype: string
- name: pred_naive@8
dtype: string
- name: pred_weighted@16
dtype: string
- name: pred_maj@16
dtype: string
- name: pred_naive@16
dtype: string
- name: pred_weighted@32
dtype: string
- name: pred_maj@32
dtype: string
- name: pred_naive@32
dtype: string
splits:
- name: train
num_bytes: 2963248
num_examples: 100
download_size: 381984
dataset_size: 2963248
configs:
- config_name: TianHongZXY_MATH--T-0.8--top_p-1.0--n-32--m-4--iters-20--look-1--seed-42--agg_strategy--last
data_files:
- split: train
path: TianHongZXY_MATH--T-0.8--top_p-1.0--n-32--m-4--iters-20--look-1--seed-42--agg_strategy--last/train-*
---
|
fernandabufon/ukr_to_pt_json_gpt | fernandabufon | 2025-01-15T13:43:17Z | 18 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-01-15T13:43:15Z | 0 | ---
dataset_info:
features:
- name: id
dtype: string
- name: text
dtype: string
- name: translation
dtype: string
- name: anger
dtype: int64
- name: disgust
dtype: int64
- name: fear
dtype: int64
- name: joy
dtype: int64
- name: sadness
dtype: int64
- name: surprise
dtype: int64
- name: inference_time
dtype: float64
- name: inference_total_time
dtype: float64
- name: inference_average_time
dtype: float64
splits:
- name: train
num_bytes: 1016468
num_examples: 2466
download_size: 514168
dataset_size: 1016468
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
zuozhuan/so100_close | zuozhuan | 2024-12-16T02:08:01Z | 26 | 0 | [
"task_categories:robotics",
"region:us",
"LeRobot",
"so100",
"tutorial"
] | [
"robotics"
] | 2024-12-16T02:03:20Z | 0 | ---
task_categories:
- robotics
tags:
- LeRobot
- so100
- tutorial
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
|
pbvr/so101_test011 | pbvr | 2025-05-26T07:42:36Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"so101",
"tutorial"
] | [
"robotics"
] | 2025-05-26T07:42:18Z | 0 | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- so101
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so101",
"total_episodes": 2,
"total_frames": 1788,
"total_tasks": 1,
"total_videos": 6,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:2"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.handeye": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"observation.images.side": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"observation.images.topdown": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
dylanebert/iso3d | dylanebert | 2024-06-10T19:38:43Z | 112 | 9 | [
"license:mit",
"size_categories:n<1K",
"format:imagefolder",
"modality:image",
"modality:text",
"library:datasets",
"library:mlcroissant",
"region:us"
] | [] | 2024-06-07T21:02:18Z | 1 | ---
license: mit
---
# iso3d - Isolated Synthetic Objects 3D
A dataset of isolated object images for evaluating [image-to-3D](https://huggingface.co/models?pipeline_tag=image-to-3d) models.
## Leaderboard
Vote and view results at [3d-arena](https://huggingface.co/spaces/dylanebert/3d-arena).
## Curation
Images are created using [dreamshaper-xl](https://huggingface.co/Lykon/dreamshaper-xl-v2-turbo) and [white background lora](https://civitai.com/models/119388/white-background) on [karlo-v1](https://huggingface.co/datasets/diffusers-parti-prompts/karlo-v1) prompts.
1. Each [karlo-v1](https://huggingface.co/datasets/diffusers-parti-prompts/karlo-v1) prompt is extended with `{prompt}, isolated object render, with a white background` and negative prompt `text, watermark, shadow, background`.
2. Images are generated using [ComfyUI](https://github.com/comfyanonymous/ComfyUI) with [dreamshaper-xl](https://huggingface.co/Lykon/dreamshaper-xl-v2-turbo) and [white background lora](https://civitai.com/models/119388/white-background).
3. Backgrounds are removed using [rembg](https://github.com/danielgatis/rembg).
4. 100 images are hand-selected from the 1.63k generated images.
## Contributing
The leaderboard is automatically populated by the [3d-arena dataset](https://huggingface.co/datasets/dylanebert/3d-arena).
To submit your model, [open a PR](https://huggingface.co/docs/hub/en/repositories-pull-requests-discussions) on the dataset.
## Citation
```
@misc{3d-arena,
author = {Dylan Ebert},
title = {3D Arena},
year = {2024},
publisher = {Hugging Face},
howpublished = {\url{https://huggingface.co/spaces/dylanebert/3d-arena}}
}
```
|
ganker5/so100_toy_20250402 | ganker5 | 2025-04-02T03:33:53Z | 56 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"so100",
"tutorial"
] | [
"robotics"
] | 2025-04-02T02:36:36Z | 0 | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- so100
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so100",
"total_episodes": 10,
"total_frames": 4946,
"total_tasks": 1,
"total_videos": 20,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:10"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.laptop": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.phone": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
PhilSad/SCP-Wiki-Dataset | PhilSad | 2024-12-14T15:09:58Z | 20 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-12-14T15:09:52Z | 0 | ---
dataset_info:
features:
- name: description
dtype: string
- name: content
dtype: string
splits:
- name: train
num_bytes: 17570353.015576325
num_examples: 2311
- name: test
num_bytes: 1953950.984423676
num_examples: 257
download_size: 11421492
dataset_size: 19524304.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
RyanYr/reflect_mmlumathpro_nonmrkv-test_t4_crtc | RyanYr | 2025-01-27T21:11:22Z | 20 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-01-27T16:40:52Z | 0 | ---
dataset_info:
features:
- name: question_id
dtype: int64
- name: original_question
dtype: string
- name: options
sequence: string
- name: answer
dtype: string
- name: answer_index
dtype: int64
- name: cot_content
dtype: string
- name: category
dtype: string
- name: src
dtype: string
- name: problem
dtype: string
- name: alt_answer
dtype: string
- name: response@0
sequence: string
- name: response@1
sequence: string
- name: response@2
sequence: string
- name: response@3
sequence: string
- name: response@4
sequence: string
- name: response@5
sequence: string
- name: response@6
sequence: string
- name: response@7
sequence: string
splits:
- name: train
num_bytes: 21694922
num_examples: 1351
download_size: 8184506
dataset_size: 21694922
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
MInference/SCBench | MInference | 2024-12-13T07:06:57Z | 104 | 0 | [
"license:mit",
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-12-12T18:43:55Z | 0 | ---
license: mit
dataset_info:
- config_name: multi_turn_choice_eng
features:
- name: context
dtype: string
- name: multi_turns
list:
- name: answer
dtype: string
- name: input
dtype: string
- name: options
sequence: string
- name: id
dtype: int64
splits:
- name: train
num_bytes: 46482955
num_examples: 58
download_size: 28590613
dataset_size: 46482955
- config_name: multi_turn_kv
features:
- name: id
dtype: int64
- name: context
dtype: string
- name: multi_turns
list:
- name: answer
dtype: string
- name: input
dtype: string
splits:
- name: train
num_bytes: 20071200
num_examples: 100
download_size: 18278186
dataset_size: 20071200
- config_name: multi_turn_many_shot
features:
- name: context
dtype: string
- name: multi_turns
list:
- name: answer
dtype: string
- name: input
dtype: string
- name: id
dtype: int64
- name: task
dtype: string
splits:
- name: train
num_bytes: 4734315
num_examples: 54
download_size: 99406
dataset_size: 4734315
- config_name: multi_turn_mf
features:
- name: id
dtype: int64
- name: context
sequence: int64
- name: multi_turns
list:
- name: answer
dtype: int64
- name: input
dtype: string
splits:
- name: train
num_bytes: 24065100
num_examples: 100
download_size: 3766479
dataset_size: 24065100
- config_name: multi_turn_prefix_suffix
features:
- name: context
dtype: string
- name: multi_turns
list:
- name: answer
dtype: string
- name: input
dtype: string
splits:
- name: train
num_bytes: 17498600
num_examples: 100
download_size: 16417345
dataset_size: 17498600
- config_name: multi_turn_qa_chn
features:
- name: context
dtype: string
- name: multi_turns
list:
- name: answer
dtype: string
- name: input
dtype: string
- name: id
dtype: int64
splits:
- name: train
num_bytes: 180437341
num_examples: 35
download_size: 115936454
dataset_size: 180437341
- config_name: multi_turn_qa_eng
features:
- name: context
dtype: string
- name: multi_turns
list:
- name: answer
dtype: string
- name: input
dtype: string
- name: id
dtype: int64
splits:
- name: train
num_bytes: 58359967
num_examples: 69
download_size: 35648660
dataset_size: 58359967
- config_name: multi_turn_repoqa
features:
- name: context
dtype: string
- name: id
dtype: int64
- name: multi_turns
list:
- name: answer
dtype: string
- name: code_ratio
dtype: float64
- name: description
dtype: string
- name: end_byte
dtype: int64
- name: end_line
dtype: int64
- name: func
dtype: string
- name: global_end_byte
dtype: int64
- name: global_end_line
dtype: int64
- name: global_start_byte
dtype: int64
- name: global_start_line
dtype: int64
- name: input
dtype: string
- name: name
dtype: string
- name: path
dtype: string
- name: start_byte
dtype: int64
- name: start_line
dtype: int64
- name: lang
dtype: string
- name: repo
dtype: string
splits:
- name: train
num_bytes: 24847710
num_examples: 88
download_size: 4427455
dataset_size: 24847710
- config_name: multi_turn_repoqa_and_kv
features:
- name: context
dtype: string
- name: id
dtype: int64
- name: multi_turns
list:
- name: answer
dtype: string
- name: code_ratio
dtype: float64
- name: description
dtype: string
- name: end_byte
dtype: int64
- name: end_line
dtype: int64
- name: func
dtype: string
- name: global_end_byte
dtype: int64
- name: global_end_line
dtype: int64
- name: global_start_byte
dtype: int64
- name: global_start_line
dtype: int64
- name: input
dtype: string
- name: name
dtype: string
- name: path
dtype: string
- name: start_byte
dtype: int64
- name: start_line
dtype: int64
- name: task
dtype: string
- name: lang
dtype: string
- name: repo
dtype: string
splits:
- name: train
num_bytes: 25019328
num_examples: 88
download_size: 8583611
dataset_size: 25019328
- config_name: multi_turn_summary
features:
- name: context
dtype: string
- name: multi_turns
list:
- name: answer
dtype: string
- name: input
dtype: string
- name: id
dtype: int64
splits:
- name: train
num_bytes: 28622955
num_examples: 70
download_size: 14245669
dataset_size: 28622955
- config_name: multi_turn_summary_with_needles
features:
- name: context
dtype: string
- name: multi_turns
list:
- name: answer
dtype: string
- name: input
dtype: string
- name: task
dtype: string
- name: id
dtype: int64
splits:
- name: train
num_bytes: 28629718
num_examples: 70
download_size: 14233712
dataset_size: 28629718
- config_name: multi_turn_vt
features:
- name: index
dtype: int64
- name: input
dtype: string
- name: length
dtype: int64
- name: multi_turns
list:
- name: answer
sequence: string
- name: input
dtype: string
splits:
- name: train
num_bytes: 42549030
num_examples: 90
download_size: 2160077
dataset_size: 42549030
configs:
- config_name: multi_turn_choice_eng
data_files:
- split: train
path: multi_turn_choice_eng/train-*
- config_name: multi_turn_kv
data_files:
- split: train
path: multi_turn_kv/train-*
- config_name: multi_turn_many_shot
data_files:
- split: train
path: multi_turn_many_shot/train-*
- config_name: multi_turn_mf
data_files:
- split: train
path: multi_turn_mf/train-*
- config_name: multi_turn_prefix_suffix
data_files:
- split: train
path: multi_turn_prefix_suffix/train-*
- config_name: multi_turn_qa_chn
data_files:
- split: train
path: multi_turn_qa_chn/train-*
- config_name: multi_turn_qa_eng
data_files:
- split: train
path: multi_turn_qa_eng/train-*
- config_name: multi_turn_repoqa
data_files:
- split: train
path: multi_turn_repoqa/train-*
- config_name: multi_turn_repoqa_and_kv
data_files:
- split: train
path: multi_turn_repoqa_and_kv/train-*
- config_name: multi_turn_summary
data_files:
- split: train
path: multi_turn_summary/train-*
- config_name: multi_turn_summary_with_needles
data_files:
- split: train
path: multi_turn_summary_with_needles/train-*
- config_name: multi_turn_vt
data_files:
- split: train
path: multi_turn_vt/train-*
---
# SCBench
[[Paper]](https://drive.google.com/file/d/1_DFu11V7HbktvEMRqMUAWGm7DTkVXlOR/view?usp=drive_link)
[[Code]](https://github.com/microsoft/MInference/SCBench)

SCBench (SharedContextBench) is a comprehensive benchmark to evaluate efficient long-context methods in a KV cache-centric perspective, analyzing their performance across **the full KV cache lifecycle (generation, compression, retrieval, and loading)** in real-world scenarios where context memory (KV cache) is shared and reused across multiple requests.
## Dataset

SCBench covers 12 diverse tasks that test four key long-context capabilities: string retrieval, semantic retrieval, global information processing, and multi-tasking.
### String Retrieval
- **Retr.KV**: Tests key-value lookup in large JSON objects with random, incompressible content
- **Retr.Prefix-Suffix**: Evaluates finding strings with specific prefix and suffix patterns
- **Retr.MultiHop**: Assesses multi-hop variable tracing capabilities in long inputs
### Semantic Retrieval
- **Code.RepoQA**: Function retrieval from large codebases based on natural language descriptions
- **Language QA**: Includes English QA, Chinese QA, and multi-choice questions on long texts
- Requires semantic understanding of lengthy inputs
### Global Information Processing
- **Many-shot ICL**: Tests in-context learning with hundreds of examples
- **Math.Find**: Statistical tasks on large arrays
- **En.Sum**: Summarization of documents
- Requires global information processing or aggregation
### Multi-Tasking
- **Mix.Sum+NIAH**: Combines summarization with needle-in-haystack search
- **Mix.RepoQA+KV**: Integrates code function retrieval with key-value lookup
- Requires multi-tasking or multi-step reasoning
## Two Shared Context Modes
The benchmark evaluates these tasks across two shared context modes:
- **Multi-turn Mode**: Caches context within single sessions
- **Multi-request Mode**: Shares context across multiple sessions
## Compared to previous long-context benchmarks

Our SCBench is the first long-context benchmark that covers single-turn, multi-turn, and multi-request scenarios. In addition, our implementation also involves KV cache reuse techniques, thereby providing a more comprehensive analysis of the full KV cache lifecycle of efficient long-context methods.
## Results and Findings

Our SCBench reveals the following key insights:
### Finding 1: Sub-O(n) Memory is Problematic in Multi-Request/Multi-Turn Decoding
- Sparse decoding methods with sub-O(n) memory perform well on first queries but lose accuracy in subsequent requests
- Methods maintaining O(n) memory with sub-O(n²) computation during pre-filling can better approximate full attention accuracy across multiple queries
### Finding 2: Task Performance Shows Varying Decline Patterns
- Sparse KV cache methods excel in tasks requiring global information processing
- O(n) memory is essential for tasks involving exact match retrieval
### Finding 3: Performance vs Compression Rate
- All methods show performance degradation as compression rates increase
- Sub-O(n) memory methods exhibit significant drop at 1/4 compression rate
- Methods like RetrievalAttention and KIVI that maintain O(n) memory with sparse decoding show better resilience at higher compression rates
### Finding 4: Issues with Long-Generation Scenarios
- Attention distribution shifts significantly as generation length and number of rounds increase
- This out-of-distribution (OOD) issue impacts performance even for O(n) memory methods
### Finding 5: Dynamic vs Static Patterns
- Dynamic sparse patterns generally outperform static patterns
## Citation
```bibtex
@article{li2024scbench,
title={SCBench: A KV cache-centric analysis of long-context methods},
author={Li, Yucheng and Jiang, Huiqiang and Wu, Qianhui and Luo, Xufang and Ahn, Surin and Zhang, Chengruidong and Abdi, Amir H and Li, Dongsheng and Gao, Jianfeng and Yang, Yuqing and Qiu, Lili},
  journal={arXiv preprint arXiv:2412.10319},
year={2024}
}
``` |
dgambettaphd/D_gen1_run2_llama2-7b_wiki_doc1000_real32_synt96_vuw | dgambettaphd | 2024-12-17T10:01:08Z | 17 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-12-17T10:01:05Z | 0 | ---
dataset_info:
features:
- name: id
dtype: int64
- name: doc
dtype: string
splits:
- name: train
num_bytes: 373990
num_examples: 1000
download_size: 211688
dataset_size: 373990
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
dcml0714/StyleSet | dcml0714 | 2025-06-24T11:21:32Z | 37 | 1 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2506.05984",
"region:us"
] | [] | 2025-06-09T03:21:45Z | 0 | ---
dataset_info:
- config_name: role_playing
features:
- name: ID
dtype: int64
- name: text_0
dtype: string
- name: text_1
dtype: string
- name: audio_0
dtype:
audio:
sampling_rate: 16000
- name: audio_1
dtype:
audio:
sampling_rate: 16000
- name: source
dtype: string
- name: speaker1
dtype: string
- name: speaker2
dtype: string
splits:
- name: test
num_bytes: 182310504.0
num_examples: 20
download_size: 148908359
dataset_size: 182310504.0
- config_name: voice_instruction_following
features:
- name: ID
dtype: int64
- name: text_1
dtype: string
- name: text_2
dtype: string
- name: audio_1
dtype:
audio:
sampling_rate: 16000
- name: audio_2
dtype:
audio:
sampling_rate: 16000
splits:
- name: test
num_bytes: 36665909.0
num_examples: 20
download_size: 35109899
dataset_size: 36665909.0
configs:
- config_name: role_playing
data_files:
- split: test
path: role_playing/test-*
- config_name: voice_instruction_following
data_files:
- split: test
path: voice_instruction_following/test-*
---
# StyleSet
**WARNING**: This dataset contains some profane words.
**A spoken language benchmark for evaluating speaking-style-related speech generation**
Released in our paper, [Audio-Aware Large Language Models as Judges for Speaking Styles](https://arxiv.org/abs/2506.05984)
This dataset is released by NTU Speech Lab under the MIT license.

---
## Tasks
1. **Voice Style Instruction Following**
- Reproduce a given sentence verbatim.
- Match specified prosodic styles (emotion, volume, pace, emphasis, pitch, non-verbal cues).
2. **Role Playing**
- Continue a two-turn dialogue prompt in character.
- Generate the next utterance with appropriate prosody and style.
- The dataset is modified from IEMOCAP with the consent of the authors. Please refer to [IEMOCAP](https://sail.usc.edu/iemocap/) for details and the original data of IEMOCAP. We do not redistribute the data here.
---
## Evaluation
We use ALLM-as-a-judge for evaluation. Currently, we found that `gemini-2.5-pro-0506` reaches the best agreement with human evaluators.
The complete evaluation prompt and evaluation pipelines can be found in Table 3 to Table 5 in our paper.
## Citation
If you use StyleSet or find ALLM-as-a-judge useful, please cite our paper by
```
@misc{chiang2025audioawarelargelanguagemodels,
title={Audio-Aware Large Language Models as Judges for Speaking Styles},
author={Cheng-Han Chiang and Xiaofei Wang and Chung-Ching Lin and Kevin Lin and Linjie Li and Radu Kopetz and Yao Qian and Zhendong Wang and Zhengyuan Yang and Hung-yi Lee and Lijuan Wang},
year={2025},
eprint={2506.05984},
archivePrefix={arXiv},
primaryClass={eess.AS},
url={https://arxiv.org/abs/2506.05984},
}
``` |
YounesHouhou/filter | YounesHouhou | 2025-05-22T12:43:35Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"region:us",
"LeRobot"
] | [
"robotics"
] | 2025-05-22T12:43:32Z | 0 | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": null,
"total_episodes": 1,
"total_frames": 600,
"total_tasks": 1,
"total_videos": 1,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:1"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"observation.state": {
"dtype": "float32",
"shape": [
9
],
"names": {
"motors": [
"Measurement_RIGHT_ARM_SHOULDER_PITCH",
"Measurement_RIGHT_ARM_SHOULDER_ROLL",
"Measurement_RIGHT_ARM_BICEP_YAW",
"Measurement_RIGHT_ARM_ELBOW_PITCH",
"Measurement_RIGHT_ARM_WRIST_YAW",
"Measurement_RIGHT_ARM_WRIST_PITCH",
"Measurement_RIGHT_ARM_WRIST_ROLL",
"Measurement_RIGHT_ARM_THUMB",
"Measurement_RIGHT_ARM_FINGERS"
]
}
},
"action": {
"dtype": "float32",
"shape": [
9
],
"names": {
"motors": [
"MpcInput_RIGHT_ARM_SHOULDER_PITCH",
"MpcInput_RIGHT_ARM_SHOULDER_ROLL",
"MpcInput_RIGHT_ARM_BICEP_YAW",
"MpcInput_RIGHT_ARM_ELBOW_PITCH",
"MpcInput_RIGHT_ARM_WRIST_YAW",
"MpcInput_RIGHT_ARM_WRIST_PITCH",
"MpcInput_RIGHT_ARM_WRIST_ROLL",
"Target_RIGHT_ARM_THUMB",
"Target_RIGHT_ARM_FINGERS"
]
}
},
"next.done": {
"dtype": "bool",
"shape": [
1
]
},
"observation.images.camera_head": {
"dtype": "video",
"shape": [
3,
480,
640
],
"names": [
"channels",
"height",
"width"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
test-gen/livecodebench_qwen-0.5b-random_t0.0_n1_generated_tests_updated | test-gen | 2025-05-23T02:45:12Z | 24 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-15T02:30:33Z | 0 | ---
dataset_info:
features:
- name: question_title
dtype: string
- name: question_content
dtype: string
- name: question_id
dtype: string
- name: contest_id
dtype: string
- name: test_id
dtype: int64
- name: contest_date
dtype: timestamp[us]
- name: starter_code
dtype: string
- name: function_name
dtype: string
- name: difficulty
dtype: string
- name: test
dtype: string
- name: verification_info
struct:
- name: language
dtype: string
- name: test_cases
sequence: string
- name: new_verification_info
struct:
- name: language
dtype: string
- name: test_cases
sequence: string
splits:
- name: test
num_bytes: 183440
num_examples: 182
download_size: 77951
dataset_size: 183440
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
DuongTrongChi/vov_crawler | DuongTrongChi | 2024-10-25T15:22:36Z | 29 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-10-25T15:22:13Z | 0 | ---
dataset_info:
features:
- name: title
dtype: string
- name: content
sequence: string
- name: metadata
struct:
- name: date
dtype: string
- name: url
dtype: string
splits:
- name: train
num_bytes: 1085141731
num_examples: 372528
download_size: 510136321
dataset_size: 1085141731
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
flozi00/german-canary-asr-0324 | flozi00 | 2024-03-19T10:48:50Z | 270 | 6 | [
"task_categories:automatic-speech-recognition",
"language:de",
"size_categories:100K<n<1M",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"automatic-speech-recognition"
] | 2024-03-16T10:46:35Z | 1 | ---
dataset_info:
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
decode: false
- name: transkription
dtype: string
- name: source
dtype: string
splits:
- name: train
num_bytes: 41511776468.673
num_examples: 985257
download_size: 142197574339
dataset_size: 41511776468.673
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
task_categories:
- automatic-speech-recognition
language:
- de
pretty_name: German Canary ASR
---
# Dataset Beschreibung
## Allgemeine Informationen
Dieser Datensatz ist eine Kombination aus drei verschiedenen Quellen für die deutsche Sprache: Commonvoice 16.1, Voxpopuli und Multilingual librispeech. Die Daten wurden gefiltert, normalisiert und grammatikalisch korrigiert.
Die drei Datensätze wurden erneut transkribiert und mit den entsprechenden Audio-Daten abgeglichen, um genaue Transkriptionen zu erhalten. Anschließend wurde ein Abgleich mit den Originaltranskripten durchgeführt, um fehlerhafte Transkriptionen zu korrigieren oder zu entfernen, sofern dies möglich war.
Für diese Aufgabe wurde das Nvidia Canary 1b Modell genutzt.
### Commonvoice 16.1
Common Voice ist ein öffentlich verfügbarer Sprachdatensatz, der durch Stimmen freiwilliger Mitwirkender auf der ganzen Welt erstellt wird. Der Datensatz enthält Aufnahmen von Sätzen in verschiedenen Sprachen, einschließlich Deutsch.
### Voxpopuli
Die Rohdaten für diesen Teil des Datensatzes stammen aus den Aufzeichnungen von Veranstaltungen des Europäischen Parlaments von 2009 bis 2020. Wir danken dem Europäischen Parlament dafür, dass es diese Materialien erstellt und geteilt hat.
### Multilingual librispeech
Der Multilingual LibriSpeech (MLS) Datensatz ist ein umfangreicher mehrsprachiger Korpus, der sich für die Sprachforschung eignet. Der Datensatz basiert auf vorgelesenen Hörbüchern von LibriVox und enthält auch deutschsprachige Aufnahmen.
## Datenverarbeitungsschritte
Um einen qualitativ hochwertigen deutschen Sprachdatensatz zu erstellen, wurden folgende Schritte durchgeführt:
1. Filterung: Es wurden nur die deutschen Sätze aus den jeweiligen Quelldatensätzen extrahiert.
2. Normalisierung: Die Texte wurden auf eine einheitliche Form gebracht, um Inkonsistenzen zu beseitigen.
3. Grammatikkorrektur: Fehlerhafte Grammatik wurde korrigiert, um die Qualität der Sätze zu verbessern.
## Verwendungszweck
Dieser kombinierte deutsche Sprachdatensatz kann für verschiedene Zwecke verwendet werden:
- ASR (Automatic Speech Recognition) Modelltraining
- NLP (Natural Language Processing) Forschung
- Text-to-Speech Anwendungen
Bitte beachten Sie jedoch bei der Verwendung dieses Datensatzes die Lizenzbedingungen der einzelnen Quellen sowie etwaige Einschränkungen oder Richtlinien bezüglich des Datenschutzes oder Urheberrechts. |
chiyuanhsiao/llama-questions-ASR_GT-score | chiyuanhsiao | 2025-01-01T13:58:17Z | 15 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-12-31T08:39:20Z | 0 | ---
dataset_info:
features:
- name: question
dtype: string
- name: answer
dtype: string
- name: audio
dtype: audio
- name: question_unit
sequence: int64
- name: response_interleaf
dtype: string
- name: response_text
dtype: string
- name: response_speech
dtype: audio
- name: response_asr
dtype: string
- name: speech_score
dtype: int64
- name: text_score
dtype: int64
splits:
- name: test
num_bytes: 176590033.0
num_examples: 300
download_size: 157352925
dataset_size: 176590033.0
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
jasren/lerobot-test-10 | jasren | 2025-04-20T03:26:05Z | 24 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot"
] | [
"robotics"
] | 2025-04-20T03:26:01Z | 0 | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so100",
"total_episodes": 6,
"total_frames": 3302,
"total_tasks": 1,
"total_videos": 12,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:6"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.laptop": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.phone": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
nhorner/record-test | nhorner | 2025-06-15T19:52:26Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot"
] | [
"robotics"
] | 2025-06-15T19:52:16Z | 0 | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so101_follower",
"total_episodes": 2,
"total_frames": 3557,
"total_tasks": 1,
"total_videos": 2,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:2"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"shoulder_pan.pos",
"shoulder_lift.pos",
"elbow_flex.pos",
"wrist_flex.pos",
"wrist_roll.pos",
"gripper.pos"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"shoulder_pan.pos",
"shoulder_lift.pos",
"elbow_flex.pos",
"wrist_flex.pos",
"wrist_roll.pos",
"gripper.pos"
]
},
"observation.images.front": {
"dtype": "video",
"shape": [
1080,
1920,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 1080,
"video.width": 1920,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
psg777/so100test1 | psg777 | 2025-05-28T17:55:53Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"so101",
"tutorial"
] | [
"robotics"
] | 2025-05-28T17:55:47Z | 0 | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- so101
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so101",
"total_episodes": 5,
"total_frames": 3457,
"total_tasks": 1,
"total_videos": 15,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:5"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.base": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"observation.images.gripper": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"observation.images.bird": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
THU-KEG/LongWriter-Zero-RLData | THU-KEG | 2025-06-24T03:12:49Z | 72 | 2 | [
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2506.18841",
"arxiv:2506.01234",
"region:us"
] | [] | 2025-06-18T09:11:45Z | 2 | ---
license: apache-2.0
---
# LongWriter-Zero RL Data
<p align="center">
🤗 <a href="https://huggingface.co/THU-KEG/LongWriter-Zero-32B" target="_blank">[Model]</a> • 📃 <a href="https://arxiv.org/abs/2506.18841" target="_blank">[Paper]</a> • 💾 <a href="https://huggingface.co/datasets/THU-KEG/LongWriter-Zero-RLData" target="_blank">[Dataset Card]</a>
</p>
**LongWriter-Zero RL Data** is designed for ultra-long text generation via reinforcement learning. The dataset consists of conversational queries paired with *length-range tags*, which specify the desired output span (measured in words or Chinese characters).
These annotations are used to train the **LongWriter-Zero** model, enabling it to consistently generate passages exceeding **10,000 words**.
## Dataset at a Glance
| Field | Type | Description |
|---------|--------|---------------------------------------------------------------------------------------|
| `idx` | `int` | Unique example identifier |
| `query` | string | User instruction / prompt (English or Chinese) |
| `label` | object | JSON dict `{"range": [low, high]}` denoting the target word‑count interval |
<!-- ---
<!-- ## Citation
If you find **LongWriter‑zero RLData** useful, please cite:
```bibtex
@article{wu2025longwriterzero,
title = {LongWriter-zero: Length-Controlled Reinforcement Learning for 10,000-Word Generation},
author = {Yuhao Wu and Zhiqiang Hu and Yushi Bai and Jie Tang},
journal = {arXiv preprint arXiv:2506.01234},
year = {2025}
}
``` -->
*Happy long-form writing!* |
kobybar/tokenized_roots_dataset_mbert | kobybar | 2024-11-23T19:43:53Z | 54 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-11-23T19:39:00Z | 0 | ---
dataset_info:
features:
- name: text
dtype: string
- name: input_ids
sequence: int32
- name: token_type_ids
sequence: int8
- name: attention_mask
sequence: int8
splits:
- name: train
num_bytes: 13672989742
num_examples: 7044
download_size: 6246959869
dataset_size: 13672989742
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
IntMeGroup/env | IntMeGroup | 2025-06-04T06:44:11Z | 54 | 0 | [
"license:apache-2.0",
"region:us"
] | [] | 2025-05-07T12:45:21Z | 0 | ---
license: apache-2.0
---
|
zenless-lab/jmmlu | zenless-lab | 2024-12-26T00:51:34Z | 19 | 0 | [
"language:ja",
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-12-18T10:14:10Z | 0 | ---
language:
- ja
dataset_info:
features:
- name: question
dtype: large_string
- name: choice0
dtype: large_string
- name: choice1
dtype: large_string
- name: choice2
dtype: large_string
- name: choice3
dtype: large_string
- name: split
dtype: large_string
- name: label
dtype:
class_label:
names:
'0': '0'
'1': '1'
'2': '2'
'3': '3'
splits:
- name: train
num_bytes: 2325309.4389178525
num_examples: 5677
- name: test
num_bytes: 581634.5610821474
num_examples: 1420
download_size: 1751049
dataset_size: 2906944.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
s-nlp/paradetox | s-nlp | 2025-04-02T15:20:04Z | 853 | 9 | [
"task_categories:text-generation",
"language:en",
"license:openrail++",
"size_categories:10K<n<100K",
"format:csv",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"detoxification"
] | [
"text-generation"
] | 2022-05-19T17:12:06Z | 0 | ---
license: openrail++
task_categories:
- text-generation
language:
- en
tags:
- detoxification
size_categories:
- 10K<n<100K
---
# ParaDetox: Text Detoxification with Parallel Data (English)
This repository contains information about ParaDetox dataset -- the first parallel corpus for the detoxification task -- as well as models and evaluation methodology for the detoxification of English texts. The original paper ["ParaDetox: Detoxification with Parallel Data"](https://aclanthology.org/2022.acl-long.469/) was presented at ACL 2022 main conference.
📰 **Updates**
**[2025] !!!NOW OPEN!!! TextDetox CLEF2025 shared task: for even more -- 15 languages!** [website](https://pan.webis.de/clef25/pan25-web/text-detoxification.html) 🤗[Starter Kit](https://huggingface.co/collections/textdetox/)
**[2025] COLNG2025**: Daryna Dementieva, Nikolay Babakov, Amit Ronen, Abinew Ali Ayele, Naquee Rizwan, Florian Schneider, Xintong Wang, Seid Muhie Yimam, Daniil Alekhseevich Moskovskiy, Elisei Stakovskii, Eran Kaufman, Ashraf Elnagar, Animesh Mukherjee, and Alexander Panchenko. 2025. ***Multilingual and Explainable Text Detoxification with Parallel Corpora***. In Proceedings of the 31st International Conference on Computational Linguistics, pages 7998–8025, Abu Dhabi, UAE. Association for Computational Linguistics. [pdf](https://aclanthology.org/2025.coling-main.535/)
**[2024]** We have also created versions of ParaDetox in more languages. You can checkout a [RuParaDetox](https://huggingface.co/datasets/s-nlp/ru_paradetox) dataset as well as a [Multilingual TextDetox](https://huggingface.co/textdetox) project that includes 9 languages.
Corresponding papers:
* [MultiParaDetox: Extending Text Detoxification with Parallel Data to New Languages](https://aclanthology.org/2024.naacl-short.12/) (NAACL 2024)
* [Overview of the multilingual text detoxification task at pan 2024](https://ceur-ws.org/Vol-3740/paper-223.pdf) (CLEF Shared Task 2024)
## ParaDetox Collection Pipeline
<img alt="Collection Pipeline" src="generation_pipeline_blue-1.png">
The ParaDetox Dataset collection was done via [Toloka.ai](https://toloka.ai) crowdsource platform. The collection was done in three steps:
* *Task 1:* **Generation of Paraphrases**: The first crowdsourcing task asks users to eliminate toxicity in a given sentence while keeping the content.
* *Task 2:* **Content Preservation Check**: We show users the generated paraphrases along with their original variants and ask them to indicate if they have close meanings.
* *Task 3:* **Toxicity Check**: Finally, we check if the workers succeeded in removing toxicity.
All these steps were done to ensure high quality of the data and make the process of collection automated. For more details please refer to the original paper.
## ParaDetox Dataset
As a result, we get paraphrases for 11,939 toxic sentences (on average 1.66 paraphrases per sentence), 19,766 paraphrases total.
In addition to all ParaDetox dataset, we also make public [samples](https://huggingface.co/datasets/s-nlp/en_non_detoxified) that were marked by annotators as "cannot rewrite" in *Task 1* of crowdsource pipeline.
# Detoxification evaluation
The automatic evaluation of the model were produced based on three parameters:
* *style transfer accuracy* (**STA**): percentage of nontoxic outputs identified by a style classifier. We pretrained [toxicity classifier](https://huggingface.co/s-nlp/roberta_toxicity_classifier) on Jigsaw data and put it online in HuggingFace🤗 [repo](https://huggingface.co/s-nlp/roberta_toxicity_classifier).
* *content preservation* (**SIM**): cosine similarity between the embeddings of the original text and the output computed with the model of [Wieting et al. (2019)](https://aclanthology.org/P19-1427/).
* *fluency* (**FL**): percentage of fluent sentences identified by a RoBERTa-based classifier of linguistic acceptability trained on the [CoLA dataset](https://nyu-mll.github.io/CoLA/).
All code used for our experiments to evluate different detoxifcation models can be run via Colab notebook [](https://colab.research.google.com/drive/1xTqbx7IPF8bVL2bDCfQSDarA43mIPefE?usp=sharing)
## Detoxification model
The first *seq2seq* SOTA for the text detoxification task in English -- BART (base) model trained on ParaDetox dataset -- we release online in HuggingFace🤗 [repo](https://huggingface.co/s-nlp/bart-base-detox).
You can also check out our [web-demo](https://detoxifier.nlp.zhores.net/junction/).
## Citation
```
@inproceedings{logacheva-etal-2022-paradetox,
title = "{P}ara{D}etox: Detoxification with Parallel Data",
author = "Logacheva, Varvara and
Dementieva, Daryna and
Ustyantsev, Sergey and
Moskovskiy, Daniil and
Dale, David and
Krotova, Irina and
Semenov, Nikita and
Panchenko, Alexander",
booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.acl-long.469",
pages = "6804--6818",
abstract = "We present a novel pipeline for the collection of parallel data for the detoxification task. We collect non-toxic paraphrases for over 10,000 English toxic sentences. We also show that this pipeline can be used to distill a large existing corpus of paraphrases to get toxic-neutral sentence pairs. We release two parallel corpora which can be used for the training of detoxification models. To the best of our knowledge, these are the first parallel datasets for this task.We describe our pipeline in detail to make it fast to set up for a new language or domain, thus contributing to faster and easier development of new parallel resources.We train several detoxification models on the collected data and compare them with several baselines and state-of-the-art unsupervised approaches. We conduct both automatic and manual evaluations. All models trained on parallel data outperform the state-of-the-art unsupervised models by a large margin. This suggests that our novel datasets can boost the performance of detoxification systems.",
}
```
and
```
@inproceedings{dementieva2021crowdsourcing,
title = "Crowdsourcing of Parallel Corpora: the Case of Style Transfer for Detoxification",
author = {Dementieva, Daryna
and Ustyantsev, Sergey
and Dale, David
and Kozlova, Olga
and Semenov, Nikita
and Panchenko, Alexander
and Logacheva, Varvara},
booktitle = "Proceedings of the 2nd Crowd Science Workshop: Trust, Ethics, and Excellence in Crowdsourced Data Management at Scale co-located with 47th International Conference on Very Large Data Bases (VLDB 2021 (https://vldb.org/2021/))",
year = "2021",
address = "Copenhagen, Denmark",
publisher = "CEUR Workshop Proceedings",
pages = "35--49",
url={http://ceur-ws.org/Vol-2932/paper2.pdf}
}
```
## Contacts
If you find any issues, do not hesitate to report them via [Github Issues](https://github.com/s-nlp/paradetox/issues).
For any questions, or to obtain the TEST SET, please contact: Daryna Dementieva ([email protected]), Daniil Moskovskiy ([email protected]), or Alexander Panchenko ([email protected])
**Dataset Card and Paper corresponding contact**:
[Daryna Dementieva](https://huggingface.co/dardem) |
rajesh-lm/data_feed | rajesh-lm | 2025-02-01T06:58:29Z | 15 | 0 | [
"license:apache-2.0",
"region:us"
] | [] | 2025-02-01T06:58:29Z | 0 | ---
license: apache-2.0
---
|
kothasuhas/llama-3b-gold-15M-1.5MSNIS-iter1-4-26-generations_SNIS_2048_iter2-426-init-i1_baseN1.50M_N1.50M | kothasuhas | 2025-04-27T05:20:21Z | 33 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-27T05:19:04Z | 0 | ---
dataset_info:
features:
- name: text
dtype: string
- name: log_weight
dtype: float32
- name: sampling_p_scaled
dtype: float64
- name: sampling_p_temperature_scaled
dtype: float64
splits:
- name: train
num_bytes: 2446360037
num_examples: 1500000
- name: validation
num_bytes: 2444848
num_examples: 1000
download_size: 1311908664
dataset_size: 2448804885
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
mlfoundations-dev/get_question_answer_codeforces | mlfoundations-dev | 2025-03-14T18:44:27Z | 28 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-14T18:23:27Z | 0 | ---
dataset_info:
features:
- name: question_answer_string
dtype: string
splits:
- name: train
num_bytes: 2408636233
num_examples: 47780
download_size: 969021289
dataset_size: 2408636233
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
vamshi0317/team4-888_CodeforcesProblems_ts_cleaned_summarized_v2 | vamshi0317 | 2025-04-22T22:41:01Z | 22 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-22T22:33:43Z | 0 | ---
dataset_info:
features:
- name: Problem Description
dtype: string
- name: Tag
dtype: string
- name: math
dtype: bool
- name: greedy
dtype: bool
- name: implementation
dtype: bool
- name: dp
dtype: bool
- name: data structures
dtype: bool
- name: constructive algorithms
dtype: bool
- name: brute force
dtype: bool
- name: binary search
dtype: bool
- name: sortings
dtype: bool
- name: graphs
dtype: bool
splits:
- name: train
num_bytes: 20516156
num_examples: 9285
- name: validation
num_bytes: 2610352
num_examples: 1161
- name: test
num_bytes: 2581096
num_examples: 1161
download_size: 11177161
dataset_size: 25707604
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
reasoning-proj/c_dfiltered_Llama-3_1-Nemotron-Nano-8B-v1_madversarial_continue_unrelated_t10 | reasoning-proj | 2025-05-09T07:06:27Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-09T01:28:54Z | 0 | ---
dataset_info:
features:
- name: question
dtype: string
- name: answer_content
dtype: string
- name: reference_answer
dtype: string
- name: id
dtype: string
- name: metadata
struct:
- name: question_license
dtype: string
- name: question_source
dtype: string
- name: model_name
dtype: string
- name: verifier_score
dtype: int64
- name: mutated_answer_content
dtype: string
- name: continuation_1
dtype: string
- name: complete_answer_1
dtype: string
- name: continuation_2
dtype: string
- name: complete_answer_2
dtype: string
- name: continuation_3
dtype: string
- name: complete_answer_3
dtype: string
- name: continuation_4
dtype: string
- name: complete_answer_4
dtype: string
- name: continuation_5
dtype: string
- name: complete_answer_5
dtype: string
- name: continuation_6
dtype: string
- name: complete_answer_6
dtype: string
- name: continuation_7
dtype: string
- name: complete_answer_7
dtype: string
- name: continuation_8
dtype: string
- name: complete_answer_8
dtype: string
- name: continuation_model
dtype: string
splits:
- name: train
num_bytes: 125443699
num_examples: 600
download_size: 46970431
dataset_size: 125443699
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Ktzoras/shipping_llm_results_3k_sample | Ktzoras | 2025-05-29T08:32:30Z | 38 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-29T08:32:12Z | 0 | ---
dataset_info:
features:
- name: link
dtype: string
- name: date
dtype: timestamp[ns]
- name: title
dtype: string
- name: content
dtype: string
- name: vectors_L6_v2
sequence: float64
- name: vectors_distil
sequence:
sequence: int64
- name: classes_distil
dtype: string
- name: Scale_fe
dtype: string
- name: Type of Vessel_fe
dtype: string
- name: Vessel Size_fe
dtype: string
- name: Sea Route_fe
dtype: string
- name: Duration of Positive Impact_fe
dtype: string
- name: Impact_fe
dtype: string
- name: Hire Rate Impact_fe
dtype: string
- name: Scale_rag
dtype: string
- name: Type of Vessel_rag
dtype: string
- name: Vessel Size_rag
dtype: string
- name: Sea Route_rag
dtype: string
- name: Duration of Positive Impact_rag
dtype: string
- name: Impact_rag
dtype: string
- name: Hire Rate Impact_rag
dtype: string
- name: Scale_idfe
dtype: string
- name: Type of Vessel_idfe
dtype: string
- name: Vessel Size_idfe
dtype: string
- name: Sea Route_idfe
dtype: string
- name: Duration of Positive Impact_idfe
dtype: string
- name: Impact_idfe
dtype: string
- name: Hire Rate Impact_idfe
dtype: string
- name: Scale_idrbfe
dtype: string
- name: Type of Vessel_idrbfe
dtype: string
- name: Vessel Size_idrbfe
dtype: string
- name: Sea Route_idrbfe
dtype: string
- name: Duration of Positive Impact_idrbfe
dtype: string
- name: Impact_idrbfe
dtype: string
- name: Hire Rate Impact_idrbfe
dtype: string
- name: Hire rate impact_rag
dtype: string
- name: __index_level_0__
dtype: int64
splits:
- name: train
num_bytes: 38557055
num_examples: 3000
download_size: 18290021
dataset_size: 38557055
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.