datasetId
large_stringlengths 6
107
| author
large_stringlengths 3
34
| last_modified
large_stringdate 2021-05-20 00:57:22
2025-05-05 12:15:05
| downloads
int64 0
4.28M
| likes
int64 0
7.74k
| tags
large listlengths 1
2.03k
| task_categories
large listlengths 0
16
| createdAt
large_stringdate 2022-03-02 23:29:22
2025-05-05 12:13:14
| trending_score
float64 1
39
⌀ | card
large_stringlengths 31
1M
|
---|---|---|---|---|---|---|---|---|---|
AlexHung29629/mistral_distill_data | AlexHung29629 | 2025-05-03T10:42:29Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T21:04:48Z | null | ---
dataset_info:
- config_name: edit
features:
- name: domain
dtype: string
- name: language
dtype: string
- name: context
list:
- name: role
dtype: string
- name: content
dtype: string
- name: original_response
dtype: string
- name: edited_response
dtype: string
- name: feedback
sequence: string
- name: change_summary
dtype: string
- name: output_0
dtype: string
- name: output_1
dtype: string
- name: output_2
dtype: string
- name: output_3
dtype: string
- name: score_0
dtype: float64
- name: score_1
dtype: float64
- name: score_2
dtype: float64
- name: score_3
dtype: float64
splits:
- name: train
num_bytes: 113741127
num_examples: 5000
download_size: 51590159
dataset_size: 113741127
- config_name: edit_quality
features:
- name: domain
dtype: string
- name: language
dtype: string
- name: context
list:
- name: role
dtype: string
- name: content
dtype: string
- name: original_response
dtype: string
- name: good_edited_response
dtype: string
- name: bad_edited_response
dtype: string
- name: feedback
sequence: string
- name: output_0
dtype: string
- name: output_1
dtype: string
- name: output_2
dtype: string
- name: output_3
dtype: string
- name: score_0
dtype: float64
- name: score_1
dtype: float64
- name: score_2
dtype: float64
- name: score_3
dtype: float64
splits:
- name: train
num_bytes: 65768627
num_examples: 3111
download_size: 31897839
dataset_size: 65768627
- config_name: feedback
features:
- name: domain
dtype: string
- name: language
dtype: string
- name: context
list:
- name: role
dtype: string
- name: content
dtype: string
- name: response1
dtype: string
- name: response2
dtype: string
- name: feedback1
sequence: string
- name: feedback2
sequence: string
- name: output_0
dtype: string
- name: output_1
dtype: string
- name: output_2
dtype: string
- name: output_3
dtype: string
- name: score_0
dtype: float64
- name: score_1
dtype: float64
- name: score_2
dtype: float64
- name: score_3
dtype: float64
splits:
- name: train
num_bytes: 119210291
num_examples: 5000
download_size: 52864142
dataset_size: 119210291
- config_name: preference
features:
- name: domain
dtype: string
- name: language
dtype: string
- name: context
list:
- name: role
dtype: string
- name: content
dtype: string
- name: response1
dtype: string
- name: response2
dtype: string
- name: overall_preference
dtype: int64
- name: individual_preference
list:
- name: score
dtype: int64
- name: reasoning
dtype: string
- name: feedback1
dtype: string
- name: feedback2
dtype: string
- name: output_0
dtype: string
- name: output_1
dtype: string
- name: output_2
dtype: string
- name: output_3
dtype: string
- name: score_0
dtype: float64
- name: score_1
dtype: float64
- name: score_2
dtype: float64
- name: score_3
dtype: float64
splits:
- name: train
num_bytes: 123005751
num_examples: 5000
download_size: 54232101
dataset_size: 123005751
configs:
- config_name: edit
data_files:
- split: train
path: edit/train-*
- config_name: edit_quality
data_files:
- split: train
path: edit_quality/train-*
- config_name: feedback
data_files:
- split: train
path: feedback/train-*
- config_name: preference
data_files:
- split: train
path: preference/train-*
---
|
MBZUAI-IFM/if_eval_final | MBZUAI-IFM | 2025-05-03T10:40:23Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T19:34:17Z | null | ---
dataset_info:
features:
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
- name: key
dtype: int64
- name: prompt
dtype: string
- name: instruction_id_list
dtype: string
- name: cot
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 2929864
num_examples: 428
download_size: 1672492
dataset_size: 2929864
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
tunahanf/MAIN_ALPACA-2 | tunahanf | 2025-05-03T10:22:38Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T10:06:46Z | null | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 62541990.0
num_examples: 35637
- name: test
num_bytes: 20847330.0
num_examples: 11879
download_size: 45180778
dataset_size: 83389320.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
Hkang/summarize_sft-test_lm-EleutherAI_pythia-1b_seed-42_numex-250_lr3e8_3K-BON_32 | Hkang | 2025-05-03T10:18:42Z | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-01T00:00:26Z | null | ---
dataset_info:
features:
- name: id
dtype: string
- name: subreddit
dtype: string
- name: title
dtype: string
- name: post
dtype: string
- name: summary
dtype: string
- name: query_input_ids
sequence: int64
- name: query_attention_mask
sequence: int64
- name: query
dtype: string
- name: reference_response
dtype: string
- name: reference_response_input_ids
sequence: int64
- name: reference_response_attention_mask
sequence: int64
- name: reference_response_token_len
dtype: int64
- name: query_reference_response
dtype: string
- name: query_reference_response_input_ids
sequence: int64
- name: query_reference_response_attention_mask
sequence: int64
- name: query_reference_response_token_response_label
sequence: int64
- name: query_reference_response_token_len
dtype: int64
- name: model_response
dtype: string
splits:
- name: test
num_bytes: 6851275
num_examples: 250
download_size: 1149760
dataset_size: 6851275
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
kothasuhas/llp-gold-37m-1.5m_N1.50M_T8.0_T8.0_T8.0 | kothasuhas | 2025-05-03T10:07:35Z | 0 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T10:06:36Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: p_log_probs
dtype: float32
- name: q_log_probs
dtype: float32
- name: num_tokens
dtype: float32
- name: log_weight
dtype: float64
- name: sampling_p_temperature_scaled
dtype: float64
splits:
- name: train
num_bytes: 6198000000
num_examples: 1500000
download_size: 1978574
dataset_size: 6198000000
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
tunahanf/MAIN_ALPACA | tunahanf | 2025-05-03T10:00:40Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T09:57:34Z | null | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 60688866.0
num_examples: 35637
- name: test
num_bytes: 20229622.0
num_examples: 11879
download_size: 45186540
dataset_size: 80918488.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
LiqunMa/temp | LiqunMa | 2025-05-03T09:50:17Z | 125 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-07-09T12:40:30Z | null | ---
dataset_info:
features:
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
- name: index
dtype: int64
- name: question
dtype: string
- name: cot
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 534859
num_examples: 49
download_size: 257991
dataset_size: 534859
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
zhengbang0707/REFUEL_it2_mask1_v2_30k_CUDA | zhengbang0707 | 2025-05-03T09:46:49Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T23:06:28Z | null | ---
dataset_info:
features:
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: reject
list:
- name: content
dtype: string
- name: role
dtype: string
- name: chosen_token
sequence: int64
- name: reject_token
sequence: int64
- name: chosen_mask
sequence: int64
- name: chosen_mask_user
sequence: int64
- name: reject_mask
sequence: int64
- name: reject_mask_user
sequence: int64
- name: chosen_reward_list
sequence: float64
- name: reject_reward_list
sequence: float64
- name: chosen_reward_list_new
sequence: float64
- name: reject_reward_list_new
sequence: float64
- name: chosen_reward
dtype: float64
- name: reject_reward
dtype: float64
- name: chosen_logprob
dtype: float64
- name: reject_logprob
dtype: float64
splits:
- name: train
num_bytes: 3190309179
num_examples: 30000
- name: test
num_bytes: 53052292
num_examples: 500
download_size: 193646837
dataset_size: 3243361471
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
Jianshu001/reasoning-data-collections | Jianshu001 | 2025-05-03T09:39:36Z | 0 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T09:35:36Z | null | ---
dataset_info:
features:
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 13303147741
num_examples: 925318
download_size: 5883849943
dataset_size: 13303147741
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
hettc/polkadot-elections | hettc | 2025-05-03T09:37:18Z | 236 | 0 | [
"license:apache-2.0",
"region:us"
] | [] | 2025-04-20T10:19:57Z | null | ---
license: apache-2.0
---
|
findableai/phenology | findableai | 2025-05-03T09:29:28Z | 0 | 0 | [
"license:mit",
"size_categories:n<1K",
"format:imagefolder",
"modality:image",
"library:datasets",
"library:mlcroissant",
"region:us"
] | [] | 2025-05-02T12:46:30Z | null | ---
license: mit
---
# Dataset Card for Phenology
<!-- Provide a quick summary of the dataset. -->
## Summary
This dataset contains data necessary for running the code in the GitHub repository https://github.com/findable-no/phenology-data
The file tables.tgz contains raw scans of the pages in the article
> A. Lauscher, F. Lauscher, and H. Printz, *Die Phänologie Norwegens, Teil II. Phänologischce Mittelwerte für 260 Orte*, Skr. Det Norske Videnskaps-Akademi Oslo. I. Mat.-Naturv. Kl. No.l 1959, 1-176, 1959
that contain data tables. Unpack this file in the folder ./data/raw in the repository and use the notebook __phenology_preprocess_1.ipynb__ to generate the final processed data.
## Professor Printz and his phenology data
Phenology is, according to [Wikipedia](https://en.wikipedia.org/wiki/Phenology):
> the study of periodic events in biological life cycles and how these are influenced by seasonal and interannual variations in climate, as well as habitat factors (such as elevation)
It is arguably one of the oldest of mans sciences since the very survival of our species depended (and depends) on this knowledge. Knowing for instance about migratory patterns of animals allowed for effective hunting, observations about plants allowed for effective sowing and harvesting and thereby planning of food supply etc. A lot is known, of course, about phenology, but there have been relatively few systematic and official efforts dedicated to collection of such information.
In Norway, a large effort at collecting phenological data concerning plants, birds and agricultural phenomena was undertaken in 1928 by a botanist at the University of Oslo, Henrik Printz. He established an extensive network of observers all over Norway and tasked them with observing a large number of different phenological phases such as flowering and budburst of different plants, arrival of certain migratory birds etc.
In the 1959 publication
> A. Lauscher, F. Lauscher, and H. Printz, *Die Phänologie Norwegens, Teil II. Phänologischce Mittelwerte für 260 Orte*, Skr. Det Norske Videnskaps-Akademi Oslo. I. Mat.-Naturv. Kl. No.l 1959, 1-176, 1959
he published observations from 278 observation stations for the years 1928 to 1952. The article, 182 pages long and written in German, contains the data from each observation station in the form of handwritten tables looking like this:

For instance, the cell indexed as row i and column 1 (shown in blue in the figure above) represents the Julian date of the first flowering of a tiny plant called [Coltsfoot](https://en.wikipedia.org/wiki/Tussilago) (in Latin, *tussilago farfara*). [Julian dates](https://en.wikipedia.org/wiki/Julian_day) , as used in this article, are just the day number after the 1st of January, so in a non-leap year the Julian date 137 corresponds to the 17th of May.
The coltsfoot is interesting since it is a so-called phenologically plastic plant. It will basically start its lifecycle when local climatic conditions permit. If spring is early a year it will blossom early, if spring is cold and late it will blossom later. So you can think of these tiny plants a climatic laboratories spread all over Norway.
So why should we care about these old data? Well, the observation period coincides with a period when the human contribution of potential greenhouse gases was much lower than today (see [here](https://www.climate.gov/media/14596) for instance for an overview of the evolution of CO2 in the athmosphere since 1750). In this sense, Henrik Printz’s data represents a *time capsule* of indirect climatic observations that is soon to be a hundred years old. It serves as a very simple baseline against which we can compare todays conditions and verify if indeed natures phenological phases have been influenced by a possible climatic change.
In this repository you will find:
1) A Jupyter notebook to extract, from every table, a specific cell as an image and code to use a visual large language model (vLLM) to perform an initial reading of these cells
2) A Jupyter notebook that shows how to finetune the vLLM to read the data more precisely.
3) Pandas data frames containing the observations in digital format.
4) A Jupyter notebook containing some examples of how this data can be used to make geoplots of the variability of the data in Norway.
At [Findable AS](https://www.findable.ai/) we do document understanding for the building industry. This current project is a “labor of love” that we release to the public domain as a token of appreciation for all the different open source tools that we use in our daily work. We hope this will be of interest to at least some people, and we believe (and fear) that the data will show that nature, in Norway, has not gone uninfluenced by climatic changes. We welcome suggestions for improvements and additions and hope people will surprise us with their own uses of this data. If you continue work on this data we demand that you kindly maintain the reference to the original article by Henrik Printz cited above. Likewise, a kind mention of Findable and the work we put into digitising this would be appreciated 😄.
A few observations about the data:
1. There are 278 tables corresponding to the 278 observation locations.
2. Every table contains 4 metadata fields (show in yellow below) and 292 data fields (shown in blue):

3. There are 83.956 fields all in all, but as can be observed above not all observation were made in every location.
4. There are a total of 33.905 cells that are not blank.
5. You will find a comprehensive list of all the observations and how to interpret them in the file xxx in the data folder.
6. Finally, you will find a pandas data frame and an excel sheet containing the resulting data after *manual verification* by real human beings.
## Disclaimer
We present this data *in good faith and as is* without any claims, either direct nor implied, as to their usability for any purpose whatsoever.
Findable AS nor the authors make any claim as to the correctness of the data and shall not be held liable for any consequences, either direct or indirect, of the use or interpretation of this data.
Any conclusions or inferences drawn from this data are at the sole discretion of the entities setting forth such conclusions or inferences.
- **Curated by:** Lars Aurdal, Eivind Kjosbakken, Findable AS (https://www.findable.ai)
- **Language(s) (NLP):** English, Norwegian, German.
- **License:** MIT
## Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** https://github.com/findable-no/phenology-data
## Dataset Card Authors [optional]
Lars Aurdal, Findable AS (https://www.findable.ai)
## Dataset Card Contact
Lars Aurdal, Findable AS (https://www.findable.ai) |
pierreqi/HumanEval-scilab | pierreqi | 2025-05-03T09:23:22Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T09:23:19Z | null | ---
dataset_info:
features:
- name: task_id
dtype: string
- name: prompt
dtype: string
- name: canonical_solution
dtype: string
- name: test
dtype: string
- name: entry_point
dtype: string
splits:
- name: test
num_bytes: 284649
num_examples: 164
download_size: 117920
dataset_size: 284649
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
HungVu2003/opt-350m_beta_1.0_alpha_0.6_num-company_3_dataset_1_for_gen_13 | HungVu2003 | 2025-05-03T08:45:39Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T08:45:33Z | null | ---
dataset_info:
features:
- name: question
dtype: string
splits:
- name: train
num_bytes: 3684139
num_examples: 12500
download_size: 1878089
dataset_size: 3684139
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
fakeemailqwe/emoji-vae-dataset | fakeemailqwe | 2025-05-03T08:29:59Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T08:29:57Z | null | ---
dataset_info:
features:
- name: image
dtype: image
splits:
- name: train
num_bytes: 5199453.477
num_examples: 2749
download_size: 4793494
dataset_size: 5199453.477
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
shubhamjuneja/emoji-vae-dataset | shubhamjuneja | 2025-05-03T08:29:08Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T08:29:02Z | null | ---
dataset_info:
features:
- name: image
dtype: image
splits:
- name: train
num_bytes: 5199453.477
num_examples: 2749
download_size: 4793494
dataset_size: 5199453.477
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
justaszie/emoji-vae-dataset | justaszie | 2025-05-03T08:24:52Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T08:24:50Z | null | ---
dataset_info:
features:
- name: image
dtype: image
splits:
- name: train
num_bytes: 5199453.477
num_examples: 2749
download_size: 4793494
dataset_size: 5199453.477
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
shylee/eval_DP_cube_downDims1_cropNo224_freeze1_32_32_ema0_1e-4_ckpt030000 | shylee | 2025-05-03T07:10:57Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"tutorial"
] | [
"robotics"
] | 2025-05-03T07:10:52Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so100",
"total_episodes": 1,
"total_frames": 38,
"total_tasks": 1,
"total_videos": 3,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:1"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.FrontCam": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.TopCam": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.WristCam": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
shylee/eval_DP_cube_downDims1_cropNo_freeze0_64_64_ema0_1e-4_ckpt180000 | shylee | 2025-05-03T07:01:45Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"tutorial"
] | [
"robotics"
] | 2025-05-03T07:01:39Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so100",
"total_episodes": 1,
"total_frames": 488,
"total_tasks": 1,
"total_videos": 3,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:1"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.FrontCam": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.TopCam": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.WristCam": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
lambara/tool_shuffle_small | lambara | 2025-05-03T07:00:05Z | 214 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-11T09:38:39Z | null | ---
dataset_info:
features:
- name: conversation
dtype: string
- name: tools
dtype: string
- name: date
dtype: string
splits:
- name: train
num_bytes: 17203102
num_examples: 12888
download_size: 1921256
dataset_size: 17203102
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
junieg/validation-dataset | junieg | 2025-05-03T06:47:40Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T06:40:13Z | null | ---
dataset_info:
features:
- name: image
dtype: image
- name: Latitude
dtype: float64
- name: Longitude
dtype: float64
splits:
- name: train
num_bytes: 772293898.0
num_examples: 183
download_size: 772111651
dataset_size: 772293898.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
chiyuanhsiao/text_L2-regular-ASR_spoken-web-questions-score | chiyuanhsiao | 2025-05-03T06:45:18Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T06:45:16Z | null | ---
dataset_info:
features:
- name: url
dtype: string
- name: question
dtype: string
- name: answers
sequence: string
- name: my_prediction_text
dtype: string
- name: text_score
dtype: int64
splits:
- name: test
num_bytes: 1158748
num_examples: 2032
download_size: 305572
dataset_size: 1158748
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
chiyuanhsiao/text_L2-regular-15_trivia_qa-audio-score | chiyuanhsiao | 2025-05-03T06:43:01Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T06:42:54Z | null | ---
dataset_info:
features:
- name: question
dtype: string
- name: question_id
dtype: string
- name: question_source
dtype: string
- name: entity_pages
sequence:
- name: doc_source
dtype: string
- name: filename
dtype: string
- name: title
dtype: string
- name: wiki_context
dtype: string
- name: search_results
sequence:
- name: description
dtype: string
- name: filename
dtype: string
- name: rank
dtype: int32
- name: title
dtype: string
- name: url
dtype: string
- name: search_context
dtype: string
- name: answer
struct:
- name: aliases
sequence: string
- name: normalized_aliases
sequence: string
- name: matched_wiki_entity_name
dtype: string
- name: normalized_matched_wiki_entity_name
dtype: string
- name: normalized_value
dtype: string
- name: type
dtype: string
- name: value
dtype: string
- name: my_prediction_text
dtype: string
- name: text_score
dtype: int64
splits:
- name: validation
num_bytes: 74723805
num_examples: 1000
download_size: 31135428
dataset_size: 74723805
configs:
- config_name: default
data_files:
- split: validation
path: data/validation-*
---
|
chiyuanhsiao/text_L2-regular_trivia_qa-audio-score | chiyuanhsiao | 2025-05-03T06:42:27Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T06:42:21Z | null | ---
dataset_info:
features:
- name: question
dtype: string
- name: question_id
dtype: string
- name: question_source
dtype: string
- name: entity_pages
sequence:
- name: doc_source
dtype: string
- name: filename
dtype: string
- name: title
dtype: string
- name: wiki_context
dtype: string
- name: search_results
sequence:
- name: description
dtype: string
- name: filename
dtype: string
- name: rank
dtype: int32
- name: title
dtype: string
- name: url
dtype: string
- name: search_context
dtype: string
- name: answer
struct:
- name: aliases
sequence: string
- name: normalized_aliases
sequence: string
- name: matched_wiki_entity_name
dtype: string
- name: normalized_matched_wiki_entity_name
dtype: string
- name: normalized_value
dtype: string
- name: type
dtype: string
- name: value
dtype: string
- name: my_prediction_text
dtype: string
- name: text_score
dtype: int64
splits:
- name: validation
num_bytes: 75206396
num_examples: 1000
download_size: 31006582
dataset_size: 75206396
configs:
- config_name: default
data_files:
- split: validation
path: data/validation-*
---
|
HungVu2003/opt-350m_beta_0.5_alpha_0.6_num-company_3_dataset_1_for_gen_8 | HungVu2003 | 2025-05-03T06:41:22Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T06:41:21Z | null | ---
dataset_info:
features:
- name: question
dtype: string
splits:
- name: train
num_bytes: 2358748
num_examples: 12500
download_size: 1293112
dataset_size: 2358748
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/hero_run_2_with_domain | mlfoundations-dev | 2025-05-03T06:40:33Z | 0 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T06:32:23Z | null | ---
dataset_info:
features:
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
- name: _domain
dtype: string
- name: system
dtype: string
- name: problem
dtype: string
- name: reasoning
dtype: string
- name: deepseek_solution
dtype: string
- name: question
dtype: string
- name: source
dtype: string
- name: id
dtype: int64
splits:
- name: train
num_bytes: 19429855798.0
num_examples: 1142975
download_size: 8430047143
dataset_size: 19429855798.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
shylee/eval_DP_cube_downDims1_cropNo_freeze1_64_64_ema0_1e-4_ckpt420000 | shylee | 2025-05-03T06:40:24Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"tutorial"
] | [
"robotics"
] | 2025-05-03T06:40:08Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so100",
"total_episodes": 9,
"total_frames": 6585,
"total_tasks": 1,
"total_videos": 27,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:9"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.FrontCam": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.TopCam": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.WristCam": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
cchoi1/kodcode-complete_1000_gpt-4o_qwen7b_att_iter0_att10_sol5_relabeled_grpo_20000 | cchoi1 | 2025-05-03T06:34:44Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T00:15:33Z | null | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: completion
dtype: string
- name: reward
dtype: string
- name: task_id
dtype: string
- name: input_ids_prompt
sequence: int64
- name: attention_mask_prompt
sequence: int64
splits:
- name: train
num_bytes: 256763020
num_examples: 20000
- name: test
num_bytes: 64339767
num_examples: 5000
download_size: 31928702
dataset_size: 321102787
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
shylee/eval_DP_cube_downDims1_cropNo_freeze1_64_64_ema0_1e-4_ckpt480000 | shylee | 2025-05-03T06:33:10Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"tutorial"
] | [
"robotics"
] | 2025-05-03T06:33:00Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so100",
"total_episodes": 3,
"total_frames": 2670,
"total_tasks": 1,
"total_videos": 9,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:3"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.FrontCam": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.TopCam": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.WristCam": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
kmrasmussen/intercebd-sft-proj-af06b3e1-b92a-40ae-93fe-a92b01f5b81c-20250503061257 | kmrasmussen | 2025-05-03T06:13:00Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T06:12:57Z | null | ---
dataset_info:
features:
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 1072
num_examples: 11
download_size: 2450
dataset_size: 1072
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
HungVu2003/opt-350m_beta_0.5_alpha_0.6_num-company_3_dataset_2_for_gen_7 | HungVu2003 | 2025-05-03T05:57:25Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T05:57:24Z | null | ---
dataset_info:
features:
- name: question
dtype: string
splits:
- name: train
num_bytes: 2504311
num_examples: 12500
download_size: 1233444
dataset_size: 2504311
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/d1_science_load_in_qwen3 | mlfoundations-dev | 2025-05-03T05:48:22Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T05:47:52Z | null | ---
dataset_info:
features:
- name: instruction_seed
dtype: string
- name: _source
dtype: string
- name: gpt41_mini_response
dtype: string
- name: __original_row_idx
dtype: int64
- name: length
dtype: int64
- name: domain
dtype: string
- name: r1_response
dtype: string
- name: r1_reasoning_content
dtype: string
- name: extract_solution
dtype: string
- name: url
dtype: string
- name: filename
dtype: string
- name: success
dtype: bool
- name: page_count
dtype: int64
- name: page_number
dtype: int64
- name: question_choices_solutions
dtype: string
- name: extracted_question
dtype: string
- name: extracted_answer_choices
sequence: string
- name: matched_solution
dtype: string
- name: qa_validation_outputs
dtype: bool
- name: classifier_reasoning
dtype: string
- name: is_organic_chemistry
dtype: bool
- name: ms_id
dtype: int64
- name: final_reasoning_trace
dtype: string
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 2675170878
num_examples: 63200
download_size: 730471908
dataset_size: 2675170878
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
test-gen/mbpp_mbpp-qwen-coder-7b-instruct-from-sft_t1.0_n8_generated_tests | test-gen | 2025-05-03T05:44:37Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T05:35:30Z | null | ---
dataset_info:
features:
- name: task_id
dtype: int32
- name: text
dtype: string
- name: code
dtype: string
- name: test_list
sequence: string
- name: test_setup_code
dtype: string
- name: challenge_test_list
sequence: string
- name: verification_info
struct:
- name: language
dtype: string
- name: test_cases
sequence: string
splits:
- name: train
num_bytes: 609378
num_examples: 374
- name: validation
num_bytes: 147679
num_examples: 90
download_size: 342407
dataset_size: 757057
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
movefast/math_gen_writing_20k_v2 | movefast | 2025-05-03T04:55:14Z | 9 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-29T21:31:10Z | null | ---
dataset_info:
features:
- name: question
dtype: string
- name: answer
dtype: string
- name: reward
dtype: float64
- name: question_token_count
dtype: float64
- name: task
dtype: string
- name: gt
dtype: string
- name: options
sequence: string
- name: discipline
dtype: string
- name: field
dtype: string
- name: subfield
dtype: string
- name: difficulty
dtype: string
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
- name: word_count
dtype: float64
- name: num_turns
dtype: float64
splits:
- name: train
num_bytes: 56241327
num_examples: 19460
- name: eval_math
num_bytes: 89522.33333333333
num_examples: 32
- name: eval_gen
num_bytes: 89522.33333333333
num_examples: 32
- name: eval_writing
num_bytes: 89522.33333333333
num_examples: 32
download_size: 29309429
dataset_size: 56509894.00000001
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: eval_math
path: data/eval_math-*
- split: eval_gen
path: data/eval_gen-*
- split: eval_writing
path: data/eval_writing-*
---
|
NONHUMAN-RESEARCH/tic-tac-toe-v2-three | NONHUMAN-RESEARCH | 2025-05-03T04:46:25Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"so100",
"test"
] | [
"robotics"
] | 2025-05-03T04:45:17Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- so100
- test
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so100",
"total_episodes": 25,
"total_frames": 11175,
"total_tasks": 1,
"total_videos": 50,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:25"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.laptop": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.phone": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
zhengbang0707/REFUEL_it2_mask2_v2_60k | zhengbang0707 | 2025-05-03T04:17:38Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T04:11:43Z | null | ---
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
- split: val
path: data/val-*
dataset_info:
features:
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: reject
list:
- name: content
dtype: string
- name: role
dtype: string
- name: chosen_token
sequence: int64
- name: reject_token
sequence: int64
- name: chosen_mask
sequence: int64
- name: reject_mask
sequence: int64
- name: chosen_reward_list
sequence: float64
- name: reject_reward_list
sequence: float64
- name: chosen_reward_list_new
sequence: float64
- name: reject_reward_list_new
sequence: float64
- name: chosen_reward
dtype: float64
- name: reject_reward
dtype: float64
splits:
- name: train
num_bytes: 4411629887.853228
num_examples: 60000
- name: test
num_bytes: 36786161
num_examples: 500
- name: val
num_bytes: 36656292
num_examples: 500
download_size: 308067318
dataset_size: 4485072340.853228
---
# Dataset Card for "REFUEL_it2_mask2_v2_60k"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
cchoi1/kodcode-complete_1000_qwen7b_sol_iter0_att10_sol5_lr5e5_10ep_dedup_dpo_6000 | cchoi1 | 2025-05-03T04:05:14Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T04:05:10Z | null | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: task_id
dtype: string
splits:
- name: train
num_bytes: 6739047.688016529
num_examples: 1548
- name: test
num_bytes: 1689115.311983471
num_examples: 388
download_size: 1603835
dataset_size: 8428163.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
abehandlerorg/olmobypublisherdev | abehandlerorg | 2025-05-03T03:53:16Z | 0 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T03:50:55Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: date_download
dtype: string
- name: source_domain
dtype: string
- name: title
dtype: string
- name: url
dtype: string
splits:
- name: train
num_bytes: 1471349423
num_examples: 375804
download_size: 745759734
dataset_size: 1471349423
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
theprint/Coach-1.2k | theprint | 2025-05-03T03:52:45Z | 0 | 0 | [
"task_categories:text-generation",
"language:en",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"text-generation"
] | 2025-05-03T03:51:03Z | null | ---
license: apache-2.0
task_categories:
- text-generation
language:
- en
size_categories:
- 1K<n<10K
--- |
HungVu2003/opt-350m_beta_0.5_alpha_0.6_num-company_3_dataset_0_for_gen_6 | HungVu2003 | 2025-05-03T03:47:06Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T03:47:04Z | null | ---
dataset_info:
features:
- name: question
dtype: string
splits:
- name: train
num_bytes: 3764988
num_examples: 12500
download_size: 1783193
dataset_size: 3764988
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mothnaZl/s1-Qwen2.5-7B-Instruct-6-best_of_n-VLLM-Skywork-o1-Open-PRM-Qwen-2.5-7B-completions | mothnaZl | 2025-05-03T03:03:00Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T02:18:52Z | null | ---
dataset_info:
config_name: FUfu99_OlympiadBench_maths_origin--T-0.8--top_p-1.0--n-8--seed-0--agg_strategy-last--num-shots-0--prompt_type-None--merged--evals
features:
- name: n
dtype: int64
- name: acc_naive
dtype: float64
- name: acc_weighted
dtype: float64
- name: acc_maj
dtype: float64
- name: pass@n
dtype: float64
- name: div_avg
dtype: float64
- name: div_sum
dtype: float64
- name: div_mean
dtype: float64
- name: Unigrams
dtype: float64
- name: Bigrams
dtype: float64
- name: Trigrams
dtype: float64
- name: Fourgrams
dtype: float64
- name: pass_tag
sequence: 'null'
- name: BM25
dtype: int64
splits:
- name: train
num_bytes: 432
num_examples: 4
download_size: 6292
dataset_size: 432
configs:
- config_name: FUfu99_OlympiadBench_maths_origin--T-0.8--top_p-1.0--n-8--seed-0--agg_strategy-last--num-shots-0--prompt_type-None--merged--evals
data_files:
- split: train
path: FUfu99_OlympiadBench_maths_origin--T-0.8--top_p-1.0--n-8--seed-0--agg_strategy-last--num-shots-0--prompt_type-None--merged--evals/train-*
---
|
osama24sy/llama3.2-3b-it-countdown-game-7k-qwq-r64-results-20250503-17462399256186 | osama24sy | 2025-05-03T02:53:51Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T02:53:50Z | null | ---
dataset_info:
features:
- name: index
dtype: int64
- name: numbers
sequence: int64
- name: target
dtype: int64
- name: operations
sequence:
sequence: string
- name: response
dtype: string
- name: token_count
dtype: int64
splits:
- name: train
num_bytes: 2563972
num_examples: 150
download_size: 936143
dataset_size: 2563972
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
jaeyong2/Math-Qwen3-14B-Ko | jaeyong2 | 2025-05-03T02:25:21Z | 17 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-01T10:13:26Z | null | ---
dataset_info:
features:
- name: content
dtype: string
- name: text
dtype: string
splits:
- name: train
num_bytes: 378343668
num_examples: 55000
download_size: 150992454
dataset_size: 378343668
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
bismarck91/frA-enA-tokenised-qwen-synthetic_16khz | bismarck91 | 2025-05-03T01:44:27Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T01:44:17Z | null | ---
dataset_info:
features:
- name: input_ids
sequence: int32
- name: labels
sequence: int64
- name: attention_mask
sequence: int8
splits:
- name: train
num_bytes: 439205697
num_examples: 24900
download_size: 132045560
dataset_size: 439205697
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
hanaearg/MyMultiLabelEmotionsNew | hanaearg | 2025-05-03T01:43:44Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T01:43:43Z | null | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: output
sequence: string
splits:
- name: train
num_bytes: 1646765
num_examples: 2768
- name: test
num_bytes: 1648925
num_examples: 2767
- name: dev
num_bytes: 68696
num_examples: 116
download_size: 354595
dataset_size: 3364386
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
- split: dev
path: data/dev-*
---
|
CohenQu/HintGenerator.10.02 | CohenQu | 2025-05-03T01:12:12Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T01:12:09Z | null | ---
dataset_info:
features:
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: suffix
dtype: string
splits:
- name: train
num_bytes: 118372021
num_examples: 38747
- name: test
num_bytes: 2864052
num_examples: 1000
download_size: 54886213
dataset_size: 121236073
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
Aravindh25/test_depth_2 | Aravindh25 | 2025-05-03T01:00:37Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"region:us",
"LeRobot",
"tutorial"
] | [
"robotics"
] | 2025-05-03T00:58:49Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "trossen_ai_solo",
"total_episodes": 3,
"total_frames": 373,
"total_tasks": 1,
"total_videos": 9,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:3"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
7
],
"names": [
"main_joint_0",
"main_joint_1",
"main_joint_2",
"main_joint_3",
"main_joint_4",
"main_joint_5",
"main_joint_6"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
7
],
"names": [
"main_joint_0",
"main_joint_1",
"main_joint_2",
"main_joint_3",
"main_joint_4",
"main_joint_5",
"main_joint_6"
]
},
"observation.images.cam_wrist": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.depth.cam_wrist": {
"dtype": "depth",
"shape": [
480,
640,
1
],
"names": [
"height",
"width",
"channels"
],
"info": null
},
"observation.images.cam_high": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.depth.cam_high": {
"dtype": "depth",
"shape": [
480,
640,
1
],
"names": [
"height",
"width",
"channels"
],
"info": null
},
"observation.images.cam_front": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.depth.cam_front": {
"dtype": "depth",
"shape": [
480,
640,
1
],
"names": [
"height",
"width",
"channels"
],
"info": null
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
cfpark00/toy-multistep-v2-nn_20-na_10-nab_40-test | cfpark00 | 2025-05-03T00:43:48Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T00:43:41Z | null | ---
dataset_info:
features:
- name: prompts
dtype: string
- name: completions
dtype: string
- name: num_maskeds
dtype: int64
- name: texts
dtype: string
- name: text
dtype: string
splits:
- name: train
num_bytes: 9155618
num_examples: 50000
- name: rl_nm_0
num_bytes: 186071
num_examples: 1000
- name: rl_nm_1
num_bytes: 224255
num_examples: 1000
- name: rl_nm_2
num_bytes: 268608
num_examples: 1000
- name: rl_nm_3
num_bytes: 296155
num_examples: 1000
- name: rl_nm_4
num_bytes: 313813
num_examples: 1000
- name: test_nm_0
num_bytes: 183591
num_examples: 1000
- name: test_nm_1
num_bytes: 233440
num_examples: 1000
- name: test_nm_2
num_bytes: 272471
num_examples: 1000
- name: test_nm_3
num_bytes: 295383
num_examples: 1000
- name: test_nm_4
num_bytes: 313458
num_examples: 1000
download_size: 5202689
dataset_size: 11742863
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: rl_nm_0
path: data/rl_nm_0-*
- split: rl_nm_1
path: data/rl_nm_1-*
- split: rl_nm_2
path: data/rl_nm_2-*
- split: rl_nm_3
path: data/rl_nm_3-*
- split: rl_nm_4
path: data/rl_nm_4-*
- split: test_nm_0
path: data/test_nm_0-*
- split: test_nm_1
path: data/test_nm_1-*
- split: test_nm_2
path: data/test_nm_2-*
- split: test_nm_3
path: data/test_nm_3-*
- split: test_nm_4
path: data/test_nm_4-*
---
|
VGraf/mt_dependent_user_2_turns | VGraf | 2025-05-03T00:35:28Z | 41 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-09T15:30:17Z | null | ---
dataset_info:
features:
- name: conv
list:
- name: user
dtype: string
- name: sys
dtype: string
- name: id
dtype: string
- name: do_inference
dtype: bool
- name: inst
dtype: string
- name: key
dtype: int64
- name: prompt
dtype: string
- name: entity
dtype: string
- name: id
dtype: int64
splits:
- name: train
num_bytes: 203495
num_examples: 600
download_size: 83466
dataset_size: 203495
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- config_name: user_reference
data_files:
- split: test
path: data/train-*
---
|
pacscilab/VoxCommunis | pacscilab | 2025-05-03T00:33:01Z | 102 | 2 | [
"language:ab",
"language:am",
"language:ba",
"language:be",
"language:bg",
"language:bn",
"language:ca",
"language:cs",
"language:cv",
"language:ckb",
"language:dv",
"language:el",
"language:eu",
"language:gn",
"language:ha",
"language:hi",
"language:hsb",
"language:hu",
"language:hy",
"language:id",
"language:it",
"language:ja",
"language:ka",
"language:kk",
"language:ko",
"language:ky",
"language:lt",
"language:mk",
"language:mn",
"language:mr",
"language:mt",
"language:nl",
"language:or",
"language:pa",
"language:pl",
"language:pt",
"language:ro",
"language:ru",
"language:rw",
"language:sk",
"language:sl",
"language:sq",
"language:sr",
"language:sv",
"language:sw",
"language:ta",
"language:th",
"language:tk",
"language:tr",
"language:ug",
"language:uk",
"language:uz",
"language:vi",
"language:yo",
"language:yue",
"language:zh",
"license:cc0-1.0",
"size_categories:n<1K",
"region:us",
"Phonetics",
"Linguistics",
"Corpus"
] | [] | 2024-11-06T22:33:01Z | null | ---
language:
- ab
- am
- ba
- be
- bg
- bn
- ca
- cs
- cv
- ckb
- dv
- el
- eu
- gn
- ha
- hi
- hsb
- hu
- hy
- id
- it
- ja
- ka
- kk
- ko
- ky
- lt
- mk
- mn
- mr
- mt
- nl
- or
- pa
- pl
- pt
- ro
- ru
- rw
- sk
- sl
- sq
- sr
- sv
- sw
- ta
- th
- tk
- tr
- ug
- uk
- uz
- vi
- yo
- yue
- zh
tags:
- Phonetics
- Linguistics
- Corpus
size_categories:
- n<1K
pretty_name: VXC
license: cc0-1.0
---
The VoxCommunis Corpus contains acoustic models, lexicons, and force-aligned TextGrids with phone- and word-level segmentations derived from the Mozilla Common Voice Corpus. The Mozilla Common Voice Corpus and derivative VoxCommunis Corpus stored here are free to download and use under a CC0 license.
The lexicons are developed using Epitran, the XPF Corpus, Charsiu, and some custom dictionaries. Some manual correction has been applied, and we hope to continue improving these. Any updates from the community are welcome.
The acoustic models have been trained using the Montreal Forced Aligner, and the force-aligned TextGrids are obtained directly from those alignments. These acoustic models can be downloaded and re-used with the Montreal Forced Aligner for new data.
The TextGrids contain phone- and word-level alignments of the validated set of the Common Voice data.
The filename has the structure: Common Voice language code, G2P system, Common Voice version (validated), VoxCommunis acoustic model.
mk_xpf_textgrids19_acoustic19 corresponds to: alignments from the validated portion of the Macedonian Common Voice 19 Corpus using a lexicon generated with the XPF Corpus, aligned using an acoustic model trained on the validated portion of the Macedonian Common Voice 19 Corpus.
The spkr_files contain a mapping from the original client_id to a simplified spkr_id.
The corresponding Github repository can be found here: https://github.com/pacscilab/voxcommunis |
Aravindh25/test_depth_1 | Aravindh25 | 2025-05-03T00:26:10Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"region:us",
"LeRobot",
"tutorial"
] | [
"robotics"
] | 2025-05-03T00:24:41Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "trossen_ai_solo",
"total_episodes": 3,
"total_frames": 900,
"total_tasks": 1,
"total_videos": 3,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:3"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
7
],
"names": [
"main_joint_0",
"main_joint_1",
"main_joint_2",
"main_joint_3",
"main_joint_4",
"main_joint_5",
"main_joint_6"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
7
],
"names": [
"main_joint_0",
"main_joint_1",
"main_joint_2",
"main_joint_3",
"main_joint_4",
"main_joint_5",
"main_joint_6"
]
},
"observation.images.cam_wrist": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.depth.cam_wrist": {
"dtype": "depth",
"shape": [
480,
640,
1
],
"names": [
"height",
"width",
"channels"
],
"info": null
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
chiyuanhsiao/text_L2-regular-ties_llama-questions | chiyuanhsiao | 2025-05-03T00:24:09Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T00:24:05Z | null | ---
dataset_info:
features:
- name: question
dtype: string
- name: answer
dtype: string
- name: my_prediction_text
dtype: string
splits:
- name: test
num_bytes: 7004004
num_examples: 300
download_size: 809036
dataset_size: 7004004
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
ymroddi/langa_chat | ymroddi | 2025-05-03T00:17:19Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T00:17:17Z | null | ---
dataset_info:
features:
- name: train
struct:
- name: assistant
dtype: string
- name: conversations
dtype: string
- name: user
dtype: string
splits:
- name: train
num_bytes: 72742576
num_examples: 41523
download_size: 29814553
dataset_size: 72742576
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
AlignmentResearch/StrongREJECT | AlignmentResearch | 2025-05-02T23:59:46Z | 1,452 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-06-27T17:29:45Z | null | ---
dataset_info:
- config_name: default
features:
- name: clf_label
dtype:
class_label:
names:
'0': Benign
'1': Harmful
- name: proxy_clf_label
dtype:
class_label:
names:
'0': Benign
'1': Harmful
- name: instructions
dtype: string
- name: content
sequence: string
- name: answer_prompt
dtype: string
- name: gen_target
dtype: string
- name: proxy_gen_target
dtype: string
splits:
- name: train
num_bytes: 0
num_examples: 0
- name: validation
num_bytes: 82052
num_examples: 313
download_size: 34722
dataset_size: 82052
- config_name: keywords
features:
- name: clf_label
dtype:
class_label:
names:
'0': Benign
'1': Harmful
- name: proxy_clf_label
dtype:
class_label:
names:
'0': Benign
'1': Harmful
- name: instructions
dtype: string
- name: content
sequence: string
- name: answer_prompt
dtype: string
- name: gen_target
dtype: string
- name: proxy_gen_target
dtype: string
splits:
- name: train
num_bytes: 0
num_examples: 0
- name: validation
num_bytes: 5242.939297124601
num_examples: 20
download_size: 7890
dataset_size: 5242.939297124601
- config_name: violence
features:
- name: clf_label
dtype:
class_label:
names:
'0': Benign
'1': Harmful
- name: proxy_clf_label
dtype:
class_label:
names:
'0': Benign
'1': Harmful
- name: instructions
dtype: string
- name: content
sequence: string
- name: answer_prompt
dtype: string
- name: gen_target
dtype: string
- name: proxy_gen_target
dtype: string
splits:
- name: train
num_bytes: 0
num_examples: 0
- name: validation
num_bytes: 14155.936102236421
num_examples: 54
download_size: 11965
dataset_size: 14155.936102236421
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- config_name: keywords
data_files:
- split: train
path: keywords/train-*
- split: validation
path: keywords/validation-*
- config_name: violence
data_files:
- split: train
path: violence/train-*
- split: validation
path: violence/validation-*
---
|
Asap7772/s1K-1.1-gemini-qwensft | Asap7772 | 2025-05-02T23:56:43Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T23:54:37Z | null | ---
dataset_info:
features:
- name: query
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 12816610.55
num_examples: 950
- name: test
num_bytes: 674558.45
num_examples: 50
download_size: 5243188
dataset_size: 13491169.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
jaeyong2/Math-Qwen3-14B-vi | jaeyong2 | 2025-05-02T23:44:31Z | 7 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-01T18:12:44Z | null | ---
dataset_info:
features:
- name: content
dtype: string
- name: text
dtype: string
splits:
- name: train
num_bytes: 287195847
num_examples: 35000
download_size: 115840235
dataset_size: 287195847
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
chiyuanhsiao/text_L2-regular-14_trivia_qa-audio | chiyuanhsiao | 2025-05-02T23:42:51Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T23:42:45Z | null | ---
dataset_info:
features:
- name: question
dtype: string
- name: question_id
dtype: string
- name: question_source
dtype: string
- name: entity_pages
sequence:
- name: doc_source
dtype: string
- name: filename
dtype: string
- name: title
dtype: string
- name: wiki_context
dtype: string
- name: search_results
sequence:
- name: description
dtype: string
- name: filename
dtype: string
- name: rank
dtype: int32
- name: title
dtype: string
- name: url
dtype: string
- name: search_context
dtype: string
- name: answer
struct:
- name: aliases
sequence: string
- name: normalized_aliases
sequence: string
- name: matched_wiki_entity_name
dtype: string
- name: normalized_matched_wiki_entity_name
dtype: string
- name: normalized_value
dtype: string
- name: type
dtype: string
- name: value
dtype: string
- name: my_prediction_text
dtype: string
splits:
- name: validation
num_bytes: 74591653
num_examples: 1000
download_size: 33520752
dataset_size: 74591653
configs:
- config_name: default
data_files:
- split: validation
path: data/validation-*
---
|
chiyuanhsiao/text_L2-regular-linear_trivia_qa-audio | chiyuanhsiao | 2025-05-02T23:34:03Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T23:33:52Z | null | ---
dataset_info:
features:
- name: question
dtype: string
- name: question_id
dtype: string
- name: question_source
dtype: string
- name: entity_pages
sequence:
- name: doc_source
dtype: string
- name: filename
dtype: string
- name: title
dtype: string
- name: wiki_context
dtype: string
- name: search_results
sequence:
- name: description
dtype: string
- name: filename
dtype: string
- name: rank
dtype: int32
- name: title
dtype: string
- name: url
dtype: string
- name: search_context
dtype: string
- name: answer
struct:
- name: aliases
sequence: string
- name: normalized_aliases
sequence: string
- name: matched_wiki_entity_name
dtype: string
- name: normalized_matched_wiki_entity_name
dtype: string
- name: normalized_value
dtype: string
- name: type
dtype: string
- name: value
dtype: string
- name: my_prediction_text
dtype: string
splits:
- name: validation
num_bytes: 73651753
num_examples: 1000
download_size: 32939208
dataset_size: 73651753
configs:
- config_name: default
data_files:
- split: validation
path: data/validation-*
---
|
ieuniversity/group_1_submission | ieuniversity | 2025-05-02T23:32:50Z | 375 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-25T10:28:13Z | null | ---
dataset_info:
features:
- name: ID
dtype: string
- name: CLASE
dtype: string
splits:
- name: train
num_bytes: 895432
num_examples: 25808
download_size: 501536
dataset_size: 895432
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
cchoi1/kodcode-complete_1000_gpt-4o_qwen7b_att_iter0_att10_sol5_relabeled_grpo_10000 | cchoi1 | 2025-05-02T23:31:36Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T23:31:32Z | null | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: completion
dtype: string
- name: reward
dtype: string
- name: task_id
dtype: string
- name: input_ids_prompt
sequence: int64
- name: attention_mask_prompt
sequence: int64
splits:
- name: train
num_bytes: 127862002
num_examples: 10000
- name: test
num_bytes: 25677824
num_examples: 2000
download_size: 15408572
dataset_size: 153539826
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
slava-medvedev/zelensky-speeches | slava-medvedev | 2025-05-02T23:30:13Z | 324 | 3 | [
"task_categories:summarization",
"task_categories:text-classification",
"annotations_creators:no-annotation",
"language_creators:found",
"multilinguality:monolingual",
"language:uk",
"language:en",
"license:cc-by-4.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"zelensky",
"ukraine",
"politics"
] | [
"summarization",
"text-classification"
] | 2023-11-14T18:43:21Z | null | ---
annotations_creators:
- no-annotation
language_creators:
- found
language:
- uk
- en
license: cc-by-4.0
multilinguality:
- monolingual
size_categories:
- 1K<n<10K
task_categories:
- summarization
- text-classification
pretty_name: 'Speeches given by the president of Ukraine Volodymyr Zelensky
Languages: Ukrainian, English
Source: https://www.president.gov.ua/news/speeches'
dataset_info:
features:
- name: date
dtype: int64
- name: link
dtype: string
- name: topic
dtype: string
- name: full_text
dtype: string
- name: lang
dtype: string
splits:
- name: train
num_bytes: 19595099
num_examples: 2939
download_size: 9888247
dataset_size: 19595099
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
tags:
- zelensky
- ukraine
- politics
---
# Dataset Card for "zelenskiy-speeches"
Speeches given by the president of Ukraine Volodymyr Zelensky
Languages: Ukrainian, English
Source: [president.gov.ua](https://www.president.gov.ua/news/speeches)
Auto-updated daily by Github Actions of [zelensky-speech-fetcher](https://github.com/medvedev/zelensky-speech-fetcher)
License: [CC BY-NC-ND 4.0 Deed](https://creativecommons.org/licenses/by-nc-nd/4.0/deed.en) |
marrowdust/ev_charging | marrowdust | 2025-05-02T22:57:59Z | 0 | 0 | [
"license:apache-2.0",
"size_categories:10K<n<100K",
"format:csv",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T22:54:53Z | null | ---
license: apache-2.0
---
|
tphage/beam_dataset_0502 | tphage | 2025-05-02T22:57:11Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T22:57:10Z | null | ---
dataset_info:
features:
- name: Image
dtype: image
- name: Question
dtype: string
- name: BeamDescription
dtype: string
- name: CauseEffect
dtype: string
- name: ResponseDescription
dtype: string
- name: Answer
dtype: string
splits:
- name: train
num_bytes: 6150711.0
num_examples: 100
download_size: 5717714
dataset_size: 6150711.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
HungVu2003/opt-350m_beta_1.0_alpha_0.6_num-company_3_dataset_0_for_gen_10 | HungVu2003 | 2025-05-02T22:54:44Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T22:54:42Z | null | ---
dataset_info:
features:
- name: question
dtype: string
splits:
- name: train
num_bytes: 6321246
num_examples: 12500
download_size: 1689490
dataset_size: 6321246
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/d1_code_long_paragraphs_10k | mlfoundations-dev | 2025-05-02T22:31:34Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T22:23:46Z | null | ---
dataset_info:
features:
- name: id
dtype: string
- name: instruction_seed
dtype: string
- name: output
dtype: string
- name: source
dtype: string
- name: license
dtype: string
- name: dataset
dtype: string
- name: split
dtype: string
- name: difficulty
dtype: int64
- name: solution
dtype: string
- name: index
dtype: string
- name: _source
dtype: string
- name: difficulty_reasoning
dtype: string
- name: __original_row_idx
dtype: int64
- name: ms_id
dtype: int64
- name: reasoning
sequence: string
- name: deepseek_solution
sequence: string
- name: final_reasoning_trace
sequence: string
- name: correct
sequence: bool
- name: classifier_reasoning
dtype: string
- name: _majority_responses
sequence: string
- name: verified_final_reasoning_trace
dtype: string
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 22033422492.405064
num_examples: 10000
download_size: 8940912851
dataset_size: 22033422492.405064
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
chiyuanhsiao/text_L2-regular-SQA-14_IFEval | chiyuanhsiao | 2025-05-02T22:12:22Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T22:12:17Z | null | ---
dataset_info:
features:
- name: key
dtype: int64
- name: prompt
dtype: string
- name: instruction_id_list
sequence: string
- name: kwargs
list:
- name: num_highlights
dtype: int64
- name: relation
dtype: string
- name: num_words
dtype: int64
- name: num_placeholders
dtype: int64
- name: prompt_to_repeat
dtype: string
- name: num_bullets
dtype: int64
- name: section_spliter
dtype: string
- name: num_sections
dtype: int64
- name: capital_relation
dtype: string
- name: capital_frequency
dtype: int64
- name: keywords
sequence: string
- name: num_paragraphs
dtype: int64
- name: language
dtype: string
- name: let_relation
dtype: string
- name: letter
dtype: string
- name: let_frequency
dtype: int64
- name: end_phrase
dtype: string
- name: forbidden_words
sequence: string
- name: keyword
dtype: string
- name: frequency
dtype: int64
- name: num_sentences
dtype: int64
- name: postscript_marker
dtype: string
- name: first_word
dtype: string
- name: nth_paragraph
dtype: int64
- name: my_prediction_text
dtype: string
splits:
- name: train
num_bytes: 10348768
num_examples: 541
download_size: 2249934
dataset_size: 10348768
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
SynthData/TER-Token_Efficient_Reasoning | SynthData | 2025-05-02T21:41:12Z | 168 | 3 | [
"language:en",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-17T16:56:51Z | null | ---
license: apache-2.0
language:
- en
---
# Token Efficient Reasoning
The Token Efficient Reasoning dataset contains high-quality, expert-level reasoning demonstrations structured to capture how domain experts actually think through complex problems. Unlike traditional reasoning traces, TER features information-dense, concise reasoning paths that maintain full logical integrity without veering into "cryptic" shorthand territory.
## Dataset Details
TER consists of high-quality Question/Answers from pretraining corpora DCLM, FineMath, etc. in addition to synthetically-generated variations on questions from popular reasoning benchmarks including MATH, GPQA, MMLU-Pro, MMLU-STEM. We use the reference final answer from the original document from the pretraining corpora when possible, or extract it from a mixture of models + n-sampling agent flow if there is wide consensus.
Each Q/A example follows a consistent six-tier reasoning framework that progresses from problem reformulation through essential reasoning steps to solution confirmation. The dataset spans multiple domains including mathematics, physics, computer science, engineering, and social sciences, with each entry demonstrating how to solve complex problems with minimal token usage while preserving deep domain expertise. For robustness, each Q/A pair contains 3 separate reasoning traces, generated under the TER framework with slightly varied prompting styles, by DeepSeek-R1 and QwQ-32b.
Notably, we've observed that when reasoning models are provided with enough relevant context tangential to possible solution paths - before entering the reasoning phase - the final outputs maintain high quality (human-verified) and low hallucination rates across domains.
TER was developed to address the challenges of a) inefficient reasoning in language models, where verbosity often masks core insights; and b) a lack of training data for teaching models how to think through a framework similar to that of domain experts. By training on this dataset, models can learn to generate more precise, logically sound reasoning that better reflects how experts actually approach difficult problems: recognizing patterns instantly, avoiding reasoning pitfalls, verifying solutions - all while using conceptual concision and focusing on critical decision points.
## Contact
[email protected] |
HPAI-BSC/MRI-MCQA | HPAI-BSC | 2025-05-02T21:36:34Z | 5 | 1 | [
"task_categories:multiple-choice",
"task_categories:question-answering",
"language:en",
"license:cc-by-nc-4.0",
"size_categories:n<1K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"medical"
] | [
"multiple-choice",
"question-answering"
] | 2025-04-30T15:17:09Z | null | ---
license: cc-by-nc-4.0
language:
- en
tags:
- medical
task_categories:
- multiple-choice
- question-answering
pretty_name: MRI-MCQA
size_categories:
- n<1K
---
# MRI-MCQA
<div align="center">
<img src="https://cdn-uploads.huggingface.co/production/uploads/6620f941eba5274b5c12f83d/DlPW2CN-qErpC3QJqdNho.png" width="400" alt="HPAI"/>
</div>
<hr style="margin: 15px">
<div align="center" style="line-height: 1;">
<a href="https://hpai.bsc.es/" target="_blank" style="margin: 1px;">
<img alt="Web" src="https://img.shields.io/badge/Website-HPAI-8A2BE2" style="display: inline-block; vertical-align: middle;"/>
</a>
<a href="https://huggingface.co/HPAI-BSC" target="_blank" style="margin: 1px;">
<img alt="Hugging Face" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-HPAI-ffc107?color=ffc107&logoColor=white" style="display: inline-block; vertical-align: middle;"/>
</a>
<a href="https://github.com/HPAI-BSC" target="_blank" style="margin: 1px;">
<img alt="GitHub" src="https://img.shields.io/badge/GitHub-HPAI-%23121011.svg?logo=github&logoColor=white" style="display: inline-block; vertical-align: middle;"/>
</a>
</div>
<div align="center" style="line-height: 1;">
<a href="https://www.linkedin.com/company/hpai" target="_blank" style="margin: 1px;">
<img alt="Linkedin" src="https://img.shields.io/badge/Linkedin-HPAI-blue" style="display: inline-block; vertical-align: middle;"/>
</a>
<a href="https://bsky.app/profile/hpai.bsky.social" target="_blank" style="margin: 1px;">
<img alt="BlueSky" src="https://img.shields.io/badge/Bluesky-HPAI-0285FF?logo=bluesky&logoColor=fff" style="display: inline-block; vertical-align: middle;"/>
</a>
<a href="https://linktr.ee/hpai_bsc" target="_blank" style="margin: 1px;">
<img alt="LinkTree" src="https://img.shields.io/badge/Linktree-HPAI-43E55E?style=flat&logo=linktree&logoColor=white" style="display: inline-block; vertical-align: middle;"/>
</a>
</div>
## Dataset Description
MRI-MCQA is a benchmark composed of multiple-choice questions related to Magnetic Resonance Imaging (MRI). We use this dataset to evaluate the level of knowledge of various LLMs about the MRI field.
- **Curated by:** [Oscar Molina Sedano](https://huggingface.co/OscarMolina)
- **Language(s) (NLP):** English
## License
This dataset is licensed under [CC-BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/).
## Disclaimer
Courtesy of Allen D. Elster, [MRIquestions.com](http://mriquestions.com). |
MBZUAI-IFM/OpenO1-SFT_final | MBZUAI-IFM | 2025-05-02T21:31:26Z | 0 | 1 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T21:31:04Z | null | ---
dataset_info:
features:
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
- name: dataset_source
dtype: string
splits:
- name: train
num_bytes: 302570532
num_examples: 62130
download_size: 143091676
dataset_size: 302570532
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/no_pipeline_science_300k | mlfoundations-dev | 2025-05-02T21:30:11Z | 0 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T21:27:50Z | null | ---
dataset_info:
features:
- name: instruction_seed
dtype: string
- name: reasoning
dtype: string
- name: deepseek_solution
dtype: string
- name: source
dtype: string
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
- name: shard_id
dtype: string
splits:
- name: train
num_bytes: 6570312975.384615
num_examples: 316000
download_size: 3187340742
dataset_size: 6570312975.384615
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/no_pipeline_science_1k | mlfoundations-dev | 2025-05-02T21:26:45Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T21:26:43Z | null | ---
dataset_info:
features:
- name: instruction_seed
dtype: string
- name: reasoning
dtype: string
- name: deepseek_solution
dtype: string
- name: source
dtype: string
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
- name: shard_id
dtype: string
splits:
- name: train
num_bytes: 20792129.668938655
num_examples: 1000
download_size: 10100605
dataset_size: 20792129.668938655
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/no_pipeline_code_100k | mlfoundations-dev | 2025-05-02T21:22:10Z | 0 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T21:20:57Z | null | ---
dataset_info:
features:
- name: instruction_seed
dtype: string
- name: reasoning
dtype: string
- name: deepseek_solution
dtype: string
- name: source
dtype: string
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
- name: shard_id
dtype: string
splits:
- name: train
num_bytes: 3919865347.745011
num_examples: 100000
download_size: 1694713482
dataset_size: 3919865347.745011
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/no_pipeline_code_0.3k | mlfoundations-dev | 2025-05-02T21:20:16Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T21:20:14Z | null | ---
dataset_info:
features:
- name: instruction_seed
dtype: string
- name: reasoning
dtype: string
- name: deepseek_solution
dtype: string
- name: source
dtype: string
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
- name: shard_id
dtype: string
splits:
- name: train
num_bytes: 12386774.498874234
num_examples: 316
download_size: 5867264
dataset_size: 12386774.498874234
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
felixZzz/math_eval_suite-math | felixZzz | 2025-05-02T21:16:11Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T21:16:10Z | null | ---
dataset_info:
features:
- name: problem
dtype: string
- name: answer
dtype: string
- name: difficulty
dtype: float64
splits:
- name: test
num_bytes: 108912
num_examples: 500
download_size: 63490
dataset_size: 108912
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
AdoCleanCode/VGG_sound_distorted_v1_025 | AdoCleanCode | 2025-05-02T21:08:41Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T17:25:54Z | null | ---
dataset_info:
features:
- name: id
dtype: string
- name: label
dtype: string
- name: caption
dtype: string
- name: question_1
dtype: string
- name: answer_1
dtype: string
- name: question_2
dtype: string
- name: answer_2
dtype: string
- name: question_3
dtype: string
- name: answer_3
dtype: string
- name: coarse_label
dtype: string
- name: __index_level_0__
dtype: int64
splits:
- name: train
num_bytes: 37372784
num_examples: 44422
download_size: 7043514
dataset_size: 37372784
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
chiyuanhsiao/text_L2-regular-ties_spoken-web-questions | chiyuanhsiao | 2025-05-02T21:07:18Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T21:07:13Z | null | ---
dataset_info:
features:
- name: url
dtype: string
- name: question
dtype: string
- name: answers
sequence: string
- name: my_prediction_text
dtype: string
splits:
- name: test
num_bytes: 42652936
num_examples: 2032
download_size: 6233842
dataset_size: 42652936
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
ysn-rfd/alpaca_fibonacci_dataset_v7 | ysn-rfd | 2025-05-02T20:50:17Z | 0 | 1 | [
"license:apache-2.0",
"size_categories:10K<n<100K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T20:49:31Z | null | ---
license: apache-2.0
---
|
ai4m3/example_dataset | ai4m3 | 2025-05-02T20:49:06Z | 0 | 0 | [
"task_categories:robotics",
"region:us",
"phosphobot",
"so100",
"phospho-dk"
] | [
"robotics"
] | 2025-05-02T20:49:04Z | null |
---
tags:
- phosphobot
- so100
- phospho-dk
task_categories:
- robotics
---
# example_dataset
**This dataset was generated using a [phospho starter pack](https://robots.phospho.ai).**
This dataset contains a series of episodes recorded with a robot and multiple cameras. It can be directly used to train a policy using imitation learning. It's compatible with LeRobot and RLDS.
|
leeroy-jankins/Appropriations | leeroy-jankins | 2025-05-02T20:31:55Z | 97 | 0 | [
"license:mit",
"size_categories:1M<n<10M",
"format:text",
"modality:text",
"library:datasets",
"library:mlcroissant",
"region:us"
] | [] | 2025-04-26T14:03:49Z | null | ---
license: mit
pretty_name: U.S. Appropriations Dataset
---
# 💵 U.S. Appropriations Dataset (1996–2025)
This dataset links enacted U.S. Public Laws with their corresponding Explanatory Statements and Appropriations Titles,
covering the major federal appropriations acts from FY1996 through FY2025.
---
## 📊 Structure
Each entry includes:
- `public_law`: Official citation of the enacted appropriations law (e.g. P.L. 117-328)
- `explanatory_statement`: House or Senate report number accompanying the law (e.g. H. Rpt. 117-328)
- `appropriation_title`: Full name of the Appropriations Act or Continuing Resolution
---
## 🗂️ Sample Entries
| Public Law | Explanatory Statement | Appropriation Title |
|---------------|------------------------|--------------------------------------------------------------------------------------|
| P.L. 104-134 | H. Rpt. 104-537 | Omnibus Consolidated Rescissions and Appropriations Act |
| P.L. 104-208 | H. Rpt. 104-863 | Omnibus Consolidated Appropriations Act, 1997 |
| P.L. 105-277 | H. Rpt. 105-825 | Omnibus Consolidated and Emergency Supplemental Appropriations Act |
| P.L. 105-277 | H. Rpt. 106-110 | Omnibus Consolidated and Emergency Supplemental Appropriations Act |
| P.L. 106-113 | H. Rpt. 106-479 | Consolidated Appropriations Act, 2000 |
| P.L. 106-79 | H. Rpt. 106-371 | Department of Defense Appropriations Act, 2000 |
| P.L. 106-554 | H. Rpt. 106-1033 | Consolidated Appropriations Act, 2001 |
| P.L. 106-259 | S. Rpt. 106-298 | Department of Defense Appropriations Act, 2001 |
| P.L. 107-117 | H. Rpt. 107-350 | Department of Defense and Emergency Supplemental Appropriations |
| P.L. 107-206 | H. Rpt. 107-593 | Supplemental Appropriations Act, 2002 |
| P.L. 108-7 | H. Rpt. 108-10 | Consolidated Appropriations Resolution, 2003 |
| P.L. 108-199 | H. Rpt. 108-401 | Consolidated Appropriations Act, 2004 |
| P.L. 108-11 | H. Rpt. 108-55 | Emergency Supplemental Appropriations Act for Defense |
| P.L. 108-447 | H. Rpt. 108-792 | Consolidated Appropriations Act, 2005 |
| P.L. 109-13 | H. Rpt. 109-72 | Emergency Supplemental Appropriations Act for Defense, Global War on Terror, Tsunami Relief |
| P.L. 109-108 | H. Rpt. 109-272 | Science, State, Justice, Commerce Appropriations Act |
| P.L. 109-148 | S. Rpt. 109-141 | Department of Defense Appropriations Act, 2006 |
| P.L. 110-5 | H. Rpt. 110-5 | Revised Continuing Appropriations Resolution, 2007 |
| P.L. 110-161 | H. Rpt. 110-497 | Consolidated Appropriations Act, 2008 |
| P.L. 110-252 | H. Rpt. 110-656 | Supplemental Appropriations Act, 2008 |
| P.L. 111-8 | H. Rpt. 111-8 | Omnibus Appropriations Act, 2009 |
| P.L. 111-32 | H. Rpt. 111-105 | Supplemental Appropriations Act, 2009 |
| P.L. 111-117 | H. Rpt. 111-366 | Consolidated Appropriations Act, 2010 |
| P.L. 112-10 | H. Rpt. 112-331 | Department of Defense and Full-Year Continuing Appropriations Act, 2011 |
| P.L. 112-74 | H. Rpt. 112-331 | Consolidated Appropriations Act, 2012 |
| P.L. 113-6 | H. Rpt. 113-6 | Consolidated and Further Continuing Appropriations Act, 2013 |
| P.L. 113-76 | H. Rpt. 113-76 | Consolidated Appropriations Act, 2014 |
| P.L. 113-235 | H. Rpt. 113-235 | Consolidated and Further Continuing Appropriations Act, 2015 |
| P.L. 114-113 | H. Rpt. 114-113 | Consolidated Appropriations Act, 2016 |
| P.L. 115-31 | H. Rpt. 115-31 | Consolidated Appropriations Act, 2017 |
| P.L. 115-141 | H. Rpt. 115-141 | Consolidated Appropriations Act, 2018 |
| P.L. 116-6 | H. Rpt. 116-6 | Consolidated Appropriations Act, 2019 |
| P.L. 116-93 | H. Rpt. 116-93 | Further Consolidated Appropriations Act, 2020 |
| P.L. 116-260 | H. Rpt. 116-260 | Consolidated Appropriations Act, 2021 |
| P.L. 117-103 | H. Rpt. 117-103 | Consolidated Appropriations Act, 2022 |
| P.L. 117-328 | H. Rpt. 117-328 | Consolidated Appropriations Act, 2023 |
| P.L. 118-42 | H. Rpt. 118-42 | Continuing Appropriations Act, 2024 |
| P.L. 118-83  | H. Rpt. 118-83         | Continuing Appropriations Act, 2025 |
---
## 🔍 Use Cases
- 🧠 Train NLP models for legislative reference extraction
- 🧾 Link Appropriations Acts to their respective explanatory documents
- 🗃️ Construct longitudinal appropriations histories for federal program analysis
- 📜 Support research on continuing resolutions and omnibus legislation
---
## 📚 Related Concepts
- Omnibus and Consolidated Appropriations
- Explanatory Statements (House/Senate Reports)
- Continuing Resolutions
- Title-by-Title Budget Authority
---
## 🧠 Example Usage (Python)
```python
from datasets import load_dataset
ds = load_dataset("leeroy-jankins/Regulations", split="train")
for item in ds:
print(f"{item['public_law']} — {item['appropriation_title']}") |
Hamzah-Asadullah/TA-WQS-8k | Hamzah-Asadullah | 2025-05-02T20:30:28Z | 55 | 1 | [
"task_categories:question-answering",
"task_categories:text2text-generation",
"task_categories:text-generation",
"language:en",
"license:mit",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"conversational",
"synthetic"
] | [
"question-answering",
"text2text-generation",
"text-generation"
] | 2025-04-27T15:31:59Z | null | ---
license: mit
task_categories:
- question-answering
- text2text-generation
- text-generation
language:
- en
tags:
- conversational
- synthetic
---
> [!NOTE]
> Consider supporting me [here](https://ko-fi.com/hamzahasadullah) 🎉
> Try out my assistant for free [here](https://xetute.github.io/)
TinyAlpaca WQS (Weird Question Specific) 8k is part of the TA (Tiny Alpaca) series. This dataset is generated using [the SyntheticAlpaca script](https://github.com/Hamzah-Asadullah/SyntheticAlpaca) as a pipeline and [Qwen/Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct) as a generator.
## Loading through HuggingFace Datasets
First, install the library: `pip install --upgrade datasets`
Then, load this dataset:
```py
from datasets import load_dataset
dataset = load_dataset("Hamzah-Asadullah/TA-WQS-8k")
print(dataset["train"][0])
```
**Happy coding**
<div style="display: flex; flex-direction: column; justify-content: center; align-items: left; font-size: 1rem; padding: 20px;">
<div style="display: flex; flex-direction: row; align-items: center; margin: 10px; margin-left: 0; padding: 0;">
<img src="https://xetute.github.io/favicon.ico" style="margin: 0; border-radius: 50%; height: 2rem;"/>
<h2 style="margin: 0; margin-left: 10px;">XeTute Technologies</h2>
</div>
<div style="display: flex; flex-direction: row; gap: 5px; margin: 0; max-width: 500px;">
XeTute Technologies is an unofficial Pakistani organisation created by <a href="https://huggingface.co/Hamzah-Asadullah">Hamzah Asadullah.</a>
</div>
<h2 style="margin: 5px; margin-top: 20px; margin-left: 0;">Links</h2>
<div style="display: flex; flex-direction: row; word-break: none; gap: 5px;">
<a href="https://huggingface.co/XeTute">HuggingFace</a>
<a href="https://github.com/XeTute">GitHub</a>
</div>
<div style="display: flex; flex-direction: row; word-break: none; gap: 5px;">
<a href="https://ko-fi.com/hamzahasadullah">Buy me a Coffee</a>
<a href="https://xetute.github.io">Apex Webpage</a>
</div>
<h2 style="margin: 5px; margin-top: 20px; margin-left: 0;">Pakistan</h2>
Pakistan is a country in South-Asia known for its rich culture despite the British, its stunning landscape, and PAF (Pakistan Armed Forces), its military. Long live the Islamic Republic of Pakistan.<br>
<img src="https://upload.wikimedia.org/wikipedia/commons/3/32/Flag_of_Pakistan.svg" style="width: 85%; max-width: 512px; border-radius: 25px;"/>
</div> |
Hamzah-Asadullah/TA-4k | Hamzah-Asadullah | 2025-05-02T20:27:47Z | 411 | 1 | [
"task_categories:question-answering",
"task_categories:translation",
"task_categories:text-generation",
"task_categories:text2text-generation",
"task_categories:summarization",
"language:en",
"language:de",
"license:mit",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"alpaca",
"reasoning",
"synthetic"
] | [
"question-answering",
"translation",
"text-generation",
"text2text-generation",
"summarization"
] | 2025-03-26T15:30:46Z | null | ---
license: mit
task_categories:
- question-answering
- translation
- text-generation
- text2text-generation
- summarization
language:
- en
- de
tags:
- alpaca
- reasoning
- synthetic
size_categories:
- 1K<n<10K
---
**TL;DR**: *TA*, short for *Tiny-Alpaca*, is a reasoning-included dataset with **4,096 samples** synthetically generated using [Gemma3:4b](https://huggingface.co/Google/Gemma-3-4b-it) and [Synthetic-Alpaca](https://github.com/Hamzah-Asadullah/SyntheticAlpaca). It took ~one day to generate this dataset on a RTX4060 8GB.
> [!NOTE]
> Took quite some nerves to generate and motivate Gemma3 to produce something publishable, [you can support me here](https://ko-fi.com/hamzahasadullah).
---
## Gemma3:4b generated this dataset
> [!NOTE]
> Following description is copy pasted from [the original Gemma3 release](https://huggingface.co/google/gemma-3-4b-it).
_These models were trained on a dataset of text data that includes a wide variety
of sources. The 27B model was trained with 14 trillion tokens, the 12B model was
trained with 12 trillion tokens, 4B model was trained with 4 trillion tokens and
1B with 2 trillion tokens. Here are the key components:_
- _Web Documents: A diverse collection of web text ensures the model is
exposed to a broad range of linguistic styles, topics, and vocabulary. The
training dataset includes content in over 140 languages._
- _Code: Exposing the model to code helps it to learn the syntax and
patterns of programming languages, which improves its ability to generate
code and understand code-related questions._
- _Mathematics: Training on mathematical text helps the model learn logical
reasoning, symbolic representation, and to address mathematical queries._
- _Images: A wide range of images enables the model to perform image
analysis and visual data extraction tasks._
_The combination of these diverse data sources is crucial for training a powerful
multimodal model that can handle a wide variety of different tasks and data
formats._
---
You can find the script used to generate this dataset [here (GitHub)](https://github.com/Hamzah-Asadullah/SyntheticAlpaca) and [here (raw)](https://huggingface.co/datasets/Hamzah-Asadullah/TA-4k/resolve/main/main.py).
If you're only using this dataset or only adding samples which are ~5MB and under 2,048 samples, you should note that Gemma3 (or at least the 4b version) loved to ask itself all of following around 1k times, but re-phrased:
- Explaining Quantum Entanglement & Blockchain Technology to minors
- Imagining it's a travel blogger & therefore describing Kyoto, Japan (Kyoto is beautiful, but I don't need 815 samples of that Google)
- Analyzing extremely exaggerated customer reviews which all sound the same, basically
- Instead of saying "Summarize this article: A long article comes here" (and actually putting in an article), it will literally do "Summarize this article: {Generate an article here}."
The dataset **is deduped** if that's a concern for you, but some questions are **still very similar** even though the temperature was set to `1`, which is already quite high. Happens when **[big companies](https://hf.co/google/)** overfit their model on benchmarks or datasets optimized for those.
That said, the dataset isn't too bad, around an average of what you'd expect from something you can add to an instruction-tuning dataset. You can filter out too similar topics by just searching for "Kyoto" or other relevant keys and removing 50% of the rows containing these; you should still end up with a good amount of 'good' samples. This doesn't mean Gemma3 didn't memorize its training data though.
A larger version of this dataset with higher-quality samples (using LLaMA3.1 8B) at more quantity is available on [here](https://huggingface.co/datasets/XeTute/TA-8k).
---
<div style="display: flex; flex-direction: column; justify-content: center; align-items: left; font-size: 1rem; padding: 20px;">
<div style="display: flex; flex-direction: row; align-items: center; margin: 10px; margin-left: 0; padding: 0;">
<img src="https://xetute.github.io/favicon.ico" style="margin: 0; border-radius: 50%; height: 2rem;"/>
<h2 style="margin: 0; margin-left: 10px;">XeTute Technologies</h2>
</div>
<div style="display: flex; flex-direction: row; gap: 5px; margin: 0; max-width: 500px;">
XeTute Technologies is an unofficial Pakistani organisation created by <a href="https://huggingface.co/Hamzah-Asadullah">Hamzah Asadullah.</a>
</div>
<h2 style="margin: 5px; margin-top: 20px; margin-left: 0;">Links</h2>
<div style="display: flex; flex-direction: row; word-break: none; gap: 5px;">
<a href="https://huggingface.co/XeTute">HuggingFace</a>
<a href="https://github.com/XeTute">GitHub</a>
</div>
<div style="display: flex; flex-direction: row; word-break: none; gap: 5px;">
<a href="https://ko-fi.com/hamzahasadullah">Buy me a Coffee</a>
<a href="https://xetute.github.io">Apex Webpage</a>
</div>
<h2 style="margin: 5px; margin-top: 20px; margin-left: 0;">Pakistan</h2>
Pakistan is a country in South-Asia known for its rich culture despite the British, its stunning landscape, and PAF (Pakistan Armed Forces), its military. Long live the Islamic Republic of Pakistan.<br>
<img src="https://upload.wikimedia.org/wikipedia/commons/3/32/Flag_of_Pakistan.svg" style="width: 85%; max-width: 512px; border-radius: 25px;"/>
</div> |
magnifi/parser_user_v39d | magnifi | 2025-05-02T20:18:16Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T20:18:12Z | null | ---
dataset_info:
features:
- name: Query_id
dtype: int64
- name: Query
dtype: string
- name: Elastic_search
dtype: string
- name: virtual_portfolios
dtype: string
- name: Parser_output
dtype: string
splits:
- name: train
num_bytes: 638171
num_examples: 2664
- name: validation
num_bytes: 29682
num_examples: 149
download_size: 213107
dataset_size: 667853
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
IDEALLab/photonics_2d_MIT_Workshop_May2nd | IDEALLab | 2025-05-02T20:12:27Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T20:12:22Z | null | ---
dataset_info:
features:
- name: lambda1
dtype: float64
- name: lambda2
dtype: float64
- name: blur_radius
dtype: int32
- name: optimal_design
dtype:
array2_d:
shape:
- 120
- 120
dtype: float32
- name: optimization_history
list: float64
splits:
- name: train
num_bytes: 58804500.0
num_examples: 985
- name: val
num_bytes: 7343100.0
num_examples: 123
- name: test
num_bytes: 7402800.0
num_examples: 124
download_size: 3226379
dataset_size: 73550400.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: val
path: data/val-*
- split: test
path: data/test-*
---
|
alchemistyzz/MMIU_TEST | alchemistyzz | 2025-05-02T19:54:46Z | 0 | 0 | [
"license:apache-2.0",
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T10:34:13Z | null | ---
license: apache-2.0
---
|
TheRealPilot638/Llama-3.2-1B-dvts_16_H200 | TheRealPilot638 | 2025-05-02T19:40:24Z | 4 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-30T23:41:49Z | null | ---
dataset_info:
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-0--agg_strategy--last
features:
- name: problem
dtype: string
- name: solution
dtype: string
- name: answer
dtype: string
- name: subject
dtype: string
- name: level
dtype: int64
- name: unique_id
dtype: string
- name: completions
sequence: string
- name: pred
dtype: string
- name: completion_tokens
dtype: int64
- name: scores
sequence:
sequence: float64
- name: agg_scores
sequence: float64
- name: pred_weighted@1
dtype: string
- name: pred_maj@1
dtype: string
- name: pred_naive@1
dtype: string
- name: pred_weighted@2
dtype: string
- name: pred_maj@2
dtype: string
- name: pred_naive@2
dtype: string
- name: pred_weighted@4
dtype: string
- name: pred_maj@4
dtype: string
- name: pred_naive@4
dtype: string
- name: pred_weighted@8
dtype: string
- name: pred_maj@8
dtype: string
- name: pred_naive@8
dtype: string
- name: pred_weighted@16
dtype: string
- name: pred_maj@16
dtype: string
- name: pred_naive@16
dtype: string
splits:
- name: train
num_bytes: 14138341
num_examples: 500
download_size: 2472111
dataset_size: 14138341
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-0--agg_strategy--last--evals
features:
- name: n
dtype: int64
- name: acc_naive
dtype: float64
- name: acc_weighted
dtype: float64
- name: acc_maj
dtype: float64
splits:
- name: train
num_bytes: 64
num_examples: 2
download_size: 1993
dataset_size: 64
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-1--agg_strategy--last
features:
- name: problem
dtype: string
- name: solution
dtype: string
- name: answer
dtype: string
- name: subject
dtype: string
- name: level
dtype: int64
- name: unique_id
dtype: string
- name: completions
sequence: string
- name: pred
dtype: string
- name: completion_tokens
dtype: int64
- name: scores
sequence:
sequence: float64
- name: agg_scores
sequence: float64
- name: pred_weighted@1
dtype: string
- name: pred_maj@1
dtype: string
- name: pred_naive@1
dtype: string
- name: pred_weighted@2
dtype: string
- name: pred_maj@2
dtype: string
- name: pred_naive@2
dtype: string
- name: pred_weighted@4
dtype: string
- name: pred_maj@4
dtype: string
- name: pred_naive@4
dtype: string
- name: pred_weighted@8
dtype: string
- name: pred_maj@8
dtype: string
- name: pred_naive@8
dtype: string
- name: pred_weighted@16
dtype: string
- name: pred_maj@16
dtype: string
- name: pred_naive@16
dtype: string
splits:
- name: train
num_bytes: 14582780
num_examples: 500
download_size: 2507961
dataset_size: 14582780
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-1--agg_strategy--last--evals
features:
- name: n
dtype: int64
- name: acc_naive
dtype: float64
- name: acc_weighted
dtype: float64
- name: acc_maj
dtype: float64
splits:
- name: train
num_bytes: 64
num_examples: 2
download_size: 1993
dataset_size: 64
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-2--agg_strategy--last
features:
- name: problem
dtype: string
- name: solution
dtype: string
- name: answer
dtype: string
- name: subject
dtype: string
- name: level
dtype: int64
- name: unique_id
dtype: string
- name: completions
sequence: string
- name: pred
dtype: string
- name: completion_tokens
dtype: int64
- name: scores
sequence:
sequence: float64
- name: agg_scores
sequence: float64
- name: pred_weighted@1
dtype: string
- name: pred_maj@1
dtype: string
- name: pred_naive@1
dtype: string
- name: pred_weighted@2
dtype: string
- name: pred_maj@2
dtype: string
- name: pred_naive@2
dtype: string
- name: pred_weighted@4
dtype: string
- name: pred_maj@4
dtype: string
- name: pred_naive@4
dtype: string
- name: pred_weighted@8
dtype: string
- name: pred_maj@8
dtype: string
- name: pred_naive@8
dtype: string
- name: pred_weighted@16
dtype: string
- name: pred_maj@16
dtype: string
- name: pred_naive@16
dtype: string
splits:
- name: train
num_bytes: 14116003
num_examples: 500
download_size: 2495740
dataset_size: 14116003
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-2--agg_strategy--last--evals
features:
- name: n
dtype: int64
- name: acc_naive
dtype: float64
- name: acc_weighted
dtype: float64
- name: acc_maj
dtype: float64
splits:
- name: train
num_bytes: 64
num_examples: 2
download_size: 1993
dataset_size: 64
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-3--agg_strategy--last
features:
- name: problem
dtype: string
- name: solution
dtype: string
- name: answer
dtype: string
- name: subject
dtype: string
- name: level
dtype: int64
- name: unique_id
dtype: string
- name: completions
sequence: string
- name: pred
dtype: string
- name: completion_tokens
dtype: int64
- name: scores
sequence:
sequence: float64
- name: agg_scores
sequence: float64
- name: pred_weighted@1
dtype: string
- name: pred_maj@1
dtype: string
- name: pred_naive@1
dtype: string
- name: pred_weighted@2
dtype: string
- name: pred_maj@2
dtype: string
- name: pred_naive@2
dtype: string
- name: pred_weighted@4
dtype: string
- name: pred_maj@4
dtype: string
- name: pred_naive@4
dtype: string
- name: pred_weighted@8
dtype: string
- name: pred_maj@8
dtype: string
- name: pred_naive@8
dtype: string
- name: pred_weighted@16
dtype: string
- name: pred_maj@16
dtype: string
- name: pred_naive@16
dtype: string
splits:
- name: train
num_bytes: 14174904
num_examples: 500
download_size: 2481776
dataset_size: 14174904
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-3--agg_strategy--last--evals
features:
- name: n
dtype: int64
- name: acc_naive
dtype: float64
- name: acc_weighted
dtype: float64
- name: acc_maj
dtype: float64
splits:
- name: train
num_bytes: 64
num_examples: 2
download_size: 1993
dataset_size: 64
configs:
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-0--agg_strategy--last
data_files:
- split: train
path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-0--agg_strategy--last/train-*
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-0--agg_strategy--last--evals
data_files:
- split: train
path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-0--agg_strategy--last--evals/train-*
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-1--agg_strategy--last
data_files:
- split: train
path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-1--agg_strategy--last/train-*
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-1--agg_strategy--last--evals
data_files:
- split: train
path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-1--agg_strategy--last--evals/train-*
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-2--agg_strategy--last
data_files:
- split: train
path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-2--agg_strategy--last/train-*
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-2--agg_strategy--last--evals
data_files:
- split: train
path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-2--agg_strategy--last--evals/train-*
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-3--agg_strategy--last
data_files:
- split: train
path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-3--agg_strategy--last/train-*
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-3--agg_strategy--last--evals
data_files:
- split: train
path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-3--agg_strategy--last--evals/train-*
---
|
TheRealPilot638/Llama-3.2-1B-best_of_16_H200 | TheRealPilot638 | 2025-05-02T19:16:20Z | 3 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-30T03:49:38Z | null | ---
dataset_info:
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--seed-0--agg_strategy-last
features:
- name: problem
dtype: string
- name: solution
dtype: string
- name: answer
dtype: string
- name: subject
dtype: string
- name: level
dtype: int64
- name: unique_id
dtype: string
- name: completions
sequence: string
- name: scores
sequence:
sequence: float64
- name: pred
dtype: string
- name: completion_tokens
sequence: int64
- name: agg_scores
sequence: float64
- name: pred_weighted@1
dtype: string
- name: pred_maj@1
dtype: string
- name: pred_naive@1
dtype: string
- name: pred_weighted@2
dtype: string
- name: pred_maj@2
dtype: string
- name: pred_naive@2
dtype: string
- name: pred_weighted@4
dtype: string
- name: pred_maj@4
dtype: string
- name: pred_naive@4
dtype: string
- name: pred_weighted@8
dtype: string
- name: pred_maj@8
dtype: string
- name: pred_naive@8
dtype: string
- name: pred_weighted@16
dtype: string
- name: pred_maj@16
dtype: string
- name: pred_naive@16
dtype: string
splits:
- name: train
num_bytes: 41985898
num_examples: 500
download_size: 9909293
dataset_size: 41985898
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--seed-0--agg_strategy-last--evals
features:
- name: n
dtype: int64
- name: acc_naive
dtype: float64
- name: acc_weighted
dtype: float64
- name: acc_maj
dtype: float64
splits:
- name: train
num_bytes: 160
num_examples: 5
download_size: 2032
dataset_size: 160
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--seed-1--agg_strategy-last
features:
- name: problem
dtype: string
- name: solution
dtype: string
- name: answer
dtype: string
- name: subject
dtype: string
- name: level
dtype: int64
- name: unique_id
dtype: string
- name: completions
sequence: string
- name: scores
sequence:
sequence: float64
- name: pred
dtype: string
- name: completion_tokens
sequence: int64
- name: agg_scores
sequence: float64
- name: pred_weighted@1
dtype: string
- name: pred_maj@1
dtype: string
- name: pred_naive@1
dtype: string
- name: pred_weighted@2
dtype: string
- name: pred_maj@2
dtype: string
- name: pred_naive@2
dtype: string
- name: pred_weighted@4
dtype: string
- name: pred_maj@4
dtype: string
- name: pred_naive@4
dtype: string
- name: pred_weighted@8
dtype: string
- name: pred_maj@8
dtype: string
- name: pred_naive@8
dtype: string
- name: pred_weighted@16
dtype: string
- name: pred_maj@16
dtype: string
- name: pred_naive@16
dtype: string
splits:
- name: train
num_bytes: 42403211
num_examples: 500
download_size: 9928733
dataset_size: 42403211
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--seed-2--agg_strategy-last
features:
- name: problem
dtype: string
- name: solution
dtype: string
- name: answer
dtype: string
- name: subject
dtype: string
- name: level
dtype: int64
- name: unique_id
dtype: string
- name: completions
sequence: string
- name: scores
sequence:
sequence: float64
- name: pred
dtype: string
- name: completion_tokens
sequence: int64
- name: agg_scores
sequence: float64
- name: pred_weighted@1
dtype: string
- name: pred_maj@1
dtype: string
- name: pred_naive@1
dtype: string
- name: pred_weighted@2
dtype: string
- name: pred_maj@2
dtype: string
- name: pred_naive@2
dtype: string
- name: pred_weighted@4
dtype: string
- name: pred_maj@4
dtype: string
- name: pred_naive@4
dtype: string
- name: pred_weighted@8
dtype: string
- name: pred_maj@8
dtype: string
- name: pred_naive@8
dtype: string
- name: pred_weighted@16
dtype: string
- name: pred_maj@16
dtype: string
- name: pred_naive@16
dtype: string
splits:
- name: train
num_bytes: 42593130
num_examples: 500
download_size: 10090776
dataset_size: 42593130
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--seed-3--agg_strategy-last
features:
- name: problem
dtype: string
- name: solution
dtype: string
- name: answer
dtype: string
- name: subject
dtype: string
- name: level
dtype: int64
- name: unique_id
dtype: string
- name: completions
sequence: string
- name: scores
sequence:
sequence: float64
- name: pred
dtype: string
- name: completion_tokens
sequence: int64
- name: agg_scores
sequence: float64
- name: pred_weighted@1
dtype: string
- name: pred_maj@1
dtype: string
- name: pred_naive@1
dtype: string
- name: pred_weighted@2
dtype: string
- name: pred_maj@2
dtype: string
- name: pred_naive@2
dtype: string
- name: pred_weighted@4
dtype: string
- name: pred_maj@4
dtype: string
- name: pred_naive@4
dtype: string
- name: pred_weighted@8
dtype: string
- name: pred_maj@8
dtype: string
- name: pred_naive@8
dtype: string
- name: pred_weighted@16
dtype: string
- name: pred_maj@16
dtype: string
- name: pred_naive@16
dtype: string
splits:
- name: train
num_bytes: 42271977
num_examples: 500
download_size: 10006726
dataset_size: 42271977
configs:
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--seed-0--agg_strategy-last
data_files:
- split: train
path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--seed-0--agg_strategy-last/train-*
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--seed-0--agg_strategy-last--evals
data_files:
- split: train
path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--seed-0--agg_strategy-last--evals/train-*
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--seed-1--agg_strategy-last
data_files:
- split: train
path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--seed-1--agg_strategy-last/train-*
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--seed-2--agg_strategy-last
data_files:
- split: train
path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--seed-2--agg_strategy-last/train-*
- config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--seed-3--agg_strategy-last
data_files:
- split: train
path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--seed-3--agg_strategy-last/train-*
---
|
tcapelle/train_ds_triton | tcapelle | 2025-05-02T19:15:54Z | 301 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-21T20:44:21Z | null | ---
dataset_info:
features:
- name: uuid
dtype: string
- name: file_name
dtype: string
- name: repo_name
dtype: string
- name: file_path
dtype: string
- name: commit_hash
dtype: string
- name: starcount
dtype: int64
- name: input
dtype: string
- name: category
struct:
- name: Data Type
sequence: string
- name: Functionality
sequence: string
- name: Memory Access Pattern
sequence: string
- name: Parallelization Strategy
sequence: string
- name: Performance Objective
sequence: string
- name: licenses
sequence: string
- name: github_url
dtype: string
- name: description
dtype: string
- name: pytorch_code_with_tests
dtype: string
- name: format_pt_code
dtype: string
- name: entrypoint
dtype: string
- name: pt_code_runs
dtype: bool
- name: stop_reason
dtype: string
- name: pt_code_without_tests
dtype: string
- name: tests
dtype: string
- name: stdout
dtype: string
- name: stderr
dtype: string
- name: runtime
dtype: float64
- name: prompt
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 18922341
num_examples: 863
download_size: 6838428
dataset_size: 18922341
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
zhengbang0707/REFUEL_it2_mask1_v2_90k | zhengbang0707 | 2025-05-02T19:13:40Z | 0 | 0 | [
"region:us"
] | [] | 2025-05-02T19:01:30Z | null | ---
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
- split: val
path: data/val-*
dataset_info:
features:
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: reject
list:
- name: content
dtype: string
- name: role
dtype: string
- name: chosen_token
sequence: int64
- name: reject_token
sequence: int64
- name: chosen_mask
sequence: int64
- name: chosen_mask_user
sequence: int64
- name: reject_mask
sequence: int64
- name: reject_mask_user
sequence: int64
- name: chosen_reward_list
sequence: float64
- name: reject_reward_list
sequence: float64
- name: chosen_reward_list_new
sequence: float64
- name: reject_reward_list_new
sequence: float64
- name: chosen_reward
dtype: float64
- name: reject_reward
dtype: float64
splits:
- name: train
num_bytes: 9567284831.77984
num_examples: 90000
- name: test
num_bytes: 53174161
num_examples: 500
- name: val
num_bytes: 53044292
num_examples: 500
download_size: 576372995
dataset_size: 9673503284.77984
---
# Dataset Card for "REFUEL_it2_mask1_v2_90k"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
SpeedyFrostfish/llama-3.2-3b-Cooking-ChatBot | SpeedyFrostfish | 2025-05-02T18:51:19Z | 49 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-23T14:46:37Z | null | ---
dataset_info:
features:
- name: Instruction
dtype: string
- name: Input
dtype: string
- name: Output
dtype: string
splits:
- name: train
num_bytes: 63513183
num_examples: 60000
download_size: 27759123
dataset_size: 63513183
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ryzax/train_v1 | ryzax | 2025-05-02T18:46:32Z | 113 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-28T00:34:04Z | null | ---
dataset_info:
features:
- name: problem
dtype: string
- name: solution
dtype: string
- name: tests
dtype: string
- name: domain
dtype: string
- name: source
dtype: string
- name: difficulty
dtype: string
- name: metadata
dtype: string
splits:
- name: train
num_bytes: 2319101496.4583063
num_examples: 607036
download_size: 1123521487
dataset_size: 2319101496.4583063
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
winterkitsune/elka-pl-news | winterkitsune | 2025-05-02T18:46:03Z | 0 | 0 | [
"task_categories:summarization",
"task_categories:text-classification",
"language:pl",
"license:apache-2.0",
"size_categories:10K<n<100K",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"summarization",
"text-classification"
] | 2025-05-02T18:28:21Z | null | ---
license: apache-2.0
task_categories:
- summarization
- text-classification
language:
- pl
pretty_name: elka.pl news
size_categories:
- 10K<n<100K
---
# Elka.pl news
This dataset contains scraped news articles from polish, regional news site [https://elka.pl](elka.pl)
## Dataset Details
This dataset contains articles about news, events from over 20 years in Leszno, Kościan, Gostyń, Góra, Rawicz, Wschowa cities. The CSV contains the following fields: `id, url, title, subtitle, lead, author, date, content`.
At first, i didn't create that dataset with AI processing in mind, but mostly as a way to preserve history of region and be able to better search across articles.
### Source Data
#### Data Collection and Processing
As the website is using incremental IDs for articles, it was pretty trivial to scrape the data. As the articles spans across over near 20 years, and various cms updates with questionable html practices it was not that easy to establish a stable way to get all the data, but it should be mostly there.
To scrape the articles, i used a [https://github.com/gocolly/colly](colly) golang framework.
As to respect the small team, and not overwhelm the server, I set hard limit at 1 request per second, the scrape took about 3 days. |
VGraf/context_switch_alpacaeval_3lt | VGraf | 2025-05-02T18:25:39Z | 62 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-02T22:09:56Z | null | ---
dataset_info:
features:
- name: dataset
dtype: string
- name: instruction
dtype: string
- name: output
dtype: string
- name: generator
dtype: string
- name: messages
list:
- name: role
dtype: string
- name: content
dtype: string
splits:
- name: train
num_bytes: 13210819
num_examples: 805
download_size: 7428530
dataset_size: 13210819
configs:
- config_name: alpaca_eval_gpt4_baseline
data_files:
- split: eval
path: data/train-*
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
flagrantia/character_select_stand_alone_app | flagrantia | 2025-05-02T18:10:08Z | 7,907 | 1 | [
"license:mit",
"size_categories:10K<n<100K",
"modality:image",
"modality:text",
"region:us"
] | [] | 2025-03-07T07:48:49Z | null | ---
license: mit
configs:
- config_name: default1
data_files:
- split: md5
path: "wai_character_md5.csv"
- config_name: default2
data_files:
- split: base64_gzipped_webp
path: "wai_character_thumbs.json"
size_categories:
- n<6K
---
https://github.com/mirabarukaso/character_select_stand_alone_app |
tacab/Asr_agri_somalii | tacab | 2025-05-02T17:59:34Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T17:55:53Z | null | ---
dataset_info:
features:
- name: audio
dtype: audio
- name: cleaned_text
dtype: string
splits:
- name: train
num_bytes: 597094141.956
num_examples: 2778
download_size: 397302337
dataset_size: 597094141.956
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Misraj/SadeedDiac-25 | Misraj | 2025-05-02T17:57:58Z | 33 | 2 | [
"task_categories:text-generation",
"language:ar",
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2504.21635",
"region:us"
] | [
"text-generation"
] | 2025-04-27T14:03:31Z | null | ---
language:
- ar
size_categories:
- 1K<n<10K
task_categories:
- text-generation
dataset_info:
features:
- name: filename
dtype: string
- name: ground_truth
dtype: string
splits:
- name: train
num_bytes: 926418
num_examples: 1200
download_size: 407863
dataset_size: 926418
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# SadeedDiac-25: A Benchmark for Arabic Diacritization
[Paper](https://huggingface.co/papers/2504.21635)
**SadeedDiac-25** is a comprehensive and linguistically diverse benchmark specifically designed for evaluating Arabic diacritization models. It unifies Modern Standard Arabic (MSA) and Classical Arabic (CA) in a single dataset, addressing key limitations in existing benchmarks.
## Overview
Existing Arabic diacritization benchmarks tend to focus on either Classical Arabic (e.g., Fadel, Abbad) or Modern Standard Arabic (e.g., CATT, WikiNews), with limited domain diversity and quality inconsistencies. SadeedDiac-25 addresses these issues by:
- Combining MSA and CA in one dataset
- Covering diverse domains (e.g., news, religion, politics, sports, culinary arts)
- Ensuring high annotation quality through a multi-stage expert review process
- Avoiding contamination from large-scale pretraining corpora
## Dataset Composition
SadeedDiac-25 consists of 1,200 paragraphs:
- **📘 50% Modern Standard Arabic (MSA)**
- 454 paragraphs of curated original MSA content
- 146 paragraphs from WikiNews
- Length: 40–50 words per paragraph
- **📗 50% Classical Arabic (CA)**
- 📖 600 paragraphs from the Fadel test set
## Evaluation Results
We evaluated several models on SadeedDiac-25, including proprietary LLMs and open-source Arabic models. Evaluation metrics include Diacritic Error Rate (DER), Word Error Rate (WER), and hallucination rates.
The evaluation code for this dataset is available at: https://github.com/misraj-ai/Sadeed
### Evaluation Table
| Model | DER (CE) | WER (CE) | DER (w/o CE) | WER (w/o CE) | Hallucinations |
| ------------------------ | ---------- | ---------- | ------------ | ------------ | -------------- |
| Claude-3-7-Sonnet-Latest | **1.3941** | **4.6718** | **0.7693** | **2.3098** | **0.821** |
| GPT-4 | 3.8645 | 5.2719 | 3.8645 | 10.9274 | 1.0242 |
| Gemini-Flash-2.0 | 3.1926 | 7.9942 | 2.3783 | 5.5044 | 1.1713 |
| *Sadeed* | *7.2915* | *13.7425* | *5.2625* | *9.9245* | *7.1946* |
| Aya-23-8B | 25.6274 | 47.4908 | 19.7584 | 40.2478 | 5.7793 |
| ALLaM-7B-Instruct | 50.3586 | 70.3369 | 39.4100 | 67.0920 | 36.5092 |
| Yehia-7B | 50.8801 | 70.2323 | 39.7677 | 67.1520 | 43.1113 |
| Jais-13B | 78.6820 | 99.7541 | 60.7271 | 99.5702 | 61.0803 |
| Gemma-2-9B | 78.8560 | 99.7928 | 60.9188 | 99.5895 | 86.8771 |
| SILMA-9B-Instruct-v1.0 | 78.6567 | 99.7367 | 60.7106 | 99.5586 | 93.6515 |
> **Note**: CE = Case Ending
## Citation
If you use SadeedDiac-25 in your work, please cite:
```bibtex
@misc{aldallal2025sadeedadvancingarabicdiacritization,
title={Sadeed: Advancing Arabic Diacritization Through Small Language Model},
author={Zeina Aldallal and Sara Chrouf and Khalil Hennara and Mohamed Motaism Hamed and Muhammad Hreden and Safwan AlModhayan},
year={2025},
eprint={2504.21635},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2504.21635},
}
```
## License
📄 This dataset is released under the CC BY-NC-SA 4.0 License.
## Contact
📬 For questions, contact [Misraj-AI](https://misraj.ai/) on Hugging Face. |
ethicalabs/Ouroboros-Kurtis-MH | ethicalabs | 2025-05-02T17:48:49Z | 33 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-28T23:38:22Z | null | ---
dataset_info:
features:
- name: input
dtype: string
- name: original_response
dtype: string
- name: completion
dtype: string
- name: reasoning
sequence: string
- name: domain
dtype: string
- name: source_dataset
dtype: string
- name: dataset_name
dtype: string
splits:
- name: train
num_bytes: 8308691
num_examples: 4119
download_size: 3228852
dataset_size: 8308691
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ieuniversity/group_4_submission | ieuniversity | 2025-05-02T17:46:00Z | 148 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-23T21:10:03Z | null | ---
dataset_info:
features:
- name: ID
dtype: string
- name: CLASE
dtype: string
splits:
- name: train
num_bytes: 897695
num_examples: 25808
download_size: 500636
dataset_size: 897695
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
rayonlabs/wmt19-fi-en | rayonlabs | 2025-05-02T17:16:50Z | 0 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T17:16:24Z | null | ---
dataset_info:
features:
- name: fi
dtype: string
- name: en
dtype: string
splits:
- name: train
num_bytes: 1422917719
num_examples: 6587448
download_size: 735149976
dataset_size: 1422917719
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
rayonlabs/wmt19-gu-en | rayonlabs | 2025-05-02T17:11:48Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T17:11:44Z | null | ---
dataset_info:
features:
- name: gu
dtype: string
- name: en
dtype: string
splits:
- name: train
num_bytes: 590747
num_examples: 11670
download_size: 357671
dataset_size: 590747
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
IABD11/DatasetEmocionesIABD11 | IABD11 | 2025-05-02T17:11:04Z | 0 | 0 | [
"license:cc-by-nc-4.0",
"size_categories:n<1K",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-29T17:10:37Z | null | ---
license: cc-by-nc-4.0
---
|
SmilingWolf/wdtagger-v3-seed | SmilingWolf | 2025-05-02T17:10:53Z | 309 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-02T16:17:47Z | null | ---
dataset_info:
features:
- name: image_id
dtype: int64
- name: rating
dtype: string
- name: general_tags
dtype: string
- name: character_tags
dtype: string
splits:
- name: train
num_bytes: 2031877486
num_examples: 5750784
- name: val
num_bytes: 112900829
num_examples: 319488
- name: test
num_bytes: 112649500
num_examples: 318464
- name: rejected
num_bytes: 186624390
num_examples: 793722
download_size: 957049809
dataset_size: 2444052205
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: val
path: data/val-*
- split: test
path: data/test-*
- split: rejected
path: data/rejected-*
---
|
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.