datasetId
large_stringlengths 6
116
| author
large_stringlengths 2
42
| last_modified
large_stringdate 2021-04-29 15:34:29
2025-06-25 02:40:10
| downloads
int64 0
3.97M
| likes
int64 0
7.74k
| tags
large listlengths 1
7.92k
| task_categories
large listlengths 0
48
| createdAt
large_stringdate 2022-03-02 23:29:22
2025-06-25 00:32:52
| trending_score
float64 0
64
| card
large_stringlengths 31
1.01M
|
---|---|---|---|---|---|---|---|---|---|
kh4dien/mc-german | kh4dien | 2025-01-24T01:02:37Z | 54 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-01-24T01:02:36Z | 0 | ---
dataset_info:
features:
- name: question
dtype: string
- name: correct
dtype: string
- name: incorrect
dtype: string
splits:
- name: train
num_bytes: 326045.99022736336
num_examples: 5145
download_size: 161034
dataset_size: 326045.99022736336
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
hirundo-io/TruthfulQA-hallucinations-free-text | hirundo-io | 2025-04-21T11:51:52Z | 37 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-21T11:49:43Z | 0 | ---
dataset_info:
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 93487
num_examples: 817
download_size: 58366
dataset_size: 93487
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Allen44/high-quality-instruction-response-pairs | Allen44 | 2025-04-13T10:59:07Z | 15 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-12T05:12:59Z | 0 | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: response
dtype: string
- name: rating
dtype: int64
splits:
- name: train
num_bytes: 103446
num_examples: 100
download_size: 64786
dataset_size: 103446
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
uzair921/SKILLSPAN_embeddings_text | uzair921 | 2025-02-07T22:44:28Z | 14 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-07T22:44:17Z | 0 | ---
dataset_info:
features:
- name: tokens
sequence: string
- name: ner_tags
sequence:
class_label:
names:
'0': O
'1': B-Skill
'2': I-Skill
- name: context
dtype: string
- name: embedding
sequence: float64
splits:
- name: train
num_bytes: 77854136
num_examples: 3075
- name: validation
num_bytes: 778061
num_examples: 1397
- name: test
num_bytes: 826998
num_examples: 1523
download_size: 55862166
dataset_size: 79459195
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
secmlr/clean_dataset_dsformat_filtered_together-deepseek-reasoner_train_len_8000_inputlen_5000 | secmlr | 2025-02-28T06:11:59Z | 24 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-28T04:06:59Z | 0 | ---
dataset_info:
features:
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
- name: system
dtype: string
- name: idx
dtype: int64
- name: cwe
sequence: string
splits:
- name: train
num_bytes: 49119405
num_examples: 4978
download_size: 12770997
dataset_size: 49119405
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
scubaSteve512/nfl_prediction_demo | scubaSteve512 | 2024-10-20T23:38:04Z | 19 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-10-20T23:37:52Z | 0 | ---
dataset_info:
features:
- name: prompt
dtype: string
splits:
- name: train
num_bytes: 158904
num_examples: 670
download_size: 27959
dataset_size: 158904
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
juliadollis/FINET_30mil_qwen25_7bI_promptoriginal | juliadollis | 2025-02-22T23:08:02Z | 16 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-22T23:08:00Z | 0 | ---
dataset_info:
features:
- name: Description
dtype: string
- name: Patient
dtype: string
- name: Doctor
dtype: string
- name: Translated_Description
dtype: string
- name: Translated_Patient
dtype: string
- name: Translated_Doctor
dtype: string
- name: Inferencia
dtype: string
splits:
- name: train
num_bytes: 261326
num_examples: 100
download_size: 132918
dataset_size: 261326
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
stalaei/realmath_2025-2025-03 | stalaei | 2025-06-24T01:46:55Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-24T01:46:45Z | 0 | ---
dataset_info:
features:
- name: paper_link
dtype: string
- name: theorem
dtype: string
- name: question
dtype: string
- name: answer
dtype: string
- name: context
dtype: string
- name: submission_date
dtype: string
splits:
- name: train
num_bytes: 286850323
num_examples: 508
download_size: 166903020
dataset_size: 286850323
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
RyanYr/reflect_nonGenCritic_llama8b-t0_gt-t1_mstllrg-t2_80k | RyanYr | 2024-12-29T23:34:16Z | 16 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-12-29T04:19:27Z | 0 | ---
dataset_info:
features:
- name: problem
dtype: string
- name: generated_solution
dtype: string
- name: answer
dtype: string
- name: problem_source
dtype: string
- name: response@0
sequence: string
- name: response@0_ans
sequence: string
- name: response@0_correctness
sequence: bool
- name: response@2
sequence: string
splits:
- name: train
num_bytes: 338596643
num_examples: 80000
download_size: 145566590
dataset_size: 338596643
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
alangai/freedata | alangai | 2024-10-07T19:17:00Z | 13 | 0 | [
"license:apache-2.0",
"region:us"
] | [] | 2024-10-07T19:17:00Z | 0 | ---
license: apache-2.0
---
|
yunjae-won/mp_gemma9b_sft_ogd_rms_epoch4_10k_n8 | yunjae-won | 2025-05-17T06:37:12Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-17T03:46:45Z | 0 | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: output
dtype: string
- name: policy_logps
dtype: float64
- name: ref_logps
dtype: float64
- name: weight
dtype: float64
splits:
- name: train
num_bytes: 29067637
num_examples: 10000
download_size: 4218963
dataset_size: 29067637
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
hackulos/my-distiset-bb1120ca | hackulos | 2025-02-15T21:46:18Z | 10 | 0 | [
"task_categories:text-classification",
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"library:distilabel",
"region:us",
"synthetic",
"distilabel",
"rlaif",
"datacraft"
] | [
"text-classification"
] | 2025-02-15T21:46:16Z | 0 | ---
size_categories: n<1K
task_categories:
- text-classification
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': dora
'1': cyber resilience act
'2': red directive
splits:
- name: train
num_bytes: 40113
num_examples: 99
download_size: 16390
dataset_size: 40113
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
tags:
- synthetic
- distilabel
- rlaif
- datacraft
---
<p align="left">
<a href="https://github.com/argilla-io/distilabel">
<img src="https://raw.githubusercontent.com/argilla-io/distilabel/main/docs/assets/distilabel-badge-light.png" alt="Built with Distilabel" width="200" height="32"/>
</a>
</p>
# Dataset Card for my-distiset-bb1120ca
This dataset has been created with [distilabel](https://distilabel.argilla.io/).
## Dataset Summary
This dataset contains a `pipeline.yaml` which can be used to reproduce the pipeline that generated it in distilabel using the `distilabel` CLI:
```console
distilabel pipeline run --config "https://huggingface.co/datasets/hackulos/my-distiset-bb1120ca/raw/main/pipeline.yaml"
```
or explore the configuration:
```console
distilabel pipeline info --config "https://huggingface.co/datasets/hackulos/my-distiset-bb1120ca/raw/main/pipeline.yaml"
```
## Dataset structure
The examples have the following structure per configuration:
<details><summary> Configuration: default </summary><hr>
```json
{
"label": 1,
"text": "The General Data Protection Regulation (GDPR) of the European Union has imposed obligations on organizations to implement technical and organizational measures to ensure the security of personal data. One of these measures is the pseudonymization of personal data, which involves transforming the data into a form that is no longer directly associated with an individual, while still maintaining its utility. This concept is similar to encryption, but the difference lies in the fact that pseudonymization is reversible, whereas encryption is not. Furthermore, pseudonymization is required to be performed in such a way that the original data cannot be easily reversed, thereby achieving the goal of protecting sensitive information."
}
```
This subset can be loaded as:
```python
from datasets import load_dataset
ds = load_dataset("hackulos/my-distiset-bb1120ca", "default")
```
Or simply as it follows, since there's only one configuration and is named `default`:
```python
from datasets import load_dataset
ds = load_dataset("hackulos/my-distiset-bb1120ca")
```
</details>
|
neelabh17/2025-04-21_13.22.59.114647_partial_40-Qwen2.5-3B-Instruct | neelabh17 | 2025-04-21T17:23:05Z | 20 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-21T17:23:03Z | 0 | ---
dataset_info:
features:
- name: response_0
dtype: string
- name: answer_0
dtype: string
- name: correct_0
dtype: int64
splits:
- name: train
num_bytes: 6859589
num_examples: 6076
download_size: 2314219
dataset_size: 6859589
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
saintlyk1d/dont-say-it-prompts-player1-basic | saintlyk1d | 2025-04-11T05:14:33Z | 54 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-11T05:14:32Z | 0 | ---
dataset_info:
features:
- name: secret_word
dtype: string
- name: prompt
dtype: string
splits:
- name: train
num_bytes: 143686
num_examples: 494
download_size: 17475
dataset_size: 143686
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
abhinav302019/olympiad_data_10018 | abhinav302019 | 2025-02-24T11:24:35Z | 16 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-24T11:24:33Z | 0 | ---
dataset_info:
features:
- name: problem
dtype: string
- name: Known_Solution
dtype: string
- name: Known_Answer
dtype: string
- name: Generated_Solution
dtype: string
- name: Generated_Answer
dtype: string
- name: Judge_Evaluation
dtype: string
- name: Judge_Rating
dtype: string
- name: Judge_Justification
dtype: string
splits:
- name: train
num_bytes: 129638
num_examples: 9
download_size: 99961
dataset_size: 129638
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mdeputy/053025-segmentation-masks | mdeputy | 2025-05-31T06:09:45Z | 62 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-31T05:56:32Z | 0 | ---
dataset_info:
features:
- name: id
dtype: string
- name: mask
dtype:
array2_d:
shape:
- 5632
- 5632
dtype: uint8
splits:
- name: chunk_0
num_bytes: 158709895
num_examples: 5
- name: chunk_1
num_bytes: 158709895
num_examples: 5
- name: chunk_2
num_bytes: 158709895
num_examples: 5
- name: chunk_3
num_bytes: 158709898
num_examples: 5
- name: chunk_4
num_bytes: 158709900
num_examples: 5
- name: chunk_5
num_bytes: 158709900
num_examples: 5
- name: chunk_6
num_bytes: 158709900
num_examples: 5
- name: chunk_7
num_bytes: 158709900
num_examples: 5
- name: chunk_8
num_bytes: 158709900
num_examples: 5
- name: chunk_9
num_bytes: 158709900
num_examples: 5
- name: chunk_10
num_bytes: 158709900
num_examples: 5
- name: chunk_11
num_bytes: 158709900
num_examples: 5
- name: chunk_12
num_bytes: 158709900
num_examples: 5
- name: chunk_13
num_bytes: 158709895
num_examples: 5
- name: chunk_14
num_bytes: 158709895
num_examples: 5
- name: chunk_15
num_bytes: 158709895
num_examples: 5
- name: chunk_16
num_bytes: 158709895
num_examples: 5
- name: chunk_17
num_bytes: 158709895
num_examples: 5
- name: chunk_18
num_bytes: 158709895
num_examples: 5
- name: chunk_19
num_bytes: 158709895
num_examples: 5
- name: chunk_20
num_bytes: 158709895
num_examples: 5
- name: chunk_21
num_bytes: 158709895
num_examples: 5
- name: chunk_22
num_bytes: 158709895
num_examples: 5
- name: chunk_23
num_bytes: 158709895
num_examples: 5
- name: chunk_24
num_bytes: 95225937
num_examples: 3
download_size: 11348515
dataset_size: 3904263465
configs:
- config_name: default
data_files:
- split: chunk_0
path: data/chunk_0-*
- split: chunk_1
path: data/chunk_1-*
- split: chunk_2
path: data/chunk_2-*
- split: chunk_3
path: data/chunk_3-*
- split: chunk_4
path: data/chunk_4-*
- split: chunk_5
path: data/chunk_5-*
- split: chunk_6
path: data/chunk_6-*
- split: chunk_7
path: data/chunk_7-*
- split: chunk_8
path: data/chunk_8-*
- split: chunk_9
path: data/chunk_9-*
- split: chunk_10
path: data/chunk_10-*
- split: chunk_11
path: data/chunk_11-*
- split: chunk_12
path: data/chunk_12-*
- split: chunk_13
path: data/chunk_13-*
- split: chunk_14
path: data/chunk_14-*
- split: chunk_15
path: data/chunk_15-*
- split: chunk_16
path: data/chunk_16-*
- split: chunk_17
path: data/chunk_17-*
- split: chunk_18
path: data/chunk_18-*
- split: chunk_19
path: data/chunk_19-*
- split: chunk_20
path: data/chunk_20-*
- split: chunk_21
path: data/chunk_21-*
- split: chunk_22
path: data/chunk_22-*
- split: chunk_23
path: data/chunk_23-*
- split: chunk_24
path: data/chunk_24-*
---
|
olachinkei/BFCL_jp | olachinkei | 2025-06-24T14:11:16Z | 0 | 0 | [
"license:apache-2.0",
"region:us"
] | [] | 2025-06-24T14:11:16Z | 0 | ---
license: apache-2.0
---
|
ryota39/llm-jp-chatbot-arena-conversations-reformatted | ryota39 | 2025-05-19T08:15:20Z | 58 | 0 | [
"license:cc",
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-19T08:07:23Z | 0 | ---
dataset_info:
features:
- name: id
dtype: string
- name: model_a
dtype: string
- name: model_b
dtype: string
- name: winner
dtype: string
- name: conversation_a
list:
- name: content
dtype: string
- name: role
dtype: string
- name: speaker
dtype: string
- name: conversation_b
list:
- name: content
dtype: string
- name: role
dtype: string
- name: speaker
dtype: string
- name: turn
dtype: int64
- name: annoy
dtype: bool
- name: language
dtype: string
splits:
- name: train
num_bytes: 4264749
num_examples: 990
download_size: 1812809
dataset_size: 4264749
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
license: cc
---
- [llm-jp/llm-jp-chatbot-arena-conversations](https://huggingface.co/datasets/llm-jp/llm-jp-chatbot-arena-conversations)を整形したデータセットです |
Deason11/so100_test7 | Deason11 | 2025-03-14T07:49:56Z | 35 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"so100",
"tutorial"
] | [
"robotics"
] | 2025-03-14T07:48:50Z | 0 | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- so100
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so100",
"total_episodes": 2,
"total_frames": 590,
"total_tasks": 1,
"total_videos": 4,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:2"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
7
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_wrist_angle",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
7
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_wrist_angle",
"main_gripper"
]
},
"observation.images.L_OverheadCamera": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.wrist": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
HanXiao1999/UI-Genie-Agent-5k | HanXiao1999 | 2025-05-29T04:27:31Z | 83 | 0 | [
"task_categories:image-text-to-text",
"language:en",
"license:mit",
"size_categories:1K<n<10K",
"format:json",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2505.21496",
"region:us"
] | [
"image-text-to-text"
] | 2025-05-27T15:00:50Z | 0 | ---
task_categories:
- image-text-to-text
language:
- en
license: mit
---
This repository contains the dataset from the paper [UI-Genie: A Self-Improving Approach for Iteratively Boosting MLLM-based
Mobile GUI Agents](https://huggingface.co/papers/2505.21496).
Github: https://github.com/Euphoria16/UI-Genie |
Min-Jaewon/sdxl_laion_2_0 | Min-Jaewon | 2025-01-17T01:55:09Z | 147 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-01-17T01:47:30Z | 0 | ---
dataset_info:
features:
- name: id
dtype: string
- name: prompt
dtype: string
- name: latents
dtype:
array4_d:
shape:
- 2
- 4
- 128
- 128
dtype: float32
- name: prompt_embed
dtype:
array3_d:
shape:
- 1
- 77
- 2048
dtype: float32
- name: pooled_prompt_embed
dtype:
array2_d:
shape:
- 1
- 1280
dtype: float32
splits:
- name: train
num_bytes: 8735479669
num_examples: 7500
download_size: 8739413858
dataset_size: 8735479669
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
nreHieW/Extracted_GSM | nreHieW | 2025-01-26T16:44:11Z | 19 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-01-23T01:14:36Z | 0 | ---
dataset_info:
features:
- name: type
dtype: string
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 1890281
num_examples: 7473
download_size: 1068227
dataset_size: 1890281
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
To reproduce
```python
from datasets import load_dataset, Dataset
from tqdm import tqdm
ds = load_dataset("allenai/RLVR-GSM")
out = []
for item in tqdm(ds["train"]):
text = item['messages'][0]['content']
text = text.split("\n\nQuestion:")[-1].strip()
out.append({
"type": "math",
"question": text,
'answer': item['ground_truth']
})
ds = Dataset.from_list(out)
``` |
junn991/asdasd | junn991 | 2024-11-19T17:04:42Z | 17 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-11-18T08:46:08Z | 0 | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: output
dtype: string
- name: input
dtype: string
splits:
- name: train
num_bytes: 5380
num_examples: 90
download_size: 5714
dataset_size: 5380
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
KiViDrag/breastmnist_50 | KiViDrag | 2024-11-08T11:29:48Z | 61 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-11-08T09:46:23Z | 0 | ---
dataset_info:
features:
- name: image
dtype: image
- name: label
dtype:
class_label:
names:
'0': malignant
'1': normal, benign
splits:
- name: train
num_bytes: 4346269.75
num_examples: 4914
download_size: 4440909
dataset_size: 4346269.75
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ZixuanKe/trade_the_event_sup_dpo_binarized | ZixuanKe | 2024-11-04T21:46:46Z | 20 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-11-03T22:52:43Z | 0 | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: ground_truth_chosen
dtype: string
- name: v1_rejected
dtype: string
splits:
- name: train
num_bytes: 1390170654.0
num_examples: 243425
- name: validation
num_bytes: 72814246.0
num_examples: 12812
download_size: 793179358
dataset_size: 1462984900.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
french-datasets/jpacifico_merged-admin-def-dataset-16k | french-datasets | 2025-06-22T11:39:05Z | 0 | 0 | [
"language:fra",
"region:us"
] | [] | 2025-06-22T11:38:16Z | 0 | ---
language: "fra"
viewer: false
---
Ce répertoire est vide, il a été créé pour améliorer le référencement du jeu de données [jpacifico/merged-admin-def-dataset-16k](https://huggingface.co/datasets/jpacifico/merged-admin-def-dataset-16k).
|
rakhman-llm/XCodeEval-Java-Dataset | rakhman-llm | 2024-11-26T01:16:19Z | 30 | 0 | [
"task_categories:text-generation",
"size_categories:100K<n<1M",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"code"
] | [
"text-generation"
] | 2024-11-25T05:36:22Z | 0 | ---
task_categories:
- text-generation
tags:
- code
size_categories:
- 100K<n<1M
--- |
yav1327/music-generation-llm | yav1327 | 2024-10-17T15:35:25Z | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-10-17T15:35:07Z | 0 | ---
dataset_info:
features:
- name: song_id
dtype: int64
- name: filename
dtype: string
- name: filepath
dtype:
audio:
sampling_rate: 16000
- name: genre_id
dtype: int64
- name: genre
dtype: string
splits:
- name: train
num_bytes: 369471304.0
num_examples: 945
download_size: 369252420
dataset_size: 369471304.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
michsethowusu/dinka-dyula_sentence-pairs | michsethowusu | 2025-04-03T10:26:13Z | 52 | 0 | [
"size_categories:10K<n<100K",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-03T10:26:10Z | 0 |
---
dataset_info:
features:
- name: score
dtype: float32
- name: Dinka
dtype: string
- name: Dyula
dtype: string
splits:
- name: train
num_bytes: 2860214
num_examples: 17561
download_size: 2860214
dataset_size: 2860214
configs:
- config_name: default
data_files:
- split: train
path: Dinka-Dyula_Sentence-Pairs.csv
---
# Dinka-Dyula_Sentence-Pairs Dataset
This dataset contains sentence pairs for African languages along with similarity scores. It can be used for machine translation, sentence alignment, or other natural language processing tasks.
This dataset is based on the NLLBv1 dataset, published on OPUS under an open-source initiative led by META. You can find more information here: [OPUS - NLLB-v1](https://opus.nlpl.eu/legacy/NLLB-v1.php)
## Metadata
- **File Name**: Dinka-Dyula_Sentence-Pairs
- **Number of Rows**: 17561
- **Number of Columns**: 3
- **Columns**: score, Dinka, Dyula
## Dataset Description
The dataset contains sentence pairs in African languages with an associated similarity score. Each row consists of three columns:
1. `score`: The similarity score between the two sentences (range from 0 to 1).
2. `Dinka`: The first sentence in the pair (language 1).
3. `Dyula`: The second sentence in the pair (language 2).
This dataset is intended for use in training and evaluating machine learning models for tasks like translation, sentence similarity, and cross-lingual transfer learning.
## References
Below are papers related to how the data was collected and used in various multilingual and cross-lingual applications:
[1] Holger Schwenk and Matthijs Douze, Learning Joint Multilingual Sentence Representations with Neural Machine Translation, ACL workshop on Representation Learning for NLP, 2017
[2] Holger Schwenk and Xian Li, A Corpus for Multilingual Document Classification in Eight Languages, LREC, pages 3548-3551, 2018.
[3] Holger Schwenk, Filtering and Mining Parallel Data in a Joint Multilingual Space ACL, July 2018
[4] Alexis Conneau, Guillaume Lample, Ruty Rinott, Adina Williams, Samuel R. Bowman, Holger Schwenk and Veselin Stoyanov, XNLI: Cross-lingual Sentence Understanding through Inference, EMNLP, 2018.
[5] Mikel Artetxe and Holger Schwenk, Margin-based Parallel Corpus Mining with Multilingual Sentence Embeddings arXiv, Nov 3 2018.
[6] Mikel Artetxe and Holger Schwenk, Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond arXiv, Dec 26 2018.
[7] Holger Schwenk, Vishrav Chaudhary, Shuo Sun, Hongyu Gong and Paco Guzman, WikiMatrix: Mining 135M Parallel Sentences in 1620 Language Pairs from Wikipedia arXiv, July 11 2019.
[8] Holger Schwenk, Guillaume Wenzek, Sergey Edunov, Edouard Grave and Armand Joulin CCMatrix: Mining Billions of High-Quality Parallel Sentences on the WEB
[9] Paul-Ambroise Duquenne, Hongyu Gong, Holger Schwenk, Multimodal and Multilingual Embeddings for Large-Scale Speech Mining, NeurIPS 2021, pages 15748-15761.
[10] Kevin Heffernan, Onur Celebi, and Holger Schwenk, Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages
|
yoonholee/metamath-hint-and-data-v4 | yoonholee | 2025-03-01T19:09:48Z | 18 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-28T19:52:03Z | 0 | ---
dataset_info:
features:
- name: problem
dtype: string
- name: answer
dtype: string
- name: completion
sequence: string
- name: completion_answer
sequence: string
- name: completion_correct
sequence: bool
- name: completion_succ_rate
dtype: float64
- name: domain
dtype: string
- name: context
dtype: string
- name: hint
dtype: string
splits:
- name: train
num_bytes: 1225053097
num_examples: 40000
download_size: 375337679
dataset_size: 1225053097
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Cornell-AGI/math_size_qwen2.5_3b_eval | Cornell-AGI | 2025-05-29T23:19:30Z | 40 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-29T23:19:23Z | 0 | ---
dataset_info:
features:
- name: level
dtype: string
- name: type
dtype: string
- name: data_source
dtype: string
- name: prompt
list:
- name: content
dtype: string
- name: role
dtype: string
- name: ability
dtype: string
- name: reward_model
struct:
- name: ground_truth
dtype: string
- name: style
dtype: string
- name: extra_info
struct:
- name: answer
dtype: string
- name: index
dtype: int64
- name: question
dtype: string
- name: split
dtype: string
- name: response_0
dtype: string
- name: response_1
dtype: string
- name: response_2
dtype: string
- name: response_3
dtype: string
- name: response_4
dtype: string
- name: response_5
dtype: string
- name: response_6
dtype: string
- name: response_7
dtype: string
- name: response_8
dtype: string
- name: response_9
dtype: string
- name: response_10
dtype: string
- name: response_11
dtype: string
- name: response_12
dtype: string
- name: response_13
dtype: string
- name: response_14
dtype: string
- name: response_15
dtype: string
- name: response_16
dtype: string
- name: response_17
dtype: string
- name: response_18
dtype: string
- name: response_19
dtype: string
- name: response_20
dtype: string
- name: response_21
dtype: string
- name: response_22
dtype: string
- name: response_23
dtype: string
- name: response_24
dtype: string
- name: response_25
dtype: string
- name: response_26
dtype: string
- name: response_27
dtype: string
- name: response_28
dtype: string
- name: response_29
dtype: string
- name: response_30
dtype: string
- name: response_31
dtype: string
- name: eval_0
dtype: float64
- name: eval_1
dtype: float64
- name: eval_2
dtype: float64
- name: eval_3
dtype: float64
- name: eval_4
dtype: float64
- name: eval_5
dtype: float64
- name: eval_6
dtype: float64
- name: eval_7
dtype: float64
- name: eval_8
dtype: float64
- name: eval_9
dtype: float64
- name: eval_10
dtype: float64
- name: eval_11
dtype: float64
- name: eval_12
dtype: float64
- name: eval_13
dtype: float64
- name: eval_14
dtype: float64
- name: eval_15
dtype: float64
- name: eval_16
dtype: float64
- name: eval_17
dtype: float64
- name: eval_18
dtype: float64
- name: eval_19
dtype: float64
- name: eval_20
dtype: float64
- name: eval_21
dtype: float64
- name: eval_22
dtype: float64
- name: eval_23
dtype: float64
- name: eval_24
dtype: float64
- name: eval_25
dtype: float64
- name: eval_26
dtype: float64
- name: eval_27
dtype: float64
- name: eval_28
dtype: float64
- name: eval_29
dtype: float64
- name: eval_30
dtype: float64
- name: eval_31
dtype: float64
splits:
- name: train
num_bytes: 413000734
num_examples: 7500
download_size: 187731245
dataset_size: 413000734
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
infinite-dataset-hub/FitHealth | infinite-dataset-hub | 2025-03-22T22:03:24Z | 23 | 0 | [
"license:mit",
"size_categories:n<1K",
"format:csv",
"modality:tabular",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"infinite-dataset-hub",
"synthetic"
] | [] | 2025-03-22T22:03:23Z | 0 | ---
license: mit
tags:
- infinite-dataset-hub
- synthetic
---
# FitHealth
tags: Healthcare, Physical Activity, Biometric Analysis
_Note: This is an AI-generated dataset so its content may be inaccurate or false_
**Dataset Description:**
The 'FitHealth' dataset comprises records of individuals participating in fitness-related activities, paired with biometric data to monitor their health. It includes information such as types of physical activities, duration, intensity, and corresponding health metrics like heart rate, blood pressure, and body mass index (BMI). This dataset can be used to analyze the impact of different fitness routines on health parameters and to develop personalized fitness plans.
**CSV Content Preview:**
```csv
activity_id,date,activity_type,duration,intensity,heart_rate,blood_pressure,bmi
001,2023-01-15,Running,45,High,172,120/80,23.5
002,2023-01-18,Cycling,60,Moderate,155,115/75,22.1
003,2023-01-20,Swimming,30,Low,130,118/78,24.2
004,2023-01-22,Yoga,60,Low,95,110/70,21.8
005,2023-01-25,Weightlifting,45,High,165,122/82,25.3
```
**Source of the data:**
The dataset was generated using the [Infinite Dataset Hub](https://huggingface.co/spaces/infinite-dataset-hub/infinite-dataset-hub) and microsoft/Phi-3-mini-4k-instruct using the query 'Fitness':
- **Dataset Generation Page**: https://huggingface.co/spaces/infinite-dataset-hub/infinite-dataset-hub?q=Fitness&dataset=FitHealth&tags=Healthcare,+Physical+Activity,+Biometric+Analysis
- **Model**: https://huggingface.co/microsoft/Phi-3-mini-4k-instruct
- **More Datasets**: https://huggingface.co/datasets?other=infinite-dataset-hub
|
mevsg/Gongguan-TextRegions-v1 | mevsg | 2025-03-12T10:02:57Z | 15 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-12T09:58:13Z | 0 | ---
dataset_info:
features:
- name: image_path
dtype: string
- name: image_size
sequence: int64
- name: bboxes
sequence:
sequence: int64
- name: labels
sequence: string
- name: page_id
dtype: string
- name: image
dtype: image
splits:
- name: train
num_bytes: 51276245.09090909
num_examples: 8
- name: validation
num_bytes: 19228591.90909091
num_examples: 3
download_size: 70461678
dataset_size: 70504837.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
LabARSS/MMLU-Pro-single-token-entropy | LabARSS | 2025-05-26T13:43:03Z | 0 | 0 | [
"language:en",
"license:mit",
"size_categories:100K<n<1M",
"format:csv",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2503.01688",
"region:us"
] | [] | 2025-05-26T12:59:29Z | 0 | ---
configs:
- config_name: mistral_24b
data_files: "mmlu_mistral_24b.tsv"
- config_name: mistral_24b_w_fallback_if_unknown
data_files: "mmlu_mistral_24b_w_fallback_if_unknown.tsv"
- config_name: phi4
data_files: "mmlu_phi4.tsv"
- config_name: phi4_w_fallback_if_unknown
data_files: "mmlu_phi4_w_fallback_if_unknown.tsv"
- config_name: phi4_w_fallback_if_unknown_alternative_prompt
data_files: "mmlu_phi4_w_fallback_if_unknown_alternative_prompt.tsv"
- config_name: phi4mini
data_files: "mmlu_phi4mini.tsv"
- config_name: phi4mini_w_fallback_if_unknown
data_files: "mmlu_phi4mini_w_fallback_if_unknown.tsv"
- config_name: phi4mini_w_fallback_if_unknown_alternative_prompt
data_files: "mmlu_phi4mini_w_fallback_if_unknown_alternative_prompt.tsv"
- config_name: qwen_3b
data_files: "mmlu_qwen_3b.tsv"
- config_name: qwen_3b_w_fallback_if_unknown
data_files: "mmlu_qwen_3b_w_fallback_if_unknown.tsv"
- config_name: qwen_3b_w_fallback_if_unknown_alternative_prompt
data_files: "mmlu_qwen_3b_w_fallback_if_unknown_alternative_prompt.tsv"
license: mit
language:
- en
pretty_name: MMLU Pro with single token response entropy metadata for Mistral 24B, Phi4, Phi4-mini, Qwen2.5 3B
size_categories:
- 10K<n<100K
---
# Dataset Card for MMLU Pro with single token response entropy metadata for Mistral 24B, Phi4, Phi4-mini, Qwen2.5 3B
<!-- Provide a quick summary of the dataset. -->
MMLU Pro dataset with single token response entropy metadata for Mistral 24B, Phi4, Phi4-mini, Qwen2.5 3B
## Dataset Details
### Dataset Description
Following up on the results from ["When an LLM is apprehensive about its answers -- and when its uncertainty is justified"](https://arxiv.org/abs/2503.01688), we measure the response entopy for MMLU Pro dataset when the model is prompted to answer questions directly as a single token. We collect the entropy across 3 different sets of prompts: the ones that allow the model to answer "I do not know" and the ones that do not.
- **Language(s) (NLP):** English
- **License:** MIT
## Dataset Structure
Columns:
- All columns as in the original [MMLU Pro dataset](https://huggingface.co/datasets/TIGER-Lab/MMLU-Pro);
- "entropy_ans_correct_{model_internal_name}" - (bool) correctness of the model answer;
- "entropy_value_{model_internal_name}" - (float) entropy value. Default (if answer is incorrectly formatted or missing): 0.0.
- "entropy_ans_{model_internal_name}" - (str) whole decoded response.
## Prompts
### Default
System prompt:
```
The following are multiple choice questions about {subject}. Write down ONLY the NUMBER of the correct answer and nothing else.
```
User prompt:
```
Question: ...
Options:
1. ...
2. ...
...
n. ...
Choose one of the answers. Write down ONLY the NUMBER of the correct answer and nothing else.".
```
### With fallback if unknown
We allow the model to self-estimate its uncertainty and reply "0" as a special option denoting "I do not know".
System prompt:
```
The following are multiple choice questions about {subject}. If you are certain about the answer return the correct option number, otherwise return 0. Write down ONLY the NUMBER and nothing else.
```
User prompt:
```
Question: ...
Options:
1. ...
2. ...
...
n. ...
Choose one of the answers. If you are certain about the answer return the correct option number, otherwise return 0. Write down ONLY the NUMBER and nothing else.
```
### With fallback if unknown (alternative)
Alternative version of the fallback prompt.
System prompt:
```
The following are multiple choice questions about {subject}. If you know the answer return the correct option number, otherwise return 0. Write down ONLY the NUMBER and nothing else.
```
User prompt:
```
Question: ...
Options:
1. ...
2. ...
...
n. ...
Choose one of the answers. If you know the answer return the correct option number, otherwise return 0. Write down ONLY the NUMBER and nothing else.
```
## Hyperparameters
```
outputs = model.generate(
**inputs,
max_new_tokens=1,
return_dict_in_generate=True,
output_scores=True,
temperature=None,
top_p=None,
top_k=None,
do_sample=False,
num_beams=1,
pad_token_id=tokenizer.eos_token_id,
)
```
## Citation
TBD |
ilovesushiandkimchiandmalaxiangguo/for_bisai | ilovesushiandkimchiandmalaxiangguo | 2025-05-03T07:51:37Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-03T07:50:17Z | 0 | ---
dataset_info:
features:
- name: image
dtype: image
- name: text
dtype: string
splits:
- name: train
num_bytes: 539880496.0
num_examples: 297
download_size: 539840457
dataset_size: 539880496.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
vedataydin/tarim2 | vedataydin | 2025-03-25T12:59:21Z | 15 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-25T12:58:57Z | 0 | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: float64
- name: response
dtype: string
splits:
- name: train
num_bytes: 1300.8
num_examples: 4
- name: test
num_bytes: 315
num_examples: 1
download_size: 6517
dataset_size: 1615.8
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
michsethowusu/french-fulah_sentence-pairs | michsethowusu | 2025-05-17T17:24:10Z | 0 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-17T15:25:30Z | 0 | ---
dataset_info:
features:
- name: similarity
dtype: float32
- name: French
dtype: string
- name: Fulah
dtype: string
splits:
- name: train
num_bytes: 98351097
num_examples: 701161
download_size: 69409466
dataset_size: 98351097
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# French-Fulah_Sentence-Pairs Dataset
This dataset can be used for machine translation, sentence alignment, or other natural language processing tasks.
It is based on the NLLBv1 dataset, published on OPUS under an open-source initiative led by META. You can find more information here: [OPUS - NLLB-v1](https://opus.nlpl.eu/legacy/NLLB-v1.php)
## Metadata
- **File Name**: French-Fulah_Sentence-Pairs
- **File Size**: 100126177 bytes
- **Languages**: French, French
## Dataset Description
The dataset contains sentence pairs in African languages with an associated similarity score. The file is structured as a CSV with three columns:
1. `similarity`: The similarity score between the two sentences (range from 0 to 1).
2. `French`: The first sentence in the pair.
3. `French`: The second sentence in the pair.
This dataset is intended for use in training and evaluating machine learning models for tasks like translation, sentence similarity, and cross-lingual transfer learning.
## References
Below are papers related to how the data was collected and used in various multilingual and cross-lingual applications:
[1] Holger Schwenk and Matthijs Douze, "Learning Joint Multilingual Sentence Representations with Neural Machine Translation", ACL workshop on Representation Learning for NLP, 2017
[2] Holger Schwenk and Xian Li, "A Corpus for Multilingual Document Classification in Eight Languages", LREC, pages 3548-3551, 2018.
[3] Holger Schwenk, "Filtering and Mining Parallel Data in a Joint Multilingual Space", ACL, July 2018
[4] Alexis Conneau, Guillaume Lample, Ruty Rinott, Adina Williams, Samuel R. Bowman, Holger Schwenk and Veselin Stoyanov, "XNLI: Cross-lingual Sentence Understanding through Inference", EMNLP, 2018.
[5] Mikel Artetxe and Holger Schwenk, "Margin-based Parallel Corpus Mining with Multilingual Sentence Embeddings", arXiv, Nov 3 2018.
[6] Mikel Artetxe and Holger Schwenk, "Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond", arXiv, Dec 26 2018.
[7] Holger Schwenk, Vishrav Chaudhary, Shuo Sun, Hongyu Gong and Paco Guzman, "WikiMatrix: Mining 135M Parallel Sentences in 1620 Language Pairs from Wikipedia", arXiv, July 11 2019.
[8] Holger Schwenk, Guillaume Wenzek, Sergey Edunov, Edouard Grave and Armand Joulin, "CCMatrix: Mining Billions of High-Quality Parallel Sentences on the WEB", 2020.
[9] Paul-Ambroise Duquenne, Hongyu Gong, Holger Schwenk, "Multimodal and Multilingual Embeddings for Large-Scale Speech Mining", NeurIPS 2021, pages 15748-15761.
[10] Kevin Heffernan, Onur Celebi, and Holger Schwenk, "Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages", 2022.
|
LovrOP/cppe-custom-dataset | LovrOP | 2024-12-09T14:31:43Z | 17 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-12-09T14:25:48Z | 0 | ---
dataset_info:
features:
- name: image_id
dtype: int64
- name: image_path
dtype: string
- name: width
dtype: int64
- name: height
dtype: int64
- name: objects
struct:
- name: area
sequence: int64
- name: bbox
sequence:
sequence: int64
- name: category
sequence: int64
- name: id
sequence: int64
splits:
- name: train
num_bytes: 6971
num_examples: 10
- name: test
num_bytes: 6961
num_examples: 10
- name: validation
num_bytes: 7021
num_examples: 10
download_size: 19833
dataset_size: 20953
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
- split: validation
path: data/validation-*
---
|
fwnlp/self-instruct-safety-alignment | fwnlp | 2024-10-23T23:36:37Z | 133 | 2 | [
"license:apache-2.0",
"size_categories:10K<n<100K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2410.05269",
"region:us"
] | [] | 2024-10-13T07:32:13Z | 0 | ---
license: apache-2.0
---
**[EMNLP 2024] Data Advisor: Dynamic Data Curation for Safety Alignment of Large Language Models**
[**🌐 Homepage**](https://feiwang96.github.io/DataAdvisor/) | [**📖 Paper**](https://arxiv.org/pdf/2410.05269) | [**🤗 Dataset (Data Advisor)**](https://huggingface.co/datasets/fwnlp/data-advisor-safety-alignment) | [**🤗 Dataset (Self-Instruct)**](https://huggingface.co/datasets/fwnlp/self-instruct-safety-alignment)
## Disclaimer
The dataset contains content that may be offensive or harmful. This dataset is intended for research purposes, specifically to support efforts aimed at creating safer and less harmful AI systems. Please engage with it responsibly and at your own risk.
## Citation
```
@inproceedings{wang2024data,
title={Data Advisor: Dynamic Data Curation for Safety Alignment of Large Language Models},
author={Wang, Fei and Mehrabi, Ninareh and Goyal, Palash and Gupta, Rahul and Chang, Kai-Wei and Galstyan, Aram},
booktitle={Proceedings of EMNLP 2024},
year={2024}
}
```
|
tuenguyen/Medical-Eval-MedBullets_op4 | tuenguyen | 2025-04-10T11:36:03Z | 221 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-10T11:36:01Z | 0 | ---
dataset_info:
features:
- name: question
dtype: string
- name: options
struct:
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: answer_idx
dtype: string
splits:
- name: train
num_bytes: 327147
num_examples: 308
download_size: 170076
dataset_size: 327147
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
rohith2812/atoi-finetuning-v2 | rohith2812 | 2024-11-18T20:36:27Z | 17 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-11-18T20:36:23Z | 0 | ---
dataset_info:
features:
- name: image
dtype: image
- name: combined_text
dtype: string
splits:
- name: train
num_bytes: 91610656.0
num_examples: 517
download_size: 88233776
dataset_size: 91610656.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
qingshufan/GA-EVLRU | qingshufan | 2025-05-02T12:34:36Z | 79 | 1 | [
"task_categories:time-series-forecasting",
"language:en",
"license:gpl-3.0",
"arxiv:2504.07453",
"region:us"
] | [
"time-series-forecasting"
] | 2025-04-12T09:06:35Z | 0 | ---
language: en
license: gpl-3.0
pretty_name: GA-EVLRU
task_categories:
- time-series-forecasting
---
# Dataset Card for GA-EVLRU Battery Swapping Demand Datasets
## Dataset Details
### Dataset Description
This dataset consists of battery swapping demand datasets generated by the conversion of the [ST-EVCDP](https://github.com/IntelligentSystemsLab/ST-EVCDP), [UrbanEV](https://github.com/IntelligentSystemsLab/UrbanEV), and [EV-Load-Open-Data](https://github.com/yvenn-amara/ev-load-open-data). There are a total of nine scenario-specific datasets, each containing unstructured demand data (`demand.csv`) and data normalized to 1 hour (`bss.csv`). The dataset is used for the study of probability estimation and scheduling optimization for battery swap stations, where an innovative approach combining the Least Recently Used (LRU) strategy with genetic algorithms and a guided search mechanism is proposed to enhance global optimization capability.
### Dataset Sources
- **Repository:** [https://github.com/qingshufan/GA-EVLRU](https://github.com/qingshufan/GA-EVLRU)
- **Paper:** [Probability Estimation and Scheduling Optimization for Battery Swap Stations via LRU-Enhanced Genetic Algorithm and Dual-Factor Decision System](https://arxiv.org/abs/2504.07453)
## Uses
### Direct Use
The dataset can be directly used for research and development related to battery swapping demand prediction and optimization of battery swap stations. Specifically, it can be used to train and test the proposed probability estimation model and the GA-EVLRU algorithm for better understanding and predicting the battery swapping demand in different scenarios.
### Out-of-Scope Use
The dataset is not intended for any applications unrelated to battery swapping demand prediction and optimization of battery swap stations. It should not be used for any commercial purposes that violate the GPL-3.0 license terms, such as selling the dataset without proper authorization. Also, it should not be used to generate any information that may cause harm or discrimination to individuals or groups.
## Dataset Structure
Each of the nine datasets (`acn`, `boulder_2021`, `dundee`, `palo_alto`, `paris`, `perth`, `sap`, `st-evcdp`, `urbanev`) contains two main files: `demand.csv` which is the unstructured demand data, and `bss.csv` which is the data normalized to 1 hour. The source of the original data for these datasets is either from [EV-Load-Open-Data](https://github.com/yvenn-amara/ev-load-open-data), [ST-EVCDP](https://github.com/IntelligentSystemsLab/ST-EVCDP), or [UrbanEV](https://github.com/IntelligentSystemsLab/UrbanEV).
## Dataset Creation
### Curation Rationale
The motivation for creating this dataset is to provide a comprehensive set of data for studying the battery swapping demand in different scenarios, which can help in the development of more efficient battery swap station scheduling and optimization algorithms. By combining data from different sources and normalizing them, a more consistent and useful dataset for research is obtained.
### Source Data
#### Data Collection and Processing
The data is collected from multiple open-source repositories: [ST-EVCDP](https://github.com/IntelligentSystemsLab/ST-EVCDP), [UrbanEV](https://github.com/IntelligentSystemsLab/UrbanEV), and [EV-Load-Open-Data](https://github.com/yvenn-amara/ev-load-open-data). The collected data is then processed to generate the battery swapping demand datasets. The processing includes data conversion to create the `demand.csv` and `bss.csv` files for each scenario. The specific processing steps are described in the associated research paper and the code available in the [https://github.com/qingshufan/GA-EVLRU](https://github.com/qingshufan/GA-EVLRU) repository.
#### Who are the source data producers?
The source data producers for [EV-Load-Open-Data](https://github.com/yvenn-amara/ev-load-open-data) are associated with the project by yvenn-amara. For [ST-EVCDP](https://github.com/IntelligentSystemsLab/ST-EVCDP), it is associated with the IntelligentSystemsLab. And for [UrbanEV](https://github.com/IntelligentSystemsLab/UrbanEV), it is also from the IntelligentSystemsLab.
#### Personal and Sensitive Information
There is no indication that the dataset contains personal or sensitive information such as addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.
## Bias, Risks, and Limitations
The dataset may have limitations in representing all possible real-world scenarios for battery swapping demand. The data is collected from specific open-source repositories and may not cover all geographical locations, types of electric vehicles, or different charging patterns comprehensively. Also, the normalization process may introduce some biases in representing the actual demand in a completely accurate way.
### Recommendations
Users should be aware that the dataset may not fully represent all real-world situations and should use it with caution when making decisions related to large-scale battery swap station deployments. When using the dataset for research, users should consider conducting sensitivity analyses to understand the impact of potential biases and limitations on their results.
## Citation
**BibTeX:**
```bibtex
@inproceedings{li2025gaevlru,
title={Probability Estimation and Scheduling Optimization for Battery Swap Stations via LRU-Enhanced Genetic Algorithm and Dual-Factor Decision System},
author={Anzhen Li and Shufan Qing and Xiaochang Li and Rui Mao and Mingchen Feng},
journal={arXiv preprint arXiv:2504.07453},
year={2025}
}
```
## Dataset Card Contact
For any questions or further information about the dataset, you can contact the authors of the associated research paper or raise issues in the [https://github.com/qingshufan/GA-EVLRU](https://github.com/qingshufan/GA-EVLRU) repository. |
crtvai/jordan-dataset-v11 | crtvai | 2024-11-18T09:44:33Z | 52 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-11-18T09:44:03Z | 0 | ---
dataset_info:
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: text
dtype: string
splits:
- name: train
num_bytes: 154612591.6601307
num_examples: 122
- name: test
num_bytes: 21152573.594771244
num_examples: 16
- name: validation
num_bytes: 19370708.74509804
num_examples: 15
download_size: 169438055
dataset_size: 195135874.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
- split: validation
path: data/validation-*
---
|
kopyl/833-icons-dataset-1024-blip-large | kopyl | 2023-11-17T19:35:17Z | 90 | 1 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2023-11-17T19:34:14Z | 1 | ---
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
dataset_info:
features:
- name: image
dtype: image
- name: text
dtype: string
splits:
- name: train
num_bytes: 21063249.0
num_examples: 833
download_size: 19766635
dataset_size: 21063249.0
---
# Dataset Card for "833-icons-dataset-1024-blip-large"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
maniro-ai/2024-10-24-engine-simple-3d-relative-04 | maniro-ai | 2024-10-25T15:09:18Z | 17 | 0 | [
"region:us"
] | [] | 2024-10-25T15:09:07Z | 0 | ---
dataset_info:
features:
- name: observation.state
sequence: float32
length: 8
- name: action
sequence: float32
length: 8
- name: episode_index
dtype: int64
- name: frame_index
dtype: int64
- name: timestamp
dtype: float32
- name: observation.images.wrist_1
dtype: video_frame
- name: observation.images.wrist_2
dtype: video_frame
- name: index
dtype: int64
splits:
- name: train
num_bytes: 2602088
num_examples: 12274
download_size: 438636
dataset_size: 2602088
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mikesun26card/werewolf_audio_dataset | mikesun26card | 2025-03-22T07:48:28Z | 18 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-28T19:19:18Z | 0 | ---
dataset_info:
features:
- name: file_name
dtype: audio
- name: startTime
dtype: string
- name: endTime
dtype: string
- name: playerName
dtype: string
- name: votingOutcome
dtype: string
- name: startRoles
dtype: string
- name: endRoles
dtype: string
- name: werewolfNames
dtype: string
- name: warning
dtype: string
splits:
- name: train
num_bytes: 4701480983.0
num_examples: 151
download_size: 0
dataset_size: 4701480983.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
KomeijiForce/Fandom_Bandori_Codified_Profiles_Claude_4.0 | KomeijiForce | 2025-06-09T20:06:49Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-09T20:06:47Z | 0 | ---
dataset_info:
features:
- name: band
dtype: string
- name: character
dtype: string
- name: segment
dtype: string
- name: code
dtype: string
splits:
- name: train
num_bytes: 570117
num_examples: 390
download_size: 224769
dataset_size: 570117
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Erroriser/jenny-tts-text-tags-6h-v1 | Erroriser | 2024-10-07T13:57:17Z | 21 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-10-07T13:57:16Z | 0 | ---
dataset_info:
features:
- name: file_name
dtype: string
- name: text
dtype: string
- name: transcription_normalised
dtype: string
- name: utterance_pitch_mean
dtype: float32
- name: utterance_pitch_std
dtype: float32
- name: snr
dtype: float64
- name: c50
dtype: float64
- name: speaking_rate
dtype: string
- name: phonemes
dtype: string
- name: stoi
dtype: float64
- name: si-sdr
dtype: float64
- name: pesq
dtype: float64
- name: noise
dtype: string
- name: reverberation
dtype: string
- name: speech_monotony
dtype: string
- name: sdr_noise
dtype: string
- name: pesq_speech_quality
dtype: string
splits:
- name: train
num_bytes: 2063542
num_examples: 4000
download_size: 1025292
dataset_size: 2063542
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
hamishivi/combined_o3_val_data_1 | hamishivi | 2025-05-31T19:39:23Z | 32 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-31T19:39:21Z | 0 | ---
dataset_info:
features:
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: dataset
dtype: string
- name: ground_truth
sequence: string
- name: quality
dtype: int64
- name: id
dtype: string
splits:
- name: train
num_bytes: 11279266
num_examples: 9247
download_size: 6348410
dataset_size: 11279266
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
BASF-AI/PubChem-Raw | BASF-AI | 2025-05-08T19:35:30Z | 17 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-08T19:22:07Z | 0 | ---
dataset_info:
- config_name: compounds
features:
- name: CID
dtype: int64
- name: Title
dtype: string
- name: MolecularFormula
dtype: string
- name: IUPACName
dtype: string
- name: InChI
dtype: string
- name: SMILES
dtype: string
- name: Synonyms
dtype: string
splits:
- name: train
num_bytes: 997202779
num_examples: 2087164
download_size: 394262919
dataset_size: 997202779
- config_name: descriptions
features:
- name: CID
dtype: int64
- name: Title
dtype: string
- name: Description
dtype: string
- name: ReferenceNumber
dtype: int64
- name: SourceName
dtype: string
- name: SourceID
dtype: string
- name: ReferenceDescription
dtype: string
- name: URL
dtype: string
splits:
- name: train
num_bytes: 257707353
num_examples: 408530
download_size: 50671541
dataset_size: 257707353
configs:
- config_name: compounds
data_files:
- split: train
path: compounds/train-*
- config_name: descriptions
data_files:
- split: train
path: descriptions/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_ff596251-154a-4bd0-86d6-af5aef785f95 | argilla-internal-testing | 2024-11-11T11:09:32Z | 19 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-11-11T11:09:31Z | 0 | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1454
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
andrewsiah/PersonaPromptPersonalLLM_918 | andrewsiah | 2024-11-15T15:14:07Z | 60 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-11-15T15:14:05Z | 0 | ---
dataset_info:
features:
- name: personaid_918_response_1_llama3_sfairx
dtype: float64
- name: personaid_918_response_2_llama3_sfairx
dtype: float64
- name: personaid_918_response_3_llama3_sfairx
dtype: float64
- name: personaid_918_response_4_llama3_sfairx
dtype: float64
- name: personaid_918_response_5_llama3_sfairx
dtype: float64
- name: personaid_918_response_6_llama3_sfairx
dtype: float64
- name: personaid_918_response_7_llama3_sfairx
dtype: float64
- name: personaid_918_response_8_llama3_sfairx
dtype: float64
- name: prompt
dtype: string
- name: subset
dtype: string
- name: prompt_id
dtype: int64
- name: response_1
dtype: string
- name: response_1_model
dtype: string
- name: response_2
dtype: string
- name: response_2_model
dtype: string
- name: response_3
dtype: string
- name: response_3_model
dtype: string
- name: response_4
dtype: string
- name: response_4_model
dtype: string
- name: response_5
dtype: string
- name: response_5_model
dtype: string
- name: response_6
dtype: string
- name: response_6_model
dtype: string
- name: response_7
dtype: string
- name: response_7_model
dtype: string
- name: response_8
dtype: string
- name: response_8_model
dtype: string
- name: response_1_gemma_2b
dtype: float64
- name: response_2_gemma_2b
dtype: float64
- name: response_3_gemma_2b
dtype: float64
- name: response_4_gemma_2b
dtype: float64
- name: response_5_gemma_2b
dtype: float64
- name: response_6_gemma_2b
dtype: float64
- name: response_7_gemma_2b
dtype: float64
- name: response_8_gemma_2b
dtype: float64
- name: response_1_gemma_7b
dtype: float64
- name: response_2_gemma_7b
dtype: float64
- name: response_3_gemma_7b
dtype: float64
- name: response_4_gemma_7b
dtype: float64
- name: response_5_gemma_7b
dtype: float64
- name: response_6_gemma_7b
dtype: float64
- name: response_7_gemma_7b
dtype: float64
- name: response_8_gemma_7b
dtype: float64
- name: response_1_mistral_raft
dtype: float64
- name: response_2_mistral_raft
dtype: float64
- name: response_3_mistral_raft
dtype: float64
- name: response_4_mistral_raft
dtype: float64
- name: response_5_mistral_raft
dtype: float64
- name: response_6_mistral_raft
dtype: float64
- name: response_7_mistral_raft
dtype: float64
- name: response_8_mistral_raft
dtype: float64
- name: response_1_mistral_ray
dtype: float64
- name: response_2_mistral_ray
dtype: float64
- name: response_3_mistral_ray
dtype: float64
- name: response_4_mistral_ray
dtype: float64
- name: response_5_mistral_ray
dtype: float64
- name: response_6_mistral_ray
dtype: float64
- name: response_7_mistral_ray
dtype: float64
- name: response_8_mistral_ray
dtype: float64
- name: response_1_mistral_weqweasdas
dtype: float64
- name: response_2_mistral_weqweasdas
dtype: float64
- name: response_3_mistral_weqweasdas
dtype: float64
- name: response_4_mistral_weqweasdas
dtype: float64
- name: response_5_mistral_weqweasdas
dtype: float64
- name: response_6_mistral_weqweasdas
dtype: float64
- name: response_7_mistral_weqweasdas
dtype: float64
- name: response_8_mistral_weqweasdas
dtype: float64
- name: response_1_llama3_sfairx
dtype: float64
- name: response_2_llama3_sfairx
dtype: float64
- name: response_3_llama3_sfairx
dtype: float64
- name: response_4_llama3_sfairx
dtype: float64
- name: response_5_llama3_sfairx
dtype: float64
- name: response_6_llama3_sfairx
dtype: float64
- name: response_7_llama3_sfairx
dtype: float64
- name: response_8_llama3_sfairx
dtype: float64
- name: response_1_oasst_deberta_v3
dtype: float64
- name: response_2_oasst_deberta_v3
dtype: float64
- name: response_3_oasst_deberta_v3
dtype: float64
- name: response_4_oasst_deberta_v3
dtype: float64
- name: response_5_oasst_deberta_v3
dtype: float64
- name: response_6_oasst_deberta_v3
dtype: float64
- name: response_7_oasst_deberta_v3
dtype: float64
- name: response_8_oasst_deberta_v3
dtype: float64
- name: response_1_beaver_7b
dtype: float64
- name: response_2_beaver_7b
dtype: float64
- name: response_3_beaver_7b
dtype: float64
- name: response_4_beaver_7b
dtype: float64
- name: response_5_beaver_7b
dtype: float64
- name: response_6_beaver_7b
dtype: float64
- name: response_7_beaver_7b
dtype: float64
- name: response_8_beaver_7b
dtype: float64
- name: response_1_oasst_pythia_7b
dtype: float64
- name: response_2_oasst_pythia_7b
dtype: float64
- name: response_3_oasst_pythia_7b
dtype: float64
- name: response_4_oasst_pythia_7b
dtype: float64
- name: response_5_oasst_pythia_7b
dtype: float64
- name: response_6_oasst_pythia_7b
dtype: float64
- name: response_7_oasst_pythia_7b
dtype: float64
- name: response_8_oasst_pythia_7b
dtype: float64
- name: response_1_oasst_pythia_1b
dtype: float64
- name: response_2_oasst_pythia_1b
dtype: float64
- name: response_3_oasst_pythia_1b
dtype: float64
- name: response_4_oasst_pythia_1b
dtype: float64
- name: response_5_oasst_pythia_1b
dtype: float64
- name: response_6_oasst_pythia_1b
dtype: float64
- name: response_7_oasst_pythia_1b
dtype: float64
- name: response_8_oasst_pythia_1b
dtype: float64
- name: id
dtype: int64
- name: rformatted_promptresponse_1
dtype: string
- name: rformatted_promptresponse_2
dtype: string
- name: rformatted_promptresponse_3
dtype: string
- name: rformatted_promptresponse_4
dtype: string
- name: rformatted_promptresponse_5
dtype: string
- name: rformatted_promptresponse_6
dtype: string
- name: rformatted_promptresponse_7
dtype: string
- name: rformatted_promptresponse_8
dtype: string
splits:
- name: train
num_bytes: 32417752
num_examples: 1000
download_size: 18420200
dataset_size: 32417752
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "PersonaPromptPersonalLLM_918"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
atta00/icd10-codes | atta00 | 2024-10-15T04:33:18Z | 21 | 0 | [
"license:mit",
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-10-15T04:31:29Z | 0 | ---
license: mit
dataset_info:
features:
- name: chapter
dtype: string
- name: section
dtype: string
- name: category
dtype: string
- name: category_code
dtype: string
- name: code
dtype: string
- name: description
dtype: string
splits:
- name: train
num_bytes: 5424121
num_examples: 25719
- name: test
num_bytes: 5324664
num_examples: 25719
download_size: 1199236
dataset_size: 10748785
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
wwbrannon/twinviews-13k | wwbrannon | 2024-10-11T08:11:29Z | 81 | 3 | [
"task_categories:text-classification",
"task_categories:reinforcement-learning",
"annotations_creators:machine-generated",
"annotations_creators:expert-generated",
"language_creators:machine-generated",
"multilinguality:monolingual",
"source_datasets:original",
"language:en",
"license:cc-by-4.0",
"size_categories:10K<n<100K",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2409.05283",
"region:us",
"synthetic",
"political-bias",
"truthfulness",
"alignment",
"debiasing",
"bias-detection",
"fairness"
] | [
"text-classification",
"reinforcement-learning"
] | 2024-10-11T04:45:53Z | 0 | ---
annotations_creators:
- machine-generated
- expert-generated
license: cc-by-4.0
task_categories:
- text-classification
- reinforcement-learning
language:
- en
language_creators:
- machine-generated
tags:
- synthetic
- political-bias
- truthfulness
- alignment
- debiasing
- bias-detection
- fairness
multilinguality:
- monolingual
pretty_name: TwinViews-13k
size_categories:
- 10K<n<100K
source_datasets:
- original
paperswithcode_id: twinviews-13k
---
<!-- YAML front matter fields documented here: https://github.com/huggingface/hub-docs/blob/main/datasetcard.md -->
# Dataset Card for TwinViews-13k
This dataset contains 13,855 pairs of left-leaning and right-leaning political statements matched by topic. The dataset was generated using GPT-3.5 Turbo and has been audited to ensure quality and ideological balance. It is designed to facilitate the study of political bias in reward models and language models, with a focus on the relationship between truthfulness and political views.
## Dataset Details
### Dataset Description
TwinViews-13k is a dataset of 13,855 pairs of left-leaning and right-leaning political statements, each pair matched by topic. It was created to study political bias in reward and language models, with a focus on understanding the interaction between model alignment to truthfulness and the emergence of political bias. The dataset was generated using GPT-3.5 Turbo, with extensive auditing to ensure ideological balance and topical relevance.
This dataset can be used for various tasks related to political bias, natural language processing, and model alignment, particularly in studies examining how political orientation impacts model outputs.
- **Curated by:** Suyash Fulay, William Brannon, Shrestha Mohanty, Cassandra Overney, Elinor Poole-Dayan, Deb Roy, Jad Kabbara
- **Language(s) (NLP):** en
- **License:** cc-by-4.0
### Dataset Sources
- **Repository:** https://github.com/sfulay/truth_politics
- **Paper:** https://arxiv.org/abs/2409.05283
## Uses
### Direct Use
This dataset is suitable for:
* Studying political bias in reward models and large language models (LLMs).
* Evaluating alignment techniques for LLMs, especially regarding truthfulness and political bias.
* Training and/or evaluating models in the context of political discourse analysis.
* Research on how political views and alignment objectives interact in AI systems.
### Out-of-Scope Use
This dataset is not suitable for tasks requiring very fine-grained or human-labeled annotations of political affiliation beyond the machine-generated left/right splits. Notions of "left" and "right" may also vary between countries and over time, and users of the data should check that it captures the ideological dimensions of interest.
## Dataset Structure
The dataset contains 13,855 pairs of left-leaning and right-leaning political statements. Each pair is matched by topic, with statements generated to be similar in style and length. The dataset consists of the following fields:
* `l`: A left-leaning political statement.
* `r`: A right-leaning political statement.
* `topic`: The general topic of the pair (e.g., taxes, climate, education).
## Dataset Creation
### Curation Rationale
The dataset was created to fill the gap in large-scale, topically matched political statement pairs for studying bias in LLMs. It allows for comparison of how models treat left-leaning versus right-leaning perspectives, particularly in the context of truthfulness and political bias.
### Source Data
#### Data Collection and Processing
The data was generated using GPT-3.5 Turbo. A carefully designed prompt was used to generate statement pairs that were ideologically representative of left-leaning and right-leaning viewpoints. The statements were then audited to ensure relevance, ideological alignment, and quality. Topic matching was done to ensure the statements are comparable across the political spectrum.
In summary:
* Generated using GPT-3.5 Turbo.
* Audited for ideological and topical relevance.
* Final dataset filtered and structured to ensure left/right statement parity.
#### Who are the source data producers?
The dataset was generated by GPT-3.5 Turbo, with extensive auditing performed by the dataset creators at MIT.
#### Personal and Sensitive Information
The dataset consists of machine-generated political statements and does not contain any personal or sensitive information.
## Bias, Risks, and Limitations
Users of the dataset should be aware of certain limitations:
* **Source context:** Notions of what is political and the left/right ideological spectrum are context-specific and vary between countries and over time. Our dataset and its notions of politics and ideology come from the US in the early 2020s and may not generalize to other cultures or other time periods.
* **Generated content:** Since the statements were generated by GPT-3.5 Turbo, they may not fully capture the nuance or complexity of real-world political discourse. It is also possible that the dataset may contain stylistic or lexical artifacts correlated with political bias, though our evaluation has not identified any such artifacts.
## Citation
**BibTeX:**
<!-- add on publication in anthology:
url = "https://aclanthology.org/_______",
doi = "10.________",
pages = "X--Y",
-->
```
@inproceedings{fulayRelationshipTruthPolitical2024,
author = {Fulay, Suyash and Brannon, William and Mohanty, Shrestha and Overney, Cassandra and Poole-Dayan, Elinor and Roy, Deb and Kabbara, Jad},
title = {On the Relationship between Truth and Political Bias in Language Models},
booktitle = {Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing (EMNLP '24)},
year = {2024},
month = nov,
publisher = {Association for Computational Linguistics},
note = {arXiv:2409.05283},
abstract = {Language model alignment research often attempts to ensure that models are not only helpful and harmless, but also truthful and unbiased. However, optimizing these objectives simultaneously can obscure how improving one aspect might impact the others. In this work, we focus on analyzing the relationship between two concepts essential in both language model alignment and political science: \textit{truthfulness} and \textit{political bias}. We train reward models on various popular truthfulness datasets and subsequently evaluate their political bias. Our findings reveal that optimizing reward models for truthfulness on these datasets tends to result in a left-leaning political bias. We also find that existing open-source reward models (i.e. those trained on standard human preference datasets) already show a similar bias and that the bias is larger for larger models. These results raise important questions about both the datasets used to represent truthfulness and what language models capture about the relationship between truth and politics.}
}
```
**APA:**
```
Fulay, S., Brannon, W., Mohanty, S., Overney, C., Poole-Dayan, E., Roy, D., & Kabbara, J. (2024). On the Relationship between Truth and Political Bias in Language Models. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing (EMNLP '24). Association for Computational Linguistics.
```
## Glossary
* Left-leaning: Political statements generally associated with progressive or liberal views.
* Right-leaning: Political statements generally associated with conservative or traditional views.
* Political Bias: A model's tendency to favor one political ideology over another in its outputs.
## Dataset Card Authors
William Brannon, <[email protected]>
## Dataset Card Contact
* William Brannon, <[email protected]>
* Suyash Fulay, <[email protected]> |
sun2ot/DiffCLR | sun2ot | 2025-06-10T02:01:28Z | 70 | 0 | [
"language:en",
"license:mit",
"size_categories:10K<n<100K",
"region:us",
"recommendation",
"matrix",
"sparse"
] | [] | 2025-05-18T12:09:44Z | 0 | ---
license: mit
language:
- en
tags:
- recommendation
- matrix
- sparse
size_categories:
- 10K<n<100K
---
This is a collection of datasets for recommendation, including `Tiktok`, `Yelp`, and `Amazon-Sports`.
`Tiktok` and `Sports` were published in the [DiffMM](https://github.com/HKUDS/DiffMM) work; we only updated the SciPy version they used.
The `Yelp` dataset was constructed by us from the latest official [Yelp](https://business.yelp.com/data/resources/open-dataset/) open data.
mlfoundations-dev/math_stratos_scale_annotated | mlfoundations-dev | 2025-01-24T22:43:30Z | 17 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-01-24T17:27:46Z | 0 | ---
dataset_info:
features:
- name: problem
dtype: string
- name: reasoning
dtype: string
- name: deepseek_solution
dtype: string
- name: ground_truth_solution
dtype: string
splits:
- name: train
num_bytes: 3678111860.0
num_examples: 140075
download_size: 1622763414
dataset_size: 3678111860.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mahdi02ch/test | mahdi02ch | 2025-04-03T09:58:12Z | 16 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-22T16:33:40Z | 0 | ---
dataset_info:
features:
- name: audio
dtype: audio
- name: text
dtype: string
- name: start_time
dtype: string
- name: end_time
dtype: string
splits:
- name: train
num_bytes: 2225583.0
num_examples: 8
download_size: 2227893
dataset_size: 2225583.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mahwizzzz/Urdu_Rekhta | mahwizzzz | 2025-03-08T06:48:54Z | 38 | 3 | [
"language:ur",
"license:mit",
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-08T05:49:22Z | 0 | ---
language:
- ur
license: mit
size_categories:
- 1K<n<10K
dataset_info:
features:
- name: Poet
dtype: string
- name: Poem_name
dtype: string
- name: Poetry
dtype: string
splits:
- name: train
num_bytes: 1443830
num_examples: 1314
download_size: 704346
dataset_size: 1443830
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
datacomp/imagenet-1k-random-100.0-frac-1over32 | datacomp | 2025-01-18T00:22:07Z | 17 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:image",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-12-06T14:13:20Z | 0 | ---
dataset_info:
features:
- name: image
dtype: image
- name: label
dtype:
class_label:
names:
'0': tench, Tinca tinca
'1': goldfish, Carassius auratus
'2': great white shark, white shark, man-eater, man-eating shark, Carcharodon
carcharias
'3': tiger shark, Galeocerdo cuvieri
'4': hammerhead, hammerhead shark
'5': electric ray, crampfish, numbfish, torpedo
'6': stingray
'7': cock
'8': hen
'9': ostrich, Struthio camelus
'10': brambling, Fringilla montifringilla
'11': goldfinch, Carduelis carduelis
'12': house finch, linnet, Carpodacus mexicanus
'13': junco, snowbird
'14': indigo bunting, indigo finch, indigo bird, Passerina cyanea
'15': robin, American robin, Turdus migratorius
'16': bulbul
'17': jay
'18': magpie
'19': chickadee
'20': water ouzel, dipper
'21': kite
'22': bald eagle, American eagle, Haliaeetus leucocephalus
'23': vulture
'24': great grey owl, great gray owl, Strix nebulosa
'25': European fire salamander, Salamandra salamandra
'26': common newt, Triturus vulgaris
'27': eft
'28': spotted salamander, Ambystoma maculatum
'29': axolotl, mud puppy, Ambystoma mexicanum
'30': bullfrog, Rana catesbeiana
'31': tree frog, tree-frog
'32': tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui
'33': loggerhead, loggerhead turtle, Caretta caretta
'34': leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea
'35': mud turtle
'36': terrapin
'37': box turtle, box tortoise
'38': banded gecko
'39': common iguana, iguana, Iguana iguana
'40': American chameleon, anole, Anolis carolinensis
'41': whiptail, whiptail lizard
'42': agama
'43': frilled lizard, Chlamydosaurus kingi
'44': alligator lizard
'45': Gila monster, Heloderma suspectum
'46': green lizard, Lacerta viridis
'47': African chameleon, Chamaeleo chamaeleon
'48': Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus
komodoensis
'49': African crocodile, Nile crocodile, Crocodylus niloticus
'50': American alligator, Alligator mississipiensis
'51': triceratops
'52': thunder snake, worm snake, Carphophis amoenus
'53': ringneck snake, ring-necked snake, ring snake
'54': hognose snake, puff adder, sand viper
'55': green snake, grass snake
'56': king snake, kingsnake
'57': garter snake, grass snake
'58': water snake
'59': vine snake
'60': night snake, Hypsiglena torquata
'61': boa constrictor, Constrictor constrictor
'62': rock python, rock snake, Python sebae
'63': Indian cobra, Naja naja
'64': green mamba
'65': sea snake
'66': horned viper, cerastes, sand viper, horned asp, Cerastes cornutus
'67': diamondback, diamondback rattlesnake, Crotalus adamanteus
'68': sidewinder, horned rattlesnake, Crotalus cerastes
'69': trilobite
'70': harvestman, daddy longlegs, Phalangium opilio
'71': scorpion
'72': black and gold garden spider, Argiope aurantia
'73': barn spider, Araneus cavaticus
'74': garden spider, Aranea diademata
'75': black widow, Latrodectus mactans
'76': tarantula
'77': wolf spider, hunting spider
'78': tick
'79': centipede
'80': black grouse
'81': ptarmigan
'82': ruffed grouse, partridge, Bonasa umbellus
'83': prairie chicken, prairie grouse, prairie fowl
'84': peacock
'85': quail
'86': partridge
'87': African grey, African gray, Psittacus erithacus
'88': macaw
'89': sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita
'90': lorikeet
'91': coucal
'92': bee eater
'93': hornbill
'94': hummingbird
'95': jacamar
'96': toucan
'97': drake
'98': red-breasted merganser, Mergus serrator
'99': goose
'100': black swan, Cygnus atratus
'101': tusker
'102': echidna, spiny anteater, anteater
'103': platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus
anatinus
'104': wallaby, brush kangaroo
'105': koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus
'106': wombat
'107': jellyfish
'108': sea anemone, anemone
'109': brain coral
'110': flatworm, platyhelminth
'111': nematode, nematode worm, roundworm
'112': conch
'113': snail
'114': slug
'115': sea slug, nudibranch
'116': chiton, coat-of-mail shell, sea cradle, polyplacophore
'117': chambered nautilus, pearly nautilus, nautilus
'118': Dungeness crab, Cancer magister
'119': rock crab, Cancer irroratus
'120': fiddler crab
'121': king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes
camtschatica
'122': American lobster, Northern lobster, Maine lobster, Homarus americanus
'123': spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish
'124': crayfish, crawfish, crawdad, crawdaddy
'125': hermit crab
'126': isopod
'127': white stork, Ciconia ciconia
'128': black stork, Ciconia nigra
'129': spoonbill
'130': flamingo
'131': little blue heron, Egretta caerulea
'132': American egret, great white heron, Egretta albus
'133': bittern
'134': crane
'135': limpkin, Aramus pictus
'136': European gallinule, Porphyrio porphyrio
'137': American coot, marsh hen, mud hen, water hen, Fulica americana
'138': bustard
'139': ruddy turnstone, Arenaria interpres
'140': red-backed sandpiper, dunlin, Erolia alpina
'141': redshank, Tringa totanus
'142': dowitcher
'143': oystercatcher, oyster catcher
'144': pelican
'145': king penguin, Aptenodytes patagonica
'146': albatross, mollymawk
'147': grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius
robustus
'148': killer whale, killer, orca, grampus, sea wolf, Orcinus orca
'149': dugong, Dugong dugon
'150': sea lion
'151': Chihuahua
'152': Japanese spaniel
'153': Maltese dog, Maltese terrier, Maltese
'154': Pekinese, Pekingese, Peke
'155': Shih-Tzu
'156': Blenheim spaniel
'157': papillon
'158': toy terrier
'159': Rhodesian ridgeback
'160': Afghan hound, Afghan
'161': basset, basset hound
'162': beagle
'163': bloodhound, sleuthhound
'164': bluetick
'165': black-and-tan coonhound
'166': Walker hound, Walker foxhound
'167': English foxhound
'168': redbone
'169': borzoi, Russian wolfhound
'170': Irish wolfhound
'171': Italian greyhound
'172': whippet
'173': Ibizan hound, Ibizan Podenco
'174': Norwegian elkhound, elkhound
'175': otterhound, otter hound
'176': Saluki, gazelle hound
'177': Scottish deerhound, deerhound
'178': Weimaraner
'179': Staffordshire bullterrier, Staffordshire bull terrier
'180': American Staffordshire terrier, Staffordshire terrier, American pit
bull terrier, pit bull terrier
'181': Bedlington terrier
'182': Border terrier
'183': Kerry blue terrier
'184': Irish terrier
'185': Norfolk terrier
'186': Norwich terrier
'187': Yorkshire terrier
'188': wire-haired fox terrier
'189': Lakeland terrier
'190': Sealyham terrier, Sealyham
'191': Airedale, Airedale terrier
'192': cairn, cairn terrier
'193': Australian terrier
'194': Dandie Dinmont, Dandie Dinmont terrier
'195': Boston bull, Boston terrier
'196': miniature schnauzer
'197': giant schnauzer
'198': standard schnauzer
'199': Scotch terrier, Scottish terrier, Scottie
'200': Tibetan terrier, chrysanthemum dog
'201': silky terrier, Sydney silky
'202': soft-coated wheaten terrier
'203': West Highland white terrier
'204': Lhasa, Lhasa apso
'205': flat-coated retriever
'206': curly-coated retriever
'207': golden retriever
'208': Labrador retriever
'209': Chesapeake Bay retriever
'210': German short-haired pointer
'211': vizsla, Hungarian pointer
'212': English setter
'213': Irish setter, red setter
'214': Gordon setter
'215': Brittany spaniel
'216': clumber, clumber spaniel
'217': English springer, English springer spaniel
'218': Welsh springer spaniel
'219': cocker spaniel, English cocker spaniel, cocker
'220': Sussex spaniel
'221': Irish water spaniel
'222': kuvasz
'223': schipperke
'224': groenendael
'225': malinois
'226': briard
'227': kelpie
'228': komondor
'229': Old English sheepdog, bobtail
'230': Shetland sheepdog, Shetland sheep dog, Shetland
'231': collie
'232': Border collie
'233': Bouvier des Flandres, Bouviers des Flandres
'234': Rottweiler
'235': German shepherd, German shepherd dog, German police dog, alsatian
'236': Doberman, Doberman pinscher
'237': miniature pinscher
'238': Greater Swiss Mountain dog
'239': Bernese mountain dog
'240': Appenzeller
'241': EntleBucher
'242': boxer
'243': bull mastiff
'244': Tibetan mastiff
'245': French bulldog
'246': Great Dane
'247': Saint Bernard, St Bernard
'248': Eskimo dog, husky
'249': malamute, malemute, Alaskan malamute
'250': Siberian husky
'251': dalmatian, coach dog, carriage dog
'252': affenpinscher, monkey pinscher, monkey dog
'253': basenji
'254': pug, pug-dog
'255': Leonberg
'256': Newfoundland, Newfoundland dog
'257': Great Pyrenees
'258': Samoyed, Samoyede
'259': Pomeranian
'260': chow, chow chow
'261': keeshond
'262': Brabancon griffon
'263': Pembroke, Pembroke Welsh corgi
'264': Cardigan, Cardigan Welsh corgi
'265': toy poodle
'266': miniature poodle
'267': standard poodle
'268': Mexican hairless
'269': timber wolf, grey wolf, gray wolf, Canis lupus
'270': white wolf, Arctic wolf, Canis lupus tundrarum
'271': red wolf, maned wolf, Canis rufus, Canis niger
'272': coyote, prairie wolf, brush wolf, Canis latrans
'273': dingo, warrigal, warragal, Canis dingo
'274': dhole, Cuon alpinus
'275': African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus
'276': hyena, hyaena
'277': red fox, Vulpes vulpes
'278': kit fox, Vulpes macrotis
'279': Arctic fox, white fox, Alopex lagopus
'280': grey fox, gray fox, Urocyon cinereoargenteus
'281': tabby, tabby cat
'282': tiger cat
'283': Persian cat
'284': Siamese cat, Siamese
'285': Egyptian cat
'286': cougar, puma, catamount, mountain lion, painter, panther, Felis concolor
'287': lynx, catamount
'288': leopard, Panthera pardus
'289': snow leopard, ounce, Panthera uncia
'290': jaguar, panther, Panthera onca, Felis onca
'291': lion, king of beasts, Panthera leo
'292': tiger, Panthera tigris
'293': cheetah, chetah, Acinonyx jubatus
'294': brown bear, bruin, Ursus arctos
'295': American black bear, black bear, Ursus americanus, Euarctos americanus
'296': ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus
'297': sloth bear, Melursus ursinus, Ursus ursinus
'298': mongoose
'299': meerkat, mierkat
'300': tiger beetle
'301': ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle
'302': ground beetle, carabid beetle
'303': long-horned beetle, longicorn, longicorn beetle
'304': leaf beetle, chrysomelid
'305': dung beetle
'306': rhinoceros beetle
'307': weevil
'308': fly
'309': bee
'310': ant, emmet, pismire
'311': grasshopper, hopper
'312': cricket
'313': walking stick, walkingstick, stick insect
'314': cockroach, roach
'315': mantis, mantid
'316': cicada, cicala
'317': leafhopper
'318': lacewing, lacewing fly
'319': dragonfly, darning needle, devil's darning needle, sewing needle,
snake feeder, snake doctor, mosquito hawk, skeeter hawk
'320': damselfly
'321': admiral
'322': ringlet, ringlet butterfly
'323': monarch, monarch butterfly, milkweed butterfly, Danaus plexippus
'324': cabbage butterfly
'325': sulphur butterfly, sulfur butterfly
'326': lycaenid, lycaenid butterfly
'327': starfish, sea star
'328': sea urchin
'329': sea cucumber, holothurian
'330': wood rabbit, cottontail, cottontail rabbit
'331': hare
'332': Angora, Angora rabbit
'333': hamster
'334': porcupine, hedgehog
'335': fox squirrel, eastern fox squirrel, Sciurus niger
'336': marmot
'337': beaver
'338': guinea pig, Cavia cobaya
'339': sorrel
'340': zebra
'341': hog, pig, grunter, squealer, Sus scrofa
'342': wild boar, boar, Sus scrofa
'343': warthog
'344': hippopotamus, hippo, river horse, Hippopotamus amphibius
'345': ox
'346': water buffalo, water ox, Asiatic buffalo, Bubalus bubalis
'347': bison
'348': ram, tup
'349': bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain
sheep, Ovis canadensis
'350': ibex, Capra ibex
'351': hartebeest
'352': impala, Aepyceros melampus
'353': gazelle
'354': Arabian camel, dromedary, Camelus dromedarius
'355': llama
'356': weasel
'357': mink
'358': polecat, fitch, foulmart, foumart, Mustela putorius
'359': black-footed ferret, ferret, Mustela nigripes
'360': otter
'361': skunk, polecat, wood pussy
'362': badger
'363': armadillo
'364': three-toed sloth, ai, Bradypus tridactylus
'365': orangutan, orang, orangutang, Pongo pygmaeus
'366': gorilla, Gorilla gorilla
'367': chimpanzee, chimp, Pan troglodytes
'368': gibbon, Hylobates lar
'369': siamang, Hylobates syndactylus, Symphalangus syndactylus
'370': guenon, guenon monkey
'371': patas, hussar monkey, Erythrocebus patas
'372': baboon
'373': macaque
'374': langur
'375': colobus, colobus monkey
'376': proboscis monkey, Nasalis larvatus
'377': marmoset
'378': capuchin, ringtail, Cebus capucinus
'379': howler monkey, howler
'380': titi, titi monkey
'381': spider monkey, Ateles geoffroyi
'382': squirrel monkey, Saimiri sciureus
'383': Madagascar cat, ring-tailed lemur, Lemur catta
'384': indri, indris, Indri indri, Indri brevicaudatus
'385': Indian elephant, Elephas maximus
'386': African elephant, Loxodonta africana
'387': lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens
'388': giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca
'389': barracouta, snoek
'390': eel
'391': coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus
kisutch
'392': rock beauty, Holocanthus tricolor
'393': anemone fish
'394': sturgeon
'395': gar, garfish, garpike, billfish, Lepisosteus osseus
'396': lionfish
'397': puffer, pufferfish, blowfish, globefish
'398': abacus
'399': abaya
'400': academic gown, academic robe, judge's robe
'401': accordion, piano accordion, squeeze box
'402': acoustic guitar
'403': aircraft carrier, carrier, flattop, attack aircraft carrier
'404': airliner
'405': airship, dirigible
'406': altar
'407': ambulance
'408': amphibian, amphibious vehicle
'409': analog clock
'410': apiary, bee house
'411': apron
'412': ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin,
dustbin, trash barrel, trash bin
'413': assault rifle, assault gun
'414': backpack, back pack, knapsack, packsack, rucksack, haversack
'415': bakery, bakeshop, bakehouse
'416': balance beam, beam
'417': balloon
'418': ballpoint, ballpoint pen, ballpen, Biro
'419': Band Aid
'420': banjo
'421': bannister, banister, balustrade, balusters, handrail
'422': barbell
'423': barber chair
'424': barbershop
'425': barn
'426': barometer
'427': barrel, cask
'428': barrow, garden cart, lawn cart, wheelbarrow
'429': baseball
'430': basketball
'431': bassinet
'432': bassoon
'433': bathing cap, swimming cap
'434': bath towel
'435': bathtub, bathing tub, bath, tub
'436': beach wagon, station wagon, wagon, estate car, beach waggon, station
waggon, waggon
'437': beacon, lighthouse, beacon light, pharos
'438': beaker
'439': bearskin, busby, shako
'440': beer bottle
'441': beer glass
'442': bell cote, bell cot
'443': bib
'444': bicycle-built-for-two, tandem bicycle, tandem
'445': bikini, two-piece
'446': binder, ring-binder
'447': binoculars, field glasses, opera glasses
'448': birdhouse
'449': boathouse
'450': bobsled, bobsleigh, bob
'451': bolo tie, bolo, bola tie, bola
'452': bonnet, poke bonnet
'453': bookcase
'454': bookshop, bookstore, bookstall
'455': bottlecap
'456': bow
'457': bow tie, bow-tie, bowtie
'458': brass, memorial tablet, plaque
'459': brassiere, bra, bandeau
'460': breakwater, groin, groyne, mole, bulwark, seawall, jetty
'461': breastplate, aegis, egis
'462': broom
'463': bucket, pail
'464': buckle
'465': bulletproof vest
'466': bullet train, bullet
'467': butcher shop, meat market
'468': cab, hack, taxi, taxicab
'469': caldron, cauldron
'470': candle, taper, wax light
'471': cannon
'472': canoe
'473': can opener, tin opener
'474': cardigan
'475': car mirror
'476': carousel, carrousel, merry-go-round, roundabout, whirligig
'477': carpenter's kit, tool kit
'478': carton
'479': car wheel
'480': cash machine, cash dispenser, automated teller machine, automatic
teller machine, automated teller, automatic teller, ATM
'481': cassette
'482': cassette player
'483': castle
'484': catamaran
'485': CD player
'486': cello, violoncello
'487': cellular telephone, cellular phone, cellphone, cell, mobile phone
'488': chain
'489': chainlink fence
'490': chain mail, ring mail, mail, chain armor, chain armour, ring armor,
ring armour
'491': chain saw, chainsaw
'492': chest
'493': chiffonier, commode
'494': chime, bell, gong
'495': china cabinet, china closet
'496': Christmas stocking
'497': church, church building
'498': cinema, movie theater, movie theatre, movie house, picture palace
'499': cleaver, meat cleaver, chopper
'500': cliff dwelling
'501': cloak
'502': clog, geta, patten, sabot
'503': cocktail shaker
'504': coffee mug
'505': coffeepot
'506': coil, spiral, volute, whorl, helix
'507': combination lock
'508': computer keyboard, keypad
'509': confectionery, confectionary, candy store
'510': container ship, containership, container vessel
'511': convertible
'512': corkscrew, bottle screw
'513': cornet, horn, trumpet, trump
'514': cowboy boot
'515': cowboy hat, ten-gallon hat
'516': cradle
'517': crane2
'518': crash helmet
'519': crate
'520': crib, cot
'521': Crock Pot
'522': croquet ball
'523': crutch
'524': cuirass
'525': dam, dike, dyke
'526': desk
'527': desktop computer
'528': dial telephone, dial phone
'529': diaper, nappy, napkin
'530': digital clock
'531': digital watch
'532': dining table, board
'533': dishrag, dishcloth
'534': dishwasher, dish washer, dishwashing machine
'535': disk brake, disc brake
'536': dock, dockage, docking facility
'537': dogsled, dog sled, dog sleigh
'538': dome
'539': doormat, welcome mat
'540': drilling platform, offshore rig
'541': drum, membranophone, tympan
'542': drumstick
'543': dumbbell
'544': Dutch oven
'545': electric fan, blower
'546': electric guitar
'547': electric locomotive
'548': entertainment center
'549': envelope
'550': espresso maker
'551': face powder
'552': feather boa, boa
'553': file, file cabinet, filing cabinet
'554': fireboat
'555': fire engine, fire truck
'556': fire screen, fireguard
'557': flagpole, flagstaff
'558': flute, transverse flute
'559': folding chair
'560': football helmet
'561': forklift
'562': fountain
'563': fountain pen
'564': four-poster
'565': freight car
'566': French horn, horn
'567': frying pan, frypan, skillet
'568': fur coat
'569': garbage truck, dustcart
'570': gasmask, respirator, gas helmet
'571': gas pump, gasoline pump, petrol pump, island dispenser
'572': goblet
'573': go-kart
'574': golf ball
'575': golfcart, golf cart
'576': gondola
'577': gong, tam-tam
'578': gown
'579': grand piano, grand
'580': greenhouse, nursery, glasshouse
'581': grille, radiator grille
'582': grocery store, grocery, food market, market
'583': guillotine
'584': hair slide
'585': hair spray
'586': half track
'587': hammer
'588': hamper
'589': hand blower, blow dryer, blow drier, hair dryer, hair drier
'590': hand-held computer, hand-held microcomputer
'591': handkerchief, hankie, hanky, hankey
'592': hard disc, hard disk, fixed disk
'593': harmonica, mouth organ, harp, mouth harp
'594': harp
'595': harvester, reaper
'596': hatchet
'597': holster
'598': home theater, home theatre
'599': honeycomb
'600': hook, claw
'601': hoopskirt, crinoline
'602': horizontal bar, high bar
'603': horse cart, horse-cart
'604': hourglass
'605': iPod
'606': iron, smoothing iron
'607': jack-o'-lantern
'608': jean, blue jean, denim
'609': jeep, landrover
'610': jersey, T-shirt, tee shirt
'611': jigsaw puzzle
'612': jinrikisha, ricksha, rickshaw
'613': joystick
'614': kimono
'615': knee pad
'616': knot
'617': lab coat, laboratory coat
'618': ladle
'619': lampshade, lamp shade
'620': laptop, laptop computer
'621': lawn mower, mower
'622': lens cap, lens cover
'623': letter opener, paper knife, paperknife
'624': library
'625': lifeboat
'626': lighter, light, igniter, ignitor
'627': limousine, limo
'628': liner, ocean liner
'629': lipstick, lip rouge
'630': Loafer
'631': lotion
'632': loudspeaker, speaker, speaker unit, loudspeaker system, speaker system
'633': loupe, jeweler's loupe
'634': lumbermill, sawmill
'635': magnetic compass
'636': mailbag, postbag
'637': mailbox, letter box
'638': maillot
'639': maillot, tank suit
'640': manhole cover
'641': maraca
'642': marimba, xylophone
'643': mask
'644': matchstick
'645': maypole
'646': maze, labyrinth
'647': measuring cup
'648': medicine chest, medicine cabinet
'649': megalith, megalithic structure
'650': microphone, mike
'651': microwave, microwave oven
'652': military uniform
'653': milk can
'654': minibus
'655': miniskirt, mini
'656': minivan
'657': missile
'658': mitten
'659': mixing bowl
'660': mobile home, manufactured home
'661': Model T
'662': modem
'663': monastery
'664': monitor
'665': moped
'666': mortar
'667': mortarboard
'668': mosque
'669': mosquito net
'670': motor scooter, scooter
'671': mountain bike, all-terrain bike, off-roader
'672': mountain tent
'673': mouse, computer mouse
'674': mousetrap
'675': moving van
'676': muzzle
'677': nail
'678': neck brace
'679': necklace
'680': nipple
'681': notebook, notebook computer
'682': obelisk
'683': oboe, hautboy, hautbois
'684': ocarina, sweet potato
'685': odometer, hodometer, mileometer, milometer
'686': oil filter
'687': organ, pipe organ
'688': oscilloscope, scope, cathode-ray oscilloscope, CRO
'689': overskirt
'690': oxcart
'691': oxygen mask
'692': packet
'693': paddle, boat paddle
'694': paddlewheel, paddle wheel
'695': padlock
'696': paintbrush
'697': pajama, pyjama, pj's, jammies
'698': palace
'699': panpipe, pandean pipe, syrinx
'700': paper towel
'701': parachute, chute
'702': parallel bars, bars
'703': park bench
'704': parking meter
'705': passenger car, coach, carriage
'706': patio, terrace
'707': pay-phone, pay-station
'708': pedestal, plinth, footstall
'709': pencil box, pencil case
'710': pencil sharpener
'711': perfume, essence
'712': Petri dish
'713': photocopier
'714': pick, plectrum, plectron
'715': pickelhaube
'716': picket fence, paling
'717': pickup, pickup truck
'718': pier
'719': piggy bank, penny bank
'720': pill bottle
'721': pillow
'722': ping-pong ball
'723': pinwheel
'724': pirate, pirate ship
'725': pitcher, ewer
'726': plane, carpenter's plane, woodworking plane
'727': planetarium
'728': plastic bag
'729': plate rack
'730': plow, plough
'731': plunger, plumber's helper
'732': Polaroid camera, Polaroid Land camera
'733': pole
'734': police van, police wagon, paddy wagon, patrol wagon, wagon, black
Maria
'735': poncho
'736': pool table, billiard table, snooker table
'737': pop bottle, soda bottle
'738': pot, flowerpot
'739': potter's wheel
'740': power drill
'741': prayer rug, prayer mat
'742': printer
'743': prison, prison house
'744': projectile, missile
'745': projector
'746': puck, hockey puck
'747': punching bag, punch bag, punching ball, punchball
'748': purse
'749': quill, quill pen
'750': quilt, comforter, comfort, puff
'751': racer, race car, racing car
'752': racket, racquet
'753': radiator
'754': radio, wireless
'755': radio telescope, radio reflector
'756': rain barrel
'757': recreational vehicle, RV, R.V.
'758': reel
'759': reflex camera
'760': refrigerator, icebox
'761': remote control, remote
'762': restaurant, eating house, eating place, eatery
'763': revolver, six-gun, six-shooter
'764': rifle
'765': rocking chair, rocker
'766': rotisserie
'767': rubber eraser, rubber, pencil eraser
'768': rugby ball
'769': rule, ruler
'770': running shoe
'771': safe
'772': safety pin
'773': saltshaker, salt shaker
'774': sandal
'775': sarong
'776': sax, saxophone
'777': scabbard
'778': scale, weighing machine
'779': school bus
'780': schooner
'781': scoreboard
'782': screen, CRT screen
'783': screw
'784': screwdriver
'785': seat belt, seatbelt
'786': sewing machine
'787': shield, buckler
'788': shoe shop, shoe-shop, shoe store
'789': shoji
'790': shopping basket
'791': shopping cart
'792': shovel
'793': shower cap
'794': shower curtain
'795': ski
'796': ski mask
'797': sleeping bag
'798': slide rule, slipstick
'799': sliding door
'800': slot, one-armed bandit
'801': snorkel
'802': snowmobile
'803': snowplow, snowplough
'804': soap dispenser
'805': soccer ball
'806': sock
'807': solar dish, solar collector, solar furnace
'808': sombrero
'809': soup bowl
'810': space bar
'811': space heater
'812': space shuttle
'813': spatula
'814': speedboat
'815': spider web, spider's web
'816': spindle
'817': sports car, sport car
'818': spotlight, spot
'819': stage
'820': steam locomotive
'821': steel arch bridge
'822': steel drum
'823': stethoscope
'824': stole
'825': stone wall
'826': stopwatch, stop watch
'827': stove
'828': strainer
'829': streetcar, tram, tramcar, trolley, trolley car
'830': stretcher
'831': studio couch, day bed
'832': stupa, tope
'833': submarine, pigboat, sub, U-boat
'834': suit, suit of clothes
'835': sundial
'836': sunglass
'837': sunglasses, dark glasses, shades
'838': sunscreen, sunblock, sun blocker
'839': suspension bridge
'840': swab, swob, mop
'841': sweatshirt
'842': swimming trunks, bathing trunks
'843': swing
'844': switch, electric switch, electrical switch
'845': syringe
'846': table lamp
'847': tank, army tank, armored combat vehicle, armoured combat vehicle
'848': tape player
'849': teapot
'850': teddy, teddy bear
'851': television, television system
'852': tennis ball
'853': thatch, thatched roof
'854': theater curtain, theatre curtain
'855': thimble
'856': thresher, thrasher, threshing machine
'857': throne
'858': tile roof
'859': toaster
'860': tobacco shop, tobacconist shop, tobacconist
'861': toilet seat
'862': torch
'863': totem pole
'864': tow truck, tow car, wrecker
'865': toyshop
'866': tractor
'867': trailer truck, tractor trailer, trucking rig, rig, articulated lorry,
semi
'868': tray
'869': trench coat
'870': tricycle, trike, velocipede
'871': trimaran
'872': tripod
'873': triumphal arch
'874': trolleybus, trolley coach, trackless trolley
'875': trombone
'876': tub, vat
'877': turnstile
'878': typewriter keyboard
'879': umbrella
'880': unicycle, monocycle
'881': upright, upright piano
'882': vacuum, vacuum cleaner
'883': vase
'884': vault
'885': velvet
'886': vending machine
'887': vestment
'888': viaduct
'889': violin, fiddle
'890': volleyball
'891': waffle iron
'892': wall clock
'893': wallet, billfold, notecase, pocketbook
'894': wardrobe, closet, press
'895': warplane, military plane
'896': washbasin, handbasin, washbowl, lavabo, wash-hand basin
'897': washer, automatic washer, washing machine
'898': water bottle
'899': water jug
'900': water tower
'901': whiskey jug
'902': whistle
'903': wig
'904': window screen
'905': window shade
'906': Windsor tie
'907': wine bottle
'908': wing
'909': wok
'910': wooden spoon
'911': wool, woolen, woollen
'912': worm fence, snake fence, snake-rail fence, Virginia fence
'913': wreck
'914': yawl
'915': yurt
'916': web site, website, internet site, site
'917': comic book
'918': crossword puzzle, crossword
'919': street sign
'920': traffic light, traffic signal, stoplight
'921': book jacket, dust cover, dust jacket, dust wrapper
'922': menu
'923': plate
'924': guacamole
'925': consomme
'926': hot pot, hotpot
'927': trifle
'928': ice cream, icecream
'929': ice lolly, lolly, lollipop, popsicle
'930': French loaf
'931': bagel, beigel
'932': pretzel
'933': cheeseburger
'934': hotdog, hot dog, red hot
'935': mashed potato
'936': head cabbage
'937': broccoli
'938': cauliflower
'939': zucchini, courgette
'940': spaghetti squash
'941': acorn squash
'942': butternut squash
'943': cucumber, cuke
'944': artichoke, globe artichoke
'945': bell pepper
'946': cardoon
'947': mushroom
'948': Granny Smith
'949': strawberry
'950': orange
'951': lemon
'952': fig
'953': pineapple, ananas
'954': banana
'955': jackfruit, jak, jack
'956': custard apple
'957': pomegranate
'958': hay
'959': carbonara
'960': chocolate sauce, chocolate syrup
'961': dough
'962': meat loaf, meatloaf
'963': pizza, pizza pie
'964': potpie
'965': burrito
'966': red wine
'967': espresso
'968': cup
'969': eggnog
'970': alp
'971': bubble
'972': cliff, drop, drop-off
'973': coral reef
'974': geyser
'975': lakeside, lakeshore
'976': promontory, headland, head, foreland
'977': sandbar, sand bar
'978': seashore, coast, seacoast, sea-coast
'979': valley, vale
'980': volcano
'981': ballplayer, baseball player
'982': groom, bridegroom
'983': scuba diver
'984': rapeseed
'985': daisy
'986': yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus,
Cypripedium parviflorum
'987': corn
'988': acorn
'989': hip, rose hip, rosehip
'990': buckeye, horse chestnut, conker
'991': coral fungus
'992': agaric
'993': gyromitra
'994': stinkhorn, carrion fungus
'995': earthstar
'996': hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola
frondosa
'997': bolete
'998': ear, spike, capitulum
'999': toilet tissue, toilet paper, bathroom tissue
splits:
- name: train
num_bytes: 3208631875.5
num_examples: 40036
- name: validation
num_bytes: 6706896736.0
num_examples: 50000
- name: test
num_bytes: 13610348261.0
num_examples: 100000
download_size: 23496058678
dataset_size: 23525876872.5
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
BarryFutureman/vpt_data_8xx_shard0008 | BarryFutureman | 2025-06-14T09:39:29Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:100K<n<1M",
"format:parquet",
"modality:tabular",
"modality:text",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot"
] | [
"robotics"
] | 2025-06-14T09:38:33Z | 0 | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": null,
"total_episodes": 67,
"total_frames": 326951,
"total_tasks": 1,
"total_videos": 67,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 20,
"splits": {
"train": "0:67"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"observation.image": {
"dtype": "video",
"shape": [
3,
360,
640
],
"names": [
"channel",
"height",
"width"
],
"info": {
"video.height": 360,
"video.width": 640,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 20,
"video.channels": 3,
"has_audio": false
}
},
"action": {
"dtype": "string",
"shape": [
1
],
"names": null
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
DeepShop/DeepShop | DeepShop | 2025-05-13T16:14:08Z | 17 | 0 | [
"task_categories:question-answering",
"language:en",
"license:cc-by-4.0",
"size_categories:n<1K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"agent"
] | [
"question-answering"
] | 2025-05-12T11:16:10Z | 0 | ---
license: cc-by-4.0
task_categories:
- question-answering
language:
- en
tags:
- agent
pretty_name: DeepShop
size_categories:
- n<1K
--- |
BelalElhossany/FinClass_Reasoning_Dataset | BelalElhossany | 2025-04-21T20:10:31Z | 36 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-21T20:10:20Z | 0 | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype: int64
- name: predicted_label
dtype: string
- name: reasoning
dtype: string
splits:
- name: train
num_bytes: 4631828.948690008
num_examples: 11505
- name: validation
num_bytes: 578928.2944994551
num_examples: 1438
- name: test
num_bytes: 579330.887193822
num_examples: 1439
download_size: 2893559
dataset_size: 5790088.130383286
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
bitmind/AFHQ___mobius | bitmind | 2024-10-31T20:11:04Z | 241 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:image",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-10-31T20:09:47Z | 0 | ---
dataset_info:
features:
- name: image
dtype: image
splits:
- name: train
num_bytes: 1751255957.33
num_examples: 15803
download_size: 1751467871
dataset_size: 1751255957.33
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/buzz_sources_287_liquid | supergoose | 2024-11-10T20:51:56Z | 17 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-11-10T20:51:55Z | 0 | ---
dataset_info:
features:
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
- name: source
dtype: string
- name: stack
dtype: string
splits:
- name: train
num_bytes: 45778
num_examples: 29
download_size: 22000
dataset_size: 45778
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
obiwan96/obiwan96open_web_math_raw_v3_25000_50000 | obiwan96 | 2025-02-25T11:33:15Z | 20 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-25T04:10:48Z | 0 | ---
dataset_info:
features:
- name: url
dtype: string
- name: text
dtype: string
- name: date
dtype: string
- name: metadata
dtype: string
- name: backtracking_raw
dtype: string
- name: verification_raw
dtype: string
- name: subgoal_setting_raw
dtype: string
- name: backward_chaining_raw
dtype: string
splits:
- name: train
num_bytes: 255242503
num_examples: 25000
download_size: 118643779
dataset_size: 255242503
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
sghosts/rand-tezler-firstlast10_alp | sghosts | 2025-03-10T00:11:47Z | 16 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T00:11:42Z | 0 | ---
dataset_info:
features:
- name: subdir
dtype: string
- name: pdf_path
dtype: string
- name: page_num
dtype: int64
- name: image
dtype: image
- name: alp
dtype: int64
splits:
- name: train
num_bytes: 76026218.0
num_examples: 1000
download_size: 75619850
dataset_size: 76026218.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
FiscaAI/PSYHSK_50.00-D-embeddings | FiscaAI | 2024-12-17T18:55:43Z | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-12-17T18:55:42Z | 0 | ---
dataset_info:
features:
- name: document
struct:
- name: code
dtype: string
- name: MwSt
dtype: string
- name: Bez_255
dtype: string
- name: LTyp
dtype: string
- name: validFrom
dtype: string
- name: K_Pfl
dtype: string
- name: Sortierung
dtype: string
- name: Anaesthesie_Min
dtype: string
- name: betr
dtype: string
- name: KNr
dtype: string
- name: name
dtype: string
- name: LNr
dtype: string
- name: Prix_Var
dtype: string
- name: regelAlter
dtype: string
- name: P_AL_R
dtype: string
- name: Raum_Min
dtype: string
- name: id
dtype: string
- name: P_TL
dtype: string
- name: pik
dtype: string
- name: Arzt_t
dtype: string
- name: U_Pfl
dtype: string
- name: Sparte_Text
dtype: string
- name: Mechanik_Text
dtype: string
- name: TP_TL
dtype: string
- name: regelMenge
dtype: string
- name: groups
dtype: string
- name: Med_Interpret
dtype: string
- name: K_Pfl_Text
dtype: string
- name: description
dtype: string
- name: Sparte
dtype: string
- name: Typ_Text
dtype: string
- name: pdf
dtype: string
- name: text
dtype: string
- name: depends
dtype: string
- name: Version
dtype: string
- name: Mechanik
dtype: string
- name: TTyp
dtype: string
- name: Variante
dtype: string
- name: Preisversion
dtype: string
- name: regelKum
dtype: string
- name: U_Pfl_Text
dtype: string
- name: embedding
sequence: float64
splits:
- name: train
num_bytes: 358243
num_examples: 10
download_size: 345279
dataset_size: 358243
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
reasoning-proj/contrast_pairs_deepseek-ai_DeepSeek-R1-Distill-Llama-8B_neutral_add_random_text | reasoning-proj | 2025-05-15T05:57:09Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-15T05:57:04Z | 0 | ---
dataset_info:
features:
- name: text_input
dtype: string
- name: label
dtype: string
- name: intervention_type_group
dtype: string
- name: original_id
dtype: string
splits:
- name: train
num_bytes: 14540566
num_examples: 1200
download_size: 5219474
dataset_size: 14540566
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
neelabh17/new_news_exploded_prompt_n_10_d_perc_100_num_gen_10_Qwen2.5-7B-Instruct | neelabh17 | 2025-05-15T15:28:18Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-15T15:28:16Z | 0 | ---
dataset_info:
features:
- name: id
dtype: string
- name: name
dtype: string
- name: topic
dtype: string
- name: news
dtype: string
- name: category
dtype: string
- name: question
dtype: string
- name: option
sequence: string
- name: prompt
dtype: string
- name: response_0
dtype: string
- name: answer_0
dtype: string
- name: correct_0
dtype: int64
- name: response_1
dtype: string
- name: answer_1
dtype: string
- name: correct_1
dtype: int64
- name: response_2
dtype: string
- name: answer_2
dtype: string
- name: correct_2
dtype: int64
- name: response_3
dtype: string
- name: answer_3
dtype: string
- name: correct_3
dtype: int64
- name: response_4
dtype: string
- name: answer_4
dtype: string
- name: correct_4
dtype: int64
- name: response_5
dtype: string
- name: answer_5
dtype: string
- name: correct_5
dtype: int64
- name: response_6
dtype: string
- name: answer_6
dtype: string
- name: correct_6
dtype: int64
- name: response_7
dtype: string
- name: answer_7
dtype: string
- name: correct_7
dtype: int64
- name: response_8
dtype: string
- name: answer_8
dtype: string
- name: correct_8
dtype: int64
- name: response_9
dtype: string
- name: answer_9
dtype: string
- name: correct_9
dtype: int64
splits:
- name: train
num_bytes: 3287856
num_examples: 375
download_size: 1193543
dataset_size: 3287856
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
vklinhhh/imgur5k_words | vklinhhh | 2025-05-07T10:03:09Z | 0 | 0 | [
"region:us"
] | [] | 2025-05-07T10:02:18Z | 0 | ---
dataset_info:
features:
- name: label
dtype: string
- name: full_character
sequence: string
- name: base_character
sequence: string
- name: type
sequence: string
- name: diacritic_type
sequence: string
- name: image
dtype: image
splits:
- name: train
num_bytes: 718622312.0
num_examples: 60472
download_size: 693621288
dataset_size: 718622312.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
davanstrien/models_with_metadata_and_summaries | davanstrien | 2025-06-03T08:21:20Z | 119 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-25T11:12:24Z | 0 | ---
dataset_info:
features:
- name: modelId
dtype: large_string
- name: author
dtype: large_string
- name: last_modified
dtype: timestamp[us, tz=UTC]
- name: downloads
dtype: int64
- name: likes
dtype: int64
- name: library_name
dtype: large_string
- name: tags
large_list: large_string
- name: pipeline_tag
dtype: large_string
- name: createdAt
dtype: timestamp[us, tz=UTC]
- name: card
dtype: large_string
- name: summary
dtype: large_string
- name: post_yaml_content
dtype: large_string
- name: param_count
dtype: int64
- name: formatted_count
dtype: large_string
splits:
- name: train
num_bytes: 31546660
num_examples: 1380
download_size: 10404293
dataset_size: 31546660
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
appier-rey/mmlu-en | appier-rey | 2025-04-05T04:10:10Z | 51 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-05T04:10:06Z | 0 | ---
dataset_info:
features:
- name: 'Unnamed: 0'
dtype: int64
- name: Question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: Answer
dtype: string
- name: Subject
dtype: string
splits:
- name: test
num_bytes: 6952813
num_examples: 13871
download_size: 3797816
dataset_size: 6952813
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
kevin017/tokenized_bioS_inverse_QA_c_city_large_padding | kevin017 | 2025-04-03T14:54:21Z | 14 | 0 | [
"size_categories:n<1K",
"format:parquet",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-03T14:54:09Z | 0 | ---
dataset_info:
features:
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: answers_tokenized
struct:
- name: attention_mask
sequence:
sequence: int64
- name: input_ids
sequence:
sequence: int64
splits:
- name: train
num_bytes: 498293
num_examples: 9
- name: test
num_bytes: 1337933
num_examples: 9
download_size: 181719
dataset_size: 1836226
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
jeffreygwang/pythia_dedupe_mia_0-97000_97010-97025 | jeffreygwang | 2025-01-06T22:50:06Z | 17 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-01-05T03:31:26Z | 0 | ---
dataset_info:
features:
- name: tokens
sequence: int64
- name: text
dtype: string
splits:
- name: member
num_bytes: 368149519
num_examples: 15000
- name: nonmember
num_bytes: 368221204
num_examples: 15000
download_size: 249873130
dataset_size: 736370723
configs:
- config_name: default
data_files:
- split: member
path: data/member-*
- split: nonmember
path: data/nonmember-*
---
|
david9dragon9/short_to_long_essays | david9dragon9 | 2024-12-29T01:39:22Z | 38 | 0 | [
"task_categories:text-classification",
"language:en",
"license:mit",
"region:us"
] | [
"text-classification"
] | 2024-12-29T01:32:36Z | 0 | ---
license: mit
task_categories:
- text-classification
language:
- en
---
This dataset contains short argument fragments and long essays written by students. The dataset contains both score and preference data, created by transforming data from essays with neighboring scores.
References:
Argument fragments: https://www.kaggle.com/competitions/feedback-prize-effectiveness
Essays: https://www.kaggle.com/competitions/learning-agency-lab-automated-essay-scoring-2 |
diddydaddy/newname | diddydaddy | 2025-02-02T11:58:28Z | 16 | 0 | [
"license:apache-2.0",
"region:us"
] | [] | 2025-02-02T11:58:28Z | 0 | ---
license: apache-2.0
---
|
bchiusano/WrongPatternsCHILDES | bchiusano | 2025-06-05T13:25:50Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-05T11:55:06Z | 0 | ---
dataset_info:
features:
- name: audio
dtype: audio
- name: file_name
dtype: string
- name: transcript
dtype: string
splits:
- name: train
num_bytes: 20181360.0
num_examples: 528
download_size: 20153835
dataset_size: 20181360.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ruediste/codeparrot-github-code-10G | ruediste | 2025-01-13T19:37:43Z | 132 | 1 | [
"size_categories:10M<n<100M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-01-12T01:55:59Z | 0 | ---
dataset_info:
- config_name: bat
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 776606056
num_examples: 236775
download_size: 224108489
dataset_size: 776606056
- config_name: c
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 10811349125
num_examples: 763797
download_size: 3838352747
dataset_size: 10811349125
- config_name: cmake
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 601149917
num_examples: 175275
download_size: 217789598
dataset_size: 601149917
- config_name: cpp
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 10820311701
num_examples: 841459
download_size: 3686981747
dataset_size: 10820311701
- config_name: cs
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 10943819320
num_examples: 1848161
download_size: 3120336752
dataset_size: 10943819320
- config_name: css
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 10810494864
num_examples: 773883
download_size: 2812801293
dataset_size: 10810494864
- config_name: dockerfile
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 802019049
num_examples: 365925
download_size: 217263689
dataset_size: 802019049
- config_name: f
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 1751209693
num_examples: 141450
download_size: 586553383
dataset_size: 1751209693
- config_name: go
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 10844686994
num_examples: 1177701
download_size: 3533865959
dataset_size: 10844686994
- config_name: hs
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 2017953331
num_examples: 340300
download_size: 800910590
dataset_size: 2017953331
- config_name: html
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 10859691221
num_examples: 950145
download_size: 2911365108
dataset_size: 10859691221
- config_name: java
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 10984706703
num_examples: 1816254
download_size: 3543707243
dataset_size: 10984706703
- config_name: jl
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 310477024
num_examples: 57400
download_size: 113926926
dataset_size: 310477024
- config_name: js
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 10859135475
num_examples: 1321191
download_size: 3887494730
dataset_size: 10859135475
- config_name: lua
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 3067620267
num_examples: 578100
download_size: 1100472588
dataset_size: 3067620267
- config_name: makefile
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 3187259914
num_examples: 678550
download_size: 1207339362
dataset_size: 3187259914
- config_name: md
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 11106099907
num_examples: 3654282
download_size: 5418909097
dataset_size: 11106099907
- config_name: perl
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 5092484681
num_examples: 497125
download_size: 1971626131
dataset_size: 5092484681
- config_name: php
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 10923821552
num_examples: 1803703
download_size: 3855239888
dataset_size: 10923821552
- config_name: ps1
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 751671158
num_examples: 136325
download_size: 266061632
dataset_size: 751671158
- config_name: py
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 10860398080
num_examples: 1389571
download_size: 3880081226
dataset_size: 10860398080
- config_name: rb
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 11107135154
num_examples: 4084613
download_size: 4250173287
dataset_size: 11107135154
- config_name: rs
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 2895512507
num_examples: 321850
download_size: 958277948
dataset_size: 2895512507
- config_name: scala
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 4253564514
num_examples: 835375
download_size: 1543815679
dataset_size: 4253564514
- config_name: sh
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 3345439162
num_examples: 1384775
download_size: 1435265806
dataset_size: 3345439162
- config_name: sql
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 6160648946
num_examples: 656000
download_size: 1614855561
dataset_size: 6160648946
- config_name: tex
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 2332592780
num_examples: 250100
download_size: 1014672343
dataset_size: 2332592780
- config_name: ts
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 10813959394
num_examples: 793033
download_size: 2913351624
dataset_size: 10813959394
- config_name: vb
features:
- name: code
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: language
dtype: string
- name: license
dtype: string
- name: size
dtype: int32
splits:
- name: train
num_bytes: 2055862838
num_examples: 154775
download_size: 507645412
dataset_size: 2055862838
configs:
- config_name: bat
data_files:
- split: train
path: bat/train-*
- config_name: c
data_files:
- split: train
path: c/train-*
- config_name: cmake
data_files:
- split: train
path: cmake/train-*
- config_name: cpp
data_files:
- split: train
path: cpp/train-*
- config_name: cs
data_files:
- split: train
path: cs/train-*
- config_name: css
data_files:
- split: train
path: css/train-*
- config_name: dockerfile
data_files:
- split: train
path: dockerfile/train-*
- config_name: f
data_files:
- split: train
path: f/train-*
- config_name: go
data_files:
- split: train
path: go/train-*
- config_name: hs
data_files:
- split: train
path: hs/train-*
- config_name: html
data_files:
- split: train
path: html/train-*
- config_name: java
data_files:
- split: train
path: java/train-*
- config_name: jl
data_files:
- split: train
path: jl/train-*
- config_name: js
data_files:
- split: train
path: js/train-*
- config_name: lua
data_files:
- split: train
path: lua/train-*
- config_name: makefile
data_files:
- split: train
path: makefile/train-*
- config_name: md
data_files:
- split: train
path: md/train-*
- config_name: perl
data_files:
- split: train
path: perl/train-*
- config_name: php
data_files:
- split: train
path: php/train-*
- config_name: ps1
data_files:
- split: train
path: ps1/train-*
- config_name: py
data_files:
- split: train
path: py/train-*
- config_name: rb
data_files:
- split: train
path: rb/train-*
- config_name: rs
data_files:
- split: train
path: rs/train-*
- config_name: scala
data_files:
- split: train
path: scala/train-*
- config_name: sh
data_files:
- split: train
path: sh/train-*
- config_name: sql
data_files:
- split: train
path: sql/train-*
- config_name: tex
data_files:
- split: train
path: tex/train-*
- config_name: ts
data_files:
- split: train
path: ts/train-*
- config_name: vb
data_files:
- split: train
path: vb/train-*
---
This dataset is derived from the [Codeparrot Dataset](https://huggingface.co/datasets/codeparrot/github-code) by taking the first 10GB of text from each language and splitting it into individual configs. This results in a download size of about 3GB per language.
Sample usage:
```python
from datasets import load_dataset
dataset = load_dataset("ruediste/codeparrot-github-code-10G", "java")
```
List of Languages:
```
languages = {
'HTML': 'html',
'Java': 'java',
'JavaScript': 'js',
'CSS': 'css',
'C#': 'cs',
'TypeScript': 'ts',
"Batchfile": "bat",
"C": 'c',
"C++": 'cpp',
"CMake": "cmake",
"Dockerfile": "dockerfile",
"FORTRAN": 'f',
"GO": "go",
"Haskell": "hs",
"Julia": "jl",
"Lua": "lua",
"Makefile": "makefile",
"Markdown": "md",
"PHP": "php",
"Perl": "perl",
"PowerShell": 'ps1',
"Python": "py",
"Ruby": "rb",
"Rust": "rs",
"SQL": "sql",
"Scala": "scala",
"Shell": "sh",
"TeX": "tex",
"Visual Basic": "vb"
}
```
Please note that Assembly was lost in the conversion.
|
abhinav302019/olympiad_data_10118 | abhinav302019 | 2025-03-05T23:59:45Z | 17 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-05T23:59:44Z | 0 | ---
dataset_info:
features:
- name: problem
dtype: string
- name: Known_Solution
dtype: string
- name: Known_Answer
dtype: string
- name: Generated_Solution
dtype: string
- name: Generated_Answer
dtype: string
- name: Judge_Evaluation
dtype: string
- name: Judge_Rating
dtype: string
- name: Judge_Justification
dtype: string
splits:
- name: train
num_bytes: 47924
num_examples: 5
download_size: 39543
dataset_size: 47924
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
nsethi610/llmtwin | nsethi610 | 2025-04-27T21:43:22Z | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-24T14:43:48Z | 0 | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: output
dtype: string
splits:
- name: train
num_bytes: 32910
num_examples: 63
- name: test
num_bytes: 3630
num_examples: 7
download_size: 26769
dataset_size: 36540
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
mbodiai/ABB_PandG_20250401_093449 | mbodiai | 2025-04-17T06:31:36Z | 6 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-17T06:31:35Z | 0 | ---
dataset_info:
features:
- name: observation
struct:
- name: image
dtype: image
- name: instruction
dtype: string
- name: prompt
dtype: string
- name: action
struct:
- name: pose
struct:
- name: x
dtype: float32
- name: y
dtype: float32
- name: z
dtype: float32
- name: roll
dtype: float32
- name: pitch
dtype: float32
- name: yaw
dtype: float32
- name: grasp
dtype: float32
- name: gripper_force
dtype: int64
- name: state
struct:
- name: images
struct:
- name: camF
dtype: image
- name: camT
dtype: image
- name: depths
struct:
- name: camF
dtype: image
- name: camT
dtype: image
- name: world_objects
dtype: string
- name: gripper_pose
sequence: float32
- name: reward
dtype: float32
- name: metadata
struct:
- name: episode_idx
dtype: int64
- name: step_idx
dtype: int64
splits:
- name: train
num_bytes: 52605627.0
num_examples: 5
download_size: 30312273
dataset_size: 52605627.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Asap7772/elix_generations_llama31_8b_cleaned | Asap7772 | 2024-12-12T00:51:05Z | 14 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-12-12T00:50:56Z | 0 | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: level
dtype: string
- name: level_id
dtype: int64
- name: responses
sequence: string
splits:
- name: train
num_bytes: 148566225.6
num_examples: 1134
- name: test
num_bytes: 16507358.4
num_examples: 126
download_size: 70331308
dataset_size: 165073584.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
abhinav302019/olympiad_data_10067 | abhinav302019 | 2025-03-05T19:29:50Z | 16 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-05T19:29:49Z | 0 | ---
dataset_info:
features:
- name: problem
dtype: string
- name: Known_Solution
dtype: string
- name: Known_Answer
dtype: string
- name: Generated_Solution
dtype: string
- name: Generated_Answer
dtype: string
- name: Judge_Evaluation
dtype: string
- name: Judge_Rating
dtype: string
- name: Judge_Justification
dtype: string
splits:
- name: train
num_bytes: 39385
num_examples: 5
download_size: 36730
dataset_size: 39385
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
extralit-dev/test_import_dataset_from_hub_with_classlabel_8f99c116-d33b-4499-9e9e-8ff51f0248bb | extralit-dev | 2025-06-20T21:45:59Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-20T21:45:58Z | 0 | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1264
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
zkdeng/mushroom_50_edible | zkdeng | 2025-02-24T23:12:29Z | 16 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-24T23:12:26Z | 0 | ---
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
dataset_info:
features:
- name: observer_id
dtype: int64
- name: login
dtype: string
- name: name
dtype: string
- name: photo_url
dtype: string
- name: taxon_name
dtype: string
- name: ancestry
dtype: string
- name: taxon_id
dtype: int64
- name: rank
dtype: string
- name: photo_id
dtype: int64
- name: photo_uuid
dtype: string
- name: extension
dtype: string
- name: edibility_class
dtype: int64
- name: image_file
dtype: string
- name: label
dtype: int64
- name: image
dtype: string
splits:
- name: train
num_bytes: 762678
num_examples: 2150
download_size: 254543
dataset_size: 762678
---
# Dataset Card for "mushroom_50_edible"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
MayAlsofyani/balanced_manybugs_promote1 | MayAlsofyani | 2025-01-28T20:08:08Z | 14 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-01-28T20:08:06Z | 0 | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype: int64
- name: response
dtype: string
splits:
- name: train
num_bytes: 172253
num_examples: 42
download_size: 76883
dataset_size: 172253
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
martijn75/Tanakh_voc_with_aramaic | martijn75 | 2025-01-10T14:53:18Z | 18 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-01-10T14:52:08Z | 0 | ---
dataset_info:
features:
- name: Text
dtype: string
splits:
- name: train
num_bytes: 4260733.471629042
num_examples: 20651
- name: test
num_bytes: 473506.5283709579
num_examples: 2295
download_size: 2352646
dataset_size: 4734240.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
mlfoundations-dev/distill_70b_infra_together | mlfoundations-dev | 2025-02-02T17:02:19Z | 15 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-02T03:46:04Z | 0 | ---
dataset_info:
features:
- name: system
dtype: string
- name: problem
dtype: string
- name: task
dtype: string
- name: __original_row_idx
dtype: int64
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 195589252
num_examples: 2500
download_size: 56146335
dataset_size: 195589252
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
abhishekkuber/cogdist_ner | abhishekkuber | 2025-01-13T09:45:03Z | 65 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-01-13T09:44:58Z | 0 | ---
dataset_info:
features:
- name: id
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence:
class_label:
names:
'0': O
'1': I
splits:
- name: train
num_bytes: 5696443.982581156
num_examples: 2020
- name: validation
num_bytes: 713465.508709422
num_examples: 253
- name: test
num_bytes: 713465.508709422
num_examples: 253
download_size: 1019755
dataset_size: 7123375.000000001
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
qfq/trainnov28_timelimit_sft_numberedmax | qfq | 2024-12-02T03:31:08Z | 16 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-12-02T03:21:00Z | 0 | ---
dataset_info:
features:
- name: question
dtype: string
- name: solution
dtype: string
- name: attempt
dtype: string
- name: cot_type
dtype: string
- name: source_type
dtype: string
- name: metadata
dtype: string
- name: cot
sequence: string
- name: text
dtype: string
splits:
- name: train
num_bytes: 14924429
num_examples: 1088
- name: test
num_bytes: 772517
num_examples: 58
download_size: 6815344
dataset_size: 15696946
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
SandeepKumarRudhravaram/llama2-Lung_Cancer_QA_V2 | SandeepKumarRudhravaram | 2024-11-19T01:46:41Z | 17 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-11-19T01:46:31Z | 0 | ---
dataset_info:
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 477849
num_examples: 3000
download_size: 20261
dataset_size: 477849
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
AI4Protein/FLIP_GB1_one-vs-rest | AI4Protein | 2024-11-21T13:39:58Z | 63 | 0 | [
"license:apache-2.0",
"size_categories:10K<n<100K",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-11-21T13:39:46Z | 0 | ---
license: apache-2.0
---
|
TAUR-dev/evals__fixed_bf26k_short_syncot_v2__samples | TAUR-dev | 2025-03-22T10:51:40Z | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-22T10:51:38Z | 0 | ---
dataset_info:
features:
- name: doc_id
dtype: int64
- name: doc
struct:
- name: answer
dtype: string
- name: level
dtype: int64
- name: problem
dtype: string
- name: solution
dtype: string
- name: subject
dtype: string
- name: unique_id
dtype: string
- name: target
dtype: string
- name: arguments
struct:
- name: gen_args_0
struct:
- name: arg_0
dtype: string
- name: arg_1
struct:
- name: do_sample
dtype: bool
- name: max_gen_toks
dtype: int64
- name: max_tokens_thinking
dtype: int64
- name: temperature
dtype: float64
- name: thinking_end
dtype: string
- name: thinking_start
dtype: string
- name: until
sequence: 'null'
- name: resps
sequence:
sequence: string
- name: filtered_resps
sequence: string
- name: doc_hash
dtype: string
- name: prompt_hash
dtype: string
- name: target_hash
dtype: string
- name: exact_match
dtype: int64
- name: extracted_answers
sequence: string
splits:
- name: train
num_bytes: 64946040
num_examples: 500
download_size: 6658129
dataset_size: 64946040
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ssyok/malay-voice-test-1 | ssyok | 2025-05-06T05:48:39Z | 0 | 0 | [
"language:ms",
"license:mit",
"size_categories:n<1K",
"modality:audio",
"modality:text",
"region:us",
"audio",
"text"
] | [] | 2025-05-06T05:40:08Z | 0 | ---
license: mit
language:
- ms
tags:
- audio
- text
dataset_info:
features:
- name: text
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 24000
splits:
- name: train
num_bytes: 2545019
num_examples: 6
download_size: 2474583
dataset_size: 2545019
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
size_categories:
- n<1K
--- |
MeissonFlow/lemon | MeissonFlow | 2025-04-01T05:30:38Z | 285 | 0 | [
"license:apache-2.0",
"size_categories:10K<n<100K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-28T14:58:48Z | 0 | ---
license: apache-2.0
---
|
HPC-Forran2Cpp/F2C_dialogue_2.5K | HPC-Forran2Cpp | 2024-12-22T23:55:50Z | 27 | 0 | [
"license:apache-2.0",
"modality:text",
"region:us"
] | [] | 2024-12-22T20:04:34Z | 0 | ---
license: apache-2.0
---
|
jjzha/rt-tokenized | jjzha | 2025-06-08T19:26:49Z | 82 | 0 | [
"task_categories:question-answering",
"language:en",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2505.11140",
"region:us",
"think",
"factuality"
] | [
"question-answering"
] | 2025-03-24T11:24:08Z | 0 | ---
language:
- en
license: apache-2.0
size_categories:
- 1K<n<10K
task_categories:
- question-answering
dataset_info:
features:
- name: id
dtype: string
- name: question
dtype: string
- name: gold_answer
sequence: string
- name: model_answer
sequence: string
- name: model
dtype: string
- name: reasoning_trace
dtype: string
- name: model_attempt
dtype: string
- name: valid
dtype: int64
- name: text
dtype: string
- name: total_length
dtype: int64
- name: think_length
dtype: int64
- name: answer_length
dtype: int64
splits:
- name: train
num_bytes: 92023139
num_examples: 7038
download_size: 39633284
dataset_size: 92023139
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
tags:
- think
- factuality
---
## Dataset Details
### Dataset Description
This dataset is the training data for `rt` from _Scaling Reasoning can Improve Factuality in Large Language Models_. The amount of data is around 7K rows.
- **Curated by:** Mike Zhang
- **Funded by [optional]:** Villum Fonden
- **Language(s) (NLP):** English
- **License:** Apache 2.0
### Dataset Sources [optional]
- **Repository:** https://huggingface.co/datasets/AAU-NLP/fs1-predictions
- **Paper [optional]:** [https://huggingface.co/papers/2505.11140](https://huggingface.co/papers/2505.11140)
- **Github:** https://github.com/jjzha/fs1
## Uses
One can use these reasoning traces to fine-tune their models to induce more factual thinking.
### Direct Use
Having reasoning models via simple scaling (Muennighoff et al., 2025).
### Out-of-Scope Use
We only have QA in this dataset, no other domains like mathematical reasoning or puzzles.
## Dataset Structure
We have the following features:
```
features:
- name: id
dtype: string
- name: question
dtype: string
- name: gold_answer
sequence: string
- name: model_answer
sequence: string
- name: model
dtype: string
- name: reasoning_trace
dtype: string
- name: model_attempt
dtype: string
- name: valid
dtype: int64
- name: text
dtype: string
- name: total_length
dtype: int64
- name: think_length
dtype: int64
- name: answer_length
dtype: int64
```
The part used for fine-tuning is `text` where we pre-apply the chat template and also add a special tag for the `<thinking>` block.
## Dataset Creation
### Source Data
The data comes from the datasets used in the paper.
#### Data Collection and Processing
We did no further pre-processing to the QA pairs.
## Bias, Risks, and Limitations
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. Note that not every answer is correct, thus always double-check the answers from the model.
## Citation
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
```
@misc{zhang2025scalingreasoningimprovefactuality,
title={Scaling Reasoning can Improve Factuality in Large Language Models},
author={Mike Zhang and Johannes Bjerva and Russa Biswas},
year={2025},
eprint={2505.11140},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2505.11140},
}
``` |
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_5c774a4a-0849-4a0c-b7e3-6775f4997500 | argilla-internal-testing | 2024-11-26T10:02:19Z | 15 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2024-11-26T10:02:19Z | 0 | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
cosmo3769/synthetic_vqa_dataset_100_images_google_paligemma2-3b-mix-224 | cosmo3769 | 2025-05-21T22:05:27Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-21T22:05:21Z | 0 | ---
dataset_info:
features:
- name: image
dtype: image
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 20367037.0
num_examples: 400
download_size: 5093989
dataset_size: 20367037.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
liamford1/finetuning_demo | liamford1 | 2025-05-19T20:53:16Z | 28 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-17T23:08:33Z | 0 | ---
dataset_info:
features:
- name: prompt
dtype: string
splits:
- name: train
num_bytes: 942138
num_examples: 839
download_size: 365602
dataset_size: 942138
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.