# this is a small 10k-record derivative of the (very large) c4-en dataset, for testing
# how this build script and dataset_infos.json were generated
#
mkdir c4-en-10k
cd c4-en-10k
# data (the 10k records were extracted elsewhere - the full dataset is 1TB+, so it's tough to rebuild from scratch)
```
from datasets import load_dataset

dataset_name = "c4"
# take just the first 10k records of the English train split
ds = load_dataset(dataset_name, 'en', split='train[:10000]')
ds.to_json("c4-en-10k.jsonl", orient="records", lines=True)
```
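# optional sanity check (a hypothetical step, not part of the original process):
# confirm the extracted file really holds 10000 records
```
with open("c4-en-10k.jsonl", encoding="utf-8") as f:
    assert sum(1 for _ in f) == 10000
```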
mkdir c4-en-10k
mv c4-en-10k.jsonl c4-en-10k
tar cfJ c4-en-10k.tar.xz c4-en-10k
# the c4-en-10k subdir gets created on the fly
aws s3 cp c4-en-10k.tar.xz s3://datasets.huggingface.co/nlp/datasets/c4/
# script (adapted from stas/oscar-en-10k)
# manually check that the script is correct - edit the descriptions
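# for orientation, a minimal sketch of what such a build script contains
# (illustrative only: the real c4-en-10k.py may differ, and the _URL below
# assumes the standard https mapping of the s3 bucket used above)
```
import json

import datasets

_URL = "https://s3.amazonaws.com/datasets.huggingface.co/nlp/datasets/c4/c4-en-10k.tar.xz"


class C4En10k(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            description="10k records extracted from the c4-en dataset, for testing",
            features=datasets.Features({"text": datasets.Value("string")}),
        )

    def _split_generators(self, dl_manager):
        # download and unpack the tarball built above; the jsonl sits
        # inside the c4-en-10k/ subdir of the archive
        path = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": f"{path}/c4-en-10k/c4-en-10k.jsonl"},
            )
        ]

    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as f:
            for id_, line in enumerate(f):
                yield id_, {"text": json.loads(line)["text"]}
```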
# create a new dataset entry on the hub
https://huggingface.co/new-dataset
# once created, clone it
git clone https://huggingface.co/datasets/stas/c4-en-10k
cp c4-en-10k.py process.txt c4-en-10k
cd c4-en-10k
git add c4-en-10k.py process.txt README.md
git commit -m "build script" c4-en-10k.py process.txt README.md
git push
# test and generate config file
cd ..
datasets-cli test ./c4-en-10k --save_infos --all_configs
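# optionally inspect the generated metadata before committing (a hypothetical
# check, not part of the original process)
```
import json

with open("c4-en-10k/dataset_infos.json") as f:
    infos = json.load(f)
# one entry per config, each with features, splits, sizes, and checksums
for config, info in infos.items():
    print(config, info["splits"])
```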
# add and push the generated config
cd c4-en-10k
git add dataset_infos.json
git commit -m "add dataset_infos.json" dataset_infos.json
git push
# test that the dataset is working
python -c "from datasets import load_dataset; ds=load_dataset('stas/c4-en-10k'); print(ds)"