🙋🏻‍♂️Welcome to 🧑🏻‍🚀Tonic's🚀🚰Easy🔴Reddit🔥!

This is every record from nreimers/reddit_question_best_answers, appended together and reformatted according to the following template:

{"prompt": "This is the first prompt", "completion": "This is the first completion"}
{"prompt": "This is the second prompt", "completion": "This is the second completion"}

  • 🌟 You can use it in shards or all together!

  • 🌟 This dataset is internally consistent: every shard follows the same prompt/completion format!

🤔The point is to make it easy to train models with a single correctly formatted dataset of

  • 54,367,153 rows
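For example, a single shard or the full set can be loaded with 🤗 datasets. A minimal sketch, assuming shard files named shard_*.jsonl as produced by the sharding script further down:

from datasets import load_dataset

# Load a single shard...
one_shard = load_dataset("json", data_files="sharded_dataset/shard_1.jsonl", split="train")

# ...or every shard at once
all_shards = load_dataset("json", data_files="sharded_dataset/shard_*.jsonl", split="train")
print(all_shards[0])  # {'prompt': ..., 'completion': ...}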

Original Dataset :

nreimers/reddit_question_best_answers

How To Use :

Combine random shards in random quantities to produce a high-quality conversational training dataset for fine-tuning. To save memory, you can instead sample rows line by line with the following script:


# see selectbyline.py

import os
import random

# Directory containing the shard JSONL files
shard_directory = "/path/to/shard/directory"

# Get a list of all JSONL files in the directory
shard_files = [f for f in os.listdir(shard_directory) if f.endswith('.jsonl')]

# Read a random number of lines (between min_lines and max_lines) from a file.
# Note: this loads the whole file into memory; see the streaming variant below.
def read_random_lines(filename, min_lines, max_lines):
    num_lines = random.randint(min_lines, max_lines)

    with open(filename, 'r') as file:
        lines = list(file)

    if len(lines) <= num_lines:
        return lines
    return random.sample(lines, num_lines)

# Function to combine shards
def combine_shards(output_filename, num_combinations):
    with open(output_filename, 'w') as output_file:
        for _ in range(num_combinations):
            selected_shard_file = random.choice(shard_files)
            lines = read_random_lines(os.path.join(shard_directory, selected_shard_file), 5000, 10000)
            output_file.writelines(lines)

# Example usage
combine_shards("/path/to/output/combined_shards.jsonl", 10)
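If the shards are too large to hold in memory, reservoir sampling reads each file one line at a time instead. This is a sketch, not part of the original script; read_random_lines_streaming is a hypothetical drop-in replacement:

import random

# Streaming variant: picks num_lines lines uniformly at random without
# loading the whole file into memory (reservoir sampling)
def read_random_lines_streaming(filename, min_lines, max_lines):
    num_lines = random.randint(min_lines, max_lines)
    reservoir = []
    with open(filename, 'r') as file:
        for i, line in enumerate(file):
            if i < num_lines:
                reservoir.append(line)
            else:
                # Keep each later line with probability num_lines / (i + 1)
                j = random.randint(0, i)
                if j < num_lines:
                    reservoir[j] = line
    return reservoir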

Pre-Processing :

The original gzipped JSONL files were cleaned and flattened into the prompt/completion format with the following script:

import json
import os
import gzip
import logging
import re
import random

# Setup basic logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

def clean_string(s):
    """Remove special characters, keeping only alphanumeric characters and spaces."""
    if isinstance(s, list):
        # Extract text from each dictionary in the list and join into a single string
        s = " ".join([d.get("body", "") if isinstance(d, dict) else str(d) for d in s])
    return re.sub(r'[^A-Za-z0-9 ]+', '', s)

def process_file(input_file, output_file):
    try:
        dataset = []
        with gzip.open(input_file, 'rt') as infile:
            for line in infile:
                # Parse the JSON line
                try:
                    data = json.loads(line)
                except json.JSONDecodeError:
                    logging.error(f"Invalid JSON format in {input_file}: {line}")
                    continue

                # Extract and clean the 'body' and 'answers' fields
                prompt = clean_string(data.get("body", ""))
                completion = clean_string(data.get("answers", ""))

                # For each body found, make a new row and duplicate the prompt for it
                if isinstance(data.get("body", ""), list):
                    for body in data.get("body", []):
                        cleaned_body = clean_string(body)
                        dataset.append({"prompt": cleaned_body, "completion": completion})
                else:
                    dataset.append({"prompt": prompt, "completion": completion})

        # Shuffle the dataset
        random.shuffle(dataset)

        # Write the shuffled dataset to the output file
        with open(output_file, 'a') as outfile:
            for item in dataset:
                json.dump(item, outfile)
                outfile.write('\n')

        logging.info(f"Processed file: {input_file}")

    except Exception as e:
        logging.error(f"Error processing file {input_file}: {e}")

def process_files(file_list, output_dir):
    # Ensure the output directory exists
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Create a single output file path
    output_file = os.path.join(output_dir, 'synthesized_dataset.jsonl')

    for input_file in file_list:
        process_file(input_file, output_file)

file_list = [r'C:\Users\MeMyself\FILES']  # Update with your list of .gz file paths
output_dir = r'C:\Users\MeMyself\reddit_question_best_answers\processed'
process_files(file_list, output_dir)
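Note that process_file opens the output in append mode, so every input file accumulates into one synthesized_dataset.jsonl (and rerunning the script will append duplicate rows). A quick sanity check of the output, as a minimal sketch assuming the output path above:

import json

# Print the first few rows of the synthesized dataset to verify the format
with open(r'C:\Users\MeMyself\reddit_question_best_answers\processed\synthesized_dataset.jsonl', 'r') as f:
    for _ in range(3):
        row = json.loads(next(f))
        print(row["prompt"][:80], "->", row["completion"][:80])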

Sharding Script :


import json
import os

def read_dataset(file_path):
    try:
        with open(file_path, 'r') as file:
            data = [json.loads(line) for line in file]
        print(f"Dataset loaded successfully from {file_path}.")
        return data
    except Exception as e:
        print(f"Error reading dataset from {file_path}: {e}")
        return []

def shard_dataset(dataset, num_shards):
    shard_size = max(1, len(dataset) // num_shards)
    shards = [dataset[i:i + shard_size] for i in range(0, len(dataset), shard_size)]
    # Fold any leftover chunks from the division remainder into the last shard
    while len(shards) > num_shards:
        shards[num_shards - 1].extend(shards.pop())
    print(f"Dataset sharded into {len(shards)} parts.")
    return shards

def write_shards(shards, output_dir):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
        print(f"Created output directory at {output_dir}.")

    for i, shard in enumerate(shards):
        shard_file = os.path.join(output_dir, f'shard_{i+1}.jsonl')
        with open(shard_file, 'w') as file:
            for item in shard:
                json.dump(item, file)
                file.write('\n')
        print(f"Shard {i+1} written to {shard_file}.")

def main():
    input_file = 'path_to_processed_dataset.jsonl'  # Update with your processed dataset file path
    output_dir = 'sharded_dataset'  # Update with your output directory for shards
    num_shards = 33

    dataset = read_dataset(input_file)
    if dataset:
        shards = shard_dataset(dataset, num_shards)
        write_shards(shards, output_dir)
        print("All shards have been successfully written.")
    else:
        print("No dataset to process.")

if __name__ == "__main__":
    main()
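Splitting by integer division means every shard is the same size except the last one, which absorbs the remainder rows. To confirm nothing was dropped, a quick row count over the output directory (a sketch, assuming the default sharded_dataset directory above):

import os

# Sum the line counts of every shard and compare with the input dataset size
total = 0
for name in sorted(os.listdir('sharded_dataset')):
    if name.endswith('.jsonl'):
        with open(os.path.join('sharded_dataset', name), 'r') as f:
            total += sum(1 for _ in f)
print(f"Total rows across shards: {total}")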

Disclaimer :

🌟Re-format this dataset before use.

🌟Probably there's a big problem with the token count on these long answers 😉 (see the filtering sketch below)

🌟Good Luck ! 🧑🏻‍🚀🚀
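If long answers overflow your model's context window, one option is to drop rows over a token budget before training. A minimal sketch, not part of the original pipeline; it uses a whitespace word count as a rough proxy for tokens (swap in your tokenizer for an exact count), and combined_shards.jsonl / filtered.jsonl are hypothetical paths:

import json

MAX_WORDS = 1024  # rough budget; tune for your model's context window

def within_budget(row, max_words=MAX_WORDS):
    # Whitespace word count as a cheap stand-in for a real token count
    return len(row["prompt"].split()) + len(row["completion"].split()) <= max_words

with open('combined_shards.jsonl', 'r') as infile, open('filtered.jsonl', 'w') as outfile:
    for line in infile:
        if within_budget(json.loads(line)):
            outfile.write(line)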
