File size: 3,850 Bytes
d17ef4a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e39ab47
 
 
 
 
d17ef4a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
#%%
import io
import json
import sqlite3
import tempfile

import datasets
import numpy as np
import pandas as pd
import pyreadr
import rdata
import requests

#%%
sqlite_url = "https://experimenthub.bioconductor.org/metadata/experimenthub.sqlite3"
DATA_URL = "https://bioconductorhubs.blob.core.windows.net/experimenthub/curatedMetagenomicData/"
    
CITATION = """\
Pasolli E, Schiffer L, Manghi P, Renson A, Obenchain V, Truong D, Beghini F, Malik F, Ramos M, Dowd J, Huttenhower C, Morgan M, Segata N, Waldron L (2017). Accessible, curated metagenomic data through ExperimentHub. Nat. Methods, 14 (11), 1023-1024. ISSN 1548-7091, 1548-7105, doi: 10.1038/nmeth.4468.
"""

# %%
# def get_metadata():
#     with tempfile.NamedTemporaryFile(delete=False) as tmpfname:
#         r = requests.get(sqlite_url, allow_redirects=True)
#         open(tmpfname.name, 'wb').write(r.content)

#     db = sqlite3.connect(tmpfname.name)
#     cursor = db.cursor()
#     cur = cursor.execute("""SELECT * FROM resources""")

#     ehid = []
#     descriptions = []
#     for row in cur.fetchall():
#         if "curatedMetagenomicData" in str(row[-1]):
#             ehid.append(row[1])
#             descriptions.append(row[7])
#     return ehid, descriptions

def get_metadata():
    """Fetch the dataset index (EHID -> description) for this loader.

    Downloads a two-column TSV from the Hugging Face mirror and parses it.

    Returns:
        tuple[list[str], list[str]]: parallel lists of ExperimentHub IDs and
        their descriptions. Descriptions may carry trailing whitespace/newlines;
        callers are expected to ``.strip()`` them.

    Raises:
        requests.HTTPError: if the index cannot be downloaded.
    """
    index_url = "https://huggingface.co/datasets/wwydmanski/metagenomic_curated/raw/main/index.tsv"
    r = requests.get(index_url, allow_redirects=True)
    # Fail loudly on HTTP errors; otherwise an error page would be parsed as TSV.
    r.raise_for_status()

    ehids = []
    descriptions = []
    # Iterate via StringIO to mirror file-line iteration (keeps newlines in the
    # description field, like the original temp-file approach did).
    for line in io.StringIO(r.text):
        if not line.strip():
            # Skip blank lines (e.g. a trailing newline at end of file).
            continue
        # maxsplit=1 tolerates tab characters inside the description column.
        ehid, desc = line.split("\t", 1)
        ehids.append(ehid)
        descriptions.append(desc)
    return ehids, descriptions

# %%
class MetagenomicCurated(datasets.GeneratorBasedBuilder):
    """Metagenomic Curated Data.

    One builder config per curatedMetagenomicData ExperimentHub resource
    (EHID). Each example is a row of the expression matrix plus the matching
    per-sample metadata (stringified).
    """

    # NOTE: this performs a network request at class-definition (import) time,
    # because `datasets` requires BUILDER_CONFIGS to be a class attribute.
    ehids, descriptions = get_metadata()
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=ehid,
                               version=datasets.Version("1.0.0"),
                               description=d.strip())
        for ehid, d in zip(ehids, descriptions)
    ]

    def _info(self):
        """Return dataset-level metadata for the selected config."""
        return datasets.DatasetInfo(
            description=self.config.description,
            citation=CITATION,
            homepage="https://waldronlab.io/curatedMetagenomicData/index.html",
            license="https://www.r-project.org/Licenses/Artistic-2.0",
        )

    def _split_generators(self, dl_manager):
        """Resolve the EHID to its .rda location and download it.

        Queries the ExperimentHub REST API for this config's resource record,
        then downloads the RData file it points at. Everything goes into a
        single TRAIN split.
        """
        json_url = f"https://experimenthub.bioconductor.org/ehid/{self.config.name}"
        r = requests.get(json_url, allow_redirects=True)
        # Fail loudly on HTTP errors instead of raising a confusing
        # JSONDecodeError on an error page.
        r.raise_for_status()
        metadata = json.loads(r.content)
        url = metadata['location_prefix'] + metadata['rdatapaths'][0]['rdatapath']

        data_fname: str = dl_manager.download(url)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_fname}),
        ]

    def _generate_examples(self, filepath):
        """Yield (index, example) pairs from the downloaded RData file.

        The .rda holds a single ExpressionSet-like object; its 'exprs' assay
        is transposed so rows are samples. Sample metadata is looked up in
        `sampleMetadata.rda` (expected in the working directory — TODO confirm
        it is shipped alongside this script) by study name and sample id.
        """
        parsed = rdata.parser.parse_file(filepath)
        converted = rdata.conversion.convert(parsed)
        expressions = list(converted.values())[0].assayData['exprs']
        data_df = expressions.to_pandas().T  # rows = samples, cols = features
        # Object names look like "<study_name>.<assay>"; keep the study part.
        study_name = list(converted.keys())[0].split(".")[0]

        meta = pyreadr.read_r("sampleMetadata.rda")['sampleMetadata']
        metadata = meta.loc[meta['study_name'] == study_name].set_index('sample_id')

        for idx, (sample_id, row) in enumerate(data_df.iterrows()):
            # Stringify metadata values so the example schema stays uniform.
            sample_meta = metadata.loc[sample_id].to_dict()
            yield idx, {
                "features": row.values,
                "metadata": {key: str(val) for key, val in sample_meta.items()}
            }

# %%
if __name__=="__main__":
    # Smoke test: load a single resource (EH1914) through this loader script
    # and materialize a feature matrix and a label vector.
    ds = datasets.load_dataset("./metagenomic_curated.py", "EH1914")
    X = np.array(ds['train']['features'])
    # 'study_condition' is presumably one of the sampleMetadata columns
    # stringified by _generate_examples — TODO confirm for every study.
    y = np.array([x['study_condition'] for x in ds['train']['metadata']])

# %%