hash (stringlengths 40–40) | authorName (stringclasses, 42 values) | authorEmail (stringclasses, 41 values) | date (timestamp[ms], 2021-07-26 09:52:55 – 2025-07-18 10:19:56) | subject (stringlengths 11–116) | diff (stringlengths 0–987k)
---|---|---|---|---|---
60f3044c876364a9801923dfbd00360dc8e3f14e | Sylvain Lesage | 2021-08-25T09:05:59 | docs: ✏️ add install of the letsencrypt certificate
diff --git a/INSTALL.md b/INSTALL.md
index d1c1b5d6..95458a5a 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -40 +40 @@ pm2 restart all
-Check if the app is accessible at http://54.158.211.3/healthcheck.
+Check if the app is accessible at https://datasets-preview.huggingface.tech/healthcheck.
@@ -52,0 +53 @@ ipv4 (public): 54.158.211.3
+domain name: datasets-preview.huggingface.tech
@@ -90,0 +92 @@ server {
+ server_name datasets-preview.huggingface.tech;
@@ -112,0 +115,6 @@ sudo systemctl reload nginx
+[Install certbot](https://certbot.eff.org/lets-encrypt/ubuntufocal-nginx) with snap to manage the certificate for the domain name. Email: [email protected].
+
+```bash
+sudo certbot --nginx
+```
+
@@ -129 +137 @@ PORT=8000 pm2 start --name datasets-preview-backend make -- -C /home/hf/datasets
-Check if the app is accessible at http://54.158.211.3/healthcheck.
+Check if the app is accessible at https://datasets-preview.huggingface.tech/healthcheck.
diff --git a/README.md b/README.md
index f08e76dc..665b7b73 100644
--- a/README.md
+++ b/README.md
@@ -53 +53 @@ make watch
-Example: http://54.158.211.3/healthcheck
+Example: https://datasets-preview.huggingface.tech/healthcheck
@@ -67 +67 @@ Responses:
-Example: http://54.158.211.3/info?dataset=glue
+Example: https://datasets-preview.huggingface.tech/info?dataset=glue
@@ -167 +167 @@ Responses:
-Example: http://54.158.211.3/configs?dataset=glue
+Example: https://datasets-preview.huggingface.tech/configs?dataset=glue
@@ -207 +207 @@ Responses:
-Example: http://54.158.211.3/splits?dataset=glue&config=ax
+Example: https://datasets-preview.huggingface.tech/splits?dataset=glue&config=ax
@@ -236 +236 @@ Responses:
-Example: http://54.158.211.3/rows?dataset=glue&config=ax&split=test&rows=2
+Example: https://datasets-preview.huggingface.tech/rows?dataset=glue&config=ax&split=test&rows=2
|
|
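The commit above moves the healthcheck from the raw IP to the certbot-managed HTTPS domain. As a quick sanity check after such a change, the endpoint can be probed programmatically; a minimal sketch, assuming the `requests` package is available:

```python
import requests

# Probe the healthcheck endpoint over HTTPS; the Let's Encrypt certificate
# installed by certbot is validated against requests' default CA bundle.
response = requests.get("https://datasets-preview.huggingface.tech/healthcheck", timeout=10)
response.raise_for_status()
print(response.status_code, response.text)
```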
452a9b41787e31861a010baf3065ac7c4d84a75d | Sylvain Lesage | 2021-08-25T08:47:06 | fix: 🐛 fix JSON key
diff --git a/benchmark/scripts/get_rows_report.py b/benchmark/scripts/get_rows_report.py
index ec81b68e..5094032e 100644
--- a/benchmark/scripts/get_rows_report.py
+++ b/benchmark/scripts/get_rows_report.py
@@ -25 +25 @@ def get_rows_report(dataset: str, config: str, split: str):
- "row_length": len(rows),
+ "rows_length": len(rows),
|
|
37a3f9d39f0449b9c10e2c61ec00f9e400f86fa1 | Sylvain Lesage | 2021-08-23T14:41:52 | fix: 🐛 fix the COMPILE_REPORT target
diff --git a/Makefile b/Makefile
index cc097389..b4f68d90 100644
--- a/Makefile
+++ b/Makefile
@@ -31 +31,3 @@ style:
-# The result is benchmark/tmp/report.json
+# beware: even if all the data should theoretically be streamed, the ~/.cache/huggingface directory
+# will grow by about 25G!
+# The result is benchmark/tmp/report.json (about 40M)
@@ -34 +35,0 @@ benchmark:
-
diff --git a/benchmark/Makefile b/benchmark/Makefile
index 20fd1722..09c97b62 100644
--- a/benchmark/Makefile
+++ b/benchmark/Makefile
@@ -4,0 +5 @@ tmpDir = ./tmp/
+scriptsDir = ./scripts/
@@ -20 +21 @@ $(report_filename): $(get_info_reports_filename) $(get_configs_reports_filename)
- poetry run python $(MERGE_REPORTS) $(get_info_reports_filename) $(get_configs_reports_filename) $(get_splits_reports_filename) $(get_rows_reports_filename) $@
+ poetry run python $(COMPILE_REPORT) $(get_info_reports_filename) $(get_configs_reports_filename) $(get_splits_reports_filename) $(get_rows_reports_filename) $@
|
|
509994681a0be58182738b1471368773a0cf9340 | Sylvain Lesage | 2021-08-23T13:19:59 | docs: ✏️ add comment about "make benchmark"
diff --git a/Makefile b/Makefile
index 1e018e1a..cc097389 100644
--- a/Makefile
+++ b/Makefile
@@ -28,0 +29,3 @@ style:
+# Get a report for every dataset / config / split of the Hub, for every endpoint
+# It takes 1 or 2 hours to run. Delete benchmark/tmp to run from scratch.
+# The result is benchmark/tmp/report.json
|
|
925ed9f6a3454789e21ce7e657f7791411a04159 | Sylvain Lesage | 2021-08-23T13:14:48 | feat: 🎸 shuffle the processes
diff --git a/benchmark/serialized_config_names/Makefile b/benchmark/serialized_config_names/Makefile
index 355dccbe..01f1aa29 100644
--- a/benchmark/serialized_config_names/Makefile
+++ b/benchmark/serialized_config_names/Makefile
@@ -5,0 +6 @@ scriptsDir = ../scripts/
+sorted_serialized_config_names_filename = $(addprefix $(tmpDir), sorted_serialized_config_names.txt)
@@ -13,0 +15 @@ all: $(serialized_config_names_filename)
+# the names are shuffled to avoid processing configs from the same dataset together, since the data server might ban us
@@ -16 +18,2 @@ $(serialized_config_names_filename): $(get_configs_reports_filename)
- poetry run python $(GET_SERIALIZED_CONFIG_NAMES) $(get_configs_reports_filename) $(serialized_config_names_filename)
+ poetry run python $(GET_SERIALIZED_CONFIG_NAMES) $(get_configs_reports_filename) $(sorted_serialized_config_names_filename)
+ shuf $(sorted_serialized_config_names_filename) -o $(serialized_config_names_filename)
@@ -19 +22 @@ clean:
- rm -rf $(serialized_config_names_filename)
+ rm -rf $(serialized_config_names_filename) $(sorted_serialized_config_names_filename)
diff --git a/benchmark/serialized_dataset_names/Makefile b/benchmark/serialized_dataset_names/Makefile
index f898d3f3..486246be 100644
--- a/benchmark/serialized_dataset_names/Makefile
+++ b/benchmark/serialized_dataset_names/Makefile
@@ -5,0 +6 @@ scriptsDir = ../scripts/
+sorted_serialized_dataset_names_filename = $(addprefix $(tmpDir), sorted_serialized_dataset_names.txt)
@@ -15 +16,2 @@ $(serialized_dataset_names_filename):
- poetry run python $(GET_SERIALIZED_DATASET_NAMES) $(serialized_dataset_names_filename)
+ poetry run python $(GET_SERIALIZED_DATASET_NAMES) $(sorted_serialized_dataset_names_filename)
+ shuf $(sorted_serialized_dataset_names_filename) -o $(serialized_dataset_names_filename)
@@ -18 +20 @@ clean:
- rm -rf $(serialized_dataset_names_filename)
+ rm -rf $(serialized_dataset_names_filename) $(sorted_serialized_dataset_names_filename)
diff --git a/benchmark/serialized_split_names/Makefile b/benchmark/serialized_split_names/Makefile
index 637db06c..35a13fb1 100644
--- a/benchmark/serialized_split_names/Makefile
+++ b/benchmark/serialized_split_names/Makefile
@@ -5,0 +6 @@ scriptsDir = ../scripts/
+sorted_serialized_split_names_filename = $(addprefix $(tmpDir), sorted_serialized_split_names.txt)
@@ -13,0 +15 @@ all: $(serialized_split_names_filename)
+# the names are shuffled to avoid processing splits from the same dataset together, since the data server might ban us
@@ -16 +18,2 @@ $(serialized_split_names_filename): $(get_splits_reports_filename)
- poetry run python $(GET_SERIALIZED_SPLIT_NAMES) $(get_splits_reports_filename) $(serialized_split_names_filename)
+ poetry run python $(GET_SERIALIZED_SPLIT_NAMES) $(get_splits_reports_filename) $(sorted_serialized_split_names_filename)
+ shuf $(sorted_serialized_split_names_filename) -o $(serialized_split_names_filename)
@@ -19 +22 @@ clean:
- rm -rf $(serialized_split_names_filename)
+ rm -rf $(serialized_split_names_filename) $(sorted_serialized_split_names_filename)
|
|
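The recipes above first write the names in sorted order, then pass them through `shuf` so that configs (or splits) of the same dataset are not requested back-to-back. A Python sketch of the same step, with file names standing in for the Makefile variables:

```python
import random

# Equivalent of `shuf sorted_serialized_config_names.txt -o serialized_config_names.txt`:
# read the sorted names, shuffle, and write the randomized order back out.
with open("sorted_serialized_config_names.txt") as f:
    names = f.read().splitlines()

random.shuffle(names)

with open("serialized_config_names.txt", "w") as f:
    f.write("\n".join(names) + "\n")
```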
14361c9188338a86d54fb99cdc1a23dc85f0e2f4 | Sylvain Lesage | 2021-08-23T12:53:12 | feat: 🎸 use all the available CPUs for the benchmark
diff --git a/Makefile b/Makefile
index a534ee4b..1e018e1a 100644
--- a/Makefile
+++ b/Makefile
@@ -2,0 +3,5 @@ PORT ?= 8000
+# monitor the load with htop and adapt the value (load should be a bit less than the number of processors; check with "$ nproc")
+# this will launch as many processes as possible under the limit of load=MAX_LOAD
+MAX_LOAD = 7
+PARALLEL = -j -l $(MAX_LOAD)
+
@@ -25 +30 @@ benchmark:
- $(MAKE) -C benchmark
+ $(MAKE) -C benchmark $(PARALLEL)
diff --git a/benchmark/Makefile b/benchmark/Makefile
index dda61e59..20fd1722 100644
--- a/benchmark/Makefile
+++ b/benchmark/Makefile
@@ -0,0 +1 @@
+# Launch with "make -j -l 7" to enable parallel execution
@@ -22 +23 @@ $(get_rows_reports_filename): $(serialized_split_names_filename)
- $(MAKE) -C get_rows_reports -j 5
+ $(MAKE) -C get_rows_reports
@@ -28 +29 @@ $(get_splits_reports_filename): $(serialized_config_names_filename)
- $(MAKE) -C get_splits_reports -j 5
+ $(MAKE) -C get_splits_reports
@@ -34 +35 @@ $(get_configs_reports_filename): $(serialized_dataset_names_filename)
- $(MAKE) -C get_configs_reports -j 5
+ $(MAKE) -C get_configs_reports
@@ -37 +38 @@ $(get_info_reports_filename): $(serialized_dataset_names_filename)
- $(MAKE) -C get_info_reports -j 5
+ $(MAKE) -C get_info_reports
|
|
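With `-j` and no number, make runs as many jobs as it can, and `-l $(MAX_LOAD)` holds new jobs back while the system load average is at or above MAX_LOAD. The same load-aware throttling can be sketched in Python (illustrative only; the commands list and threshold are assumptions):

```python
import os
import subprocess
import time

MAX_LOAD = 7  # a bit less than the processor count reported by `nproc`

def run_with_load_limit(commands):
    """Start each command only while the 1-minute load average is below MAX_LOAD."""
    procs = []
    for cmd in commands:
        while os.getloadavg()[0] >= MAX_LOAD:
            time.sleep(1)  # back off, like make's -l option
        procs.append(subprocess.Popen(cmd))
    for proc in procs:
        proc.wait()

run_with_load_limit([["sleep", "1"], ["sleep", "1"]])
```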
81bf77b6e6e09d32db8e9b2d9916624de98ea9f7 | Sylvain Lesage | 2021-08-23T12:25:25 | style: 💄 apply make style
diff --git a/benchmark/scripts/compile_report.py b/benchmark/scripts/compile_report.py
index 9a905655..0530fcf5 100644
--- a/benchmark/scripts/compile_report.py
+++ b/benchmark/scripts/compile_report.py
@@ -2 +1,0 @@ import json
-
@@ -3,0 +3 @@ import time
+
diff --git a/benchmark/scripts/merge_reports.py b/benchmark/scripts/merge_reports.py
index 7d9fa53d..317653eb 100644
--- a/benchmark/scripts/merge_reports.py
+++ b/benchmark/scripts/merge_reports.py
@@ -2 +1,0 @@ import json
-import typer
@@ -4,0 +4,2 @@ import os
+import typer
+
|
|
26e96261c8d7c616357fff8a1fa58998ce5cb5b7 | Sylvain Lesage | 2021-08-23T12:21:05 | refactor: 💡 compile final report
diff --git a/benchmark/Makefile b/benchmark/Makefile
index f29f1073..dda61e59 100644
--- a/benchmark/Makefile
+++ b/benchmark/Makefile
@@ -14 +14 @@ report_filename = $(addprefix $(tmpDir), report.json)
-all: $(report_filename)
+COMPILE_REPORT = $(addprefix $(scriptsDir), compile_report.py)
@@ -16,4 +16 @@ all: $(report_filename)
-$(report_filename): $(get_configs_reports_filename) $(get_info_reports_filename) $(get_splits_reports_filename) $(get_rows_reports_filename)
-# TODO -> generate the report from the files
-# { info: <$info_filename, configs: <$configs_filename, ... }
-# jq -s '.' $^ > $@
+all: $(report_filename)
@@ -21,2 +18,2 @@ $(report_filename): $(get_configs_reports_filename) $(get_info_reports_filename)
-# TODO: add splits and rows
-# we need to generate the list of pairs (dataset, config), and of tuples (dataset, config, split), one per line, in order to allow parallelization
+$(report_filename): $(get_info_reports_filename) $(get_configs_reports_filename) $(get_splits_reports_filename) $(get_rows_reports_filename)
+ poetry run python $(MERGE_REPORTS) $(get_info_reports_filename) $(get_configs_reports_filename) $(get_splits_reports_filename) $(get_rows_reports_filename) $@
diff --git a/benchmark/get_configs_reports/Makefile b/benchmark/get_configs_reports/Makefile
index bd74a265..af0a4240 100644
--- a/benchmark/get_configs_reports/Makefile
+++ b/benchmark/get_configs_reports/Makefile
@@ -12,0 +13 @@ GET_CONFIGS_REPORT = $(addprefix $(scriptsDir), get_configs_report.py)
+MERGE_REPORTS = $(addprefix $(scriptsDir), merge_reports.py)
@@ -16,0 +18 @@ all: $(merged_report_filename)
+
@@ -18 +20 @@ $(merged_report_filename): $(individual_report_filenames)
- jq -s '.' $^ > $@
+ poetry run python $(MERGE_REPORTS) $(serialized_dataset_names_filename) $(reportsDir) $@
diff --git a/benchmark/get_info_reports/Makefile b/benchmark/get_info_reports/Makefile
index 3a16b9ee..b250d37f 100644
--- a/benchmark/get_info_reports/Makefile
+++ b/benchmark/get_info_reports/Makefile
@@ -12,0 +13 @@ GET_INFO_REPORT = $(addprefix $(scriptsDir), get_info_report.py)
+MERGE_REPORTS = $(addprefix $(scriptsDir), merge_reports.py)
@@ -18 +19 @@ $(merged_report_filename): $(individual_report_filenames)
- jq -s '.' $^ > $@
+ poetry run python $(MERGE_REPORTS) $(serialized_dataset_names_filename) $(reportsDir) $@
diff --git a/benchmark/get_rows_reports/Makefile b/benchmark/get_rows_reports/Makefile
index 8c774825..dbef43ce 100644
--- a/benchmark/get_rows_reports/Makefile
+++ b/benchmark/get_rows_reports/Makefile
@@ -12,0 +13 @@ GET_ROWS_REPORT = $(addprefix $(scriptsDir), get_rows_report.py)
+MERGE_REPORTS = $(addprefix $(scriptsDir), merge_reports.py)
@@ -18 +19 @@ $(merged_report_filename): $(individual_report_filenames)
- jq -s '.' $^ > $@
+ poetry run python $(MERGE_REPORTS) $(serialized_split_names_filename) $(reportsDir) $@
diff --git a/benchmark/get_splits_reports/Makefile b/benchmark/get_splits_reports/Makefile
index 729a7745..046e14ac 100644
--- a/benchmark/get_splits_reports/Makefile
+++ b/benchmark/get_splits_reports/Makefile
@@ -12,0 +13 @@ GET_SPLITS_REPORT = $(addprefix $(scriptsDir), get_splits_report.py)
+MERGE_REPORTS = $(addprefix $(scriptsDir), merge_reports.py)
@@ -18 +19 @@ $(merged_report_filename): $(individual_report_filenames)
- jq -s '.' $^ > $@
+ poetry run python $(MERGE_REPORTS) $(serialized_config_names_filename) $(reportsDir) $@
diff --git a/benchmark/scripts/compile_report.py b/benchmark/scripts/compile_report.py
new file mode 100644
index 00000000..9a905655
--- /dev/null
+++ b/benchmark/scripts/compile_report.py
@@ -0,0 +1,35 @@
+import json
+
+import time
+import typer
+
+
+def main(
+ get_info_reports_filename: str,
+ get_configs_reports_filename: str,
+ get_splits_reports_filename: str,
+ get_rows_reports_filename: str,
+ output: str,
+):
+ with open(get_info_reports_filename) as f:
+ get_info_reports = json.load(f)
+ with open(get_configs_reports_filename) as f:
+ get_configs_reports = json.load(f)
+ with open(get_splits_reports_filename) as f:
+ get_splits_reports = json.load(f)
+ with open(get_rows_reports_filename) as f:
+ get_rows_reports = json.load(f)
+ time_string = time.strftime("%Y%m%d-%H%M%S")
+ report = {
+ "info_reports": get_info_reports,
+ "configs_reports": get_configs_reports,
+ "splits_reports": get_splits_reports,
+ "rows_reports": get_rows_reports,
+ "created_at": time_string,
+ }
+ with open(output, "w") as f:
+ json.dump(report, f)
+
+
+if __name__ == "__main__":
+ typer.run(main)
diff --git a/benchmark/scripts/merge_reports.py b/benchmark/scripts/merge_reports.py
new file mode 100644
index 00000000..7d9fa53d
--- /dev/null
+++ b/benchmark/scripts/merge_reports.py
@@ -0,0 +1,23 @@
+import json
+import typer
+import os
+
+
+def main(
+ basenames_filename: str,
+ reports_dir: str,
+ output: str,
+):
+ reports = []
+ with open(basenames_filename) as f:
+ basenames = f.read().splitlines()
+ for basename in basenames:
+ filename = os.path.join(reports_dir, basename + ".json")
+ with open(filename) as f2:
+ reports.append(json.load(f2))
+ with open(output, "w") as f:
+ json.dump(reports, f)
+
+
+if __name__ == "__main__":
+ typer.run(main)
diff --git a/benchmark/scripts/serialize.py b/benchmark/scripts/serialize.py
index 56fef386..450d040b 100644
--- a/benchmark/scripts/serialize.py
+++ b/benchmark/scripts/serialize.py
@@ -3 +3,4 @@ from typing import Tuple, Union
-SLASH_SEPARATOR = "___SLASH___"
+SLASH = "___SLASH___"
+SPACE = "___SPACE___"
+PAR_OPEN = "___PAR_OPEN___"
+PAR_CLOSE = "___PAR_CLOSE___"
@@ -10 +13 @@ def serialize_dataset_name(dataset: str) -> str:
- return dataset.replace("/", SLASH_SEPARATOR)
+ return dataset.replace("/", SLASH)
@@ -14 +17 @@ def deserialize_dataset_name(serialized_dataset: str) -> str:
- return serialized_dataset.replace(SLASH_SEPARATOR, "/")
+ return serialized_dataset.replace(SLASH, "/")
@@ -19 +22,3 @@ def serialize_config_name(dataset: str, config: Union[str, None]) -> str:
- return serialize_dataset_name(dataset) + CONFIG_SEPARATOR + c
+ # due to config named "(China)", "bbc hindi nli"
+ safe_config = c.replace("(", PAR_OPEN).replace(")", PAR_CLOSE).replace(" ", SPACE)
+ return serialize_dataset_name(dataset) + CONFIG_SEPARATOR + safe_config
@@ -24 +29,2 @@ def deserialize_config_name(serialized_config: str) -> Tuple[str, Union[str, Non
- config = None if safe_config == CONFIG_NONE else safe_config
+ c = safe_config.replace(PAR_OPEN, "(").replace(PAR_CLOSE, ")").replace(SPACE, " ")
+ config = None if c == CONFIG_NONE else c
@@ -30 +36,2 @@ def serialize_split_name(dataset: str, config: Union[str, None], split: str) ->
- return serialize_config_name(dataset, config) + SPLIT_SEPARATOR + split
+ safe_split = split
+ return serialize_config_name(dataset, config) + SPLIT_SEPARATOR + safe_split
@@ -34 +41,2 @@ def deserialize_split_name(serialized_split: str) -> Tuple[str, Union[str, None]
- serialized_config, _, split = serialized_split.partition(SPLIT_SEPARATOR)
+ serialized_config, _, safe_split = serialized_split.partition(SPLIT_SEPARATOR)
+ split = safe_split.replace(PAR_OPEN, "(").replace(PAR_CLOSE, ")")
|
|
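The sentinel tokens in serialize.py exist because serialized names double as file names in the Makefile pattern rules, so '/', spaces, and parentheses (as in the config named "(China)") must be escaped reversibly. A self-contained round-trip sketch of the scheme, re-implemented here for illustration (the real helpers live in benchmark/scripts/serialize.py):

```python
from typing import Tuple

SLASH = "___SLASH___"
SPACE = "___SPACE___"
PAR_OPEN = "___PAR_OPEN___"
PAR_CLOSE = "___PAR_CLOSE___"
CONFIG_SEPARATOR = "___CONFIG___"

def serialize(dataset: str, config: str) -> str:
    # escape the characters that are unsafe in a file name
    safe = config.replace("(", PAR_OPEN).replace(")", PAR_CLOSE).replace(" ", SPACE)
    return dataset.replace("/", SLASH) + CONFIG_SEPARATOR + safe

def deserialize(serialized: str) -> Tuple[str, str]:
    serialized_dataset, _, safe = serialized.partition(CONFIG_SEPARATOR)
    config = safe.replace(PAR_OPEN, "(").replace(PAR_CLOSE, ")").replace(SPACE, " ")
    return serialized_dataset.replace(SLASH, "/"), config

# round trip: namespaced dataset plus a config name with parentheses
assert deserialize(serialize("user/dataset", "(China)")) == ("user/dataset", "(China)")
```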
f19a8386b8fa928edbf44ce19ac3b31d4f6eeaa3 | Sylvain Lesage | 2021-08-23T09:34:50 | refactor: 💡 add get rows report to makefiles
diff --git a/benchmark/Makefile b/benchmark/Makefile
index 4163f0a1..f29f1073 100644
--- a/benchmark/Makefile
+++ b/benchmark/Makefile
@@ -10,0 +11 @@ get_splits_reports_filename = $(addprefix $(tmpDir), get_splits_reports.json)
+get_rows_reports_filename = $(addprefix $(tmpDir), get_rows_reports.json)
@@ -12,0 +14 @@ report_filename = $(addprefix $(tmpDir), report.json)
+all: $(report_filename)
@@ -14 +16 @@ report_filename = $(addprefix $(tmpDir), report.json)
-all: $(get_configs_reports_filename) $(get_info_reports_filename) $(get_splits_reports_filename) $(serialized_split_names_filename)
+$(report_filename): $(get_configs_reports_filename) $(get_info_reports_filename) $(get_splits_reports_filename) $(get_rows_reports_filename)
@@ -21,0 +24,3 @@ all: $(get_configs_reports_filename) $(get_info_reports_filename) $(get_splits_r
+$(get_rows_reports_filename): $(serialized_split_names_filename)
+ $(MAKE) -C get_rows_reports -j 5
+
@@ -40,0 +46 @@ clean:
+ $(MAKE) -C get_rows_reports/ clean
diff --git a/benchmark/get_configs_reports/Makefile b/benchmark/get_configs_reports/Makefile
index e2ccd004..bd74a265 100644
--- a/benchmark/get_configs_reports/Makefile
+++ b/benchmark/get_configs_reports/Makefile
@@ -4 +4 @@ tmpDir = ../tmp/
-configsDir = $(addprefix $(tmpDir), get_configs_reports/)
+reportsDir = $(addprefix $(tmpDir), get_configs_reports/)
@@ -9 +9 @@ serialized_dataset_names = $(file < $(serialized_dataset_names_filename))
-individual_report_filenames = $(addprefix $(configsDir), $(addsuffix .json, $(serialized_dataset_names)))
+individual_report_filenames = $(addprefix $(reportsDir), $(addsuffix .json, $(serialized_dataset_names)))
@@ -23 +23 @@ $(merged_report_filename): $(individual_report_filenames)
- @mkdir -p $(configsDir)
+ @mkdir -p $(reportsDir)
@@ -28 +28 @@ clean:
- rm -rf $(configsDir)
+ rm -rf $(reportsDir)
diff --git a/benchmark/get_info_reports/Makefile b/benchmark/get_info_reports/Makefile
index b673029e..3a16b9ee 100644
--- a/benchmark/get_info_reports/Makefile
+++ b/benchmark/get_info_reports/Makefile
@@ -4 +4 @@ tmpDir = ../tmp/
-infoDir = $(addprefix $(tmpDir), get_info_reports/)
+reportsDir = $(addprefix $(tmpDir), get_info_reports/)
@@ -9 +9 @@ serialized_dataset_names = $(file < $(serialized_dataset_names_filename))
-individual_report_filenames = $(addprefix $(infoDir), $(addsuffix .json, $(serialized_dataset_names)))
+individual_report_filenames = $(addprefix $(reportsDir), $(addsuffix .json, $(serialized_dataset_names)))
@@ -23 +23 @@ $(merged_report_filename): $(individual_report_filenames)
- @mkdir -p $(infoDir)
+ @mkdir -p $(reportsDir)
@@ -28 +28 @@ clean:
- rm -rf $(infoDir)
+ rm -rf $(reportsDir)
diff --git a/benchmark/get_rows_reports/Makefile b/benchmark/get_rows_reports/Makefile
new file mode 100644
index 00000000..8c774825
--- /dev/null
+++ b/benchmark/get_rows_reports/Makefile
@@ -0,0 +1,28 @@
+.PHONY: clean all
+
+tmpDir = ../tmp/
+reportsDir = $(addprefix $(tmpDir), get_rows_reports/)
+scriptsDir = ../scripts/
+
+serialized_split_names_filename = $(addprefix $(tmpDir), serialized_split_names.txt)
+serialized_split_names = $(file < $(serialized_split_names_filename))
+individual_report_filenames = $(addprefix $(reportsDir), $(addsuffix .json, $(serialized_split_names)))
+merged_report_filename = $(addprefix $(tmpDir), get_rows_reports.json)
+
+GET_ROWS_REPORT = $(addprefix $(scriptsDir), get_rows_report.py)
+
+all: $(merged_report_filename)
+
+# merge all the reports in one JSON
+$(merged_report_filename): $(individual_report_filenames)
+ jq -s '.' $^ > $@
+
+# generate a report for every split (get_rows_report.py) -> list of rows, and potential exceptions
+# this is $(individual_report_filenames)
+../tmp/get_rows_reports/%.json:
+ @mkdir -p $(reportsDir)
+ poetry run python $(GET_ROWS_REPORT) $* $@
+
+clean:
+ rm -rf $(merged_report_filename)
+ rm -rf $(reportsDir)
diff --git a/benchmark/get_splits_reports/Makefile b/benchmark/get_splits_reports/Makefile
index 127c7644..729a7745 100644
--- a/benchmark/get_splits_reports/Makefile
+++ b/benchmark/get_splits_reports/Makefile
@@ -4 +4 @@ tmpDir = ../tmp/
-splitsDir = $(addprefix $(tmpDir), get_splits_reports/)
+reportsDir = $(addprefix $(tmpDir), get_splits_reports/)
@@ -9 +9 @@ serialized_config_names = $(file < $(serialized_config_names_filename))
-individual_report_filenames = $(addprefix $(splitsDir), $(addsuffix .json, $(serialized_config_names)))
+individual_report_filenames = $(addprefix $(reportsDir), $(addsuffix .json, $(serialized_config_names)))
@@ -23 +23 @@ $(merged_report_filename): $(individual_report_filenames)
- @mkdir -p $(splitsDir)
+ @mkdir -p $(reportsDir)
@@ -28 +28 @@ clean:
- rm -rf $(splitsDir)
+ rm -rf $(reportsDir)
diff --git a/benchmark/scripts/get_rows_report.py b/benchmark/scripts/get_rows_report.py
new file mode 100644
index 00000000..ec81b68e
--- /dev/null
+++ b/benchmark/scripts/get_rows_report.py
@@ -0,0 +1,55 @@
+import json
+import logging
+import time
+
+import typer
+from datasets import disable_progress_bar
+from serialize import deserialize_split_name
+
+from datasets_preview_backend.queries.rows import extract_rows
+
+# remove any logs
+logging.disable(logging.CRITICAL)
+disable_progress_bar()
+
+
+def get_rows_report(dataset: str, config: str, split: str):
+ num_rows = 10
+ try:
+ t = time.process_time()
+ rows = extract_rows(dataset, config, split, num_rows)["rows"]
+ return {
+ "dataset": dataset,
+ "config": config,
+ "split": split,
+ "row_length": len(rows),
+ "success": True,
+ "exception": None,
+ "message": None,
+ "cause": None,
+ "cause_message": None,
+ "elapsed_seconds": time.process_time() - t,
+ }
+ except Exception as err:
+ return {
+ "dataset": dataset,
+ "config": config,
+ "split": split,
+ "success": False,
+ "exception": type(err).__name__,
+ "message": str(err),
+ "cause": type(err.__cause__).__name__,
+ "cause_message": str(err.__cause__),
+ "elapsed_seconds": time.process_time() - t,
+ }
+
+
+def main(serialized_split_name: str, filename: str):
+ dataset, config, split = deserialize_split_name(serialized_split_name)
+ report = get_rows_report(dataset, config, split)
+ with open(filename, "w") as f:
+ json.dump(report, f)
+
+
+if __name__ == "__main__":
+ typer.run(main)
diff --git a/benchmark/scripts/get_splits_report.py b/benchmark/scripts/get_splits_report.py
index 5138a143..3852fab7 100644
--- a/benchmark/scripts/get_splits_report.py
+++ b/benchmark/scripts/get_splits_report.py
@@ -47 +47 @@ def main(serialized_config_name: str, filename: str):
- splits = get_splits_report(dataset, config)
+ report = get_splits_report(dataset, config)
@@ -49 +49 @@ def main(serialized_config_name: str, filename: str):
- json.dump(splits, f)
+ json.dump(report, f)
|
|
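Every report script in this series follows the same wrapper: time the query, and emit a uniform record on both success and failure. A condensed sketch of the pattern (the generic `fn` is an assumption; note that starting the timer before the `try` keeps `t` bound in the `except` branch, and `err.__cause__` is checked before taking its type name):

```python
import time

def make_report(fn, **kwargs):
    t = time.process_time()  # started outside the try so the except branch can use it
    try:
        result = fn(**kwargs)
        return {
            **kwargs, "result": result, "success": True,
            "exception": None, "message": None, "cause": None, "cause_message": None,
            "elapsed_seconds": time.process_time() - t,
        }
    except Exception as err:
        return {
            **kwargs, "success": False,
            "exception": type(err).__name__, "message": str(err),
            "cause": type(err.__cause__).__name__ if err.__cause__ else None,
            "cause_message": str(err.__cause__) if err.__cause__ else None,
            "elapsed_seconds": time.process_time() - t,
        }
```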
42fa524344f93cc8a1f1580ebcd4cd437c26ca56 | Sylvain Lesage | 2021-08-23T09:24:52 | refactor: 💡 add list of serialized split names
diff --git a/benchmark/Makefile b/benchmark/Makefile
index 4b214fd5..4163f0a1 100644
--- a/benchmark/Makefile
+++ b/benchmark/Makefile
@@ -6,0 +7 @@ serialized_config_names_filename = $(addprefix $(tmpDir), serialized_config_name
+serialized_split_names_filename = $(addprefix $(tmpDir), serialized_split_names.txt)
@@ -8,0 +10 @@ get_configs_reports_filename = $(addprefix $(tmpDir), get_configs_reports.json)
+get_splits_reports_filename = $(addprefix $(tmpDir), get_splits_reports.json)
@@ -12 +14 @@ report_filename = $(addprefix $(tmpDir), report.json)
-all: $(get_configs_reports_filename) $(get_info_reports_filename) $(get_splits_reports_filename)
+all: $(get_configs_reports_filename) $(get_info_reports_filename) $(get_splits_reports_filename) $(serialized_split_names_filename)
@@ -19,0 +22,6 @@ all: $(get_configs_reports_filename) $(get_info_reports_filename) $(get_splits_r
+$(serialized_split_names_filename): $(get_splits_reports_filename)
+ $(MAKE) -C serialized_split_names
+
+$(get_splits_reports_filename): $(serialized_config_names_filename)
+ $(MAKE) -C get_splits_reports -j 5
+
@@ -32,0 +41,2 @@ clean:
+ $(MAKE) -C serialized_split_names/ clean
+ $(MAKE) -C get_splits_reports/ clean
diff --git a/benchmark/scripts/get_serialized_dataset_names.py b/benchmark/scripts/get_serialized_dataset_names.py
index 399a3b4c..405ae284 100644
--- a/benchmark/scripts/get_serialized_dataset_names.py
+++ b/benchmark/scripts/get_serialized_dataset_names.py
@@ -9 +8,0 @@ from serialize import serialize_dataset_name
-
diff --git a/benchmark/scripts/get_serialized_split_names.py b/benchmark/scripts/get_serialized_split_names.py
new file mode 100644
index 00000000..869b1649
--- /dev/null
+++ b/benchmark/scripts/get_serialized_split_names.py
@@ -0,0 +1,25 @@
+import json
+
+import typer
+from serialize import serialize_split_name
+
+
+def main(get_splits_reports_filename: str, output: str):
+ with open(get_splits_reports_filename) as f:
+ get_splits_reports = json.load(f)
+
+ # replace '/' in namespaced dataset names
+ serialized_split_names = []
+ for get_splits_report in get_splits_reports:
+ dataset = get_splits_report["dataset"]
+ config = get_splits_report["config"]
+ for split in get_splits_report["splits"]:
+ serialized_split_names.append(serialize_split_name(dataset, config, split))
+
+ with open(output, "w") as f:
+ for serialized_split_name in serialized_split_names:
+ f.write("%s\n" % serialized_split_name)
+
+
+if __name__ == "__main__":
+ typer.run(main)
diff --git a/benchmark/scripts/serialize.py b/benchmark/scripts/serialize.py
index f9a3cfc5..56fef386 100644
--- a/benchmark/scripts/serialize.py
+++ b/benchmark/scripts/serialize.py
@@ -5,0 +6 @@ CONFIG_NONE = "___NONE_CONFIG___"
+SPLIT_SEPARATOR = "___SPLIT___"
@@ -25,0 +27,10 @@ def deserialize_config_name(serialized_config: str) -> Tuple[str, Union[str, Non
+
+
+def serialize_split_name(dataset: str, config: Union[str, None], split: str) -> str:
+ return serialize_config_name(dataset, config) + SPLIT_SEPARATOR + split
+
+
+def deserialize_split_name(serialized_split: str) -> Tuple[str, Union[str, None], str]:
+ serialized_config, _, split = serialized_split.partition(SPLIT_SEPARATOR)
+ dataset, config = deserialize_config_name(serialized_config)
+ return dataset, config, split
diff --git a/benchmark/serialized_config_names/Makefile b/benchmark/serialized_config_names/Makefile
index 7797925d..355dccbe 100644
--- a/benchmark/serialized_config_names/Makefile
+++ b/benchmark/serialized_config_names/Makefile
@@ -13 +13 @@ all: $(serialized_config_names_filename)
-# get the list of serialized dataset names (with '/' replaced with "___SLASH___")
+# get the list of serialized config names
diff --git a/benchmark/serialized_split_names/Makefile b/benchmark/serialized_split_names/Makefile
new file mode 100644
index 00000000..637db06c
--- /dev/null
+++ b/benchmark/serialized_split_names/Makefile
@@ -0,0 +1,19 @@
+.PHONY: clean all
+
+tmpDir = ../tmp/
+scriptsDir = ../scripts/
+
+serialized_split_names_filename = $(addprefix $(tmpDir), serialized_split_names.txt)
+get_splits_reports_filename = $(addprefix $(tmpDir), get_splits_reports.json)
+
+GET_SERIALIZED_SPLIT_NAMES = $(addprefix $(scriptsDir), get_serialized_split_names.py)
+
+all: $(serialized_split_names_filename)
+
+# get the list of serialized split names
+$(serialized_split_names_filename): $(get_splits_reports_filename)
+ @mkdir -p $(tmpDir)
+ poetry run python $(GET_SERIALIZED_SPLIT_NAMES) $(get_splits_reports_filename) $(serialized_split_names_filename)
+
+clean:
+ rm -rf $(serialized_split_names_filename)
|
|
94d257938a719c991e4d974828eaa3bf9d0b5d13 | Sylvain Lesage | 2021-08-23T09:14:06 | refactor: 💡 add get_splits reports in makefiles
diff --git a/benchmark/Makefile b/benchmark/Makefile
index b534ccf0..4b214fd5 100644
--- a/benchmark/Makefile
+++ b/benchmark/Makefile
@@ -11 +10,0 @@ report_filename = $(addprefix $(tmpDir), report.json)
-all: $(report_filename) $(serialized_config_names_filename)
@@ -13 +12 @@ all: $(report_filename) $(serialized_config_names_filename)
-$(report_filename): $(get_configs_report_filename) $(get_info_report_filename)
+all: $(get_configs_reports_filename) $(get_info_reports_filename) $(get_splits_reports_filename)
diff --git a/benchmark/get_splits_reports/Makefile b/benchmark/get_splits_reports/Makefile
new file mode 100644
index 00000000..127c7644
--- /dev/null
+++ b/benchmark/get_splits_reports/Makefile
@@ -0,0 +1,28 @@
+.PHONY: clean all
+
+tmpDir = ../tmp/
+splitsDir = $(addprefix $(tmpDir), get_splits_reports/)
+scriptsDir = ../scripts/
+
+serialized_config_names_filename = $(addprefix $(tmpDir), serialized_config_names.txt)
+serialized_config_names = $(file < $(serialized_config_names_filename))
+individual_report_filenames = $(addprefix $(splitsDir), $(addsuffix .json, $(serialized_config_names)))
+merged_report_filename = $(addprefix $(tmpDir), get_splits_reports.json)
+
+GET_SPLITS_REPORT = $(addprefix $(scriptsDir), get_splits_report.py)
+
+all: $(merged_report_filename)
+
+# merge all the reports in one JSON
+$(merged_report_filename): $(individual_report_filenames)
+ jq -s '.' $^ > $@
+
+# generate a report for every config (get_splits_report.py) -> list of splits, and potential exceptions
+# this is $(individual_report_filenames)
+../tmp/get_splits_reports/%.json:
+ @mkdir -p $(splitsDir)
+ poetry run python $(GET_SPLITS_REPORT) $* $@
+
+clean:
+ rm -rf $(merged_report_filename)
+ rm -rf $(splitsDir)
diff --git a/benchmark/scripts/get_configs_report.py b/benchmark/scripts/get_configs_report.py
index 7c04ff79..3dace60e 100644
--- a/benchmark/scripts/get_configs_report.py
+++ b/benchmark/scripts/get_configs_report.py
@@ -44 +44 @@ def main(serialized_dataset_name: str, filename: str):
- info = get_configs_report(deserialize_dataset_name(serialized_dataset_name))
+ report = get_configs_report(deserialize_dataset_name(serialized_dataset_name))
@@ -46 +46 @@ def main(serialized_dataset_name: str, filename: str):
- json.dump(info, f)
+ json.dump(report, f)
diff --git a/benchmark/scripts/get_info_report.py b/benchmark/scripts/get_info_report.py
index 52fc263d..7ddedf52 100644
--- a/benchmark/scripts/get_info_report.py
+++ b/benchmark/scripts/get_info_report.py
@@ -6,0 +7 @@ from datasets import disable_progress_bar
+from serialize import deserialize_dataset_name
@@ -42,10 +43,2 @@ def get_info_report(dataset: str):
-def to_safe(str):
- return str.replace("/", "___SLASH___")
-
-
-def to_unsafe(str):
- return str.replace("___SLASH___", "/")
-
-
-def main(safe_dataset: str, filename: str):
- info = get_info_report(to_unsafe(safe_dataset))
+def main(serialized_dataset_name: str, filename: str):
+ report = get_info_report(deserialize_dataset_name(serialized_dataset_name))
@@ -53 +46 @@ def main(safe_dataset: str, filename: str):
- json.dump(info, f)
+ json.dump(report, f)
diff --git a/benchmark/scripts/get_splits_report.py b/benchmark/scripts/get_splits_report.py
new file mode 100644
index 00000000..5138a143
--- /dev/null
+++ b/benchmark/scripts/get_splits_report.py
@@ -0,0 +1,53 @@
+import json
+import logging
+import time
+
+import typer
+from datasets import disable_progress_bar
+from serialize import deserialize_config_name
+
+from datasets_preview_backend.queries.splits import get_splits
+
+# remove any logs
+logging.disable(logging.CRITICAL)
+disable_progress_bar()
+
+
+def get_splits_report(dataset: str, config: str):
+ try:
+ t = time.process_time()
+ splits = get_splits(dataset, config)["splits"]
+ return {
+ "dataset": dataset,
+ "config": config,
+ "splits": list(splits),
+ "success": True,
+ "exception": None,
+ "message": None,
+ "cause": None,
+ "cause_message": None,
+ "elapsed_seconds": time.process_time() - t,
+ }
+ except Exception as err:
+ return {
+ "dataset": dataset,
+ "config": config,
+ "splits": [],
+ "success": False,
+ "exception": type(err).__name__,
+ "message": str(err),
+ "cause": type(err.__cause__).__name__,
+ "cause_message": str(err.__cause__),
+ "elapsed_seconds": time.process_time() - t,
+ }
+
+
+def main(serialized_config_name: str, filename: str):
+ dataset, config = deserialize_config_name(serialized_config_name)
+ splits = get_splits_report(dataset, config)
+ with open(filename, "w") as f:
+ json.dump(splits, f)
+
+
+if __name__ == "__main__":
+ typer.run(main)
diff --git a/benchmark/scripts/serialize.py b/benchmark/scripts/serialize.py
index 5af10346..f9a3cfc5 100644
--- a/benchmark/scripts/serialize.py
+++ b/benchmark/scripts/serialize.py
@@ -1 +1 @@
-from typing import Union
+from typing import Tuple, Union
@@ -12,2 +12,2 @@ def serialize_dataset_name(dataset: str) -> str:
-def deserialize_dataset_name(dataset: str) -> str:
- return dataset.replace(SLASH_SEPARATOR, "/")
+def deserialize_dataset_name(serialized_dataset: str) -> str:
+ return serialized_dataset.replace(SLASH_SEPARATOR, "/")
@@ -18,0 +19,7 @@ def serialize_config_name(dataset: str, config: Union[str, None]) -> str:
+
+
+def deserialize_config_name(serialized_config: str) -> Tuple[str, Union[str, None]]:
+ serialized_dataset, _, safe_config = serialized_config.partition(CONFIG_SEPARATOR)
+ config = None if safe_config == CONFIG_NONE else safe_config
+ dataset = deserialize_dataset_name(serialized_dataset)
+ return dataset, config
|
|
0f0d096a807e5ea6006511741fe91163eb906177 | Sylvain Lesage | 2021-08-23T08:49:33 | refactor: 💡 replace test_datasets.py with makefiles
diff --git a/.gitignore b/.gitignore
index e2cdb79d..98a3b28d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,0 +11,2 @@ poetry.toml
+
+benchmark/tmp/
diff --git a/Makefile b/Makefile
index f8ffe6ab..a534ee4b 100644
--- a/Makefile
+++ b/Makefile
@@ -25 +25,2 @@ benchmark:
- poetry run python benchmark/test_datasets.py
+ $(MAKE) -C benchmark
+
diff --git a/benchmark/Makefile b/benchmark/Makefile
new file mode 100644
index 00000000..b534ccf0
--- /dev/null
+++ b/benchmark/Makefile
@@ -0,0 +1,37 @@
+.PHONY: clean all
+
+tmpDir = ./tmp/
+
+serialized_dataset_names_filename = $(addprefix $(tmpDir), serialized_dataset_names.txt)
+serialized_config_names_filename = $(addprefix $(tmpDir), serialized_config_names.txt)
+get_info_reports_filename = $(addprefix $(tmpDir), get_info_reports.json)
+get_configs_reports_filename = $(addprefix $(tmpDir), get_configs_reports.json)
+report_filename = $(addprefix $(tmpDir), report.json)
+
+all: $(report_filename) $(serialized_config_names_filename)
+
+$(report_filename): $(get_configs_report_filename) $(get_info_report_filename)
+# TODO -> generate the report from the files
+# { info: <$info_filename, configs: <$configs_filename, ... }
+# jq -s '.' $^ > $@
+
+# TODO: add splits and rows
+# we need to generate the list of pairs (dataset, config), and of tuples (dataset, config, split), one per line, in order to allow parallelization
+
+$(serialized_config_names_filename): $(get_configs_reports_filename)
+ $(MAKE) -C serialized_config_names
+
+$(get_configs_reports_filename): $(serialized_dataset_names_filename)
+ $(MAKE) -C get_configs_reports -j 5
+
+$(get_info_reports_filename): $(serialized_dataset_names_filename)
+ $(MAKE) -C get_info_reports -j 5
+
+$(serialized_dataset_names_filename):
+ $(MAKE) -C serialized_dataset_names
+
+clean:
+ $(MAKE) -C serialized_config_names/ clean
+ $(MAKE) -C get_configs_reports/ clean
+ $(MAKE) -C get_info_reports/ clean
+ $(MAKE) -C serialized_dataset_names/ clean
diff --git a/benchmark/test_datasets.py b/benchmark/deprecated/test_datasets.py
similarity index 100%
rename from benchmark/test_datasets.py
rename to benchmark/deprecated/test_datasets.py
diff --git a/benchmark/get_configs_reports/Makefile b/benchmark/get_configs_reports/Makefile
new file mode 100644
index 00000000..e2ccd004
--- /dev/null
+++ b/benchmark/get_configs_reports/Makefile
@@ -0,0 +1,28 @@
+.PHONY: clean all
+
+tmpDir = ../tmp/
+configsDir = $(addprefix $(tmpDir), get_configs_reports/)
+scriptsDir = ../scripts/
+
+serialized_dataset_names_filename = $(addprefix $(tmpDir), serialized_dataset_names.txt)
+serialized_dataset_names = $(file < $(serialized_dataset_names_filename))
+individual_report_filenames = $(addprefix $(configsDir), $(addsuffix .json, $(serialized_dataset_names)))
+merged_report_filename = $(addprefix $(tmpDir), get_configs_reports.json)
+
+GET_CONFIGS_REPORT = $(addprefix $(scriptsDir), get_configs_report.py)
+
+all: $(merged_report_filename)
+
+# merge all the reports in one JSON
+$(merged_report_filename): $(individual_report_filenames)
+ jq -s '.' $^ > $@
+
+# generate a report for every dataset (get_configs.py) -> list of configs, and potential exceptions
+# this is $(individual_report_filenames)
+../tmp/get_configs_reports/%.json:
+ @mkdir -p $(configsDir)
+ poetry run python $(GET_CONFIGS_REPORT) $* $@
+
+clean:
+ rm -rf $(merged_report_filename)
+ rm -rf $(configsDir)
diff --git a/benchmark/get_info_reports/Makefile b/benchmark/get_info_reports/Makefile
new file mode 100644
index 00000000..b673029e
--- /dev/null
+++ b/benchmark/get_info_reports/Makefile
@@ -0,0 +1,28 @@
+.PHONY: clean all
+
+tmpDir = ../tmp/
+infoDir = $(addprefix $(tmpDir), get_info_reports/)
+scriptsDir = ../scripts/
+
+serialized_dataset_names_filename = $(addprefix $(tmpDir), serialized_dataset_names.txt)
+serialized_dataset_names = $(file < $(serialized_dataset_names_filename))
+individual_report_filenames = $(addprefix $(infoDir), $(addsuffix .json, $(serialized_dataset_names)))
+merged_report_filename = $(addprefix $(tmpDir), get_info_reports.json)
+
+GET_INFO_REPORT = $(addprefix $(scriptsDir), get_info_report.py)
+
+all: $(merged_report_filename)
+
+# merge all the reports in one JSON
+$(merged_report_filename): $(individual_report_filenames)
+ jq -s '.' $^ > $@
+
+# generate a report for every dataset (get_info.py) -> list of info, and potential exceptions
+# this is $(individual_report_filenames)
+../tmp/get_info_reports/%.json:
+ @mkdir -p $(infoDir)
+ poetry run python $(GET_INFO_REPORT) $* $@
+
+clean:
+ rm -rf $(merged_report_filename)
+ rm -rf $(infoDir)
diff --git a/benchmark/scripts/get_configs_report.py b/benchmark/scripts/get_configs_report.py
new file mode 100644
index 00000000..7c04ff79
--- /dev/null
+++ b/benchmark/scripts/get_configs_report.py
@@ -0,0 +1,50 @@
+import json
+import logging
+import time
+
+import typer
+from datasets import disable_progress_bar
+from serialize import deserialize_dataset_name
+
+from datasets_preview_backend.queries.configs import get_configs
+
+# remove any logs
+logging.disable(logging.CRITICAL)
+disable_progress_bar()
+
+
+def get_configs_report(dataset: str):
+ try:
+ t = time.process_time()
+ configs = get_configs(dataset)["configs"]
+ return {
+ "dataset": dataset,
+ "configs": list(configs),
+ "success": True,
+ "exception": None,
+ "message": None,
+ "cause": None,
+ "cause_message": None,
+ "elapsed_seconds": time.process_time() - t,
+ }
+ except Exception as err:
+ return {
+ "dataset": dataset,
+ "configs": [],
+ "success": False,
+ "exception": type(err).__name__,
+ "message": str(err),
+ "cause": type(err.__cause__).__name__,
+ "cause_message": str(err.__cause__),
+ "elapsed_seconds": time.process_time() - t,
+ }
+
+
+def main(serialized_dataset_name: str, filename: str):
+ info = get_configs_report(deserialize_dataset_name(serialized_dataset_name))
+ with open(filename, "w") as f:
+ json.dump(info, f)
+
+
+if __name__ == "__main__":
+ typer.run(main)
diff --git a/benchmark/scripts/get_info_report.py b/benchmark/scripts/get_info_report.py
new file mode 100644
index 00000000..52fc263d
--- /dev/null
+++ b/benchmark/scripts/get_info_report.py
@@ -0,0 +1,57 @@
+import json
+import logging
+import time
+
+import typer
+from datasets import disable_progress_bar
+
+from datasets_preview_backend.queries.info import get_info
+
+# remove any logs
+logging.disable(logging.CRITICAL)
+disable_progress_bar()
+
+
+def get_info_report(dataset: str):
+ try:
+ t = time.process_time()
+ info = get_info(dataset)["info"]
+ return {
+ "dataset": dataset,
+ "info": info,
+ "success": True,
+ "exception": None,
+ "message": None,
+ "cause": None,
+ "cause_message": None,
+ "elapsed_seconds": time.process_time() - t,
+ }
+ except Exception as err:
+ return {
+ "dataset": dataset,
+ "info": None,
+ "success": False,
+ "exception": type(err).__name__,
+ "message": str(err),
+ "cause": type(err.__cause__).__name__,
+ "cause_message": str(err.__cause__),
+ "elapsed_seconds": time.process_time() - t,
+ }
+
+
+def to_safe(str):
+ return str.replace("/", "___SLASH___")
+
+
+def to_unsafe(str):
+ return str.replace("___SLASH___", "/")
+
+
+def main(safe_dataset: str, filename: str):
+ info = get_info_report(to_unsafe(safe_dataset))
+ with open(filename, "w") as f:
+ json.dump(info, f)
+
+
+if __name__ == "__main__":
+ typer.run(main)
diff --git a/benchmark/scripts/get_serialized_config_names.py b/benchmark/scripts/get_serialized_config_names.py
new file mode 100644
index 00000000..612a62d3
--- /dev/null
+++ b/benchmark/scripts/get_serialized_config_names.py
@@ -0,0 +1,24 @@
+import json
+
+import typer
+from serialize import serialize_config_name
+
+
+def main(get_configs_reports_filename: str, output: str):
+ with open(get_configs_reports_filename) as f:
+ get_configs_reports = json.load(f)
+
+ # replace '/' in namespaced dataset names
+ serialized_config_names = []
+ for get_configs_report in get_configs_reports:
+ dataset = get_configs_report["dataset"]
+ for config in get_configs_report["configs"]:
+ serialized_config_names.append(serialize_config_name(dataset, config))
+
+ with open(output, "w") as f:
+ for serialized_config_name in serialized_config_names:
+ f.write("%s\n" % serialized_config_name)
+
+
+if __name__ == "__main__":
+ typer.run(main)
diff --git a/benchmark/scripts/get_serialized_dataset_names.py b/benchmark/scripts/get_serialized_dataset_names.py
new file mode 100644
index 00000000..399a3b4c
--- /dev/null
+++ b/benchmark/scripts/get_serialized_dataset_names.py
@@ -0,0 +1,32 @@
+import typer
+from datasets import list_datasets
+from serialize import serialize_dataset_name
+
+# import os
+# import shutil
+
+
+
+def main(filename: str):
+ dataset_names = list_datasets(with_community_datasets=True)
+ # replace '/' in namespaced dataset names
+ serialized_dataset_names = [
+ serialize_dataset_name(dataset_name) for dataset_name in dataset_names
+ ]
+ # # current subdirectories
+ # dir_list = next(os.walk(path))[1]
+ # # to add
+ # for dataset in safe_datasets:
+ # if dataset not in dir_list:
+ # os.mkdir(os.path.join(path, dataset))
+ # # to remove
+ # for dataset in dir_list:
+ # if dataset not in safe_datasets:
+ # shutil.rmtree(os.path.join(path, dataset))
+ with open(filename, "w") as f:
+ for serialized_dataset_name in serialized_dataset_names:
+ f.write("%s\n" % serialized_dataset_name)
+
+
+if __name__ == "__main__":
+ typer.run(main)
diff --git a/benchmark/scripts/serialize.py b/benchmark/scripts/serialize.py
new file mode 100644
index 00000000..5af10346
--- /dev/null
+++ b/benchmark/scripts/serialize.py
@@ -0,0 +1,18 @@
+from typing import Union
+
+SLASH_SEPARATOR = "___SLASH___"
+CONFIG_SEPARATOR = "___CONFIG___"
+CONFIG_NONE = "___NONE_CONFIG___"
+
+
+def serialize_dataset_name(dataset: str) -> str:
+ return dataset.replace("/", SLASH_SEPARATOR)
+
+
+def deserialize_dataset_name(dataset: str) -> str:
+ return dataset.replace(SLASH_SEPARATOR, "/")
+
+
+def serialize_config_name(dataset: str, config: Union[str, None]) -> str:
+ c = CONFIG_NONE if config is None else config
+ return serialize_dataset_name(dataset) + CONFIG_SEPARATOR + c
diff --git a/benchmark/serialized_config_names/Makefile b/benchmark/serialized_config_names/Makefile
new file mode 100644
index 00000000..7797925d
--- /dev/null
+++ b/benchmark/serialized_config_names/Makefile
@@ -0,0 +1,19 @@
+.PHONY: clean all
+
+tmpDir = ../tmp/
+scriptsDir = ../scripts/
+
+serialized_config_names_filename = $(addprefix $(tmpDir), serialized_config_names.txt)
+get_configs_reports_filename = $(addprefix $(tmpDir), get_configs_reports.json)
+
+GET_SERIALIZED_CONFIG_NAMES = $(addprefix $(scriptsDir), get_serialized_config_names.py)
+
+all: $(serialized_config_names_filename)
+
+# get the list of serialized dataset names (with '/' replaced with "___SLASH___")
+$(serialized_config_names_filename): $(get_configs_reports_filename)
+ @mkdir -p $(tmpDir)
+ poetry run python $(GET_SERIALIZED_CONFIG_NAMES) $(get_configs_reports_filename) $(serialized_config_names_filename)
+
+clean:
+ rm -rf $(serialized_config_names_filename)
diff --git a/benchmark/serialized_dataset_names/Makefile b/benchmark/serialized_dataset_names/Makefile
new file mode 100644
index 00000000..f898d3f3
--- /dev/null
+++ b/benchmark/serialized_dataset_names/Makefile
@@ -0,0 +1,18 @@
+.PHONY: clean all
+
+tmpDir = ../tmp/
+scriptsDir = ../scripts/
+
+serialized_dataset_names_filename = $(addprefix $(tmpDir), serialized_dataset_names.txt)
+
+GET_SERIALIZED_DATASET_NAMES = $(addprefix $(scriptsDir), get_serialized_dataset_names.py)
+
+all: $(serialized_dataset_names_filename)
+
+# get the list of serialized dataset names (with '/' replaced with "___SLASH___")
+$(serialized_dataset_names_filename):
+ @mkdir -p $(tmpDir)
+ poetry run python $(GET_SERIALIZED_DATASET_NAMES) $(serialized_dataset_names_filename)
+
+clean:
+ rm -rf $(serialized_dataset_names_filename)
diff --git a/poetry.lock b/poetry.lock
index c2531d24..62c60ded 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -277 +277 @@ name = "click"
-version = "8.0.1"
+version = "7.1.2"
@@ -281,4 +281 @@ optional = false
-python-versions = ">=3.6"
-
-[package.dependencies]
-colorama = {version = "*", markers = "platform_system == \"Windows\""}
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
@@ -1410,0 +1408,17 @@ url = "vendors/trec-car-tools/python3"
+[[package]]
+name = "typer"
+version = "0.3.2"
+description = "Typer, build great CLIs. Easy to code. Based on Python type hints."
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+click = ">=7.1.1,<7.2.0"
+
+[package.extras]
+test = ["pytest-xdist (>=1.32.0,<2.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "mypy (==0.782)", "black (>=19.10b0,<20.0b0)", "isort (>=5.0.6,<6.0.0)", "shellingham (>=1.3.0,<2.0.0)", "pytest (>=4.4.0,<5.4.0)", "pytest-cov (>=2.10.0,<3.0.0)", "coverage (>=5.2,<6.0)"]
+all = ["colorama (>=0.4.3,<0.5.0)", "shellingham (>=1.3.0,<2.0.0)"]
+dev = ["autoflake (>=1.3.1,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)"]
+doc = ["mkdocs (>=1.1.2,<2.0.0)", "mkdocs-material (>=5.4.0,<6.0.0)", "markdown-include (>=0.5.1,<0.6.0)"]
+
@@ -1520 +1534 @@ python-versions = "^3.8"
-content-hash = "3483e9538bc8600e998a570d1511aa3a5fcd820da3eb98c12b5952c9f2c6c97f"
+content-hash = "4de5a24ba38eaa17c695aa1fcc59a54d5bd40bfc9d4c1651b29279ea5c7c64fc"
@@ -1815,2 +1829,2 @@ click = [
- {file = "click-8.0.1-py3-none-any.whl", hash = "sha256:fba402a4a47334742d782209a7c79bc448911afe1149d07bdabdf480b3e2f4b6"},
- {file = "click-8.0.1.tar.gz", hash = "sha256:8c04c11192119b1ef78ea049e0a6f0463e4c48ef00a30160c704337586f3ad7a"},
+ {file = "click-7.1.2-py2.py3-none-any.whl", hash = "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc"},
+ {file = "click-7.1.2.tar.gz", hash = "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a"},
@@ -2743,0 +2758,4 @@ trec-car-tools = []
+typer = [
+ {file = "typer-0.3.2-py3-none-any.whl", hash = "sha256:ba58b920ce851b12a2d790143009fa00ac1d05b3ff3257061ff69dbdfc3d161b"},
+ {file = "typer-0.3.2.tar.gz", hash = "sha256:5455d750122cff96745b0dec87368f56d023725a7ebc9d2e54dd23dc86816303"},
+]
diff --git a/pyproject.toml b/pyproject.toml
index 93041407..b2a6b729 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -27,0 +28 @@ datasets = {extras = ["streaming"], git = "https://github.com/huggingface/datase
+typer = "^0.3.2"
|
|
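The refactor fans the benchmark out to one JSON report per dataset and then merges them: `jq -s '.' $^ > $@` slurps all the per-dataset files into a single JSON array. That merge step is equivalent to this Python sketch (paths are illustrative):

```python
import glob
import json

# Like `jq -s '.' tmp/get_info_reports/*.json > tmp/get_info_reports.json`:
# read each individual report and collect them into one JSON array.
reports = []
for path in sorted(glob.glob("tmp/get_info_reports/*.json")):
    with open(path) as f:
        reports.append(json.load(f))

with open("tmp/get_info_reports.json", "w") as f:
    json.dump(reports, f)
```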
ae51f18706f03aaa53abd89158cc0c293bdde991 | Sylvain Lesage | 2021-08-20T09:32:44 | feat: 🎸 add elapsed time to benchmark
diff --git a/benchmark/test_datasets.py b/benchmark/test_datasets.py
index 8b9b26b3..d909594f 100644
--- a/benchmark/test_datasets.py
+++ b/benchmark/test_datasets.py
@@ -19,0 +20 @@ def get_info_report(dataset: str):
+ t = time.process_time()
@@ -28,0 +30 @@ def get_info_report(dataset: str):
+ "elapsed_seconds": time.process_time() - t,
@@ -38,0 +41 @@ def get_info_report(dataset: str):
+ "elapsed_seconds": time.process_time() - t,
@@ -43,0 +47 @@ def get_configs_report(dataset: str):
+ t = time.process_time()
@@ -52,0 +57 @@ def get_configs_report(dataset: str):
+ "elapsed_seconds": time.process_time() - t,
@@ -62,0 +68 @@ def get_configs_report(dataset: str):
+ "elapsed_seconds": time.process_time() - t,
@@ -67,0 +74 @@ def get_splits_report(dataset: str, config: str):
+ t = time.process_time()
@@ -77,0 +85 @@ def get_splits_report(dataset: str, config: str):
+ "elapsed_seconds": time.process_time() - t,
@@ -88,0 +97 @@ def get_splits_report(dataset: str, config: str):
+ "elapsed_seconds": time.process_time() - t,
@@ -94,0 +104 @@ def get_rows_report(dataset: str, config: str, split: str):
+ t = time.process_time()
@@ -105,0 +116 @@ def get_rows_report(dataset: str, config: str, split: str):
+ "elapsed_seconds": time.process_time() - t,
@@ -116,0 +128 @@ def get_rows_report(dataset: str, config: str, split: str):
+ "elapsed_seconds": time.process_time() - t,
|
|
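One caveat about the measurement: `time.process_time()` counts CPU time of the current process only, so for these network-bound queries `elapsed_seconds` can be far smaller than the wall-clock duration; `time.perf_counter()` would include the waiting. A small sketch of the difference, with a sleep standing in for network I/O:

```python
import time

cpu_start = time.process_time()
wall_start = time.perf_counter()

time.sleep(2)  # stands in for a streaming download: almost no CPU while waiting

print("cpu  :", time.process_time() - cpu_start)   # close to 0.0
print("wall :", time.perf_counter() - wall_start)  # close to 2.0
```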
30823f9ca3411c2ce7df61c250d96ae779feef1b | Sylvain Lesage | 2021-08-20T09:19:54 | style: 💄 sort the imports with isort
diff --git a/Makefile b/Makefile
index e4327758..f8ffe6ab 100644
--- a/Makefile
+++ b/Makefile
@@ -16,0 +17 @@ quality:
+ poetry run isort --check-only tests src benchmark
@@ -20,0 +22 @@ style:
+ poetry run isort tests src benchmark
diff --git a/benchmark/test_datasets.py b/benchmark/test_datasets.py
index c0a4248c..8b9b26b3 100644
--- a/benchmark/test_datasets.py
+++ b/benchmark/test_datasets.py
@@ -1,5 +1 @@
-from datasets_preview_backend.queries.rows import extract_rows
-from datasets_preview_backend.queries.splits import get_splits
-from datasets_preview_backend.queries.configs import get_configs
-from datasets_preview_backend.queries.info import get_info
-from datasets import list_datasets, disable_progress_bar
+import concurrent.futures
@@ -7 +2,0 @@ import json
-import time
@@ -9 +4,8 @@ import logging
-import concurrent.futures
+import time
+
+from datasets import disable_progress_bar, list_datasets
+
+from datasets_preview_backend.queries.configs import get_configs
+from datasets_preview_backend.queries.info import get_info
+from datasets_preview_backend.queries.rows import extract_rows
+from datasets_preview_backend.queries.splits import get_splits
diff --git a/poetry.lock b/poetry.lock
index c36a2cea..c2531d24 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -596,0 +597,14 @@ python-versions = "*"
+[[package]]
+name = "isort"
+version = "5.9.3"
+description = "A Python utility / library to sort Python imports."
+category = "dev"
+optional = false
+python-versions = ">=3.6.1,<4.0"
+
+[package.extras]
+pipfile_deprecated_finder = ["pipreqs", "requirementslib"]
+requirements_deprecated_finder = ["pipreqs", "pip-api"]
+colors = ["colorama (>=0.4.3,<0.5.0)"]
+plugins = ["setuptools"]
+
@@ -1506 +1520 @@ python-versions = "^3.8"
-content-hash = "8c5e0bd6527f095c35d5f6281a7a6ac659b7954e11b2a5ce315853392d8a66c3"
+content-hash = "3483e9538bc8600e998a570d1511aa3a5fcd820da3eb98c12b5952c9f2c6c97f"
@@ -1971,0 +1986,4 @@ iniconfig = [
+isort = [
+ {file = "isort-5.9.3-py3-none-any.whl", hash = "sha256:e17d6e2b81095c9db0a03a8025a957f334d6ea30b26f9ec70805411e5c7c81f2"},
+ {file = "isort-5.9.3.tar.gz", hash = "sha256:9c2ea1e62d871267b78307fe511c0838ba0da28698c5732d54e2790bf3ba9899"},
+]
diff --git a/pyproject.toml b/pyproject.toml
index 07b55427..93041407 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -31,0 +32 @@ black = "^21.7b0"
+isort = "^5.9.3"
@@ -40,0 +42,3 @@ filterwarnings = [
+
+[tool.isort]
+profile = "black"
diff --git a/src/datasets_preview_backend/config.py b/src/datasets_preview_backend/config.py
index f07f1c64..2097c43b 100644
--- a/src/datasets_preview_backend/config.py
+++ b/src/datasets_preview_backend/config.py
@@ -1,0 +2 @@ import os
+
diff --git a/src/datasets_preview_backend/main.py b/src/datasets_preview_backend/main.py
index 70461a54..00c914dd 100644
--- a/src/datasets_preview_backend/main.py
+++ b/src/datasets_preview_backend/main.py
@@ -0,0 +1 @@
+import uvicorn
@@ -3 +3,0 @@ from starlette.routing import Route
-import uvicorn
@@ -6 +6 @@ from datasets_preview_backend.config import PORT
-from datasets_preview_backend.routes import healthcheck, rows, configs, splits, info
+from datasets_preview_backend.routes import configs, healthcheck, info, rows, splits
diff --git a/src/datasets_preview_backend/queries/configs.py b/src/datasets_preview_backend/queries/configs.py
index d3b41780..a06d695f 100644
--- a/src/datasets_preview_backend/queries/configs.py
+++ b/src/datasets_preview_backend/queries/configs.py
@@ -2 +1,0 @@ import logging
-
@@ -5,4 +4 @@ from typing import List
-from datasets import (
- prepare_module,
- import_main_class,
-)
+from datasets import import_main_class, prepare_module
@@ -10,4 +6 @@ from datasets import (
-from datasets_preview_backend.exceptions import (
- Status400Error,
- Status404Error,
-)
+from datasets_preview_backend.exceptions import Status400Error, Status404Error
diff --git a/src/datasets_preview_backend/queries/info.py b/src/datasets_preview_backend/queries/info.py
index a7a8a538..c79bce28 100644
--- a/src/datasets_preview_backend/queries/info.py
+++ b/src/datasets_preview_backend/queries/info.py
@@ -1 +0,0 @@
-import logging
@@ -2,0 +2 @@ import json
+import logging
@@ -4 +3,0 @@ from dataclasses import asdict
-
@@ -7,4 +6 @@ from typing import List
-from datasets import (
- prepare_module,
- import_main_class,
-)
+from datasets import import_main_class, prepare_module
@@ -12,4 +8 @@ from datasets import (
-from datasets_preview_backend.exceptions import (
- Status400Error,
- Status404Error,
-)
+from datasets_preview_backend.exceptions import Status400Error, Status404Error
diff --git a/src/datasets_preview_backend/queries/rows.py b/src/datasets_preview_backend/queries/rows.py
index 1514eb2b..3eba5393 100644
--- a/src/datasets_preview_backend/queries/rows.py
+++ b/src/datasets_preview_backend/queries/rows.py
@@ -1 +0,0 @@
-import re
@@ -3 +2 @@ import logging
-
+import re
@@ -6,4 +5 @@ from typing import List
-from datasets import (
- IterableDataset,
- load_dataset,
-)
+from datasets import IterableDataset, load_dataset
@@ -11,4 +7 @@ from datasets import (
-from datasets_preview_backend.exceptions import (
- Status400Error,
- Status404Error,
-)
+from datasets_preview_backend.exceptions import Status400Error, Status404Error
diff --git a/src/datasets_preview_backend/queries/splits.py b/src/datasets_preview_backend/queries/splits.py
index 7ede618f..f0bd1ed7 100644
--- a/src/datasets_preview_backend/queries/splits.py
+++ b/src/datasets_preview_backend/queries/splits.py
@@ -6,4 +6 @@ from datasets.utils.streaming_download_manager import StreamingDownloadManager
-from datasets_preview_backend.exceptions import (
- Status400Error,
- Status404Error,
-)
+from datasets_preview_backend.exceptions import Status400Error, Status404Error
diff --git a/src/datasets_preview_backend/routes.py b/src/datasets_preview_backend/routes.py
index 565b37ee..9992c9a5 100644
--- a/src/datasets_preview_backend/routes.py
+++ b/src/datasets_preview_backend/routes.py
@@ -1,0 +2 @@ import logging
+
@@ -3 +4 @@ from starlette.requests import Request
-from starlette.responses import PlainTextResponse, JSONResponse
+from starlette.responses import JSONResponse, PlainTextResponse
@@ -6,5 +6,0 @@ from datasets_preview_backend.config import EXTRACT_ROWS_LIMIT
-from datasets_preview_backend.queries.info import get_info
-from datasets_preview_backend.queries.configs import get_configs
-from datasets_preview_backend.queries.splits import get_splits
-from datasets_preview_backend.queries.rows import extract_rows
-from datasets_preview_backend.utils import get_int_value
@@ -12 +7,0 @@ from datasets_preview_backend.exceptions import (
- StatusError,
@@ -14,0 +10 @@ from datasets_preview_backend.exceptions import (
+ StatusError,
@@ -15,0 +12,5 @@ from datasets_preview_backend.exceptions import (
+from datasets_preview_backend.queries.configs import get_configs
+from datasets_preview_backend.queries.info import get_info
+from datasets_preview_backend.queries.rows import extract_rows
+from datasets_preview_backend.queries.splits import get_splits
+from datasets_preview_backend.utils import get_int_value
diff --git a/tests/queries/test_configs.py b/tests/queries/test_configs.py
index ced98b6a..14a2adb4 100644
--- a/tests/queries/test_configs.py
+++ b/tests/queries/test_configs.py
@@ -3 +2,0 @@ import pytest
-
diff --git a/tests/queries/test_info.py b/tests/queries/test_info.py
index 8fe6c022..43eb0309 100644
--- a/tests/queries/test_info.py
+++ b/tests/queries/test_info.py
@@ -3 +2,0 @@ import pytest
-
|
|
c2afebc1b96994276c8cb0d62798adbc57323a29
|
Sylvain Lesage
| 2021-08-20T09:08:26 |
style: 💄 use black to check and format
|
diff --git a/Makefile b/Makefile
index 8ee6c034..e4327758 100644
--- a/Makefile
+++ b/Makefile
@@ -3 +3 @@ PORT ?= 8000
-.PHONY: install run test benchmark watch
+.PHONY: install run test quality style benchmark watch
@@ -13,0 +14,8 @@ test:
+# Check that source code meets quality standards
+quality:
+ poetry run black --check tests src benchmark
+
+# Format source code automatically
+style:
+ poetry run black tests src benchmark
+
diff --git a/benchmark/test_datasets.py b/benchmark/test_datasets.py
index 10b540eb..c0a4248c 100644
--- a/benchmark/test_datasets.py
+++ b/benchmark/test_datasets.py
@@ -123,2 +123 @@ def process_map(fun, iterator, max_workers):
- future_to_item = {executor.submit(
- fun, **item): item for item in iterator}
+ future_to_item = {executor.submit(fun, **item): item for item in iterator}
@@ -131 +130 @@ def process_map(fun, iterator, max_workers):
- print('%r generated an exception: %s' % (item, exc))
+ print("%r generated an exception: %s" % (item, exc))
@@ -144 +143,2 @@ def export_all_datasets_exceptions():
- get_info_report, datasets_iterator, max_workers=max_workers)
+ get_info_report, datasets_iterator, max_workers=max_workers
+ )
@@ -148 +148,2 @@ def export_all_datasets_exceptions():
- get_configs_report, datasets_iterator, max_workers=max_workers)
+ get_configs_report, datasets_iterator, max_workers=max_workers
+ )
@@ -155,2 +156 @@ def export_all_datasets_exceptions():
- configs_iterator.append(
- {"dataset": report["dataset"], "config": config})
+ configs_iterator.append({"dataset": report["dataset"], "config": config})
@@ -169 +169,6 @@ def export_all_datasets_exceptions():
- {"dataset": report["dataset"], "config": report["config"], "split": split})
+ {
+ "dataset": report["dataset"],
+ "config": report["config"],
+ "split": split,
+ }
+ )
diff --git a/poetry.lock b/poetry.lock
index e6b5475e..c36a2cea 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -84,0 +85,8 @@ test = ["freezegun (>=0.3.12)", "mock (>=1.0.1,<3.0.0)", "nose (>=1.3.7)", "nose
+[[package]]
+name = "appdirs"
+version = "1.4.4"
+description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
+category = "dev"
+optional = false
+python-versions = "*"
+
@@ -137,12 +144,0 @@ tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>
-[[package]]
-name = "autopep8"
-version = "1.5.7"
-description = "A tool that automatically formats Python code to conform to the PEP 8 style guide"
-category = "dev"
-optional = false
-python-versions = "*"
-
-[package.dependencies]
-pycodestyle = ">=2.7.0"
-toml = "*"
-
@@ -175,0 +172,22 @@ test = ["pytest", "pytest-cov", "coverage[toml] (>=5.2)"]
+[[package]]
+name = "black"
+version = "21.7b0"
+description = "The uncompromising code formatter."
+category = "dev"
+optional = false
+python-versions = ">=3.6.2"
+
+[package.dependencies]
+appdirs = "*"
+click = ">=7.1.2"
+mypy-extensions = ">=0.4.3"
+pathspec = ">=0.8.1,<1"
+regex = ">=2020.1.8"
+tomli = ">=0.2.6,<2.0.0"
+
+[package.extras]
+colorama = ["colorama (>=0.4.3)"]
+d = ["aiohttp (>=3.6.0)", "aiohttp-cors (>=0.4.0)"]
+python2 = ["typed-ast (>=1.4.2)"]
+uvloop = ["uvloop (>=0.15.2)"]
+
@@ -322 +340 @@ streaming = ["aiohttp"]
-tests = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[s3,server] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert_score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests_file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)"]
+tests = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[server,s3] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert_score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests_file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)"]
@@ -605,0 +624 @@ url = "https://github.com/kpu/kenlm/archive/master.zip"
+
@@ -708,0 +728,8 @@ type = ["mypy", "mypy-extensions"]
+[[package]]
+name = "mypy-extensions"
+version = "0.4.3"
+description = "Experimental type system extensions for programs checked with the mypy typechecker."
+category = "dev"
+optional = false
+python-versions = "*"
+
@@ -847,0 +875,8 @@ test = ["hypothesis (>=3.58)", "pytest (>=6.0)", "pytest-xdist"]
+[[package]]
+name = "pathspec"
+version = "0.9.0"
+description = "Utility library for gitignore style pattern matching of file paths."
+category = "dev"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
+
@@ -940,8 +974,0 @@ pyasn1 = ">=0.4.6,<0.5.0"
-[[package]]
-name = "pycodestyle"
-version = "2.7.0"
-description = "Python style guide checker"
-category = "dev"
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-
@@ -1276,0 +1304,8 @@ python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
+[[package]]
+name = "tomli"
+version = "1.2.1"
+description = "A lil' TOML parser"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
@@ -1471 +1506 @@ python-versions = "^3.8"
-content-hash = "ce04c97957959ceff7976d93a0467dd93b7cb05397a482b55b82eda9059a8755"
+content-hash = "8c5e0bd6527f095c35d5f6281a7a6ac659b7954e11b2a5ce315853392d8a66c3"
@@ -1544,0 +1580,4 @@ apache-beam = [
+appdirs = [
+ {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"},
+ {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"},
+]
@@ -1565,4 +1603,0 @@ attrs = [
-autopep8 = [
- {file = "autopep8-1.5.7-py2.py3-none-any.whl", hash = "sha256:aa213493c30dcdac99537249ee65b24af0b2c29f2e83cd8b3f68760441ed0db9"},
- {file = "autopep8-1.5.7.tar.gz", hash = "sha256:276ced7e9e3cb22e5d7c14748384a5cf5d9002257c0ed50c0e075b68011bb6d0"},
-]
@@ -1609,0 +1645,4 @@ bcj-cffi = [
+black = [
+ {file = "black-21.7b0-py3-none-any.whl", hash = "sha256:1c7aa6ada8ee864db745b22790a32f94b2795c253a75d6d9b5e439ff10d23116"},
+ {file = "black-21.7b0.tar.gz", hash = "sha256:c8373c6491de9362e39271630b65b964607bc5c79c83783547d76c839b3aa219"},
+]
@@ -2057,0 +2097,4 @@ multivolumefile = [
+mypy-extensions = [
+ {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"},
+ {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"},
+]
@@ -2142,0 +2186,4 @@ pandas = [
+pathspec = [
+ {file = "pathspec-0.9.0-py2.py3-none-any.whl", hash = "sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a"},
+ {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"},
+]
@@ -2282,4 +2328,0 @@ pyasn1-modules = [
-pycodestyle = [
- {file = "pycodestyle-2.7.0-py2.py3-none-any.whl", hash = "sha256:514f76d918fcc0b55c6680472f0a37970994e07bbb80725808c17089be302068"},
- {file = "pycodestyle-2.7.0.tar.gz", hash = "sha256:c389c1d06bf7904078ca03399a4816f974a1d590090fecea0c63ec26ebaf1cef"},
-]
@@ -2669,0 +2713,4 @@ toml = [
+tomli = [
+ {file = "tomli-1.2.1-py3-none-any.whl", hash = "sha256:8dd0e9524d6f386271a36b41dbf6c57d8e32fd96fd22b6584679dc569d20899f"},
+ {file = "tomli-1.2.1.tar.gz", hash = "sha256:a5b75cb6f3968abb47af1b40c1819dc519ea82bcc065776a866e8d74c5ca9442"},
+]
diff --git a/pyproject.toml b/pyproject.toml
index 23ac8cb4..07b55427 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -31 +31 @@ pytest = "^6.2.4"
-autopep8 = "^1.5.7"
+black = "^21.7b0"
diff --git a/src/datasets_preview_backend/queries/info.py b/src/datasets_preview_backend/queries/info.py
index 7464d7d6..a7a8a538 100644
--- a/src/datasets_preview_backend/queries/info.py
+++ b/src/datasets_preview_backend/queries/info.py
@@ -23,2 +23,4 @@ def get_info(dataset: str) -> any:
- info = {config_name: asdict(
- config_info) for config_name, config_info in total_dataset_infos.items()}
+ info = {
+ config_name: asdict(config_info)
+ for config_name, config_info in total_dataset_infos.items()
+ }
diff --git a/src/datasets_preview_backend/queries/rows.py b/src/datasets_preview_backend/queries/rows.py
index 58ad62a6..1514eb2b 100644
--- a/src/datasets_preview_backend/queries/rows.py
+++ b/src/datasets_preview_backend/queries/rows.py
@@ -52,2 +52 @@ def extract_rows(dataset: str, config: str, split: str, num_rows: int):
- raise Status404Error(
- "The dataset config could not be found.") from err
+ raise Status404Error("The dataset config could not be found.") from err
diff --git a/src/datasets_preview_backend/queries/splits.py b/src/datasets_preview_backend/queries/splits.py
index 973b8c18..7ede618f 100644
--- a/src/datasets_preview_backend/queries/splits.py
+++ b/src/datasets_preview_backend/queries/splits.py
@@ -19,2 +19 @@ def get_splits(dataset: str, config: str) -> List[str]:
- raise Status404Error(
- "The dataset config could not be found.") from err
+ raise Status404Error("The dataset config could not be found.") from err
diff --git a/tests/queries/test_rows.py b/tests/queries/test_rows.py
index bc40d240..20f40836 100644
--- a/tests/queries/test_rows.py
+++ b/tests/queries/test_rows.py
@@ -66,2 +66 @@ def test_extract_not_implemented_split():
- extract_rows("ade_corpus_v2",
- "Ade_corpus_v2_classification", "train", 10)
+ extract_rows("ade_corpus_v2", "Ade_corpus_v2_classification", "train", 10)
|
|
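The `quality` and `style` Makefile targets added in this commit shell out to black over `tests`, `src` and `benchmark`. The same reformatting can be sketched through black's Python API; a minimal sketch, assuming black 21.7b0 is installed as declared in the pyproject.toml change above (the sample source line is taken from the tests/queries/test_rows.py hunk in this commit):

# Sketch of the reformatting that `make style` applies, via black's Python API.
import black

source = (
    'extract_rows("ade_corpus_v2",\n'
    '             "Ade_corpus_v2_classification", "train", 10)\n'
)

# format_str reflows a snippet to black's style; here the split call collapses
# back onto one line, matching the change to tests/queries/test_rows.py above.
print(black.format_str(source, mode=black.Mode()))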
4004083bc1c15a54db127b8256dd44c8f916f18c
|
Sylvain Lesage
| 2021-08-20T08:57:40 |
refactor: 💡 rename quality to benchmark
|
diff --git a/Makefile b/Makefile
index 93b177f8..8ee6c034 100644
--- a/Makefile
+++ b/Makefile
@@ -3 +3 @@ PORT ?= 8000
-.PHONY: install run test quality watch
+.PHONY: install run test benchmark watch
@@ -14,2 +14,2 @@ test:
-quality:
- poetry run python quality/test_datasets.py
+benchmark:
+ poetry run python benchmark/test_datasets.py
diff --git a/quality/test_datasets.py b/benchmark/test_datasets.py
similarity index 100%
rename from quality/test_datasets.py
rename to benchmark/test_datasets.py
|
|
969a5911ce824a4023328f9c9d8eb6e4c0c3a93c
|
Sylvain Lesage
| 2021-08-20T08:47:38 |
feat: 🎸 add /info to the benchmark
|
diff --git a/quality/test_datasets.py b/quality/test_datasets.py
index 9aaf3a46..10b540eb 100644
--- a/quality/test_datasets.py
+++ b/quality/test_datasets.py
@@ -0,0 +1,4 @@
+from datasets_preview_backend.queries.rows import extract_rows
+from datasets_preview_backend.queries.splits import get_splits
+from datasets_preview_backend.queries.configs import get_configs
+from datasets_preview_backend.queries.info import get_info
@@ -4 +7,0 @@ import time
-from tqdm.contrib.concurrent import process_map
@@ -5,0 +9 @@ import logging
+import concurrent.futures
@@ -11,3 +15,23 @@ disable_progress_bar()
-from datasets_preview_backend.queries.configs import get_configs
-from datasets_preview_backend.queries.splits import get_splits
-from datasets_preview_backend.queries.rows import extract_rows
+
+def get_info_report(dataset: str):
+ try:
+ info = get_info(dataset)["info"]
+ return {
+ "dataset": dataset,
+ "info": info,
+ "success": True,
+ "exception": None,
+ "message": None,
+ "cause": None,
+ "cause_message": None,
+ }
+ except Exception as err:
+ return {
+ "dataset": dataset,
+ "info": None,
+ "success": False,
+ "exception": type(err).__name__,
+ "message": str(err),
+ "cause": type(err.__cause__).__name__,
+ "cause_message": str(err.__cause__),
+ }
@@ -93,0 +118,19 @@ def get_rows_report(dataset: str, config: str, split: str):
+def process_map(fun, iterator, max_workers):
+ # We can use a with statement to ensure threads are cleaned up promptly
+ results = []
+ with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+ # Start the load operations and mark each future with its URL
+ future_to_item = {executor.submit(
+ fun, **item): item for item in iterator}
+ for future in concurrent.futures.as_completed(future_to_item):
+ item = future_to_item[future]
+ print(item)
+ try:
+ result = future.result()
+ except Exception as exc:
+ print('%r generated an exception: %s' % (item, exc))
+ else:
+ results.append(result)
+ return results
+
+
@@ -95 +138 @@ def export_all_datasets_exceptions():
- chunksize = 5
+ max_workers = 5
@@ -96,0 +140,5 @@ def export_all_datasets_exceptions():
+ datasets_iterator = [{"dataset": dataset} for dataset in datasets]
+
+ # print("Get info for all the datasets")
+ info_reports = process_map(
+ get_info_report, datasets_iterator, max_workers=max_workers)
@@ -99 +147,2 @@ def export_all_datasets_exceptions():
- configs_reports = process_map(get_configs_report, datasets, chunksize=chunksize)
+ configs_reports = process_map(
+ get_configs_report, datasets_iterator, max_workers=max_workers)
@@ -102,2 +151 @@ def export_all_datasets_exceptions():
- splits_datasets = []
- splits_configs = []
+ configs_iterator = []
@@ -107,2 +155,2 @@ def export_all_datasets_exceptions():
- splits_datasets.append(report["dataset"])
- splits_configs.append(config)
+ configs_iterator.append(
+ {"dataset": report["dataset"], "config": config})
@@ -111,3 +159,2 @@ def export_all_datasets_exceptions():
- splits_datasets,
- splits_configs,
- chunksize=chunksize,
+ configs_iterator,
+ max_workers=max_workers,
@@ -117,3 +164 @@ def export_all_datasets_exceptions():
- rows_datasets = []
- rows_configs = []
- rows_splits = []
+ splits_iterator = []
@@ -123,3 +168,2 @@ def export_all_datasets_exceptions():
- rows_datasets.append(report["dataset"])
- rows_configs.append(report["config"])
- rows_splits.append(split)
+ splits_iterator.append(
+ {"dataset": report["dataset"], "config": report["config"], "split": split})
@@ -128,4 +172,2 @@ def export_all_datasets_exceptions():
- rows_datasets,
- rows_configs,
- rows_splits,
- chunksize=chunksize,
+ splits_iterator,
+ max_workers=max_workers,
@@ -134,0 +177 @@ def export_all_datasets_exceptions():
+ "info_reports": info_reports,
|
|
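The `process_map` helper introduced above swaps tqdm's implementation for one built on `concurrent.futures.ThreadPoolExecutor`: one future per item, results gathered as they complete, and per-item exceptions printed rather than aborting the run. A self-contained sketch of the same pattern (the `fetch` function and `items` list are hypothetical stand-ins for the report functions and dataset iterators in the benchmark script):

import concurrent.futures

def process_map(fun, iterator, max_workers):
    # Submit one future per item; keep a map back to the item for error reporting.
    results = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_item = {executor.submit(fun, **item): item for item in iterator}
        for future in concurrent.futures.as_completed(future_to_item):
            item = future_to_item[future]
            try:
                # Results arrive in completion order, not submission order.
                results.append(future.result())
            except Exception as exc:
                # Failures are reported but do not abort the whole benchmark run.
                print("%r generated an exception: %s" % (item, exc))
    return results

# Hypothetical usage mirroring the benchmark script above.
def fetch(dataset):
    return {"dataset": dataset, "success": True}

items = [{"dataset": name} for name in ("glue", "squad", "doesnotexist")]
reports = process_map(fetch, items, max_workers=5)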
0319c34e428b1b42f4486f546b0d800fb73e3994
|
Sylvain Lesage
| 2021-08-20T08:45:10 |
style: 💄 use autopep8 and reformat files
|
diff --git a/poetry.lock b/poetry.lock
index 3d1b606b..e6b5475e 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -85,8 +84,0 @@ test = ["freezegun (>=0.3.12)", "mock (>=1.0.1,<3.0.0)", "nose (>=1.3.7)", "nose
-[[package]]
-name = "appdirs"
-version = "1.4.4"
-description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
-category = "dev"
-optional = false
-python-versions = "*"
-
@@ -144,0 +137,12 @@ tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>
+[[package]]
+name = "autopep8"
+version = "1.5.7"
+description = "A tool that automatically formats Python code to conform to the PEP 8 style guide"
+category = "dev"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+pycodestyle = ">=2.7.0"
+toml = "*"
+
@@ -172,22 +175,0 @@ test = ["pytest", "pytest-cov", "coverage[toml] (>=5.2)"]
-[[package]]
-name = "black"
-version = "21.7b0"
-description = "The uncompromising code formatter."
-category = "dev"
-optional = false
-python-versions = ">=3.6.2"
-
-[package.dependencies]
-appdirs = "*"
-click = ">=7.1.2"
-mypy-extensions = ">=0.4.3"
-pathspec = ">=0.8.1,<1"
-regex = ">=2020.1.8"
-tomli = ">=0.2.6,<2.0.0"
-
-[package.extras]
-colorama = ["colorama (>=0.4.3)"]
-d = ["aiohttp (>=3.6.0)", "aiohttp-cors (>=0.4.0)"]
-python2 = ["typed-ast (>=1.4.2)"]
-uvloop = ["uvloop (>=0.15.2)"]
-
@@ -340 +322 @@ streaming = ["aiohttp"]
-tests = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[server,s3] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert_score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests_file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)"]
+tests = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[s3,server] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert_score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests_file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)"]
@@ -727,8 +708,0 @@ type = ["mypy", "mypy-extensions"]
-[[package]]
-name = "mypy-extensions"
-version = "0.4.3"
-description = "Experimental type system extensions for programs checked with the mypy typechecker."
-category = "dev"
-optional = false
-python-versions = "*"
-
@@ -874,8 +847,0 @@ test = ["hypothesis (>=3.58)", "pytest (>=6.0)", "pytest-xdist"]
-[[package]]
-name = "pathspec"
-version = "0.9.0"
-description = "Utility library for gitignore style pattern matching of file paths."
-category = "dev"
-optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
-
@@ -973,0 +940,8 @@ pyasn1 = ">=0.4.6,<0.5.0"
+[[package]]
+name = "pycodestyle"
+version = "2.7.0"
+description = "Python style guide checker"
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+
@@ -1303,8 +1276,0 @@ python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
-[[package]]
-name = "tomli"
-version = "1.2.1"
-description = "A lil' TOML parser"
-category = "dev"
-optional = false
-python-versions = ">=3.6"
-
@@ -1505 +1471 @@ python-versions = "^3.8"
-content-hash = "b19f9516833c7d7b59c4142dd1cb4137dbee62cb85391c0b578e26e61a69e9e1"
+content-hash = "ce04c97957959ceff7976d93a0467dd93b7cb05397a482b55b82eda9059a8755"
@@ -1579,4 +1544,0 @@ apache-beam = [
-appdirs = [
- {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"},
- {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"},
-]
@@ -1602,0 +1565,4 @@ attrs = [
+autopep8 = [
+ {file = "autopep8-1.5.7-py2.py3-none-any.whl", hash = "sha256:aa213493c30dcdac99537249ee65b24af0b2c29f2e83cd8b3f68760441ed0db9"},
+ {file = "autopep8-1.5.7.tar.gz", hash = "sha256:276ced7e9e3cb22e5d7c14748384a5cf5d9002257c0ed50c0e075b68011bb6d0"},
+]
@@ -1644,4 +1609,0 @@ bcj-cffi = [
-black = [
- {file = "black-21.7b0-py3-none-any.whl", hash = "sha256:1c7aa6ada8ee864db745b22790a32f94b2795c253a75d6d9b5e439ff10d23116"},
- {file = "black-21.7b0.tar.gz", hash = "sha256:c8373c6491de9362e39271630b65b964607bc5c79c83783547d76c839b3aa219"},
-]
@@ -2096,4 +2057,0 @@ multivolumefile = [
-mypy-extensions = [
- {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"},
- {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"},
-]
@@ -2185,4 +2142,0 @@ pandas = [
-pathspec = [
- {file = "pathspec-0.9.0-py2.py3-none-any.whl", hash = "sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a"},
- {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"},
-]
@@ -2327,0 +2282,4 @@ pyasn1-modules = [
+pycodestyle = [
+ {file = "pycodestyle-2.7.0-py2.py3-none-any.whl", hash = "sha256:514f76d918fcc0b55c6680472f0a37970994e07bbb80725808c17089be302068"},
+ {file = "pycodestyle-2.7.0.tar.gz", hash = "sha256:c389c1d06bf7904078ca03399a4816f974a1d590090fecea0c63ec26ebaf1cef"},
+]
@@ -2712,4 +2669,0 @@ toml = [
-tomli = [
- {file = "tomli-1.2.1-py3-none-any.whl", hash = "sha256:8dd0e9524d6f386271a36b41dbf6c57d8e32fd96fd22b6584679dc569d20899f"},
- {file = "tomli-1.2.1.tar.gz", hash = "sha256:a5b75cb6f3968abb47af1b40c1819dc519ea82bcc065776a866e8d74c5ca9442"},
-]
diff --git a/pyproject.toml b/pyproject.toml
index 7741d326..23ac8cb4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -30,2 +29,0 @@ datasets = {extras = ["streaming"], git = "https://github.com/huggingface/datase
-black = "^21.7b0"
-tqdm = "^4.61.2"
@@ -32,0 +31 @@ pytest = "^6.2.4"
+autopep8 = "^1.5.7"
diff --git a/src/datasets_preview_backend/queries/info.py b/src/datasets_preview_backend/queries/info.py
index 7c7caf91..7464d7d6 100644
--- a/src/datasets_preview_backend/queries/info.py
+++ b/src/datasets_preview_backend/queries/info.py
@@ -23 +23,2 @@ def get_info(dataset: str) -> any:
- info = {config_name: asdict(config_info) for config_name, config_info in total_dataset_infos.items()}
+ info = {config_name: asdict(
+ config_info) for config_name, config_info in total_dataset_infos.items()}
diff --git a/src/datasets_preview_backend/queries/rows.py b/src/datasets_preview_backend/queries/rows.py
index 38239276..58ad62a6 100644
--- a/src/datasets_preview_backend/queries/rows.py
+++ b/src/datasets_preview_backend/queries/rows.py
@@ -15,0 +16 @@ from datasets_preview_backend.exceptions import (
+
@@ -51 +52,2 @@ def extract_rows(dataset: str, config: str, split: str, num_rows: int):
- raise Status404Error("The dataset config could not be found.") from err
+ raise Status404Error(
+ "The dataset config could not be found.") from err
diff --git a/src/datasets_preview_backend/queries/splits.py b/src/datasets_preview_backend/queries/splits.py
index 7ede618f..973b8c18 100644
--- a/src/datasets_preview_backend/queries/splits.py
+++ b/src/datasets_preview_backend/queries/splits.py
@@ -19 +19,2 @@ def get_splits(dataset: str, config: str) -> List[str]:
- raise Status404Error("The dataset config could not be found.") from err
+ raise Status404Error(
+ "The dataset config could not be found.") from err
diff --git a/tests/queries/test_info.py b/tests/queries/test_info.py
index b46793fb..8fe6c022 100644
--- a/tests/queries/test_info.py
+++ b/tests/queries/test_info.py
@@ -22 +21,0 @@ def test_get_info():
-
diff --git a/tests/queries/test_rows.py b/tests/queries/test_rows.py
index 20f40836..bc40d240 100644
--- a/tests/queries/test_rows.py
+++ b/tests/queries/test_rows.py
@@ -66 +66,2 @@ def test_extract_not_implemented_split():
- extract_rows("ade_corpus_v2", "Ade_corpus_v2_classification", "train", 10)
+ extract_rows("ade_corpus_v2",
+ "Ade_corpus_v2_classification", "train", 10)
|
|
11771939b09fb132bdc26a56977546e538e944da
|
Sylvain Lesage
| 2021-08-19T14:55:11 |
chore: 🤖 version bump
|
diff --git a/pyproject.toml b/pyproject.toml
index 4eaca019..7741d326 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3 +3 @@ name = "datasets-preview-backend"
-version = "0.2.0"
+version = "0.3.0"
|
|
10d860cece28341e236614e35ddf47b9027d5e00
|
Sylvain Lesage
| 2021-08-19T14:54:48 |
feat: 🎸 add /info endpoint
|
diff --git a/README.md b/README.md
index 4e2bd12e..f08e76dc 100644
--- a/README.md
+++ b/README.md
@@ -62,0 +63,100 @@ Responses:
+### /info
+
+> Return the dataset_info.json file for the dataset
+
+Example: http://54.158.211.3/info?dataset=glue
+
+Method: `GET`
+
+Parameters:
+
+- `dataset` (required): the dataset ID
+
+Responses:
+
+- `200`: JSON content with the following structure:
+
+ ```json
+ {
+ "dataset": "glue",
+ "info": {
+ "cola": {
+ "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
+ "citation": "@article{warstadt2018neural,\n title={Neural Network Acceptability Judgments},\n author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1805.12471},\n year={2018}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
+ "homepage": "https://nyu-mll.github.io/CoLA/",
+ "license": "",
+ "features": {
+ "sentence": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "label": {
+ "num_classes": 2,
+ "names": [
+ "unacceptable",
+ "acceptable"
+ ],
+ "names_file": null,
+ "id": null,
+ "_type": "ClassLabel"
+ },
+ "idx": {
+ "dtype": "int32",
+ "id": null,
+ "_type": "Value"
+ }
+ },
+ "post_processed": null,
+ "supervised_keys": null,
+ "task_templates": null,
+ "builder_name": "glue",
+ "config_name": "cola",
+ "version": {
+ "version_str": "1.0.0",
+ "description": "",
+ "major": 1,
+ "minor": 0,
+ "patch": 0
+ },
+ "splits": {
+ "test" : {
+ "name": "test",
+ "num_bytes": 61049,
+ "num_examples": 1063,
+ "dataset_name": "glue"
+ },
+ "train": {
+ "name": "train",
+ "num_bytes": 489149,
+ "num_examples": 8551,
+ "dataset_name": "glue"
+ },
+ "validation": {
+ "name": "validation",
+ "num_bytes": 60850,
+ "num_examples": 1043,
+ "dataset_name": "glue"
+ }
+ },
+ "download_checksums": {
+ "https://dl.fbaipublicfiles.com/glue/data/CoLA.zip": {
+ "num_bytes": 376971,
+ "checksum": "f212fcd832b8f7b435fb991f101abf89f96b933ab400603bf198960dfc32cbff"
+ }
+ },
+ "download_size": 376971,
+ "post_processing_size": null,
+ "dataset_size": 611048,
+ "size_in_bytes": 988019
+ },
+ "sst2": { ... },
+ ...
+ }
+ }
+ ```
+
+- `400`: the dataset script is erroneous
+- `404`: the dataset cannot be found
+- `500`: application error
+
diff --git a/src/datasets_preview_backend/main.py b/src/datasets_preview_backend/main.py
index fc40dc76..70461a54 100644
--- a/src/datasets_preview_backend/main.py
+++ b/src/datasets_preview_backend/main.py
@@ -6 +6 @@ from datasets_preview_backend.config import PORT
-from datasets_preview_backend.routes import healthcheck, rows, configs, splits
+from datasets_preview_backend.routes import healthcheck, rows, configs, splits, info
@@ -15,0 +16 @@ def app():
+ Route("/info", endpoint=info),
diff --git a/src/datasets_preview_backend/queries/info.py b/src/datasets_preview_backend/queries/info.py
new file mode 100644
index 00000000..7c7caf91
--- /dev/null
+++ b/src/datasets_preview_backend/queries/info.py
@@ -0,0 +1,31 @@
+import logging
+import json
+from dataclasses import asdict
+
+from typing import List
+
+from datasets import (
+ prepare_module,
+ import_main_class,
+)
+
+from datasets_preview_backend.exceptions import (
+ Status400Error,
+ Status404Error,
+)
+
+
+def get_info(dataset: str) -> any:
+ try:
+ module_path, *_ = prepare_module(dataset, dataset=True)
+ builder_cls = import_main_class(module_path, dataset=True)
+ total_dataset_infos = builder_cls.get_all_exported_dataset_infos()
+ info = {config_name: asdict(config_info) for config_name, config_info in total_dataset_infos.items()}
+ except FileNotFoundError as err:
+ raise Status404Error("The dataset info could not be found.") from err
+ except Exception as err:
+ raise Status400Error(
+ "The dataset info could not be parsed from the dataset."
+ ) from err
+
+ return {"dataset": dataset, "info": info}
diff --git a/src/datasets_preview_backend/routes.py b/src/datasets_preview_backend/routes.py
index 8aa5da00..565b37ee 100644
--- a/src/datasets_preview_backend/routes.py
+++ b/src/datasets_preview_backend/routes.py
@@ -5,0 +6 @@ from datasets_preview_backend.config import EXTRACT_ROWS_LIMIT
+from datasets_preview_backend.queries.info import get_info
@@ -26,0 +28,16 @@ async def healthcheck(_: Request):
+async def info(request: Request):
+ dataset: str = request.query_params.get("dataset")
+
+ if dataset is None:
+ return PlainTextResponse(
+ "'dataset' is a required query parameter.", status_code=400
+ )
+
+ try:
+ return JSONResponse(get_info(dataset))
+ except (Status400Error, Status404Error) as err:
+ log_error(err)
+ return PlainTextResponse(err.message, status_code=err.status_code)
+ # other exceptions will generate a 500 response
+
+
diff --git a/tests/queries/test_info.py b/tests/queries/test_info.py
new file mode 100644
index 00000000..b46793fb
--- /dev/null
+++ b/tests/queries/test_info.py
@@ -0,0 +1,47 @@
+import pytest
+
+
+from datasets_preview_backend.queries.info import (
+ Status400Error,
+ Status404Error,
+ get_info,
+)
+
+
+def test_get_info():
+ dataset = "glue"
+ response = get_info(dataset)
+ assert "dataset" in response
+ assert response["dataset"] == dataset
+ assert "info" in response
+ info = response["info"]
+ assert len(list(info.keys())) == 12
+ assert "cola" in info
+
+
+
+def test_script_error():
+ # raises "ModuleNotFoundError: No module named 'datasets_modules.datasets.br-quad-2'", which should be caught and raised as DatasetBuilderScriptError
+ with pytest.raises(Status400Error):
+ get_info("piEsposito/br-quad-2.0")
+
+
+def test_no_dataset():
+ # the dataset does not exist
+ with pytest.raises(Status404Error):
+ get_info("doesnotexist")
+
+
+def test_no_dataset_no_script():
+ # the dataset does not contain a script
+ with pytest.raises(Status404Error):
+ get_info("AConsApart/anime_subtitles_DialoGPT")
+ # raises "ModuleNotFoundError: No module named 'datasets_modules.datasets.Test'", which should be caught and raised as DatasetBuilderScriptError
+ with pytest.raises(Status404Error):
+ get_info("TimTreasure4/Test")
+
+
+def test_no_dataset_bad_script_name():
+ # the dataset script name is incorrect
+ with pytest.raises(Status404Error):
+ get_info("Cropinky/rap_lyrics_english")
|
|
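The commit above wires `/info` into the Starlette app and documents it in the README. A minimal sketch of calling the new endpoint from a client, based on the README section added above (`requests` is an assumption here, not a project dependency; the host is the one used by the README examples):

import requests

# Hedged sketch: query the /info endpoint documented in the README diff above.
response = requests.get("http://54.158.211.3/info", params={"dataset": "glue"})
if response.status_code == 200:
    payload = response.json()
    # Per the README example, "info" maps each config name to its dataset_info dict.
    print(sorted(payload["info"].keys()))  # e.g. ['ax', 'cola', ..., 'wnli']
elif response.status_code in (400, 404):
    # 400: erroneous dataset script; 404: dataset not found (see routes.py above).
    print(response.text)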
e92753e8ddda4997bd155f0ee1294a3eacefb78f
|
Sylvain Lesage
| 2021-08-19T12:45:06 |
docs: ✏️ mention release tags in the upgrade doc
|
diff --git a/INSTALL.md b/INSTALL.md
index eea452ee..d1c1b5d6 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -23,0 +24,2 @@ git merge
+# or better
+# git checkout 0.2.0 # <- the latest release tag (https://github.com/huggingface/datasets-preview-backend/releases/latest)
|
|
4040c9567364e2951de80991f19d60894d155dba
|
Sylvain Lesage
| 2021-08-19T12:34:11 |
chore: 🤖 version bump
|
diff --git a/pyproject.toml b/pyproject.toml
index ff60ac85..4eaca019 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3 +3 @@ name = "datasets-preview-backend"
-version = "0.1.0"
+version = "0.2.0"
|
|
d452c65736d609e57fe31ff6de6f4c74d5c7f9bc
|
Sylvain Lesage
| 2021-08-19T12:30:32 |
feat: 🎸 upgrade datasets to current master version
|
diff --git a/poetry.lock b/poetry.lock
index 7a41abed..3d1b606b 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -266,0 +267,8 @@ unicode_backport = ["unicodedata2"]
+[[package]]
+name = "clang"
+version = "5.0"
+description = "libclang python bindings"
+category = "main"
+optional = false
+python-versions = "*"
+
@@ -288 +296 @@ name = "conllu"
-version = "4.4"
+version = "4.4.1"
@@ -292 +300 @@ optional = false
-python-versions = "*"
+python-versions = ">=3.6"
@@ -304,2 +312,2 @@ name = "datasets"
-version = "1.11.0"
-description = "HuggingFace/Datasets is an open library of NLP datasets."
+version = "1.11.1.dev0"
+description = ""
@@ -308,0 +317 @@ python-versions = "*"
+develop = false
@@ -314 +323 @@ fsspec = ">=2021.05.0"
-huggingface-hub = "<0.1.0"
+huggingface_hub = "<0.1.0"
@@ -321 +330 @@ requests = ">=2.19.0"
-tqdm = ">=4.42"
+tqdm = ">=4.62.1"
@@ -326,6 +334,0 @@ apache-beam = ["apache-beam (>=2.26.0)"]
-benchmarks = ["numpy (==1.18.5)", "tensorflow (==2.3.0)", "torch (==1.6.0)", "transformers (==3.0.2)"]
-dev = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "fsspec", "moto[server,s3] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "black (==21.4b0)", "flake8 (==3.7.9)", "isort", "pyyaml (>=5.3.1)", "importlib-resources"]
-docs = ["docutils (==0.16.0)", "recommonmark", "sphinx (==3.1.2)", "sphinx-markdown-tables", "sphinx-rtd-theme (==0.4.3)", "sphinxext-opengraph (==0.4.1)", "sphinx-copybutton", "fsspec", "s3fs"]
-quality = ["black (==21.4b0)", "flake8 (==3.7.9)", "isort", "pyyaml (>=5.3.1)"]
-s3 = ["fsspec", "boto3 (==1.16.43)", "botocore (==1.19.52)", "s3fs"]
-streaming = ["aiohttp"]
@@ -334 +336,0 @@ tensorflow_gpu = ["tensorflow-gpu (>=2.2.0)"]
-tests = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "fsspec", "moto[server,s3] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "importlib-resources"]
@@ -335,0 +338,12 @@ torch = ["torch"]
+s3 = ["fsspec", "boto3 (==1.16.43)", "botocore (==1.19.52)", "s3fs"]
+streaming = ["aiohttp"]
+tests = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[server,s3] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert_score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests_file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)"]
+quality = ["black (==21.4b0)", "flake8 (==3.7.9)", "isort", "pyyaml (>=5.3.1)"]
+benchmarks = ["numpy (==1.18.5)", "tensorflow (==2.3.0)", "torch (==1.6.0)", "transformers (==3.0.2)"]
+docs = ["docutils (==0.16.0)", "recommonmark", "sphinx (==3.1.2)", "sphinx-markdown-tables", "sphinx-rtd-theme (==0.4.3)", "sphinxext-opengraph (==0.4.1)", "sphinx-copybutton", "fsspec", "s3fs"]
+
+[package.source]
+type = "git"
+url = "https://github.com/huggingface/datasets.git"
+reference = "b9fb8b2567aecfb14ad0bc31b59329f573eb35df"
+resolved_reference = "b9fb8b2567aecfb14ad0bc31b59329f573eb35df"
@@ -437 +451 @@ name = "google-auth"
-version = "1.34.0"
+version = "1.35.0"
@@ -482 +496 @@ name = "grpcio"
-version = "1.34.1"
+version = "1.39.0"
@@ -492 +506 @@ six = ">=1.5.2"
-protobuf = ["grpcio-tools (>=1.34.1)"]
+protobuf = ["grpcio-tools (>=1.39.0)"]
@@ -611,2 +625,2 @@ url = "https://github.com/kpu/kenlm/archive/master.zip"
-name = "keras-nightly"
-version = "2.5.0.dev2021032900"
+name = "keras"
+version = "2.6.0"
@@ -637,2 +651,2 @@ name = "kss"
-version = "2.5.1"
-description = "Split Korean text into sentences using heuristic algorithm using pure python"
+version = "2.6.0"
+description = "Korean sentence splitter"
@@ -846 +860 @@ name = "pandas"
-version = "1.3.1"
+version = "1.3.2"
@@ -1015 +1029 @@ name = "pyppmd"
-version = "0.15.2"
+version = "0.16.1"
@@ -1025 +1039 @@ fuzzer = ["atheris", "hypothesis"]
-test = ["pytest", "pytest-benchmark", "pytest-cov", "psutil", "hypothesis", "coverage[toml] (>=5.2)"]
+test = ["pytest (>=6.0)", "pytest-benchmark", "pytest-cov", "pytest-timeout", "hypothesis", "coverage[toml] (>=5.2)"]
@@ -1085 +1099 @@ name = "regex"
-version = "2021.7.6"
+version = "2021.8.3"
@@ -1182 +1196 @@ name = "tensorboard"
-version = "2.5.0"
+version = "2.6.0"
@@ -1186 +1200 @@ optional = false
-python-versions = ">= 2.7, != 3.0.*, != 3.1.*"
+python-versions = ">=3.6"
@@ -1219 +1233 @@ name = "tensorflow"
-version = "2.5.0"
+version = "2.6.0"
@@ -1227,0 +1242 @@ astunparse = ">=1.6.3,<1.7.0"
+clang = ">=5.0,<6.0"
@@ -1231 +1246 @@ google-pasta = ">=0.2,<1.0"
-grpcio = ">=1.34.0,<1.35.0"
+grpcio = ">=1.37.0,<2.0"
@@ -1233 +1248 @@ h5py = ">=3.1.0,<3.2.0"
-keras-nightly = ">=2.5.0.dev,<2.6.0"
+keras = ">=2.6,<3.0"
@@ -1239,2 +1254,2 @@ six = ">=1.15.0,<1.16.0"
-tensorboard = ">=2.5,<3.0"
-tensorflow-estimator = ">=2.5.0rc0,<2.6.0"
+tensorboard = ">=2.6,<3.0"
+tensorflow-estimator = ">=2.6,<3.0"
@@ -1247 +1262 @@ name = "tensorflow-estimator"
-version = "2.5.0"
+version = "2.6.0"
@@ -1290 +1305 @@ name = "tomli"
-version = "1.2.0"
+version = "1.2.1"
@@ -1298 +1313 @@ name = "tqdm"
-version = "4.62.0"
+version = "4.62.1"
@@ -1314 +1329 @@ name = "transformers"
-version = "4.9.1"
+version = "4.9.2"
@@ -1391 +1406 @@ name = "ujson"
-version = "4.0.2"
+version = "4.1.0"
@@ -1490 +1505 @@ python-versions = "^3.8"
-content-hash = "9ee62f3717c20b8179c7d81cb2e17f9c7487314611152c3ce14b470812d7fc80"
+content-hash = "b19f9516833c7d7b59c4142dd1cb4137dbee62cb85391c0b578e26e61a69e9e1"
@@ -1639,0 +1655,7 @@ brotli = [
+ {file = "Brotli-1.0.9-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:9744a863b489c79a73aba014df554b0e7a0fc44ef3f8a0ef2a52919c7d155031"},
+ {file = "Brotli-1.0.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a72661af47119a80d82fa583b554095308d6a4c356b2a554fdc2799bc19f2a43"},
+ {file = "Brotli-1.0.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ee83d3e3a024a9618e5be64648d6d11c37047ac48adff25f12fa4226cf23d1c"},
+ {file = "Brotli-1.0.9-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:19598ecddd8a212aedb1ffa15763dd52a388518c4550e615aed88dc3753c0f0c"},
+ {file = "Brotli-1.0.9-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:44bb8ff420c1d19d91d79d8c3574b8954288bdff0273bf788954064d260d7ab0"},
+ {file = "Brotli-1.0.9-cp310-cp310-win32.whl", hash = "sha256:26d168aac4aaec9a4394221240e8a5436b5634adc3cd1cdf637f6645cecbf181"},
+ {file = "Brotli-1.0.9-cp310-cp310-win_amd64.whl", hash = "sha256:622a231b08899c864eb87e85f81c75e7b9ce05b001e59bbfbf43d4a71f5f32b2"},
@@ -1647,0 +1670 @@ brotli = [
+ {file = "Brotli-1.0.9-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87fdccbb6bb589095f413b1e05734ba492c962b4a45a13ff3408fa44ffe6479b"},
@@ -1652,0 +1676 @@ brotli = [
+ {file = "Brotli-1.0.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29d1d350178e5225397e28ea1b7aca3648fcbab546d20e7475805437bfb0a130"},
@@ -1654,0 +1679 @@ brotli = [
+ {file = "Brotli-1.0.9-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e16eb9541f3dd1a3e92b89005e37b1257b157b7256df0e36bd7b33b50be73bcb"},
@@ -1657,0 +1683 @@ brotli = [
+ {file = "Brotli-1.0.9-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a674ac10e0a87b683f4fa2b6fa41090edfd686a6524bd8dedbd6138b309175c"},
@@ -1659,0 +1686 @@ brotli = [
+ {file = "Brotli-1.0.9-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2aad0e0baa04517741c9bb5b07586c642302e5fb3e75319cb62087bd0995ab19"},
@@ -1662,0 +1690 @@ brotli = [
+ {file = "Brotli-1.0.9-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bf919756d25e4114ace16a8ce91eb340eb57a08e2c6950c3cebcbe3dff2a5e7"},
@@ -1664,0 +1693,2 @@ brotli = [
+ {file = "Brotli-1.0.9-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9749a124280a0ada4187a6cfd1ffd35c350fb3af79c706589d98e088c5044267"},
+ {file = "Brotli-1.0.9-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:76ffebb907bec09ff511bb3acc077695e2c32bc2142819491579a695f77ffd4d"},
@@ -1764,0 +1795,4 @@ charset-normalizer = [
+clang = [
+ {file = "clang-5.0-py2-none-any.whl", hash = "sha256:b9301dff507041b5019b30ae710b78b0552c1ca1d4441b8dfa93c2e85078a5f8"},
+ {file = "clang-5.0.tar.gz", hash = "sha256:ceccae97eda0225a5b44d42ffd61102e248325c2865ca53e4407746464a5333a"},
+]
@@ -1774,2 +1808,2 @@ conllu = [
- {file = "conllu-4.4-py2.py3-none-any.whl", hash = "sha256:fe7e3547bc2beec8a0af8076cd564040dff7feec4ef20779a63a395e59e8116f"},
- {file = "conllu-4.4.tar.gz", hash = "sha256:37b812ef3e30168232239d65564e257975c3399ec5d7fca9915a52b44bdc6553"},
+ {file = "conllu-4.4.1-py2.py3-none-any.whl", hash = "sha256:d17db3fb4884e4d221cd1e333d897257db7605ce58c76ff16294d3af9ffadb1c"},
+ {file = "conllu-4.4.1.tar.gz", hash = "sha256:0029fb83ca225dd7e9ac342aabdb4717c0227d3b1d4497abf97b7dbc2cc04dd1"},
@@ -1783,4 +1817 @@ crcmod = [
-datasets = [
- {file = "datasets-1.11.0-py3-none-any.whl", hash = "sha256:603612b018794e33d8f0655235731bc139b141cb8f864c2f29140940da16955f"},
- {file = "datasets-1.11.0.tar.gz", hash = "sha256:3b01bf12951903e83b528d41129876426eb3a5fbcaf2645552283330528c92bf"},
-]
+datasets = []
@@ -1836,2 +1867,2 @@ google-auth = [
- {file = "google-auth-1.34.0.tar.gz", hash = "sha256:f1094088bae046fb06f3d1a3d7df14717e8d959e9105b79c57725bd4e17597a2"},
- {file = "google_auth-1.34.0-py2.py3-none-any.whl", hash = "sha256:bd6aa5916970a823e76ffb3d5c3ad3f0bedafca0a7fa53bc15149ab21cb71e05"},
+ {file = "google-auth-1.35.0.tar.gz", hash = "sha256:b7033be9028c188ee30200b204ea00ed82ea1162e8ac1df4aa6ded19a191d88e"},
+ {file = "google_auth-1.35.0-py2.py3-none-any.whl", hash = "sha256:997516b42ecb5b63e8d80f5632c1a61dddf41d2a4c2748057837e06e00014258"},
@@ -1849,46 +1880,51 @@ grpcio = [
- {file = "grpcio-1.34.1-cp27-cp27m-macosx_10_10_x86_64.whl", hash = "sha256:5c4402fd8ce28e2847112105591139dc121c8980770f683eb781be1568a64097"},
- {file = "grpcio-1.34.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:c6f756c11144c7ecb51b87f0d60a4b72e05635b9f24ddfa004286ab0c8527fa0"},
- {file = "grpcio-1.34.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:ec6d1b3daed886a73e40b4dc553474ef415acc111e913d7324cc2c6b0ba9efe0"},
- {file = "grpcio-1.34.1-cp27-cp27m-win32.whl", hash = "sha256:d757bc8bb12f07014dde55a04b5261c94828b605cf0726d02d491c3dc71aa6bb"},
- {file = "grpcio-1.34.1-cp27-cp27m-win_amd64.whl", hash = "sha256:f74cb93cd090b07528cf586a18628370e5780c08e0239f4af796f60a5e773568"},
- {file = "grpcio-1.34.1-cp27-cp27mu-linux_armv7l.whl", hash = "sha256:c4355fa382dfc71c130dc3eccd8ae606a13e1729be2a77b6c44cd5a130d0c616"},
- {file = "grpcio-1.34.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:f1a8048428a7a1e5b12322b3ee44ee0bb8e1bea1d67f08fa1813c455f3ef638c"},
- {file = "grpcio-1.34.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:0bd906496b9dd3751b9e5cacc7ceb25a57c16ce2aa67315b85ee86a4ba7246f1"},
- {file = "grpcio-1.34.1-cp35-cp35m-linux_armv7l.whl", hash = "sha256:5e488a40ebeb883117aa0dba2cea410ef2ab545a2403b2ac9101e62d42808c71"},
- {file = "grpcio-1.34.1-cp35-cp35m-macosx_10_10_intel.whl", hash = "sha256:98c06f0f7feeca736cc98f3f46b9b74c5f5fdc5febfc7d72728d1895c57be87f"},
- {file = "grpcio-1.34.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:90a4799c15b8b5aa587f65650a0cea28ea88bcd2c5fdf4f1adb2b8b7b4e77a5e"},
- {file = "grpcio-1.34.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:121af89d0b9ba1d47c738242783675009dd4e9067359481e4b743eb9e5886682"},
- {file = "grpcio-1.34.1-cp35-cp35m-manylinux2014_i686.whl", hash = "sha256:1be193803c706f78d0df12c817eaf2415fb4d39472fa00d860700e6c7a99f8f7"},
- {file = "grpcio-1.34.1-cp35-cp35m-manylinux2014_x86_64.whl", hash = "sha256:9e465a1d594a9a5f4252c4abbb93909c42768bee5fbfcd18098d60bf06a35573"},
- {file = "grpcio-1.34.1-cp35-cp35m-win32.whl", hash = "sha256:8b16d14160b7fd8bc43600be70e0da677d17dd8aafb5a258bbda996fe410320e"},
- {file = "grpcio-1.34.1-cp35-cp35m-win_amd64.whl", hash = "sha256:8a543209ab606dd55c58dc218be8e8619214607f03717dded78c7d27f1d05ba5"},
- {file = "grpcio-1.34.1-cp36-cp36m-linux_armv7l.whl", hash = "sha256:f74f270550df347a18f839331f84838b938c8923a9e13a6fa7cc69c79087a686"},
- {file = "grpcio-1.34.1-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:163a2cf7f4df3ff0a04f49e634526e3d88f02393a7ebf8f34a2134c88b06322e"},
- {file = "grpcio-1.34.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:11735ac4efd53691afeb36d006e20db9b7d4b6f3356c751f32d5747aee38fa4c"},
- {file = "grpcio-1.34.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:79bda20756e2fc7236b94468ffcce4b516953f946a80b7ea883f89d9e9b25a41"},
- {file = "grpcio-1.34.1-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:1857f88b351e2382aa57ed892960361a8b71acca4aa1b90998007b4177f15114"},
- {file = "grpcio-1.34.1-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:6f81fbf9f830e20aee93480305877f73f15bfa58fa87433eb331696be47ae7ba"},
- {file = "grpcio-1.34.1-cp36-cp36m-win32.whl", hash = "sha256:ff8aef869c2e9de65c3a693406f7d1200d87e6d541d096eae69f98e7f301fa60"},
- {file = "grpcio-1.34.1-cp36-cp36m-win_amd64.whl", hash = "sha256:ece7459c182e00ca90b2e5823940a552651b5eb3acdeee9350377ddb44d9c412"},
- {file = "grpcio-1.34.1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:7924ef3a898f6ff985540ee5d8c7554f0c925dc7668c3d63461600ea50b39658"},
- {file = "grpcio-1.34.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:b5e96ca83d5c34c9b60d8951e52492b0d9d072c3fe38a1c19765932e121036ce"},
- {file = "grpcio-1.34.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:fe9360347a3f4f2ec6923d8afb03a9194f3f14e054cb09e75e8346af9c0aa9f6"},
- {file = "grpcio-1.34.1-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:cadc09c9bd24ecf3ba7ae55b5a741f7de694a8843e97e82a7c3fa2e6e81e0f9a"},
- {file = "grpcio-1.34.1-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:5971e6dfcfa0ebeb0df2d15383e1b53fa36208198c8aff9a4eed5ece2a6d4571"},
- {file = "grpcio-1.34.1-cp37-cp37m-win32.whl", hash = "sha256:a181092b534e996e36d0c0216d81280d4942322170c823b2fb84ec4597dc0bd5"},
- {file = "grpcio-1.34.1-cp37-cp37m-win_amd64.whl", hash = "sha256:2b97cdd4582445ad7bd441f5f3c57d838bcdc518a05713dab0c7f4b945afb39e"},
- {file = "grpcio-1.34.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:ff760c5ce73c177851864e8caaf75467eaf06c1b6857b21e1789658375e720fb"},
- {file = "grpcio-1.34.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:fd58ea88dd5439e03c6587f0b672db1627ec8ed47be312c74632650dfed33c2e"},
- {file = "grpcio-1.34.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:f6fee4445cffb45593b4c1d9bb0bc7922e77ec846a1237e2e744b1223d69c863"},
- {file = "grpcio-1.34.1-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:cd4da71e105088b1a7e629d1b033f16d87dec08524d0e4f5d77982af6fe1b6c2"},
- {file = "grpcio-1.34.1-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:9d43849d8925ec24bf121bccd941a13d4e8c2cffdfa769a04a6d4ed38c6b88a2"},
- {file = "grpcio-1.34.1-cp38-cp38-win32.whl", hash = "sha256:696f0de4d47f738063432bbbcecd07f78256864f0839e41369458421f539f00a"},
- {file = "grpcio-1.34.1-cp38-cp38-win_amd64.whl", hash = "sha256:8fff784ec5d12252a7cc0ab6f1a3206861b94e45ee0ebeba2439bd10a6db2f1a"},
- {file = "grpcio-1.34.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:ed8ac4f76cbbef5dc54594cb7bf6fbb985f5be66abcb1f9da8142500e4d76492"},
- {file = "grpcio-1.34.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:8dad4184e4669672e126de26776eba8e3db4914660b4a0a6c7edbdbcf3e2f05f"},
- {file = "grpcio-1.34.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:011e9b5e47cb9d2a808e8c2dd5ae86df085d5879d9e8095a24631a32c577f231"},
- {file = "grpcio-1.34.1-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:49ffc5bb78b201db24d8d1644193beb50a896c3cb35b259b4fb9c44dba18585f"},
- {file = "grpcio-1.34.1-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:cfe0e015cb8db5a27a92621fdd9dc8e69b2f7130db326601802e6ff36626deff"},
- {file = "grpcio-1.34.1-cp39-cp39-win32.whl", hash = "sha256:809732f300fa8093b40f843c36f6f78423ffb40493098185bc4a96bd67126db5"},
- {file = "grpcio-1.34.1-cp39-cp39-win_amd64.whl", hash = "sha256:96dc85c059f15390beb7ac6bf075d1e4cf72e8f5c9b6c37ea179b7cc579816fd"},
- {file = "grpcio-1.34.1.tar.gz", hash = "sha256:1c746a3cd8a830d8d916a9d0476a786aaa98c5cc2a096344af2be955e439f8ac"},
+ {file = "grpcio-1.39.0-cp27-cp27m-macosx_10_10_x86_64.whl", hash = "sha256:4163e022f365406be2da78db890035463371effea172300ce5af8a768142baf3"},
+ {file = "grpcio-1.39.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:02e8a8b41db8e13df53078355b439363e4ac46d0ac9a8a461a39e42829e2bcf8"},
+ {file = "grpcio-1.39.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:050901a5baa6c4ca445e1781ef4c32d864f965ccec70c46cd5ad92d15e282c6a"},
+ {file = "grpcio-1.39.0-cp27-cp27m-win32.whl", hash = "sha256:1ab44dde4e1b225d3fc873535ca6e642444433131dd2891a601b75fb46c87c11"},
+ {file = "grpcio-1.39.0-cp27-cp27m-win_amd64.whl", hash = "sha256:25731b2c20a4ed51bea7e3952d5e83d408a5df32d03c7553457b2e6eb8bcb16c"},
+ {file = "grpcio-1.39.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:a2733994b05ee5382da1d0378f6312b72c5cb202930c7fa20c794a24e96a1a34"},
+ {file = "grpcio-1.39.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:4039645b8b5d19064766f3a6fa535f1db52a61c4d4de97a6a8945331a354d527"},
+ {file = "grpcio-1.39.0-cp35-cp35m-macosx_10_10_intel.whl", hash = "sha256:7b95b3329446408e2fe6db9b310d263303fa1a94649d08ec1e1cc12506743d26"},
+ {file = "grpcio-1.39.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:2a4308875b9b986000513c6b04c2e7424f436a127f15547036c42d3cf8289374"},
+ {file = "grpcio-1.39.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:4b3fcc1878a1a5b71e1ecdfe82c74f7cd9eadaa43e25be0d67676dcec0c9d39f"},
+ {file = "grpcio-1.39.0-cp35-cp35m-manylinux2014_i686.whl", hash = "sha256:6d51be522b573cec14798d4742efaa69d234bedabce122fec2d5489abb3724d4"},
+ {file = "grpcio-1.39.0-cp35-cp35m-manylinux2014_x86_64.whl", hash = "sha256:43c57987e526d1b893b85099424387b22de6e3eee4ea7188443de8d657d11cc0"},
+ {file = "grpcio-1.39.0-cp35-cp35m-win32.whl", hash = "sha256:cd2e39a199bcbefb3f4b9fa6677c72b0e67332915550fed3bd7c28b454bf917d"},
+ {file = "grpcio-1.39.0-cp35-cp35m-win_amd64.whl", hash = "sha256:5628e7cc69079159f9465388ff21fde1e1a780139f76dd99d319119d45156f45"},
+ {file = "grpcio-1.39.0-cp36-cp36m-linux_armv7l.whl", hash = "sha256:3c14e2087f809973d5ee8ca64f772a089ead0167286f3f21fdda8b6029b50abb"},
+ {file = "grpcio-1.39.0-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:d5a105f5a595b89a0e394e5b147430b115333d07c55efb0c0eddc96055f0d951"},
+ {file = "grpcio-1.39.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:366b6b35b3719c5570588e21d866460c5666ae74e3509c2a5a73ca79997abdaf"},
+ {file = "grpcio-1.39.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:544e1c1a133b43893e03e828c8325be5b82e20d3b0ef0ee3942d32553052a1b5"},
+ {file = "grpcio-1.39.0-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:a659f7c634cacfcf14657687a9fa3265b0a1844b1c19d140f3b66aebfba1a66b"},
+ {file = "grpcio-1.39.0-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:b0ff14dd872030e6b2fce8a6811642bd30d93833f794d3782c7e9eb2f01234cc"},
+ {file = "grpcio-1.39.0-cp36-cp36m-manylinux_2_24_aarch64.whl", hash = "sha256:2a958ad794292e12d8738a06754ebaf71662e635a89098916c18715b27ca2b5b"},
+ {file = "grpcio-1.39.0-cp36-cp36m-win32.whl", hash = "sha256:ed845ba6253c4032d5a01b7fb9db8fe80299e9a437e695a698751b0b191174be"},
+ {file = "grpcio-1.39.0-cp36-cp36m-win_amd64.whl", hash = "sha256:b236eb4b50d83754184b248b8b1041bb1546287fff7618c4b7001b9f257bb903"},
+ {file = "grpcio-1.39.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:27e2c6213fc04e71a862bacccb51f3c8e722255933f01736ace183e92d860ee6"},
+ {file = "grpcio-1.39.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:5127f4ba1f52fda28037ae465cf4b0e5fabe89d5ac1d64d15b073b46b7db5e16"},
+ {file = "grpcio-1.39.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:a6211150765cc2343e69879dfb856718b0f7477a4618b5f9a8f6c3ee84c047c0"},
+ {file = "grpcio-1.39.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:691f5b3a75f072dfb7b093f46303f493b885b7a42f25a831868ffaa22ee85f9d"},
+ {file = "grpcio-1.39.0-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:c8fe430add656b92419f6cd0680b64fbe6347c831d89a7788324f5037dfb3359"},
+ {file = "grpcio-1.39.0-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:3cccf470fcaab65a1b0a826ff34bd7c0861eb82ed957a83c6647a983459e4ecd"},
+ {file = "grpcio-1.39.0-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:2bc7eebb405aac2d7eecfaa881fd73b489f99c01470d7193b4431a6ce199b9c3"},
+ {file = "grpcio-1.39.0-cp37-cp37m-win32.whl", hash = "sha256:52100d800390d58492ed1093de6faccd957de6fc29b1a0e5948c84f275d9228f"},
+ {file = "grpcio-1.39.0-cp37-cp37m-win_amd64.whl", hash = "sha256:20f57c5d09a36e0d0c8fe16ee1905f4307edb1d04f6034b56320f7fbc1a1071a"},
+ {file = "grpcio-1.39.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:6ba6ad60009da2258cf15a72c51b7e0c2f58c8da517e97550881e488839e56c6"},
+ {file = "grpcio-1.39.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:a1fb9936b86b5efdea417fe159934bcad82a6f8c6ab7d1beec4bf3a78324d975"},
+ {file = "grpcio-1.39.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:46cfb0f2b757673bfd36ab4b0e3d61988cc1a0d47e0597e91462dcbef7528f35"},
+ {file = "grpcio-1.39.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:f2621c82fbbff1496993aa5fbf60e235583c7f970506e818671ad52000b6f310"},
+ {file = "grpcio-1.39.0-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:e98aca5cfe05ca29950b3d99006b9ddb54fde6451cd12cb2db1443ae3b9fa076"},
+ {file = "grpcio-1.39.0-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:8ed1e52ad507a54d20e6aaedf4b3edcab18cc12031eafe6de898f97513d8997b"},
+ {file = "grpcio-1.39.0-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:3c57fa7fec932767bc553bfb956759f45026890255bd232b2f797c3bc4dfeba2"},
+ {file = "grpcio-1.39.0-cp38-cp38-win32.whl", hash = "sha256:88dbef504b491b96e3238a6d5360b04508c34c62286080060c85fddd3caf7137"},
+ {file = "grpcio-1.39.0-cp38-cp38-win_amd64.whl", hash = "sha256:cffdccc94e63710dd6ead01849443390632c8e0fec52dc26e4fddf9f28ac9280"},
+ {file = "grpcio-1.39.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:43e0f5c49f985c94332794aa6c4f15f3a1ced336f0c6a6c8946c67b5ab111ae9"},
+ {file = "grpcio-1.39.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:dc3a24022a90c1754e54315009da6f949b48862c1d06daa54f9a28f89a5efacb"},
+ {file = "grpcio-1.39.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:476fa94ba8efb09213baabd757f6f93e839794d8ae0eaa371347d6899e8f57a0"},
+ {file = "grpcio-1.39.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:46d510a7af777d2f38ef4c1d25491add37cad24143012f3eebe72dc5c6d0fc4c"},
+ {file = "grpcio-1.39.0-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:5091b4a5ee8454a8f0c8ac45946ca25d6142c3be4b1fba141f1d62a6e0b5c696"},
+ {file = "grpcio-1.39.0-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:de83a045005703e7b9e67b61c38bb72cd49f68d9d2780d2c675353a3a3f2816f"},
+ {file = "grpcio-1.39.0-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:4258b778ce09ffa3b7c9a26971c216a34369e786771afbf4f98afe223f27d248"},
+ {file = "grpcio-1.39.0-cp39-cp39-win32.whl", hash = "sha256:c44958a24559f875d902d5c1acb0ae43faa5a84f6120d1d0d800acb52f96516e"},
+ {file = "grpcio-1.39.0-cp39-cp39-win_amd64.whl", hash = "sha256:2068a2b896ac67103c4a5453d5435fafcbb1a2f41eaf25148d08780096935cee"},
+ {file = "grpcio-1.39.0.tar.gz", hash = "sha256:57974361a459d6fe04c9ae0af1845974606612249f467bbd2062d963cb90f407"},
@@ -1944,2 +1980,2 @@ kenlm = []
-keras-nightly = [
- {file = "keras_nightly-2.5.0.dev2021032900-py2.py3-none-any.whl", hash = "sha256:6ba70f738f4008222de7e7fdd5b2b18c48c49b897a9fca54c844854e25964011"},
+keras = [
+ {file = "keras-2.6.0-py2.py3-none-any.whl", hash = "sha256:504af5656a9829fe803ce48a8580ef16916e89906aceddad9e098614269437e7"},
@@ -1952 +1988 @@ kss = [
- {file = "kss-2.5.1-py3-none-any.whl", hash = "sha256:29801a0ac9c6872cadc4cb08f8e451fa4abe844de9973b2f50ed41b6a92c82f9"},
+ {file = "kss-2.6.0-py3-none-any.whl", hash = "sha256:fedbdcd0bfc33111d7817866dd60346dab79f9f1ca5bab0026c4ee40e5941b0c"},
@@ -1965,0 +2002,2 @@ lxml = [
+ {file = "lxml-4.6.3-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:64812391546a18896adaa86c77c59a4998f33c24788cadc35789e55b727a37f4"},
+ {file = "lxml-4.6.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:c1a40c06fd5ba37ad39caa0b3144eb3772e813b5fb5b084198a985431c2f1e8d"},
@@ -2127,19 +2165,19 @@ pandas = [
- {file = "pandas-1.3.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1ee8418d0f936ff2216513aa03e199657eceb67690995d427a4a7ecd2e68f442"},
- {file = "pandas-1.3.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d9acfca191140a518779d1095036d842d5e5bc8e8ad8b5eaad1aff90fe1870d"},
- {file = "pandas-1.3.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e323028ab192fcfe1e8999c012a0fa96d066453bb354c7e7a4a267b25e73d3c8"},
- {file = "pandas-1.3.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9d06661c6eb741ae633ee1c57e8c432bb4203024e263fe1a077fa3fda7817fdb"},
- {file = "pandas-1.3.1-cp37-cp37m-win32.whl", hash = "sha256:23c7452771501254d2ae23e9e9dac88417de7e6eff3ce64ee494bb94dc88c300"},
- {file = "pandas-1.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7150039e78a81eddd9f5a05363a11cadf90a4968aac6f086fd83e66cf1c8d1d6"},
- {file = "pandas-1.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5c09a2538f0fddf3895070579082089ff4ae52b6cb176d8ec7a4dacf7e3676c1"},
- {file = "pandas-1.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:905fc3e0fcd86b0a9f1f97abee7d36894698d2592b22b859f08ea5a8fe3d3aab"},
- {file = "pandas-1.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ee927c70794e875a59796fab8047098aa59787b1be680717c141cd7873818ae"},
- {file = "pandas-1.3.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c976e023ed580e60a82ccebdca8e1cc24d8b1fbb28175eb6521025c127dab66"},
- {file = "pandas-1.3.1-cp38-cp38-win32.whl", hash = "sha256:22f3fcc129fb482ef44e7df2a594f0bd514ac45aabe50da1a10709de1b0f9d84"},
- {file = "pandas-1.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:45656cd59ae9745a1a21271a62001df58342b59c66d50754390066db500a8362"},
- {file = "pandas-1.3.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:114c6789d15862508900a25cb4cb51820bfdd8595ea306bab3b53cd19f990b65"},
- {file = "pandas-1.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:527c43311894aff131dea99cf418cd723bfd4f0bcf3c3da460f3b57e52a64da5"},
- {file = "pandas-1.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb3b33dde260b1766ea4d3c6b8fbf6799cee18d50a2a8bc534cf3550b7c819a"},
- {file = "pandas-1.3.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c28760932283d2c9f6fa5e53d2f77a514163b9e67fd0ee0879081be612567195"},
- {file = "pandas-1.3.1-cp39-cp39-win32.whl", hash = "sha256:be12d77f7e03c40a2466ed00ccd1a5f20a574d3c622fe1516037faa31aa448aa"},
- {file = "pandas-1.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:9e1fe6722cbe27eb5891c1977bca62d456c19935352eea64d33956db46139364"},
- {file = "pandas-1.3.1.tar.gz", hash = "sha256:341935a594db24f3ff07d1b34d1d231786aa9adfa84b76eab10bf42907c8aed3"},
+ {file = "pandas-1.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ba7ceb8abc6dbdb1e34612d1173d61e4941f1a1eb7e6f703b2633134ae6a6c89"},
+ {file = "pandas-1.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fcb71b1935249de80e3a808227189eee381d4d74a31760ced2df21eedc92a8e3"},
+ {file = "pandas-1.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa54dc1d3e5d004a09ab0b1751473698011ddf03e14f1f59b84ad9a6ac630975"},
+ {file = "pandas-1.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34ced9ce5d5b17b556486da7256961b55b471d64a8990b56e67a84ebeb259416"},
+ {file = "pandas-1.3.2-cp37-cp37m-win32.whl", hash = "sha256:a56246de744baf646d1f3e050c4653d632bc9cd2e0605f41051fea59980e880a"},
+ {file = "pandas-1.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:53b17e4debba26b7446b1e4795c19f94f0c715e288e08145e44bdd2865e819b3"},
+ {file = "pandas-1.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f07a9745ca075ae73a5ce116f5e58f691c0dc9de0bff163527858459df5c176f"},
+ {file = "pandas-1.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9e8e0ce5284ebebe110efd652c164ed6eab77f5de4c3533abc756302ee77765"},
+ {file = "pandas-1.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59a78d7066d1c921a77e3306aa0ebf6e55396c097d5dfcc4df8defe3dcecb735"},
+ {file = "pandas-1.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:132def05e73d292c949b02e7ef873debb77acc44a8b119d215921046f0c3a91d"},
+ {file = "pandas-1.3.2-cp38-cp38-win32.whl", hash = "sha256:69e1b2f5811f46827722fd641fdaeedb26002bd1e504eacc7a8ec36bdc25393e"},
+ {file = "pandas-1.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:7996d311413379136baf0f3cf2a10e331697657c87ced3f17ac7c77f77fe34a3"},
+ {file = "pandas-1.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1738154049062156429a5cf2fd79a69c9f3fa4f231346a7ec6fd156cd1a9a621"},
+ {file = "pandas-1.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cce01f6d655b4add966fcd36c32c5d1fe84628e200626b3f5e2f40db2d16a0f"},
+ {file = "pandas-1.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1099e2a0cd3a01ec62cca183fc1555833a2d43764950ef8cb5948c8abfc51014"},
+ {file = "pandas-1.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0cd5776be891331a3e6b425b5abeab9596abea18435c5982191356f9b24ae731"},
+ {file = "pandas-1.3.2-cp39-cp39-win32.whl", hash = "sha256:66a95361b81b4ba04b699ecd2416b0591f40cd1e24c60a8bfe0d19009cfa575a"},
+ {file = "pandas-1.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:89f40e5d21814192802421df809f948247d39ffe171e45fe2ab4abf7bd4279d8"},
+ {file = "pandas-1.3.2.tar.gz", hash = "sha256:cbcb84d63867af3411fa063af3de64902665bb5b3d40b25b2059e40603594e87"},
@@ -2435,27 +2473,27 @@ pyppmd = [
- {file = "pyppmd-0.15.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:c4bc7eee8dc4da2d87c6a59796db16afc7c910fefc13b719d9feb61341958a7d"},
- {file = "pyppmd-0.15.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2192c7db9eee2945b259f05369f33da23e99e393cefd214d9de6feb1c882babf"},
- {file = "pyppmd-0.15.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c75daf1cc832fd417cddf6bf6532e22f3c20bcb095e35413367f02af547bdb96"},
- {file = "pyppmd-0.15.2-cp36-cp36m-win32.whl", hash = "sha256:999784c10ba6b9db071c571ef037c85f5e291e89377c84644b0ae3771424c78b"},
- {file = "pyppmd-0.15.2-cp36-cp36m-win_amd64.whl", hash = "sha256:48e85e74d710ddf66775beeab8ae75bb1d8765a10c0a0e1f3137ecb69daa1a34"},
- {file = "pyppmd-0.15.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5b23f28c2052cf4abb821e1eaab973867c9aec5c9ad3f4483e99e2ce88b4d959"},
- {file = "pyppmd-0.15.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64620b677fc4cdfdfbe8b0364e11362634f378088af443be0d184c82e83fe440"},
- {file = "pyppmd-0.15.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:236c36fcad93c653aca37cefb63df4710d4450833bee1fe9a334dad70cfdde9b"},
- {file = "pyppmd-0.15.2-cp37-cp37m-win32.whl", hash = "sha256:64eaae44fe7c90e598027fb3e7095b9e2586764e7368a70ba0ba37dafd8e1994"},
- {file = "pyppmd-0.15.2-cp37-cp37m-win_amd64.whl", hash = "sha256:178cf8f3a9909e43524a0bbf0c458cc535f59e68046903e49eab038917bfd644"},
- {file = "pyppmd-0.15.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5782bd9724d04e70a724caf6f78bda31d6c4426f0ab1a659165d4b6d7e2d11cc"},
- {file = "pyppmd-0.15.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d56df5e8c65f7b28e821c2450a5f9840ff06b386848c909e7dcf5449a55db8c0"},
- {file = "pyppmd-0.15.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f5167ab2487a1ff7761374cab3e6ded1ccb3f97807150a8f9ac6bc6f0c35138"},
- {file = "pyppmd-0.15.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33daaa90356912c12fae442de4d3227aa94ee51f9b69700109bdca9433491d79"},
- {file = "pyppmd-0.15.2-cp38-cp38-win32.whl", hash = "sha256:a51c057597da7b517cb2d51d440472c7dd5f2014e0e150f7b1aed8a4eb0e392c"},
- {file = "pyppmd-0.15.2-cp38-cp38-win_amd64.whl", hash = "sha256:8f914f31b27130d1e61cea70c7ad133b4d0c0209fb85d9218c3513083092b913"},
- {file = "pyppmd-0.15.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8db10325726311aed07b1a9aa7c3b2f477f9d5d721b77c2e98e9293494977980"},
- {file = "pyppmd-0.15.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:61e225adb18b9531e6daeb97cf292241aaa17690308d89e799049bda48a1c957"},
- {file = "pyppmd-0.15.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90255e1dc5eb04b7302c498a5acb0c1929ee1bbfc6346519b32c2058fa33dcab"},
- {file = "pyppmd-0.15.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e642c5f65af8997c51041a64a56fab1ba5b382d910e6c4f8aa1ce0de497faa9f"},
- {file = "pyppmd-0.15.2-cp39-cp39-win32.whl", hash = "sha256:d72bb209ceaa0d23708d28c8ebd54e2bdbec9bb8c7d6a819460c3bea671fb061"},
- {file = "pyppmd-0.15.2-cp39-cp39-win_amd64.whl", hash = "sha256:61fa1df0a912da6abdf9c181cc1ab95821b545e812c3e9d016a271dc4eaab57d"},
- {file = "pyppmd-0.15.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:dee5ad99dd66b5daeb780fad6267f83834cfcb4377ba667a6e1f162450bab370"},
- {file = "pyppmd-0.15.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4286f0b3cb0b313b2dfd0ae4b6e301c144957856eccfa039038afa793e81853d"},
- {file = "pyppmd-0.15.2-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f77aecfc364188e38b476de637a48cb3985d3e13d9fe11a23a86857e8c30a4d7"},
- {file = "pyppmd-0.15.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:7c497fa447795bbb9e0b1be811e8085c3e6856b62b5957898bd6475c2758b1f3"},
- {file = "pyppmd-0.15.2.tar.gz", hash = "sha256:17195786082a473f271ad2e4bcc3bd224fcff44a53f085026ebb16a65f2c92f3"},
+ {file = "pyppmd-0.16.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:41c8091b9117a2a76e6eb5fa4d7c770a37d904cce13d7d3141ec3504cac6e387"},
+ {file = "pyppmd-0.16.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62d4beb6c862a7348a65cd9f5d931f7ad34be5d1ced8eb80dd1ab1823f361190"},
+ {file = "pyppmd-0.16.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c57220b05b23c838fa0524303561700bc763e24a2b112b9029285346ab26ab5"},
+ {file = "pyppmd-0.16.1-cp36-cp36m-win32.whl", hash = "sha256:3651319e05f60d8de465565abb96fb90e79ab6f4c07be51b1da4d4cf4226332d"},
+ {file = "pyppmd-0.16.1-cp36-cp36m-win_amd64.whl", hash = "sha256:a9a340bad7f99fa833f60e5dd8f2b81789bab61100a4894760a1c1fb0fecc86f"},
+ {file = "pyppmd-0.16.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:78c9007b78c8cc8fb7e7b619b3267b98b946482bc4c037af7f7152f644fd7f1a"},
+ {file = "pyppmd-0.16.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9f0cecf64b00731c835b08f606f6753e754b94870355c6ebf34fde8231027ab"},
+ {file = "pyppmd-0.16.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d91ca120425c36176115999fd03c77ed6b94e8d6e4b64ac5d138ad35eb314d59"},
+ {file = "pyppmd-0.16.1-cp37-cp37m-win32.whl", hash = "sha256:b489e4ca01f411b3df66bd8aad0165dffa8f5eb9447e3510e9e2200b8c5d954f"},
+ {file = "pyppmd-0.16.1-cp37-cp37m-win_amd64.whl", hash = "sha256:d5fe66a5d8757802ac5eb6887122a31d92a7444e4cfc5c37fd87237543cac27f"},
+ {file = "pyppmd-0.16.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b22135c6c7c594b80b447ad99a3a9ca77e3a6c43fd4670e8c920ba5936fe6e12"},
+ {file = "pyppmd-0.16.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4fb5b764f736270d438201b5aa9e9deaf6966c2a53036e14fd6e39399942c50d"},
+ {file = "pyppmd-0.16.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edf250e02add94573258878049c51532057b34734804caadc8313e9eaa77dbf0"},
+ {file = "pyppmd-0.16.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83e54ad557e6287dc2ccb567a43a978cc1a98fc26d8361ab890094f4b483a94c"},
+ {file = "pyppmd-0.16.1-cp38-cp38-win32.whl", hash = "sha256:efed13003958bb4b6ad34ccb61962e0cedfdbfd4fcd89fcbeb70cfc7c8faafd8"},
+ {file = "pyppmd-0.16.1-cp38-cp38-win_amd64.whl", hash = "sha256:c4da7c342f2ffdd0e472503f6e75ddf02a26cfb95ff31d398275388bf8ffd891"},
+ {file = "pyppmd-0.16.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:24aa4af9c6df626e70b695a82b2ab0bab639da0dbfb6c87964c82e3f2e51daf6"},
+ {file = "pyppmd-0.16.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6de4fee717b5f4be27f773f152482f4b8c0884fbcee1a1bb8f962dd55ed43e98"},
+ {file = "pyppmd-0.16.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f653da1228f5f0294c0062d9069c0900543c5e10a7ffb5a3394720e0a5218ae7"},
+ {file = "pyppmd-0.16.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57e2c46ebd30200d7163dda477ca4e3bb7c8a541d07ed79ffffe33d5404b3388"},
+ {file = "pyppmd-0.16.1-cp39-cp39-win32.whl", hash = "sha256:9f68ecbe62166108f5b2a0c41370d447f6f5e348602deb4caff96dbb64b9b510"},
+ {file = "pyppmd-0.16.1-cp39-cp39-win_amd64.whl", hash = "sha256:3248300c3834161d2639a295788e05757fbbdcf41b3b5707973ef02ecdc5b741"},
+ {file = "pyppmd-0.16.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:245f02688d66d734ad23107ffdd1c5fc77ff38eeb01a8b523af151a52981bcc7"},
+ {file = "pyppmd-0.16.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25c8ffb568887f3c23b362b90a3e7b3d4a807e46adefdbbf8c6356153264dc65"},
+ {file = "pyppmd-0.16.1-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:80e9a02c14fad6ca3d0479e6b5a25063383ab89f70a48cc7251c1c6cf702fe56"},
+ {file = "pyppmd-0.16.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:8185d0324301bbcee1c26a9e05f58210ac415d777aec84ff8222f6278a007773"},
+ {file = "pyppmd-0.16.1.tar.gz", hash = "sha256:b3c8b8ac01b36da191277ccce897eefeffd71d79326be9161f28bd20a931f2d3"},
@@ -2553,41 +2591,33 @@ regex = [
- {file = "regex-2021.7.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e6a1e5ca97d411a461041d057348e578dc344ecd2add3555aedba3b408c9f874"},
- {file = "regex-2021.7.6-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:6afe6a627888c9a6cfbb603d1d017ce204cebd589d66e0703309b8048c3b0854"},
- {file = "regex-2021.7.6-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:ccb3d2190476d00414aab36cca453e4596e8f70a206e2aa8db3d495a109153d2"},
- {file = "regex-2021.7.6-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:ed693137a9187052fc46eedfafdcb74e09917166362af4cc4fddc3b31560e93d"},
- {file = "regex-2021.7.6-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:99d8ab206a5270c1002bfcf25c51bf329ca951e5a169f3b43214fdda1f0b5f0d"},
- {file = "regex-2021.7.6-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:b85ac458354165405c8a84725de7bbd07b00d9f72c31a60ffbf96bb38d3e25fa"},
- {file = "regex-2021.7.6-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:3f5716923d3d0bfb27048242a6e0f14eecdb2e2a7fac47eda1d055288595f222"},
- {file = "regex-2021.7.6-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5983c19d0beb6af88cb4d47afb92d96751fb3fa1784d8785b1cdf14c6519407"},
- {file = "regex-2021.7.6-cp36-cp36m-win32.whl", hash = "sha256:c92831dac113a6e0ab28bc98f33781383fe294df1a2c3dfd1e850114da35fd5b"},
- {file = "regex-2021.7.6-cp36-cp36m-win_amd64.whl", hash = "sha256:791aa1b300e5b6e5d597c37c346fb4d66422178566bbb426dd87eaae475053fb"},
- {file = "regex-2021.7.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:59506c6e8bd9306cd8a41511e32d16d5d1194110b8cfe5a11d102d8b63cf945d"},
- {file = "regex-2021.7.6-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:564a4c8a29435d1f2256ba247a0315325ea63335508ad8ed938a4f14c4116a5d"},
- {file = "regex-2021.7.6-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:59c00bb8dd8775473cbfb967925ad2c3ecc8886b3b2d0c90a8e2707e06c743f0"},
- {file = "regex-2021.7.6-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:9a854b916806c7e3b40e6616ac9e85d3cdb7649d9e6590653deb5b341a736cec"},
- {file = "regex-2021.7.6-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:db2b7df831c3187a37f3bb80ec095f249fa276dbe09abd3d35297fc250385694"},
- {file = "regex-2021.7.6-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:173bc44ff95bc1e96398c38f3629d86fa72e539c79900283afa895694229fe6a"},
- {file = "regex-2021.7.6-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:15dddb19823f5147e7517bb12635b3c82e6f2a3a6b696cc3e321522e8b9308ad"},
- {file = "regex-2021.7.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ddeabc7652024803666ea09f32dd1ed40a0579b6fbb2a213eba590683025895"},
- {file = "regex-2021.7.6-cp37-cp37m-win32.whl", hash = "sha256:f080248b3e029d052bf74a897b9d74cfb7643537fbde97fe8225a6467fb559b5"},
- {file = "regex-2021.7.6-cp37-cp37m-win_amd64.whl", hash = "sha256:d8bbce0c96462dbceaa7ac4a7dfbbee92745b801b24bce10a98d2f2b1ea9432f"},
- {file = "regex-2021.7.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:edd1a68f79b89b0c57339bce297ad5d5ffcc6ae7e1afdb10f1947706ed066c9c"},
- {file = "regex-2021.7.6-cp38-cp38-manylinux1_i686.whl", hash = "sha256:422dec1e7cbb2efbbe50e3f1de36b82906def93ed48da12d1714cabcd993d7f0"},
- {file = "regex-2021.7.6-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:cbe23b323988a04c3e5b0c387fe3f8f363bf06c0680daf775875d979e376bd26"},
- {file = "regex-2021.7.6-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:0eb2c6e0fcec5e0f1d3bcc1133556563222a2ffd2211945d7b1480c1b1a42a6f"},
- {file = "regex-2021.7.6-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:1c78780bf46d620ff4fff40728f98b8afd8b8e35c3efd638c7df67be2d5cddbf"},
- {file = "regex-2021.7.6-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:bc84fb254a875a9f66616ed4538542fb7965db6356f3df571d783f7c8d256edd"},
- {file = "regex-2021.7.6-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:598c0a79b4b851b922f504f9f39a863d83ebdfff787261a5ed061c21e67dd761"},
- {file = "regex-2021.7.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:875c355360d0f8d3d827e462b29ea7682bf52327d500a4f837e934e9e4656068"},
- {file = "regex-2021.7.6-cp38-cp38-win32.whl", hash = "sha256:e586f448df2bbc37dfadccdb7ccd125c62b4348cb90c10840d695592aa1b29e0"},
- {file = "regex-2021.7.6-cp38-cp38-win_amd64.whl", hash = "sha256:2fe5e71e11a54e3355fa272137d521a40aace5d937d08b494bed4529964c19c4"},
- {file = "regex-2021.7.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6110bab7eab6566492618540c70edd4d2a18f40ca1d51d704f1d81c52d245026"},
- {file = "regex-2021.7.6-cp39-cp39-manylinux1_i686.whl", hash = "sha256:4f64fc59fd5b10557f6cd0937e1597af022ad9b27d454e182485f1db3008f417"},
- {file = "regex-2021.7.6-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:89e5528803566af4df368df2d6f503c84fbfb8249e6631c7b025fe23e6bd0cde"},
- {file = "regex-2021.7.6-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:2366fe0479ca0e9afa534174faa2beae87847d208d457d200183f28c74eaea59"},
- {file = "regex-2021.7.6-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:f9392a4555f3e4cb45310a65b403d86b589adc773898c25a39184b1ba4db8985"},
- {file = "regex-2021.7.6-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:2bceeb491b38225b1fee4517107b8491ba54fba77cf22a12e996d96a3c55613d"},
- {file = "regex-2021.7.6-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:f98dc35ab9a749276f1a4a38ab3e0e2ba1662ce710f6530f5b0a6656f1c32b58"},
- {file = "regex-2021.7.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:319eb2a8d0888fa6f1d9177705f341bc9455a2c8aca130016e52c7fe8d6c37a3"},
- {file = "regex-2021.7.6-cp39-cp39-win32.whl", hash = "sha256:eaf58b9e30e0e546cdc3ac06cf9165a1ca5b3de8221e9df679416ca667972035"},
- {file = "regex-2021.7.6-cp39-cp39-win_amd64.whl", hash = "sha256:4c9c3155fe74269f61e27617529b7f09552fbb12e44b1189cebbdb24294e6e1c"},
- {file = "regex-2021.7.6.tar.gz", hash = "sha256:8394e266005f2d8c6f0bc6780001f7afa3ef81a7a2111fa35058ded6fce79e4d"},
+ {file = "regex-2021.8.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:8764a78c5464ac6bde91a8c87dd718c27c1cabb7ed2b4beaf36d3e8e390567f9"},
+ {file = "regex-2021.8.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4551728b767f35f86b8e5ec19a363df87450c7376d7419c3cac5b9ceb4bce576"},
+ {file = "regex-2021.8.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:577737ec3d4c195c4aef01b757905779a9e9aee608fa1cf0aec16b5576c893d3"},
+ {file = "regex-2021.8.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c856ec9b42e5af4fe2d8e75970fcc3a2c15925cbcc6e7a9bcb44583b10b95e80"},
+ {file = "regex-2021.8.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3835de96524a7b6869a6c710b26c90e94558c31006e96ca3cf6af6751b27dca1"},
+ {file = "regex-2021.8.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cea56288eeda8b7511d507bbe7790d89ae7049daa5f51ae31a35ae3c05408531"},
+ {file = "regex-2021.8.3-cp36-cp36m-win32.whl", hash = "sha256:a4eddbe2a715b2dd3849afbdeacf1cc283160b24e09baf64fa5675f51940419d"},
+ {file = "regex-2021.8.3-cp36-cp36m-win_amd64.whl", hash = "sha256:57fece29f7cc55d882fe282d9de52f2f522bb85290555b49394102f3621751ee"},
+ {file = "regex-2021.8.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a5c6dbe09aff091adfa8c7cfc1a0e83fdb8021ddb2c183512775a14f1435fe16"},
+ {file = "regex-2021.8.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff4a8ad9638b7ca52313d8732f37ecd5fd3c8e3aff10a8ccb93176fd5b3812f6"},
+ {file = "regex-2021.8.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b63e3571b24a7959017573b6455e05b675050bbbea69408f35f3cb984ec54363"},
+ {file = "regex-2021.8.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fbc20975eee093efa2071de80df7f972b7b35e560b213aafabcec7c0bd00bd8c"},
+ {file = "regex-2021.8.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:14caacd1853e40103f59571f169704367e79fb78fac3d6d09ac84d9197cadd16"},
+ {file = "regex-2021.8.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:bb350eb1060591d8e89d6bac4713d41006cd4d479f5e11db334a48ff8999512f"},
+ {file = "regex-2021.8.3-cp37-cp37m-win32.whl", hash = "sha256:18fdc51458abc0a974822333bd3a932d4e06ba2a3243e9a1da305668bd62ec6d"},
+ {file = "regex-2021.8.3-cp37-cp37m-win_amd64.whl", hash = "sha256:026beb631097a4a3def7299aa5825e05e057de3c6d72b139c37813bfa351274b"},
+ {file = "regex-2021.8.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:16d9eaa8c7e91537516c20da37db975f09ac2e7772a0694b245076c6d68f85da"},
+ {file = "regex-2021.8.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3905c86cc4ab6d71635d6419a6f8d972cab7c634539bba6053c47354fd04452c"},
+ {file = "regex-2021.8.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:937b20955806381e08e54bd9d71f83276d1f883264808521b70b33d98e4dec5d"},
+ {file = "regex-2021.8.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:28e8af338240b6f39713a34e337c3813047896ace09d51593d6907c66c0708ba"},
+ {file = "regex-2021.8.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c09d88a07483231119f5017904db8f60ad67906efac3f1baa31b9b7f7cca281"},
+ {file = "regex-2021.8.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:85f568892422a0e96235eb8ea6c5a41c8ccbf55576a2260c0160800dbd7c4f20"},
+ {file = "regex-2021.8.3-cp38-cp38-win32.whl", hash = "sha256:bf6d987edd4a44dd2fa2723fca2790f9442ae4de2c8438e53fcb1befdf5d823a"},
+ {file = "regex-2021.8.3-cp38-cp38-win_amd64.whl", hash = "sha256:8fe58d9f6e3d1abf690174fd75800fda9bdc23d2a287e77758dc0e8567e38ce6"},
+ {file = "regex-2021.8.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7976d410e42be9ae7458c1816a416218364e06e162b82e42f7060737e711d9ce"},
+ {file = "regex-2021.8.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9569da9e78f0947b249370cb8fadf1015a193c359e7e442ac9ecc585d937f08d"},
+ {file = "regex-2021.8.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459bbe342c5b2dec5c5223e7c363f291558bc27982ef39ffd6569e8c082bdc83"},
+ {file = "regex-2021.8.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4f421e3cdd3a273bace013751c345f4ebeef08f05e8c10757533ada360b51a39"},
+ {file = "regex-2021.8.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea212df6e5d3f60341aef46401d32fcfded85593af1d82b8b4a7a68cd67fdd6b"},
+ {file = "regex-2021.8.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a3b73390511edd2db2d34ff09aa0b2c08be974c71b4c0505b4a048d5dc128c2b"},
+ {file = "regex-2021.8.3-cp39-cp39-win32.whl", hash = "sha256:f35567470ee6dbfb946f069ed5f5615b40edcbb5f1e6e1d3d2b114468d505fc6"},
+ {file = "regex-2021.8.3-cp39-cp39-win_amd64.whl", hash = "sha256:bfa6a679410b394600eafd16336b2ce8de43e9b13f7fb9247d84ef5ad2b45e91"},
+ {file = "regex-2021.8.3.tar.gz", hash = "sha256:8935937dad2c9b369c3d932b0edbc52a62647c2afb2fafc0c280f14a8bf56a6a"},
@@ -2625 +2655 @@ tensorboard = [
- {file = "tensorboard-2.5.0-py3-none-any.whl", hash = "sha256:e167460085b6528956b33bab1c970c989cdce47a6616273880733f5e7bde452e"},
+ {file = "tensorboard-2.6.0-py3-none-any.whl", hash = "sha256:f7dac4cdfb52d14c9e3f74585ce2aaf8e6203620a864e51faf84988b09f7bbdb"},
@@ -2636,12 +2666,12 @@ tensorflow = [
- {file = "tensorflow-2.5.0-cp36-cp36m-macosx_10_11_x86_64.whl", hash = "sha256:7e1351ce05b897d5cf1042066b6929ca3f595a717849421ae92dbe8d6d2f1c74"},
- {file = "tensorflow-2.5.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:31a3ea994c336fc5a6ba0e6d61f131262b2c6dbff97e2b7473ff6da0cf9383f7"},
- {file = "tensorflow-2.5.0-cp36-cp36m-win_amd64.whl", hash = "sha256:c45059b42bca01ce441004abb965acf7838b40d12e036920063bd7ac540def9a"},
- {file = "tensorflow-2.5.0-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:616bc8094cb289b3bd21eded2196b0dba65bce53bad112efcaf2acb6f7d9e6a5"},
- {file = "tensorflow-2.5.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:739d25273ccc10fedc74517de099bd5b16a274d1295fad6bfef834ad28cc3401"},
- {file = "tensorflow-2.5.0-cp37-cp37m-win_amd64.whl", hash = "sha256:68b70ca7df7f5f8fbe3d7240e937b3ea8b1a25e51710f60293e7edada00257a2"},
- {file = "tensorflow-2.5.0-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:c46b1d1b0eec54577d7ba545e3951c9dd0355ca05a8eb776c95d9a3e22e7be9c"},
- {file = "tensorflow-2.5.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:34ab87aac9093de98cbba68d7e8dca9159c36acd06a03e5749c956c7ab08d9da"},
- {file = "tensorflow-2.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:46f10a2edc694bb54a2d869a65b5a09705dab1874a89b529990a943416ad48aa"},
- {file = "tensorflow-2.5.0-cp39-cp39-macosx_10_11_x86_64.whl", hash = "sha256:baebb9c95ef1815bb410317ad525dd3dbb26064fe95636b51486459b6536bc6e"},
- {file = "tensorflow-2.5.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:1ea003f9e11508d0336c242a2a3bc73aea205dd5b31892c3e1d7f5d0f0e60c0a"},
- {file = "tensorflow-2.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:4edec9b9f6ef8f1407762a3a6bd050173177f686d5ea6b59e91487b645173f73"},
+ {file = "tensorflow-2.6.0-cp36-cp36m-macosx_10_11_x86_64.whl", hash = "sha256:c67fad296a3a2133b7a14da5f06c9937e7911b02c5d7a3ff6ba52a1d79b6bc9e"},
+ {file = "tensorflow-2.6.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:8b5ce09ede0fe45ef100f4dc65cf3f46722194e75139f85d524058315e2ce9fa"},
+ {file = "tensorflow-2.6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:dea97f664246e185d79cbe40a86309527affd4232f06afa8a6500c4fc4b64a03"},
+ {file = "tensorflow-2.6.0-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:4716c9b25a61a2c79b1f253d1e114f1f8679241559c13ad18c657c626a7d5924"},
+ {file = "tensorflow-2.6.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:e45e026a9d08c89cecc1160d8248135e2fb79bdc3267328399e1fb25ce583bd6"},
+ {file = "tensorflow-2.6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:6e38b6969414d16afc560c58ca34e1328cc0a5dbd644b64e060f5be8a6653274"},
+ {file = "tensorflow-2.6.0-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:2a067d22a356c2cd4753bdd16ee492c55a610f5ebc52713e2954c642f070321c"},
+ {file = "tensorflow-2.6.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:2c9b8c6adc060acfcf805a2ea501db0124b679d95b522fd5983a4c110e8e0264"},
+ {file = "tensorflow-2.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:d6468e05552720100e8f94097feb770de320e4c8c244323a8746bd84e5ba4052"},
+ {file = "tensorflow-2.6.0-cp39-cp39-macosx_10_11_x86_64.whl", hash = "sha256:00b1af0a0c5c102db19caceffac4bd4e6c536e6d7512144c241a4ace4428e7c6"},
+ {file = "tensorflow-2.6.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:bc73ebdd30c48cfc27ba307271117e6dbb795b37396ed817b2fec9393380b115"},
+ {file = "tensorflow-2.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:bfb255c2b0400bc5b4060dda098d46cd7ddeb53b7cbac1dfa29435612cba828c"},
@@ -2650 +2680 @@ tensorflow-estimator = [
- {file = "tensorflow_estimator-2.5.0-py2.py3-none-any.whl", hash = "sha256:d1fe76dee8b1dcab865d807a0246da0a9c4a635b1eba6e9545bf216c3aad6955"},
+ {file = "tensorflow_estimator-2.6.0-py2.py3-none-any.whl", hash = "sha256:cf78528998efdb637ac0abaf525c929bf192767544eb24ae20d9266effcf5afd"},
@@ -2683,2 +2713,2 @@ tomli = [
- {file = "tomli-1.2.0-py3-none-any.whl", hash = "sha256:056f0376bf5a6b182c513f9582c1e5b0487265eb6c48842b69aa9ca1cd5f640a"},
- {file = "tomli-1.2.0.tar.gz", hash = "sha256:d60e681734099207a6add7a10326bc2ddd1fdc36c1b0f547d00ef73ac63739c2"},
+ {file = "tomli-1.2.1-py3-none-any.whl", hash = "sha256:8dd0e9524d6f386271a36b41dbf6c57d8e32fd96fd22b6584679dc569d20899f"},
+ {file = "tomli-1.2.1.tar.gz", hash = "sha256:a5b75cb6f3968abb47af1b40c1819dc519ea82bcc065776a866e8d74c5ca9442"},
@@ -2687,2 +2717,2 @@ tqdm = [
- {file = "tqdm-4.62.0-py2.py3-none-any.whl", hash = "sha256:706dea48ee05ba16e936ee91cb3791cd2ea6da348a0e50b46863ff4363ff4340"},
- {file = "tqdm-4.62.0.tar.gz", hash = "sha256:3642d483b558eec80d3c831e23953582c34d7e4540db86d9e5ed9dad238dabc6"},
+ {file = "tqdm-4.62.1-py2.py3-none-any.whl", hash = "sha256:07856e19a1fe4d2d9621b539d3f072fa88c9c1ef1f3b7dd4d4953383134c3164"},
+ {file = "tqdm-4.62.1.tar.gz", hash = "sha256:35540feeaca9ac40c304e916729e6b78045cbbeccd3e941b2868f09306798ac9"},
@@ -2691,2 +2721,2 @@ transformers = [
- {file = "transformers-4.9.1-py3-none-any.whl", hash = "sha256:86f3c46efecf114c6886d361c1d6cca14738f0e9d1effadb1e9252770cba55a0"},
- {file = "transformers-4.9.1.tar.gz", hash = "sha256:1c30e38b2e0da15e110d9bb9a627f78de9569b9c6036d6533baf783015c339be"},
+ {file = "transformers-4.9.2-py3-none-any.whl", hash = "sha256:ed64723efb6f67a77a60559bbb318009fdd94045724cd474a949f62c5c0b185e"},
+ {file = "transformers-4.9.2.tar.gz", hash = "sha256:ad79b0e4a9c9bfedbaaf5a36e8266bea69ab0437187f83ede602ef222d26d9d7"},
@@ -2701,21 +2731,21 @@ ujson = [
- {file = "ujson-4.0.2-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:e390df0dcc7897ffb98e17eae1f4c442c39c91814c298ad84d935a3c5c7a32fa"},
- {file = "ujson-4.0.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:84b1dca0d53b0a8d58835f72ea2894e4d6cf7a5dd8f520ab4cbd698c81e49737"},
- {file = "ujson-4.0.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:91396a585ba51f84dc71c8da60cdc86de6b60ba0272c389b6482020a1fac9394"},
- {file = "ujson-4.0.2-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:eb6b25a7670c7537a5998e695fa62ff13c7f9c33faf82927adf4daa460d5f62e"},
- {file = "ujson-4.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:f8aded54c2bc554ce20b397f72101737dd61ee7b81c771684a7dd7805e6cca0c"},
- {file = "ujson-4.0.2-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:30962467c36ff6de6161d784cd2a6aac1097f0128b522d6e9291678e34fb2b47"},
- {file = "ujson-4.0.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:fc51e545d65689c398161f07fd405104956ec27f22453de85898fa088b2cd4bb"},
- {file = "ujson-4.0.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:e6e90330670c78e727d6637bb5a215d3e093d8e3570d439fd4922942f88da361"},
- {file = "ujson-4.0.2-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:5e1636b94c7f1f59a8ead4c8a7bab1b12cc52d4c21ababa295ffec56b445fd2a"},
- {file = "ujson-4.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:e2cadeb0ddc98e3963bea266cc5b884e5d77d73adf807f0bda9eca64d1c509d5"},
- {file = "ujson-4.0.2-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:a214ba5a21dad71a43c0f5aef917cd56a2d70bc974d845be211c66b6742a471c"},
- {file = "ujson-4.0.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:0190d26c0e990c17ad072ec8593647218fe1c675d11089cd3d1440175b568967"},
- {file = "ujson-4.0.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:f273a875c0b42c2a019c337631bc1907f6fdfbc84210cc0d1fff0e2019bbfaec"},
- {file = "ujson-4.0.2-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:d3a87888c40b5bfcf69b4030427cd666893e826e82cc8608d1ba8b4b5e04ea99"},
- {file = "ujson-4.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:7333e8bc45ea28c74ae26157eacaed5e5629dbada32e0103c23eb368f93af108"},
- {file = "ujson-4.0.2-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:b3a6dcc660220539aa718bcc9dbd6dedf2a01d19c875d1033f028f212e36d6bb"},
- {file = "ujson-4.0.2-cp39-cp39-manylinux1_i686.whl", hash = "sha256:0ea07fe57f9157118ca689e7f6db72759395b99121c0ff038d2e38649c626fb1"},
- {file = "ujson-4.0.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:4d6d061563470cac889c0a9fd367013a5dbd8efc36ad01ab3e67a57e56cad720"},
- {file = "ujson-4.0.2-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b5c70704962cf93ec6ea3271a47d952b75ae1980d6c56b8496cec2a722075939"},
- {file = "ujson-4.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:aad6d92f4d71e37ea70e966500f1951ecd065edca3a70d3861b37b176dd6702c"},
- {file = "ujson-4.0.2.tar.gz", hash = "sha256:c615a9e9e378a7383b756b7e7a73c38b22aeb8967a8bfbffd4741f7ffd043c4d"},
+ {file = "ujson-4.1.0-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:148680f2bc6e52f71c56908b65f59b36a13611ac2f75a86f2cb2bce2b2c2588c"},
+ {file = "ujson-4.1.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1c2fb32976982e4e75ca0843a1e7b2254b8c5d8c45d979ebf2db29305b4fa31"},
+ {file = "ujson-4.1.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:971d4b450e689bfec8ad6b22060fb9b9bec1e0860dbdf0fa7cfe4068adbc5f58"},
+ {file = "ujson-4.1.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:f453480b275192ae40ef350a4e8288977f00b02e504ed34245ebd12d633620cb"},
+ {file = "ujson-4.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:f135db442e5470d9065536745968efc42a60233311c8509b9327bcd59a8821c7"},
+ {file = "ujson-4.1.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:2251fc9395ba4498cbdc48136a179b8f20914fa8b815aa9453b20b48ad120f43"},
+ {file = "ujson-4.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9005d0d952d0c1b3dff5cdb79df2bde35a3499e2de3f708a22c45bbb4089a1f6"},
+ {file = "ujson-4.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:117855246a9ea3f61f3b69e5ca1b1d11d622b3126f50a0ec08b577cb5c87e56e"},
+ {file = "ujson-4.1.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:989bed422e7e20c7ba740a4e1bbeb28b3b6324e04f023ea238a2e5449fc53668"},
+ {file = "ujson-4.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:44993136fd2ecade747b6db95917e4f015a3279e09a08113f70cbbd0d241e66a"},
+ {file = "ujson-4.1.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:9e962df227fd1d851ff095382a9f8432c2470c3ee640f02ae14231dc5728e6f3"},
+ {file = "ujson-4.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be6013cda610c5149fb80a84ee815b210aa2e7fe4edf1d2bce42c02336715208"},
+ {file = "ujson-4.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:41b7e5422184249b5b94d1571206f76e5d91e8d721ce51abe341a88f41dd6692"},
+ {file = "ujson-4.1.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:807bb0585f30a650ec981669827721ed3ee1ee24f2c6f333a64982a40eb66b82"},
+ {file = "ujson-4.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:d2955dd5cce0e76ba56786d647aaedca2cebb75eda9f0ec1787110c3646751a8"},
+ {file = "ujson-4.1.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:a873c93d43f9bd14d9e9a6d2c6eb7aae4aad9717fe40c748d0cd4b6ed7767c62"},
+ {file = "ujson-4.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e8fe9bbeca130debb10eea7910433a0714c8efc057fad36353feccb87c1d07f"},
+ {file = "ujson-4.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:81a49dbf176ae041fc86d2da564f5b9b46faf657306035632da56ecfd7203193"},
+ {file = "ujson-4.1.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1fb2455e62f20ab4a6d49f78b5dc4ff99c72fdab9466e761120e9757fa35f4d7"},
+ {file = "ujson-4.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:44db30b8fc52e70a6f67def11804f74818addafef0a65cd7f0abb98b7830920f"},
+ {file = "ujson-4.1.0.tar.gz", hash = "sha256:22b63ec4409f0d2f2c4c9d5aa331997e02470b7a15a3233f3cc32f2f9b92d58c"},
diff --git a/pyproject.toml b/pyproject.toml
index f68bb960..ff60ac85 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -9 +8,0 @@ python = "^3.8"
-datasets = {extras = ["streaming"], version = "^1.10.2"}
@@ -26,0 +26,2 @@ nltk = "^3.6.2"
+aiohttp = "^3.7.4"
+datasets = {extras = ["streaming"], git = "https://github.com/huggingface/datasets.git", rev = "b9fb8b2567aecfb14ad0bc31b59329f573eb35df"}
diff --git a/quality/test_datasets.py b/quality/test_datasets.py
index 328cd43f..9aaf3a46 100644
--- a/quality/test_datasets.py
+++ b/quality/test_datasets.py
@@ -1 +1 @@
-from datasets import list_datasets
+from datasets import list_datasets, disable_progress_bar
@@ -9 +9 @@ logging.disable(logging.CRITICAL)
-
+disable_progress_bar()
diff --git a/src/datasets_preview_backend/queries/rows.py b/src/datasets_preview_backend/queries/rows.py
index 5b8589f6..38239276 100644
--- a/src/datasets_preview_backend/queries/rows.py
+++ b/src/datasets_preview_backend/queries/rows.py
@@ -16 +15,0 @@ from datasets_preview_backend.exceptions import (
-
@@ -25,0 +25 @@ def extract_rows(dataset: str, config: str, split: str, num_rows: int):
+ rows = list(iterable_dataset.take(num_rows))
@@ -61 +60,0 @@ def extract_rows(dataset: str, config: str, split: str, num_rows: int):
- rows = list(iterable_dataset.take(num_rows))
diff --git a/tests/queries/test_configs.py b/tests/queries/test_configs.py
index f66f3dd1..ced98b6a 100644
--- a/tests/queries/test_configs.py
+++ b/tests/queries/test_configs.py
@@ -33,3 +32,0 @@ def test_script_error():
- # raises "ModuleNotFoundError: No module named 'datasets_modules.datasets.Test'", which should be caught and raised as DatasetBuilderScriptError
- with pytest.raises(Status400Error):
- get_configs("TimTreasure4/Test")
@@ -50,0 +48,3 @@ def test_no_dataset_no_script():
+ # raises "ModuleNotFoundError: No module named 'datasets_modules.datasets.Test'", which should be caught and raised as DatasetBuilderScriptError
+ with pytest.raises(Status404Error):
+ get_configs("TimTreasure4/Test")
diff --git a/tests/queries/test_rows.py b/tests/queries/test_rows.py
index 726c34f7..20f40836 100644
--- a/tests/queries/test_rows.py
+++ b/tests/queries/test_rows.py
@@ -47,0 +48,2 @@ def test_extract_unknown_config():
+ with pytest.raises(Status404Error):
+ extract_rows("TimTreasure4/Test", None, "train", 100)
@@ -55,5 +56,0 @@ def test_extract_unknown_split():
-def test_extract_bogus_dataset():
- with pytest.raises(Status400Error):
- extract_rows("TimTreasure4/Test", None, "train", 100)
-
-
diff --git a/tests/queries/test_splits.py b/tests/queries/test_splits.py
index a2456575..fb598719 100644
--- a/tests/queries/test_splits.py
+++ b/tests/queries/test_splits.py
@@ -42,6 +41,0 @@ def test_get_splits():
-def test_no_splits():
- # Due to https://github.com/huggingface/datasets/issues/2743
- with pytest.raises(Status400Error):
- get_splits("journalists_questions", "plain_text")
-
-
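The functional core of this dependency bump is easy to miss under the lock-file churn: `datasets` moves from a released version to a pinned git revision (for its streaming fixes), and `rows.py` now calls `take()` on the iterable dataset right after opening it instead of at the end of the function. A minimal sketch of that extraction path, assuming only the public `datasets` streaming API (names and error handling are simplified from the real `extract_rows`):

```python
# Sketch of streamed row extraction as reordered in rows.py above.
# load_dataset(..., streaming=True) returns an IterableDataset: nothing is
# downloaded up front, and take(num_rows) limits how much is actually fetched.
from datasets import load_dataset

def extract_first_rows(dataset: str, config: str, split: str, num_rows: int) -> dict:
    iterable_dataset = load_dataset(dataset, name=config, split=split, streaming=True)
    rows = list(iterable_dataset.take(num_rows))
    return {"dataset": dataset, "config": config, "split": split, "rows": rows}

if __name__ == "__main__":
    # e.g. the first two test rows of glue/ax, fetched without a full download
    print(extract_first_rows("glue", "ax", "test", 2))
```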
|
|
1a1fb17b2cc1739cafc8c25bdfb9744935acf3e9
|
Sylvain Lesage
| 2021-08-05T09:38:53 |
fix: 🐛 fix make arguments order
|
diff --git a/INSTALL.md b/INSTALL.md
index 302ebf76..eea452ee 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -124 +124 @@ Launch the app with pm2:
-PORT=8000 pm2 start --name datasets-preview-backend make -C /home/hf/datasets-preview-backend/ -- run
+PORT=8000 pm2 start --name datasets-preview-backend make -- -C /home/hf/datasets-preview-backend/ run
|
|
1911009b401a9489543cdb92751a09aa7762b4b1
|
Sylvain Lesage
| 2021-08-04T14:59:59 |
docs: ✏️ update install instructions
|
diff --git a/INSTALL.md b/INSTALL.md
index d91d60ea..302ebf76 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -16 +16,3 @@ pm2 logs
-To deploy a new version of datasets-preview-backend, first update the code
+To deploy a new version of datasets-preview-backend, first pause the monitor at https://betteruptime.com/team/14149/monitors/389098.
+
+Then update the code
@@ -37,0 +40,2 @@ Check if the app is accessible at http://54.158.211.3/healthcheck.
+Finally un-pause the monitor at https://betteruptime.com/team/14149/monitors/389098.
+
@@ -49 +53 @@ ipv4 (public): 54.158.211.3
-grafana:
+Grafana:
@@ -53,0 +58,4 @@ grafana:
+BetterUptime:
+
+- https://betteruptime.com/team/14149/monitors/389098
+
@@ -72,86 +79,0 @@ Configure nginx as a reverse-proxy to expose the application on the port 80:
-/: 10 GB
-/data: 100 GB
-
-ipv4: 172.30.0.73
-ipv6: fe80::1060:d6ff:feee:6d31
-ipv4 (public): 3.83.96.81
-
-````
-
-grafana:
-
-- https://grafana.huggingface.co/d/rYdddlPWk/node-exporter-full?orgId=2&refresh=1m&from=now-15m&to=now&var-DS_PROMETHEUS=HF%20Prometheus&var-job=node_exporter_metrics&var-node=tensorboard-launcher
-- https://grafana.huggingface.co/d/gBtAotjMk/use-method?orgId=2&var-DS_PROMETHEUS=HF%20Prometheus&var-node=tensorboard-launcher
-
-## Install tensorboard-launcher on the machine
-
-Install packages, logged as `hf`:
-
-```bash
-sudo apt install git-lfs nginx python-is-python3
-````
-
-Also [install poetry](https://python-poetry.org/docs/master/#installation).
-
-Install tensorboard-launcher:
-
-```bash
-cd
-# See https://github.blog/2013-09-03-two-factor-authentication/#how-does-it-work-for-command-line-git for authentication
-git clone https://github.com/huggingface/tensorboard-launcher.git
-cd tensorboard-launcher
-poetry install
-```
-
-Create a launcher script
-
-```bash
-vi ~/launch.sh
-```
-
-```bash
-#!/bin/bash
-cd /home/hf/tensorboard-launcher/
-TBL_NGINX_BASE_PATH=/data/nginx/tensorboard TBL_MODELS_BASE_PATH=/data/models make run
-```
-
-```bash
-chmod +x ~/launch.sh
-```
-
-Create a systemd service
-
-```bash
-sudo vi /etc/systemd/system/tensorboard.service
-```
-
-```
-[Unit]
-Description=Tensorboard Daemon
-After=network-online.target
-
-[Service]
-Type=simple
-
-User=hf
-Group=hf
-UMask=007
-
-ExecStart=/home/hf/launch.sh
-ExecStop=/bin/kill -9 $MAINPID
-
-Restart=on-failure
-
-# Configures the time to wait before service is stopped forcefully.
-TimeoutStopSec=10
-
-[Install]
-WantedBy=multi-user.target
-```
-
-```bash
-sudo systemctl enable tensorboard
-```
-
-Configure nginx
-
@@ -202 +124 @@ Launch the app with pm2:
-PORT=8000 pm2 start --name datasets-preview-backend make -- -C /home/hf/datasets-preview-backend/ run
+PORT=8000 pm2 start --name datasets-preview-backend make -C /home/hf/datasets-preview-backend/ -- run
|
|
2111daf36edbe63dd356dbdee40b8ee46a1dbedb
|
Sylvain Lesage
| 2021-08-03T14:24:28 |
docs: ✏️ format
|
diff --git a/README.md b/README.md
index 30703066..4e2bd12e 100644
--- a/README.md
+++ b/README.md
@@ -51 +51 @@ make watch
-Endpoint: `/healthcheck`
+> Ensure the app is running
@@ -55,2 +54,0 @@ Example: http://54.158.211.3/healthcheck
-> Ensure the app is running
-
@@ -67 +65 @@ Responses:
-Endpoint: `/configs`
+> Lists the [configurations](https://huggingface.co/docs/datasets/loading_datasets.html#selecting-a-configuration) names for the dataset
@@ -71,2 +68,0 @@ Example: http://54.158.211.3/configs?dataset=glue
-> Lists the [configurations](https://huggingface.co/docs/datasets/loading_datasets.html#selecting-a-configuration) names for the dataset
-
@@ -109 +105 @@ Responses:
-Endpoint: `/splits`
+> Lists the [splits](https://huggingface.co/docs/datasets/splits.html) names for a dataset config
@@ -113,2 +108,0 @@ Example: http://54.158.211.3/splits?dataset=glue&config=ax
-> Lists the [splits](https://huggingface.co/docs/datasets/splits.html) names for a dataset config
-
@@ -140 +134 @@ Responses:
-Endpoint: `/rows`
+> Extract the first [rows](https://huggingface.co/docs/datasets/splits.html) for a split of a dataset config
@@ -144,2 +137,0 @@ Example: http://54.158.211.3/rows?dataset=glue&config=ax&split=test&rows=2
-> Extract the first [rows](https://huggingface.co/docs/datasets/splits.html) for a split of a dataset config
-
|
|
635a852608cfc287f67621aab4cc642c796cdef4
|
Sylvain Lesage
| 2021-08-03T14:22:56 |
docs: ✏️ formatting detail
|
diff --git a/README.md b/README.md
index af514a51..30703066 100644
--- a/README.md
+++ b/README.md
@@ -49 +49 @@ make watch
-### healthcheck
+### /healthcheck
@@ -65 +65 @@ Responses:
-### configs
+### /configs
@@ -107 +107 @@ Responses:
-### splits
+### /splits
@@ -138 +138 @@ Responses:
-### rows
+### /rows
|
|
4d3943e88b4d0f5bd3caf33ce4c719a42bca9a38
|
Sylvain Lesage
| 2021-08-03T14:21:46 |
docs: ✏️ add endpoints doc
|
diff --git a/README.md b/README.md
index a0dc800b..af514a51 100644
--- a/README.md
+++ b/README.md
@@ -45,0 +46,138 @@ make watch
+
+## Endpoints
+
+### healthcheck
+
+Endpoint: `/healthcheck`
+
+Example: http://54.158.211.3/healthcheck
+
+> Ensure the app is running
+
+Method: `GET`
+
+Parameters: none
+
+Responses:
+
+- `200`: text content `ok`
+
+### configs
+
+Endpoint: `/configs`
+
+Example: http://54.158.211.3/configs?dataset=glue
+
+> Lists the [configurations](https://huggingface.co/docs/datasets/loading_datasets.html#selecting-a-configuration) names for the dataset
+
+Method: `GET`
+
+Parameters:
+
+- `dataset` (required): the dataset ID
+
+Responses:
+
+- `200`: JSON content with the following structure:
+
+ ```json
+ {
+ "dataset": "glue",
+ "configs": [
+ "cola",
+ "sst2",
+ "mrpc",
+ "qqp",
+ "stsb",
+ "mnli",
+ "mnli_mismatched",
+ "mnli_matched",
+ "qnli",
+ "rte",
+ "wnli",
+ "ax"
+ ]
+ }
+ ```
+
+- `400`: the dataset script is erroneous
+- `404`: the dataset cannot be found
+- `500`: application error
+
+### splits
+
+Endpoint: `/splits`
+
+Example: http://54.158.211.3/splits?dataset=glue&config=ax
+
+> Lists the [splits](https://huggingface.co/docs/datasets/splits.html) names for a dataset config
+
+Method: `GET`
+
+Parameters:
+
+- `dataset` (required): the dataset ID
+- `config`: the configuration name. It might be required, or not, depending on the dataset
+
+Responses:
+
+- `200`: JSON content with the following structure:
+
+ ```json
+ {
+ "dataset": "glue",
+ "config": "ax",
+ "splits": ["test"]
+ }
+ ```
+
+- `400`: the dataset script is erroneous
+- `404`: the dataset or config cannot be found
+- `500`: application error
+
+### rows
+
+Endpoint: `/rows`
+
+Example: http://54.158.211.3/rows?dataset=glue&config=ax&split=test&rows=2
+
+> Extract the first [rows](https://huggingface.co/docs/datasets/splits.html) for a split of a dataset config
+
+Method: `GET`
+
+Parameters:
+
+- `dataset` (required): the dataset ID
+- `config`: the configuration name. It might be required, or not, depending on the dataset
+- `split` (required): the split name
+- `rows`: the number of rows to extract. Defaults to 100.
+
+Responses:
+
+- `200`: JSON content with the following structure:
+
+ ```json
+ {
+ "dataset": "glue",
+ "config": "ax",
+ "split": "test",
+ "rows": [
+ {
+ "idx": 0,
+ "hypothesis": "The cat did not sit on the mat.",
+ "label": -1,
+ "premise": "The cat sat on the mat."
+ },
+ {
+ "idx": 1,
+ "hypothesis": "The cat sat on the mat.",
+ "label": -1,
+ "premise": "The cat did not sit on the mat."
+ }
+ ]
+ }
+ ```
+
+- `400`: the dataset script is erroneous, or the data cannot be obtained.
+- `404`: the dataset, config or script cannot be found
+- `500`: application error
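Taken together, the endpoint documentation added above reduces to plain HTTP GET calls. A client-side smoke test, sketched with `requests` against the address the README documents at this point in the history (status handling omitted; the field names come from the JSON examples above):

```python
# Hypothetical smoke test for the documented endpoints.
import requests

BASE = "http://54.158.211.3"  # address used in the README examples above

assert requests.get(f"{BASE}/healthcheck").text == "ok"

configs = requests.get(f"{BASE}/configs", params={"dataset": "glue"}).json()
print(configs["configs"])  # ["cola", "sst2", ..., "ax"]

rows = requests.get(
    f"{BASE}/rows",
    params={"dataset": "glue", "config": "ax", "split": "test", "rows": 2},
).json()
for row in rows["rows"]:
    print(row["idx"], row["premise"])
```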
|
|
f4e45983ffe6d978b970763d7afb20f7a8cc9213
|
Sylvain Lesage
| 2021-08-03T13:52:08 |
docs: ✏️ add install doc
|
diff --git a/INSTALL.md b/INSTALL.md
new file mode 100644
index 00000000..d91d60ea
--- /dev/null
+++ b/INSTALL.md
@@ -0,0 +1,212 @@
+# INSTALL
+
+datasets-preview-backend is installed on a virtual machine (ec2-54-158-211-3.compute-1.amazonaws.com).
+
+## Manage
+
+Use [pm2](https://pm2.keymetrics.io/docs/usage/quick-start/#cheatsheet) to manage the service.
+
+```bash
+pm2 list
+pm2 logs
+```
+
+## Upgrade
+
+To deploy a new version of datasets-preview-backend, first update the code
+
+```
+cd /home/hf/datasets-preview-backend/
+git fetch
+git merge
+```
+
+Install packages
+
+```
+make install
+```
+
+Restart
+
+```
+pm2 restart all
+```
+
+Check if the app is accessible at http://54.158.211.3/healthcheck.
+
+## Machine
+
+```bash
+ssh [email protected]
+
+/: 200 GB
+
+ipv4: 172.30.4.71
+ipv4 (public): 54.158.211.3
+```
+
+grafana:
+
+- https://grafana.huggingface.co/d/gBtAotjMk/use-method?orgId=2&var-DS_PROMETHEUS=HF%20Prometheus&var-node=data-preview
+- https://grafana.huggingface.co/d/rYdddlPWk/node-exporter-full?orgId=2&refresh=1m&var-DS_PROMETHEUS=HF%20Prometheus&var-job=node_exporter_metrics&var-node=data-preview&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B
+
+## Install
+
+Install packages, logged as `hf`:
+
+```bash
+sudo apt install python-is-python3 make nginx
+```
+
+Also install node and npm (with [nvm](https://github.com/nvm-sh/nvm)), then:
+
+```bash
+npm i -g pm2@latest
+```
+
+Also [install poetry](https://python-poetry.org/docs/master/#installation). Don't forget to add `poetry` to the `PATH` environment variable.
+
+Configure nginx as a reverse-proxy to expose the application on the port 80:
+
+/: 10 GB
+/data: 100 GB
+
+ipv4: 172.30.0.73
+ipv6: fe80::1060:d6ff:feee:6d31
+ipv4 (public): 3.83.96.81
+
+````
+
+grafana:
+
+- https://grafana.huggingface.co/d/rYdddlPWk/node-exporter-full?orgId=2&refresh=1m&from=now-15m&to=now&var-DS_PROMETHEUS=HF%20Prometheus&var-job=node_exporter_metrics&var-node=tensorboard-launcher
+- https://grafana.huggingface.co/d/gBtAotjMk/use-method?orgId=2&var-DS_PROMETHEUS=HF%20Prometheus&var-node=tensorboard-launcher
+
+## Install tensorboard-launcher on the machine
+
+Install packages, logged as `hf`:
+
+```bash
+sudo apt install git-lfs nginx python-is-python3
+````
+
+Also [install poetry](https://python-poetry.org/docs/master/#installation).
+
+Install tensorboard-launcher:
+
+```bash
+cd
+# See https://github.blog/2013-09-03-two-factor-authentication/#how-does-it-work-for-command-line-git for authentication
+git clone https://github.com/huggingface/tensorboard-launcher.git
+cd tensorboard-launcher
+poetry install
+```
+
+Create a launcher script
+
+```bash
+vi ~/launch.sh
+```
+
+```bash
+#!/bin/bash
+cd /home/hf/tensorboard-launcher/
+TBL_NGINX_BASE_PATH=/data/nginx/tensorboard TBL_MODELS_BASE_PATH=/data/models make run
+```
+
+```bash
+chmod +x ~/launch.sh
+```
+
+Create a systemd service
+
+```bash
+sudo vi /etc/systemd/system/tensorboard.service
+```
+
+```
+[Unit]
+Description=Tensorboard Daemon
+After=network-online.target
+
+[Service]
+Type=simple
+
+User=hf
+Group=hf
+UMask=007
+
+ExecStart=/home/hf/launch.sh
+ExecStop=/bin/kill -9 $MAINPID
+
+Restart=on-failure
+
+# Configures the time to wait before service is stopped forcefully.
+TimeoutStopSec=10
+
+[Install]
+WantedBy=multi-user.target
+```
+
+```bash
+sudo systemctl enable tensorboard
+```
+
+Configure nginx
+
+```bash
+sudo unlink /etc/nginx/sites-enabled/default
+sudo vi /etc/nginx/sites-available/reverse-proxy.conf
+```
+
+```bash
+server {
+ listen 80;
+ listen [::]:80;
+
+ access_log /var/log/nginx/reverse-access.log;
+ error_log /var/log/nginx/reverse-error.log;
+
+ location / {
+ proxy_pass http://localhost:8000/;
+ proxy_set_header Host $proxy_host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_buffering off;
+ proxy_http_version 1.1;
+ }
+}
+```
+
+```bash
+sudo ln -s /etc/nginx/sites-available/reverse-proxy.conf /etc/nginx/sites-enabled/reverse-proxy.conf
+sudo nginx -t # Test
+sudo systemctl reload nginx
+```
+
+Install datasets-preview-backend:
+
+```bash
+cd
+# See https://github.blog/2013-09-03-two-factor-authentication/#how-does-it-work-for-command-line-git for authentication
+git clone https://github.com/huggingface/datasets-preview-backend.git
+cd datasets-preview-backend
+make install
+```
+
+Launch the app with pm2:
+
+```bash
+PORT=8000 pm2 start --name datasets-preview-backend make -- -C /home/hf/datasets-preview-backend/ run
+```
+
+Check if the app is accessible at http://54.158.211.3/healthcheck.
+
+Finally, ensure that pm2 will restart on reboot (see https://pm2.keymetrics.io/docs/usage/startup/):
+
+```bash
+pm2 startup
+# and follow the instructions
+```
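The install doc above ends every deploy with a manual browser check of `/healthcheck`. A small probe script can stand in for that step; this is a sketch that assumes nothing beyond the endpoint returning the text `ok`:

```python
# Hypothetical post-deploy probe; retries cover the window while pm2 restarts
# the backend and nginx briefly answers with 502 or refuses connections.
import sys
import time

import requests

URL = "http://54.158.211.3/healthcheck"

for attempt in range(10):
    try:
        if requests.get(URL, timeout=5).text.strip() == "ok":
            print("app is up")
            sys.exit(0)
    except requests.RequestException:
        pass
    time.sleep(3)

sys.exit("healthcheck failed after 10 attempts")
```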
|
|
4a7c2e0196d6b92941379b0a7f0727a388203434
|
Sylvain Lesage
| 2021-08-03T13:42:34 |
chore: 🤖 don't expose the port on all the network interfaces
|
diff --git a/Makefile b/Makefile
index a90ff370..93b177f8 100644
--- a/Makefile
+++ b/Makefile
@@ -9 +9 @@ run:
- poetry run uvicorn --port $(PORT) --host 0.0.0.0 --factory datasets_preview_backend.main:app
+ poetry run uvicorn --port $(PORT) --factory datasets_preview_backend.main:app
@@ -18 +18 @@ watch:
- poetry run uvicorn --port $(PORT) --host 0.0.0.0 --factory --reload datasets_preview_backend.main:app
+ poetry run uvicorn --port $(PORT) --factory --reload datasets_preview_backend.main:app
|
|
5a831c0463f4a0cb2ffeda586b0b3d7e6e582275
|
Sylvain Lesage
| 2021-08-03T13:27:49 |
chore: 🤖 listen on all the network interfaces
|
diff --git a/Makefile b/Makefile
index 93b177f8..a90ff370 100644
--- a/Makefile
+++ b/Makefile
@@ -9 +9 @@ run:
- poetry run uvicorn --port $(PORT) --factory datasets_preview_backend.main:app
+ poetry run uvicorn --port $(PORT) --host 0.0.0.0 --factory datasets_preview_backend.main:app
@@ -18 +18 @@ watch:
- poetry run uvicorn --port $(PORT) --factory --reload datasets_preview_backend.main:app
+ poetry run uvicorn --port $(PORT) --host 0.0.0.0 --factory --reload datasets_preview_backend.main:app
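These two adjacent commits flip the `--host` flag back and forth; the programmatic form makes the trade-off explicit. A sketch using uvicorn's Python API, with flags equivalent to the Makefile's (not the project's actual launch path):

```python
# Binding 0.0.0.0 exposes the port on every network interface; the default
# 127.0.0.1 keeps it reachable only through the local nginx reverse proxy,
# which is what the later commit settles on.
import uvicorn

uvicorn.run(
    "datasets_preview_backend.main:app",
    port=8000,
    host="127.0.0.1",  # swap in "0.0.0.0" to listen on all interfaces
    factory=True,      # main:app is an app factory, per the Makefile above
)
```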
|
|
5680728ed18f3eec6f4a9586c68bc526c8ff8198
|
Sylvain Lesage
| 2021-08-03T13:01:09 |
docs: ✏️ details in the readme
|
diff --git a/README.md b/README.md
index e0b93c72..a0dc800b 100644
--- a/README.md
+++ b/README.md
@@ -14,0 +15,2 @@ The URL schema is `https://huggingface.co/datasets-preview/:datasetId/extract?ro
+Install with:
+
@@ -16,2 +17,0 @@ The URL schema is `https://huggingface.co/datasets-preview/:datasetId/extract?ro
-git clone [email protected]:huggingface/datasets-preview-backend.git
-cd datasets-preview-backend
@@ -22,0 +23,2 @@ make install
+Launch with:
+
@@ -24 +25,0 @@ make install
-cd datasets-preview-backend
@@ -43 +43,0 @@ To reload the application on file changes while developing, run:
-cd datasets-preview-backend
|
|
8122bbb0c58d9b1a141bb1cb94f1fe38d43443c0
|
Sylvain Lesage
| 2021-08-03T12:59:07 |
feat: 🎸 remove useless prefix `DPB_` to env variables
|
diff --git a/Makefile b/Makefile
index f942b86d..93b177f8 100644
--- a/Makefile
+++ b/Makefile
@@ -1 +1 @@
-DPB_PORT ?= 8000
+PORT ?= 8000
@@ -9 +9 @@ run:
- poetry run uvicorn --port $(DPB_PORT) --factory datasets_preview_backend.main:app
+ poetry run uvicorn --port $(PORT) --factory datasets_preview_backend.main:app
@@ -18 +18 @@ watch:
- poetry run uvicorn --port $(DPB_PORT) --factory --reload datasets_preview_backend.main:app
+ poetry run uvicorn --port $(PORT) --factory --reload datasets_preview_backend.main:app
diff --git a/README.md b/README.md
index 9d4bd617..e0b93c72 100644
--- a/README.md
+++ b/README.md
@@ -30,2 +30,3 @@ Set environment variables to configure the following aspects:
-- `DPB_EXTRACT_ROWS_LIMIT`: maximum number of rows in the extract. Defaults to `100`.
-- `DPB_PORT`: the port used by the app
+- `EXTRACT_ROWS_LIMIT`: maximum number of rows in the extract. Defaults to `100`.
+- `PORT`: the port used by the app. Defaults to `8000`.
+- `WEB_CONCURRENCY`: the number of workers. Defaults to `1`.
@@ -33 +34,7 @@ Set environment variables to configure the following aspects:
-To restart the application on file changes while developing, run:
+For example:
+
+```bash
+PORT=80 WEB_CONCURRENCY=4 make run
+```
+
+To reload the application on file changes while developing, run:
diff --git a/src/datasets_preview_backend/config.py b/src/datasets_preview_backend/config.py
index 1018acc2..f07f1c64 100644
--- a/src/datasets_preview_backend/config.py
+++ b/src/datasets_preview_backend/config.py
@@ -8 +8 @@ DEFAULT_EXTRACT_ROWS_LIMIT = 100
-PORT = get_int_value(d=os.environ, key="DPB_PORT", default=DEFAULT_PORT)
+PORT = get_int_value(d=os.environ, key="PORT", default=DEFAULT_PORT)
@@ -10 +10 @@ EXTRACT_ROWS_LIMIT = get_int_value(
- d=os.environ, key="DPB_EXTRACT_ROWS_LIMIT", default=DEFAULT_EXTRACT_ROWS_LIMIT
+ d=os.environ, key="EXTRACT_ROWS_LIMIT", default=DEFAULT_EXTRACT_ROWS_LIMIT
|
|
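`get_int_value` itself is not shown in this diff; a guess at a minimal equivalent, to make the config lines above self-explanatory:

```python
import os

DEFAULT_PORT = 8000

def get_int_value(d, key, default):
    # Hypothetical body (the real helper lives elsewhere in the repo): parse
    # d[key] as an int, falling back on a missing or malformed value.
    try:
        return int(d[key])
    except (KeyError, TypeError, ValueError):
        return default

PORT = get_int_value(d=os.environ, key="PORT", default=DEFAULT_PORT)
```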
33289e0bc58a731281abd936327b1f795a2cf101
|
Sylvain Lesage
| 2021-08-03T12:54:45 |
chore: 🤖 call uvicorn from the command line
|
diff --git a/Makefile b/Makefile
index b1d44a97..f942b86d 100644
--- a/Makefile
+++ b/Makefile
@@ -0,0 +1,2 @@
+DPB_PORT ?= 8000
+
@@ -7 +9 @@ run:
- poetry run python src/datasets_preview_backend/main.py
+ poetry run uvicorn --port $(DPB_PORT) --factory datasets_preview_backend.main:app
@@ -16 +18 @@ watch:
- poetry run watchmedo auto-restart -d src/datasets_preview_backend -p "*.py" -R python src/datasets_preview_backend/main.py
+ poetry run uvicorn --port $(DPB_PORT) --factory --reload datasets_preview_backend.main:app
diff --git a/poetry.lock b/poetry.lock
index b35f7d90..7a41abed 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -93,8 +92,0 @@ python-versions = "*"
-[[package]]
-name = "argh"
-version = "0.26.2"
-description = "An unobtrusive argparse wrapper with natural syntax"
-category = "dev"
-optional = false
-python-versions = "*"
-
@@ -266 +258 @@ name = "charset-normalizer"
-version = "2.0.3"
+version = "2.0.4"
@@ -335 +327 @@ benchmarks = ["numpy (==1.18.5)", "tensorflow (==2.3.0)", "torch (==1.6.0)", "tr
-dev = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "fsspec", "moto[s3,server] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "black (==21.4b0)", "flake8 (==3.7.9)", "isort", "pyyaml (>=5.3.1)", "importlib-resources"]
+dev = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "fsspec", "moto[server,s3] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "black (==21.4b0)", "flake8 (==3.7.9)", "isort", "pyyaml (>=5.3.1)", "importlib-resources"]
@@ -342 +334 @@ tensorflow_gpu = ["tensorflow-gpu (>=2.2.0)"]
-tests = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "fsspec", "moto[s3,server] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "importlib-resources"]
+tests = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "fsspec", "moto[server,s3] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "importlib-resources"]
@@ -464 +456 @@ name = "google-auth-oauthlib"
-version = "0.4.4"
+version = "0.4.5"
@@ -618 +609,0 @@ url = "https://github.com/kpu/kenlm/archive/master.zip"
-
@@ -1299 +1290 @@ name = "tomli"
-version = "1.1.0"
+version = "1.2.0"
@@ -1307 +1298 @@ name = "tqdm"
-version = "4.61.2"
+version = "4.62.0"
@@ -1435,15 +1425,0 @@ standard = ["websockets (>=9.1)", "httptools (>=0.2.0,<0.3.0)", "watchgod (>=0.6
-[[package]]
-name = "watchdog"
-version = "2.1.3"
-description = "Filesystem events monitoring"
-category = "dev"
-optional = false
-python-versions = ">=3.6"
-
-[package.dependencies]
-argh = {version = ">=0.24.1", optional = true, markers = "extra == \"watchmedo\""}
-PyYAML = {version = ">=3.10", optional = true, markers = "extra == \"watchmedo\""}
-
-[package.extras]
-watchmedo = ["PyYAML (>=3.10)", "argh (>=0.24.1)"]
-
@@ -1514 +1490 @@ python-versions = "^3.8"
-content-hash = "7fe0a20f2baf129ea8c51856ced71768ff64d01e0c0bea829bdd25cd968c6d80"
+content-hash = "9ee62f3717c20b8179c7d81cb2e17f9c7487314611152c3ce14b470812d7fc80"
@@ -1592,4 +1567,0 @@ appdirs = [
-argh = [
- {file = "argh-0.26.2-py2.py3-none-any.whl", hash = "sha256:a9b3aaa1904eeb78e32394cd46c6f37ac0fb4af6dc488daa58971bdc7d7fcaf3"},
- {file = "argh-0.26.2.tar.gz", hash = "sha256:e9535b8c84dc9571a48999094fda7f33e63c3f1b74f3e5f3ac0105a58405bb65"},
-]
@@ -1790,2 +1762,2 @@ charset-normalizer = [
- {file = "charset-normalizer-2.0.3.tar.gz", hash = "sha256:c46c3ace2d744cfbdebceaa3c19ae691f53ae621b39fd7570f59d14fb7f2fd12"},
- {file = "charset_normalizer-2.0.3-py3-none-any.whl", hash = "sha256:88fce3fa5b1a84fdcb3f603d889f723d1dd89b26059d0123ca435570e848d5e1"},
+ {file = "charset-normalizer-2.0.4.tar.gz", hash = "sha256:f23667ebe1084be45f6ae0538e4a5a865206544097e4e8bbcacf42cd02a348f3"},
+ {file = "charset_normalizer-2.0.4-py3-none-any.whl", hash = "sha256:0c8911edd15d19223366a194a513099a302055a962bca2cec0f54b8b63175d8b"},
@@ -1868,2 +1840,2 @@ google-auth-oauthlib = [
- {file = "google-auth-oauthlib-0.4.4.tar.gz", hash = "sha256:09832c6e75032f93818edf1affe4746121d640c625a5bef9b5c96af676e98eee"},
- {file = "google_auth_oauthlib-0.4.4-py2.py3-none-any.whl", hash = "sha256:0e92aacacfb94978de3b7972cf4b0f204c3cd206f74ddd0dc0b31e91164e6317"},
+ {file = "google-auth-oauthlib-0.4.5.tar.gz", hash = "sha256:4ab58e6c3dc6ccf112f921fcced40e5426fba266768986ea502228488276eaba"},
+ {file = "google_auth_oauthlib-0.4.5-py2.py3-none-any.whl", hash = "sha256:b5a1ce7c617d247ccb2dfbba9d4bfc734b41096803d854a2c52592ae80150a67"},
@@ -2711,2 +2683,2 @@ tomli = [
- {file = "tomli-1.1.0-py3-none-any.whl", hash = "sha256:f4a182048010e89cbec0ae4686b21f550a7f2903f665e34a6de58ec15424f919"},
- {file = "tomli-1.1.0.tar.gz", hash = "sha256:33d7984738f8bb699c9b0a816eb646a8178a69eaa792d258486776a5d21b8ca5"},
+ {file = "tomli-1.2.0-py3-none-any.whl", hash = "sha256:056f0376bf5a6b182c513f9582c1e5b0487265eb6c48842b69aa9ca1cd5f640a"},
+ {file = "tomli-1.2.0.tar.gz", hash = "sha256:d60e681734099207a6add7a10326bc2ddd1fdc36c1b0f547d00ef73ac63739c2"},
@@ -2715,2 +2687,2 @@ tqdm = [
- {file = "tqdm-4.61.2-py2.py3-none-any.whl", hash = "sha256:5aa445ea0ad8b16d82b15ab342de6b195a722d75fc1ef9934a46bba6feafbc64"},
- {file = "tqdm-4.61.2.tar.gz", hash = "sha256:8bb94db0d4468fea27d004a0f1d1c02da3cdedc00fe491c0de986b76a04d6b0a"},
+ {file = "tqdm-4.62.0-py2.py3-none-any.whl", hash = "sha256:706dea48ee05ba16e936ee91cb3791cd2ea6da348a0e50b46863ff4363ff4340"},
+ {file = "tqdm-4.62.0.tar.gz", hash = "sha256:3642d483b558eec80d3c831e23953582c34d7e4540db86d9e5ed9dad238dabc6"},
@@ -2759,23 +2730,0 @@ uvicorn = [
-watchdog = [
- {file = "watchdog-2.1.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9628f3f85375a17614a2ab5eac7665f7f7be8b6b0a2a228e6f6a2e91dd4bfe26"},
- {file = "watchdog-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:acc4e2d5be6f140f02ee8590e51c002829e2c33ee199036fcd61311d558d89f4"},
- {file = "watchdog-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:85b851237cf3533fabbc034ffcd84d0fa52014b3121454e5f8b86974b531560c"},
- {file = "watchdog-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a12539ecf2478a94e4ba4d13476bb2c7a2e0a2080af2bb37df84d88b1b01358a"},
- {file = "watchdog-2.1.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6fe9c8533e955c6589cfea6f3f0a1a95fb16867a211125236c82e1815932b5d7"},
- {file = "watchdog-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d9456f0433845e7153b102fffeb767bde2406b76042f2216838af3b21707894e"},
- {file = "watchdog-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fd8c595d5a93abd441ee7c5bb3ff0d7170e79031520d113d6f401d0cf49d7c8f"},
- {file = "watchdog-2.1.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0bcfe904c7d404eb6905f7106c54873503b442e8e918cc226e1828f498bdc0ca"},
- {file = "watchdog-2.1.3-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bf84bd94cbaad8f6b9cbaeef43080920f4cb0e61ad90af7106b3de402f5fe127"},
- {file = "watchdog-2.1.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b8ddb2c9f92e0c686ea77341dcb58216fa5ff7d5f992c7278ee8a392a06e86bb"},
- {file = "watchdog-2.1.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8805a5f468862daf1e4f4447b0ccf3acaff626eaa57fbb46d7960d1cf09f2e6d"},
- {file = "watchdog-2.1.3-py3-none-manylinux2014_armv7l.whl", hash = "sha256:3e305ea2757f81d8ebd8559d1a944ed83e3ab1bdf68bcf16ec851b97c08dc035"},
- {file = "watchdog-2.1.3-py3-none-manylinux2014_i686.whl", hash = "sha256:431a3ea70b20962e6dee65f0eeecd768cd3085ea613ccb9b53c8969de9f6ebd2"},
- {file = "watchdog-2.1.3-py3-none-manylinux2014_ppc64.whl", hash = "sha256:e4929ac2aaa2e4f1a30a36751160be391911da463a8799460340901517298b13"},
- {file = "watchdog-2.1.3-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:201cadf0b8c11922f54ec97482f95b2aafca429c4c3a4bb869a14f3c20c32686"},
- {file = "watchdog-2.1.3-py3-none-manylinux2014_s390x.whl", hash = "sha256:3a7d242a7963174684206093846537220ee37ba9986b824a326a8bb4ef329a33"},
- {file = "watchdog-2.1.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:54e057727dd18bd01a3060dbf5104eb5a495ca26316487e0f32a394fd5fe725a"},
- {file = "watchdog-2.1.3-py3-none-win32.whl", hash = "sha256:b5fc5c127bad6983eecf1ad117ab3418949f18af9c8758bd10158be3647298a9"},
- {file = "watchdog-2.1.3-py3-none-win_amd64.whl", hash = "sha256:44acad6f642996a2b50bb9ce4fb3730dde08f23e79e20cd3d8e2a2076b730381"},
- {file = "watchdog-2.1.3-py3-none-win_ia64.whl", hash = "sha256:0bcdf7b99b56a3ae069866c33d247c9994ffde91b620eaf0306b27e099bd1ae0"},
- {file = "watchdog-2.1.3.tar.gz", hash = "sha256:e5236a8e8602ab6db4b873664c2d356c365ab3cac96fbdec4970ad616415dd45"},
-]
diff --git a/pyproject.toml b/pyproject.toml
index 931c143d..f68bb960 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -30 +29,0 @@ black = "^21.7b0"
-watchdog = {extras = ["watchmedo"], version = "^2.1.3"}
diff --git a/src/datasets_preview_backend/main.py b/src/datasets_preview_backend/main.py
index fc9ea1af..fc40dc76 100644
--- a/src/datasets_preview_backend/main.py
+++ b/src/datasets_preview_backend/main.py
@@ -9,2 +9,2 @@ from datasets_preview_backend.routes import healthcheck, rows, configs, splits
-def start():
- app = Starlette(
+def app():
+ return Starlette(
@@ -19,2 +18,0 @@ def start():
- uvicorn.run(app, host="0.0.0.0", port=PORT)
-
@@ -23 +21 @@ if __name__ == "__main__":
- start()
+ uvicorn.run(app(), host="0.0.0.0", port=PORT)
|
|
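The `--factory` flag makes uvicorn call `app()` to build the application rather than importing a ready-made ASGI object. A reduced sketch of the pattern, with a healthcheck route standing in for the real app:

```python
from starlette.applications import Starlette
from starlette.responses import PlainTextResponse
from starlette.routing import Route

async def healthcheck(request):
    return PlainTextResponse("ok")

def app():
    # `uvicorn --factory datasets_preview_backend.main:app` invokes this
    # callable at startup and serves whatever it returns.
    return Starlette(routes=[Route("/healthcheck", healthcheck)])
```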
fa22dfebb6edfa65bf9458042cefbb7951376c57
|
Sylvain Lesage
| 2021-08-02T16:31:02 |
feat: 🎸 log all errors
|
diff --git a/src/datasets_preview_backend/exceptions.py b/src/datasets_preview_backend/exceptions.py
index adeeea48..1bbc4c10 100644
--- a/src/datasets_preview_backend/exceptions.py
+++ b/src/datasets_preview_backend/exceptions.py
@@ -12,2 +11,0 @@ class StatusError(Exception):
- # TODO: log the traces on every caught exception
-
diff --git a/src/datasets_preview_backend/routes.py b/src/datasets_preview_backend/routes.py
index b261ed76..8aa5da00 100644
--- a/src/datasets_preview_backend/routes.py
+++ b/src/datasets_preview_backend/routes.py
@@ -0,0 +1 @@
+import logging
@@ -9,0 +11 @@ from datasets_preview_backend.exceptions import (
+ StatusError,
@@ -14,0 +17,6 @@ from datasets_preview_backend.exceptions import (
+def log_error(err: StatusError):
+ logging.debug(
+ f"Error {err.status_code} '{err.message}'. Caused by a {type(err.__cause__).__name__}: '{str(err.__cause__)}'"
+ )
+
+
@@ -29,0 +38 @@ async def configs(request: Request):
+ log_error(err)
@@ -46,0 +56 @@ async def splits(request: Request):
+ log_error(err)
@@ -71,0 +82 @@ async def rows(request: Request):
+ log_error(err)
|
|
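`log_error` reads `err.__cause__`, which is populated by the `raise ... from err` statements in the queries. A self-contained illustration of that mechanism, independent of the backend's classes:

```python
try:
    try:
        int("not a number")
    except ValueError as err:
        raise RuntimeError("could not parse the config names") from err
except RuntimeError as err:
    # `raise ... from` stores the original exception on __cause__; this is
    # exactly what log_error() formats into its debug message.
    print(type(err.__cause__).__name__, str(err.__cause__))
```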
cf6510a53401f9dfd8f7159845a0fabac3c58c73
|
Sylvain Lesage
| 2021-08-02T16:11:14 |
refactor: 💡 simplify the errors a lot
|
diff --git a/quality/test_datasets.py b/quality/test_datasets.py
index 82fb3cda..328cd43f 100644
--- a/quality/test_datasets.py
+++ b/quality/test_datasets.py
@@ -24,0 +25,2 @@ def get_configs_report(dataset: str):
+ "cause": None,
+ "cause_message": None,
@@ -31 +33 @@ def get_configs_report(dataset: str):
- "exception": str(type(err).__name__),
+ "exception": type(err).__name__,
@@ -32,0 +35,2 @@ def get_configs_report(dataset: str):
+ "cause": type(err.__cause__).__name__,
+ "cause_message": str(err.__cause__),
@@ -45,0 +50,2 @@ def get_splits_report(dataset: str, config: str):
+ "cause": None,
+ "cause_message": None,
@@ -53 +59 @@ def get_splits_report(dataset: str, config: str):
- "exception": str(type(err).__name__),
+ "exception": type(err).__name__,
@@ -54,0 +61,2 @@ def get_splits_report(dataset: str, config: str):
+ "cause": type(err.__cause__).__name__,
+ "cause_message": str(err.__cause__),
@@ -69,0 +78,2 @@ def get_rows_report(dataset: str, config: str, split: str):
+ "cause": None,
+ "cause_message": None,
@@ -77 +87 @@ def get_rows_report(dataset: str, config: str, split: str):
- "exception": str(type(err).__name__),
+ "exception": type(err).__name__,
@@ -78,0 +89,2 @@ def get_rows_report(dataset: str, config: str, split: str):
+ "cause": type(err.__cause__).__name__,
+ "cause_message": str(err.__cause__),
diff --git a/src/datasets_preview_backend/exceptions.py b/src/datasets_preview_backend/exceptions.py
index 646e37f4..adeeea48 100644
--- a/src/datasets_preview_backend/exceptions.py
+++ b/src/datasets_preview_backend/exceptions.py
@@ -8 +8 @@ def print_config(config):
-class Error(Exception):
+class StatusError(Exception):
@@ -11,57 +11,2 @@ class Error(Exception):
- def __init__(self, message):
- self.message = message
- super().__init__(message)
-
-
-class DatasetBuilderScriptError(Error):
- """Exception raised if the dataset script fails.
-
- Attributes:
- dataset -- the erroneous dataset id
- """
-
- def __init__(self, dataset):
- self.dataset = dataset
- super().__init__(f"Dataset builder script error. Dataset: '{self.dataset}'")
-
-
-class DatasetBuilderNotFoundError(Error):
- """Exception raised if the dataset script could not be found.
-
- Attributes:
- dataset -- the erroneous dataset id
- """
-
- def __init__(self, dataset):
- self.dataset = dataset
- super().__init__(
- f"Dataset builder script could not be found. Dataset: '{self.dataset}'"
- )
-
-
-class DatasetBuilderNoSplitsError(Error):
- """Exception raised if the builder script fails to provide the list of splits.
-
- Attributes:
- dataset -- the erroneous dataset id
- config -- the erroneous dataset config name
- """
-
- def __init__(self, dataset, config):
- self.dataset = dataset
- self.config = config
- super().__init__(
- f"Dataset builder script error: could not get the list of splits. Dataset: '{self.dataset}', config: {print_config(self.config)}"
- )
-
-
-class DatasetNotFoundError(Error):
- """Exception raised if a dataset has not been found.
-
- Attributes:
- dataset -- the erroneous dataset id
- """
-
- def __init__(self, dataset):
- self.dataset = dataset
- super().__init__(f"Dataset not found. Dataset: '{self.dataset}'")
+ def __init__(self, message, status_code):
+ # TODO: log the traces on every caught exception
@@ -69,15 +14,3 @@ class DatasetNotFoundError(Error):
-
-class ConfigNotFoundError(Error):
- """Exception raised for config builder not found.
-
- Attributes:
- dataset -- the erroneous dataset id
- config -- the erroneous dataset config name
- """
-
- def __init__(self, dataset, config):
- self.dataset = dataset
- self.config = config
- super().__init__(
- f"Config not found. Dataset: '{self.dataset}', config: {print_config(self.config)}"
- )
+ self.message = message
+ self.status_code = status_code
+ super().__init__(self.message)
@@ -86,2 +19,2 @@ class ConfigNotFoundError(Error):
-class SplitError(Error):
- """Exception raised for errors in the split.
+class Status400Error(StatusError):
+ """Exception raised if the response must be a 400 status code.
@@ -90,3 +23 @@ class SplitError(Error):
- dataset -- the erroneous dataset id
- config -- the erroneous dataset config name
- split -- the erroneous dataset split name
+ message -- the content of the response
@@ -95,7 +26,2 @@ class SplitError(Error):
- def __init__(self, dataset, config, split):
- self.dataset = dataset
- self.config = config
- self.split = split
- super().__init__(
- f"Split error. Dataset: '{self.dataset}', config: {print_config(self.config)}, split: '{self.split}'"
- )
+ def __init__(self, message):
+ super().__init__(message, 400)
@@ -104,2 +30,2 @@ class SplitError(Error):
-class SplitNotImplementedError(Error):
- """Exception raised for NotImplementedError in the split.
+class Status404Error(StatusError):
+ """Exception raised if the response must be a 404 status code.
@@ -108,4 +34 @@ class SplitNotImplementedError(Error):
- dataset -- the erroneous dataset id
- config -- the erroneous dataset config name
- split -- the erroneous dataset split name
- extension -- the file extension not implemented yet
+ message -- the content of the response
@@ -114,11 +37,2 @@ class SplitNotImplementedError(Error):
- def __init__(self, dataset, config, split, extension):
- self.dataset = dataset
- self.config = config
- self.split = split
- self.extension = extension
- extension_str = (
- "" if self.extension is None else f" for extension '{self.extension}'"
- )
- super().__init__(
- f"Extraction protocol not implemented{extension_str}. Dataset: '{self.dataset}', config: {print_config(self.config)}, split: '{self.split}'"
- )
+ def __init__(self, message):
+ super().__init__(message, 404)
diff --git a/src/datasets_preview_backend/queries/configs.py b/src/datasets_preview_backend/queries/configs.py
index e267728b..d3b41780 100644
--- a/src/datasets_preview_backend/queries/configs.py
+++ b/src/datasets_preview_backend/queries/configs.py
@@ -11,2 +11,2 @@ from datasets_preview_backend.exceptions import (
- DatasetBuilderScriptError,
- DatasetBuilderNotFoundError,
+ Status400Error,
+ Status404Error,
@@ -15,2 +14,0 @@ from datasets_preview_backend.exceptions import (
-# TODO: log the traces on every caught exception
-
@@ -23,3 +21,5 @@ def get_configs(dataset: str) -> List[str]:
- raise DatasetBuilderNotFoundError(dataset=dataset)
- except (ModuleNotFoundError):
- raise DatasetBuilderScriptError(dataset=dataset)
+ raise Status404Error("The dataset could not be found.") from err
+ except Exception as err:
+ raise Status400Error(
+ "The config names could not be parsed from the dataset."
+ ) from err
diff --git a/src/datasets_preview_backend/queries/rows.py b/src/datasets_preview_backend/queries/rows.py
index 232b42fb..5b8589f6 100644
--- a/src/datasets_preview_backend/queries/rows.py
+++ b/src/datasets_preview_backend/queries/rows.py
@@ -12,5 +12,2 @@ from datasets_preview_backend.exceptions import (
- DatasetBuilderScriptError,
- DatasetNotFoundError,
- ConfigNotFoundError,
- SplitError,
- SplitNotImplementedError,
+ Status400Error,
+ Status404Error,
@@ -19,2 +15,0 @@ from datasets_preview_backend.exceptions import (
-# TODO: log the traces on every caught exception
-
@@ -32 +27,3 @@ def extract_rows(dataset: str, config: str, split: str, num_rows: int):
- raise DatasetNotFoundError(dataset=dataset)
+ raise Status404Error(
+ "The split for the dataset config could not be found."
+ ) from err
@@ -41,7 +38,7 @@ def extract_rows(dataset: str, config: str, split: str, num_rows: int):
- extension = None
- raise SplitNotImplementedError(
- dataset=dataset,
- config=config,
- split=split,
- extension=extension,
- )
+ raise Status400Error(
+ "The rows could not be extracted from the split of the dataset config."
+ ) from err
+ else:
+ raise Status400Error(
+ f"The rows could not be extracted from the split of the dataset config because extension {extension} is not supported."
+ ) from err
@@ -49,7 +46,4 @@ def extract_rows(dataset: str, config: str, split: str, num_rows: int):
- message = str(err)
- if message.startswith(f"BuilderConfig {config} not found"):
- raise ConfigNotFoundError(dataset=dataset, config=config)
- elif message.startswith(f"Config name is missing."):
- raise ConfigNotFoundError(dataset=dataset, config=config)
- elif message.startswith(f'Unknown split "{split}".') or message.startswith(
- f"Bad split: {split}."
+ if (
+ str(err).startswith(f"BuilderConfig {config} not found.")
+ or str(err).startswith(f"Config name is missing.")
+ or str(err).startswith(f"Bad split")
@@ -57 +51 @@ def extract_rows(dataset: str, config: str, split: str, num_rows: int):
- raise SplitError(dataset=dataset, config=config, split=split)
+ raise Status404Error("The dataset config could not be found.") from err
@@ -59,3 +53,7 @@ def extract_rows(dataset: str, config: str, split: str, num_rows: int):
- raise
- except (ModuleNotFoundError, RuntimeError, TypeError):
- raise DatasetBuilderScriptError(dataset=dataset)
+ raise Status400Error(
+ "The rows could not be extracted from the split of the dataset config."
+ ) from err
+ except Exception as err:
+ raise Status400Error(
+ "The rows could not be extracted from the split of the dataset config."
+ ) from err
diff --git a/src/datasets_preview_backend/queries/splits.py b/src/datasets_preview_backend/queries/splits.py
index dbe186f1..7ede618f 100644
--- a/src/datasets_preview_backend/queries/splits.py
+++ b/src/datasets_preview_backend/queries/splits.py
@@ -3,3 +3 @@ from typing import List
-from datasets import (
- load_dataset_builder,
-)
+from datasets import load_dataset_builder
@@ -9,3 +7,2 @@ from datasets_preview_backend.exceptions import (
- DatasetBuilderScriptError,
- DatasetBuilderNoSplitsError,
- ConfigNotFoundError,
+ Status400Error,
+ Status404Error,
@@ -14,2 +10,0 @@ from datasets_preview_backend.exceptions import (
-# TODO: log the traces on every caught exception
-
@@ -19,0 +15,2 @@ def get_splits(dataset: str, config: str) -> List[str]:
+ except FileNotFoundError as err:
+ raise Status404Error("The dataset config could not be found.") from err
@@ -21,5 +18,2 @@ def get_splits(dataset: str, config: str) -> List[str]:
- message = str(err)
- if message.startswith(f"BuilderConfig {config} not found"):
- raise ConfigNotFoundError(dataset=dataset, config=config)
- elif message.startswith(f"Config name is missing."):
- raise ConfigNotFoundError(dataset=dataset, config=config)
+ if str(err).startswith(f"BuilderConfig {config} not found."):
+ raise Status404Error("The dataset config could not be found.") from err
@@ -27,3 +21,7 @@ def get_splits(dataset: str, config: str) -> List[str]:
- raise
- except (ModuleNotFoundError, RuntimeError, TypeError):
- raise DatasetBuilderScriptError(dataset=dataset)
+ raise Status400Error(
+ "The split names could not be parsed from the dataset config."
+ ) from err
+ except Exception as err:
+ raise Status400Error(
+ "The split names could not be parsed from the dataset config."
+ ) from err
@@ -32,0 +31 @@ def get_splits(dataset: str, config: str) -> List[str]:
+ # should not be necessary once https://github.com/huggingface/datasets/issues/2743 is fixed
@@ -40,2 +39,4 @@ def get_splits(dataset: str, config: str) -> List[str]:
- except:
- raise DatasetBuilderNoSplitsError(dataset=dataset, config=config)
+ except Exception as err:
+ raise Status400Error(
+ "The split names could not be parsed from the dataset config."
+ ) from err
diff --git a/src/datasets_preview_backend/routes.py b/src/datasets_preview_backend/routes.py
index a5b3898e..b261ed76 100644
--- a/src/datasets_preview_backend/routes.py
+++ b/src/datasets_preview_backend/routes.py
@@ -10,7 +10,2 @@ from datasets_preview_backend.exceptions import (
- DatasetBuilderScriptError,
- DatasetBuilderNotFoundError,
- DatasetBuilderNoSplitsError,
- DatasetNotFoundError,
- ConfigNotFoundError,
- SplitError,
- SplitNotImplementedError,
+ Status400Error,
+ Status404Error,
@@ -24 +19 @@ async def healthcheck(_: Request):
-async def rows(request: Request):
+async def configs(request: Request):
@@ -26,5 +20,0 @@ async def rows(request: Request):
- config: str = request.query_params.get("config")
- split: str = request.query_params.get("split")
- num_rows = get_int_value(
- d=request.query_params, key="rows", default=EXTRACT_ROWS_LIMIT
- )
@@ -36,5 +25,0 @@ async def rows(request: Request):
- # note: config_name must not be set to refer to the None config_name (the majority of datasets).
- if split is None:
- return PlainTextResponse(
- "'split' is a required query parameter.", status_code=400
- )
@@ -43,9 +28,3 @@ async def rows(request: Request):
- return JSONResponse(extract_rows(dataset, config, split, num_rows))
- except (DatasetNotFoundError, ConfigNotFoundError) as err:
- return PlainTextResponse(err.message, status_code=404)
- except (
- DatasetBuilderScriptError,
- SplitError,
- SplitNotImplementedError,
- ) as err:
- return PlainTextResponse(err.message, status_code=400)
+ return JSONResponse(get_configs(dataset))
+ except (Status400Error, Status404Error) as err:
+ return PlainTextResponse(err.message, status_code=err.status_code)
@@ -55 +34 @@ async def rows(request: Request):
-async def configs(request: Request):
+async def splits(request: Request):
@@ -56,0 +36 @@ async def configs(request: Request):
+ config: str = request.query_params.get("config")
@@ -61,0 +42 @@ async def configs(request: Request):
+ # note: config_name must not be set to refer to the None config_name (the majority of datasets).
@@ -64,5 +45,3 @@ async def configs(request: Request):
- return JSONResponse(get_configs(dataset))
- except (DatasetBuilderNotFoundError) as err:
- return PlainTextResponse(err.message, status_code=404)
- except (DatasetBuilderScriptError,) as err:
- return PlainTextResponse(err.message, status_code=400)
+ return JSONResponse(get_splits(dataset, config))
+ except (Status400Error, Status404Error) as err:
+ return PlainTextResponse(err.message, status_code=err.status_code)
@@ -72 +51 @@ async def configs(request: Request):
-async def splits(request: Request):
+async def rows(request: Request):
@@ -74,0 +54,4 @@ async def splits(request: Request):
+ split: str = request.query_params.get("split")
+ num_rows = get_int_value(
+ d=request.query_params, key="rows", default=EXTRACT_ROWS_LIMIT
+ )
@@ -80,0 +64,4 @@ async def splits(request: Request):
+ if split is None:
+ return PlainTextResponse(
+ "'split' is a required query parameter.", status_code=400
+ )
@@ -83,5 +70,3 @@ async def splits(request: Request):
- return JSONResponse(get_splits(dataset, config))
- except (ConfigNotFoundError) as err:
- return PlainTextResponse(err.message, status_code=404)
- except (DatasetBuilderScriptError, DatasetBuilderNoSplitsError) as err:
- return PlainTextResponse(err.message, status_code=400)
+ return JSONResponse(extract_rows(dataset, config, split, num_rows))
+ except (Status400Error, Status404Error) as err:
+ return PlainTextResponse(err.message, status_code=err.status_code)
diff --git a/tests/queries/test_configs.py b/tests/queries/test_configs.py
index e7aa3352..f66f3dd1 100644
--- a/tests/queries/test_configs.py
+++ b/tests/queries/test_configs.py
@@ -4,0 +5,2 @@ from datasets_preview_backend.queries.configs import (
+ Status400Error,
+ Status404Error,
@@ -6,2 +7,0 @@ from datasets_preview_backend.queries.configs import (
- DatasetBuilderScriptError,
- DatasetBuilderNotFoundError,
@@ -32,6 +31,0 @@ def test_import_nltk():
-def test_import_nltk():
- # requires the nltk dependency
- configs = get_configs("vershasaxena91/squad_multitask")["configs"]
- assert len(configs) == 3
-
-
@@ -40 +34 @@ def test_script_error():
- with pytest.raises(DatasetBuilderScriptError):
+ with pytest.raises(Status400Error):
@@ -43 +37 @@ def test_script_error():
- with pytest.raises(DatasetBuilderScriptError):
+ with pytest.raises(Status400Error):
@@ -46,0 +41,6 @@ def test_script_error():
+def test_no_dataset():
+ # the dataset does not exist
+ with pytest.raises(Status404Error):
+ get_configs("doesnotexist")
+
+
@@ -49 +49 @@ def test_no_dataset_no_script():
- with pytest.raises(DatasetBuilderNotFoundError):
+ with pytest.raises(Status404Error):
@@ -55 +55 @@ def test_no_dataset_bad_script_name():
- with pytest.raises(DatasetBuilderNotFoundError):
+ with pytest.raises(Status404Error):
diff --git a/tests/queries/test_rows.py b/tests/queries/test_rows.py
index 6f393242..726c34f7 100644
--- a/tests/queries/test_rows.py
+++ b/tests/queries/test_rows.py
@@ -4,5 +4,2 @@ from datasets_preview_backend.queries.rows import (
- DatasetBuilderScriptError,
- ConfigNotFoundError,
- DatasetNotFoundError,
- SplitError,
- SplitNotImplementedError,
+ Status400Error,
+ Status404Error,
@@ -38,0 +36,7 @@ def test_extract_split_rows_num_rows():
+def test_extract_unknown_dataset():
+ with pytest.raises(Status404Error):
+ extract_rows("doesnotexist", None, "train", 100)
+ with pytest.raises(Status404Error):
+ extract_rows("AConsApart/anime_subtitles_DialoGPT", None, "train", 100)
+
+
@@ -40 +44 @@ def test_extract_unknown_config():
- with pytest.raises(ConfigNotFoundError):
+ with pytest.raises(Status404Error):
@@ -42 +46 @@ def test_extract_unknown_config():
- with pytest.raises(ConfigNotFoundError):
+ with pytest.raises(Status404Error):
@@ -47 +51 @@ def test_extract_unknown_split():
- with pytest.raises(SplitError):
+ with pytest.raises(Status404Error):
@@ -51,7 +54,0 @@ def test_extract_unknown_split():
-def test_extract_unknown_dataset():
- with pytest.raises(DatasetNotFoundError):
- extract_rows("doesnotexist", None, "train", 100)
- with pytest.raises(DatasetNotFoundError):
- extract_rows("AConsApart/anime_subtitles_DialoGPT", None, "train", 100)
-
-
@@ -59 +56 @@ def test_extract_bogus_dataset():
- with pytest.raises(DatasetBuilderScriptError):
+ with pytest.raises(Status400Error):
@@ -64 +61 @@ def test_extract_bogus_config():
- with pytest.raises(DatasetBuilderScriptError):
+ with pytest.raises(Status400Error):
@@ -66 +63 @@ def test_extract_bogus_config():
- with pytest.raises(DatasetBuilderScriptError):
+ with pytest.raises(Status400Error):
@@ -71 +68 @@ def test_extract_not_implemented_split():
- with pytest.raises(SplitNotImplementedError):
+ with pytest.raises(Status400Error):
@@ -76 +73 @@ def test_tar_gz_extension():
- with pytest.raises(SplitNotImplementedError):
+ with pytest.raises(Status400Error):
diff --git a/tests/queries/test_splits.py b/tests/queries/test_splits.py
index bb1ff982..a2456575 100644
--- a/tests/queries/test_splits.py
+++ b/tests/queries/test_splits.py
@@ -4 +4,2 @@ from datasets_preview_backend.queries.splits import (
- DatasetBuilderNoSplitsError,
+ Status400Error,
+ Status404Error,
@@ -27 +28 @@ def test_get_splits():
- # uses the fallback to call "builder._split_generators"
+ # uses the fallback to call "builder._split_generators" while https://github.com/huggingface/datasets/issues/2743
@@ -41,3 +42,3 @@ def test_get_splits():
-def test_extract_bogus_splits():
- # not sure if we have an example of such an error
- with pytest.raises(DatasetBuilderNoSplitsError):
+def test_no_splits():
+ # Due to https://github.com/huggingface/datasets/issues/2743
+ with pytest.raises(Status400Error):
@@ -44,0 +46,16 @@ def test_extract_bogus_splits():
+
+
+def test_builder_config_error():
+ with pytest.raises(Status400Error):
+ get_splits("KETI-AIR/nikl", "spoken.v1.0")
+ with pytest.raises(Status400Error):
+ get_splits("nateraw/image-folder", None)
+ with pytest.raises(Status400Error):
+ get_splits("Valahaar/wsdmt", None)
+
+
+def test_not_found():
+ with pytest.raises(Status404Error):
+ get_splits("doesnotexist", None)
+ with pytest.raises(Status404Error):
+ get_splits("glue", "doesnotexist")
|
|
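After this commit the whole error model is two status-coded wrappers plus exception chaining. It fits in a short, runnable sketch — the `get_configs` body here is a deliberate stand-in that always fails:

```python
class StatusError(Exception):
    def __init__(self, message, status_code):
        self.message = message
        self.status_code = status_code
        super().__init__(self.message)

class Status404Error(StatusError):
    def __init__(self, message):
        super().__init__(message, 404)

def get_configs(dataset):
    try:
        raise FileNotFoundError(f"no loading script for {dataset}")  # stand-in failure
    except FileNotFoundError as err:
        raise Status404Error("The dataset could not be found.") from err

try:
    get_configs("doesnotexist")
except StatusError as err:
    print(err.status_code, err.message, type(err.__cause__).__name__)
    # 404 The dataset could not be found. FileNotFoundError
```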
792fa4d99b4440c57e1cf0394788b735c9a6ffb4
|
Sylvain Lesage
| 2021-08-02T09:20:54 |
test: 💍 add tests for all known errors in get_configs
|
diff --git a/src/datasets_preview_backend/exceptions.py b/src/datasets_preview_backend/exceptions.py
index 2dbf6a7e..646e37f4 100644
--- a/src/datasets_preview_backend/exceptions.py
+++ b/src/datasets_preview_backend/exceptions.py
@@ -27,0 +28,14 @@ class DatasetBuilderScriptError(Error):
+class DatasetBuilderNotFoundError(Error):
+ """Exception raised if the dataset script could not be found.
+
+ Attributes:
+ dataset -- the erroneous dataset id
+ """
+
+ def __init__(self, dataset):
+ self.dataset = dataset
+ super().__init__(
+ f"Dataset builder script could not be found. Dataset: '{self.dataset}'"
+ )
+
+
diff --git a/src/datasets_preview_backend/queries/configs.py b/src/datasets_preview_backend/queries/configs.py
index 93bac93f..e267728b 100644
--- a/src/datasets_preview_backend/queries/configs.py
+++ b/src/datasets_preview_backend/queries/configs.py
@@ -12 +12 @@ from datasets_preview_backend.exceptions import (
- DatasetNotFoundError,
+ DatasetBuilderNotFoundError,
@@ -23 +23 @@ def get_configs(dataset: str) -> List[str]:
- raise DatasetNotFoundError(dataset=dataset)
+ raise DatasetBuilderNotFoundError(dataset=dataset)
diff --git a/src/datasets_preview_backend/routes.py b/src/datasets_preview_backend/routes.py
index 0f9f3c59..a5b3898e 100644
--- a/src/datasets_preview_backend/routes.py
+++ b/src/datasets_preview_backend/routes.py
@@ -10,0 +11 @@ from datasets_preview_backend.exceptions import (
+ DatasetBuilderNotFoundError,
@@ -64 +65 @@ async def configs(request: Request):
- except (DatasetNotFoundError) as err:
+ except (DatasetBuilderNotFoundError) as err:
diff --git a/tests/queries/test_configs.py b/tests/queries/test_configs.py
index 54f66341..e7aa3352 100644
--- a/tests/queries/test_configs.py
+++ b/tests/queries/test_configs.py
@@ -0,0 +1,3 @@
+import pytest
+
+
@@ -2,0 +6,2 @@ from datasets_preview_backend.queries.configs import (
+ DatasetBuilderScriptError,
+ DatasetBuilderNotFoundError,
@@ -24,0 +30,27 @@ def test_import_nltk():
+
+
+def test_import_nltk():
+ # requires the nltk dependency
+ configs = get_configs("vershasaxena91/squad_multitask")["configs"]
+ assert len(configs) == 3
+
+
+def test_script_error():
+ # raises "ModuleNotFoundError: No module named 'datasets_modules.datasets.Test'", which should be caught and raised as DatasetBuilderScriptError
+ with pytest.raises(DatasetBuilderScriptError):
+ get_configs("TimTreasure4/Test")
+ # raises "ModuleNotFoundError: No module named 'datasets_modules.datasets.br-quad-2'", which should be caught and raised as DatasetBuilderScriptError
+ with pytest.raises(DatasetBuilderScriptError):
+ get_configs("piEsposito/br-quad-2.0")
+
+
+def test_no_dataset_no_script():
+ # the dataset does not contain a script
+ with pytest.raises(DatasetBuilderNotFoundError):
+ get_configs("AConsApart/anime_subtitles_DialoGPT")
+
+
+def test_no_dataset_bad_script_name():
+ # the dataset script name is incorrect
+ with pytest.raises(DatasetBuilderNotFoundError):
+ get_configs("Cropinky/rap_lyrics_english")
|
|
af9151393b8c9ce334b6e9f6a666cb2a19ae03e7
|
Sylvain Lesage
| 2021-08-02T09:01:46 |
refactor: 💡 split queries.py into three files
|
diff --git a/quality/test_datasets.py b/quality/test_datasets.py
index efa800f1..82fb3cda 100644
--- a/quality/test_datasets.py
+++ b/quality/test_datasets.py
@@ -11,5 +11,3 @@ logging.disable(logging.CRITICAL)
-from datasets_preview_backend.queries import (
- get_configs,
- get_splits,
- extract_rows,
-)
+from datasets_preview_backend.queries.configs import get_configs
+from datasets_preview_backend.queries.splits import get_splits
+from datasets_preview_backend.queries.rows import extract_rows
diff --git a/src/datasets_preview_backend/queries/configs.py b/src/datasets_preview_backend/queries/configs.py
new file mode 100644
index 00000000..93bac93f
--- /dev/null
+++ b/src/datasets_preview_backend/queries/configs.py
@@ -0,0 +1,29 @@
+import logging
+
+from typing import List
+
+from datasets import (
+ prepare_module,
+ import_main_class,
+)
+
+from datasets_preview_backend.exceptions import (
+ DatasetBuilderScriptError,
+ DatasetNotFoundError,
+)
+
+# TODO: log the traces on every caught exception
+
+
+def get_configs(dataset: str) -> List[str]:
+ try:
+ module_path, *_ = prepare_module(dataset, dataset=True)
+ builder_cls = import_main_class(module_path, dataset=True)
+ except FileNotFoundError as err:
+ raise DatasetNotFoundError(dataset=dataset)
+ except (ModuleNotFoundError):
+ raise DatasetBuilderScriptError(dataset=dataset)
+
+ configs = [c.name for c in builder_cls.BUILDER_CONFIGS] or [None]
+ logging.debug(f"The dataset builder has {len(configs)} configs: {configs}")
+ return {"dataset": dataset, "configs": configs}
diff --git a/src/datasets_preview_backend/queries.py b/src/datasets_preview_backend/queries/rows.py
similarity index 55%
rename from src/datasets_preview_backend/queries.py
rename to src/datasets_preview_backend/queries/rows.py
index 10c973f3..232b42fb 100644
--- a/src/datasets_preview_backend/queries.py
+++ b/src/datasets_preview_backend/queries/rows.py
@@ -9,3 +8,0 @@ from datasets import (
- load_dataset_builder,
- prepare_module,
- import_main_class,
@@ -13 +9,0 @@ from datasets import (
-from datasets.utils.streaming_download_manager import StreamingDownloadManager
@@ -17 +12,0 @@ from datasets_preview_backend.exceptions import (
- DatasetBuilderNoSplitsError,
@@ -27,44 +21,0 @@ from datasets_preview_backend.exceptions import (
-def get_configs(dataset: str) -> List[str]:
- try:
- module_path, *_ = prepare_module(dataset, dataset=True)
- builder_cls = import_main_class(module_path, dataset=True)
- except FileNotFoundError as err:
- raise DatasetNotFoundError(dataset=dataset)
- except (ModuleNotFoundError):
- raise DatasetBuilderScriptError(dataset=dataset)
-
- configs = [c.name for c in builder_cls.BUILDER_CONFIGS] or [None]
- logging.debug(f"The dataset builder has {len(configs)} configs: {configs}")
- return {"dataset": dataset, "configs": configs}
-
-
-def get_splits(dataset: str, config: str) -> List[str]:
- try:
- builder = load_dataset_builder(dataset, name=config)
- except ValueError as err:
- message = str(err)
- if message.startswith(f"BuilderConfig {config} not found"):
- raise ConfigNotFoundError(dataset=dataset, config=config)
- elif message.startswith(f"Config name is missing."):
- raise ConfigNotFoundError(dataset=dataset, config=config)
- else:
- raise
- except (ModuleNotFoundError, RuntimeError, TypeError):
- raise DatasetBuilderScriptError(dataset=dataset)
-
- if builder.info.splits is None:
- # try to get them from _split_generators
- try:
- splits = [
- split_generator.name
- for split_generator in builder._split_generators(
- StreamingDownloadManager(base_path=builder.base_path)
- )
- ]
- except:
- raise DatasetBuilderNoSplitsError(dataset=dataset, config=config)
- else:
- splits = list(builder.info.splits.keys())
- return {"dataset": dataset, "config": config, "splits": splits}
-
-
diff --git a/src/datasets_preview_backend/queries/splits.py b/src/datasets_preview_backend/queries/splits.py
new file mode 100644
index 00000000..dbe186f1
--- /dev/null
+++ b/src/datasets_preview_backend/queries/splits.py
@@ -0,0 +1,44 @@
+from typing import List
+
+from datasets import (
+ load_dataset_builder,
+)
+from datasets.utils.streaming_download_manager import StreamingDownloadManager
+
+from datasets_preview_backend.exceptions import (
+ DatasetBuilderScriptError,
+ DatasetBuilderNoSplitsError,
+ ConfigNotFoundError,
+)
+
+# TODO: log the traces on every caught exception
+
+
+def get_splits(dataset: str, config: str) -> List[str]:
+ try:
+ builder = load_dataset_builder(dataset, name=config)
+ except ValueError as err:
+ message = str(err)
+ if message.startswith(f"BuilderConfig {config} not found"):
+ raise ConfigNotFoundError(dataset=dataset, config=config)
+ elif message.startswith(f"Config name is missing."):
+ raise ConfigNotFoundError(dataset=dataset, config=config)
+ else:
+ raise
+ except (ModuleNotFoundError, RuntimeError, TypeError):
+ raise DatasetBuilderScriptError(dataset=dataset)
+
+ if builder.info.splits is None:
+ # try to get them from _split_generators
+ try:
+ splits = [
+ split_generator.name
+ for split_generator in builder._split_generators(
+ StreamingDownloadManager(base_path=builder.base_path)
+ )
+ ]
+ except:
+ raise DatasetBuilderNoSplitsError(dataset=dataset, config=config)
+ else:
+ splits = list(builder.info.splits.keys())
+ return {"dataset": dataset, "config": config, "splits": splits}
diff --git a/src/datasets_preview_backend/routes.py b/src/datasets_preview_backend/routes.py
index 91a848c7..0f9f3c59 100644
--- a/src/datasets_preview_backend/routes.py
+++ b/src/datasets_preview_backend/routes.py
@@ -5 +5,3 @@ from datasets_preview_backend.config import EXTRACT_ROWS_LIMIT
-from datasets_preview_backend.queries import extract_rows, get_configs, get_splits
+from datasets_preview_backend.queries.configs import get_configs
+from datasets_preview_backend.queries.splits import get_splits
+from datasets_preview_backend.queries.rows import extract_rows
diff --git a/tests/queries/test_configs.py b/tests/queries/test_configs.py
new file mode 100644
index 00000000..54f66341
--- /dev/null
+++ b/tests/queries/test_configs.py
@@ -0,0 +1,24 @@
+from datasets_preview_backend.queries.configs import (
+ get_configs,
+)
+
+
+def test_get_configs():
+ dataset = "acronym_identification"
+ response = get_configs(dataset)
+ assert "dataset" in response
+ assert response["dataset"] == dataset
+ assert "configs" in response
+ configs = response["configs"]
+ assert len(configs) == 1
+ assert configs[0] is None
+
+ configs = get_configs("glue")["configs"]
+ assert len(configs) == 12
+ assert "cola" in configs
+
+
+def test_import_nltk():
+ # requires the nltk dependency
+ configs = get_configs("vershasaxena91/squad_multitask")["configs"]
+ assert len(configs) == 3
diff --git a/tests/test_queries.py b/tests/queries/test_rows.py
similarity index 55%
rename from tests/test_queries.py
rename to tests/queries/test_rows.py
index ca698d8a..6f393242 100644
--- a/tests/test_queries.py
+++ b/tests/queries/test_rows.py
@@ -3 +3 @@ import pytest
-from datasets_preview_backend.queries import (
+from datasets_preview_backend.queries.rows import (
@@ -5 +4,0 @@ from datasets_preview_backend.queries import (
- DatasetBuilderNoSplitsError,
@@ -10,2 +8,0 @@ from datasets_preview_backend.queries import (
- get_configs,
- get_splits,
@@ -16,47 +12,0 @@ from datasets_preview_backend.queries import (
-def test_get_configs():
- dataset = "acronym_identification"
- response = get_configs(dataset)
- assert "dataset" in response
- assert response["dataset"] == dataset
- assert "configs" in response
- configs = response["configs"]
- assert len(configs) == 1
- assert configs[0] is None
-
- configs = get_configs("glue")["configs"]
- assert len(configs) == 12
- assert "cola" in configs
-
-
-def test_get_splits():
- dataset = "acronym_identification"
- config = None
- response = get_splits(dataset, config)
- assert "dataset" in response
- assert response["dataset"] == dataset
- assert "config" in response
- assert response["config"] == config
- assert "splits" in response
- splits = response["splits"]
- assert len(splits) == 3
- assert "train" in splits
-
- splits = get_splits("glue", "ax")["splits"]
- assert len(splits) == 1
- assert "test" in splits
- assert "train" not in splits
-
- # uses the fallback to call "builder._split_generators"
- splits = get_splits("hda_nli_hindi", "HDA nli hindi")["splits"]
- assert len(splits) == 3
- assert "train" in splits
- assert "validation" in splits
- assert "test" in splits
-
- splits = get_splits("classla/copa_hr", "copa_hr")["splits"]
- assert len(splits) == 3
-
- splits = get_splits("mc4", "sn")["splits"]
- assert len(splits) == 2
-
-
@@ -120,6 +69,0 @@ def test_extract_bogus_config():
-def test_extract_bogus_splits():
- # not sure if we have an example of such an error
- with pytest.raises(DatasetBuilderNoSplitsError):
- get_splits("journalists_questions", "plain_text")
-
-
@@ -134,6 +77,0 @@ def test_tar_gz_extension():
-
-
-def test_import_nltk():
- # requires the nltk dependency
- configs = get_configs("vershasaxena91/squad_multitask")["configs"]
- assert len(configs) == 3
diff --git a/tests/queries/test_splits.py b/tests/queries/test_splits.py
new file mode 100644
index 00000000..bb1ff982
--- /dev/null
+++ b/tests/queries/test_splits.py
@@ -0,0 +1,44 @@
+import pytest
+
+from datasets_preview_backend.queries.splits import (
+ DatasetBuilderNoSplitsError,
+ get_splits,
+)
+
+
+def test_get_splits():
+ dataset = "acronym_identification"
+ config = None
+ response = get_splits(dataset, config)
+ assert "dataset" in response
+ assert response["dataset"] == dataset
+ assert "config" in response
+ assert response["config"] == config
+ assert "splits" in response
+ splits = response["splits"]
+ assert len(splits) == 3
+ assert "train" in splits
+
+ splits = get_splits("glue", "ax")["splits"]
+ assert len(splits) == 1
+ assert "test" in splits
+ assert "train" not in splits
+
+ # uses the fallback to call "builder._split_generators"
+ splits = get_splits("hda_nli_hindi", "HDA nli hindi")["splits"]
+ assert len(splits) == 3
+ assert "train" in splits
+ assert "validation" in splits
+ assert "test" in splits
+
+ splits = get_splits("classla/copa_hr", "copa_hr")["splits"]
+ assert len(splits) == 3
+
+ splits = get_splits("mc4", "sn")["splits"]
+ assert len(splits) == 2
+
+
+def test_extract_bogus_splits():
+ # not sure if we have an example of such an error
+ with pytest.raises(DatasetBuilderNoSplitsError):
+ get_splits("journalists_questions", "plain_text")
|
|
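The fallback kept in `splits.py` (pending huggingface/datasets#2743) reads, in isolation, roughly as below. It leans on the builder's private `_split_generators`, so whether it runs depends on the `datasets` version pinned at this commit, and on network access to the dataset:

```python
from datasets import load_dataset_builder
from datasets.utils.streaming_download_manager import StreamingDownloadManager

builder = load_dataset_builder("hda_nli_hindi", name="HDA nli hindi")
if builder.info.splits is None:
    # dataset info carries no split list: ask the (private) split
    # generators through a streaming download manager instead
    splits = [
        gen.name
        for gen in builder._split_generators(
            StreamingDownloadManager(base_path=builder.base_path)
        )
    ]
else:
    splits = list(builder.info.splits.keys())
print(splits)  # expected per the tests: train, validation, test
```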
4537a0f1aad9e2110c9e39d360b534cbcfee9581
|
Sylvain Lesage
| 2021-08-02T08:47:27 |
feat: 🎸 add nltk dependency
|
diff --git a/poetry.lock b/poetry.lock
index df2f3a97..b35f7d90 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -312 +312 @@ name = "datasets"
-version = "1.10.2"
+version = "1.11.0"
@@ -335 +335 @@ benchmarks = ["numpy (==1.18.5)", "tensorflow (==2.3.0)", "torch (==1.6.0)", "tr
-dev = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[server,s3] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "wget (>=3.2)", "pytorch-nlp (==0.5.0)", "pytorch-lightning", "fastBPE (==0.1.0)", "fairseq", "black (==21.4b0)", "flake8 (==3.7.9)", "isort", "pyyaml (>=5.3.1)", "importlib-resources"]
+dev = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "fsspec", "moto[s3,server] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "black (==21.4b0)", "flake8 (==3.7.9)", "isort", "pyyaml (>=5.3.1)", "importlib-resources"]
@@ -342 +342 @@ tensorflow_gpu = ["tensorflow-gpu (>=2.2.0)"]
-tests = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[server,s3] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "wget (>=3.2)", "pytorch-nlp (==0.5.0)", "pytorch-lightning", "fastBPE (==0.1.0)", "fairseq", "importlib-resources"]
+tests = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "fsspec", "moto[s3,server] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "importlib-resources"]
@@ -617,0 +618 @@ url = "https://github.com/kpu/kenlm/archive/master.zip"
+
@@ -756,0 +758,22 @@ torch = ["torch"]
+[[package]]
+name = "nltk"
+version = "3.6.2"
+description = "Natural Language Toolkit"
+category = "main"
+optional = false
+python-versions = ">=3.5.*"
+
+[package.dependencies]
+click = "*"
+joblib = "*"
+regex = "*"
+tqdm = "*"
+
+[package.extras]
+all = ["matplotlib", "twython", "scipy", "numpy", "gensim (<4.0.0)", "python-crfsuite", "pyparsing", "scikit-learn", "requests"]
+corenlp = ["requests"]
+machine_learning = ["gensim (<4.0.0)", "numpy", "python-crfsuite", "scikit-learn", "scipy"]
+plot = ["matplotlib"]
+tgrep = ["pyparsing"]
+twitter = ["twython"]
+
@@ -1491 +1514 @@ python-versions = "^3.8"
-content-hash = "a33b4415d9dadbe5601eb950817b4464343d32cde1cbacbad426b2d26cd40fb5"
+content-hash = "7fe0a20f2baf129ea8c51856ced71768ff64d01e0c0bea829bdd25cd968c6d80"
@@ -1789,2 +1812,2 @@ datasets = [
- {file = "datasets-1.10.2-py3-none-any.whl", hash = "sha256:a523e22b222b38700cc672445f2d534ed4a5aeda1399c074b722feda36b175c2"},
- {file = "datasets-1.10.2.tar.gz", hash = "sha256:19106e8f5fa7be95ccd19fa82653ce707cad378d0e323c3013a17c2b6513bf5c"},
+ {file = "datasets-1.11.0-py3-none-any.whl", hash = "sha256:603612b018794e33d8f0655235731bc139b141cb8f864c2f29140940da16955f"},
+ {file = "datasets-1.11.0.tar.gz", hash = "sha256:3b01bf12951903e83b528d41129876426eb3a5fbcaf2645552283330528c92bf"},
@@ -2070,0 +2094,4 @@ nlp = [
+nltk = [
+ {file = "nltk-3.6.2-py3-none-any.whl", hash = "sha256:240e23ab1ab159ef9940777d30c7c72d7e76d91877099218a7585370c11f6b9e"},
+ {file = "nltk-3.6.2.zip", hash = "sha256:57d556abed621ab9be225cc6d2df1edce17572efb67a3d754630c9f8381503eb"},
+]
diff --git a/pyproject.toml b/pyproject.toml
index 6c86a715..931c143d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -25,0 +26 @@ kenlm = {url = "https://github.com/kpu/kenlm/archive/master.zip"}
+nltk = "^3.6.2"
@@ -35,0 +37,5 @@ build-backend = "poetry.core.masonry.api"
+
+[tool.pytest.ini_options]
+filterwarnings = [
+ "ignore::DeprecationWarning"
+]
diff --git a/tests/test_main.py b/tests/test_queries.py
similarity index 96%
rename from tests/test_main.py
rename to tests/test_queries.py
index 0b337421..ca698d8a 100644
--- a/tests/test_main.py
+++ b/tests/test_queries.py
@@ -133,0 +134,6 @@ def test_tar_gz_extension():
+
+
+def test_import_nltk():
+ # requires the nltk dependency
+ configs = get_configs("vershasaxena91/squad_multitask")["configs"]
+ assert len(configs) == 3
|
|
fe120afd6ab736a64c6e348043d96345585abbcb
|
Sylvain Lesage
| 2021-07-30T17:24:25 |
test: 💍 remove logs during the benchmark
|
diff --git a/quality/test_datasets.py b/quality/test_datasets.py
index 90b05723..efa800f1 100644
--- a/quality/test_datasets.py
+++ b/quality/test_datasets.py
@@ -4,0 +5,4 @@ from tqdm.contrib.concurrent import process_map
+import logging
+
+# remove any logs
+logging.disable(logging.CRITICAL)
|
|
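`logging.disable(logging.CRITICAL)` is process-wide: it suppresses every record at CRITICAL severity and below, i.e. all standard levels, from every logger. A quick demonstration:

```python
import logging

logging.disable(logging.CRITICAL)                 # mute everything
logging.getLogger("datasets").error("swallowed")  # never printed
logging.disable(logging.NOTSET)                   # restore logging
```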
ece32583791cbdcff030a45256d1efd8fd4e991e
|
Sylvain Lesage
| 2021-07-30T17:16:12 |
test: 💍 lower chunksize to 5, and factor the variable
|
diff --git a/quality/test_datasets.py b/quality/test_datasets.py
index 679022f5..90b05723 100644
--- a/quality/test_datasets.py
+++ b/quality/test_datasets.py
@@ -80,0 +81 @@ def export_all_datasets_exceptions():
+ chunksize = 5
@@ -84 +85 @@ def export_all_datasets_exceptions():
- configs_reports = process_map(get_configs_report, datasets, chunksize=20)
+ configs_reports = process_map(get_configs_report, datasets, chunksize=chunksize)
@@ -98 +99 @@ def export_all_datasets_exceptions():
- chunksize=20,
+ chunksize=chunksize,
@@ -116 +117 @@ def export_all_datasets_exceptions():
- chunksize=20,
+ chunksize=chunksize,
|
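Why `chunksize` matters for the benchmark: `process_map` hands each worker `chunksize` items at a time, so smaller chunks balance better when per-dataset runtimes vary wildly, at the cost of a little more inter-process traffic. A toy run, with `report` standing in for the real per-dataset probe:

```python
from tqdm.contrib.concurrent import process_map

def report(n):
    return n * n  # stand-in for get_configs_report etc.

if __name__ == "__main__":
    results = process_map(report, range(100), chunksize=5)
```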
|
977d8e717d6c032f60ea34d6544d238d4cd7e1fa
|
Sylvain Lesage
| 2021-07-30T17:14:38 |
fix: 🐛 fix error names, and fix except place for one
|
diff --git a/src/datasets_preview_backend/routes.py b/src/datasets_preview_backend/routes.py
index 743619b9..91a848c7 100644
--- a/src/datasets_preview_backend/routes.py
+++ b/src/datasets_preview_backend/routes.py
@@ -9 +9 @@ from datasets_preview_backend.exceptions import (
- DatasetBuilderScriptConfigNoSplitsError,
+ DatasetBuilderNoSplitsError,
@@ -45 +44,0 @@ async def rows(request: Request):
- DatasetBuilderScriptConfigNoSplitsError,
@@ -84 +83 @@ async def splits(request: Request):
- except (DatasetBuilderScriptError,) as err:
+ except (DatasetBuilderScriptError, DatasetBuilderNoSplitsError) as err:
|
|
e303d1ec54b806d48405e37ce27852a25896b875
|
Sylvain Lesage
| 2021-07-30T17:10:12 |
test: 💍 change error name and add a test
|
diff --git a/src/datasets_preview_backend/exceptions.py b/src/datasets_preview_backend/exceptions.py
index f9f8d1cb..2dbf6a7e 100644
--- a/src/datasets_preview_backend/exceptions.py
+++ b/src/datasets_preview_backend/exceptions.py
@@ -28,2 +28,2 @@ class DatasetBuilderScriptError(Error):
-class DatasetBuilderScriptConfigNoSplitsError(Error):
- """Exception raised if the builder script fails for this config.
+class DatasetBuilderNoSplitsError(Error):
+ """Exception raised if the builder script fails to provide the list of splits.
@@ -40 +40 @@ class DatasetBuilderScriptConfigNoSplitsError(Error):
- f"Dataset builder script error: missing .info.splits. Dataset: '{self.dataset}', config: {print_config(self.config)}"
+ f"Dataset builder script error: could not get the list of splits. Dataset: '{self.dataset}', config: {print_config(self.config)}"
diff --git a/src/datasets_preview_backend/queries.py b/src/datasets_preview_backend/queries.py
index 20619ce7..10c973f3 100644
--- a/src/datasets_preview_backend/queries.py
+++ b/src/datasets_preview_backend/queries.py
@@ -17 +17 @@ from datasets_preview_backend.exceptions import (
- DatasetBuilderScriptConfigNoSplitsError,
+ DatasetBuilderNoSplitsError,
@@ -65,3 +65 @@ def get_splits(dataset: str, config: str) -> List[str]:
- raise DatasetBuilderScriptConfigNoSplitsError(
- dataset=dataset, config=config
- )
+ raise DatasetBuilderNoSplitsError(dataset=dataset, config=config)
diff --git a/tests/test_main.py b/tests/test_main.py
index 8a8735d5..0b337421 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -5 +5 @@ from datasets_preview_backend.queries import (
- # DatasetBuilderScriptConfigNoSplitsError,
+ DatasetBuilderNoSplitsError,
@@ -120,4 +120,4 @@ def test_extract_bogus_config():
-# def test_extract_bogus_splits():
-# not sure if we have an example of such an error
-# with pytest.raises(DatasetBuilderScriptConfigNoSplitsError):
-# extract_config_rows("mc4", "sn", 10)
+def test_extract_bogus_splits():
+ # not sure if we have an example of such an error
+ with pytest.raises(DatasetBuilderNoSplitsError):
+ get_splits("journalists_questions", "plain_text")
|
|
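For context, a minimal sketch of the exception pattern these classes follow, assuming the Error base class stores the formatted message (the routes read err.message to build HTTP responses):

class Error(Exception):
    def __init__(self, message):
        self.message = message  # exposed so HTTP handlers can return it verbatim
        super().__init__(message)


class DatasetBuilderNoSplitsError(Error):
    def __init__(self, dataset, config):
        self.dataset = dataset
        self.config = config
        super().__init__(
            f"Dataset builder script error: could not get the list of splits. "
            f"Dataset: '{dataset}', config: '{config}'"
        )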
3966b6c3ed96010a27ec69a4c0b26a28229a3fae
|
Sylvain Lesage
| 2021-07-30T16:53:31 |
feat: 🎸 improve coherence in keys: dataset, config, split
|
diff --git a/quality/test_datasets.py b/quality/test_datasets.py
index d16afa3d..679022f5 100644
--- a/quality/test_datasets.py
+++ b/quality/test_datasets.py
@@ -8 +8 @@ from datasets_preview_backend.queries import (
- get_config_names,
+ get_configs,
@@ -14 +14 @@ from datasets_preview_backend.queries import (
-def get_config_names_report(dataset_id: str):
+def get_configs_report(dataset: str):
@@ -16 +16 @@ def get_config_names_report(dataset_id: str):
- config_names = get_config_names(dataset_id)["config_names"]
+ configs = get_configs(dataset)["configs"]
@@ -18,2 +18,2 @@ def get_config_names_report(dataset_id: str):
- "dataset_id": dataset_id,
- "config_names": list(config_names),
+ "dataset": dataset,
+ "configs": list(configs),
@@ -26,2 +26,2 @@ def get_config_names_report(dataset_id: str):
- "dataset_id": dataset_id,
- "config_names": [],
+ "dataset": dataset,
+ "configs": [],
@@ -34 +34 @@ def get_config_names_report(dataset_id: str):
-def get_split_names_report(dataset_id: str, config_name: str):
+def get_splits_report(dataset: str, config: str):
@@ -36 +36 @@ def get_split_names_report(dataset_id: str, config_name: str):
- split_names = get_splits(dataset_id, config_name)["splits"]
+ splits = get_splits(dataset, config)["splits"]
@@ -38,3 +38,3 @@ def get_split_names_report(dataset_id: str, config_name: str):
- "dataset_id": dataset_id,
- "config_name": config_name,
- "split_names": list(split_names),
+ "dataset": dataset,
+ "config": config,
+ "splits": list(splits),
@@ -47,3 +47,3 @@ def get_split_names_report(dataset_id: str, config_name: str):
- "dataset_id": dataset_id,
- "config_name": config_name,
- "split_names": [],
+ "dataset": dataset,
+ "config": config,
+ "splits": [],
@@ -56 +56 @@ def get_split_names_report(dataset_id: str, config_name: str):
-def get_rows_report(dataset_id: str, config_name: str, split_name: str):
+def get_rows_report(dataset: str, config: str, split: str):
@@ -59,5 +59 @@ def get_rows_report(dataset_id: str, config_name: str, split_name: str):
- rows = extract_rows(dataset_id, config_name, split_name, num_rows)["rows"]
- if len(rows) != num_rows:
- raise ValueError(
- f"number of rows is different from required: {len(rows)} instead of {num_rows}"
- )
+ rows = extract_rows(dataset, config, split, num_rows)["rows"]
@@ -65,3 +61,4 @@ def get_rows_report(dataset_id: str, config_name: str, split_name: str):
- "dataset_id": dataset_id,
- "config_name": config_name,
- "split_name": split_name,
+ "dataset": dataset,
+ "config": config,
+ "split": split,
+ "row_length": len(rows),
@@ -74,3 +71,3 @@ def get_rows_report(dataset_id: str, config_name: str, split_name: str):
- "dataset_id": dataset_id,
- "config_name": config_name,
- "split_name": split_name,
+ "dataset": dataset,
+ "config": config,
+ "split": split,
@@ -84 +81 @@ def export_all_datasets_exceptions():
- dataset_ids = list_datasets(with_community_datasets=True)
+ datasets = list_datasets(with_community_datasets=True)
@@ -87,16 +84,14 @@ def export_all_datasets_exceptions():
- config_names_reports = process_map(
- get_config_names_report, dataset_ids, chunksize=20
- )
-
- print("Get split names for all the pairs (dataset_id, config_name)")
- split_names_dataset_ids = []
- split_names_config_names = []
- for report in config_names_reports:
- for config_name in report["config_names"]:
- # reports with an exception will not contribute to the lists since config_names is empty
- split_names_dataset_ids.append(report["dataset_id"])
- split_names_config_names.append(config_name)
- split_names_reports = process_map(
- get_split_names_report,
- split_names_dataset_ids,
- split_names_config_names,
+ configs_reports = process_map(get_configs_report, datasets, chunksize=20)
+
+ print("Get split names for all the pairs (dataset, config)")
+ splits_datasets = []
+ splits_configs = []
+ for report in configs_reports:
+ for config in report["configs"]:
+ # reports with an exception will not contribute to the lists since configs is empty
+ splits_datasets.append(report["dataset"])
+ splits_configs.append(config)
+ splits_reports = process_map(
+ get_splits_report,
+ splits_datasets,
+ splits_configs,
@@ -106,10 +101,10 @@ def export_all_datasets_exceptions():
- print("Get rows extract for all the tuples (dataset_id, config_name, split_name)")
- rows_dataset_ids = []
- rows_config_names = []
- rows_split_names = []
- for report in split_names_reports:
- for split_name in report["split_names"]:
- # reports with an exception will not contribute to the lists since split_names is empty
- rows_dataset_ids.append(report["dataset_id"])
- rows_config_names.append(report["config_name"])
- rows_split_names.append(split_name)
+ print("Get rows extract for all the tuples (dataset, config, split)")
+ rows_datasets = []
+ rows_configs = []
+ rows_splits = []
+ for report in splits_reports:
+ for split in report["splits"]:
+ # reports with an exception will not contribute to the lists since splits is empty
+ rows_datasets.append(report["dataset"])
+ rows_configs.append(report["config"])
+ rows_splits.append(split)
@@ -118,3 +113,3 @@ def export_all_datasets_exceptions():
- rows_dataset_ids,
- rows_config_names,
- rows_split_names,
+ rows_datasets,
+ rows_configs,
+ rows_splits,
@@ -125,2 +120,2 @@ def export_all_datasets_exceptions():
- "config_names_reports": config_names_reports,
- "split_names_reports": split_names_reports,
+ "configs_reports": configs_reports,
+ "splits_reports": splits_reports,
diff --git a/src/datasets_preview_backend/exceptions.py b/src/datasets_preview_backend/exceptions.py
index 16f853c8..f9f8d1cb 100644
--- a/src/datasets_preview_backend/exceptions.py
+++ b/src/datasets_preview_backend/exceptions.py
@@ -1,2 +1,2 @@
-def print_config(config_name):
- if config_name is None:
+def print_config(config):
+ if config is None:
@@ -5 +5 @@ def print_config(config_name):
- return f"'{config_name}'"
+ return f"'{config}'"
@@ -20 +20 @@ class DatasetBuilderScriptError(Error):
- dataset_id -- the erroneous dataset id
+ dataset -- the erroneous dataset id
@@ -23,3 +23,3 @@ class DatasetBuilderScriptError(Error):
- def __init__(self, dataset_id):
- self.dataset_id = dataset_id
- super().__init__(f"Dataset builder script error. Dataset: '{self.dataset_id}'")
+ def __init__(self, dataset):
+ self.dataset = dataset
+ super().__init__(f"Dataset builder script error. Dataset: '{self.dataset}'")
@@ -32,2 +32,2 @@ class DatasetBuilderScriptConfigNoSplitsError(Error):
- dataset_id -- the erroneous dataset id
- config_name -- the erroneous dataset config_name
+ dataset -- the erroneous dataset id
+ config -- the erroneous dataset config name
@@ -36,3 +36,3 @@ class DatasetBuilderScriptConfigNoSplitsError(Error):
- def __init__(self, dataset_id, config_name):
- self.dataset_id = dataset_id
- self.config_name = config_name
+ def __init__(self, dataset, config):
+ self.dataset = dataset
+ self.config = config
@@ -40 +40 @@ class DatasetBuilderScriptConfigNoSplitsError(Error):
- f"Dataset builder script error: missing .info.splits. Dataset: '{self.dataset_id}', config: {print_config(self.config_name)}"
+ f"Dataset builder script error: missing .info.splits. Dataset: '{self.dataset}', config: {print_config(self.config)}"
@@ -48 +48 @@ class DatasetNotFoundError(Error):
- dataset_id -- the erroneous dataset id
+ dataset -- the erroneous dataset id
@@ -51,3 +51,3 @@ class DatasetNotFoundError(Error):
- def __init__(self, dataset_id):
- self.dataset_id = dataset_id
- super().__init__(f"Dataset not found. Dataset: '{self.dataset_id}'")
+ def __init__(self, dataset):
+ self.dataset = dataset
+ super().__init__(f"Dataset not found. Dataset: '{self.dataset}'")
@@ -60,2 +60,2 @@ class ConfigNotFoundError(Error):
- dataset_id -- the erroneous dataset id
- config_name -- the erroneous dataset config_name
+ dataset -- the erroneous dataset id
+ config -- the erroneous dataset config name
@@ -64,3 +64,3 @@ class ConfigNotFoundError(Error):
- def __init__(self, dataset_id, config_name):
- self.dataset_id = dataset_id
- self.config_name = config_name
+ def __init__(self, dataset, config):
+ self.dataset = dataset
+ self.config = config
@@ -68 +68 @@ class ConfigNotFoundError(Error):
- f"Config not found. Dataset: '{self.dataset_id}', config: {print_config(self.config_name)}"
+ f"Config not found. Dataset: '{self.dataset}', config: {print_config(self.config)}"
@@ -76,3 +76,3 @@ class SplitError(Error):
- dataset_id -- the erroneous dataset id
- config_name -- the erroneous dataset config_name
- split -- the erroneous dataset split
+ dataset -- the erroneous dataset id
+ config -- the erroneous dataset config name
+ split -- the erroneous dataset split name
@@ -81,3 +81,3 @@ class SplitError(Error):
- def __init__(self, dataset_id, config_name, split):
- self.dataset_id = dataset_id
- self.config_name = config_name
+ def __init__(self, dataset, config, split):
+ self.dataset = dataset
+ self.config = config
@@ -86 +86 @@ class SplitError(Error):
- f"Split error. Dataset: '{self.dataset_id}', config: {print_config(self.config_name)}, split: '{self.split}'"
+ f"Split error. Dataset: '{self.dataset}', config: {print_config(self.config)}, split: '{self.split}'"
@@ -94,3 +94,3 @@ class SplitNotImplementedError(Error):
- dataset_id -- the erroneous dataset id
- config_name -- the erroneous dataset config_name
- split -- the erroneous dataset split
+ dataset -- the erroneous dataset id
+ config -- the erroneous dataset config name
+ split -- the erroneous dataset split name
@@ -100,3 +100,3 @@ class SplitNotImplementedError(Error):
- def __init__(self, dataset_id, config_name, split, extension):
- self.dataset_id = dataset_id
- self.config_name = config_name
+ def __init__(self, dataset, config, split, extension):
+ self.dataset = dataset
+ self.config = config
@@ -109 +109 @@ class SplitNotImplementedError(Error):
- f"Extraction protocol not implemented{extension_str}. Dataset: '{self.dataset_id}', config: {print_config(self.config_name)}, split: '{self.split}'"
+ f"Extraction protocol not implemented{extension_str}. Dataset: '{self.dataset}', config: {print_config(self.config)}, split: '{self.split}'"
diff --git a/src/datasets_preview_backend/queries.py b/src/datasets_preview_backend/queries.py
index 38df9e46..20619ce7 100644
--- a/src/datasets_preview_backend/queries.py
+++ b/src/datasets_preview_backend/queries.py
@@ -27 +27 @@ from datasets_preview_backend.exceptions import (
-def get_config_names(dataset_id: str) -> List[str]:
+def get_configs(dataset: str) -> List[str]:
@@ -29 +29 @@ def get_config_names(dataset_id: str) -> List[str]:
- module_path, *_ = prepare_module(dataset_id, dataset=True)
+ module_path, *_ = prepare_module(dataset, dataset=True)
@@ -32 +32 @@ def get_config_names(dataset_id: str) -> List[str]:
- raise DatasetNotFoundError(dataset_id=dataset_id)
+ raise DatasetNotFoundError(dataset=dataset)
@@ -34 +34 @@ def get_config_names(dataset_id: str) -> List[str]:
- raise DatasetBuilderScriptError(dataset_id=dataset_id)
+ raise DatasetBuilderScriptError(dataset=dataset)
@@ -36,5 +36,3 @@ def get_config_names(dataset_id: str) -> List[str]:
- config_names = [c.name for c in builder_cls.BUILDER_CONFIGS] or [None]
- logging.debug(
- f"The dataset builder has {len(config_names)} configs: {config_names}"
- )
- return {"dataset_id": dataset_id, "config_names": config_names}
+ configs = [c.name for c in builder_cls.BUILDER_CONFIGS] or [None]
+ logging.debug(f"The dataset builder has {len(configs)} configs: {configs}")
+ return {"dataset": dataset, "configs": configs}
@@ -43 +41 @@ def get_config_names(dataset_id: str) -> List[str]:
-def get_splits(dataset_id: str, config_name: str) -> List[str]:
+def get_splits(dataset: str, config: str) -> List[str]:
@@ -45 +43 @@ def get_splits(dataset_id: str, config_name: str) -> List[str]:
- builder = load_dataset_builder(dataset_id, name=config_name)
+ builder = load_dataset_builder(dataset, name=config)
@@ -48,2 +46,2 @@ def get_splits(dataset_id: str, config_name: str) -> List[str]:
- if message.startswith(f"BuilderConfig {config_name} not found"):
- raise ConfigNotFoundError(dataset_id=dataset_id, config_name=config_name)
+ if message.startswith(f"BuilderConfig {config} not found"):
+ raise ConfigNotFoundError(dataset=dataset, config=config)
@@ -51 +49 @@ def get_splits(dataset_id: str, config_name: str) -> List[str]:
- raise ConfigNotFoundError(dataset_id=dataset_id, config_name=config_name)
+ raise ConfigNotFoundError(dataset=dataset, config=config)
@@ -55 +53 @@ def get_splits(dataset_id: str, config_name: str) -> List[str]:
- raise DatasetBuilderScriptError(dataset_id=dataset_id)
+ raise DatasetBuilderScriptError(dataset=dataset)
@@ -68 +66 @@ def get_splits(dataset_id: str, config_name: str) -> List[str]:
- dataset_id=dataset_id, config_name=config_name
+ dataset=dataset, config=config
@@ -72 +70 @@ def get_splits(dataset_id: str, config_name: str) -> List[str]:
- return {"dataset_id": dataset_id, "config_name": config_name, "splits": splits}
+ return {"dataset": dataset, "config": config, "splits": splits}
@@ -75 +73 @@ def get_splits(dataset_id: str, config_name: str) -> List[str]:
-def extract_rows(dataset_id: str, config_name: str, split: str, num_rows: int):
+def extract_rows(dataset: str, config: str, split: str, num_rows: int):
@@ -77 +75 @@ def extract_rows(dataset_id: str, config_name: str, split: str, num_rows: int):
- f"asked for {num_rows} first rows of dataset {dataset_id} - {config_name} - {split}"
+ f"asked for {num_rows} first rows of dataset {dataset} - {config} - {split}"
@@ -81,2 +79,2 @@ def extract_rows(dataset_id: str, config_name: str, split: str, num_rows: int):
- dataset: IterableDataset = load_dataset(
- dataset_id, name=config_name, split=split, streaming=True
+ iterable_dataset: IterableDataset = load_dataset(
+ dataset, name=config, split=split, streaming=True
@@ -85 +83 @@ def extract_rows(dataset_id: str, config_name: str, split: str, num_rows: int):
- raise DatasetNotFoundError(dataset_id=dataset_id)
+ raise DatasetNotFoundError(dataset=dataset)
@@ -96,2 +94,2 @@ def extract_rows(dataset_id: str, config_name: str, split: str, num_rows: int):
- dataset_id=dataset_id,
- config_name=config_name,
+ dataset=dataset,
+ config=config,
@@ -103,2 +101,2 @@ def extract_rows(dataset_id: str, config_name: str, split: str, num_rows: int):
- if message.startswith(f"BuilderConfig {config_name} not found"):
- raise ConfigNotFoundError(dataset_id=dataset_id, config_name=config_name)
+ if message.startswith(f"BuilderConfig {config} not found"):
+ raise ConfigNotFoundError(dataset=dataset, config=config)
@@ -106 +104 @@ def extract_rows(dataset_id: str, config_name: str, split: str, num_rows: int):
- raise ConfigNotFoundError(dataset_id=dataset_id, config_name=config_name)
+ raise ConfigNotFoundError(dataset=dataset, config=config)
@@ -110,3 +108 @@ def extract_rows(dataset_id: str, config_name: str, split: str, num_rows: int):
- raise SplitError(
- dataset_id=dataset_id, config_name=config_name, split=split
- )
+ raise SplitError(dataset=dataset, config=config, split=split)
@@ -116 +112 @@ def extract_rows(dataset_id: str, config_name: str, split: str, num_rows: int):
- raise DatasetBuilderScriptError(dataset_id=dataset_id)
+ raise DatasetBuilderScriptError(dataset=dataset)
@@ -118 +114 @@ def extract_rows(dataset_id: str, config_name: str, split: str, num_rows: int):
- rows = list(dataset.take(num_rows))
+ rows = list(iterable_dataset.take(num_rows))
@@ -121 +117 @@ def extract_rows(dataset_id: str, config_name: str, split: str, num_rows: int):
- f"could not read all the required rows ({len(rows)} / {num_rows}) from dataset {dataset_id} - {config_name} - {split}"
+ f"could not read all the required rows ({len(rows)} / {num_rows}) from dataset {dataset} - {config} - {split}"
@@ -125,2 +121,2 @@ def extract_rows(dataset_id: str, config_name: str, split: str, num_rows: int):
- "dataset_id": dataset_id,
- "config_name": config_name,
+ "dataset": dataset,
+ "config": config,
diff --git a/src/datasets_preview_backend/routes.py b/src/datasets_preview_backend/routes.py
index 7494df6d..743619b9 100644
--- a/src/datasets_preview_backend/routes.py
+++ b/src/datasets_preview_backend/routes.py
@@ -5 +5 @@ from datasets_preview_backend.config import EXTRACT_ROWS_LIMIT
-from datasets_preview_backend.queries import extract_rows, get_config_names, get_splits
+from datasets_preview_backend.queries import extract_rows, get_configs, get_splits
@@ -22,3 +22,3 @@ async def rows(request: Request):
- dataset_id: str = request.query_params.get("dataset")
- config_name: str = request.query_params.get("config")
- split_name: str = request.query_params.get("split")
+ dataset: str = request.query_params.get("dataset")
+ config: str = request.query_params.get("config")
+ split: str = request.query_params.get("split")
@@ -29 +29 @@ async def rows(request: Request):
- if dataset_id is None:
+ if dataset is None:
@@ -34 +34 @@ async def rows(request: Request):
- if split_name is None:
+ if split is None:
@@ -40 +40 @@ async def rows(request: Request):
- return JSONResponse(extract_rows(dataset_id, config_name, split_name, num_rows))
+ return JSONResponse(extract_rows(dataset, config, split, num_rows))
@@ -54 +54 @@ async def configs(request: Request):
- dataset_id: str = request.query_params.get("dataset")
+ dataset: str = request.query_params.get("dataset")
@@ -56 +56 @@ async def configs(request: Request):
- if dataset_id is None:
+ if dataset is None:
@@ -62 +62 @@ async def configs(request: Request):
- return JSONResponse(get_config_names(dataset_id))
+ return JSONResponse(get_configs(dataset))
@@ -71,2 +71,2 @@ async def splits(request: Request):
- dataset_id: str = request.query_params.get("dataset")
- config_name: str = request.query_params.get("config")
+ dataset: str = request.query_params.get("dataset")
+ config: str = request.query_params.get("config")
@@ -74 +74 @@ async def splits(request: Request):
- if dataset_id is None:
+ if dataset is None:
@@ -81 +81 @@ async def splits(request: Request):
- return JSONResponse(get_splits(dataset_id, config_name))
+ return JSONResponse(get_splits(dataset, config))
diff --git a/tests/test_main.py b/tests/test_main.py
index 1819b912..8a8735d5 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -10 +10 @@ from datasets_preview_backend.queries import (
- get_config_names,
+ get_configs,
@@ -17,8 +17,8 @@ def test_get_configs():
- dataset_id = "acronym_identification"
- response = get_config_names(dataset_id)
- assert "dataset_id" in response
- assert response["dataset_id"] == dataset_id
- assert "config_names" in response
- config_names = response["config_names"]
- assert len(config_names) == 1
- assert config_names[0] is None
+ dataset = "acronym_identification"
+ response = get_configs(dataset)
+ assert "dataset" in response
+ assert response["dataset"] == dataset
+ assert "configs" in response
+ configs = response["configs"]
+ assert len(configs) == 1
+ assert configs[0] is None
@@ -26,3 +26,3 @@ def test_get_configs():
- config_names = get_config_names("glue")["config_names"]
- assert len(config_names) == 12
- assert "cola" in config_names
+ configs = get_configs("glue")["configs"]
+ assert len(configs) == 12
+ assert "cola" in configs
@@ -32,7 +32,7 @@ def test_get_splits():
- dataset_id = "acronym_identification"
- config_name = None
- response = get_splits(dataset_id, config_name)
- assert "dataset_id" in response
- assert response["dataset_id"] == dataset_id
- assert "config_name" in response
- assert response["config_name"] == config_name
+ dataset = "acronym_identification"
+ config = None
+ response = get_splits(dataset, config)
+ assert "dataset" in response
+ assert response["dataset"] == dataset
+ assert "config" in response
+ assert response["config"] == config
@@ -64,2 +64,2 @@ def test_extract_split_rows():
- dataset_id = "acronym_identification"
- config_name = None
+ dataset = "acronym_identification"
+ config = None
@@ -68,3 +68,3 @@ def test_extract_split_rows():
- extract = extract_rows(dataset_id, config_name, split, num_rows)
- assert "dataset_id" in extract and extract["dataset_id"] == dataset_id
- assert "config_name" in extract and extract["config_name"] == config_name
+ extract = extract_rows(dataset, config, split, num_rows)
+ assert "dataset" in extract and extract["dataset"] == dataset
+ assert "config" in extract and extract["config"] == config
@@ -79,2 +79,2 @@ def test_extract_split_rows_num_rows():
- dataset_id = "acronym_identification"
- config_name = None
+ dataset = "acronym_identification"
+ config = None
@@ -83 +83 @@ def test_extract_split_rows_num_rows():
- extract = extract_rows(dataset_id, config_name, split, num_rows)
+ extract = extract_rows(dataset, config, split, num_rows)
|
|
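The core pattern in the benchmark script above is a fan-out: nested reports are flattened into parallel argument lists so process_map can walk them in lockstep. A self-contained sketch of that step, with toy data:

configs_reports = [
    {"dataset": "glue", "configs": ["cola", "sst2"]},
    {"dataset": "broken", "configs": []},  # a failed report contributes nothing
]
splits_datasets = []
splits_configs = []
for report in configs_reports:
    for config in report["configs"]:
        splits_datasets.append(report["dataset"])
        splits_configs.append(config)
assert splits_datasets == ["glue", "glue"]
assert splits_configs == ["cola", "sst2"]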
f50c3dc1de1b6636d86544886ab81e2968591460
|
Sylvain Lesage
| 2021-07-30T16:37:14 |
feat: 🎸 change response format for /splits and /configs
|
diff --git a/quality/test_datasets.py b/quality/test_datasets.py
index cf94362f..d16afa3d 100644
--- a/quality/test_datasets.py
+++ b/quality/test_datasets.py
@@ -16 +16 @@ def get_config_names_report(dataset_id: str):
- config_names = get_config_names(dataset_id)
+ config_names = get_config_names(dataset_id)["config_names"]
@@ -36 +36 @@ def get_split_names_report(dataset_id: str, config_name: str):
- split_names = get_splits(dataset_id, config_name)
+ split_names = get_splits(dataset_id, config_name)["splits"]
@@ -59,3 +59,5 @@ def get_rows_report(dataset_id: str, config_name: str, split_name: str):
- extract = extract_rows(dataset_id, config_name, split_name, num_rows)
- if len(extract["rows"]) != num_rows:
- raise ValueError(f"{len(extract['rows'])} rows instead of {num_rows}")
+ rows = extract_rows(dataset_id, config_name, split_name, num_rows)["rows"]
+ if len(rows) != num_rows:
+ raise ValueError(
+ f"number of rows is different from required: {len(rows)} instead of {num_rows}"
+ )
diff --git a/src/datasets_preview_backend/queries.py b/src/datasets_preview_backend/queries.py
index b3cc6c1d..38df9e46 100644
--- a/src/datasets_preview_backend/queries.py
+++ b/src/datasets_preview_backend/queries.py
@@ -40 +40 @@ def get_config_names(dataset_id: str) -> List[str]:
- return config_names
+ return {"dataset_id": dataset_id, "config_names": config_names}
@@ -72 +72 @@ def get_splits(dataset_id: str, config_name: str) -> List[str]:
- return splits
+ return {"dataset_id": dataset_id, "config_name": config_name, "splits": splits}
diff --git a/tests/test_main.py b/tests/test_main.py
index e0f32501..1819b912 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -17 +17,6 @@ def test_get_configs():
- config_names = get_config_names("acronym_identification")
+ dataset_id = "acronym_identification"
+ response = get_config_names(dataset_id)
+ assert "dataset_id" in response
+ assert response["dataset_id"] == dataset_id
+ assert "config_names" in response
+ config_names = response["config_names"]
@@ -21 +26 @@ def test_get_configs():
- config_names = get_config_names("glue")
+ config_names = get_config_names("glue")["config_names"]
@@ -26,2 +31,10 @@ def test_get_configs():
-def test_get_splits(): # sourcery skip: extract-duplicate-method
- splits = get_splits("acronym_identification", None)
+def test_get_splits():
+ dataset_id = "acronym_identification"
+ config_name = None
+ response = get_splits(dataset_id, config_name)
+ assert "dataset_id" in response
+ assert response["dataset_id"] == dataset_id
+ assert "config_name" in response
+ assert response["config_name"] == config_name
+ assert "splits" in response
+ splits = response["splits"]
@@ -31 +44 @@ def test_get_splits(): # sourcery skip: extract-duplicate-method
- splits = get_splits("glue", "ax")
+ splits = get_splits("glue", "ax")["splits"]
@@ -37 +50 @@ def test_get_splits(): # sourcery skip: extract-duplicate-method
- splits = get_splits("hda_nli_hindi", "HDA nli hindi")
+ splits = get_splits("hda_nli_hindi", "HDA nli hindi")["splits"]
@@ -43 +56 @@ def test_get_splits(): # sourcery skip: extract-duplicate-method
- splits = get_splits("classla/copa_hr", "copa_hr")
+ splits = get_splits("classla/copa_hr", "copa_hr")["splits"]
@@ -46 +59 @@ def test_get_splits(): # sourcery skip: extract-duplicate-method
- splits = get_splits("mc4", "sn")
+ splits = get_splits("mc4", "sn")["splits"]
|
|
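Wrapping the lists in a dict makes each payload self-describing: the echoed inputs let a caller (or a worker pool collecting results out of order) match a response to its request without extra bookkeeping. The shape after this commit, with hypothetical values:

response = {
    "dataset_id": "glue",
    "config_name": "ax",
    "splits": ["test"],
}
assert response["splits"] == ["test"]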
54cf304b6e3ebfbc8cdc6554a0d5512947bfb6b3
|
Sylvain Lesage
| 2021-07-30T16:21:44 |
feat: 🎸 add /splits route
|
diff --git a/src/datasets_preview_backend/exceptions.py b/src/datasets_preview_backend/exceptions.py
index d43154e4..16f853c8 100644
--- a/src/datasets_preview_backend/exceptions.py
+++ b/src/datasets_preview_backend/exceptions.py
@@ -0,0 +1,7 @@
+def print_config(config_name):
+ if config_name is None:
+ return "None"
+ else:
+ return f"'{config_name}'"
+
+
@@ -33 +40 @@ class DatasetBuilderScriptConfigNoSplitsError(Error):
- f"Dataset builder script error: missing .info.splits. Dataset: '{self.dataset_id}', config: '{self.config_name}'"
+ f"Dataset builder script error: missing .info.splits. Dataset: '{self.dataset_id}', config: {print_config(self.config_name)}"
@@ -61 +68 @@ class ConfigNotFoundError(Error):
- f"Config not found. Dataset: '{self.dataset_id}', config: '{self.config_name}'"
+ f"Config not found. Dataset: '{self.dataset_id}', config: {print_config(self.config_name)}"
@@ -79 +86 @@ class SplitError(Error):
- f"Split error. Dataset: '{self.dataset_id}', config: '{self.config_name}', script: '{self.split}'"
+ f"Split error. Dataset: '{self.dataset_id}', config: {print_config(self.config_name)}, split: '{self.split}'"
@@ -102 +109 @@ class SplitNotImplementedError(Error):
- f"Extraction protocol not implemented{extension_str}. Dataset: '{self.dataset_id}', config: '{self.config_name}', script: '{self.split}'"
+ f"Extraction protocol not implemented{extension_str}. Dataset: '{self.dataset_id}', config: {print_config(self.config_name)}, split: '{self.split}'"
diff --git a/src/datasets_preview_backend/main.py b/src/datasets_preview_backend/main.py
index 371b6f3d..fc9ea1af 100644
--- a/src/datasets_preview_backend/main.py
+++ b/src/datasets_preview_backend/main.py
@@ -6 +6 @@ from datasets_preview_backend.config import PORT
-from datasets_preview_backend.routes import healthcheck, rows, configs
+from datasets_preview_backend.routes import healthcheck, rows, configs, splits
@@ -14,0 +15 @@ def start():
+ Route("/splits", endpoint=splits),
diff --git a/src/datasets_preview_backend/queries.py b/src/datasets_preview_backend/queries.py
index 98a12dac..b3cc6c1d 100644
--- a/src/datasets_preview_backend/queries.py
+++ b/src/datasets_preview_backend/queries.py
@@ -49,0 +50,2 @@ def get_splits(dataset_id: str, config_name: str) -> List[str]:
+ elif message.startswith(f"Config name is missing."):
+ raise ConfigNotFoundError(dataset_id=dataset_id, config_name=config_name)
diff --git a/src/datasets_preview_backend/routes.py b/src/datasets_preview_backend/routes.py
index c35c6533..7494df6d 100644
--- a/src/datasets_preview_backend/routes.py
+++ b/src/datasets_preview_backend/routes.py
@@ -5 +5 @@ from datasets_preview_backend.config import EXTRACT_ROWS_LIMIT
-from datasets_preview_backend.queries import extract_rows, get_config_names
+from datasets_preview_backend.queries import extract_rows, get_config_names, get_splits
@@ -67,0 +68,19 @@ async def configs(request: Request):
+
+
+async def splits(request: Request):
+ dataset_id: str = request.query_params.get("dataset")
+ config_name: str = request.query_params.get("config")
+
+ if dataset_id is None:
+ return PlainTextResponse(
+ "'dataset' is a required query parameter.", status_code=400
+ )
+ # note: config_name must not be set to refer to the None config_name (the majority of datasets).
+
+ try:
+ return JSONResponse(get_splits(dataset_id, config_name))
+ except (ConfigNotFoundError) as err:
+ return PlainTextResponse(err.message, status_code=404)
+ except (DatasetBuilderScriptError,) as err:
+ return PlainTextResponse(err.message, status_code=400)
+ # other exceptions will generate a 500 response
|
|
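A quick check of the print_config helper introduced above, which keeps error messages readable for the common unnamed-config case:

def print_config(config):
    if config is None:
        return "None"
    else:
        return f"'{config}'"


assert print_config(None) == "None"
assert print_config("copa_hr") == "'copa_hr'"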
10c919338df4db6d9057b08b5c3b63baa4aaccab
|
Sylvain Lesage
| 2021-07-30T16:13:40 |
feat: 🎸 add /configs route
|
diff --git a/src/datasets_preview_backend/main.py b/src/datasets_preview_backend/main.py
index c636acc4..371b6f3d 100644
--- a/src/datasets_preview_backend/main.py
+++ b/src/datasets_preview_backend/main.py
@@ -6 +6 @@ from datasets_preview_backend.config import PORT
-from datasets_preview_backend.routes import healthcheck, rows
+from datasets_preview_backend.routes import healthcheck, rows, configs
@@ -13,0 +14 @@ def start():
+ Route("/configs", endpoint=configs),
diff --git a/src/datasets_preview_backend/routes.py b/src/datasets_preview_backend/routes.py
index a80f2b1c..c35c6533 100644
--- a/src/datasets_preview_backend/routes.py
+++ b/src/datasets_preview_backend/routes.py
@@ -50,0 +51,17 @@ async def rows(request: Request):
+
+
+async def configs(request: Request):
+ dataset_id: str = request.query_params.get("dataset")
+
+ if dataset_id is None:
+ return PlainTextResponse(
+ "'dataset' is a required query parameter.", status_code=400
+ )
+
+ try:
+ return JSONResponse(get_config_names(dataset_id))
+ except (DatasetNotFoundError) as err:
+ return PlainTextResponse(err.message, status_code=404)
+ except (DatasetBuilderScriptError,) as err:
+ return PlainTextResponse(err.message, status_code=400)
+ # other exceptions will generate a 500 response
|
|
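A hypothetical client call against the new route (host and port assumed), showing how the handler's status codes map back to the caught exceptions:

import requests

r = requests.get("http://localhost:8000/configs", params={"dataset": "glue"})
if r.status_code == 404:
    print("dataset not found")             # DatasetNotFoundError
elif r.status_code == 400:
    print("dataset builder script error")  # DatasetBuilderScriptError
else:
    print(r.json())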
ee257684864cfd227b130236235e130aed08a014
|
Sylvain Lesage
| 2021-07-30T16:07:11 |
fix: 🐛 fix name
|
diff --git a/quality/test_datasets.py b/quality/test_datasets.py
index 519c9e25..cf94362f 100644
--- a/quality/test_datasets.py
+++ b/quality/test_datasets.py
@@ -8,3 +8,3 @@ from datasets_preview_backend.queries import (
- get_dataset_config_names,
- get_config_splits,
- extract_split_rows,
+ get_config_names,
+ get_splits,
+ extract_rows,
@@ -16 +16 @@ def get_config_names_report(dataset_id: str):
- config_names = get_dataset_config_names(dataset_id)
+ config_names = get_config_names(dataset_id)
@@ -36 +36 @@ def get_split_names_report(dataset_id: str, config_name: str):
- split_names = get_config_splits(dataset_id, config_name)
+ split_names = get_splits(dataset_id, config_name)
@@ -59 +59 @@ def get_rows_report(dataset_id: str, config_name: str, split_name: str):
- extract = extract_split_rows(dataset_id, config_name, split_name, num_rows)
+ extract = extract_rows(dataset_id, config_name, split_name, num_rows)
diff --git a/src/datasets_preview_backend/queries.py b/src/datasets_preview_backend/queries.py
index e2e648b1..98a12dac 100644
--- a/src/datasets_preview_backend/queries.py
+++ b/src/datasets_preview_backend/queries.py
@@ -27 +27 @@ from datasets_preview_backend.exceptions import (
-def get_dataset_config_names(dataset_id: str) -> List[str]:
+def get_config_names(dataset_id: str) -> List[str]:
@@ -43 +43 @@ def get_dataset_config_names(dataset_id: str) -> List[str]:
-def get_config_splits(dataset_id: str, config_name: str) -> List[str]:
+def get_splits(dataset_id: str, config_name: str) -> List[str]:
diff --git a/src/datasets_preview_backend/routes.py b/src/datasets_preview_backend/routes.py
index 9712b3c7..a80f2b1c 100644
--- a/src/datasets_preview_backend/routes.py
+++ b/src/datasets_preview_backend/routes.py
@@ -5 +5 @@ from datasets_preview_backend.config import EXTRACT_ROWS_LIMIT
-from datasets_preview_backend.queries import extract_split_rows
+from datasets_preview_backend.queries import extract_rows, get_config_names
@@ -40,3 +40 @@ async def rows(request: Request):
- return JSONResponse(
- extract_split_rows(dataset_id, config_name, split_name, num_rows)
- )
+ return JSONResponse(extract_rows(dataset_id, config_name, split_name, num_rows))
diff --git a/tests/test_main.py b/tests/test_main.py
index 46ff0365..e0f32501 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -10,2 +10,2 @@ from datasets_preview_backend.queries import (
- get_dataset_config_names,
- get_config_splits,
+ get_config_names,
+ get_splits,
@@ -17 +17 @@ def test_get_configs():
- config_names = get_dataset_config_names("acronym_identification")
+ config_names = get_config_names("acronym_identification")
@@ -21 +21 @@ def test_get_configs():
- config_names = get_dataset_config_names("glue")
+ config_names = get_config_names("glue")
@@ -27 +27 @@ def test_get_splits(): # sourcery skip: extract-duplicate-method
- splits = get_config_splits("acronym_identification", None)
+ splits = get_splits("acronym_identification", None)
@@ -31 +31 @@ def test_get_splits(): # sourcery skip: extract-duplicate-method
- splits = get_config_splits("glue", "ax")
+ splits = get_splits("glue", "ax")
@@ -37 +37 @@ def test_get_splits(): # sourcery skip: extract-duplicate-method
- splits = get_config_splits("hda_nli_hindi", "HDA nli hindi")
+ splits = get_splits("hda_nli_hindi", "HDA nli hindi")
@@ -43 +43 @@ def test_get_splits(): # sourcery skip: extract-duplicate-method
- splits = get_config_splits("classla/copa_hr", "copa_hr")
+ splits = get_splits("classla/copa_hr", "copa_hr")
@@ -46 +46 @@ def test_get_splits(): # sourcery skip: extract-duplicate-method
- splits = get_config_splits("mc4", "sn")
+ splits = get_splits("mc4", "sn")
|
|
985e895ec2ae63cfca5f753f9006b1176d68ade0
|
Sylvain Lesage
| 2021-07-30T16:02:12 |
refactor: 💡 remove unused functions
|
diff --git a/src/datasets_preview_backend/exceptions.py b/src/datasets_preview_backend/exceptions.py
index 8e37b3be..d43154e4 100644
--- a/src/datasets_preview_backend/exceptions.py
+++ b/src/datasets_preview_backend/exceptions.py
@@ -21,16 +20,0 @@ class DatasetBuilderScriptError(Error):
-class DatasetBuilderScriptConfigError(Error):
- """Exception raised if the builder script fails for this config.
-
- Attributes:
- dataset_id -- the erroneous dataset id
- config_name -- the erroneous dataset config_name
- """
-
- def __init__(self, dataset_id, config_name):
- self.dataset_id = dataset_id
- self.config_name = config_name
- super().__init__(
- f"Dataset builder script error. Dataset: '{self.dataset_id}', config: '{self.config_name}'"
- )
-
-
diff --git a/src/datasets_preview_backend/queries.py b/src/datasets_preview_backend/queries.py
index f29dde0f..e2e648b1 100644
--- a/src/datasets_preview_backend/queries.py
+++ b/src/datasets_preview_backend/queries.py
@@ -17 +16,0 @@ from datasets_preview_backend.exceptions import (
- DatasetBuilderScriptConfigError,
@@ -24,0 +24,2 @@ from datasets_preview_backend.exceptions import (
+# TODO: log the traces on every caught exception
+
@@ -52,3 +53 @@ def get_config_splits(dataset_id: str, config_name: str) -> List[str]:
- raise DatasetBuilderScriptConfigError(
- dataset_id=dataset_id, config_name=config_name
- )
+ raise DatasetBuilderScriptError(dataset_id=dataset_id)
@@ -74 +73 @@ def get_config_splits(dataset_id: str, config_name: str) -> List[str]:
-def extract_split_rows(dataset_id: str, config_name: str, split: str, num_rows: int):
+def extract_rows(dataset_id: str, config_name: str, split: str, num_rows: int):
@@ -82,0 +82,2 @@ def extract_split_rows(dataset_id: str, config_name: str, split: str, num_rows:
+ except FileNotFoundError as err:
+ raise DatasetNotFoundError(dataset_id=dataset_id)
@@ -101,0 +103,2 @@ def extract_split_rows(dataset_id: str, config_name: str, split: str, num_rows:
+ elif message.startswith(f"Config name is missing."):
+ raise ConfigNotFoundError(dataset_id=dataset_id, config_name=config_name)
@@ -109,0 +113,2 @@ def extract_split_rows(dataset_id: str, config_name: str, split: str, num_rows:
+ except (ModuleNotFoundError, RuntimeError, TypeError):
+ raise DatasetBuilderScriptError(dataset_id=dataset_id)
@@ -123,31 +127,0 @@ def extract_split_rows(dataset_id: str, config_name: str, split: str, num_rows:
-
-
-def extract_config_rows(dataset_id: str, config_name: str, num_rows: int):
- logging.debug(
- f"asked for {num_rows} first rows of dataset {dataset_id} - {config_name}"
- )
-
- splits = get_config_splits(dataset_id, config_name)
-
- return {
- "dataset_id": dataset_id,
- "config_name": config_name,
- "splits": {
- split: extract_split_rows(dataset_id, config_name, split, num_rows)
- for split in splits
- },
- }
-
-
-def extract_dataset_rows(dataset_id: str, num_rows: int):
- logging.debug(f"asked for {num_rows} first rows of dataset {dataset_id}")
-
- config_names = get_dataset_config_names(dataset_id)
-
- return {
- "dataset_id": dataset_id,
- "configs": {
- config_name: extract_config_rows(dataset_id, config_name, num_rows)
- for config_name in config_names
- },
- }
diff --git a/src/datasets_preview_backend/routes.py b/src/datasets_preview_backend/routes.py
index d54ee588..9712b3c7 100644
--- a/src/datasets_preview_backend/routes.py
+++ b/src/datasets_preview_backend/routes.py
@@ -5 +5 @@ from datasets_preview_backend.config import EXTRACT_ROWS_LIMIT
-from datasets_preview_backend.queries import extract_dataset_rows
+from datasets_preview_backend.queries import extract_split_rows
@@ -9 +8,0 @@ from datasets_preview_backend.exceptions import (
- DatasetBuilderScriptConfigError,
@@ -23,3 +22,3 @@ async def rows(request: Request):
- if "dataset" not in request.query_params:
- return PlainTextResponse("Missing query parameter: 'dataset'", status_code=400)
- dataset_id: str = request.query_params["dataset"]
+ dataset_id: str = request.query_params.get("dataset")
+ config_name: str = request.query_params.get("config")
+ split_name: str = request.query_params.get("split")
@@ -29,0 +29,10 @@ async def rows(request: Request):
+ if dataset_id is None:
+ return PlainTextResponse(
+ "'dataset' is a required query parameter.", status_code=400
+ )
+ # note: config_name must not be set to refer to the None config_name (the majority of datasets).
+ if split_name is None:
+ return PlainTextResponse(
+ "'split' is a required query parameter.", status_code=400
+ )
+
@@ -31 +40,3 @@ async def rows(request: Request):
- return JSONResponse(extract_dataset_rows(dataset_id, num_rows))
+ return JSONResponse(
+ extract_split_rows(dataset_id, config_name, split_name, num_rows)
+ )
@@ -36 +46,0 @@ async def rows(request: Request):
- DatasetBuilderScriptConfigError,
diff --git a/tests/test_main.py b/tests/test_main.py
index ebe539dd..46ff0365 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -5 +4,0 @@ from datasets_preview_backend.queries import (
- DatasetBuilderScriptConfigError,
@@ -13,3 +12 @@ from datasets_preview_backend.queries import (
- extract_dataset_rows,
- extract_config_rows,
- extract_split_rows,
+ extract_rows,
@@ -58 +55 @@ def test_extract_split_rows():
- extract = extract_split_rows(dataset_id, config_name, split, num_rows)
+ extract = extract_rows(dataset_id, config_name, split, num_rows)
@@ -73 +70 @@ def test_extract_split_rows_num_rows():
- extract = extract_split_rows(dataset_id, config_name, split, num_rows)
+ extract = extract_rows(dataset_id, config_name, split, num_rows)
@@ -81 +78 @@ def test_extract_unknown_config():
- extract_config_rows("glue", "doesnotexist", 100)
+ extract_rows("glue", "doesnotexist", "train", 100)
@@ -83 +80 @@ def test_extract_unknown_config():
- extract_split_rows("glue", "doesnotexist", "train", 100)
+ extract_rows("glue", None, "train", 100)
@@ -88,42 +85 @@ def test_extract_unknown_split():
- extract_split_rows("glue", "ax", "train", 100)
-
-
-def test_extract_config_rows():
- dataset_id = "glue"
- config_name = "cola"
- num_rows = 100
- extract = extract_config_rows(dataset_id, config_name, num_rows)
- assert "dataset_id" in extract and extract["dataset_id"] == dataset_id
- assert "config_name" in extract and extract["config_name"] == config_name
- assert "splits" in extract
- splits = extract["splits"]
- assert len(splits) == 3
- assert "train" in splits
- split = splits["train"]
- rows = split["rows"]
- assert len(rows) == 100
- assert (
- rows[0]["sentence"]
- == "Our friends won't buy this analysis, let alone the next one we propose."
- )
-
-
-def test_extract_dataset():
- dataset_id = "acronym_identification"
- num_rows = 100
- extract = extract_dataset_rows(dataset_id, num_rows)
- assert "dataset_id" in extract and extract["dataset_id"] == dataset_id
- assert "configs" in extract
- configs = extract["configs"]
- assert None in configs
- assert len(configs) == 1
- assert len(configs[None]["splits"]["train"]["rows"]) == num_rows
-
- dataset_id = "adversarial_qa"
- num_rows = 100
- extract = extract_dataset_rows(dataset_id, num_rows)
- configs = extract["configs"]
- assert len(configs) == 4
- assert "adversarialQA" in configs
- assert len(configs["adversarialQA"]["splits"]["train"]["rows"]) == num_rows
- assert configs["adversarialQA"]["splits"]["train"]["rows"][0]["title"] == "Brain"
+ extract_rows("glue", "ax", "train", 100)
@@ -134 +90 @@ def test_extract_unknown_dataset():
- extract_dataset_rows("doesnotexist", 100)
+ extract_rows("doesnotexist", None, "train", 100)
@@ -136,6 +92 @@ def test_extract_unknown_dataset():
- extract_dataset_rows("AConsApart/anime_subtitles_DialoGPT", 100)
-
-
-def test_extract_unknown_config():
- with pytest.raises(ConfigNotFoundError):
- extract_config_rows("glue", "doesnotexist", 100)
+ extract_rows("AConsApart/anime_subtitles_DialoGPT", None, "train", 100)
@@ -146 +97 @@ def test_extract_bogus_dataset():
- extract_dataset_rows("TimTreasure4/Test", 100)
+ extract_rows("TimTreasure4/Test", None, "train", 100)
@@ -150,4 +101,4 @@ def test_extract_bogus_config():
- with pytest.raises(DatasetBuilderScriptConfigError):
- extract_config_rows("Valahaar/wsdmt", None, 10)
- with pytest.raises(DatasetBuilderScriptConfigError):
- extract_config_rows("nateraw/image-folder", None, 10)
+ with pytest.raises(DatasetBuilderScriptError):
+ extract_rows("Valahaar/wsdmt", None, "train", 10)
+ with pytest.raises(DatasetBuilderScriptError):
+ extract_rows("nateraw/image-folder", None, "train", 10)
@@ -164 +115 @@ def test_extract_not_implemented_split():
- extract_split_rows("ade_corpus_v2", "Ade_corpus_v2_classification", "train", 10)
+ extract_rows("ade_corpus_v2", "Ade_corpus_v2_classification", "train", 10)
@@ -169 +120 @@ def test_tar_gz_extension():
- extract_split_rows("air_dialogue", "air_dialogue_data", "train", 10)
+ extract_rows("air_dialogue", "air_dialogue_data", "train", 10)
|
|
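The streaming path kept by this refactor never materializes the full dataset: load_dataset(..., streaming=True) returns an IterableDataset, and .take(n) stops after n examples. A minimal sketch, assuming network access to the Hub:

from datasets import load_dataset

iterable_dataset = load_dataset("glue", name="cola", split="train", streaming=True)
rows = list(iterable_dataset.take(10))  # fetches only the first 10 examples
print(len(rows))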
b2ab4bdea476dff3badda501ca442d64a221743e
|
Sylvain Lesage
| 2021-07-30T15:19:18 |
feat: 🎸 change endpoint /extract to /rows
|
diff --git a/src/datasets_preview_backend/main.py b/src/datasets_preview_backend/main.py
index 9821cc8a..c636acc4 100644
--- a/src/datasets_preview_backend/main.py
+++ b/src/datasets_preview_backend/main.py
@@ -6 +6 @@ from datasets_preview_backend.config import PORT
-from datasets_preview_backend.routes import healthcheck, extract
+from datasets_preview_backend.routes import healthcheck, rows
@@ -13 +13 @@ def start():
- Route("/{dataset_id:path}/extract", endpoint=extract),
+ Route("/rows", endpoint=rows),
diff --git a/src/datasets_preview_backend/routes.py b/src/datasets_preview_backend/routes.py
index a3abd46f..d54ee588 100644
--- a/src/datasets_preview_backend/routes.py
+++ b/src/datasets_preview_backend/routes.py
@@ -22,2 +22,4 @@ async def healthcheck(_: Request):
-async def extract(request: Request):
- dataset_id: str = request.path_params["dataset_id"]
+async def rows(request: Request):
+ if "dataset" not in request.query_params:
+ return PlainTextResponse("Missing query parameter: 'dataset'", status_code=400)
+ dataset_id: str = request.query_params["dataset"]
|
|
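In client terms, the change moves the dataset id out of the path and into the query string (hypothetical example):

# before: GET /glue/extract?rows=100        (dataset id as a path parameter)
# after:  GET /rows?dataset=glue&rows=100   (dataset id as a query parameter)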
f846a7cb9b7c93925c42f163e0c8d06cc30c995b
|
Sylvain Lesage
| 2021-07-30T15:10:29 |
refactor: 💡 split code into multiple files
|
diff --git a/quality/test_datasets.py b/quality/test_datasets.py
index c49956ab..519c9e25 100644
--- a/quality/test_datasets.py
+++ b/quality/test_datasets.py
@@ -0,0 +1 @@
+from datasets import list_datasets
@@ -3 +3,0 @@ import time
-from tqdm import tqdm
@@ -6 +5,0 @@ from tqdm.contrib.concurrent import process_map
-from datasets import list_datasets
@@ -8 +7 @@ from datasets import list_datasets
-from datasets_preview_backend.main import (
+from datasets_preview_backend.queries import (
diff --git a/src/datasets_preview_backend/config.py b/src/datasets_preview_backend/config.py
new file mode 100644
index 00000000..1018acc2
--- /dev/null
+++ b/src/datasets_preview_backend/config.py
@@ -0,0 +1,11 @@
+import os
+from datasets_preview_backend.utils import get_int_value
+
+DEFAULT_PORT = 8000
+DEFAULT_EXTRACT_ROWS_LIMIT = 100
+
+
+PORT = get_int_value(d=os.environ, key="DPB_PORT", default=DEFAULT_PORT)
+EXTRACT_ROWS_LIMIT = get_int_value(
+ d=os.environ, key="DPB_EXTRACT_ROWS_LIMIT", default=DEFAULT_EXTRACT_ROWS_LIMIT
+)
diff --git a/src/datasets_preview_backend/main.py b/src/datasets_preview_backend/main.py
index 4b939715..9821cc8a 100644
--- a/src/datasets_preview_backend/main.py
+++ b/src/datasets_preview_backend/main.py
@@ -1,7 +0,0 @@
-import logging
-import os
-import re
-
-from datasets.builder import DatasetBuilder
-from typing import List
-
@@ -9,2 +1,0 @@ from starlette.applications import Starlette
-from starlette.requests import Request
-from starlette.responses import PlainTextResponse, JSONResponse
@@ -14,190 +5,2 @@ import uvicorn
-from datasets import (
- IterableDataset,
- load_dataset,
- load_dataset_builder,
- prepare_module,
- import_main_class,
-)
-from datasets.utils.streaming_download_manager import StreamingDownloadManager
-
-from datasets_preview_backend.exceptions import (
- DatasetBuilderScriptError,
- DatasetBuilderScriptConfigError,
- DatasetBuilderScriptConfigNoSplitsError,
- DatasetNotFoundError,
- ConfigNotFoundError,
- SplitError,
- SplitNotImplementedError,
-)
-
-DEFAULT_PORT = 8000
-DEFAULT_EXTRACT_ROWS_LIMIT = 100
-
-
-def get_int_value(d, key, default):
- try:
- value = int(d.get(key))
- except TypeError:
- value = default
- return value
-
-
-PORT = get_int_value(d=os.environ, key="DPB_PORT", default=DEFAULT_PORT)
-EXTRACT_ROWS_LIMIT = get_int_value(
- d=os.environ, key="DPB_EXTRACT_ROWS_LIMIT", default=DEFAULT_EXTRACT_ROWS_LIMIT
-)
-
-
-async def healthcheck(request: Request):
- return PlainTextResponse("ok")
-
-
-def get_dataset_config_names(dataset_id: str) -> List[str]:
- try:
- module_path, *_ = prepare_module(dataset_id, dataset=True)
- builder_cls = import_main_class(module_path, dataset=True)
- except FileNotFoundError as err:
- raise DatasetNotFoundError(dataset_id=dataset_id)
- except (ModuleNotFoundError):
- raise DatasetBuilderScriptError(dataset_id=dataset_id)
-
- config_names = [c.name for c in builder_cls.BUILDER_CONFIGS] or [None]
- logging.debug(
- f"The dataset builder has {len(config_names)} configs: {config_names}"
- )
- return config_names
-
-
-def get_config_splits(dataset_id: str, config_name: str) -> List[str]:
- try:
- builder = load_dataset_builder(dataset_id, name=config_name)
- except ValueError as err:
- message = str(err)
- if message.startswith(f"BuilderConfig {config_name} not found"):
- raise ConfigNotFoundError(dataset_id=dataset_id, config_name=config_name)
- else:
- raise
- except (ModuleNotFoundError, RuntimeError, TypeError):
- raise DatasetBuilderScriptConfigError(
- dataset_id=dataset_id, config_name=config_name
- )
-
- if builder.info.splits is None:
- # try to get them from _split_generators
- try:
- splits = [
- split_generator.name
- for split_generator in builder._split_generators(
- StreamingDownloadManager(base_path=builder.base_path)
- )
- ]
- except:
- raise DatasetBuilderScriptConfigNoSplitsError(
- dataset_id=dataset_id, config_name=config_name
- )
- else:
- splits = list(builder.info.splits.keys())
- return splits
-
-
-def extract_split_rows(dataset_id: str, config_name: str, split: str, num_rows: int):
- logging.debug(
- f"asked for {num_rows} first rows of dataset {dataset_id} - {config_name} - {split}"
- )
-
- try:
- dataset: IterableDataset = load_dataset(
- dataset_id, name=config_name, split=split, streaming=True
- )
- except NotImplementedError as err:
- # TODO: check what has changed once https://github.com/huggingface/datasets/pull/2662 is merged
- try:
- regex = re.compile(
- r"Extraction protocol for file at .*?((\.\w+)?\.\w+)* is not implemented yet"
- )
- extension = regex.match(str(err)).group(1)
- except:
- extension = None
- raise SplitNotImplementedError(
- dataset_id=dataset_id,
- config_name=config_name,
- split=split,
- extension=extension,
- )
- except ValueError as err:
- message = str(err)
- if message.startswith(f"BuilderConfig {config_name} not found"):
- raise ConfigNotFoundError(dataset_id=dataset_id, config_name=config_name)
- elif message.startswith(f'Unknown split "{split}".') or message.startswith(
- f"Bad split: {split}."
- ):
- raise SplitError(
- dataset_id=dataset_id, config_name=config_name, split=split
- )
- else:
- raise
-
- rows = list(dataset.take(num_rows))
- if len(rows) != num_rows:
- logging.warning(
- f"could not read all the required rows ({len(rows)} / {num_rows}) from dataset {dataset_id} - {config_name} - {split}"
- )
-
- return {
- "dataset_id": dataset_id,
- "config_name": config_name,
- "split": split,
- "rows": rows,
- }
-
-
-def extract_config_rows(dataset_id: str, config_name: str, num_rows: int):
- logging.debug(
- f"asked for {num_rows} first rows of dataset {dataset_id} - {config_name}"
- )
-
- splits = get_config_splits(dataset_id, config_name)
-
- return {
- "dataset_id": dataset_id,
- "config_name": config_name,
- "splits": {
- split: extract_split_rows(dataset_id, config_name, split, num_rows)
- for split in splits
- },
- }
-
-
-def extract_dataset_rows(dataset_id: str, num_rows: int):
- logging.debug(f"asked for {num_rows} first rows of dataset {dataset_id}")
-
- config_names = get_dataset_config_names(dataset_id)
-
- return {
- "dataset_id": dataset_id,
- "configs": {
- config_name: extract_config_rows(dataset_id, config_name, num_rows)
- for config_name in config_names
- },
- }
-
-
-async def extract(request: Request):
- dataset_id: str = request.path_params["dataset_id"]
- num_rows = get_int_value(
- d=request.query_params, key="rows", default=EXTRACT_ROWS_LIMIT
- )
-
- try:
- return JSONResponse(extract_dataset_rows(dataset_id, num_rows))
- except (DatasetNotFoundError, ConfigNotFoundError) as err:
- return PlainTextResponse(err.message, status_code=404)
- except (
- DatasetBuilderScriptError,
- DatasetBuilderScriptConfigError,
- DatasetBuilderScriptConfigNoSplitsError,
- SplitError,
- SplitNotImplementedError,
- ) as err:
- return PlainTextResponse(err.message, status_code=400)
- # other exceptions will generate a 500 response
+from datasets_preview_backend.config import PORT
+from datasets_preview_backend.routes import healthcheck, extract
diff --git a/src/datasets_preview_backend/queries.py b/src/datasets_preview_backend/queries.py
new file mode 100644
index 00000000..f29dde0f
--- /dev/null
+++ b/src/datasets_preview_backend/queries.py
@@ -0,0 +1,153 @@
+import re
+import logging
+
+from typing import List
+
+from datasets import (
+ IterableDataset,
+ load_dataset,
+ load_dataset_builder,
+ prepare_module,
+ import_main_class,
+)
+from datasets.utils.streaming_download_manager import StreamingDownloadManager
+
+from datasets_preview_backend.exceptions import (
+ DatasetBuilderScriptError,
+ DatasetBuilderScriptConfigError,
+ DatasetBuilderScriptConfigNoSplitsError,
+ DatasetNotFoundError,
+ ConfigNotFoundError,
+ SplitError,
+ SplitNotImplementedError,
+)
+
+
+def get_dataset_config_names(dataset_id: str) -> List[str]:
+ try:
+ module_path, *_ = prepare_module(dataset_id, dataset=True)
+ builder_cls = import_main_class(module_path, dataset=True)
+ except FileNotFoundError as err:
+ raise DatasetNotFoundError(dataset_id=dataset_id)
+ except (ModuleNotFoundError):
+ raise DatasetBuilderScriptError(dataset_id=dataset_id)
+
+ config_names = [c.name for c in builder_cls.BUILDER_CONFIGS] or [None]
+ logging.debug(
+ f"The dataset builder has {len(config_names)} configs: {config_names}"
+ )
+ return config_names
+
+
+def get_config_splits(dataset_id: str, config_name: str) -> List[str]:
+ try:
+ builder = load_dataset_builder(dataset_id, name=config_name)
+ except ValueError as err:
+ message = str(err)
+ if message.startswith(f"BuilderConfig {config_name} not found"):
+ raise ConfigNotFoundError(dataset_id=dataset_id, config_name=config_name)
+ else:
+ raise
+ except (ModuleNotFoundError, RuntimeError, TypeError):
+ raise DatasetBuilderScriptConfigError(
+ dataset_id=dataset_id, config_name=config_name
+ )
+
+ if builder.info.splits is None:
+ # try to get them from _split_generators
+ try:
+ splits = [
+ split_generator.name
+ for split_generator in builder._split_generators(
+ StreamingDownloadManager(base_path=builder.base_path)
+ )
+ ]
+ except:
+ raise DatasetBuilderScriptConfigNoSplitsError(
+ dataset_id=dataset_id, config_name=config_name
+ )
+ else:
+ splits = list(builder.info.splits.keys())
+ return splits
+
+
+def extract_split_rows(dataset_id: str, config_name: str, split: str, num_rows: int):
+ logging.debug(
+ f"asked for {num_rows} first rows of dataset {dataset_id} - {config_name} - {split}"
+ )
+
+ try:
+ dataset: IterableDataset = load_dataset(
+ dataset_id, name=config_name, split=split, streaming=True
+ )
+ except NotImplementedError as err:
+ # TODO: check what has changed once https://github.com/huggingface/datasets/pull/2662 is merged
+ try:
+ regex = re.compile(
+ r"Extraction protocol for file at .*?((\.\w+)?\.\w+)* is not implemented yet"
+ )
+ extension = regex.match(str(err)).group(1)
+ except:
+ extension = None
+ raise SplitNotImplementedError(
+ dataset_id=dataset_id,
+ config_name=config_name,
+ split=split,
+ extension=extension,
+ )
+ except ValueError as err:
+ message = str(err)
+ if message.startswith(f"BuilderConfig {config_name} not found"):
+ raise ConfigNotFoundError(dataset_id=dataset_id, config_name=config_name)
+ elif message.startswith(f'Unknown split "{split}".') or message.startswith(
+ f"Bad split: {split}."
+ ):
+ raise SplitError(
+ dataset_id=dataset_id, config_name=config_name, split=split
+ )
+ else:
+ raise
+
+ rows = list(dataset.take(num_rows))
+ if len(rows) != num_rows:
+ logging.warning(
+ f"could not read all the required rows ({len(rows)} / {num_rows}) from dataset {dataset_id} - {config_name} - {split}"
+ )
+
+ return {
+ "dataset_id": dataset_id,
+ "config_name": config_name,
+ "split": split,
+ "rows": rows,
+ }
+
+
+def extract_config_rows(dataset_id: str, config_name: str, num_rows: int):
+ logging.debug(
+ f"asked for {num_rows} first rows of dataset {dataset_id} - {config_name}"
+ )
+
+ splits = get_config_splits(dataset_id, config_name)
+
+ return {
+ "dataset_id": dataset_id,
+ "config_name": config_name,
+ "splits": {
+ split: extract_split_rows(dataset_id, config_name, split, num_rows)
+ for split in splits
+ },
+ }
+
+
+def extract_dataset_rows(dataset_id: str, num_rows: int):
+ logging.debug(f"asked for {num_rows} first rows of dataset {dataset_id}")
+
+ config_names = get_dataset_config_names(dataset_id)
+
+ return {
+ "dataset_id": dataset_id,
+ "configs": {
+ config_name: extract_config_rows(dataset_id, config_name, num_rows)
+ for config_name in config_names
+ },
+ }
diff --git a/src/datasets_preview_backend/routes.py b/src/datasets_preview_backend/routes.py
new file mode 100644
index 00000000..a3abd46f
--- /dev/null
+++ b/src/datasets_preview_backend/routes.py
@@ -0,0 +1,40 @@
+from starlette.requests import Request
+from starlette.responses import PlainTextResponse, JSONResponse
+
+from datasets_preview_backend.config import EXTRACT_ROWS_LIMIT
+from datasets_preview_backend.queries import extract_dataset_rows
+from datasets_preview_backend.utils import get_int_value
+from datasets_preview_backend.exceptions import (
+ DatasetBuilderScriptError,
+ DatasetBuilderScriptConfigError,
+ DatasetBuilderScriptConfigNoSplitsError,
+ DatasetNotFoundError,
+ ConfigNotFoundError,
+ SplitError,
+ SplitNotImplementedError,
+)
+
+
+async def healthcheck(_: Request):
+ return PlainTextResponse("ok")
+
+
+async def extract(request: Request):
+ dataset_id: str = request.path_params["dataset_id"]
+ num_rows = get_int_value(
+ d=request.query_params, key="rows", default=EXTRACT_ROWS_LIMIT
+ )
+
+ try:
+ return JSONResponse(extract_dataset_rows(dataset_id, num_rows))
+ except (DatasetNotFoundError, ConfigNotFoundError) as err:
+ return PlainTextResponse(err.message, status_code=404)
+ except (
+ DatasetBuilderScriptError,
+ DatasetBuilderScriptConfigError,
+ DatasetBuilderScriptConfigNoSplitsError,
+ SplitError,
+ SplitNotImplementedError,
+ ) as err:
+ return PlainTextResponse(err.message, status_code=400)
+ # other exceptions will generate a 500 response
diff --git a/src/datasets_preview_backend/utils.py b/src/datasets_preview_backend/utils.py
new file mode 100644
index 00000000..a9e44207
--- /dev/null
+++ b/src/datasets_preview_backend/utils.py
@@ -0,0 +1,6 @@
+def get_int_value(d, key, default):
+ try:
+ value = int(d.get(key))
+ except (TypeError, ValueError):
+ value = default
+ return value
diff --git a/tests/test_main.py b/tests/test_main.py
index 531e1577..ebe539dd 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -3 +3 @@ import pytest
-from datasets_preview_backend.main import (
+from datasets_preview_backend.queries import (
|
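
A minimal standalone sketch of the streaming pattern extract_split_rows relies on above (dataset, config and row count are illustrative): load a split lazily and take the first rows without downloading the whole dataset.

from datasets import load_dataset

# streaming=True returns an IterableDataset; nothing is downloaded up front
dataset = load_dataset("glue", name="ax", split="test", streaming=True)
rows = list(dataset.take(10))  # at most 10 rows; fewer if the split is shorter
print(len(rows))
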
|
c7c93f7f38837187c4cbe30f9d03b432e6d9d8da
|
Sylvain Lesage
| 2021-07-30T13:47:45 |
fix: 🐛 detect .tar.gz extension (any double extension)
|
diff --git a/src/datasets_preview_backend/main.py b/src/datasets_preview_backend/main.py
index 969f6c9c..4b939715 100644
--- a/src/datasets_preview_backend/main.py
+++ b/src/datasets_preview_backend/main.py
@@ -113 +113 @@ def extract_split_rows(dataset_id: str, config_name: str, split: str, num_rows:
- # try to parse the unimplemented file extension
+ # TODO: check what has changed once https://github.com/huggingface/datasets/pull/2662 is merged
@@ -116 +116 @@ def extract_split_rows(dataset_id: str, config_name: str, split: str, num_rows:
- r"Extraction protocol for file at .*(\.[\w]*) is not implemented yet"
+ r"Extraction protocol for file at .*?((\.\w+)?\.\w+)* is not implemented yet"
diff --git a/tests/test_main.py b/tests/test_main.py
index 9d9c0c06..531e1577 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -164,0 +165,5 @@ def test_extract_not_implemented_split():
+
+
+def test_tar_gz_extension():
+ with pytest.raises(SplitNotImplementedError):
+ extract_split_rows("air_dialogue", "air_dialogue_data", "train", 10)
|
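
A quick standalone check of the updated regex (the error message below is invented for illustration): the lazy prefix plus the repeated group lets group(1) capture a double extension such as ".tar.gz" instead of only ".gz".

import re

regex = re.compile(
    r"Extraction protocol for file at .*?((\.\w+)?\.\w+)* is not implemented yet"
)
message = "Extraction protocol for file at data/train.tar.gz is not implemented yet"
print(regex.match(message).group(1))  # -> .tar.gz
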
|
dd9af67708997931161df9facabe9e934b5227f3
|
Sylvain Lesage
| 2021-07-30T11:02:55 |
test: 💍 send jobs in chunks of 20 to every worker
|
diff --git a/quality/test_datasets.py b/quality/test_datasets.py
index 3c9fb9ed..c49956ab 100644
--- a/quality/test_datasets.py
+++ b/quality/test_datasets.py
@@ -87,2 +87 @@ def export_all_datasets_exceptions():
- get_config_names_report,
- dataset_ids,
+ get_config_names_report, dataset_ids, chunksize=20
@@ -100 +99,4 @@ def export_all_datasets_exceptions():
- get_split_names_report, split_names_dataset_ids, split_names_config_names
+ get_split_names_report,
+ split_names_dataset_ids,
+ split_names_config_names,
+ chunksize=20,
@@ -114 +116,5 @@ def export_all_datasets_exceptions():
- get_rows_report, rows_dataset_ids, rows_config_names, rows_split_names
+ get_rows_report,
+ rows_dataset_ids,
+ rows_config_names,
+ rows_split_names,
+ chunksize=20,
|
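
A standalone sketch of the chunksize behaviour the test change above depends on (worker function and ids are invented; the call signature with several iterables plus chunksize suggests a ProcessPoolExecutor-style map, which is an assumption here): tasks are shipped to workers in batches of 20 instead of one by one, cutting inter-process overhead when there are many small jobs.

from concurrent.futures import ProcessPoolExecutor

def report(dataset_id, config_name):
    # stand-in for get_split_names_report and friends
    return (dataset_id, config_name)

if __name__ == "__main__":
    ids = [f"dataset-{i}" for i in range(100)]
    configs = ["default"] * 100
    with ProcessPoolExecutor() as executor:
        results = list(executor.map(report, ids, configs, chunksize=20))
    print(len(results))  # 100
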
|
4ed7764bac54c333471bd3d306b41d3c477c0b16
|
Sylvain Lesage
| 2021-07-30T10:42:44 |
feat: 🎸 implement a fallback method to get the splits
|
diff --git a/src/datasets_preview_backend/main.py b/src/datasets_preview_backend/main.py
index 98b64923..969f6c9c 100644
--- a/src/datasets_preview_backend/main.py
+++ b/src/datasets_preview_backend/main.py
@@ -20,0 +21 @@ from datasets import (
+from datasets.utils.streaming_download_manager import StreamingDownloadManager
@@ -85,3 +86,12 @@ def get_config_splits(dataset_id: str, config_name: str) -> List[str]:
- raise DatasetBuilderScriptConfigNoSplitsError(
- dataset_id=dataset_id, config_name=config_name
- )
+ # try to get them from _split_generators
+ try:
+ splits = [
+ split_generator.name
+ for split_generator in builder._split_generators(
+ StreamingDownloadManager(base_path=builder.base_path)
+ )
+ ]
+ except Exception:
+ raise DatasetBuilderScriptConfigNoSplitsError(
+ dataset_id=dataset_id, config_name=config_name
+ )
@@ -89 +99 @@ def get_config_splits(dataset_id: str, config_name: str) -> List[str]:
- splits = builder.info.splits.keys()
+ splits = list(builder.info.splits.keys())
diff --git a/tests/test_main.py b/tests/test_main.py
index 3d5360bb..9d9c0c06 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -6 +6 @@ from datasets_preview_backend.main import (
- DatasetBuilderScriptConfigNoSplitsError,
+ # DatasetBuilderScriptConfigNoSplitsError,
@@ -38,0 +39,13 @@ def test_get_splits(): # sourcery skip: extract-duplicate-method
+ # uses the fallback to call "builder._split_generators"
+ splits = get_config_splits("hda_nli_hindi", "HDA nli hindi")
+ assert len(splits) == 3
+ assert "train" in splits
+ assert "validation" in splits
+ assert "test" in splits
+
+ splits = get_config_splits("classla/copa_hr", "copa_hr")
+ assert len(splits) == 3
+
+ splits = get_config_splits("mc4", "sn")
+ assert len(splits) == 2
+
@@ -67 +80 @@ def test_extract_unknown_config():
- with pytest.raises(ConfigNameError):
+ with pytest.raises(ConfigNotFoundError):
@@ -69 +82 @@ def test_extract_unknown_config():
- with pytest.raises(ConfigNameError):
+ with pytest.raises(ConfigNotFoundError):
@@ -143,9 +156,4 @@ def test_extract_bogus_config():
-def test_extract_bogus_splits():
- with pytest.raises(DatasetBuilderScriptConfigNoSplitsError):
- extract_config_rows("hda_nli_hindi", "HDA nli hindi", 10)
- with pytest.raises(DatasetBuilderScriptConfigNoSplitsError):
- extract_config_rows("mc4", "sn", 10)
- with pytest.raises(DatasetBuilderScriptConfigNoSplitsError):
- extract_dataset_rows("classla/copa_hr", 100)
- with pytest.raises(DatasetBuilderScriptConfigNoSplitsError):
- extract_config_rows("classla/copa_hr", "copa_hr", 100)
+# def test_extract_bogus_splits():
+# not sure if we have an example of such an error
+# with pytest.raises(DatasetBuilderScriptConfigNoSplitsError):
+# extract_config_rows("mc4", "sn", 10)
|
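
A condensed standalone sketch of the fallback introduced above (builder instantiation is simplified; prepare_module and import_main_class were importable from datasets at that version): when the script's info carries no splits, run its _split_generators with a streaming download manager so that nothing is actually downloaded.

from datasets import import_main_class, prepare_module
from datasets.utils.streaming_download_manager import StreamingDownloadManager

module_path, *_ = prepare_module("mc4", dataset=True)
builder_cls = import_main_class(module_path, dataset=True)
builder = builder_cls(name="sn")  # simplified; the real code passes the config name
if builder.info.splits is None:
    # fallback: ask the script itself for its split generators
    splits = [
        split_generator.name
        for split_generator in builder._split_generators(
            StreamingDownloadManager(base_path=builder.base_path)
        )
    ]
else:
    splits = list(builder.info.splits.keys())
print(splits)
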
|
a3585e395084ac2a48b8aca28f06eb7cf1fde56c
|
Sylvain Lesage
| 2021-07-30T10:20:37 |
feat: 🎸 capture config script errors and not-implemented exceptions
|
diff --git a/Makefile b/Makefile
index 252fda1e..b1d44a97 100644
--- a/Makefile
+++ b/Makefile
@@ -10 +10 @@ test:
- poetry run python -m pytest tests
+ poetry run python -m pytest -x tests
diff --git a/src/datasets_preview_backend/exceptions.py b/src/datasets_preview_backend/exceptions.py
index 59f59a47..8e37b3be 100644
--- a/src/datasets_preview_backend/exceptions.py
+++ b/src/datasets_preview_backend/exceptions.py
@@ -36,0 +37,16 @@ class DatasetBuilderScriptConfigError(Error):
+class DatasetBuilderScriptConfigNoSplitsError(Error):
+ """Exception raised if the builder script fails for this config.
+
+ Attributes:
+ dataset_id -- the erroneous dataset id
+ config_name -- the erroneous dataset config_name
+ """
+
+ def __init__(self, dataset_id, config_name):
+ self.dataset_id = dataset_id
+ self.config_name = config_name
+ super().__init__(
+ f"Dataset builder script error: missing .info.splits. Dataset: '{self.dataset_id}', config: '{self.config_name}'"
+ )
+
+
@@ -80,0 +97,23 @@ class SplitError(Error):
+
+
+class SplitNotImplementedError(Error):
+ """Exception raised for NotImplementedError in the split.
+
+ Attributes:
+ dataset_id -- the erroneous dataset id
+ config_name -- the erroneous dataset config_name
+ split -- the erroneous dataset split
+ extension -- the file extension not implemented yet
+ """
+
+ def __init__(self, dataset_id, config_name, split, extension):
+ self.dataset_id = dataset_id
+ self.config_name = config_name
+ self.split = split
+ self.extension = extension
+ extension_str = (
+ "" if self.extension is None else f" for extension '{self.extension}'"
+ )
+ super().__init__(
+ f"Extraction protocol not implemented{extension_str}. Dataset: '{self.dataset_id}', config: '{self.config_name}', script: '{self.split}'"
+ )
diff --git a/src/datasets_preview_backend/main.py b/src/datasets_preview_backend/main.py
index 6772533f..98b64923 100644
--- a/src/datasets_preview_backend/main.py
+++ b/src/datasets_preview_backend/main.py
@@ -2,0 +3 @@ import os
+import re
@@ -23,0 +25 @@ from datasets_preview_backend.exceptions import (
+ DatasetBuilderScriptConfigNoSplitsError,
@@ -26,0 +29 @@ from datasets_preview_backend.exceptions import (
+ SplitNotImplementedError,
@@ -76 +79 @@ def get_config_splits(dataset_id: str, config_name: str) -> List[str]:
- except ModuleNotFoundError:
+ except (ModuleNotFoundError, RuntimeError, TypeError):
@@ -82 +85 @@ def get_config_splits(dataset_id: str, config_name: str) -> List[str]:
- raise DatasetBuilderScriptConfigError(
+ raise DatasetBuilderScriptConfigNoSplitsError(
@@ -85 +88,3 @@ def get_config_splits(dataset_id: str, config_name: str) -> List[str]:
- return builder.info.splits.keys()
+ else:
+ splits = builder.info.splits.keys()
+ return splits
@@ -96,0 +102,15 @@ def extract_split_rows(dataset_id: str, config_name: str, split: str, num_rows:
+ except NotImplementedError as err:
+ # try to parse the unimplemented file extension
+ try:
+ regex = re.compile(
+ r"Extraction protocol for file at .*(\.[\w]*) is not implemented yet"
+ )
+ extension = regex.match(str(err)).group(1)
+ except Exception:
+ extension = None
+ raise SplitNotImplementedError(
+ dataset_id=dataset_id,
+ config_name=config_name,
+ split=split,
+ extension=extension,
+ )
@@ -165 +185,7 @@ async def extract(request: Request):
- except (DatasetBuilderScriptError, DatasetBuilderScriptConfigError) as err:
+ except (
+ DatasetBuilderScriptError,
+ DatasetBuilderScriptConfigError,
+ DatasetBuilderScriptConfigNoSplitsError,
+ SplitError,
+ SplitNotImplementedError,
+ ) as err:
diff --git a/tests/test_main.py b/tests/test_main.py
index 2295f516..3d5360bb 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -5,0 +6 @@ from datasets_preview_backend.main import (
+ DatasetBuilderScriptConfigNoSplitsError,
@@ -8,0 +10 @@ from datasets_preview_backend.main import (
+ SplitNotImplementedError,
@@ -136 +138 @@ def test_extract_bogus_config():
- extract_dataset_rows("classla/copa_hr", 100)
+ extract_config_rows("Valahaar/wsdmt", None, 10)
@@ -137,0 +140,11 @@ def test_extract_bogus_config():
+ extract_config_rows("nateraw/image-folder", None, 10)
+
+
+def test_extract_bogus_splits():
+ with pytest.raises(DatasetBuilderScriptConfigNoSplitsError):
+ extract_config_rows("hda_nli_hindi", "HDA nli hindi", 10)
+ with pytest.raises(DatasetBuilderScriptConfigNoSplitsError):
+ extract_config_rows("mc4", "sn", 10)
+ with pytest.raises(DatasetBuilderScriptConfigNoSplitsError):
+ extract_dataset_rows("classla/copa_hr", 100)
+ with pytest.raises(DatasetBuilderScriptConfigNoSplitsError):
@@ -138,0 +152,5 @@ def test_extract_bogus_config():
+
+
+def test_extract_not_implemented_split():
+ with pytest.raises(SplitNotImplementedError):
+ extract_split_rows("ade_corpus_v2", "Ade_corpus_v2_classification", "train", 10)
|
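
A small standalone illustration of the new error class (the extension value is invented; in the module above it is parsed out of the NotImplementedError text):

from datasets_preview_backend.exceptions import SplitNotImplementedError

err = SplitNotImplementedError(
    dataset_id="ade_corpus_v2",
    config_name="Ade_corpus_v2_classification",
    split="train",
    extension=".tsv",  # illustrative value
)
print(err.message)
# Extraction protocol not implemented for extension '.tsv'. Dataset: 'ade_corpus_v2', config: 'Ade_corpus_v2_classification', split: 'train'
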
|
aa708005121624b63902d584f6afc492715dd74b
|
Sylvain Lesage
| 2021-07-30T08:50:34 |
feat: 🎸 catch more exceptions
|
diff --git a/Makefile b/Makefile
index 08c4239b..252fda1e 100644
--- a/Makefile
+++ b/Makefile
@@ -10 +10 @@ test:
- poetry run python -m pytest
+ poetry run python -m pytest tests
diff --git a/src/datasets_preview_backend/exceptions.py b/src/datasets_preview_backend/exceptions.py
new file mode 100644
index 00000000..59f59a47
--- /dev/null
+++ b/src/datasets_preview_backend/exceptions.py
@@ -0,0 +1,80 @@
+class Error(Exception):
+ """Base class for exceptions in this module."""
+
+ def __init__(self, message):
+ self.message = message
+ super().__init__(message)
+
+
+class DatasetBuilderScriptError(Error):
+ """Exception raised if the dataset script fails.
+
+ Attributes:
+ dataset_id -- the erroneous dataset id
+ """
+
+ def __init__(self, dataset_id):
+ self.dataset_id = dataset_id
+ super().__init__(f"Dataset builder script error. Dataset: '{self.dataset_id}'")
+
+
+class DatasetBuilderScriptConfigError(Error):
+ """Exception raised if the builder script fails for this config.
+
+ Attributes:
+ dataset_id -- the erroneous dataset id
+ config_name -- the erroneous dataset config_name
+ """
+
+ def __init__(self, dataset_id, config_name):
+ self.dataset_id = dataset_id
+ self.config_name = config_name
+ super().__init__(
+ f"Dataset builder script error. Dataset: '{self.dataset_id}', config: '{self.config_name}'"
+ )
+
+
+class DatasetNotFoundError(Error):
+ """Exception raised if a dataset has not been found.
+
+ Attributes:
+ dataset_id -- the erroneous dataset id
+ """
+
+ def __init__(self, dataset_id):
+ self.dataset_id = dataset_id
+ super().__init__(f"Dataset not found. Dataset: '{self.dataset_id}'")
+
+
+class ConfigNotFoundError(Error):
+ """Exception raised for config builder not found.
+
+ Attributes:
+ dataset_id -- the erroneous dataset id
+ config_name -- the erroneous dataset config_name
+ """
+
+ def __init__(self, dataset_id, config_name):
+ self.dataset_id = dataset_id
+ self.config_name = config_name
+ super().__init__(
+ f"Config not found. Dataset: '{self.dataset_id}', config: '{self.config_name}'"
+ )
+
+
+class SplitError(Error):
+ """Exception raised for errors in the split.
+
+ Attributes:
+ dataset_id -- the erroneous dataset id
+ config_name -- the erroneous dataset config_name
+ split -- the erroneous dataset split
+ """
+
+ def __init__(self, dataset_id, config_name, split):
+ self.dataset_id = dataset_id
+ self.config_name = config_name
+ self.split = split
+ super().__init__(
+ f"Split error. Dataset: '{self.dataset_id}', config: '{self.config_name}', script: '{self.split}'"
+ )
diff --git a/src/datasets_preview_backend/main.py b/src/datasets_preview_backend/main.py
index 798fc570..6772533f 100644
--- a/src/datasets_preview_backend/main.py
+++ b/src/datasets_preview_backend/main.py
@@ -14 +13,0 @@ from datasets import (
- Dataset,
@@ -21,0 +21,8 @@ from datasets import (
+from datasets_preview_backend.exceptions import (
+ DatasetBuilderScriptError,
+ DatasetBuilderScriptConfigError,
+ DatasetNotFoundError,
+ ConfigNotFoundError,
+ SplitError,
+)
+
@@ -26,28 +32,0 @@ DEFAULT_EXTRACT_ROWS_LIMIT = 100
-class Error(Exception):
- """Base class for exceptions in this module."""
-
- pass
-
-
-class ConfigNameError(Error):
- """Exception raised for errors in the config name.
-
- Attributes:
- config_name -- the erroneous dataset config_name
- """
-
- def __init__(self, config_name):
- self.config_name = config_name
-
-
-class SplitError(Error):
- """Exception raised for errors in the split name.
-
- Attributes:
- split -- the erroneous dataset split
- """
-
- def __init__(self, split):
- self.split = split
-
-
@@ -73,2 +52,8 @@ def get_dataset_config_names(dataset_id: str) -> List[str]:
- module_path, *_ = prepare_module(dataset_id, dataset=True)
- builder_cls = import_main_class(module_path, dataset=True)
+ try:
+ module_path, *_ = prepare_module(dataset_id, dataset=True)
+ builder_cls = import_main_class(module_path, dataset=True)
+ except FileNotFoundError as err:
+ raise DatasetNotFoundError(dataset_id=dataset_id)
+ except (ModuleNotFoundError):
+ raise DatasetBuilderScriptError(dataset_id=dataset_id)
+
@@ -88 +73 @@ def get_config_splits(dataset_id: str, config_name: str) -> List[str]:
- raise ConfigNameError(config_name=config_name)
+ raise ConfigNotFoundError(dataset_id=dataset_id, config_name=config_name)
@@ -90,0 +76,9 @@ def get_config_splits(dataset_id: str, config_name: str) -> List[str]:
+ except ModuleNotFoundError:
+ raise DatasetBuilderScriptConfigError(
+ dataset_id=dataset_id, config_name=config_name
+ )
+
+ if builder.info.splits is None:
+ raise DatasetBuilderScriptConfigError(
+ dataset_id=dataset_id, config_name=config_name
+ )
@@ -106 +100 @@ def extract_split_rows(dataset_id: str, config_name: str, split: str, num_rows:
- raise ConfigNameError(config_name=config_name)
+ raise ConfigNotFoundError(dataset_id=dataset_id, config_name=config_name)
@@ -110 +104,3 @@ def extract_split_rows(dataset_id: str, config_name: str, split: str, num_rows:
- raise SplitError(split=split)
+ raise SplitError(
+ dataset_id=dataset_id, config_name=config_name, split=split
+ )
@@ -167,2 +163,4 @@ async def extract(request: Request):
- except FileNotFoundError as e:
- return PlainTextResponse("Dataset not found", status_code=404)
+ except (DatasetNotFoundError, ConfigNotFoundError) as err:
+ return PlainTextResponse(err.message, status_code=404)
+ except (DatasetBuilderScriptError, DatasetBuilderScriptConfigError) as err:
+ return PlainTextResponse(err.message, status_code=400)
diff --git a/tests/test_main.py b/tests/test_main.py
index 28c369c5..2295f516 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -4 +4,4 @@ from datasets_preview_backend.main import (
- ConfigNameError,
+ DatasetBuilderScriptError,
+ DatasetBuilderScriptConfigError,
+ ConfigNotFoundError,
+ DatasetNotFoundError,
@@ -115 +118 @@ def test_extract_unknown_dataset():
- with pytest.raises(FileNotFoundError):
+ with pytest.raises(DatasetNotFoundError):
@@ -116,0 +120,19 @@ def test_extract_unknown_dataset():
+ with pytest.raises(DatasetNotFoundError):
+ extract_dataset_rows("AConsApart/anime_subtitles_DialoGPT", 100)
+
+
+def test_extract_unknown_config():
+ with pytest.raises(ConfigNotFoundError):
+ extract_config_rows("glue", "doesnotexist", 100)
+
+
+def test_extract_bogus_dataset():
+ with pytest.raises(DatasetBuilderScriptError):
+ extract_dataset_rows("TimTreasure4/Test", 100)
+
+
+def test_extract_bogus_config():
+ with pytest.raises(DatasetBuilderScriptConfigError):
+ extract_dataset_rows("classla/copa_hr", 100)
+ with pytest.raises(DatasetBuilderScriptConfigError):
+ extract_config_rows("classla/copa_hr", "copa_hr", 100)
|
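
A minimal standalone sketch of the pattern behind the exceptions module above: the base class stores the formatted message, so callers can catch any domain error via Error and return err.message verbatim (for instance as an HTTP response body).

class Error(Exception):
    """Base class for exceptions in this module."""

    def __init__(self, message):
        self.message = message
        super().__init__(message)

class DatasetNotFoundError(Error):
    def __init__(self, dataset_id):
        self.dataset_id = dataset_id
        super().__init__(f"Dataset not found. Dataset: '{dataset_id}'")

try:
    raise DatasetNotFoundError(dataset_id="does/not-exist")
except Error as err:
    print(err.message)  # Dataset not found. Dataset: 'does/not-exist'
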
|
b3ec3eee2186926a9e00db7105368aa87bb842ac
|
Sylvain Lesage
| 2021-07-29T13:38:04 |
fix: 🐛 fix all ImportError exceptions raised by the current datasets
|
diff --git a/poetry.lock b/poetry.lock
index 80d02e45..df2f3a97 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -0,0 +1,11 @@
+[[package]]
+name = "absl-py"
+version = "0.13.0"
+description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py."
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+six = "*"
+
@@ -36,0 +48,37 @@ trio = ["trio (>=0.16)"]
+[[package]]
+name = "apache-beam"
+version = "2.31.0"
+description = "Apache Beam SDK for Python"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+avro-python3 = ">=1.8.1,<1.9.2 || >1.9.2,<1.10.0"
+crcmod = ">=1.7,<2.0"
+dill = ">=0.3.1.1,<0.3.2"
+fastavro = ">=0.21.4,<2"
+future = ">=0.18.2,<1.0.0"
+grpcio = ">=1.29.0,<2"
+hdfs = ">=2.1.0,<3.0.0"
+httplib2 = ">=0.8,<0.20.0"
+numpy = ">=1.14.3,<1.21.0"
+oauth2client = ">=2.0.1,<5"
+protobuf = ">=3.12.2,<4"
+pyarrow = ">=0.15.1,<5.0.0"
+pydot = ">=1.2.0,<2"
+pymongo = ">=3.8.0,<4.0.0"
+python-dateutil = ">=2.8.0,<3"
+pytz = ">=2018.3"
+requests = ">=2.24.0,<3.0.0"
+typing-extensions = ">=3.7.0,<3.8.0"
+
+[package.extras]
+aws = ["boto3 (>=1.9)"]
+azure = ["azure-storage-blob (>=12.3.2)", "azure-core (>=1.7.0)"]
+docs = ["Sphinx (>=1.5.2,<2.0)"]
+gcp = ["cachetools (>=3.1.0,<5)", "google-apitools (>=0.5.31,<0.5.32)", "google-auth (>=1.18.0,<2)", "google-cloud-datastore (>=1.7.1,<2)", "google-cloud-pubsub (>=0.39.0,<2)", "google-cloud-bigquery (>=1.6.0,<3)", "google-cloud-core (>=0.28.1,<2)", "google-cloud-bigtable (>=0.31.1,<2)", "google-cloud-spanner (>=1.13.0,<2)", "grpcio-gcp (>=0.2.2,<1)", "google-cloud-dlp (>=0.12.0,<2)", "google-cloud-language (>=1.3.0,<2)", "google-cloud-videointelligence (>=1.8.0,<2)", "google-cloud-vision (>=0.38.0,<2)", "google-cloud-profiler (>=3.0.4,<4)"]
+interactive = ["facets-overview (>=1.0.0,<2)", "ipython (>=5.8.0,<8)", "ipykernel (>=5.2.0,<6)", "jupyter-client (>=6.1.11,<6.1.13)", "timeloop (>=1.0.2,<2)"]
+interactive_test = ["nbformat (>=5.0.5,<6)", "nbconvert (>=5.6.1,<6)", "selenium (>=3.141.0,<4)", "needle (>=0.5.0,<1)", "chromedriver-binary (>=88,<89)", "pillow (>=7.1.1,<8)"]
+test = ["freezegun (>=0.3.12)", "mock (>=1.0.1,<3.0.0)", "nose (>=1.3.7)", "nose-xunitmp (>=0.4.1)", "pandas (>=1.0,<1.3.0)", "parameterized (>=0.7.1,<0.8.0)", "pyhamcrest (>=1.9,!=1.10.0,<2.0.0)", "pyyaml (>=3.12,<6.0.0)", "requests-mock (>=1.7,<2.0)", "tenacity (>=5.0.2,<6.0)", "pytest (>=4.4.0,<5.0)", "pytest-xdist (>=1.29.0,<2)", "pytest-timeout (>=1.3.3,<2)", "sqlalchemy (>=1.3,<2.0)", "psycopg2-binary (>=2.8.5,<3.0.0)", "testcontainers (>=3.0.3,<4.0.0)"]
+
@@ -63,0 +112,11 @@ tests = ["pytest", "pytest-asyncio", "mypy (>=0.800)"]
+[[package]]
+name = "astunparse"
+version = "1.6.3"
+description = "An AST unparser for Python"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+six = ">=1.6.1,<2.0"
+
@@ -93,0 +153,27 @@ tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>
+[[package]]
+name = "avro-python3"
+version = "1.9.2.1"
+description = "Avro is a serialization and RPC framework."
+category = "main"
+optional = false
+python-versions = ">=3.5"
+
+[package.extras]
+snappy = ["python-snappy"]
+zstandard = ["zstandard"]
+
+[[package]]
+name = "bcj-cffi"
+version = "0.5.1"
+description = "bcj algorithm library"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+cffi = ">=1.14.0"
+
+[package.extras]
+check = ["mypy", "check-manifest", "flake8", "readme-renderer", "pygments", "isort", "twine"]
+test = ["pytest", "pytest-cov", "coverage[toml] (>=5.2)"]
+
@@ -115,0 +202,35 @@ uvloop = ["uvloop (>=0.15.2)"]
+[[package]]
+name = "brotli"
+version = "1.0.9"
+description = "Python bindings for the Brotli compression library"
+category = "main"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "brotlicffi"
+version = "1.0.9.2"
+description = "Python CFFI bindings to the Brotli library"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+cffi = ">=1.0.0"
+
+[[package]]
+name = "cachetools"
+version = "4.2.2"
+description = "Extensible memoizing collections and decorators"
+category = "main"
+optional = false
+python-versions = "~=3.5"
+
+[[package]]
+name = "cbor"
+version = "1.0.0"
+description = "RFC 7049 - Concise Binary Object Representation"
+category = "main"
+optional = false
+python-versions = "*"
+
@@ -123,0 +245,11 @@ python-versions = "*"
+[[package]]
+name = "cffi"
+version = "1.14.6"
+description = "Foreign Function Interface for Python calling C code."
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+pycparser = "*"
+
@@ -161,0 +294,16 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+[[package]]
+name = "conllu"
+version = "4.4"
+description = "CoNLL-U Parser parses a CoNLL-U formatted string into a nested python dictionary"
+category = "main"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "crcmod"
+version = "1.7"
+description = "CRC Generator"
+category = "main"
+optional = false
+python-versions = "*"
+
@@ -187 +335 @@ benchmarks = ["numpy (==1.18.5)", "tensorflow (==2.3.0)", "torch (==1.6.0)", "tr
-dev = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[s3,server] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "wget (>=3.2)", "pytorch-nlp (==0.5.0)", "pytorch-lightning", "fastBPE (==0.1.0)", "fairseq", "black (==21.4b0)", "flake8 (==3.7.9)", "isort", "pyyaml (>=5.3.1)", "importlib-resources"]
+dev = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[server,s3] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "wget (>=3.2)", "pytorch-nlp (==0.5.0)", "pytorch-lightning", "fastBPE (==0.1.0)", "fairseq", "black (==21.4b0)", "flake8 (==3.7.9)", "isort", "pyyaml (>=5.3.1)", "importlib-resources"]
@@ -194 +342 @@ tensorflow_gpu = ["tensorflow-gpu (>=2.2.0)"]
-tests = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[s3,server] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "wget (>=3.2)", "pytorch-nlp (==0.5.0)", "pytorch-lightning", "fastBPE (==0.1.0)", "fairseq", "importlib-resources"]
+tests = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[server,s3] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "wget (>=3.2)", "pytorch-nlp (==0.5.0)", "pytorch-lightning", "fastBPE (==0.1.0)", "fairseq", "importlib-resources"]
@@ -199 +347 @@ name = "dill"
-version = "0.3.4"
+version = "0.3.1.1"
@@ -203 +351 @@ optional = false
-python-versions = ">=2.7, !=3.0.*"
+python-versions = ">=2.6, !=3.0.*"
@@ -207,0 +356,30 @@ graph = ["objgraph (>=1.7.2)"]
+[[package]]
+name = "docopt"
+version = "0.6.2"
+description = "Pythonic argument parser, that will make you smile"
+category = "main"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "et-xmlfile"
+version = "1.1.0"
+description = "An implementation of lxml.xmlfile for the standard library"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
+name = "fastavro"
+version = "1.4.4"
+description = "Fast read/write of AVRO files"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.extras]
+codecs = ["python-snappy", "zstandard", "lz4"]
+lz4 = ["lz4"]
+snappy = ["python-snappy"]
+zstandard = ["zstandard"]
+
@@ -215,0 +394,8 @@ python-versions = "*"
+[[package]]
+name = "flatbuffers"
+version = "1.12"
+description = "The FlatBuffers serialization format for Python"
+category = "main"
+optional = false
+python-versions = "*"
+
@@ -240,0 +427,75 @@ ssh = ["paramiko"]
+[[package]]
+name = "future"
+version = "0.18.2"
+description = "Clean single-source support for Python 3 and 2"
+category = "main"
+optional = false
+python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
+
+[[package]]
+name = "gast"
+version = "0.4.0"
+description = "Python AST that abstracts the underlying Python version"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+
+[[package]]
+name = "google-auth"
+version = "1.34.0"
+description = "Google Authentication Library"
+category = "main"
+optional = false
+python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*"
+
+[package.dependencies]
+cachetools = ">=2.0.0,<5.0"
+pyasn1-modules = ">=0.2.1"
+rsa = {version = ">=3.1.4,<5", markers = "python_version >= \"3.6\""}
+six = ">=1.9.0"
+
+[package.extras]
+aiohttp = ["requests (>=2.20.0,<3.0.0dev)", "aiohttp (>=3.6.2,<4.0.0dev)"]
+pyopenssl = ["pyopenssl (>=20.0.0)"]
+reauth = ["pyu2f (>=0.1.5)"]
+
+[[package]]
+name = "google-auth-oauthlib"
+version = "0.4.4"
+description = "Google Authentication Library"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+google-auth = ">=1.0.0"
+requests-oauthlib = ">=0.7.0"
+
+[package.extras]
+tool = ["click (>=6.0.0)"]
+
+[[package]]
+name = "google-pasta"
+version = "0.2.0"
+description = "pasta is an AST-based Python refactoring library"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+six = "*"
+
+[[package]]
+name = "grpcio"
+version = "1.34.1"
+description = "HTTP/2-based RPC framework"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+six = ">=1.5.2"
+
+[package.extras]
+protobuf = ["grpcio-tools (>=1.34.1)"]
+
@@ -248,0 +510,43 @@ python-versions = ">=3.6"
+[[package]]
+name = "h5py"
+version = "3.1.0"
+description = "Read and write HDF5 files from Python"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+numpy = [
+ {version = ">=1.17.5", markers = "python_version == \"3.8\""},
+ {version = ">=1.19.3", markers = "python_version >= \"3.9\""},
+]
+
+[[package]]
+name = "hdfs"
+version = "2.6.0"
+description = "HdfsCLI: API and command line interface for HDFS."
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+docopt = "*"
+requests = ">=2.7.0"
+six = ">=1.9.0"
+
+[package.extras]
+avro = ["fastavro (>=0.21.19)"]
+dataframe = ["fastavro (>=0.21.19)", "pandas (>=0.14.1)"]
+kerberos = ["requests-kerberos (>=0.7.0)"]
+
+[[package]]
+name = "httplib2"
+version = "0.19.1"
+description = "A comprehensive HTTP client library."
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+pyparsing = ">=2.4.2,<3"
+
@@ -251 +555 @@ name = "huggingface-hub"
-version = "0.0.14"
+version = "0.0.12"
@@ -286,0 +591,98 @@ python-versions = "*"
+[[package]]
+name = "joblib"
+version = "1.0.1"
+description = "Lightweight pipelining with Python functions"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
+name = "jsonlines"
+version = "2.0.0"
+description = "Library with helpers for the jsonlines file format"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
+name = "kenlm"
+version = "0.0.0"
+description = ""
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.source]
+type = "url"
+url = "https://github.com/kpu/kenlm/archive/master.zip"
+[[package]]
+name = "keras-nightly"
+version = "2.5.0.dev2021032900"
+description = "TensorFlow Keras."
+category = "main"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "keras-preprocessing"
+version = "1.1.2"
+description = "Easy data preprocessing and data augmentation for deep learning models"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+numpy = ">=1.9.1"
+six = ">=1.9.0"
+
+[package.extras]
+image = ["scipy (>=0.14)", "Pillow (>=5.2.0)"]
+pep8 = ["flake8"]
+tests = ["pandas", "pillow", "tensorflow", "keras", "pytest", "pytest-xdist", "pytest-cov"]
+
+[[package]]
+name = "kss"
+version = "2.5.1"
+description = "Split Korean text into sentences using heuristic algorithm using pure python"
+category = "main"
+optional = false
+python-versions = ">=3"
+
+[[package]]
+name = "lm-dataformat"
+version = "0.0.19"
+description = "A utility for storing and reading files for LM training."
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+jsonlines = "*"
+ujson = "*"
+zstandard = "*"
+
+[[package]]
+name = "lxml"
+version = "4.6.3"
+description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*"
+
+[package.extras]
+cssselect = ["cssselect (>=0.7)"]
+html5 = ["html5lib"]
+htmlsoup = ["beautifulsoup4"]
+source = ["Cython (>=0.29.7)"]
+
+[[package]]
+name = "markdown"
+version = "3.3.4"
+description = "Python implementation of Markdown."
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.extras]
+testing = ["coverage", "pyyaml"]
+
@@ -297 +699 @@ name = "multiprocess"
-version = "0.70.12.2"
+version = "0.70.9"
@@ -304 +706,14 @@ python-versions = "*"
-dill = ">=0.3.4"
+dill = ">=0.3.1"
+
+[[package]]
+name = "multivolumefile"
+version = "0.2.3"
+description = "multi volume file wrapper library"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.extras]
+check = ["check-manifest", "flake8", "flake8-black", "readme-renderer", "pygments", "isort (>=5.0.3)", "twine"]
+test = ["pytest", "pytest-cov", "pyannotate", "coverage[toml] (>=5.2)", "coveralls (>=2.1.1)", "hypothesis"]
+type = ["mypy", "mypy-extensions"]
@@ -313,0 +729,28 @@ python-versions = "*"
+[[package]]
+name = "nlp"
+version = "0.4.0"
+description = "HuggingFace/NLP is an open library of NLP datasets."
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+dill = "*"
+filelock = "*"
+numpy = "*"
+pandas = "*"
+pyarrow = ">=0.16.0"
+requests = ">=2.19.0"
+tqdm = ">=4.27"
+xxhash = "*"
+
+[package.extras]
+apache-beam = ["apache-beam"]
+dev = ["apache-beam", "absl-py", "bs4", "elasticsearch", "faiss-cpu", "langdetect", "mwparserfromhell", "nltk", "pytest", "pytest-xdist", "tensorflow", "torch", "tldextract", "zstandard", "black", "isort", "flake8 (==3.7.9)"]
+docs = ["recommonmark", "sphinx", "sphinx-markdown-tables", "sphinx-rtd-theme (==0.4.3)", "sphinx-copybutton"]
+quality = ["black", "isort", "flake8 (==3.7.9)"]
+tensorflow = ["tensorflow (>=2.2.0)"]
+tensorflow_gpu = ["tensorflow-gpu (>=2.2.0)"]
+tests = ["apache-beam", "absl-py", "bs4", "elasticsearch", "faiss-cpu", "langdetect", "mwparserfromhell", "nltk", "pytest", "pytest-xdist", "tensorflow", "torch", "tldextract", "zstandard"]
+torch = ["torch"]
+
@@ -316 +759 @@ name = "numpy"
-version = "1.21.1"
+version = "1.19.5"
@@ -320 +763,55 @@ optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.6"
+
+[[package]]
+name = "oauth2client"
+version = "4.1.3"
+description = "OAuth 2.0 client library"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+httplib2 = ">=0.9.1"
+pyasn1 = ">=0.1.7"
+pyasn1-modules = ">=0.0.5"
+rsa = ">=3.1.4"
+six = ">=1.6.1"
+
+[[package]]
+name = "oauthlib"
+version = "3.1.1"
+description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.extras]
+rsa = ["cryptography (>=3.0.0,<4)"]
+signals = ["blinker (>=1.4.0)"]
+signedtoken = ["cryptography (>=3.0.0,<4)", "pyjwt (>=2.0.0,<3)"]
+
+[[package]]
+name = "openpyxl"
+version = "3.0.7"
+description = "A Python library to read/write Excel 2010 xlsx/xlsm files"
+category = "main"
+optional = false
+python-versions = ">=3.6,"
+
+[package.dependencies]
+et-xmlfile = "*"
+
+[[package]]
+name = "opt-einsum"
+version = "3.3.0"
+description = "Optimizing numpys einsum function"
+category = "main"
+optional = false
+python-versions = ">=3.5"
+
+[package.dependencies]
+numpy = ">=1.7"
+
+[package.extras]
+docs = ["sphinx (==1.2.3)", "sphinxcontrib-napoleon", "sphinx-rtd-theme", "numpydoc"]
+tests = ["pytest", "pytest-cov", "pytest-pep8"]
@@ -331 +828,98 @@ python-versions = ">=3.6"
-pyparsing = ">=2.0.2"
+pyparsing = ">=2.0.2"
+
+[[package]]
+name = "pandas"
+version = "1.3.1"
+description = "Powerful data structures for data analysis, time series, and statistics"
+category = "main"
+optional = false
+python-versions = ">=3.7.1"
+
+[package.dependencies]
+numpy = ">=1.17.3"
+python-dateutil = ">=2.7.3"
+pytz = ">=2017.3"
+
+[package.extras]
+test = ["hypothesis (>=3.58)", "pytest (>=6.0)", "pytest-xdist"]
+
+[[package]]
+name = "pathspec"
+version = "0.9.0"
+description = "Utility library for gitignore style pattern matching of file paths."
+category = "dev"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
+
+[[package]]
+name = "pillow"
+version = "8.3.1"
+description = "Python Imaging Library (Fork)"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
+name = "pluggy"
+version = "0.13.1"
+description = "plugin and hook calling mechanisms for python"
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+
+[package.extras]
+dev = ["pre-commit", "tox"]
+
+[[package]]
+name = "protobuf"
+version = "3.17.3"
+description = "Protocol Buffers"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+six = ">=1.9"
+
+[[package]]
+name = "py"
+version = "1.10.0"
+description = "library with cross-python path, ini-parsing, io, code, log facilities"
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+
+[[package]]
+name = "py7zr"
+version = "0.16.1"
+description = "Pure python 7-zip library"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+bcj-cffi = ">=0.5.1,<0.6.0"
+brotli = {version = ">=1.0.9", markers = "platform_python_implementation == \"CPython\""}
+brotlicffi = {version = ">=1.0.9.2", markers = "platform_python_implementation == \"PyPy\""}
+multivolumefile = ">=0.2.3"
+pycryptodomex = ">=3.6.6"
+pyppmd = ">=0.14.0"
+pyzstd = ">=0.14.4,<0.15.0"
+texttable = "*"
+
+[package.extras]
+check = ["mypy (>=0.812)", "mypy-extensions (>=0.4.1)", "check-manifest", "flake8", "flake8-black", "flake8-deprecated", "isort (>=5.0.3)", "pygments", "readme-renderer", "twine"]
+docs = ["sphinx (>=2.3)", "sphinx-py3doc-enhanced-theme", "sphinx-a4doc", "docutils"]
+test = ["pytest", "pytest-benchmark", "pytest-cov", "pytest-timeout", "pytest-remotedata", "pytest-profiling", "pyannotate", "py-cpuinfo", "coverage[toml] (>=5.2)", "coveralls (>=2.1.1)"]
+test_compat = ["libarchive-c"]
+
+[[package]]
+name = "pyarrow"
+version = "4.0.1"
+description = "Python library for Apache Arrow"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+numpy = ">=1.16.6"
@@ -334,3 +928,3 @@ pyparsing = ">=2.0.2"
-name = "pandas"
-version = "1.3.1"
-description = "Powerful data structures for data analysis, time series, and statistics"
+name = "pyasn1"
+version = "0.4.8"
+description = "ASN.1 types and codecs"
@@ -339,9 +933 @@ optional = false
-python-versions = ">=3.7.1"
-
-[package.dependencies]
-numpy = ">=1.17.3"
-python-dateutil = ">=2.7.3"
-pytz = ">=2017.3"
-
-[package.extras]
-test = ["hypothesis (>=3.58)", "pytest (>=6.0)", "pytest-xdist"]
+python-versions = "*"
@@ -350,4 +936,4 @@ test = ["hypothesis (>=3.58)", "pytest (>=6.0)", "pytest-xdist"]
-name = "pathspec"
-version = "0.9.0"
-description = "Utility library for gitignore style pattern matching of file paths."
-category = "dev"
+name = "pyasn1-modules"
+version = "0.2.8"
+description = "A collection of ASN.1-based protocols modules."
+category = "main"
@@ -355 +941,4 @@ optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
+python-versions = "*"
+
+[package.dependencies]
+pyasn1 = ">=0.4.6,<0.5.0"
@@ -358,4 +947,4 @@ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
-name = "pluggy"
-version = "0.13.1"
-description = "plugin and hook calling mechanisms for python"
-category = "dev"
+name = "pycparser"
+version = "2.20"
+description = "C parser in Python"
+category = "main"
@@ -365,2 +954,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-[package.extras]
-dev = ["pre-commit", "tox"]
+[[package]]
+name = "pycryptodomex"
+version = "3.10.1"
+description = "Cryptographic library for Python"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
@@ -369,4 +963,4 @@ dev = ["pre-commit", "tox"]
-name = "py"
-version = "1.10.0"
-description = "library with cross-python path, ini-parsing, io, code, log facilities"
-category = "dev"
+name = "pydot"
+version = "1.4.2"
+description = "Python interface to Graphviz's Dot"
+category = "main"
@@ -375,0 +970,3 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+[package.dependencies]
+pyparsing = ">=2.1.4"
+
@@ -377,3 +974,3 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-name = "pyarrow"
-version = "4.0.1"
-description = "Python library for Apache Arrow"
+name = "pymongo"
+version = "3.12.0"
+description = "Python driver for MongoDB <http://www.mongodb.org>"
@@ -382 +979 @@ optional = false
-python-versions = ">=3.6"
+python-versions = "*"
@@ -384,2 +981,9 @@ python-versions = ">=3.6"
-[package.dependencies]
-numpy = ">=1.16.6"
+[package.extras]
+aws = ["pymongo-auth-aws (<2.0.0)"]
+encryption = ["pymongocrypt (>=1.1.0,<2.0.0)"]
+gssapi = ["pykerberos"]
+ocsp = ["pyopenssl (>=17.2.0)", "requests (<3.0.0)", "service-identity (>=18.1.0)", "certifi"]
+snappy = ["python-snappy"]
+srv = ["dnspython (>=1.16.0,<1.17.0)"]
+tls = ["ipaddress"]
+zstd = ["zstandard"]
@@ -394,0 +999,14 @@ python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
+[[package]]
+name = "pyppmd"
+version = "0.15.2"
+description = "PPMd compression/decompression library"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.extras]
+check = ["mypy (>=0.812)", "mypy-extensions (>=0.4.3)", "check-manifest", "flake8", "flake8-black", "readme-renderer", "pygments", "isort (>=5.0.3)"]
+docs = ["sphinx (>=2.3)", "sphinx-rtd-theme"]
+fuzzer = ["atheris", "hypothesis"]
+test = ["pytest", "pytest-benchmark", "pytest-cov", "psutil", "hypothesis", "coverage[toml] (>=5.2)"]
+
@@ -439 +1057 @@ description = "YAML parser and emitter for Python"
-category = "dev"
+category = "main"
@@ -442,0 +1061,8 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
+[[package]]
+name = "pyzstd"
+version = "0.14.4"
+description = "Python bindings to Zstandard (zstd) compression library, the API is similar to Python's bz2/lzma/zlib module."
+category = "main"
+optional = false
+python-versions = ">=3.5"
+
@@ -447 +1073 @@ description = "Alternative regular expression module, to replace re."
-category = "dev"
+category = "main"
@@ -468,0 +1095,41 @@ use_chardet_on_py3 = ["chardet (>=3.0.2,<5)"]
+[[package]]
+name = "requests-oauthlib"
+version = "1.3.0"
+description = "OAuthlib authentication support for Requests."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+
+[package.dependencies]
+oauthlib = ">=3.0.0"
+requests = ">=2.0.0"
+
+[package.extras]
+rsa = ["oauthlib[signedtoken] (>=3.0.0)"]
+
+[[package]]
+name = "rsa"
+version = "4.7.2"
+description = "Pure-Python RSA implementation"
+category = "main"
+optional = false
+python-versions = ">=3.5, <4"
+
+[package.dependencies]
+pyasn1 = ">=0.1.3"
+
+[[package]]
+name = "sacremoses"
+version = "0.0.45"
+description = "SacreMoses"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+click = "*"
+joblib = "*"
+regex = "*"
+six = "*"
+tqdm = "*"
+
@@ -471 +1138 @@ name = "six"
-version = "1.16.0"
+version = "1.15.0"
@@ -498,0 +1166,100 @@ full = ["itsdangerous", "jinja2", "python-multipart", "pyyaml", "requests", "gra
+[[package]]
+name = "tensorboard"
+version = "2.5.0"
+description = "TensorBoard lets you watch Tensors Flow"
+category = "main"
+optional = false
+python-versions = ">= 2.7, != 3.0.*, != 3.1.*"
+
+[package.dependencies]
+absl-py = ">=0.4"
+google-auth = ">=1.6.3,<2"
+google-auth-oauthlib = ">=0.4.1,<0.5"
+grpcio = ">=1.24.3"
+markdown = ">=2.6.8"
+numpy = ">=1.12.0"
+protobuf = ">=3.6.0"
+requests = ">=2.21.0,<3"
+tensorboard-data-server = ">=0.6.0,<0.7.0"
+tensorboard-plugin-wit = ">=1.6.0"
+werkzeug = ">=0.11.15"
+
+[[package]]
+name = "tensorboard-data-server"
+version = "0.6.1"
+description = "Fast data loading for TensorBoard"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
+name = "tensorboard-plugin-wit"
+version = "1.8.0"
+description = "What-If Tool TensorBoard plugin."
+category = "main"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "tensorflow"
+version = "2.5.0"
+description = "TensorFlow is an open source machine learning framework for everyone."
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+absl-py = ">=0.10,<1.0"
+astunparse = ">=1.6.3,<1.7.0"
+flatbuffers = ">=1.12.0,<1.13.0"
+gast = "0.4.0"
+google-pasta = ">=0.2,<1.0"
+grpcio = ">=1.34.0,<1.35.0"
+h5py = ">=3.1.0,<3.2.0"
+keras-nightly = ">=2.5.0.dev,<2.6.0"
+keras-preprocessing = ">=1.1.2,<1.2.0"
+numpy = ">=1.19.2,<1.20.0"
+opt-einsum = ">=3.3.0,<3.4.0"
+protobuf = ">=3.9.2"
+six = ">=1.15.0,<1.16.0"
+tensorboard = ">=2.5,<3.0"
+tensorflow-estimator = ">=2.5.0rc0,<2.6.0"
+termcolor = ">=1.1.0,<1.2.0"
+typing-extensions = ">=3.7.4,<3.8.0"
+wrapt = ">=1.12.1,<1.13.0"
+
+[[package]]
+name = "tensorflow-estimator"
+version = "2.5.0"
+description = "TensorFlow Estimator."
+category = "main"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "termcolor"
+version = "1.1.0"
+description = "ANSII Color formatting for output in terminal."
+category = "main"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "texttable"
+version = "1.6.4"
+description = "module for creating simple ASCII tables"
+category = "main"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "tokenizers"
+version = "0.10.3"
+description = "Fast and Customizable Tokenizers"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.extras]
+testing = ["pytest"]
+
@@ -530,0 +1298,69 @@ telegram = ["requests"]
+[[package]]
+name = "transformers"
+version = "4.9.1"
+description = "State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch"
+category = "main"
+optional = false
+python-versions = ">=3.6.0"
+
+[package.dependencies]
+filelock = "*"
+huggingface-hub = "0.0.12"
+numpy = ">=1.17"
+packaging = "*"
+pyyaml = ">=5.1"
+regex = "!=2019.12.17"
+requests = "*"
+sacremoses = "*"
+tokenizers = ">=0.10.1,<0.11"
+tqdm = ">=4.27"
+
+[package.extras]
+all = ["tensorflow (>=2.3)", "onnxconverter-common", "keras2onnx", "torch (>=1.0)", "jax (>=0.2.8)", "jaxlib (>=0.1.65)", "flax (>=0.3.4)", "optax (>=0.0.8)", "sentencepiece (==0.1.91)", "protobuf", "tokenizers (>=0.10.1,<0.11)", "soundfile", "torchaudio", "pillow", "optuna", "ray", "timm", "codecarbon (==1.2.0)"]
+codecarbon = ["codecarbon (==1.2.0)"]
+deepspeed = ["deepspeed (>=0.4.3)"]
+dev = ["tensorflow (>=2.3)", "onnxconverter-common", "keras2onnx", "torch (>=1.0)", "jax (>=0.2.8)", "jaxlib (>=0.1.65)", "flax (>=0.3.4)", "optax (>=0.0.8)", "sentencepiece (==0.1.91)", "protobuf", "tokenizers (>=0.10.1,<0.11)", "soundfile", "torchaudio", "pillow", "optuna", "ray", "timm", "codecarbon (==1.2.0)", "pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil", "datasets", "pytest-timeout", "black (==21.4b0)", "sacrebleu (>=1.4.12)", "rouge-score", "nltk", "gitpython", "faiss-cpu", "cookiecutter (==1.7.2)", "isort (>=5.5.4)", "flake8 (>=3.8.3)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "unidic-lite (>=1.0.7)", "unidic (>=1.0.2)", "docutils (==0.16.0)", "recommonmark", "sphinx (==3.2.1)", "sphinx-markdown-tables", "sphinx-rtd-theme (==0.4.3)", "sphinx-copybutton", "sphinxext-opengraph (==0.4.1)", "scikit-learn"]
+docs = ["tensorflow (>=2.3)", "onnxconverter-common", "keras2onnx", "torch (>=1.0)", "jax (>=0.2.8)", "jaxlib (>=0.1.65)", "flax (>=0.3.4)", "optax (>=0.0.8)", "sentencepiece (==0.1.91)", "protobuf", "tokenizers (>=0.10.1,<0.11)", "soundfile", "torchaudio", "pillow", "optuna", "ray", "timm", "codecarbon (==1.2.0)", "docutils (==0.16.0)", "recommonmark", "sphinx (==3.2.1)", "sphinx-markdown-tables", "sphinx-rtd-theme (==0.4.3)", "sphinx-copybutton", "sphinxext-opengraph (==0.4.1)"]
+docs_specific = ["docutils (==0.16.0)", "recommonmark", "sphinx (==3.2.1)", "sphinx-markdown-tables", "sphinx-rtd-theme (==0.4.3)", "sphinx-copybutton", "sphinxext-opengraph (==0.4.1)"]
+fairscale = ["fairscale (>0.3)"]
+flax = ["jax (>=0.2.8)", "jaxlib (>=0.1.65)", "flax (>=0.3.4)", "optax (>=0.0.8)"]
+integrations = ["optuna", "ray"]
+ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "unidic-lite (>=1.0.7)", "unidic (>=1.0.2)"]
+modelcreation = ["cookiecutter (==1.7.2)"]
+onnx = ["onnxconverter-common", "keras2onnx", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"]
+onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"]
+optuna = ["optuna"]
+quality = ["black (==21.4b0)", "isort (>=5.5.4)", "flake8 (>=3.8.3)"]
+ray = ["ray"]
+retrieval = ["faiss-cpu", "datasets"]
+sagemaker = ["sagemaker (>=2.31.0)"]
+sentencepiece = ["sentencepiece (==0.1.91)", "protobuf"]
+serving = ["pydantic", "uvicorn", "fastapi", "starlette"]
+sklearn = ["scikit-learn"]
+speech = ["soundfile", "torchaudio"]
+testing = ["pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil", "datasets", "pytest-timeout", "black (==21.4b0)", "sacrebleu (>=1.4.12)", "rouge-score", "nltk", "gitpython", "faiss-cpu", "cookiecutter (==1.7.2)"]
+tf = ["tensorflow (>=2.3)", "onnxconverter-common", "keras2onnx"]
+tf-cpu = ["tensorflow-cpu (>=2.3)", "onnxconverter-common", "keras2onnx"]
+timm = ["timm"]
+tokenizers = ["tokenizers (>=0.10.1,<0.11)"]
+torch = ["torch (>=1.0)"]
+torchhub = ["filelock", "huggingface-hub (==0.0.12)", "importlib-metadata", "numpy (>=1.17)", "packaging", "protobuf", "regex (!=2019.12.17)", "requests", "sacremoses", "sentencepiece (==0.1.91)", "torch (>=1.0)", "tokenizers (>=0.10.1,<0.11)", "tqdm (>=4.27)"]
+vision = ["pillow"]
+
+[[package]]
+name = "trec-car-tools"
+version = "2.5.4"
+description = ""
+category = "main"
+optional = false
+python-versions = ">=3.6"
+develop = false
+
+[package.dependencies]
+cbor = ">=1.0.0"
+numpy = ">=1.11.2"
+
+[package.source]
+type = "directory"
+url = "vendors/trec-car-tools/python3"
+
@@ -533 +1369 @@ name = "typing-extensions"
-version = "3.10.0.0"
+version = "3.7.4.3"
@@ -538,0 +1375,8 @@ python-versions = "*"
+[[package]]
+name = "ujson"
+version = "4.0.2"
+description = "Ultra fast JSON encoder and decoder for Python"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
@@ -582,0 +1427,27 @@ watchmedo = ["PyYAML (>=3.10)", "argh (>=0.24.1)"]
+[[package]]
+name = "werkzeug"
+version = "2.0.1"
+description = "The comprehensive WSGI web application library."
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.extras]
+watchdog = ["watchdog"]
+
+[[package]]
+name = "wget"
+version = "3.2"
+description = "pure python download utility"
+category = "main"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "wrapt"
+version = "1.12.1"
+description = "Module for decorators, wrappers and monkey patching."
+category = "main"
+optional = false
+python-versions = "*"
+
@@ -602,0 +1474,14 @@ multidict = ">=4.0"
+[[package]]
+name = "zstandard"
+version = "0.15.2"
+description = "Zstandard bindings for Python"
+category = "main"
+optional = false
+python-versions = ">=3.5"
+
+[package.dependencies]
+cffi = {version = ">=1.11", markers = "platform_python_implementation == \"PyPy\""}
+
+[package.extras]
+cffi = ["cffi (>=1.11)"]
+
@@ -606 +1491 @@ python-versions = "^3.8"
-content-hash = "7e0e1ab05dac16e809f2bc9bb7c4998f492b5fdcf5bafd5b6f5f289ab2ec06b1"
+content-hash = "a33b4415d9dadbe5601eb950817b4464343d32cde1cbacbad426b2d26cd40fb5"
@@ -608,0 +1494,4 @@ content-hash = "7e0e1ab05dac16e809f2bc9bb7c4998f492b5fdcf5bafd5b6f5f289ab2ec06b1
+absl-py = [
+ {file = "absl-py-0.13.0.tar.gz", hash = "sha256:6953272383486044699fd0e9f00aad167a27e08ce19aae66c6c4b10e7e767793"},
+ {file = "absl_py-0.13.0-py3-none-any.whl", hash = "sha256:62bd4e248ddb19d81aec8f9446b407ff37c8175c2ba88266a7afa9b4ce4a333b"},
+]
@@ -651,0 +1541,24 @@ anyio = [
+apache-beam = [
+ {file = "apache-beam-2.31.0.zip", hash = "sha256:bab2702905e5e41e6f11621127ba73d54929df34c7b059108d2d3a95dcdc5cff"},
+ {file = "apache_beam-2.31.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:52b6449d22c5d1478048bcfcf9191b3f04ba8cd5f1f560d94d8252ece59e6307"},
+ {file = "apache_beam-2.31.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:578eb88de49e10acb902e21750f862084477e994f8975c11ac972048497d8223"},
+ {file = "apache_beam-2.31.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:443b7967e8436a0359940f7d030f6e486c504e517af43d7cb862a63d1645977c"},
+ {file = "apache_beam-2.31.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:8adabb0c702a048d59efed58b86f186b72e8e16771489989aa74292f5ba91a88"},
+ {file = "apache_beam-2.31.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:42ebdc16ad0b2e69010d0468fbd756d66a8600cc6ffce76ff0bed64144a3b4b8"},
+ {file = "apache_beam-2.31.0-cp36-cp36m-win32.whl", hash = "sha256:91c7c7aee5a36662d9596db7e5975b2a0292093e5abde6ecd7f728f07671aa11"},
+ {file = "apache_beam-2.31.0-cp36-cp36m-win_amd64.whl", hash = "sha256:54ba90e8f30b38ae37752984f0ff9045ccc1564665ad60efd5df22d6cc0d9f22"},
+ {file = "apache_beam-2.31.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:da58d5236f092911b529b1d9ecf1c07a78995bca5a22075e2c6920b7e94d96b7"},
+ {file = "apache_beam-2.31.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:76ad0cbbbad5dc08109f471867ac761fe0c31c1acaf82334b92cea59824c5b7b"},
+ {file = "apache_beam-2.31.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:7306ac79c5c2ebb4715be49b0051a23f01b26e8600c5e5f299dd9dd61c5daf05"},
+ {file = "apache_beam-2.31.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:2370d117abba79aef783a4a1ec93d897bebd49ef2e87b19a714d77bd2544a02d"},
+ {file = "apache_beam-2.31.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:cfb9a5de36ed92a3f7c4d41299ed5aa083c3b972e621efbe2f5fb69c3290fb8a"},
+ {file = "apache_beam-2.31.0-cp37-cp37m-win32.whl", hash = "sha256:3c3503be062488196789c8c62d8c970eac8f7b208e040f98f8fe7fd43a8db7eb"},
+ {file = "apache_beam-2.31.0-cp37-cp37m-win_amd64.whl", hash = "sha256:36542d0570bebee8dc542487dcf3017b554da7b27c5a020790fe93d600b6487b"},
+ {file = "apache_beam-2.31.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5011d8aa701f2e1f3f1227ac6b8e922009e67ae8f06272c101b175709f2b2871"},
+ {file = "apache_beam-2.31.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:55bf3a16fd0a43e31b0b87dd4e2a2f5558adaf77f95ab454704dbc40a7fe9527"},
+ {file = "apache_beam-2.31.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:f79e44ad09a59e7e6eebb0f3e7637a36e988f2a649af5e34442fbbfe22ead230"},
+ {file = "apache_beam-2.31.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:3e09650e625ae3d20ab0752c11f238b39b3b9c64a1adb6a4b30cce6dffe097e1"},
+ {file = "apache_beam-2.31.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:d49bbcaa352798279e34bdafb11ab097f60b4b9dad171cf6437f13e68cd3bdc6"},
+ {file = "apache_beam-2.31.0-cp38-cp38-win32.whl", hash = "sha256:fa44d2330899ef4f8e21b38adb5c3e550b29b50c743844e8b6a77422a6928851"},
+ {file = "apache_beam-2.31.0-cp38-cp38-win_amd64.whl", hash = "sha256:64ee0f3eeccb83f4165d8582f9c35faa9aca38ed7ea9a8047030cca89d554686"},
+]
@@ -663,0 +1577,4 @@ asgiref = [
+astunparse = [
+ {file = "astunparse-1.6.3-py2.py3-none-any.whl", hash = "sha256:c2652417f2c8b5bb325c885ae329bdf3f86424075c4fd1a128674bc6fba4b8e8"},
+ {file = "astunparse-1.6.3.tar.gz", hash = "sha256:5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872"},
+]
@@ -675,0 +1593,41 @@ attrs = [
+avro-python3 = [
+ {file = "avro-python3-1.9.2.1.tar.gz", hash = "sha256:ca1e77a3da5ac98e8833588f71fb2e170b38e34787ee0e04920de0e9470b7d32"},
+]
+bcj-cffi = [
+ {file = "bcj-cffi-0.5.1.tar.gz", hash = "sha256:2b4a5d1252e72bea5a5eb1e51650435825299b0e87c25ddfd21a1623514fd0cc"},
+ {file = "bcj_cffi-0.5.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:104af01bedb0cdad6eefd4367f85247ef24be02afb1caf6d1481252340e2c92d"},
+ {file = "bcj_cffi-0.5.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:ff0dc7ccf7f2f86bc833d932ba6a997765657dcee261660caf7e26fe8cd250cd"},
+ {file = "bcj_cffi-0.5.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:5ebd82c7c41bd448b3a43d280fa10a43eef3f1e4ff28a0b2045a0b19c600545f"},
+ {file = "bcj_cffi-0.5.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:da93f1a750f8917449d0f378c5e46360df7a80b1fc00aee6a14998ec59193cf6"},
+ {file = "bcj_cffi-0.5.1-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:3ac34ee983a4734d482ae349d9f71cf4398a500bff9170ccac8dd258b62e4e6c"},
+ {file = "bcj_cffi-0.5.1-cp36-cp36m-win32.whl", hash = "sha256:1a25d32a6c5ca2cefdb0b0fcf82134728892dabddb043b330713fcbe1d022b49"},
+ {file = "bcj_cffi-0.5.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f10a4a023de1681218efb81e3bedcffa2d0457f4093b87c10fec6927857a1ced"},
+ {file = "bcj_cffi-0.5.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0244c66251cee13dc479f765b44d5c4762b77fde220ef007a90f3f2aa2adc9f1"},
+ {file = "bcj_cffi-0.5.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:f31d4d46c0495169e797296253c3e6b642c5c7ee17f5a7f5b6e633326a8b0ad2"},
+ {file = "bcj_cffi-0.5.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:cbaf6ded7f2415a1f872fbff746db0eaab4aa9c6cc89a60217b5925c214969d5"},
+ {file = "bcj_cffi-0.5.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:aaf507e239f4586812f12fd0a49ba892ffd7fe1300233b8ed0b66f21d56f6ca1"},
+ {file = "bcj_cffi-0.5.1-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:d916718a0bb3c7e421cbb92b42ee4bcdba6f122a6ec6c7b82d4953324a6f0cdc"},
+ {file = "bcj_cffi-0.5.1-cp37-cp37m-win32.whl", hash = "sha256:f4e831f0a139b0bf6fe844611c77d10cd1b411f9853888a0e2dfcc65d686c54e"},
+ {file = "bcj_cffi-0.5.1-cp37-cp37m-win_amd64.whl", hash = "sha256:713c704d55cb0707200de6b693c1f4061b313547c6f31eb9a29040bbd03dd01e"},
+ {file = "bcj_cffi-0.5.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c866e20064332db938f521cdc3694079b7ebe74a3e1a32631f09155cbefb5709"},
+ {file = "bcj_cffi-0.5.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:bc44e677f6edd7c0022a1e23c60608fa0be5067c05434afffce6a6c2ac1c1445"},
+ {file = "bcj_cffi-0.5.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:32287a6ad02252744ac76fbe7fe0df3605b812e5f45a337be1c932f3afeb4ace"},
+ {file = "bcj_cffi-0.5.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:f557c2a465f11ccb620b807a4a37ba173f261b1fffe0375f641fc1c0b0e1e410"},
+ {file = "bcj_cffi-0.5.1-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:b0a22e63ca295426daa2eb4c04a096c680485789d88bd660908041859920fd0c"},
+ {file = "bcj_cffi-0.5.1-cp38-cp38-win32.whl", hash = "sha256:1815309de9979075ec9f5a33763c63bb6dc9cf53a0b7669699ca4589bd120695"},
+ {file = "bcj_cffi-0.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1005f7e23db9febc88639897e5ce6c2084693240255d7a86a4fc1e9c613532c"},
+ {file = "bcj_cffi-0.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:883ac8e138334606c743de7648974f3ce13250d7eccfb69af286225fd409e196"},
+ {file = "bcj_cffi-0.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9884cda59d5b2a8b85d0421707ab4ab03911432b003b5effd7106fa21de5acb1"},
+ {file = "bcj_cffi-0.5.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:2d531836a6f51ae4c0895789808effaecde085a72be2350abe0e195b2e9ef090"},
+ {file = "bcj_cffi-0.5.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:08a7099ababbe92a976da30ce4d1f1a79cc8d588743e4d43b2df9272b0a51c1b"},
+ {file = "bcj_cffi-0.5.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:6e4ecdc79c3a6db3e45147215e2b176b58507f48ae5934eba207700553994e21"},
+ {file = "bcj_cffi-0.5.1-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:02780de82e0b5cbfb6452de270f3b64635d32ba768a32ee9e881bc4097c9914d"},
+ {file = "bcj_cffi-0.5.1-cp39-cp39-win32.whl", hash = "sha256:ccf972ae14987c2c16e825fab5b59e540fe7892c6d3405d55349b8d9361bf7be"},
+ {file = "bcj_cffi-0.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:ad394d5b7574d033a5566066498d6b51c3af179fac16abfa760554d977591b4f"},
+ {file = "bcj_cffi-0.5.1-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:02d58b5a633262c52f1379b3a27de42e6ce2cb9170df383bc2eb2d9c238841fc"},
+ {file = "bcj_cffi-0.5.1-pp36-pypy36_pp73-manylinux1_x86_64.whl", hash = "sha256:ea7879030ffe13efdc2e6483ecd65dd222df5a40be124f2eb4f06132ce7aec78"},
+ {file = "bcj_cffi-0.5.1-pp36-pypy36_pp73-win32.whl", hash = "sha256:dafab8db5e62405e8890d654dffa4f5601fe8669d18221803b4617f10975277b"},
+ {file = "bcj_cffi-0.5.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:af1979e93937695e086affed8eca4aa0a8f5db73cb36cc7a0a968e516195799a"},
+ {file = "bcj_cffi-0.5.1-pp37-pypy37_pp73-manylinux1_x86_64.whl", hash = "sha256:6e1f9e7f1b54db5a4d3e65de977e0b130d2c256ce8b47d1606bb369588d5ad6d"},
+ {file = "bcj_cffi-0.5.1-pp37-pypy37_pp73-win32.whl", hash = "sha256:bf1c5fe0669a6735c58230cafa7e1b45aa4de4ca92fa85ccf0ed69aa9f9c1bba"},
+]
@@ -679,0 +1638,73 @@ black = [
+brotli = [
+ {file = "Brotli-1.0.9-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:268fe94547ba25b58ebc724680609c8ee3e5a843202e9a381f6f9c5e8bdb5c70"},
+ {file = "Brotli-1.0.9-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:c2415d9d082152460f2bd4e382a1e85aed233abc92db5a3880da2257dc7daf7b"},
+ {file = "Brotli-1.0.9-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5913a1177fc36e30fcf6dc868ce23b0453952c78c04c266d3149b3d39e1410d6"},
+ {file = "Brotli-1.0.9-cp27-cp27m-win32.whl", hash = "sha256:afde17ae04d90fbe53afb628f7f2d4ca022797aa093e809de5c3cf276f61bbfa"},
+ {file = "Brotli-1.0.9-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:7cb81373984cc0e4682f31bc3d6be9026006d96eecd07ea49aafb06897746452"},
+ {file = "Brotli-1.0.9-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:db844eb158a87ccab83e868a762ea8024ae27337fc7ddcbfcddd157f841fdfe7"},
+ {file = "Brotli-1.0.9-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:c83aa123d56f2e060644427a882a36b3c12db93727ad7a7b9efd7d7f3e9cc2c4"},
+ {file = "Brotli-1.0.9-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:6b2ae9f5f67f89aade1fab0f7fd8f2832501311c363a21579d02defa844d9296"},
+ {file = "Brotli-1.0.9-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:68715970f16b6e92c574c30747c95cf8cf62804569647386ff032195dc89a430"},
+ {file = "Brotli-1.0.9-cp35-cp35m-win32.whl", hash = "sha256:defed7ea5f218a9f2336301e6fd379f55c655bea65ba2476346340a0ce6f74a1"},
+ {file = "Brotli-1.0.9-cp35-cp35m-win_amd64.whl", hash = "sha256:88c63a1b55f352b02c6ffd24b15ead9fc0e8bf781dbe070213039324922a2eea"},
+ {file = "Brotli-1.0.9-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:503fa6af7da9f4b5780bb7e4cbe0c639b010f12be85d02c99452825dd0feef3f"},
+ {file = "Brotli-1.0.9-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:40d15c79f42e0a2c72892bf407979febd9cf91f36f495ffb333d1d04cebb34e4"},
+ {file = "Brotli-1.0.9-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:93130612b837103e15ac3f9cbacb4613f9e348b58b3aad53721d92e57f96d46a"},
+ {file = "Brotli-1.0.9-cp36-cp36m-win32.whl", hash = "sha256:61a7ee1f13ab913897dac7da44a73c6d44d48a4adff42a5701e3239791c96e14"},
+ {file = "Brotli-1.0.9-cp36-cp36m-win_amd64.whl", hash = "sha256:1c48472a6ba3b113452355b9af0a60da5c2ae60477f8feda8346f8fd48e3e87c"},
+ {file = "Brotli-1.0.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3b78a24b5fd13c03ee2b7b86290ed20efdc95da75a3557cc06811764d5ad1126"},
+ {file = "Brotli-1.0.9-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:9d12cf2851759b8de8ca5fde36a59c08210a97ffca0eb94c532ce7b17c6a3d1d"},
+ {file = "Brotli-1.0.9-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:6c772d6c0a79ac0f414a9f8947cc407e119b8598de7621f39cacadae3cf57d12"},
+ {file = "Brotli-1.0.9-cp37-cp37m-win32.whl", hash = "sha256:f909bbbc433048b499cb9db9e713b5d8d949e8c109a2a548502fb9aa8630f0b1"},
+ {file = "Brotli-1.0.9-cp37-cp37m-win_amd64.whl", hash = "sha256:97f715cf371b16ac88b8c19da00029804e20e25f30d80203417255d239f228b5"},
+ {file = "Brotli-1.0.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:160c78292e98d21e73a4cc7f76a234390e516afcd982fa17e1422f7c6a9ce9c8"},
+ {file = "Brotli-1.0.9-cp38-cp38-manylinux1_i686.whl", hash = "sha256:b663f1e02de5d0573610756398e44c130add0eb9a3fc912a09665332942a2efb"},
+ {file = "Brotli-1.0.9-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:5b6ef7d9f9c38292df3690fe3e302b5b530999fa90014853dcd0d6902fb59f26"},
+ {file = "Brotli-1.0.9-cp38-cp38-win32.whl", hash = "sha256:35a3edbe18e876e596553c4007a087f8bcfd538f19bc116917b3c7522fca0429"},
+ {file = "Brotli-1.0.9-cp38-cp38-win_amd64.whl", hash = "sha256:269a5743a393c65db46a7bb982644c67ecba4b8d91b392403ad8a861ba6f495f"},
+ {file = "Brotli-1.0.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5cb1e18167792d7d21e21365d7650b72d5081ed476123ff7b8cac7f45189c0c7"},
+ {file = "Brotli-1.0.9-cp39-cp39-manylinux1_i686.whl", hash = "sha256:16d528a45c2e1909c2798f27f7bf0a3feec1dc9e50948e738b961618e38b6a7b"},
+ {file = "Brotli-1.0.9-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:56d027eace784738457437df7331965473f2c0da2c70e1a1f6fdbae5402e0389"},
+ {file = "Brotli-1.0.9-cp39-cp39-win32.whl", hash = "sha256:cfc391f4429ee0a9370aa93d812a52e1fee0f37a81861f4fdd1f4fb28e8547c3"},
+ {file = "Brotli-1.0.9-cp39-cp39-win_amd64.whl", hash = "sha256:854c33dad5ba0fbd6ab69185fec8dab89e13cda6b7d191ba111987df74f38761"},
+ {file = "Brotli-1.0.9.zip", hash = "sha256:4d1b810aa0ed773f81dceda2cc7b403d01057458730e309856356d4ef4188438"},
+]
+brotlicffi = [
+ {file = "brotlicffi-1.0.9.2-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:408ec4359f9763280d5c4e0ad29c51d1240b25fdd18719067e972163b4125b98"},
+ {file = "brotlicffi-1.0.9.2-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:2e4629f7690ded66c8818715c6d4dd6a7ff6a4f10fad6186fe99850f781ce210"},
+ {file = "brotlicffi-1.0.9.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:137c4635edcdf593de5ce9d0daa596bf499591b16b8fca5fd72a490deb54b2ee"},
+ {file = "brotlicffi-1.0.9.2-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:af8a1b7bcfccf9c41a3c8654994d6a81821fdfe4caddcfe5045bfda936546ca3"},
+ {file = "brotlicffi-1.0.9.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:9078432af4785f35ab3840587eed7fb131e3fc77eb2a739282b649b343c584dd"},
+ {file = "brotlicffi-1.0.9.2-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:7bb913d5bf3b4ce2ec59872711dc9faaff5f320c3c3827cada2d8a7b793a7753"},
+ {file = "brotlicffi-1.0.9.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:16a0c9392a1059e2e62839fbd037d2e7e03c8ae5da65e9746f582464f7fab1bb"},
+ {file = "brotlicffi-1.0.9.2-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:94d2810efc5723f1447b332223b197466190518a3eeca93b9f357efb5b22c6dc"},
+ {file = "brotlicffi-1.0.9.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:9e70f3e20f317d70912b10dbec48b29114d3dbd0e9d88475cb328e6c086f0546"},
+ {file = "brotlicffi-1.0.9.2-cp35-abi3-macosx_10_9_x86_64.whl", hash = "sha256:586f0ea3c2eed455d5f2330b9ab4a591514c8de0ee53d445645efcfbf053c69f"},
+ {file = "brotlicffi-1.0.9.2-cp35-abi3-manylinux1_i686.whl", hash = "sha256:4454c3baedc277fd6e65f983e3eb8e77f4bc15060f69370a0201746e2edeca81"},
+ {file = "brotlicffi-1.0.9.2-cp35-abi3-manylinux1_x86_64.whl", hash = "sha256:52c1c12dad6eb1d44213a0a76acf5f18f64653bd801300bef5e2f983405bdde5"},
+ {file = "brotlicffi-1.0.9.2-cp35-abi3-manylinux2010_i686.whl", hash = "sha256:21cd400d24b344c218d8e32b394849e31b7c15784667575dbda9f65c46a64b0a"},
+ {file = "brotlicffi-1.0.9.2-cp35-abi3-manylinux2010_x86_64.whl", hash = "sha256:71061f8bc86335b652e442260c4367b782a92c6e295cf5a10eff84c7d19d8cf5"},
+ {file = "brotlicffi-1.0.9.2-cp35-abi3-manylinux2014_aarch64.whl", hash = "sha256:15e0db52c56056be6310fc116b3d7c6f34185594e261f23790b2fb6489998363"},
+ {file = "brotlicffi-1.0.9.2-cp35-abi3-win32.whl", hash = "sha256:551305703d12a2dd1ae43d3dde35dee20b1cb49b5796279d4d34e2c6aec6be4d"},
+ {file = "brotlicffi-1.0.9.2-cp35-abi3-win_amd64.whl", hash = "sha256:2be4fb8a7cb482f226af686cd06d2a2cab164ccdf99e460f8e3a5ec9a5337da2"},
+ {file = "brotlicffi-1.0.9.2-pp27-pypy_73-macosx_10_9_x86_64.whl", hash = "sha256:8e7221d8a084d32d15c7b58e0ce0573972375c5038423dbe83f217cfe512e680"},
+ {file = "brotlicffi-1.0.9.2-pp27-pypy_73-manylinux1_x86_64.whl", hash = "sha256:75a46bc5ed2753e1648cc211dcb2c1ac66116038766822dc104023f67ff4dfd8"},
+ {file = "brotlicffi-1.0.9.2-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:1e27c43ef72a278f9739b12b2df80ee72048cd4cbe498f8bbe08aaaa67a5d5c8"},
+ {file = "brotlicffi-1.0.9.2-pp27-pypy_73-win32.whl", hash = "sha256:feb942814285bdc5e97efc77a04e48283c17dfab9ea082d79c0a7b9e53ef1eab"},
+ {file = "brotlicffi-1.0.9.2-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a6208d82c3172eeeb3be83ed4efd5831552c7cd47576468e50fcf0fb23fcf97f"},
+ {file = "brotlicffi-1.0.9.2-pp36-pypy36_pp73-manylinux1_x86_64.whl", hash = "sha256:408c810c599786fb806556ff17e844a903884e6370ca400bcec7fa286149f39c"},
+ {file = "brotlicffi-1.0.9.2-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:a73099858ee343e8801710a08be8d194f47715ff21e98d92a19ac461058f52d1"},
+ {file = "brotlicffi-1.0.9.2-pp36-pypy36_pp73-win32.whl", hash = "sha256:916b790f967a18a595e61f218c252f83718ac91f24157d622cf0fa710cd26ab7"},
+ {file = "brotlicffi-1.0.9.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ba4a00263af40e875ec3d6c7f623cbf8c795b55705da18c64ec36b6bf0848bc5"},
+ {file = "brotlicffi-1.0.9.2-pp37-pypy37_pp73-manylinux1_x86_64.whl", hash = "sha256:df78aa47741122b0d5463f1208b7bb18bc9706dee5152d9f56e0ead4865015cd"},
+ {file = "brotlicffi-1.0.9.2-pp37-pypy37_pp73-manylinux2010_x86_64.whl", hash = "sha256:9030cd5099252d16bfa4e22659c84a89c102e94f8e81d30764788b72e2d7cfb7"},
+ {file = "brotlicffi-1.0.9.2-pp37-pypy37_pp73-win32.whl", hash = "sha256:7e72978f4090a161885b114f87b784f538dcb77dafc6602592c1cf39ae8d243d"},
+ {file = "brotlicffi-1.0.9.2.tar.gz", hash = "sha256:0c248a68129d8fc6a217767406c731e498c3e19a7be05ea0a90c3c86637b7d96"},
+]
+cachetools = [
+ {file = "cachetools-4.2.2-py3-none-any.whl", hash = "sha256:2cc0b89715337ab6dbba85b5b50effe2b0c74e035d83ee8ed637cf52f12ae001"},
+ {file = "cachetools-4.2.2.tar.gz", hash = "sha256:61b5ed1e22a0924aed1d23b478f37e8d52549ff8a961de2909c69bf950020cff"},
+]
+cbor = [
+ {file = "cbor-1.0.0.tar.gz", hash = "sha256:13225a262ddf5615cbd9fd55a76a0d53069d18b07d2e9f19c39e6acb8609bbb6"},
+]
@@ -683,0 +1715,47 @@ certifi = [
+cffi = [
+ {file = "cffi-1.14.6-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:22b9c3c320171c108e903d61a3723b51e37aaa8c81255b5e7ce102775bd01e2c"},
+ {file = "cffi-1.14.6-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:f0c5d1acbfca6ebdd6b1e3eded8d261affb6ddcf2186205518f1428b8569bb99"},
+ {file = "cffi-1.14.6-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:99f27fefe34c37ba9875f224a8f36e31d744d8083e00f520f133cab79ad5e819"},
+ {file = "cffi-1.14.6-cp27-cp27m-win32.whl", hash = "sha256:55af55e32ae468e9946f741a5d51f9896da6b9bf0bbdd326843fec05c730eb20"},
+ {file = "cffi-1.14.6-cp27-cp27m-win_amd64.whl", hash = "sha256:7bcac9a2b4fdbed2c16fa5681356d7121ecabf041f18d97ed5b8e0dd38a80224"},
+ {file = "cffi-1.14.6-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:ed38b924ce794e505647f7c331b22a693bee1538fdf46b0222c4717b42f744e7"},
+ {file = "cffi-1.14.6-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:e22dcb48709fc51a7b58a927391b23ab37eb3737a98ac4338e2448bef8559b33"},
+ {file = "cffi-1.14.6-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:aedb15f0a5a5949ecb129a82b72b19df97bbbca024081ed2ef88bd5c0a610534"},
+ {file = "cffi-1.14.6-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:48916e459c54c4a70e52745639f1db524542140433599e13911b2f329834276a"},
+ {file = "cffi-1.14.6-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:f627688813d0a4140153ff532537fbe4afea5a3dffce1f9deb7f91f848a832b5"},
+ {file = "cffi-1.14.6-cp35-cp35m-win32.whl", hash = "sha256:f0010c6f9d1a4011e429109fda55a225921e3206e7f62a0c22a35344bfd13cca"},
+ {file = "cffi-1.14.6-cp35-cp35m-win_amd64.whl", hash = "sha256:57e555a9feb4a8460415f1aac331a2dc833b1115284f7ded7278b54afc5bd218"},
+ {file = "cffi-1.14.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e8c6a99be100371dbb046880e7a282152aa5d6127ae01783e37662ef73850d8f"},
+ {file = "cffi-1.14.6-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:19ca0dbdeda3b2615421d54bef8985f72af6e0c47082a8d26122adac81a95872"},
+ {file = "cffi-1.14.6-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:d950695ae4381ecd856bcaf2b1e866720e4ab9a1498cba61c602e56630ca7195"},
+ {file = "cffi-1.14.6-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9dc245e3ac69c92ee4c167fbdd7428ec1956d4e754223124991ef29eb57a09d"},
+ {file = "cffi-1.14.6-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8661b2ce9694ca01c529bfa204dbb144b275a31685a075ce123f12331be790b"},
+ {file = "cffi-1.14.6-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b315d709717a99f4b27b59b021e6207c64620790ca3e0bde636a6c7f14618abb"},
+ {file = "cffi-1.14.6-cp36-cp36m-win32.whl", hash = "sha256:80b06212075346b5546b0417b9f2bf467fea3bfe7352f781ffc05a8ab24ba14a"},
+ {file = "cffi-1.14.6-cp36-cp36m-win_amd64.whl", hash = "sha256:a9da7010cec5a12193d1af9872a00888f396aba3dc79186604a09ea3ee7c029e"},
+ {file = "cffi-1.14.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4373612d59c404baeb7cbd788a18b2b2a8331abcc84c3ba40051fcd18b17a4d5"},
+ {file = "cffi-1.14.6-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:f10afb1004f102c7868ebfe91c28f4a712227fe4cb24974350ace1f90e1febbf"},
+ {file = "cffi-1.14.6-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:fd4305f86f53dfd8cd3522269ed7fc34856a8ee3709a5e28b2836b2db9d4cd69"},
+ {file = "cffi-1.14.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d6169cb3c6c2ad50db5b868db6491a790300ade1ed5d1da29289d73bbe40b56"},
+ {file = "cffi-1.14.6-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d4b68e216fc65e9fe4f524c177b54964af043dde734807586cf5435af84045c"},
+ {file = "cffi-1.14.6-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33791e8a2dc2953f28b8d8d300dde42dd929ac28f974c4b4c6272cb2955cb762"},
+ {file = "cffi-1.14.6-cp37-cp37m-win32.whl", hash = "sha256:0c0591bee64e438883b0c92a7bed78f6290d40bf02e54c5bf0978eaf36061771"},
+ {file = "cffi-1.14.6-cp37-cp37m-win_amd64.whl", hash = "sha256:8eb687582ed7cd8c4bdbff3df6c0da443eb89c3c72e6e5dcdd9c81729712791a"},
+ {file = "cffi-1.14.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ba6f2b3f452e150945d58f4badd92310449876c4c954836cfb1803bdd7b422f0"},
+ {file = "cffi-1.14.6-cp38-cp38-manylinux1_i686.whl", hash = "sha256:64fda793737bc4037521d4899be780534b9aea552eb673b9833b01f945904c2e"},
+ {file = "cffi-1.14.6-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:9f3e33c28cd39d1b655ed1ba7247133b6f7fc16fa16887b120c0c670e35ce346"},
+ {file = "cffi-1.14.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26bb2549b72708c833f5abe62b756176022a7b9a7f689b571e74c8478ead51dc"},
+ {file = "cffi-1.14.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb687a11f0a7a1839719edd80f41e459cc5366857ecbed383ff376c4e3cc6afd"},
+ {file = "cffi-1.14.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2ad4d668a5c0645d281dcd17aff2be3212bc109b33814bbb15c4939f44181cc"},
+ {file = "cffi-1.14.6-cp38-cp38-win32.whl", hash = "sha256:487d63e1454627c8e47dd230025780e91869cfba4c753a74fda196a1f6ad6548"},
+ {file = "cffi-1.14.6-cp38-cp38-win_amd64.whl", hash = "sha256:c33d18eb6e6bc36f09d793c0dc58b0211fccc6ae5149b808da4a62660678b156"},
+ {file = "cffi-1.14.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:06c54a68935738d206570b20da5ef2b6b6d92b38ef3ec45c5422c0ebaf338d4d"},
+ {file = "cffi-1.14.6-cp39-cp39-manylinux1_i686.whl", hash = "sha256:f174135f5609428cc6e1b9090f9268f5c8935fddb1b25ccb8255a2d50de6789e"},
+ {file = "cffi-1.14.6-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:f3ebe6e73c319340830a9b2825d32eb6d8475c1dac020b4f0aa774ee3b898d1c"},
+ {file = "cffi-1.14.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c8d896becff2fa653dc4438b54a5a25a971d1f4110b32bd3068db3722c80202"},
+ {file = "cffi-1.14.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4922cd707b25e623b902c86188aca466d3620892db76c0bdd7b99a3d5e61d35f"},
+ {file = "cffi-1.14.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c9e005e9bd57bc987764c32a1bee4364c44fdc11a3cc20a40b93b444984f2b87"},
+ {file = "cffi-1.14.6-cp39-cp39-win32.whl", hash = "sha256:eb9e2a346c5238a30a746893f23a9535e700f8192a68c07c0258e7ece6ff3728"},
+ {file = "cffi-1.14.6-cp39-cp39-win_amd64.whl", hash = "sha256:818014c754cd3dba7229c0f5884396264d51ffb87ec86e927ef0be140bfdb0d2"},
+ {file = "cffi-1.14.6.tar.gz", hash = "sha256:c9a875ce9d7fe32887784274dd533c57909b7b1dcadcc128a2ac21331a9765dd"},
+]
@@ -699,0 +1778,10 @@ colorama = [
+conllu = [
+ {file = "conllu-4.4-py2.py3-none-any.whl", hash = "sha256:fe7e3547bc2beec8a0af8076cd564040dff7feec4ef20779a63a395e59e8116f"},
+ {file = "conllu-4.4.tar.gz", hash = "sha256:37b812ef3e30168232239d65564e257975c3399ec5d7fca9915a52b44bdc6553"},
+]
+crcmod = [
+ {file = "crcmod-1.7.tar.gz", hash = "sha256:dc7051a0db5f2bd48665a990d3ec1cc305a466a77358ca4492826f41f283601e"},
+ {file = "crcmod-1.7.win32-py2.6.msi", hash = "sha256:69a2e5c6c36d0f096a7beb4cd34e5f882ec5fd232efb710cdb85d4ff196bd52e"},
+ {file = "crcmod-1.7.win32-py2.7.msi", hash = "sha256:737fb308fa2ce9aed2e29075f0d5980d4a89bfbec48a368c607c5c63b3efb90e"},
+ {file = "crcmod-1.7.win32-py3.1.msi", hash = "sha256:50586ab48981f11e5b117523d97bb70864a2a1af246cf6e4f5c4a21ef4611cd1"},
+]
@@ -705,2 +1793,27 @@ dill = [
- {file = "dill-0.3.4-py2.py3-none-any.whl", hash = "sha256:7e40e4a70304fd9ceab3535d36e58791d9c4a776b38ec7f7ec9afc8d3dca4d4f"},
- {file = "dill-0.3.4.zip", hash = "sha256:9f9734205146b2b353ab3fec9af0070237b6ddae78452af83d2fca84d739e675"},
+ {file = "dill-0.3.1.1.tar.gz", hash = "sha256:42d8ef819367516592a825746a18073ced42ca169ab1f5f4044134703e7a049c"},
+]
+docopt = [
+ {file = "docopt-0.6.2.tar.gz", hash = "sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491"},
+]
+et-xmlfile = [
+ {file = "et_xmlfile-1.1.0-py3-none-any.whl", hash = "sha256:a2ba85d1d6a74ef63837eed693bcb89c3f752169b0e3e7ae5b16ca5e1b3deada"},
+ {file = "et_xmlfile-1.1.0.tar.gz", hash = "sha256:8eb9e2bc2f8c97e37a2dc85a09ecdcdec9d8a396530a6d5a33b30b9a92da0c5c"},
+]
+fastavro = [
+ {file = "fastavro-1.4.4-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:0a3e3fa7288db81c1ad521321ced7a895c6a7928511b27393c3f418d6e5f6946"},
+ {file = "fastavro-1.4.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8391357b6857f80b850bb4708c4b8db0f3d1c1e83ab2ea888958d89f46efd26"},
+ {file = "fastavro-1.4.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f1e0093a5ea7f5b72997c91abfa7838cb536c9a1dca9df4c8b7f233e853e0ac"},
+ {file = "fastavro-1.4.4-cp36-cp36m-win_amd64.whl", hash = "sha256:e03b80a9fb52b753d948788b0048b2a3f7551ba7f8584e60e90a1b3b5071fdd0"},
+ {file = "fastavro-1.4.4-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:b85f2569102f3b54a3973dd12f5bf5049c125488e70959360dd5cbb8c71d47c7"},
+ {file = "fastavro-1.4.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:836399e5ac80ef8a19e3a0484585c58e52268c87d77b3681615761a9439032ed"},
+ {file = "fastavro-1.4.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b53816eec734ca34518c7e748d6d538aca0d3c0111ec726ad9808a098bfdd209"},
+ {file = "fastavro-1.4.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6446204cf55299243c9d43b1c5e355c74e6980e61c9bb58138ea9f3a6c99d826"},
+ {file = "fastavro-1.4.4-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:76aa6545443e1f1d6406c52876f3f7157076b91441943db300bc1c0f0daad6aa"},
+ {file = "fastavro-1.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24802411fa8557a6703ad1ccdea892f6b72730fc68ecb80214d7208f49a37ebc"},
+ {file = "fastavro-1.4.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7844fc4f55da7b1948234a7e5a50230fa0f27ff74cdb5efbb107c5cd5db0f49a"},
+ {file = "fastavro-1.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:bfcff703061a9ecce06fc69ac8f994fd1ddb2d55a6ff585fe25a7b45bbda4011"},
+ {file = "fastavro-1.4.4-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:bb49ece8865c8515f34ff8e307799b8cdb32b6db31acf4174c09b5c8e7fc5280"},
+ {file = "fastavro-1.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30acd5c296155959f371bc4ccf7e76461fef5923c738fa34211b5a65c855aef2"},
+ {file = "fastavro-1.4.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52b9fd197319a1b2eaeb9bfeb7313fa45ef92b49c3884c138b3ab90740232a66"},
+ {file = "fastavro-1.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:c74f8b48d4e4b36a9013ddb2cbaac68504cfdc48cdfe4753edfd017b5156e18a"},
+ {file = "fastavro-1.4.4.tar.gz", hash = "sha256:16fcc82844913804b05f28481972b850580ff3103b48f36c021d3b99019f9e3f"},
@@ -711,0 +1825,4 @@ filelock = [
+flatbuffers = [
+ {file = "flatbuffers-1.12-py2.py3-none-any.whl", hash = "sha256:9e9ef47fa92625c4721036e7c4124182668dc6021d9e7c73704edd395648deb9"},
+ {file = "flatbuffers-1.12.tar.gz", hash = "sha256:63bb9a722d5e373701913e226135b28a6f6ac200d5cc7b4d919fa38d73b44610"},
+]
@@ -715,0 +1833,68 @@ fsspec = [
+future = [
+ {file = "future-0.18.2.tar.gz", hash = "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d"},
+]
+gast = [
+ {file = "gast-0.4.0-py3-none-any.whl", hash = "sha256:b7adcdd5adbebf1adf17378da5ba3f543684dbec47b1cda1f3997e573cd542c4"},
+ {file = "gast-0.4.0.tar.gz", hash = "sha256:40feb7b8b8434785585ab224d1568b857edb18297e5a3047f1ba012bc83b42c1"},
+]
+google-auth = [
+ {file = "google-auth-1.34.0.tar.gz", hash = "sha256:f1094088bae046fb06f3d1a3d7df14717e8d959e9105b79c57725bd4e17597a2"},
+ {file = "google_auth-1.34.0-py2.py3-none-any.whl", hash = "sha256:bd6aa5916970a823e76ffb3d5c3ad3f0bedafca0a7fa53bc15149ab21cb71e05"},
+]
+google-auth-oauthlib = [
+ {file = "google-auth-oauthlib-0.4.4.tar.gz", hash = "sha256:09832c6e75032f93818edf1affe4746121d640c625a5bef9b5c96af676e98eee"},
+ {file = "google_auth_oauthlib-0.4.4-py2.py3-none-any.whl", hash = "sha256:0e92aacacfb94978de3b7972cf4b0f204c3cd206f74ddd0dc0b31e91164e6317"},
+]
+google-pasta = [
+ {file = "google-pasta-0.2.0.tar.gz", hash = "sha256:c9f2c8dfc8f96d0d5808299920721be30c9eec37f2389f28904f454565c8a16e"},
+ {file = "google_pasta-0.2.0-py2-none-any.whl", hash = "sha256:4612951da876b1a10fe3960d7226f0c7682cf901e16ac06e473b267a5afa8954"},
+ {file = "google_pasta-0.2.0-py3-none-any.whl", hash = "sha256:b32482794a366b5366a32c92a9a9201b107821889935a02b3e51f6b432ea84ed"},
+]
+grpcio = [
+ {file = "grpcio-1.34.1-cp27-cp27m-macosx_10_10_x86_64.whl", hash = "sha256:5c4402fd8ce28e2847112105591139dc121c8980770f683eb781be1568a64097"},
+ {file = "grpcio-1.34.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:c6f756c11144c7ecb51b87f0d60a4b72e05635b9f24ddfa004286ab0c8527fa0"},
+ {file = "grpcio-1.34.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:ec6d1b3daed886a73e40b4dc553474ef415acc111e913d7324cc2c6b0ba9efe0"},
+ {file = "grpcio-1.34.1-cp27-cp27m-win32.whl", hash = "sha256:d757bc8bb12f07014dde55a04b5261c94828b605cf0726d02d491c3dc71aa6bb"},
+ {file = "grpcio-1.34.1-cp27-cp27m-win_amd64.whl", hash = "sha256:f74cb93cd090b07528cf586a18628370e5780c08e0239f4af796f60a5e773568"},
+ {file = "grpcio-1.34.1-cp27-cp27mu-linux_armv7l.whl", hash = "sha256:c4355fa382dfc71c130dc3eccd8ae606a13e1729be2a77b6c44cd5a130d0c616"},
+ {file = "grpcio-1.34.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:f1a8048428a7a1e5b12322b3ee44ee0bb8e1bea1d67f08fa1813c455f3ef638c"},
+ {file = "grpcio-1.34.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:0bd906496b9dd3751b9e5cacc7ceb25a57c16ce2aa67315b85ee86a4ba7246f1"},
+ {file = "grpcio-1.34.1-cp35-cp35m-linux_armv7l.whl", hash = "sha256:5e488a40ebeb883117aa0dba2cea410ef2ab545a2403b2ac9101e62d42808c71"},
+ {file = "grpcio-1.34.1-cp35-cp35m-macosx_10_10_intel.whl", hash = "sha256:98c06f0f7feeca736cc98f3f46b9b74c5f5fdc5febfc7d72728d1895c57be87f"},
+ {file = "grpcio-1.34.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:90a4799c15b8b5aa587f65650a0cea28ea88bcd2c5fdf4f1adb2b8b7b4e77a5e"},
+ {file = "grpcio-1.34.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:121af89d0b9ba1d47c738242783675009dd4e9067359481e4b743eb9e5886682"},
+ {file = "grpcio-1.34.1-cp35-cp35m-manylinux2014_i686.whl", hash = "sha256:1be193803c706f78d0df12c817eaf2415fb4d39472fa00d860700e6c7a99f8f7"},
+ {file = "grpcio-1.34.1-cp35-cp35m-manylinux2014_x86_64.whl", hash = "sha256:9e465a1d594a9a5f4252c4abbb93909c42768bee5fbfcd18098d60bf06a35573"},
+ {file = "grpcio-1.34.1-cp35-cp35m-win32.whl", hash = "sha256:8b16d14160b7fd8bc43600be70e0da677d17dd8aafb5a258bbda996fe410320e"},
+ {file = "grpcio-1.34.1-cp35-cp35m-win_amd64.whl", hash = "sha256:8a543209ab606dd55c58dc218be8e8619214607f03717dded78c7d27f1d05ba5"},
+ {file = "grpcio-1.34.1-cp36-cp36m-linux_armv7l.whl", hash = "sha256:f74f270550df347a18f839331f84838b938c8923a9e13a6fa7cc69c79087a686"},
+ {file = "grpcio-1.34.1-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:163a2cf7f4df3ff0a04f49e634526e3d88f02393a7ebf8f34a2134c88b06322e"},
+ {file = "grpcio-1.34.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:11735ac4efd53691afeb36d006e20db9b7d4b6f3356c751f32d5747aee38fa4c"},
+ {file = "grpcio-1.34.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:79bda20756e2fc7236b94468ffcce4b516953f946a80b7ea883f89d9e9b25a41"},
+ {file = "grpcio-1.34.1-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:1857f88b351e2382aa57ed892960361a8b71acca4aa1b90998007b4177f15114"},
+ {file = "grpcio-1.34.1-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:6f81fbf9f830e20aee93480305877f73f15bfa58fa87433eb331696be47ae7ba"},
+ {file = "grpcio-1.34.1-cp36-cp36m-win32.whl", hash = "sha256:ff8aef869c2e9de65c3a693406f7d1200d87e6d541d096eae69f98e7f301fa60"},
+ {file = "grpcio-1.34.1-cp36-cp36m-win_amd64.whl", hash = "sha256:ece7459c182e00ca90b2e5823940a552651b5eb3acdeee9350377ddb44d9c412"},
+ {file = "grpcio-1.34.1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:7924ef3a898f6ff985540ee5d8c7554f0c925dc7668c3d63461600ea50b39658"},
+ {file = "grpcio-1.34.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:b5e96ca83d5c34c9b60d8951e52492b0d9d072c3fe38a1c19765932e121036ce"},
+ {file = "grpcio-1.34.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:fe9360347a3f4f2ec6923d8afb03a9194f3f14e054cb09e75e8346af9c0aa9f6"},
+ {file = "grpcio-1.34.1-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:cadc09c9bd24ecf3ba7ae55b5a741f7de694a8843e97e82a7c3fa2e6e81e0f9a"},
+ {file = "grpcio-1.34.1-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:5971e6dfcfa0ebeb0df2d15383e1b53fa36208198c8aff9a4eed5ece2a6d4571"},
+ {file = "grpcio-1.34.1-cp37-cp37m-win32.whl", hash = "sha256:a181092b534e996e36d0c0216d81280d4942322170c823b2fb84ec4597dc0bd5"},
+ {file = "grpcio-1.34.1-cp37-cp37m-win_amd64.whl", hash = "sha256:2b97cdd4582445ad7bd441f5f3c57d838bcdc518a05713dab0c7f4b945afb39e"},
+ {file = "grpcio-1.34.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:ff760c5ce73c177851864e8caaf75467eaf06c1b6857b21e1789658375e720fb"},
+ {file = "grpcio-1.34.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:fd58ea88dd5439e03c6587f0b672db1627ec8ed47be312c74632650dfed33c2e"},
+ {file = "grpcio-1.34.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:f6fee4445cffb45593b4c1d9bb0bc7922e77ec846a1237e2e744b1223d69c863"},
+ {file = "grpcio-1.34.1-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:cd4da71e105088b1a7e629d1b033f16d87dec08524d0e4f5d77982af6fe1b6c2"},
+ {file = "grpcio-1.34.1-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:9d43849d8925ec24bf121bccd941a13d4e8c2cffdfa769a04a6d4ed38c6b88a2"},
+ {file = "grpcio-1.34.1-cp38-cp38-win32.whl", hash = "sha256:696f0de4d47f738063432bbbcecd07f78256864f0839e41369458421f539f00a"},
+ {file = "grpcio-1.34.1-cp38-cp38-win_amd64.whl", hash = "sha256:8fff784ec5d12252a7cc0ab6f1a3206861b94e45ee0ebeba2439bd10a6db2f1a"},
+ {file = "grpcio-1.34.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:ed8ac4f76cbbef5dc54594cb7bf6fbb985f5be66abcb1f9da8142500e4d76492"},
+ {file = "grpcio-1.34.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:8dad4184e4669672e126de26776eba8e3db4914660b4a0a6c7edbdbcf3e2f05f"},
+ {file = "grpcio-1.34.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:011e9b5e47cb9d2a808e8c2dd5ae86df085d5879d9e8095a24631a32c577f231"},
+ {file = "grpcio-1.34.1-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:49ffc5bb78b201db24d8d1644193beb50a896c3cb35b259b4fb9c44dba18585f"},
+ {file = "grpcio-1.34.1-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:cfe0e015cb8db5a27a92621fdd9dc8e69b2f7130db326601802e6ff36626deff"},
+ {file = "grpcio-1.34.1-cp39-cp39-win32.whl", hash = "sha256:809732f300fa8093b40f843c36f6f78423ffb40493098185bc4a96bd67126db5"},
+ {file = "grpcio-1.34.1-cp39-cp39-win_amd64.whl", hash = "sha256:96dc85c059f15390beb7ac6bf075d1e4cf72e8f5c9b6c37ea179b7cc579816fd"},
+ {file = "grpcio-1.34.1.tar.gz", hash = "sha256:1c746a3cd8a830d8d916a9d0476a786aaa98c5cc2a096344af2be955e439f8ac"},
+]
@@ -719,0 +1905,23 @@ h11 = [
+h5py = [
+ {file = "h5py-3.1.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:1cd367f89a5441236bdbb795e9fb9a9e3424929c00b4a54254ca760437f83d69"},
+ {file = "h5py-3.1.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:fea05349f63625a8fb808e57e42bb4c76930cf5d50ac58b678c52f913a48a89b"},
+ {file = "h5py-3.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:2e37352ddfcf9d77a2a47f7c8f7e125c6d20cc06c2995edeb7be222d4e152636"},
+ {file = "h5py-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e33f61d3eb862614c0f273a1f993a64dc2f093e1a3094932c50ada9d2db2170f"},
+ {file = "h5py-3.1.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:236ac8d943be30b617ab615c3d4a4bf4a438add2be87e54af3687ab721a18fac"},
+ {file = "h5py-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:02c391fdb980762a1cc03a4bcaecd03dc463994a9a63a02264830114a96e111f"},
+ {file = "h5py-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f89a3dae38843ffa49d17a31a3509a8129e9b46ece602a0138e1ed79e685c361"},
+ {file = "h5py-3.1.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:ba71f6229d2013fbb606476ecc29c6223fc16b244d35fcd8566ad9dbaf910857"},
+ {file = "h5py-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:dccb89358bc84abcd711363c3e138f9f4eccfdf866f2139a8e72308328765b2c"},
+ {file = "h5py-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cb74df83709d6d03d11e60b9480812f58da34f194beafa8c8314dbbeeedfe0a6"},
+ {file = "h5py-3.1.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:80c623be10479e81b64fa713b7ed4c0bbe9f02e8e7d2a2e5382336087b615ce4"},
+ {file = "h5py-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:1cdfd1c5449ca1329d152f0b66830e93226ebce4f5e07dd8dc16bfc2b1a49d7b"},
+ {file = "h5py-3.1.0.tar.gz", hash = "sha256:1e2516f190652beedcb8c7acfa1c6fa92d99b42331cbef5e5c7ec2d65b0fc3c2"},
+]
+hdfs = [
+ {file = "hdfs-2.6.0-py3-none-any.whl", hash = "sha256:05912125cfc68075387f271654dac185dc1aba8b347519f6a14d1395e39d7749"},
+ {file = "hdfs-2.6.0.tar.gz", hash = "sha256:bc92ce4347f106d48b541f756caa930476998cfd3eed477ffbd63ae9ad1cdc22"},
+]
+httplib2 = [
+ {file = "httplib2-0.19.1-py3-none-any.whl", hash = "sha256:2ad195faf9faf079723f6714926e9a9061f694d07724b846658ce08d40f522b4"},
+ {file = "httplib2-0.19.1.tar.gz", hash = "sha256:0b12617eeca7433d4c396a100eaecfa4b08ee99aa881e6df6e257a7aad5d533d"},
+]
@@ -721,2 +1929,2 @@ huggingface-hub = [
- {file = "huggingface_hub-0.0.14-py3-none-any.whl", hash = "sha256:3f931112abb679001d8d1310bfd2676cec9ce3417b2d9965d5a2d44dcca2e5e2"},
- {file = "huggingface_hub-0.0.14.tar.gz", hash = "sha256:560313eb1b1df0014a0b2469a1ccd2491aa5fe71606b1d858c7f293ec8678f11"},
+ {file = "huggingface_hub-0.0.12-py3-none-any.whl", hash = "sha256:5c82ff96897a72e1ed48a94c1796686f120dea05888200522f3994f130c12e6a"},
+ {file = "huggingface_hub-0.0.12.tar.gz", hash = "sha256:661b17fab0c475276fd71603ee7e16c3b3d1d6e812e1b29f40144f64d361e59d"},
@@ -731,0 +1940,75 @@ iniconfig = [
+joblib = [
+ {file = "joblib-1.0.1-py3-none-any.whl", hash = "sha256:feeb1ec69c4d45129954f1b7034954241eedfd6ba39b5e9e4b6883be3332d5e5"},
+ {file = "joblib-1.0.1.tar.gz", hash = "sha256:9c17567692206d2f3fb9ecf5e991084254fe631665c450b443761c4186a613f7"},
+]
+jsonlines = [
+ {file = "jsonlines-2.0.0-py3-none-any.whl", hash = "sha256:bfb043d4e25fd894dca67b1f2adf014e493cb65d0f18b3a74a98bfcd97c3d983"},
+ {file = "jsonlines-2.0.0.tar.gz", hash = "sha256:6fdd03104c9a421a1ba587a121aaac743bf02d8f87fa9cdaa3b852249a241fe8"},
+]
+kenlm = []
+keras-nightly = [
+ {file = "keras_nightly-2.5.0.dev2021032900-py2.py3-none-any.whl", hash = "sha256:6ba70f738f4008222de7e7fdd5b2b18c48c49b897a9fca54c844854e25964011"},
+]
+keras-preprocessing = [
+ {file = "Keras_Preprocessing-1.1.2-py2.py3-none-any.whl", hash = "sha256:7b82029b130ff61cc99b55f3bd27427df4838576838c5b2f65940e4fcec99a7b"},
+ {file = "Keras_Preprocessing-1.1.2.tar.gz", hash = "sha256:add82567c50c8bc648c14195bf544a5ce7c1f76761536956c3d2978970179ef3"},
+]
+kss = [
+ {file = "kss-2.5.1-py3-none-any.whl", hash = "sha256:29801a0ac9c6872cadc4cb08f8e451fa4abe844de9973b2f50ed41b6a92c82f9"},
+]
+lm-dataformat = [
+ {file = "lm_dataformat-0.0.19-py3-none-any.whl", hash = "sha256:d05bebb6e885bfd4861516f8eca6baa90487e9ffb81b790448d9609866ca2e1f"},
+ {file = "lm_dataformat-0.0.19.tar.gz", hash = "sha256:04fed4405a0eaf9b18f59051476e6e9511759cf27818b5ed67694c5b6f2fe41a"},
+]
+lxml = [
+ {file = "lxml-4.6.3-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:df7c53783a46febb0e70f6b05df2ba104610f2fb0d27023409734a3ecbb78fb2"},
+ {file = "lxml-4.6.3-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:1b7584d421d254ab86d4f0b13ec662a9014397678a7c4265a02a6d7c2b18a75f"},
+ {file = "lxml-4.6.3-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:079f3ae844f38982d156efce585bc540c16a926d4436712cf4baee0cce487a3d"},
+ {file = "lxml-4.6.3-cp27-cp27m-win32.whl", hash = "sha256:bc4313cbeb0e7a416a488d72f9680fffffc645f8a838bd2193809881c67dd106"},
+ {file = "lxml-4.6.3-cp27-cp27m-win_amd64.whl", hash = "sha256:8157dadbb09a34a6bd95a50690595e1fa0af1a99445e2744110e3dca7831c4ee"},
+ {file = "lxml-4.6.3-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:7728e05c35412ba36d3e9795ae8995e3c86958179c9770e65558ec3fdfd3724f"},
+ {file = "lxml-4.6.3-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:4bff24dfeea62f2e56f5bab929b4428ae6caba2d1eea0c2d6eb618e30a71e6d4"},
+ {file = "lxml-4.6.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:74f7d8d439b18fa4c385f3f5dfd11144bb87c1da034a466c5b5577d23a1d9b51"},
+ {file = "lxml-4.6.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:f90ba11136bfdd25cae3951af8da2e95121c9b9b93727b1b896e3fa105b2f586"},
+ {file = "lxml-4.6.3-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:4c61b3a0db43a1607d6264166b230438f85bfed02e8cff20c22e564d0faff354"},
+ {file = "lxml-4.6.3-cp35-cp35m-manylinux2014_x86_64.whl", hash = "sha256:5c8c163396cc0df3fd151b927e74f6e4acd67160d6c33304e805b84293351d16"},
+ {file = "lxml-4.6.3-cp35-cp35m-win32.whl", hash = "sha256:f2380a6376dfa090227b663f9678150ef27543483055cc327555fb592c5967e2"},
+ {file = "lxml-4.6.3-cp35-cp35m-win_amd64.whl", hash = "sha256:c4f05c5a7c49d2fb70223d0d5bcfbe474cf928310ac9fa6a7c6dddc831d0b1d4"},
+ {file = "lxml-4.6.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d2e35d7bf1c1ac8c538f88d26b396e73dd81440d59c1ef8522e1ea77b345ede4"},
+ {file = "lxml-4.6.3-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:289e9ca1a9287f08daaf796d96e06cb2bc2958891d7911ac7cae1c5f9e1e0ee3"},
+ {file = "lxml-4.6.3-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:bccbfc27563652de7dc9bdc595cb25e90b59c5f8e23e806ed0fd623755b6565d"},
+ {file = "lxml-4.6.3-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:d916d31fd85b2f78c76400d625076d9124de3e4bda8b016d25a050cc7d603f24"},
+ {file = "lxml-4.6.3-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:820628b7b3135403540202e60551e741f9b6d3304371712521be939470b454ec"},
+ {file = "lxml-4.6.3-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:c47ff7e0a36d4efac9fd692cfa33fbd0636674c102e9e8d9b26e1b93a94e7617"},
+ {file = "lxml-4.6.3-cp36-cp36m-win32.whl", hash = "sha256:5a0a14e264069c03e46f926be0d8919f4105c1623d620e7ec0e612a2e9bf1c04"},
+ {file = "lxml-4.6.3-cp36-cp36m-win_amd64.whl", hash = "sha256:92e821e43ad382332eade6812e298dc9701c75fe289f2a2d39c7960b43d1e92a"},
+ {file = "lxml-4.6.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:efd7a09678fd8b53117f6bae4fa3825e0a22b03ef0a932e070c0bdbb3a35e654"},
+ {file = "lxml-4.6.3-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:efac139c3f0bf4f0939f9375af4b02c5ad83a622de52d6dfa8e438e8e01d0eb0"},
+ {file = "lxml-4.6.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:0fbcf5565ac01dff87cbfc0ff323515c823081c5777a9fc7703ff58388c258c3"},
+ {file = "lxml-4.6.3-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:36108c73739985979bf302006527cf8a20515ce444ba916281d1c43938b8bb96"},
+ {file = "lxml-4.6.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:122fba10466c7bd4178b07dba427aa516286b846b2cbd6f6169141917283aae2"},
+ {file = "lxml-4.6.3-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:cdaf11d2bd275bf391b5308f86731e5194a21af45fbaaaf1d9e8147b9160ea92"},
+ {file = "lxml-4.6.3-cp37-cp37m-win32.whl", hash = "sha256:3439c71103ef0e904ea0a1901611863e51f50b5cd5e8654a151740fde5e1cade"},
+ {file = "lxml-4.6.3-cp37-cp37m-win_amd64.whl", hash = "sha256:4289728b5e2000a4ad4ab8da6e1db2e093c63c08bdc0414799ee776a3f78da4b"},
+ {file = "lxml-4.6.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b007cbb845b28db4fb8b6a5cdcbf65bacb16a8bd328b53cbc0698688a68e1caa"},
+ {file = "lxml-4.6.3-cp38-cp38-manylinux1_i686.whl", hash = "sha256:76fa7b1362d19f8fbd3e75fe2fb7c79359b0af8747e6f7141c338f0bee2f871a"},
+ {file = "lxml-4.6.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:26e761ab5b07adf5f555ee82fb4bfc35bf93750499c6c7614bd64d12aaa67927"},
+ {file = "lxml-4.6.3-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:e1cbd3f19a61e27e011e02f9600837b921ac661f0c40560eefb366e4e4fb275e"},
+ {file = "lxml-4.6.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:66e575c62792c3f9ca47cb8b6fab9e35bab91360c783d1606f758761810c9791"},
+ {file = "lxml-4.6.3-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:1b38116b6e628118dea5b2186ee6820ab138dbb1e24a13e478490c7db2f326ae"},
+ {file = "lxml-4.6.3-cp38-cp38-win32.whl", hash = "sha256:89b8b22a5ff72d89d48d0e62abb14340d9e99fd637d046c27b8b257a01ffbe28"},
+ {file = "lxml-4.6.3-cp38-cp38-win_amd64.whl", hash = "sha256:2a9d50e69aac3ebee695424f7dbd7b8c6d6eb7de2a2eb6b0f6c7db6aa41e02b7"},
+ {file = "lxml-4.6.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ce256aaa50f6cc9a649c51be3cd4ff142d67295bfc4f490c9134d0f9f6d58ef0"},
+ {file = "lxml-4.6.3-cp39-cp39-manylinux1_i686.whl", hash = "sha256:7610b8c31688f0b1be0ef882889817939490a36d0ee880ea562a4e1399c447a1"},
+ {file = "lxml-4.6.3-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:f8380c03e45cf09f8557bdaa41e1fa7c81f3ae22828e1db470ab2a6c96d8bc23"},
+ {file = "lxml-4.6.3-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:3082c518be8e97324390614dacd041bb1358c882d77108ca1957ba47738d9d59"},
+ {file = "lxml-4.6.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:884ab9b29feaca361f7f88d811b1eea9bfca36cf3da27768d28ad45c3ee6f969"},
+ {file = "lxml-4.6.3-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:6f12e1427285008fd32a6025e38e977d44d6382cf28e7201ed10d6c1698d2a9a"},
+ {file = "lxml-4.6.3-cp39-cp39-win32.whl", hash = "sha256:33bb934a044cf32157c12bfcfbb6649807da20aa92c062ef51903415c704704f"},
+ {file = "lxml-4.6.3-cp39-cp39-win_amd64.whl", hash = "sha256:542d454665a3e277f76954418124d67516c5f88e51a900365ed54a9806122b83"},
+ {file = "lxml-4.6.3.tar.gz", hash = "sha256:39b78571b3b30645ac77b95f7c69d1bffc4cf8c3b157c435a34da72e78c82468"},
+]
+markdown = [
+ {file = "Markdown-3.3.4-py3-none-any.whl", hash = "sha256:96c3ba1261de2f7547b46a00ea8463832c921d3f9d6aba3f255a6f71386db20c"},
+ {file = "Markdown-3.3.4.tar.gz", hash = "sha256:31b5b491868dcc87d6c24b7e3d19a0d730d59d3e46f4eea6430a321bed387a49"},
+]
@@ -772,13 +2055,7 @@ multiprocess = [
- {file = "multiprocess-0.70.12.2-cp27-cp27m-macosx_10_12_x86_64.whl", hash = "sha256:35d41e410ca2a32977a483ae1f40f86b193b45cecf85567c2fae402fb8bf172e"},
- {file = "multiprocess-0.70.12.2-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:9a02237eae21975155c816883479f72e239d16823a6bc063173d59acec9bcf41"},
- {file = "multiprocess-0.70.12.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:f12a939cd2f01d0a900e7ef2aaee3c351a49fd2297d7f760b537af22727561b8"},
- {file = "multiprocess-0.70.12.2-cp27-cp27m-win32.whl", hash = "sha256:be3ad3eaf204abc646d85e70e41244f66d88200628a0ab867c8fc206b97cedbf"},
- {file = "multiprocess-0.70.12.2-cp27-cp27m-win_amd64.whl", hash = "sha256:c85ffc38c50c5a4f32f3f3c1a284725b7b5040188f254eba6e572c53d3da525b"},
- {file = "multiprocess-0.70.12.2-pp27-none-any.whl", hash = "sha256:a9f58945edb234591684c0a181b744a3231643814ef3a8f47cea9a2073b4b2bb"},
- {file = "multiprocess-0.70.12.2-pp36-none-any.whl", hash = "sha256:0e0a5ae4bd84e4c22baddf824d3b8168214f8c1cce51e2cb080421cb1f7b04d1"},
- {file = "multiprocess-0.70.12.2-pp37-none-any.whl", hash = "sha256:916a314a1e0f3454033d59672ba6181fa45948ab1091d68cdd479258576e7b27"},
- {file = "multiprocess-0.70.12.2-py36-none-any.whl", hash = "sha256:b3f866f7d9c7acc1a9cb1b6063a29f5cb140ff545b35b71fd4bfdac6f19d75fa"},
- {file = "multiprocess-0.70.12.2-py37-none-any.whl", hash = "sha256:6aa67e805e50b6e9dfc56dd0f0c85ac3409e6791d4ec5405c5f9bc0a47d745a4"},
- {file = "multiprocess-0.70.12.2-py38-none-any.whl", hash = "sha256:85941e650c277af44fc82e3e97faacb920e5ce3615238b540cbad4012d6f60e9"},
- {file = "multiprocess-0.70.12.2-py39-none-any.whl", hash = "sha256:6f812a1d3f198b7cacd63983f60e2dc1338bd4450893f90c435067b5a3127e6f"},
- {file = "multiprocess-0.70.12.2.zip", hash = "sha256:206bb9b97b73f87fec1ed15a19f8762950256aa84225450abc7150d02855a083"},
+ {file = "multiprocess-0.70.9-cp27-cp27m-win32.whl", hash = "sha256:0e4e65c2e74aa14fa0c9a1f838b5e9a5f8fe5b3a173925792260843c4a6157ec"},
+ {file = "multiprocess-0.70.9-cp27-cp27m-win_amd64.whl", hash = "sha256:1eb7dfe2d809d53be92e8a288ed1c01614fe5407bbc9d078ed451a749fb1bd34"},
+ {file = "multiprocess-0.70.9.tar.gz", hash = "sha256:9fd5bd990132da77e73dec6e9613408602a4612e1d73caf2e2b813d2b61508e5"},
+]
+multivolumefile = [
+ {file = "multivolumefile-0.2.3-py3-none-any.whl", hash = "sha256:237f4353b60af1703087cf7725755a1f6fcaeeea48421e1896940cd1c920d678"},
+ {file = "multivolumefile-0.2.3.tar.gz", hash = "sha256:a0648d0aafbc96e59198d5c17e9acad7eb531abea51035d08ce8060dcad709d6"},
@@ -789,0 +2067,4 @@ mypy-extensions = [
+nlp = [
+ {file = "nlp-0.4.0-py3-none-any.whl", hash = "sha256:a7335eb3939133d29dfefb507260b3b069bd7bcc662661ad026ff1404545a96c"},
+ {file = "nlp-0.4.0.tar.gz", hash = "sha256:0aa6bc966ffc2d2be7248bd71f258360281cd717c10811e1b55bb2fa50bf79d4"},
+]
@@ -791,28 +2072,50 @@ numpy = [
- {file = "numpy-1.21.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:38e8648f9449a549a7dfe8d8755a5979b45b3538520d1e735637ef28e8c2dc50"},
- {file = "numpy-1.21.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fd7d7409fa643a91d0a05c7554dd68aa9c9bb16e186f6ccfe40d6e003156e33a"},
- {file = "numpy-1.21.1-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a75b4498b1e93d8b700282dc8e655b8bd559c0904b3910b144646dbbbc03e062"},
- {file = "numpy-1.21.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1412aa0aec3e00bc23fbb8664d76552b4efde98fb71f60737c83efbac24112f1"},
- {file = "numpy-1.21.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e46ceaff65609b5399163de5893d8f2a82d3c77d5e56d976c8b5fb01faa6b671"},
- {file = "numpy-1.21.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:c6a2324085dd52f96498419ba95b5777e40b6bcbc20088fddb9e8cbb58885e8e"},
- {file = "numpy-1.21.1-cp37-cp37m-win32.whl", hash = "sha256:73101b2a1fef16602696d133db402a7e7586654682244344b8329cdcbbb82172"},
- {file = "numpy-1.21.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7a708a79c9a9d26904d1cca8d383bf869edf6f8e7650d85dbc77b041e8c5a0f8"},
- {file = "numpy-1.21.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:95b995d0c413f5d0428b3f880e8fe1660ff9396dcd1f9eedbc311f37b5652e16"},
- {file = "numpy-1.21.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:635e6bd31c9fb3d475c8f44a089569070d10a9ef18ed13738b03049280281267"},
- {file = "numpy-1.21.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4a3d5fb89bfe21be2ef47c0614b9c9c707b7362386c9a3ff1feae63e0267ccb6"},
- {file = "numpy-1.21.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8a326af80e86d0e9ce92bcc1e65c8ff88297de4fa14ee936cb2293d414c9ec63"},
- {file = "numpy-1.21.1-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:791492091744b0fe390a6ce85cc1bf5149968ac7d5f0477288f78c89b385d9af"},
- {file = "numpy-1.21.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0318c465786c1f63ac05d7c4dbcecd4d2d7e13f0959b01b534ea1e92202235c5"},
- {file = "numpy-1.21.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a513bd9c1551894ee3d31369f9b07460ef223694098cf27d399513415855b68"},
- {file = "numpy-1.21.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:91c6f5fc58df1e0a3cc0c3a717bb3308ff850abdaa6d2d802573ee2b11f674a8"},
- {file = "numpy-1.21.1-cp38-cp38-win32.whl", hash = "sha256:978010b68e17150db8765355d1ccdd450f9fc916824e8c4e35ee620590e234cd"},
- {file = "numpy-1.21.1-cp38-cp38-win_amd64.whl", hash = "sha256:9749a40a5b22333467f02fe11edc98f022133ee1bfa8ab99bda5e5437b831214"},
- {file = "numpy-1.21.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d7a4aeac3b94af92a9373d6e77b37691b86411f9745190d2c351f410ab3a791f"},
- {file = "numpy-1.21.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d9e7912a56108aba9b31df688a4c4f5cb0d9d3787386b87d504762b6754fbb1b"},
- {file = "numpy-1.21.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:25b40b98ebdd272bc3020935427a4530b7d60dfbe1ab9381a39147834e985eac"},
- {file = "numpy-1.21.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8a92c5aea763d14ba9d6475803fc7904bda7decc2a0a68153f587ad82941fec1"},
- {file = "numpy-1.21.1-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:05a0f648eb28bae4bcb204e6fd14603de2908de982e761a2fc78efe0f19e96e1"},
- {file = "numpy-1.21.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f01f28075a92eede918b965e86e8f0ba7b7797a95aa8d35e1cc8821f5fc3ad6a"},
- {file = "numpy-1.21.1-cp39-cp39-win32.whl", hash = "sha256:88c0b89ad1cc24a5efbb99ff9ab5db0f9a86e9cc50240177a571fbe9c2860ac2"},
- {file = "numpy-1.21.1-cp39-cp39-win_amd64.whl", hash = "sha256:01721eefe70544d548425a07c80be8377096a54118070b8a62476866d5208e33"},
- {file = "numpy-1.21.1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2d4d1de6e6fb3d28781c73fbde702ac97f03d79e4ffd6598b880b2d95d62ead4"},
- {file = "numpy-1.21.1.zip", hash = "sha256:dff4af63638afcc57a3dfb9e4b26d434a7a602d225b42d746ea7fe2edf1342fd"},
+ {file = "numpy-1.19.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:cc6bd4fd593cb261332568485e20a0712883cf631f6f5e8e86a52caa8b2b50ff"},
+ {file = "numpy-1.19.5-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:aeb9ed923be74e659984e321f609b9ba54a48354bfd168d21a2b072ed1e833ea"},
+ {file = "numpy-1.19.5-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:8b5e972b43c8fc27d56550b4120fe6257fdc15f9301914380b27f74856299fea"},
+ {file = "numpy-1.19.5-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:43d4c81d5ffdff6bae58d66a3cd7f54a7acd9a0e7b18d97abb255defc09e3140"},
+ {file = "numpy-1.19.5-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:a4646724fba402aa7504cd48b4b50e783296b5e10a524c7a6da62e4a8ac9698d"},
+ {file = "numpy-1.19.5-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:2e55195bc1c6b705bfd8ad6f288b38b11b1af32f3c8289d6c50d47f950c12e76"},
+ {file = "numpy-1.19.5-cp36-cp36m-win32.whl", hash = "sha256:39b70c19ec771805081578cc936bbe95336798b7edf4732ed102e7a43ec5c07a"},
+ {file = "numpy-1.19.5-cp36-cp36m-win_amd64.whl", hash = "sha256:dbd18bcf4889b720ba13a27ec2f2aac1981bd41203b3a3b27ba7a33f88ae4827"},
+ {file = "numpy-1.19.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:603aa0706be710eea8884af807b1b3bc9fb2e49b9f4da439e76000f3b3c6ff0f"},
+ {file = "numpy-1.19.5-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:cae865b1cae1ec2663d8ea56ef6ff185bad091a5e33ebbadd98de2cfa3fa668f"},
+ {file = "numpy-1.19.5-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:36674959eed6957e61f11c912f71e78857a8d0604171dfd9ce9ad5cbf41c511c"},
+ {file = "numpy-1.19.5-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:06fab248a088e439402141ea04f0fffb203723148f6ee791e9c75b3e9e82f080"},
+ {file = "numpy-1.19.5-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:6149a185cece5ee78d1d196938b2a8f9d09f5a5ebfbba66969302a778d5ddd1d"},
+ {file = "numpy-1.19.5-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:50a4a0ad0111cc1b71fa32dedd05fa239f7fb5a43a40663269bb5dc7877cfd28"},
+ {file = "numpy-1.19.5-cp37-cp37m-win32.whl", hash = "sha256:d051ec1c64b85ecc69531e1137bb9751c6830772ee5c1c426dbcfe98ef5788d7"},
+ {file = "numpy-1.19.5-cp37-cp37m-win_amd64.whl", hash = "sha256:a12ff4c8ddfee61f90a1633a4c4afd3f7bcb32b11c52026c92a12e1325922d0d"},
+ {file = "numpy-1.19.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cf2402002d3d9f91c8b01e66fbb436a4ed01c6498fffed0e4c7566da1d40ee1e"},
+ {file = "numpy-1.19.5-cp38-cp38-manylinux1_i686.whl", hash = "sha256:1ded4fce9cfaaf24e7a0ab51b7a87be9038ea1ace7f34b841fe3b6894c721d1c"},
+ {file = "numpy-1.19.5-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:012426a41bc9ab63bb158635aecccc7610e3eff5d31d1eb43bc099debc979d94"},
+ {file = "numpy-1.19.5-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:759e4095edc3c1b3ac031f34d9459fa781777a93ccc633a472a5468587a190ff"},
+ {file = "numpy-1.19.5-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:a9d17f2be3b427fbb2bce61e596cf555d6f8a56c222bd2ca148baeeb5e5c783c"},
+ {file = "numpy-1.19.5-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:99abf4f353c3d1a0c7a5f27699482c987cf663b1eac20db59b8c7b061eabd7fc"},
+ {file = "numpy-1.19.5-cp38-cp38-win32.whl", hash = "sha256:384ec0463d1c2671170901994aeb6dce126de0a95ccc3976c43b0038a37329c2"},
+ {file = "numpy-1.19.5-cp38-cp38-win_amd64.whl", hash = "sha256:811daee36a58dc79cf3d8bdd4a490e4277d0e4b7d103a001a4e73ddb48e7e6aa"},
+ {file = "numpy-1.19.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c843b3f50d1ab7361ca4f0b3639bf691569493a56808a0b0c54a051d260b7dbd"},
+ {file = "numpy-1.19.5-cp39-cp39-manylinux1_i686.whl", hash = "sha256:d6631f2e867676b13026e2846180e2c13c1e11289d67da08d71cacb2cd93d4aa"},
+ {file = "numpy-1.19.5-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:7fb43004bce0ca31d8f13a6eb5e943fa73371381e53f7074ed21a4cb786c32f8"},
+ {file = "numpy-1.19.5-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:2ea52bd92ab9f768cc64a4c3ef8f4b2580a17af0a5436f6126b08efbd1838371"},
+ {file = "numpy-1.19.5-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:400580cbd3cff6ffa6293df2278c75aef2d58d8d93d3c5614cd67981dae68ceb"},
+ {file = "numpy-1.19.5-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:df609c82f18c5b9f6cb97271f03315ff0dbe481a2a02e56aeb1b1a985ce38e60"},
+ {file = "numpy-1.19.5-cp39-cp39-win32.whl", hash = "sha256:ab83f24d5c52d60dbc8cd0528759532736b56db58adaa7b5f1f76ad551416a1e"},
+ {file = "numpy-1.19.5-cp39-cp39-win_amd64.whl", hash = "sha256:0eef32ca3132a48e43f6a0f5a82cb508f22ce5a3d6f67a8329c81c8e226d3f6e"},
+ {file = "numpy-1.19.5-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:a0d53e51a6cb6f0d9082decb7a4cb6dfb33055308c4c44f53103c073f649af73"},
+ {file = "numpy-1.19.5.zip", hash = "sha256:a76f502430dd98d7546e1ea2250a7360c065a5fdea52b2dffe8ae7180909b6f4"},
+]
+oauth2client = [
+ {file = "oauth2client-4.1.3-py2.py3-none-any.whl", hash = "sha256:b8a81cc5d60e2d364f0b1b98f958dbd472887acaf1a5b05e21c28c31a2d6d3ac"},
+ {file = "oauth2client-4.1.3.tar.gz", hash = "sha256:d486741e451287f69568a4d26d70d9acd73a2bbfa275746c535b4209891cccc6"},
+]
+oauthlib = [
+ {file = "oauthlib-3.1.1-py2.py3-none-any.whl", hash = "sha256:42bf6354c2ed8c6acb54d971fce6f88193d97297e18602a3a886603f9d7730cc"},
+ {file = "oauthlib-3.1.1.tar.gz", hash = "sha256:8f0215fcc533dd8dd1bee6f4c412d4f0cd7297307d43ac61666389e3bc3198a3"},
+]
+openpyxl = [
+ {file = "openpyxl-3.0.7-py2.py3-none-any.whl", hash = "sha256:46af4eaf201a89b610fcca177eed957635f88770a5462fb6aae4a2a52b0ff516"},
+ {file = "openpyxl-3.0.7.tar.gz", hash = "sha256:6456a3b472e1ef0facb1129f3c6ef00713cebf62e736cd7a75bcc3247432f251"},
+]
+opt-einsum = [
+ {file = "opt_einsum-3.3.0-py3-none-any.whl", hash = "sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147"},
+ {file = "opt_einsum-3.3.0.tar.gz", hash = "sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549"},
@@ -848,0 +2152,41 @@ pathspec = [
+pillow = [
+ {file = "Pillow-8.3.1-1-cp36-cp36m-win_amd64.whl", hash = "sha256:fd7eef578f5b2200d066db1b50c4aa66410786201669fb76d5238b007918fb24"},
+ {file = "Pillow-8.3.1-1-cp37-cp37m-win_amd64.whl", hash = "sha256:75e09042a3b39e0ea61ce37e941221313d51a9c26b8e54e12b3ececccb71718a"},
+ {file = "Pillow-8.3.1-1-cp38-cp38-win_amd64.whl", hash = "sha256:c0e0550a404c69aab1e04ae89cca3e2a042b56ab043f7f729d984bf73ed2a093"},
+ {file = "Pillow-8.3.1-1-cp39-cp39-win_amd64.whl", hash = "sha256:479ab11cbd69612acefa8286481f65c5dece2002ffaa4f9db62682379ca3bb77"},
+ {file = "Pillow-8.3.1-1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:f156d6ecfc747ee111c167f8faf5f4953761b5e66e91a4e6767e548d0f80129c"},
+ {file = "Pillow-8.3.1-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:196560dba4da7a72c5e7085fccc5938ab4075fd37fe8b5468869724109812edd"},
+ {file = "Pillow-8.3.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29c9569049d04aaacd690573a0398dbd8e0bf0255684fee512b413c2142ab723"},
+ {file = "Pillow-8.3.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c088a000dfdd88c184cc7271bfac8c5b82d9efa8637cd2b68183771e3cf56f04"},
+ {file = "Pillow-8.3.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:fc214a6b75d2e0ea7745488da7da3c381f41790812988c7a92345978414fad37"},
+ {file = "Pillow-8.3.1-cp36-cp36m-win32.whl", hash = "sha256:a17ca41f45cf78c2216ebfab03add7cc350c305c38ff34ef4eef66b7d76c5229"},
+ {file = "Pillow-8.3.1-cp36-cp36m-win_amd64.whl", hash = "sha256:67b3666b544b953a2777cb3f5a922e991be73ab32635666ee72e05876b8a92de"},
+ {file = "Pillow-8.3.1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:ff04c373477723430dce2e9d024c708a047d44cf17166bf16e604b379bf0ca14"},
+ {file = "Pillow-8.3.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9364c81b252d8348e9cc0cb63e856b8f7c1b340caba6ee7a7a65c968312f7dab"},
+ {file = "Pillow-8.3.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a2f381932dca2cf775811a008aa3027671ace723b7a38838045b1aee8669fdcf"},
+ {file = "Pillow-8.3.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d0da39795049a9afcaadec532e7b669b5ebbb2a9134576ebcc15dd5bdae33cc0"},
+ {file = "Pillow-8.3.1-cp37-cp37m-win32.whl", hash = "sha256:2b6dfa068a8b6137da34a4936f5a816aba0ecc967af2feeb32c4393ddd671cba"},
+ {file = "Pillow-8.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a4eef1ff2d62676deabf076f963eda4da34b51bc0517c70239fafed1d5b51500"},
+ {file = "Pillow-8.3.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:660a87085925c61a0dcc80efb967512ac34dbb256ff7dd2b9b4ee8dbdab58cf4"},
+ {file = "Pillow-8.3.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:15a2808e269a1cf2131930183dcc0419bc77bb73eb54285dde2706ac9939fa8e"},
+ {file = "Pillow-8.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:969cc558cca859cadf24f890fc009e1bce7d7d0386ba7c0478641a60199adf79"},
+ {file = "Pillow-8.3.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2ee77c14a0299d0541d26f3d8500bb57e081233e3fa915fa35abd02c51fa7fae"},
+ {file = "Pillow-8.3.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:c11003197f908878164f0e6da15fce22373ac3fc320cda8c9d16e6bba105b844"},
+ {file = "Pillow-8.3.1-cp38-cp38-win32.whl", hash = "sha256:3f08bd8d785204149b5b33e3b5f0ebbfe2190ea58d1a051c578e29e39bfd2367"},
+ {file = "Pillow-8.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:70af7d222df0ff81a2da601fab42decb009dc721545ed78549cb96e3a1c5f0c8"},
+ {file = "Pillow-8.3.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:37730f6e68bdc6a3f02d2079c34c532330d206429f3cee651aab6b66839a9f0e"},
+ {file = "Pillow-8.3.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4bc3c7ef940eeb200ca65bd83005eb3aae8083d47e8fcbf5f0943baa50726856"},
+ {file = "Pillow-8.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c35d09db702f4185ba22bb33ef1751ad49c266534339a5cebeb5159d364f6f82"},
+ {file = "Pillow-8.3.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0b2efa07f69dc395d95bb9ef3299f4ca29bcb2157dc615bae0b42c3c20668ffc"},
+ {file = "Pillow-8.3.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cc866706d56bd3a7dbf8bac8660c6f6462f2f2b8a49add2ba617bc0c54473d83"},
+ {file = "Pillow-8.3.1-cp39-cp39-win32.whl", hash = "sha256:9a211b663cf2314edbdb4cf897beeb5c9ee3810d1d53f0e423f06d6ebbf9cd5d"},
+ {file = "Pillow-8.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:c2a5ff58751670292b406b9f06e07ed1446a4b13ffced6b6cab75b857485cbc8"},
+ {file = "Pillow-8.3.1-pp36-pypy36_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c379425c2707078dfb6bfad2430728831d399dc95a7deeb92015eb4c92345eaf"},
+ {file = "Pillow-8.3.1-pp36-pypy36_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:114f816e4f73f9ec06997b2fde81a92cbf0777c9e8f462005550eed6bae57e63"},
+ {file = "Pillow-8.3.1-pp36-pypy36_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8960a8a9f4598974e4c2aeb1bff9bdd5db03ee65fd1fce8adf3223721aa2a636"},
+ {file = "Pillow-8.3.1-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:147bd9e71fb9dcf08357b4d530b5167941e222a6fd21f869c7911bac40b9994d"},
+ {file = "Pillow-8.3.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1fd5066cd343b5db88c048d971994e56b296868766e461b82fa4e22498f34d77"},
+ {file = "Pillow-8.3.1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f4ebde71785f8bceb39dcd1e7f06bcc5d5c3cf48b9f69ab52636309387b097c8"},
+ {file = "Pillow-8.3.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:1c03e24be975e2afe70dfc5da6f187eea0b49a68bb2b69db0f30a61b7031cee4"},
+ {file = "Pillow-8.3.1.tar.gz", hash = "sha256:2cac53839bfc5cece8fdbe7f084d5e3ee61e1303cccc86511d351adcb9e2c792"},
+]
@@ -852,0 +2197,29 @@ pluggy = [
+protobuf = [
+ {file = "protobuf-3.17.3-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ab6bb0e270c6c58e7ff4345b3a803cc59dbee19ddf77a4719c5b635f1d547aa8"},
+ {file = "protobuf-3.17.3-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:13ee7be3c2d9a5d2b42a1030976f760f28755fcf5863c55b1460fd205e6cd637"},
+ {file = "protobuf-3.17.3-cp35-cp35m-macosx_10_9_intel.whl", hash = "sha256:1556a1049ccec58c7855a78d27e5c6e70e95103b32de9142bae0576e9200a1b0"},
+ {file = "protobuf-3.17.3-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:f0e59430ee953184a703a324b8ec52f571c6c4259d496a19d1cabcdc19dabc62"},
+ {file = "protobuf-3.17.3-cp35-cp35m-win32.whl", hash = "sha256:a981222367fb4210a10a929ad5983ae93bd5a050a0824fc35d6371c07b78caf6"},
+ {file = "protobuf-3.17.3-cp35-cp35m-win_amd64.whl", hash = "sha256:6d847c59963c03fd7a0cd7c488cadfa10cda4fff34d8bc8cba92935a91b7a037"},
+ {file = "protobuf-3.17.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:145ce0af55c4259ca74993ddab3479c78af064002ec8227beb3d944405123c71"},
+ {file = "protobuf-3.17.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6ce4d8bf0321e7b2d4395e253f8002a1a5ffbcfd7bcc0a6ba46712c07d47d0b4"},
+ {file = "protobuf-3.17.3-cp36-cp36m-win32.whl", hash = "sha256:7a4c97961e9e5b03a56f9a6c82742ed55375c4a25f2692b625d4087d02ed31b9"},
+ {file = "protobuf-3.17.3-cp36-cp36m-win_amd64.whl", hash = "sha256:a22b3a0dbac6544dacbafd4c5f6a29e389a50e3b193e2c70dae6bbf7930f651d"},
+ {file = "protobuf-3.17.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ffea251f5cd3c0b9b43c7a7a912777e0bc86263436a87c2555242a348817221b"},
+ {file = "protobuf-3.17.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:9b7a5c1022e0fa0dbde7fd03682d07d14624ad870ae52054849d8960f04bc764"},
+ {file = "protobuf-3.17.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:8727ee027157516e2c311f218ebf2260a18088ffb2d29473e82add217d196b1c"},
+ {file = "protobuf-3.17.3-cp37-cp37m-win32.whl", hash = "sha256:14c1c9377a7ffbeaccd4722ab0aa900091f52b516ad89c4b0c3bb0a4af903ba5"},
+ {file = "protobuf-3.17.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c56c050a947186ba51de4f94ab441d7f04fcd44c56df6e922369cc2e1a92d683"},
+ {file = "protobuf-3.17.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2ae692bb6d1992afb6b74348e7bb648a75bb0d3565a3f5eea5bec8f62bd06d87"},
+ {file = "protobuf-3.17.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:99938f2a2d7ca6563c0ade0c5ca8982264c484fdecf418bd68e880a7ab5730b1"},
+ {file = "protobuf-3.17.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6902a1e4b7a319ec611a7345ff81b6b004b36b0d2196ce7a748b3493da3d226d"},
+ {file = "protobuf-3.17.3-cp38-cp38-win32.whl", hash = "sha256:59e5cf6b737c3a376932fbfb869043415f7c16a0cf176ab30a5bbc419cd709c1"},
+ {file = "protobuf-3.17.3-cp38-cp38-win_amd64.whl", hash = "sha256:ebcb546f10069b56dc2e3da35e003a02076aaa377caf8530fe9789570984a8d2"},
+ {file = "protobuf-3.17.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4ffbd23640bb7403574f7aff8368e2aeb2ec9a5c6306580be48ac59a6bac8bde"},
+ {file = "protobuf-3.17.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:26010f693b675ff5a1d0e1bdb17689b8b716a18709113288fead438703d45539"},
+ {file = "protobuf-3.17.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e76d9686e088fece2450dbc7ee905f9be904e427341d289acbe9ad00b78ebd47"},
+ {file = "protobuf-3.17.3-cp39-cp39-win32.whl", hash = "sha256:a38bac25f51c93e4be4092c88b2568b9f407c27217d3dd23c7a57fa522a17554"},
+ {file = "protobuf-3.17.3-cp39-cp39-win_amd64.whl", hash = "sha256:85d6303e4adade2827e43c2b54114d9a6ea547b671cb63fafd5011dc47d0e13d"},
+ {file = "protobuf-3.17.3-py2.py3-none-any.whl", hash = "sha256:2bfb815216a9cd9faec52b16fd2bfa68437a44b67c56bee59bc3926522ecb04e"},
+ {file = "protobuf-3.17.3.tar.gz", hash = "sha256:72804ea5eaa9c22a090d2803813e280fb273b62d5ae497aaf3553d141c4fdd7b"},
+]
@@ -856,0 +2230,4 @@ py = [
+py7zr = [
+ {file = "py7zr-0.16.1-py3-none-any.whl", hash = "sha256:3ce3308f3c89475a4d4997d02f7c9945e921c7b4df16e6243fe4daef0e7e312f"},
+ {file = "py7zr-0.16.1.tar.gz", hash = "sha256:0ab402f73fc8cc41f2a5523436ae53dec6ff612597d9168ed80cfa1574d27fe0"},
+]
@@ -883,0 +2261,170 @@ pyarrow = [
+pyasn1 = [
+ {file = "pyasn1-0.4.8-py2.4.egg", hash = "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3"},
+ {file = "pyasn1-0.4.8-py2.5.egg", hash = "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf"},
+ {file = "pyasn1-0.4.8-py2.6.egg", hash = "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00"},
+ {file = "pyasn1-0.4.8-py2.7.egg", hash = "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8"},
+ {file = "pyasn1-0.4.8-py2.py3-none-any.whl", hash = "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d"},
+ {file = "pyasn1-0.4.8-py3.1.egg", hash = "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86"},
+ {file = "pyasn1-0.4.8-py3.2.egg", hash = "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7"},
+ {file = "pyasn1-0.4.8-py3.3.egg", hash = "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576"},
+ {file = "pyasn1-0.4.8-py3.4.egg", hash = "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12"},
+ {file = "pyasn1-0.4.8-py3.5.egg", hash = "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2"},
+ {file = "pyasn1-0.4.8-py3.6.egg", hash = "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359"},
+ {file = "pyasn1-0.4.8-py3.7.egg", hash = "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776"},
+ {file = "pyasn1-0.4.8.tar.gz", hash = "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"},
+]
+pyasn1-modules = [
+ {file = "pyasn1-modules-0.2.8.tar.gz", hash = "sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e"},
+ {file = "pyasn1_modules-0.2.8-py2.4.egg", hash = "sha256:0fe1b68d1e486a1ed5473f1302bd991c1611d319bba158e98b106ff86e1d7199"},
+ {file = "pyasn1_modules-0.2.8-py2.5.egg", hash = "sha256:fe0644d9ab041506b62782e92b06b8c68cca799e1a9636ec398675459e031405"},
+ {file = "pyasn1_modules-0.2.8-py2.6.egg", hash = "sha256:a99324196732f53093a84c4369c996713eb8c89d360a496b599fb1a9c47fc3eb"},
+ {file = "pyasn1_modules-0.2.8-py2.7.egg", hash = "sha256:0845a5582f6a02bb3e1bde9ecfc4bfcae6ec3210dd270522fee602365430c3f8"},
+ {file = "pyasn1_modules-0.2.8-py2.py3-none-any.whl", hash = "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74"},
+ {file = "pyasn1_modules-0.2.8-py3.1.egg", hash = "sha256:f39edd8c4ecaa4556e989147ebf219227e2cd2e8a43c7e7fcb1f1c18c5fd6a3d"},
+ {file = "pyasn1_modules-0.2.8-py3.2.egg", hash = "sha256:b80486a6c77252ea3a3e9b1e360bc9cf28eaac41263d173c032581ad2f20fe45"},
+ {file = "pyasn1_modules-0.2.8-py3.3.egg", hash = "sha256:65cebbaffc913f4fe9e4808735c95ea22d7a7775646ab690518c056784bc21b4"},
+ {file = "pyasn1_modules-0.2.8-py3.4.egg", hash = "sha256:15b7c67fabc7fc240d87fb9aabf999cf82311a6d6fb2c70d00d3d0604878c811"},
+ {file = "pyasn1_modules-0.2.8-py3.5.egg", hash = "sha256:426edb7a5e8879f1ec54a1864f16b882c2837bfd06eee62f2c982315ee2473ed"},
+ {file = "pyasn1_modules-0.2.8-py3.6.egg", hash = "sha256:cbac4bc38d117f2a49aeedec4407d23e8866ea4ac27ff2cf7fb3e5b570df19e0"},
+ {file = "pyasn1_modules-0.2.8-py3.7.egg", hash = "sha256:c29a5e5cc7a3f05926aff34e097e84f8589cd790ce0ed41b67aed6857b26aafd"},
+]
+pycparser = [
+ {file = "pycparser-2.20-py2.py3-none-any.whl", hash = "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705"},
+ {file = "pycparser-2.20.tar.gz", hash = "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0"},
+]
+pycryptodomex = [
+ {file = "pycryptodomex-3.10.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:4344ab16faf6c2d9df2b6772995623698fb2d5f114dace4ab2ff335550cf71d5"},
+ {file = "pycryptodomex-3.10.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:f933ecf4cb736c7af60a6a533db2bf569717f2318b265f92907acff1db43bc34"},
+ {file = "pycryptodomex-3.10.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:0bd35af6a18b724c689e56f2dbbdd8e409288be71952d271ba3d9614b31d188c"},
+ {file = "pycryptodomex-3.10.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ec9901d19cadb80d9235ee41cc58983f18660314a0eb3fc7b11b0522ac3b6c4a"},
+ {file = "pycryptodomex-3.10.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:c2b680987f418858e89dbb4f09c8c919ece62811780a27051ace72b2f69fb1be"},
+ {file = "pycryptodomex-3.10.1-cp27-cp27m-manylinux2014_aarch64.whl", hash = "sha256:a6584ae58001d17bb4dc0faa8a426919c2c028ef4d90ceb4191802ca6edb8204"},
+ {file = "pycryptodomex-3.10.1-cp27-cp27m-win32.whl", hash = "sha256:4195604f75cdc1db9bccdb9e44d783add3c817319c30aaff011670c9ed167690"},
+ {file = "pycryptodomex-3.10.1-cp27-cp27m-win_amd64.whl", hash = "sha256:9f713ffb4e27b5575bd917c70bbc3f7b348241a351015dbbc514c01b7061ff7e"},
+ {file = "pycryptodomex-3.10.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:418f51c61eab52d9920f4ef468d22c89dab1be5ac796f71cf3802f6a6e667df0"},
+ {file = "pycryptodomex-3.10.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:8a98e02cbf8f624add45deff444539bf26345b479fc04fa0937b23cd84078d91"},
+ {file = "pycryptodomex-3.10.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:dbd2c361db939a4252589baa94da4404d45e3fc70da1a31e541644cdf354336e"},
+ {file = "pycryptodomex-3.10.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:564063e3782474c92cbb333effd06e6eb718471783c6e67f28c63f0fc3ac7b23"},
+ {file = "pycryptodomex-3.10.1-cp27-cp27mu-manylinux2014_aarch64.whl", hash = "sha256:e4a1245e7b846e88ba63e7543483bda61b9acbaee61eadbead5a1ce479d94740"},
+ {file = "pycryptodomex-3.10.1-cp35-abi3-macosx_10_9_x86_64.whl", hash = "sha256:3b8eb85b3cc7f083d87978c264d10ff9de3b4bfc46f1c6fdc2792e7d7ebc87bb"},
+ {file = "pycryptodomex-3.10.1-cp35-abi3-manylinux1_i686.whl", hash = "sha256:f3bb267df679f70a9f40f17d62d22fe12e8b75e490f41807e7560de4d3e6bf9f"},
+ {file = "pycryptodomex-3.10.1-cp35-abi3-manylinux1_x86_64.whl", hash = "sha256:04265a7a84ae002001249bd1de2823bcf46832bd4b58f6965567cb8a07cf4f00"},
+ {file = "pycryptodomex-3.10.1-cp35-abi3-manylinux2010_i686.whl", hash = "sha256:72f44b5be46faef2a1bf2a85902511b31f4dd7b01ce0c3978e92edb2cc812a82"},
+ {file = "pycryptodomex-3.10.1-cp35-abi3-manylinux2010_x86_64.whl", hash = "sha256:e090a8609e2095aa86978559b140cf8968af99ee54b8791b29ff804838f29f10"},
+ {file = "pycryptodomex-3.10.1-cp35-abi3-manylinux2014_aarch64.whl", hash = "sha256:20c45a30f3389148f94edb77f3b216c677a277942f62a2b81a1cc0b6b2dde7fc"},
+ {file = "pycryptodomex-3.10.1-cp35-abi3-win32.whl", hash = "sha256:fc9c55dc1ed57db76595f2d19a479fc1c3a1be2c9da8de798a93d286c5f65f38"},
+ {file = "pycryptodomex-3.10.1-cp35-abi3-win_amd64.whl", hash = "sha256:3dfce70c4e425607ae87b8eae67c9c7dbba59a33b62d70f79417aef0bc5c735b"},
+ {file = "pycryptodomex-3.10.1-pp27-pypy_73-macosx_10_9_x86_64.whl", hash = "sha256:940db96449d7b2ebb2c7bf190be1514f3d67914bd37e54e8d30a182bd375a1a9"},
+ {file = "pycryptodomex-3.10.1-pp27-pypy_73-manylinux1_x86_64.whl", hash = "sha256:d8fae5ba3d34c868ae43614e0bd6fb61114b2687ac3255798791ce075d95aece"},
+ {file = "pycryptodomex-3.10.1-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:f2abeb4c4ce7584912f4d637b2c57f23720d35dd2892bfeb1b2c84b6fb7a8c88"},
+ {file = "pycryptodomex-3.10.1-pp27-pypy_73-win32.whl", hash = "sha256:36dab7f506948056ceba2d57c1ade74e898401960de697cefc02f3519bd26c1b"},
+ {file = "pycryptodomex-3.10.1-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:37ec1b407ec032c7a0c1fdd2da12813f560bad38ae61ad9c7ce3c0573b3e5e30"},
+ {file = "pycryptodomex-3.10.1-pp36-pypy36_pp73-manylinux1_x86_64.whl", hash = "sha256:00a584ee52bf5e27d540129ca9bf7c4a7e7447f24ff4a220faa1304ad0c09bcd"},
+ {file = "pycryptodomex-3.10.1-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:961333e7ee896651f02d4692242aa36b787b8e8e0baa2256717b2b9d55ae0a3c"},
+ {file = "pycryptodomex-3.10.1-pp36-pypy36_pp73-win32.whl", hash = "sha256:2959304d1ce31ab303d9fb5db2b294814278b35154d9b30bf7facc52d6088d0a"},
+ {file = "pycryptodomex-3.10.1.tar.gz", hash = "sha256:541cd3e3e252fb19a7b48f420b798b53483302b7fe4d9954c947605d0a263d62"},
+]
+pydot = [
+ {file = "pydot-1.4.2-py2.py3-none-any.whl", hash = "sha256:66c98190c65b8d2e2382a441b4c0edfdb4f4c025ef9cb9874de478fb0793a451"},
+ {file = "pydot-1.4.2.tar.gz", hash = "sha256:248081a39bcb56784deb018977e428605c1c758f10897a339fce1dd728ff007d"},
+]
+pymongo = [
+ {file = "pymongo-3.12.0-cp27-cp27m-macosx_10_14_intel.whl", hash = "sha256:072ba7cb65c8aa4d5c5659bf6722ee85781c9d7816dc00679b8b6f3dff1ddafc"},
+ {file = "pymongo-3.12.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:d6e11ffd43184d529d6752d6dcb62b994f903038a17ea2168ef1910c96324d26"},
+ {file = "pymongo-3.12.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:7412a36798966624dc4c57d64aa43c2d1100b348abd98daaac8e99e57d87e1d7"},
+ {file = "pymongo-3.12.0-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e8a82e35d52ad6f867e88096a1a2b9bdc7ec4d5e65c7b4976a248bf2d1a32a93"},
+ {file = "pymongo-3.12.0-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:dcd3d0009fbb6e454d729f8b22d0063bd9171c31a55e0f0271119bd4f2700023"},
+ {file = "pymongo-3.12.0-cp27-cp27m-win32.whl", hash = "sha256:1bc6fe7279ff40c6818db002bf5284aa03ec181ea1b1ceaeee33c289d412afa7"},
+ {file = "pymongo-3.12.0-cp27-cp27m-win_amd64.whl", hash = "sha256:e2b7670c0c8c6b501464150dd49dd0d6be6cb7f049e064124911cec5514fa19e"},
+ {file = "pymongo-3.12.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:316c1b8723afa9870567cd6dff35d440b2afeda53aa13da6c5ab85f98ed6f5ca"},
+ {file = "pymongo-3.12.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:255a35bf29185f44b412e31a927d9dcedda7c2c380127ecc4fbf2f61b72fa978"},
+ {file = "pymongo-3.12.0-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ffbae429ba9e42d0582d3ac63fdb410338892468a2107d8ff68228ec9a39a0ed"},
+ {file = "pymongo-3.12.0-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:c188db6cf9e14dbbb42f5254292be96f05374a35e7dfa087cc2140f0ff4f10f6"},
+ {file = "pymongo-3.12.0-cp34-cp34m-macosx_10_6_intel.whl", hash = "sha256:6fb3f85870ae26896bb44e67db94045f2ebf00c5d41e6b66cdcbb5afd644fc18"},
+ {file = "pymongo-3.12.0-cp34-cp34m-manylinux1_i686.whl", hash = "sha256:aaa038eafb7186a4abbb311fcf20724be9363645882bbce540bef4797e812a7a"},
+ {file = "pymongo-3.12.0-cp34-cp34m-manylinux1_x86_64.whl", hash = "sha256:7d98ce3c42921bb91566121b658e0d9d59a9082a9bd6f473190607ff25ab637f"},
+ {file = "pymongo-3.12.0-cp34-cp34m-win32.whl", hash = "sha256:b0a0cf39f589e52d801fdef418305562bc030cdf8929217463c8433c65fd5c2f"},
+ {file = "pymongo-3.12.0-cp34-cp34m-win_amd64.whl", hash = "sha256:ceae3ab9e11a27aaab42878f1d203600dfd24f0e43678b47298219a0f10c0d30"},
+ {file = "pymongo-3.12.0-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:5e574664f1468872cd40f74e4811e22b1aa4de9399d6bcfdf1ee6ea94c017fcf"},
+ {file = "pymongo-3.12.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:73b400fdc22de84bae0dbf1a22613928a41612ec0a3d6ed47caf7ad4d3d0f2ff"},
+ {file = "pymongo-3.12.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:cbf8672edeb7b7128c4a939274801f0e32bbf5159987815e3d1eace625264a46"},
+ {file = "pymongo-3.12.0-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:a634a4730ce0b0934ed75e45beba730968e12b4dafbb22f69b3b2f616d9e644e"},
+ {file = "pymongo-3.12.0-cp35-cp35m-manylinux2014_i686.whl", hash = "sha256:c55782a55f4a013a78ac5b6ee4b8731a192dea7ab09f1b6b3044c96d5128edd4"},
+ {file = "pymongo-3.12.0-cp35-cp35m-manylinux2014_ppc64le.whl", hash = "sha256:11f9e0cfc84ade088a38df2708d0b958bb76360181df1b2e1e1a41beaa57952b"},
+ {file = "pymongo-3.12.0-cp35-cp35m-manylinux2014_s390x.whl", hash = "sha256:186104a94d39b8412f8e3de385acd990a628346a4402d4f3a288a82b8660bd22"},
+ {file = "pymongo-3.12.0-cp35-cp35m-manylinux2014_x86_64.whl", hash = "sha256:70761fd3c576b027eec882b43ee0a8e5b22ff9c20cdf4d0400e104bc29e53e34"},
+ {file = "pymongo-3.12.0-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:333bfad77aa9cd11711febfb75eed0bb537a1d022e1c252714dad38993590240"},
+ {file = "pymongo-3.12.0-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:fa8957e9a1b202cb45e6b839c241cd986c897be1e722b81d2f32e9c6aeee80b0"},
+ {file = "pymongo-3.12.0-cp35-cp35m-win32.whl", hash = "sha256:4ba0def4abef058c0e5101e05e3d5266e6fffb9795bbf8be0fe912a7361a0209"},
+ {file = "pymongo-3.12.0-cp35-cp35m-win_amd64.whl", hash = "sha256:a0e5dff6701fa615f165306e642709e1c1550d5b237c5a7a6ea299886828bd50"},
+ {file = "pymongo-3.12.0-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:b542d56ed1b8d5cf3bb36326f814bd2fbe8812dfd2582b80a15689ea433c0e35"},
+ {file = "pymongo-3.12.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:a325600c83e61e3c9cebc0c2b1c8c4140fa887f789085075e8f44c8ff2547eb9"},
+ {file = "pymongo-3.12.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:48d5bc80ab0af6b60c4163c5617f5cd23f2f880d7600940870ea5055816af024"},
+ {file = "pymongo-3.12.0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:c5cab230e7cabdae9ff23c12271231283efefb944c1b79bed79a91beb65ba547"},
+ {file = "pymongo-3.12.0-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:d73e10772152605f6648ba4410318594f1043bbfe36d2fadee7c4b8912eff7c5"},
+ {file = "pymongo-3.12.0-cp36-cp36m-manylinux2014_ppc64le.whl", hash = "sha256:b1c4874331ab960429caca81acb9d2932170d66d6d6f87e65dc4507a85aca152"},
+ {file = "pymongo-3.12.0-cp36-cp36m-manylinux2014_s390x.whl", hash = "sha256:a3566acfbcde46911c52810374ecc0354fdb841284a3efef6ff7105bc007e9a8"},
+ {file = "pymongo-3.12.0-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:b3b5b3cbc3fdf4fcfa292529df2a85b5d9c7053913a739d3069af1e12e12219f"},
+ {file = "pymongo-3.12.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd3854148005c808c485c754a184c71116372263709958b42aefbef2e5dd373a"},
+ {file = "pymongo-3.12.0-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f55c1ddcc1f6050b07d468ce594f55dbf6107b459e16f735d26818d7be1e9538"},
+ {file = "pymongo-3.12.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ced944dcdd561476deef7cb7bfd4987c69fffbfeff6d02ca4d5d4fd592d559b7"},
+ {file = "pymongo-3.12.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78ecb8d42f50d393af912bfb1fb1dcc9aabe9967973efb49ee577e8f1cea494c"},
+ {file = "pymongo-3.12.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1970cfe2aec1bf74b40cf30c130ad10cd968941694630386db33e1d044c22a2e"},
+ {file = "pymongo-3.12.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b8bf42d3b32f586f4c9e37541769993783a534ad35531ce8a4379f6fa664fba9"},
+ {file = "pymongo-3.12.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:bc9ac81e73573516070d24ce15da91281922811f385645df32bd3c8a45ab4684"},
+ {file = "pymongo-3.12.0-cp36-cp36m-win32.whl", hash = "sha256:d04ca462cb99077e6c059e97c072957caf2918e6e4191e3161c01c439e0193de"},
+ {file = "pymongo-3.12.0-cp36-cp36m-win_amd64.whl", hash = "sha256:f2acf9bbcd514e901f82c4ca6926bbd2ae61716728f110b4343eb0a69612d018"},
+ {file = "pymongo-3.12.0-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:b754240daafecd9d5fce426b0fbaaed03f4ebb130745c8a4ae9231fffb8d75e5"},
+ {file = "pymongo-3.12.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:af586e85144023686fb0af09c8cdf672484ea182f352e7ceead3d832de381e1b"},
+ {file = "pymongo-3.12.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:fe5872ce6f9627deac8314bdffd3862624227c3de4c17ef0cc78bbf0402999eb"},
+ {file = "pymongo-3.12.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:f6977a520bd96e097c8a37a8cbb9faa1ea99d21bf84190195056e25f688af73d"},
+ {file = "pymongo-3.12.0-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:2dbfbbded947a83a3dffc2bd1ec4750c17e40904692186e2c55a3ad314ca0222"},
+ {file = "pymongo-3.12.0-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:a752ecd1a26000a6d67be7c9a2e93801994a8b3f866ac95b672fbc00225ca91a"},
+ {file = "pymongo-3.12.0-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:1bab889ae7640eba739f67fcbf8eff252dddc60d4495e6ddd3a87cd9a95fdb52"},
+ {file = "pymongo-3.12.0-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:f94c7d22fb36b184734dded7345a04ec5f95130421c775b8b0c65044ef073f34"},
+ {file = "pymongo-3.12.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec5ca7c0007ce268048bbe0ffc6846ed1616cf3d8628b136e81d5e64ff3f52a2"},
+ {file = "pymongo-3.12.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7c72d08acdf573455b2b9d2b75b8237654841d63a48bc2327dc102c6ee89b75a"},
+ {file = "pymongo-3.12.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b6ea08758b6673610b3c5bdf47189286cf9c58b1077558706a2f6f8744922527"},
+ {file = "pymongo-3.12.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46d5ec90276f71af3a29917b30f2aec2315a2759b5f8d45b3b63a07ca8a070a3"},
+ {file = "pymongo-3.12.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:625befa3bc9b40746a749115cc6a15bf20b9bd7597ca55d646205b479a2c99c7"},
+ {file = "pymongo-3.12.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d1131562ddc2ea8a446f66c2648d7dabec2b3816fc818528eb978a75a6d23b2e"},
+ {file = "pymongo-3.12.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:eee42a1cc06565f6b21caa1f504ec15e07de7ebfd520ab57f8cb3308bc118e22"},
+ {file = "pymongo-3.12.0-cp37-cp37m-win32.whl", hash = "sha256:94d38eba4d1b5eb3e6bfece0651b855a35c44f32fd91f512ab4ba41b8c0d3e66"},
+ {file = "pymongo-3.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e018a4921657c2d3f89c720b7b90b9182e277178a04a7e9542cc79d7d787ca51"},
+ {file = "pymongo-3.12.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7c6a9948916a7bbcc6d3a9f6fb75db1acb5546078023bfb3db6efabcd5a67527"},
+ {file = "pymongo-3.12.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:e9faf8d4712d5ea301d74abfcf6dafe4b7f4af7936e91f283b0ad7bf69ed3e3a"},
+ {file = "pymongo-3.12.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:cc2894fe91f31a513860238ede69fe47fada21f9e7ddfe73f7f9fef93a971e41"},
+ {file = "pymongo-3.12.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:053b4ebf91c7395d1fcd2ce6a9edff0024575b7b2de6781554a4114448a8adc9"},
+ {file = "pymongo-3.12.0-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:39dafa2eaf577d1969f289dc9a44501859a1897eb45bd589e93ce843fc610800"},
+ {file = "pymongo-3.12.0-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:246ec420e4c8744fceb4e259f906211b9c198e1f345e6158dcd7cbad3737e11e"},
+ {file = "pymongo-3.12.0-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:208debdcf76ed39ebf24f38509f50dc1c100e31e8653817fedb8e1f867850a13"},
+ {file = "pymongo-3.12.0-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:18290649759f9db660972442aa606f845c368db9b08c4c73770f6da14113569b"},
+ {file = "pymongo-3.12.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:657ad80de8ec9ed656f28844efc801a0802961e8c6a85038d97ff6f555ef4919"},
+ {file = "pymongo-3.12.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b772bab31cbd9cb911e41e1a611ebc9497f9a32a7348e2747c38210f75c00f41"},
+ {file = "pymongo-3.12.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2399a85b54f68008e483b2871f4a458b4c980469c7fe921595ede073e4844f1e"},
+ {file = "pymongo-3.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e66780f14c2efaf989cd3ac613b03ee6a8e3a0ba7b96c0bb14adca71a427e55"},
+ {file = "pymongo-3.12.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:02dc0b0f48ed3cd06c13b7e31b066bf91e00dac5f8147b0a0a45f9009bfab857"},
+ {file = "pymongo-3.12.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:070a4ef689c9438a999ec3830e69b208ff0d12251846e064d947f97d819d1d05"},
+ {file = "pymongo-3.12.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:db93608a246da44d728842b8fa9e45aa9782db76955f634a707739a8d53ff544"},
+ {file = "pymongo-3.12.0-cp38-cp38-win32.whl", hash = "sha256:5af390fa9faf56c93252dab09ea57cd020c9123aa921b63a0ed51832fdb492e7"},
+ {file = "pymongo-3.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:a2239556ff7241584ce57be1facf25081669bb457a9e5cbe68cce4aae6567aa1"},
+ {file = "pymongo-3.12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cda9e628b1315beec8341e8c04aac9a0b910650b05e0751e42e399d5694aeacb"},
+ {file = "pymongo-3.12.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:845a8b83798b2fb11b09928413cb32692866bfbc28830a433d9fa4c8c3720dd0"},
+ {file = "pymongo-3.12.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:da8288bc4a7807c6715416deed1c57d94d5e03e93537889e002bf985be503f1a"},
+ {file = "pymongo-3.12.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:a9ba2a63777027b06b116e1ea8248e66fd1bedc2c644f93124b81a91ddbf6d88"},
+ {file = "pymongo-3.12.0-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:9a13661681d17e43009bb3e85e837aa1ec5feeea1e3654682a01b8821940f8b3"},
+ {file = "pymongo-3.12.0-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:6b89dc51206e4971c5568c797991eaaef5dc2a6118d67165858ad11752dba055"},
+ {file = "pymongo-3.12.0-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:701e08457183da70ed96b35a6b43e6ba1df0b47c837b063cde39a1fbe1aeda81"},
+ {file = "pymongo-3.12.0-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:e7a33322e08021c37e89cae8ff06327503e8a1719e97c69f32c31cbf6c30d72c"},
+ {file = "pymongo-3.12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd1f49f949a658c4e8f81ed73f9aad25fcc7d4f62f767f591e749e30038c4e1d"},
+ {file = "pymongo-3.12.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a6d055f01b83b1a4df8bb0c61983d3bdffa913764488910af3620e5c2450bf83"},
+ {file = "pymongo-3.12.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd6ff2192f34bd622883c745a56f492b1c9ccd44e14953e8051c33024a2947d5"},
+ {file = "pymongo-3.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19d4bd0fc29aa405bb1781456c9cfff9fceabb68543741eb17234952dbc2bbb0"},
+ {file = "pymongo-3.12.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:24f8aeec4d6b894a6128844e50ff423dd02462ee83addf503c598ee3a80ddf3d"},
+ {file = "pymongo-3.12.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0b6055e0ef451ff73c93d0348d122a0750dddf323b9361de5835dac2f6cf7fc1"},
+ {file = "pymongo-3.12.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6261bee7c5abadeac7497f8f1c43e521da78dd13b0a2439f526a7b0fc3788824"},
+ {file = "pymongo-3.12.0-cp39-cp39-win32.whl", hash = "sha256:2e92aa32300a0b5e4175caec7769f482b292769807024a86d674b3f19b8e3755"},
+ {file = "pymongo-3.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:3ce83f17f641a62a4dfb0ba1b8a3c1ced7c842f511b5450d90c030c7828e3693"},
+ {file = "pymongo-3.12.0-py2.7-macosx-10.14-intel.egg", hash = "sha256:d1740776b70367277323fafb76bcf09753a5cc9824f5d705bac22a34ff3668ea"},
+ {file = "pymongo-3.12.0.tar.gz", hash = "sha256:b88d1742159bc93a078733f9789f563cef26f5e370eba810476a71aa98e5fbc2"},
+]
@@ -887,0 +2435,29 @@ pyparsing = [
+pyppmd = [
+ {file = "pyppmd-0.15.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:c4bc7eee8dc4da2d87c6a59796db16afc7c910fefc13b719d9feb61341958a7d"},
+ {file = "pyppmd-0.15.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2192c7db9eee2945b259f05369f33da23e99e393cefd214d9de6feb1c882babf"},
+ {file = "pyppmd-0.15.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c75daf1cc832fd417cddf6bf6532e22f3c20bcb095e35413367f02af547bdb96"},
+ {file = "pyppmd-0.15.2-cp36-cp36m-win32.whl", hash = "sha256:999784c10ba6b9db071c571ef037c85f5e291e89377c84644b0ae3771424c78b"},
+ {file = "pyppmd-0.15.2-cp36-cp36m-win_amd64.whl", hash = "sha256:48e85e74d710ddf66775beeab8ae75bb1d8765a10c0a0e1f3137ecb69daa1a34"},
+ {file = "pyppmd-0.15.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5b23f28c2052cf4abb821e1eaab973867c9aec5c9ad3f4483e99e2ce88b4d959"},
+ {file = "pyppmd-0.15.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64620b677fc4cdfdfbe8b0364e11362634f378088af443be0d184c82e83fe440"},
+ {file = "pyppmd-0.15.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:236c36fcad93c653aca37cefb63df4710d4450833bee1fe9a334dad70cfdde9b"},
+ {file = "pyppmd-0.15.2-cp37-cp37m-win32.whl", hash = "sha256:64eaae44fe7c90e598027fb3e7095b9e2586764e7368a70ba0ba37dafd8e1994"},
+ {file = "pyppmd-0.15.2-cp37-cp37m-win_amd64.whl", hash = "sha256:178cf8f3a9909e43524a0bbf0c458cc535f59e68046903e49eab038917bfd644"},
+ {file = "pyppmd-0.15.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5782bd9724d04e70a724caf6f78bda31d6c4426f0ab1a659165d4b6d7e2d11cc"},
+ {file = "pyppmd-0.15.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d56df5e8c65f7b28e821c2450a5f9840ff06b386848c909e7dcf5449a55db8c0"},
+ {file = "pyppmd-0.15.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f5167ab2487a1ff7761374cab3e6ded1ccb3f97807150a8f9ac6bc6f0c35138"},
+ {file = "pyppmd-0.15.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33daaa90356912c12fae442de4d3227aa94ee51f9b69700109bdca9433491d79"},
+ {file = "pyppmd-0.15.2-cp38-cp38-win32.whl", hash = "sha256:a51c057597da7b517cb2d51d440472c7dd5f2014e0e150f7b1aed8a4eb0e392c"},
+ {file = "pyppmd-0.15.2-cp38-cp38-win_amd64.whl", hash = "sha256:8f914f31b27130d1e61cea70c7ad133b4d0c0209fb85d9218c3513083092b913"},
+ {file = "pyppmd-0.15.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8db10325726311aed07b1a9aa7c3b2f477f9d5d721b77c2e98e9293494977980"},
+ {file = "pyppmd-0.15.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:61e225adb18b9531e6daeb97cf292241aaa17690308d89e799049bda48a1c957"},
+ {file = "pyppmd-0.15.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90255e1dc5eb04b7302c498a5acb0c1929ee1bbfc6346519b32c2058fa33dcab"},
+ {file = "pyppmd-0.15.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e642c5f65af8997c51041a64a56fab1ba5b382d910e6c4f8aa1ce0de497faa9f"},
+ {file = "pyppmd-0.15.2-cp39-cp39-win32.whl", hash = "sha256:d72bb209ceaa0d23708d28c8ebd54e2bdbec9bb8c7d6a819460c3bea671fb061"},
+ {file = "pyppmd-0.15.2-cp39-cp39-win_amd64.whl", hash = "sha256:61fa1df0a912da6abdf9c181cc1ab95821b545e812c3e9d016a271dc4eaab57d"},
+ {file = "pyppmd-0.15.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:dee5ad99dd66b5daeb780fad6267f83834cfcb4377ba667a6e1f162450bab370"},
+ {file = "pyppmd-0.15.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4286f0b3cb0b313b2dfd0ae4b6e301c144957856eccfa039038afa793e81853d"},
+ {file = "pyppmd-0.15.2-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f77aecfc364188e38b476de637a48cb3985d3e13d9fe11a23a86857e8c30a4d7"},
+ {file = "pyppmd-0.15.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:7c497fa447795bbb9e0b1be811e8085c3e6856b62b5957898bd6475c2758b1f3"},
+ {file = "pyppmd-0.15.2.tar.gz", hash = "sha256:17195786082a473f271ad2e4bcc3bd224fcff44a53f085026ebb16a65f2c92f3"},
+]
@@ -930,0 +2507,46 @@ pyyaml = [
+pyzstd = [
+ {file = "pyzstd-0.14.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9aab6567b7911b0a1aec9cdbc42673ea6c580b6c8763a6063e9bfe0b48f40e19"},
+ {file = "pyzstd-0.14.4-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:049ef5c8f0f6f6b56f635fd1a23a4b50c2432d546052072468d6a5cbcd0cf701"},
+ {file = "pyzstd-0.14.4-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:fba67b593c29b0fc658677cc432553bea0f068ed1b125f4404fdf124e5bbdbaa"},
+ {file = "pyzstd-0.14.4-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:1cbcc5eb05db103b4c3999f2ae8a7302fcaca23b895a0e85c528ab2d47312be6"},
+ {file = "pyzstd-0.14.4-cp36-cp36m-manylinux2014_ppc64le.whl", hash = "sha256:a6e95308a4ec968c049196c8b7f4ee0be1d423676e71ccf6c7730c942958ed31"},
+ {file = "pyzstd-0.14.4-cp36-cp36m-manylinux2014_s390x.whl", hash = "sha256:5c5e9a9f58ccc99b021bffc2d6a2ec706e04cd9d7ed239de061afc56a6ba1ee9"},
+ {file = "pyzstd-0.14.4-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:ca869717deea3152232fb702fd0925dd9a1ce408052678853693956c5c380c6f"},
+ {file = "pyzstd-0.14.4-cp36-cp36m-win32.whl", hash = "sha256:96cc88606d24481959af291e6a367c3854c75849818059513b5a9522e5ed6dc7"},
+ {file = "pyzstd-0.14.4-cp36-cp36m-win_amd64.whl", hash = "sha256:31cc5cf8cacb7a1ee0a3d9c46adcb6f04ee4b62d2ded232bff5e6c94b0171ad6"},
+ {file = "pyzstd-0.14.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:9085e764d07afc72f67df2a291ac2505a89ac714720888a9dbc4458ef261d9d6"},
+ {file = "pyzstd-0.14.4-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:dce30fce9b4d2c5fb0ef58bf0f3ed5633c45c2ed6da6e9cd85d78cc95fe26119"},
+ {file = "pyzstd-0.14.4-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:4f1fcc1be04a78ca5023542614c58504afe944c5a27f50e409daf3914233e4ed"},
+ {file = "pyzstd-0.14.4-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:1a515dd3e0b4fc0b89d5bf517e8300f67a31a40ef449e6bd7905375449fd9bc2"},
+ {file = "pyzstd-0.14.4-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:704accbdcdb2cae406dc148acee9156824fbdcac4df939679b4e3f15ac90368a"},
+ {file = "pyzstd-0.14.4-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:a8a55aabd4abb1aab7e4a89e11f4916cb3292fa662487dbbbebad7eb42b4a16d"},
+ {file = "pyzstd-0.14.4-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:97f96882be9c29ba6892c2ebcc6da0128d23e105db12c592199bdd7cdafa61de"},
+ {file = "pyzstd-0.14.4-cp37-cp37m-win32.whl", hash = "sha256:ea2487f5085eda2965d2dbb5d82b9a11ebe821df882d482ce4158f9364909301"},
+ {file = "pyzstd-0.14.4-cp37-cp37m-win_amd64.whl", hash = "sha256:9fc974a683a1396746d8600ce2507e21730202590c70b604747804b72a615e0c"},
+ {file = "pyzstd-0.14.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c80a41148580f4f9560fd27e16d040281a74c7e873734d4ece6a45308c95071d"},
+ {file = "pyzstd-0.14.4-cp38-cp38-manylinux1_i686.whl", hash = "sha256:56775aa7a0902bd4756a3d51ab1abb7984716a607233754edaa5021cd20bf204"},
+ {file = "pyzstd-0.14.4-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:d6bb34e8c4c534c426cc4e4b3db2e61a955dee8ba15414b62f93e996c4b7105b"},
+ {file = "pyzstd-0.14.4-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:8ee5770d6d8e2b86204dff3f69594b5bc4be8bce2bf859d351ecd6c2351e1c66"},
+ {file = "pyzstd-0.14.4-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:3e9e3a800b59e297364d29d65b578f5e3bf56c62f802fd96e2e7a29a64831fe8"},
+ {file = "pyzstd-0.14.4-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:f0cb4a25b554eb1bad42f4de64624e48da55c2af72a46ed0d7e4bacacc94ab42"},
+ {file = "pyzstd-0.14.4-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:72ea68d9113cda25e188c101d980b36883bece12d97d1bb790e83d5dd3e6780a"},
+ {file = "pyzstd-0.14.4-cp38-cp38-win32.whl", hash = "sha256:84686494e9f93080c5f94b320fb1937b9c349ac236880b6fd3f74a26a1037e11"},
+ {file = "pyzstd-0.14.4-cp38-cp38-win_amd64.whl", hash = "sha256:61efe917a6bffbe5dbc1288d5cea5803ee8ee5a8d5b42970589352840724c814"},
+ {file = "pyzstd-0.14.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:58b303eb72edbea3e6a4ec2bd3215f9dfb189d7595be5bef511e7d066bdcdd55"},
+ {file = "pyzstd-0.14.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cd32436b9bf811b52acc0d909720b0a9dcc75237afade4cd3998df11f3bcb1e8"},
+ {file = "pyzstd-0.14.4-cp39-cp39-manylinux1_i686.whl", hash = "sha256:59db9a8007feb4dd0041f4e668dfd08f29d2a88905dc64341beec2638f662432"},
+ {file = "pyzstd-0.14.4-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:90f83acbd7355e80f196d3e7193f25d5128dc1302dc98446b8f39fee4fbb7fd8"},
+ {file = "pyzstd-0.14.4-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:5d2a8e7b0320ecd9378566423d6287add6e3b7919c9c8676a0f13869ae36fc2d"},
+ {file = "pyzstd-0.14.4-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:38ba67342642fd2db8de033950dfa8e26282d706ae230bafc696ed8a4e155906"},
+ {file = "pyzstd-0.14.4-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:c87c7351c90e2eeffa79f6ad64c0ef8f6df094af3fa12c5f262a0c59a118a967"},
+ {file = "pyzstd-0.14.4-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:9b6864e3ef9e8cc911d890b47c2a3b4ad7d556d4095d8a3c8806f8c1e85a3abf"},
+ {file = "pyzstd-0.14.4-cp39-cp39-win32.whl", hash = "sha256:3cd554d06397d6d6a679515bada056d601e6cb234753dce20d0a77bf1c611c90"},
+ {file = "pyzstd-0.14.4-cp39-cp39-win_amd64.whl", hash = "sha256:20dc1e9e4ca28bd6952314b30b62e1d024117715f6e49ed65b3b0477b3e8ec70"},
+ {file = "pyzstd-0.14.4-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3a4c9db63ec5cbefe5601d03a720c5cc399aeeb7902bf3cfb5720df88a6c5835"},
+ {file = "pyzstd-0.14.4-pp36-pypy36_pp73-manylinux1_x86_64.whl", hash = "sha256:87741d16c50bf7a87e055bfbc6ed468541c38f9b4be9463082172cfbb969fcb7"},
+ {file = "pyzstd-0.14.4-pp36-pypy36_pp73-win32.whl", hash = "sha256:1ca717b6d493008d5eb84073de3c7c80d174b2b9b399be2f7a7290ca6eef26b7"},
+ {file = "pyzstd-0.14.4-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8183a72b393b4b4299a9609f2f0f9c9c0eef5eb35145580226fc69af301355de"},
+ {file = "pyzstd-0.14.4-pp37-pypy37_pp73-manylinux1_x86_64.whl", hash = "sha256:4ce434af7b6e898925e63d1154bc4f9f3543511cc5c17dae3665065c21716407"},
+ {file = "pyzstd-0.14.4-pp37-pypy37_pp73-win32.whl", hash = "sha256:fea569c4ce8061e37396a831ec04d109afc1eefa0e3ce515df1adc93dcb73e4f"},
+ {file = "pyzstd-0.14.4.tar.gz", hash = "sha256:43df9c969b912874f78edf2d29b78710547bff89c0e61c63aaa8af5ab824588b"},
+]
@@ -977,0 +2600,13 @@ requests = [
+requests-oauthlib = [
+ {file = "requests-oauthlib-1.3.0.tar.gz", hash = "sha256:b4261601a71fd721a8bd6d7aa1cc1d6a8a93b4a9f5e96626f8e4d91e8beeaa6a"},
+ {file = "requests_oauthlib-1.3.0-py2.py3-none-any.whl", hash = "sha256:7f71572defaecd16372f9006f33c2ec8c077c3cfa6f5911a9a90202beb513f3d"},
+ {file = "requests_oauthlib-1.3.0-py3.7.egg", hash = "sha256:fa6c47b933f01060936d87ae9327fead68768b69c6c9ea2109c48be30f2d4dbc"},
+]
+rsa = [
+ {file = "rsa-4.7.2-py3-none-any.whl", hash = "sha256:78f9a9bf4e7be0c5ded4583326e7461e3a3c5aae24073648b4bdfa797d78c9d2"},
+ {file = "rsa-4.7.2.tar.gz", hash = "sha256:9d689e6ca1b3038bc82bf8d23e944b6b6037bc02301a574935b2dd946e0353b9"},
+]
+sacremoses = [
+ {file = "sacremoses-0.0.45-py3-none-any.whl", hash = "sha256:fa93db44bc04542553ba6090818b892f603d02aa0d681e6c5c3023baf17e8564"},
+ {file = "sacremoses-0.0.45.tar.gz", hash = "sha256:58176cc28391830789b763641d0f458819bebe88681dac72b41a19c0aedc07e9"},
+]
@@ -979,2 +2614,2 @@ six = [
- {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
- {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
+ {file = "six-1.15.0-py2.py3-none-any.whl", hash = "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"},
+ {file = "six-1.15.0.tar.gz", hash = "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"},
@@ -989,0 +2625,54 @@ starlette = [
+tensorboard = [
+ {file = "tensorboard-2.5.0-py3-none-any.whl", hash = "sha256:e167460085b6528956b33bab1c970c989cdce47a6616273880733f5e7bde452e"},
+]
+tensorboard-data-server = [
+ {file = "tensorboard_data_server-0.6.1-py3-none-any.whl", hash = "sha256:809fe9887682d35c1f7d1f54f0f40f98bb1f771b14265b453ca051e2ce58fca7"},
+ {file = "tensorboard_data_server-0.6.1-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:fa8cef9be4fcae2f2363c88176638baf2da19c5ec90addb49b1cde05c95c88ee"},
+ {file = "tensorboard_data_server-0.6.1-py3-none-manylinux2010_x86_64.whl", hash = "sha256:d8237580755e58eff68d1f3abefb5b1e39ae5c8b127cc40920f9c4fb33f4b98a"},
+]
+tensorboard-plugin-wit = [
+ {file = "tensorboard_plugin_wit-1.8.0-py3-none-any.whl", hash = "sha256:2a80d1c551d741e99b2f197bb915d8a133e24adb8da1732b840041860f91183a"},
+]
+tensorflow = [
+ {file = "tensorflow-2.5.0-cp36-cp36m-macosx_10_11_x86_64.whl", hash = "sha256:7e1351ce05b897d5cf1042066b6929ca3f595a717849421ae92dbe8d6d2f1c74"},
+ {file = "tensorflow-2.5.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:31a3ea994c336fc5a6ba0e6d61f131262b2c6dbff97e2b7473ff6da0cf9383f7"},
+ {file = "tensorflow-2.5.0-cp36-cp36m-win_amd64.whl", hash = "sha256:c45059b42bca01ce441004abb965acf7838b40d12e036920063bd7ac540def9a"},
+ {file = "tensorflow-2.5.0-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:616bc8094cb289b3bd21eded2196b0dba65bce53bad112efcaf2acb6f7d9e6a5"},
+ {file = "tensorflow-2.5.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:739d25273ccc10fedc74517de099bd5b16a274d1295fad6bfef834ad28cc3401"},
+ {file = "tensorflow-2.5.0-cp37-cp37m-win_amd64.whl", hash = "sha256:68b70ca7df7f5f8fbe3d7240e937b3ea8b1a25e51710f60293e7edada00257a2"},
+ {file = "tensorflow-2.5.0-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:c46b1d1b0eec54577d7ba545e3951c9dd0355ca05a8eb776c95d9a3e22e7be9c"},
+ {file = "tensorflow-2.5.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:34ab87aac9093de98cbba68d7e8dca9159c36acd06a03e5749c956c7ab08d9da"},
+ {file = "tensorflow-2.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:46f10a2edc694bb54a2d869a65b5a09705dab1874a89b529990a943416ad48aa"},
+ {file = "tensorflow-2.5.0-cp39-cp39-macosx_10_11_x86_64.whl", hash = "sha256:baebb9c95ef1815bb410317ad525dd3dbb26064fe95636b51486459b6536bc6e"},
+ {file = "tensorflow-2.5.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:1ea003f9e11508d0336c242a2a3bc73aea205dd5b31892c3e1d7f5d0f0e60c0a"},
+ {file = "tensorflow-2.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:4edec9b9f6ef8f1407762a3a6bd050173177f686d5ea6b59e91487b645173f73"},
+]
+tensorflow-estimator = [
+ {file = "tensorflow_estimator-2.5.0-py2.py3-none-any.whl", hash = "sha256:d1fe76dee8b1dcab865d807a0246da0a9c4a635b1eba6e9545bf216c3aad6955"},
+]
+termcolor = [
+ {file = "termcolor-1.1.0.tar.gz", hash = "sha256:1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b"},
+]
+texttable = [
+ {file = "texttable-1.6.4-py2.py3-none-any.whl", hash = "sha256:dd2b0eaebb2a9e167d1cefedab4700e5dcbdb076114eed30b58b97ed6b37d6f2"},
+ {file = "texttable-1.6.4.tar.gz", hash = "sha256:42ee7b9e15f7b225747c3fa08f43c5d6c83bc899f80ff9bae9319334824076e9"},
+]
+tokenizers = [
+ {file = "tokenizers-0.10.3-cp36-cp36m-macosx_10_11_x86_64.whl", hash = "sha256:4ab688daf4692a6c31dfe42f1f3a4a8c22050705eb69d58d3efde9d55f434586"},
+ {file = "tokenizers-0.10.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c26dbc3b2a3d71d3d40c50975ec62145932f05aea73f03ea35c48ebd3a717611"},
+ {file = "tokenizers-0.10.3-cp36-cp36m-win32.whl", hash = "sha256:6b84673997990b3c260ae2f7c57fdf1f835e316820eff14aca46dc68be3c0c74"},
+ {file = "tokenizers-0.10.3-cp36-cp36m-win_amd64.whl", hash = "sha256:2a9ee3ee574d4aa740e099b0ad6ef8e63f52f48cde359bb31801146a5aa614dc"},
+ {file = "tokenizers-0.10.3-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:2f8c5fefef0d0a03be613547e613fbda06b9e6ee0891236649524964c3e54d80"},
+ {file = "tokenizers-0.10.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4cc194104c8e427ffc4f54c7866488b42f2b1f6351a6cad0d045ca5ab8108e42"},
+ {file = "tokenizers-0.10.3-cp37-cp37m-win32.whl", hash = "sha256:edd8cb85c16b4b65e87ea5ef9d400be9fdd53c4152adbaca8817e16dd3aa480b"},
+ {file = "tokenizers-0.10.3-cp37-cp37m-win_amd64.whl", hash = "sha256:7b11b373705d082d43657c08883b79b5330f1952f0668d17488b6b889c4d7feb"},
+ {file = "tokenizers-0.10.3-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:a7ce0c2f27f7c92aa3f895231de90319acdf960ce2e42ba591edc651fda7d3c9"},
+ {file = "tokenizers-0.10.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ae7e40d9c8a77c5a4109731ac3e21633b0c609c56a8b58be6b863da61fa54636"},
+ {file = "tokenizers-0.10.3-cp38-cp38-win32.whl", hash = "sha256:a7ce051aafc53c564c9edbc09df300c2bd4f6ce87460fc22a276fed405d1892a"},
+ {file = "tokenizers-0.10.3-cp38-cp38-win_amd64.whl", hash = "sha256:91a8c045980594c7c437a52c3da5276eb3c530a662b4ef628ff32d81fb22b543"},
+ {file = "tokenizers-0.10.3-cp39-cp39-macosx_10_11_x86_64.whl", hash = "sha256:1d8867db210d75d97312360ae23b92aeb6a6b5bc65e15c1cd9d204b3fa3fc262"},
+ {file = "tokenizers-0.10.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:18c495e700f4588b9a00e58b4c41dc459c36daaa7c39a27faf880eb8f5533ce1"},
+ {file = "tokenizers-0.10.3-cp39-cp39-win32.whl", hash = "sha256:ad700fd9da518884fd58bf89f0b6dfeecef9b4e2d2db8765ef259f66d6c14980"},
+ {file = "tokenizers-0.10.3-cp39-cp39-win_amd64.whl", hash = "sha256:e9d147e545cdfeca560646c7a703bf287afe45645da426506ccd5eb78aab5ef5"},
+ {file = "tokenizers-0.10.3.tar.gz", hash = "sha256:1a5d3b596c6d3a237e1ad7f46c472d467b0246be7fd1a364f12576eb8db8f7e6"},
+]
@@ -1001,0 +2691,5 @@ tqdm = [
+transformers = [
+ {file = "transformers-4.9.1-py3-none-any.whl", hash = "sha256:86f3c46efecf114c6886d361c1d6cca14738f0e9d1effadb1e9252770cba55a0"},
+ {file = "transformers-4.9.1.tar.gz", hash = "sha256:1c30e38b2e0da15e110d9bb9a627f78de9569b9c6036d6533baf783015c339be"},
+]
+trec-car-tools = []
@@ -1003,3 +2697,26 @@ typing-extensions = [
- {file = "typing_extensions-3.10.0.0-py2-none-any.whl", hash = "sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497"},
- {file = "typing_extensions-3.10.0.0-py3-none-any.whl", hash = "sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84"},
- {file = "typing_extensions-3.10.0.0.tar.gz", hash = "sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342"},
+ {file = "typing_extensions-3.7.4.3-py2-none-any.whl", hash = "sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f"},
+ {file = "typing_extensions-3.7.4.3-py3-none-any.whl", hash = "sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918"},
+ {file = "typing_extensions-3.7.4.3.tar.gz", hash = "sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c"},
+]
+ujson = [
+ {file = "ujson-4.0.2-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:e390df0dcc7897ffb98e17eae1f4c442c39c91814c298ad84d935a3c5c7a32fa"},
+ {file = "ujson-4.0.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:84b1dca0d53b0a8d58835f72ea2894e4d6cf7a5dd8f520ab4cbd698c81e49737"},
+ {file = "ujson-4.0.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:91396a585ba51f84dc71c8da60cdc86de6b60ba0272c389b6482020a1fac9394"},
+ {file = "ujson-4.0.2-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:eb6b25a7670c7537a5998e695fa62ff13c7f9c33faf82927adf4daa460d5f62e"},
+ {file = "ujson-4.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:f8aded54c2bc554ce20b397f72101737dd61ee7b81c771684a7dd7805e6cca0c"},
+ {file = "ujson-4.0.2-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:30962467c36ff6de6161d784cd2a6aac1097f0128b522d6e9291678e34fb2b47"},
+ {file = "ujson-4.0.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:fc51e545d65689c398161f07fd405104956ec27f22453de85898fa088b2cd4bb"},
+ {file = "ujson-4.0.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:e6e90330670c78e727d6637bb5a215d3e093d8e3570d439fd4922942f88da361"},
+ {file = "ujson-4.0.2-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:5e1636b94c7f1f59a8ead4c8a7bab1b12cc52d4c21ababa295ffec56b445fd2a"},
+ {file = "ujson-4.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:e2cadeb0ddc98e3963bea266cc5b884e5d77d73adf807f0bda9eca64d1c509d5"},
+ {file = "ujson-4.0.2-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:a214ba5a21dad71a43c0f5aef917cd56a2d70bc974d845be211c66b6742a471c"},
+ {file = "ujson-4.0.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:0190d26c0e990c17ad072ec8593647218fe1c675d11089cd3d1440175b568967"},
+ {file = "ujson-4.0.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:f273a875c0b42c2a019c337631bc1907f6fdfbc84210cc0d1fff0e2019bbfaec"},
+ {file = "ujson-4.0.2-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:d3a87888c40b5bfcf69b4030427cd666893e826e82cc8608d1ba8b4b5e04ea99"},
+ {file = "ujson-4.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:7333e8bc45ea28c74ae26157eacaed5e5629dbada32e0103c23eb368f93af108"},
+ {file = "ujson-4.0.2-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:b3a6dcc660220539aa718bcc9dbd6dedf2a01d19c875d1033f028f212e36d6bb"},
+ {file = "ujson-4.0.2-cp39-cp39-manylinux1_i686.whl", hash = "sha256:0ea07fe57f9157118ca689e7f6db72759395b99121c0ff038d2e38649c626fb1"},
+ {file = "ujson-4.0.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:4d6d061563470cac889c0a9fd367013a5dbd8efc36ad01ab3e67a57e56cad720"},
+ {file = "ujson-4.0.2-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b5c70704962cf93ec6ea3271a47d952b75ae1980d6c56b8496cec2a722075939"},
+ {file = "ujson-4.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:aad6d92f4d71e37ea70e966500f1951ecd065edca3a70d3861b37b176dd6702c"},
+ {file = "ujson-4.0.2.tar.gz", hash = "sha256:c615a9e9e378a7383b756b7e7a73c38b22aeb8967a8bfbffd4741f7ffd043c4d"},
@@ -1037,0 +2755,10 @@ watchdog = [
+werkzeug = [
+ {file = "Werkzeug-2.0.1-py3-none-any.whl", hash = "sha256:6c1ec500dcdba0baa27600f6a22f6333d8b662d22027ff9f6202e3367413caa8"},
+ {file = "Werkzeug-2.0.1.tar.gz", hash = "sha256:1de1db30d010ff1af14a009224ec49ab2329ad2cde454c8a708130642d579c42"},
+]
+wget = [
+ {file = "wget-3.2.zip", hash = "sha256:35e630eca2aa50ce998b9b1a127bb26b30dfee573702782aa982f875e3f16061"},
+]
+wrapt = [
+ {file = "wrapt-1.12.1.tar.gz", hash = "sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7"},
+]
@@ -1142,0 +2870,50 @@ yarl = [
+zstandard = [
+ {file = "zstandard-0.15.2-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:7b16bd74ae7bfbaca407a127e11058b287a4267caad13bd41305a5e630472549"},
+ {file = "zstandard-0.15.2-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:8baf7991547441458325ca8fafeae79ef1501cb4354022724f3edd62279c5b2b"},
+ {file = "zstandard-0.15.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:5752f44795b943c99be367fee5edf3122a1690b0d1ecd1bd5ec94c7fd2c39c94"},
+ {file = "zstandard-0.15.2-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:3547ff4eee7175d944a865bbdf5529b0969c253e8a148c287f0668fe4eb9c935"},
+ {file = "zstandard-0.15.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:ac43c1821ba81e9344d818c5feed574a17f51fca27976ff7d022645c378fbbf5"},
+ {file = "zstandard-0.15.2-cp35-cp35m-manylinux2014_i686.whl", hash = "sha256:1fb23b1754ce834a3a1a1e148cc2faad76eeadf9d889efe5e8199d3fb839d3c6"},
+ {file = "zstandard-0.15.2-cp35-cp35m-manylinux2014_x86_64.whl", hash = "sha256:1faefe33e3d6870a4dce637bcb41f7abb46a1872a595ecc7b034016081c37543"},
+ {file = "zstandard-0.15.2-cp35-cp35m-win32.whl", hash = "sha256:b7d3a484ace91ed827aa2ef3b44895e2ec106031012f14d28bd11a55f24fa734"},
+ {file = "zstandard-0.15.2-cp35-cp35m-win_amd64.whl", hash = "sha256:ff5b75f94101beaa373f1511319580a010f6e03458ee51b1a386d7de5331440a"},
+ {file = "zstandard-0.15.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:c9e2dcb7f851f020232b991c226c5678dc07090256e929e45a89538d82f71d2e"},
+ {file = "zstandard-0.15.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:4800ab8ec94cbf1ed09c2b4686288750cab0642cb4d6fba2a56db66b923aeb92"},
+ {file = "zstandard-0.15.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:ec58e84d625553d191a23d5988a19c3ebfed519fff2a8b844223e3f074152163"},
+ {file = "zstandard-0.15.2-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:bd3c478a4a574f412efc58ba7e09ab4cd83484c545746a01601636e87e3dbf23"},
+ {file = "zstandard-0.15.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:6f5d0330bc992b1e267a1b69fbdbb5ebe8c3a6af107d67e14c7a5b1ede2c5945"},
+ {file = "zstandard-0.15.2-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:b4963dad6cf28bfe0b61c3265d1c74a26a7605df3445bfcd3ba25de012330b2d"},
+ {file = "zstandard-0.15.2-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:77d26452676f471223571efd73131fd4a626622c7960458aab2763e025836fc5"},
+ {file = "zstandard-0.15.2-cp36-cp36m-win32.whl", hash = "sha256:6ffadd48e6fe85f27ca3ca10cfd3ef3d0f933bef7316870285ffeb58d791ca9c"},
+ {file = "zstandard-0.15.2-cp36-cp36m-win_amd64.whl", hash = "sha256:92d49cc3b49372cfea2d42f43a2c16a98a32a6bc2f42abcde121132dbfc2f023"},
+ {file = "zstandard-0.15.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:af5a011609206e390b44847da32463437505bf55fd8985e7a91c52d9da338d4b"},
+ {file = "zstandard-0.15.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:31e35790434da54c106f05fa93ab4d0fab2798a6350e8a73928ec602e8505836"},
+ {file = "zstandard-0.15.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:a4f8af277bb527fa3d56b216bda4da931b36b2d3fe416b6fc1744072b2c1dbd9"},
+ {file = "zstandard-0.15.2-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:72a011678c654df8323aa7b687e3147749034fdbe994d346f139ab9702b59cea"},
+ {file = "zstandard-0.15.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:5d53f02aeb8fdd48b88bc80bece82542d084fb1a7ba03bf241fd53b63aee4f22"},
+ {file = "zstandard-0.15.2-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:f8bb00ced04a8feff05989996db47906673ed45b11d86ad5ce892b5741e5f9dd"},
+ {file = "zstandard-0.15.2-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:7a88cc773ffe55992ff7259a8df5fb3570168d7138c69aadba40142d0e5ce39a"},
+ {file = "zstandard-0.15.2-cp37-cp37m-win32.whl", hash = "sha256:1c5ef399f81204fbd9f0df3debf80389fd8aa9660fe1746d37c80b0d45f809e9"},
+ {file = "zstandard-0.15.2-cp37-cp37m-win_amd64.whl", hash = "sha256:22f127ff5da052ffba73af146d7d61db874f5edb468b36c9cb0b857316a21b3d"},
+ {file = "zstandard-0.15.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9867206093d7283d7de01bd2bf60389eb4d19b67306a0a763d1a8a4dbe2fb7c3"},
+ {file = "zstandard-0.15.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f98fc5750aac2d63d482909184aac72a979bfd123b112ec53fd365104ea15b1c"},
+ {file = "zstandard-0.15.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:3fe469a887f6142cc108e44c7f42c036e43620ebaf500747be2317c9f4615d4f"},
+ {file = "zstandard-0.15.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:edde82ce3007a64e8434ccaf1b53271da4f255224d77b880b59e7d6d73df90c8"},
+ {file = "zstandard-0.15.2-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:855d95ec78b6f0ff66e076d5461bf12d09d8e8f7e2b3fc9de7236d1464fd730e"},
+ {file = "zstandard-0.15.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:d25c8eeb4720da41e7afbc404891e3a945b8bb6d5230e4c53d23ac4f4f9fc52c"},
+ {file = "zstandard-0.15.2-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:2353b61f249a5fc243aae3caa1207c80c7e6919a58b1f9992758fa496f61f839"},
+ {file = "zstandard-0.15.2-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:6cc162b5b6e3c40b223163a9ea86cd332bd352ddadb5fd142fc0706e5e4eaaff"},
+ {file = "zstandard-0.15.2-cp38-cp38-win32.whl", hash = "sha256:94d0de65e37f5677165725f1fc7fb1616b9542d42a9832a9a0bdcba0ed68b63b"},
+ {file = "zstandard-0.15.2-cp38-cp38-win_amd64.whl", hash = "sha256:b0975748bb6ec55b6d0f6665313c2cf7af6f536221dccd5879b967d76f6e7899"},
+ {file = "zstandard-0.15.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eda0719b29792f0fea04a853377cfff934660cb6cd72a0a0eeba7a1f0df4a16e"},
+ {file = "zstandard-0.15.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8fb77dd152054c6685639d855693579a92f276b38b8003be5942de31d241ebfb"},
+ {file = "zstandard-0.15.2-cp39-cp39-manylinux1_i686.whl", hash = "sha256:24cdcc6f297f7c978a40fb7706877ad33d8e28acc1786992a52199502d6da2a4"},
+ {file = "zstandard-0.15.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:69b7a5720b8dfab9005a43c7ddb2e3ccacbb9a2442908ae4ed49dd51ab19698a"},
+ {file = "zstandard-0.15.2-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:dc8c03d0c5c10c200441ffb4cce46d869d9e5c4ef007f55856751dc288a2dffd"},
+ {file = "zstandard-0.15.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:3e1cd2db25117c5b7c7e86a17cde6104a93719a9df7cb099d7498e4c1d13ee5c"},
+ {file = "zstandard-0.15.2-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:ab9f19460dfa4c5dd25431b75bee28b5f018bf43476858d64b1aa1046196a2a0"},
+ {file = "zstandard-0.15.2-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:f36722144bc0a5068934e51dca5a38a5b4daac1be84f4423244277e4baf24e7a"},
+ {file = "zstandard-0.15.2-cp39-cp39-win32.whl", hash = "sha256:378ac053c0cfc74d115cbb6ee181540f3e793c7cca8ed8cd3893e338af9e942c"},
+ {file = "zstandard-0.15.2-cp39-cp39-win_amd64.whl", hash = "sha256:9ee3c992b93e26c2ae827404a626138588e30bdabaaf7aa3aa25082a4e718790"},
+ {file = "zstandard-0.15.2.tar.gz", hash = "sha256:52de08355fd5cfb3ef4533891092bb96229d43c2069703d4aff04fdbedf9c92f"},
+]
diff --git a/pyproject.toml b/pyproject.toml
index de3d95ba..6c86a715 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -11,0 +12,14 @@ uvicorn = "^0.14.0"
+Pillow = "^8.3.1"
+trec-car-tools = {path = "vendors/trec-car-tools/python3"}
+apache-beam = "^2.31.0"
+conllu = "^4.4"
+kss = "^2.5.1"
+lm-dataformat = "^0.0.19"
+lxml = "^4.6.3"
+nlp = "^0.4.0"
+openpyxl = "^3.0.7"
+py7zr = "^0.16.1"
+tensorflow = "^2.5.0"
+transformers = "^4.9.1"
+wget = "^3.2"
+kenlm = {url = "https://github.com/kpu/kenlm/archive/master.zip"}
diff --git a/vendors/trec-car-tools/.gitignore b/vendors/trec-car-tools/.gitignore
new file mode 100644
index 00000000..aad927c9
--- /dev/null
+++ b/vendors/trec-car-tools/.gitignore
@@ -0,0 +1,31 @@
+# Maven template
+target/
+pom.xml.tag
+pom.xml.releaseBackup
+pom.xml.versionsBackup
+pom.xml.next
+release.properties
+dependency-reduced-pom.xml
+buildNumber.properties
+.mvn/timing.properties
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Sphinx documentation
+python3/_build/
diff --git a/vendors/trec-car-tools/.travis.yml b/vendors/trec-car-tools/.travis.yml
new file mode 100644
index 00000000..3db709ac
--- /dev/null
+++ b/vendors/trec-car-tools/.travis.yml
@@ -0,0 +1,20 @@
+language: python
+before_install:
+ - sudo apt-get -qq update
+ - sudo apt-get install -y maven
+python:
+ - "3.6"
+ - "3.7"
+install:
+ - pip install -r python3/requirements.txt
+script:
+ - pip install python3/
+ - pushd trec-car-tools-example; mvn install; popd
+
+ - curl http://trec-car.cs.unh.edu/datareleases/v2.0/test200.v2.0.tar.xz | tar -xJ
+ - pages=test200/test200-train/train.pages.cbor outlines=test200/test200-train/train.pages.cbor-outlines.cbor paragraphs=test200/test200-train/train.pages.cbor-paragraphs.cbor bash .travis/test.sh
+
+ - curl http://trec-car.cs.unh.edu/datareleases/v1.5/test200-v1.5.tar.xz | tar -xJ
+ - pages=test200/train.test200.cbor outlines=test200/train.test200.cbor paragraphs=test200/train.test200.cbor.paragraphs bash .travis/test.sh
diff --git a/vendors/trec-car-tools/.travis/test.sh b/vendors/trec-car-tools/.travis/test.sh
new file mode 100755
index 00000000..0aed3e4f
--- /dev/null
+++ b/vendors/trec-car-tools/.travis/test.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -ex
+
+python3/test.py pages $pages >/dev/null
+python3/test.py outlines $outlines >/dev/null
+python3/test.py paragraphs $paragraphs >/dev/null
+
+cd trec-car-tools-example/
+mvn org.codehaus.mojo:exec-maven-plugin:1.5.0:java -Dexec.mainClass="edu.unh.cs.treccar_v2.read_data.ReadDataTest" -Dexec.args="header ../$pages" >/dev/null
+mvn org.codehaus.mojo:exec-maven-plugin:1.5.0:java -Dexec.mainClass="edu.unh.cs.treccar_v2.read_data.ReadDataTest" -Dexec.args="pages ../$pages" >/dev/null
+mvn org.codehaus.mojo:exec-maven-plugin:1.5.0:java -Dexec.mainClass="edu.unh.cs.treccar_v2.read_data.ReadDataTest" -Dexec.args="outlines ../$outlines" >/dev/null
+mvn org.codehaus.mojo:exec-maven-plugin:1.5.0:java -Dexec.mainClass="edu.unh.cs.treccar_v2.read_data.ReadDataTest" -Dexec.args="paragraphs ../$paragraphs" >/dev/null
diff --git a/vendors/trec-car-tools/LICENSE b/vendors/trec-car-tools/LICENSE
new file mode 100644
index 00000000..bc025538
--- /dev/null
+++ b/vendors/trec-car-tools/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2017, Laura Dietz and Ben Gamari
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendors/trec-car-tools/README.mkd b/vendors/trec-car-tools/README.mkd
new file mode 100644
index 00000000..cfdedf54
--- /dev/null
+++ b/vendors/trec-car-tools/README.mkd
@@ -0,0 +1,162 @@
+# TREC Car Tools
+
+[](https://travis-ci.org/TREMA-UNH/trec-car-tools)
+
+Development tools for participants of the TREC Complex Answer Retrieval track.
+
+Data release support for v1.5 and v2.0.
+
+Note that, in order to allow compiling your project against both trec-car format versions, the Maven artifact id was changed to `treccar-tools-v2` with version 2.0, and the package path changed to `treccar_v2`.
+
+
+Current support for
+- Python 3.6
+- Java 1.8
+
+If you are using [Anaconda](https://www.anaconda.com/), install the `cbor`
+library for Python 3.6:
+```
+conda install -c laura-dietz cbor=1.0.0
+```
+
+## How to use the Python bindings for trec-car-tools?
+
+1. Get the data from [http://trec-car.cs.unh.edu](http://trec-car.cs.unh.edu)
+2. Clone this repository
+3. `python setup.py install`
+
+Look at test.py for an example of how to access the data.
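+
+For instance, a minimal sketch (the file name `train.pages.cbor` is only a placeholder; use any pages file from the data releases):
+
+```python
+from trec_car.read_data import iter_pages
+
+# Print the name of every page in a pages file.
+with open('train.pages.cbor', 'rb') as f:
+    for page in iter_pages(f):
+        print(page.page_name)
+```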
+
+
+## How to use the java 1.8 (or higher) bindings for trec-car-tools through maven?
+
+add this to your project's pom.xml file (or the equivalent for gradle or sbt):
+
+~~~~
+ <repositories>
+ <repository>
+ <id>jitpack.io</id>
+ <url>https://jitpack.io</url>
+ </repository>
+ </repositories>
+~~~~
+
+add the trec-car-tools dependency:
+
+~~~~
+ <dependency>
+ <groupId>com.github.TREMA-UNH</groupId>
+ <artifactId>trec-car-tools-java</artifactId>
+ <version>17</version>
+ </dependency>
+~~~~
+
+compile your project with `mvn compile`
+
+
+
+
+## Tool support
+
+This package provides support for the following activities.
+
+- `read_data`: Reading the provided paragraph collection, outline collections, and training articles
+- `format_runs`: writing submission files
+
+
+## Reading Data
+
+If you use Python or Java, please use `trec-car-tools`; there is no need to understand the following. We provide Haskell bindings upon request. If you are programming in a different language, you can use any CBOR library and decode the grammar below.
+
+[CBOR](https://cbor.io) is similar to JSON, but it is a binary format that compresses better and avoids text file encoding issues.
+
+Articles, outlines, and paragraphs are all described with CBOR following this grammar. Wikipedia-internal hyperlinks are preserved through `ParaLink`s.
+
+
+~~~~~
+ Page -> $pageName $pageId [PageSkeleton] PageType PageMetadata
+ PageType -> ArticlePage | CategoryPage | RedirectPage ParaLink | DisambiguationPage
+ PageMetadata -> RedirectNames DisambiguationNames DisambiguationIds CategoryNames CategoryIds InlinkIds InlinkAnchors
+ RedirectNames -> [$pageName]
+ DisambiguationNames -> [$pageName]
+ DisambiguationIds -> [$pageId]
+ CategoryNames -> [$pageName]
+ CategoryIds -> [$pageId]
+ InlinkIds -> [$pageId]
+ InlinkAnchors -> [$anchorText]
+
+ PageSkeleton -> Section | Para | Image | ListItem
+ Section -> $sectionHeading [PageSkeleton]
+ Para -> Paragraph
+ Paragraph -> $paragraphId, [ParaBody]
+ ListItem -> $nestingLevel, Paragraph
+ Image -> $imageURL [PageSkeleton]
+ ParaBody -> ParaText | ParaLink
+ ParaText -> $text
+ ParaLink -> $targetPage $targetPageId $linkSection $anchorText
+~~~~~
+
+You can use any CBOR serialization library. Below is a convenience library for reading the data into Python (3.5):
+
+- `./read_data/trec_car_read_data.py`
+  Python 3.5 convenience library for reading the input data (in CBOR format).
+  - If you use Anaconda, please install the cbor library with `conda install -c auto cbor=1.0`
+  - Otherwise install it with `pip install cbor`
+
+## Ranking Results
+
+Given an outline, your task is to produce one ranking for each section $section (representing an information need in traditional IR evaluations).
+
+Each ranked element is an (entity,passage) pair, meaning that this passage is relevant for the section, because it features a relevant entity. "Relevant" means that the entity or passage must/should/could be listed in this section.
+
+The section is represented by the path of headings in the outline `$pageTitle/$heading1/$heading1.1/.../$section` in URL encoding.
+
+The entity is represented by the DBpedia entity id (derived from the Wikipedia URL). Optionally, the entity can be omitted.
+
+The passage is represented by the passage id given in the passage corpus (an MD5 hash of the content). Optionally, the passage can be omitted.
+
+
+The results are provided in a format that is similar to the "trec\_results file format" of [trec_eval](http://trec.nist.gov/trec_eval). More info on how to use [trec_eval](http://stackoverflow.com/questions/4275825/how-to-evaluate-a-search-retrieval-engine-using-trec-eval) and [source](https://github.com/usnistgov/trec_eval).
+
+Example of ranking format
+~~~~~
+ Green\_sea\_turtle\Habitat Pelagic\_zone 12345 0 27409 myTeam
+ $qid $entity $passageId rank sim run_id
+~~~~~
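+
+As a plain-Python illustration (a sketch only, not part of the library; the helper name `format_ranking_line` is made up here):
+
+```python
+# Join the six run-file fields with single spaces.
+def format_ranking_line(qid, entity, passage_id, rank, score, run_id):
+    return ' '.join(str(x) for x in (qid, entity, passage_id, rank, score, run_id))
+
+print(format_ranking_line('Green_sea_turtle/Habitat', 'Pelagic_zone', '12345', 0, 27409, 'myTeam'))
+```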
+
+
+
+## Integration with other tools
+
+It is recommended to use the `format_runs` package to write run files. Here is an example:
+
+
+    with open('runfile', mode='w', encoding='UTF-8') as f:
+        for page in pages:
+            for section_path in page.flat_headings_list():
+                query_id = "/".join([page.page_id] + [section.headingId for section in section_path])
+                # scored: a list of (paragraph, score, rank) triples produced by your ranker
+                ranking = [RankingEntry(query_id, p.para_id, r, s, paragraph_content=p) for p, s, r in scored]
+                format_run(f, ranking, exp_name='test')
+
+This ensures that the output is correctly formatted to work with `trec_eval` and the provided qrels file.
+
+Run [trec_eval](https://github.com/usnistgov/trec_eval/blob/master/README) version 9.0.4 as usual:
+
+ trec_eval -q release.qrel runfile > run.eval
+
+The output is compatible with the eval plotting package [minir-plots](https://github.com/laura-dietz/minir-plots). For example run
+
+ python column.py --out column-plot.pdf --metric map run.eval
+ python column_difficulty.py --out column-difficulty-plot.pdf --metric map run.eval run2.eval
+
+Moreover, you can compute success statistics such as hurts/helps or a paired t-test as follows.
+
+ python hurtshelps.py --metric map run.eval run2.eval
+ python paired-ttest.py --metric map run.eval run2.eval
+
+
+
+
+<a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-sa/3.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" href="http://purl.org/dc/dcmitype/Dataset" property="dct:title" rel="dct:type">TREC-CAR Dataset</span> by <a xmlns:cc="http://creativecommons.org/ns#" href="trec-car.cs.unh.edu" property="cc:attributionName" rel="cc:attributionURL">Laura Dietz, Ben Gamari</a> is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative Commons Attribution-ShareAlike 3.0 Unported License</a>.<br />Based on a work at <a xmlns:dct="http://purl.org/dc/terms/" href="www.wikipedia.org" rel="dct:source">www.wikipedia.org</a>.
diff --git a/vendors/trec-car-tools/python3/Makefile b/vendors/trec-car-tools/python3/Makefile
new file mode 100644
index 00000000..4f1a4bc4
--- /dev/null
+++ b/vendors/trec-car-tools/python3/Makefile
@@ -0,0 +1,225 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " applehelp to make an Apple Help Book"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " epub3 to make an epub3"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " xml to make Docutils-native XML files"
+ @echo " pseudoxml to make pseudoxml-XML files for display purposes"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+ @echo " coverage to run coverage check of the documentation (if enabled)"
+ @echo " dummy to check syntax errors of document sources"
+
+.PHONY: clean
+clean:
+ rm -rf $(BUILDDIR)/*
+
+.PHONY: html
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+.PHONY: dirhtml
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+.PHONY: singlehtml
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+.PHONY: pickle
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+.PHONY: json
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+.PHONY: htmlhelp
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+.PHONY: qthelp
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/trec-car-tools.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/trec-car-tools.qhc"
+
+.PHONY: applehelp
+applehelp:
+ $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
+ @echo
+ @echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
+ @echo "N.B. You won't be able to view it unless you put it in" \
+ "~/Library/Documentation/Help or install it in your application" \
+ "bundle."
+
+.PHONY: devhelp
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/trec-car-tools"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/trec-car-tools"
+ @echo "# devhelp"
+
+.PHONY: epub
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+.PHONY: epub3
+epub3:
+ $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3
+ @echo
+ @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3."
+
+.PHONY: latex
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+.PHONY: latexpdf
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+.PHONY: latexpdfja
+latexpdfja:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through platex and dvipdfmx..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+.PHONY: text
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+.PHONY: man
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+.PHONY: texinfo
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+.PHONY: info
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+.PHONY: gettext
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+.PHONY: changes
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+.PHONY: linkcheck
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+.PHONY: doctest
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
+
+.PHONY: coverage
+coverage:
+ $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
+ @echo "Testing of coverage in the sources finished, look at the " \
+ "results in $(BUILDDIR)/coverage/python.txt."
+
+.PHONY: xml
+xml:
+ $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+ @echo
+ @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+.PHONY: pseudoxml
+pseudoxml:
+ $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+ @echo
+ @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
+
+.PHONY: dummy
+dummy:
+ $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy
+ @echo
+ @echo "Build finished. Dummy builder generates no files."
diff --git a/vendors/trec-car-tools/python3/README.mkd b/vendors/trec-car-tools/python3/README.mkd
new file mode 100644
index 00000000..0cd79b04
--- /dev/null
+++ b/vendors/trec-car-tools/python3/README.mkd
@@ -0,0 +1,4 @@
+# TREC CAR Python library
+
+See [ReadTheDocs](https://trec-car-tools.readthedocs.io/en/latest/) for Python
+usage and reference documentation.
diff --git a/vendors/trec-car-tools/python3/annotated_content.py b/vendors/trec-car-tools/python3/annotated_content.py
new file mode 100644
index 00000000..25998e6b
--- /dev/null
+++ b/vendors/trec-car-tools/python3/annotated_content.py
@@ -0,0 +1,115 @@
+# This is an example on how to access content of TREC CAR data
+# and convert it into a string of content with offset-based entity link annotations.
+# Feel free to use the AnnotatedContentBuilder
+# I highly recommend that you implement your own version of `annotate_section_content`
+# because you need to make decisions on which content to include, where to
+# further provide newlines, etc.
+# Keep in mind: whatever you add to your output needs to go through the
+# AnnotatedContentBuilder or offsets won't match.
+# You can add all kinds of semantic annotations on offsets. However, in the current
+# implementation they must be non-overlapping.
+
+
+
+from trec_car.read_data import *
+
+class Annotation():
+ """Wraps a semantic annotation with offset information """
+ def __init__(self, start, end, annotation):
+ self.start = start
+ self.end = end
+ self.annotation = annotation
+
+class AnnotatedContentBuilder():
+ """Builds a string iteratively and keeps track of offsets.
+ Chunks of plain text and semantic annotations need to added in order
+ """
+ def __init__(self):
+ self.content = ""
+ self.offset = 0
+ self.annotations = []
+
+ def append(self, chunk, optAnnotation=None):
+ start = self.offset
+ self.content += chunk
+ self.offset = len(self.content)
+ end = self.offset
+ if optAnnotation:
+ self.annotations.append( Annotation(start=start, end=end, annotation=optAnnotation))
+
+ def get_content(self):
+ return self.content
+
+ def get_annotations(self):
+ return self.annotations
+
+
+def annotate_section_content(section):
+ """ Example implementation to break out the content of a (top-level) section with entity links """
+ def annotated_content(skel, contentBuilder):
+ if isinstance(skel, Section):
+ contentBuilder.append('\n')
+ contentBuilder.append(skel.heading)
+ contentBuilder.append('\n')
+ for child in skel.children:
+ annotated_content(child, contentBuilder)
+ # contentBuilder.append('\n')
+
+ elif isinstance(skel, List):
+ annotated_content(skel.body, contentBuilder)
+
+ elif isinstance(skel, Para):
+ for body in skel.paragraph.bodies:
+ annotated_content_bodies(body, contentBuilder)
+ contentBuilder.append('\n')
+ else:
+ pass
+
+ def annotated_content_bodies(body, contentBuilder):
+ if isinstance(body, ParaLink):
+ contentBuilder.append(body.get_text(), body)
+
+ elif isinstance(body, ParaText):
+ contentBuilder.append(body.get_text())
+
+ else:
+ pass
+
+ contentBuilder = AnnotatedContentBuilder()
+ for child in section.children:
+ annotated_content(child, contentBuilder)
+ return contentBuilder
+
+
+
+
+
+if __name__ == '__main__':
+
+ import sys
+
+    if len(sys.argv) < 2:
+ print("usage ",sys.argv[0]," articlefile")
+ exit()
+
+ articles=sys.argv[1]
+
+
+
+ with open(articles, 'rb') as f:
+ for p in iter_pages(f):
+ print('\npagename:', p.page_name)
+ print('\npageid:', p.page_id)
+
+
+ print("get content of top-level sections, with subsections inlined and broken out entity offsets")
+ for section in p.child_sections:
+
+ print(" == ",section.heading ," ==")
+
+ builder = annotate_section_content(section)
+ print(builder.get_content())
+ for ann in builder.get_annotations():
+ print(ann.start, ann.end, ann.annotation)
+
+ print()
diff --git a/vendors/trec-car-tools/python3/conf.py b/vendors/trec-car-tools/python3/conf.py
new file mode 100644
index 00000000..5c683ae3
--- /dev/null
+++ b/vendors/trec-car-tools/python3/conf.py
@@ -0,0 +1,236 @@
+# -*- coding: utf-8 -*-
+#
+# trec-car-tools documentation build configuration file, created by
+# sphinx-quickstart on Fri Nov 10 09:43:28 2017.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+#
+
+import os
+import sys
+sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'sphinx.ext.coverage',
+ 'sphinx.ext.mathjax',
+ 'sphinx.ext.viewcode',
+ 'sphinx.ext.autodoc',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+#
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The encoding of source files.
+#
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'trec-car-tools'
+copyright = u'2017, Ben Gamari, Laura Dietz'
+author = u'Ben Gamari, Laura Dietz'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = u'1.0'
+# The full version, including alpha/beta/rc tags.
+release = u'1.0'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#
+# today = ''
+#
+# Else, today_fmt is used as the format for a strftime call.
+#
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This patterns also effect to html_static_path and html_extra_path
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'alabaster'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#
+# html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = []
+
+# The name for this set of Sphinx documents.
+# "<project> v<release> documentation" by default.
+#
+# html_title = u'trec-car-tools v1.0'
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#
+# html_logo = None
+
+# The name of an image file (relative to this directory) to use as a favicon of
+# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#
+# html_extra_path = []
+
+# If not None, a 'Last updated on:' timestamp is inserted at every page
+# bottom, using the given strftime format.
+# The empty string is equivalent to '%b %d, %Y'.
+#
+# html_last_updated_fmt = None
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+#
+# html_domain_indices = True
+
+# If false, no index is generated.
+#
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
+#
+# html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# 'ja' uses this config value.
+# 'zh' user can custom change `jieba` dictionary path.
+#
+# html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#
+# html_search_scorer = 'scorer.js'
+
diff --git a/vendors/trec-car-tools/python3/format_runs_test.py b/vendors/trec-car-tools/python3/format_runs_test.py
new file mode 100644
index 00000000..54cae7c2
--- /dev/null
+++ b/vendors/trec-car-tools/python3/format_runs_test.py
@@ -0,0 +1,40 @@
+from trec_car.format_runs import *
+from trec_car.read_data import *
+import itertools
+import sys
+
+if len(sys.argv) < 4:
+ print("usage ",sys.argv[0]," outlinefile paragraphfile out")
+ exit()
+
+query_cbor=sys.argv[1]
+psg_cbor=sys.argv[2]
+out=sys.argv[3]
+
+pages = []
+with open(query_cbor, 'rb') as f:
+ pages = [p for p in itertools.islice(iter_annotations(f), 0, 1000)]
+
+
+paragraphs = []
+with open(psg_cbor, 'rb') as f:
+    d = {p.para_id: p for p in itertools.islice(iter_paragraphs(f), 0, 500, 5)}
+ paragraphs = d.values()
+
+print("pages: ", len(pages))
+print("paragraphs: ", len(paragraphs))
+
+mock_ranking = [(p, 1.0 / (r + 1), (r + 1)) for p, r in zip(paragraphs, range(0, 1000))]
+
+with open(out,mode='w', encoding='UTF-8') as f:
+ writer = f
+ numqueries = 0
+ for page in pages:
+ for section_path in page.flat_headings_list():
+ numqueries += 1
+ query_id = "/".join([page.page_id]+[section.headingId for section in section_path])
+ ranking = [RankingEntry(query_id, p.para_id, r, s, paragraph_content=p) for p, s, r in mock_ranking]
+ format_run(writer, ranking, exp_name='test')
+
+ print("num queries = ", numqueries)
diff --git a/vendors/trec-car-tools/python3/index.rst b/vendors/trec-car-tools/python3/index.rst
new file mode 100644
index 00000000..ec6434e8
--- /dev/null
+++ b/vendors/trec-car-tools/python3/index.rst
@@ -0,0 +1,141 @@
+trec-car-tools
+==============
+
+This is the documentation for ``trec-car-tools``, a Python 3 library for reading
+and manipulating the `TREC Complex Answer Retrieval
+<http://trec-car.cs.unh.edu/>`_ (CAR) dataset.
+
+Getting started
+---------------
+
+This library requires Python 3.3 or greater. It can be installed with
+``setup.py`` ::
+
+ python3 ./setup.py install
+
+If you are using `Anaconda <https://www.anaconda.com/>`_, install the ``cbor``
+library for Python 3.6: ::
+
+ conda install -c laura-dietz cbor=1.0.0
+
+Once you have installed the library, you can download a `dataset
+<http://trec-car.cs.unh.edu/datareleases/>`_ and start playing.
+
+Reading the dataset
+-------------------
+
+The TREC CAR dataset consists of a number of different exports. These include,
+
+ * Annotations files (also called "pages files") contain full Wikipedia pages and their contents
+ * Paragraphs files contain only paragraphs disembodied from their pages
+ * Outlines files contain only the section structure of pages and no textual content
+
+To read an annotations file use the :func:`iter_annotations` function:
+
+.. autofunction:: trec_car.read_data.iter_annotations
+
+For instance, to list the page IDs of pages in a pages file one might write
+
+.. code-block:: python
+
+ for page in read_data.iter_annotations(open('train.test200.cbor', 'rb')):
+        print(page.page_id)
+
+Likewise, to read a paragraphs file the :func:`iter_paragraphs` function is
+provided
+
+.. autofunction:: trec_car.read_data.iter_paragraphs
+
+To list the text of all paragraphs in a paragraphs file one might write,
+
+.. code-block:: python
+
+ for para in read_data.iter_paragraphs(open('train.test200.cbor', 'rb')):
+        print(para.get_text())
+
+Basic types
+-----------
+
+.. class:: trec_car.read_data.PageName
+
+ :class:`PageName` represents the natural language "name" of a page. Note that
+ this means that it is not necessarily unique. If you need a unique handle for
+ a page use :class:`PageId`.
+
+.. class:: trec_car.read_data.PageId
+
+ A :class:`PageId` is the unique identifier for a :class:`Page`.
+
+The :class:`Page` type
+----------------------
+
+.. autoclass:: trec_car.read_data.Page
+ :members:
+
+.. autoclass:: trec_car.read_data.PageMetadata
+ :members:
+
+Types of pages
+~~~~~~~~~~~~~~
+
+.. autoclass:: trec_car.read_data.PageType
+
+    The abstract base class.
+
+.. autoclass:: trec_car.read_data.ArticlePage
+.. autoclass:: trec_car.read_data.CategoryPage
+.. autoclass:: trec_car.read_data.DisambiguationPage
+.. autoclass:: trec_car.read_data.RedirectPage
+ :members:
+
+Page structure
+--------------
+
+The high-level structure of a :class:`Page` is captured by the subclasses of
+:class:`PageSkeleton`.
+
+.. autoclass:: trec_car.read_data.PageSkeleton
+ :members:
+
+.. autoclass:: trec_car.read_data.Para
+ :members:
+ :show-inheritance:
+
+.. autoclass:: trec_car.read_data.Section
+ :members:
+ :show-inheritance:
+
+.. autoclass:: trec_car.read_data.List
+ :members:
+ :show-inheritance:
+
+.. autoclass:: trec_car.read_data.Image
+ :members:
+ :show-inheritance:
+
+Paragraph contents
+------------------
+
+.. autoclass:: trec_car.read_data.Paragraph
+ :members:
+
+.. autoclass:: trec_car.read_data.ParaBody
+ :members:
+
+.. autoclass:: trec_car.read_data.ParaText
+ :members:
+ :show-inheritance:
+
+.. autoclass:: trec_car.read_data.ParaLink
+ :members:
+ :show-inheritance:
+
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/vendors/trec-car-tools/python3/read_data_test.py b/vendors/trec-car-tools/python3/read_data_test.py
new file mode 100644
index 00000000..4ebecef0
--- /dev/null
+++ b/vendors/trec-car-tools/python3/read_data_test.py
@@ -0,0 +1,97 @@
+from trec_car.read_data import *
+import sys
+
+if len(sys.argv) < 2 or len(sys.argv) > 4:
+ print("usage ",sys.argv[0]," articlefile [outlinefile paragraphfile]")
+ exit()
+
+articles=sys.argv[1]
+
+
+# to open either pages or outlines use iter_annotations
+# See docstrings of respective objects for more documentation.
+
+with open(articles, 'rb') as f:
+ for p in iter_pages(f):
+ print('\npagename:', p.page_name)
+ print('\npageid:', p.page_id)
+ print('\nmeta:', p.page_meta)
+
+ # get infoboxes
+ print('\ninfoboxes:')
+ for box in p.get_infoboxes():
+ print(box)
+ print()
+
+ # get one data structure with nested (heading, [children]) pairs
+ headings = p.nested_headings()
+ # print("headings", [section.heading for (section, content) in headings])
+
+ print("sections with content: ")
+ for (section, _) in headings:
+ if section:
+ print (section.get_text())
+
+ print("sections with content: ")
+ for section in p.child_sections:
+ if section:
+ print ('== ', section.heading ,' ==')
+ print (section.get_text_with_headings(False))
+
+ if len(p.outline())>0:
+ print( p.outline()[0].__str__())
+
+ print('deep headings= ', [ (str(section.heading), len(children)) for (section, children) in p.deep_headings_list()])
+
+ print('flat headings= ' ,["/".join([str(section.heading) for section in sectionpath]) for sectionpath in p.flat_headings_list()])
+
+
+
+if len(sys.argv) < 4:
+    sys.exit()
+
+outlines=sys.argv[2]
+paragraphs=sys.argv[3]
+
+
+
+with open(outlines, 'rb') as f:
+ for p in iter_annotations(f):
+ print('\npagename:', p.page_name)
+
+ # get one data structure with nested (heading, [children]) pairs
+ headings = p.nested_headings()
+ print('headings= ', [ (str(section.heading), len(children)) for (section, children) in headings])
+
+ if len(p.outline())>2:
+ print('heading 1=', p.outline()[0])
+
+ print('deep headings= ', [ (str(section.heading), len(children)) for (section, children) in p.deep_headings_list()])
+
+ print('flat headings= ' ,["/".join([str(section.heading) for section in sectionpath]) for sectionpath in p.flat_headings_list()])
+
+# exit(0)
+
+
+with open(paragraphs, 'rb') as f:
+ for p in iter_paragraphs(f):
+ print('\n', p.para_id, ':')
+
+ # Print just the text
+ texts = [elem.text if isinstance(elem, ParaText)
+ else elem.anchor_text
+ for elem in p.bodies]
+ print(' '.join(texts))
+
+ # Print just the linked entities
+ entities = [elem.page
+ for elem in p.bodies
+ if isinstance(elem, ParaLink)]
+ print(entities)
+
+ # Print text interspersed with links as pairs (text, link)
+ mixed = [(elem.anchor_text, elem.page) if isinstance(elem, ParaLink)
+ else (elem.text, None)
+ for elem in p.bodies]
+ print(mixed)
+
diff --git a/vendors/trec-car-tools/python3/requirements.txt b/vendors/trec-car-tools/python3/requirements.txt
new file mode 100644
index 00000000..3e51d3af
--- /dev/null
+++ b/vendors/trec-car-tools/python3/requirements.txt
@@ -0,0 +1 @@
+cbor>=1.0.0
diff --git a/vendors/trec-car-tools/python3/setup.py b/vendors/trec-car-tools/python3/setup.py
new file mode 100755
index 00000000..56ff2e47
--- /dev/null
+++ b/vendors/trec-car-tools/python3/setup.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python3
+from setuptools import setup
+
+setup(
+ name='trec-car-tools',
+ version='2.5.4',
+ packages=['trec_car'],
+ url='https://github.com/TREMA-UNH/trec-car-tools/python3',
+ # download_url='https://github.com/TREMA-UNH/trec-car-tools/archive/2.0.tar.gz',
+ keywords=['wikipedia','complex answer retrieval','trec car'],
+ license='BSD 3-Clause',
+ author='laura-dietz',
+ author_email='[email protected]',
+ description='Support tools for TREC CAR participants. Also see trec-car.cs.unh.edu',
+ install_requires=['cbor>=1.0.0', 'numpy>=1.11.2'],
+ python_requires='>=3.6',
+ classifiers=[
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
+ 'Programming Language :: Python :: 3.8'
+ ]
+)
diff --git a/vendors/trec-car-tools/python3/test.py b/vendors/trec-car-tools/python3/test.py
new file mode 100755
index 00000000..2815766e
--- /dev/null
+++ b/vendors/trec-car-tools/python3/test.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python3
+
+from trec_car.read_data import *
+import argparse
+
+def dump_pages(args):
+ for p in iter_pages(args.file):
+ print(p.page_meta)
+ print(p)
+ print("\n".join([("%s %s"% (heading,content)) for (heading,content) in p.deep_headings_list()]))
+
+def dump_outlines(args):
+ for p in iter_outlines(args.file):
+ print(p.page_meta)
+ print(p)
+ print("\n".join([("%s"% heading ) for (heading,empty_content) in p.deep_headings_list()]))
+
+def dump_paragraphs(args):
+ for p in iter_paragraphs(args.file):
+ print(p)
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ subparser = parser.add_subparsers()
+ p = subparser.add_parser('pages', help='Dump pages')
+ p.add_argument('file', type=argparse.FileType('rb'), help='A pages file')
+ p.set_defaults(func=dump_pages)
+
+ p = subparser.add_parser('outlines', help='Dump outlines')
+ p.add_argument('file', type=argparse.FileType('rb'), help='An outlines file')
+ p.set_defaults(func=dump_outlines)
+
+ p = subparser.add_parser('paragraphs', help='Dump paragraphs')
+ p.add_argument('file', type=argparse.FileType('rb'), help='A paragraphs file')
+ p.set_defaults(func=dump_paragraphs)
+
+ args = parser.parse_args()
+ if 'func' not in args:
+ parser.print_usage()
+ else:
+ args.func(args)
diff --git a/vendors/trec-car-tools/python3/trec_car/__init__.py b/vendors/trec-car-tools/python3/trec_car/__init__.py
new file mode 100644
index 00000000..ccb9b451
--- /dev/null
+++ b/vendors/trec-car-tools/python3/trec_car/__init__.py
@@ -0,0 +1,5 @@
+"""__init__ module for trec-car-tools, imports all necessary functions for reading cbor data provided in the TREC CAR"""
+
+__version__ = 1.0
+
+__all__ = ['read_data', 'format_runs']
diff --git a/vendors/trec-car-tools/python3/trec_car/format_runs.py b/vendors/trec-car-tools/python3/trec_car/format_runs.py
new file mode 100644
index 00000000..cc859620
--- /dev/null
+++ b/vendors/trec-car-tools/python3/trec_car/format_runs.py
@@ -0,0 +1,59 @@
+import csv
+import urllib.parse
+from typing import *
+
+
+
+def encode_section_path(page_id, section_path):
+ elements = [page_id] + section_path
+
+ return '/'.join([urllib.parse.quote(elem) for elem in elements])
+ # return urllib.parse.urlencode({'page':page_id, 'sectionpath':section_path})
+
+def encode_page_only(page_id):
+ return urllib.parse.quote(page_id)
+
+
+class RankingEntry(object):
+ """
+    One entry of a ranking: a (query, paragraph) pair together with its rank and score.
+
+    Attributes:
+      paragraph_content The content of the paragraph (which in turn contains a list of ParaBodys)
+ """
+ def __init__(self, query_id:str, paragraph_id:str, rank:int, score:float, exp_name:str=None, paragraph_content:str=None):
+ assert(rank > 0)
+ self.query_id = query_id
+ self.paragraph_id = paragraph_id
+ self.rank = rank
+ self.score = score
+ self.exp_name = exp_name
+ self.paragraph_content = paragraph_content
+
+ def to_trec_eval_row(self, alternative_exp_name=None, page_only=False):
+ exp_name_ = alternative_exp_name if alternative_exp_name is not None \
+ else self.exp_name
+ return [self.query_id, 'Q0', self.paragraph_id, self.rank, self.score, exp_name_]
+
+#
+# csv.register_dialect(
+# 'trec_eval',
+# delimiter = ' ',
+# quotechar = '"',
+# doublequote = False,
+# skipinitialspace = False,
+# lineterminator = '\n',
+# quoting = csv.QUOTE_NONE)
+#
+#
+# def configure_csv_writer(fileobj):
+# 'Convenience method to create a csv writer with the trec_eval_dialect'
+# return csv.writer(fileobj, dialect='trec_eval')
+#
+
+def format_run(writer, ranking_of_paragraphs, exp_name=None):
+    'Write one ranking to the given writer (a file-like object).'
+ for elem in ranking_of_paragraphs:
+ # query-number Q0 document-id rank score Exp
+ writer.write(" ".join([str(x) for x in elem.to_trec_eval_row(exp_name)]))
+ writer.write("\n")
diff --git a/vendors/trec-car-tools/python3/trec_car/read_data.py b/vendors/trec-car-tools/python3/trec_car/read_data.py
new file mode 100644
index 00000000..6ba92e94
--- /dev/null
+++ b/vendors/trec-car-tools/python3/trec_car/read_data.py
@@ -0,0 +1,778 @@
+# Use python 3.6 or higher
+# obsolete: conda install -c auto cbor=0.1.4
+
+from __future__ import print_function
+
+from abc import abstractmethod
+
+import cbor
+import itertools
+import typing
+
+PageId = str
+PageName = str
+
+class CborElementNotDefinedException(Exception):
+ def __init__(self, cbor):
+ self.cbor = cbor
+        Exception.__init__(self, 'unknown CBOR element encountered: %s' % str(cbor))
+
+class WrongCarFileException(Exception):
+ def __init__(self, file_type, expected_file_types):
+ self.file_type = file_type
+ self.expected_file_types = expected_file_types
+        Exception.__init__(self, 'Open method does not support CAR file type: %s. Expected one of the following CAR file types: %s' % (str(file_type), str(expected_file_types)))
+
+class BrokenCborFileException(Exception):
+ def __init__(self):
+        Exception.__init__(self, 'Corrupt, incomplete, or otherwise broken CBOR file. Please re-download the file, contact the organizers, or use an appropriate reader to open it.')
+
+
+class Page(object):
+ """
+ The name and skeleton of a Wikipedia page.
+
+ .. attribute:: page_name
+
+ :rtype: PageName
+
+ The name of the page.
+
+ .. attribute:: skeleton
+
+ :rtype: typing.List[PageSkeleton]
+
+ The contents of the page
+
+ .. attribute:: page_type
+
+ :rtype: PageType
+
+       The type of the page.
+
+ .. attribute:: page_meta
+
+ :rtype: PageMetadata
+
+ Metadata about the page
+ """
+ def __init__(self, page_name, page_id, skeleton, page_type, page_meta):
+ self.page_name = page_name
+ self.page_id = page_id
+ self.skeleton = list(skeleton)
+ self.child_sections = [child for child in self.skeleton if isinstance(child, Section)]
+ self.page_type = page_type
+ self.page_meta = page_meta
+
+ def deep_headings_list(self):
+ return [child.nested_headings() for child in self.child_sections]
+
+ def flat_headings_list(self):
+ """ return
+ Returns a flat list of headings contained by the :class:`Page`.
+
+ :rtype: typing.List[Section]
+ """
+ def flatten(prefix, headings):
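+            # Walk the (section, children) tree, yielding the full heading path to each section.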
+ for section, children in headings:
+ new_prefix = prefix + [section]
+ if len(children)>0 :
+ yield new_prefix
+ yield from flatten(new_prefix, children)
+ else:
+ yield new_prefix
+
+ deep_headings = self.deep_headings_list()
+ return list(flatten([], deep_headings))
+
+ def get_infoboxes(self):
+ toplevel_infoboxes = [child for child in self.skeleton if isinstance(child, InfoBox)]
+ section_infoboxes = [section.get_infoboxes()
+ for sections
+ in self.flat_headings_list()
+ for section in sections]
+ return toplevel_infoboxes + list(itertools.chain.from_iterable(section_infoboxes))
+
+
+ @staticmethod
+ def from_cbor(cbor):
+
+ if not (cbor[0] == 0 or cbor[0] == 1): # tag
+ raise CborElementNotDefinedException(cbor)
+ pagename = cbor[1]
+ pageId = cbor[2].decode('ascii')
+
+ if len(cbor)==4:
+ return Page(pagename, pageId, map(PageSkeleton.from_cbor, cbor[3]), ArticlePage, PageMetadata.default())
+ else:
+ page_type = PageType.from_cbor(cbor[4])
+ return Page(pagename, pageId, map(PageSkeleton.from_cbor, cbor[3]), page_type, PageMetadata.from_cbor(cbor[5]))
+
+ def __str__(self):
+ return "Page(%s)" % self.page_name
+
+ def to_string(self):
+ """
+ Render a string representation of the page.
+
+ :rtype: str
+ """
+        return self.page_name + str(self.page_meta) +\
+ '\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + '\n'.join(str(s) for s in self.skeleton)
+
+ def nested_headings(self):
+ """
+ Each heading recursively represented by a pair of ``(heading,
+ list_of_child_sections)``.
+
+ :rtype: typing.List[typing.Tuple[Section, typing.List[Section]]]
+ """
+ result = [child.nested_headings() for child in self.child_sections]
+ return result
+
+ def outline(self):
+ return self.child_sections
+
+ def get_text(self):
+ """Include all visible text below this elements. Includes Captions of images, but no headings and no infoboxes. See `get_text_with_headings` for a version that includes headings."""
+ return '\n'.join(skel.get_text() for skel in self.skeleton)
+
+
+ def get_text_with_headings(self, include_heading = False):
+ """Include all visible text below this elements. While the heading of this element is excluded, headings of subsections will be included. Captions of images are excluded."""
+ return '\n'.join(skel.get_text_with_headings(include_heading = True) for skel in self.skeleton)
+
+class PageType(object):
+ """
+ An abstract base class representing the various types of pages.
+
+ Subclasses include
+
+ * :class:`ArticlePage`
+ * :class:`CategoryPage`
+ * :class:`DisambiguationPage`
+ * :class:`RedirectPage`
+ """
+ @staticmethod
+ def from_cbor(cbor):
+ typetag = cbor[0]
+ if typetag == 0: return ArticlePage()
+ elif typetag == 1: return CategoryPage()
+ elif typetag == 2: return DisambiguationPage()
+ elif typetag == 3:
+ target = cbor[1]
+ if type(target) == list: # TODO this is almost certainly wrong
+ targetPage = target[1]
+ else:
+ targetPage = target.decode('ascii')
+ return RedirectPage(targetPage)
+ else:
+ raise CborElementNotDefinedException(cbor)
+
+class ArticlePage(PageType):
+ ''
+ def __init__(self):
+ pass
+ def __str__(self): return "ArticlePage"
+
+class CategoryPage(PageType):
+ def __init__(self):
+ pass
+ def __str__(self): return "CategoryPage"
+
+class DisambiguationPage(PageType):
+ def __init__(self):
+ pass
+ def __str__(self): return "Disambiguation Page"
+
+class RedirectPage(PageType):
+ """
+ .. attribute:: targetPage
+
+ :rtype: PageId
+
+ The target of the redirect.
+ """
+ def __init__(self, targetPage):
+ self.targetPage = targetPage
+ def __str__(self):
+ return "RedirectPage " + self.targetPage
+
+class PageMetadata(object):
+ """
+ Meta data for a page
+
+ .. attribute:: redirectNames
+
+ :rtype: PageName
+
+ Names of pages which redirect to this page
+
+ .. attribute:: disambiguationNames
+
+ :rtype: PageName
+
+ Names of disambiguation pages which link to this page
+
+ .. attribute:: disambiguationId
+
+ :rtype: PageId
+
+ Page IDs of disambiguation pages which link to this page
+
+ .. attribute:: categoryNames
+
+ :rtype: str
+
+ Page names of categories to which this page belongs
+
+ .. attribute:: categoryIds
+
+ :rtype: str
+
+ Page IDs of categories to which this page belongs
+
+ .. attribute:: inlinkIds
+
+ :rtype: str
+
+ Page IDs of pages containing inlinks
+
+    .. attribute:: inlinkAnchors
+
+       :rtype: typing.List[typing.Tuple[str, int]]
+
+       (Anchor text, frequency) pairs for the anchors of inlinks to this page
+ """
+ def __init__(self, redirectNames, disambiguationNames, disambiguationIds, categoryNames, categoryIds, inlinkIds,
+ inlinkAnchors):
+ self.inlinkAnchors = inlinkAnchors
+ self.inlinkIds = inlinkIds
+ self.categoryIds = categoryIds
+ self.categoryNames = categoryNames
+ self.disambiguationIds = disambiguationIds
+ self.disambiguationNames = disambiguationNames
+ self.redirectNames = redirectNames
+
+ @staticmethod
+ def default():
+ return PageMetadata(None, None, None, None, None, None, None)
+
+ def __str__(self):
+ redirStr = ("" if self.redirectNames is None else (" redirected = "+", ".join([name for name in self.redirectNames])))
+ disamStr = ("" if self.disambiguationNames is None else (" disambiguated = "+", ".join([name for name in self.disambiguationNames])))
+ catStr = ("" if self.redirectNames is None else (" categories = "+", ".join([name for name in (self.categoryNames or [])])))
+ inlinkStr = ("" if self.inlinkIds is None else (" inlinks = "+", ".join([name for name in self.inlinkIds])))
+ # inlinkAnchorStr = str (self.inlinkAnchors)
+ inlinkAnchorStr = ("" if self.inlinkAnchors is None else
+ (" inlinkAnchors = "+", ".join(
+ [ ("%s: %d" % (name, freq)) for (name, freq) in self.inlinkAnchors]
+ # [ ("%s: " % (name)) for (name, freq) in self.inlinkAnchors] \
+ )))
+ return "%s \n%s \n%s \n%s \n%s\n" % (redirStr, disamStr, catStr, inlinkStr, inlinkAnchorStr)
+
+ @staticmethod
+ def from_cbor(cbor):
+ redirectNames=None
+ disambiguationNames=None
+ disambiguationIds=None
+ categoryNames=None
+ categoryIds=None
+ inlinkIds=None
+ inlinkAnchors=None
+
+ def decodeListOfIdList(cbor):
+ if len(cbor)==0: return None
+ else:
+ return [elem.decode('ascii') for elem in cbor]
+
+ def decodeListOfNameList(cbor):
+ if len(cbor)==0: return None
+ else:
+ return cbor
+
+ def decodeListOfNameIntList(cbor):
+ if len(cbor)==0: return None
+ else:
+ # need to convert list of pair-lists to lists of pair-tuples
+ return [(elem[0], elem[1]) for elem in cbor]
+
+ for i in range(0, len(cbor), 2):
+ tag = cbor[i][0]
+ cbor_data = cbor[i+1]
+
+ if tag == 0:
+ redirectNames = decodeListOfNameList(cbor_data)
+ elif tag == 1:
+ disambiguationNames=decodeListOfNameList(cbor_data)
+ elif tag == 2:
+ disambiguationIds=decodeListOfIdList(cbor_data)
+ elif tag == 3:
+ categoryNames=decodeListOfNameList(cbor_data)
+ elif tag == 4:
+ categoryIds=decodeListOfIdList(cbor_data)
+ elif tag == 5:
+ inlinkIds=decodeListOfIdList(cbor_data)
+
+ elif tag == 6:
+ # compatibility with v1.6
+ inlinkAnchors = [(anchor, 1) for anchor in decodeListOfNameList(cbor_data)]
+ elif tag == 7:
+ # compatibility with v2.0
+ inlinkAnchors = decodeListOfNameIntList(cbor_data)
+ i+=2
+
+ return PageMetadata(redirectNames, disambiguationNames, disambiguationIds, categoryNames, categoryIds, inlinkIds, inlinkAnchors)
+
+class PageSkeleton(object):
+ """
+ An abstract superclass for the various types of page elements. Subclasses include:
+
+ * :class:`Section`
+ * :class:`Para`
+ * :class:`Image`
+
+ """
+ @staticmethod
+ def from_cbor(cbor):
+ tag = cbor[0]
+ if tag == 0: # section
+ heading = cbor[1]
+ headingId = cbor[2].decode('ascii')
+ return Section(heading, headingId, map(PageSkeleton.from_cbor, cbor[3]))
+ elif tag == 1: # para-wrapper
+ return Para(Paragraph.from_cbor(cbor[1]))
+ elif tag == 2: #images
+ imageUrl = cbor[1]
+ caption = [PageSkeleton.from_cbor(elem) for elem in cbor[2]]
+ return Image(imageUrl, caption=caption)
+ elif tag == 3: # list
+ level = cbor[1]
+ body = Paragraph.from_cbor(cbor[2])
+ return List(level, body)
+ elif tag == 4: # infobox
+ infobox_title = cbor[1]
+ cbor_entries = cbor[2]
+ entries = [ (kv[0], PageSkeleton.from_cbor(kv[1][0])) for kv in cbor_entries if kv[1] and kv[1][0]] # if no value is defined kv[1] will be null.
+ return InfoBox(infobox_title, entries)
+ else:
+ raise CborElementNotDefinedException(cbor)
+
+ def get_text(self):
+ """Includes visible text of this element and below. Headings are excluded. Image Captions are included. Infoboxes are ignored. (For a version with headers and no captions see `get_text_with_headings` """
+ raise NotImplementedError
+
+ def get_text_with_headings(self, include_heading = False):
+ """Include all visible text below this elements. While the heading of this element is excluded, headings of subsections will be included. Captions of images are excluded."""
+ raise NotImplementedError
+
+
+class Section(PageSkeleton):
+ """
+ A section of a Wikipedia page.
+
+ .. attribute:: heading
+
+ :rtype: str
+
+ The section heading.
+
+ .. attribute:: headingId
+
+ :rtype: str
+
+ The unique identifier of a section heading.
+
+ .. attribute:: children
+
+ :rtype: typing.List[PageSkeleton]
+
+ The :class:`PageSkeleton` elements contained by the section.
+ """
+ def __init__(self, heading, headingId, children):
+ self.heading = heading
+ self.headingId = headingId
+ self.children = list(children)
+ self.child_sections = [child for child in self.children if isinstance(child, Section)]
+
+ def str_(self, level):
+ bar = "".join("="*level)
+ children = "".join(c.str_(level=level+1) for c in self.children)
+ return "\n%s %s %s\n\n%s" % (bar, self.heading, bar, children)
+
+ def __str__(self):
+ return self.str_(level=1)
+
+ def __getitem__(self, idx):
+ return self.children[idx]
+
+ def nested_headings(self):
+ return (self, [child.nested_headings() for child in self.child_sections])
+
+ def get_text(self):
+ return '\n'.join(child.get_text() for child in self.children)
+
+ def get_text_with_headings(self, include_heading = False):
+ opt_heading = self.heading + "\n" if include_heading else ""
+ return opt_heading + '\n'.join(child.get_text_with_headings(include_heading = True) for child in self.children)
+
+
+ def get_infoboxes(self):
+ return [child for child in self.children if isinstance(child, InfoBox)]
+
+class Para(PageSkeleton):
+ """
+ A paragraph within a Wikipedia page.
+
+ .. attribute:: paragraph
+
+ :rtype: Paragraph
+
+ The content of the Paragraph (which in turn contains a list of :class:`ParaBody`\ s)
+ """
+ def __init__(self, paragraph):
+ self.paragraph = paragraph
+
+ def str_(self, level=None):
+ return str(self.paragraph)
+
+ def __str__(self):
+ return self.str_()
+
+ def get_text(self):
+ return self.paragraph.get_text()
+
+ def get_text_with_headings(self, include_heading = False):
+ return self.get_text()
+
+class Image(PageSkeleton):
+ """
+ An image within a Wikipedia page.
+
+ .. attribute:: caption
+
+ :rtype: str
+
+ PageSkeleton representing the caption of the image
+
+ .. attribute:: imageurl
+
+ :rtype: str
+
+ URL to the image; spaces need to be replaced with underscores, Wikimedia
+ Commons namespace needs to be prefixed
+ """
+ def __init__(self, imageurl, caption):
+ self.caption = caption
+ self.imageurl = imageurl
+
+ def str_(self, level=None):
+ return str("!["+self.imageurl+"]. Caption: "+(''.join([str(skel) for skel in self.caption])))
+
+ def __str__(self):
+ return self.str_()
+
+ def get_text(self):
+ return '\n'.join(skel.get_text() for skel in self.caption)
+
+ def get_text_with_headings(self, include_heading = False):
+ return ''
+
+class List(PageSkeleton):
+ """
+ A list element within a Wikipedia page.
+
+ .. attribute:: level
+
+ :rtype: int
+
+ The list nesting level
+
+ .. attribute:: body
+
+ A :class:`Paragraph` containing the list element contents.
+ """
+ def __init__(self, level, body):
+ self.level = level
+ self.body = body
+
+ def str_(self, level=None):
+ return str("*" * self.level + " " + str(self.body) + '\n')
+
+ def __str__(self):
+ return self.str_()
+
+
+ def get_text(self):
+ return self.body.get_text()
+
+ def get_text_with_headings(self, include_heading = False):
+ return self.get_text()
+
+
+class InfoBox(PageSkeleton):
+ def __init__(self, infobox_type, entries):
+ """
+ An infobox within a Wikipedia page.
+
+ .. attribute:: infobox_type
+
+ :rtype: str
+
+ The title/type of the infobox
+
+ .. attribute:: entries
+
+ Key-value pair, where key is a string, and value is a :class:`PageSkeleton` containing the value. Values are often paragraphs or images, but they can also be lists.
+ """
+ self.title = infobox_type
+ self.entries = entries
+
+ def str_(self, level=None):
+ return self.title+ "\n"+ ("\n".join([key+": "+str(values) for (key,values) in self.entries]))
+
+ def __str__(self):
+ return self.str_()
+
+
+
+ def get_text(self):
+ return ''
+
+ def get_text_with_headings(self, include_heading = False):
+ return ''
+
+
+class Paragraph(object):
+ """
+ A paragraph.
+ """
+ def __init__(self, para_id, bodies):
+ self.para_id = para_id
+ self.bodies = list(bodies)
+
+ @staticmethod
+ def from_cbor(cbor):
+ if (not cbor[0] == 0):
+ raise CborElementNotDefinedException(cbor)
+
+ paragraphId = cbor[1].decode('ascii')
+ return Paragraph(paragraphId, map(ParaBody.from_cbor, cbor[2]))
+
+ def get_text(self):
+ """
+ Get all of the contained text.
+
+ :rtype: str
+ """
+ return ''.join([body.get_text() for body in self.bodies])
+
+ def str_(self, level=None):
+ return ' '.join(str(body) for body in self.bodies)
+
+ def __str__(self):
+ return self.str_()
+
+
+class ParaBody(object):
+ """
+ An abstract superclass representing a bit of :class:`Paragraph` content.
+ """
+ @staticmethod
+ def from_cbor(cbor):
+ tag = cbor[0]
+ if tag == 0:
+ return ParaText(cbor[1])
+ elif tag == 1:
+ cbor_ = cbor[1]
+ linkSection = None
+ if len(cbor_[2]) == 1:
+ linkSection = cbor_[2][0]
+ linkTargetId = cbor_[3].decode('ascii')
+ return ParaLink(cbor_[1], linkSection, linkTargetId, cbor_[4])
+ else:
+ raise CborElementNotDefinedException(cbor)
+
+ @abstractmethod
+ def get_text(self):
+ """
+ Get all of the text within a :class:`ParaBody`.
+
+ :rtype: str
+ """
+ raise NotImplementedError
+
+class ParaText(ParaBody):
+ """
+ A bit of plain text from a paragraph.
+
+ .. attribute:: text
+
+ :rtype: str
+
+ The text
+ """
+ def __init__(self, text):
+ self.text = text
+
+ def get_text(self):
+ return self.text
+
+ def str_(self, level=None):
+ return self.text
+
+ def __str__(self):
+ return self.str_()
+
+
+class ParaLink(ParaBody):
+ """
+ A link within a paragraph.
+
+ .. attribute:: page
+
+ :rtype: PageName
+
+ The page name of the link target
+
+ .. attribute:: pageid
+
+ :rtype: PageId
+
+ The link target as trec-car identifier
+
+ .. attribute:: link_section
+
+ :rtype: str
+
+ Section anchor of link target (i.e. the part after the ``#`` in the
+ URL), or ``None``.
+
+ .. attribute:: anchor_text
+
+ :rtype: str
+
+ The anchor text of the link
+ """
+ def __init__(self, page, link_section, pageid, anchor_text):
+ self.page = page
+ self.pageid = pageid
+ self.link_section = link_section
+ self.anchor_text = anchor_text
+
+ def get_text(self):
+ return self.anchor_text
+
+ def str_(self, level=None):
+ return "[%s](%s)" % (self.anchor_text, self.page)
+
+ def __str__(self):
+ return self.str_()
+
+
+def _iter_with_header(file, parse, expected_file_types):
+ maybe_hdr = cbor.load(file)
+ if isinstance(maybe_hdr, list) and maybe_hdr[0] == 'CAR':
+ # we have a header
+ file_type = maybe_hdr[1][0]
+ if not file_type in expected_file_types:
+ # print( 'File type tag is expected to be ', (" ".join(expected_file_types)), 'but given file is of type ', file_type)
+ # print('Did not expect file of type', file_type)
+ raise WrongCarFileException(file_type, expected_file_types)
+
+ # read beginning of variable-length list
+ if (not file.read(1) == b'\x9f'):
+ raise BrokenCborFileException()
+ else:
+ yield parse(maybe_hdr)
+
+ while True:
+ try:
+ # Check for break symbol
+ if (peek_for_break(file)):
+ break
+
+ yield parse(cbor.load(file))
+ except EOFError:
+ break
+
+def peek_for_break(cbor):
+ b = cbor.peek(1)
+ return b[0:1] == b'\xff'
+
+
+def iter_annotations(file):
+ """
+ Iterate over the :class:`Page`\ s of an annotations file.
+
+ :type file: typing.BinaryIO
+ :rtype: typing.Iterator[Page]
+ """
+ return _iter_with_header(file, Page.from_cbor, [0,1])
+ # return TrecCarHeader.from_cbor(file)
+
+
+
+def iter_pages(file):
+ """
+ Iterate over the :class:`Page`\ s of a pages file.
+
+ :type file: typing.BinaryIO
+ :rtype: typing.Iterator[Page]
+ """
+ return _iter_with_header(file, Page.from_cbor, [0])
+
+
+
+def iter_outlines(file):
+ """
+ Iterate over the :class:`Page`\ s of an outlines file.
+
+ :type file: typing.BinaryIO
+ :rtype: typing.Iterator[Page]
+ """
+ return _iter_with_header(file, Page.from_cbor, [1])
+
+
+def iter_paragraphs(file):
+ """
+ Iterate over the :class:`Paragraph`\ s of a paragraphs file.
+
+ :type file: typing.BinaryIO
+ :rtype: typing.Iterator[Paragraph]
+ """
+ return _iter_with_header(file, Paragraph.from_cbor, [2])
+
+def dump_annotations(file):
+ for page in iter_annotations(file):
+ print(page.to_string())
+
+def with_toc(read_val):
+ class AnnotationsFile(object):
+ def __init__(self, fname):
+ """
+ Read annotations from a file.
+
+ Arguments:
+ fname The name of the CBOR file. A table-of-contents file is
+ also expected to be present.
+ """
+ self.cbor = open(fname, 'rb')
+ self.toc = cbor.load(open(fname+'.toc', 'rb'))
+
+ def keys(self):
+ """ The page names contained in an annotations file. """
+ return self.toc.keys()
+
+ def get(self, page):
+ """ Lookup a page by name. Returns a Page or None """
+ offset = self.toc.get(page)
+ if offset is not None:
+ self.cbor.seek(offset)
+ return read_val(cbor.load(self.cbor))
+ return None
+ return AnnotationsFile
+
+AnnotationsFile = with_toc(Page.from_cbor)
+ParagraphsFile = with_toc(Paragraph.from_cbor)
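For orientation, here is a minimal usage sketch of the reader above (a hedged illustration, not part of the vendored file). The input path is hypothetical, and only names defined in read_data.py are used (iter_pages, Page.nested_headings, Section.heading, Page.get_text):

    from trec_car.read_data import iter_pages

    # Hypothetical path to a TREC CAR pages CBOR file.
    with open('pages.cbor', 'rb') as f:
        for page in iter_pages(f):
            # Top-level headings; each entry is a (Section, nested children) pair.
            for section, _children in page.nested_headings():
                print(section.heading)
            # All visible text, captions included, headings excluded.
            text = page.get_text()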
diff --git a/vendors/trec-car-tools/trec-car-tools-example/pom.xml b/vendors/trec-car-tools/trec-car-tools-example/pom.xml
new file mode 100644
index 00000000..95930619
--- /dev/null
+++ b/vendors/trec-car-tools/trec-car-tools-example/pom.xml
@@ -0,0 +1,101 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <groupId>edu.unh.cs.treccar</groupId>
+ <artifactId>treccar-tools-example</artifactId>
+ <version>0.1</version>
+
+ <repositories>
+ <repository>
+ <id>jitpack.io</id>
+ <url>https://jitpack.io</url>
+ </repository>
+ </repositories>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.github.TREMA-UNH</groupId>
+ <artifactId>trec-car-tools-java</artifactId>
+ <version>18</version>
+ </dependency>
+ <dependency>
+ <groupId>co.nstant.in</groupId>
+ <artifactId>cbor</artifactId>
+ <version>0.8</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.commons</groupId>
+ <artifactId>commons-lang3</artifactId>
+ <version>3.3.2</version>
+ </dependency>
+ <dependency>
+ <groupId>org.jetbrains</groupId>
+ <artifactId>annotations-java5</artifactId>
+ <version>RELEASE</version>
+ </dependency>
+
+ <!-- Only necessary for lucene indexing -->
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-core</artifactId>
+ <version>7.2.0</version>
+ </dependency>
+
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <version>2.2</version>
+ <configuration>
+ <descriptorRefs>
+ <descriptorRef>jar-with-dependencies</descriptorRef>
+ <!--<useUniqueVersions>false</useUniqueVersions>-->
+ </descriptorRefs>
+ <archive>
+ <manifest>
+ <mainClass>edu.unh.cs.TrecCarToolsExample</mainClass>
+ </manifest>
+ </archive>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>3.2</version>
+ <configuration>
+ <source>1.10</source>
+ <target>1.10</target>
+ </configuration>
+ </plugin>
+
+ <plugin> <!-- Create sources.jar -->
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-source-plugin</artifactId>
+ <version>3.0.1</version>
+ <executions>
+ <execution>
+ <id>attach-sources</id>
+ <goals>
+ <goal>jar</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-release-plugin</artifactId>
+ <configuration>
+ <goals>assembly:single</goals>
+ </configuration>
+ </plugin>
+
+ </plugins>
+ </build>
+
+
+</project>
diff --git a/vendors/trec-car-tools/trec-car-tools-example/src/main/java/edu/unh/cs/TrecCarBuildLuceneIndex.java b/vendors/trec-car-tools/trec-car-tools-example/src/main/java/edu/unh/cs/TrecCarBuildLuceneIndex.java
new file mode 100644
index 00000000..1d438325
--- /dev/null
+++ b/vendors/trec-car-tools/trec-car-tools-example/src/main/java/edu/unh/cs/TrecCarBuildLuceneIndex.java
@@ -0,0 +1,220 @@
+package edu.unh.cs;
+
+import edu.unh.cs.treccar_v2.Data;
+import edu.unh.cs.treccar_v2.read_data.CborFileTypeException;
+import edu.unh.cs.treccar_v2.read_data.CborRuntimeException;
+import edu.unh.cs.treccar_v2.read_data.DeserializeData;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.jetbrains.annotations.NotNull;
+
+import java.io.*;
+import java.nio.file.FileSystems;
+import java.nio.file.Path;
+import java.util.Collections;
+import java.util.Iterator;
+
+/*
+ * User: dietz
+ * Date: 1/4/18
+ * Time: 1:23 PM
+ */
+
+/**
+ * Example of how to build a lucene index of trec car paragraphs and pages
+ */
+public class TrecCarBuildLuceneIndex {
+
+ private static void usage() {
+ System.out.println("Command line parameters: paragraphs paragraphCBOR LuceneINDEX");
+ System.exit(-1);
+ }
+
+ public static void main(String[] args) throws IOException {
+ System.setProperty("file.encoding", "UTF-8");
+
+ if (args.length < 3)
+ usage();
+
+ String mode = args[0];
+ String indexPath = args[2];
+
+ if (mode.equals("paragraphs")) {
+ final String paragraphsFile = args[1];
+ final FileInputStream fileInputStream2 = new FileInputStream(new File(paragraphsFile));
+
+ System.out.println("Creating paragraph index in "+indexPath);
+ final IndexWriter indexWriter = setupIndexWriter(indexPath, "paragraph.lucene");
+ final Iterator<Data.Paragraph> paragraphIterator = DeserializeData.iterParagraphs(fileInputStream2);
+
+ for (int i=1; paragraphIterator.hasNext(); i++){
+ final Document doc = paragraphToLuceneDoc(paragraphIterator.next());
+ indexWriter.addDocument(doc);
+ if (i % 10000 == 0) {
+ System.out.print('.');
+ indexWriter.commit();
+ }
+ }
+
+ System.out.println("\n Done indexing.");
+
+ indexWriter.commit();
+ indexWriter.close();
+ }
+ else if (mode.equals("pages")) {
+ final String pagesFile = args[1];
+ final FileInputStream fileInputStream = new FileInputStream(new File(pagesFile));
+
+ System.out.println("Creating page index in "+indexPath);
+ final IndexWriter indexWriter = setupIndexWriter(indexPath, "pages.lucene");
+
+ final Iterator<Data.Page> pageIterator = DeserializeData.iterAnnotations(fileInputStream);
+
+ for (int i=1; pageIterator.hasNext(); i++){
+ final Document doc = pageToLuceneDoc(pageIterator.next());
+ indexWriter.addDocument(doc);
+ if (i % 10000 == 0) {
+ System.out.print('.');
+ indexWriter.commit();
+ }
+ }
+
+ System.out.println("\n Done indexing.");
+
+
+ indexWriter.commit();
+ indexWriter.close();
+ }
+ }
+
+ private static Iterable<Document> toIterable(final Iterator<Document> iter) throws CborRuntimeException, CborFileTypeException {
+ return new Iterable<Document>() {
+ @Override
+ @NotNull
+ public Iterator<Document> iterator() {
+ return iter;
+ }
+ };
+ }
+
+
+ public static class ParaToLuceneIterator implements Iterator<Document> {
+ private static final int DEBUG_EVERY = 10000;
+ private int counter = DEBUG_EVERY;
+ private final Iterator<Data.Paragraph> paragraphIterator;
+
+ ParaToLuceneIterator(Iterator<Data.Paragraph> paragraphIterator){
+ this.paragraphIterator = paragraphIterator;
+ }
+
+ @Override
+ public boolean hasNext() {
+ return this.paragraphIterator.hasNext();
+ }
+
+ @Override
+ public Document next() {
+ counter --;
+ if(counter < 0) {
+ System.out.print('.');
+ counter = DEBUG_EVERY;
+ }
+
+ Data.Paragraph p = this.paragraphIterator.next();
+ return paragraphToLuceneDoc(p);
+ }
+
+ @Override
+ public void remove() {
+ this.paragraphIterator.remove();
+ }
+ }
+
+ @NotNull
+ private static Document paragraphToLuceneDoc(Data.Paragraph p) {
+ final Document doc = new Document();
+ final String content = p.getTextOnly(); // <-- Todo Adapt this to your needs!
+ doc.add(new TextField("text", content, Field.Store.YES));
+ doc.add(new StringField("paragraphid", p.getParaId(), Field.Store.YES)); // don't tokenize this!
+ return doc;
+ }
+
+
+ public static class PageToLuceneIterator implements Iterator<Document> {
+ private static final int DEBUG_EVERY = 1000;
+ private int counter = DEBUG_EVERY;
+ private final Iterator<Data.Page> pageIterator;
+
+ PageToLuceneIterator(Iterator<Data.Page> pageIterator){
+ this.pageIterator = pageIterator;
+ }
+
+ @Override
+ public boolean hasNext() {
+ return this.pageIterator.hasNext();
+ }
+
+ @Override
+ public Document next() {
+ counter --;
+ if(counter < 0) {
+ System.out.print('.');
+ counter = DEBUG_EVERY;
+ }
+
+ Data.Page p = this.pageIterator.next();
+ return pageToLuceneDoc(p);
+ }
+
+ @Override
+ public void remove() {
+ this.pageIterator.remove();
+ }
+ }
+
+ @NotNull
+ private static Document pageToLuceneDoc(Data.Page p) {
+ final Document doc = new Document();
+ StringBuilder content = new StringBuilder();
+ pageContent(p, content); // Todo Adapt this to your needs!
+
+ doc.add(new TextField("text", content.toString(), Field.Store.NO)); // dont store, just index
+ doc.add(new StringField("pageid", p.getPageId(), Field.Store.YES)); // don't tokenize this!
+ return doc;
+ }
+
+
+ private static void sectionContent(Data.Section section, StringBuilder content){
+ content.append(section.getHeading()+'\n');
+ for (Data.PageSkeleton skel: section.getChildren()) {
+ if (skel instanceof Data.Section) sectionContent((Data.Section) skel, content);
+ else if (skel instanceof Data.Para) paragraphContent((Data.Para) skel, content);
+ else {
+ }
+ }
+ }
+ private static void paragraphContent(Data.Para paragraph, StringBuilder content){
+ content.append(paragraph.getParagraph().getTextOnly()).append('\n');
+ }
+ private static void pageContent(Data.Page page, StringBuilder content){
+ content.append(page.getPageName()).append('\n');
+
+ for(Data.PageSkeleton skel: page.getSkeleton()){
+ if(skel instanceof Data.Section) sectionContent((Data.Section) skel, content);
+ else if(skel instanceof Data.Para) paragraphContent((Data.Para) skel, content);
+ else {} // ignore other
+ }
+
+ }
+ @NotNull
+ private static IndexWriter setupIndexWriter(String indexPath, String typeIndex) throws IOException {
+ Path path = FileSystems.getDefault().getPath(indexPath, typeIndex);
+ Directory indexDir = FSDirectory.open(path);
+ IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
+ return new IndexWriter(indexDir, config);
+ }
+}
\ No newline at end of file
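The indexing loop above reduces each paragraph to a (paragraphid, text) pair and commits every 10000 documents. A rough Python sketch of the same extraction step, using only the vendored reader; the TSV output format and both file paths are illustrative assumptions, not part of the tools:

    from trec_car.read_data import iter_paragraphs

    # Hypothetical paths; mirrors paragraphToLuceneDoc above.
    with open('paragraphs.cbor', 'rb') as f, open('paragraphs.tsv', 'w') as out:
        for i, para in enumerate(iter_paragraphs(f), start=1):
            text = para.get_text().replace('\t', ' ').replace('\n', ' ')
            out.write(para.para_id + '\t' + text + '\n')
            if i % 10000 == 0:
                print('.', end='', flush=True)  # progress marker, like the Java loop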
diff --git a/vendors/trec-car-tools/trec-car-tools-example/src/main/java/edu/unh/cs/TrecCarQueryLuceneIndex.java b/vendors/trec-car-tools/trec-car-tools-example/src/main/java/edu/unh/cs/TrecCarQueryLuceneIndex.java
new file mode 100644
index 00000000..97eec091
--- /dev/null
+++ b/vendors/trec-car-tools/trec-car-tools-example/src/main/java/edu/unh/cs/TrecCarQueryLuceneIndex.java
@@ -0,0 +1,231 @@
+package edu.unh.cs;
+
+import edu.unh.cs.treccar_v2.Data;
+import edu.unh.cs.treccar_v2.read_data.CborFileTypeException;
+import edu.unh.cs.treccar_v2.read_data.CborRuntimeException;
+import edu.unh.cs.treccar_v2.read_data.DeserializeData;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.similarities.BM25Similarity;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.jetbrains.annotations.NotNull;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.StringReader;
+import java.nio.file.FileSystems;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
+/*
+ * User: dietz
+ * Date: 1/4/18
+ * Time: 1:23 PM
+ */
+
+/**
+ * Example of how to query a lucene index of trec car paragraphs
+ */
+public class TrecCarQueryLuceneIndex {
+
+ private static void usage() {
+ System.out.println("Command line parameters: action OutlineCBOR LuceneINDEX\n" +
+ "action is one of output-sections | paragraphs-run-sections | paragraphs-run-pages | pages-run-pages");
+ System.exit(-1);
+ }
+
+ static class MyQueryBuilder {
+
+ private final StandardAnalyzer analyzer;
+ private List<String> tokens;
+
+ public MyQueryBuilder(StandardAnalyzer standardAnalyzer){
+ analyzer = standardAnalyzer;
+ tokens = new ArrayList<>(128);
+ }
+
+ public BooleanQuery toQuery(String queryStr) throws IOException {
+
+ TokenStream tokenStream = analyzer.tokenStream("text", new StringReader(queryStr));
+ tokenStream.reset();
+ tokens.clear();
+ while (tokenStream.incrementToken()) {
+ final String token = tokenStream.getAttribute(CharTermAttribute.class).toString();
+ tokens.add(token);
+ }
+ tokenStream.end();
+ tokenStream.close();
+ BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder();
+ for (String token : tokens) {
+ booleanQuery.add(new TermQuery(new Term("text", token)), BooleanClause.Occur.SHOULD);
+ }
+ return booleanQuery.build();
+ }
+ }
+
+ public static void main(String[] args) throws IOException {
+ System.setProperty("file.encoding", "UTF-8");
+
+ if (args.length < 3)
+ usage();
+
+ String mode = args[0];
+ String indexPath = args[2];
+
+
+ if (mode.equals("output-sections")) {
+ IndexSearcher searcher = setupIndexSearcher(indexPath, "paragraph.lucene");
+
+ searcher.setSimilarity(new BM25Similarity());
+ final MyQueryBuilder queryBuilder = new MyQueryBuilder(new StandardAnalyzer());
+
+ final String pagesFile = args[1];
+ final FileInputStream fileInputStream3 = new FileInputStream(new File(pagesFile));
+ for (Data.Page page : DeserializeData.iterableAnnotations(fileInputStream3)) {
+ System.out.println("\n\nPage: "+page.getPageId());
+ for (List<Data.Section> sectionPath : page.flatSectionPaths()) {
+ System.out.println();
+ System.out.println(Data.sectionPathId(page.getPageId(), sectionPath) + " \t " + Data.sectionPathHeadings(sectionPath));
+
+ String queryStr = buildSectionQueryStr(page, sectionPath);
+
+ // get top 10 documents
+ TopDocs tops = searcher.search(queryBuilder.toQuery(queryStr), 10);
+ ScoreDoc[] scoreDoc = tops.scoreDocs;
+ System.out.println("Found "+scoreDoc.length+" results.");
+ for (ScoreDoc score : scoreDoc) {
+ final Document doc = searcher.doc(score.doc); // to access stored content
+ // print score and internal docid
+ System.out.println(doc.getField("paragraphid").stringValue()+ " (" + score.doc + "): SCORE " + score.score);
+ // access and print content
+ System.out.println(" " +doc.getField("text").stringValue());
+ }
+
+ }
+ System.out.println();
+ }
+ } else if (mode.equals("paragraphs-run-sections")) {
+ IndexSearcher searcher = setupIndexSearcher(indexPath, "paragraph.lucene");
+
+ searcher.setSimilarity(new BM25Similarity());
+ final MyQueryBuilder queryBuilder = new MyQueryBuilder(new StandardAnalyzer());
+
+ final String pagesFile = args[1];
+ final FileInputStream fileInputStream3 = new FileInputStream(new File(pagesFile));
+ for (Data.Page page : DeserializeData.iterableAnnotations(fileInputStream3)) {
+ for (List<Data.Section> sectionPath : page.flatSectionPaths()) {
+ final String queryId = Data.sectionPathId(page.getPageId(), sectionPath);
+
+ String queryStr = buildSectionQueryStr(page, sectionPath);
+
+ TopDocs tops = searcher.search(queryBuilder.toQuery(queryStr), 100);
+ ScoreDoc[] scoreDoc = tops.scoreDocs;
+ for (int i = 0; i < scoreDoc.length; i++) {
+ ScoreDoc score = scoreDoc[i];
+ final Document doc = searcher.doc(score.doc); // to access stored content
+ // print score and internal docid
+ final String paragraphid = doc.getField("paragraphid").stringValue();
+ final float searchScore = score.score;
+ final int searchRank = i+1;
+
+ System.out.println(queryId+" Q0 "+paragraphid+" "+searchRank + " "+searchScore+" Lucene-BM25");
+ }
+
+ }
+ }
+ } else if (mode.equals("paragraphs-run-pages")) {
+ IndexSearcher searcher = setupIndexSearcher(indexPath, "paragraph.lucene");
+
+ searcher.setSimilarity(new BM25Similarity());
+ final MyQueryBuilder queryBuilder = new MyQueryBuilder(new StandardAnalyzer());
+
+ final String pagesFile = args[1];
+ final FileInputStream fileInputStream3 = new FileInputStream(new File(pagesFile));
+ for (Data.Page page : DeserializeData.iterableAnnotations(fileInputStream3)) {
+ final String queryId = page.getPageId();
+
+ String queryStr = buildSectionQueryStr(page, Collections.<Data.Section>emptyList());
+
+ TopDocs tops = searcher.search(queryBuilder.toQuery(queryStr), 100);
+ ScoreDoc[] scoreDoc = tops.scoreDocs;
+ for (int i = 0; i < scoreDoc.length; i++) {
+ ScoreDoc score = scoreDoc[i];
+ final Document doc = searcher.doc(score.doc); // to access stored content
+ // print score and internal docid
+ final String paragraphid = doc.getField("paragraphid").stringValue();
+ final float searchScore = score.score;
+ final int searchRank = i+1;
+
+ System.out.println(queryId+" Q0 "+paragraphid+" "+searchRank + " "+searchScore+" Lucene-BM25");
+ }
+
+ }
+ } else if (mode.equals("pages-run-pages")) {
+ IndexSearcher searcher = setupIndexSearcher(indexPath, "pages.lucene");
+
+ searcher.setSimilarity(new BM25Similarity());
+ final MyQueryBuilder queryBuilder = new MyQueryBuilder(new StandardAnalyzer());
+
+ final String pagesFile = args[1];
+ final FileInputStream fileInputStream3 = new FileInputStream(new File(pagesFile));
+ for (Data.Page page : DeserializeData.iterableAnnotations(fileInputStream3)) {
+ final String queryId = page.getPageId();
+
+ String queryStr = buildSectionQueryStr(page, Collections.<Data.Section>emptyList());
+
+ TopDocs tops = searcher.search(queryBuilder.toQuery(queryStr), 100);
+ ScoreDoc[] scoreDoc = tops.scoreDocs;
+ for (int i = 0; i < scoreDoc.length; i++) {
+ ScoreDoc score = scoreDoc[i];
+ final Document doc = searcher.doc(score.doc); // to access stored content
+ // print score and internal docid
+ final String paragraphid = doc.getField("pageid").stringValue();
+ final float searchScore = score.score;
+ final int searchRank = i+1;
+
+ System.out.println(queryId+" Q0 "+paragraphid+" "+searchRank + " "+searchScore+" Lucene-BM25");
+ }
+
+ }
+ }
+ }
+
+ @NotNull
+ private static IndexSearcher setupIndexSearcher(String indexPath, String typeIndex) throws IOException {
+ Path path = FileSystems.getDefault().getPath(indexPath, typeIndex);
+ Directory indexDir = FSDirectory.open(path);
+ IndexReader reader = DirectoryReader.open(indexDir);
+ return new IndexSearcher(reader);
+ }
+
+ @NotNull
+ private static String buildSectionQueryStr(Data.Page page, List<Data.Section> sectionPath) {
+ StringBuilder queryStr = new StringBuilder();
+ queryStr.append(page.getPageName());
+ for (Data.Section section: sectionPath) {
+ queryStr.append(" ").append(section.getHeading());
+ }
+// System.out.println("queryStr = " + queryStr);
+ return queryStr.toString();
+ }
+
+ private static Iterable<Document> toIterable(final Iterator<Document> iter) throws CborRuntimeException, CborFileTypeException {
+ return new Iterable<Document>() {
+ @Override
+ @NotNull
+ public Iterator<Document> iterator() {
+ return iter;
+ }
+ };
+ }
+
+}
\ No newline at end of file
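buildSectionQueryStr above concatenates the page name with the headings along a section path. The vendored Python reader exposes no flatSectionPaths equivalent, so a sketch deriving the same query strings walks the outline by hand. Section.heading, Section.child_sections and Page.outline are defined in read_data.py; page_name is assumed to be the Page attribute defined earlier in that file:

    def section_query_strings(page):
        # Yields "<page name> <heading> <subheading> ..." per section path,
        # like Data.sectionPathHeadings in the Java example above.
        def walk(section, prefix):
            path = prefix + [section.heading]
            yield ' '.join([page.page_name] + path)
            for child in section.child_sections:
                yield from walk(child, path)
        for top in page.outline():
            yield from walk(top, [])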
diff --git a/vendors/trec-car-tools/trec-car-tools-example/src/main/java/edu/unh/cs/TrecCarToolsExample.java b/vendors/trec-car-tools/trec-car-tools-example/src/main/java/edu/unh/cs/TrecCarToolsExample.java
new file mode 100644
index 00000000..0243d0bf
--- /dev/null
+++ b/vendors/trec-car-tools/trec-car-tools-example/src/main/java/edu/unh/cs/TrecCarToolsExample.java
@@ -0,0 +1,63 @@
+package edu.unh.cs;
+
+import edu.unh.cs.treccar_v2.Data;
+import edu.unh.cs.treccar_v2.read_data.DeserializeData;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.util.List;
+
+/**
+ * User: dietz
+ * Date: 12/9/16
+ * Time: 5:17 PM
+ */
+public class TrecCarToolsExample {
+ private static void usage() {
+ System.out.println("Command line parameters: (header|pages|outlines|paragraphs) FILE");
+ System.exit(-1);
+ }
+
+ public static void main(String[] args) throws Exception {
+ System.setProperty("file.encoding", "UTF-8");
+
+ if (args.length<2)
+ usage();
+
+ String mode = args[0];
+ if (mode.equals("header")) {
+ final String pagesFile = args[1];
+ final FileInputStream fileInputStream = new FileInputStream(new File(pagesFile));
+ System.out.println(DeserializeData.getTrecCarHeader(fileInputStream));
+ System.out.println();
+ }
+ else if (mode.equals("pages")) {
+ final String pagesFile = args[1];
+ final FileInputStream fileInputStream = new FileInputStream(new File(pagesFile));
+ for(Data.Page page: DeserializeData.iterableAnnotations(fileInputStream)) {
+ System.out.println(page);
+ System.out.println();
+ }
+ } else if (mode.equals("outlines")) {
+ final String pagesFile = args[1];
+ final FileInputStream fileInputStream3 = new FileInputStream(new File(pagesFile));
+ for(Data.Page page: DeserializeData.iterableAnnotations(fileInputStream3)) {
+ for (List<Data.Section> sectionPath : page.flatSectionPaths()){
+ System.out.println(Data.sectionPathId(page.getPageId(), sectionPath)+" \t "+Data.sectionPathHeadings(sectionPath));
+ }
+ System.out.println();
+ }
+ } else if (mode.equals("paragraphs")) {
+ final String paragraphsFile = args[1];
+ final FileInputStream fileInputStream2 = new FileInputStream(new File(paragraphsFile));
+ for(Data.Paragraph p: DeserializeData.iterableParagraphs(fileInputStream2)) {
+ System.out.println(p);
+ System.out.println();
+ }
+ } else {
+ usage();
+ }
+
+ }
+
+}
|
|
4bfaf3e14e59406abe93e2fa22e72fb918e66628
|
Sylvain Lesage
| 2021-07-29T10:41:59 |
test: 💍 (quality) change output format, and use multiprocessing
|
diff --git a/quality/test_datasets.py b/quality/test_datasets.py
index a9ddf7b2..3c9fb9ed 100644
--- a/quality/test_datasets.py
+++ b/quality/test_datasets.py
@@ -3,0 +4 @@ from tqdm import tqdm
+from tqdm.contrib.concurrent import process_map
@@ -7 +8,5 @@ from datasets import list_datasets
-from datasets_preview_backend.main import extract_dataset_rows
+from datasets_preview_backend.main import (
+ get_dataset_config_names,
+ get_config_splits,
+ extract_split_rows,
+)
@@ -10 +15,43 @@ from datasets_preview_backend.main import extract_dataset_rows
-def export_all_datasets_exceptions():
+def get_config_names_report(dataset_id: str):
+ try:
+ config_names = get_dataset_config_names(dataset_id)
+ return {
+ "dataset_id": dataset_id,
+ "config_names": list(config_names),
+ "success": True,
+ "exception": None,
+ "message": None,
+ }
+ except Exception as err:
+ return {
+ "dataset_id": dataset_id,
+ "config_names": [],
+ "success": False,
+ "exception": str(type(err).__name__),
+ "message": str(err),
+ }
+
+
+def get_split_names_report(dataset_id: str, config_name: str):
+ try:
+ split_names = get_config_splits(dataset_id, config_name)
+ return {
+ "dataset_id": dataset_id,
+ "config_name": config_name,
+ "split_names": list(split_names),
+ "success": True,
+ "exception": None,
+ "message": None,
+ }
+ except Exception as err:
+ return {
+ "dataset_id": dataset_id,
+ "config_name": config_name,
+ "split_names": [],
+ "success": False,
+ "exception": str(type(err).__name__),
+ "message": str(err),
+ }
+
+
+def get_rows_report(dataset_id: str, config_name: str, split_name: str):
@@ -11,0 +59,24 @@ def export_all_datasets_exceptions():
+ try:
+ extract = extract_split_rows(dataset_id, config_name, split_name, num_rows)
+ if len(extract["rows"]) != num_rows:
+ raise ValueError(f"{len(extract['rows'])} rows instead of {num_rows}")
+ return {
+ "dataset_id": dataset_id,
+ "config_name": config_name,
+ "split_name": split_name,
+ "success": True,
+ "exception": None,
+ "message": None,
+ }
+ except Exception as err:
+ return {
+ "dataset_id": dataset_id,
+ "config_name": config_name,
+ "split_name": split_name,
+ "success": False,
+ "exception": str(type(err).__name__),
+ "message": str(err),
+ }
+
+
+def export_all_datasets_exceptions():
@@ -14,34 +85,37 @@ def export_all_datasets_exceptions():
- results = []
-
- for dataset_id in tqdm(dataset_ids):
-
- success = False
- try:
- extract = extract_dataset_rows(dataset_id, num_rows)
- exception = ""
- config_names = extract["configs"].keys()
- split_names = set()
- for config_name, config in extract["configs"].items():
- for split_name, split in config["splits"].items():
- split_names.add(split_name)
- if len(split["rows"]) != num_rows:
- raise ValueError(
- f"{len(split['rows'])} rows instead of {num_rows} in {config_name} - {split_name}"
- )
- success = True
- message = ""
- except Exception as err:
- exception = str(type(err).__name__)
- message = str(err)
- config_names = []
- split_names = []
- results.append(
- {
- "dataset_id": dataset_id,
- "success": success,
- "exception": exception,
- "message": message,
- "all_config_names": list(config_names),
- "all_split_names": list(split_names),
- }
- )
+ print("Get config names for all the datasets")
+ config_names_reports = process_map(
+ get_config_names_report,
+ dataset_ids,
+ )
+
+ print("Get split names for all the pairs (dataset_id, config_name)")
+ split_names_dataset_ids = []
+ split_names_config_names = []
+ for report in config_names_reports:
+ for config_name in report["config_names"]:
+ # reports with an exception will not contribute to the lists since config_names is empty
+ split_names_dataset_ids.append(report["dataset_id"])
+ split_names_config_names.append(config_name)
+ split_names_reports = process_map(
+ get_split_names_report, split_names_dataset_ids, split_names_config_names
+ )
+
+ print("Get rows extract for all the tuples (dataset_id, config_name, split_name)")
+ rows_dataset_ids = []
+ rows_config_names = []
+ rows_split_names = []
+ for report in split_names_reports:
+ for split_name in report["split_names"]:
+ # reports with an exception will not contribute to the lists since split_names is empty
+ rows_dataset_ids.append(report["dataset_id"])
+ rows_config_names.append(report["config_name"])
+ rows_split_names.append(split_name)
+ rows_reports = process_map(
+ get_rows_report, rows_dataset_ids, rows_config_names, rows_split_names
+ )
+
+ results = {
+ "config_names_reports": config_names_reports,
+ "split_names_reports": split_names_reports,
+ "rows_reports": rows_reports,
+ }
|
|
46432aef51cf08fe89b649541afb3324ca0ec921
|
Sylvain Lesage
| 2021-07-29T08:40:25 |
feat: 🎸 return all the splits for all the configs of a dataset
|
diff --git a/quality/test_datasets.py b/quality/test_datasets.py
index 2db7a4fa..a9ddf7b2 100644
--- a/quality/test_datasets.py
+++ b/quality/test_datasets.py
@@ -7 +7 @@ from datasets import list_datasets
-from datasets_preview_backend.main import get_dataset_extract
+from datasets_preview_backend.main import extract_dataset_rows
@@ -11 +11 @@ def export_all_datasets_exceptions():
- num_rows = 100
+ num_rows = 10
@@ -16 +16 @@ def export_all_datasets_exceptions():
- for dataset_id in tqdm(dataset_ids[0:2]):
+ for dataset_id in tqdm(dataset_ids):
@@ -20 +20 @@ def export_all_datasets_exceptions():
- extract = get_dataset_extract(dataset_id, num_rows)
+ extract = extract_dataset_rows(dataset_id, num_rows)
@@ -22,2 +22,9 @@ def export_all_datasets_exceptions():
- if len(extract) != num_rows:
- raise f"{len(extract)} rows instead of {num_rows}"
+ config_names = extract["configs"].keys()
+ split_names = set()
+ for config_name, config in extract["configs"].items():
+ for split_name, split in config["splits"].items():
+ split_names.add(split_name)
+ if len(split["rows"]) != num_rows:
+ raise ValueError(
+ f"{len(split['rows'])} rows instead of {num_rows} in {config_name} - {split_name}"
+ )
@@ -28,0 +36,2 @@ def export_all_datasets_exceptions():
+ config_names = []
+ split_names = []
@@ -34,0 +44,2 @@ def export_all_datasets_exceptions():
+ "all_config_names": list(config_names),
+ "all_split_names": list(split_names),
diff --git a/src/datasets_preview_backend/main.py b/src/datasets_preview_backend/main.py
index db197807..798fc570 100644
--- a/src/datasets_preview_backend/main.py
+++ b/src/datasets_preview_backend/main.py
@@ -13 +13,8 @@ import uvicorn
-from datasets import load_dataset, prepare_module, import_main_class
+from datasets import (
+ Dataset,
+ IterableDataset,
+ load_dataset,
+ load_dataset_builder,
+ prepare_module,
+ import_main_class,
+)
@@ -35,0 +43,11 @@ class ConfigNameError(Error):
+class SplitError(Error):
+ """Exception raised for errors in the split name.
+
+ Attributes:
+ split -- the erroneous dataset split
+ """
+
+ def __init__(self, split):
+ self.split = split
+
+
@@ -64 +82 @@ def get_dataset_config_names(dataset_id: str) -> List[str]:
-def get_dataset_config_extract(dataset_id: str, config_name: str, num_rows: int):
+def get_config_splits(dataset_id: str, config_name: str) -> List[str]:
@@ -66,3 +84 @@ def get_dataset_config_extract(dataset_id: str, config_name: str, num_rows: int)
- dataset = load_dataset(
- dataset_id, name=config_name, split="train", streaming=True
- )
+ builder = load_dataset_builder(dataset_id, name=config_name)
@@ -74,0 +91 @@ def get_dataset_config_extract(dataset_id: str, config_name: str, num_rows: int)
+ return builder.info.splits.keys()
@@ -76 +92,0 @@ def get_dataset_config_extract(dataset_id: str, config_name: str, num_rows: int)
- logging.debug(f"Dataset loaded")
@@ -78 +94,19 @@ def get_dataset_config_extract(dataset_id: str, config_name: str, num_rows: int)
- rows = list(dataset.take(num_rows))
+def extract_split_rows(dataset_id: str, config_name: str, split: str, num_rows: int):
+ logging.debug(
+ f"asked for {num_rows} first rows of dataset {dataset_id} - {config_name} - {split}"
+ )
+
+ try:
+ dataset: IterableDataset = load_dataset(
+ dataset_id, name=config_name, split=split, streaming=True
+ )
+ except ValueError as err:
+ message = str(err)
+ if message.startswith(f"BuilderConfig {config_name} not found"):
+ raise ConfigNameError(config_name=config_name)
+ elif message.startswith(f'Unknown split "{split}".') or message.startswith(
+ f"Bad split: {split}."
+ ):
+ raise SplitError(split=split)
+ else:
+ raise
@@ -79,0 +114 @@ def get_dataset_config_extract(dataset_id: str, config_name: str, num_rows: int)
+ rows = list(dataset.take(num_rows))
@@ -82 +117 @@ def get_dataset_config_extract(dataset_id: str, config_name: str, num_rows: int)
- f"could not read all the required rows ({len(rows)} / {num_rows})"
+ f"could not read all the required rows ({len(rows)} / {num_rows}) from dataset {dataset_id} - {config_name} - {split}"
@@ -85 +120,23 @@ def get_dataset_config_extract(dataset_id: str, config_name: str, num_rows: int)
- return {"dataset_id": dataset_id, "config_name": config_name, "rows": rows}
+ return {
+ "dataset_id": dataset_id,
+ "config_name": config_name,
+ "split": split,
+ "rows": rows,
+ }
+
+
+def extract_config_rows(dataset_id: str, config_name: str, num_rows: int):
+ logging.debug(
+ f"asked for {num_rows} first rows of dataset {dataset_id} - {config_name}"
+ )
+
+ splits = get_config_splits(dataset_id, config_name)
+
+ return {
+ "dataset_id": dataset_id,
+ "config_name": config_name,
+ "splits": {
+ split: extract_split_rows(dataset_id, config_name, split, num_rows)
+ for split in splits
+ },
+ }
@@ -88,3 +145,2 @@ def get_dataset_config_extract(dataset_id: str, config_name: str, num_rows: int)
-def get_dataset_extract(dataset_id: str, num_rows: int):
- # TODO: manage splits
- logging.debug(f"Asked for {num_rows} first rows of dataset {dataset_id}")
+def extract_dataset_rows(dataset_id: str, num_rows: int):
+ logging.debug(f"asked for {num_rows} first rows of dataset {dataset_id}")
@@ -97 +153 @@ def get_dataset_extract(dataset_id: str, num_rows: int):
- config_name: get_dataset_config_extract(dataset_id, config_name, num_rows)
+ config_name: extract_config_rows(dataset_id, config_name, num_rows)
@@ -110 +166 @@ async def extract(request: Request):
- return JSONResponse(get_dataset_extract(dataset_id, num_rows))
+ return JSONResponse(extract_dataset_rows(dataset_id, num_rows))
diff --git a/tests/test_main.py b/tests/test_main.py
index 70b4db62..28c369c5 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -5 +5 @@ from datasets_preview_backend.main import (
- get_dataset_extract,
+ SplitError,
@@ -7 +7,4 @@ from datasets_preview_backend.main import (
- get_dataset_config_extract,
+ get_config_splits,
+ extract_dataset_rows,
+ extract_config_rows,
+ extract_split_rows,
@@ -21 +24,12 @@ def test_get_configs():
-def test_extract_without_config():
+def test_get_splits(): # sourcery skip: extract-duplicate-method
+ splits = get_config_splits("acronym_identification", None)
+ assert len(splits) == 3
+ assert "train" in splits
+
+ splits = get_config_splits("glue", "ax")
+ assert len(splits) == 1
+ assert "test" in splits
+ assert "train" not in splits
+
+
+def test_extract_split_rows():
@@ -23,0 +38 @@ def test_extract_without_config():
+ split = "train"
@@ -25 +40 @@ def test_extract_without_config():
- extract = get_dataset_config_extract(dataset_id, config_name, num_rows)
+ extract = extract_split_rows(dataset_id, config_name, split, num_rows)
@@ -27,0 +43 @@ def test_extract_without_config():
+ assert "split" in extract and extract["split"] == split
@@ -34,15 +50 @@ def test_extract_without_config():
-def test_extract_with_config():
- dataset_id = "glue"
- config_name = "cola"
- num_rows = 100
- extract = get_dataset_config_extract(dataset_id, config_name, num_rows)
- assert "config_name" in extract and extract["config_name"] == config_name
- rows = extract["rows"]
- assert len(rows) == 100
- assert (
- rows[0]["sentence"]
- == "Our friends won't buy this analysis, let alone the next one we propose."
- )
-
-
-def test_extract_num_rows():
+def test_extract_split_rows_num_rows():
@@ -50,0 +53 @@ def test_extract_num_rows():
+ split = "train"
@@ -52 +55 @@ def test_extract_num_rows():
- extract = get_dataset_config_extract(dataset_id, config_name, num_rows)
+ extract = extract_split_rows(dataset_id, config_name, split, num_rows)
@@ -60 +63,3 @@ def test_extract_unknown_config():
- get_dataset_config_extract("glue", "doesnotexist", 100)
+ extract_config_rows("glue", "doesnotexist", 100)
+ with pytest.raises(ConfigNameError):
+ extract_split_rows("glue", "doesnotexist", "train", 100)
@@ -64,4 +69,2 @@ def test_extract_unknown_split():
- # "aeslc" dataset has no "train" split, while "train" is the hardcoded split used to download
- extract = get_dataset_config_extract("aeslc", None, 100)
- rows = extract["rows"]
- assert len(rows) == 0
+ with pytest.raises(SplitError):
+ extract_split_rows("glue", "ax", "train", 100)
@@ -70 +73,21 @@ def test_extract_unknown_split():
-def test_extract_dataset_without_config():
+def test_extract_config_rows():
+ dataset_id = "glue"
+ config_name = "cola"
+ num_rows = 100
+ extract = extract_config_rows(dataset_id, config_name, num_rows)
+ assert "dataset_id" in extract and extract["dataset_id"] == dataset_id
+ assert "config_name" in extract and extract["config_name"] == config_name
+ assert "splits" in extract
+ splits = extract["splits"]
+ assert len(splits) == 3
+ assert "train" in splits
+ split = splits["train"]
+ rows = split["rows"]
+ assert len(rows) == 100
+ assert (
+ rows[0]["sentence"]
+ == "Our friends won't buy this analysis, let alone the next one we propose."
+ )
+
+
+def test_extract_dataset():
@@ -73 +96 @@ def test_extract_dataset_without_config():
- extract = get_dataset_extract(dataset_id, num_rows)
+ extract = extract_dataset_rows(dataset_id, num_rows)
@@ -79,2 +102 @@ def test_extract_dataset_without_config():
- assert len(configs[None]["rows"]) == num_rows
-
+ assert len(configs[None]["splits"]["train"]["rows"]) == num_rows
@@ -82 +103,0 @@ def test_extract_dataset_without_config():
-def test_extract_dataset_with_configs():
@@ -85 +106 @@ def test_extract_dataset_with_configs():
- extract = get_dataset_extract(dataset_id, num_rows)
+ extract = extract_dataset_rows(dataset_id, num_rows)
@@ -89,2 +110,2 @@ def test_extract_dataset_with_configs():
- assert len(configs["adversarialQA"]["rows"]) == num_rows
- assert configs["adversarialQA"]["rows"][0]["title"] == "Brain"
+ assert len(configs["adversarialQA"]["splits"]["train"]["rows"]) == num_rows
+ assert configs["adversarialQA"]["splits"]["train"]["rows"][0]["title"] == "Brain"
@@ -95 +116 @@ def test_extract_unknown_dataset():
- get_dataset_extract("doesnotexist", 100)
+ extract_dataset_rows("doesnotexist", 100)
|
|
6ed484a65332ccab5b22c08d6e1d6095c704d9a4
|
Sylvain Lesage
| 2021-07-28T15:39:54 |
feat: 🎸 (extract) change structure to return all configurations
|
diff --git a/src/datasets_preview_backend/main.py b/src/datasets_preview_backend/main.py
index f38087af..db197807 100644
--- a/src/datasets_preview_backend/main.py
+++ b/src/datasets_preview_backend/main.py
@@ -3,0 +4,3 @@ import os
+from datasets.builder import DatasetBuilder
+from typing import List
+
@@ -10 +13 @@ import uvicorn
-from datasets import load_dataset
+from datasets import load_dataset, prepare_module, import_main_class
@@ -15,0 +19,17 @@ DEFAULT_EXTRACT_ROWS_LIMIT = 100
+class Error(Exception):
+ """Base class for exceptions in this module."""
+
+ pass
+
+
+class ConfigNameError(Error):
+ """Exception raised for errors in the config name.
+
+ Attributes:
+ config_name -- the erroneous dataset config_name
+ """
+
+ def __init__(self, config_name):
+ self.config_name = config_name
+
+
@@ -34,3 +54,8 @@ async def healthcheck(request: Request):
-def get_dataset_extract(dataset_id: str, num_rows: int):
- # TODO: manage splits and configs
- logging.debug(f"Asked for {num_rows} first rows of dataset {dataset_id}")
+def get_dataset_config_names(dataset_id: str) -> List[str]:
+ module_path, *_ = prepare_module(dataset_id, dataset=True)
+ builder_cls = import_main_class(module_path, dataset=True)
+ config_names = [c.name for c in builder_cls.BUILDER_CONFIGS] or [None]
+ logging.debug(
+ f"The dataset builder has {len(config_names)} configs: {config_names}"
+ )
+ return config_names
@@ -38 +63,12 @@ def get_dataset_extract(dataset_id: str, num_rows: int):
- dataset = load_dataset(dataset_id, split="train", streaming=True)
+
+def get_dataset_config_extract(dataset_id: str, config_name: str, num_rows: int):
+ try:
+ dataset = load_dataset(
+ dataset_id, name=config_name, split="train", streaming=True
+ )
+ except ValueError as err:
+ message = str(err)
+ if message.startswith(f"BuilderConfig {config_name} not found"):
+ raise ConfigNameError(config_name=config_name)
+ else:
+ raise
@@ -49 +85,16 @@ def get_dataset_extract(dataset_id: str, num_rows: int):
- return rows
+ return {"dataset_id": dataset_id, "config_name": config_name, "rows": rows}
+
+
+def get_dataset_extract(dataset_id: str, num_rows: int):
+ # TODO: manage splits
+ logging.debug(f"Asked for {num_rows} first rows of dataset {dataset_id}")
+
+ config_names = get_dataset_config_names(dataset_id)
+
+ return {
+ "dataset_id": dataset_id,
+ "configs": {
+ config_name: get_dataset_config_extract(dataset_id, config_name, num_rows)
+ for config_name in config_names
+ },
+ }
diff --git a/tests/test_main.py b/tests/test_main.py
index 116b35bc..70b4db62 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -2 +1,0 @@ import pytest
-from datasets import list_datasets
@@ -4 +3,6 @@ from datasets import list_datasets
-from datasets_preview_backend.main import get_dataset_extract
+from datasets_preview_backend.main import (
+ ConfigNameError,
+ get_dataset_extract,
+ get_dataset_config_names,
+ get_dataset_config_extract,
+)
@@ -7,4 +11,35 @@ from datasets_preview_backend.main import get_dataset_extract
-def test_extract_ok():
- extract = get_dataset_extract("acronym_identification", 100)
- assert len(extract) == 100
- assert extract[0]["tokens"][0] == "What"
+def test_get_configs():
+ config_names = get_dataset_config_names("acronym_identification")
+ assert len(config_names) == 1
+ assert config_names[0] is None
+
+ config_names = get_dataset_config_names("glue")
+ assert len(config_names) == 12
+ assert "cola" in config_names
+
+
+def test_extract_without_config():
+ dataset_id = "acronym_identification"
+ config_name = None
+ num_rows = 100
+ extract = get_dataset_config_extract(dataset_id, config_name, num_rows)
+ assert "dataset_id" in extract and extract["dataset_id"] == dataset_id
+ assert "config_name" in extract and extract["config_name"] == config_name
+ assert "rows" in extract
+ rows = extract["rows"]
+ assert len(rows) == num_rows
+ assert rows[0]["tokens"][0] == "What"
+
+
+def test_extract_with_config():
+ dataset_id = "glue"
+ config_name = "cola"
+ num_rows = 100
+ extract = get_dataset_config_extract(dataset_id, config_name, num_rows)
+ assert "config_name" in extract and extract["config_name"] == config_name
+ rows = extract["rows"]
+ assert len(rows) == 100
+ assert (
+ rows[0]["sentence"]
+ == "Our friends won't buy this analysis, let alone the next one we propose."
+ )
@@ -14,3 +49,12 @@ def test_extract_num_rows():
- extract = get_dataset_extract("acronym_identification", 20)
- assert len(extract) == 20
- assert extract[0]["tokens"][0] == "What"
+ dataset_id = "acronym_identification"
+ config_name = None
+ num_rows = 20
+ extract = get_dataset_config_extract(dataset_id, config_name, num_rows)
+ rows = extract["rows"]
+ assert len(rows) == 20
+ assert rows[0]["tokens"][0] == "What"
+
+
+def test_extract_unknown_config():
+ with pytest.raises(ConfigNameError):
+ get_dataset_config_extract("glue", "doesnotexist", 100)
@@ -21,2 +65,3 @@ def test_extract_unknown_split():
- extract = get_dataset_extract("aeslc", 100)
- assert len(extract) == 0
+ extract = get_dataset_config_extract("aeslc", None, 100)
+ rows = extract["rows"]
+ assert len(rows) == 0
@@ -25,3 +70,11 @@ def test_extract_unknown_split():
-def test_extract_unknown_model():
- with pytest.raises(FileNotFoundError):
- get_dataset_extract("doesnotexist", 100)
+def test_extract_dataset_without_config():
+ dataset_id = "acronym_identification"
+ num_rows = 100
+ extract = get_dataset_extract(dataset_id, num_rows)
+ assert "dataset_id" in extract and extract["dataset_id"] == dataset_id
+ assert "configs" in extract
+ configs = extract["configs"]
+ assert None in configs
+ assert len(configs) == 1
+ assert len(configs[None]["rows"]) == num_rows
+
@@ -28,0 +82,9 @@ def test_extract_unknown_model():
+def test_extract_dataset_with_configs():
+ dataset_id = "adversarial_qa"
+ num_rows = 100
+ extract = get_dataset_extract(dataset_id, num_rows)
+ configs = extract["configs"]
+ assert len(configs) == 4
+ assert "adversarialQA" in configs
+ assert len(configs["adversarialQA"]["rows"]) == num_rows
+ assert configs["adversarialQA"]["rows"][0]["title"] == "Brain"
@@ -30,3 +92,4 @@ def test_extract_unknown_model():
-def test_extract_subset_not_implemented():
- with pytest.raises(ValueError, match="Config name is missing..*"):
- get_dataset_extract("glue", 100)
+
+def test_extract_unknown_dataset():
+ with pytest.raises(FileNotFoundError):
+ get_dataset_extract("doesnotexist", 100)
|
|
fbcfa5371cd38d24ee012f4f8d44a7bbce4fd640
|
Sylvain Lesage
| 2021-07-28T12:22:56 |
test: 💍 add a script to generate a report of all Exceptions
|
diff --git a/Makefile b/Makefile
index 52b88f2b..08c4239b 100644
--- a/Makefile
+++ b/Makefile
@@ -0,0 +1,2 @@
+.PHONY: install run test quality watch
+
@@ -9,0 +12,3 @@ test:
+quality:
+ poetry run python quality/test_datasets.py
+
diff --git a/poetry.lock b/poetry.lock
index 97716d17..80d02e45 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -76 +76 @@ description = "Atomic file writes."
-category = "main"
+category = "dev"
@@ -283 +283 @@ description = "iniconfig: brain-dead simple config-ini parsing"
-category = "main"
+category = "dev"
@@ -361 +361 @@ description = "plugin and hook calling mechanisms for python"
-category = "main"
+category = "dev"
@@ -372 +372 @@ description = "library with cross-python path, ini-parsing, io, code, log facili
-category = "main"
+category = "dev"
@@ -399 +399 @@ description = "pytest: simple powerful testing with Python"
-category = "main"
+category = "dev"
@@ -503 +503 @@ description = "Python Library for Tom's Obvious, Minimal Language"
-category = "main"
+category = "dev"
@@ -606 +606 @@ python-versions = "^3.8"
-content-hash = "464a59b79e0ffae394c5b77d4bc3d871158d4fb0b6ad8e7c2054f64d0c3c2321"
+content-hash = "7e0e1ab05dac16e809f2bc9bb7c4998f492b5fdcf5bafd5b6f5f289ab2ec06b1"
diff --git a/pyproject.toml b/pyproject.toml
index 0aab884b..de3d95ba 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -12 +11,0 @@ uvicorn = "^0.14.0"
-pytest = "^6.2.4"
@@ -16,0 +16,2 @@ watchdog = {extras = ["watchmedo"], version = "^2.1.3"}
+tqdm = "^4.61.2"
+pytest = "^6.2.4"
diff --git a/quality/test_datasets.py b/quality/test_datasets.py
new file mode 100644
index 00000000..2db7a4fa
--- /dev/null
+++ b/quality/test_datasets.py
@@ -0,0 +1,46 @@
+import json
+import time
+from tqdm import tqdm
+
+from datasets import list_datasets
+
+from datasets_preview_backend.main import get_dataset_extract
+
+
+def export_all_datasets_exceptions():
+ num_rows = 100
+ dataset_ids = list_datasets(with_community_datasets=True)
+
+ results = []
+
+ for dataset_id in tqdm(dataset_ids[0:2]):  # NOTE: debug slice, only the first 2 datasets are processed
+
+ success = False
+ try:
+ extract = get_dataset_extract(dataset_id, num_rows)
+ exception = ""
+ if len(extract) != num_rows:
+ raise Exception(f"{len(extract)} rows instead of {num_rows}")
+ success = True
+ message = ""
+ except Exception as err:
+ exception = str(type(err).__name__)
+ message = str(err)
+ results.append(
+ {
+ "dataset_id": dataset_id,
+ "success": success,
+ "exception": exception,
+ "message": message,
+ }
+ )
+
+ time_string = time.strftime("%Y%m%d-%H%M%S")
+ filename = f"/tmp/datasets-{time_string}.json"
+ with open(filename, "w") as outfile:
+ json.dump(results, outfile, indent=2)
+ print(f"report has been written at {filename}")
+
+
+if __name__ == "__main__":
+ export_all_datasets_exceptions()
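A short sketch of how the resulting report can be summarized afterwards; the filename below is hypothetical (the script prints the real path when it finishes), and only keys written by the script are used:

```python
# Sketch: tally the exception types recorded by export_all_datasets_exceptions.
import json
from collections import Counter

with open("/tmp/datasets-20210728-120000.json") as f:  # hypothetical path
    results = json.load(f)

failures = [r for r in results if not r["success"]]
print(f"{len(failures)}/{len(results)} datasets failed")
print(Counter(r["exception"] for r in failures))
```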
|
|
20e10053a5755c514915dc3d68b7179a429e9439
|
Sylvain Lesage
| 2021-07-28T08:27:25 |
test: 💍 add test on unknown split
|
diff --git a/tests/test_main.py b/tests/test_main.py
index 4baaa624..116b35bc 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -1,0 +2 @@ import pytest
+from datasets import list_datasets
@@ -17,0 +19,6 @@ def test_extract_num_rows():
+def test_extract_unknown_split():
+ # "aeslc" dataset has no "train" split, while "train" is the hardcoded split used to download
+ extract = get_dataset_extract("aeslc", 100)
+ assert len(extract) == 0
+
+
@@ -20 +27 @@ def test_extract_unknown_model():
- extract = get_dataset_extract("doesnotexist", 100)
+ get_dataset_extract("doesnotexist", 100)
@@ -25 +32 @@ def test_extract_subset_not_implemented():
- extract = get_dataset_extract("glue", 100)
+ get_dataset_extract("glue", 100)
|
|
2ec0321b4627e918789d26eb6e827d35d6923822
|
Sylvain Lesage
| 2021-07-27T14:29:26 |
fix: 🐛 fix variable names: dataset_id, not model_id!
|
diff --git a/src/datasets_preview_backend/main.py b/src/datasets_preview_backend/main.py
index 3815a8e0..f38087af 100644
--- a/src/datasets_preview_backend/main.py
+++ b/src/datasets_preview_backend/main.py
@@ -34,3 +34,3 @@ async def healthcheck(request: Request):
-def get_dataset_extract(model_id: str, num_rows: int):
- # TODO: manage splits and submodels
- logging.debug(f"Asked for {num_rows} first rows of model {model_id}")
+def get_dataset_extract(dataset_id: str, num_rows: int):
+ # TODO: manage splits and configs
+ logging.debug(f"Asked for {num_rows} first rows of dataset {dataset_id}")
@@ -38 +38 @@ def get_dataset_extract(model_id: str, num_rows: int):
- dataset = load_dataset(model_id, split="train", streaming=True)
+ dataset = load_dataset(dataset_id, split="train", streaming=True)
@@ -53 +53 @@ async def extract(request: Request):
- model_id: str = request.path_params["model_id"]
+ dataset_id: str = request.path_params["dataset_id"]
@@ -59 +59 @@ async def extract(request: Request):
- return JSONResponse(get_dataset_extract(model_id, num_rows))
+ return JSONResponse(get_dataset_extract(dataset_id, num_rows))
@@ -61 +61,2 @@ async def extract(request: Request):
- return PlainTextResponse("Model data could not be found", status_code=404)
+ return PlainTextResponse("Dataset not found", status_code=404)
+ # other exceptions will generate a 500 response
@@ -68 +69 @@ def start():
- Route("/{model_id:path}/extract", endpoint=extract),
+ Route("/{dataset_id:path}/extract", endpoint=extract),
|
|
b1d8380d60626c4ea5f04d58a6a2518d24c706b1
|
Sylvain Lesage
| 2021-07-27T11:55:30 |
feat: 🎸 return a 404 error if the model could not be found
|
diff --git a/src/datasets_preview_backend/main.py b/src/datasets_preview_backend/main.py
index fb478595..3815a8e0 100644
--- a/src/datasets_preview_backend/main.py
+++ b/src/datasets_preview_backend/main.py
@@ -37,5 +37,2 @@ def get_dataset_extract(model_id: str, num_rows: int):
- try:
- dataset = load_dataset(model_id, split="train", streaming=True)
- except:
- logging.warning(f"Dataset could not be loaded.")
- return []
+
+ dataset = load_dataset(model_id, split="train", streaming=True)
@@ -61 +58,4 @@ async def extract(request: Request):
- return JSONResponse(get_dataset_extract(model_id, num_rows))
+ try:
+ return JSONResponse(get_dataset_extract(model_id, num_rows))
+ except FileNotFoundError as e:
+ return PlainTextResponse("Model data could not be found", status_code=404)
diff --git a/tests/test_main.py b/tests/test_main.py
index 0da7c372..4baaa624 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -0,0 +1,2 @@
+import pytest
+
@@ -9,0 +12,11 @@ def test_extract_ok():
+def test_extract_num_rows():
+ extract = get_dataset_extract("acronym_identification", 20)
+ assert len(extract) == 20
+ assert extract[0]["tokens"][0] == "What"
+
+
+def test_extract_unknown_model():
+ with pytest.raises(FileNotFoundError):
+ extract = get_dataset_extract("doesnotexist", 100)
+
+
@@ -11,2 +24,2 @@ def test_extract_subset_not_implemented():
- extract = get_dataset_extract("glue", 100)
- assert len(extract) == 0
+ with pytest.raises(ValueError, match="Config name is missing..*"):
+ extract = get_dataset_extract("glue", 100)
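A sketch of the new behavior seen from a client, assuming the app is running locally on port 8000 and that the requests library is available:

```python
# Sketch: an unknown dataset now yields a 404 instead of a generic 500.
import requests

r = requests.get("http://localhost:8000/doesnotexist/extract", params={"rows": 10})
print(r.status_code, r.text)  # expected: 404 "Model data could not be found"
```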
|
|
0540ae53caccc0c0e3819cb0691a934227aa170d
|
Sylvain Lesage
| 2021-07-27T11:35:25 |
chore: 🤖 use logging functions
|
diff --git a/src/datasets_preview_backend/main.py b/src/datasets_preview_backend/main.py
index fc37b884..fb478595 100644
--- a/src/datasets_preview_backend/main.py
+++ b/src/datasets_preview_backend/main.py
@@ -0,0 +1 @@
+import logging
@@ -35 +36 @@ def get_dataset_extract(model_id: str, num_rows: int):
- print(f"Asked for {num_rows} first rows of model {model_id}")
+ logging.debug(f"Asked for {num_rows} first rows of model {model_id}")
@@ -39 +40 @@ def get_dataset_extract(model_id: str, num_rows: int):
- print(f"WARN Dataset could not be loaded.")
+ logging.warning(f"Dataset could not be loaded.")
@@ -42 +43 @@ def get_dataset_extract(model_id: str, num_rows: int):
- print(f"Dataset loaded")
+ logging.debug(f"Dataset loaded")
@@ -47 +48,3 @@ def get_dataset_extract(model_id: str, num_rows: int):
- print(f"WARN could not read all the required rows ({len(rows)} / {num_rows})")
+ logging.warning(
+ f"could not read all the required rows ({len(rows)} / {num_rows})"
+ )
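The calls above go through the root logger, so by default only warnings show. A minimal sketch of enabling the debug output as well:

```python
# Sketch: configure the root logger so logging.debug(...) lines are visible.
import logging

logging.basicConfig(level=logging.DEBUG)
```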
|
|
19d0d3416c8e7d5da61a4f19c2f8bc805d389bc1
|
Sylvain Lesage
| 2021-07-27T11:29:25 |
test: 💍 add two tests on get_dataset_extract
|
diff --git a/Makefile b/Makefile
index 2fdd0a84..52b88f2b 100644
--- a/Makefile
+++ b/Makefile
@@ -8 +8 @@ test:
- poetry run pytest
+ poetry run python -m pytest
diff --git a/src/datasets_preview_backend/main.py b/src/datasets_preview_backend/main.py
index 2a2df864..fc37b884 100644
--- a/src/datasets_preview_backend/main.py
+++ b/src/datasets_preview_backend/main.py
@@ -39 +39 @@ def get_dataset_extract(model_id: str, num_rows: int):
- print(f"Dataset could not be loaded.")
+ print(f"WARN Dataset could not be loaded.")
diff --git a/tests/test_main.py b/tests/test_main.py
index 1030a2e3..0da7c372 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -1,2 +1 @@
-def func(x):
- return x + 1
+from datasets_preview_backend.main import get_dataset_extract
@@ -5,2 +4,9 @@ def func(x):
-def test_answer():
- assert func(3) == 4
+def test_extract_ok():
+ extract = get_dataset_extract("acronym_identification", 100)
+ assert len(extract) == 100
+ assert extract[0]["tokens"][0] == "What"
+
+
+def test_extract_subset_not_implemented():
+ extract = get_dataset_extract("glue", 100)
+ assert len(extract) == 0
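For reference, a small sketch of running the same suite programmatically, equivalent to the `poetry run python -m pytest` Makefile target above:

```python
# Sketch: invoke pytest from Python and propagate its exit code.
import pytest

raise SystemExit(pytest.main(["tests/test_main.py"]))
```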
|
|
89fc8fb8687bb17cc49bc678924d022038dbdf1e
|
Sylvain Lesage
| 2021-07-27T11:15:33 |
test: 💍 add pytest first dummy test
|
diff --git a/Makefile b/Makefile
index e808d478..2fdd0a84 100644
--- a/Makefile
+++ b/Makefile
@@ -6,0 +7,3 @@ run:
+test:
+ poetry run pytest
+
diff --git a/poetry.lock b/poetry.lock
index 9962516f..97716d17 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -71,0 +72,8 @@ python-versions = ">=3.5.3"
+[[package]]
+name = "atomicwrites"
+version = "1.4.0"
+description = "Atomic file writes."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+
@@ -179 +187 @@ benchmarks = ["numpy (==1.18.5)", "tensorflow (==2.3.0)", "torch (==1.6.0)", "tr
-dev = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[server,s3] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "wget (>=3.2)", "pytorch-nlp (==0.5.0)", "pytorch-lightning", "fastBPE (==0.1.0)", "fairseq", "black (==21.4b0)", "flake8 (==3.7.9)", "isort", "pyyaml (>=5.3.1)", "importlib-resources"]
+dev = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[s3,server] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "wget (>=3.2)", "pytorch-nlp (==0.5.0)", "pytorch-lightning", "fastBPE (==0.1.0)", "fairseq", "black (==21.4b0)", "flake8 (==3.7.9)", "isort", "pyyaml (>=5.3.1)", "importlib-resources"]
@@ -186 +194 @@ tensorflow_gpu = ["tensorflow-gpu (>=2.2.0)"]
-tests = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[server,s3] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "wget (>=3.2)", "pytorch-nlp (==0.5.0)", "pytorch-lightning", "fastBPE (==0.1.0)", "fairseq", "importlib-resources"]
+tests = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[s3,server] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "wget (>=3.2)", "pytorch-nlp (==0.5.0)", "pytorch-lightning", "fastBPE (==0.1.0)", "fairseq", "importlib-resources"]
@@ -270,0 +279,8 @@ python-versions = ">=3.5"
+[[package]]
+name = "iniconfig"
+version = "1.1.1"
+description = "iniconfig: brain-dead simple config-ini parsing"
+category = "main"
+optional = false
+python-versions = "*"
+
@@ -340,0 +357,19 @@ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
+[[package]]
+name = "pluggy"
+version = "0.13.1"
+description = "plugin and hook calling mechanisms for python"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+
+[package.extras]
+dev = ["pre-commit", "tox"]
+
+[[package]]
+name = "py"
+version = "1.10.0"
+description = "library with cross-python path, ini-parsing, io, code, log facilities"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+
@@ -359,0 +395,21 @@ python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
+[[package]]
+name = "pytest"
+version = "6.2.4"
+description = "pytest: simple powerful testing with Python"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""}
+attrs = ">=19.2.0"
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+iniconfig = "*"
+packaging = "*"
+pluggy = ">=0.12,<1.0.0a1"
+py = ">=1.8.2"
+toml = "*"
+
+[package.extras]
+testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"]
+
@@ -442,0 +499,8 @@ full = ["itsdangerous", "jinja2", "python-multipart", "pyyaml", "requests", "gra
+[[package]]
+name = "toml"
+version = "0.10.2"
+description = "Python Library for Tom's Obvious, Minimal Language"
+category = "main"
+optional = false
+python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
+
@@ -542 +606 @@ python-versions = "^3.8"
-content-hash = "09367b69976c42be1e0e288c128e517b37bad5cd596824332f4dc371ee26d21d"
+content-hash = "464a59b79e0ffae394c5b77d4bc3d871158d4fb0b6ad8e7c2054f64d0c3c2321"
@@ -603,0 +668,4 @@ async-timeout = [
+atomicwrites = [
+ {file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"},
+ {file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"},
+]
@@ -659,0 +728,4 @@ idna = [
+iniconfig = [
+ {file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"},
+ {file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"},
+]
@@ -776,0 +849,8 @@ pathspec = [
+pluggy = [
+ {file = "pluggy-0.13.1-py2.py3-none-any.whl", hash = "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"},
+ {file = "pluggy-0.13.1.tar.gz", hash = "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0"},
+]
+py = [
+ {file = "py-1.10.0-py2.py3-none-any.whl", hash = "sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a"},
+ {file = "py-1.10.0.tar.gz", hash = "sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3"},
+]
@@ -807,0 +888,4 @@ pyparsing = [
+pytest = [
+ {file = "pytest-6.2.4-py3-none-any.whl", hash = "sha256:91ef2131a9bd6be8f76f1f08eac5c5317221d6ad1e143ae03894b862e8976890"},
+ {file = "pytest-6.2.4.tar.gz", hash = "sha256:50bcad0a0b9c5a72c8e4e7c9855a3ad496ca6a881a3641b4260605450772c54b"},
+]
@@ -905,0 +990,4 @@ starlette = [
+toml = [
+ {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"},
+ {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"},
+]
diff --git a/pyproject.toml b/pyproject.toml
index 3395aa2a..0aab884b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -11,0 +12 @@ uvicorn = "^0.14.0"
+pytest = "^6.2.4"
diff --git a/tests/test_main.py b/tests/test_main.py
new file mode 100644
index 00000000..1030a2e3
--- /dev/null
+++ b/tests/test_main.py
@@ -0,0 +1,6 @@
+def func(x):
+ return x + 1
+
+
+def test_answer():
+ assert func(3) == 4
|
|
296d49c8eb0a2cc7a77da40007e2e8f8f4b2ed12
|
Sylvain Lesage
| 2021-07-27T11:09:46 |
chore: 🤖 unversion local poetry configuration
|
diff --git a/.gitignore b/.gitignore
index 038cb807..e2cdb79d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,0 +8 @@
+# local configurations
@@ -8,0 +10 @@
+poetry.toml
diff --git a/poetry.toml b/poetry.toml
deleted file mode 100644
index ab1033bd..00000000
--- a/poetry.toml
+++ /dev/null
@@ -1,2 +0,0 @@
-[virtualenvs]
-in-project = true
|
|
18972f288407496a60c94e16b0bb772ae6e05f15
|
Sylvain Lesage
| 2021-07-27T11:08:58 |
chore: 🤖 put the code inside src/
|
diff --git a/Makefile b/Makefile
index da691fdd..e808d478 100644
--- a/Makefile
+++ b/Makefile
@@ -5 +5 @@ run:
- poetry run python datasets_preview_backend/main.py
+ poetry run python src/datasets_preview_backend/main.py
@@ -8 +8 @@ watch:
- poetry run watchmedo auto-restart -d datasets_preview_backend -p "*.py" -R python datasets_preview_backend/main.py
+ poetry run watchmedo auto-restart -d src/datasets_preview_backend -p "*.py" -R python src/datasets_preview_backend/main.py
diff --git a/datasets_preview_backend/main.py b/src/datasets_preview_backend/main.py
similarity index 100%
rename from datasets_preview_backend/main.py
rename to src/datasets_preview_backend/main.py
|
|
be73bec406c5f4427815f1a373e38489e51273e8
|
Sylvain Lesage
| 2021-07-27T10:49:57 |
feat: 🎸 return the dataset extract as JSON
|
diff --git a/datasets_preview_backend/main.py b/datasets_preview_backend/main.py
index 6023cb51..2a2df864 100644
--- a/datasets_preview_backend/main.py
+++ b/datasets_preview_backend/main.py
@@ -5 +5 @@ from starlette.requests import Request
-from starlette.responses import PlainTextResponse
+from starlette.responses import PlainTextResponse, JSONResponse
@@ -8,0 +9,2 @@ import uvicorn
+from datasets import load_dataset
+
@@ -30,0 +33,19 @@ async def healthcheck(request: Request):
+def get_dataset_extract(model_id: str, num_rows: int):
+ # TODO: manage splits and submodels
+ print(f"Asked for {num_rows} first rows of model {model_id}")
+ try:
+ dataset = load_dataset(model_id, split="train", streaming=True)
+ except:
+ print(f"Dataset could not be loaded.")
+ return []
+
+ print(f"Dataset loaded")
+
+ rows = list(dataset.take(num_rows))
+
+ if len(rows) != num_rows:
+ print(f"WARN could not read all the required rows ({len(rows)} / {num_rows})")
+
+ return rows
+
+
@@ -33 +54,3 @@ async def extract(request: Request):
- rows = get_int_value(d=request.query_params, key="rows", default=EXTRACT_ROWS_LIMIT)
+ num_rows = get_int_value(
+ d=request.query_params, key="rows", default=EXTRACT_ROWS_LIMIT
+ )
@@ -35 +58 @@ async def extract(request: Request):
- return PlainTextResponse(model_id + "-" + str(rows))
+ return JSONResponse(get_dataset_extract(model_id, num_rows))
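For context, a minimal sketch of the streaming pattern get_dataset_extract relies on, using the `datasets` streaming extra enabled in this commit; the dataset and row count are illustrative:

```python
# Sketch: stream the "train" split and materialize only the first rows.
from datasets import load_dataset

dataset = load_dataset("acronym_identification", split="train", streaming=True)
rows = list(dataset.take(5))
print(len(rows), rows[0]["tokens"][:3])
```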
diff --git a/poetry.lock b/poetry.lock
index 217bd8c9..9962516f 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -0,0 +1,19 @@
+[[package]]
+name = "aiohttp"
+version = "3.7.4.post0"
+description = "Async http client/server framework (asyncio)"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+async-timeout = ">=3.0,<4.0"
+attrs = ">=17.3.0"
+chardet = ">=2.0,<5.0"
+multidict = ">=4.5,<7.0"
+typing-extensions = ">=3.6.5"
+yarl = ">=1.0,<2.0"
+
+[package.extras]
+speedups = ["aiodns", "brotlipy", "cchardet"]
+
@@ -44,0 +64,22 @@ tests = ["pytest", "pytest-asyncio", "mypy (>=0.800)"]
+[[package]]
+name = "async-timeout"
+version = "3.0.1"
+description = "Timeout context manager for asyncio programs"
+category = "main"
+optional = false
+python-versions = ">=3.5.3"
+
+[[package]]
+name = "attrs"
+version = "21.2.0"
+description = "Classes Without Boilerplate"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
+[package.extras]
+dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit"]
+docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"]
+tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface"]
+tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins"]
+
@@ -74,0 +116,8 @@ python-versions = "*"
+[[package]]
+name = "chardet"
+version = "4.0.0"
+description = "Universal encoding detector for Python 2 and 3"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
@@ -113,0 +163 @@ python-versions = "*"
+aiohttp = {version = "*", optional = true, markers = "extra == \"streaming\""}
@@ -129 +179 @@ benchmarks = ["numpy (==1.18.5)", "tensorflow (==2.3.0)", "torch (==1.6.0)", "tr
-dev = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[s3,server] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "wget (>=3.2)", "pytorch-nlp (==0.5.0)", "pytorch-lightning", "fastBPE (==0.1.0)", "fairseq", "black (==21.4b0)", "flake8 (==3.7.9)", "isort", "pyyaml (>=5.3.1)", "importlib-resources"]
+dev = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[server,s3] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "wget (>=3.2)", "pytorch-nlp (==0.5.0)", "pytorch-lightning", "fastBPE (==0.1.0)", "fairseq", "black (==21.4b0)", "flake8 (==3.7.9)", "isort", "pyyaml (>=5.3.1)", "importlib-resources"]
@@ -136 +186 @@ tensorflow_gpu = ["tensorflow-gpu (>=2.2.0)"]
-tests = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[s3,server] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "wget (>=3.2)", "pytorch-nlp (==0.5.0)", "pytorch-lightning", "fastBPE (==0.1.0)", "fairseq", "importlib-resources"]
+tests = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[server,s3] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "wget (>=3.2)", "pytorch-nlp (==0.5.0)", "pytorch-lightning", "fastBPE (==0.1.0)", "fairseq", "importlib-resources"]
@@ -220,0 +271,8 @@ python-versions = ">=3.5"
+[[package]]
+name = "multidict"
+version = "5.1.0"
+description = "multidict implementation"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
@@ -468,0 +527,12 @@ python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
+[[package]]
+name = "yarl"
+version = "1.6.3"
+description = "Yet another URL library"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+idna = ">=2.0"
+multidict = ">=4.0"
+
@@ -472 +542 @@ python-versions = "^3.8"
-content-hash = "4fc8135a2125613a91182281b87f952a90180eb31ecb913e49ec1fede4763afa"
+content-hash = "09367b69976c42be1e0e288c128e517b37bad5cd596824332f4dc371ee26d21d"
@@ -474,0 +545,39 @@ content-hash = "4fc8135a2125613a91182281b87f952a90180eb31ecb913e49ec1fede4763afa
+aiohttp = [
+ {file = "aiohttp-3.7.4.post0-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:3cf75f7cdc2397ed4442594b935a11ed5569961333d49b7539ea741be2cc79d5"},
+ {file = "aiohttp-3.7.4.post0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:4b302b45040890cea949ad092479e01ba25911a15e648429c7c5aae9650c67a8"},
+ {file = "aiohttp-3.7.4.post0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:fe60131d21b31fd1a14bd43e6bb88256f69dfc3188b3a89d736d6c71ed43ec95"},
+ {file = "aiohttp-3.7.4.post0-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:393f389841e8f2dfc86f774ad22f00923fdee66d238af89b70ea314c4aefd290"},
+ {file = "aiohttp-3.7.4.post0-cp36-cp36m-manylinux2014_ppc64le.whl", hash = "sha256:c6e9dcb4cb338d91a73f178d866d051efe7c62a7166653a91e7d9fb18274058f"},
+ {file = "aiohttp-3.7.4.post0-cp36-cp36m-manylinux2014_s390x.whl", hash = "sha256:5df68496d19f849921f05f14f31bd6ef53ad4b00245da3195048c69934521809"},
+ {file = "aiohttp-3.7.4.post0-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:0563c1b3826945eecd62186f3f5c7d31abb7391fedc893b7e2b26303b5a9f3fe"},
+ {file = "aiohttp-3.7.4.post0-cp36-cp36m-win32.whl", hash = "sha256:3d78619672183be860b96ed96f533046ec97ca067fd46ac1f6a09cd9b7484287"},
+ {file = "aiohttp-3.7.4.post0-cp36-cp36m-win_amd64.whl", hash = "sha256:f705e12750171c0ab4ef2a3c76b9a4024a62c4103e3a55dd6f99265b9bc6fcfc"},
+ {file = "aiohttp-3.7.4.post0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:230a8f7e24298dea47659251abc0fd8b3c4e38a664c59d4b89cca7f6c09c9e87"},
+ {file = "aiohttp-3.7.4.post0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:2e19413bf84934d651344783c9f5e22dee452e251cfd220ebadbed2d9931dbf0"},
+ {file = "aiohttp-3.7.4.post0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:e4b2b334e68b18ac9817d828ba44d8fcb391f6acb398bcc5062b14b2cbeac970"},
+ {file = "aiohttp-3.7.4.post0-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:d012ad7911653a906425d8473a1465caa9f8dea7fcf07b6d870397b774ea7c0f"},
+ {file = "aiohttp-3.7.4.post0-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:40eced07f07a9e60e825554a31f923e8d3997cfc7fb31dbc1328c70826e04cde"},
+ {file = "aiohttp-3.7.4.post0-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:209b4a8ee987eccc91e2bd3ac36adee0e53a5970b8ac52c273f7f8fd4872c94c"},
+ {file = "aiohttp-3.7.4.post0-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:14762875b22d0055f05d12abc7f7d61d5fd4fe4642ce1a249abdf8c700bf1fd8"},
+ {file = "aiohttp-3.7.4.post0-cp37-cp37m-win32.whl", hash = "sha256:7615dab56bb07bff74bc865307aeb89a8bfd9941d2ef9d817b9436da3a0ea54f"},
+ {file = "aiohttp-3.7.4.post0-cp37-cp37m-win_amd64.whl", hash = "sha256:d9e13b33afd39ddeb377eff2c1c4f00544e191e1d1dee5b6c51ddee8ea6f0cf5"},
+ {file = "aiohttp-3.7.4.post0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:547da6cacac20666422d4882cfcd51298d45f7ccb60a04ec27424d2f36ba3eaf"},
+ {file = "aiohttp-3.7.4.post0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:af9aa9ef5ba1fd5b8c948bb11f44891968ab30356d65fd0cc6707d989cd521df"},
+ {file = "aiohttp-3.7.4.post0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:64322071e046020e8797117b3658b9c2f80e3267daec409b350b6a7a05041213"},
+ {file = "aiohttp-3.7.4.post0-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:bb437315738aa441251214dad17428cafda9cdc9729499f1d6001748e1d432f4"},
+ {file = "aiohttp-3.7.4.post0-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:e54962802d4b8b18b6207d4a927032826af39395a3bd9196a5af43fc4e60b009"},
+ {file = "aiohttp-3.7.4.post0-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:a00bb73540af068ca7390e636c01cbc4f644961896fa9363154ff43fd37af2f5"},
+ {file = "aiohttp-3.7.4.post0-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:79ebfc238612123a713a457d92afb4096e2148be17df6c50fb9bf7a81c2f8013"},
+ {file = "aiohttp-3.7.4.post0-cp38-cp38-win32.whl", hash = "sha256:515dfef7f869a0feb2afee66b957cc7bbe9ad0cdee45aec7fdc623f4ecd4fb16"},
+ {file = "aiohttp-3.7.4.post0-cp38-cp38-win_amd64.whl", hash = "sha256:114b281e4d68302a324dd33abb04778e8557d88947875cbf4e842c2c01a030c5"},
+ {file = "aiohttp-3.7.4.post0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:7b18b97cf8ee5452fa5f4e3af95d01d84d86d32c5e2bfa260cf041749d66360b"},
+ {file = "aiohttp-3.7.4.post0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:15492a6368d985b76a2a5fdd2166cddfea5d24e69eefed4630cbaae5c81d89bd"},
+ {file = "aiohttp-3.7.4.post0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:bdb230b4943891321e06fc7def63c7aace16095be7d9cf3b1e01be2f10fba439"},
+ {file = "aiohttp-3.7.4.post0-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:cffe3ab27871bc3ea47df5d8f7013945712c46a3cc5a95b6bee15887f1675c22"},
+ {file = "aiohttp-3.7.4.post0-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:f881853d2643a29e643609da57b96d5f9c9b93f62429dcc1cbb413c7d07f0e1a"},
+ {file = "aiohttp-3.7.4.post0-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:a5ca29ee66f8343ed336816c553e82d6cade48a3ad702b9ffa6125d187e2dedb"},
+ {file = "aiohttp-3.7.4.post0-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:17c073de315745a1510393a96e680d20af8e67e324f70b42accbd4cb3315c9fb"},
+ {file = "aiohttp-3.7.4.post0-cp39-cp39-win32.whl", hash = "sha256:932bb1ea39a54e9ea27fc9232163059a0b8855256f4052e776357ad9add6f1c9"},
+ {file = "aiohttp-3.7.4.post0-cp39-cp39-win_amd64.whl", hash = "sha256:02f46fc0e3c5ac58b80d4d56eb0a7c7d97fcef69ace9326289fb9f1955e65cfe"},
+ {file = "aiohttp-3.7.4.post0.tar.gz", hash = "sha256:493d3299ebe5f5a7c66b9819eacdcfbbaaf1a8e84911ddffcdc48888497afecf"},
+]
@@ -490,0 +600,8 @@ asgiref = [
+async-timeout = [
+ {file = "async-timeout-3.0.1.tar.gz", hash = "sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f"},
+ {file = "async_timeout-3.0.1-py3-none-any.whl", hash = "sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3"},
+]
+attrs = [
+ {file = "attrs-21.2.0-py2.py3-none-any.whl", hash = "sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1"},
+ {file = "attrs-21.2.0.tar.gz", hash = "sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb"},
+]
@@ -498,0 +616,4 @@ certifi = [
+chardet = [
+ {file = "chardet-4.0.0-py2.py3-none-any.whl", hash = "sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5"},
+ {file = "chardet-4.0.0.tar.gz", hash = "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa"},
+]
@@ -538,0 +660,39 @@ idna = [
+multidict = [
+ {file = "multidict-5.1.0-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:b7993704f1a4b204e71debe6095150d43b2ee6150fa4f44d6d966ec356a8d61f"},
+ {file = "multidict-5.1.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:9dd6e9b1a913d096ac95d0399bd737e00f2af1e1594a787e00f7975778c8b2bf"},
+ {file = "multidict-5.1.0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281"},
+ {file = "multidict-5.1.0-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:1ab820665e67373de5802acae069a6a05567ae234ddb129f31d290fc3d1aa56d"},
+ {file = "multidict-5.1.0-cp36-cp36m-manylinux2014_ppc64le.whl", hash = "sha256:9436dc58c123f07b230383083855593550c4d301d2532045a17ccf6eca505f6d"},
+ {file = "multidict-5.1.0-cp36-cp36m-manylinux2014_s390x.whl", hash = "sha256:830f57206cc96ed0ccf68304141fec9481a096c4d2e2831f311bde1c404401da"},
+ {file = "multidict-5.1.0-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:2e68965192c4ea61fff1b81c14ff712fc7dc15d2bd120602e4a3494ea6584224"},
+ {file = "multidict-5.1.0-cp36-cp36m-win32.whl", hash = "sha256:2f1a132f1c88724674271d636e6b7351477c27722f2ed789f719f9e3545a3d26"},
+ {file = "multidict-5.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:3a4f32116f8f72ecf2a29dabfb27b23ab7cdc0ba807e8459e59a93a9be9506f6"},
+ {file = "multidict-5.1.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:46c73e09ad374a6d876c599f2328161bcd95e280f84d2060cf57991dec5cfe76"},
+ {file = "multidict-5.1.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a"},
+ {file = "multidict-5.1.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:4b186eb7d6ae7c06eb4392411189469e6a820da81447f46c0072a41c748ab73f"},
+ {file = "multidict-5.1.0-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:3a041b76d13706b7fff23b9fc83117c7b8fe8d5fe9e6be45eee72b9baa75f348"},
+ {file = "multidict-5.1.0-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:051012ccee979b2b06be928a6150d237aec75dd6bf2d1eeeb190baf2b05abc93"},
+ {file = "multidict-5.1.0-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:6a4d5ce640e37b0efcc8441caeea8f43a06addace2335bd11151bc02d2ee31f9"},
+ {file = "multidict-5.1.0-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:5cf3443199b83ed9e955f511b5b241fd3ae004e3cb81c58ec10f4fe47c7dce37"},
+ {file = "multidict-5.1.0-cp37-cp37m-win32.whl", hash = "sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5"},
+ {file = "multidict-5.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:05c20b68e512166fddba59a918773ba002fdd77800cad9f55b59790030bab632"},
+ {file = "multidict-5.1.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:54fd1e83a184e19c598d5e70ba508196fd0bbdd676ce159feb412a4a6664f952"},
+ {file = "multidict-5.1.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:0e3c84e6c67eba89c2dbcee08504ba8644ab4284863452450520dad8f1e89b79"},
+ {file = "multidict-5.1.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:dc862056f76443a0db4509116c5cd480fe1b6a2d45512a653f9a855cc0517456"},
+ {file = "multidict-5.1.0-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:0e929169f9c090dae0646a011c8b058e5e5fb391466016b39d21745b48817fd7"},
+ {file = "multidict-5.1.0-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:d81eddcb12d608cc08081fa88d046c78afb1bf8107e6feab5d43503fea74a635"},
+ {file = "multidict-5.1.0-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:585fd452dd7782130d112f7ddf3473ffdd521414674c33876187e101b588738a"},
+ {file = "multidict-5.1.0-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:37e5438e1c78931df5d3c0c78ae049092877e5e9c02dd1ff5abb9cf27a5914ea"},
+ {file = "multidict-5.1.0-cp38-cp38-win32.whl", hash = "sha256:07b42215124aedecc6083f1ce6b7e5ec5b50047afa701f3442054373a6deb656"},
+ {file = "multidict-5.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:929006d3c2d923788ba153ad0de8ed2e5ed39fdbe8e7be21e2f22ed06c6783d3"},
+ {file = "multidict-5.1.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:b797515be8743b771aa868f83563f789bbd4b236659ba52243b735d80b29ed93"},
+ {file = "multidict-5.1.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:d5c65bdf4484872c4af3150aeebe101ba560dcfb34488d9a8ff8dbcd21079647"},
+ {file = "multidict-5.1.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b47a43177a5e65b771b80db71e7be76c0ba23cc8aa73eeeb089ed5219cdbe27d"},
+ {file = "multidict-5.1.0-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:806068d4f86cb06af37cd65821554f98240a19ce646d3cd24e1c33587f313eb8"},
+ {file = "multidict-5.1.0-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:46dd362c2f045095c920162e9307de5ffd0a1bfbba0a6e990b344366f55a30c1"},
+ {file = "multidict-5.1.0-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:ace010325c787c378afd7f7c1ac66b26313b3344628652eacd149bdd23c68841"},
+ {file = "multidict-5.1.0-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda"},
+ {file = "multidict-5.1.0-cp39-cp39-win32.whl", hash = "sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80"},
+ {file = "multidict-5.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:7df80d07818b385f3129180369079bd6934cf70469f99daaebfac89dca288359"},
+ {file = "multidict-5.1.0.tar.gz", hash = "sha256:25b4e5f22d3a37ddf3effc0710ba692cfc792c2b9edfb9c05aefe823256e84d5"},
+]
@@ -855,0 +1016,39 @@ xxhash = [
+yarl = [
+ {file = "yarl-1.6.3-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434"},
+ {file = "yarl-1.6.3-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478"},
+ {file = "yarl-1.6.3-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:547f7665ad50fa8563150ed079f8e805e63dd85def6674c97efd78eed6c224a6"},
+ {file = "yarl-1.6.3-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:63f90b20ca654b3ecc7a8d62c03ffa46999595f0167d6450fa8383bab252987e"},
+ {file = "yarl-1.6.3-cp36-cp36m-manylinux2014_ppc64le.whl", hash = "sha256:97b5bdc450d63c3ba30a127d018b866ea94e65655efaf889ebeabc20f7d12406"},
+ {file = "yarl-1.6.3-cp36-cp36m-manylinux2014_s390x.whl", hash = "sha256:d8d07d102f17b68966e2de0e07bfd6e139c7c02ef06d3a0f8d2f0f055e13bb76"},
+ {file = "yarl-1.6.3-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:15263c3b0b47968c1d90daa89f21fcc889bb4b1aac5555580d74565de6836366"},
+ {file = "yarl-1.6.3-cp36-cp36m-win32.whl", hash = "sha256:b5dfc9a40c198334f4f3f55880ecf910adebdcb2a0b9a9c23c9345faa9185721"},
+ {file = "yarl-1.6.3-cp36-cp36m-win_amd64.whl", hash = "sha256:b2e9a456c121e26d13c29251f8267541bd75e6a1ccf9e859179701c36a078643"},
+ {file = "yarl-1.6.3-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:ce3beb46a72d9f2190f9e1027886bfc513702d748047b548b05dab7dfb584d2e"},
+ {file = "yarl-1.6.3-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:2ce4c621d21326a4a5500c25031e102af589edb50c09b321049e388b3934eec3"},
+ {file = "yarl-1.6.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:d26608cf178efb8faa5ff0f2d2e77c208f471c5a3709e577a7b3fd0445703ac8"},
+ {file = "yarl-1.6.3-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:4c5bcfc3ed226bf6419f7a33982fb4b8ec2e45785a0561eb99274ebbf09fdd6a"},
+ {file = "yarl-1.6.3-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:4736eaee5626db8d9cda9eb5282028cc834e2aeb194e0d8b50217d707e98bb5c"},
+ {file = "yarl-1.6.3-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:68dc568889b1c13f1e4745c96b931cc94fdd0defe92a72c2b8ce01091b22e35f"},
+ {file = "yarl-1.6.3-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:7356644cbed76119d0b6bd32ffba704d30d747e0c217109d7979a7bc36c4d970"},
+ {file = "yarl-1.6.3-cp37-cp37m-win32.whl", hash = "sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e"},
+ {file = "yarl-1.6.3-cp37-cp37m-win_amd64.whl", hash = "sha256:69ee97c71fee1f63d04c945f56d5d726483c4762845400a6795a3b75d56b6c50"},
+ {file = "yarl-1.6.3-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:e46fba844f4895b36f4c398c5af062a9808d1f26b2999c58909517384d5deda2"},
+ {file = "yarl-1.6.3-cp38-cp38-manylinux1_i686.whl", hash = "sha256:31ede6e8c4329fb81c86706ba8f6bf661a924b53ba191b27aa5fcee5714d18ec"},
+ {file = "yarl-1.6.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71"},
+ {file = "yarl-1.6.3-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:72a660bdd24497e3e84f5519e57a9ee9220b6f3ac4d45056961bf22838ce20cc"},
+ {file = "yarl-1.6.3-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:324ba3d3c6fee56e2e0b0d09bf5c73824b9f08234339d2b788af65e60040c959"},
+ {file = "yarl-1.6.3-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2"},
+ {file = "yarl-1.6.3-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:6d6283d8e0631b617edf0fd726353cb76630b83a089a40933043894e7f6721e2"},
+ {file = "yarl-1.6.3-cp38-cp38-win32.whl", hash = "sha256:9ede61b0854e267fd565e7527e2f2eb3ef8858b301319be0604177690e1a3896"},
+ {file = "yarl-1.6.3-cp38-cp38-win_amd64.whl", hash = "sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a"},
+ {file = "yarl-1.6.3-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:329412812ecfc94a57cd37c9d547579510a9e83c516bc069470db5f75684629e"},
+ {file = "yarl-1.6.3-cp39-cp39-manylinux1_i686.whl", hash = "sha256:c49ff66d479d38ab863c50f7bb27dee97c6627c5fe60697de15529da9c3de724"},
+ {file = "yarl-1.6.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c"},
+ {file = "yarl-1.6.3-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:d5c32c82990e4ac4d8150fd7652b972216b204de4e83a122546dce571c1bdf25"},
+ {file = "yarl-1.6.3-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:d597767fcd2c3dc49d6eea360c458b65643d1e4dbed91361cf5e36e53c1f8c96"},
+ {file = "yarl-1.6.3-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:8aa3decd5e0e852dc68335abf5478a518b41bf2ab2f330fe44916399efedfae0"},
+ {file = "yarl-1.6.3-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:73494d5b71099ae8cb8754f1df131c11d433b387efab7b51849e7e1e851f07a4"},
+ {file = "yarl-1.6.3-cp39-cp39-win32.whl", hash = "sha256:5b883e458058f8d6099e4420f0cc2567989032b5f34b271c0827de9f1079a424"},
+ {file = "yarl-1.6.3-cp39-cp39-win_amd64.whl", hash = "sha256:4953fb0b4fdb7e08b2f3b3be80a00d28c5c8a2056bb066169de00e6501b986b6"},
+ {file = "yarl-1.6.3.tar.gz", hash = "sha256:8a9066529240171b68893d60dca86a763eae2139dd42f42106b03cf4b426bf10"},
+]
diff --git a/pyproject.toml b/pyproject.toml
index c13095e1..3395aa2a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -9 +9 @@ python = "^3.8"
-datasets = "^1.10.2"
+datasets = {extras = ["streaming"], version = "^1.10.2"}
|
|
9473e431bf6d5e9ba8bd42497bf95c71af026185
|
Sylvain Lesage
| 2021-07-26T14:10:19 |
feat: 🎸 load envvars, and add basis for extract/ route
|
diff --git a/datasets_preview_backend/main.py b/datasets_preview_backend/main.py
index ce89814a..6023cb51 100644
--- a/datasets_preview_backend/main.py
+++ b/datasets_preview_backend/main.py
@@ -9 +9,2 @@ import uvicorn
-PORT = 8000
+DEFAULT_PORT = 8000
+DEFAULT_EXTRACT_ROWS_LIMIT = 100
@@ -12 +13,15 @@ PORT = 8000
-def healthcheck(request: Request):
+def get_int_value(d, key, default):
+ try:
+ value = int(d.get(key))
+ except (TypeError, ValueError):
+ value = default
+ return value
+
+
+PORT = get_int_value(d=os.environ, key="DPB_PORT", default=DEFAULT_PORT)
+EXTRACT_ROWS_LIMIT = get_int_value(
+ d=os.environ, key="DPB_EXTRACT_ROWS_LIMIT", default=DEFAULT_EXTRACT_ROWS_LIMIT
+)
+
+
+async def healthcheck(request: Request):
@@ -15,0 +31,7 @@ def healthcheck(request: Request):
+async def extract(request: Request):
+ model_id: str = request.path_params["model_id"]
+ rows = get_int_value(d=request.query_params, key="rows", default=EXTRACT_ROWS_LIMIT)
+
+ return PlainTextResponse(model_id + "-" + str(rows))
+
+
@@ -19,0 +42 @@ def start():
+ Route("/{model_id:path}/extract", endpoint=extract),
@@ -23,2 +46 @@ def start():
- port = os.environ.get("TBL_PORT", PORT)
- uvicorn.run(app, host="0.0.0.0", port=port)
+ uvicorn.run(app, host="0.0.0.0", port=PORT)
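A sketch of the fallback behavior of get_int_value above, assuming the module is importable: a missing key makes `int(None)` raise, so the default is returned, while a valid string is converted:

```python
# Sketch: exercising get_int_value with and without the key present.
from datasets_preview_backend.main import get_int_value

print(get_int_value(d={}, key="rows", default=100))              # -> 100
print(get_int_value(d={"rows": "25"}, key="rows", default=100))  # -> 25
```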
|
|
96cf611122c6b7ad9dd151439fb2724e3c44b49d
|
Sylvain Lesage
| 2021-07-26T11:28:07 |
chore: 🤖 use make for install/run/watch + add watch mode
|
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..da691fdd
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,8 @@
+install:
+ poetry install
+
+run:
+ poetry run python datasets_preview_backend/main.py
+
+watch:
+ poetry run watchmedo auto-restart -d datasets_preview_backend -p "*.py" -R python datasets_preview_backend/main.py
diff --git a/README.md b/README.md
index f5a5101b..9d4bd617 100644
--- a/README.md
+++ b/README.md
@@ -9,0 +10,2 @@ The URL schema is `https://huggingface.co/datasets-preview/:datasetId/extract?ro
+- Poetry
+- make
@@ -16,4 +18 @@ cd datasets-preview-backend
-python -m venv .venv
-source .venv/bin/activate
-pip install .
-deactivate
+make install
@@ -22,2 +20,0 @@ deactivate
-See [INSTALL.md](./INSTALL.md) for details on how it has been deployed.
-
@@ -28,2 +25 @@ cd datasets-preview-backend
-source .venv/bin/activate
-python datasets-preview-backend/main.py
+make run
@@ -35,0 +32,7 @@ Set environment variables to configure the following aspects:
+
+To restart the application on file changes while developing, run:
+
+```bash
+cd datasets-preview-backend
+make watch
+```
diff --git a/poetry.lock b/poetry.lock
index 9be04f83..217bd8c9 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -25,0 +26,8 @@ python-versions = "*"
+[[package]]
+name = "argh"
+version = "0.26.2"
+description = "An unobtrusive argparse wrapper with natural syntax"
+category = "dev"
+optional = false
+python-versions = "*"
+
@@ -312,0 +321,8 @@ python-versions = "*"
+[[package]]
+name = "pyyaml"
+version = "5.4.1"
+description = "YAML parser and emitter for Python"
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
+
@@ -429,0 +446,15 @@ standard = ["websockets (>=9.1)", "httptools (>=0.2.0,<0.3.0)", "watchgod (>=0.6
+[[package]]
+name = "watchdog"
+version = "2.1.3"
+description = "Filesystem events monitoring"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+argh = {version = ">=0.24.1", optional = true, markers = "extra == \"watchmedo\""}
+PyYAML = {version = ">=3.10", optional = true, markers = "extra == \"watchmedo\""}
+
+[package.extras]
+watchmedo = ["PyYAML (>=3.10)", "argh (>=0.24.1)"]
+
@@ -441 +472 @@ python-versions = "^3.8"
-content-hash = "ff8cfb3b658efe0bce6b6031b0ac810904c0bba275af222cb7a070480887bd45"
+content-hash = "4fc8135a2125613a91182281b87f952a90180eb31ecb913e49ec1fede4763afa"
@@ -451,0 +483,4 @@ appdirs = [
+argh = [
+ {file = "argh-0.26.2-py2.py3-none-any.whl", hash = "sha256:a9b3aaa1904eeb78e32394cd46c6f37ac0fb4af6dc488daa58971bdc7d7fcaf3"},
+ {file = "argh-0.26.2.tar.gz", hash = "sha256:e9535b8c84dc9571a48999094fda7f33e63c3f1b74f3e5f3ac0105a58405bb65"},
+]
@@ -620,0 +656,31 @@ pytz = [
+pyyaml = [
+ {file = "PyYAML-5.4.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922"},
+ {file = "PyYAML-5.4.1-cp27-cp27m-win32.whl", hash = "sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393"},
+ {file = "PyYAML-5.4.1-cp27-cp27m-win_amd64.whl", hash = "sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8"},
+ {file = "PyYAML-5.4.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185"},
+ {file = "PyYAML-5.4.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253"},
+ {file = "PyYAML-5.4.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc"},
+ {file = "PyYAML-5.4.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347"},
+ {file = "PyYAML-5.4.1-cp36-cp36m-manylinux2014_s390x.whl", hash = "sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541"},
+ {file = "PyYAML-5.4.1-cp36-cp36m-win32.whl", hash = "sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5"},
+ {file = "PyYAML-5.4.1-cp36-cp36m-win_amd64.whl", hash = "sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df"},
+ {file = "PyYAML-5.4.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018"},
+ {file = "PyYAML-5.4.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63"},
+ {file = "PyYAML-5.4.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa"},
+ {file = "PyYAML-5.4.1-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0"},
+ {file = "PyYAML-5.4.1-cp37-cp37m-win32.whl", hash = "sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b"},
+ {file = "PyYAML-5.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf"},
+ {file = "PyYAML-5.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46"},
+ {file = "PyYAML-5.4.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb"},
+ {file = "PyYAML-5.4.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247"},
+ {file = "PyYAML-5.4.1-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc"},
+ {file = "PyYAML-5.4.1-cp38-cp38-win32.whl", hash = "sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc"},
+ {file = "PyYAML-5.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696"},
+ {file = "PyYAML-5.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77"},
+ {file = "PyYAML-5.4.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183"},
+ {file = "PyYAML-5.4.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122"},
+ {file = "PyYAML-5.4.1-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6"},
+ {file = "PyYAML-5.4.1-cp39-cp39-win32.whl", hash = "sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10"},
+ {file = "PyYAML-5.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db"},
+ {file = "PyYAML-5.4.1.tar.gz", hash = "sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e"},
+]
@@ -700,0 +767,23 @@ uvicorn = [
+watchdog = [
+ {file = "watchdog-2.1.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9628f3f85375a17614a2ab5eac7665f7f7be8b6b0a2a228e6f6a2e91dd4bfe26"},
+ {file = "watchdog-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:acc4e2d5be6f140f02ee8590e51c002829e2c33ee199036fcd61311d558d89f4"},
+ {file = "watchdog-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:85b851237cf3533fabbc034ffcd84d0fa52014b3121454e5f8b86974b531560c"},
+ {file = "watchdog-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a12539ecf2478a94e4ba4d13476bb2c7a2e0a2080af2bb37df84d88b1b01358a"},
+ {file = "watchdog-2.1.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6fe9c8533e955c6589cfea6f3f0a1a95fb16867a211125236c82e1815932b5d7"},
+ {file = "watchdog-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d9456f0433845e7153b102fffeb767bde2406b76042f2216838af3b21707894e"},
+ {file = "watchdog-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fd8c595d5a93abd441ee7c5bb3ff0d7170e79031520d113d6f401d0cf49d7c8f"},
+ {file = "watchdog-2.1.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0bcfe904c7d404eb6905f7106c54873503b442e8e918cc226e1828f498bdc0ca"},
+ {file = "watchdog-2.1.3-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bf84bd94cbaad8f6b9cbaeef43080920f4cb0e61ad90af7106b3de402f5fe127"},
+ {file = "watchdog-2.1.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b8ddb2c9f92e0c686ea77341dcb58216fa5ff7d5f992c7278ee8a392a06e86bb"},
+ {file = "watchdog-2.1.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8805a5f468862daf1e4f4447b0ccf3acaff626eaa57fbb46d7960d1cf09f2e6d"},
+ {file = "watchdog-2.1.3-py3-none-manylinux2014_armv7l.whl", hash = "sha256:3e305ea2757f81d8ebd8559d1a944ed83e3ab1bdf68bcf16ec851b97c08dc035"},
+ {file = "watchdog-2.1.3-py3-none-manylinux2014_i686.whl", hash = "sha256:431a3ea70b20962e6dee65f0eeecd768cd3085ea613ccb9b53c8969de9f6ebd2"},
+ {file = "watchdog-2.1.3-py3-none-manylinux2014_ppc64.whl", hash = "sha256:e4929ac2aaa2e4f1a30a36751160be391911da463a8799460340901517298b13"},
+ {file = "watchdog-2.1.3-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:201cadf0b8c11922f54ec97482f95b2aafca429c4c3a4bb869a14f3c20c32686"},
+ {file = "watchdog-2.1.3-py3-none-manylinux2014_s390x.whl", hash = "sha256:3a7d242a7963174684206093846537220ee37ba9986b824a326a8bb4ef329a33"},
+ {file = "watchdog-2.1.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:54e057727dd18bd01a3060dbf5104eb5a495ca26316487e0f32a394fd5fe725a"},
+ {file = "watchdog-2.1.3-py3-none-win32.whl", hash = "sha256:b5fc5c127bad6983eecf1ad117ab3418949f18af9c8758bd10158be3647298a9"},
+ {file = "watchdog-2.1.3-py3-none-win_amd64.whl", hash = "sha256:44acad6f642996a2b50bb9ce4fb3730dde08f23e79e20cd3d8e2a2076b730381"},
+ {file = "watchdog-2.1.3-py3-none-win_ia64.whl", hash = "sha256:0bcdf7b99b56a3ae069866c33d247c9994ffde91b620eaf0306b27e099bd1ae0"},
+ {file = "watchdog-2.1.3.tar.gz", hash = "sha256:e5236a8e8602ab6db4b873664c2d356c365ab3cac96fbdec4970ad616415dd45"},
+]
diff --git a/pyproject.toml b/pyproject.toml
index 591f73a2..c13095e1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -14,0 +15 @@ black = "^21.7b0"
+watchdog = {extras = ["watchmedo"], version = "^2.1.3"}
|
|
d5fc7cba68fc2f1ab1478748ce07830716920f74
|
Sylvain Lesage
| 2021-07-26T10:57:56 |
feat: 🎸 create app with starlette, create healthcheck endpoint
|
diff --git a/datasets_preview_backend/main.py b/datasets_preview_backend/main.py
new file mode 100644
index 00000000..ce89814a
--- /dev/null
+++ b/datasets_preview_backend/main.py
@@ -0,0 +1,28 @@
+import os
+
+from starlette.applications import Starlette
+from starlette.requests import Request
+from starlette.responses import PlainTextResponse
+from starlette.routing import Route
+import uvicorn
+
+PORT = 8000
+
+
+def healthcheck(request: Request):
+ return PlainTextResponse("ok")
+
+
+def start():
+ app = Starlette(
+ routes=[
+ Route("/healthcheck", endpoint=healthcheck),
+ ]
+ )
+
+ port = os.environ.get("TBL_PORT", PORT)
+ uvicorn.run(app, host="0.0.0.0", port=port)
+
+
+if __name__ == "__main__":
+ start()
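A sketch of probing the new endpoint once the app is started locally, assuming the requests library is available:

```python
# Sketch: the healthcheck should answer 200 with a plain "ok" body.
import requests

r = requests.get("http://localhost:8000/healthcheck")
print(r.status_code, r.text)  # expected: 200 ok
```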
|
|
ae6949750c03106cf5fd1bb43afeb1362633d48e
|
Sylvain Lesage
| 2021-07-26T10:42:39 |
chore: 🤖 install poetry venv in .venv
|
diff --git a/poetry.toml b/poetry.toml
new file mode 100644
index 00000000..ab1033bd
--- /dev/null
+++ b/poetry.toml
@@ -0,0 +1,2 @@
+[virtualenvs]
+in-project = true
|
|
29adb0cce3d4363362c9bbab45c0b5d056e24193
|
Sylvain Lesage
| 2021-07-26T09:52:55 |
chore: 🤖 create project
|
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..038cb807
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,8 @@
+# MacOS
+.DS_Store
+
+# Python
+*.pyc
+.venv/
+
+.vscode/
diff --git a/README.md b/README.md
new file mode 100644
index 00000000..f5a5101b
--- /dev/null
+++ b/README.md
@@ -0,0 +1,35 @@
+# Datasets preview backend
+
+> API to extract rows of 🤗 datasets
+
+The URL schema is `https://huggingface.co/datasets-preview/:datasetId/extract?rows=100`. For example https://huggingface.co/datasets-preview/acronym_identification/extract?rows=10 will return a JSON file with the list of the first 10 rows of the first available dataset split in https://huggingface.co/datasets/acronym_identification.
+
+## Requirements
+
+- Python 3.8+
+
+## Install
+
+```bash
+git clone [email protected]:huggingface/datasets-preview-backend.git
+cd datasets-preview-backend
+python -m venv .venv
+source .venv/bin/activate
+pip install .
+deactivate
+```
+
+See [INSTALL.md](./INSTALL.md) for details on how it has been deployed.
+
+## Run
+
+```bash
+cd datasets-preview-backend
+source .venv/bin/activate
+python datasets-preview-backend/main.py
+```
+
+Set environment variables to configure the following aspects:
+
+- `DPB_EXTRACT_ROWS_LIMIT`: maximum number of rows in the extract. Defaults to `100`.
+- `DPB_PORT`: the port used by the app
diff --git a/poetry.lock b/poetry.lock
new file mode 100644
index 00000000..9be04f83
--- /dev/null
+++ b/poetry.lock
@@ -0,0 +1,766 @@
+[[package]]
+name = "anyio"
+version = "3.3.0"
+description = "High level compatibility layer for multiple asynchronous event loop implementations"
+category = "main"
+optional = false
+python-versions = ">=3.6.2"
+
+[package.dependencies]
+idna = ">=2.8"
+sniffio = ">=1.1"
+
+[package.extras]
+doc = ["sphinx-rtd-theme", "sphinx-autodoc-typehints (>=1.2.0)"]
+test = ["coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "pytest (>=6.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (<0.15)", "mock (>=4)", "uvloop (>=0.15)"]
+trio = ["trio (>=0.16)"]
+
+[[package]]
+name = "appdirs"
+version = "1.4.4"
+description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
+category = "dev"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "asgiref"
+version = "3.4.1"
+description = "ASGI specs, helper code, and adapters"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.extras]
+tests = ["pytest", "pytest-asyncio", "mypy (>=0.800)"]
+
+[[package]]
+name = "black"
+version = "21.7b0"
+description = "The uncompromising code formatter."
+category = "dev"
+optional = false
+python-versions = ">=3.6.2"
+
+[package.dependencies]
+appdirs = "*"
+click = ">=7.1.2"
+mypy-extensions = ">=0.4.3"
+pathspec = ">=0.8.1,<1"
+regex = ">=2020.1.8"
+tomli = ">=0.2.6,<2.0.0"
+
+[package.extras]
+colorama = ["colorama (>=0.4.3)"]
+d = ["aiohttp (>=3.6.0)", "aiohttp-cors (>=0.4.0)"]
+python2 = ["typed-ast (>=1.4.2)"]
+uvloop = ["uvloop (>=0.15.2)"]
+
+[[package]]
+name = "certifi"
+version = "2021.5.30"
+description = "Python package for providing Mozilla's CA Bundle."
+category = "main"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "charset-normalizer"
+version = "2.0.3"
+description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+category = "main"
+optional = false
+python-versions = ">=3.5.0"
+
+[package.extras]
+unicode_backport = ["unicodedata2"]
+
+[[package]]
+name = "click"
+version = "8.0.1"
+description = "Composable command line interface toolkit"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[[package]]
+name = "colorama"
+version = "0.4.4"
+description = "Cross-platform colored terminal text."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
+[[package]]
+name = "datasets"
+version = "1.10.2"
+description = "HuggingFace/Datasets is an open library of NLP datasets."
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+dill = "*"
+fsspec = ">=2021.05.0"
+huggingface-hub = "<0.1.0"
+multiprocess = "*"
+numpy = ">=1.17"
+packaging = "*"
+pandas = "*"
+pyarrow = ">=1.0.0,<4.0.0 || >4.0.0"
+requests = ">=2.19.0"
+tqdm = ">=4.42"
+xxhash = "*"
+
+[package.extras]
+apache-beam = ["apache-beam (>=2.26.0)"]
+benchmarks = ["numpy (==1.18.5)", "tensorflow (==2.3.0)", "torch (==1.6.0)", "transformers (==3.0.2)"]
+dev = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[s3,server] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "wget (>=3.2)", "pytorch-nlp (==0.5.0)", "pytorch-lightning", "fastBPE (==0.1.0)", "fairseq", "black (==21.4b0)", "flake8 (==3.7.9)", "isort", "pyyaml (>=5.3.1)", "importlib-resources"]
+docs = ["docutils (==0.16.0)", "recommonmark", "sphinx (==3.1.2)", "sphinx-markdown-tables", "sphinx-rtd-theme (==0.4.3)", "sphinxext-opengraph (==0.4.1)", "sphinx-copybutton", "fsspec", "s3fs"]
+quality = ["black (==21.4b0)", "flake8 (==3.7.9)", "isort", "pyyaml (>=5.3.1)"]
+s3 = ["fsspec", "boto3 (==1.16.43)", "botocore (==1.19.52)", "s3fs"]
+streaming = ["aiohttp"]
+tensorflow = ["tensorflow (>=2.2.0)"]
+tensorflow_gpu = ["tensorflow-gpu (>=2.2.0)"]
+tests = ["absl-py", "pytest", "pytest-xdist", "aiohttp", "apache-beam (>=2.26.0)", "elasticsearch", "aiobotocore (==1.2.2)", "boto3 (==1.16.43)", "botocore (==1.19.52)", "faiss-cpu", "fsspec", "moto[s3,server] (==2.0.4)", "rarfile (>=4.0)", "s3fs", "tensorflow (>=2.3)", "torch", "transformers", "bs4", "conllu", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "wget (>=3.2)", "pytorch-nlp (==0.5.0)", "pytorch-lightning", "fastBPE (==0.1.0)", "fairseq", "importlib-resources"]
+torch = ["torch"]
+
+[[package]]
+name = "dill"
+version = "0.3.4"
+description = "serialize all of python"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*"
+
+[package.extras]
+graph = ["objgraph (>=1.7.2)"]
+
+[[package]]
+name = "filelock"
+version = "3.0.12"
+description = "A platform independent file lock."
+category = "main"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "fsspec"
+version = "2021.7.0"
+description = "File-system specification"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.extras]
+abfs = ["adlfs"]
+adl = ["adlfs"]
+dask = ["dask", "distributed"]
+dropbox = ["dropboxdrivefs", "requests", "dropbox"]
+entrypoints = ["importlib-metadata"]
+gcs = ["gcsfs"]
+git = ["pygit2"]
+github = ["requests"]
+gs = ["gcsfs"]
+hdfs = ["pyarrow (>=1)"]
+http = ["requests", "aiohttp"]
+s3 = ["s3fs"]
+sftp = ["paramiko"]
+smb = ["smbprotocol"]
+ssh = ["paramiko"]
+
+[[package]]
+name = "h11"
+version = "0.12.0"
+description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
+name = "huggingface-hub"
+version = "0.0.14"
+description = "Client library to download and publish models on the huggingface.co hub"
+category = "main"
+optional = false
+python-versions = ">=3.6.0"
+
+[package.dependencies]
+filelock = "*"
+packaging = ">=20.9"
+requests = "*"
+tqdm = "*"
+typing-extensions = "*"
+
+[package.extras]
+all = ["pytest", "black (>=20.8b1)", "isort (>=5.5.4)", "flake8 (>=3.8.3)"]
+dev = ["pytest", "black (>=20.8b1)", "isort (>=5.5.4)", "flake8 (>=3.8.3)"]
+quality = ["black (>=20.8b1)", "isort (>=5.5.4)", "flake8 (>=3.8.3)"]
+testing = ["pytest"]
+torch = ["torch"]
+
+[[package]]
+name = "idna"
+version = "3.2"
+description = "Internationalized Domain Names in Applications (IDNA)"
+category = "main"
+optional = false
+python-versions = ">=3.5"
+
+[[package]]
+name = "multiprocess"
+version = "0.70.12.2"
+description = "better multiprocessing and multithreading in python"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+dill = ">=0.3.4"
+
+[[package]]
+name = "mypy-extensions"
+version = "0.4.3"
+description = "Experimental type system extensions for programs checked with the mypy typechecker."
+category = "dev"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "numpy"
+version = "1.21.1"
+description = "NumPy is the fundamental package for array computing with Python."
+category = "main"
+optional = false
+python-versions = ">=3.7"
+
+[[package]]
+name = "packaging"
+version = "21.0"
+description = "Core utilities for Python packages"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+pyparsing = ">=2.0.2"
+
+[[package]]
+name = "pandas"
+version = "1.3.1"
+description = "Powerful data structures for data analysis, time series, and statistics"
+category = "main"
+optional = false
+python-versions = ">=3.7.1"
+
+[package.dependencies]
+numpy = ">=1.17.3"
+python-dateutil = ">=2.7.3"
+pytz = ">=2017.3"
+
+[package.extras]
+test = ["hypothesis (>=3.58)", "pytest (>=6.0)", "pytest-xdist"]
+
+[[package]]
+name = "pathspec"
+version = "0.9.0"
+description = "Utility library for gitignore style pattern matching of file paths."
+category = "dev"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
+
+[[package]]
+name = "pyarrow"
+version = "4.0.1"
+description = "Python library for Apache Arrow"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+numpy = ">=1.16.6"
+
+[[package]]
+name = "pyparsing"
+version = "2.4.7"
+description = "Python parsing module"
+category = "main"
+optional = false
+python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
+
+[[package]]
+name = "python-dateutil"
+version = "2.8.2"
+description = "Extensions to the standard Python datetime module"
+category = "main"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+
+[package.dependencies]
+six = ">=1.5"
+
+[[package]]
+name = "pytz"
+version = "2021.1"
+description = "World timezone definitions, modern and historical"
+category = "main"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "regex"
+version = "2021.7.6"
+description = "Alternative regular expression module, to replace re."
+category = "dev"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "requests"
+version = "2.26.0"
+description = "Python HTTP for Humans."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
+
+[package.dependencies]
+certifi = ">=2017.4.17"
+charset-normalizer = {version = ">=2.0.0,<2.1.0", markers = "python_version >= \"3\""}
+idna = {version = ">=2.5,<4", markers = "python_version >= \"3\""}
+urllib3 = ">=1.21.1,<1.27"
+
+[package.extras]
+socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"]
+use_chardet_on_py3 = ["chardet (>=3.0.2,<5)"]
+
+[[package]]
+name = "six"
+version = "1.16.0"
+description = "Python 2 and 3 compatibility utilities"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
+
+[[package]]
+name = "sniffio"
+version = "1.2.0"
+description = "Sniff out which async library your code is running under"
+category = "main"
+optional = false
+python-versions = ">=3.5"
+
+[[package]]
+name = "starlette"
+version = "0.16.0"
+description = "The little ASGI library that shines."
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+anyio = ">=3.0.0,<4"
+
+[package.extras]
+full = ["itsdangerous", "jinja2", "python-multipart", "pyyaml", "requests", "graphene"]
+
+[[package]]
+name = "tomli"
+version = "1.1.0"
+description = "A lil' TOML parser"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
+name = "tqdm"
+version = "4.61.2"
+description = "Fast, Extensible Progress Meter"
+category = "main"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[package.extras]
+dev = ["py-make (>=0.1.0)", "twine", "wheel"]
+notebook = ["ipywidgets (>=6)"]
+telegram = ["requests"]
+
+[[package]]
+name = "typing-extensions"
+version = "3.10.0.0"
+description = "Backported and Experimental Type Hints for Python 3.5+"
+category = "main"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "urllib3"
+version = "1.26.6"
+description = "HTTP library with thread-safe connection pooling, file post, and more."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4"
+
+[package.extras]
+brotli = ["brotlipy (>=0.6.0)"]
+secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"]
+socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
+
+[[package]]
+name = "uvicorn"
+version = "0.14.0"
+description = "The lightning-fast ASGI server."
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+asgiref = ">=3.3.4"
+click = ">=7"
+h11 = ">=0.8"
+
+[package.extras]
+standard = ["websockets (>=9.1)", "httptools (>=0.2.0,<0.3.0)", "watchgod (>=0.6)", "python-dotenv (>=0.13)", "PyYAML (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "colorama (>=0.4)"]
+
+[[package]]
+name = "xxhash"
+version = "2.0.2"
+description = "Python binding for xxHash"
+category = "main"
+optional = false
+python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
+
+[metadata]
+lock-version = "1.1"
+python-versions = "^3.8"
+content-hash = "ff8cfb3b658efe0bce6b6031b0ac810904c0bba275af222cb7a070480887bd45"
+
+[metadata.files]
+anyio = [
+ {file = "anyio-3.3.0-py3-none-any.whl", hash = "sha256:929a6852074397afe1d989002aa96d457e3e1e5441357c60d03e7eea0e65e1b0"},
+ {file = "anyio-3.3.0.tar.gz", hash = "sha256:ae57a67583e5ff8b4af47666ff5651c3732d45fd26c929253748e796af860374"},
+]
+appdirs = [
+ {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"},
+ {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"},
+]
+asgiref = [
+ {file = "asgiref-3.4.1-py3-none-any.whl", hash = "sha256:ffc141aa908e6f175673e7b1b3b7af4fdb0ecb738fc5c8b88f69f055c2415214"},
+ {file = "asgiref-3.4.1.tar.gz", hash = "sha256:4ef1ab46b484e3c706329cedeff284a5d40824200638503f5768edb6de7d58e9"},
+]
+black = [
+ {file = "black-21.7b0-py3-none-any.whl", hash = "sha256:1c7aa6ada8ee864db745b22790a32f94b2795c253a75d6d9b5e439ff10d23116"},
+ {file = "black-21.7b0.tar.gz", hash = "sha256:c8373c6491de9362e39271630b65b964607bc5c79c83783547d76c839b3aa219"},
+]
+certifi = [
+ {file = "certifi-2021.5.30-py2.py3-none-any.whl", hash = "sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8"},
+ {file = "certifi-2021.5.30.tar.gz", hash = "sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee"},
+]
+charset-normalizer = [
+ {file = "charset-normalizer-2.0.3.tar.gz", hash = "sha256:c46c3ace2d744cfbdebceaa3c19ae691f53ae621b39fd7570f59d14fb7f2fd12"},
+ {file = "charset_normalizer-2.0.3-py3-none-any.whl", hash = "sha256:88fce3fa5b1a84fdcb3f603d889f723d1dd89b26059d0123ca435570e848d5e1"},
+]
+click = [
+ {file = "click-8.0.1-py3-none-any.whl", hash = "sha256:fba402a4a47334742d782209a7c79bc448911afe1149d07bdabdf480b3e2f4b6"},
+ {file = "click-8.0.1.tar.gz", hash = "sha256:8c04c11192119b1ef78ea049e0a6f0463e4c48ef00a30160c704337586f3ad7a"},
+]
+colorama = [
+ {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"},
+ {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"},
+]
+datasets = [
+ {file = "datasets-1.10.2-py3-none-any.whl", hash = "sha256:a523e22b222b38700cc672445f2d534ed4a5aeda1399c074b722feda36b175c2"},
+ {file = "datasets-1.10.2.tar.gz", hash = "sha256:19106e8f5fa7be95ccd19fa82653ce707cad378d0e323c3013a17c2b6513bf5c"},
+]
+dill = [
+ {file = "dill-0.3.4-py2.py3-none-any.whl", hash = "sha256:7e40e4a70304fd9ceab3535d36e58791d9c4a776b38ec7f7ec9afc8d3dca4d4f"},
+ {file = "dill-0.3.4.zip", hash = "sha256:9f9734205146b2b353ab3fec9af0070237b6ddae78452af83d2fca84d739e675"},
+]
+filelock = [
+ {file = "filelock-3.0.12-py3-none-any.whl", hash = "sha256:929b7d63ec5b7d6b71b0fa5ac14e030b3f70b75747cef1b10da9b879fef15836"},
+ {file = "filelock-3.0.12.tar.gz", hash = "sha256:18d82244ee114f543149c66a6e0c14e9c4f8a1044b5cdaadd0f82159d6a6ff59"},
+]
+fsspec = [
+ {file = "fsspec-2021.7.0-py3-none-any.whl", hash = "sha256:86822ccf367da99957f49db64f7d5fd3d8d21444fac4dfdc8ebc38ee93d478c6"},
+ {file = "fsspec-2021.7.0.tar.gz", hash = "sha256:792ebd3b54de0b30f1ce73f0ba0a8bcc864724f2d9f248cb8d0ece47db0cbde8"},
+]
+h11 = [
+ {file = "h11-0.12.0-py3-none-any.whl", hash = "sha256:36a3cb8c0a032f56e2da7084577878a035d3b61d104230d4bd49c0c6b555a9c6"},
+ {file = "h11-0.12.0.tar.gz", hash = "sha256:47222cb6067e4a307d535814917cd98fd0a57b6788ce715755fa2b6c28b56042"},
+]
+huggingface-hub = [
+ {file = "huggingface_hub-0.0.14-py3-none-any.whl", hash = "sha256:3f931112abb679001d8d1310bfd2676cec9ce3417b2d9965d5a2d44dcca2e5e2"},
+ {file = "huggingface_hub-0.0.14.tar.gz", hash = "sha256:560313eb1b1df0014a0b2469a1ccd2491aa5fe71606b1d858c7f293ec8678f11"},
+]
+idna = [
+ {file = "idna-3.2-py3-none-any.whl", hash = "sha256:14475042e284991034cb48e06f6851428fb14c4dc953acd9be9a5e95c7b6dd7a"},
+ {file = "idna-3.2.tar.gz", hash = "sha256:467fbad99067910785144ce333826c71fb0e63a425657295239737f7ecd125f3"},
+]
+multiprocess = [
+ {file = "multiprocess-0.70.12.2-cp27-cp27m-macosx_10_12_x86_64.whl", hash = "sha256:35d41e410ca2a32977a483ae1f40f86b193b45cecf85567c2fae402fb8bf172e"},
+ {file = "multiprocess-0.70.12.2-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:9a02237eae21975155c816883479f72e239d16823a6bc063173d59acec9bcf41"},
+ {file = "multiprocess-0.70.12.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:f12a939cd2f01d0a900e7ef2aaee3c351a49fd2297d7f760b537af22727561b8"},
+ {file = "multiprocess-0.70.12.2-cp27-cp27m-win32.whl", hash = "sha256:be3ad3eaf204abc646d85e70e41244f66d88200628a0ab867c8fc206b97cedbf"},
+ {file = "multiprocess-0.70.12.2-cp27-cp27m-win_amd64.whl", hash = "sha256:c85ffc38c50c5a4f32f3f3c1a284725b7b5040188f254eba6e572c53d3da525b"},
+ {file = "multiprocess-0.70.12.2-pp27-none-any.whl", hash = "sha256:a9f58945edb234591684c0a181b744a3231643814ef3a8f47cea9a2073b4b2bb"},
+ {file = "multiprocess-0.70.12.2-pp36-none-any.whl", hash = "sha256:0e0a5ae4bd84e4c22baddf824d3b8168214f8c1cce51e2cb080421cb1f7b04d1"},
+ {file = "multiprocess-0.70.12.2-pp37-none-any.whl", hash = "sha256:916a314a1e0f3454033d59672ba6181fa45948ab1091d68cdd479258576e7b27"},
+ {file = "multiprocess-0.70.12.2-py36-none-any.whl", hash = "sha256:b3f866f7d9c7acc1a9cb1b6063a29f5cb140ff545b35b71fd4bfdac6f19d75fa"},
+ {file = "multiprocess-0.70.12.2-py37-none-any.whl", hash = "sha256:6aa67e805e50b6e9dfc56dd0f0c85ac3409e6791d4ec5405c5f9bc0a47d745a4"},
+ {file = "multiprocess-0.70.12.2-py38-none-any.whl", hash = "sha256:85941e650c277af44fc82e3e97faacb920e5ce3615238b540cbad4012d6f60e9"},
+ {file = "multiprocess-0.70.12.2-py39-none-any.whl", hash = "sha256:6f812a1d3f198b7cacd63983f60e2dc1338bd4450893f90c435067b5a3127e6f"},
+ {file = "multiprocess-0.70.12.2.zip", hash = "sha256:206bb9b97b73f87fec1ed15a19f8762950256aa84225450abc7150d02855a083"},
+]
+mypy-extensions = [
+ {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"},
+ {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"},
+]
+numpy = [
+ {file = "numpy-1.21.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:38e8648f9449a549a7dfe8d8755a5979b45b3538520d1e735637ef28e8c2dc50"},
+ {file = "numpy-1.21.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fd7d7409fa643a91d0a05c7554dd68aa9c9bb16e186f6ccfe40d6e003156e33a"},
+ {file = "numpy-1.21.1-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a75b4498b1e93d8b700282dc8e655b8bd559c0904b3910b144646dbbbc03e062"},
+ {file = "numpy-1.21.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1412aa0aec3e00bc23fbb8664d76552b4efde98fb71f60737c83efbac24112f1"},
+ {file = "numpy-1.21.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e46ceaff65609b5399163de5893d8f2a82d3c77d5e56d976c8b5fb01faa6b671"},
+ {file = "numpy-1.21.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:c6a2324085dd52f96498419ba95b5777e40b6bcbc20088fddb9e8cbb58885e8e"},
+ {file = "numpy-1.21.1-cp37-cp37m-win32.whl", hash = "sha256:73101b2a1fef16602696d133db402a7e7586654682244344b8329cdcbbb82172"},
+ {file = "numpy-1.21.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7a708a79c9a9d26904d1cca8d383bf869edf6f8e7650d85dbc77b041e8c5a0f8"},
+ {file = "numpy-1.21.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:95b995d0c413f5d0428b3f880e8fe1660ff9396dcd1f9eedbc311f37b5652e16"},
+ {file = "numpy-1.21.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:635e6bd31c9fb3d475c8f44a089569070d10a9ef18ed13738b03049280281267"},
+ {file = "numpy-1.21.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4a3d5fb89bfe21be2ef47c0614b9c9c707b7362386c9a3ff1feae63e0267ccb6"},
+ {file = "numpy-1.21.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8a326af80e86d0e9ce92bcc1e65c8ff88297de4fa14ee936cb2293d414c9ec63"},
+ {file = "numpy-1.21.1-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:791492091744b0fe390a6ce85cc1bf5149968ac7d5f0477288f78c89b385d9af"},
+ {file = "numpy-1.21.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0318c465786c1f63ac05d7c4dbcecd4d2d7e13f0959b01b534ea1e92202235c5"},
+ {file = "numpy-1.21.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a513bd9c1551894ee3d31369f9b07460ef223694098cf27d399513415855b68"},
+ {file = "numpy-1.21.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:91c6f5fc58df1e0a3cc0c3a717bb3308ff850abdaa6d2d802573ee2b11f674a8"},
+ {file = "numpy-1.21.1-cp38-cp38-win32.whl", hash = "sha256:978010b68e17150db8765355d1ccdd450f9fc916824e8c4e35ee620590e234cd"},
+ {file = "numpy-1.21.1-cp38-cp38-win_amd64.whl", hash = "sha256:9749a40a5b22333467f02fe11edc98f022133ee1bfa8ab99bda5e5437b831214"},
+ {file = "numpy-1.21.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d7a4aeac3b94af92a9373d6e77b37691b86411f9745190d2c351f410ab3a791f"},
+ {file = "numpy-1.21.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d9e7912a56108aba9b31df688a4c4f5cb0d9d3787386b87d504762b6754fbb1b"},
+ {file = "numpy-1.21.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:25b40b98ebdd272bc3020935427a4530b7d60dfbe1ab9381a39147834e985eac"},
+ {file = "numpy-1.21.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8a92c5aea763d14ba9d6475803fc7904bda7decc2a0a68153f587ad82941fec1"},
+ {file = "numpy-1.21.1-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:05a0f648eb28bae4bcb204e6fd14603de2908de982e761a2fc78efe0f19e96e1"},
+ {file = "numpy-1.21.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f01f28075a92eede918b965e86e8f0ba7b7797a95aa8d35e1cc8821f5fc3ad6a"},
+ {file = "numpy-1.21.1-cp39-cp39-win32.whl", hash = "sha256:88c0b89ad1cc24a5efbb99ff9ab5db0f9a86e9cc50240177a571fbe9c2860ac2"},
+ {file = "numpy-1.21.1-cp39-cp39-win_amd64.whl", hash = "sha256:01721eefe70544d548425a07c80be8377096a54118070b8a62476866d5208e33"},
+ {file = "numpy-1.21.1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2d4d1de6e6fb3d28781c73fbde702ac97f03d79e4ffd6598b880b2d95d62ead4"},
+ {file = "numpy-1.21.1.zip", hash = "sha256:dff4af63638afcc57a3dfb9e4b26d434a7a602d225b42d746ea7fe2edf1342fd"},
+]
+packaging = [
+ {file = "packaging-21.0-py3-none-any.whl", hash = "sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14"},
+ {file = "packaging-21.0.tar.gz", hash = "sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7"},
+]
+pandas = [
+ {file = "pandas-1.3.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1ee8418d0f936ff2216513aa03e199657eceb67690995d427a4a7ecd2e68f442"},
+ {file = "pandas-1.3.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d9acfca191140a518779d1095036d842d5e5bc8e8ad8b5eaad1aff90fe1870d"},
+ {file = "pandas-1.3.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e323028ab192fcfe1e8999c012a0fa96d066453bb354c7e7a4a267b25e73d3c8"},
+ {file = "pandas-1.3.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9d06661c6eb741ae633ee1c57e8c432bb4203024e263fe1a077fa3fda7817fdb"},
+ {file = "pandas-1.3.1-cp37-cp37m-win32.whl", hash = "sha256:23c7452771501254d2ae23e9e9dac88417de7e6eff3ce64ee494bb94dc88c300"},
+ {file = "pandas-1.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7150039e78a81eddd9f5a05363a11cadf90a4968aac6f086fd83e66cf1c8d1d6"},
+ {file = "pandas-1.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5c09a2538f0fddf3895070579082089ff4ae52b6cb176d8ec7a4dacf7e3676c1"},
+ {file = "pandas-1.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:905fc3e0fcd86b0a9f1f97abee7d36894698d2592b22b859f08ea5a8fe3d3aab"},
+ {file = "pandas-1.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ee927c70794e875a59796fab8047098aa59787b1be680717c141cd7873818ae"},
+ {file = "pandas-1.3.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c976e023ed580e60a82ccebdca8e1cc24d8b1fbb28175eb6521025c127dab66"},
+ {file = "pandas-1.3.1-cp38-cp38-win32.whl", hash = "sha256:22f3fcc129fb482ef44e7df2a594f0bd514ac45aabe50da1a10709de1b0f9d84"},
+ {file = "pandas-1.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:45656cd59ae9745a1a21271a62001df58342b59c66d50754390066db500a8362"},
+ {file = "pandas-1.3.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:114c6789d15862508900a25cb4cb51820bfdd8595ea306bab3b53cd19f990b65"},
+ {file = "pandas-1.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:527c43311894aff131dea99cf418cd723bfd4f0bcf3c3da460f3b57e52a64da5"},
+ {file = "pandas-1.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb3b33dde260b1766ea4d3c6b8fbf6799cee18d50a2a8bc534cf3550b7c819a"},
+ {file = "pandas-1.3.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c28760932283d2c9f6fa5e53d2f77a514163b9e67fd0ee0879081be612567195"},
+ {file = "pandas-1.3.1-cp39-cp39-win32.whl", hash = "sha256:be12d77f7e03c40a2466ed00ccd1a5f20a574d3c622fe1516037faa31aa448aa"},
+ {file = "pandas-1.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:9e1fe6722cbe27eb5891c1977bca62d456c19935352eea64d33956db46139364"},
+ {file = "pandas-1.3.1.tar.gz", hash = "sha256:341935a594db24f3ff07d1b34d1d231786aa9adfa84b76eab10bf42907c8aed3"},
+]
+pathspec = [
+ {file = "pathspec-0.9.0-py2.py3-none-any.whl", hash = "sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a"},
+ {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"},
+]
+pyarrow = [
+ {file = "pyarrow-4.0.1-cp36-cp36m-macosx_10_13_x86_64.whl", hash = "sha256:5387db80c6a7b5598884bf4df3fc546b3373771ad614548b782e840b71704877"},
+ {file = "pyarrow-4.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:76b75a9cfc572e890a1e000fd532bdd2084ec3f1ee94ee51802a477913a21072"},
+ {file = "pyarrow-4.0.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:423cd6a14810f4e40cb76e13d4240040fc1594d69fe1c4f2c70be00ad512ade5"},
+ {file = "pyarrow-4.0.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:e1351576877764fb4d5690e4721ce902e987c85f4ab081c70a34e1d24646586e"},
+ {file = "pyarrow-4.0.1-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:0fde9c7a3d5d37f3fe5d18c4ed015e8f585b68b26d72a10d7012cad61afe43ff"},
+ {file = "pyarrow-4.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:afd4f7c0a225a326d2c0039cdc8631b5e8be30f78f6b7a3e5ce741cf5dd81c72"},
+ {file = "pyarrow-4.0.1-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:b05bdd513f045d43228247ef4d9269c88139788e2d566f4cb3e855e282ad0330"},
+ {file = "pyarrow-4.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:150db335143edd00d3ec669c7c8167d401c4aa0a290749351c80bbf146892b2e"},
+ {file = "pyarrow-4.0.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:dcd20ee0240a88772eeb5691102c276f5cdec79527fb3a0679af7f93f93cb4bd"},
+ {file = "pyarrow-4.0.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:24040a20208e9b16ba7b284624ebfe67e40f5c40b5dc8d874da322ac0053f9d3"},
+ {file = "pyarrow-4.0.1-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:e44dfd7e61c9eb6dda59bc49ad69e77945f6d049185a517c130417e3ca0494d8"},
+ {file = "pyarrow-4.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:ee3d87615876550fee9a523307dd4b00f0f44cf47a94a32a07793da307df31a0"},
+ {file = "pyarrow-4.0.1-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:fa7b165cfa97158c1e6d15c68428317b4f4ae786d1dc2dbab43f1328c1eb43aa"},
+ {file = "pyarrow-4.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:33c457728a1ce825b80aa8c8ed573709f1efe72003d45fa6fdbb444de9cc0b74"},
+ {file = "pyarrow-4.0.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:72cf3477538bd8504f14d6299a387cc335444f7a188f548096dfea9533551f02"},
+ {file = "pyarrow-4.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:a81adbfbe2f6528d4593b5a8962b2751838517401d14e9d4cab6787478802693"},
+ {file = "pyarrow-4.0.1-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:c2733c9bcd00074ce5497dd0a7b8a10c91d3395ddce322d7021c7fdc4ea6f610"},
+ {file = "pyarrow-4.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:d0f080b2d9720bec42624cb0df66f60ae66b84a2ccd1fe2c291322df915ac9db"},
+ {file = "pyarrow-4.0.1-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:6b7bd8f5aa327cc32a1b9b02a76502851575f5edb110f93c59a45c70211a5618"},
+ {file = "pyarrow-4.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe976695318560a97c6d31bba828eeca28c44c6f6401005e54ba476a28ac0a10"},
+ {file = "pyarrow-4.0.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:5f2660f59dfcfd34adac7c08dc7f615920de703f191066ed6277628975f06878"},
+ {file = "pyarrow-4.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:5a76ec44af838862b23fb5cfc48765bc7978f7b58a181c96ad92856280de548b"},
+ {file = "pyarrow-4.0.1-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:04be0f7cb9090bd029b5b53bed628548fef569e5d0b5c6cd7f6d0106dbbc782d"},
+ {file = "pyarrow-4.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:a968375c66e505f72b421f5864a37f51aad5da61b6396fa283f956e9f2b2b923"},
+ {file = "pyarrow-4.0.1.tar.gz", hash = "sha256:11517f0b4f4acbab0c37c674b4d1aad3c3dfea0f6b1bb322e921555258101ab3"},
+]
+pyparsing = [
+ {file = "pyparsing-2.4.7-py2.py3-none-any.whl", hash = "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"},
+ {file = "pyparsing-2.4.7.tar.gz", hash = "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1"},
+]
+python-dateutil = [
+ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
+ {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
+]
+pytz = [
+ {file = "pytz-2021.1-py2.py3-none-any.whl", hash = "sha256:eb10ce3e7736052ed3623d49975ce333bcd712c7bb19a58b9e2089d4057d0798"},
+ {file = "pytz-2021.1.tar.gz", hash = "sha256:83a4a90894bf38e243cf052c8b58f381bfe9a7a483f6a9cab140bc7f702ac4da"},
+]
+regex = [
+ {file = "regex-2021.7.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e6a1e5ca97d411a461041d057348e578dc344ecd2add3555aedba3b408c9f874"},
+ {file = "regex-2021.7.6-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:6afe6a627888c9a6cfbb603d1d017ce204cebd589d66e0703309b8048c3b0854"},
+ {file = "regex-2021.7.6-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:ccb3d2190476d00414aab36cca453e4596e8f70a206e2aa8db3d495a109153d2"},
+ {file = "regex-2021.7.6-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:ed693137a9187052fc46eedfafdcb74e09917166362af4cc4fddc3b31560e93d"},
+ {file = "regex-2021.7.6-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:99d8ab206a5270c1002bfcf25c51bf329ca951e5a169f3b43214fdda1f0b5f0d"},
+ {file = "regex-2021.7.6-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:b85ac458354165405c8a84725de7bbd07b00d9f72c31a60ffbf96bb38d3e25fa"},
+ {file = "regex-2021.7.6-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:3f5716923d3d0bfb27048242a6e0f14eecdb2e2a7fac47eda1d055288595f222"},
+ {file = "regex-2021.7.6-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5983c19d0beb6af88cb4d47afb92d96751fb3fa1784d8785b1cdf14c6519407"},
+ {file = "regex-2021.7.6-cp36-cp36m-win32.whl", hash = "sha256:c92831dac113a6e0ab28bc98f33781383fe294df1a2c3dfd1e850114da35fd5b"},
+ {file = "regex-2021.7.6-cp36-cp36m-win_amd64.whl", hash = "sha256:791aa1b300e5b6e5d597c37c346fb4d66422178566bbb426dd87eaae475053fb"},
+ {file = "regex-2021.7.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:59506c6e8bd9306cd8a41511e32d16d5d1194110b8cfe5a11d102d8b63cf945d"},
+ {file = "regex-2021.7.6-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:564a4c8a29435d1f2256ba247a0315325ea63335508ad8ed938a4f14c4116a5d"},
+ {file = "regex-2021.7.6-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:59c00bb8dd8775473cbfb967925ad2c3ecc8886b3b2d0c90a8e2707e06c743f0"},
+ {file = "regex-2021.7.6-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:9a854b916806c7e3b40e6616ac9e85d3cdb7649d9e6590653deb5b341a736cec"},
+ {file = "regex-2021.7.6-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:db2b7df831c3187a37f3bb80ec095f249fa276dbe09abd3d35297fc250385694"},
+ {file = "regex-2021.7.6-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:173bc44ff95bc1e96398c38f3629d86fa72e539c79900283afa895694229fe6a"},
+ {file = "regex-2021.7.6-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:15dddb19823f5147e7517bb12635b3c82e6f2a3a6b696cc3e321522e8b9308ad"},
+ {file = "regex-2021.7.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ddeabc7652024803666ea09f32dd1ed40a0579b6fbb2a213eba590683025895"},
+ {file = "regex-2021.7.6-cp37-cp37m-win32.whl", hash = "sha256:f080248b3e029d052bf74a897b9d74cfb7643537fbde97fe8225a6467fb559b5"},
+ {file = "regex-2021.7.6-cp37-cp37m-win_amd64.whl", hash = "sha256:d8bbce0c96462dbceaa7ac4a7dfbbee92745b801b24bce10a98d2f2b1ea9432f"},
+ {file = "regex-2021.7.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:edd1a68f79b89b0c57339bce297ad5d5ffcc6ae7e1afdb10f1947706ed066c9c"},
+ {file = "regex-2021.7.6-cp38-cp38-manylinux1_i686.whl", hash = "sha256:422dec1e7cbb2efbbe50e3f1de36b82906def93ed48da12d1714cabcd993d7f0"},
+ {file = "regex-2021.7.6-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:cbe23b323988a04c3e5b0c387fe3f8f363bf06c0680daf775875d979e376bd26"},
+ {file = "regex-2021.7.6-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:0eb2c6e0fcec5e0f1d3bcc1133556563222a2ffd2211945d7b1480c1b1a42a6f"},
+ {file = "regex-2021.7.6-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:1c78780bf46d620ff4fff40728f98b8afd8b8e35c3efd638c7df67be2d5cddbf"},
+ {file = "regex-2021.7.6-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:bc84fb254a875a9f66616ed4538542fb7965db6356f3df571d783f7c8d256edd"},
+ {file = "regex-2021.7.6-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:598c0a79b4b851b922f504f9f39a863d83ebdfff787261a5ed061c21e67dd761"},
+ {file = "regex-2021.7.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:875c355360d0f8d3d827e462b29ea7682bf52327d500a4f837e934e9e4656068"},
+ {file = "regex-2021.7.6-cp38-cp38-win32.whl", hash = "sha256:e586f448df2bbc37dfadccdb7ccd125c62b4348cb90c10840d695592aa1b29e0"},
+ {file = "regex-2021.7.6-cp38-cp38-win_amd64.whl", hash = "sha256:2fe5e71e11a54e3355fa272137d521a40aace5d937d08b494bed4529964c19c4"},
+ {file = "regex-2021.7.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6110bab7eab6566492618540c70edd4d2a18f40ca1d51d704f1d81c52d245026"},
+ {file = "regex-2021.7.6-cp39-cp39-manylinux1_i686.whl", hash = "sha256:4f64fc59fd5b10557f6cd0937e1597af022ad9b27d454e182485f1db3008f417"},
+ {file = "regex-2021.7.6-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:89e5528803566af4df368df2d6f503c84fbfb8249e6631c7b025fe23e6bd0cde"},
+ {file = "regex-2021.7.6-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:2366fe0479ca0e9afa534174faa2beae87847d208d457d200183f28c74eaea59"},
+ {file = "regex-2021.7.6-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:f9392a4555f3e4cb45310a65b403d86b589adc773898c25a39184b1ba4db8985"},
+ {file = "regex-2021.7.6-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:2bceeb491b38225b1fee4517107b8491ba54fba77cf22a12e996d96a3c55613d"},
+ {file = "regex-2021.7.6-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:f98dc35ab9a749276f1a4a38ab3e0e2ba1662ce710f6530f5b0a6656f1c32b58"},
+ {file = "regex-2021.7.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:319eb2a8d0888fa6f1d9177705f341bc9455a2c8aca130016e52c7fe8d6c37a3"},
+ {file = "regex-2021.7.6-cp39-cp39-win32.whl", hash = "sha256:eaf58b9e30e0e546cdc3ac06cf9165a1ca5b3de8221e9df679416ca667972035"},
+ {file = "regex-2021.7.6-cp39-cp39-win_amd64.whl", hash = "sha256:4c9c3155fe74269f61e27617529b7f09552fbb12e44b1189cebbdb24294e6e1c"},
+ {file = "regex-2021.7.6.tar.gz", hash = "sha256:8394e266005f2d8c6f0bc6780001f7afa3ef81a7a2111fa35058ded6fce79e4d"},
+]
+requests = [
+ {file = "requests-2.26.0-py2.py3-none-any.whl", hash = "sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24"},
+ {file = "requests-2.26.0.tar.gz", hash = "sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7"},
+]
+six = [
+ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
+ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
+]
+sniffio = [
+ {file = "sniffio-1.2.0-py3-none-any.whl", hash = "sha256:471b71698eac1c2112a40ce2752bb2f4a4814c22a54a3eed3676bc0f5ca9f663"},
+ {file = "sniffio-1.2.0.tar.gz", hash = "sha256:c4666eecec1d3f50960c6bdf61ab7bc350648da6c126e3cf6898d8cd4ddcd3de"},
+]
+starlette = [
+ {file = "starlette-0.16.0-py3-none-any.whl", hash = "sha256:38eb24bf705a2c317e15868e384c1b8a12ca396e5a3c3a003db7e667c43f939f"},
+ {file = "starlette-0.16.0.tar.gz", hash = "sha256:e1904b5d0007aee24bdd3c43994be9b3b729f4f58e740200de1d623f8c3a8870"},
+]
+tomli = [
+ {file = "tomli-1.1.0-py3-none-any.whl", hash = "sha256:f4a182048010e89cbec0ae4686b21f550a7f2903f665e34a6de58ec15424f919"},
+ {file = "tomli-1.1.0.tar.gz", hash = "sha256:33d7984738f8bb699c9b0a816eb646a8178a69eaa792d258486776a5d21b8ca5"},
+]
+tqdm = [
+ {file = "tqdm-4.61.2-py2.py3-none-any.whl", hash = "sha256:5aa445ea0ad8b16d82b15ab342de6b195a722d75fc1ef9934a46bba6feafbc64"},
+ {file = "tqdm-4.61.2.tar.gz", hash = "sha256:8bb94db0d4468fea27d004a0f1d1c02da3cdedc00fe491c0de986b76a04d6b0a"},
+]
+typing-extensions = [
+ {file = "typing_extensions-3.10.0.0-py2-none-any.whl", hash = "sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497"},
+ {file = "typing_extensions-3.10.0.0-py3-none-any.whl", hash = "sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84"},
+ {file = "typing_extensions-3.10.0.0.tar.gz", hash = "sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342"},
+]
+urllib3 = [
+ {file = "urllib3-1.26.6-py2.py3-none-any.whl", hash = "sha256:39fb8672126159acb139a7718dd10806104dec1e2f0f6c88aab05d17df10c8d4"},
+ {file = "urllib3-1.26.6.tar.gz", hash = "sha256:f57b4c16c62fa2760b7e3d97c35b255512fb6b59a259730f36ba32ce9f8e342f"},
+]
+uvicorn = [
+ {file = "uvicorn-0.14.0-py3-none-any.whl", hash = "sha256:2a76bb359171a504b3d1c853409af3adbfa5cef374a4a59e5881945a97a93eae"},
+ {file = "uvicorn-0.14.0.tar.gz", hash = "sha256:45ad7dfaaa7d55cab4cd1e85e03f27e9d60bc067ddc59db52a2b0aeca8870292"},
+]
+xxhash = [
+ {file = "xxhash-2.0.2-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:dac3b94881b943bbe418f5829128b9c48f69a66f816ef8b72ee0129d676dbd7c"},
+ {file = "xxhash-2.0.2-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:43fd97f332bd581639bb99fe8f09f7e9113d49cad4d21bef0620867f92c802c6"},
+ {file = "xxhash-2.0.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:6e5058c3fa5b42ded9a303f1a5a42d3ff732cb54c108424c63e993fc3379513c"},
+ {file = "xxhash-2.0.2-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:dfacce97a3ccb46089e358ceaeca9300298511673bf87596da66882af386f6c7"},
+ {file = "xxhash-2.0.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:1dfa115c8e07b3e1d94ebd60a6d6ee16ea692efb890e245addb0d33b47ee1dee"},
+ {file = "xxhash-2.0.2-cp27-cp27m-win32.whl", hash = "sha256:fb28b0313c7582225373f343635674231518452331a9bdea8261d0e27b48594f"},
+ {file = "xxhash-2.0.2-cp27-cp27m-win_amd64.whl", hash = "sha256:427851234a87bfe6636c90b89bd65b7ca913befff3c7bcd92a3568e635fccc92"},
+ {file = "xxhash-2.0.2-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:0b92a01dc8dcada8827de140a5df83c9e8e5c190ef8bf972c98ebbe0924ee044"},
+ {file = "xxhash-2.0.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:676d6964b8a9bdaf737ae6836b886ab53b2863c6aa00d43952b130a6130d1bdc"},
+ {file = "xxhash-2.0.2-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:8362693a1ce5c1373f48f047470e7797ed17dfe5babc37ba7bef50d6e6f83a72"},
+ {file = "xxhash-2.0.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:515747159fccd23fc9d1b7afeaa8bd7fc36884188b47491713d22032c5f9e502"},
+ {file = "xxhash-2.0.2-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:e1787b9cea43f256f8d06c8429999d386a9da9cb000c265a4dde48dd08242528"},
+ {file = "xxhash-2.0.2-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:d47ab1245ee4c7e6fc424ad990e4d7cfe0f206d617efe990fea34000a9242102"},
+ {file = "xxhash-2.0.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:81ec049f4936a49311e1fc58036d7d682b5c83d6d16ba1c852a981588c90e027"},
+ {file = "xxhash-2.0.2-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:df71aeedee74eaf670d1243b6722c8c77626f3b6e6cf2cd79f2e336b151749cd"},
+ {file = "xxhash-2.0.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:a922315c8e20dae0d35e54b49fd7ee348fe0a5e2fd8ec02f6a74140e063fcdb3"},
+ {file = "xxhash-2.0.2-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:22ddd484cd92d138feeec556387894b8ec529bab7f2feb3a177eb84baadee8c1"},
+ {file = "xxhash-2.0.2-cp35-cp35m-win32.whl", hash = "sha256:b4964e7ddca1ef9d7addef40a9f5eaa97aeda367c1d895e392533c0d2f9c3b8e"},
+ {file = "xxhash-2.0.2-cp35-cp35m-win_amd64.whl", hash = "sha256:6077fdb44f68920c4ac8e2f34b2a107c9a218f00a698253c824a0c6c1b9622a3"},
+ {file = "xxhash-2.0.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:04ae5706ddfe0fd2b46cd0b6487d3edae7e724e27d732b055ffd0f9539c4afc5"},
+ {file = "xxhash-2.0.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:c4a892bc47b6ea92bbb82499a81882548ce990d62c1862b3834f1f70e8cf4423"},
+ {file = "xxhash-2.0.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:57d43ce9594676b503c0a0a383481cb4e5cf736f88970bd41849fe15a68a5d48"},
+ {file = "xxhash-2.0.2-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:c2e44d162c3361392dbde736ee8ba3d1a414f63e32be6c71186f2b0654559d26"},
+ {file = "xxhash-2.0.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:0beb79835ca47af257f8126fccd9d5e0ba56ba7d39dab6f6b5a7acea4d8ac4b5"},
+ {file = "xxhash-2.0.2-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:f2bef10c417c4667310cc240d49e521e6b5fc90c4ff77a1ec78649869685e8d3"},
+ {file = "xxhash-2.0.2-cp36-cp36m-win32.whl", hash = "sha256:9b6bb1bd34a6365c790c328a604ec5a628059fef6e4486380caa89bc12787a6e"},
+ {file = "xxhash-2.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:4243dbeb1ce09d359289844f0c54676343857fdc6a092184aea159fecdf6d9f3"},
+ {file = "xxhash-2.0.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:71b38300e1803ab32ee787f89cdbc032b46ac5834eca9109d8fb576ae1a31741"},
+ {file = "xxhash-2.0.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:a8a68d117178f15c96cb9ae2613f53db94e0fdb34ffc69c7ab600c899c7a966c"},
+ {file = "xxhash-2.0.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:dd9c72520f790ce6eaa535cdad1a53ded22deab43766cfa7cef42834a9a65561"},
+ {file = "xxhash-2.0.2-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:f95adf6091fa13ce19fab21fadb8d07210822320568d24a6405d6b557afc0411"},
+ {file = "xxhash-2.0.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:00aaf882036d2a0fa7652cf9aeaaf2ad077b784c09ef8d60f5d97ebf0d47ffa1"},
+ {file = "xxhash-2.0.2-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:bb8c0efad20da40da1aa56f36b929b965d1adede8a1d5b37b702d378a683e0dd"},
+ {file = "xxhash-2.0.2-cp37-cp37m-win32.whl", hash = "sha256:6fc0b8c21a181b771e1f0c25eb8a0a241af0126f1fc19f4c3cde7233de91326f"},
+ {file = "xxhash-2.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:b232b47a3aa825e0df14b1bd3e051dd327c8539e382728ddb81997d26de5256a"},
+ {file = "xxhash-2.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:dc328d3d635ec851d6befdf6ced2134d587d3be973dbbbc489da24c0c88ecb01"},
+ {file = "xxhash-2.0.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:9e6e5e095417060bed45119c510d5bc846b62e2a8218cb3e5a19b3ccf12e4c18"},
+ {file = "xxhash-2.0.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:b4b7d4d19c125738c5fc48356505dfbd63b3cdf826dd868a1b80a73de48729b7"},
+ {file = "xxhash-2.0.2-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:686fcf2aff041df65470eccc7dcea5e7e77cfad99efcaba0c6f58bbd81846e10"},
+ {file = "xxhash-2.0.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:cb3a196fd1d55ce86b1123cbf3ef6603f80f4d0b46541412bb5056b0563ef384"},
+ {file = "xxhash-2.0.2-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:68d067427f2c6f7b3014e28bf4794b0876ab5f6366b53e1d6f59d275b4f19a8d"},
+ {file = "xxhash-2.0.2-cp38-cp38-win32.whl", hash = "sha256:73649555656dd17e809b9b3c54855f4f72144024b0e6395cd37b5395fa0f48c3"},
+ {file = "xxhash-2.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:dafd1066c99d448a7a1226f10766b61ff752aaad8a4392e4cae30aafefa6fff5"},
+ {file = "xxhash-2.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eb1e9e347c9810a272154814cf5ce33a6c3ac7d0d7cbcb066e92dd5f9fa4db8f"},
+ {file = "xxhash-2.0.2-cp39-cp39-manylinux1_i686.whl", hash = "sha256:ebff22f1783f641c6c2b313bfc44d6cc620c17409ec512e67c7c6de809155880"},
+ {file = "xxhash-2.0.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:b7640e043ac6e0f503eadb108e6971d69b0c95c23fbcac3e5632578f9f906050"},
+ {file = "xxhash-2.0.2-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:db2352d375e6594620c462c029d3c1a1b18ff7168e470657e354f1b8b332d9dd"},
+ {file = "xxhash-2.0.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:f49dbd3b8e4cc13f2df92fb3db39204e3258105a212e23784cbb340e415ae8ed"},
+ {file = "xxhash-2.0.2-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:e70059c5cc8f0cecd16d8cb0263de8f317239cabee3fa4af35c0a1ddaed2110e"},
+ {file = "xxhash-2.0.2-cp39-cp39-win32.whl", hash = "sha256:a0199a07a264be96ed658ba3b4e9ee58a3c678e51a18e134e2518cf1a8171e18"},
+ {file = "xxhash-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:173d3f662dc88de734bd622e46a3bbac6fd00e957b3e098fa8b75b141aa4354e"},
+ {file = "xxhash-2.0.2-pp27-pypy_73-macosx_10_9_x86_64.whl", hash = "sha256:e94fdff9b102ca7c0969230d209f7ce17020db17a89d026ac45d8ffb9e4929ec"},
+ {file = "xxhash-2.0.2-pp27-pypy_73-manylinux1_x86_64.whl", hash = "sha256:d7175cd7f490aae742d18eb9b519e74180958f88fa8ff47091727b3efb57bfbf"},
+ {file = "xxhash-2.0.2-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:d707d2a053a5d55ccd2e59d7a228636cafeebb44c9ac3ca1c088f4d384c8c3a9"},
+ {file = "xxhash-2.0.2-pp27-pypy_73-win32.whl", hash = "sha256:dad190caa293abbb39d96b4a09f121fc971d81eb19c96e4e0db89a99a7d59b93"},
+ {file = "xxhash-2.0.2-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5dc3da5fa855dd8e35f24d20fabfcd29c0b3ac85a14dc2c329c029971ae4eeb7"},
+ {file = "xxhash-2.0.2-pp36-pypy36_pp73-manylinux1_x86_64.whl", hash = "sha256:17a3b0a2ff20879ed5c9d9c178349e9c6257db11b193e4103282d7a78ef9cb08"},
+ {file = "xxhash-2.0.2-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:c75f8375c80c3815f49a744ef1a8303577757eb9a2dc53bed33d9318b760fec6"},
+ {file = "xxhash-2.0.2-pp36-pypy36_pp73-win32.whl", hash = "sha256:eb2670ed6c435189aeb479bfff990e00b849ae0ff49945632db74b2a2a08d192"},
+ {file = "xxhash-2.0.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ff518ec1bd7cc33218f8f3325848c56e9c73c5df30138a64a89dd65ab1e1ffb5"},
+ {file = "xxhash-2.0.2-pp37-pypy37_pp73-manylinux1_x86_64.whl", hash = "sha256:c4a0806ffb33c9d892b5565fa010c252c7e0f4d01ded901a637dfede624e4d0c"},
+ {file = "xxhash-2.0.2-pp37-pypy37_pp73-manylinux2010_x86_64.whl", hash = "sha256:fdfac2014301da79cebcd8f9535c875f63242fe404d741cec5f70f400cc6a561"},
+ {file = "xxhash-2.0.2-pp37-pypy37_pp73-win32.whl", hash = "sha256:357f6a52bd18a80635cf4c83f648c42fa0609713b4183929ed019f7627af4b68"},
+ {file = "xxhash-2.0.2.tar.gz", hash = "sha256:b7bead8cf6210eadf9cecf356e17af794f57c0939a3d420a00d87ea652f87b49"},
+]
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..591f73a2
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,18 @@
+[tool.poetry]
+name = "datasets-preview-backend"
+version = "0.1.0"
+description = "API to extract rows of 🤗 datasets"
+authors = ["Sylvain Lesage <[email protected]>"]
+
+[tool.poetry.dependencies]
+python = "^3.8"
+datasets = "^1.10.2"
+starlette = "^0.16.0"
+uvicorn = "^0.14.0"
+
+[tool.poetry.dev-dependencies]
+black = "^21.7b0"
+
+[build-system]
+requires = ["poetry-core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"
|