Saibo-backup committed on
Commit
37cea5d
·
1 Parent(s): 2c1e140

refactoring: finished refactoring Grammar Module

Browse files
CP/Tasks/ptb-test-processed.jsonl CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:5ac00d323bcb85ae9aac3e76ffa33041fdee55880d6264a2f48fd039e77b0771
3
- size 810628
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1e96a474d57ce19c53be1e0cb8f04107e6dae3f51a8261dea2280b69ad19c209
3
+ size 2329227
CP/Tasks/ptb/jsonl/merge2jsonl.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import json
import os


def merge_texts_targets(texts, targets, output_file):
    """Merge parallel lists of text and target lines into a JSONL file.

    Each output line is a JSON object {"id": i, "text": ..., "target": ...}
    where i is the 0-based pair index. Both fields are whitespace-stripped.
    Pairs are formed with zip(), so any trailing unpaired lines are ignored.
    """
    with open(output_file, 'w') as output:
        for i, (text, target) in enumerate(zip(texts, targets)):
            data = {
                'id': i,
                'text': text.strip(),
                'target': target.strip()
            }
            json.dump(data, output)
            output.write('\n')


if __name__ == '__main__':
    # Guarded so importing this module has no side effects, matching the
    # other scripts in this repository (previously argparse ran at import).
    parser = argparse.ArgumentParser(description='Merge text and target inputs into a JSONL file.')
    parser.add_argument('texts', help='Input text file')
    parser.add_argument('targets', help='Input target file')
    args = parser.parse_args()

    # os.path.splitext (rather than split('.')[0]) keeps directory
    # components and dotted basenames intact, e.g. './a.b.txt' -> './a.b';
    # split('.')[0] would have produced '' for a './'-prefixed path.
    output_file = (os.path.splitext(args.texts)[0] + '--'
                   + os.path.splitext(args.targets)[0] + '.jsonl')

    # Read input files
    with open(args.texts, 'r') as texts_file, open(args.targets, 'r') as targets_file:
        texts = texts_file.readlines()
        targets = targets_file.readlines()

    # Merge the texts and targets and create the output file
    merge_texts_targets(texts, targets, output_file)

    print(f"Merged file '{output_file}' has been created successfully.")
CP/Tasks/{split_sentence2tokens_for_re_grammar.py → ptb/jsonl/ptb-test-masked-spaced.txt} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d4f5a2c1204b57eba72af09098ee7563ddf0e83bb204d9badf2477cc0244a195
3
- size 895
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:906e1c75bcc9f4f9b63b0c7feb00cf85ff7ee14f7d38d2d575b92ddaea4a5211
3
+ size 871277
CP/Tasks/ptb/jsonl/ptb-test-only-text--ptb-test-masked-spaced.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3bd29e200b0a1fad7fcbb378e12a0da9764487b186bd5c325cee363ca83c5f34
3
+ size 1248683
CP/Tasks/ptb/jsonl/ptb-test-only-text--ptb-test-spaced.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d908d0ccad7fa72a070c3d97c85784ab1182027261657621157e69b9fe587cd4
3
+ size 1392078
CP/Tasks/ptb/jsonl/ptb-test-only-text.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:76ec329a389fa8f00a97525b1f25fe40f3738ae5f7a3e1923be7e0555df37228
3
+ size 289065
CP/Tasks/ptb/jsonl/ptb-test-spaced.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:868ab5268557536a1ef914a28a370a2629d739072566f2c7e3612493ace3dd6b
3
+ size 1014613
CP/Tasks/ptb/parsed/extract_all_tags.py CHANGED
@@ -1,3 +1,48 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:974ca12acc55a29ccc3c2b258662f5fe20bba9eb223fd0b6844d4f2d239a9de3
3
- size 1554
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import json
import re
from pathlib import Path
from collections import Counter

# Word-level (part-of-speech) tags of the Penn Treebank tag set; these are
# filtered out so that only phrase/clause-level tags are reported.
WORD_LEVEL_TAGS = [
    "CC", "CD", "DT", "EX", "FW", "IN", "JJ", "JJR", "JJS", "LS", "MD", "NN", "NNS", "NNP", "NNPS", "PDT", "POS",
    "PRP$", "RB", "RBR", "RBS", "RP", "SYM", "TO", "UH", "VB", "VBD", "VBG", "VBN", "VBP", "VBZ", "WDT", "WP", "WP$",
    "WRB"
]

# Compiled once at module level: extract_tags runs once per input line, so
# recompiling (or re-fetching from re's pattern cache) on every call is
# wasted work in the hot loop.
_TAG_PATTERN = re.compile(r'\(([A-Z0-9-$]*) ')


def extract_tags(line):
    """Return every tag opening a constituent in *line*.

    A tag is the text between '(' and the following space, e.g. '(NP ' -> 'NP'.
    """
    return _TAG_PATTERN.findall(line)


def main(input_file):
    """Extract non-word-level tags from *input_file*, sort by descending
    frequency, and write one JSON-encoded tag per line to
    '<stem>_tags.jsonl' in the current working directory."""
    # Extract all tags and collect their frequencies
    tag_counter = Counter()
    with open(input_file, 'r') as file:
        for line in file:
            tags = extract_tags(line)
            tag_counter.update(tag for tag in tags if tag not in WORD_LEVEL_TAGS)

    # Sort tags by frequency, most frequent first
    sorted_tags = [tag for tag, _ in sorted(tag_counter.items(), key=lambda x: x[1], reverse=True)]

    # Prepare output file name based on input file name
    input_file_path = Path(input_file)
    output_file = f"{input_file_path.stem}_tags.jsonl"

    # Save sorted tags as a JSONL file
    with open(output_file, 'w') as file:
        for tag in sorted_tags:
            file.write(json.dumps(tag) + '\n')

    print(f"Tags extracted, sorted by frequency and saved to {output_file}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Extract and save tags from Penn Treebank dataset')
    parser.add_argument('-i', '--input', required=True, help='Input file path')
    args = parser.parse_args()

    main(args.input)
CP/Tasks/ptb/parsed/extract_all_tags_w_freq.py CHANGED
@@ -1,3 +1,44 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:ea2b318d857514f1afb3b5156c12d1bd09187bc29b7710fe3298b5eb2258964e
3
- size 1613
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import json
import re
from pathlib import Path
from collections import Counter

# Word-level (part-of-speech) tags of the Penn Treebank tag set; excluded so
# only phrase/clause-level tags are counted.
WORD_LEVEL_TAGS = [
    "CC", "CD", "DT", "EX", "FW", "IN", "JJ", "JJR", "JJS", "LS", "MD", "NN", "NNS", "NNP", "NNPS", "PDT", "POS", "PRP$", "RB", "RBR", "RBS", "RP", "SYM", "TO", "UH", "VB", "VBD", "VBG", "VBN", "VBP", "VBZ", "WDT", "WP", "WP$", "WRB"
]

# Compiled once at module level instead of on every extract_tags call —
# the function runs once per input line, so the per-call compile/cache
# lookup was pure overhead.
_TAG_PATTERN = re.compile(r'\(([A-Z0-9-$]*) ')


def extract_tags(line):
    """Return every tag opening a constituent in *line* ('(NP ' -> 'NP')."""
    return _TAG_PATTERN.findall(line)


def main(input_file):
    """Extract non-word-level tags from *input_file* and write
    {"tag": ..., "frequency": ...} records, sorted by descending frequency,
    to '<stem>_tags_w_freq.jsonl' in the current working directory."""
    # Extract all tags and collect their frequencies
    tag_counter = Counter()
    with open(input_file, 'r') as file:
        for line in file:
            tags = extract_tags(line)
            tag_counter.update(tag for tag in tags if tag not in WORD_LEVEL_TAGS)

    # Sort tags by frequency, most frequent first
    sorted_tags = sorted(tag_counter.items(), key=lambda x: x[1], reverse=True)

    # Prepare output file name based on input file name
    input_file_path = Path(input_file)
    output_file = f"{input_file_path.stem}_tags_w_freq.jsonl"

    # Save sorted tags and their frequencies as a JSONL file
    with open(output_file, 'w') as file:
        for tag, freq in sorted_tags:
            file.write(json.dumps({"tag": tag, "frequency": freq}) + '\n')

    print(f"Tags extracted, sorted by frequency and saved to {output_file}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Extract tags from Penn Treebank dataset and sort them by frequency')
    parser.add_argument('-i', '--input', required=True, help='Input file path')
    args = parser.parse_args()

    main(args.input)
CP/Tasks/ptb/parsed/extract_word.py CHANGED
@@ -1,3 +1,27 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:6dabcd921d209025273e1d0125f96944b2d413c300983416da75193870a30e7e
3
- size 950
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import re
import argparse

# Matches '(TAG word' where TAG may end in '$' (PRP$, WP$, NNP$, ...) and
# captures the word text up to the next parenthesis.  The optional '\$?'
# mirrors the masking regex in process_ptb.py; without it, words tagged
# with $-suffixed POS tags were silently dropped from the output.
_WORD_PATTERN = re.compile(r'\(\w+\$?\s+([^()]+)')


def extract_words(line):
    """Return the leaf words of a bracketed parse-tree *line*, in order."""
    return _WORD_PATTERN.findall(line)


def extract_and_save_words(input_file, output_file):
    """Write the space-joined leaf words of each parse tree in *input_file*
    as one line of *output_file*."""
    with open(input_file, 'r') as input_f:
        with open(output_file, 'w') as output_f:
            for line in input_f:
                words = extract_words(line)
                output_f.write(' '.join(words) + '\n')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Extract masked words from a tree bank parse tree file.')
    parser.add_argument('input_file', type=str, help='Path to the input file')
    args = parser.parse_args()

    # Derive the output file name based on the input file
    output_file = args.input_file.replace('.txt', '-only-text.txt')

    # Extract words and save them to the output file
    extract_and_save_words(args.input_file, output_file)

    print('Extraction complete. Output file:', output_file)
CP/Tasks/ptb/parsed/process_ptb.py CHANGED
@@ -1,3 +1,78 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:7c1416cc3e4e56de61fb5f6245503314778ee25f500501d8e1af5a19c02d3540
3
- size 2390
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import re
import argparse
import os


def mask_words(line):
    """Replace every leaf word in a bracketed parse tree with 'XX'."""
    return re.sub(r'(\(\w+\$?\s+)([^()]+)', r'\1XX', line)


def add_space(line):
    """Pad the inner side of every parenthesis with a single space."""
    padded = re.sub(r'\(', r'( ', line)
    return re.sub(r'\)', r' )', padded)


def rm_space(line):
    """Undo add_space: drop the space on the inner side of each parenthesis."""
    stripped = re.sub(r'\( ', r'(', line)
    return re.sub(r' \)', r')', stripped)


def get_file_suffix(spaced, mask_words_enabled, unspaced):
    """Build the output-file suffix from the enabled transformations."""
    parts = []
    if spaced:
        parts.append('-spaced')
    if mask_words_enabled:
        parts.append('-masked')
    if unspaced:
        parts.append('-unspaced')
    return ''.join(parts)


def get_default_output_file(input_file, file_suffix):
    """Insert *file_suffix* between the input file's stem and its extension."""
    file_name, extension = os.path.splitext(input_file)
    return f"{file_name}{file_suffix}{extension}"


def main(input_file, output_file, spaced, mask_words_enabled, unspaced):
    """Apply the selected transformations to every line of *input_file* and
    write the result to *output_file* (or a name derived from the input)."""
    with open(input_file, 'r') as f:
        lines = f.readlines()

    file_suffix = get_file_suffix(spaced=spaced, mask_words_enabled=mask_words_enabled, unspaced=unspaced)

    # Transformations run in a fixed order per line: space, mask, unspace.
    steps = [(spaced, add_space), (mask_words_enabled, mask_words), (unspaced, rm_space)]
    modified_lines = []
    for line in lines:
        for enabled, transform in steps:
            if enabled:
                line = transform(line)
        modified_lines.append(line)

    output_file = output_file or get_default_output_file(input_file, file_suffix)

    with open(output_file, 'w') as f:
        f.writelines(modified_lines)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Process input and output files.')
    parser.add_argument('input_file', help='path to the input file')
    parser.add_argument('--output_file', help='path to the output file')
    parser.add_argument('--spaced', action='store_true', help='whether to add spaces around parentheses')
    parser.add_argument('--mask_words', action='store_true', help='whether to mask words with "XX"')
    parser.add_argument('--unspaced', action='store_true', help='whether to remove spaces around parentheses')

    args = parser.parse_args()

    main(args.input_file, args.output_file, args.spaced, args.mask_words, args.unspaced)
CP/Tasks/ptb/parsed/{ptb-test-masked.txt → ptb-test-masked_old.txt} RENAMED
File without changes
CP/Tasks/ptb/parsed/ptb-valid-masked-spaced.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c2add94eb1865c3e02bf9c941b61365cb9a4ecdc41fe86fad4f908d010923079
3
+ size 611448
CP/Tasks/ptb/parsed/ptb-valid-masked.txt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:08518c0bc14830f38e9b4003015fa066806042656ec7d990d0ed3709d89d885b
3
- size 611887
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7f2d9d892c0750cf2cada058b24c443ee54f93029740f8f153d538b3554dc6ec
3
+ size 480128
CP/Tasks/ptb/raw-text/ptb_txt2jsonl.py CHANGED
@@ -1,3 +1,26 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:405970689258ee9f8e93348faecc161ce23b1160a69c56619cc3e4f614dca755
3
- size 864
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import json
import os


def txt_to_jsonl(txt_file, jsonl_file):
    """Convert a text file (one sentence per line) to a JSONL file.

    Each output line is {"id": <0-based line index>, "text": <stripped line>}.
    """
    # Stream line by line instead of readlines(): output is identical, but
    # the whole input no longer has to fit in memory at once.
    with open(txt_file, 'r') as src, open(jsonl_file, 'w') as dst:
        for i, line in enumerate(src):
            json_obj = {'id': i, 'text': line.strip()}
            dst.write(json.dumps(json_obj) + '\n')

    print(f"Conversion completed. JSONL file '{jsonl_file}' created.")


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Convert a text file to JSONL format.')
    parser.add_argument('input_file', type=str, help='Path to the input text file')
    args = parser.parse_args()

    txt_file_path = args.input_file
    jsonl_file_path = os.path.splitext(txt_file_path)[0] + '.jsonl'

    txt_to_jsonl(txt_file_path, jsonl_file_path)
ED/KB/YAGO-KB.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:72e099c2f89010e7e4de17e6880958c340629f75ea20984eb5bcb24e4483ac6a
3
+ size 10689102
ED/KB/filter.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import string  # NOTE(review): unused in this script; retained from the original


def remove_non_alphabet_lines(input_file, output_file):
    """Copy lines containing at least one alphabetic character from
    *input_file* to *output_file*, sorted lexicographically.

    Lines with no alphabetic character (pure numbers, punctuation, blanks)
    are dropped; each kept line is stripped of surrounding whitespace.
    """
    filtered_lines = []
    with open(input_file, 'r') as f_input:
        for line in f_input:
            line = line.strip()
            if any(char.isalpha() for char in line):
                filtered_lines.append(line)

    sorted_lines = sorted(filtered_lines)

    with open(output_file, 'w') as f_output:
        for line in sorted_lines:
            f_output.write(line + '\n')


if __name__ == '__main__':
    # Example usage; guarded so that importing this module no longer tries to
    # read 'input.txt' from the CWD (it previously ran, and crashed, on import).
    input_file = 'input.txt'
    output_file = 'output.txt'
    remove_non_alphabet_lines(input_file, output_file)
ED/KB/input.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3a5bcaf5525112493678933af2d24a48928680e660c12e6479ff7c14e503dfc4
3
+ size 122730616
ED/KB/{kilt_wikipedia_entities_test.jsonl → kilt_wikipedia_entities_1K.jsonl} RENAMED
File without changes
ED/KB/kilt_wikipedia_entities_filtered.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b50fad315173c6b7b20c7a8dfe136e4b2e9c9e957ad397b8650f5fb86c26803d
3
+ size 122716437
ED/KB/output.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3183e5d74e9db780fb6bb0575b42993437ed375182a2b78fa2daee93892e5de8
3
+ size 122715895
ED/KB/run.py CHANGED
@@ -1,3 +1,10 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:67eb116aee104b9316ef73f0ff8d36e6c394319d8b60037bedd7432673fb3d68
3
- size 330
 
 
 
 
 
 
 
 
1
import json

# Default file names kept byte-identical to the original script (including
# the historical 'entites' spelling — the data files on disk presumably use
# it, so it is deliberately NOT corrected here; TODO confirm against repo).
input_file = 'kilt_wikipedia_entites.txt'
output_file = 'kilt_wikipedia_entites.jsonl'


def txt_to_jsonl(src_path=input_file, dst_path=output_file):
    """Write each stripped line of *src_path* as one JSON-encoded string per
    line of *dst_path* (JSONL of bare strings, not objects)."""
    with open(src_path, 'r') as input_text, open(dst_path, 'w') as output_jsonl:
        for line in input_text:
            json.dump(line.strip(), output_jsonl)
            output_jsonl.write('\n')


if __name__ == '__main__':
    # Guarded so importing this module no longer attempts the conversion
    # (the flat script previously ran — and crashed without the input file —
    # at import time). Defaults preserve the original behavior when run.
    txt_to_jsonl()
ED/KB/sorted.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e0cc3cc4f0f31577d9a34ea438c4d1c87484f46e34fc7fb23fdf08715dcd8ed6
3
+ size 137067148
ED/check_target_entity_in_universe.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import argparse


def count_non_universe_targets(data_path, universe_path):
    """Count data entries whose 'target' is absent from the entity universe.

    *universe_path* is a JSONL file of JSON-encoded entity strings;
    *data_path* is a JSONL file of objects carrying a 'target' key.

    Prints a summary and returns (total_count, non_universe_count) so the
    result is also usable programmatically, not only via stdout.
    """
    # Load universe file entities into a set for O(1) membership lookups
    universe_entities = set()
    with open(universe_path, 'r') as universe_file:
        for line in universe_file:
            entity = json.loads(line.strip())
            universe_entities.add(entity)

    # Count entries in data file with targets not in universe
    non_universe_count = 0
    total_count = 0
    with open(data_path, 'r') as data_file:
        for line in data_file:
            data_entry = json.loads(line.strip())
            total_count += 1
            if data_entry['target'] not in universe_entities:
                non_universe_count += 1

    print(f'Total entries: {total_count}')
    print(f'Entries with targets not in universe: {non_universe_count}')
    return total_count, non_universe_count


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', required=True, help='Path to input data jsonl file')
    parser.add_argument('--universe', required=True, help='Path to universe jsonl file')
    args = parser.parse_args()

    count_non_universe_targets(args.data, args.universe)
IE/IE_merge2json.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import argparse


def jsonl_to_list(filename):
    """Load a JSONL file into a list of parsed objects, one per line."""
    records = []
    with open(filename, 'r') as f:
        for line in f:
            records.append(json.loads(line))
    return records


def combine_and_write_to_single_line_jsonl(entities_file, relations_file, output_file):
    """Merge an entities JSONL file and a relations JSONL file into a single
    JSON object {"entities": [...], "relations": [...]} written on one line
    of *output_file*."""
    combined = {
        "entities": jsonl_to_list(entities_file),
        "relations": jsonl_to_list(relations_file),
    }

    with open(output_file, 'w') as out:
        out.write(json.dumps(combined))


def main():
    """CLI entry point: parse the three file-path arguments and run the merge."""
    parser = argparse.ArgumentParser(description="Combine two JSONL files into one JSONL file with a single line.")
    parser.add_argument("entities_file", help="Path to the entities.jsonl file.")
    parser.add_argument("relations_file", help="Path to the relations.jsonl file.")
    parser.add_argument("output_file", help="Path to the output JSONL file.")

    args = parser.parse_args()

    combine_and_write_to_single_line_jsonl(args.entities_file, args.relations_file, args.output_file)


if __name__ == "__main__":
    main()
IE/KB/medium/sorted.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6f5e0f6b97b9691856425d1da11238e7788651663d11479b4bca661eb2212b84
3
+ size 58369230
IE/KB/small/sorted.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5cfa7efb2ae3b2e6c34cf35fc8450c8c7c5a08cfd2905f669be86a3c8dd862f7
3
+ size 4844233
IE/Tasks/rebel_1M.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:02e0ad482dc961c17b22d4fcf2c61a53f020c4e34594b25689d97059fee0826e
3
+ size 61115532
IE/Tasks/rebel_6M.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0510a53bfc09816bca77a91a2584d1355ff56e637c60bd94a29cbfc0f2116998
3
+ size 142680871
IE/Tasks/wikinre.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5b51b9c3745c40d9bae10294c38dc990fb7d3657acfff372f09d521d539079e
3
+ size 5126380