Duplicated tokens #2
by ferrazzipietro - opened
Hi,
thanks for the useful resource.
I have noticed some inconsistencies while trying to use the dataset: there are repeated tokens in the "tokens" column that are not duplicated in the corresponding "text" column. For example,
- In document 008290, "estratti" is duplicated in "tokens" at positions 1132 and 1133, while it is not duplicated in the corresponding "text";
- In document 041038, "montelukast" is duplicated in "tokens" at positions 31 and 32, "eritema" at 1872 and 1874, "multiforme" at 1873 and 1875, "congestione" at 2982 and 2984, "nasale" at 2985 and 2987, and "insufficienza" and "epatica" are duplicated as well.
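Such positions can be collected with a rough heuristic; a minimal sketch, assuming the dataset is loaded as `d` and exposes the `document_id`, `text`, and `tokens` columns used below (it will miss duplicates separated by punctuation in the text, so the output still needs manual review):

```python
def find_suspect_duplicates(example):
    # Heuristic: flag consecutive identical tokens whose doubled form
    # ("token token") does not occur in the "text" column.
    suspects = []
    tokens = example['tokens']
    for i in range(1, len(tokens)):
        if tokens[i] == tokens[i - 1] and f"{tokens[i]} {tokens[i]}" not in example['text']:
            suspects.append((i, tokens[i]))
    if suspects:
        print(example['document_id'], suspects)
    return example

d.map(find_suspect_duplicates)
```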
I collected these duplications and cleaned the data accordingly:
```python
def adjust_quotes(example):
    # Normalize Penn-Treebank-style quote tokens back to the '"' used in "text"
    example['tokens'] = [t if t not in ['``', "''"] else '"' for t in example['tokens']]
    return example

def remove_duplicated(example):
    # Positions of the spurious duplicate tokens, per document_id.
    # Sums like 3823+7 keep the original (pre-removal) indices readable.
    duplications = {
        '008290': [1133],
        '041038': [32, 1874, 1875, 2984, 2985, 3770, 3771, 3823+7, 3823+8],
        '013403': [4520, 4521, 4522],
        '041333': [1068, 3835+1],
        '041334': [3835],
        '042601': [538, 539, 540, 541, 542, 547+4, 2202+5, 2203+5, 2204+5, 2622+8, 2845+9],
        '041445': [1162, 1162+1, 1162+2],
        '041387': [362, 362+1],
        '047147': [1677, 1677+1],
        '037940': [209, 209+1, 884+2, 884+3, 4095+4, 4095+5],
        '049108': [895, 895+1, 2328+2, 2328+3],
        '046269': [1512, 3180, 4093+1, 4219+3, 4241+4],
        '045214': [1738, 1738+1, 1759+2, 1759+3],
        '025396': [527, 527+1],
        '035540': [1214],
        '037637': [1893, 1895+1],
        '025463': [735, 735+1, 858+2, 858+3, 858+4, 858+5, 864+6, 864+7, 874+8, 874+9, 891+10, 891+11, 891+12, 903+13, 903+14, 906+15, 906+16, 1159+17, 1159+18],
        '042962': [275, 275+1, 478+2, 478+3, 488+4, 488+5, 557+6, 557+7, 596+8, 596+9, 800+10, 800+11, 937+12, 937+13, 1118+14, 1118+15, 1200+16, 1250+17, 1250+17,
                   1254+18, 1254+19, 1316+20, 1345+21, 1345+22, 1345+23, 1379+24, 1379+25, 1447+26, 1447+27, 1447+28, 1503+29, 1503+30, 1737+31, 1748+32, 1748+33,
                   1815+34, 1927+35, 1927+36, 2039+37, 2039+38, 3137+39, 3137+40, 3145+41, 3145+42, 3153+43, 3153+44, 3153+45, 3206+46, 3220+47, 3220+48, 3231+49,
                   3360+50, 3360+51, 3360+52, 3534+53, 3534+54, 4419+55, 4419+56, 4502+57, 4502+58, 4554+59, 4554+60, 4617+61, 4617+61, 4681+62, 4681+63, 4883+64,
                   4883+65, 4885+66, 4885+67, 4885+68, 4885+69, 4912+70, 4912+71, 4912+72],
        '045765': [1079],
        '048293': [4686, 8157+1, 8416+2, 8416+3],
        '025740': [63, 534+1, 693+2, 693+3],
        '039547': [5701, 5701+1],
        '038107': [1107, 4545+1, 4545+2],
        '036824': [6020],
        '025055': [889, 894+1, 894+2, 894+3, 894+4, 894+5, 898+6, 904+7, 904+8, 1125+9, 2474+10, 2474+11, 2478+12, 2482+13]
    }
    if example['document_id'] in duplications:
        pos = duplications[example['document_id']]
        for i, p in enumerate(pos):
            # Positions are ascending, so subtract the number of tokens
            # already removed to get the current index.
            example['tokens'].pop(p - i)
            example['ner_tags'].pop(p - i)
    return example

def adjust_IOB(example):
    # Removing a token can leave an entity that now starts with I-;
    # promote such tags to B- so the IOB sequence stays valid.
    prev_tag = 'O'
    for i, tag in enumerate(example['ner_tags']):
        if prev_tag.startswith('O') and tag.startswith('I-'):
            print(f"Adjusting IOB: {prev_tag} -> {tag}")
            example['ner_tags'][i] = 'B-' + tag[2:]
            print(f"Adjusted IOB: {tag} -> {example['ner_tags'][i]}")
        prev_tag = tag
    return example

cleaned_data = d.map(adjust_quotes)
cleaned_data = cleaned_data.map(remove_duplicated, load_from_cache_file=False)
cleaned_data = cleaned_data.map(adjust_IOB, load_from_cache_file=False)
```
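As a sanity check after cleaning (same heuristic as above, same column names), one can verify that the token and tag lists stay aligned and that no suspect consecutive duplicates remain:

```python
def check_example(example):
    # Token and tag lists must have the same length after the pops.
    assert len(example['tokens']) == len(example['ner_tags'])
    for i in range(1, len(example['tokens'])):
        a, b = example['tokens'][i - 1], example['tokens'][i]
        # Legitimate repetitions should still appear doubled in the text.
        if a == b and f"{a} {b}" not in example['text']:
            print(f"residual duplicate {a!r} in document {example['document_id']}")
    return example

cleaned_data.map(check_example, load_from_cache_file=False)
```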
Nevertheless, I think a fix in the distributed data itself is required.
A minor note is that quotes are represented differently in the "text" column than in the "tokens" one: in the text they appear as the character '"', while in the tokens they appear as either "``" or "''", which breaks the direct correspondence between "text" and "tokens".
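A minimal illustration of the mismatch (the strings are hypothetical, not taken from the dataset):

```python
text = 'reazione avversa "grave"'
tokens = ['reazione', 'avversa', '``', 'grave', "''"]

# The quote tokens cannot be located in the raw text as-is...
assert '``' not in text and "''" not in text
# ...but they can after the normalization shown in adjust_quotes above.
normalized = ['"' if t in ('``', "''") else t for t in tokens]
assert all(t in text for t in normalized)
```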