Datasets:

Modalities:
Text
Formats:
csv
Languages:
German
Libraries:
Datasets
pandas
License:
File size: 2,485 bytes
2510105
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
import bs4, re, bz2, shutil
from urllib.request import urlretrieve
import pandas as pd

# Download the latest German Wikiquote dump and decompress it in place.
# The decompressed name is derived once (strip the trailing ".bz2") so the
# download, decompress, and parse steps can never drift apart.
filename = "dewikiquote-latest-pages-articles.xml.bz2"
urlretrieve("https://dumps.wikimedia.org/dewikiquote/latest/" + filename, filename)
xml_name = filename[:-4]
with bz2.BZ2File(filename) as fr, open(xml_name, "wb") as fw:
    shutil.copyfileobj(fr, fw)

# Parse the decompressed dump; Wikimedia XML dumps are UTF-8 encoded, so
# name the encoding explicitly instead of relying on the platform default.
with open(xml_name, encoding="utf-8") as fp:
    soup = bs4.BeautifulSoup(fp, "xml")
pages = soup.mediawiki.findAll('page')


# Return all quotes on a single page
def get_quotes(text: str) -> list:
    """Extract all quotes from one wiki page's raw markup.

    A quote is a markup line of the form ``* "..."``.  Wiki links,
    ``{{...|...}}`` templates, and HTML tags are stripped from each match.
    Scanning stops at the "Zitate mit Bezug auf" section, whose quotes are
    about (not by) the page's subject.
    """
    res = []
    # Usually a quote occupies exactly ONE line of markup.
    for line in text.split("\n"):
        stripped = line.strip()
        # Usually at the bottom; quotes from here on are not by the current
        # author, so stop collecting.
        if "zitate mit bezug auf" in stripped.lower():
            return res
        match = re.search(r'\*\s*("[^"]+")', stripped)
        if match:
            quote = match.group(1)
            # [[Ziel|Anzeige]] -> Anzeige, [[Ziel]] -> Ziel
            cleaned = re.sub(r'\[\[[^\[]+\]\]', lambda m: m.group()[2:].split("|")[-1][:-2], quote)
            # {{Vorlage|Inhalt}} -> Inhalt
            cleaned = re.sub(r'{{[^{}\|]+\|([^{}]+)}}', lambda m: m.group(1), cleaned)
            # Drop HTML tags, e.g. <ref>...</ref> markers.
            cleaned = re.sub(r'<[^<>]+>', "", cleaned)
            cleaned = cleaned.replace("//", "")  # removes //
            # Collapse space runs left behind by the substitutions.
            cleaned = re.sub(' +', ' ', cleaned)
            # Skip bare links and fragments too short to be real quotes.
            if "http" not in cleaned and len(cleaned) > 5:
                res.append(cleaned)
    return res

# Get the categories a page belongs to
def get_categories(text: str) -> list:
    """Return the names of all categories the page is linked into.

    Matches ``[[Kategorie:Name]]`` and ``[[Kategorie:Name|Sortkey]]`` in the
    raw wiki markup; only ``Name`` (the capture group) is returned.
    """
    return re.findall(r"\[\[Kategorie:([^\]|]+)[^]]*\]\]", text)

# def get_movie_quotes(text: str):
#     match = re.search(r"== Zitate ==(.*)== Dialoge ==", text, flags=re.DOTALL)
#     if match:
#         segment = match.group(1)
#         match = re.findall(r"=== ([^=]+) === ([^])", segment)
#     return []

# Extract (author, quote) pairs from person pages; remember skipped titles.
data = []
omitted = set()
for page in pages:
    author = page.title.text
    raw_text = page.find("text").text
    if "Person" not in get_categories(raw_text):
        # Not a person page (movies, proverbs, ...) -- record and move on.
        omitted.add(author)
        continue
    for quote in get_quotes(raw_text):
        data.append((author, quote))

# Save results to csv: drop duplicate (author, quote) rows and omit the
# pandas index so the file contains exactly the two data columns (otherwise
# the leftover, non-contiguous index is written as an unnamed first column).
df = pd.DataFrame(data, columns=["author", "quote"]).drop_duplicates()
df.to_csv("train.csv", index=False)