# wikiquote-de-quotes / CrawlingQuotes.py
# Script for crawling quotes from the German Wikiquote dump into a CSV file.
import bs4
import bz2
import re
import shutil
from urllib.request import urlretrieve
import pandas as pd
# Download and unpack file
filename = "dewikiquote-latest-pages-articles.xml.bz2"
urlretrieve("https://dumps.wikimedia.org/dewikiquote/latest/" + filename, filename)
with bz2.BZ2File(filename) as fr, open(filename[:-4], "wb") as fw:
    shutil.copyfileobj(fr, fw)
# Open file and parse it
with open("dewikiquote-latest-pages-articles.xml") as fp:
    soup = bs4.BeautifulSoup(fp, "xml")
pages = soup.mediawiki.findAll('page')
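# Note: BeautifulSoup materializes the whole dump in memory. For larger dumps,
# a streaming parse is an option. A minimal sketch using only the standard
# library; the endswith check is there because MediaWiki dumps attach a
# version-specific XML namespace to every tag:
# import xml.etree.ElementTree as ET
# for _, elem in ET.iterparse("dewikiquote-latest-pages-articles.xml"):
#     if elem.tag.endswith("page"):
#         ...  # process the page, then call elem.clear() to free memory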
# Return all quotes on a single page
def get_quotes(text: str) -> list[str]:
    res = []
    # Usually a quote sits on a single line
    for line in text.split("\n"):
        # Remove leading and trailing whitespace
        stripped = line.strip()
        # Quotes below this heading refer to, but are not by, the current author, so stop here
        if "zitate mit bezug auf" in stripped.lower():
            return res
        match = re.search(r'\*\s*("[^"]+")', stripped)
        if match:
            quote = match.group(1)
            # Resolve wiki links: [[target|label]] -> label, [[target]] -> target
            cleaned = re.sub(r'\[\[[^\[]+\]\]', lambda x: x.group()[2:].split("|")[-1][:-2], quote)
            # Unwrap templates, keeping everything after the first pipe
            cleaned = re.sub(r'{{[^{}\|]+\|([^{}]+)}}', lambda x: x.group(1), cleaned)
            # Strip HTML tags such as <ref>
            cleaned = re.sub(r'<[^<>]+>', "", cleaned)
            cleaned = cleaned.replace("//", "")  # remove // markers
            cleaned = re.sub(' +', ' ', cleaned)  # collapse repeated spaces
            # Keep only plausible quotes: no URLs, more than 5 characters
            if "http" not in cleaned and len(cleaned) > 5:
                res.append(cleaned)
    return res
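# Example (hypothetical input line, in the dump's wiki markup):
#   get_quotes('* "Der Weg ist das Ziel." - [[Konfuzius]]')
# returns ['"Der Weg ist das Ziel."']: the regex captures only the quoted
# span, so the author link after the dash is never part of the match.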
# Get the categories to which a page belongs
def get_categories(text: str) -> list[str]:
    return re.findall(r"\[\[Kategorie:([^\]|]+)[^]]*\]\]", text)
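# Example (hypothetical markup): plain and piped category links both resolve
# to the bare category name, since the pattern stops capturing at '|' or ']':
#   get_categories("[[Kategorie:Person]] [[Kategorie:Deutschland|Sortierung]]")
# returns ['Person', 'Deutschland'].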
# def get_movie_quotes(text: str):
#     match = re.search(r"== Zitate ==(.*)== Dialoge ==", text, flags=re.DOTALL)
#     if match:
#         segment = match.group(1)
#         match = re.findall(r"=== ([^=]+) === ([^])", segment)
#     return []
# Extract quotes and authors
data = []
omitted = set()
for page in pages:
    author = page.title.text
    raw_text = page.find("text").text
    categories = get_categories(raw_text)
    if "Person" in categories:
        quotes = get_quotes(raw_text)
        for quote in quotes:
            data.append((author, quote))
    # elif "== Filminfo" in raw_text:
    #     fiction = get_movie_quotes(raw_text)
    #     break
    else:
        omitted.add(author)
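# Optional sanity check (sketch), using the counters collected above:
# print(f"kept {len(data)} quotes, omitted {len(omitted)} non-person pages")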
# Save results to csv
df = pd.DataFrame(data, columns=["author", "quote"]).drop_duplicates()
df.to_csv("train.csv")
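# to_csv writes pandas' default integer index as a first, unnamed column.
# A minimal sketch for reading the result back (assuming the file written above):
# df = pd.read_csv("train.csv", index_col=0)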