import itertools
from multiprocessing import Pool
from typing import Tuple, List

import requests
from bs4 import BeautifulSoup
from tqdm import tqdm

# CSS class-name prefixes of the elements to scrape. The classes on the live site
# carry generated suffixes, so they are matched by prefix (startswith) below.
monthLinkClass = "sc-zpw6hx-0"
articleLinkClass = "sc-1w8kdgf-0"
newsHeadClass = "sc-1efpnfq-0"
newsBodyClass = "sc-77igqf-0"
videoClass = "lhhce6-0"

url = "https://www.theonion.com"

def get_links(_url: str, _class: str) -> List[str]:
    """
    Take a URL string and a class-name prefix and return every link found on that
    page inside the first div whose class starts with the given prefix.

    Args:
        _url (str): The URL of the page to scrape.
        _class (str): The class-name prefix of the div containing the links.

    Returns:
        List[str]: The URLs (href values) of the links found.
    """
    page = requests.get(_url)
    soup = BeautifulSoup(page.text, 'html.parser')

    try:
        link_div = soup.find_all('div', attrs={'class': lambda e: e.startswith(_class) if e else False})[0]
    except IndexError:
        raise IndexError(f"No div with class prefix {_class!r} found at {_url}")

    links = link_div.find_all('a')
    links = [link.get('href') for link in links]

    return links

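
# A hedged usage sketch for get_links, kept as a comment so it never runs on import.
# The relative month paths shown are assumptions, not output captured from the site:
#
#     monthLinks = get_links(url + "/sitemap", monthLinkClass)
#     # -> ["/sitemap/2023-05", "/sitemap/2023-04", ...]  (hypothetical)
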

def extractText(_url: str, _done: bool = True) -> Tuple[str, str]:
    """
    Take a URL string and return a tuple of two strings: the text of the page's
    H1 heading (class prefix newsHeadClass) and the text of its first paragraph
    (class prefix newsBodyClass).

    If the request raises an SSLError or ConnectionError, the function retries it
    once with _done set to False (to prevent infinite recursion); if the retry also
    fails, it returns a tuple of empty strings.

    If the page has no H1 heading or first paragraph with the expected classes, the
    function returns a tuple of empty strings, unless the page contains a video div,
    in which case the IndexError is re-raised.

    Args:
        _url (str): The URL of the page to scrape.
        _done (bool): Whether a retry of the request is still allowed (default=True).

    Returns:
        Tuple[str, str]: The text of the H1 heading and of the first paragraph with
        the expected classes, or two empty strings if not found.
    """
    try:
        page = requests.get(_url)
    except (requests.exceptions.SSLError, requests.exceptions.ConnectionError):
        if _done:
            return extractText(_url, _done=False)
        else:
            return "", ""

    soup = BeautifulSoup(page.text, 'html.parser')

    try:
        head = soup.find_all('h1', attrs={'class': lambda _e: _e.startswith(newsHeadClass) if _e else False})[0].text
        body = soup.find_all('p', attrs={'class': lambda _e: _e.startswith(newsBodyClass) if _e else False})[0].text

        # Some pages repeat the headline as the body; treat those as empty.
        if head == body:
            return "", ""
        else:
            return head, body

    except IndexError as e:
        # No headline/body found: skip the page, unless it contains a video div,
        # in which case surface the error.
        video_divs = soup.find_all('div', attrs={'class': lambda _e: _e.startswith(videoClass) if _e else False})
        if not video_divs:
            return "", ""

        print(f"Error: {_url}")
        raise e

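
# A hedged usage sketch for extractText; the article path and the returned strings
# are illustrative assumptions only:
#
#     head, body = extractText(url + "/some-article-slug")
#     # head -> "Headline text", body -> "First paragraph text"  (hypothetical)
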

def batched_extractText(_urls: List[str], _p: Pool) -> List[Tuple[str, str]]:
    """
    Take a list of URL strings and a multiprocessing Pool, and return a list of
    (heading, body) tuples with the extracted text for each page.

    The Pool is used to parallelise extractText across the URLs; results are
    returned in the same order as the input URLs.

    Args:
        _urls (List[str]): The URLs of the pages to scrape.
        _p (Pool): A multiprocessing Pool used to parallelise the extraction.

    Returns:
        List[Tuple[str, str]]: The (H1 heading, first paragraph) text for each page.
    """
    results = _p.map_async(extractText, _urls).get()

    return results

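
# A hedged usage sketch for batched_extractText, assuming a small pool; the article
# paths are placeholders, not real links:
#
#     with Pool(4) as pool:
#         pairs = batched_extractText([url + "/article-one", url + "/article-two"], pool)
#     # -> [("Headline ...", "First paragraph ..."), ("", "")]  (hypothetical)
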

def main() -> None:
    """
    Scrape news article titles and bodies from The Onion website and save them to a file.

    Returns:
        None
    """
    monthLinks = get_links(url + "/sitemap", monthLinkClass)
    print(f"{len(monthLinks)} months have been found.")
    print(f"Oldest is {monthLinks[-1].replace('/sitemap/', '')}")
    print(f"and newest is {monthLinks[0].replace('/sitemap/', '')}")

    monthLinks = [url + link for link in monthLinks]

    articleLinks = [get_links(monthLink, articleLinkClass) for monthLink in tqdm(monthLinks, desc="Months")]
    articleLinks = list(itertools.chain(*articleLinks))
    print(f"{len(articleLinks)} articles have been found.")

    # Scrape the articles in batches, with one pool worker per URL in a batch.
    # The progress bar is advanced manually by batch_size after each batch.
    text = []
    batch_size = 60
    batch_counter = tqdm(total=len(articleLinks), desc="Articles")
    with Pool(batch_size) as p:
        for x in range(0, len(articleLinks), batch_size):
            text += batched_extractText(articleLinks[x:x + batch_size], p)
            batch_counter.update(batch_size)
    batch_counter.close()

    # Drop pages that produced no usable text.
    text = [x for x in text if x != ("", "")]

    # Write one article per line: headline and body separated by " #~# ".
    with open("onion/NewsWebScrape.txt", mode="w", encoding="utf-8") as f:
        for article in text:
            if article:
                f.write(f"{article[0]} #~# {article[1]}\n")

    print(f"{len(articleLinks)} articles were found, and {len(text)} articles were written to file.")

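
# A minimal sketch (assuming the " #~# " separator written in main above) of how the
# saved file could be read back into (headline, body) pairs:
#
#     with open("onion/NewsWebScrape.txt", encoding="utf-8") as f:
#         articles = [tuple(line.rstrip("\n").split(" #~# ", 1)) for line in f]
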

if __name__ == "__main__":
    main()