"""Scrape blog articles from https://www.shinjiru.com.my/blog/ and dump them to JSON and JSONL."""
from xtractor.utils import (
    extractor,
    read_the_json,
    dumps_the_json,
    jsonl_converter,
    multi_threading,
)

# extractor(link) is expected to return a parsed page (a BeautifulSoup-style soup).


def fetch_links(link="https://www.shinjiru.com.my/blog/"):
    """Collect article URLs from a single blog listing page."""
    soup = extractor(link)
    div_tags = soup.find_all("div", class_="row")

    temp = []
    for div_tag in div_tags:
        a_tags = div_tag.find_all("a", href=True)
        for a_tag in a_tags:
            # Keep only links to individual posts, skipping pagination and
            # category pages.
            if (
                "blog" in a_tag["href"]
                and "page" not in a_tag["href"]
                and "category" not in a_tag["href"]
            ):
                temp.append(a_tag["href"])

    # Deduplicate before returning.
    all_links = list(set(temp))
    return all_links


def get_all_links():
    """Walk the paginated blog listing and gather every article URL."""
    temp = []
    for i in range(38):  # hard-coded number of listing pages
        base_link = f"https://www.shinjiru.com.my/blog/page/{i + 1}"
        all_links = fetch_links(link=base_link)
        temp.append(all_links)

    # Flatten the per-page lists and deduplicate.
    full_links = [link for page_links in temp for link in page_links]
    full_links = list(set(full_links))
    return full_links
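

# The __main__ block below reads ./shinjiru/all_links_shinjiru.json, which this
# script never writes. The helper below is a hypothetical sketch (not part of the
# original workflow) showing how that file could be produced with get_all_links()
# and dumps_the_json(), assuming dumps_the_json takes a dict and a json_file_name
# keyword as it does further down.
def save_all_links(path="./shinjiru/all_links_shinjiru.json"):
    links = get_all_links()
    # Store under a "links" key, matching how the file is read in __main__.
    dumps_the_json({"links": links}, json_file_name=path)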


def get_articles(link):
    """Return (title, body) for a single blog post, or None if parsing fails."""
    soup = extractor(link)

    try:
        div_tag = soup.find("div", class_="col-lg")

        title = div_tag.find("h1").text
        p_tags = div_tag.find_all("p")

        # Join every paragraph into one article body.
        all_p_tags = [p.text for p in p_tags]
        article = " ".join(all_p_tags)

        return title, article
    except AttributeError:
        # Expected tags are missing (e.g. a removed or non-article page).
        return None
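

# multi_threading is imported from xtractor.utils but not used in this script,
# and its signature is not shown here. As an illustration only, this hypothetical
# helper uses the standard library to fetch articles concurrently; it is a sketch,
# not the original author's approach.
def get_articles_concurrently(links, max_workers=8):
    from concurrent.futures import ThreadPoolExecutor

    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        results = list(pool.map(get_articles, links))
    # Drop pages that failed to parse (get_articles returns None for those).
    return [result for result in results if result is not None]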


if __name__ == "__main__":
    links = read_the_json("./shinjiru/all_links_shinjiru.json")
    links = links["links"]

    title_ = []
    body_ = []
    for link in links:
        try:
            result = get_articles(link)
        except Exception:
            # Skip links that fail to download entirely.
            continue
        if result is None:
            continue
        title, article = result
        title_.append(title)
        body_.append(article)

    data = {"title": title_, "body": body_}

    dumps_the_json(data, json_file_name="./shinjiru/shinjiru_article.json")
    print("DUMPS!")
    jsonl_converter(
        json_file_path="./shinjiru/shinjiru_article.json",
        json_l_file_path="./shinjiru/shinjiru_article.jsonl",
        col_1_name="title",
        col_2_name="body",
    )
    print("CONVERTED!")