mrcuddle committed on
Commit 7984f0c
1 Parent(s): 80501a2

Migrated from GitHub

data/P0_get_categories.py ADDED
@@ -0,0 +1,29 @@
+ import requests
+ import pathlib
+ import bs4
+
+ dest = pathlib.Path("data")
+ dest.mkdir(exist_ok=True)
+
+ f_save = dest / "categories.txt"
+
+ url = "https://www.literotica.com/stories/"
+ r = requests.get(url)
+
+ if not r.ok:
+     msg = f"Failed to download {url}"
+     raise ValueError(msg)
+
+ html = r.content
+ soup = bs4.BeautifulSoup(html, "lxml")
+
+ with open(f_save, "w") as FOUT:
+     for a in soup.find_all("a", href=True):
+         link = a["href"]
+
+         if "www.literotica.com/c/" not in link:
+             continue
+
+         print(link)
+
+         FOUT.write(link + "\n")
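
A minimal sketch (not part of the commit) of how this stage's output could be sanity-checked; it assumes only that data/categories.txt contains one category URL per line, as written above:

# Sketch: verify the scraped category list (assumes data/categories.txt exists)
import pathlib

links = pathlib.Path("data/categories.txt").read_text().strip().split("\n")
assert all("www.literotica.com/c/" in link for link in links)
print(f"{len(links)} category links scraped")
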
data/P1_get_page_counts.py ADDED
@@ -0,0 +1,47 @@
+ import requests
+ import bs4
+ import pandas as pd
+ from dspipe import Pipe
+ import time
+
+
+ categories = open("data/categories.txt").read().strip().split("\n")
+ sess = requests.session()
+
+ data = []
+
+
+ def compute(url):
+     print(url)
+
+     # Append this so we start on the first page
+     first_page_url = url + "/1-page"
+
+     r = sess.get(first_page_url)
+
+     if not r.ok:
+         msg = f"Failed to download {url}"
+         raise ValueError(msg)
+
+     soup = bs4.BeautifulSoup(r.content, "lxml")
+     button = soup.find("select")
+
+     # If this fails, assume we only have 3 pages
+     try:
+         options = [int(x["value"]) for x in button.find_all("option", value=True)]
+     except AttributeError:
+         options = [1, 2, 3]
+
+     data.append(
+         {"url": url, "last_page": max(options)}
+     )
+
+     print(data[-1])
+
+     time.sleep(1)
+
+
+ Pipe(categories, limit=None, shuffle=True)(compute, 1)
+
+ df = pd.DataFrame(data).set_index("url")
+ df.to_csv("data/categories_with_pages.csv")
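
A small sketch (not part of the commit) showing how the resulting CSV could be inspected; it assumes only the url/last_page schema written by this script:

# Sketch: inspect the page counts written by P1
import pandas as pd

df = pd.read_csv("data/categories_with_pages.csv")
print(df.sort_values("last_page", ascending=False).head())
print("Total listing pages to fetch:", df.last_page.sum())
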
data/P2_get_story_links.py ADDED
@@ -0,0 +1,41 @@
+ import requests
+ import bs4
+ import pandas as pd
+ from dspipe import Pipe
+ import time
+
+ sess = requests.session()
+
+
+ def page_iterator():
+     df = pd.read_csv("data/categories_with_pages.csv")
+
+     for url, n_pages in zip(df.url, df.last_page):
+         for n in range(1, n_pages + 1):  # listing pages are 1-indexed (see P1)
+             yield (url, n)
+
+
+ def compute(item, f1):
+     base_url, n = item
+     category = base_url.split("/")[-1]
+     url = f"{base_url}/{n}-page"
+
+     f1 = f1.parent / f"{category}_{n:04d}.html"
+     if f1.exists():
+         return False
+
+     r = sess.get(url)
+
+     if not r.ok:
+         msg = f"Failed to download {url}"
+         raise ValueError(msg)
+
+     with open(f1, "wb") as FOUT:
+         FOUT.write(r.content)
+
+     time.sleep(2)
+
+
+ Pipe(page_iterator(), "data/story_listings", limit=None, shuffle=True, autoname=False)(
+     compute, 1
+ )
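
As a hedged illustration (not part of the commit), missing listing pages could be spotted by comparing the expected {category}_{n:04d}.html names against what is already in data/story_listings/:

# Sketch: report listing pages that have not been downloaded yet
from pathlib import Path
import pandas as pd

df = pd.read_csv("data/categories_with_pages.csv")
have = {p.name for p in Path("data/story_listings").glob("*.html")}

for url, n_pages in zip(df.url, df.last_page):
    category = url.split("/")[-1]
    missing = [n for n in range(1, n_pages + 1) if f"{category}_{n:04d}.html" not in have]
    if missing:
        print(category, "missing pages:", missing)
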
data/P3_download_stories.py ADDED
@@ -0,0 +1,93 @@
+ import requests
+ import bs4
+ import pandas as pd
+ from dspipe import Pipe
+ import time
+ from pathlib import Path
+ from itertools import chain
+
+
+ """
+ Downloads only the first page of each story; later stages of the pipeline
+ check for and download additional pages if needed.
+ """
+
+
+ n_downloads = 2
+ fail_time = 120
+
+ sess = requests.session()
+
+
+ def proxy(url):
+     # Comment out the line below to use the proxy
+     return sess.get(url)
+
+     port = 8000
+     host = ""  # Fill this in with a proper host
+     proxy_url = f"{host}:{port}"
+     return requests.get(proxy_url, params={"url": url})
+
+
+ def collect(f_html):
+
+     with open(f_html) as FIN:
+         soup = bs4.BeautifulSoup(FIN.read(), "lxml")
+
+     links = []
+     for a in soup.find_all("a", href=True):
+         link = a["href"]
+         # 'www.literotica.com/i/' are image stories
+
+         if "www.literotica.com/s/" not in link:
+             continue
+
+         link = link.split("/s/")[-1]
+
+         links.append(link)
+
+     # if not len(links):
+     #     print(f"Note: {f_html} has no text links")
+
+     return links
+
+
+ def download(f0, f1):
+     print(f0, f1)
+
+     url = "https://www.literotica.com/s/" + f0
+
+     try:
+         # r = sess.get(url)
+         r = proxy(url)
+
+     except Exception:
+         print(f"Failed to download {url}")
+         time.sleep(fail_time)
+         return False
+
+     if not r.ok:
+         print(f"Failed to download {url}")
+         time.sleep(fail_time)
+         return False
+
+     html = r.content
+     soup = bs4.BeautifulSoup(html, "lxml")
+
+     # Remove the scripting and styling
+     for name in ["script", "style"]:
+         for block in soup.find_all(name):
+             block.decompose()
+
+     with open(f1, "w") as FOUT:
+         FOUT.write(str(soup))
+
+     time.sleep(1)
+
+
+ P = Pipe("data/story_listings/", input_suffix=".html", limit=None)
+ links = P(collect, -1)
+ links = list(chain(*links))
+
+
+ Pipe(links, "data/stories", output_suffix=".html", shuffle=True)(download, n_downloads)
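
A quick progress check is sketched below (not part of the commit); it only assumes the data/story_listings/ and data/stories/ directories populated by P2 and this script:

# Sketch: rough progress check for the first-page story downloads
from pathlib import Path

n_listings = len(list(Path("data/story_listings").glob("*.html")))
n_stories = len(list(Path("data/stories").glob("*.html")))
print(f"{n_listings} listing pages scraped, {n_stories} story first pages downloaded")
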
data/P4_download_extra_pages.py ADDED
@@ -0,0 +1,108 @@
+ import requests
+ import bs4
+ import json
+ import pandas as pd
+ from dspipe import Pipe
+ import time
+ from pathlib import Path
+ from itertools import chain
+
+ """
+ Downloads additional pages for each story if needed. Collects the text
+ for each story into a single file along with the meta information from
+ the first page.
+ """
+
+ n_downloads = 4
+ fail_time = 120
+ sess = requests.session()
+
+
+ def proxy(url):
+     # Comment out the line below to use the proxy
+     return sess.get(url)
+
+     port = 8000
+     host = ""  # Fill this in with a proper host
+     proxy_url = f"{host}:{port}"
+     return requests.get(proxy_url, params={"url": url})
+
+
+ def download(f0, f1):
+
+     with open(f0) as FIN:
+         raw = FIN.read()
+
+     soup = bs4.BeautifulSoup(raw, "lxml")
+
+     page_text = soup.find("div", class_="b-pager-pages").span.text
+
+     title = soup.find("div", class_="b-story-header").h1.get_text()
+     author = soup.find("div", class_="b-story-header").a.get_text()
+
+     stats = soup.find("span", class_="b-story-stats").text.split()
+     cat = soup.find("div", class_="b-breadcrumbs").children
+
+     meta = {
+         "title": title,
+         "author": author,
+         "category": list(cat)[1].text,
+         "n_comments": int(stats[0]),
+         "n_views": int(stats[2]),
+         "n_favorites": int(stats[4]),
+         "n_pages": int(page_text.split()[0]),
+     }
+
+     next_btn = soup.find("a", text="Next")
+     story = soup.find("div", class_="b-story-body-x").get_text()
+
+     print(meta)
+
+     while next_btn is not None:
+         link = next_btn["href"]
+
+         r = proxy(link)
+
+         if not r.ok:
+             print(f"Failed to download {link}")
+             time.sleep(fail_time)
+             return False
+
+         soup = bs4.BeautifulSoup(r.content, "lxml")
+
+         page_text = soup.find("div", class_="b-story-body-x")
+         next_btn = soup.find("a", text="Next")
+
+         # print(page_text)
+
+         if page_text:
+             story += page_text.get_text()
+
+     item = {
+         "text": story,
+         "meta": meta,
+     }
+
+     js = json.dumps(item, indent=2)
+
+     with open(f1, "w") as FOUT:
+         FOUT.write(js)
+
+     time.sleep(1)
+
+
+ def safe(f0, f1):
+     try:
+         download(f0, f1)
+     except Exception as EX:
+         print(f"FAILED {f0}, {EX}")
+
+
+ Pipe(
+     "data/stories/",
+     "data/full_text",
+     input_suffix=".html",
+     output_suffix=".json",
+     shuffle=True,
+     limit=None,
+ )(safe, n_downloads)
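
A minimal sketch (not part of the commit) of reading back one of the JSON records written here, assuming only the {"text", "meta"} schema built in download():

# Sketch: load a single collected story and inspect its metadata
import json
from pathlib import Path

files = sorted(Path("data/full_text").glob("*.json"))
with open(files[0]) as FIN:
    item = json.load(FIN)

print(item["meta"])  # title, author, category, counts, n_pages
print(len(item["text"]), "characters of story text")
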
data/P5_collate_stories.py ADDED
@@ -0,0 +1,16 @@
+ import jsonlines
+ import json
+ from dspipe import Pipe
+
+ f_save = "data/Literotica.jsonl"
+
+
+ def compute(f0):
+     with open(f0) as FIN:
+         js = json.load(FIN)
+
+     FOUT.write(js)  # FOUT is the jsonlines writer opened below
+
+
+ with jsonlines.open(f_save, "w") as FOUT:
+     Pipe("data/full_text/")(compute, 1)