muhammadsalmanalfaridzi committed
Commit 8151600 · verified · 1 Parent(s): de865d0

Create crawl4ai_scrapper.py

Files changed (1): crawl4ai_scrapper.py +42 -0
crawl4ai_scrapper.py ADDED
@@ -0,0 +1,42 @@
+ from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
+ from crawl4ai.content_filter_strategy import PruningContentFilter
+ from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
+
+ # Function for scraping channel URLs with Crawl4AI
+ async def trigger_scraping_channels(channel_urls, num_of_posts, start_date, end_date, order_by, country):
+     """
+     Trigger scraping for multiple channel URLs with Crawl4AI. The extra
+     parameters keep the interface uniform with other scrapers and are
+     not yet applied to the crawl itself.
+     """
+     browser_config = BrowserConfig(headless=True, verbose=True)
+     run_config = CrawlerRunConfig(
+         cache_mode=CacheMode.ENABLED,
+         # Content filters attach to the markdown generator, not to the run config
+         markdown_generator=DefaultMarkdownGenerator(
+             content_filter=PruningContentFilter(threshold=0.5, threshold_type="fixed", min_word_threshold=0)
+         ),
+     )
+
+     async with AsyncWebCrawler(config=browser_config) as crawler:
+         results = []
+         for url in channel_urls:
+             result = await crawler.arun(url=url, config=run_config)
+             results.append(result.markdown)
+
+     return results
+
+ # Function to get the progress of the scraping task
+ async def get_progress(snapshot_id):
+     """
+     Get the progress of the scraping task. Placeholder: each crawl runs to
+     completion inside trigger_scraping_channels, so status is always ready.
+     """
+     return {"status": "ready", "snapshot_id": snapshot_id}
+
+ # Function to get the output of the scraping task
+ async def get_output(snapshot_id, format="json"):
+     """
+     Get the output of the scraping task. Placeholder returning dummy data.
+     """
+     return [{"url": "https://example.com", "shortcode": "abc123", "formatted_transcript": []}]
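
Since all three functions are coroutines, a caller needs an event loop to drive them. A minimal usage sketch, assuming the module is importable as crawl4ai_scrapper; the channel URL and argument values below are hypothetical examples:

import asyncio

from crawl4ai_scrapper import trigger_scraping_channels

async def main():
    # Hypothetical channel URL; the remaining arguments mirror the
    # scraper's interface and are not yet applied to the crawl.
    pages = await trigger_scraping_channels(
        channel_urls=["https://www.youtube.com/@example"],
        num_of_posts=10,
        start_date="2024-01-01",
        end_date="2024-12-31",
        order_by="date",
        country="US",
    )
    for markdown in pages:
        print(str(markdown)[:200])  # preview the first 200 characters per page

asyncio.run(main())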