-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathweb_scraper.py
40 lines (31 loc) · 1.36 KB
/
web_scraper.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
from bs4 import BeautifulSoup
import requests
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
# class WebScraper:
# def scrape(self, url):
# response = requests.get(url)
# soup = BeautifulSoup(response.content, 'html.parser')
# return ' '.join(map(lambda p: p.text, soup.find_all('p')))
class WebScraper:
    """Scrape visible paragraph text from a web page using headless Chrome."""

    def scrape(self, url):
        """Return the text of all <p> elements on *url*, joined by spaces.

        Drives a headless Chrome instance via Selenium so that
        JavaScript-rendered paragraphs are included (a plain
        ``requests.get`` would miss dynamically injected content).

        Args:
            url: Address of the page to scrape.

        Returns:
            A single string: every paragraph's text, space-separated
            (empty string if the page has no <p> elements).
        """
        # Configure Chrome for unattended/headless operation.
        chrome_options = Options()
        chrome_options.add_argument("--headless")  # Ensure GUI is off
        chrome_options.add_argument("--no-sandbox")
        # Avoid crashes from tiny /dev/shm in containerized environments.
        chrome_options.add_argument("--disable-dev-shm-usage")

        # webdriver_manager downloads/caches a chromedriver matching the
        # installed Chrome, so no manual driver path is needed.
        webdriver_service = Service(ChromeDriverManager().install())
        driver = webdriver.Chrome(service=webdriver_service, options=chrome_options)
        try:
            driver.get(url)
            # Give dynamically loaded content up to 10s to appear before
            # element lookups give up.
            driver.implicitly_wait(10)
            paragraphs = driver.find_elements(By.TAG_NAME, 'p')
            return ' '.join(p.text for p in paragraphs)
        finally:
            # Always shut the browser down — without this, any exception
            # during navigation or extraction leaks a Chrome process.
            driver.quit()