-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathScrape_Website.py
More file actions
35 lines (30 loc) · 1.47 KB
/
Scrape_Website.py
File metadata and controls
35 lines (30 loc) · 1.47 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
import requests
from bs4 import BeautifulSoup

# Exact tag names we care about. The original code used
# name.startswith(('h', 'p', 'ul', 'ol', 'img')), which is a prefix match and
# wrongly accepted unrelated tags such as <hr>, <header>, <html> and <pre>.
HEADING_TAGS = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']
CONTENT_TAGS = set(HEADING_TAGS) | {'p', 'ul', 'ol', 'img'}

url = 'https://example.com'  # Replace with the URL of the website you want to scrape

# Bound the request so a dead host can't hang the script, and surface HTTP
# errors instead of silently parsing an error page's HTML.
response = requests.get(url, timeout=10)
response.raise_for_status()
soup = BeautifulSoup(response.text, 'html.parser')

# For each heading: print its text (and first contained link, if any), then
# walk forward through the document printing content elements until a
# non-content tag ends the run.
for heading in soup.find_all(HEADING_TAGS):
    heading_text = heading.get_text()
    print(f"=== {heading_text} ===")
    link = heading.find('a')  # First <a> tag within the heading, if it exists
    # .get() guards against an <a> with no href attribute (original code
    # would raise KeyError there).
    if link is not None and link.get('href') is not None:
        print(f"Link: {link['href']}")

    next_element = heading.find_next()  # Element following the heading
    # Guard against None: find_next() returns None at the end of the document,
    # where the original `next_element.name.startswith(...)` crashed.
    while next_element is not None and next_element.name in CONTENT_TAGS:
        tag = next_element.name
        if tag in HEADING_TAGS:
            print(f"=== {next_element.get_text()} ===")
        elif tag == 'p':
            print(next_element.get_text())
        elif tag in ('ul', 'ol'):
            for li in next_element.find_all('li'):
                print(f"- {li.get_text()}")
        else:  # 'img'
            # Some <img> tags (e.g. lazy-loaded ones) have no src attribute;
            # .get() avoids a KeyError and prints an empty path instead.
            print(f"Image: {next_element.get('src', '')}")
        next_element = next_element.find_next()  # Move to the next element
    print('\n')  # Blank separation between heading sections for readability