-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathweb_scraping_search.py
More file actions
25 lines (19 loc) · 1 KB
/
web_scraping_search.py
File metadata and controls
25 lines (19 loc) · 1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
# filename: web_scraping_search.py
import requests
from bs4 import BeautifulSoup
def search_web_for_info(search_query, search_url, timeout=10):
    """Fetch *search_url* and return every text node containing *search_query*.

    Parameters
    ----------
    search_query : str
        Substring to look for in the page's text nodes.
    search_url : str
        URL of the page to fetch.
    timeout : float, optional
        Seconds to wait for the HTTP response (default 10), so the request
        cannot block indefinitely on an unresponsive host.

    Returns
    -------
    list
        BeautifulSoup text nodes (NavigableString) that contain the query.

    Raises
    ------
    requests.HTTPError
        If the HTTP request returned an unsuccessful status code.
    requests.Timeout
        If no response arrives within *timeout* seconds.
    """
    # Without a timeout, requests.get can hang forever; timeout makes
    # failure explicit instead of silent.
    response = requests.get(search_url, timeout=timeout)
    response.raise_for_status()
    # Assuming the website uses UTF-8 encoding; adjust if necessary.
    soup = BeautifulSoup(response.content, 'html.parser')
    # 'string=' replaces the deprecated 'text=' keyword (BeautifulSoup 4.4+).
    # The predicate is a placeholder; a real search depends on the site's
    # HTML structure.
    search_results = soup.find_all(string=lambda text: search_query in text)
    return search_results
# Guarded entry point: prevents the network request from firing as a side
# effect if this module is ever imported rather than run directly.
if __name__ == "__main__":
    # Replace 'http://example.com/search' with the actual URL of the search page.
    search_url = 'http://example.com/search'
    search_query = 'SEL 810A'
    # Perform the search and print the results.
    search_results = search_web_for_info(search_query, search_url)
    print(f"Information related to '{search_query}' found on the web page:")
    for result in search_results:
        print(result)