I'm trying to figure out how to loop through multiple pages to pull Indeed's company ratings. Any ideas? Here is what I've done to pull one page's worth of data.
import pandas as pd
import requests
from bs4 import BeautifulSoup

def parse(base_url):
    # Request the page that was passed in, rather than a hard-coded URL,
    # so the same function can be reused for every page of reviews.
    response = requests.get(base_url, timeout=5)
    page_content = BeautifulSoup(response.content, 'lxml')
    containers = page_content.find_all('div', {'class': 'cmp-review-container'})
    rows = []
    for item in containers:
        try:
            rating = item.find('div', {'class': 'cmp-ratingNumber'}).text.replace('\n', '')
        except AttributeError:
            rating = None
        try:
            rating_title = item.find('div', {'class': 'cmp-review-title'}).text.replace('\n', '')
        except AttributeError:
            rating_title = None
        try:
            rating_description = item.find('span', {'class': 'cmp-review-text'}).text.replace('\n', '')
        except AttributeError:
            rating_description = None
        try:
            rating_pros = item.find('div', {'class': 'cmp-review-pro-text'}).text.replace('\n', '')
        except AttributeError:
            rating_pros = None
        try:
            rating_cons = item.find('div', {'class': 'cmp-review-con-text'}).text.replace('\n', '')
        except AttributeError:
            rating_cons = None
        rows.append({'rating': rating, 'rating_title': rating_title,
                     'rating_description': rating_description,
                     'rating_pros': rating_pros, 'rating_cons': rating_cons})
    # Build the frame once at the end; DataFrame.append was removed in pandas 2.0.
    return pd.DataFrame(rows, columns=['rating', 'rating_title', 'rating_description',
                                       'rating_pros', 'rating_cons'])
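One way to extend this to several pages is to call parse() once per page and concatenate the results. This is only a sketch: it assumes Indeed paginates reviews with a start offset query parameter (?start=0, 20, 40, ...) in steps of 20, which you should verify in your browser before relying on it.

import pandas as pd

def scrape_reviews(company='Google', pages=5, page_size=20):
    # Assumed pagination scheme: ?start=0, 20, 40, ... (check the live site).
    frames = []
    for offset in range(0, pages * page_size, page_size):
        url = f'https://www.indeed.com/cmp/{company}/reviews?start={offset}'
        frames.append(parse(url))  # reuse the parse() function defined above
    return pd.concat(frames, ignore_index=True)

all_reviews = scrape_reviews()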
I'm trying to loop over the pages, crawl them, and save the detailed contents from this link:
Based on the code from here, I've modified the code to:
import pandas as pd
import requests
from bs4 import BeautifulSoup

BASE_URL = "http://www.jscq.com.cn/dsf/zc/cjgg"

def get_main_urls() -> list:
    start_url = f"{BASE_URL}/index.html"
    return [start_url] + [f"{BASE_URL}/index_{i}.html" for i in range(1, 6)]

def get_follow_urls(urls: list, session: requests.Session()) -> iter:
    for url in urls[:1]:  # remove [:1] to scrape all the pages
        body = session.get(url).content
        s = BeautifulSoup(body, "lxml").find_all("td", {"width": "60%"})
        yield from [f"{BASE_URL}{a.find('a')['href'][1:]}" for a in s]

updated_df = pd.DataFrame()

with requests.Session() as connection_session:  # reuse your connection!
    for follow_url in get_follow_urls(get_main_urls(), connection_session):
        key = follow_url.rsplit("/")[-1].replace(".html", "")
        # print(f"Fetching data for {key}...")
        dfs = pd.read_html(
            connection_session.get(follow_url).content.decode("utf-8"),
            flavor="bs4")
        # https://stackoverflow.com/questions/39710903/pd-read-html-imports-a-list-rather-than-a-dataframe
        for df in dfs:
            # df = dfs[0].T
            df = dfs[0].T.iloc[1:, :].copy()
            updated_df = updated_df.append(df)
        print(updated_df)

cols = ['项目编号', '转让/出租标的名称', '转让方/出租方名称', '转让标的评估价/年租金评估价(元)',
        '转让底价/年租金底价(元)', '受让方/承租方名称', '成交价/成交年租金(元)', '成交日期']
updated_df.columns = cols
updated_df.to_excel('./data.xlsx', index=False)
But it only successfully crawls the first page. How can I crawl all the pages and also add a URL column? Thanks.
Is this what you're looking for? This processes all the URLs and dumps the resulting list of dataframes to a single Excel file.
Here's how:
import pandas as pd
import requests
from bs4 import BeautifulSoup
from typing import Iterator

BASE_URL = "http://www.jscq.com.cn/dsf/zc/cjgg"
COLUMNS = [
    '项目编号', '转让/出租标的名称', '转让方/出租方名称',
    '转让标的评估价/年租金评估价(元)', '转让底价/年租金底价(元)',
    '受让方/承租方名称', '成交价/成交年租金(元)', '成交日期', 'URL'
]

def get_main_urls() -> list:
    start_url = f"{BASE_URL}/index.html"
    return [start_url] + [f"{BASE_URL}/index_{i}.html" for i in range(1, 6)]

def get_follow_urls(urls: list, session: requests.Session) -> Iterator[str]:
    # Visit every listing page and yield the absolute URL of each detail page.
    for url in urls:
        body = session.get(url).content
        s = BeautifulSoup(body, "lxml").find_all("td", {"width": "60%"})
        yield from [f"{BASE_URL}{a.find('a')['href'][1:]}" for a in s]

def post_process(list_of_dataframes: list, source_url: str) -> pd.DataFrame:
    # Each detail page holds one two-column key/value table; append the source
    # URL as one more key/value pair, then transpose it into a single row.
    temp_df = list_of_dataframes[0]
    url_row = pd.DataFrame([["URL", source_url]], columns=temp_df.columns)
    temp_df = pd.concat([temp_df, url_row], ignore_index=True)
    return temp_df.T.iloc[1:, :].copy()

def dump_to_excel(post_processed_dfs: list):
    df = pd.concat(post_processed_dfs)
    df.columns = COLUMNS
    df.to_excel("scraped_data.xlsx", index=False)

processed_dfs = []

with requests.Session() as connection_session:  # reuse your connection!
    for follow_url in get_follow_urls(get_main_urls(), connection_session):
        key = follow_url.rsplit("/")[-1].replace(".html", "")
        print(f"Fetching data for {key}...")
        df_list = pd.read_html(
            connection_session.get(follow_url).content.decode("utf-8"),
            flavor="bs4",
        )
        processed_dfs.append(post_process(df_list, follow_url))

dump_to_excel(processed_dfs)
Output: a single scraped_data.xlsx file with one row per detail page, including a URL column.
To help fight COVID-19 here in the Philippines, I'm trying to do data analysis. My data source is the table of cases on Wikipedia. See https://en.wikipedia.org/wiki/2020_coronavirus_pandemic_in_the_Philippines
I tried to get the table in Python with BeautifulSoup, but I cannot seem to get the content of the columns [Facility of admission or consultation, Had recent travel history abroad].
What am I doing wrong?
Here's my code: (can also be found here https://github.com/gio888/covid19_ph2/blob/master/covid_import_from_wikipedia.ipynb)
import pandas as pd
import requests
from bs4 import BeautifulSoup

url = "https://en.wikipedia.org/wiki/Template:2019%E2%80%9320_coronavirus_pandemic_data/Philippines_medical_cases_summary"
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
table = soup.find('table', class_='wikitable')

n_columns = 0
n_rows = 0
column_names = []

for row in table.find_all('tr'):
    td_tags = row.find_all('td')
    if len(td_tags) > 0:
        n_rows += 1
        if n_columns == 0:
            n_columns = len(td_tags)
    th_tags = row.find_all('th')
    if len(th_tags) > 0 and len(column_names) == 0:
        for th in th_tags:
            column_names.append(th.get_text())

columns = column_names if len(column_names) > 0 else range(0, n_columns)
df = pd.DataFrame(columns=columns, index=range(0, n_rows))

row_marker = 0
for row in table.find_all('tr'):
    column_marker = 0
    columns = row.find_all('td')
    for column in columns:
        df.iat[row_marker, column_marker] = column.get_text()
        column_marker += 1
    if len(columns) > 0:
        row_marker += 1

for col in df:
    try:
        df[col] = df[col].astype(float)
    except ValueError:
        pass

df
import pandas as pd
from selenium import webdriver
from selenium.webdriver.firefox.options import Options

options = Options()
options.add_argument('--headless')
driver = webdriver.Firefox(options=options)
driver.get(
    "https://en.wikipedia.org/wiki/2020_coronavirus_pandemic_in_the_Philippines")

# Those columns encode their value in a CSS class (which is why BeautifulSoup
# saw them as empty), so rewrite each cell's HTML to readable text before
# handing the page to pandas.
items = [["yes", "Yes"], ["no", "No"], ["TBA", "TBA"],
         ["status-d", "Died"], ["status-r", "Recovered"], ["status-a", "Admitted"]]

for item in items:
    script = (
        "document.querySelectorAll('.{}').forEach((element) => element.innerHTML = '{}')".format(*item))
    driver.execute_script(script)

df = pd.read_html(driver.page_source)[2]
df.to_csv("data.csv", index=False)
driver.quit()
Output: data.csv containing the parsed table, with the class-coded cells rendered as readable text.
import pandas as pd
import requests
from bs4 import BeautifulSoup

# url = "https://en.wikipedia.org/wiki/Template:2019%E2%80%9320_coronavirus_pandemic_data/Philippines_medical_cases_summary"

# Map the CSS classes used by the table to the text they represent.
css_content = {
    'status-a': 'Admitted',
    'status-r': 'Recovered',
    'status-d': 'Died',
    'yes': 'Yes',
    'no': 'No',
    'tba': 'TBA',
    'covid-sticky': 'skip_header'
}

def Check_att(source, value, attribute='class'):
    # <tag att='value'>, e.g. <td class='x'>.
    # col_value is the module-level variable set in the loop below; once a cell
    # has been resolved, keep that value instead of checking further classes.
    if col_value:
        return col_value
    if value in source.attrs.get(attribute, []):
        return css_content.get(value, '')
    return ''

url = 'https://en.wikipedia.org/wiki/2020_coronavirus_pandemic_in_the_Philippines'
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
table = soup.find('table', class_='wikitable')

column_names = [col_name.text.rstrip('\n').strip() for col_name in table.select('tr.covid-sticky > th')]
n_rows = len(table.select('tr > td'))
df = pd.DataFrame(columns=column_names, index=range(0, n_rows))

for row_index, row in enumerate(table.find_all('tr')[1:], 0):
    # if Check_att(row, "covid-sticky"): continue
    columns = row.find_all('td')
    for col_index, column in enumerate(columns, 0):
        col_value = ''
        col_value = Check_att(column, 'status-a')
        col_value = Check_att(column, 'status-r')
        col_value = Check_att(column, 'status-d')
        col_value = Check_att(column, 'yes')
        col_value = Check_att(column, 'no')
        col_value = Check_att(column, 'tba')
        if not col_value:
            col_value = column.get_text().rstrip('\n').strip()
        df.iat[row_index, col_index] = col_value

for col in df:
    try:
        df[col] = df[col].astype(float)
    except ValueError:
        pass

print(df)
I am scraping names, prices and images from this website. There are 8 items in total, but in the DF I would like to filter only the items that contain the pattern "Original Zaino Antifurto". When I try to apply the bp_filter to the DF I get an error, probably due to hidden characters.
Does anyone know how to filter for this pattern avoiding the error?
import requests
from bs4 import BeautifulSoup
import pandas as pd

url_xd = 'https://www.xd-design.com/it-it/catalogsearch/result/?q=Bobby+Original+Zaino+Antifurto'
req_xd = requests.get(url_xd)
pars_xd = BeautifulSoup(req_xd.content, 'html.parser')
con_xd = pars_xd.find_all('div', class_='product details product-item-details')

names_xd = []
prices_xd = []
picts_xd = []

for container in con_xd:
    name = container.find("a", class_="product-item-link").text
    names_xd.append(name)

for container in con_xd:
    price = container.find("span", class_="price").text
    prices_xd.append(price)

for container in con_xd:
    pict = container.find("a").get("href")
    picts_xd.append(pict)

bp_xd = pd.DataFrame({'(XD-Design) Item_Name': names_xd,
                      'Item_Price_EUR': prices_xd,
                      'Link_to_Pict': picts_xd})

bp_xd['Item_Price_EUR'] = bp_xd['Item_Price_EUR'].str.replace('€', '').str.replace(',', '.').astype(float)
bp_xd['(XD-Design) Item_Name'] = bp_xd['(XD-Design) Item_Name'].str.strip()
bp_filter = bp_xd['(XD-Design) Item_Name'][bp_xd['(XD-Design) Item_Name'].str.contains('Original Zaino Antifurto')]
# bp_xd[bp_filter]
Here is the fixed working code:
import requests
from bs4 import BeautifulSoup
import pandas as pd
url_xd = 'https://www.xd-design.com/it-it/catalogsearch/result/?q=Bobby+Original+Zaino+Antifurto'
req_xd = requests.get(url_xd)
pars_xd = BeautifulSoup(req_xd.content, 'html.parser')
con_xd = pars_xd.find_all('div', class_ = 'product details product-item-details')
names_xd = [c.find("a", class_="product-item-link").text for c in con_xd]
prices_xd = [c.find("span", class_="price").text for c in con_xd]
picts_xd = [c.find("a").get("href") for c in con_xd]
df = pd.DataFrame({'(XD-Design) Item_Name': names_xd,
                   'Item_Price_EUR': prices_xd,
                   'Link_to_Pict': picts_xd})
df['Item_Price_EUR'] = df['Item_Price_EUR'].str.replace('€','').str.replace(',','.').astype(float)
df['(XD-Design) Item_Name'] = df['(XD-Design) Item_Name'].str.strip()
df = df.loc[df['(XD-Design) Item_Name'].apply(lambda x: 1 if 'Original Zaino Antifurto' in x else 0) == 1]
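As a side note, the same filter can also be written with a boolean mask built by str.contains; passing na=False makes the mask skip missing names instead of raising:

mask = df['(XD-Design) Item_Name'].str.contains('Original Zaino Antifurto', na=False)
df = df[mask]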
I got help from here to crawl law.go.kr with the code below.
I'm trying to crawl other websites like http://lawbot.org, http://law.go.kr, https://casenote.kr.
But the problem is that I have no understanding of HTML...
I understand all the code below and how its URLs and HTML selectors were found, but other websites are structured differently...
I want to know how to use the code below to crawl other web pages.
import requests
from bs4 import BeautifulSoup

if __name__ == '__main__':
    # Using requests, get the items from the first page: pg=1 is the page number
    # and outmax is the number of items per page.
    response = requests.post(
        "http://law.go.kr/precScListR.do?q=*&section=evtNm&outmax=79329&pg=1&fsort=21,10,30&precSeq=0&dtlYn=N")
    # Parse the html using BeautifulSoup
    page = BeautifulSoup(response.text, "html.parser")
    # Go through all pages and collect post numbers in items
    items = []
    for i in range(1, 2):
        # Get all links
        links = page.select("#viewHeightDiv .s_tit a")
        # Loop over all links and collect the post numbers
        for link in links:
            # Parse the post number from the "onclick" attribute
            items.append(''.join([n for n in link.attrs["onclick"] if n.isdigit()]))
    # Open all posts and collect them in a posts list with keys: number, url, text and title
    posts = []
    for item in items:
        url = "http://law.go.kr/precInfoR.do?precSeq=%s&vSct=*" % item
        response = requests.get(url)
        parsed = BeautifulSoup(response.text, "html.parser")
        # Save the full text ('id': 'contentBody'); to save it without the title use 'class': 'pgroup'
        text = parsed.find('div', attrs={'id': 'contentBody'}).text
        title = parsed.select_one("h2").text
        posts.append({'number': item, 'url': url, 'text': text, 'title': title})
        with open("D:/LAWGO_DATA/" + item + '.txt', 'w', encoding='utf8') as f:
            f.write(text)
One more example for lawbot.org:
import requests
from bs4 import BeautifulSoup

base_url = 'http://lawbot.org'
search_url = base_url + '/?q=유죄'
response = requests.get(search_url)
page = BeautifulSoup(response.text, "html.parser")
lastPageNumber = int(page.select_one("li.page-item:not(.next):nth-last-child(2)").text)

casesList = []
for i in range(1, lastPageNumber + 1):
    if i > 1:
        response = requests.get(search_url + "&page=" + str(i))
        page = BeautifulSoup(response.text, "html.parser")
    cases = page.select("div.panre_center > ul.media-list li.panre_lists")
    for case in cases:
        title = case.findChild("h6").text
        caseDocNumber = case.findChild(attrs={"class": "caseDocNumber"}).text
        caseCourt = case.findChild(attrs={"class": "caseCourt"}).text
        case_url = base_url + case.findChild("a")['href']
        casesList.append({"title": title, "caseDocNumber": caseDocNumber, "caseCourt": caseCourt, "case_url": case_url})
        # print("title:{}, caseDocNumber:{}, caseCourt:{}, caseUrl:{}".format(title, caseDocNumber, caseCourt, case_url))

for case in casesList:
    response = requests.get(case["case_url"])
    page = BeautifulSoup(response.text, "html.parser")
    body = page.find(attrs={"class": "panre_body"}).text
    print(body)
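The recipe is the same for every site: request a listing page, parse it with BeautifulSoup, select the elements you need with CSS selectors found via your browser's developer tools, and then follow the extracted links. Below is a minimal generic sketch; the search URL and the a.case-link selector for casenote.kr are placeholders made up for illustration, not the site's real markup, so inspect the page and substitute your own.

import requests
from bs4 import BeautifulSoup

def collect_links(list_url, css_selector, base_url=''):
    # Fetch one listing page and return absolute URLs for every element
    # matched by css_selector that carries an href attribute.
    page = BeautifulSoup(requests.get(list_url).text, 'html.parser')
    return [base_url + a['href'] for a in page.select(css_selector) if a.get('href')]

# Hypothetical usage for casenote.kr -- replace the URL and selector with the
# ones you find by inspecting the site.
case_links = collect_links('https://casenote.kr/search/?q=유죄', 'a.case-link',
                           base_url='https://casenote.kr')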
I have fetched the HTML data from a site and am trying to fetch the product URLs:
import requests
from bs4 import BeautifulSoup

def get_soup(url):
    soup = None
    response = requests.get(url)
    if response.status_code == 200:
        html = response.content
        soup = BeautifulSoup(html, "html.parser")
    return soup

def get_category_urls(url):
    soup = get_soup(url)
    cat_urls = []
    categories = soup.find('div', attrs={'id': 'menu_oc'})
    if categories is not None:
        for c in categories.findAll('a'):
            if c['href'] is not None:
                cat_urls.append(c['href'])
    return cat_urls

def get_product_urls(url):
    soup = get_soup(url)
    prod_urls = []
    if soup.find('div', attrs={'class': 'pagination'}):
        for link in soup.select('div.links a'):
            if link.string.isdecimal():  # dump next and last links
                prod_urls.append(link['href'])
    print("Found following product urls::", prod_urls)
    return prod_urls

if __name__ == '__main__':
    category_urls = get_category_urls(URL)
    product_urls = get_product_urls(URL)
How can I efficiently identify the pagination condition in the code above?
(Screenshots of a category page with pagination and one without are omitted here.)
This should be okay:
from bs4 import BeautifulSoup
import requests

def get_soup(url):
    soup = None
    response = requests.get(url)
    if response.status_code == 200:
        html = response.content
        soup = BeautifulSoup(html, "html.parser")
    return soup

def get_category_urls(url):
    soup = get_soup(url)
    cat_urls = []
    categories = soup.find('div', attrs={'id': 'menu_oc'})
    if categories is not None:
        for c in categories.findAll('a'):
            if c['href'] is not None:
                cat_urls.append(c['href'])
    return cat_urls

def get_all_products(url):
    prod_urls = []
    soup = get_soup(url)
    # Collect the products on the first page, then look for further pages.
    # extend (not append) keeps prod_urls a flat list of URLs.
    prod_urls.extend(get_product_urls(soup))
    links = get_pagination(soup)
    print("Found those pages:", links)
    if not links:
        return prod_urls
    for link in links:
        soup = get_soup(link)
        prod_urls.extend(get_product_urls(soup))
    print("Found following product urls:", prod_urls)
    return prod_urls

def get_product_urls(soup):
    links = soup.select('div.product-list .span .name a')
    return [link['href'] for link in links]

def get_pagination(soup):
    # Keep only the numeric page links, which drops the "next"/"last" links.
    pages = soup.select('div.pagination div.links a')
    return [link['href'] for link in pages if link.string.isdecimal()]

if __name__ == '__main__':
    URL = 'http://www.example.com/shop/index.php?route=product/category&path=63_64'
    category_urls = get_category_urls(URL)
    product_urls = get_all_products(URL)
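If you also want the products from every category rather than just the one URL, the helpers compose directly. This is a small sketch assuming each category page returned by get_category_urls() uses the same product-list and pagination markup as the category above:

# Walk every category and reuse get_all_products(), which already handles
# both the paginated and the single-page case.
all_product_urls = []
for category_url in get_category_urls(URL):
    all_product_urls.extend(get_all_products(category_url))
print("Total product urls found:", len(all_product_urls))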