Extract Title Tags BeautifulSoup

I need help because I want to extract the title attributes of links on a website. I adapted the code from another question to this scenario, but there are no titles whenever I print Beschreibung.
from bs4 import BeautifulSoup
import requests
import pandas as pd
import urllib.parse

webseite = 'https://www.entega.de/sitemap/'
response = requests.get(webseite)
response.status_code

soup = BeautifulSoup(response.content, 'html.parser')
result_container = soup.find_all('div', {'class': 'clearfix'})

url_part_1 = 'https://www.entega.de/sitemap/'
url_part_2 = []
for item in result_container:
    for link in item.find_all('a', {'class': 'modSitemap__lvl1Link ui-link'}):
        url_part_2.append(link.get('href'))

url_joined = []
for i in url_part_2:
    url_joined.append(urllib.parse.urljoin(url_part_1, i))

Überschrift = []
Beschreibung = []
Verlinkungen = []
for link in url_joined:
    response = requests.get(link)
    soup = BeautifulSoup(response.content, 'html.parser')
    Beschreibung.append(soup.find_all('a', title=True, class_='modSitemap__lvl1Link ui-link'))

You are getting nothing because these links don't have an <a class="modSitemap__lvl1Link ui-link"> tag. They do have classes that start with that string, though, so you could widen your search to match the prefix. Or you can simply get the <a> tags that have a title attribute.
So change your loop to either:
import re

for link in url_joined:
    response = requests.get(link)
    soup = BeautifulSoup(response.content, 'html.parser')
    Beschreibung.append(soup.find_all('a', {'class': re.compile("^modSitemap__lvl1Link ui-link")}, title=True))
or
for link in url_joined:
    response = requests.get(link)
    soup = BeautifulSoup(response.content, 'html.parser')
    Beschreibung.append(soup.find_all('a', title=True))
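Note that each find_all() call returns a ResultSet, so Beschreibung ends up as a list of lists. If what you actually want is a flat list of the title strings, here is a minimal sketch, reusing the url_joined list built above:
from bs4 import BeautifulSoup
import requests

Beschreibung = []
for link in url_joined:
    response = requests.get(link)
    soup = BeautifulSoup(response.content, 'html.parser')
    for a in soup.find_all('a', title=True):
        Beschreibung.append(a['title'])  # the text of the title attribute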


beautiful soup unable to scrape website contents

Hi, I am trying to do a simple web scrape on this website https://www.sayurbox.com/p/Swallow%20Tepung%20Agar%20Agar%20Tinggi%20Serat%207%20gram where my code is this:
def userAgent(URL):
    ua = UserAgent()
    USER_AGENT = ua.random
    headers = {"User-Agent": str(USER_AGENT), "Accept-Encoding": "*", "Connection": "keep-alive"}
    resp = requests.get(URL, headers=headers)
    if resp.status_code == 200:
        soup = BeautifulSoup(resp.content, "html.parser")
        print(f'{URL}')
    else:
        print(f'error 200:{URL}')
        urlError = pd.DataFrame({'url': [URL],
                                 'date': [dateNow]})
        urlError.to_csv('errorUrl/errorUrl.csv', mode='a', index=False, header=False)
    return soup

soup = userAgent(url)
productTitle = soup.find_all('div', {"class": "InfoProductDetail__shortDesc"})
However, it is unable to do so. Is there something wrong with my code? I tried adding time.sleep to wait for the page to load, but it still does not work. Help will be greatly appreciated.
Your code is fine, but the URL is dynamic: the data is generated by JavaScript, which requests and BeautifulSoup can't mimic, so you need an automation tool such as Selenium. Now you can run the code:
from bs4 import BeautifulSoup
import time
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager

url = 'https://www.sayurbox.com/p/Swallow%20Tepung%20Agar%20Agar%20Tinggi%20Serat%207%20gram'

driver = webdriver.Chrome(ChromeDriverManager().install())
driver.maximize_window()
driver.get(url)
time.sleep(5)

soup = BeautifulSoup(driver.page_source, 'html.parser')
driver.close()

title = soup.select_one('.InfoProductDetail__shortDesc').text
price = soup.select_one('span.InfoProductDetail__price').text
print(title)
print(price)
Output:
Swallow Tepung Agar Agar Tinggi Serat 7 gram
7.900
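A side note: a fixed time.sleep(5) can be flaky on slow connections. An explicit wait is usually more robust; here is a minimal sketch, assuming the same .InfoProductDetail__shortDesc selector as above:
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from webdriver_manager.chrome import ChromeDriverManager

url = 'https://www.sayurbox.com/p/Swallow%20Tepung%20Agar%20Agar%20Tinggi%20Serat%207%20gram'
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get(url)
# wait up to 15s until the element actually exists instead of sleeping blindly
WebDriverWait(driver, 15).until(
    EC.presence_of_element_located((By.CSS_SELECTOR, '.InfoProductDetail__shortDesc'))
)
soup = BeautifulSoup(driver.page_source, 'html.parser')
driver.quit()
print(soup.select_one('.InfoProductDetail__shortDesc').text)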

bs4 can't get specific results

I am trying to get specific data from a website where it sits under a class that is used multiple times. My thought was to search for the next-larger enclosing class and then use bs4 again to narrow the results further. However, I get this error:
AttributeError: ResultSet object has no attribute 'find_all'. You're probably treating a list of elements like a single element. Did you call find_all() when you meant to call find()?
This is my code:
import requests
from bs4 import BeautifulSoup

def main():
    responce()

def responce():
    r = requests.get('https://robinhood.com/stocks/WISH')
    soup = BeautifulSoup(r.content, 'html.parser')
    responce = soup.find_all(class_="css-ktio0g")
    responce = responce.find_all(class_="css-6e9xj2")
    print(responce)

main()
import requests
from bs4 import BeautifulSoup
from pprint import pp

def main(url):
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'lxml')
    goal = [x.text for x in soup.select('span.css-ktio0g')]
    pp(goal)

main('https://robinhood.com/stocks/WISH')
Output:
['Piotr Szulczewski',
'—',
'San Francisco, California',
'2010',
'7.25B',
'—',
'—',
'155.46M',
'$12.39',
'$11.44',
'$11.97',
'76.70M',
'$32.85',
'$7.52',
'— per share',
'Expected Aug 11, After Hours',
'Sign up for a Robinhood Account to buy or sell ContextLogic stock and '
'options commission-free.']
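For reference, the AttributeError in the original code comes from calling find_all() on a ResultSet (a list of Tags) instead of on a single Tag. If you do want to narrow the search in two steps, iterate over the outer results first. A minimal sketch, assuming css-6e9xj2 is the enclosing container:
import requests
from bs4 import BeautifulSoup

r = requests.get('https://robinhood.com/stocks/WISH')
soup = BeautifulSoup(r.content, 'html.parser')
# find_all returns a ResultSet; call find_all again on each Tag inside it
for outer in soup.find_all(class_="css-6e9xj2"):
    for inner in outer.find_all(class_="css-ktio0g"):
        print(inner.text)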

Scrape url link in table by BS4

I tried to scrape the hyperlinks in the <a href> tags of the table. However, it doesn't work. Can you help improve this code?
from bs4 import BeautifulSoup
import requests
import time
import pandas as pd
from selenium import webdriver

def tableDataText(h_table):
    rows = []
    trs = h_table.find_all('tr')
    headerow = [td.get_text(strip=True) for td in trs[0].find_all('th')]  # header row
    if headerow:  # if there is a header row, include it first
        rows.append(headerow)
        trs = trs[1:]
    for tr in trs:  # for every table row
        rows.append([td.get_text(strip=True) for td in tr.find_all('td')])  # data row
    return rows

dfs = pd.DataFrame()
for i in range(1, 11):
    driver = webdriver.Chrome()
    driver.get('https://racing.hkjc.com/racing/information/English/racing/RaceCard.aspx?RaceDate=2021/02/06&Racecourse=ST&RaceNo=' + str(i))
    res = driver.execute_script('return document.documentElement.outerHTML')
    time.sleep(3)
    driver.quit()
    soup = BeautifulSoup(res, 'lxml')
    h_table = soup.find('table', {'class': 'table_bd f_tac f_fs13'})
    result_table = tableDataText(h_table)
    df = pd.DataFrame(result_table[1:], columns=result_table[0])
    dfs = pd.concat([dfs, df], ignore_index=True)
Your question and the expected result are not that clear and should be improved. If you just want to grab all the URLs from the href attributes, you can go with:
from bs4 import BeautifulSoup
from selenium import webdriver
import time

linkList = []
for i in range(1, 11):
    driver = webdriver.Chrome()
    driver.get('https://racing.hkjc.com/racing/information/English/racing/RaceCard.aspx?RaceDate=2021/02/06&Racecourse=ST&RaceNo=' + str(i))
    time.sleep(6)
    soup = BeautifulSoup(driver.page_source, 'lxml')
    driver.quit()
    for a in soup.select('table#racecardlist table a'):
        linkList.append('https://racing.hkjc.com' + a['href'])

linkList
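Starting a fresh Chrome instance for every race page is slow. One driver can serve all ten requests; a minimal sketch of the same loop under that design:
from bs4 import BeautifulSoup
from selenium import webdriver
import time

linkList = []
driver = webdriver.Chrome()  # one browser instance for all ten pages
try:
    for i in range(1, 11):
        driver.get('https://racing.hkjc.com/racing/information/English/racing/RaceCard.aspx?RaceDate=2021/02/06&Racecourse=ST&RaceNo=' + str(i))
        time.sleep(6)  # same fixed wait as above
        soup = BeautifulSoup(driver.page_source, 'lxml')
        for a in soup.select('table#racecardlist table a'):
            linkList.append('https://racing.hkjc.com' + a['href'])
finally:
    driver.quit()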

Parse bs4 table from Baseball reference

import requests
from bs4 import BeautifulSoup

url = 'https://www.baseball-reference.com/boxes/NYN/NYN201704030.shtml'

def make_soup(url):
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'html.parser')
    return soup

soup = make_soup(url)
I'm trying to locate the play-by-play table on that page and I've exhausted every option. Any thoughts on how to locate it? It is the tbody located under div.table_outer_container.mobile_table.
You can use Selenium in combination with BeautifulSoup to scrape that table content as follows:
from bs4 import BeautifulSoup
from selenium import webdriver

driver = webdriver.Firefox()
driver.get("https://www.baseball-reference.com/boxes/NYN/NYN201704030.shtml")
html = driver.page_source
soup = BeautifulSoup(html, "lxml")

pbp_table = soup.find_all("table", {"id": "play_by_play"})
for tag in pbp_table:
    print(tag.text)
If you want to use this code, be sure to look at the Selenium guide on drivers and download the latest geckodriver if you're using Firefox as in the code above.
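If you'd rather not manage geckodriver by hand, the webdriver-manager package used in the Chrome example earlier also covers Firefox. A minimal sketch, assuming that package is installed:
from selenium import webdriver
from webdriver_manager.firefox import GeckoDriverManager

# webdriver-manager downloads and caches a matching geckodriver automatically
driver = webdriver.Firefox(executable_path=GeckoDriverManager().install())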
The table is commented out in the source, so look for something that identifies the comment, i.e. the play_by_play id:
from requests import get
from bs4 import BeautifulSoup, Comment

cont = get("https://www.baseball-reference.com/boxes/NYN/NYN201704030.shtml").content
soup = BeautifulSoup(cont, "lxml")

# Search the comments for the one containing the table
comment = soup.find(text=lambda n: isinstance(n, Comment) and 'id="play_by_play"' in n)
soup2 = BeautifulSoup(comment, "lxml")
table = soup2.select("#play_by_play")[0]
Which gets what you want:
In [3]: from requests import get
   ...: from bs4 import BeautifulSoup, Comment
   ...: cont = get("https://www.baseball-reference.com/boxes/NYN/NYN201704030.shtml").content
   ...: soup = BeautifulSoup(cont, "lxml")
   ...: comment = soup.find(text=lambda n: isinstance(n, Comment) and 'id="play_by_play"' in n)
   ...: soup2 = BeautifulSoup(comment, "lxml")
   ...: table = soup2.select("#play_by_play")[0]
   ...: print(table.select_one(".pbp_summary_top").text)

Top of the 1st, Braves Batting, Tied 0-0, Mets' Noah Syndergaard facing 1-2-3
You can also use a regex with text=...:
from re import compile

cont = get("https://www.baseball-reference.com/boxes/NYN/NYN201704030.shtml").content
soup = BeautifulSoup(cont, "lxml")
comment = soup.find(text=compile('id="play_by_play"'))
soup2 = BeautifulSoup(comment, "lxml")
table = soup2.select("#play_by_play")[0]
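Once the table is recovered from the comment, it can also be handed straight to pandas, assuming pandas is available; read_html parses the markup into a DataFrame. A minimal sketch:
from io import StringIO
import pandas as pd

# table is the bs4 Tag recovered from the comment above
df = pd.read_html(StringIO(str(table)))[0]
print(df.head())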

Remove duplicate url's python beautifulsoup

I want to remove the duplicate URLs from a file holding a list of URLs. It has entries like "http://www.naver.com/나눔글꼴.jpg" and they are repeated. Here is my code:
from bs4 import BeautifulSoup
import lxml
import re
import urllib.request

p = re.compile('나눔글꼴')
html = 'http://www.naver.com'
data = urllib.request.urlopen("http://www.naver.com").read()
soup = BeautifulSoup(data, 'lxml')
links = p.findall(str(soup))

i = set()
for i in links:
    link = 'http://www.naver.com/' + str(i) + '.jpg'
    print(link)
You forgot to give input to the set() method:
soup = BeautifulSoup(data, 'lxml')
links = p.findall(str(soup))

i = set(links)
for x in i:
    link = 'http://www.naver.com/' + str(x) + '.jpg'
    print(link)
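Note that a set loses the order in which the links appeared. If order matters, dict.fromkeys gives an order-preserving deduplication (dicts keep insertion order in Python 3.7+). A minimal sketch:
# deduplicate while keeping the order in which links first appeared
unique_links = list(dict.fromkeys(links))
for name in unique_links:
    print('http://www.naver.com/' + name + '.jpg')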