How to get data from IMDb using BeautifulSoup - beautifulsoup

I'm trying to get the names of top 250 IMDb movies using BeautifulSoup. The code does not execute properly and shows no errors.
import requests
from bs4 import BeautifulSoup
url = "https://www.imdb.com/chart/top"
response = requests.get(url)
rc = response.content
soup = BeautifulSoup(rc,"html.parser")
# BUG: the attribute key is "class:" (stray trailing colon) instead of "class",
# so find_all matches nothing and the loop body never executes.
for i in soup.find_all("td",{"class:":"titleColumn"}):
print(i)
I'm expecting it to show me all of the td tags with titleColumn classes, but it is not working. Am I missing something? Thanks in advance!

Remove the : after the class:
{"class:":"titleColumn"}
to
{"class":"titleColumn"}
Extended example:
import requests
from bs4 import BeautifulSoup

# Fetch the IMDb Top 250 chart and parse the static HTML.
url = "https://www.imdb.com/chart/top"
response = requests.get(url)
rc = response.content
soup = BeautifulSoup(rc, "html.parser")

# One record per title cell: the people (link's title attribute),
# the movie title (link text) and the year info (span text).
data = [
    {
        'people': cell.a['title'],
        'title': cell.a.get_text(),
        'info': cell.span.get_text()
    }
    for cell in soup.find_all("td", {"class": "titleColumn"})
]
data

Related

Selenium: click next till the last page

I am creating a web scraping tool using BeautifulSoup and Selenium. I am scraping a community forum where I am able to scrape the first web page of a particular thread. Say, for example, for the following thread: https://www.dell.com/community/Optiplex-Desktops/dell-optiplex-7000MT-DDR5-Ram-campatibility/m-p/8224888#M61514
I can scrape only the first page. I want to scrape all of the pages (in this case 3) and display the content.
The following code scrapes the first page:
import pandas as pd
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.common.exceptions import NoSuchElementException, ElementNotVisibleException

# Thread to scrape (first page only).
url = "https://www.dell.com/community/Optiplex-Desktops/dell-optiplex-7000MT-DDR5-Ram-campatibility/m-p/8224888#M61514"
result = requests.get(url)
soup = BeautifulSoup(result.text, "html.parser")

date = '01-19-2023'  # keep only comments posted on this date
comments = []

# The message-list container wraps every comment on the page.
comments_section = soup.find('div', {'class': 'lia-component-message-list-detail-with-inline-editors'})
comments_body = comments_section.find_all('div', {'class': 'lia-linear-display-message-view'})

for post in comments_body:
    # Skip comments whose local-date stamp doesn't match the target date.
    if date not in post.find('span', {'class': 'local-date'}).text:
        continue
    comments.append({
        'Date': post.find('span', {'class': 'local-date'}).text.strip('\u200e'),
        'Board': soup.find_all('li', {'class': 'lia-breadcrumb-node crumb'})[1].text.strip(),
        'Sub-board': soup.find('a', {'class': 'lia-link-navigation crumb-board lia-breadcrumb-board lia-breadcrumb-forum'}).text,
        'Title of Post': soup.find('div', {'class': 'lia-message-subject'}).text.strip(),
        'Main Message': soup.find('div', {'class': 'lia-message-body'}).text.strip(),
        'Post Comment': post.find('div', {'class': 'lia-message-body-content'}).text.strip(),
        'Post Time': post.find('span', {'class': 'local-time'}).text,
        'Username': post.find('a', {'class': 'lia-user-name-link'}).text,
        'URL': str(url)
    })

df1 = pd.DataFrame(comments)
print(df1)
I have tried the following:
# Click the "page 2" link, then read the driver's current URL.
# FIX: XPath addresses attributes with '@', not '#'; the original
# "//li[#class=...]" is invalid XPath and raises an invalid-selector error.
next_page = driver.find_element(
    "xpath",
    "//li[@class='lia-link-navigation lia-js-data-pageNum-2 lia-custom-event']",
)
next_page.click()
page2_url = driver.current_url
print(page2_url)
This is specific just to page 2.
However, I want this for all subsequent pages, and if there is only one page, to continue executing the next statement.
By using the above code I'm trying to get the URLs of the subsequent pages, which I will add to the list of URLs that need to be scraped. Is there any alternative way to achieve this?
To scrape all the pages you can add a simple `while` loop which is broken out of when the "Next Page" button disappears.
# Keep scraping until no "Next Page" link is left in the document.
while True:
    print('current page:', soup.select_one('span[aria-current="page"]').text)

    comments_section = ...
    comments_body = ...
    for comment in comments_body:
        ...

    # select() returns a (possibly empty) list of matching anchors.
    next_btn = soup.select('a[aria-label="Next Page"]')
    if not next_btn:
        break  # last page reached
    # Follow the link and re-parse for the next iteration.
    url = next_btn[0]['href']
    soup = BeautifulSoup(requests.get(url).text, "html.parser")

Get img src with beautifulsoup4 is returning an empty array response

I'm trying to get the url src from the following html
For some reason when i try to print out the logo url, I get [] as a response. My code is as follows:
# FIX: the original imported Request twice (once alone, once with urlopen);
# a single combined import is sufficient.
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup

url = 'https://growjo.com/industry/Cannabis'

# A browser-like User-Agent header is supplied with the request.
request = Request(
    url,
    headers={'User-Agent': 'Mozilla/5.0'}
)
page = urlopen(request)
page_content_bytes = page.read()
page_html = page_content_bytes.decode("utf-8")
soup = BeautifulSoup(page_html, "html.parser")

# First table with class "jss31" -> its tbody -> one row per company.
company_rows = soup.find_all("table", {"class": "jss31"})[0].find_all("tbody")[0].find_all("tr")
for company in company_rows:
    company_data = company.find_all("td")
    # The <a>/<img> inside the lazyload wrapper is injected by JavaScript
    # after page load, so in the static HTML this list is empty ([]).
    logo = company_data[1].find_all("div", {"class": "lazyload-wrapper"})[0].find_all("a")
    name = company_data[1].text
    print(logo)
    break
I tried printing out the 'a' tags... I tried the 'img' tags... they all respond with []. It's as if bs4 is not reading within the div class=lazyload-wrapper.
Any help would be greatly appreciated.
The URLs that contain the logos are entirely dynamic, and bs4 can't render JavaScript. The site's API is restricted by authentication, so use a browser-automation tool such as Selenium.
Here I use Selenium 4 with bs4, together with WebDriverManager.
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
import time

# Headless Chrome renders the JavaScript that injects the logo links,
# which plain requests + bs4 never sees.
options = webdriver.ChromeOptions()
options.add_argument("start-maximized")
options.add_argument("--headless")
# chrome to stay open
# options.add_experimental_option("detach", True)

driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=options)

url = 'https://growjo.com/industry/Cannabis'
driver.get(url)
time.sleep(2)  # give the page time to render

# Hand the rendered DOM over to BeautifulSoup for parsing.
soup = BeautifulSoup(driver.page_source, "html.parser")
company_rows = soup.select('table.jss31 tbody tr')
for row in company_rows:
    anchor = row.select_one('td[class="jss38 jss40 jss46"] div + a')
    if anchor:
        logo = 'https://growjo.com' + anchor.get('href')
    else:
        logo = None
    print(logo)
Output:
https://growjo.com/company/Dutchie
https://growjo.com/company/Ascend_Wellness_Holdings
https://growjo.com/company/Hiku_Brands
https://growjo.com/company/C3_Industries
https://growjo.com/company/Jane_Technologies
https://growjo.com/company/Headset
https://growjo.com/company/Jushi_Holdings
https://growjo.com/company/FLOWER_CO.
https://growjo.com/company/Columbia_Care
https://growjo.com/company/Cannabis_Control_Commission
https://growjo.com/company/FIGR
https://growjo.com/company/Leafly
https://growjo.com/company/Hound_Labs
https://growjo.com/company/Leaf_Trade
https://growjo.com/company/Wurk
https://growjo.com/company/Sundial_Cannabis
https://growjo.com/company/BEYOND_%2F_HELLO
https://growjo.com/company/PharmaCann
https://growjo.com/company/LeafLink
https://growjo.com/company/Connected_Cannabis_Co.
https://growjo.com/company/NATURE'S_MEDICINES
https://growjo.com/company/Althea_Group
https://growjo.com/company/CURE_Pharmaceutical
https://growjo.com/company/urban-gro
https://growjo.com/company/NABIS
None
https://growjo.com/company/Medisun
https://growjo.com/company/Mammoth_Distribution
https://growjo.com/company/Dosecann_Cannabis_Solutions
https://growjo.com/company/Vireo_Health
https://growjo.com/company/Dama_Financial
https://growjo.com/company/Caliber
https://growjo.com/company/springbig
https://growjo.com/company/Westleaf
https://growjo.com/company/INSA
https://growjo.com/company/Pure_Sunfarms
https://growjo.com/company/Sensi_Media_Group
https://growjo.com/company/Verano_Holdings
https://growjo.com/company/TILT_Holdings
https://growjo.com/company/Bloom_Medicinals
https://growjo.com/company/Planet_13_Holdings
https://growjo.com/company/Liberty_Health_Sciences
https://growjo.com/company/Calyx_Peak_Companies
https://growjo.com/company/Vangst
https://growjo.com/company/Fire_&_Flower
https://growjo.com/company/Revolution_Enterprises
https://growjo.com/company/4Front_Ventures
https://growjo.com/company/Calyx_Containers
https://growjo.com/company/GreenTech_Industries
https://growjo.com/company/BZAM_Cannabis
https://growjo.com/company/Cova_Software
None
https://growjo.com/company/Up_Cannabis
https://growjo.com/company/Cann_Group
https://growjo.com/company/Holistic_Industries
https://growjo.com/company/Treez
https://growjo.com/company/INDIVA
https://growjo.com/company/Kiva_Confections
https://growjo.com/company/MariMed
https://growjo.com/company/MCR_Labs
https://growjo.com/company/Vicente_Sederberg
https://growjo.com/company/Demetrix
https://growjo.com/company/365_Cannabis
https://growjo.com/company/LivWell_Enlightened_Health
https://growjo.com/company/High_Tide
https://growjo.com/company/The_Hawthorne_Gardening_Company
https://growjo.com/company/WYLD
https://growjo.com/company/VidaCann
https://growjo.com/company/Sira_Naturals
https://growjo.com/company/iAnthus
https://growjo.com/company/EastHORN_Clinical_Services
https://growjo.com/company/PharmaCielo
https://growjo.com/company/OCS_Ontario_Cannabis_Store
https://growjo.com/company/Hugh_Wood_Canada
https://growjo.com/company/Wana_Brands
https://growjo.com/company/Parallel
https://growjo.com/company/Weedmaps
None
https://growjo.com/company/Dark_Heart_Nursery
https://growjo.com/company/Stealth_Monitoring
https://growjo.com/company/dicentra
https://growjo.com/company/Sunday_Goods_&_The_Pharm
https://growjo.com/company/Phase_Zero_Design
https://growjo.com/company/Sava
https://growjo.com/company/Ceylon_Solutions
https://growjo.com/company/Green_Flower
https://growjo.com/company/Shryne_Group
https://growjo.com/company/MJ_Freeway
https://growjo.com/company/Theory_Wellness
https://growjo.com/company/HEXO_Corp
https://growjo.com/company/Lightshade
https://growjo.com/company/New_Frontier_Data
https://growjo.com/company/Mission_Dispensaries
https://growjo.com/company/FLUENT_Cannabis_Care
https://growjo.com/company/Superette
https://growjo.com/company/HdL_Companies
https://growjo.com/company/Helix_Technologies
https://growjo.com/company/Mary's_Medicinals
https://growjo.com/company/Indus_Holdings
https://growjo.com/company/Auxly
https://growjo.com/company/Good_Chemistry
https://growjo.com/company/Khiron_Life_Sciences_Corp
https://growjo.com/company/The_Apothecarium

Scraping a web page with a "more" button...with beautifulsoup

I'm trying to scrape information from this website: "http://vlg.film/"
I'm not only interested in the first 15 titles, but in all of them. When clicking on the 'Show More' button a couple of times, the extra titles show up in the "inspect element" window, but the url stays the same, i.e. "https://vlg.film/". Does anyone have a or some bright ideas? I am fairly new to this..Thanks
`
import requests as re
from bs4 import BeautifulSoup as bs

# Fetch the landing page (only the first batch of titles is in the static HTML).
url = ("https://vlg.film/")
page = re.get(url)
soup = bs(page.content, 'html.parser')

# Each film card is one of these columns; its first <a> holds the link.
wrap = soup.find_all('div', class_="column column--20 column--main")
for det in wrap:
    link = det.a['href']
    print(link)
`
Looks like you can simply add the pagination to the url. The trick is to know when you reached the end. Playing around with it, it appears once you reach the end, it repeats the first page. So all you need to do is keep appending the links into a list, and when you start to repeat a link, have it stop.
import requests as re
from bs4 import BeautifulSoup as bs

# Paginate via the PAGEN_1 query parameter. Past the last page the site
# serves page 1 again, so a link we've already seen means we're done.
next_page = True
page_num = 1
links = []
while next_page:
    url = "https://vlg.film/"
    payload = {'PAGEN_1': str(page_num)}
    page = re.get(url, params=payload)
    soup = bs(page.content, 'html.parser')
    wrap = soup.find_all('div', class_="column column--20 column--main")
    for det in wrap:
        link = det.a['href']
        if link in links:
            # Repeated link: we've wrapped around to the first page.
            next_page = False
            break
        links.append(link)
    page_num += 1

for link in links:
    print(link)
Output:
/films/ainbo/
/films/boss-level/
/films/i-care-a-lot/
/films/fear-of-rain/
/films/extinct/
/films/reckoning/
/films/marksman/
/films/breaking-news-in-yuba-county/
/films/promising-young-woman/
/films/knuckledust/
/films/rifkins-festival/
/films/petit-pays/
/films/life-as-it-should-be/
/films/human-voice/
/films/come-away/
/films/jiu-jitsu/
/films/comeback-trail/
/films/cagefighter/
/films/kolskaya/
/films/golden-voices/
/films/bad-hair/
/films/dragon-rider/
/films/lucky/
/films/zalozhnik/
/films/findind-steve-mcqueen/
/films/black-water-abyss/
/films/bigfoot-family/
/films/alone/
/films/marionette/
/films/after-we-collided/
/films/copperfield/
/films/her-blue-sky/
/films/secret-garden/
/films/hour-of-lead/
/films/eve/
/films/happier-times-grump/
/films/palm-springs/
/films/unhinged/
/films/mermaid-in-paris/
/films/lassie/
/films/sunlit-night/
/films/hello-world/
/films/blood-machines/
/films/samsam/
/films/search-and-destroy/
/films/play/
/films/mortal/
/films/debt-collector-2/
/films/chosen-ones/
/films/inheritance/
/films/tailgate/
/films/silent-voice/
/films/roads-not-taken/
/films/jim-marshall/
/films/goya-murders/
/films/SUFD/
/films/pinocchio/
/films/swallow/
/films/come-as-you-are/
/films/kelly-gang/
/films/corpus-christi/
/films/gentlemen/
/films/vic-the-viking/
/films/perfect-nanny/
/films/farmageddon/
/films/close-to-the-horizon/
/films/disturbing-the-peace/
/films/trauma-center/
/films/benjamin/
/films/COURIER/
/films/aeronauts/
/films/la-belle-epoque/
/films/arctic-dogs/
/films/paradise-hills/
/films/ditya-pogody/
/films/selma-v-gorode-prizrakov/
/films/rainy-day-in-ny/
/films/ty-umeesh-khranit-sekrety/
/films/after-the-wedding/
/films/the-room/
/films/kuda-ty-propala-bernadett/
/films/uglydolls/
/films/smert-i-zhizn-dzhona-f-donovana/
/films/sinyaya-bezdna-2/
/films/just-a-gigolo/
/films/i-am-mother/
/films/city-hunter/
/films/lets-dance/
/films/five-feet-apart/
/films/after/
/films/100-things/
/films/greta/
/films/CORGI/
/films/destroyer/
/films/vice/
/films/ayka/
/films/van-gogh/
/films/serenity/
This is a pretty simple website to extract data from. Create a list of page URLs for however many pages you want to extract, then use a for loop to iterate over each page and extract the data.
import requests as re
from bs4 import BeautifulSoup as bs

# Hit the site's AJAX endpoint directly, one request per page (pages 1..10).
urls = [f"http://vlg.film/ajax/index_films.php?PAGEN_1={x}" for x in range(1, 11)]
for url in urls:
    page = re.get(url)
    soup = bs(page.content, 'html.parser')
    wrap = soup.find_all('div', class_="column column--20 column--main")
    print(url)
    for det in wrap:
        # Each film column's first <a> carries the relative film link.
        link = det.a['href']
        print(link)

How to narrow in on text in beautifulsoup

I'm trying to find the river level here. Yesterday I got some amazing help to use the first BOLD (strong) text, however today that isn't working because new strong text has appeared BEFORE the river level due to the river being in flood. Is there a way in beautiful soup to harvest the first word in bold ending with an m ?
Cheers!!
This should help u:
import requests
from bs4 import BeautifulSoup

url = 'https://flood-warning-information.service.gov.uk/station/8208'
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')

# The river level is the bold text in the second paragraph of the intro
# header, so index by paragraph position rather than taking the page's
# first <strong> (which may be a flood notice).
header = soup.find('header', class_="intro")
paragraphs = header.find_all('p')
txt = paragraphs[1].strong.text
print(txt)
Output:
1.97m
This also works for the url that u mentioned in ur previous question.
Output for that url:
0.66m
Hope that this helps!
If you want to use a CSS Selector:
import requests
from bs4 import BeautifulSoup

url = 'https://flood-warning-information.service.gov.uk/station/8208'
soup = BeautifulSoup(requests.get(url).content, 'html.parser')

# CSS route: the first <strong> inside a <p> under the intro block.
level = soup.select_one('.intro p strong:nth-of-type(1)')
print(level.text)
Output:
1.97m

BeautifulSoup code Error

I cannot seem to find the problem in this code.
Help will be appreciated.
import requests
from bs4 import BeautifulSoup
url = 'http://nytimes.com'
r = requests.get(url)
r_html = r.text
# NOTE(review): no parser argument is given, so bs4 guesses one; pass
# 'html.parser' explicitly to avoid the warning and parser-dependent results.
soup = BeautifulSoup(r_html)
# AttributeError here: find() returns None when the page has no
# <span class="articletitle">, and None has no .string attribute.
title = soup.find('span','articletitle').string
Code & Error Screenshot
The problem is http://nytimes.com does not have any articletitle span. To be safe, just check if soup.find('span','articletitle') is not None: before accessing it. Also, you don't need to access string property here. For example, the following would work fine.
import requests
from bs4 import BeautifulSoup

url = 'http://nytimes.com'
r = requests.get(url)
r_html = r.text
soup = BeautifulSoup(r_html, 'html.parser')

# Guard before use: find() returns None when nothing matches, so only
# print when the element actually exists.
title = soup.find('div', 'suggestions')
if title is not None:
    print(title)
Put your code inside try and catch & then print exception which is occurring. using the exception occurred you can rectify the problem.
Hi, pass a parser as the second argument when constructing the BeautifulSoup object,
e.g.:
page_content = BeautifulSoup(r.content, "html.parser")