Scrapy - Extracting data from next page

I need your help, folks, with paginating and extracting elements. Here is my spider:
import json
import scrapy


class YPSpider(scrapy.Spider):
    name = 'yp'
    start_urls = ['https://www.infobel.com/fr/france/business/50000/informatique_internet/']

    def parse(self, response):
        next_page_url = response.xpath('//ul[@class="pagination"]/li[@class="active"]/following-sibling::li[1]/a/@href').extract_first()
        if next_page_url:
            yield response.follow(next_page_url, callback=self.parse)
        if response.meta.get('has_phone'):
            item = response.meta['item']
            response = json.loads(response.body)
            item['phone'] = response['result']
            yield item
        else:
            items = response.xpath('//*[contains(@class, "customer-box")]')
            for item in items:
                address_lines = item.xpath('.//span[contains(@class, "fa-map-marker")]/../span[@class="detail-text"]//text()').extract()
                title = item.xpath('.//h2[@class="customer-item-name"]/a/text()').extract_first().strip()
                address = address_lines[0].replace('\r', '').replace('\t', '').strip() if address_lines else ''
                village = address_lines[1].replace('\r', '').replace('\t', '').strip() if len(address_lines) >= 1 else ''
                phone = item.xpath('.//span[contains(@class, "icon-phone")]/../span[@class="detail-text"]/text()').extract()
                item = {
                    'title': title,
                    'address': address,
                    'village': village,
                    'phone': phone,
                }
                if phone:
                    if phone[0].isnumeric():
                        item['phone'] = phone[0]
                        yield item
                    elif len(phone) >= 2:
                        yield scrapy.Request(
                            'https://www.infobel.com/fr/france/Search/Decrypt?encryptedString={}'.format(phone[1]),
                            meta={'item': item, 'has_phone': True},
                        )
How can I make this crawler go to the next page and scrape elements from that page?
I appreciate your help. Thank you in advance.

You need to add this code to your parse method:
next_page_url = response.xpath('//ul[@class="pagination"]/li[@class="active"]/following-sibling::li[1]/a/@href').extract_first()
if next_page_url:
    yield response.follow(next_page_url, callback=self.parse)
You need to read a bit about Python syntax before asking questions here:
def parse(self, response):
    # Follow the pagination link: the item right after the "active" page.
    next_page_url = response.xpath('//ul[@class="pagination"]/li[@class="active"]/following-sibling::li[1]/a/@href').extract_first()
    if next_page_url:
        yield response.follow(next_page_url, callback=self.parse)
    if response.meta.get('has_phone'):
        # Response from the phone-decrypt endpoint: JSON, not HTML.
        item = response.meta['item']
        data = json.loads(response.body)
        item['phone'] = data['result']
        yield item
    else:
        items = response.xpath('//*[contains(@class, "customer-box")]')
        for item in items:
            address_lines = item.xpath('.//span[contains(@class, "fa-map-marker")]/../span[@class="detail-text"]//text()').extract()
            title = item.xpath('.//h2[@class="customer-item-name"]/a/text()').extract_first().strip()
            address = address_lines[0].replace('\r', '').replace('\t', '').strip() if address_lines else ''
            # Indexing address_lines[1] needs at least two entries, not one.
            village = address_lines[1].replace('\r', '').replace('\t', '').strip() if len(address_lines) >= 2 else ''
            phone = item.xpath('.//span[contains(@class, "icon-phone")]/../span[@class="detail-text"]/text()').extract()
            item = {
                'title': title,
                'address': address,
                'village': village,
                'phone': phone,
            }
            if phone:
                if phone[0].isnumeric():
                    item['phone'] = phone[0]
                    yield item
                elif len(phone) >= 2:
                    # The phone is obfuscated; ask the Decrypt endpoint and
                    # finish the item in the has_phone branch above.
                    yield scrapy.Request(
                        'https://www.infobel.com/fr/france/Search/Decrypt?encryptedString={}'.format(phone[1]),
                        meta={'item': item, 'has_phone': True},
                    )
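If the site's markup changes and the selector stops matching, a quick way to sanity-check the pagination XPath interactively (assuming Scrapy is installed; the URL is the one from the question) is scrapy shell:

    scrapy shell 'https://www.infobel.com/fr/france/business/50000/informatique_internet/'
    >>> response.xpath('//ul[@class="pagination"]/li[@class="active"]/following-sibling::li[1]/a/@href').get()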

Related

Scraper not grabbing all text in list/div

My scraper seems to skip over information. For example, I want to extract all the countries and the highway codes belonging to each country, but when I iterate over this I only get one country and one highway code for each list of numbered highways.
How do I get all the highway routes and their respective countries?
Here's the scraper that I am working with:
import scrapy
from scrapy.item import Field
from itemloaders.processors import TakeFirst, MapCompose
from itemloaders import ItemLoader


class roadItem(scrapy.Item):
    highway_names = Field(output_processor=TakeFirst())
    country = Field(output_processor=TakeFirst())
    route_name = Field(output_processor=TakeFirst())


class roadSpider(scrapy.Spider):
    name = "road"
    start_urls = ["https://en.wikipedia.org/w/index.php?title=Category:Lists_of_roads_sharing_the_same_title&pageuntil=092%0AList+of+highways+numbered+92#mw-pages"]

    def start_requests(self):
        for url in self.start_urls:
            yield scrapy.Request(url, callback=self.parse)

    def parse(self, response):
        list_data = response.xpath('(//div[@class="mw-category-group"][last()])[2]//ul')
        for items in list_data:
            for link in items.xpath(".//a/@href").getall():
                yield response.follow(response.urljoin(link), callback=self.parse_roads)
        next_page = response.xpath("//div[@id='mw-pages']/a[1]//@href").get()
        if next_page is not None:
            yield response.follow(response.urljoin(next_page), callback=self.parse)

    def parse_roads(self, response):
        loader = ItemLoader(roadItem())
        loader.add_value("highway_names", response.xpath("//h1[@id='firstHeading']//text()").get())
        data = response.xpath("//div[@id='mw-content-text']")
        for list_h2 in data:
            if list_h2.xpath("(((//h2)[position() > 1]//span)[position() mod 4 = 1])[position() < last()]//text()").get():
                loader.add_value("country", list_h2.xpath("(((//h2)[position() > 1]//span)[position() mod 4 = 1])[position() < last()]//text()").get())
            else:
                loader.add_value("country", list_h2.xpath("(//h2//span)[1]//text()").get())
            if list_h2.xpath("(//div[@class='mw-parser-output']//ul)[position() > 1 and position() < last()]//li//@title").getall():
                for routes in list_h2.xpath("(//div[@class='mw-parser-output']//ul)[position() > 1 and position() < last()]//li//@title").getall():
                    loader.add_value('route_name', routes)
            else:
                for routess in list_h2.xpath("//ul//li//@title").getall():
                    loader.add_value('route_name', routess)
        yield loader.load_item()
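One likely culprit is the TakeFirst() output processor on country and route_name: a single ItemLoader keeps only the first value collected for each of those fields, so one loader per page can never emit more than one country/route pair no matter how many add_value calls run. Below is a minimal sketch of a parse_roads that yields one item per route instead; the layout assumption (each country is an h2 heading whose first following ul lists its routes) and the plain-dict items are illustrative, not taken from the original code:

    def parse_roads(self, response):
        title = response.xpath("//h1[@id='firstHeading']//text()").get()
        # Assumed layout: every country is an h2 heading, and the first ul
        # after it holds that country's numbered highways.
        for heading in response.xpath("//div[@class='mw-parser-output']//h2"):
            country = heading.xpath(".//span[@class='mw-headline']/text()").get()
            for route in heading.xpath("following-sibling::ul[1]//li//a/@title").getall():
                yield {
                    "highway_names": title,
                    "country": country,
                    "route_name": route,
                }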

scrapy pagination not working on tripadvisor

I'm trying to scrape the restaurant pages on TripAdvisor (just to learn how it works).
However, I only get the first page.
What am I missing?
Here is the code, thanks!
import scrapy


class TripadvSpider(scrapy.Spider):
    name = 'tripadv'
    allowed_domains = ['tripadvisor.com']
    start_urls = ['https://www.tripadvisor.com/Restaurants-g60795-oa0-Philadelphia_Pennsylvania.html#EATERY_LIST_CONTENTS']

    def parse(self, response):
        for stores in response.css('div.emrzT'):
            yield {
                'name': stores.css('a.bHGqj::text').extract(),
                'link': stores.css('a.bHGqj').xpath("@href").extract()
            }
        next_page = ('http://tripadvisor.com' + response.css('a.nav').attrib['href']).extract()
        #next_page = response.xpath('//a[contains(text(), "Next")]/@href').extract()
        #next_page = ('http://tripadvisor.com' + response.css('a:contains("Next")').attrib['href'].extract())
        if next_page is not None:
            yield response.follow(next_page, callback=self.parse)
@djmystica, now it's working fine:
import scrapy


class TripadvSpider(scrapy.Spider):
    name = 'tripadv'
    allowed_domains = ['tripadvisor.com']
    start_urls = [
        'https://www.tripadvisor.com/Restaurants-g60795-oa0-Philadelphia_Pennsylvania.html#EATERY_LIST_CONTENTS']

    def parse(self, response):
        for stores in response.css('div.emrzT'):
            yield {
                'name': stores.css('a.bHGqj::text').extract_first(),
                'link': stores.css('a.bHGqj').xpath("@href").extract_first()}
        next_page = response.xpath('//a[contains(text(), "Next")]/@href').extract_first()
        # Test next_page itself: the f-string below can never be None, so
        # checking abs_next_page would keep requesting past the last page.
        if next_page is not None:
            abs_next_page = f'https://www.tripadvisor.com{next_page}'
            yield response.follow(abs_next_page, callback=self.parse)
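Assuming the spider sits inside a Scrapy project, you can run it and dump the results with the built-in feed exporter:

    scrapy crawl tripadv -o stores.json

One design note: class names like emrzT and bHGqj are machine-generated and tend to change whenever TripAdvisor redeploys its frontend, so the text-based //a[contains(text(), "Next")] selector is likely to outlive the CSS ones.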

scrapy.exceptions.NotSupported: Unsupported URL scheme '': no handler available for that scheme

I collect links from several directories and then insert them into start_urls via a links variable:
import scrapy


class SplashSpider(scrapy.Spider):
    f = open('text.txt')
    links = f.read()
    name = 'spide'
    start_urls = [str(links)]
    f.close()

    def parse(self, response):
        title = response.css('.title::text').extract()
        description = response.css("div#desc").extract()
        title = list(map(str.strip, title))
        description = list(map(str.strip, description))
        yield {
            'Title': title,
            'Main Info': description,
        }
but I'm getting an error: scrapy.exceptions.NotSupported: Unsupported URL scheme '': no handler available for that scheme
my text.txt file:
'https:// url1.com','https:// url2.com', ... , 'https:// url300000.com', 'https:// url300001.com'
import scrapy


class SplashSpider(scrapy.Spider):
    # start_urls = [str(links)] in the question makes start_urls a
    # single-element list whose only "URL" is the raw file contents, which
    # has no scheme at all -- hence the NotSupported error. Build a list of
    # clean URLs instead, stripping the quotes, commas and stray spaces
    # shown in text.txt.
    with open('text.txt') as f:
        links = [line.strip().replace(' ', '').strip(',').strip("'") for line in f]
    name = 'spider'
    start_urls = links

    def parse(self, response):
        title = response.css('.title::text').extract()
        description = response.css("div#desc").extract()
        title = list(map(str.strip, title))
        description = list(map(str.strip, description))
        yield {
            'Title': title,
            'Main Info': description,
        }
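With some 300,000 links it may also be worth not building start_urls at class-definition time at all. A sketch of an alternative (same assumptions about the text.txt format) that reads the file lazily from start_requests:

    import scrapy


    class SplashSpider(scrapy.Spider):
        name = 'spider'

        def start_requests(self):
            # Scrapy consumes this generator lazily, so the whole URL list
            # never has to sit in memory before the crawl starts.
            with open('text.txt') as f:
                for line in f:
                    url = line.strip().replace(' ', '').strip(',').strip("'")
                    if url:
                        yield scrapy.Request(url, callback=self.parse)

        def parse(self, response):
            yield {
                'Title': list(map(str.strip, response.css('.title::text').extract())),
                'Main Info': response.css('div#desc').extract(),
            }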

Scrapy - how to request another URL from a parsed response

I would like to take a value obtained in parse and parse it further by connecting to another URL. How do I fix this?
from scrapy import Spider
from scrapy.selector import Selector
from stack.items import StackItem


class StackSpider(Spider):
    name = "stack"
    allowed_domains = ["*"]
    global n
    #n = 1997
    start_urls = ['https://www.melon.com/chart/age/list.htm?chartType=YE&chartGenre=KPOP&chartDate=2010',]

    def parse(self, response):
        url = 'https://www.melon.com/song/detail.htm?songId='
        questions = Selector(response).xpath('//*[@id="frm"]/table/tbody/tr')
        for question in questions:
            item = StackItem()
            item['musicid'] = question.xpath('td/div/input/@value').extract()[0]
            item['title'] = question.xpath('td[4]/div/div/div/span/strong/a/@title').extract()
            item['artlist'] = question.xpath('td[4]/div/div/div[2]/div[1]/a/text()').extract()
            item['album'] = question.xpath('td[4]/div/div/div[2]/div[2]/a/text()').extract()
            item['sunwhi'] = question.xpath('td[2]/div/span/text()').extract()[0]
            response_url = requests.get(url + musicid)

    def parse(self, response):
        questions = Selector(response).xpath('//*[@id="downloadfrm"]/div/div/div[2]/div[2]/dl/dd')
        for question in questions:
            item = StackItem()
            item['album'] = question.xpath('a/text()').extract()[0]
            yield item
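The usual Scrapy fix is to chain a second request with a callback instead of calling requests.get (which blocks and whose response is never parsed), carrying the half-filled item along in meta. A minimal sketch under that assumption -- the XPaths are copied from the question, and parse_detail is a hypothetical name:

    import scrapy
    from stack.items import StackItem


    class StackSpider(scrapy.Spider):
        name = "stack"
        start_urls = ['https://www.melon.com/chart/age/list.htm?chartType=YE&chartGenre=KPOP&chartDate=2010']

        def parse(self, response):
            for question in response.xpath('//*[@id="frm"]/table/tbody/tr'):
                item = StackItem()
                item['musicid'] = question.xpath('td/div/input/@value').extract_first()
                item['title'] = question.xpath('td[4]/div/div/div/span/strong/a/@title').extract()
                item['artlist'] = question.xpath('td[4]/div/div/div[2]/div[1]/a/text()').extract()
                item['sunwhi'] = question.xpath('td[2]/div/span/text()').extract_first()
                # Let Scrapy fetch the detail page and finish the item there.
                yield scrapy.Request(
                    'https://www.melon.com/song/detail.htm?songId=' + item['musicid'],
                    callback=self.parse_detail,
                    meta={'item': item},
                )

        def parse_detail(self, response):
            item = response.meta['item']
            item['album'] = response.xpath('//*[@id="downloadfrm"]/div/div/div[2]/div[2]/dl/dd/a/text()').extract_first()
            yield item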

Get next page using scrapy

I'm interested in getting contractors' data for Atlanta from this page:
http://www.1800contractor.com/d.Atlanta.GA.html?link_id=3658
So far I can open the links for the categories:
'Additions & Remodeling'
'Architects & Engineers'
'Fountains & Ponds'
......
.....
.....
But I can only open the first page:
http://www.1800contractor.com/d.Additions-Remodeling.Atlanta.GA.-12001.html?startingIndex=0&showDirectory=true
I'm trying to get to the next one by following the link on the 'Next' button:
next_page_url = response.xpath('/html/body/div[1]/center/table/tr[8]/td[2]/a/@href').extract_first()
absolute_next_page_url = response.urljoin(next_page_url)
request = scrapy.Request(absolute_next_page_url)
yield request
But it makes no difference.
This is the code of my spider:
import scrapy


class Spider_1800(scrapy.Spider):
    name = '1800contractor'
    allowed_domains = ['1800contractor.com']
    start_urls = (
        'http://www.1800contractor.com/d.Atlanta.GA.html?link_id=3658',
    )

    def parse(self, response):
        urls = response.xpath('/html/body/center/table/tr/td[2]/table/tr[6]/td/table/tr[2]/td/b/a/@href').extract()
        for url in urls:
            absolute_url = response.urljoin(url)
            request = scrapy.Request(absolute_url, callback=self.parse_contractors)
            yield request
        # process next page
        next_page_url = response.xpath('/html/body/div[1]/center/table/tr[8]/td[2]/a/@href').extract_first()
        absolute_next_page_url = response.urljoin(next_page_url)
        request = scrapy.Request(absolute_next_page_url)
        yield request

    def parse_contractors(self, response):
        name = response.xpath('/html/body/div[1]/center/table/tr[5]/td/table/tr[1]/td/b/a/@href').extract()
        contractor = {
            'name': name,
            'url': response.url}
        yield contractor
You are not paginating the right request. parse handles the requests generated from the URLs in start_urls, which means you first need to enter each category from http://www.1800contractor.com/d.Atlanta.GA.html?link_id=3658 and paginate from the category pages instead:
def parse(self, response):
    urls = response.xpath('/html/body/center/table/tr/td[2]/table/tr[6]/td/table/tr[2]/td/b/a/@href').extract()
    for url in urls:
        absolute_url = response.urljoin(url)
        request = scrapy.Request(absolute_url, callback=self.parse_contractors)
        yield request

def parse_contractors(self, response):
    name = response.xpath('/html/body/div[1]/center/table/tr[5]/td/table/tr[1]/td/b/a/@href').extract()
    contractor = {
        'name': name,
        'url': response.url}
    yield contractor
    next_page_url = response.xpath('/html/body/div[1]/center/table/tr[8]/td[2]/a/@href').extract_first()
    if next_page_url:
        absolute_next_page_url = response.urljoin(next_page_url)
        yield scrapy.Request(absolute_next_page_url, callback=self.parse_contractors)
After hitting start_url, your XPath for picking the contractor URLs is not working. The 'Next' link lives on the contractor page, so it is followed after the contractor URL. This will work for you:
def parse(self, response):
    # Select the category links by class and take their hrefs; extracting
    # the bare elements would feed HTML strings to urljoin.
    urls = response.xpath('//table//*[@class="hiCatNaked"]/@href').extract()
    for url in urls:
        absolute_url = response.urljoin(url)
        request = scrapy.Request(absolute_url, callback=self.parse_contractors)
        yield request

def parse_contractors(self, response):
    name = response.xpath('/html/body/div[1]/center/table/tr[5]/td/table/tr[1]/td/b/a/@href').extract()
    contractor = {
        'name': name,
        'url': response.url}
    yield contractor
    next_page_url = response.xpath("//a[b[contains(., 'Next')]]/@href").extract_first()
    if next_page_url:
        absolute_next_page_url = response.urljoin(next_page_url)
        yield scrapy.Request(absolute_next_page_url, callback=self.parse_contractors)