Scrapy Wikipedia: Yield does not show all rows

I am trying to get the GDP estimate (under the IMF column) from the following page:
https://en.wikipedia.org/wiki/List_of_countries_by_GDP_(nominal)
However, I am only getting the first row (93,863,851). Here's the Scrapy spider code:
def parse(self, response):
    title = response.xpath("(//tbody)[3]")
    for country in title:
        yield {'GDP': country.xpath(".//td[3]/text()").get()}
On the other hand, I can use the getall() method to get all the data, but that puts every data point into a single cell when I export it to CSV/XLSX, so it is not a solution for me.
How can I get all the data points via the loop? Please help.

Your selector is not correct. You should loop through the table rows and yield the data that you need. See the sample below.
import scrapy

class TestSpider(scrapy.Spider):
    name = 'test'
    start_urls = ['https://en.wikipedia.org/wiki/List_of_countries_by_GDP_(nominal)']

    def parse(self, response):
        for row in response.xpath("//caption/parent::table/tbody/tr"):
            yield {
                "country": row.xpath("./td[1]/a/text()").get(),
                "region": row.xpath("./td[2]/a/text()").get(),
                "imf_est": row.xpath("./td[3]/text()").get(),
                "imf_est_year": row.xpath("./td[4]/text()").get(),
                "un_est": row.xpath("./td[5]/text()").get(),
                "un_est_year": row.xpath("./td[6]/text()").get(),
                "worldbank_est": row.xpath("./td[7]/text()").get(),
                "worldbank_est_year": row.xpath("./td[8]/text()").get(),
            }
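If the comma-formatted figures (e.g. '93,863,851') are needed as numbers rather than strings, a small post-processing step works. A minimal sketch (the to_int helper name is ours, not part of the answer):

def to_int(value):
    # turn a Wikipedia-style figure such as '93,863,851' into an int, or None
    if value is None:
        return None
    cleaned = value.strip().replace(',', '')
    return int(cleaned) if cleaned.isdigit() else None

# e.g. inside the loop above:
# "imf_est": to_int(row.xpath("./td[3]/text()").get()),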

Related

Scrapy/Selenium: How do I follow the links in 1 webpage?

I am new to web scraping.
I want to go to Webpage_A and follow all the links there.
Each link leads to a page where I can select some buttons and download the data in an Excel file.
I tried the code below, but I believe there is an error with:
if link:
    yield SeleniumRequest(
Instead of using "SeleniumRequest" to follow the links, what should I use?
If using pure Scrapy, I know I can use:
yield response.follow(
Thank you
class testSpider(scrapy.Spider):
    name = 'test_s'

    def start_requests(self):
        yield SeleniumRequest(
            url='CONFIDENTIAL',
            wait_time=15,
            screenshot=True,
            callback=self.parse
        )

    def parse(self, response):
        tables_name = response.xpath("//div[@class='contain wrap:l']//li")
        for t in tables_name:
            name = t.xpath(".//a/span/text()").get()
            link = t.xpath(".//a/@href").get()
            if link:
                yield SeleniumRequest(
                    meta={'table_name': name},
                    url=link,
                    wait_time=15,
                    screenshot=True,
                    callback=self.parse_table
                )

    def parse_table(self, response):
        name = response.request.meta['table_name']
        button_select = response.find_element_by_xpath("(//a[text()='Select All'])").click()
        button_st_yr = response.find_element_by_xpath("//select[@name='ctl00$ContentPlaceHolder1$StartYearDropDownList']/option[1]").click()
        button_end_mth = response.find_element_by_xpath("//select[@name='ctl00$ContentPlaceHolder1$EndMonthDropDownList']/option[text()='Dec']").click()
        button_download = response.find_element_by_xpath("//input[@id='ctl00_ContentPlaceHolder1_DownloadButton']").click()
        yield {
            'table_name': name
        }
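For what it's worth, a likely fix, assuming the scrapy-selenium package is in use: a Scrapy response has no find_element_by_xpath method; that belongs to the Selenium driver, which scrapy-selenium exposes via response.request.meta['driver']. A minimal, untested sketch of parse_table along those lines:

def parse_table(self, response):
    driver = response.request.meta['driver']  # the Selenium driver scrapy-selenium attaches
    name = response.request.meta['table_name']
    # clicks go through the driver, not through the Scrapy response
    driver.find_element_by_xpath("(//a[text()='Select All'])").click()
    driver.find_element_by_xpath("//input[@id='ctl00_ContentPlaceHolder1_DownloadButton']").click()
    yield {'table_name': name}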

Scrapy only returns groups of items when I run this code

I'm new to Scrapy and I have a problem getting it to return more than the first row. This is the code:
import scrapy

class FarmtoolsSpider(scrapy.Spider):
    name = 'farmtools'
    allowed_domains = ['www.donedeal.ie']
    start_urls = ['https://www.donedeal.ie/farmtools/']

    def parse(self, response):
        for row in response.xpath('//ul[@class="card-collection"]'):
            yield {
                'item_title': response.xpath('.//div[1]/p[@class="card__body-title"]/text()').get(),
                'item_county': response.xpath('.//ul[@class="card__body-keyinfo"]/li[2]/text()').get(),
                'item_price': response.xpath('.//p[@class="card__price"]/span[1]/text()').get(),
                'item_id': response.xpath('.//li[@class="card-item"]/a/@href').get()
            }
I would like it to have the Title, County, Price and Id of each item in different rows. If I run this code as it is, it just gives me the first line.
I have tried getall(), but that just gives me blocks of each item.
Any help would be appreciated.
Here is working code which returns 30 rows:
class FarmtoolsSpider(scrapy.Spider):
    name = 'farmtools'
    allowed_domains = ['www.donedeal.ie']
    start_urls = ['https://www.donedeal.ie/farmtools/']

    def parse(self, response):
        rows = response.xpath('//ul[@class="card-collection"]/li')
        for row in rows:
            yield {
                'item_title': row.xpath('.//div[1]/p[@class="card__body-title"]/text()').get(),
                'item_county': row.xpath('.//ul[@class="card__body-keyinfo"]/li[2]/text()').get(),
                'item_price': row.xpath('.//p[@class="card__price"]/span[1]/text()').get(),
                'item_id': row.xpath('.//li[@class="card-item"]/a/@href').get()
            }
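As a usage note, the one-row-per-item CSV then comes straight from Scrapy's feed exporter, for example:

scrapy crawl farmtools -o farmtools.csv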
Try row.xpath('.//...') instead of response.xpath, e.g.:
for row in response.xpath('//ul[@class="card-collection"]/li'):
    yield {'item_title': row.xpath('.//div[1]/p[@class="card__body-title"]/text()').get(), etc...}

How can I extract the item id from the response in Scrapy?

import scrapy

class FarmtoolsSpider(scrapy.Spider):
    name = 'farmtools'
    allowed_domains = ['www.donedeal.ie']
    start_urls = ['https://www.donedeal.ie/farmtools/']

    def parse(self, response):
        rows = response.xpath('//ul[@class="card-collection"]/li')
        for row in rows:
            yield {
                'item_id': row.xpath('.//a/@href').get(),
                'item_title': row.xpath('.//div[1]/p[@class="card__body-title"]/text()').get(),
                'item_county': row.xpath('.//ul[@class="card__body-keyinfo"]/li[2]/text()').get(),
                'item_price': row.xpath('.//p[@class="card__price"]/span[1]/text()').get()
            }
I want to extract the item number from the item_id response, which is a URL.
Is it possible to do this?
The response looks like this:
{'item_id': 'https://www.donedeal.ie/farmtools-for-sale/international-784-tractor/25283884?campaign=3', 'item_title': 'INTERNATIONAL 784 TRACTOR', 'item_county': 'Derry', 'item_price': '3,000'}
I'd appreciate any advice, thanks
Something like this would work. It's not clean, but it splits the string up until you get the id you want.
def parse(self, response):
    rows = response.xpath('//ul[@class="card-collection"]/li')
    for row in rows:
        link = row.xpath('.//a/@href').get()
        link_split = link.split('/')[-1]
        link_id = link_split.split('?')[0]
        yield {
            'item_id': link_id,
            'item_title': row.xpath('.//div[1]/p[@class="card__body-title"]/text()').get(),
            'item_county': row.xpath('.//ul[@class="card__body-keyinfo"]/li[2]/text()').get(),
            'item_price': row.xpath('.//p[@class="card__price"]/span[1]/text()').get()
        }
Update in response to comment
Complete code example
import scrapy

class TestSpider(scrapy.Spider):
    name = 'test'
    allowed_domains = ['donedeal.ie']
    start_urls = ['https://www.donedeal.ie/farmtools/']

    def parse(self, response):
        rows = response.xpath('//ul[@class="card-collection"]/li')
        for row in rows:
            link = row.xpath('.//a/@href').get()
            link_split = link.split('/')[-1]
            link_id = link_split.split('?')[0]
            yield {
                'item_id': link_id,
                'item_title': row.xpath('.//p[@class="card__body-title"]/text()').get(),
                'item_county': row.xpath('.//ul[@class="card__body-keyinfo"]/li[2]/text()').get(),
                'item_price': row.xpath('.//p[@class="card__price"]/span[1]/text()').get()
            }
A note: when looping over each 'card', you don't need to specify the div if you're targeting an element with a unique class like card__body-title.
Please note that yielding a dictionary is one of three ways of grabbing data from Scrapy. Consider using Items and ItemLoaders (a minimal sketch follows the links below).
Items: Here
ItemLoaders: Here
ItemLoaders Example: Here
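For reference, a minimal sketch of the Item approach, using the field names from this question (nothing here is specific to donedeal.ie):

import scrapy

class FarmtoolsItem(scrapy.Item):
    # declare one Field per value the spider yields
    item_id = scrapy.Field()
    item_title = scrapy.Field()
    item_county = scrapy.Field()
    item_price = scrapy.Field()

# in parse(), instead of a plain dict:
# yield FarmtoolsItem(item_id=link_id, item_title=title, ...)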
A cleaner alternative would be to use a regex. You can even use it with Scrapy selectors (docs), applied here to the href rather than the title:
'item_id': row.xpath('.//a/@href').re_first(r'/(\d+)\?campaign')
In the snippet above, the regex will return a string with only the digits between / and ?campaign.
In this particular URL https://www.donedeal.ie/farmtools-for-sale/international-784-tractor/25283884?campaign=3 it would return '25283884'
Edited: Corrected the regex
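A quick standalone check of that pattern (plain Python, no Scrapy needed):

import re

url = 'https://www.donedeal.ie/farmtools-for-sale/international-784-tractor/25283884?campaign=3'
print(re.search(r'/(\d+)\?campaign', url).group(1))  # prints 25283884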

What are the correct tags and properties to select?

I want to crawl a web site (http://theschoolofkyiv.org/participants/220/dan-acostioaei) to extract only the artist's name and biography. When I define the tags and properties, the output comes back without any of the text I want to see.
I am using Scrapy to crawl the web site. For other websites it works fine, but here I cannot seem to define the correct tags or properties. Can you please have a look at my code?
This is the code that I used to crawl the website.
import scrapy
from scrapy.selector import Selector
from artistlist.items import ArtistlistItem

class ArtistlistSpider(scrapy.Spider):
    name = "artistlist"
    allowed_domains = ["theschoolofkyiv.org"]
    start_urls = ['http://theschoolofkyiv.org/participants/220/dan-acostioaei']

    def parse(self, response):
        titles = response.xpath("//div[@id='participants']")
        for title in titles:
            item = ArtistlistItem()
            item['artist'] = response.css('.ng-binding::text').extract()
            item['biography'] = response.css('p::text').extract()
            yield item
This is the output that I get:
{'artist': [],
'biography': ['\n ',
'\n ',
'\n ',
'\n ',
'\n ',
'\n ']}
Simple illustration (assuming you already know about the AJAX request mentioned by Tony Montana):
import scrapy
import re
import json
from artistlist.items import ArtistlistItem

class ArtistlistSpider(scrapy.Spider):
    name = "artistlist"
    allowed_domains = ["theschoolofkyiv.org"]
    start_urls = ['http://theschoolofkyiv.org/participants/220/dan-acostioaei']

    def parse(self, response):
        participant_id = re.search(r'participants/(\d+)', response.url).group(1)
        if participant_id:
            yield scrapy.Request(
                url="http://theschoolofkyiv.org/wordpress/wp-json/posts/{participant_id}".format(participant_id=participant_id),
                callback=self.parse_participant,
            )

    def parse_participant(self, response):
        data = json.loads(response.body)
        item = ArtistlistItem()
        item['artist'] = data["title"]
        item['biography'] = data["acf"]["en_participant_bio"]
        yield item
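A side note: on Scrapy 2.2 or newer, response.json() does the parsing for you, so parse_participant could be written as (same logic, just without the manual json.loads):

def parse_participant(self, response):
    data = response.json()  # Scrapy >= 2.2; equivalent to json.loads(response.body)
    item = ArtistlistItem()
    item['artist'] = data["title"]
    item['biography'] = data["acf"]["en_participant_bio"]
    yield item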

Scrapy - Copying only the xpath into .csv file

I have many other scripts with similar basic code that work, but when I run this spider in cmd and open the .csv file to look at the "titles" saved, I get the xpath copied into Excel instead of the titles. Any idea why?
import scrapy

class MovieSpider(scrapy.Spider):
    name = 'movie'
    allowed_domains = ['https://www.imdb.com/search/title?start=1']
    start_urls = ['https://www.imdb.com/search/title?start=1/']

    def parse(self, response):
        titles = response.xpath('//*[@id="main"]/div/div/div[3]/div[1]/div[3]/h3/a')
        print(titles)
        for title in titles:
            yield {'Title': title}
--- Try two below ---
for subject in titles:
    yield {
        'Title': subject.xpath('.//h3[@class="lister-item-header"]/a/text()').extract_first(),
        'Runtime': subject.xpath('.//p[@class="text-muted"]/span/text()').extract_first(),
        'Description': subject.xpath('.//p[@class="text-muted"]/p/text()').extract_first(),
        'Director': subject.xpath('.//*[@id="main"]/a/text()').extract_first(),
        'Rating': subject.xpath('.//div[@class="inline-block ratings-imdb-rating"]/strong/text()').extract_first()
    }
Use extract() or extract_first(), and use a shorter, more robust notation for the XPath:
import scrapy

class MovieSpider(scrapy.Spider):
    name = 'movie'
    allowed_domains = ['https://www.imdb.com/search/title?start=1']
    start_urls = ['https://www.imdb.com/search/title?start=1/']

    def parse(self, response):
        subjects = response.xpath('//div[@class="lister-item mode-advanced"]')
        for subject in subjects:
            yield {
                'Title': subject.xpath('.//h3[@class="lister-item-header"]/a/text()').extract_first(),
                'Rating': subject.xpath('.//div[@class="inline-block ratings-imdb-rating"]/strong/text()').extract_first(),
                'Runtime': subject.xpath('.//span[@class="runtime"]/text()').extract_first(),
                'Description': subject.xpath('.//p[@class="text-muted"]/text()').extract_first(),
                'Director': subject.xpath('.//p[contains(text(), "Director")]/a[1]/text()').extract_first(),
            }
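A side note on the API: .get() and .getall() are the aliases that the current Scrapy documentation recommends over .extract_first() and .extract(). This is also why the first attempt wrote selector objects into the CSV: yielding title yields the Selector itself, not its text. The same loop with the newer spelling:

for subject in response.xpath('//div[@class="lister-item mode-advanced"]'):
    yield {
        'Title': subject.xpath('.//h3[@class="lister-item-header"]/a/text()').get(),
        'Rating': subject.xpath('.//div[@class="inline-block ratings-imdb-rating"]/strong/text()').get(),
    }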