import scrapy


class FarmtoolsSpider(scrapy.Spider):
    """Scrape title, county, price and ad URL from DoneDeal farm-tools listings."""

    name = 'farmtools'
    allowed_domains = ['www.donedeal.ie']
    start_urls = ['https://www.donedeal.ie/farmtools/']

    def parse(self, response):
        # Each <li> under the card collection is one ad "card".
        rows = response.xpath('//ul[@class="card-collection"]/li')
        for row in rows:
            yield {
                # NOTE(review): this is the full ad URL, not a numeric id —
                # the question below asks how to extract the number from it.
                'item_id': row.xpath('.//a/@href').get(),
                'item_title': row.xpath('.//div[1]/p[@class="card__body-title"]/text()').get(),
                'item_county': row.xpath('.//ul[@class="card__body-keyinfo"]/li[2]/text()').get(),
                'item_price': row.xpath('.//p[@class="card__price"]/span[1]/text()').get()
            }
I want to extract the item number from the item_id value, which is a URL.
Is it possible to do this?
The response looks like this:
{'item_id': 'https://www.donedeal.ie/farmtools-for-sale/international-784-
tractor/25283884?campaign=3', 'item_title': 'INTERNATIONAL 784 TRACTOR',
'item_county': 'Derry', 'item_price': '3,000'}
I'd appreciate any advice, thanks
Something like this would work. Not clean, but still: splitting the string up until you get the id you want.
def parse(self, response):
    """Yield one item per ad card, extracting the numeric ad id from its URL."""
    rows = response.xpath('//ul[@class="card-collection"]/li')
    for row in rows:
        link = row.xpath('.//a/@href').get()
        # e.g. '.../international-784-tractor/25283884?campaign=3'
        # last path segment, then strip the '?campaign=...' query -> '25283884'
        link_split = link.split('/')[-1]
        link_id = link_split.split('?')[0]
        yield {
            'item_id': link_id,
            'item_title': row.xpath('.//div[1]/p[@class="card__body-title"]/text()').get(),
            'item_county': row.xpath('.//ul[@class="card__body-keyinfo"]/li[2]/text()').get(),
            'item_price': row.xpath('.//p[@class="card__price"]/span[1]/text()').get()
        }
Update in response to comment
Complete code example
import scrapy


class TestSpider(scrapy.Spider):
    """Complete example: scrape DoneDeal farm-tools ads with a numeric item_id."""

    name = 'test'
    allowed_domains = ['donedeal.ie']
    start_urls = ['https://www.donedeal.ie/farmtools/']

    def parse(self, response):
        rows = response.xpath('//ul[@class="card-collection"]/li')
        for row in rows:
            link = row.xpath('.//a/@href').get()
            # Take the last path segment and drop the query string.
            link_split = link.split('/')[-1]
            link_id = link_split.split('?')[0]
            yield {
                'item_id': link_id,
                'item_title': row.xpath('.//p[@class="card__body-title"]/text()').get(),
                'item_county': row.xpath('.//ul[@class="card__body-keyinfo"]/li[2]/text()').get(),
                'item_price': row.xpath('.//p[@class="card__price"]/span[1]/text()').get()
            }
A note, when looping over each 'card', you don't need to specify the div if you're aiming to get a selector with a unique class like card__body-title.
Please note that yielding a dictionary is one of three ways of grabbing data with Scrapy. Consider using items and itemloaders.
Items: Here
ItemLoaders: Here
ItemLoaders Example: Here
A cleaner alternative would be to use regex. You can even use it with Scrapy selectors (docs)
'item_id': row.xpath('.//a/@href').re_first(r'/(\d+)\?campaign')
In the snippet above, the regex will return a string with only the digits between / and ?campaign.
In this particular URL https://www.donedeal.ie/farmtools-for-sale/international-784-tractor/25283884?campaign=3 it would return '25283884'
Edited: Corrected the regex
Related
I am trying to get the GDP Estimate (Under IMF) from the following page:
https://en.wikipedia.org/wiki/List_of_countries_by_GDP_(nominal)
However, I am only getting the first row (93,863,851). Here's the Scrapy Spider code:
def parse(self, response):
    # NOTE(review): "(//tbody)[3]" selects a single <tbody> element, so this
    # loop runs exactly once and only the first matching cell is yielded —
    # which is why only the first row (93,863,851) comes back. Loop over the
    # table's <tr> rows instead to get every country.
    title = response.xpath("(//tbody)[3]")
    for country in title:
        yield {'GDP': country.xpath(".//td[3]/text()").get()}
On the other hand, I can use the getall() method to get all the data, but this brings all the data points into one single cell when I export it to CSV/XLSX. So this is not a solution for me.
How can I get all the datapoints via the loop? Please help.
Your selector is not correct. You should loop through the table rows and yield the data that you need. See sample below.
import scrapy


class TestSpider(scrapy.Spider):
    """Scrape per-country GDP estimates from the Wikipedia nominal-GDP table."""

    name = 'test'
    start_urls = ['https://en.wikipedia.org/wiki/List_of_countries_by_GDP_(nominal)']

    def parse(self, response):
        # Anchor on the table that has a <caption>, then yield one record
        # per body row instead of selecting the <tbody> as a whole.
        for row in response.xpath("//caption/parent::table/tbody/tr"):
            yield {
                "country": row.xpath("./td[1]/a/text()").get(),
                "region": row.xpath("./td[2]/a/text()").get(),
                "imf_est": row.xpath("./td[3]/text()").get(),
                "imf_est_year": row.xpath("./td[4]/text()").get(),
                "un_est": row.xpath("./td[5]/text()").get(),
                "un_est_year": row.xpath("./td[6]/text()").get(),
                "worldbank_est": row.xpath("./td[7]/text()").get(),
                "worldbank_est_year": row.xpath("./td[8]/text()").get(),
            }
I'm new to Scrapy and I've a problem getting it to return more than the first row. This is the code:
import scrapy


class FarmtoolsSpider(scrapy.Spider):
    name = 'farmtools'
    allowed_domains = ['www.donedeal.ie']
    start_urls = ['https://www.donedeal.ie/farmtools/']

    def parse(self, response):
        # NOTE(review): the loop variable `row` is never used — every xpath
        # below runs against the whole `response`, so .get() always returns
        # the first match on the page. That is why only one row comes back;
        # iterate '//ul[@class="card-collection"]/li' and use row.xpath('.//...').
        for row in response.xpath('//ul[@class="card-collection"]'):
            yield {
                'item_title': response.xpath('.//div[1]/p[@class="card__body-title"]/text()').get(),
                'item_county': response.xpath('.//ul[@class="card__body-keyinfo"]/li[2]/text()').get(),
                'item_price': response.xpath('.//p[@class="card__price"]/span[1]/text()').get(),
                'item_id': response.xpath('.//li[@class="card-item"]/a/@href').get()
            }
I would like it to have Title, County, Price, Id of each item in different rows. Actually if I run this code as it is it just gives me the first line.
I have tried getall but that just gives me blocks of each item.
Any help would be appreciated.
Here is working code which returns 30 rows:
class FarmtoolsSpider(scrapy.Spider):
    """Working version: yields one record per ad card (30 rows per page)."""

    name = 'farmtools'
    allowed_domains = ['www.donedeal.ie']
    start_urls = ['https://www.donedeal.ie/farmtools/']

    def parse(self, response):
        # Select the individual <li> cards, then query relative to each card.
        rows = response.xpath('//ul[@class="card-collection"]/li')
        for row in rows:
            yield {
                'item_title': row.xpath('.//div[1]/p[@class="card__body-title"]/text()').get(),
                'item_county': row.xpath('.//ul[@class="card__body-keyinfo"]/li[2]/text()').get(),
                'item_price': row.xpath('.//p[@class="card__price"]/span[1]/text()').get(),
                'item_id': row.xpath('.//li[@class="card-item"]/a/@href').get()
            }
try `row.xpath('.//...')` instead of `response.xpath('...')`
eg
for row in response.xpath('//ul[@class="card-collection"]'):
    yield {'item_title': row.xpath('.//div[1]/p[@class="card__body-title"]/text()').get(), etc...}
I have many other scripts with simlar basic code that work, but when I run this spider in cmd, and I open the .csv file to look at the "titles" saved, I get the xpath copied into excel. Any idea why?
import scrapy


class MovieSpider(scrapy.Spider):
    name = 'movie'
    # NOTE(review): allowed_domains should hold bare domains (e.g. 'imdb.com'),
    # not full URLs with query strings.
    allowed_domains = ['https://www.imdb.com/search/title?start=1']
    start_urls = ['https://www.imdb.com/search/title?start=1/']

    def parse(self, response):
        titles = response.xpath('//*[@id="main"]/div/div/div[3]/div[1]/div[3]/h3/a')
        pass
        print(titles)
        # NOTE(review): each `title` is a Selector object, so yielding it
        # writes the selector's repr (the xpath text) into the CSV — call
        # .get()/.extract() on a text() xpath to obtain the title string.
        for title in titles:
            yield {'Title': title}
--- Try Two Below:------
for subject in titles:
yield {
'Title': subject.xpath('.//h3[#class="lister-item-header"]/a/text()').extract_first(),
'Runtime': subject.xpath('.//p[#class="text-muted"]/span/text()').extract_first(),
'Description': subject.xpath('.//p[#class="text-muted"]/p/text()').extract_first(),
'Director': subject.xpath('.//*[#id="main"]/a/text()').extract_first(),
'Rating': subject.xpath('.//div[#class="inline-block ratings-imdb-rating"]/strong/text()').extract_first()
}
Use extract() or extract_first(), and also use shorter, more concise XPath expressions:
import scrapy


class MovieSpider(scrapy.Spider):
    """Scrape title/rating/runtime/description/director from IMDb search results."""

    name = 'movie'
    allowed_domains = ['https://www.imdb.com/search/title?start=1']
    start_urls = ['https://www.imdb.com/search/title?start=1/']

    def parse(self, response):
        # One selector per result card; all field xpaths are relative to it.
        subjects = response.xpath('//div[@class="lister-item mode-advanced"]')
        for subject in subjects:
            yield {
                'Title': subject.xpath('.//h3[@class="lister-item-header"]/a/text()').extract_first(),
                'Rating': subject.xpath('.//div[@class="inline-block ratings-imdb-rating"]/strong/text()').extract_first(),
                'Runtime': subject.xpath('.//span[@class="runtime"]/text()').extract_first(),
                'Description': subject.xpath('.//p[@class="text-muted"]/text()').extract_first(),
                'Directior': subject.xpath('.//p[contains(text(), "Director")]/a[1]/text()').extract_first(),
            }
output:
I have a scraper with the following rules:
rules = (
    # NOTE(review): the first two rules are identical duplicates; raw strings
    # (r'...') would be the conventional way to write these regexes.
    Rule(LinkExtractor(allow=('\S+list=\S+'))),
    Rule(LinkExtractor(allow=('\S+list=\S+'))),
    Rule(LinkExtractor(allow=('\S+view=1\S+')), callback='parse_archive'),
)
As you can see, the 2nd and 3rd rules are exactly the same.
What I would like to do is tell Scrapy to extract the links I am interested in by referring only to particular places within a page. For convenience, I am sending you the corresponding XPaths, although I would prefer a solution based on BeautifulSoup's syntax.
//*[@id="main_frame"]/tbody/tr[3]/td[2]/table/tbody/tr/td/div/table/tbody/tr/td[1]
//*[@id="main_frame"]/tbody/tr[3]/td[2]/table/tbody/tr/td/div/form/table/tbody/tr[1]
//*[@id="main_frame"]/tbody/tr[3]/td[2]/table/tbody/tr/td/div/form/table/tbody/tr[2]
EDIT:
Let me give you an example. Let's assume that I want to extract the five (out of six) links at the top of Scrapy's Official Page:
And here is my spider. Any ideas?
class dmozSpider(CrawlSpider):
    name = "dmoz"
    allowed_domains = ["scrapy.org"]
    start_urls = [
        "http://scrapy.org/",
    ]

    rules = (
        # Only follow links found inside the page's navigation <ul>.
        Rule(LinkExtractor(allow=('\S+/'), restrict_xpaths=('/html/body/div[1]/div/ul')), callback='first_level'),
    )

    def first_level(self, response):
        # Emit one item holding just the crawled page's URL.
        taco = dmozItem()
        taco['basic_url'] = response.url
        return taco
This can be done with the restrict_xpaths parameter. See the LxmlLinkExtractor documentation
Edit:
You can also pass a list to restrict_xpaths.
Edit 2:
Full example that should work:
import scrapy
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors import LinkExtractor


class dmozItem(scrapy.Item):
    # Single field: the URL of the crawled page.
    basic_url = scrapy.Field()


class dmozSpider(CrawlSpider):
    name = "dmoz"
    allowed_domains = ["scrapy.org"]
    start_urls = [
        "http://scrapy.org/",
    ]

    def clean_url(value):
        # Normalise relative '/../' segments the extractor may produce.
        return value.replace('/../', '/')

    rules = (
        Rule(
            LinkExtractor(
                allow=('\S+/'),
                # restrict_xpaths accepts a list — one entry per nav link here.
                restrict_xpaths=(['.//ul[@class="navigation"]/a[1]',
                                  './/ul[@class="navigation"]/a[2]',
                                  './/ul[@class="navigation"]/a[3]',
                                  './/ul[@class="navigation"]/a[4]',
                                  './/ul[@class="navigation"]/a[5]']),
                process_value=clean_url
            ),
            callback='first_level'),
    )

    def first_level(self, response):
        taco = dmozItem()
        taco['basic_url'] = response.url
        return taco
What is the correct way to nest Item data?
For example, I want the output of a product:
{
    'price': price,
    'title': title,
    'meta': {
        'url': url,
        'added_on': added_on
    }
}
I have scrapy.Item of:
class ProductItem(scrapy.Item):
    """Flat product item; every field keeps only the first extracted value."""

    url = scrapy.Field(output_processor=TakeFirst())
    price = scrapy.Field(output_processor=TakeFirst())
    title = scrapy.Field(output_processor=TakeFirst())
    # NOTE(review): `url` was declared twice in the original — the second,
    # identical declaration silently shadowed the first and has been removed.
    added_on = scrapy.Field(output_processor=TakeFirst())
Now, the way I do it is just to reformat the whole item in the pipeline according to new item template:
class FormatedItem(scrapy.Item):
    """Output shape: flat title/price plus a nested `meta` dict."""

    title = scrapy.Field()
    price = scrapy.Field()
    meta = scrapy.Field()
and in pipeline:
def process_item(self, item, spider):
    """Repack a flat product item into a FormatedItem with a nested meta dict."""
    formated_item = FormatedItem()
    formated_item['title'] = item['title']
    formated_item['price'] = item['price']
    # url/added_on move under the 'meta' key to match the desired output shape.
    formated_item['meta'] = {
        'url': item['url'],
        'added_on': item['added_on']
    }
    return formated_item
Is this correct way to approach this or is there a more straight-forward way to approach this without breaking the philosophy of the framework?
UPDATE from comments: Looks like nested loaders is the updated approach. Another comment suggests this approach will cause errors during serialization.
Best way to approach this is by creating a main and a meta item class/loader.
from scrapy.item import Item, Field
from scrapy.contrib.loader import ItemLoader
from scrapy.contrib.loader.processor import TakeFirst
class MetaItem(Item):
    """Fields that end up nested under the main item's `meta` key."""

    url = Field()
    added_on = Field()
class MainItem(Item):
    """Top-level item: flat price/title plus a nested MetaItem under `meta`."""

    price = Field()
    title = Field()
    # The serializer documents that `meta` holds a whole MetaItem.
    meta = Field(serializer=MetaItem)
class MainItemLoader(ItemLoader):
    """Loader for MainItem; collapses each field to its first value."""

    default_item_class = MainItem
    default_output_processor = TakeFirst()
class MetaItemLoader(ItemLoader):
    """Loader for the nested MetaItem; collapses each field to its first value."""

    default_item_class = MetaItem
    default_output_processor = TakeFirst()
Sample usage:
from scrapy.spider import Spider
from qwerty.items import MainItemLoader, MetaItemLoader
from scrapy.selector import Selector
class DmozSpider(Spider):
    """Sample usage: build a MainItem whose `meta` field is a loaded MetaItem."""

    name = "dmoz"
    allowed_domains = ["example.com"]
    start_urls = ["http://example.com"]

    def parse(self, response):
        mainloader = MainItemLoader(selector=Selector(response))
        mainloader.add_value('title', 'test')
        mainloader.add_value('price', 'price')
        # Load the nested meta item first, then attach it as a plain value.
        mainloader.add_value('meta', self.get_meta(response))
        return mainloader.load_item()

    def get_meta(self, response):
        # Build the nested item with its own loader.
        metaloader = MetaItemLoader(selector=Selector(response))
        metaloader.add_value('url', response.url)
        metaloader.add_value('added_on', 'now')
        return metaloader.load_item()
After that, you can easily expand your items in the future by creating more "sub-items."
I think it would be more straightforward to construct the dictionary in the spider. Here are two different ways of doing it, both achieving the same result. The only possible dealbreaker here is that the processors apply on the item['meta'] field, not on the item['meta']['added_on'] and item['meta']['url'] fields.
def parse(self, response):
    """Construct the nested `meta` dict directly on the item in the spider."""
    item = MyItem()
    item['meta'] = {'added_on': response.css("a::text").extract()[0]}
    item['meta']['url'] = response.xpath("//a/@href").extract()[0]
    return item
Is there a specific reason for which you want to construct it that way instead of unpacking the meta field ?