The start_url in this spider seems to be causing a problem, but I am unsure why. Here is the project breakdown.
import scrapy
from statements.items import StatementsItem

class IncomeannualSpider(scrapy.Spider):
    name = 'incomeannual'
    start_urls = ['https://www.marketwatch.com/investing/stock/A/financials']

    def parse(self, response):
        item = {}
        item['ticker'] = response.xpath("//h1[contains(@id, 'instrumentname')]//text()").extract()
        item['sales2014'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Sales/Revenue']]/text()").extract()[0]
        item['sales2015'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Sales/Revenue']]/text()").extract()[1]
        item['sales2016'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Sales/Revenue']]/text()").extract()[2]
        item['sales2017'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Sales/Revenue']]/text()").extract()[3]
        item['sales2018'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Sales/Revenue']]/text()").extract()[4]
        item['sales2014rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Sales Growth']]/text()").extract()[0]
        item['sales2015rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Sales Growth']]/text()").extract()[1]
        item['sales2016rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Sales Growth']]/text()").extract()[2]
        item['sales2017rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Sales Growth']]/text()").extract()[3]
        item['sales2018rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Sales Growth']]/text()").extract()[4]
        item['cogs2014'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Cost of Goods Sold (COGS) incl. D&A']]/text()").extract()[0]
        item['cogs2015'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Cost of Goods Sold (COGS) incl. D&A']]/text()").extract()[1]
        item['cogs2016'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Cost of Goods Sold (COGS) incl. D&A']]/text()").extract()[2]
        item['cogs2017'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Cost of Goods Sold (COGS) incl. D&A']]/text()").extract()[3]
        item['cogs2018'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Cost of Goods Sold (COGS) incl. D&A']]/text()").extract()[4]
        item['cogs2014rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='COGS Growth']]/text()").extract()[0]
        item['cogs2015rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='COGS Growth']]/text()").extract()[1]
        item['cogs2016rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='COGS Growth']]/text()").extract()[2]
        item['cogs2017rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='COGS Growth']]/text()").extract()[3]
        item['cogs2018rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='COGS Growth']]/text()").extract()[4]
        item['pretaxincome2014'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Pretax Income']]/text()").extract()[0]
        item['pretaxincome2015'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Pretax Income']]/text()").extract()[1]
        item['pretaxincome2016'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Pretax Income']]/text()").extract()[2]
        item['pretaxincome2017'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Pretax Income']]/text()").extract()[3]
        item['pretaxincome2018'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Pretax Income']]/text()").extract()[4]
        item['pretaxincome2014rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Pretax Income Growth']]/text()").extract()[0]
        item['pretaxincome2015rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Pretax Income Growth']]/text()").extract()[1]
        item['pretaxincome2016rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Pretax Income Growth']]/text()").extract()[2]
        item['pretaxincome2017rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Pretax Income Growth']]/text()").extract()[3]
        item['pretaxincome2018rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Pretax Income Growth']]/text()").extract()[4]
        item['netincome2014'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Net Income']]/text()").extract()[0]
        item['netincome2015'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Net Income']]/text()").extract()[1]
        item['netincome2016'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Net Income']]/text()").extract()[2]
        item['netincome2017'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Net Income']]/text()").extract()[3]
        item['netincome2018'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Net Income']]/text()").extract()[4]
        item['netincome2014rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Net Income Growth']]/text()").extract()[0]
        item['netincome2015rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Net Income Growth']]/text()").extract()[1]
        item['netincome2016rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Net Income Growth']]/text()").extract()[2]
        item['netincome2017rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Net Income Growth']]/text()").extract()[3]
        item['netincome2018rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='Net Income Growth']]/text()").extract()[4]
        item['eps2014'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EPS (Basic)']]/text()").extract()[0]
        item['eps2015'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EPS (Basic)']]/text()").extract()[1]
        item['eps2016'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EPS (Basic)']]/text()").extract()[2]
        item['eps2017'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EPS (Basic)']]/text()").extract()[3]
        item['eps2018'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EPS (Basic)']]/text()").extract()[4]
        item['eps2014rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EPS (Basic) Growth']]/text()").extract()[0]
        item['eps2015rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EPS (Basic) Growth']]/text()").extract()[1]
        item['eps2016rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EPS (Basic) Growth']]/text()").extract()[2]
        item['eps2017rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EPS (Basic) Growth']]/text()").extract()[3]
        item['eps2018rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EPS (Basic) Growth']]/text()").extract()[4]
        item['eps2014altrate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EPS (Basic) - Growth']]/text()").extract()[0]
        item['eps2015altrate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EPS (Basic) - Growth']]/text()").extract()[1]
        item['eps2016altrate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EPS (Basic) - Growth']]/text()").extract()[2]
        item['eps2017altrate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EPS (Basic) - Growth']]/text()").extract()[3]
        item['eps2018altrate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EPS (Basic) - Growth']]/text()").extract()[4]
        item['ebitda2014'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EBITDA']]/text()").extract()[0]
        item['ebitda2015'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EBITDA']]/text()").extract()[1]
        item['ebitda2016'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EBITDA']]/text()").extract()[2]
        item['ebitda2017'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EBITDA']]/text()").extract()[3]
        item['ebitda2018'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EBITDA']]/text()").extract()[4]
        item['ebitda2014rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EBITDA Growth']]/text()").extract()[0]
        item['ebitda2015rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EBITDA Growth']]/text()").extract()[1]
        item['ebitda2016rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EBITDA Growth']]/text()").extract()[2]
        item['ebitda2017rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EBITDA Growth']]/text()").extract()[3]
        item['ebitda2018rate'] = response.xpath("//td[./preceding-sibling::td[normalize-space()='EBITDA Growth']]/text()").extract()[4]
        yield item
All of the XPaths were checked against the start_url in the Scrapy shell and seem to work just fine.
2019-03-17 10:25:06 [scrapy.utils.log] INFO: Scrapy 1.6.0 started (bot:
statements)
2019-03-17 10:25:06 [scrapy.utils.log] INFO: Versions: lxml 4.3.1.0, libxml2
2.9.5, cssselect 1.0.3, parsel 1.5.1, w3lib 1.20.0, Twisted 18.9.0, Python
3.7.2 (tags/v3.7.2:9a3ffc0492, Dec 23 2018, 23:09:28) [MSC v.1916 64 bit
(AMD64)], pyOpenSSL 19.0.0 (OpenSSL 1.1.1a 20 Nov 2018), cryptography 2.5,
Platform Windows-10-10.0.17763-SP0
2019-03-17 10:25:06 [scrapy.crawler] INFO: Overridden settings: {'BOT_NAME':
'statements', 'FEED_EXPORT_ENCODING': 'utf-8', 'FEED_FORMAT': 'csv',
'FEED_URI': 'sdasda.csv', 'NEWSPIDER_MODULE': 'statements.spiders',
'ROBOTSTXT_OBEY': True, 'SPIDER_MODULES': ['statements.spiders'],
'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36
(KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'}
2019-03-17 10:25:06 [scrapy.extensions.telnet] INFO: Telnet Password:
3580241d541f00bb
2019-03-17 10:25:06 [scrapy.middleware] INFO: Enabled extensions:
['scrapy.extensions.corestats.CoreStats',
'scrapy.extensions.telnet.TelnetConsole',
'scrapy.extensions.feedexport.FeedExporter',
'scrapy.extensions.logstats.LogStats']
2019-03-17 10:25:06 [scrapy.middleware] INFO: Enabled downloader
middlewares:
['scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware',
'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware',
'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware',
'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware',
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware',
'statements.middlewares.StatementsDownloaderMiddleware',
'scrapy.downloadermiddlewares.retry.RetryMiddleware',
'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware',
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware',
'scrapy.downloadermiddlewares.redirect.RedirectMiddleware',
'scrapy.downloadermiddlewares.cookies.CookiesMiddleware',
'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware',
'scrapy.downloadermiddlewares.stats.DownloaderStats']
2019-03-17 10:25:06 [scrapy.middleware] INFO: Enabled spider middlewares:
['scrapy.spidermiddlewares.httperror.HttpErrorMiddleware',
'scrapy.spidermiddlewares.offsite.OffsiteMiddleware',
'scrapy.spidermiddlewares.referer.RefererMiddleware',
'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware',
'scrapy.spidermiddlewares.depth.DepthMiddleware']
2019-03-17 10:25:06 [scrapy.middleware] INFO: Enabled item pipelines:
['statements.pipelines.StatementsPipeline']
2019-03-17 10:25:06 [scrapy.core.engine] INFO: Spider opened
2019-03-17 10:25:06 [scrapy.extensions.logstats] INFO: Crawled 0 pages (at 0
pages/min), scraped 0 items (at 0 items/min)
2019-03-17 10:25:06 [incomeannual] INFO: Spider opened: incomeannual
2019-03-17 10:25:06 [scrapy.extensions.telnet] INFO: Telnet console
listening on 127.0.0.1:6024
2019-03-17 10:25:07 [scrapy.core.engine] DEBUG: Crawled (200) <GET
https://www.marketwatch.com/robots.txt> (referer: None)
2019-03-17 10:25:07 [scrapy.core.engine] DEBUG: Crawled (200) <GET
https://www.marketwatch.com/investing/stock/A/financials> (referer: None)
2019-03-17 10:25:07 [scrapy.core.scraper] ERROR: Spider error processing
<GET https://www.marketwatch.com/investing/stock/A/financials> (referer:
None)
Traceback (most recent call last):
File "c:\users\jesse\appdata\local\programs\python\python37\lib\site-
packages\scrapy\utils\defer.py", line 102, in iter_errback
yield next(it)
File "c:\users\jesse\appdata\local\programs\python\python37\lib\site-
packages\scrapy\spidermiddlewares\offsite.py", line 29, in
process_spider_output
for x in result:
File "c:\users\jesse\appdata\local\programs\python\python37\lib\site-
packages\scrapy\spidermiddlewares\referer.py", line 339, in <genexpr>
return (_set_referer(r) for r in result or ())
File "c:\users\jesse\appdata\local\programs\python\python37\lib\site-
packages\scrapy\spidermiddlewares\urllength.py", line 37, in <genexpr>
return (r for r in result or () if _filter(r))
File "c:\users\jesse\appdata\local\programs\python\python37\lib\site-
packages\scrapy\spidermiddlewares\depth.py", line 58, in <genexpr>
return (r for r in result or () if _filter(r))
File "C:\Users\Jesse\Files\Financial\statements\statements\spiders\incomeannual.py",
line 64, in parse
item['eps2014altrate'] = response.xpath("//td[./preceding-
sibling::td[normalize-space()='EPS (Basic) - Growth']]/text()").extract()[0]
IndexError: list index out of range
2019-03-17 10:25:07 [scrapy.core.engine] INFO: Closing spider (finished)
2019-03-17 10:25:07 [scrapy.statscollectors] INFO: Dumping Scrapy stats:
{'downloader/request_bytes': 636,
'downloader/request_count': 2,
'downloader/request_method_count/GET': 2,
'downloader/response_bytes': 25693,
'downloader/response_count': 2,
'downloader/response_status_count/200': 2,
'finish_reason': 'finished',
'finish_time': datetime.datetime(2019, 3, 17, 14, 25, 7, 786531),
'log_count/DEBUG': 2,
'log_count/ERROR': 1,
'log_count/INFO': 10,
'response_received_count': 2,
'robotstxt/request_count': 1,
'robotstxt/response_count': 1,
'robotstxt/response_status_count/200': 1,
'scheduler/dequeued': 1,
'scheduler/dequeued/memory': 1,
'scheduler/enqueued': 1,
'scheduler/enqueued/memory': 1,
'spider_exceptions/IndexError': 1,
'start_time': datetime.datetime(2019, 3, 17, 14, 25, 6, 856319)}
2019-03-17 10:25:07 [scrapy.core.engine] INFO: Spider closed (finished)
This site requires the USER_AGENT setting to be set to allow scraping. I've tried specifying headers in settings.py, but this spider will actually be using over 5000 start_urls and I'm not sure how to use this setting with multiple URLs. I've used this setup in multiple other projects and they work fine.
Any advice will be very much appreciated! Thanks!
The error in your log is because that specific XPath returns nothing (tested in scrapy shell):
>>> response.xpath("//td[./preceding-sibling::td[normalize-space()='EPS (Basic) - Growth']]/text()").extract()
[]
>>> response.xpath("//td[./preceding-sibling::td[normalize-space()='EPS (Basic) - Growth']]/text()").extract()[0]
Traceback (most recent call last):
File "<console>", line 1, in <module>
IndexError: list index out of range
You need to check the length of the selector result before getting an index, because it is not safe to assume that an index exists. There are various shorthand solutions here: Get value at list/array index or "None" if out of range in Python
Here is one example:
values = response.xpath("//td[./preceding-sibling::td[normalize-space()='EPS (Basic) - Growth']]/text()").extract()
item['eps2014altrate'] = values[0] if 0 < len(values) else None
item['eps2015altrate'] = values[1] if 1 < len(values) else None
item['eps2016altrate'] = values[2] if 2 < len(values) else None
item['eps2017altrate'] = values[3] if 3 < len(values) else None
item['eps2018altrate'] = values[4] if 4 < len(values) else None
You can make it a bit less verbose by writing a helper function; see the sketch below. Either way, you should use this pattern everywhere, not just for the failing XPath.
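For example, a small helper along these lines (hypothetical name, just to illustrate the pattern) returns None for any missing index:

def nth_or_none(values, index):
    # return values[index] when it exists, otherwise None
    return values[index] if index < len(values) else None

values = response.xpath("//td[./preceding-sibling::td[normalize-space()='EPS (Basic) - Growth']]/text()").extract()
item['eps2014altrate'] = nth_or_none(values, 0)
item['eps2015altrate'] = nth_or_none(values, 1)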
Try this approach while testing:
try:
    item['ticker'] = response.xpath("//..//text()").extract()
except:
    item['ticker'] = "-"

try:
    item['sales2014'] = response.xpath("//../text()").extract()[0]
except:
    item['sales2014'] = "-"

try:
    item['sales2015'] = response.xpath("//../text()").extract()[1]
except:
    item['sales2015'] = "-"
Later, use a helper function to reduce the repetition; one possible sketch follows.
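For instance, a helper like this (hypothetical name, same try/except idea as above) extracts an XPath result at a given index and falls back to a placeholder:

def extract_at(response, xpath, index, default="-"):
    # return the index-th extracted value, or the default if it is missing
    try:
        return response.xpath(xpath).extract()[index]
    except IndexError:
        return default

item['sales2014'] = extract_at(response, "//td[./preceding-sibling::td[normalize-space()='Sales/Revenue']]/text()", 0)
item['sales2015'] = extract_at(response, "//td[./preceding-sibling::td[normalize-space()='Sales/Revenue']]/text()", 1)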
Related
I have written code that follows links within a web page to extract data and move to the next page; it follows the about link for each author on quotes.toscrape.com.
import scrapy

class TestSpider(scrapy.Spider):
    name = 'test'
    allowed_domains = ['quotes.toscrape.com']
    start_urls = ['http://quotes.toscrape.com',]

    def parse(self, response):
        linkto = response.css('div.quote > span > a::attr(href)').extract()
        for links in linkto:
            links = response.urljoin(links)
            yield scrapy.Request(url=links, callback = scrapy.parse_about)
        nextp = response.css('li.next > a::attr(href)').extract()
        if nextp:
            nextp = response.urljoin(nextp)
            yield scrapy.Request(url=nextp, callback=self.parse)

    def parse_about(self, response):
        yield {
            'date_of_birth': response.css('span.author-born-date::text').extract(),
            'author': response.css('h3.author-title::text').extract(),
        }
I executed in the command prompt:
scrapy crawl test -o test.csv
but these are the results I got:
2019-03-20 16:36:03 [scrapy.utils.log] INFO: Scrapy 1.5.1 started (bot: quotestoscrape)
2019-03-20 16:36:03 [scrapy.utils.log] INFO: Versions: lxml 4.1.1.0, libxml2 2.9.9, cssselect 1.0.3, parsel 1.5.1, w3lib 1.20.0, Twisted 17.5.0, Python 2.7.15 |Anaconda, Inc.| (default, Nov 13 2018, 17:33:26) [MSC v.1500 64 bit (AMD64)], pyOpenSSL 19.0.0 (OpenSSL 1.1.1 11 Sep 2018), cryptography 2.5, Platform Windows-10-10.0.17134
2019-03-20 16:36:03 [scrapy.crawler] INFO: Overridden settings: {'NEWSPIDER_MODULE': 'quotestoscrape.spiders', 'SPIDER_MODULES': ['quotestoscrape.spiders'], 'ROBOTSTXT_OBEY': True, 'BOT_NAME': 'quotestoscrape'}
2019-03-20 16:36:03 [scrapy.middleware] INFO: Enabled extensions:
['scrapy.extensions.logstats.LogStats',
'scrapy.extensions.telnet.TelnetConsole',
'scrapy.extensions.corestats.CoreStats']
2019-03-20 16:36:03 [scrapy.middleware] INFO: Enabled downloader middlewares:
['scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware',
'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware',
'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware',
'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware',
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware',
'scrapy.downloadermiddlewares.retry.RetryMiddleware',
'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware',
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware',
'scrapy.downloadermiddlewares.redirect.RedirectMiddleware',
'scrapy.downloadermiddlewares.cookies.CookiesMiddleware',
'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware',
'scrapy.downloadermiddlewares.stats.DownloaderStats']
2019-03-20 16:36:03 [scrapy.middleware] INFO: Enabled spider middlewares:
['scrapy.spidermiddlewares.httperror.HttpErrorMiddleware',
'scrapy.spidermiddlewares.offsite.OffsiteMiddleware',
'scrapy.spidermiddlewares.referer.RefererMiddleware',
'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware',
'scrapy.spidermiddlewares.depth.DepthMiddleware']
2019-03-20 16:36:03 [scrapy.middleware] INFO: Enabled item pipelines:
[]
2019-03-20 16:36:03 [scrapy.core.engine] INFO: Spider opened
2019-03-20 16:36:03 [scrapy.extensions.logstats] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2019-03-20 16:36:03 [scrapy.extensions.telnet] DEBUG: Telnet console listening on 127.0.0.1:6023
2019-03-20 16:36:03 [scrapy.core.engine] DEBUG: Crawled (404) <GET http://quotes.toscrape.com/robots.txt> (referer: None)
2019-03-20 16:36:03 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://quotes.toscrape.com> (referer: None)
2019-03-20 16:36:04 [scrapy.core.scraper] ERROR: Spider error processing <GET http://quotes.toscrape.com> (referer: None)
Traceback (most recent call last):
File "C:\Users\kenny\Anaconda3\lib\site-packages\scrapy\utils\defer.py", line 102, in iter_errback
yield next(it)
File "C:\Users\kenny\Anaconda3\lib\site-packages\scrapy\spidermiddlewares\offsite.py", line 30, in process_spider_output
for x in result:
File "C:\Users\kenny\Anaconda3\lib\site-packages\scrapy\spidermiddlewares\referer.py", line 339, in <genexpr>
return (_set_referer(r) for r in result or ())
File "C:\Users\kenny\Anaconda3\lib\site-packages\scrapy\spidermiddlewares\urllength.py", line 37, in <genexpr>
return (r for r in result or () if _filter(r))
File "C:\Users\kenny\Anaconda3\lib\site-packages\scrapy\spidermiddlewares\depth.py", line 58, in <genexpr>
return (r for r in result or () if _filter(r))
File "C:\Users\kenny\quotestoscrape\quotestoscrape\spiders\QuoteTestSpider.py", line 13, in parse
yield scrapy.Request(url=links, callback = scrapy.parse_about)
AttributeError: 'module' object has no attribute 'parse_about'
2019-03-20 16:36:04 [scrapy.core.engine] INFO: Closing spider (finished)
2019-03-20 16:36:04 [scrapy.statscollectors] INFO: Dumping Scrapy stats:
{'downloader/request_bytes': 446,
'downloader/request_count': 2,
'downloader/request_method_count/GET': 2,
'downloader/response_bytes': 2701,
'downloader/response_count': 2,
'downloader/response_status_count/200': 1,
'downloader/response_status_count/404': 1,
'finish_reason': 'finished',
'finish_time': datetime.datetime(2019, 3, 20, 21, 36, 4, 41000),
'log_count/DEBUG': 3,
'log_count/ERROR': 1,
'log_count/INFO': 7,
'response_received_count': 2,
'scheduler/dequeued': 1,
'scheduler/dequeued/memory': 1,
'scheduler/enqueued': 1,
'scheduler/enqueued/memory': 1,
'spider_exceptions/AttributeError': 1,
'start_time': datetime.datetime(2019, 3, 20, 21, 36, 3, 468000)}
2019-03-20 16:36:04 [scrapy.core.engine] INFO: Spider closed (finished)
And the csv file I exported to is empty.
Please let me know what I am doing wrong
According to your log, the parse_about method is not called because you are trying to call scrapy.parse_about instead of the spider's self.parse_about:
....
for links in linkto:
    links = response.urljoin(links)
    yield scrapy.Request(url=links, callback=self.parse_about)
Since your application doesn't scrape any data, it creates an empty csv file as a result.
I'm trying to learn how to use Scrapy's ItemLoaders. Can anybody tell me what I am doing wrong? I would like to thank you in advance.
import scrapy
from items.items import ItemsItem
from scrapy.loader import ItemLoader

class ItemspiderSpider(scrapy.Spider):
    name = 'itemspider'
    allowed_domains = ['yellowpages.com']
    start_urls = ['https://www.yellowpages.com/search?search_terms=handyman&geo_location_terms=Miami%2C+FL']

    def parse(self, response):
        # create the loader using the response
        l = ItemLoader(item=ItemsItem(), response=response)
        # create a for loop
        for listing in response.css('div.search-results.organic div.srp-listing'):
            l.add_css('Name', listing.css('a.business-name span::text').extract())
            l.add_css('Details', response.urljoin(listing.css('a.business-name::attr(href)')))
            l.add_css('WebSite', listing.css('a.track-visit-website::attr(href)').extract_first())
            l.add_css('Phones', listing.css('div.phones::text').extract())
            yield l.load_item()
When I run the code I keep getting this error:
root@debian:~/Desktop/items/items/spiders# scrapy runspider itemspider.py -o item.csv
/usr/local/lib/python3.5/dist-packages/scrapy/spiderloader.py:37: UserWarning: There are several spiders with the same name:
ItemspiderSpider named 'itemspider' (in items.spiders.itemspider)
ItemspiderSpider named 'itemspider' (in items.spiders.itemspiderLog)
This can cause unexpected behavior.
warnings.warn(msg, UserWarning)
2017-07-04 16:33:20 [scrapy.utils.log] INFO: Scrapy 1.4.0 started (bot: items)
2017-07-04 16:33:20 [scrapy.utils.log] INFO: Overridden settings: {'BOT_NAME': 'items', 'FEED_FORMAT': 'csv', 'SPIDER_LOADER_WARN_ONLY': True, 'SPIDER_MODULES': ['items.spiders'], 'FEED_URI': 'item.csv', 'ROBOTSTXT_OBEY': True, 'NEWSPIDER_MODULE': 'items.spiders'}
2017-07-04 16:33:20 [scrapy.middleware] INFO: Enabled extensions:
['scrapy.extensions.corestats.CoreStats',
'scrapy.extensions.memusage.MemoryUsage',
'scrapy.extensions.telnet.TelnetConsole',
'scrapy.extensions.feedexport.FeedExporter',
'scrapy.extensions.logstats.LogStats']
2017-07-04 16:33:20 [scrapy.middleware] INFO: Enabled downloader middlewares:
['scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware',
'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware',
'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware',
'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware',
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware',
'scrapy.downloadermiddlewares.retry.RetryMiddleware',
'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware',
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware',
'scrapy.downloadermiddlewares.redirect.RedirectMiddleware',
'scrapy.downloadermiddlewares.cookies.CookiesMiddleware',
'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware',
'scrapy.downloadermiddlewares.stats.DownloaderStats']
2017-07-04 16:33:20 [scrapy.middleware] INFO: Enabled spider middlewares:
['scrapy.spidermiddlewares.httperror.HttpErrorMiddleware',
'scrapy.spidermiddlewares.offsite.OffsiteMiddleware',
'scrapy.spidermiddlewares.referer.RefererMiddleware',
'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware',
'scrapy.spidermiddlewares.depth.DepthMiddleware']
2017-07-04 16:33:20 [scrapy.middleware] INFO: Enabled item pipelines:
[]
2017-07-04 16:33:20 [scrapy.core.engine] INFO: Spider opened
2017-07-04 16:33:20 [scrapy.extensions.logstats] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2017-07-04 16:33:20 [scrapy.extensions.telnet] DEBUG: Telnet console listening on 127.0.0.1:6023
2017-07-04 16:33:21 [scrapy.core.engine] DEBUG: Crawled (200) <GET https://www.yellowpages.com/robots.txt> (referer: None)
2017-07-04 16:33:23 [scrapy.core.engine] DEBUG: Crawled (200) <GET https://www.yellowpages.com/search?search_terms=handyman&geo_location_terms=Miami%2C+FL> (referer: None)
2017-07-04 16:33:24 [scrapy.core.scraper] ERROR: Spider error processing <GET https://www.yellowpages.com/search?search_terms=handyman&geo_location_terms=Miami%2C+FL> (referer: None)
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/scrapy/utils/defer.py", line 102, in iter_errback
yield next(it)
File "/usr/local/lib/python3.5/dist-packages/scrapy/spidermiddlewares/offsite.py", line 29, in process_spider_output
for x in result:
File "/usr/local/lib/python3.5/dist-packages/scrapy/spidermiddlewares/referer.py", line 339, in <genexpr>
return (_set_referer(r) for r in result or ())
File "/usr/local/lib/python3.5/dist-packages/scrapy/spidermiddlewares/urllength.py", line 37, in <genexpr>
return (r for r in result or () if _filter(r))
File "/usr/local/lib/python3.5/dist-packages/scrapy/spidermiddlewares/depth.py", line 58, in <genexpr>
return (r for r in result or () if _filter(r))
File "/root/Desktop/items/items/spiders/itemspider.py", line 17, in parse
l.add_css('Details', response.urljoin(listing.css('a.business-name::attr(href)')))
File "/usr/local/lib/python3.5/dist-packages/scrapy/http/response/text.py", line 82, in urljoin
return urljoin(get_base_url(self), url)
File "/usr/lib/python3.5/urllib/parse.py", line 416, in urljoin
base, url, _coerce_result = _coerce_args(base, url)
File "/usr/lib/python3.5/urllib/parse.py", line 112, in _coerce_args
raise TypeError("Cannot mix str and non-str arguments")
TypeError: Cannot mix str and non-str arguments
2017-07-04 16:33:24 [scrapy.core.engine] INFO: Closing spider (finished)
2017-07-04 16:33:24 [scrapy.statscollectors] INFO: Dumping Scrapy stats:
{'downloader/request_bytes': 503,
'downloader/request_count': 2,
'downloader/request_method_count/GET': 2,
'downloader/response_bytes': 52924,
'downloader/response_count': 2,
'downloader/response_status_count/200': 2,
'finish_reason': 'finished',
'finish_time': datetime.datetime(2017, 7, 4, 21, 33, 24, 121098),
'log_count/DEBUG': 3,
'log_count/ERROR': 1,
'log_count/INFO': 7,
'memusage/max': 49471488,
'memusage/startup': 49471488,
'response_received_count': 2,
'scheduler/dequeued': 1,
'scheduler/dequeued/memory': 1,
'scheduler/enqueued': 1,
'scheduler/enqueued/memory': 1,
'spider_exceptions/TypeError': 1,
'start_time': datetime.datetime(2017, 7, 4, 21, 33, 20, 705391)}
2017-07-04 16:33:24 [scrapy.core.engine] INFO: Spider closed (finished)
Not sure what is going on; this is actually the first time I have tried to use ItemLoaders.
There are a few issues with your code:
response.urljoin() expects a single string as a parameter, not a list. You are passing the result of listing.css(), which is a SelectorList. You can use response.urljoin(listing.css('a.business-name::attr(href)').extract_first())
You need to instantiate one item loader per loop iteration; otherwise, you're accumulating values for each field of a single yielded item.
You are using .add_css() with values (the results of .extract...() calls). .add_css() needs a CSS selector string, not the result of a selector extraction; the CSS extraction will then be done by the item loader. Or, you can use .add_value() if you want to pass the "final" field value directly.
Here are 2 versions that should get you going:
import scrapy
from items.items import ItemsItem
from scrapy.loader import ItemLoader

class ItemspiderSpider(scrapy.Spider):
    name = 'itemspider'
    allowed_domains = ['yellowpages.com']
    start_urls = ['https://www.yellowpages.com/search?search_terms=handyman&geo_location_terms=Miami%2C+FL']

    def parse(self, response):
        for listing in response.css('div.search-results.organic div.srp-listing'):
            # create the loader using the SELECTOR, inside the loop
            l = ItemLoader(item=ItemsItem())
            # use .add_value() since we pass the extraction result directly
            l.add_value('Name', listing.css('a.business-name span::text').extract())
            # pass a single value to response.urljoin()
            l.add_value('Details',
                        response.urljoin(
                            listing.css('a.business-name::attr(href)').extract_first()
                        ))
            l.add_value('WebSite', listing.css('a.track-visit-website::attr(href)').extract_first())
            l.add_value('Phones', listing.css('div.phones::text').extract())
            yield l.load_item()
Or, using .add_css():
import scrapy
from items.items import ItemsItem
from scrapy.loader import ItemLoader

class ItemspiderSpider(scrapy.Spider):
    name = 'itemspider'
    allowed_domains = ['yellowpages.com']
    start_urls = ['https://www.yellowpages.com/search?search_terms=handyman&geo_location_terms=Miami%2C+FL']

    def parse(self, response):
        for listing in response.css('div.search-results.organic div.srp-listing'):
            # pass the 'listing' selector to the item loader
            # so that CSS selection is relative to it
            l = ItemLoader(ItemsItem(), selector=listing)
            l.add_css('Name', 'a.business-name span::text')
            l.add_css('Details', 'a.business-name::attr(href)')
            l.add_css('WebSite', 'a.track-visit-website::attr(href)')
            l.add_css('Phones', 'div.phones::text')
            yield l.load_item()
When trying to scrape a page passing headers and a body, I get the errors shown below.
I tried converting the body to JSON and to a string before sending it, but it doesn't give any results.
Please let me know if anything needs to be changed.
Code
import scrapy

class TestingSpider(scrapy.Spider):
    name = "test"

    def start_requests(self):
        request_headers = {
            "Host": "host_here",
            "User-Agent": "Mozilla/5.0 20100101 Firefox/46.0",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Accept-Language": "en-US,en;q=0.5",
            "Accept-Encoding": "gzip, deflate",
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0"
        }
        url = "my_url_here"
        payload = {
            "searchargs.approvedFrom.input": "05/18/2017",
            "searchargs.approvedTO.input": "05/18/2017",
            "pagesize": -1
        }
        yield scrapy.Request(url, method="POST", callback=self.parse, headers=request_headers, body=payload)

    def parse(self, response):
        print("-------------------------------came here-------------------------------")
        print(response.body)
Error 1
Traceback (most recent call last):
File "/home/suventure/home/python/lib/python3.5/site-packages/scrapy/core/engine.py", line 127, in _next_request
request = next(slot.start_requests)
File "/home/suventure/Desktop/suventure-projects/python-projects/scraper_txrrc/scraper_txrrc/spiders/wells_spider.py", line 114, in start_requests
yield scrapy.Request(url, method="POST", callback=self.parse, headers=request_headers, body=payload)
File "/home/suventure/home/python/lib/python3.5/site-packages/scrapy/http/request/__init__.py", line 26, in __init__
self._set_body(body)
File "/home/suventure/home/python/lib/python3.5/site-packages/scrapy/http/request/__init__.py", line 68, in _set_body
self._body = to_bytes(body, self.encoding)
File "/home/suventure/home/python/lib/python3.5/site-packages/scrapy/utils/python.py", line 117, in to_bytes
'object, got %s' % type(text).__name__)
TypeError: to_bytes must receive a unicode, str or bytes object, got dict
Error 2 (no response at all) when the dict is converted to a string and sent in the body
2017-05-19 22:39:38 [scrapy.utils.log] INFO: Scrapy 1.3.3 started (bot: scraper_)
2017-05-19 22:39:38 [scrapy.utils.log] INFO: Overridden settings: {'BOT_NAME': 'scraper', 'NEWSPIDER_MODULE': 'scraper_.spiders', 'SPIDER_MODULES': ['scraper_.spiders'], 'ROBOTSTXT_OBEY': True}
2017-05-19 22:39:39 [scrapy.middleware] INFO: Enabled extensions:
['scrapy.extensions.telnet.TelnetConsole',
'scrapy.extensions.corestats.CoreStats',
'scrapy.extensions.logstats.LogStats']
2017-05-19 22:39:39 [scrapy.middleware] INFO: Enabled downloader middlewares:
['scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware',
'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware',
'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware',
'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware',
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware',
'scrapy.downloadermiddlewares.retry.RetryMiddleware',
'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware',
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware',
'scrapy.downloadermiddlewares.redirect.RedirectMiddleware',
'scrapy.downloadermiddlewares.cookies.CookiesMiddleware',
'scrapy.downloadermiddlewares.stats.DownloaderStats']
2017-05-19 22:39:39 [scrapy.middleware] INFO: Enabled spider middlewares:
['scrapy.spidermiddlewares.httperror.HttpErrorMiddleware',
'scrapy.spidermiddlewares.offsite.OffsiteMiddleware',
'scrapy.spidermiddlewares.referer.RefererMiddleware',
'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware',
'scrapy.spidermiddlewares.depth.DepthMiddleware']
2017-05-19 22:39:39 [scrapy.middleware] INFO: Enabled item pipelines:
['scrapy.pipelines.files.FilesPipeline']
2017-05-19 22:39:39 [scrapy.core.engine] INFO: Spider opened
2017-05-19 22:39:39 [scrapy.extensions.logstats] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2017-05-19 22:39:39 [scrapy.extensions.telnet] DEBUG: Telnet console listening on 127.0.0.1:6023
2017-05-19 22:39:40 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://website_link_here/robots.txt> (referer: None)
2017-05-19 22:39:40 [scrapy.downloadermiddlewares.robotstxt] DEBUG: Forbidden by robots.txt: <POST website_link_here>
2017-05-19 22:39:40 [scrapy.core.engine] INFO: Closing spider (finished)
2017-05-19 22:39:40 [scrapy.statscollectors] INFO: Dumping Scrapy stats:
{'downloader/exception_count': 1,
'downloader/exception_type_count/scrapy.exceptions.IgnoreRequest': 1,
'downloader/request_bytes': 232,
'downloader/request_count': 1,
'downloader/request_method_count/GET': 1,
'downloader/response_bytes': 258,
'downloader/response_count': 1,
'downloader/response_status_count/200': 1,
'finish_reason': 'finished',
'finish_time': datetime.datetime(2017, 5, 19, 17, 9, 40, 581949),
'log_count/DEBUG': 3,
'log_count/INFO': 7,
'response_received_count': 1,
'scheduler/dequeued': 1,
'scheduler/dequeued/memory': 1,
'scheduler/enqueued': 1,
'scheduler/enqueued/memory': 1,
'start_time': datetime.datetime(2017, 5, 19, 17, 9, 39, 332675)}
2017-05-19 22:39:40 [scrapy.core.engine] INFO: Spider closed (finished)
In settings.py change
ROBOTSTXT_OBEY = False
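That takes care of the "Forbidden by robots.txt" line in the second log. The first traceback (to_bytes must receive a unicode, str or bytes object, got dict) is a separate problem: Request's body must be a string or bytes, not a dict. One option, sketched here as a suggestion rather than part of the original answer, is to let scrapy.FormRequest form-encode the dict for you:

import scrapy

class TestingSpider(scrapy.Spider):
    name = "test"

    def start_requests(self):
        url = "my_url_here"  # placeholder URL from the question
        payload = {
            "searchargs.approvedFrom.input": "05/18/2017",
            "searchargs.approvedTO.input": "05/18/2017",
            "pagesize": "-1",  # FormRequest expects string values
        }
        # FormRequest sends a POST with the dict form-encoded into the body;
        # the custom request_headers from the question can still be passed via headers=...
        yield scrapy.FormRequest(url, formdata=payload, callback=self.parse)

    def parse(self, response):
        print(response.body)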
I'm working on retrieving information from the National Gallery of Art's online catalog. Due to the catalog's structure, I can't navigate by extracting and following links from entry to entry. Fortunately, each object in the collection has a predictable url. I want my spider to navigate the collection by generating start urls.
I have attempted to solve my problem by implementing the solution from this thread. Unfortunately, this seems to break another part of my spider. The error log reveals that my urls are being successfully generated, but they aren't being processed correctly. If I'm interpreting the log correctly—which I suspect I'm not—there is a conflict between the redefinition of the start_urls that allows me to generate the urls I need and the rules section of the spider. As things stand now, the spider also doesn't respect the number of pages that I ask it to crawl.
You'll find my spider and a typical error below. I appreciate any help you can offer.
Spider:
URL = "http://www.nga.gov/content/ngaweb/Collection/art-object-page.%d"
starting_number = 1312
number_of_pages = 10

class NGASpider(CrawlSpider):
    name = 'ngamedallions'
    allowed_domains = ['nga.gov']
    start_urls = [URL % starting_number]

    rules = (
        Rule(LinkExtractor(allow=('art-object-page.*', 'objects/*')), callback='parse_CatalogRecord',
             follow=True))

    def __init__(self):
        self.page_number = starting_number

    def start_requests(self):
        for i in range(self.page_number, number_of_pages, -1):
            yield Request(url=URL % i + ".html", callback=self.parse)

    def parse_CatalogRecord(self, response):
        CatalogRecord = ItemLoader(item=NgamedallionsItem(), response=response)
        CatalogRecord.default_output_processor = TakeFirst()
        CatalogRecord.image_urls_out = scrapy.loader.processors.Identity()
        keywords = "medal|medallion"
        r = re.compile('.*(%s).*' % keywords, re.IGNORECASE|re.MULTILINE|re.UNICODE)
        if r.search(response.body_as_unicode()):
            CatalogRecord.add_xpath('title', './/dl[@class="artwork-details"]/dt[@class="title"]/text()')
            CatalogRecord.add_xpath('accession', './/dd[@class="accession"]/text()')
            CatalogRecord.add_xpath('inscription', './/div[@id="inscription"]/p/text()')
            CatalogRecord.add_xpath('image_urls', './/img[@class="mainImg"]/@src')
            return CatalogRecord.load_item()
Typical Error:
2016-04-29 15:35:00 [scrapy] ERROR: Spider error processing <GET http://www.nga.gov/content/ngaweb/Collection/art-object-page.1178.html> (referer: None)
Traceback (most recent call last):
File "/usr/lib/pymodules/python2.7/scrapy/utils/defer.py", line 102, in iter_errback
yield next(it)
File "/usr/lib/pymodules/python2.7/scrapy/spidermiddlewares/offsite.py", line 28, in process_spider_output
for x in result:
File "/usr/lib/pymodules/python2.7/scrapy/spidermiddlewares/referer.py", line 22, in <genexpr>
return (_set_referer(r) for r in result or ())
File "/usr/lib/pymodules/python2.7/scrapy/spidermiddlewares/urllength.py", line 37, in <genexpr>
return (r for r in result or () if _filter(r))
File "/usr/lib/pymodules/python2.7/scrapy/spidermiddlewares/depth.py", line 54, in <genexpr>
return (r for r in result or () if _filter(r))
File "/usr/lib/pymodules/python2.7/scrapy/spiders/crawl.py", line 73, in _parse_response
for request_or_item in self._requests_to_follow(response):
File "/usr/lib/pymodules/python2.7/scrapy/spiders/crawl.py", line 51, in _requests_to_follow
for n, rule in enumerate(self._rules):
AttributeError: 'NGASpider' object has no attribute '_rules'
Update in Response to eLRuLL's Solution
Simply removing def __init__ and start_urls allows my spider to crawl my generated urls. However, it also seems to prevent 'def parse_CatalogRecord(self, response)' from being applied. When I run the spider now, it only scrapes pages from outside the range of generated urls. My revised spider and log output follow below.
Spider:
URL = "http://www.nga.gov/content/ngaweb/Collection/art-object-page.%d"
starting_number = 1312
number_of_pages = 1311

class NGASpider(CrawlSpider):
    name = 'ngamedallions'
    allowed_domains = ['nga.gov']

    rules = (
        Rule(LinkExtractor(allow=('art-object-page.*', 'objects/*')), callback='parse_CatalogRecord',
             follow=True))

    def start_requests(self):
        self.page_number = starting_number
        for i in range(self.page_number, number_of_pages, -1):
            yield Request(url=URL % i + ".html", callback=self.parse)

    def parse_CatalogRecord(self, response):
        CatalogRecord = ItemLoader(item=NgamedallionsItem(), response=response)
        CatalogRecord.default_output_processor = TakeFirst()
        CatalogRecord.image_urls_out = scrapy.loader.processors.Identity()
        keywords = "medal|medallion"
        r = re.compile('.*(%s).*' % keywords, re.IGNORECASE|re.MULTILINE|re.UNICODE)
        if r.search(response.body_as_unicode()):
            CatalogRecord.add_xpath('title', './/dl[@class="artwork-details"]/dt[@class="title"]/text()')
            CatalogRecord.add_xpath('accession', './/dd[@class="accession"]/text()')
            CatalogRecord.add_xpath('inscription', './/div[@id="inscription"]/p/text()')
            CatalogRecord.add_xpath('image_urls', './/img[@class="mainImg"]/@src')
            return CatalogRecord.load_item()
Log:
2016-05-02 15:50:02 [scrapy] INFO: Scrapy 1.0.5.post4+g4b324a8 started (bot: ngamedallions)
2016-05-02 15:50:02 [scrapy] INFO: Optional features available: ssl, http11
2016-05-02 15:50:02 [scrapy] INFO: Overridden settings: {'NEWSPIDER_MODULE': 'ngamedallions.spiders', 'FEED_URI': 'items.json', 'SPIDER_MODULES': ['ngamedallions.spiders'], 'BOT_NAME': 'ngamedallions', 'FEED_FORMAT': 'json', 'DOWNLOAD_DELAY': 3}
2016-05-02 15:50:02 [scrapy] INFO: Enabled extensions: CloseSpider, FeedExporter, TelnetConsole, LogStats, CoreStats, SpiderState
2016-05-02 15:50:02 [scrapy] INFO: Enabled downloader middlewares: HttpAuthMiddleware, DownloadTimeoutMiddleware, UserAgentMiddleware, RetryMiddleware, DefaultHeadersMiddleware, MetaRefreshMiddleware, HttpCompressionMiddleware, RedirectMiddleware, CookiesMiddleware, ChunkedTransferMiddleware, DownloaderStats
2016-05-02 15:50:02 [scrapy] INFO: Enabled spider middlewares: HttpErrorMiddleware, OffsiteMiddleware, RefererMiddleware, UrlLengthMiddleware, DepthMiddleware
2016-05-02 15:50:02 [scrapy] INFO: Enabled item pipelines: ImagesPipeline
2016-05-02 15:50:02 [scrapy] INFO: Spider opened
2016-05-02 15:50:02 [scrapy] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2016-05-02 15:50:02 [scrapy] DEBUG: Telnet console listening on 127.0.0.1:6023
2016-05-02 15:50:02 [scrapy] DEBUG: Crawled (200) <GET http://www.nga.gov/content/ngaweb/Collection/art-object-page.1312.html> (referer: None)
2016-05-02 15:50:02 [scrapy] DEBUG: Filtered duplicate request: <GET http://www.nga.gov/content/ngaweb/Collection/art-object-page.1312.html> - no more duplicates will be shown (see DUPEFILTER_DEBUG to show all duplicates)
2016-05-02 15:50:05 [scrapy] DEBUG: Crawled (200) <GET http://www.nga.gov/content/ngaweb/Collection/art-object-page.1313.html> (referer: http://www.nga.gov/content/ngaweb/Collection/art-object-page.1312.html)
2016-05-02 15:50:05 [scrapy] DEBUG: File (uptodate): Downloaded image from <GET http://media.nga.gov/public/objects/1/3/1/3/1313-primary-0-440x400.jpg> referred in <None>
2016-05-02 15:50:05 [scrapy] DEBUG: Scraped from <200 http://www.nga.gov/content/ngaweb/Collection/art-object-page.1313.html>
{'accession': u'1942.9.163.b',
'image_urls': [u'http://media.nga.gov/public/objects/1/3/1/3/1313-primary-0-440x400.jpg'],
'images': [{'checksum': '9d5f2e30230aeec1582ca087bcde6bfa',
'path': 'full/3a692347183d26ffefe9ba0af80b0b6bf247fae5.jpg',
'url': 'http://media.nga.gov/public/objects/1/3/1/3/1313-primary-0-440x400.jpg'}],
'inscription': u'around top circumference: TRINACRIA IANI; upper center: PELORVS ; across center: PA LI; across bottom: BELAVRA',
'title': u'House between Two Hills [reverse]'}
2016-05-02 15:50:05 [scrapy] INFO: Closing spider (finished)
2016-05-02 15:50:05 [scrapy] INFO: Stored json feed (1 items) in: items.json
2016-05-02 15:50:05 [scrapy] INFO: Dumping Scrapy stats:
{'downloader/request_bytes': 631,
'downloader/request_count': 2,
'downloader/request_method_count/GET': 2,
'downloader/response_bytes': 26324,
'downloader/response_count': 2,
'downloader/response_status_count/200': 2,
'dupefilter/filtered': 3,
'file_count': 1,
'file_status_count/uptodate': 1,
'finish_reason': 'finished',
'finish_time': datetime.datetime(2016, 5, 2, 19, 50, 5, 810570),
'item_scraped_count': 1,
'log_count/DEBUG': 6,
'log_count/INFO': 8,
'request_depth_max': 2,
'response_received_count': 2,
'scheduler/dequeued': 2,
'scheduler/dequeued/memory': 2,
'scheduler/enqueued': 2,
'scheduler/enqueued/memory': 2,
'start_time': datetime.datetime(2016, 5, 2, 19, 50, 2, 455508)}
2016-05-02 15:50:05 [scrapy] INFO: Spider closed (finished)
don't override the __init__ method if you are not going to call super.
Now, you don't really need to declare start_urls for your spider to work if you are going to use start_requests.
Just remove your def __init__ method and no need for start_urls to exist.
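If you do want to keep an __init__ (for example to set self.page_number), a minimal sketch that calls super so CrawlSpider can still compile its rules into self._rules:

class NGASpider(CrawlSpider):
    name = 'ngamedallions'
    allowed_domains = ['nga.gov']

    def __init__(self, *args, **kwargs):
        # without this call CrawlSpider never creates self._rules
        super(NGASpider, self).__init__(*args, **kwargs)
        self.page_number = starting_number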
UPDATE
Ok my mistake, looks like CrawlSpider needs the start_urls attribute, so just create it instead of using the start_requests method:
start_urls = [URL % i + '.html' for i in range (starting_number, number_of_pages, -1)]
and remove start_requests
I'm having trouble using Scrapy's image pipeline to retrieve images. From the error reports, I think I am feeding Scrapy the right image_urls. However, instead of downloading images from them, Scrapy returns the error: ValueError: Missing scheme in request url: h.
This is my first time using the image pipeline feature, so I suspect I'm making a simple mistake. All the same, I'd appreciate help solving it.
Below you'll find my spider, settings, items, and error output. They're not quite MWEs, but I think they're pretty simple and easy to understand all the same.
Spider:
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from ngamedallions.items import NgamedallionsItem
from scrapy.loader.processors import TakeFirst
from scrapy.loader import ItemLoader
from scrapy.loader.processors import Join
from scrapy.http import Request
import re

class NGASpider(CrawlSpider):
    name = 'ngamedallions'
    allowed_domains = ['nga.gov']
    start_urls = [
        'http://www.nga.gov/content/ngaweb/Collection/art-object-page.1312.html'
    ]

    rules = (
        Rule(LinkExtractor(allow=('art-object-page.*', 'objects/*')), callback='parse_CatalogRecord',
             follow=True
             ),)

    def parse_CatalogRecord(self, response):
        CatalogRecord = ItemLoader(item=NgamedallionsItem(), response=response)
        CatalogRecord.default_output_processor = TakeFirst()
        keywords = "medal|medallion"
        r = re.compile('.*(%s).*' % keywords, re.IGNORECASE|re.MULTILINE|re.UNICODE)
        if r.search(response.body_as_unicode()):
            CatalogRecord.add_xpath('title', './/dl[@class="artwork-details"]/dt[@class="title"]/text()')
            CatalogRecord.add_xpath('accession', './/dd[@class="accession"]/text()')
            CatalogRecord.add_xpath('inscription', './/div[@id="inscription"]/p/text()')
            CatalogRecord.add_xpath('image_urls', './/img[@class="mainImg"]/@src')
            return CatalogRecord.load_item()
Settings:
BOT_NAME = 'ngamedallions'

SPIDER_MODULES = ['ngamedallions.spiders']
NEWSPIDER_MODULE = 'ngamedallions.spiders'

DOWNLOAD_DELAY = 3

ITEM_PIPELINES = {
    'scrapy.pipelines.images.ImagesPipeline': 1,
}

IMAGES_STORE = '/home/tricia/Documents/Programing/Scrapy/ngamedallions/medallionimages'
Items:
import scrapy

class NgamedallionsItem(scrapy.Item):
    title = scrapy.Field()
    accession = scrapy.Field()
    inscription = scrapy.Field()
    image_urls = scrapy.Field()
    images = scrapy.Field()
    pass
Error Log:
2016-04-24 19:00:40 [scrapy] INFO: Scrapy 1.0.5.post2+ga046ce8 started (bot: ngamedallions)
2016-04-24 19:00:40 [scrapy] INFO: Optional features available: ssl, http11
2016-04-24 19:00:40 [scrapy] INFO: Overridden settings: {'NEWSPIDER_MODULE': 'ngamedallions.spiders', 'FEED_URI': 'items.json', 'SPIDER_MODULES': ['ngamedallions.spiders'], 'BOT_NAME': 'ngamedallions', 'FEED_FORMAT': 'json', 'DOWNLOAD_DELAY': 3}
2016-04-24 19:00:40 [scrapy] INFO: Enabled extensions: CloseSpider, FeedExporter, TelnetConsole, LogStats, CoreStats, SpiderState
2016-04-24 19:00:40 [scrapy] INFO: Enabled downloader middlewares: HttpAuthMiddleware, DownloadTimeoutMiddleware, UserAgentMiddleware, RetryMiddleware, DefaultHeadersMiddleware, MetaRefreshMiddleware, HttpCompressionMiddleware, RedirectMiddleware, CookiesMiddleware, ChunkedTransferMiddleware, DownloaderStats
2016-04-24 19:00:40 [scrapy] INFO: Enabled spider middlewares: HttpErrorMiddleware, OffsiteMiddleware, RefererMiddleware, UrlLengthMiddleware, DepthMiddleware
2016-04-24 19:00:40 [scrapy] INFO: Enabled item pipelines: ImagesPipeline
2016-04-24 19:00:40 [scrapy] INFO: Spider opened
2016-04-24 19:00:40 [scrapy] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2016-04-24 19:00:40 [scrapy] DEBUG: Telnet console listening on 127.0.0.1:6023
2016-04-24 19:00:40 [scrapy] DEBUG: Crawled (200) <GET http://www.nga.gov/content/ngaweb/Collection/art-object-page.1312.html> (referer: None)
2016-04-24 19:00:44 [scrapy] DEBUG: Crawled (200) <GET http://www.nga.gov/content/ngaweb/Collection/art-object-page.1.html> (referer: None)
2016-04-24 19:00:48 [scrapy] DEBUG: Crawled (200) <GET http://www.nga.gov/content/ngaweb/Collection/art-object-page.1312.html> (referer: http://www.nga.gov/content/ngaweb/Collection/art-object-page.1312.html)
2016-04-24 19:00:48 [scrapy] ERROR: Error processing {'accession': u'1942.9.163.a',
'image_urls': u'http://media.nga.gov/public/objects/1/3/1/2/1312-primary-0-440x400.jpg',
'inscription': u'around circumference: IOHANNES FRANCISCVS GON MA; around bottom circumference: MANTVA',
'title': u'Gianfrancesco Gonzaga di Rodigo, 1445-1496, Lord of Bozzolo, Sabbioneta, and Viadana 1478 [obverse]'}
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 577, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/usr/lib/pymodules/python2.7/scrapy/pipelines/media.py", line 44, in process_item
requests = arg_to_iter(self.get_media_requests(item, info))
File "/usr/lib/pymodules/python2.7/scrapy/pipelines/images.py", line 109, in get_media_requests
return [Request(x) for x in item.get(self.IMAGES_URLS_FIELD, [])]
File "/usr/lib/pymodules/python2.7/scrapy/http/request/__init__.py", line 24, in __init__
self._set_url(url)
File "/usr/lib/pymodules/python2.7/scrapy/http/request/__init__.py", line 55, in _set_url
self._set_url(url.encode(self.encoding))
File "/usr/lib/pymodules/python2.7/scrapy/http/request/__init__.py", line 59, in _set_url
raise ValueError('Missing scheme in request url: %s' % self._url)
ValueError: Missing scheme in request url: h
2016-04-24 19:00:48 [scrapy] DEBUG: Filtered duplicate request: <GET http://www.nga.gov/content/ngaweb/Collection/art-object-page.1312.html> - no more duplicates will be shown (see DUPEFILTER_DEBUG to show all duplicates)
2016-04-24 19:00:51 [scrapy] DEBUG: Crawled (200) <GET http://www.nga.gov/content/ngaweb/Collection/art-object-page.1313.html> (referer: http://www.nga.gov/content/ngaweb/Collection/art-object-page.1312.html)
2016-04-24 19:00:52 [scrapy] ERROR: Error processing {'accession': u'1942.9.163.b',
'image_urls': u'http://media.nga.gov/public/objects/1/3/1/3/1313-primary-0-440x400.jpg',
'inscription': u'around top circumference: TRINACRIA IANI; upper center: PELORVS ; across center: PA LI; across bottom: BELAVRA',
'title': u'House between Two Hills [reverse]'}
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 577, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/usr/lib/pymodules/python2.7/scrapy/pipelines/media.py", line 44, in process_item
requests = arg_to_iter(self.get_media_requests(item, info))
File "/usr/lib/pymodules/python2.7/scrapy/pipelines/images.py", line 109, in get_media_requests
return [Request(x) for x in item.get(self.IMAGES_URLS_FIELD, [])]
File "/usr/lib/pymodules/python2.7/scrapy/http/request/__init__.py", line 24, in __init__
self._set_url(url)
File "/usr/lib/pymodules/python2.7/scrapy/http/request/__init__.py", line 55, in _set_url
self._set_url(url.encode(self.encoding))
File "/usr/lib/pymodules/python2.7/scrapy/http/request/__init__.py", line 59, in _set_url
raise ValueError('Missing scheme in request url: %s' % self._url)
ValueError: Missing scheme in request url: h
2016-04-24 19:00:55 [scrapy] DEBUG: Crawled (200) <GET http://www.nga.gov/content/ngaweb/Collection/art-object-page.1.html> (referer: http://www.nga.gov/content/ngaweb/Collection/art-object-page.1.html)
2016-04-24 19:01:02 [scrapy] INFO: Closing spider (finished)
2016-04-24 19:01:02 [scrapy] INFO: Dumping Scrapy stats:
{'downloader/request_bytes': 1609,
'downloader/request_count': 5,
'downloader/request_method_count/GET': 5,
'downloader/response_bytes': 125593,
'downloader/response_count': 5,
'downloader/response_status_count/200': 5,
'dupefilter/filtered': 5,
'finish_reason': 'finished',
'finish_time': datetime.datetime(2016, 4, 24, 23, 1, 2, 938181),
'log_count/DEBUG': 7,
'log_count/ERROR': 2,
'log_count/INFO': 7,
'request_depth_max': 2,
'response_received_count': 5,
'scheduler/dequeued': 5,
'scheduler/dequeued/memory': 5,
'scheduler/enqueued': 5,
'scheduler/enqueued/memory': 5,
'start_time': datetime.datetime(2016, 4, 24, 23, 0, 40, 851598)}
2016-04-24 19:01:02 [scrapy] INFO: Spider closed (finished)
The TakeFirst processor is making image_urls a string when it should be a list.
Add:
CatalogRecord.image_urls_out = lambda v: v
EDIT:
This could also be:
CatalogRecord.image_urls_out = scrapy.loader.processors.Identity()
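In context, that is a one-line addition to parse_CatalogRecord, mirroring the loader setup already shown in the question (a sketch):

def parse_CatalogRecord(self, response):
    CatalogRecord = ItemLoader(item=NgamedallionsItem(), response=response)
    CatalogRecord.default_output_processor = TakeFirst()
    # keep image_urls as a list so the images pipeline gets full URLs
    # instead of iterating over the characters of one string (hence "Missing scheme ... : h")
    CatalogRecord.image_urls_out = scrapy.loader.processors.Identity()
    # ... the add_xpath calls stay the same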