I need to scrape a webpage generated by JavaScript. The href looks like this:
javascript:__doPostBack('pager','4')
I wrote a WebkitDownloader middleware in Scrapy and it works.
import gtk
import webkit
import jswebkit
from scrapy.http import FormRequest, HtmlResponse
# `settings` is assumed to be the project's imported Scrapy settings module

class WebkitDownloader(object):
    def process_request(self, request, spider):
        if spider.name in settings.WEBKIT_DOWNLOADER:
            if type(request) is not FormRequest:
                print '===================Processing non-FormRequest=================='
                webview = webkit.WebView()
                webview.connect('load-finished', lambda v, f: gtk.main_quit())
                webview.load_uri(request.url)
                gtk.main()  # blocks until the page has finished loading
                js = jswebkit.JSContext(webview.get_main_frame().get_global_context())
                renderedBody = str(js.EvaluateScript('document.documentElement.innerHTML'))
                #print renderedBody
                return HtmlResponse(request.url, body=renderedBody)
I still don't know how to send a request to the WebkitDownloader from my spider's parse method.
Can I use something like request.append('javascript:__doPostBack('pager','4')') to send the JavaScript request to the WebkitDownloader? Or how should I build the JavaScript request?
You can use this code in your middleware:
from scrapy.http import HtmlResponse
import gtk
import webkit
import jswebkit

class WebkitDownloader(object):

    def stop_gtk(self, v, f):
        # quit the GTK main loop once the page has finished loading
        gtk.main_quit()

    def _get_webview(self):
        webview = webkit.WebView()
        props = webview.get_settings()
        props.set_property('enable-java-applet', False)
        props.set_property('enable-plugins', False)
        props.set_property('enable-page-cache', False)
        return webview

    def process_request(self, request, spider):
        # only render requests that explicitly ask for it via request.meta
        if 'renderjs' in request.meta:
            webview = self._get_webview()
            webview.connect('load-finished', self.stop_gtk)
            webview.load_uri(request.url)
            gtk.main()  # blocks until 'load-finished' fires
            ctx = jswebkit.JSContext(webview.get_main_frame().get_global_context())
            url = ctx.EvaluateScript('window.location.href')
            html = ctx.EvaluateScript('document.documentElement.innerHTML')
            return HtmlResponse(url, encoding='utf-8', body=html.encode('utf-8'))
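To route a request through this middleware from a spider, the request just needs the 'renderjs' key in its meta. Here is a minimal sketch, assuming the middleware is registered under DOWNLOADER_MIDDLEWARES; the spider name, URL, and module path are placeholders:

import scrapy

class JsSpider(scrapy.Spider):
    # hypothetical spider; name and URL are placeholders
    name = 'js_spider'

    def start_requests(self):
        yield scrapy.Request(
            'http://example.com/page-with-js',   # placeholder URL
            meta={'renderjs': True},             # triggers the Webkit rendering branch above
            callback=self.parse,
        )

    def parse(self, response):
        # response.body is the rendered HTML produced by the middleware
        yield {'title': response.xpath('//title/text()').extract()}

# settings.py -- the module path 'myproject.middlewares' is an assumption
# DOWNLOADER_MIDDLEWARES = {'myproject.middlewares.WebkitDownloader': 543}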
Now my spiders are sending data to my site in this way:
import json
import requests

def parse_product(response, **cb_kwargs):
    item = {}
    item['url'] = response.url
    data = {
        "source_id": 505,
        "token": f"{API_TOKEN}",
        "products": [item]
    }
    headers = {'Content-Type': 'application/json'}
    url = 'http://some.site.com/api/'
    requests.post(url=url, headers=headers, data=json.dumps(data))
Is it possible to implement this through a pipeline or middleware? It is inconvenient to repeat it in every spider.
P.S. The data needs to be sent as JSON (json.dumps(data)); if I use an Item class (item = MyItemClass()), an error occurs...
This can be done fairly easily using a pipeline. You can also use Scrapy's Item and Field classes, as long as you cast the item to a dict prior to calling json.dumps.
For example:
import json
import requests

class Pipeline:
    def process_item(self, item, spider):
        data = dict(item)
        headers = {'Content-Type': 'application/json'}
        url = 'http://some.site.com/api/'
        requests.post(url=url, headers=headers, data=json.dumps(data))
        return item
If you use this example, it will be called for each and every item you yield from your spider. Just remember to activate it in your settings.py file.
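For example, activation could look like this in settings.py (the module path 'myproject.pipelines' is an assumption about your project layout):

# settings.py -- register the pipeline so Scrapy runs it for every yielded item;
# 'myproject.pipelines.Pipeline' is a placeholder path, adjust it to your project
ITEM_PIPELINES = {
    'myproject.pipelines.Pipeline': 300,
}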
I found another solution (on GitHub); maybe someone will find it useful...
pipeline.py
import json
import logging

import requests
from scrapy.utils.serialize import ScrapyJSONEncoder
from twisted.internet.defer import DeferredLock
from twisted.internet.threads import deferToThread

default_serialize = ScrapyJSONEncoder().encode


class HttpPostPipeline(object):
    settings = None
    items_buffer = []

    DEFAULT_HTTP_POST_PIPELINE_BUFFERED = False
    DEFAULT_HTTP_POST_PIPELINE_BUFFER_SIZE = 100

    def __init__(self, url, headers=None, serialize_func=default_serialize):
        """Initialize pipeline.

        Parameters
        ----------
        url : str
            Endpoint to POST items to.
        headers : dict, optional
            Extra HTTP headers to send with every request.
        serialize_func : callable
            Items serializer function.
        """
        self.url = url
        self.headers = headers if headers else {}
        self.serialize_func = serialize_func
        self._lock = DeferredLock()

    @classmethod
    def from_crawler(cls, crawler):
        params = {
            'url': crawler.settings.get('HTTP_POST_PIPELINE_URL'),
        }
        if crawler.settings.get('HTTP_POST_PIPELINE_HEADERS'):
            params['headers'] = crawler.settings['HTTP_POST_PIPELINE_HEADERS']
        ext = cls(**params)
        ext.settings = crawler.settings
        return ext

    def process_item(self, item, spider):
        if self.settings.get('HTTP_POST_PIPELINE_BUFFERED', self.DEFAULT_HTTP_POST_PIPELINE_BUFFERED):
            self._lock.run(self._process_items, item)
            return item
        else:
            return deferToThread(self._process_item, item, spider)

    def _process_item(self, item, spider):
        data = self.serialize_func(item)
        requests.post(self.url, json=json.loads(data), headers=self.headers)
        return item

    def _process_items(self, item):
        # buffer items and POST them in batches once the buffer is full
        self.items_buffer.append(item)
        if len(self.items_buffer) >= int(self.settings.get('HTTP_POST_PIPELINE_BUFFER_SIZE',
                                                           self.DEFAULT_HTTP_POST_PIPELINE_BUFFER_SIZE)):
            deferToThread(self.send_items, self.items_buffer)
            self.items_buffer = []

    def send_items(self, items):
        logging.debug("Sending batch of {} items".format(len(items)))
        serialized_items = [self.serialize_func(item) for item in items]
        requests.post(self.url, json=[json.loads(data) for data in serialized_items], headers=self.headers)

    def close_spider(self, spider):
        # flush whatever is left in the buffer when the spider finishes
        if len(self.items_buffer) > 0:
            deferToThread(self.send_items, self.items_buffer)
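For reference, a sketch of the settings.py entries this pipeline reads from crawler.settings (the module path and the endpoint URL are placeholders):

# settings.py -- hypothetical configuration for HttpPostPipeline
ITEM_PIPELINES = {
    'myproject.pipelines.HttpPostPipeline': 300,   # placeholder module path
}

HTTP_POST_PIPELINE_URL = 'http://some.site.com/api/'   # endpoint from the question
HTTP_POST_PIPELINE_HEADERS = {'Content-Type': 'application/json'}

# optional batching: POST buffered lists instead of one request per item
HTTP_POST_PIPELINE_BUFFERED = True
HTTP_POST_PIPELINE_BUFFER_SIZE = 100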
I am trying to follow several URLs from a webpage into the next parser to grab another set of URLs on a page. From that page I also need to grab the next-page URLs, but I wanted to try building them by manipulating the page string: parsing it apart and then passing the result as the next page. However, the scraper crawls but returns nothing, not even the output of the final parser where I load the item.
Note: I know that I can grab the next page rather simply with an if-statement on the href. However, I wanted to try something different in case I ever face a situation where I have to do this.
Here's my scraper:
import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.item import Field
from itemloaders.processors import TakeFirst
from scrapy.loader import ItemLoader


class ZooplasItem(scrapy.Item):
    stuff = Field()


class ZooplasSpider(scrapy.Spider):
    name = 'zooplas'
    start_urls = ['https://www.zoopla.co.uk/overseas/']

    def start_request(self):
        for url in self.start_urls:
            yield scrapy.Request(
                url,
                callback=self.parse, )

    def parse(self, response):
        container = response.xpath("//ul[@class='list-inline list-unstyled']//li")
        for links in container:
            urls = links.xpath(".//a/@href").get()
            yield response.follow(
                urls, callback=self.parse_places
            )

    def parse_places(self, response):
        container = response.xpath("//ul[@class='listing-results clearfix js-gtm-list']//li")
        for links in container:
            urls = links.xpath('(//div[@class="listing-results-right clearfix"]//a)[position() mod 3=1]//@href').get()
            yield response.follow(
                urls, callback=self.parse_listings
            )

        if response.xpath("//div[@id='content']//div//h1//text()").extract_first():
            page_on = response.xpath("//div[@id='content']//div//h1//text()").extract_first()
            name_of_page = page_on.split()[-1]
        else:
            pass

        if response.xpath("(//div[@class='paginate bg-muted'])//a[last()-1]//href").extract_first():
            url_link = response.xpath("(//div[@class='paginate bg-muted'])//a[last()-1]//href").extract_first()
            url_link = url_link.split('/')
            last_page = url_link[-1].split('=')[-1]
        else:
            pass

        all_pages = []
        for index, n in enumerate(url_link):
            for page_name, page_num in zip(name_of_page, last_page):
                if index == 5:
                    url_link[index] = page_name
                    testit = '/'.join(url_link)
                    equal_split = testit.split('=')
                    for another_i, n2 in enumerate(equal_split):
                        if another_i == 3:
                            for range_val in range(1, page_num + 1):
                                equal_split[another_i] = str(2)
                                all_pages.append('='.join(equal_split))

        for urls in all_pages:
            yield response.follow(
                urls, callback=self.parse.places
            )

    def parse_listings(self, response):
        loader = ItemLoader(ZooplasItem(), response=response)
        loader.default.output_processor = TakeFirst()
        loader.add_xpath("//article[@class='dp-sidebar-wrapper__summary']//h1//text()")
        yield loader.load_item()


process = CrawlerProcess(
    settings={
        'FEED_URI': 'zoopla.jl',
        'FEED_FORMAT': 'jsonlines'
    }
)
process.crawl(ZooplasSpider)
process.start()
I know that the way of building the URLs works, as I have tried it on a single URL using the following:
url = "https://www.zoopla.co.uk/overseas/property/ireland/?new_homes=include&include_sold=false&pn=16"
list_of_stuff = ['Ireland', 'Germany','France']
pages_of_stuff = [5, 7, 6]
test = []
all_pages = []
j=0
for index, n in enumerate(a):
for l_stuff, p_stuff in zip(list_of_stuff,pages_of_stuff):
if index == 5:
a[index] = l_stuff
testit='/'.join(a)
equal_split = testit.split('=')
for another_i, n2 in enumerate(equal_split):
if another_i == 3:
for range_val in range(1, p_stuff+1):
equal_split[another_i] = str(range_val)
print('='.join(equal_split))
This is the same as the code used above, just with different variable names. It outputs the following links, and they work:
https://www.zoopla.co.uk/overseas/property/Ireland/?new_homes=include&include_sold=false&pn=1
https://www.zoopla.co.uk/overseas/property/Ireland/?new_homes=include&include_sold=false&pn=2
https://www.zoopla.co.uk/overseas/property/Ireland/?new_homes=include&include_sold=false&pn=3
https://www.zoopla.co.uk/overseas/property/Ireland/?new_homes=include&include_sold=false&pn=4
https://www.zoopla.co.uk/overseas/property/Ireland/?new_homes=include&include_sold=false&pn=5
https://www.zoopla.co.uk/overseas/property/Germany/?new_homes=include&include_sold=false&pn=1
https://www.zoopla.co.uk/overseas/property/Germany/?new_homes=include&include_sold=false&pn=2
...
Your use case is well suited to Scrapy's CrawlSpider. You can write rules for how to extract links to the properties and how to extract links to the next pages. I have changed your code to use a CrawlSpider class and changed your feed settings to the recommended FEEDS setting, since FEED_URI and FEED_FORMAT are deprecated in newer versions of Scrapy.
Read more about the CrawlSpider in the docs.
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.crawler import CrawlerProcess
from scrapy.spiders import CrawlSpider, Rule
from scrapy.item import Field
from itemloaders.processors import TakeFirst
from scrapy.loader import ItemLoader


class ZooplasItem(scrapy.Item):
    stuff = Field()
    country = Field()


class ZooplasSpider(CrawlSpider):
    name = 'zooplas'
    allowed_domains = ['zoopla.co.uk']
    start_urls = ['https://www.zoopla.co.uk/overseas/']

    rules = (
        Rule(LinkExtractor(restrict_css='a.link-novisit'), follow=True),  # follow the country links
        Rule(LinkExtractor(restrict_css='div.paginate'), follow=True),  # follow pagination links
        Rule(LinkExtractor(restrict_xpaths="//a[contains(@class,'listing-result')]"), callback='parse_item', follow=True),  # follow the link to the actual property listing
    )

    def parse_item(self, response):
        # here you are on the details page for each property
        loader = ItemLoader(ZooplasItem(), response=response)
        loader.default_output_processor = TakeFirst()
        loader.add_xpath("stuff", "//article[@class='dp-sidebar-wrapper__summary']//h1//text()")
        loader.add_xpath("country", "//li[@class='ui-breadcrumbs__item'][3]/a/text()")
        yield loader.load_item()


if __name__ == '__main__':
    process = CrawlerProcess(
        settings={
            'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.80 Safari/537.36',
            'FEEDS': {
                'zoopla.jl': {
                    'format': 'jsonlines'
                }
            }
        }
    )
    process.crawl(ZooplasSpider)
    process.start()
Can anyone help me? I'm practicing and I can't understand what I did wrong with pagination. It only returns the first page, and sometimes this error comes up. When it works, it still just returns the first page.
"The source list for the Content Security Policy directive 'frame-src' contains an invalid source '*trackcmp.net' It will be ignored", source: https://naturaldaterra.com.br/hortifruti.html?page=2"
import scrapy
from scrapy_selenium import SeleniumRequest


class ComputerdealsSpider(scrapy.Spider):
    name = 'produtos'

    def start_requests(self):
        yield SeleniumRequest(
            url='https://naturaldaterra.com.br/hortifruti.html?page=1',
            wait_time=3,
            callback=self.parse
        )

    def parse(self, response):
        for produto in response.xpath("//div[@class='gallery-items-1IC']/div"):
            yield {
                'nome_produto': produto.xpath(".//div[@class='item-nameContainer-1kz']/span/text()").get(),
                'valor_produto': produto.xpath(".//span[@class='itemPrice-price-1R-']/text()").getall(),
            }

        next_page = response.xpath("//button[@class='tile-root-1uO'][1]/text()").get()
        if next_page:
            absolute_url = f"https://naturaldaterra.com.br/hortifruti.html?page={next_page}"
            yield SeleniumRequest(
                url=absolute_url,
                wait_time=3,
                callback=self.parse
            )
The problem is that your xpath selector returns None instead of the next page number. Consider changing it from
next_page = response.xpath("//button[#class='tile-root-1uO'][1]/text()").get()
to
next_page = response.xpath("//button[#class='tile-root_active-TUl tile-root-1uO']/following-sibling::button[1]/text()").get()
For your future projects, consider using scrapy-playwright to scrape JS-rendered websites. It is fast and simple to use. See a sample implementation of your scraper using scrapy-playwright:
import scrapy
from scrapy.crawler import CrawlerProcess


class ComputerdealsSpider(scrapy.Spider):
    name = 'produtos'

    def start_requests(self):
        yield scrapy.Request(
            url='https://naturaldaterra.com.br/hortifruti.html?page=1',
            meta={"playwright": True}
        )

    def parse(self, response):
        for produto in response.xpath("//div[@class='gallery-items-1IC']/div"):
            yield {
                'nome_produto': produto.xpath(".//div[@class='item-nameContainer-1kz']/span/text()").get(),
                'valor_produto': produto.xpath(".//span[@class='itemPrice-price-1R-']/text()").getall(),
            }

        # scrape next page: the active page button's next sibling holds the next page number
        next_page = response.xpath(
            "//button[@class='tile-root_active-TUl tile-root-1uO']/following-sibling::button[1]/text()").get()
        if next_page:  # stop when there is no next page button
            yield scrapy.Request(
                url='https://naturaldaterra.com.br/hortifruti.html?page=' + next_page,
                meta={"playwright": True}
            )


if __name__ == "__main__":
    process = CrawlerProcess(settings={
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
        "DOWNLOAD_HANDLERS": {
            "https": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
        },
    })
    process.crawl(ComputerdealsSpider)
    process.start()
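Note that scrapy-playwright is an extra dependency: it has to be installed and a browser downloaded first (pip install scrapy-playwright, then playwright install chromium) before the example above will run.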
I use Scrapy and Selenium for crawling. My site uses AJAX for pagination, so the URL doesn't change and response.body doesn't change either. I want to click with Selenium (for pagination), get self.driver.page_source, and use it instead of response.body.
So I wrote this code:
res = scrapy.http.TextResponse(url=self.driver.current_url, body=self.driver.page_source,
                               encoding='utf-8')
print(str(res))  # nothing to print!
for quote in res.css("#ctl00_ContentPlaceHolder1_Grd_Dr_DXMainTable > tr.dxgvDataRow_Office2003Blue"):
    i = i + 1
    item = dict()
    item['id'] = int(quote.css("td.dxgv:nth-child(1)::text").extract_first())
And there is no error!
You can replace the body of the original response in Scrapy by using the response.replace() method:
def parse(self, response):
    response = response.replace(body=driver.page_source)
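Building on that, here is a minimal sketch of how the Selenium click for pagination could be combined with response.replace(). The spider name, the next-page selector, and the fixed sleep are assumptions, and self.driver is presumed to be created elsewhere in the spider (e.g. in __init__):

import time

import scrapy
from selenium.webdriver.common.by import By


class AjaxPaginationSpider(scrapy.Spider):
    name = 'ajax_pagination'   # hypothetical spider name

    def parse(self, response):
        while True:
            # re-read the DOM currently rendered by Selenium
            response = response.replace(body=self.driver.page_source)
            for quote in response.css("#ctl00_ContentPlaceHolder1_Grd_Dr_DXMainTable "
                                      "> tr.dxgvDataRow_Office2003Blue"):
                yield {'id': quote.css("td.dxgv:nth-child(1)::text").get()}

            try:
                # the pager selector is an assumption -- adjust it to the real control
                next_button = self.driver.find_element(By.CSS_SELECTOR, "a.next-page")
            except Exception:
                break   # no next-page button: stop paginating
            next_button.click()
            time.sleep(3)   # crude wait for the AJAX update; an explicit wait is better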
I have run across an issue in which my Lua script refuses to execute. The response returned from the SplashRequest call seems to be an HTML body, while I'm expecting a document title. I am assuming the Lua script is never being called, as it seems to have no apparent effect on the response. I have dug through the documentation a lot and can't quite figure out what is missing here. Does anyone have any suggestions?
from urlparse import urljoin

import scrapy
from scrapy_splash import SplashRequest

GOOGLE_BASE_URL = 'https://www.google.com/'
GOOGLE_QUERY_PARAMETERS = '#q={query}'
GOOGLE_SEARCH_URL = urljoin(GOOGLE_BASE_URL, GOOGLE_QUERY_PARAMETERS)
GOOGLE_SEARCH_QUERY = 'example search query'

LUA_SCRIPT = """
function main(splash)
    assert(splash:go(splash.args.url))
    return splash:evaljs("document.title")
end
"""

SCRAPY_CRAWLER_NAME = 'google_crawler'
SCRAPY_SPLASH_ENDPOINT = 'render.html'
SCRAPY_ARGS = {
    'lua_source': LUA_SCRIPT
}


def get_search_url(query):
    return GOOGLE_SEARCH_URL.format(query=query)


class GoogleCrawler(scrapy.Spider):
    name = SCRAPY_CRAWLER_NAME
    search_url = get_search_url(GOOGLE_SEARCH_QUERY)

    def start_requests(self):
        response = SplashRequest(self.search_url,
                                 self.parse, endpoint=SCRAPY_SPLASH_ENDPOINT, args=SCRAPY_ARGS)
        yield response

    def parse(self, response):
        doc_title = response.body_as_unicode()
        print doc_title
The 'endpoint' argument of SplashRequest must be 'execute' in order to run a Lua script; it is 'render.html' in the example. Return the title from the script inside a table and use the 'execute' endpoint in the request:
LUA_SCRIPT = """
function main(splash)
assert(splash:go(splash.args.url))
return title = splash:evaljs("document.title")
end
"""
def start_requests(self):
SplashRequest(self.search_url,self.parse, endpoint='execute',args=SCRAPY_ARGS)
You can recover the value with response.data['title'].
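For completeness, the matching callback would then read that key from the decoded Splash result (a minimal sketch; the rest of the spider stays as in the question):

def parse(self, response):
    # with endpoint='execute' and a Lua table return value,
    # scrapy-splash exposes the decoded JSON as response.data
    doc_title = response.data['title']
    print doc_title   # Python 2 print, matching the question's code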