I am using Scrapy to scrape a Hebrew website. However, even after encoding the scraped data into UTF-8, I am not able to get the Hebrew characters: I get a weird string (× ×¨×¡×™ בעמ) in the CSV. If I print the same item, I see the correct string on the terminal.
Following is the website I am using.
http://www.moch.gov.il/rasham_hakablanim/Pages/pinkas_hakablanim.aspx
class Spider(BaseSpider):
    name = "moch"
    allowed_domains = ["www.moch.gov.il"]
    start_urls = ["http://www.moch.gov.il/rasham_hakablanim/Pages/pinkas_hakablanim.aspx"]

    def parse(self, response):
        data = {'ctl00$ctl13$g_dbcc924d_5066_4fee_bc5c_6671d3e2c06d$ctl00$cboAnaf': unicode(140),
                'SearchFreeText:': u'חפש',
                'ctl00$ctl13$g_dbcc924d_5066_4fee_bc5c_6671d3e2c06d$ctl00$txtShemKablan': u'',
                'ctl00$ctl13$g_dbcc924d_5066_4fee_bc5c_6671d3e2c06d$ctl00$txtMisparYeshut': u'',
                'ctl00$ctl13$g_dbcc924d_5066_4fee_bc5c_6671d3e2c06d$ctl00$txtShemYeshuv': u'הקלד יישוב',
                'ctl00$ctl13$g_dbcc924d_5066_4fee_bc5c_6671d3e2c06d$ctl00$txtMisparKablan': u'',
                'ctl00$ctl13$g_dbcc924d_5066_4fee_bc5c_6671d3e2c06d$ctl00$btnSearch': u'חפש',
                'ctl00$ScriptManager1': u'ctl00$ctl13$g_dbcc924d_5066_4fee_bc5c_6671d3e2c06d$ctl00$UpdatePanel1|ctl00$ctl13$g_dbcc924d_5066_4fee_bc5c_6671d3e2c06d$ctl00$btnSearch'}
        yield FormRequest.from_response(response,
                                        formdata=data,
                                        callback=self.fetch_details,
                                        dont_click=True)
    def fetch_details(self, response):
        # print response.body
        hxs = HtmlXPathSelector(response)
        item = MochItem()
        names = hxs.select("//table[@id='ctl00_ctl13_g_dbcc924d_5066_4fee_bc5c_6671d3e2c06d_ctl00_gridRashamDetails']//tr/td[2]/font/text()").extract()
        phones = hxs.select("//table[@id='ctl00_ctl13_g_dbcc924d_5066_4fee_bc5c_6671d3e2c06d_ctl00_gridRashamDetails']//tr/td[6]/font/text()").extract()
        index = 0
        for name in names:
            item['name'] = name.encode('utf-8')
            item['phone'] = phones[index].encode('utf-8')
            index += 1
            print item  # This is printed correctly on the terminal.
            yield item  # But in the CSV output file I do not see a proper Hebrew string.
The weird thing is, if I open the same CSV in Notepad++, I can see the correct output. So as a workaround I opened the CSV in Notepad++, changed the encoding to UTF-8 and saved it. Now when I open the CSV in Excel again, it shows the correct Hebrew strings.
Is there any way to specify the CSV encoding from within Scrapy?
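For what it's worth, newer Scrapy versions have a FEED_EXPORT_ENCODING setting that controls the encoding of exported feeds. A minimal sketch, assuming your Scrapy version supports it (utf-8-sig is my guess for getting Excel to detect the encoding via a BOM):

# settings.py
FEED_EXPORT_ENCODING = 'utf-8-sig'  # plain 'utf-8' also works if Excel is not the consumer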
I'm trying to extract some fields from the start_url page, and I want to add a PDF-link field obtained from each URL found on that page. I tried Scrapy but had no luck adding the PDF field. Here is my code:
import scrapy

class MybookSpider(scrapy.Spider):
    name = 'mybooks'
    allowed_domains = ['gln.kemdikbud.go.id']
    start_urls = ['https://gln.kemdikbud.go.id/glnsite/category/modul-gls/page/1/']

    def parse(self, response):
        # pass
        # gathering all links
        book_urls = response.xpath("//div[@class='td-module-thumb']//a/@href").getall()
        total_url = len(book_urls)
        i = 0
        for a in range(total_url):
            title = response.xpath("//h3[@class='entry-title td-module-title']//a/text()")[i].extract()
            url_source = response.xpath("//div[@class='td-module-thumb']//a/@href")[i].get()
            thumbnail = response.xpath('//*[@class="td-block-span4"]//*[has-class("entry-thumb")]//@src')[i].extract()
            pdf = scrapy.Request(book_urls[i], self.find_details)
            yield {
                'Book Title': title,
                'URL': url_source,
                'Mini IMG': thumbnail,
                'PDF Link': pdf
            }
            i += 1

    def find_details(self, response):
        # find PDF link
        pdf = response.xpath("//div[@class='td-post-content']//a/@href").get()
        return pdf
How do I correctly add the PDF-link field when I export to CSV? Thanks in advance.
pdf = scrapy.Request(book_urls[i], self.find_details)
This means the pdf variable is a Request object, not the link itself.
Scrapy is asynchronous, so you'll have trouble getting a return value back from a callback. Just make a request and pass the partially filled item to the callback with cb_kwargs.
import scrapy

class MybookSpider(scrapy.Spider):
    name = 'mybooks'
    allowed_domains = ['gln.kemdikbud.go.id']
    start_urls = ['https://gln.kemdikbud.go.id/glnsite/category/modul-gls/page/1/']

    def parse(self, response):
        # gathering all links
        book_urls = response.xpath("//div[@class='td-module-thumb']//a/@href").getall()
        total_url = len(book_urls)

        for i in range(total_url):
            item = dict()
            item['title'] = response.xpath("//h3[@class='entry-title td-module-title']//a/text()")[i].extract()
            item['url_source'] = response.xpath("//div[@class='td-module-thumb']//a/@href")[i].get()
            item['thumbnail'] = response.xpath('//*[@class="td-block-span4"]//*[has-class("entry-thumb")]//@src')[i].extract()
            yield scrapy.Request(url=book_urls[i], callback=self.find_details, cb_kwargs={'item': item})

    def find_details(self, response, item):
        # find PDF link
        item['pdf'] = response.xpath("//div[@class='td-post-content']//a/@href").get()
        yield item
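Note that cb_kwargs only exists in reasonably recent Scrapy versions (it was added in 1.7, if I remember correctly). With that in place you can let the feed exporter do the CSV work, for example:

scrapy crawl mybooks -o books.csv

and each row should contain the title, url_source, thumbnail and pdf fields yielded by find_details.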
I am trying to scrape all the hyperlinks of the website for all the news articles in order to extract their content. I am able to retrieve the data from the articles that are loaded when you open the website, but when you scroll down more articles are loaded automatically by a scroll event. Currently I am using scrapy_splash, but I receive the same number of links as when I do not use Splash. I hope you can help me out. The spider class is FinanceNewsScraperSpider(scrapy.Spider). Below you can see my code:
import csv
from datetime import datetime

import scrapy
from scrapy_splash import SplashRequest

from ..items import AudiItem  # assuming AudiItem lives in the project's items.py


class FinanceNewsScraperSpider(scrapy.Spider):
    name = "audinewsarticles"

    def start_requests(self):
        start_urls = ['https://www.reuters.com/companies/NSUG.DE/news',
                      ]
        urls = start_urls
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse_newspage)

    def parse_newspage(self, response):
        links = response.xpath('//a[contains(@href,"/article/")]/@href').extract()  # extract hyperlinks
        for url in links:
            yield SplashRequest(url=url,
                                callback=self.parse_article,
                                endpoint='render.html')

    def parse_article(self, response):
        item = AudiItem()
        item['article_link'] = response.url
        item['article_headline'] = response.xpath('//*[contains(@class,"ArticleHeader_headline")]/text()').extract()
        item['article_date'] = response.xpath('//*[contains(@class,"ArticleHeader_date")]/text()').extract()
        item['article_text'] = response.xpath('//div[@class="StandardArticleBody_body"]//p/text()').extract()
        print(item)

        # saving data to file
        path = 'news/'
        file = 'audinews_' + str(datetime.now().strftime("%Y%m%d-%H%M")) + '.csv'
        file_name = open(path + file, 'a')
        fieldnames = ['article_link', 'article_headline', 'article_date', 'article_text']  # adding header to file
        writer = csv.writer(file_name, lineterminator='\n')
        writer.writerow([item[key] for key in item.keys()])
Please let me know if you need more information from me.
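One likely cause: the extra articles only appear after scroll events, and endpoint='render.html' only renders the page as it initially loads. A possible approach, sketched below under the assumption that the scrapy-splash middleware is already configured, is to drive Splash with a small Lua script that scrolls a few times before returning the HTML (the scroll count and wait times are guesses):

import scrapy
from scrapy_splash import SplashRequest

SCROLL_SCRIPT = """
function main(splash, args)
    splash:go(args.url)
    splash:wait(1.0)
    -- scroll a few times so lazy-loaded articles are added to the DOM
    for i = 1, 5 do
        splash:runjs("window.scrollTo(0, document.body.scrollHeight)")
        splash:wait(1.0)
    end
    return splash:html()
end
"""

class FinanceNewsScraperSpider(scrapy.Spider):
    name = "audinewsarticles"

    # only start_requests changes; the rest of the spider stays as in the question
    def start_requests(self):
        for url in ['https://www.reuters.com/companies/NSUG.DE/news']:
            yield SplashRequest(url=url,
                                callback=self.parse_newspage,
                                endpoint='execute',
                                args={'lua_source': SCROLL_SCRIPT})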
In the code below I am trying to collect email ids from a website. They can be on the contact or about-us page.
From the parse method I follow with the extemail method for all those pages.
From every page I collect a few email ids.
Now I need to print them together with the original record passed to the __init__ method.
For example:
record = "https://www.wockenfusscandies.com/"
I want to print the output as:
https://www.wockenfusscandies.com/|abc@gmail.com|def@outlook.com
I am not able to store them in self.emails and deliver them back to the __init__ method.
Please help.
import scrapy
from scrapy.crawler import CrawlerProcess


class EmailSpider(scrapy.Spider):

    def __init__(self, record):
        self.record = record
        self.emails = []
        url = record.split("|")[4]
        if not url.startswith("http"):
            url = "http://{}".format(url)
        if url:
            self.start_urls = ["https://www.wockenfusscandies.com/"]
        else:
            self.start_urls = []

    def parse(self, response):
        contact_list = [a.attrib['href'] for a in response.css('a')
                        if 'contact' in a.attrib['href'] or 'about' in a.attrib['href']]
        contact_list.append(response.request.url)
        for fllink in contact_list:
            yield response.follow(fllink, self.extemail)

    def extemail(self, response):
        emails = response.css('body').re(r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+')
        yield {
            'emails': emails
        }


process = CrawlerProcess({
    'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'
})

f = open("/Users/kalpesh/work/data/test.csv")
for rec in f:
    process.crawl(EmailSpider, record=rec)
f.close()

process.start()
If I understand your intent correctly, you could try the following:
a) collect the mail ids in self.emails, like
def extemail(self, response):
    emails = response.css('body').re(r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+')
    self.emails = emails.copy()
    yield {
        'emails': emails
    }
(or however else you get the email ids out of emails)
b) add a close(self, reason) method, as in the GitHub example, which is called when the spider has finished:
def close(self, reason):
    mails_for_record = ""
    for mail in self.emails:
        mails_for_record += mail + "|"
    print(self.record + mails_for_record)
Please also note that for some versions of Scrapy the hook is def close(self, reason), while for others it is def closed(self, reason).
Hope this approach helps you.
You should visit all of the site's pages before yielding the result for that site.
This means you need a queue of pages to visit and a place to accumulate results.
It can be done using meta.
Some pseudocode:
def parse(self, response):
    meta = response.meta
    if not meta.get('seen'):
        # -- finding urls of contact and about us pages --
        # -- putting them into meta['queue'] --
        # -- setting meta['seen'] = True --

    page_emails_found = ...getting emails here...

    # --- extending the already discovered emails from other pages
    # --- (or the initial empty list) with the new ones
    meta['emails'].extend(page_emails_found)

    # if the queue isn't empty - yield a new request
    if meta['queue']:
        next_url = meta['queue'].pop()
        yield Request(next_url, callback=self.parse, meta=copy(meta))
    # if the queue is empty - yield the result from meta
    else:
        yield {'url': current_domain, 'emails': meta['emails']}
Something like this.
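A slightly more concrete sketch of the same idea, reusing the names from the question (the link filtering, the dict copy of meta and the final result shape are my assumptions; this single parse method would replace both parse and extemail):

    def parse(self, response):
        meta = response.meta
        if not meta.get('seen'):
            # first visit for this site: build the queue of contact/about pages once
            queue = [response.urljoin(href)
                     for href in response.css('a::attr(href)').getall()
                     if 'contact' in href or 'about' in href]
            meta = {'seen': True, 'queue': queue, 'emails': []}

        # collect whatever email addresses the current page contains
        found = response.css('body').re(r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+')
        meta['emails'].extend(found)

        if meta['queue']:
            # more pages left: carry the accumulated state along with the next request
            next_url = meta['queue'].pop()
            yield scrapy.Request(next_url, callback=self.parse, meta=dict(meta))
        else:
            # queue exhausted: emit one result for the whole site
            yield {'url': self.record, 'emails': sorted(set(meta['emails']))}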
I have written this short spider to extract titles from the Hacker News front page (http://news.ycombinator.com/).
import scrapy

class HackerItem(scrapy.Item):  # declaring the item
    hackertitle = scrapy.Field()

class HackerSpider(scrapy.Spider):
    name = 'hackernewscrawler'
    allowed_domains = ['news.ycombinator.com']  # website we chose
    start_urls = ['http://news.ycombinator.com/']

    def parse(self, response):
        sel = scrapy.Selector(response)  # selector to help us extract the titles
        item = HackerItem()  # the item declared above

        # xpath of the titles
        item['hackertitle'] = sel.xpath("//tr[@class='athing']/td[3]/a[@href]/text()").extract()

        # printing titles using a print statement
        print(item['hackertitle'])
However when I run the code with scrapy crawl hackernewscrawler -o hntitles.json -t json
I get an empty .json file that does not have any content in it.
You should change the print statement to a yield:
import scrapy

class HackerItem(scrapy.Item):  # declaring the item
    hackertitle = scrapy.Field()

class HackerSpider(scrapy.Spider):
    name = 'hackernewscrawler'
    allowed_domains = ['news.ycombinator.com']  # website we chose
    start_urls = ['http://news.ycombinator.com/']

    def parse(self, response):
        sel = scrapy.Selector(response)  # selector to help us extract the titles
        item = HackerItem()  # the item declared above

        # xpath of the titles
        item['hackertitle'] = sel.xpath("//tr[@class='athing']/td[3]/a[@href]/text()").extract()

        # return the item
        yield item
Then run:
scrapy crawl hackernewscrawler -o hntitles.json -t json
I use Scrapy and Selenium to crawl. My site uses AJAX for pagination, so the URL does not change and response.body does not change either. I want to click with Selenium (for pagination), get self.driver.page_source, and use it instead of response.body.
So I wrote this code:
res = scrapy.http.TextResponse(url=self.driver.current_url, body=self.driver.page_source,
                               encoding='utf-8')
print(str(res))  # nothing is printed!
for quote in res.css("#ctl00_ContentPlaceHolder1_Grd_Dr_DXMainTable > tr.dxgvDataRow_Office2003Blue"):
    i = i + 1
    item = dict()
    item['id'] = int(quote.css("td.dxgv:nth-child(1)::text").extract_first())
And there is no error!
You can replace the body of the original response in Scrapy by using the response.replace() method:
def parse(self, response):
    response = response.replace(body=driver.page_source)
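Combined with the grid-parsing loop from the question, that might look roughly like this (a sketch: self.driver is the Selenium driver the spider already owns, and the actual pagination click is left as a comment because the control's selector isn't shown in the question):

    def parse(self, response):
        self.driver.get(response.url)
        # ... perform the Selenium click for the next AJAX page here ...
        # then swap the downloaded body for what the browser currently renders
        response = response.replace(body=self.driver.page_source)
        for quote in response.css("#ctl00_ContentPlaceHolder1_Grd_Dr_DXMainTable "
                                  "> tr.dxgvDataRow_Office2003Blue"):
            item = dict()
            item['id'] = int(quote.css("td.dxgv:nth-child(1)::text").extract_first())
            yield item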