This is my code:
from scrapy.spider import Spider
from scrapy.selector import Selector

from thuctapsinhvietnam.items import ThuctapsinhvietnamItem


class ThuctapsinhvietnamSpider(Spider):
    name = "thuctapsinhvietnam"
    allowed_domains ["thuctapsinhvietnam.com"]
    start_urls = [
        "http://www.thuctapsinhvietnam.com/thuctap-phuong-vinh-hao-3970.html"
    ]

    def parse(self, response):
    def parse(self, response):
        cases = Selector(response).xpath('//div[@id="areaThongTinCaNhan"]/table/tbody/tr[2]/td/table/tbody/tr/td/table/tbody/tr')
        for case in cases:
            item = ThuctapsinhvietnamItem()
            item['Name'] = question.xpath(
                '//td[3][@class="thongtin"]/text()')extract()[0]
            item['Phone'] = question.xpath(
                '//td[3][@class="thongtin"]/text()')extract()[0]
            item['Email'] = question.xpath(
                '//td[3][@class="thongtin"]/text()')extract()[0]
            item['Dob'] = question.xpath(
                '//td[3][@class="thongtin"]/text()')extract()[0]
            item['Sex'] = question.xpath(
                '//td[3][@class="thongtin"]/text()')extract()[0]
            item['School'] = question.xpath(
                '//td[3][@class="thongtin"]/text()')extract()[0]
            item['Introduction'] = question.xpath(
                '//td[3][@class="thongtin"]/text()')extract()[0]
            item['Place'] = question.xpath(
                '//td[3][@class="thongtin"]/text()')extract()[0]
            yield item
I have a problem when trying to run Scrapy on Ubuntu; I use XPath to get the data. I am trying to scrape information from this website, but I get this error:

  File "thuctapsinhvietnam_spider.py", line 15
    cases = Selector(response).xpath('//div[@id="areaThongTinCaNhan"]/table/tbody/tr[2]/td/table/tbody/tr/td/table/tbody/tr')
        ^
IndentationError: expected an indented block
You have two of these:

def parse(self, response):
def parse(self, response):

The first def parse has no indented body (the very next line is another def at the same level), which is exactly what the "IndentationError: expected an indented block" message is pointing at. Delete one of the two definitions.
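For reference, here is a cleaned-up sketch with a single parse method. Beyond removing the duplicate def, it also assumes a few other fixes the question's snippet needs before it can run: an = after allowed_domains, a dot before extract(), and using the case loop variable instead of the undefined question. The XPath expressions are kept from the question and only one field is shown; repeat the pattern for Phone, Email, and the rest.

from scrapy.spider import Spider
from scrapy.selector import Selector

from thuctapsinhvietnam.items import ThuctapsinhvietnamItem


class ThuctapsinhvietnamSpider(Spider):
    name = "thuctapsinhvietnam"
    allowed_domains = ["thuctapsinhvietnam.com"]  # note the "="
    start_urls = [
        "http://www.thuctapsinhvietnam.com/thuctap-phuong-vinh-hao-3970.html",
    ]

    def parse(self, response):  # a single parse method
        # one table row per case in the personal-information block
        cases = Selector(response).xpath(
            '//div[@id="areaThongTinCaNhan"]/table/tbody/tr[2]'
            '/td/table/tbody/tr/td/table/tbody/tr')
        for case in cases:
            item = ThuctapsinhvietnamItem()
            # query relative to the current row ("case"), and note the dot
            # before extract(); the td index per field is an assumption
            item['Name'] = case.xpath('td[3][@class="thongtin"]/text()').extract()[0]
            yield item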
import scrapy


class QuotesSpider(scrapy.Spider):
    name = "quotes"
    start_urls = [
        'https://www.tripadvisor.com/Restaurant_Review-g298006-d740275-Reviews-Deniz_Restaurant-Izmir_Izmir_Province_Turkish_Aegean_Coast.html',
    ]

    def parse(self, response):
        for quote in response.css('div.quote'):
            yield {
                'author': response.xpath("//div[contains(@class, 'member_info')]//div/text()").extract(),
                'rating': response.xpath("//span[contains(@class, 'ui_bubble_rating')]/@alt").extract(),
                'comment_tag': response.xpath("//span[contains(@class, 'noQuotes')]/text()").extract(),
                'comment': response.xpath('//div[@class="entry"]/p/text()').extract()
            }
        next_page = response.xpath("//div[contains(@class, 'unified')]/a[contains(@class, 'next')]/@href").extract_first()
        if next_page is not None:
            yield response.follow(next_page, callback=self.parse)
This is my spider code. My problem is that when I crawl and export to JSON, there is a lot of repeated data from the website.
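The repetition most likely comes from calling response.xpath(...) with absolute //... paths inside the per-quote loop, so every yielded item collects every match on the whole page. A minimal sketch of the usual fix, as a replacement parse method for the spider above, reusing the selectors from the question (whether div.quote actually matches TripAdvisor's review markup is a separate assumption): scope each query to the current selector.

    def parse(self, response):
        for quote in response.css('div.quote'):
            # ".//" keeps each query scoped to this quote's subtree
            # instead of matching across the whole document
            yield {
                'author': quote.xpath(".//div[contains(@class, 'member_info')]//div/text()").extract_first(),
                'rating': quote.xpath(".//span[contains(@class, 'ui_bubble_rating')]/@alt").extract_first(),
                'comment_tag': quote.xpath(".//span[contains(@class, 'noQuotes')]/text()").extract_first(),
                'comment': quote.xpath('.//div[@class="entry"]/p/text()').extract_first(),
            }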
… I have Scrapy code that runs in the shell, but when I try to export to CSV it returns an empty file. It does export data when I do not go into a link and try to parse the description.
import scrapy


class DmozSpider(scrapy.Spider):
    name = "dmoz"
    allowed_domains = ["dmozs.org"]
    start_urls = [
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/"
    ]

    def parse(self, response):
        filename = response.url.split("/")[-2] + '.html'
        with open(filename, 'wb') as f:
            f.write(response.body)
import scrapy


class DmozSpider(scrapy.Spider):
    name = "dmoz"
    allowed_domains = ["dmoz.org"]
    start_urls = [
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/"
    ]

    def parse(self, response):
        filename = response.url.split("/")[-2] + '.html'
        with open(filename, 'wb') as f:
            f.write(response.body)
You just have a typo:

allowed_domains = ["dmozs.org"]

should be

allowed_domains = ["dmoz.org"]

Please change that line in your code.
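As a side note, the spider above only writes each page to an .html file and never yields any items, so running scrapy crawl dmoz -o output.csv would still produce an empty CSV even with the domain fixed. A minimal sketch of a variant that yields items for the feed export; the title-and-desc selector is an assumption about the directory's listing markup, not something taken from the question:

import scrapy


class DmozItemsSpider(scrapy.Spider):
    # hypothetical variant that yields items, so
    # "scrapy crawl dmoz_items -o books.csv" has rows to write
    name = "dmoz_items"
    allowed_domains = ["dmoz.org"]
    start_urls = [
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
    ]

    def parse(self, response):
        # assumed listing markup: one link block per directory entry
        for link in response.xpath('//div[@class="title-and-desc"]/a'):
            yield {
                'title': link.xpath('div/text()').extract_first(),
                'url': link.xpath('@href').extract_first(),
            }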
import scrapy
import csv

from series.items import SeriesItem


class EpisodeScraperSpider(scrapy.Spider):
    name = "episode_scraper"
    allowed_domains = ["imdb.com"]
    start_urls = []

    def __init__(self, id=None, series=None, *args, **kwargs):
        super(EpisodeScraperSpider, self).__init__(*args, **kwargs)
        if id is not None:
            self.start_urls = ['http://www.imdb.com/title/{!s}/episodes?season={!s}'.format(id, series)]
        else:
            with open('series_episode.csv') as f:
                f_csv = csv.DictReader(f)
                for row in f_csv:
                    self.start_urls.append('http://www.imdb.com/title/{!s}/episodes?season={!s}'.format(row["id"], row["series"]))

    def parse(self, response):
        episodes = response.xpath('//div[contains(@class, "list_item")]')
        title = response.xpath('//h3/a/text()').extract()[0]
        for episode in episodes:
            global title
            item = SeriesItem()
            item['series_episode'] = episode.xpath('div/a/div[contains(@data-const,"tt")]/div/text()').extract()
            item['title'] = '{!s}: {!s}'.format(title, episode.xpath('div[@class="info"]/strong/a/text()').extract())
            item['imdb_id'] = episode.xpath('div[@class="image"]/a/div/@data-const').extract()
            item['airdate'] = [x.strip() for x in episode.xpath('div/div[@class="airdate"]/text()').extract()]
            yield item
When I run this spider through scrapyd I get no results, although it does produce results when run with scrapy directly. I think the problem is in this line:

with open('series_episode.csv') as f:

I don't know where to put my CSV file.
Please help me!
Thanks
One option would be to save it in /tmp:
with open('/tmp/series_episode.csv') as f:
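The underlying issue is that scrapyd does not run the spider from your project directory, so a bare relative filename like 'series_episode.csv' is resolved somewhere else (or not found at all). Besides hard-coding /tmp, here is a sketch of a more flexible variant: take the path as a spider argument (the csv_path name is made up here) and fall back to a path built next to the spider module. The fallback assumes the CSV file is actually deployed alongside the code, which may not hold for an egg-based scrapyd deployment, so passing the path explicitly is usually the safer option.

import csv
import os

import scrapy


class EpisodeScraperSpider(scrapy.Spider):
    name = "episode_scraper"
    allowed_domains = ["imdb.com"]

    def __init__(self, id=None, series=None, csv_path=None, *args, **kwargs):
        super(EpisodeScraperSpider, self).__init__(*args, **kwargs)
        self.start_urls = []
        if id is not None:
            self.start_urls = ['http://www.imdb.com/title/{!s}/episodes?season={!s}'.format(id, series)]
        else:
            if csv_path is None:
                # fallback: look for the file next to this spider module,
                # independent of scrapyd's working directory
                csv_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                        'series_episode.csv')
            with open(csv_path) as f:
                for row in csv.DictReader(f):
                    self.start_urls.append(
                        'http://www.imdb.com/title/{!s}/episodes?season={!s}'.format(
                            row["id"], row["series"]))

When scheduling through scrapyd, extra -d parameters are passed to the spider as keyword arguments, for example: curl http://localhost:6800/schedule.json -d project=series -d spider=episode_scraper -d csv_path=/tmp/series_episode.csv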
Here is my spider:
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from vrisko.items import VriskoItem
from scrapy.http import Request


class vriskoSpider(CrawlSpider):
    name = 'vrisko'
    allowed_domains = ['vrisko.gr']
    start_urls = ['http://www.vrisko.gr/search/%CE%B3%CE%B9%CE%B1%CF%84%CF%81%CE%BF%CF%82/%CE%BA%CE%BF%CF%81%CE%B4%CE%B5%CE%BB%CE%B9%CE%BF']
    rules = (Rule(SgmlLinkExtractor(allow=('\?page=\d')), 'parse_start_url', follow=True),)

    def parse_start_url(self, response):
        hxs = HtmlXPathSelector(response)
        subpages = hxs.select('//a[@class="detailsHyper_class"]/@href').extract()
        ep = hxs.select('//a[@itemprop="name"]/text()').extract()
        ad = hxs.select('//div[@class="results_address_class"]/text()').extract()
        for eponimia, address, subpage in zip(ep, ad, subpages):
            vriskoit = VriskoItem()
            vriskoit['eponimia'] = eponimia
            vriskoit['address'] = address
            request = Request(subpage, callback=self.subPage)
            request.meta['vriskoit'] = vriskoit
            yield request

    def subPage(self, response):
        vriskoit = response.meta['vriskoit']
        hxs = HtmlXPathSelector(response)
        vriskoit['category'] = hxs.select('//div[@class="category_class"]/span/text()').extract()
        yield vriskoit
and here is my pipeline:
import csv


class myExporter(object):

    def __init__(self):
        self.brandCategoryCsv = csv.writer(open('brandCategoryTable.csv', 'wb'))
        self.brandCategoryCsv.writerow(['eponimia', 'address', 'category'])

    def process_item(self, item, spider):
        for e, a, c in zip(item['eponimia'], item['address'], item['category']):
            self.brandCategoryCsv.writerow([e.encode('utf-8'), a.encode('utf-8'), c.encode('utf-8')])
        return item
My problem is that for the first two fields (eponimia, address), only the first character is written to the output CSV file, and I cannot find out why.
Any help would be much appreciated; I am out of ideas.
Remove the zip call from myExporter.process_item:

def process_item(self, item, spider):
    self.brandCategoryCsv.writerow([item['eponimia'].encode('utf-8'),
                                    item['address'].encode('utf-8'),
                                    item['category'].encode('utf-8')])
    return item
You already split the extracted lists into individual values in vriskoSpider.parse_start_url, so item['eponimia'] and item['address'] are plain strings by the time they reach the pipeline. zip then iterates over those strings character by character:
In [1]: a = 'test1'

In [2]: b = 'test2'

In [3]: for x, y in zip(a, b):
   ...:     print x, y
   ...:
t t
e e
s s
t t
1 2
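One follow-up worth noting: vriskoit['category'] is still a list (it comes straight from .extract() in subPage), so calling .encode('utf-8') on it in the corrected process_item above would raise an AttributeError. A small sketch of one way to deal with that, joining multiple categories into a single CSV cell (the ', ' separator is an arbitrary choice):

def process_item(self, item, spider):
    # item['category'] is a list of strings; join it before writing the row
    category = ', '.join(item['category'])
    self.brandCategoryCsv.writerow([item['eponimia'].encode('utf-8'),
                                    item['address'].encode('utf-8'),
                                    category.encode('utf-8')])
    return item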
Here is my spider:
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from vrisko.items import VriskoItem


class vriskoSpider(CrawlSpider):
    name = 'vrisko'
    allowed_domains = ['vrisko.gr']
    start_urls = ['http://www.vrisko.gr/search/%CE%B3%CE%B9%CE%B1%CF%84%CF%81%CE%BF%CF%82/%CE%BA%CE%BF%CF%81%CE%B4%CE%B5%CE%BB%CE%B9%CE%BF']
    rules = (
        Rule(SgmlLinkExtractor(allow=('\?page=\d')), callback='parse_vrisko'),
    )

    def parse_vrisko(self, response):
        hxs = HtmlXPathSelector(response)
        vriskoit = VriskoItem()
        vriskoit['eponimia'] = hxs.select("//a[@itemprop='name']/text()").extract()
        vriskoit['address'] = hxs.select("//div[@class='results_address_class']/text()").extract()
        print ' '.join(vriskoit['eponimia']).join(vriskoit['address'])
        return vriskoit
The pages I try to crawl have the format http://www.blabla.com/blabla/bla?page=x, where x is any integer.
My problem is that my spider crawls all of those pages except the first one!
Any ideas why this happens?
Thank you in advance!
If you look into the Scrapy docs, the responses for start_urls go to the parse method, so you can change your rule like this:

rules = (
    Rule(SgmlLinkExtractor(allow=('\?page=\d')), callback='parse'),
)

and rename the method from def parse_vrisko(self, response): to def parse(self, response):

Alternatively, you can remove start_urls and start your spider from def start_requests(self): with a callback to parse_vrisko.
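Another common way to get the first page parsed, without renaming parse_vrisko or dropping start_urls, is to override CrawlSpider's parse_start_url hook, which is where the start_urls responses end up; renaming your callback to parse is risky in a CrawlSpider, since it uses parse internally for its rule handling. A minimal sketch along the lines of the question's code:

from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from vrisko.items import VriskoItem


class vriskoSpider(CrawlSpider):
    name = 'vrisko'
    allowed_domains = ['vrisko.gr']
    start_urls = ['http://www.vrisko.gr/search/%CE%B3%CE%B9%CE%B1%CF%84%CF%81%CE%BF%CF%82/%CE%BA%CE%BF%CF%81%CE%B4%CE%B5%CE%BB%CE%B9%CE%BF']
    rules = (
        Rule(SgmlLinkExtractor(allow=('\?page=\d')), callback='parse_vrisko', follow=True),
    )

    def parse_start_url(self, response):
        # CrawlSpider sends the start_urls responses here, so the first
        # page goes through the same extraction as the ?page=N pages
        return self.parse_vrisko(response)

    def parse_vrisko(self, response):
        hxs = HtmlXPathSelector(response)
        vriskoit = VriskoItem()
        vriskoit['eponimia'] = hxs.select("//a[@itemprop='name']/text()").extract()
        vriskoit['address'] = hxs.select("//div[@class='results_address_class']/text()").extract()
        return vriskoit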