Duplicate filter file is not created in Scrapy - scrapy

This code was working before
import logging
import os

from scrapy.dupefilters import RFPDupeFilter  # scrapy.dupefilter in older Scrapy versions

class CustomFilter(RFPDupeFilter):
    def request_seen(self, request):
        fp = request.url
        if fp in self.fingerprints:  # if this condition is true, we have visited this url before
            logging.log(logging.INFO, "Ignoring url=%r" % request.url)
            return True
        else:  # otherwise we are about to scrape a details page url
            self.fingerprints.add(fp)
            logging.log(logging.INFO, "Scraping url=%r" % request.url)
            if self.file:
                self.file.write(fp + os.linesep)
            else:
                print("why is that happening?")
            return False
I specify the custom dupefilter class in my settings, and I keep getting "why is that happening?", which means self.file is not set.
I read the official code of RFPDupeFilter, and there should already be a file called requests.seen, but for some reason it is not being generated :(
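For reference, Scrapy's built-in RFPDupeFilter only opens that file when it is given a path, which happens when the crawl runs with a job directory. Without JOBDIR, self.file stays None and nothing is written. A minimal sketch of the settings that make the file appear (the module path and directory name are assumptions for illustration):

# settings.py (sketch)
DUPEFILTER_CLASS = 'myproject.filters.CustomFilter'  # assumed module path to the class above
JOBDIR = 'crawl_state'  # with a job directory set, RFPDupeFilter creates <JOBDIR>/requests.seen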

Related

xhtml2pdf in django - link_callback issue

I am trying to generate a PDF from an HTML template in Django. For that I use pretty much the basic methods found on the web. The issue is that I can get it to generate the content of my template, but not the images from my media directory. I always get the following error:
SuspiciousFileOperation at /manager/createpdf/
The joined path is located outside of the base path component
Since I can get some result, I assume that nothing is wrong with my view. Here is my render_to_pdf:
from io import BytesIO

from django.http import HttpResponse
from django.template.loader import get_template
from xhtml2pdf import pisa

def render_to_pdf(template_src, context_dict):
    template = get_template(template_src)
    html = template.render(context_dict)
    result = BytesIO()
    pdf = pisa.pisaDocument(BytesIO(html.encode('UTF-8')), result, link_callback=link_callback)
    if not pdf.err:
        return HttpResponse(result.getvalue(), content_type='application/pdf')
    return None
and the link_callback:
import os

from django.conf import settings
from django.contrib.staticfiles import finders

def link_callback(uri, rel):
    result = finders.find(uri)
    if result:
        if not isinstance(result, (list, tuple)):
            result = [result]
        result = list(os.path.realpath(path) for path in result)
        path = result[0]
    else:
        sUrl = settings.STATIC_URL
        sRoot = settings.STATIC_ROOT
        mUrl = settings.MEDIA_URL
        mRoot = settings.MEDIA_ROOT

        if uri.startswith(mUrl):
            path = os.path.join(mRoot, uri.replace(mUrl, ""))
        elif uri.startswith(sUrl):
            path = os.path.join(sRoot, uri.replace(sUrl, ""))
        else:
            return uri

    # make sure that file exists
    if not os.path.isfile(path):
        raise Exception('media URI must start with %s or %s' % (sUrl, mUrl))
    return path
I am pretty sure that the link_callback does not do its job, but my knowledge is too limited to patch it. I also assume that I configured the media directory correctly, since I can access the media files in other views/templates.
Help is very much appreciated, since I have spent quite some hours on this issue... A big thanks to all who are going to contribute here!
OK, I found it! I checked the find method in finders.py. It turns out that find only looks for files in the static directories and disregards the media directory. I just deleted those lines and it works now. Here is the code:
def link_callback(uri, rel):
    sUrl = settings.STATIC_URL
    sRoot = settings.STATIC_ROOT
    mUrl = settings.MEDIA_URL
    mRoot = settings.MEDIA_ROOT

    if uri.startswith(mUrl):
        path = os.path.join(mRoot, uri.replace(mUrl, ""))
    elif uri.startswith(sUrl):
        path = os.path.join(sRoot, uri.replace(sUrl, ""))
    else:
        return uri

    # make sure that file exists
    if not os.path.isfile(path):
        raise Exception('media URI must start with %s or %s' % (sUrl, mUrl))
    return path
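For completeness, here is a hedged sketch of a view that ties render_to_pdf and the fixed link_callback together; the template name and context values are invented for illustration, not taken from the question:

from django.http import HttpResponse

def create_pdf(request):
    # 'manager/report.html' is a placeholder template name
    context = {'title': 'Report', 'user': request.user}
    response = render_to_pdf('manager/report.html', context)
    if response is None:
        return HttpResponse('PDF generation failed', status=500)
    return response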

Need to return Scrapy callback method data to calling function

In the code below I am trying to collect email ids from a website. They can be on the contact or the about-us page.
From the parse method I follow all those pages with the extemail callback.
From every page I collect a few email ids.
Now I need to print them together with the original record passed to the __init__ method.
For example:
record = "https://www.wockenfusscandies.com/"
I want to print the output as:
https://www.wockenfusscandies.com/|abc@gmail.com|def@outlook.com
I am not able to store them in self.emails and deliver them back to the __init__ method.
Please help.
import scrapy
from scrapy.crawler import CrawlerProcess

class EmailSpider(scrapy.Spider):
    def __init__(self, record):
        self.record = record
        self.emails = []
        url = record.split("|")[4]
        if not url.startswith("http"):
            url = "http://{}".format(url)
        if url:
            self.start_urls = ["https://www.wockenfusscandies.com/"]
        else:
            self.start_urls = []

    def parse(self, response):
        contact_list = [a.attrib['href'] for a in response.css('a') if 'contact' in a.attrib['href'] or 'about' in a.attrib['href']]
        contact_list.append(response.request.url)
        for fllink in contact_list:
            yield response.follow(fllink, self.extemail)

    def extemail(self, response):
        emails = response.css('body').re(r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+')
        yield {
            'emails': emails
        }

process = CrawlerProcess({
    'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'
})

f = open("/Users/kalpesh/work/data/test.csv")
for rec in f:
    process.crawl(EmailSpider, record=rec)
f.close()
process.start()
If I understand your intent correctly, you could try the following approach:
a) collect the mail ids in self.emails, like
def extemail(self, response):
    emails = response.css('body').re(r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+')
    self.emails.extend(emails)  # accumulate emails across pages instead of overwriting
    yield {
        'emails': emails
    }
(or however else you collect the email ids from emails)
b) add a close(self, reason) method, as in this GitHub example, which is called when the spider has finished:
def close(self, reason):
    mails_for_record = ""
    for mail in self.emails:
        mails_for_record += mail + "|"
    print(self.record + mails_for_record)
Please also note: I read somewhere that for some versions of Scrapy it is def close(self, reason), while for others it is def closed(self, reason).
Hope this approach helps you.
You should visit all of the site's pages before yielding a result for that site.
This means that you need a queue of pages to visit and a place to store the results.
It can be done using meta.
Some pseudocode:
def parse(self, response):
    meta = response.meta
    if not meta.get('seen'):
        # -- finding urls of contact and about us pages --
        # -- putting it to meta['queue'] --
        # -- setting meta['seen'] = True

    page_emails_found = ...getting emails here...

    # --- extending already discovered emails
    # --- from other pages/initial empty list with new ones
    meta['emails'].extend(page_emails_found)

    # if queue isn't empty - yielding new request
    if meta['queue']:
        next_url = meta['queue'].pop()
        yield Request(next_url, callback=self.parse, meta=copy(meta))
    # if queue is empty - yielding result from meta
    else:
        yield {'url': current_domain, 'emails': meta['emails']}
Something like this..
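Building on that pseudocode, here is a hedged, runnable sketch of the meta-based approach. The spider name, the link selection, and the assumption that record is already a plain URL are all simplifications for illustration, not code from the answer:

from copy import copy

import scrapy

class SiteEmailSpider(scrapy.Spider):
    name = "site_email_sketch"

    def __init__(self, record, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Assumes record is already a plain URL; adapt the parsing to your CSV format.
        self.record = record.strip()
        self.start_urls = [self.record]

    def parse(self, response):
        meta = response.meta
        if not meta.get('seen'):
            # First visit: build the queue of contact/about pages once.
            meta['queue'] = list({
                response.urljoin(href)
                for href in response.css('a::attr(href)').getall()
                if 'contact' in href or 'about' in href
            })
            meta['emails'] = []
            meta['seen'] = True

        # Collect emails found on the current page.
        meta['emails'].extend(
            response.css('body').re(r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+'))

        if meta['queue']:
            # Pages left in the queue: pass the accumulated state along.
            next_url = meta['queue'].pop()
            yield scrapy.Request(next_url, callback=self.parse,
                                 meta=copy(meta), dont_filter=True)
        else:
            # Queue drained: emit one result for the whole site,
            # e.g. "|".join([self.record] + emails) gives the pipe-separated line.
            yield {'url': self.record, 'emails': sorted(set(meta['emails']))}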

How to get website to consistently return content from a GET request when it's inconsistent?

I posted a similar question earlier but I think this is a more refined question.
I'm trying to scrape: https://www.prosportstransactions.com/football/Search/SearchResults.php?Player=&Team=&BeginDate=&EndDate=&PlayerMovementChkBx=yes&submit=Search&start=0
My code randomly throws errors when I send a GET request to the URL. After debugging, I saw the following happen. A GET request for the following URL will be sent (example URL; it could happen on any page): https://www.prosportstransactions.com/football/Search/SearchResults.php?Player=&Team=&BeginDate=&EndDate=&PlayerMovementChkBx=yes&submit=Search&start=2400
The webpage will then say "There were no matching transactions found.". However, if I refresh the page, the content then loads. I'm using BeautifulSoup and Selenium and have put sleep statements in my code in the hope that it would work, but to no avail. Is this a problem on the website's end? It doesn't make sense to me how one GET request will return nothing but the exact same request will return something. Also, is there anything I could do to fix it, or is it out of my control?
Here is a sample of my code:
def scrapeWebsite(url, start, stop):
    driver = webdriver.Chrome(executable_path='/Users/Downloads/chromedriver')
    print(start, stop)
    madeDict = {"Date": [], "Team": [], "Name": [], "Relinquished": [], "Notes": []}

    #for i in range(0, 214025, 25):
    for i in range(start, stop, 25):
        print("Current Page: " + str(i))
        currUrl = url + str(i)
        #print(currUrl)
        #r = requests.get(currUrl)
        #soupPage = BeautifulSoup(r.content)
        driver.get(currUrl)
        #Sleep program for dynamic refreshing
        time.sleep(1)
        soupPage = BeautifulSoup(driver.page_source, 'html.parser')
        #page = urllib2.urlopen(currUrl)
        #time.sleep(2)
        #soupPage = BeautifulSoup(page, 'html.parser')
        info = soupPage.find("table", attrs={'class': 'datatable center'})
        time.sleep(1)
        extractedInfo = info.findAll("td")
The error occurs at the last line: the findAll call fails because info is None when the table is missing (meaning the GET request returned an empty result page).
I worked around it and scraped every page using try/except.
Probably the request loop is so fast that the page can't keep up.
See the example below; it worked like a charm:
import requests
from bs4 import BeautifulSoup

URL = 'https://www.prosportstransactions.com/football/Search/SearchResults.php?Player=&Team=&BeginDate=&EndDate=' \
      '&PlayerMovementChkBx=yes&submit=Search&start=%s'

def scrape(start=0, stop=214525):
    for page in range(start, stop, 25):
        current_url = URL % page
        print('scrape: current %s' % page)
        while True:
            try:
                response = requests.request('GET', current_url)
                if response.ok:
                    soup = BeautifulSoup(response.content.decode('utf-8'), features='html.parser')
                    table = soup.find("table", attrs={'class': 'datatable center'})
                    trs = table.find_all('tr')
                    slice_pos = 1 if page > 0 else 0
                    for tr in trs[slice_pos:]:
                        yield tr.find_all('td')
                    break
            except Exception as exception:
                print(exception)

for columns in scrape():
    values = [column.text.strip() for column in columns]
    # Continue your code ...
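One thing to be aware of: the while True loop above retries immediately, which can hammer the site. A small, hedged tweak (the retry cap and delay are arbitrary values, not part of the answer) is to back off between attempts and to check for the site's "no matching transactions" message explicitly:

import time

import requests

def fetch_with_retries(url, retries=5, delay=2.0):
    """Hypothetical helper: retry a flaky GET a few times with a pause in between."""
    response = None
    for attempt in range(retries):
        response = requests.get(url)
        if response.ok and 'There were no matching transactions found.' not in response.text:
            return response
        time.sleep(delay)  # give the server a moment before retrying
    return response  # caller decides what to do with the last (possibly empty) page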

apollo-upload-client and graphene-django

I have a question about using apollo-upload-client and graphene-django. Here I've discovered that apollo-upload-client adds the operations to formData, but here graphene-django only tries to get the query parameter. And the question is: where and how should this be fixed?
If you're referring to the data that has a header like (when viewing the HTTP from Chrome tools):
Content-Disposition: form-data; name="operations"
and data like
{"operationName":"MyMutation","variables":{"myData"....}, "query":"mutation MyMutation"...},
the graphene-python library interprets this and assembles it into a query for you, inserting the variables and removing the file data from the query. If you are using Django, you can find all of the uploaded files in info.context.FILES when writing a mutation.
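As a concrete illustration of that last point, here is a hedged sketch of a mutation that reads the upload from info.context.FILES; the mutation name, fields, and the temporary file path are invented for the example:

import graphene

class UploadAvatar(graphene.Mutation):
    # The client sends the file as multipart form data, so there is no file
    # argument here -- the upload arrives via the Django request object.
    class Arguments:
        description = graphene.String(required=False)

    ok = graphene.Boolean()

    def mutate(root, info, description=None):
        # info.context is the Django HttpRequest, so uploads live in .FILES.
        uploads = list(info.context.FILES.values())
        if not uploads:
            return UploadAvatar(ok=False)
        uploaded = uploads[0]
        with open('/tmp/' + uploaded.name, 'wb') as out:  # placeholder destination
            for chunk in uploaded.chunks():
                out.write(chunk)
        return UploadAvatar(ok=True)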
Here's my solution to support the latest apollo-upload-client (8.1). I recently had to revisit my Django code when I upgraded from apollo-upload-client 5.x to 8.x. Hope this helps.
Sorry I'm using an older graphene-django but hopefully you can update the mutation syntax to the latest.
Upload scalar type (passthrough, basically):
from graphene import Scalar

class Upload(Scalar):
    '''A file upload'''

    @staticmethod
    def serialize(value):
        raise Exception('File upload cannot be serialized')

    @staticmethod
    def parse_literal(node):
        raise Exception('No such thing as a file upload literal')

    @staticmethod
    def parse_value(value):
        return value
My upload mutation:
import graphene
from graphene import relay
from tempfile import NamedTemporaryFile

class UploadImage(relay.ClientIDMutation):
    class Input:
        image = graphene.Field(Upload, required=True)

    success = graphene.Field(graphene.Boolean)

    @classmethod
    def mutate_and_get_payload(cls, input, context, info):
        with NamedTemporaryFile(delete=False) as tmp:
            for chunk in input['image'].chunks():
                tmp.write(chunk)
            image_file = tmp.name
        # do something with image_file
        return UploadImage(success=True)
The heavy lifting happens in a custom GraphQL view. Basically it injects the file object into the appropriate places in the variables map.
def maybe_int(s):
    try:
        return int(s)
    except ValueError:
        return s

class CustomGraphqlView(GraphQLView):
    def parse_request_json(self, json_string):
        try:
            request_json = json.loads(json_string)
            if self.batch:
                assert isinstance(request_json, list), (
                    'Batch requests should receive a list, but received {}.').format(repr(request_json))
                assert len(request_json) > 0, ('Received an empty list in the batch request.')
            else:
                assert isinstance(request_json, dict), ('The received data is not a valid JSON query.')
            return request_json
        except AssertionError as e:
            raise HttpError(HttpResponseBadRequest(str(e)))
        except BaseException:
            logger.exception('Invalid JSON')
            raise HttpError(HttpResponseBadRequest('POST body sent invalid JSON.'))

    def parse_body(self, request):
        content_type = self.get_content_type(request)
        if content_type == 'application/graphql':
            return {'query': request.body.decode()}
        elif content_type == 'application/json':
            return self.parse_request_json(request.body.decode('utf-8'))
        elif content_type in ['application/x-www-form-urlencoded', 'multipart/form-data']:
            operations_json = request.POST.get('operations')
            map_json = request.POST.get('map')
            if operations_json and map_json:
                operations = self.parse_request_json(operations_json)
                map = self.parse_request_json(map_json)
                for file_id, f in request.FILES.items():
                    for name in map[file_id]:
                        segments = [maybe_int(s) for s in name.split('.')]
                        cur = operations
                        while len(segments) > 1:
                            cur = cur[segments.pop(0)]
                        cur[segments.pop(0)] = f
                logger.info('parse_body %s', operations)
                return operations
            else:
                return request.POST
        return {}
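To actually use the custom view, it has to replace the stock GraphQLView in the URL configuration; a minimal sketch, assuming a standard Django urls.py and that the view class lives in a local views module:

# urls.py (sketch)
from django.urls import path
from django.views.decorators.csrf import csrf_exempt

from .views import CustomGraphqlView  # assumed location of the class above

urlpatterns = [
    path('graphql/', csrf_exempt(CustomGraphqlView.as_view(graphiql=True))),
]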

do not crawl saved urls in database

I save crawled URLs in a MySQL database. When Scrapy crawls the sites again, the scheduler or the downloader should only hit/crawl/download a page if its URL is not in the database.
#settings.py
DOWNLOADER_MIDDLEWARES = {
    'myproject.middlewares.RandomUserAgentMiddleware': 400,
    'myproject.middlewares.ProxyMiddleware': 410,
    'myproject.middlewares.DupFilterMiddleware': 390,
    'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
    # Disable compression middleware, so the actual HTML pages are cached
}
#middlewares.py
class DupFilterMiddleware(object):
    def process_response(self, request, response, spider):
        conn = MySQLdb.connect(user='dbuser', passwd='dbpass', db='dbname', host='localhost', charset='utf8', use_unicode=True)
        cursor = conn.cursor()
        log.msg("Make mysql connection", level=log.INFO)

        cursor.execute("""SELECT id FROM scrapy WHERE url = %s""", (response.url,))
        if cursor.fetchone() is None:
            return None
        else:
            raise IgnoreRequest("Duplicate --db-- item found: %s" % response.url)
#spider.py
class TestSpider(CrawlSpider):
    name = "test_spider"
    allowed_domains = ["test.com"]
    start_urls = ["http://test.com/company/JV-Driver-Jobs-dHJhZGVzODkydGVhbA%3D%3D"]

    rules = [
        Rule(SgmlLinkExtractor(allow=("http://example.com/job/(.*)",)), callback="parse_items"),
        Rule(SgmlLinkExtractor(allow=("http://example.com/company/",)), follow=True),
    ]

    def parse_items(self, response):
        l = XPathItemLoader(testItem(), response=response)
        l.default_output_processor = MapCompose(lambda v: v.strip(), replace_escape_chars)
        l.add_xpath('job_title', '//h1/text()')
        l.add_value('url', response.url)
        l.add_xpath('job_description', '//tr[2]/td[2]')
        l.add_value('job_code', '99')
        return l.load_item()
It works, but I get an "ERROR: Error downloading" message from the raised IgnoreRequest(). Is that intended?
2013-10-15 17:54:16-0600 [test_spider] ERROR: Error downloading <GET http://example.com/job/aaa>: Duplicate --db-- item found: http://example.com/job/aaa
Another problem with my approach is that I have to query the database for each URL I am going to crawl. Say I have 10k URLs to crawl; that means I hit the MySQL server 10k times. How can I do it in one MySQL query? (e.g. get all crawled URLs, store them somewhere, then check each request URL against them)
Update:
Following audiodude's suggestion, here is my latest code. However, DupFilterMiddleware stops working: it runs __init__ but never calls process_request anymore. Removing __init__ makes process_request work again. What did I do wrong?
class DupFilterMiddleware(object):
    def __init__(self):
        self.conn = MySQLdb.connect(user='myuser', passwd='mypw', db='mydb', host='localhost', charset='utf8', use_unicode=True)
        self.cursor = self.conn.cursor()

        self.url_set = set()
        self.cursor.execute('SELECT url FROM scrapy')
        for url in self.cursor.fetchall():
            self.url_set.add(url)

        print self.url_set
        log.msg("DupFilterMiddleware Initialize mysql connection", level=log.INFO)

    def process_request(self, request, spider):
        log.msg("Process Request URL:{%s}" % request.url, level=log.WARNING)
        if request.url in self.url_set:
            log.msg("IgnoreRequest Exception {%s}" % request.url, level=log.WARNING)
            raise IgnoreRequest()
        else:
            return None
A few things I can think of:
First, you should use process_request in your DupFilterMiddleware. That way, you filter the request before it even gets downloaded. Your current solution wastes a lot of time and resources downloading pages that eventually get thrown out.
Secondly, you should not connect to your database inside process_response/process_request. That means you are creating a new connection for every item (and throwing away the old one). This is very inefficient. Try the following:
class DupFilterMiddleware(object):
    def __init__(self):
        self.conn = MySQLdb.connect(...
        self.cursor = self.conn.cursor()
Then replace cursor.execute(... in your process_response method with self.cursor.execute(...
Finally, I would agree that it can be suboptimal to hit the MySQL server 10k times. For such a low volume of data, why not load it all into a set() in memory? Put this in the __init__ method of your downloader middleware:
self.url_set = set()
self.cursor.execute('SELECT url FROM scrapy')
for url in self.cursor.fetchall():
    self.url_set.add(url)
Then instead of executing a query and checking the results, simply do:
if response.url in self.url_set:
    raise IgnoreRequest(...
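Putting those suggestions together, here is a hedged sketch of the complete middleware. The connection parameters are placeholders, and the exceptions import path assumes a reasonably recent Scrapy:

import MySQLdb
from scrapy.exceptions import IgnoreRequest

class DupFilterMiddleware(object):
    def __init__(self):
        # One connection at startup; URLs cached in memory for the whole crawl.
        conn = MySQLdb.connect(user='dbuser', passwd='dbpass', db='dbname',
                               host='localhost', charset='utf8', use_unicode=True)
        cursor = conn.cursor()
        cursor.execute('SELECT url FROM scrapy')
        # fetchall() returns 1-tuples, so unpack the url column explicitly.
        self.url_set = set(row[0] for row in cursor.fetchall())
        conn.close()

    def process_request(self, request, spider):
        # Filter before downloading, so duplicate pages are never fetched.
        if request.url in self.url_set:
            raise IgnoreRequest("Already crawled: %s" % request.url)
        return None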