from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
path_to_download = '/home/dev/'
options = Options()
options.add_experimental_option("prefs", {
    "download.default_directory": path_to_download,
    "download.prompt_for_download": False,
    "download.directory_upgrade": True,
    "safebrowsing.enabled": True
})
options.add_argument('start-maximized')
driver = webdriver.Chrome(executable_path='/home/dev/Downloads/chromedriver_linux64/chromedriver',
                          options=options)
#long logic
elem2 = WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH, "//*[contains(text(), 'Excel')]")))
elem2.click() #downloads the file (export to excel)
For now I am stuck with putting a time.sleep(5) before os.rename(f'{path_to_download}/exported.xlsx', f'{path_to_download}/{my_id}.xlsx').
Is there a way of controlling the filename while, or before, the file is downloaded?
You can achieve that by checking for the file's existence in your download directory, looping until the file appears.
You can do something like this:
import os
import time

# check the size of the file; return 0 if it doesn't exist
def getSize(filename):
    if os.path.isfile(filename):
        st = os.stat(filename)
        return st.st_size
    else:
        return 0

def wait_download(file_path):
    current_size = getSize(file_path)
    printed = False
    start_time = time.time()
    # loop until the file exists and its size stops changing
    while current_size != getSize(file_path) or getSize(file_path) == 0:
        current_size = getSize(file_path)
        # print something once so we know we are waiting for the file
        if not printed:
            print("Downloading file...")
            print("Waiting for download to complete...")
            printed = True
        # here we add an exit so we don't loop forever
        if (time.time() - start_time) > 15:
            return -1
        # give the download time to make progress between checks
        time.sleep(0.5)
    return 0
# In your code you can call the function like this
files = wait_download(f'{path_to_download}/exported.xlsx')
if files == 0:
    # do something, e.g. rename the downloaded file
    os.rename(f'{path_to_download}/exported.xlsx', f'{path_to_download}/{my_id}.xlsx')
else:
    # the file didn't download in time
    print("Download did not complete")
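If you prefer not to poll the file size, here is a minimal alternative sketch (assuming Chrome's usual behaviour of writing to a temporary .crdownload file until the download finishes; the helper name is made up for illustration):

import os
import time

def wait_and_rename(download_dir, expected_name, new_name, timeout=30):
    # Wait until expected_name exists and Chrome's .crdownload marker is gone,
    # then rename it. Returns True on success, False on timeout.
    src = os.path.join(download_dir, expected_name)
    partial = src + '.crdownload'  # Chrome's partial-download suffix
    deadline = time.time() + timeout
    while time.time() < deadline:
        if os.path.isfile(src) and not os.path.exists(partial):
            os.rename(src, os.path.join(download_dir, new_name))
            return True
        time.sleep(0.5)
    return False

# e.g. after elem2.click():
# wait_and_rename(path_to_download, 'exported.xlsx', f'{my_id}.xlsx')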
I used pass, but the exception is not ignored and execution does not move on to the next line.
import argparse
import requests
import re
parser = argparse.ArgumentParser(description= 'put the right url')
parser.add_argument('-l','--file', type=argparse.FileType('r'))
parser.add_argument('-u','--url', help="Check a url for straight quotes", type=str)
args = parser.parse_args()
url1 = args.url
def analyseur(url1):
    def formaturl(url1):
        if not re.match('(?:http|ftp|https)://', url1):
            return 'https://{}'.format(url1)
        return url1
    #print(formaturl(url1))
    response = requests.get(formaturl(url1))
    print(response.headers["Server"])
if args.url:
    analyseur(url1)
elif args.file:
    try:
        with args.file as file:
            count = 0
            while True:
                count += 1
                # Get next line from file
                line = file.readline()
                # if line is empty, end of file is reached
                if not line:
                    break
                print("Line{}: {}".format(count, line.strip()))
                url1 = line.strip()
                analyseur(url1)
    except ValueError:
        pass
        #print("erreur url")
For my job, I built a scrapy spider to quickly check in on ~200-500 website landing pages for clues that the pages are not functioning, outside of just 400-style errors. (e.g. check for the presence of "out of stock" on page.) This check happens across approx. 30 different websites under my purview, all of them using the same page structure.
This has worked fine, every day, for 4 months.
Then, suddenly, and without any change to the code, I started getting unpredictable errors about 4 weeks ago:
url_title = response.css("title::text").extract_first()
AttributeError: 'Response' object has no attribute 'css'
If I run this spider, this error will occur with, say... 3 out of 400 pages.
Then, if I immediately run the spider again, those same 3 pages are scraped just fine, and 4 totally different pages return the same error.
Furthermore, if I run the EXACT same spider as below, but replace mapping with just these 7 erroneous landing pages, they are scraped perfectly fine.
Is there something in my code that's not quite right??
I'm going to attach the whole code - sorry in advance!! - I just fear that something I might deem as superfluous may in fact be the cause. So this is the whole thing, but with sensitive data replaced with ####.
I've checked all of the affected pages, and of course the css is valid, and the title is always present.
I've run sudo apt-get update && sudo apt-get dist-upgrade on the server running scrapy, in hopes that this would help. No luck.
import scrapy
from scrapy import signals
from sqlalchemy.orm import sessionmaker
from datetime import date, datetime, timedelta
from scrapy.http.request import Request
from w3lib.url import safe_download_url
from sqlalchemy import and_, or_, not_
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from sqlalchemy.engine import create_engine
engine = create_engine('mysql://######:#######localhost/LandingPages', pool_recycle=3600, echo=False)
#conn = engine.connect()
from LandingPageVerifier.models import LandingPagesFacebook, LandingPagesGoogle, LandingPagesSimplifi, LandingPagesScrapeLog, LandingPagesScrapeResults
Session = sessionmaker(bind=engine)
session = Session()
# today = datetime.now().strftime("%Y-%m-%d")
# thisyear = datetime.now().strftime("%Y")
# thismonth = datetime.now().strftime("%m")
# thisday = datetime.now().strftime("%d")
# start = date(year=2019,month=04,day=09)
todays_datetime = datetime(datetime.today().year, datetime.today().month, datetime.today().day)
print todays_datetime
landingpages_today_fb = session.query(LandingPagesFacebook).filter(LandingPagesFacebook.created_on >= todays_datetime).all()
landingpages_today_google = session.query(LandingPagesGoogle).filter(LandingPagesGoogle.created_on >= todays_datetime).all()
landingpages_today_simplifi = session.query(LandingPagesSimplifi).filter(LandingPagesSimplifi.created_on >= todays_datetime).all()
session.close()
#Mix 'em together!
landingpages_today = landingpages_today_fb + landingpages_today_google + landingpages_today_simplifi
#landingpages_today = landingpages_today_fb
#Do some iterating and formatting work
landingpages_today = [(u.ad_url_full, u.client_id) for u in landingpages_today]
#print landingpages_today
landingpages_today = list(set(landingpages_today))
#print 'Unique pages: '
#print landingpages_today
# unique_landingpages = [(u[0]) for u in landingpages_today]
# unique_landingpage_client = [(u[1]) for u in landingpages_today]
# print 'Pages----->', len(unique_landingpages)
class LandingPage004Spider(scrapy.Spider):
    name = 'LandingPage004Spider'

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super(LandingPage004Spider, cls).from_crawler(crawler, *args, **kwargs)
        #crawler.signals.connect(spider.spider_opened, signals.spider_opened)
        crawler.signals.connect(spider.spider_closed, signals.spider_closed)
        return spider

    def spider_closed(self, spider):
        #stats = spider.crawler.stats.get_stats()
        stats = spider.crawler.stats.get_value('item_scraped_count'),
        Session = sessionmaker(bind=engine)
        session = Session()
        logitem = LandingPagesScrapeLog(scrape_count = spider.crawler.stats.get_value('item_scraped_count'),
            is200 = spider.crawler.stats.get_value('downloader/response_status_count/200'),
            is400 = spider.crawler.stats.get_value('downloader/response_status_count/400'),
            is403 = spider.crawler.stats.get_value('downloader/response_status_count/403'),
            is404 = spider.crawler.stats.get_value('downloader/response_status_count/404'),
            is500 = spider.crawler.stats.get_value('downloader/response_status_count/500'),
            scrapy_errors = spider.crawler.stats.get_value('log_count/ERROR'),
            scrapy_criticals = spider.crawler.stats.get_value('log_count/CRITICAL'),
        )
        session.add(logitem)
        session.commit()
        session.close()

    #mapping = landingpages_today
    handle_httpstatus_list = [200, 302, 404, 400, 500]
    start_urls = []

    def start_requests(self):
        for url, client_id in self.mapping:
            yield Request(url, callback=self.parse, meta={'client_id': client_id})
    def parse(self, response):
        ##DEBUG - return all scraped data
        #wholepage = response.body.lower()
        url = response.url
        if 'redirect_urls' in response.request.meta:
            redirecturl = response.request.meta['redirect_urls'][0]
            if 'utm.pag.ca' in redirecturl:
                url_shortener = response.request.meta['redirect_urls'][0]
            else:
                url_shortener = 'None'
        else:
            url_shortener = 'None'
        client_id = response.meta['client_id']
        url_title = response.css("title::text").extract_first()
        # pagesize = len(response.xpath('//*[not(descendant-or-self::script)]'))
        pagesize = len(response.body)
        HTTP_code = response.status

        ####ERROR CHECK: Small page size
        if 'instapage' in response.body.lower():
            if pagesize <= 20000:
                err_small = 1
            else:
                err_small = 0
        else:
            if pagesize <= 35000:
                err_small = 1
            else:
                err_small = 0

        ####ERROR CHECK: Page contains the phrase 'not found'
        if 'not found' in response.xpath('//*[not(descendant-or-self::script)]').extract_first().lower():
            # their sites are full of HTML errors, making scrapy unable to notice
            # what is and is not inside a script element
            if 'dealerinspire' in response.body.lower():
                err_has_not_found = 0
            else:
                err_has_not_found = 1
        else:
            err_has_not_found = 0

        ####ERROR CHECK: Page contains the phrase 'can't be found'
        if "can't be found" in response.xpath('//*[not(self::script)]').extract_first().lower():
            err_has_cantbefound = 1
        else:
            err_has_cantbefound = 0

        ####ERROR CHECK: Page contains the phrase 'unable to locate'
        if 'unable to locate' in response.body.lower():
            err_has_unabletolocate = 1
        else:
            err_has_unabletolocate = 0

        ####ERROR CHECK: Page contains phrase 'no longer available'
        if 'no longer available' in response.body.lower():
            err_has_nolongeravailable = 1
        else:
            err_has_nolongeravailable = 0

        ####ERROR CHECK: Page contains phrase 'no service specials'
        if 'no service specials' in response.body.lower():
            err_has_noservicespecials = 1
        else:
            err_has_noservicespecials = 0

        ####ERROR CHECK: Page contains phrase 'Sorry, no' to match zero inventory for a search,
        # which normally says "Sorry, no items matching your request were found."
        if 'sorry, no ' in response.body.lower():
            err_has_sorryno = 1
        else:
            err_has_sorryno = 0

        yield {'client_id': client_id, 'url': url, 'url_shortener': url_shortener,
               'url_title': url_title, "pagesize": pagesize, "HTTP_code": HTTP_code,
               "err_small": err_small, 'err_has_not_found': err_has_not_found,
               'err_has_cantbefound': err_has_cantbefound,
               'err_has_unabletolocate': err_has_unabletolocate,
               'err_has_nolongeravailable': err_has_nolongeravailable,
               'err_has_noservicespecials': err_has_noservicespecials,
               'err_has_sorryno': err_has_sorryno}
#E-mail settings
def sendmail(recipients, subject, body):
    fromaddr = "#######"
    toaddr = recipients
    msg = MIMEMultipart()
    msg['From'] = fromaddr
    msg['Subject'] = subject
    body = body
    msg.attach(MIMEText(body, 'html'))
    server = smtplib.SMTP('########')
    server.starttls()
    server.login(fromaddr, "##########")
    text = msg.as_string()
    server.sendmail(fromaddr, recipients, text)
    server.quit()
Expected results: a perfect scrape, with no errors.
Actual results: unpredictable AttributeErrors claiming that attribute 'css' can't be found on some pages. But if I scrape those pages individually, using the same script, they scrape just fine.
Sometimes Scrapy can't parse HTML because of markup errors, which is why you can't call response.css(). You can catch these events in your code and analyze the broken HTML:
def parse(self, response):
    try:
        ...  # your code
    except:
        with open("Error.htm", "w") as f:
            f.write(response.body)
UPDATE: You can try checking for an empty response:
def parse(self, response):
    if not response.body:
        # dont_filter=True is needed so the duplicate filter
        # doesn't drop the retry of the same URL
        yield scrapy.Request(url=response.url, callback=self.parse, dont_filter=True,
                             meta={'client_id': response.meta["client_id"]})
        return
    # your original code
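A related check worth trying (this is an assumption about the cause, not something confirmed here): .css() only exists on Scrapy's TextResponse/HtmlResponse subclasses, so a plain Response (e.g. a non-HTML content type or an empty body) raises exactly this AttributeError. You can detect and retry those cases explicitly:

import scrapy
from scrapy.http import HtmlResponse

def parse(self, response):
    if not isinstance(response, HtmlResponse):
        # Plain Response objects have no .css()/.xpath(); log and retry
        self.logger.warning('Non-HTML response from %s (%s)',
                            response.url, type(response).__name__)
        yield scrapy.Request(response.url, callback=self.parse, dont_filter=True,
                             meta={'client_id': response.meta['client_id']})
        return
    # ... your original parsing code ...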
I am currently working on a note-taking app in PyGtk and have set up a TextView where a user can type and add text tags for Bold, Underline and Italics.
However, when it comes to saving the formatted text, I cannot figure out how to do so.
I am trying to save in Gtk's native tagset format; however, after using
tag_format = TextBuffer.register_serialize_tagset()
content = TextBuffer.serialize(self, tag_format, start, end)
I cannot write this to a file with
open(filename, 'w').write(content)
because I get an error which states that it cannot write bytes and needs a string instead.
File "example.py", line 87, in save_file
open(filename, 'w').write(content)
TypeError: write() argument must be str, not bytes
Here is sample code you can run and test by typing and then saving:
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Pango
class MainWindow(Gtk.ApplicationWindow):
    def __init__(self):
        Gtk.Window.__init__(self, title="TwoNote")
        self.grid = Gtk.Grid()
        self.toolbar = Gtk.Toolbar()
        self.grid.add(self.toolbar)

        # buttons for toolbar
        self.button_bold = Gtk.ToggleToolButton()
        self.button_italic = Gtk.ToggleToolButton()
        self.button_underline = Gtk.ToggleToolButton()
        self.button_save = Gtk.ToolButton()
        self.button_open = Gtk.ToolButton()

        self.mytext = TextSet(self.button_bold, self.button_italic, self.button_underline)

        self.button_bold.set_icon_name("format-text-bold-symbolic")
        self.toolbar.insert(self.button_bold, 0)
        self.button_italic.set_icon_name("format-text-italic-symbolic")
        self.toolbar.insert(self.button_italic, 1)
        self.button_underline.set_icon_name("format-text-underline-symbolic")
        self.toolbar.insert(self.button_underline, 2)
        self.toolbar.insert(self.button_save, 3)
        self.toolbar.insert(self.button_open, 4)
        self.button_open.set_icon_name("document-open-data")
        self.button_save.set_icon_name("document-save")

        self.button_save.connect("clicked", self.save_file)
        self.button_open.connect("clicked", self.open_file)
        self.button_bold.connect("toggled", self.mytext.on_button_clicked, "Bold", self.button_italic, self.button_underline)
        self.button_italic.connect("toggled", self.mytext.on_button_clicked, "Italic", self.button_bold, self.button_underline)
        self.button_underline.connect("toggled", self.mytext.on_button_clicked, "Underline", self.button_bold, self.button_italic)

        self.grid.attach_next_to(self.mytext, self.toolbar, Gtk.PositionType.BOTTOM, 10, 30)
        self.add(self.grid)

    filename = "Untitled"
    def open_file(self, widget):
        open_dialog = Gtk.FileChooserDialog("Open an existing file", self, Gtk.FileChooserAction.OPEN,
                                            (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
        open_response = open_dialog.run()
        if open_response == Gtk.ResponseType.OK:
            filename = open_dialog.get_filename()
            text = open(filename).read()
            self.mytext.get_buffer().set_text(text)
            open_dialog.destroy()
        elif open_response == Gtk.ResponseType.CANCEL:
            print("Cancel clicked")
            open_dialog.destroy()
    def save_file(self, widget):
        savechooser = Gtk.FileChooserDialog('Save File', self, Gtk.FileChooserAction.SAVE,
                                            (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_SAVE, Gtk.ResponseType.OK))
        allfilter = Gtk.FileFilter()
        allfilter.set_name('All files')
        allfilter.add_pattern('*')
        savechooser.add_filter(allfilter)
        txtFilter = Gtk.FileFilter()
        txtFilter.set_name('Text file')
        txtFilter.add_pattern('*.txt')
        savechooser.add_filter(txtFilter)
        response = savechooser.run()
        if response == Gtk.ResponseType.OK:
            filename = savechooser.get_filename()
            print(filename, 'selected.')
            buf = self.mytext.get_buffer()
            start, end = buf.get_bounds()
            tag_format = buf.register_serialize_tagset()
            content = buf.serialize(buf, tag_format, start, end)
            try:
                open(filename, 'w').write(content)
            except Exception as e:
                print('Could not save %s: %s' % (filename, e))
            savechooser.destroy()
        elif response == Gtk.ResponseType.CANCEL:
            print('Closed, file not saved.')
            savechooser.destroy()
class TextSet(Gtk.TextView):
    def __init__(self, buttonBold, buttonItalic, buttonUnderline, interval=1):
        # TextView setup
        Gtk.TextView.__init__(self)
        self.set_vexpand(True)
        self.set_indent(10)
        self.set_top_margin(90)
        self.set_left_margin(20)
        self.set_right_margin(20)
        self.set_wrap_mode(Gtk.WrapMode.CHAR)
        self.tb = TextBuffer()
        self.set_buffer(self.tb)
        # keep references to the toolbar buttons
        self.button_bold = buttonBold
        self.button_italic = buttonItalic
        self.button_underline = buttonUnderline

    def on_button_clicked(self, widget, tagname, widget1, widget2):
        state = widget.get_active()
        name = widget.get_icon_name()
        bounds = self.tb.get_selection_bounds()
        self.tagname = tagname
        if state:
            widget1.set_active(False)
            widget2.set_active(False)
        # highlighting
        if len(bounds) != 0:
            start, end = bounds
            myIter = self.tb.get_iter_at_mark(self.tb.get_insert())
            myTags = myIter.get_tags()
            if myTags == [] and state == True:
                self.tb.apply_tag_by_name(tagname, start, end)
            elif myTags != [] and state == True:
                self.tb.remove_all_tags(start, end)
                self.tb.apply_tag_by_name(tagname, start, end)
            else:
                for i in range(len(myTags)):
                    if myTags[i].props.name == tagname:
                        self.tb.remove_tag_by_name(tagname, start, end)
            myTags = []
        self.tb.markup(widget, tagname)

    def mouse_clicked(self, window, event):
        self.button_bold.set_active(False)
        self.button_italic.set_active(False)
        self.button_underline.set_active(False)
class TextBuffer(Gtk.TextBuffer):
    def __init__(self):
        Gtk.TextBuffer.__init__(self)
        self.connect_after('insert-text', self.text_inserted)
        # A list to hold our active tags
        self.taglist_on = []
        # Our tags
        self.tag_bold = self.create_tag("Bold", weight=Pango.Weight.BOLD)
        self.tag_none = self.create_tag("None", weight=Pango.Weight.NORMAL)
        self.tag_italic = self.create_tag("Italic", style=Pango.Style.ITALIC)
        self.tag_underline = self.create_tag("Underline", underline=Pango.Underline.SINGLE)

    def get_iter_position(self):
        return self.get_iter_at_mark(self.get_insert())

    def markup(self, widget, tagname):
        self.tag_name = tagname
        self.check = True
        # add the tag to our active tags list (or remove it if it was already there)
        if widget.get_active() == True:
            if self.tag_name == 'Bold':
                if 'Bold' in self.taglist_on:
                    del self.taglist_on[self.taglist_on.index('Bold')]
                else:
                    self.taglist_on.append('Bold')
            if self.tag_name == 'Italic':
                if 'Italic' in self.taglist_on:
                    del self.taglist_on[self.taglist_on.index('Italic')]
                else:
                    self.taglist_on.append('Italic')
            if self.tag_name == 'Underline':
                if 'Underline' in self.taglist_on:
                    del self.taglist_on[self.taglist_on.index('Underline')]
                else:
                    self.taglist_on.append('Underline')
        else:
            self.check = False

    def text_inserted(self, buffer, iter, text, length):
        # Text was inserted in the buffer. If there are any tags in
        # self.taglist_on, apply them to the newly inserted text.
        if self.taglist_on:
            # This sets the iter back N characters
            iter.backward_chars(length)
            # And this applies the tag from the insert position to iter
            if self.check == True:
                if self.tag_name == 'Italic':
                    self.apply_tag_by_name('Italic', self.get_iter_position(), iter)
                if self.tag_name == 'Bold':
                    self.apply_tag_by_name('Bold', self.get_iter_position(), iter)
                if self.tag_name == 'Underline':
                    self.apply_tag_by_name('Underline', self.get_iter_position(), iter)
            else:
                self.remove_all_tags(self.get_iter_position(), iter)
win = MainWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
I figured it out. Rather than using
open(filename, 'w').write(content)
to save the content, I imported GLib and used
GLib.file_set_contents(filename, content)
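In context, the save handler would then look roughly like this (a sketch; since serialize() returns bytes, opening the file with open(filename, 'wb') would work as well):

from gi.repository import GLib

# inside save_file, after the file chooser returns a filename:
buf = self.mytext.get_buffer()
start, end = buf.get_bounds()
tag_format = buf.register_serialize_tagset()
content = buf.serialize(buf, tag_format, start, end)  # bytes
try:
    GLib.file_set_contents(filename, content)  # writes bytes directly
except Exception as e:
    print('Could not save %s: %s' % (filename, e))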
Here is the code that I copied from another .py file, and I got a TypeError:
#coding:utf-8
import serial
import sys
import time
import logging

class TestRemoteControl(object):
    def __init__(self, com):
        self.ser = serial.Serial(com, 115200)
        self.ser.bytesize = 8
        self.ser.stopbits = 1
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.INFO)
        self.formatter = logging.Formatter('%(asctime)-25s - %(name)s - %(levelname)s - %(message)s')
        self.ch = logging.StreamHandler()
        self.ch.setLevel(logging.INFO)
        self.fh = logging.FileHandler('Test.txt')
        self.fh.setLevel(logging.INFO)
        self.fh.setFormatter(self.formatter)
        self.ch.setFormatter(self.formatter)
        self.logger.addHandler(self.ch)
        self.logger.addHandler(self.fh)  # was addFilter; a FileHandler must be added as a handler

    def start_esc(self):
        self.logger.info("start the motor")
        self.ser.write("####1")

    def stop_esc(self):
        self.logger.info("stop the motor")
        self.ser.write("####1")
        time.sleep(0.4)
        self.ser.write("####1")
        time.sleep(0.4)
        self.ser.write("####1")
        time.sleep(0.4)

    def speed_up(self):
        self.logger.info("speed up the motor")
        self.ser.write("####3")

    def speed_down(self):
        self.logger.info("slow down the motor")
        self.ser.write("####2")
def main():
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                        datefmt='%a, %d %b %Y %H:%M:%S',
                        filename='myapp.log',
                        filemode='w')
    print("please enter com num:")
    a = raw_input()
    temp_com = "com" + a
    test_RC = TestRemoteControl(temp_com)
    count = 1
    max_count = int(raw_input('Please enter on-off counts'))
    while count < max_count:
        test_RC.start_esc()
        # time.sleep(10)
        # test_RC.speed_up()
        time.sleep(2)
        test_RC.stop_esc()
        print "complete ", count, " times "
        time.sleep(1)
        count += 1

if __name__ == "__main__":
    main()
Here is the error; I don't know why. Help me please.
TypeError: unbound method __init__() must be called with TestRemoteControl instance as first argument (got nothing instead)
The code is OK. The problem was the IDE (PyCharm): because I named the class TestRemoteControl, PyCharm treated the file as a unittest and would only run it as a test. Renaming the class so it doesn't start with "Test" lets it run as a normal script.
Here is code that can get info from https://www.gabar.org/membersearchresults.cfm
but cannot from https://www.gabar.org/membersearchresults.cfm?start=1&id=70FFBD1B-9C8E-9913-79DBB8B989DED6C1
from bs4 import BeautifulSoup
import requests
import traceback

links_to_visit = []
navigation_links = []  # for testing next button
base_url = 'https://www.gabar.org'

def make_soup(link):
    r = requests.get(link)
    soup = BeautifulSoup(r.content, 'html.parser')
    return soup
def all_results(url):
    global links_to_visit
    global navigation_links
    soup = make_soup(url)
    print(soup)
    div = soup.find('div', {'class': 'cs_control'})
    links = div.find_all('a')
    print(links)
    for link in links:
        try:
            if link.text == 'Next':  # prev, next, new search
                navigation_links.append(link)
                print('got it')
            elif not '/MemberSearchDetail.cfm?ID=' in link.get('href'):
                pass  # I don't need that link
            else:
                links_to_visit.append(link)
        except:
            traceback.print_exc()
    print(len(links_to_visit))
    print(links_to_visit)
    #print(links_to_visit[-1].get('href'))
def start():
    flag = 1
    page = 1
    while page < 60716:
        flag = 0
        if navigation_links[-1].text == 'Next':
            flag = 1
            next_link = navigation_links[-1]
            #print(next_link.get('href'))
            page += 25
            print(base_url + next_link.get('href'))
            all_results(base_url + next_link.get('href'))
            print('page is:', page)

if __name__ == '__main__':
    all_results('https://www.gabar.org/membersearchresults.cfm')
    start()
What do I need to understand or do if I want to get the full result?
What you need to understand is that there is more to an HTTP request than just its URL. In this case, a search result is only available to the session that executed the search, and it can therefore only be paged through if you are the "owner" of that session. Most websites identify a session using session cookies that you need to send along with your HTTP requests.
This can be a huge hassle, but luckily Python's requests takes care of all of that for you with requests.session. Instead of using requests.get(url), you initialize a session with session = requests.session() and then use that session in subsequent requests: session.get(url). This will automagically preserve cookies for you and in many ways behave like an actual browser would.
You can read more about how requests.session works here.
And last but not least, your fixed code =)
from bs4 import BeautifulSoup
import requests
import traceback

links_to_visit = []
navigation_links = []  # for testing next button
# we initialize the session here
session = requests.session()
base_url = 'https://www.gabar.org'

def make_soup(link):
    # r = requests.get(link)
    # we use the session here in order to preserve cookies across requests
    r = session.get(link)
    soup = BeautifulSoup(r.content, 'html.parser')
    return soup

def all_results(url):
    # globals are almost never needed or recommended and certainly not here.
    # you can just leave this out
    # global links_to_visit
    # global navigation_links
    soup = make_soup(url)
    print(soup)
    div = soup.find('div', {'class': 'cs_control'})
    links = div.find_all('a')
    print(links)
    for link in links:
        try:
            if link.text == 'Next':  # prev, next, new search
                navigation_links.append(link)
                print('got it')
            elif not '/MemberSearchDetail.cfm?ID=' in link.get('href'):
                pass  # I don't need that link
            else:
                links_to_visit.append(link)
        except:
            traceback.print_exc()
    print(len(links_to_visit))
    print(links_to_visit)
    #print(links_to_visit[-1].get('href'))

def start():
    flag = 1
    page = 1
    while page < 60716:
        flag = 0
        if navigation_links[-1].text == 'Next':
            flag = 1
            next_link = navigation_links[-1]
            #print(next_link.get('href'))
            page += 25
            print(base_url + next_link.get('href'))
            all_results(base_url + next_link.get('href'))
            print('page is:', page)

if __name__ == '__main__':
    all_results('https://www.gabar.org/membersearchresults.cfm')
    start()