Click button with Selenium

I am trying to automate my homework on a website called edgenuity.com. I have made it so I can log in and get to my courses, but I cannot click the "Next Activity" button on this site. I have been able to click other buttons with Selenium, but not this one.
Here is my code:
import unittest
import edgenuitySolver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
from tkinter import *
from functools import partial
import tracemalloc
import time

tracemalloc.start()

def validateLogin(username, password):
    print("username entered :", username.get())
    print("password entered :", password.get())
    print("course entered :", course.get())

# window
tkWindow = Tk()
tkWindow.geometry('400x150')
tkWindow.title('Edgenuity Login Form')

# username label and text entry box
usernameLabel = Label(tkWindow, text="User Name").grid(row=0, column=0)
username = StringVar()
usernameEntry = Entry(tkWindow, textvariable=username).grid(row=0, column=1)

# password label and password entry box
passwordLabel = Label(tkWindow, text="Password").grid(row=1, column=0)
password = StringVar()
passwordEntry = Entry(tkWindow, textvariable=password, show='*').grid(row=1, column=1)

# number of courses available
courseLabel = Label(tkWindow, text="Number of courses?").grid(row=2, column=0)
course = IntVar()
courseEntry = Entry(tkWindow, textvariable=course).grid(row=2, column=1)

validateLogin = partial(validateLogin, username, password)

# login button
loginButton = Button(tkWindow, text="Save Info", command=validateLogin).grid(row=4, column=0)
tkWindow.mainloop()

class edgenuityBot(unittest.TestCase):
    def setUp(self):
        self.driver = webdriver.Chrome()

    def test_search_in_python_org(self):
        driver = self.driver
        driver.get("https://auth.edgenuity.com/Login/Login/Student")
        self.assertIn("Edgenuity", driver.title)
        driver.maximize_window()
        driver.implicitly_wait(5)  # element lookups retry for up to 5 seconds
        try:
            print("finding element...")
            element = driver.find_element(By.XPATH, '//*[@id="LoginPassword"]')
            element.send_keys(password.get())
            print("found element and sent")
            element2 = driver.find_element(By.XPATH, '//*[@id="LoginUsername"]')
            element2.send_keys(username.get())
            time.sleep(2.5)
            loginButton = driver.find_element(By.XPATH, '//*[@id="LoginSubmit"]')
            loginButton.click()
            time.sleep(5)
        finally:
            print("all good")

        # active session
        driver.implicitly_wait(5)
        try:
            continueButton = driver.find_element(By.XPATH, '//*[@id="btnContinue"]')
            continueButton.click()
            time.sleep(2.5)
        except:
            print("no active sessions")
        else:
            print("continued the active session")

        # next activity
Here is the markup for the button on the website:
Next Activity
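A common reason a button like this resists a plain find_element + click is that it only becomes clickable after the page's scripts finish, or that it sits inside an iframe. Below is a minimal sketch of both fixes; the text-based locator and the frame id are assumptions on my part, since the full HTML of the button isn't shown:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

wait = WebDriverWait(driver, 15)

# if the button lives inside an iframe, switch into it first
# ("stageFrame" is a hypothetical id -- inspect the page to find the real one)
# wait.until(EC.frame_to_be_available_and_switch_to_it((By.ID, "stageFrame")))

# wait until the element is actually clickable before clicking it
next_btn = wait.until(
    EC.element_to_be_clickable((By.XPATH, "//*[text()='Next Activity']"))
)
next_btn.click()

# if a normal click is intercepted by an overlay, a JavaScript click sometimes works:
# driver.execute_script("arguments[0].click();", next_btn)

driver.switch_to.default_content()  # leave the iframe again if you switched into it

If the text locator finds nothing, check in the browser's developer tools whether the click handler is attached to a parent or child of the element carrying the text.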

Related

How to attach a screenshot to a pytest-html report with conftest.py?

I want to attach a screenshot to my HTML report, but I haven't found any good resource on how to use the conftest.py file. I created the conftest.py file inside the pytest folder with the following code:
import pytest

@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    pytest_html = item.config.pluginmanager.getplugin("html")
    outcome = yield
    report = outcome.get_result()
    extra = getattr(report, "extra", [])
    image = "D:/Selenium/Insights/2022-11-02_00-13-18/error_page.png"
    if report.when == "call":
        # always add url to report
        extra.append(pytest_html.extras.url("http://www.example.com/"))
        extra.append(pytest_html.extra.image(image))
        xfail = hasattr(report, "wasxfail")
        if (report.skipped and xfail) or (report.failed and not xfail):
            # only add additional html on failure
            # extra.append(pytest_html.extras.html("<div>Additional HTML</div>"))
            extra.append(pytest_html.extra.image(image))
        report.extra = extra
And my test.py file is:
import time
from os import getenv
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from dotenv import load_dotenv
from Login_actions import Login_activities
from Insights_actions import Insights_activities
from Locators import Locators
import pytest, os
from datetime import datetime

class Test_Insights():

    @pytest.fixture
    def test_setup(self):
        # make a new directory for downloads
        new_dir = r"D:\Selenium\Insights\{timestamp}".format(timestamp=datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
        # print(new_dir)
        if not os.path.exists(new_dir):
            os.makedirs(new_dir)
        self.saved_dir = new_dir
        prefs = {"download.default_directory": new_dir,
                 "download.directory_upgrade": True,
                 "download.prompt_for_download": False}

        # initiating chrome browser instance
        options = Options()
        options.add_argument('--start-maximized')
        # options.add_argument('--headless')
        options.add_experimental_option("prefs", prefs)
        self.driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=options)

        # load credentials
        load_dotenv()
        self.username = getenv("TOP_USERNAME")
        self.password = getenv("TOP_PWD")

        # exiting ceremonies
        yield
        self.driver.close()
        self.driver.quit()
        print("Test executed")

    def test_check_in(self, test_setup):
        driver = self.driver
        # login_url = "https://tilt-sso.preprod.crto.in/"  # separate login page
        # url = "https://tilt-orange360.preprod.crto.in/insights/home"
        url = "https://tilt-sso.preprod.crto.in/auth?code=5515f8b0-4b64-4da4-b506-e6a6a3f81b23&scope=cn%20dn%20mail%20uid%20umsId&state=eyJyZWRpcmVjdF91cmkiOiJcL2hvbWUiLCJub25jZSI6IktaTFBxczU5T3lQUWJaRUp0OFhBQWZvZDNueDhPaENDbGlJWVRqZ08ifQ%3D%3D"
        driver.get(url)
        try:
            welcome_text = driver.find_element(by=By.XPATH, value="//div[contains(text(),'Criteo')]")
            assert welcome_text
            login_actions = Login_activities(driver)
            # the fixture stores the credentials on self, not on the fixture argument
            login_actions.enter_username(self.username)
            login_actions.enter_password(self.password)
            login_actions.login()
            page_load_wait = WebDriverWait(driver, timeout=30).until(
                EC.url_to_be("https://tilt-orange360.preprod.crto.in/insights/home"))
            if page_load_wait:
                WebDriverWait(driver, timeout=20).until(
                    EC.visibility_of_element_located((By.XPATH, Locators.welcome_text)))
                WebDriverWait(driver, timeout=20).until(
                    EC.element_to_be_clickable((By.XPATH, Locators.run_insight)))
                insights_actions = Insights_activities(driver)
                insights_actions.insights_search("Check-In")
                insights_actions.search_partners("BOOKINGIT")
                insights_actions.smart_date_30days()
                insights_actions.submit_insights()
                WebDriverWait(driver, 20).until(
                    EC.visibility_of_element_located((By.XPATH, Locators.success_mesg)))
                # submit_verify = driver.find_element(by=By.XPATH, value=Locators.success_mesg)
                # assert(submit_verify)
                print("Submission successful")
                insights_actions.download_file()
                time.sleep(20)
                print(self.saved_dir)
                # poll the download directory until a .pptx file shows up
                arr = [file for file in os.listdir(self.saved_dir) if file.endswith('.pptx')]
                while not arr:
                    time.sleep(5)
                    arr = [file for file in os.listdir(self.saved_dir) if file.endswith('.pptx')]
                if arr:
                    print("File in the directory: " + arr[0])
                    print("Insights completed. File downloaded successfully")
                else:
                    print("File not available")
                    raise NoSuchElementException
        except:
            if driver.find_element(by=By.XPATH, value=Locators.error_page):
                driver.get_screenshot_as_file('{dir}/error_page.png'.format(dir=self.saved_dir))
                print("500 Internal server error")
                Error_page = driver.current_url
                print("The error page: " + Error_page)
                raise NoSuchElementException
I do not know why it is not working. The documentation (https://pytest-html.readthedocs.io/en/latest/user_guide.html#enhancing-reports) does not have much information. I really need help here, please.
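One detail worth checking (an inference from the pytest-html API rather than a confirmed fix): the plugin's module is named extras, plural, but two of the calls in the hook above use pytest_html.extra.image(...), which would raise an AttributeError inside the hook. The corrected lines would be:

extra.append(pytest_html.extras.url("http://www.example.com/"))
extra.append(pytest_html.extras.image(image))  # extras, not extra

Also double-check the filename on disk: pytest only auto-loads the file if it is spelled exactly conftest.py.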

Scrapy-Selenium: Output contains 'None' value

I am trying to execute this script using the scrapy-selenium middleware to handle JavaScript. The script shows no error but yields 'None' as output. I think I am missing something in this particular line:

followers = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, "//a[@href='/goat/followers/']"))).click()
time.sleep(3)

but I am unable to figure it out. What am I actually missing here? I am badly stuck at this point.
import scrapy
from scrapy_selenium import SeleniumRequest
from selenium.webdriver.common.keys import Keys
from scrapy.selector import Selector
from goat.settings import *
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import time

class FollowersSpider(scrapy.Spider):
    name = 'followers'
    start_urls = [
        "https://www.instagram.com"
    ]

    def __init__(self):
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        chrome_path = SELENIUM_DRIVER_EXECUTABLE_PATH
        driver = webdriver.Chrome(executable_path=chrome_path, options=chrome_options)
        driver.set_window_size(1920, 1080)
        driver.get("https://www.instagram.com/goat/")

        # target username
        username = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, "//input[@name='username']")))
        password = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, "//input[@name='password']")))

        # enter username and password
        username.clear()
        username.send_keys("username")
        time.sleep(2)
        password.clear()
        password.send_keys("password")
        time.sleep(2)

        # target the login button and click it
        button = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "button[type='submit']"))).click()
        time.sleep(2)
        # We are logged in!

        # closing pop_up
        pop_up = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, "//button[contains(text(), 'Not Now')]"))).click()
        time.sleep(2)

        # closing notification pop_up
        notification_pop_up = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, "//button[contains(text(), 'Not Now')]"))).click()
        time.sleep(2)

        # search_box
        searchbox = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, "//input[@placeholder='Search']")))
        searchbox.clear()
        keyword = "#goat"
        searchbox.send_keys(keyword)
        # searchbox.send_keys(keyword, Keys.ENTER)
        time.sleep(2)

        select_search = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, "//a[@href='/goat/']"))).click()
        time.sleep(3)
        followers = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, "//a[@href='/goat/followers/']"))).click()
        time.sleep(3)
        select_follower = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, "//span[@class='Jv7Aj mArmR MqpiF ']/a"))).click()
        time.sleep(3)
        self.html = driver.page_source
        driver.quit()

    def parse(self, response):
        resp = Selector(text=self.html)
        links = resp.xpath("//span[@class='Jv7Aj mArmR MqpiF ']/a")
        for link in links:
            yield {
                'Title': link.xpath(".//div[@class='XBGH5']/h2/text()").get(),
                'Posts': link.xpath("(.//span[@class='g47SY '])[1]/text()").get(),
                'Followers': link.xpath("(.//span[@class='g47SY '])[2]/text()").get()
            }
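One observation that may explain at least part of the 'None' output (a guess, since I can't run this against Instagram): WebDriverWait(...).until(...) returns the located element, but chaining .click() onto it makes the whole expression evaluate to None, because WebElement.click() returns nothing. Every variable assigned that way (button, pop_up, followers, select_follower) is therefore always None. If you need the element afterwards, split the call:

followers = WebDriverWait(driver, 10).until(
    EC.element_to_be_clickable((By.XPATH, "//a[@href='/goat/followers/']"))
)
followers.click()  # click after capturing the element, so `followers` stays usable

Empty fields from parse() are a separate problem: they mean the class-based XPaths no longer match the snapshot stored in self.html, which is easy to verify by writing self.html to a file and inspecting it.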

Passing authenticated session from selenium to scrapy

I am trying to log in to a website using Selenium and then pass the authenticated session to Scrapy to extract data.
The issue is that after I pass the session to Scrapy, I am still not logged in.
class LoginSpider(scrapy.Spider):
    name = 'login'
    allowed_domains = ['*****']
    start_urls = ['*****']

    def __init__(self):
        self.driver = webdriver.Firefox()

    def start_requests(self):
        # driver = webdriver.Firefox()
        self.driver.get('*****')
        time.sleep(5)
        portalButton = self.driver.find_element_by_xpath('//*[@id="fb_submit"]')
        portalButton.click()
        time.sleep(2)
        self.driver.find_element_by_xpath('//*[@id="email"]').send_keys('******')
        self.driver.find_element_by_xpath('//*[@id="password"]').send_keys('******')
        self.driver.find_element_by_xpath('//*[@id="btn-login"]').click()
        time.sleep(5)
        for cookie in self.driver.get_cookies():
            c = {cookie['name']: cookie['value']}
        yield Request(url="****", cookies=c, callback=self.parse)

    def parse(self, response):
        # self.log("->>>>>>>>>>>>")
        open_in_browser(response)
        # view(response)
        self.log("->>>>>>>>>>>>")
I would suggest changing that step a bit:

for cookie in self.driver.get_cookies():
    c = {cookie['name']: cookie['value']}

to something like this:

_cookies = {cookie['name']: cookie['value'] for cookie in self.driver.get_cookies()}
yield Request(url="****", cookies=_cookies, callback=self.parse)

In each iteration the original loop re-creates c from scratch, so by the time the request is yielded it holds only the last cookie; the dict comprehension collects all of them.
My code example:
import time

import scrapy
from scrapy import Request
from scrapy.utils.response import open_in_browser
from selenium import webdriver
from selenium.webdriver.common.by import By

class LoginSpider(scrapy.Spider):
    name = 'login'
    start_urls = ['URL']

    def __init__(self):
        super().__init__()
        self.driver = webdriver.Chrome()

    def start_requests(self):
        self.driver.get('URL')
        time.sleep(5)
        self.driver.find_element(By.ID, 'email').send_keys('EMAIL')
        self.driver.find_element(By.ID, 'passwd').send_keys('PASSWORD')
        self.driver.find_element(By.ID, 'SubmitLogin').click()
        _cookies = {cookie['name']: cookie['value'] for cookie in self.driver.get_cookies()}
        yield Request(url='URL',
                      cookies=_cookies,
                      callback=self.parse)
        self.driver.quit()

    def parse(self, response, **kwargs):
        open_in_browser(response)
        self.log(response)
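One design note on the example above (my suggestion, not part of the original answer): start_requests is a generator, so the self.driver.quit() after the yield only runs when Scrapy resumes the generator, which makes the teardown timing easy to misread. Moving the cleanup into the spider's closed() hook makes it explicit:

def closed(self, reason):
    # Scrapy calls this once the spider finishes, regardless of outcome
    self.driver.quit()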

The website it opens using the automated software closes soon after it gets executed

The error could possibly be in line 7.
The script should open the browser, search for the specific word, and then the window should stay open until closed.
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options

class PythonOrgSearch(unittest.TestCase):
    def setUp(self):
        self.driver = webdriver.Chrome("C:/Users/daniy/AppData/Local/Programs/Python/Python37-32/Scripts/chromedriver.exe")

    def test_search_in_python_org(self):
        driver = self.driver
        driver.get("http://www.python.org")
        self.assertIn("Python", driver.title)
        driver.implicitly_wait(6000)
        elem = driver.find_element_by_name("q")
        driver.implicitly_wait(5000)
        elem.send_keys("pycon")
        driver.implicitly_wait(6000)
        elem.send_keys(Keys.RETURN)
        driver.implicitly_wait(6000)
        assert "No results found." not in driver.page_source
        driver.implicitly_wait(9000)

if __name__ == "__main__":
    unittest.main()
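If the complaint is that the browser window vanishes as soon as the test finishes (one reading of the title), that is simply the driver being cleaned up when the script exits. Chrome's detach option keeps the window open afterwards; a minimal sketch:

from selenium import webdriver
from selenium.webdriver.chrome.options import Options

options = Options()
options.add_experimental_option("detach", True)  # keep Chrome open after the script exits
driver = webdriver.Chrome(options=options)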
Try this out; it works for me.

driver.get("http://www.python.org")
self.assertIn("Python", driver.title)
elem = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.NAME, "q")))
elem.send_keys("pycon")
elem.send_keys(Keys.RETURN)
driver.implicitly_wait(5)
self.assertNotIn("No results found.", driver.page_source)

Imports:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

You should also use the ChromeDriver version that matches your browser; refer to this link: https://chromedriver.chromium.org/downloads
Try the code below; it should work.

import unittest
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options

class PythonOrgSearch(unittest.TestCase):
    def setUp(self):
        self.driver = webdriver.Chrome("C:/Users/daniy/AppData/Local/Programs/Python/Python37-32/Scripts/chromedriver.exe")

    def test_search_in_python_org(self):
        driver = self.driver
        driver.implicitly_wait(60)  # seconds
        driver.get("http://www.python.org")
        self.assertIn("Python", driver.title)
        elem = driver.find_element_by_name("q")
        elem.send_keys("pycon")
        elem.send_keys(Keys.RETURN)
        time.sleep(6)  # give the results page a moment to load
        assert "No results found." not in driver.page_source

if __name__ == "__main__":
    unittest.main()
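A note on the waits used throughout this question, since the values look like a units mix-up: implicitly_wait() takes seconds, not milliseconds; it only sets an upper bound on how long element lookups retry, and it never pauses the script by itself. time.sleep() blocks unconditionally. When you need to pause until a condition holds, an explicit wait is usually the better tool:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# waits up to 10 seconds, but returns as soon as the element is present
elem = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.NAME, "q"))
)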

Unable to parse an element from eBay via BeautifulSoup

I am trying to scrape the "number of items sold" on eBay, but for some reason I cannot. I already have the title and price; all I need is total_sold_price, which I am unable to obtain. Every time I run my code, total_sold_price just comes back blank.
try:
    title_selenium = driver.find_element_by_xpath('//*[@id="itemTitle"]').text
except:
    title_selenium = ""
try:
    price_selenium = driver.find_element_by_xpath('//*[@id="prcIsum"]').text.strip().split()
except:
    price_selenium = ""
try:
    total_sold_price_BeautifulSoup = soup.find('span', {'class': 'vi-qtyS-hot-red'}).text
except:
    total_sold_price_BeautifulSoup = ""
My entire code: https://pastebin.com/bu8HgCDZ
Thank you so much.
Fixed it for you: you need to make the soup call inside your loop.
Note: I am using the path '../chromedriver'; please change it to your own path before running the code.
Code:
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from bs4 import BeautifulSoup

driver = webdriver.Chrome('../chromedriver')
driver.get('https://www.ebay.com/sch/i.html?_from=R40&_nkw=watches&_sacat=0&_pgn=1')
soup = BeautifulSoup(driver.page_source, 'lxml')
driver.maximize_window()

tempList = []
for link in soup.find_all('a', href=True):
    if 'itm' in link['href']:
        print(link['href'])
        tempList.append(link['href'])

for item_url in tempList:
    driver.get(item_url)
    timeout = 5
    try:
        element_present = EC.presence_of_element_located((By.XPATH, '//*[@id="itemTitle"]'))
        WebDriverWait(driver, timeout).until(element_present)
    except TimeoutException:
        print("Timed out waiting for page to load")
    try:
        title_selenium = driver.find_element_by_xpath('//*[@id="itemTitle"]').text
    except:
        title_selenium = ""
    try:
        price_selenium = driver.find_element_by_xpath('//*[@id="prcIsum"]').text.strip().split()
    except:
        price_selenium = ""
    # you need to re-create the soup here, on every page load: the soup built
    # before the loop is a static snapshot of the search results page only
    soup = BeautifulSoup(driver.page_source, 'lxml')
    try:
        total_sold_price_BeautifulSoup = soup.find('span', {'class': 'vi-qtyS-hot-red'}).text
    except:
        total_sold_price_BeautifulSoup = ""
    print("title: ", title_selenium)
    print("price: ", price_selenium)
    print("total_sold_price: ", total_sold_price_BeautifulSoup)
    print("\n")

driver.close()
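One small refinement to the answer above (my addition, not part of the original fix): eBay result pages typically link each item more than once, so tempList usually contains duplicates and the loop revisits the same pages. De-duplicating while preserving order avoids that:

# dict.fromkeys keeps first-seen order while dropping repeated URLs
tempList = list(dict.fromkeys(tempList))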