Python Selenium unable to fill input box by ID or XPath

I have the following code:
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait, Select
from selenium.webdriver.common.keys import Keys
if __name__ == '__main__':
    path_to_chromedriver = r'C:\chromedriver'  # change path as needed
    browser = webdriver.Chrome(executable_path=path_to_chromedriver)
    wait = WebDriverWait(browser, 10)
    browser.get("https://pjm.com/")
    wait.until(EC.presence_of_element_located((By.XPATH, "/html/body/form/div[3]/div/div[1]/div/div[1]/span[2]"))).click()
    wait.until(EC.presence_of_element_located((By.ID, "IDToken1"))).send_keys("user")
    wait.until(EC.presence_of_element_located((By.ID, "IDToken2"))).send_keys("pwd")
But the last two lines fail to execute, and I don't have a clue why.

I see that the click() will open a new tab, in which case you have to switch to that tab before proceeding with login details:
browser.switch_to.window(browser.window_handles[1])
wait.until(EC.presence_of_element_located((By.ID, "IDToken1"))).send_keys("user")
wait.until(EC.presence_of_element_located((By.ID, "IDToken2"))).send_keys("pwd")
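If the new tab takes a moment to open, it can be more reliable to wait for the second window handle before switching. A minimal sketch, assuming a Selenium release that ships the EC.number_of_windows_to_be expected condition:
wait.until(EC.number_of_windows_to_be(2))  # wait until the click has actually opened the second tab
browser.switch_to.window(browser.window_handles[1])
wait.until(EC.presence_of_element_located((By.ID, "IDToken1"))).send_keys("user")
wait.until(EC.presence_of_element_located((By.ID, "IDToken2"))).send_keys("pwd")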

Related

Selenium with the Instagram web assistant extension INSSIST

I am trying to click the [+] button in the INSSIST extension using Selenium, but I get an error every time. I just want to know if it is possible to click it; I can't figure it out.
For the code to work, make sure you have the Chrome INSSIST extension installed and have closed all instances of Chrome; otherwise Selenium will not load correctly, because it runs as a background extension as well.
Here is the online Instagram alternative called INSSIST:
https://chrome.google.com/webstore/detail/inssist-web-client-for-in/bcocdbombenodlegijagbhdjbifpiijp
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import time
chrome_options = Options()
chrome_options.add_experimental_option("detach", True)
chrome_options.add_argument(r"user-data-dir=C:\\Users\\Alexander Smith\\AppData\\Local\\Google\\Chrome\\User Data")
chrome_options.add_argument('profile-directory=Profile 2')
driver = webdriver.Chrome(executable_path=r"C:\\Users\\Alexander Smith\\Desktop\\chromedriver.exe", options=chrome_options)
driver.get("chrome-extension://bcocdbombenodlegijagbhdjbifpiijp/inssist.html#instagram.com/")
print("Good")
create = WebDriverWait(driver,9).until(EC.element_to_be_clickable((By.XPATH, '//\*\[#id="mount_0_0_3c"\]/div/div/div/div\[1\]/div/div/div/div\[1\]/div\[1\]/div\[1\]/div/div/div/div/div\[1\]/div')))
print("Good")
ActionChains(driver).move_to_element(create)
print("Good")
create.send_keys(Keys.RETURN)
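If the click still fails on the extension's UI, one fallback worth trying is dispatching the click through JavaScript, which sidesteps overlay and visibility checks. This is a sketch, not a guaranteed fix, and it assumes the element located above is the actual clickable node:
driver.execute_script("arguments[0].click();", create)  # JS click on the located [+] element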

Keep page refresh Selenium

I am struggling to keep the session open all the time. The code executes, but only when the link is present.
driver.refresh
doesn't seem to keep the page refreshed.
Is there a way to refresh the page every couple of seconds?
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.chrome.options import Options
user_name = "username"
password = "password"
driver = webdriver.Chrome()
driver.get('https://mywebsite.com/boardslots.aspx?date=07%2F31%2F2022')
element = driver.find_element(By.ID,"txtUsername")
element.send_keys(user_name)
element = driver.find_element(By.ID,"txtPassword")
element.send_keys(password)
element.send_keys(Keys.RETURN)
while True:
    try:
        driver.find_element(By.LINK_TEXT, "Claim")
    except NoSuchElementException:
        driver.refresh
    else:
        driver.find_element(By.LINK_TEXT, "Claim").click()
        try:
            # element = WebDriverWait(driver,1)
            element = WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH, '/html/body/form/div[3]/div[3]/table/tbody/tr[2]/td/table/tbody/tr[2]/td[9]/a')))
        finally:
            driver.find_element(By.ID, "btnSubmitSingle").click()
            # /html/body/form/div[3]/div[3]/table/tbody/tr[2]/td/table/tbody/tr[2]/td[9]/a
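One likely culprit: driver.refresh without parentheses only references the method and never calls it, so the page never actually reloads. A minimal sketch of a refresh-every-few-seconds loop, assuming the "Claim" link is what is being polled (the 2-second interval is an arbitrary choice):
import time

while True:
    try:
        driver.find_element(By.LINK_TEXT, "Claim").click()
        break  # stop polling once the link appears and is clicked
    except NoSuchElementException:
        driver.refresh()  # note the parentheses: refresh is a method call
        time.sleep(2)     # wait a couple of seconds between refreshes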

Building a web scraper using Selenium

I'm building a web scraper using Selenium. I was able to read the data from the table on the first and second pages; however, I cannot read the data on the following pages. Can anybody help me?
Below is the code I am using.
NoSuchElementException: Message: no such element: Unable to locate element:
{"method":"xpath","selector":"//table[1]/tbody[1]/tr[@class='painel' and 1]/td[2]/a[1 and @href='javascript:pesquisar(2);']"}
(Session info: headless chrome=86.0.4240.75)
import time
import requests
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver import ActionChains
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import os
import json
url = 'https://www.desaparecidos.pr.gov.br/desaparecidos/desaparecidos.do?action=iniciarProcesso&m=false'
option = Options()
driver = webdriver.Chrome('chromedriver', chrome_options=option)  # pass the Options object created above
driver.get(url)
time.sleep(5)
lista = driver.find_element_by_xpath('//*[@id="list_tabela"]/tbody')
lista_text = lista.text
print(lista_text)
driver.implicitly_wait(5)
driver.find_element_by_xpath("//table[1]/tbody[1]/tr[@class='painel' and 1]/td[2]/a[1 and @href='javascript:pesquisar(2);']").click()
time.sleep(5)
lista = driver.find_element_by_xpath('//*[@id="list_tabela"]/tbody')
lista_text = lista.text
print(lista_text)
driver.implicitly_wait(10)
driver.find_element_by_xpath("//table[1]/tbody[1]/tr[@class='painel' and 1]/td[2]/a[3 and @href='javascript:pesquisar(3);']").click()
time.sleep(10)
lista = driver.find_element_by_xpath('//*[@id="list_tabela"]/tbody')
lista_text = lista.text
print(lista_text)
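Rather than hard-coding a new XPath for every page, one option is to loop and call the page's own pesquisar() pagination function directly. This is a sketch under the assumption that pesquisar(n) is globally callable on that page, which the link hrefs suggest but which is untested here:
for page in range(2, 6):  # pages 2 through 5; adjust the range as needed
    driver.execute_script("pesquisar({});".format(page))  # assumes the page's own JS function is reachable
    time.sleep(5)  # crude wait for the table to reload
    lista = driver.find_element_by_xpath('//*[@id="list_tabela"]/tbody')
    print(lista.text)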

Selenium scraping JS loaded pages

I'm trying to scrape some of the JS-loaded data from https://surviv.io/stats/player787, such as the number of total kills. Could someone tell me how I can scrape the JS-loaded data with Selenium? Thanks.
EDIT: Here is some of the code
from selenium import webdriver
browser = webdriver.Firefox()
browser.get('https://surviv.io/stats/player787')
b = browser.find_element_by_tag_name('tr')
The 'tr' that contains the data I want is not grabbed by Selenium.
To get the count of kills, induce WebDriverWait with visibility_of_all_elements_located():
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
browser = webdriver.Firefox()
browser.get('https://surviv.io/stats/player787')
allkills = WebDriverWait(browser, 20).until(EC.visibility_of_all_elements_located((By.XPATH, "//div[@class='card-mode-stat-name' and text()='KILLS']/following-sibling::div[1]")))
for item in allkills:
    print(item.text)
The reason it's not finding it is that the page isn't fully rendered. You can add a wait with Selenium so it will not move on until the specified element is rendered first.
Also, if it's in a <table> tag, let pandas do the parsing for you: it uses BeautifulSoup under the hood to pull out the <table>, <th>, <tr>, and <td> tags and returns them as a list of DataFrames once you get the rendered HTML source:
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
import pandas as pd
browser = webdriver.Chrome('C:/chromedriver_win32/chromedriver.exe')
browser.get('https://surviv.io/stats/player787')
delay = 3 # seconds
WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.CLASS_NAME, 'player-stats-overview')))
df = pd.read_html(browser.page_source)[0]
print(df.loc[0,'Kills'])
browser.close()
Output:
18884
print(df)
Wins Kills Games K/G
0 638 18884 8896 2.1
You could avoid the overhead of a browser and simply mimic the POST request the page makes.
import requests
headers = {'content-type': 'application/json; charset=UTF-8'}
data = {"slug":"player787","interval":"all","mapIdFilter":"-1"}
r = requests.post('https://surviv.io/api/user_stats', headers=headers, json=data)
data = r.json()
desired_stats = ['wins', 'kills', 'games', 'kpg']
for stat in desired_stats:
    print(stat, ':', data[stat])
For OP: the payload is visible in the network tab when you click on the appropriate XHR request indicated by the URL in my answer (you need to scroll down to see the payload info).
To scrape the values 652, 19152, 8926, 2.1, etc. from JS-loaded pages you have to induce WebDriverWait for visibility_of_all_elements_located(), and you can use either of the following locator strategies:
Using CSS_SELECTOR:
driver.get('https://surviv.io/stats/player787')
print([my_elem.get_attribute("innerHTML") for my_elem in WebDriverWait(driver, 5).until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR, "table.player-stats-overview td")))])
Using XPATH:
driver.get('https://surviv.io/stats/player787')
print([my_elem.get_attribute("innerHTML") for my_elem in WebDriverWait(driver, 5).until(EC.visibility_of_all_elements_located((By.XPATH, "//table[@class='player-stats-overview']//td")))])
Console Output:
['652', '19152', '8926', '2.1']
Note : You have to add the following imports :
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC

AttributeError: 'element_to_be_clickable' object has no attribute 'click' python3.7

As my code shows, I am trying to automate the process of logging in and other things using Selenium in Python 3.7. I am stuck as it is showing "AttributeError: 'element_to_be_clickable' object has no attribute 'click'" on the line botton_to_click.click().
from bs4 import BeautifulSoup
from bs4.element import Tag
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
base = 'https://www.wsj.com'
url = 'https://www.wsj.com/search/term.html?KEYWORDS=cybersecurity&min-date=2018/04/01&max-date=2019/03/31&isAdvanced=true&daysback=90d&andor=AND&sort=date-desc&source=wsjarticle,wsjpro&page=1'
browser = webdriver.Safari(executable_path='/usr/bin/safaridriver')
browser.get(url)
browser.find_element_by_id('editions-select').click()
browser.find_element_by_id('na,us').click()
botton_to_click = WebDriverWait(browser, 10).until(EC.element_to_be_clickable, ((By.XPATH,"//button[#type='button' and contains(.,'Sign In')]")))
botton_to_click.click()
browser.find_element_by_id('username').send_keys('##$%*&^%##$')
browser.find_element_by_id('password').send_keys('##*$%^!#')
browser.find_element_by_id('basic-login').click()
browser.find_element_by_id('masthead-container').click()
browser.find_element_by_id('searchInput').send_keys('cybersecurity')
browser.find_element_by_name('ADVANCED SEARCH').click()
browser.find_element_by_id('dp1560924131783').send_keys('2018/04/01')
browser.find_element_by_id('dp1560924131784').send_keys('2019/03/31')
browser.find_element_by_id('wsjblogs').click()
browser.find_element_by_id('wsjvideo').click()
browser.find_element_by_id('interactivemedia').click()
browser.find_element_by_id('sitesearch').click()
browser.close()
Thanks.
Remove the comma after element_to_be_clickable as shown below; it should resolve your issue. With the comma, until() receives EC.element_to_be_clickable itself as its condition callable and the locator tuple as its message argument, so the wait returns the expected-condition object instead of a WebElement, and that object has no click() method.
botton_to_click = WebDriverWait(browser, 10).until(EC.element_to_be_clickable, ((By.XPATH,"//button[#type='button' and contains(.,'Sign In')]")))
to
botton_to_click = WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.XPATH,"//button[#type='button' and contains(.,'Sign In')]")))