Error whilst requesting a login to a site - beautifulsoup

Last EDIT:
It seems that I am connecting to the website, but for some reason my code raises an error. I probably do something wrong with BeautifulSoup; I suspect I should change something in the url variable.
SOLUTION
The mistake I was making was:
First I logged in to the website with the requests module, then I reopened the site with the urllib.request module, which is why I was no longer logged in as a user. Thank you all!
import os
import requests
from bs4 import BeautifulSoup as BS

def connect():
    with requests.Session() as c:
        urll2 = "http://www.oddscheck.net/inc/userlogin.php"
        payload = {
            'useremail': 'email@gmail.com',
            'userpassword': 'password',
            'PHPSESSID': 'ih79c4t5srr6p2',
            'CF-RAY': '35f784ad1-ATH'
        }
        headers = {}
        headers['User-Agent'] = 'Mozilla/5.0 (X11; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0'
        c.post(urll2, data=payload, headers={"Referer": "http://www.oddscheck.net/?page=home&cmd=def"})
        url = c.get('http://www.oddscheck.net/index.php?page=myaccount')
        """ [MISTAKE]
        req = urllib.request.Request(url, headers=headers)
        resp = urllib.request.urlopen(req)
        soup = BS(resp, "html.parser")
        [/MISTAKE]
        """
        soup = BS(url.text, "html.parser")
        gamesave = ""
        for record in soup.find_all("tr"):
            game = game1 = ""
            for data in record.find_all("td"):
                if data.get('class') == ['centertd', 'col_10']:
                    for link in data.find_all("a"):
                        game1 += ", " + "http://www.oddscheck.net/" + link.get('href')
                else:
                    game += "," + data.text
            game2 = game + game1
            if len(game2) > 40:
                gamesave += game[1:] + "," + game1[1:] + "\n"
        #header = "League,Time,1,X,2,U,O,Link"+"\n"
        file = open(os.path.expanduser("Odss.txt"), "wb")
        #file.write(bytes(header, encoding="UTF-8", errors="ignore"))
        file.write(bytes(gamesave, encoding="UTF-8", errors="ignore"))
        file.close()
ERROR I GET:
Traceback (most recent call last):
File "sportingbet.py", line 314, in <module>
connect()
File "sportingbet.py", line 27, in connect
req = urllib.request.Request(url, headers=headers)
File "/usr/lib/python3.5/urllib/request.py", line 269, in __init__
self.full_url = url
File "/usr/lib/python3.5/urllib/request.py", line 295, in full_url
self._parse()
File "/usr/lib/python3.5/urllib/request.py", line 324, in _parse
raise ValueError("unknown url type: %r" % self.full_url)
ValueError: unknown url type: '<Response [200]>'

The value you pass as the URL must actually be the URL string the server expects, not the Response object that c.get() returns. Once you pass the right variable (or simply parse url.text from the session, as in the corrected code above), it should run.
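For reference, a minimal sketch of the working flow the question converges on: log in and fetch the account page inside one requests.Session, and hand the response text (not the Response object) to BeautifulSoup. The URL and form fields are copied from the question and may differ on the real site.

import requests
from bs4 import BeautifulSoup as BS

LOGIN_URL = "http://www.oddscheck.net/inc/userlogin.php"
ACCOUNT_URL = "http://www.oddscheck.net/index.php?page=myaccount"

with requests.Session() as session:
    # Log in once; the session keeps the cookies for the next request.
    session.post(LOGIN_URL,
                 data={"useremail": "email@gmail.com", "userpassword": "password"},
                 headers={"Referer": "http://www.oddscheck.net/?page=home&cmd=def"})
    response = session.get(ACCOUNT_URL)      # still authenticated, same session
    soup = BS(response.text, "html.parser")  # parse the HTML text, not the Response object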

How to know if twitch streamer is live and send message about that? Discord.py

I'm writing a Discord bot and I want to save the name of a streamer in a separate file, so that when they go live the bot sends a message about it in a specific channel.
This is what I tried:
import os
import json
import discord
import requests
from discord.ext import tasks, commands
from twitchAPI.twitch import Twitch
from discord.utils import get
intents = discord.Intents.all()
bot = commands.Bot(command_prefix='$', intents=intents)
Authentication with twitch API:
client_id = os.getenv('client_id')
client_secret = os.getenv('Dweller_token')
twitch = Twitch(client_id, client_secret)
twitch.authenticate_app([])
TWITCH_STREAM_API_ENDPOINT_V5 = "https://api.twitch.tv/dweller/streams/{}"
API_HEADERS = {
'Client-ID': client_id,
'Accept': 'application/vnd.twitchtv.v5+json',
}
Returns True if online, False if not:
def checkuser(user):
    try:
        userid = twitch.get_users(logins=[user])['data'][0]['id']
        url = TWITCH_STREAM_API_ENDPOINT_V5.format(userid)
        try:
            req = requests.Session().get(url, headers=API_HEADERS)
            jsondata = req.json()
            if 'stream' in jsondata:
                if jsondata['stream'] is not None:
                    return True
                else:
                    return False
        except Exception as e:
            print("Error checking user: ", e)
            return False
    except IndexError:
        return False
Bot event. It continuously checks whether the streamers are live, sends a message when one goes live, and gives that streamer a specific role while they are live:
@bot.event
async def on_ready():
    # Defines a loop that will run every 10 seconds (checks for live users every 10 seconds).
    @tasks.loop(seconds=10)
    async def live_notifs_loop():
        # Opens and reads the json file
        with open('streamers.json', 'r') as file:
            streamers = json.loads(file.read())
        # Makes sure the json isn't empty before continuing.
        if streamers is not None:
            # Gets the guild, 'twitch streams' channel, and streaming role.
            guild = bot.get_guild(1234567890)
            channel = bot.get_channel(1234567890)
            role = get(guild.roles, id=1234567890)
            # Loops through the json and gets the key,value which in this case is the user_id and twitch_name of
            # every item in the json.
            for user_id, twitch_name in streamers.items():
                # Takes the given twitch_name and checks it using the checkuser function to see if they're live.
                # Returns either true or false.
                status = checkuser(twitch_name)
                # Gets the user using the collected user_id in the json
                user = bot.get_user(int(user_id))
                # Makes sure they're live
                if status is True:
                    # Checks to see if the live message has already been sent.
                    async for message in channel.history(limit=200):
                        # If it has, break the loop (do nothing).
                        if str(user.mention) in message.content and "is now streaming" in message.content:
                            break
                    # If it hasn't, assign them the streaming role and send the message.
                    else:
                        # Gets all the members in your guild.
                        async for member in guild.fetch_members(limit=None):
                            # If one of the id's of the members in your guild matches the one from the json and
                            # they're live, give them the streaming role.
                            if member.id == int(user_id):
                                await member.add_roles(role)
                        # Sends the live notification to the 'twitch streams' channel then breaks the loop.
                        await channel.send(
                            f":red_circle: **LIVE**\n{user.mention} is now streaming on Twitch!"
                            f"\nhttps://www.twitch.tv/{twitch_name}")
                        print(f"{user} started streaming. Sending a notification.")
                        break
                # If they aren't live do this:
                else:
                    # Gets all the members in your guild.
                    async for member in guild.fetch_members(limit=None):
                        # If one of the id's of the members in your guild matches the one from the json and they're not
                        # live, remove the streaming role.
                        if member.id == int(user_id):
                            await member.remove_roles(role)
                    # Checks to see if the live notification was sent.
                    async for message in channel.history(limit=200):
                        # If it was, delete it.
                        if str(user.mention) in message.content and "is now streaming" in message.content:
                            await message.delete()
    # Start your loop.
    live_notifs_loop.start()
Command that adds 'chosen' streamers to json file:
# Command to add Twitch usernames to the json.
@bot.command(name='addtwitch', help='Adds your Twitch to the live notifs.', pass_context=True)
async def add_twitch(ctx, twitch_name):
    # Opens and reads the json file.
    with open('streamers.json', 'r') as file:
        streamers = json.loads(file.read())
    # Gets the users id that called the command.
    user_id = ctx.author.id
    # Assigns their given twitch_name to their discord id and adds it to the streamers.json.
    streamers[user_id] = twitch_name
    # Adds the changes we made to the json file.
    with open('streamers.json', 'w') as file:
        file.write(json.dumps(streamers))
    # Tells the user it worked.
    await ctx.send(f"Added {twitch_name} for {ctx.author} to the notifications list.")

print('Server Running')
bot.run(os.getenv('token'))
I want to type '$add_twitch turb4ik' so that the bot saves the streamer turb4ik in streamers.json and checks whether that streamer is live. If they are, it should send a notification in a specific channel. But it doesn't seem to work, and I get this error:
Unhandled exception in internal background task 'live_notifs_loop'.
Traceback (most recent call last):
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/discord/ext/tasks/__init__.py", line 101, in _loop
await self.coro(*args, **kwargs)
File "main.py", line 62, in live_notifs_loop
streamers = json.loads(file.read())
File "/usr/lib/python3.8/json/__init__.py", line 357, in loads
return _default_decoder.decode(s)
File "/usr/lib/python3.8/json/decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/lib/python3.8/json/decoder.py", line 355, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
Ignoring exception in command None:
discord.ext.commands.errors.CommandNotFound: Command "add_twitch" is not found
I also tried this piece of code; it gives me information about the channel, but it doesn't give me the streamer's status:
client_id = os.getenv('client_id')
oauth_token = os.getenv('Dweller_token')
twitch = Twitch(client_id, oauth_token)
twitch.authenticate_app([])
user_info = twitch.get_users(logins=['turb4ik'])
user_id = user_info['data'][0]['id']
print(user_info)
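If your installed twitchAPI version exposes a synchronous get_streams() call in the same style as the get_users() call above (worth checking against its docs, since this is an assumption), a sketch of a live check could look like this:

def is_live(login_name):
    # Look up the numeric user id for the login name.
    users = twitch.get_users(logins=[login_name])
    if not users['data']:
        return False
    user_id = users['data'][0]['id']
    # The streams endpoint returns a non-empty 'data' list only while the channel is live.
    streams = twitch.get_streams(user_id=[user_id])
    return len(streams['data']) > 0

print(is_live('turb4ik'))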
And there is one more problem: every time I start my bot it says that twitchAPI is not installed, and I have to install it again. Sometimes the bot seems to forget about twitchAPI, goes offline, and says that I need to install twitchAPI once more.
I know this is hard, but please help me. Maybe I should do it with SQL (the sqlite3 library) or something similar. Much obliged!
Edit:
Another error:
Unhandled exception in internal background task 'live_notifs_loop'.
Traceback (most recent call last):
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/discord/ext/tasks/__init__.py", line 101, in _loop
await self.coro(*args, **kwargs)
File "main.py", line 78, in live_notifs_loop
streamers = json.load(file)
File "/usr/lib/python3.8/json/__init__.py", line 293, in load
return loads(fp.read(),
File "/usr/lib/python3.8/json/__init__.py", line 357, in loads
return _default_decoder.decode(s)
File "/usr/lib/python3.8/json/decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/lib/python3.8/json/decoder.py", line 355, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
Ignoring exception in command addtwitch:
Traceback (most recent call last):
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/discord/ext/commands/core.py", line 85, in wrapped
ret = await coro(*args, **kwargs)
File "main.py", line 136, in add_twitch
streamers = json.load(file)
File "/usr/lib/python3.8/json/__init__.py", line 293, in load
return loads(fp.read(),
File "/usr/lib/python3.8/json/__init__.py", line 357, in loads
return _default_decoder.decode(s)
File "/usr/lib/python3.8/json/decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/lib/python3.8/json/decoder.py", line 355, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/discord/ext/commands/bot.py", line 902, in invoke
await ctx.command.invoke(ctx)
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/discord/ext/commands/core.py", line 864, in invoke
await injected(*ctx.args, **ctx.kwargs)
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/discord/ext/commands/core.py", line 94, in wrapped
raise CommandInvokeError(exc) from exc
discord.ext.commands.errors.CommandInvokeError: Command raised an exception: JSONDecodeError: Expecting value: line 1 column 1 (char 0)
Unhandled exception in internal background task 'live_notifs_loop'.
Traceback (most recent call last):
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/discord/ext/tasks/__init__.py", line 101, in _loop
await self.coro(*args, **kwargs)
File "main.py", line 62, in live_notifs_loop
streamers = json.loads(file.read())
File "/usr/lib/python3.8/json/__init__.py", line 357, in loads
return _default_decoder.decode(s)
File "/usr/lib/python3.8/json/decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/lib/python3.8/json/decoder.py", line 355, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
Instead of streamers = json.loads(file.read()) use streamers = json.load(file)
discord.ext.commands.errors.CommandNotFound: Command "add_twitch" is not found
Since you are defining your command with the name='addtwitch' argument, you can only call it with $addtwitch user. To avoid this, add the aliases=['add_twitch'] argument:
@bot.command(name='addtwitch', help='Adds your Twitch to the live notifs.', pass_context=True, aliases=['add_twitch'])
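Putting both suggestions together, a minimal sketch of the command might look like the following. The streamers.json file name comes from the question; the empty-file guard is an extra assumption so a brand-new file doesn't raise the same JSONDecodeError:

import json
import os

@bot.command(name='addtwitch', aliases=['add_twitch'],
             help='Adds your Twitch to the live notifs.', pass_context=True)
async def add_twitch(ctx, twitch_name):
    # Guard against a missing or empty file, which json.load() would reject.
    if os.path.exists('streamers.json') and os.path.getsize('streamers.json') > 0:
        with open('streamers.json', 'r') as file:
            streamers = json.load(file)
    else:
        streamers = {}
    # Map the caller's Discord id to the Twitch name and write it back out.
    streamers[str(ctx.author.id)] = twitch_name
    with open('streamers.json', 'w') as file:
        json.dump(streamers, file)
    await ctx.send(f"Added {twitch_name} for {ctx.author} to the notifications list.")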

python opencv2, error camera interface, only cam light

This is the code for the OpenCV TakeImage button.
It doesn't work properly: only the camera light turns on, but the camera window doesn't show:
def TakeImage():
    Id = (txt.get())
    name = (txt2.get())
    if (is_number(Id) and name.isalpha()):
        Video = cv2.VideoCapture(0)
        harcascadePath = "haarcascade_frontalfacedafults_.xml"
        detector = cv2.CascadeClassifier(harcascadePath)
        sampleNum = 0
        while (True):
            ret, img = Video.read();
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = detector.DetectMultiScale(gray, 1.2, 5);
            for (x, y, w, h) in faces:
                cv2.recatangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
                sampleNum = sampleNum + 1
                cv2.imwrite("TrainImages\ " + name + "." + Id + '.' + str(samlpeNum) + ".jpg", gray[y:y+h, x:x+h])
                cv2.imshow('Frame', img)
            if cv2.waitKey(100) & 0XFF == ord('s'):
                break
            elif sample > 60:
                break
        Video.release()
        cv2.destroyAllWindows()
        res = "Images Saved for ID: " + Id + " Name : " + name
        row = [Id, Name]
        with open('studentDetails\studentDetails.csv', 'a+') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerow(row)
        csvFile.close()
        message.configure(text=res)
    else:
        if (is_number(Id)):
            res = "Enter Alphabetical Name"
            message.configure(text=res)
        if (name.isalpha()):
            res = "Enter Numberic Id"
            message.configure(text=res)
Error showing:
Exception in Tkinter callback
Traceback (most recent call last):
  File "C:\Users\Lenovo\AppData\Local\Programs\Python\Python37\lib\tkinter\__init__.py", line 1705, in __call__
    return self.func(*args)
  File "C:\Users\Lenovo\Desktop\face reconiger system.py", line 87, in TakeImage
    faces=detector.DetectMultiScale(gray,1.2,5);
AttributeError: 'cv2.CascadeClassifier' object has no attribute 'DetectMultiScale'
The path to the .xml file looks incorrect. You need to replace the following line:
harcascadePath = "haarcascade_frontalfacedafults_.xml"
with
harcascadePath = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
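For context, a minimal sketch of loading the bundled cascade and running detection. Note that the OpenCV method is spelled detectMultiScale with a lowercase d, which is what the AttributeError in the traceback is complaining about; the variable names here are illustrative:

import cv2

# Load the cascade file that ships with OpenCV.
cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
detector = cv2.CascadeClassifier(cascade_path)

video = cv2.VideoCapture(0)
ret, img = video.read()
if ret:
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # The method name is detectMultiScale, not DetectMultiScale.
    faces = detector.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
video.release()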

TypeError: '<' not supported between instances of 'str' and 'int' Doc2Vec

Any ideas why this error is being thrown:
"TypeError: '<' not supported between … 'str' and 'int'" when a doc-tag is not present for most_similar()?
I have a list of .txt documents stored in my data folder and want to compare one doc to another through my Flask app on localhost.
Traceback (most recent call last):
File "C:\Users\ibrahimm\AppData\Local\Continuum\anaconda3\lib\site-packages\flask\app.py", line
2463, in __call__
return self.wsgi_app(environ, start_response)
File "C:\Users\ibrahimm\AppData\Local\Continuum\anaconda3\lib\site-packages\flask\app.py", line
2449, in wsgi_app
response = self.handle_exception(e)
File "C:\Users\ibrahimm\AppData\Local\Continuum\anaconda3\lib\site-packages\flask\app.py", line
1866, in handle_exception
reraise(exc_type, exc_value, tb)
File "C:\Users\ibrahimm\AppData\Local\Continuum\anaconda3\lib\site-packages\flask\_compat.py", line
39, in reraise
raise value
File "C:\Users\ibrahimm\AppData\Local\Continuum\anaconda3\lib\site-packages\flask\app.py", line
2446, in wsgi_app
response = self.full_dispatch_request()
File "C:\Users\ibrahimm\AppData\Local\Continuum\anaconda3\lib\site-packages\flask\app.py", line
1951, in full_dispatch_request
rv = self.handle_user_exception(e)
File "C:\Users\ibrahimm\AppData\Local\Continuum\anaconda3\lib\site-packages\flask\app.py", line
1820,
in handle_user_exception
reraise(exc_type, exc_value, tb)
File "C:\Users\ibrahimm\AppData\Local\Continuum\anaconda3\lib\site-packages\flask\_compat.py", line
39, in reraise
raise value
File "C:\Users\ibrahimm\AppData\Local\Continuum\anaconda3\lib\site-packages\flask\app.py", line
1949,
in full_dispatch_request
rv = self.dispatch_request()
File "C:\Users\ibrahimm\AppData\Local\Continuum\anaconda3\lib\site-packages\flask\app.py", line
1935,
in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "C:\Users\ibrahimm\Desktop\doc2vec-compare-doc-demo\app.py", line 56, in api_compare_2
vec1 = d2v_model.docvecs.most_similar(data['doc1'])
File "C:\Users\ibrahimm\AppData\Local\Continuum\anaconda3\lib\site-
packages\gensim\models\keyedvectors.py", line 1715, in most_similar
elif doc in self.doctags or doc < self.count:
TypeError: '<' not supported between instances of 'str' and 'int'
app.py
@app.route('/api/compare_2', methods=['POST'])
def api_compare_2():
    data = request.get_json()
    if not 'doc1' in data or not 'doc2' in data:
        return 'ERROR'
    vec1 = d2v_model.docvecs.most_similar(data['doc1'])
    vec2 = d2v_model.docvecs.most_similar(data['doc2'])
    vec1 = gensim.matutils.full2sparse(vec1)
    vec2 = gensim.matutils.full2sparse(vec2)
    print(data)
    print(vec2)
    print(vec1)
    return jsonify(sim=gensim.matutils.cossim(vec1, vec2))

@app.route('/api/compare_all', methods=['POST'])
def api_compare_all():
    data = request.get_json()
    if not 'doc' in data:
        return 'ERROR'
    vec = d2v_model.docvecs.most_similar(data['doc'])
    res = d2v_model.docvecs.most_similar([vec], topn=5)
    return jsonify(list=res)
model.py
def load_model():
    try:
        return gensim.models.doc2vec.Doc2Vec.load("doc2vec.model2")
    except:
        print('Model not found!')
        return None

def train_model():
    # path to the input corpus files
    data = "data"

    # tagging the text files
    class DocIterator(object):
        def __init__(self, doc_list, labels_list):
            self.labels_list = labels_list
            self.doc_list = doc_list

        def __iter__(self):
            for idx, doc in enumerate(self.doc_list):
                yield TaggedDocument(words=doc.split(), tags=[self.labels_list[idx]])

    docLabels = [f for f in listdir(data) if f.endswith('.txt')]
    print(docLabels)
    data = []
    for doc in docLabels:
        data.append(open(r'C:\Users\ibrahimm\Desktop\doc2vec-compare-doc-demo\data\\' + doc,
                         encoding='cp437').read())
    tokenizer = RegexpTokenizer(r'\w+')
    stopword_set = set(stopwords.words('english'))

    # This function does all cleaning of data using the two objects above
    def nlp_clean(data):
        new_data = []
        for d in data:
            new_str = d.lower()
            dlist = tokenizer.tokenize(new_str)
            dlist = list(set(dlist).difference(stopword_set))
            new_data.append(dlist)
        return new_data

    data = nlp_clean(data)
    it = DocIterator(data, docLabels)
    # train doc2vec model
    model = gensim.models.Doc2Vec(size=300, window=15, min_count=4, workers=10, alpha=0.025, min_alpha=0.025, iter=20)  # use fixed learning rate
    model.build_vocab(it)
    model.train(it, epochs=model.iter, total_examples=model.corpus_count)
    model.save("doc2vec.model2")
If you try to look-up a string doc-tag that's not in the model, you unfortunately get this confusing error, instead of a clearer error. (See gensim's open-issue: https://github.com/RaRe-Technologies/gensim/issues/1737#issuecomment-346995119 )
Whatever is in data['doc1'] isn't a tag in the model.
You may be able to pre-check, before attempting a most_similar() operation, by looking at whether data['doc1'] in model.docvecs is True.
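For example, a small guard along these lines avoids the confusing TypeError (a sketch; safe_most_similar is a hypothetical helper, and d2v_model / data come from the question's handler):

def safe_most_similar(model, tag):
    # `tag in model.docvecs` is True only for tags seen during training,
    # so unknown strings never reach most_similar() and its TypeError.
    if tag not in model.docvecs:
        return None
    return model.docvecs.most_similar(tag)

# Inside api_compare_2():
#     vec1 = safe_most_similar(d2v_model, data['doc1'])
#     if vec1 is None:
#         return 'ERROR: unknown doc-tag ' + data['doc1']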
TypeError: '<' not supported between instances of 'str' and 'int'
[35182] Failed to execute script docker-compose
This error was the result of copy-and-pasted code containing typographic (curly) quotation marks; replace them with straight quotes ('').

Python selenium/Beautifulsoup using urlretrieve 403 forbidden error

Please understand that I am not good at English.
I created a crawling program that downloads images using image URLs.
It collects the image URLs in the imgs_urls array, then downloads them with the urlretrieve function.
I get a 403 Forbidden error before all the URLs in the array have been used.
#####syncopation##### is used below because of the 30000-character limit.
How can I fix the error?
code:
import urllib.request
from time import sleep
from bs4 import BeautifulSoup
from selenium import webdriver

def Remainder_All_ImagesURLs_Google(searchText):
    def scroll_page():
        for i in range(7):
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            sleep(3)

    def click_button():
        more_imgs_button_xpath = "//*[@id='smb']"
        element = driver.find_element_by_xpath(more_imgs_button_xpath)
        element.click()
        sleep(3)

    def create_soup():
        html_source = driver.page_source
        soup = BeautifulSoup(html_source, 'html.parser')
        return soup

    driver = webdriver.Chrome('C:/Users/ajh46/Anaconda3/ChromeDriver/chromedriver.exe')
    driver.maximize_window()
    sleep(2)
    searchUrl = "https://www.google.com/search?q={}&site=webhp&tbm=isch".format(searchText)
    driver.get(searchUrl)
    try:
        scroll_page()
        click_button()
        scroll_page()
    except:
        click_button()
        scroll_page()
    imgs_urls = []
    cnt = 0
    for j in range(100):
        element = driver.find_element_by_css_selector("div[data-ri = '" + str(cnt + j) + "'] img")
        element.click()
        sleep(1)
        soup = create_soup()
        for img in soup.find_all('img'):
            try:
                if img['src'].startswith('http') and img['src'].endswith('jpg'):
                    imgs_urls.append(img['src'])
            except:
                pass
        print(str(cnt + j))
        cnt += 2
    driver.close()
    return(imgs_urls)

def download_image(url, filename):
    full_name = str(filename) + ".jpg"
    urllib.request.urlretrieve(url, 'C:/Python/' + full_name)
    print(full_name)

if __name__ == "__main__":
    count2 = 0
    searchText = 'sites:pinterest white dress'
    filename = 'white dress'
    for url2 in Remainder_All_ImagesURLs_Google(searchText):
        download_image(url2, filename + str(count2))
        count2 += 1
        print(url2)
imgs_url array:
297
['https://s-media-cache-ak0.pinimg.com/736x/3f/1b/1d/3f1b1decd26c10e3ce0a14d270c4a3db.jpg' #####syncopation##### 'http://24myfashion.com/2016/wp-content/uploads/2016/04/Red-and-white-dress-2017-2018-15.jpg', 'https://s-media-cache-ak0.pinimg.com/736x/84/3d/c5/843dc5b9879801fce8ca33b569948143.jpg']
Output:
white dress0.jpg
https://s-media-cache-ak0.pinimg.com/736x/3f/1b/1d/3f1b1decd26c10e3ce0a14d270c4a3db.jpg
#####syncopation#####
white dress101.jpg
https://s-media-cache-ak0.pinimg.com/originals/4e/9e/83/4e9e83b4aaf3224b5b26482b4639004f.jpg
Error:
Traceback (most recent call last):
File "C:/Users/ajh46/PycharmProjects/untitled3/Crawling.py", line 216, in <module>
download_image(url2, filename + str(count2))
File "C:/Users/ajh46/PycharmProjects/untitled3/Crawling.py", line 192, in download_image
urllib.request.urlretrieve(url, 'C:/Python/' + full_name)
File "C:\Users\ajh46\Anaconda3\lib\urllib\request.py", line 248, in urlretrieve
with contextlib.closing(urlopen(url, data)) as fp:
File "C:\Users\ajh46\Anaconda3\lib\urllib\request.py", line 223, in urlopen
return opener.open(url, data, timeout)
File "C:\Users\ajh46\Anaconda3\lib\urllib\request.py", line 532, in open
response = meth(req, response)
File "C:\Users\ajh46\Anaconda3\lib\urllib\request.py", line 642, in http_response
'http', request, response, code, msg, hdrs)
File "C:\Users\ajh46\Anaconda3\lib\urllib\request.py", line 570, in error
return self._call_chain(*args)
File "C:\Users\ajh46\Anaconda3\lib\urllib\request.py", line 504, in _call_chain
result = func(*args)
File "C:\Users\ajh46\Anaconda3\lib\urllib\request.py", line 650, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 403: Forbidden
urlretrieve() has no cookies or session the way the Selenium browser does; that is why you get a 403. You also need to set a User-Agent.
Just change your download_image() function to the following:
def download_image(url, filename):
    browser = webdriver.Chrome()
    browser.get(url)
    userAgent = browser.execute_script("return navigator.userAgent;")
    seleniumCookies = browser.get_cookies()
    browser.quit()
    cookies = ''
    for cookie in seleniumCookies:
        cookies += '%s=%s;' % (cookie['name'], cookie['value'])
    opener = urllib.request.build_opener()
    opener.addheaders = [('User-Agent', userAgent)]
    opener.addheaders.append(('Cookie', cookies))
    # Install the opener so urlretrieve() actually sends these headers.
    urllib.request.install_opener(opener)
    full_name = str(filename) + ".jpg"
    urllib.request.urlretrieve(url, 'C:/Python/' + full_name)
    print(full_name)
Hope this helps :)
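An alternative sketch that skips the extra browser launch entirely: fetch the image with urllib while sending a browser-like User-Agent header. Whether that alone is enough depends on the image host, and the header string here is just an example:

import urllib.request

def download_image(url, filename):
    full_name = str(filename) + ".jpg"
    # The default urllib User-Agent is often rejected with 403, so send a browser-like one.
    req = urllib.request.Request(
        url, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'})
    with urllib.request.urlopen(req) as resp, open('C:/Python/' + full_name, 'wb') as out:
        out.write(resp.read())
    print(full_name)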

TypeError: POST data should be bytes or an iterable of bytes. It cannot be of type str

My Code.
#!/usr/bin/env python
#coding: utf-8
userid = "NicoNicoCreate@gmail.com"
passwd = "********"
import sys, re, cgi, urllib, urllib.request, urllib.error, http.cookiejar, xml.dom.minidom, time, urllib.parse
import simplejson as json

def getToken():
    html = urllib.request.urlopen("http://www.nicovideo.jp/my/mylist").read()
    for line in html.splitlines():
        mo = re.match(r'^\s*NicoAPI\.token = "(?P<token>[\d\w-]+)";\s*', line)
        if mo:
            token = mo.group('token')
            break
    assert token
    return token

def mylist_create(name):
    cmdurl = "http://www.nicovideo.jp/api/mylistgroup/add"
    q = {}
    q['name'] = name.encode("utf-8")
    q['description'] = ""
    q['public'] = 0
    q['default_sort'] = 0
    q['icon_id'] = 0
    q['token'] = token
    cmdurl += "?" + urllib.parse.urlencode(q).encode("utf-8")
    j = json.load(urllib.request.urlopen(cmdurl), encoding='utf-8')
    return j['id']

def addvideo_tomylist(mid, smids):
    for smid in smids:
        cmdurl = "http://www.nicovideo.jp/api/mylist/add"
        q = {}
        q['group_id'] = mid
        q['item_type'] = 0
        q['item_id'] = smid
        q['description'] = u""
        q['token'] = token
        cmdurl += "?" + urllib.parse.urlencode(q).encode("utf-8")
        j = json.load(urllib.request.urlopen(cmdurl), encoding='utf-8')
        time.sleep(0.5)

#Login
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(http.cookiejar.CookieJar()))
urllib.request.install_opener(opener)
urllib.request.urlopen("https://secure.nicovideo.jp/secure/login",
                       urllib.parse.urlencode({"mail": userid, "password": passwd})).encode("utf-8")
#GetToken
token = getToken()
#MakeMylist&AddMylist
mid = mylist_create(u"Testlist")
addvideo_tomylist(mid, ["sm9", "sm1097445", "sm1715919"])
My error:
Traceback (most recent call last):
File "Nico3.py", line 48, in <module>
urllib.parse.urlencode( {"mail":userid, "password":passwd}) ).encode("utf-8")
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/urllib/request.py", line 162, in urlopen
return opener.open(url, data, timeout)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/urllib/request.py", line 463, in open
req = meth(req)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/urllib/request.py", line 1170, in do_request_
raise TypeError(msg)
TypeError: POST data should be bytes or an iterable of bytes. It cannot be of type str.
I've tried encode, but it did not help.
I'm a Japanese academic student.
I could not solve this with my own knowledge.
I am aware of this similar question, TypeError: POST data should be bytes or an iterable of bytes. It cannot be str, but am too new for the answer to be much help.
Your paren is in the wrong place, so you are not actually encoding the POST data:
.urlencode({"mail": userid, "password": passwd}).encode("utf-8"))  # <- move .encode() inside the urlopen() call