I am attempting to cache webdriver instances across test case classes. I do not need a "clean" webdriver since I am simply using PhantomJS to query the DOM (I do need JavaScript enabled, which is why I am not simply fetching the source and parsing that).
The cache is a dictionary with the URL as key and the driver instance as value. The cache lives on the base test case, and I call get(), a method on the base test case, which instantiates the webdriver and navigates to the URL if the driver is not already in the cache.
It appears there's some kind of socket issue when trying to access driver properties on the cached instance in the second test case (derivedb.py). I'd appreciate it if someone could tell me how to get this to work.
I am getting the following output:
$ python launcher.py
test_a (deriveda.DerivedTestCaseA) ... Instantiate new driver
Title is: Google
ok
test_b (deriveda.DerivedTestCaseA) ... Retrieve driver from cache
Title is: Google
ok
test_a (derivedb.DerivedTestCaseB) ... Retrieve driver from cache
ERROR
======================================================================
ERROR: test_a (derivedb.DerivedTestCaseB)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/cohenaa/PycharmProjects/sanity/derivedb.py", line 7, in test_a
print "Title is: %s" % self.driver.title
File "/Users/cohenaa/sanity-env/lib/python2.7/site-packages/selenium/webdriver/remote/webdriver.py", line 185, in title
resp = self.execute(Command.GET_TITLE)
File "/Users/cohenaa/sanity-env/lib/python2.7/site-packages/selenium/webdriver/remote/webdriver.py", line 162, in execute
response = self.command_executor.execute(driver_command, params)
File "/Users/cohenaa/sanity-env/lib/python2.7/site-packages/selenium/webdriver/remote/remote_connection.py", line 349, in execute
return self._request(url, method=command_info[0], data=data)
File "/Users/cohenaa/sanity-env/lib/python2.7/site-packages/selenium/webdriver/remote/remote_connection.py", line 410, in _request
resp = opener.open(request)
File "/sw/lib/python2.7/urllib2.py", line 404, in open
response = self._open(req, data)
File "/sw/lib/python2.7/urllib2.py", line 422, in _open
'_open', req)
File "/sw/lib/python2.7/urllib2.py", line 382, in _call_chain
result = func(*args)
File "/sw/lib/python2.7/urllib2.py", line 1214, in http_open
return self.do_open(httplib.HTTPConnection, req)
File "/sw/lib/python2.7/urllib2.py", line 1184, in do_open
raise URLError(err)
URLError: <urlopen error [Errno 61] Connection refused>
----------------------------------------------------------------------
Ran 3 tests in 1.271s
FAILED (errors=1)
launcher.py:

import unittest
from deriveda import DerivedTestCaseA
from derivedb import DerivedTestCaseB

suite = unittest.TestSuite()
testclasses = [DerivedTestCaseA, DerivedTestCaseB]
testloader = unittest.TestLoader()
classes_to_names = {}
for tc in testclasses:
    classes_to_names[tc] = testloader.getTestCaseNames(tc)
for tc in classes_to_names:
    for testname in classes_to_names[tc]:
        suite.addTest(tc(testname))
unittest.TextTestRunner(verbosity=10).run(suite)
deriveda.py:

from basetestcase import BaseTestCase
from unittest import main

class DerivedTestCaseA(BaseTestCase):
    def test_a(self):
        self.get("http://www.google.com")
        print "Title is: %s" % self.driver.title

    def test_b(self):
        self.get("http://www.google.com")
        print "Title is: %s" % self.driver.title
derivedb.py:

from basetestcase import BaseTestCase

class DerivedTestCaseB(BaseTestCase):
    def test_a(self):
        self.get("http://www.google.com")
        print "Title is: %s" % self.driver.title
basetestcase.py:

import unittest
from selenium import webdriver

class BaseTestCase(unittest.TestCase):
    cache = {}

    def get(self, url):
        if url not in self.cache:
            print "Instantiate new driver"
            self.driver = webdriver.PhantomJS()
            self.driver.get(url)
            self.cache[url] = self.driver
        else:
            print "Retrieve driver from cache"
            self.driver = self.cache[url]

    @classmethod
    def tearDownClass(cls):
        for url in BaseTestCase.cache:
            BaseTestCase.cache[url].quit()
Ahhh, I see now. The drivers are quitting after each test case class, because tearDownClass runs once per class. If I quit them after the whole suite has run instead, there is no error.
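For reference, a minimal sketch of that fix, assuming the cache stays on BaseTestCase: remove tearDownClass from the base class and quit the cached drivers in launcher.py once the whole suite has finished.

import unittest
from basetestcase import BaseTestCase
from deriveda import DerivedTestCaseA
from derivedb import DerivedTestCaseB

testloader = unittest.TestLoader()
suite = unittest.TestSuite()
for tc in [DerivedTestCaseA, DerivedTestCaseB]:
    for testname in testloader.getTestCaseNames(tc):
        suite.addTest(tc(testname))
unittest.TextTestRunner(verbosity=10).run(suite)

# Quit each cached driver exactly once, after every test case class has run.
for driver in BaseTestCase.cache.values():
    driver.quit()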
I'm writing a Discord bot, and I want to save the streamer's name in a separate file, so that when they go live the bot sends a message about it in a specific channel.
This is what I tried:
import os
import json
import discord
import requests
from discord.ext import tasks, commands
from twitchAPI.twitch import Twitch
from discord.utils import get
intents = discord.Intents.all()
bot = commands.Bot(command_prefix='$', intents=intents)
Authentication with the Twitch API:
client_id = os.getenv('client_id')
client_secret = os.getenv('Dweller_token')
twitch = Twitch(client_id, client_secret)
twitch.authenticate_app([])
TWITCH_STREAM_API_ENDPOINT_V5 = "https://api.twitch.tv/dweller/streams/{}"
API_HEADERS = {
    'Client-ID': client_id,
    'Accept': 'application/vnd.twitchtv.v5+json',
}
Returns True if online, False if not:
def checkuser(user):
    try:
        userid = twitch.get_users(logins=[user])['data'][0]['id']
        url = TWITCH_STREAM_API_ENDPOINT_V5.format(userid)
        try:
            req = requests.Session().get(url, headers=API_HEADERS)
            jsondata = req.json()
            if 'stream' in jsondata:
                if jsondata['stream'] is not None:
                    return True
                else:
                    return False
        except Exception as e:
            print("Error checking user: ", e)
            return False
    except IndexError:
        return False
Bot event that constantly checks whether a streamer is live, sends a message if so, and adds a specific role to the streamer while they are live:
@bot.event
async def on_ready():
    # Defines a loop that will run every 10 seconds (checks for live users every 10 seconds).
    @tasks.loop(seconds=10)
    async def live_notifs_loop():
        # Opens and reads the json file
        with open('streamers.json', 'r') as file:
            streamers = json.loads(file.read())
        # Makes sure the json isn't empty before continuing.
        if streamers is not None:
            # Gets the guild, 'twitch streams' channel, and streaming role.
            guild = bot.get_guild(1234567890)
            channel = bot.get_channel(1234567890)
            role = get(guild.roles, id=1234567890)
            # Loops through the json and gets the key,value which in this case is the user_id and twitch_name of
            # every item in the json.
            for user_id, twitch_name in streamers.items():
                # Takes the given twitch_name and checks it using the checkuser function to see if they're live.
                # Returns either true or false.
                status = checkuser(twitch_name)
                # Gets the user using the collected user_id in the json
                user = bot.get_user(int(user_id))
                # Makes sure they're live
                if status is True:
                    # Checks to see if the live message has already been sent.
                    async for message in channel.history(limit=200):
                        # If it has, break the loop (do nothing).
                        if str(user.mention) in message.content and "is now streaming" in message.content:
                            break
                        # If it hasn't, assign them the streaming role and send the message.
                        else:
                            # Gets all the members in your guild.
                            async for member in guild.fetch_members(limit=None):
                                # If one of the id's of the members in your guild matches the one from the json and
                                # they're live, give them the streaming role.
                                if member.id == int(user_id):
                                    await member.add_roles(role)
                            # Sends the live notification to the 'twitch streams' channel then breaks the loop.
                            await channel.send(
                                f":red_circle: **LIVE**\n{user.mention} is now streaming on Twitch!"
                                f"\nhttps://www.twitch.tv/{twitch_name}")
                            print(f"{user} started streaming. Sending a notification.")
                            break
                # If they aren't live do this:
                else:
                    # Gets all the members in your guild.
                    async for member in guild.fetch_members(limit=None):
                        # If one of the id's of the members in your guild matches the one from the json and they're not
                        # live, remove the streaming role.
                        if member.id == int(user_id):
                            await member.remove_roles(role)
                    # Checks to see if the live notification was sent.
                    async for message in channel.history(limit=200):
                        # If it was, delete it.
                        if str(user.mention) in message.content and "is now streaming" in message.content:
                            await message.delete()
    # Start your loop.
    live_notifs_loop.start()
Command that adds 'chosen' streamers to json file:
# Command to add Twitch usernames to the json.
@bot.command(name='addtwitch', help='Adds your Twitch to the live notifs.', pass_context=True)
async def add_twitch(ctx, twitch_name):
    # Opens and reads the json file.
    with open('streamers.json', 'r') as file:
        streamers = json.loads(file.read())
    # Gets the users id that called the command.
    user_id = ctx.author.id
    # Assigns their given twitch_name to their discord id and adds it to the streamers.json.
    streamers[user_id] = twitch_name
    # Adds the changes we made to the json file.
    with open('streamers.json', 'w') as file:
        file.write(json.dumps(streamers))
    # Tells the user it worked.
    await ctx.send(f"Added {twitch_name} for {ctx.author} to the notifications list.")
print('Server Running')
bot.run(os.getenv('token'))
I want to type '$add_twitch turb4ik' and have the bot save the streamer turb4ik in streamers.json and check whether the streamer is live. If they are, it should send a notification in a specific channel. But it doesn't seem to work.
And I get this error:
Unhandled exception in internal background task 'live_notifs_loop'.
Traceback (most recent call last):
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/discord/ext/tasks/__init__.py", line 101, in _loop
await self.coro(*args, **kwargs)
File "main.py", line 62, in live_notifs_loop
streamers = json.loads(file.read())
File "/usr/lib/python3.8/json/__init__.py", line 357, in loads
return _default_decoder.decode(s)
File "/usr/lib/python3.8/json/decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/lib/python3.8/json/decoder.py", line 355, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
Ignoring exception in command None:
discord.ext.commands.errors.CommandNotFound: Command "add_twitch" is not found
I also tried this piece of code; it gives me information about the channel, but it doesn't give me the streamer's status:
client_id = os.getenv('client_id')
oauth_token = os.getenv('Dweller_token')
twitch = Twitch(client_id, oauth_token)
twitch.authenticate_app([])
user_info = twitch.get_users(logins=['turb4ik'])
user_id = user_info['data'][0]['id']
print(user_info)
And there is one more problem: every time I start my bot, it says that twitchAPI is not installed and I need to install it again. Sometimes my bot seems to forget about twitchAPI, goes offline, and again tells me I need to install twitchAPI.
I know this is hard, but please help me. Maybe I should do it with SQL (the sqlite3 library) or so. Much obliged!
Edit:
Another error:
Unhandled exception in internal background task 'live_notifs_loop'.
Traceback (most recent call last):
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/discord/ext/tasks/__init__.py", line 101, in _loop
await self.coro(*args, **kwargs)
File "main.py", line 78, in live_notifs_loop
streamers = json.load(file)
File "/usr/lib/python3.8/json/__init__.py", line 293, in load
return loads(fp.read(),
File "/usr/lib/python3.8/json/__init__.py", line 357, in loads
return _default_decoder.decode(s)
File "/usr/lib/python3.8/json/decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/lib/python3.8/json/decoder.py", line 355, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
Ignoring exception in command addtwitch:
Traceback (most recent call last):
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/discord/ext/commands/core.py", line 85, in wrapped
ret = await coro(*args, **kwargs)
File "main.py", line 136, in add_twitch
streamers = json.load(file)
File "/usr/lib/python3.8/json/__init__.py", line 293, in load
return loads(fp.read(),
File "/usr/lib/python3.8/json/__init__.py", line 357, in loads
return _default_decoder.decode(s)
File "/usr/lib/python3.8/json/decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/lib/python3.8/json/decoder.py", line 355, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/discord/ext/commands/bot.py", line 902, in invoke
await ctx.command.invoke(ctx)
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/discord/ext/commands/core.py", line 864, in invoke
await injected(*ctx.args, **ctx.kwargs)
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/discord/ext/commands/core.py", line 94, in wrapped
raise CommandInvokeError(exc) from exc
discord.ext.commands.errors.CommandInvokeError: Command raised an exception: JSONDecodeError: Expecting value: line 1 column 1 (char 0)
Unhandled exception in internal background task 'live_notifs_loop'.
Traceback (most recent call last):
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/discord/ext/tasks/__init__.py", line 101, in _loop
await self.coro(*args, **kwargs)
File "main.py", line 62, in live_notifs_loop
streamers = json.loads(file.read())
File "/usr/lib/python3.8/json/__init__.py", line 357, in loads
return _default_decoder.decode(s)
File "/usr/lib/python3.8/json/decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/lib/python3.8/json/decoder.py", line 355, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
Instead of streamers = json.loads(file.read()), use streamers = json.load(file).

discord.ext.commands.errors.CommandNotFound: Command "add_twitch" is not found

Since you are defining your command with the name='addtwitch' argument, you can only call your command with $addtwitch user. To avoid this, add an aliases=['add_twitch'] argument to the decorator:

@bot.command(name='addtwitch', help='Adds your Twitch to the live notifs.', pass_context=True, aliases=['add_twitch'])
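Also, the JSONDecodeError ("Expecting value: line 1 column 1 (char 0)") usually means streamers.json is empty or missing, so the decoder has nothing to parse. A minimal sketch of one way to guard against that (the load_streamers helper name is my own, not from the original code):

import json
import os

def load_streamers(path='streamers.json'):
    # Treat a missing or empty file as an empty mapping instead of crashing.
    if not os.path.exists(path) or os.path.getsize(path) == 0:
        return {}
    with open(path, 'r') as file:
        return json.load(file)

Calling load_streamers() from both live_notifs_loop and add_twitch would let the first run (before any streamer has been added) survive the empty file.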
I want to store a pickle file on S3, as a result of a luigi Task. Below is the class that defines the Task:
class CreateItemVocabulariesTask(luigi.Task):
    def __init__(self):
        self.client = S3Client(AwsConfig().aws_access_key_id,
                               AwsConfig().aws_secret_access_key)
        super().__init__()

    def requires(self):
        return [GetItem2VecDataTask()]

    def run(self):
        filename = 'item2vec_results.tsv'
        data = self.client.get('s3://{}/item2vec_results.tsv'.format(AwsConfig().item2vec_path),
                               filename)
        df = pd.read_csv(filename, sep='\t', encoding='latin1')
        unique_users = df['CustomerId'].unique()
        unique_items = df['ProductNumber'].unique()
        item_to_int, int_to_item = utils.create_lookup_tables(unique_items)
        user_to_int, int_to_user = utils.create_lookup_tables(unique_users)
        with self.output()[0].open('wb') as out_file:
            pickle.dump(item_to_int, out_file)
        with self.output()[1].open('wb') as out_file:
            pickle.dump(int_to_item, out_file)
        with self.output()[2].open('wb') as out_file:
            pickle.dump(user_to_int, out_file)
        with self.output()[3].open('wb') as out_file:
            pickle.dump(int_to_user, out_file)

    def output(self):
        files = [S3Target('s3://{}/item2int.pkl'.format(AwsConfig().item2vec_path), client=self.client),
                 S3Target('s3://{}/int2item.pkl'.format(AwsConfig().item2vec_path), client=self.client),
                 S3Target('s3://{}/user2int.pkl'.format(AwsConfig().item2vec_path), client=self.client),
                 S3Target('s3://{}/int2user.pkl'.format(AwsConfig().item2vec_path), client=self.client)]
        return files
When I run this task I get the error ValueError: Unsupported open mode 'wb'. The items I try to dump into a pickle file are just Python dictionaries.
Full traceback:
Traceback (most recent call last):
File "C:\Anaconda3\lib\site-packages\luigi\worker.py", line 203, in run
new_deps = self._run_get_new_deps()
File "C:\Anaconda3\lib\site-packages\luigi\worker.py", line 140, in _run_get_new_deps
task_gen = self.task.run()
File "C:\Users\user\Documents\python workspace\pipeline.py", line 60, in run
with self.output()[0].open('wb') as out_file:
File "C:\Anaconda3\lib\site-packages\luigi\contrib\s3.py", line 714, in open
raise ValueError("Unsupported open mode '%s'" % mode)
ValueError: Unsupported open mode 'wb'
This is an issue that only happens on Python 3.x, as explained here. In order to use Python 3 and write a binary file or target (i.e. using 'wb' mode), just set the format parameter for S3Target to luigi.format.Nop, like this:

S3Target('s3://path/to/file', client=self.client, format=luigi.format.Nop)

Note that this is just a workaround; it is neither intuitive nor documented.
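Applied to the task in the question, the output() method would look something like this (a sketch; the paths and AwsConfig are unchanged from the question):

def output(self):
    # luigi.format.Nop disables text-mode wrapping, so open('wb') works on Python 3.
    names = ['item2int.pkl', 'int2item.pkl', 'user2int.pkl', 'int2user.pkl']
    return [S3Target('s3://{}/{}'.format(AwsConfig().item2vec_path, name),
                     client=self.client, format=luigi.format.Nop)
            for name in names]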
Has anyone experienced this error before when trying to connect to Hive?
Sample code used (https://github.com/telefonicaid/fiware-cygnus/blob/master/cygnus-ngsi/resources/hiveclients/python/hiveserver2-client.py):
import sys
import pyhs2
from pyhs2.error import Pyhs2Exception

# get the input parameters
if len(sys.argv) != 6:
    print 'Usage: python hiveserver2-client.py <hive_host> <hive_port> <db_name> <hadoop_user> <hadoop_password>'
    sys.exit()

hiveHost = sys.argv[1]
hivePort = sys.argv[2]
dbName = sys.argv[3]
hadoopUser = sys.argv[4]
hadoopPassword = sys.argv[5]

# do the connection
with pyhs2.connect(host=hiveHost,
                   port=hivePort,
                   authMechanism="PLAIN",
                   user=hadoopUser,
                   password=hadoopPassword,
                   database=dbName) as conn:
    # get a client
    with conn.cursor() as client:
        # create a loop attending HiveQL queries
        while (1):
            query = raw_input('remotehive> ')
            try:
                if not query:
                    continue
                if query == 'exit':
                    sys.exit()
                # execute the query
                client.execute(query)
                # get the content
                for row in client.fetch():
                    print row
            except Pyhs2Exception, ex:
                print ex.errorMessage
Error displayed:
[centos#test]$ sudo python hiveserver2-client.py computing.cosmos.lab.fiware.org 10000 default USERNAME TOKEN
Traceback (most recent call last):
File "hiveserver2-client.py", line 42, in <module>
database=dbName) as conn:
File "/usr/lib/python2.7/site-packages/pyhs2/__init__.py", line 7, in connect
return Connection(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/pyhs2/connections.py", line 46, in __init__
transport.open()
File "/usr/lib/python2.7/site-packages/pyhs2/cloudera/thrift_sasl.py", line 74, in open
status, payload = self._recv_sasl_message()
File "/usr/lib/python2.7/site-packages/pyhs2/cloudera/thrift_sasl.py", line 92, in _recv_sasl_message
header = self._trans.readAll(5)
File "/usr/lib/python2.7/site-packages/thrift/transport/TTransport.py", line 60, in readAll
chunk = self.read(sz - have)
File "/usr/lib/python2.7/site-packages/thrift/transport/TSocket.py", line 132, in read
message='TSocket read 0 bytes')
thrift.transport.TTransport.TTransportException: TSocket read 0 bytes
Can you post your piece of code? This looks like the auth mechanism or the credentials sent are not valid.
authMechanism can be "PLAIN" or "KERBEROS", depending on your setup.
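For example, if the cluster expects Kerberos rather than PLAIN, the connection would look something like this (a sketch, assuming a valid Kerberos ticket already exists on the client machine; everything else is as in the question):

# Same connection as above, but authenticating via Kerberos instead of PLAIN.
with pyhs2.connect(host=hiveHost,
                   port=hivePort,
                   authMechanism="KERBEROS",
                   database=dbName) as conn:
    with conn.cursor() as cur:
        cur.execute("show databases")
        for row in cur.fetch():
            print row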
I am using Scrapy 1.1 and I call Scrapy from within a script. My spider launching method looks like this:
def run_spider(self):
    runner = CrawlerProcess(get_project_settings())
    spider = SiteSpider()
    configure_logging()
    d = runner.crawl(spider, websites_file=self.raw_data_file)
    d.addBoth(lambda _: reactor.stop())
    reactor.run()
Here is an extract of my spider, with an errback written as in the documentation, but it only prints when it catches a failure.
class SiteSpider(scrapy.Spider):
    name = 'SiteCrawler'
    custom_settings = {
        'FEED_FORMAT': 'json',
        'FEED_URI': 'result.json',
    }

    def __init__(self, websites_file=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.websites_file = websites_file
        print('***********')
        print(self.websites_file)

    def start_requests(self):
        .....
        if is_valid_url(website_url):
            yield scrapy.Request(url=website_url, callback=self.parse, errback=self.handle_errors, meta={'url': account_id})

    def parse(self, response):
        .....
        yield item

    def handle_errors(self, failure):
        if failure.check(HttpError):
            # these exceptions come from HttpError spider middleware
            # you can get the non-200 response
            response = failure.value.response
            print('HttpError on ' + response.url)
        elif failure.check(DNSLookupError):
            # this is the original request
            request = failure.request
            print('DNSLookupError on ' + request.url)
        elif failure.check(TimeoutError, TCPTimedOutError):
            request = failure.request
            print('TimeoutError on ' + request.url)
My problem is that I get errors I expect, like:
TimeoutError on http://www.example.com
But also get tracebacks for the same websites:
2016-08-05 13:40:55 [scrapy] ERROR: Error downloading <GET http://www.example.com/robots.txt>: TCP connection timed out: 60: Operation timed out.
Traceback (most recent call last):
File ".../anaconda/lib/python3.5/site-packages/twisted/internet/defer.py", line 1126, in _inlineCallbacks
result = result.throwExceptionIntoGenerator(g)
File ".../anaconda/lib/python3.5/site-packages/twisted/python/failure.py", line 389, in throwExceptionIntoGenerator
return g.throw(self.type, self.value, self.tb)
File ".../anaconda/lib/python3.5/site-packages/scrapy/core/downloader/middleware.py", line 43, in process_request
defer.returnValue((yield download_func(request=request,spider=spider)))
twisted.internet.error.TCPTimedOutError: TCP connection timed out: 60: Operation timed out.
The written exception-handling messages and the tracebacks can often be traced to the same websites. After searching a lot on Stack Overflow, in the docs, and the like, I still don't know why I see the tracebacks.
This also occurs with DNSLookupErrors, for example.
Excuse me, my Scrapy knowledge is juvenile. Is this normal behavior?
Also, I added this to settings.py, which is under my crawler. Other entries (for example the item_pipelines) definitely work:
LOG_LEVEL = 'WARNING'
But I still see debug messages, not only warnings and everything above that (if configure_logging() is added to the spider launch). I am running this from the terminal on macOS.
I would be very happy to get any help with this.
Try this in a script:

if __name__ == '__main__':
    runner = CrawlerProcess(get_project_settings())
    spider = SiteSpider()
    configure_logging()
    # self.raw_data_file is not available at module level; pass your own path here
    d = runner.crawl(spider, websites_file='websites.txt')
    d.addBoth(lambda _: reactor.stop())
    reactor.run()
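As for LOG_LEVEL: CrawlerProcess configures logging itself, so calling configure_logging() on top of it installs a second root handler that does not respect your project settings, which is one reason the debug messages keep appearing. A minimal sketch of an alternative launch, assuming SiteSpider from the question (the 'websites.txt' path is a placeholder):

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

if __name__ == '__main__':
    settings = get_project_settings()
    settings.set('LOG_LEVEL', 'WARNING')  # enforce the level at launch time
    runner = CrawlerProcess(settings)     # CrawlerProcess sets up logging on its own
    runner.crawl(SiteSpider, websites_file='websites.txt')
    runner.start()  # starts the reactor and blocks until the crawl finishes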
I'm running a Twisted TCP server. Each protocol instance has an mqtt pub/sub client. I've reduced the actual production code to the simplest possible form below. I've stripped out a lot of irrelevant complexity to simplify the bug-finding process. The server works for multiple client connections and produces the data received from the mqtt client, but after any client connects/disconnects/reconnects a few times I get an exception that I haven't been able to understand or trap. I'm hoping that Jean-Paul or someone can point me at the error of my ways.
Once the exception occurs, no new clients can connect to the server. Each new connect attempt produces the exception.
Clients that are already connected continue to receive data ok.
The exception is:
Unhandled Error
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/twisted/python/log.py", line 73, in callWithContext
return context.call({ILogContext: newCtx}, func, *args, **kw)
File "/usr/lib/python2.7/dist-packages/twisted/python/context.py", line 118, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/usr/lib/python2.7/dist-packages/twisted/python/context.py", line 81, in callWithContext
return func(*args,**kw)
File "/usr/lib/python2.7/dist-packages/twisted/internet/posixbase.py", line 614, in _doReadOrWrite
why = selectable.doRead()
--- <exception caught here> ---
File "/usr/lib/python2.7/dist-packages/twisted/internet/tcp.py", line 1069, in doRead
transport = self.transport(skt, protocol, addr, self, s, self.reactor)
File "/usr/lib/python2.7/dist-packages/twisted/internet/tcp.py", line 786, in __init__
self.startReading()
File "/usr/lib/python2.7/dist-packages/twisted/internet/abstract.py", line 429, in startReading
self.reactor.addReader(self)
File "/usr/lib/python2.7/dist-packages/twisted/internet/epollreactor.py", line 256, in addReader
_epoll.EPOLLIN, _epoll.EPOLLOUT)
File "/usr/lib/python2.7/dist-packages/twisted/internet/epollreactor.py", line 240, in _add
self._poller.modify(fd, flags)
exceptions.IOError: [Errno 2] No such file or directory
The basic server code is:
(this example will run and does generate the exception)
from twisted.internet import reactor
from twisted.internet.protocol import Factory
from twisted.protocols import basic
from paho.mqtt import client  # The most recent version of the legacy Mosquitto client
from random import randint

class MsgReceiver(basic.LineReceiver):
    def __init__(self, factory):  # new (factory)
        self.factory = factory  # new (factory)

    def connectionMade(self):
        self.mqClient = self.startMQ()
        if self.mqClient:
            self.factory.clients.append(self)
        else:
            self.transport.loseConnection()

    def connectionLost(self, reason):
        pass

    def lineReceived(self, line):
        pass

    def on_message(self, mosq, obj, msg):
        try:
            self.sendLine(msg.payload)
        except Exception, err:
            print(err.message)

    def startMQ(self):
        mqName = "-".join(["myAppName", str(randint(0, 99999))])
        mqClient = client.Client(mqName)
        if mqClient.connect("localhost", 1883, 60) != 0:
            print('Could not connect to mq server')
            return False
        mqClient.on_message = self.on_message
        (success, mid) = mqClient.subscribe("myTopic", 0)
        if success != 0:
            return False
        mqClient.loop_start()
        return mqClient

class MsgReceiverFactory(Factory):
    allow_reuse_address = True

    def __init__(self, clients):
        self.clients = clients

    def buildProtocol(self, addr):
        return MsgReceiver(self)

if __name__ == "__main__":
    try:
        clients = []
        reactor.listenTCP(43217, MsgReceiverFactory(clients))
        reactor.run()
    except Exception, err:
        print(err.message)
        if reactor.running:
            reactor.stop()
A simple client that will induce the error when run twice (the first time it runs fine). Interestingly, if I enable the time.sleep(3) it runs fine and doesn't seem to induce the error:
#!/usr/bin/python
from __future__ import print_function
import socket
import time
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("localhost", 43217))
data = s.recv(1024)
print(data)
#time.sleep(3)
s.close()