How to write Python unittest cases to mock a Redis connection (redis.StrictRedis) in Django

How can I mock the following function for connecting to Redis?
import redis

class RedisCache:
    redis_instance = None

    @classmethod
    def set_connect(cls):
        redis_instance = redis.StrictRedis(host='0.0.0.0', port=6379, password='xyz', charset='utf-8', decode_responses=True, socket_timeout=30)
        return redis_instance

    @classmethod
    def get_conn(cls):
        cls.redis_instance = cls.set_connect()
        return cls.redis_instance
I looked for some solutions, but they were basically using the fakeredis module. I wanted a simpler way to mock these functions.
Note: the data returned by the function is Redis<ConnectionPool<Connection<host=127.0.0.1,port=6379,db=0>>>.

You can use the patch() function to mock out the redis.StrictRedis class; see "where to patch" in the unittest.mock documentation.
E.g.
redis_cache.py:
import redis

class RedisCache:
    redis_instance = None

    @classmethod
    def set_connect(cls):
        redis_instance = redis.StrictRedis(host='0.0.0.0', port=6379, password='xyz',
                                           charset='utf-8', decode_responses=True, socket_timeout=30)
        return redis_instance

    @classmethod
    def get_conn(cls):
        cls.redis_instance = cls.set_connect()
        return cls.redis_instance
test_redis_cache.py:
from unittest import TestCase
import unittest
from unittest.mock import patch, Mock

from redis_cache import RedisCache

class TestRedisCache(TestCase):
    def test_set_connect(self):
        with patch('redis.StrictRedis') as mock_StrictRedis:
            mock_redis_instance = mock_StrictRedis.return_value
            actual = RedisCache.set_connect()
            self.assertEqual(actual, mock_redis_instance)
            mock_StrictRedis.assert_called_once_with(host='0.0.0.0', port=6379, password='xyz',
                                                     charset='utf-8', decode_responses=True, socket_timeout=30)

    @patch('redis.StrictRedis')
    def test_get_conn(self, mock_StrictRedis):
        mock_redis_instance = mock_StrictRedis.return_value
        RedisCache.get_conn()
        self.assertEqual(RedisCache.redis_instance, mock_redis_instance)

if __name__ == '__main__':
    unittest.main()
test result:
..
----------------------------------------------------------------------
Ran 2 tests in 0.004s
OK
Name                                             Stmts   Miss  Cover   Missing
------------------------------------------------------------------------------
src/stackoverflow/70016401/redis_cache.py           11      0   100%
src/stackoverflow/70016401/test_redis_cache.py      18      0   100%
------------------------------------------------------------------------------
TOTAL                                               29      0   100%
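If the code under test also calls methods on the connection, the same mock instance can serve canned replies. A small sketch that could be added inside test_get_conn above; the get() call and its return value are illustrative, not part of the original code:

    # mock_redis_instance is mock_StrictRedis.return_value, as above
    mock_redis_instance.get.return_value = 'cached-value'  # hypothetical canned reply
    conn = RedisCache.get_conn()
    self.assertEqual(conn.get('some-key'), 'cached-value')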

Related

Python multiprocessing how to update a complex object in a manager list without using .join() method

I started programming in Python about 2 months ago and I've been struggling with this problem for the last 2 weeks.
I know there are many threads similar to this one, but I can't find a solution that suits my case.
I need the main process, which is the one that interacts with Telegram, plus another process, buffer, which understands the complex object received from the main process and updates it.
I'd like to do this in a simpler and smoother way.
At the moment the objects are not being updated, due to the use of multiprocessing without the join() method.
I then tried to use multithreading instead, but it gives me compatibility problems with Pyrogram, the framework I am using to interact with Telegram.
I rewrote the "complexity" of my project in order to reproduce the same error I am getting, and to get and give the best help possible from and for everyone.
a.py
class A():
    def __init__(self, length = -1, height = -1):
        self.length = length
        self.height = height
b.py
from a import A

class B(A):
    def __init__(self, length = -1, height = -1, width = -1):
        super().__init__(length = -1, height = -1)
        self.length = length
        self.height = height
        self.width = width

    def setHeight(self, value):
        self.height = value
c.py
class C():
    def __init__(self, a, x = 0, y = 0):
        self.a = a
        self.x = x
        self.y = y

    def func1(self):
        if self.x < 7:
            self.x = 7
d.py
from c import C

class D(C):
    def __init__(self, a, x = 0, y = 0, z = 0):
        super().__init__(a, x = 0, y = 0)
        self.a = a
        self.x = x
        self.y = y
        self.z = z

    def func2(self):
        self.func1()
main.py
from b import B
from d import D
from multiprocessing import Process, Manager
from buffer import buffer

if __name__ == "__main__":
    manager = Manager()
    lizt = manager.list()
    buffer = Process(target = buffer, args = (lizt, ))  # passing the list as a parameter
    buffer.start()
    # can't invoke buffer.join() here because I need the code below to keep running while the
    # buffer process takes a few minutes to finish an instance passed in the list;
    # hence I can't wait for the join() function to update the objects inside the buffer,
    # but I need the objects updated in order to pop them out of the list

    import datetime as dt
    t = dt.datetime.now()
    # library of a kind of multithreading (pool of 4 processes), uses the asyncio lib
    # this while loop was put here to reproduce the same error I am getting
    while True:
        if t + dt.timedelta(seconds = 10) < dt.datetime.now():
            lizt.append(D(B(5, 5, 5)))
            t = dt.datetime.now()
"""
#This is the code which looks like the one in my project
#main.py
from pyrogram import Client #library of kind of multithreading (pool of 4 processes), uses asyncio lib
from b import B
from d import D
from multiprocessing import Process, Manager
from buffer import buffer
if __name__ == "__main__":
api_id = 1234567
api_hash = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
app = Client("my_account", api_id, api_hash)
manager = Manager()
lizt = manager.list()
buffer = Process(target = buffer, args = (lizt, )) #passing the list as a parameter
buffer.start()
#can't invoke buffer.join() here because I need the below code to run at the same time as the buffer process
#hence I can't wait the join() function to update the objects inside the buffer
#app.on_message()
def my_handler(client, message):
lizt.append(complex_object_conatining_message)
"""
buffer.py
def buffer(buffer):
    print("buffer was defined")
    while True:
        if len(buffer) > 0:
            print(buffer[0].x)          # prints 0
            buffer[0].func2()           # this changes the attribute locally in the class instance, but not in here
            print(buffer[0].x)          # prints 0, but I'd like it to be 7
            print(buffer[0].a.height)   # prints 5
            buffer[0].a.setHeight(10)   # and this has the same behaviour
            print(buffer[0].a.height)   # prints 5, but I'd like it to be 10
            buffer.pop(0)
That is all the code for the problem I am having.
Literally every suggestion is welcome, hopefully constructive; thank you in advance!
In the end I had to change my approach and solve this problem with asyncio, as the framework itself was doing.
This solution offers everything I was looking for:
- complex objects get updated
- the problems of multiprocessing (in particular with join()) are avoided

It is also lightweight: before, I had 2 Python processes of about 40K and 75K; the current process is about 30K (and it's also faster and cleaner).
Here's the solution; I hope it will be as useful for someone else as it was for me.
The classes are skipped here because this solution updates complex objects absolutely fine.
main.py
from pyrogram import Client
import asyncio
import time

from firstWorker import firstWorker

def cancel_tasks():
    # get all tasks in the current loop
    tasks = asyncio.Task.all_tasks()
    for t in tasks:
        t.cancel()

try:
    buffer = []
    firstWorker(buffer)  # this is the old buffer.py file and function
    # the missing loop and loop method are explained in the next piece of code
except KeyboardInterrupt:
    print("")
finally:
    print("Closing Loop")
    cancel_tasks()
firstWorker.py
import asyncio
from pyrogram import Client

from secondWorker import secondWorker

def firstWorker(buffer):
    print("First Worker Executed")
    api_id = 1234567
    api_hash = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
    app = Client("my_account", api_id, api_hash)

    @app.on_message()
    async def my_handler(client, message):
        print("Message Arrived")
        buffer.append(complex_object_containing_message)
        await asyncio.sleep(1)

    app.run(secondWorker(buffer))  # here is the trick: I changed the run() method of the Client
                                   # class inside the Pyrogram framework, since it was a loop
                                   # itself. In this way I added another task to the existing
                                   # loop in order to let both of them run together.
secondWorker.py
import asyncio

async def secondWorker(buffer):
    while True:
        if len(buffer) > 0:
            print(buffer.pop(0))
        await asyncio.sleep(1)
The resources to understand the asyncio used in this code can be found here:
Asyncio simple tutorial
Python Asyncio Official Documentation
This tutorial about how to fix classical Asyncio errors
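As an aside on the original Manager-based attempt: a manager's list proxy only propagates assignments made on the list itself, not in-place mutation of the objects it holds, because indexing the proxy hands back a pickled local copy. A minimal sketch of the usual reassignment workaround, for illustration only (not the solution chosen above):

from multiprocessing import Manager, Process

class Point:
    def __init__(self, x = 0):
        self.x = x

def worker(lizt):
    obj = lizt[0]   # the proxy returns a local copy
    obj.x = 7       # mutate the copy...
    lizt[0] = obj   # ...then re-assign it so the change propagates back

if __name__ == "__main__":
    manager = Manager()
    lizt = manager.list([Point()])
    p = Process(target = worker, args = (lizt, ))
    p.start()
    p.join()
    print(lizt[0].x)  # prints 7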

Deferred requests in scrapy

I would like to repeatedly scrape the same URLs with different delays. After researching the issue, it seemed that the appropriate solution was to use something like the following in parse():

nextreq = scrapy.Request(url, dont_filter=True)
d = defer.Deferred()
delay = 1
reactor.callLater(delay, d.callback, nextreq)
yield d
However, I have been unable to make this work. I am getting the error message:
ERROR: Spider must return Request, BaseItem, dict or None, got 'Deferred'
I am not familiar with Twisted, so I hope I am just missing something obvious.
Is there a better way of achieving my goal that doesn't fight the framework so much?
I finally found an answer in an old PR:

def parse(self, response):
    req = scrapy.Request(...)
    delay = 0
    reactor.callLater(delay, self.crawler.engine.schedule, request=req, spider=self)
However, the spider can exit due to being idle too early. Based on the outdated middleware https://github.com/ArturGaspar/scrapy-delayed-requests, this can be remedied with
from scrapy import signals
from scrapy.exceptions import DontCloseSpider

class ImmortalSpiderMiddleware(object):

    @classmethod
    def from_crawler(cls, crawler):
        s = cls()
        crawler.signals.connect(s.spider_idle, signal=signals.spider_idle)
        return s

    @classmethod
    def spider_idle(cls, spider):
        raise DontCloseSpider()
The final option, updating the middleware by ArturGaspar, led to:
from weakref import WeakKeyDictionary

from scrapy import signals
from scrapy.exceptions import DontCloseSpider, IgnoreRequest
from twisted.internet import reactor

class DelayedRequestsMiddleware(object):
    requests = WeakKeyDictionary()

    @classmethod
    def from_crawler(cls, crawler):
        ext = cls()
        crawler.signals.connect(ext.spider_idle, signal=signals.spider_idle)
        return ext

    @classmethod
    def spider_idle(cls, spider):
        if cls.requests.get(spider):
            spider.log("delayed requests pending, not closing spider")
            raise DontCloseSpider()

    def process_request(self, request, spider):
        delay = request.meta.pop('delay_request', None)
        if delay:
            self.requests.setdefault(spider, 0)
            self.requests[spider] += 1
            reactor.callLater(delay, self.schedule_request, request.copy(),
                              spider)
            raise IgnoreRequest()

    def schedule_request(self, request, spider):
        spider.crawler.engine.schedule(request, spider)
        self.requests[spider] -= 1
It can then be used in parse() like:
yield Request(..., meta={'delay_request': 5})
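For the downloader middleware to take effect it also has to be enabled in the project settings; a minimal sketch, assuming the class lives in a hypothetical myproject/middlewares.py (the priority value 543 is arbitrary):

# settings.py
DOWNLOADER_MIDDLEWARES = {
    'myproject.middlewares.DelayedRequestsMiddleware': 543,
}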

MemoryError when querying database from Process

I am trying to create a program with 3 processes that read from the same database. The code was working before I started introducing processes.
I am getting a MemoryError when performing a select() from Peewee; I suspect there is something wrong with the sharing of resources. Minimal example:
models.py
from peewee import Model
from playhouse.pool import PooledSqliteExtDatabase

file_scanner_database = PooledSqliteExtDatabase(
    None,
    max_connections=32,
)

class FileModel(Model):
    class Meta:
        database = file_scanner_database
main.py
from file_scanner import FileScanner
from models import file_scanner_database
from models import FileModel
from multiprocessing import Process

def create_scanner_agent(data):
    scanner = FileScanner(data)
    scanner.start_scanner()

shared_info = {'db_location': '/absolute/path/to/database'}

file_scanner_database.init(shared_info['db_location'])
file_scanner_database.connect()
file_scanner_database.create_tables([FileModel], safe=True)

new_process = Process(
    target=create_scanner_agent,
    args=(shared_info,)
)
new_process.daemon = True
new_process.start()

try:
    new_process.join()
except KeyboardInterrupt:
    pass

new_process.terminate()
file_scanner.py
from models import file_scanner_database
from models import FileModel

class FileScanner:
    def __init__(self, data):
        for k, v in data.items():
            setattr(self, k, v)

        file_scanner_database.init(self.db_location)
        file_scanner_database.connect()

    def start_scanner(self):
        while True:
            # THIS IS WHERE THE PROGRAM CRASHES
            for row in FileModel.select():
                ...
It looks like you're trying to access memory across a fork, or some such craziness. I think the answer is that you're doing it wrong, homie: try opening your DB connection after the fork.
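In other words, don't initialize and connect the pooled database in the parent before forking the worker. A minimal sketch of that fix against the example above, keeping the same names; only the call order changes, and the close() call in the parent is an addition:

# main.py -- the parent closes its connection before the fork;
# the child reopens its own inside FileScanner.__init__
from file_scanner import FileScanner
from models import file_scanner_database
from models import FileModel
from multiprocessing import Process

def create_scanner_agent(data):
    scanner = FileScanner(data)  # init() + connect() happen here, after the fork
    scanner.start_scanner()

if __name__ == '__main__':
    shared_info = {'db_location': '/absolute/path/to/database'}

    # short-lived parent connection, used only to create the tables
    file_scanner_database.init(shared_info['db_location'])
    file_scanner_database.connect()
    file_scanner_database.create_tables([FileModel], safe=True)
    file_scanner_database.close()

    new_process = Process(target=create_scanner_agent, args=(shared_info,))
    new_process.daemon = True
    new_process.start()
    new_process.join()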

Python 3.4 / GTK / Async

I use tkinter with an async function.
Now I want to use GTK3 instead of tkinter.
Is there also a way to run my async function?
How should I adapt the code?
Here are some code fragments:
async def _event_loop(app, interval=0.05):
    try:
        while True:
            app.update()
            await asyncio.sleep(interval)
    except tkinter.TclError as exc:
        if "application has been destroyed" not in exc.args[0]:
            raise

class SSHFrame(tkinter.Frame):
    def __init__(self, parent):
        super().__init__(parent)
        ...

    async def _run(self, host, command, user, password):
        try:
            async with asyncssh.connect(host, username=user, password=password,
                                        client_keys=None) as conn:
                self._proc = await conn.create_process(command, term_type='dumb')
                while not self._proc.stdout.at_eof():
                    self._output(await self._proc.stdout.read(1024))
                self._output('\n[Disconnected]\n')
        except (asyncssh.Error, OSError) as exc:
            self._output('[%s]\n' % str(exc))
        finally:
            self._proc = None

class App(tkinter.Frame):
    def __init__(self, parent):
        super().__init__(parent)
        ...

asyncio.get_event_loop().run_until_complete(_event_loop(App(tkinter.Tk())))
import asyncio
import sys

from gi.repository import Gtk, GLib

@asyncio.coroutine
def start(app):
    yield from asyncio.sleep(0)
    app.register()
    app.activate()

def glib_update(main_context, loop):
    while main_context.pending():
        main_context.iteration(False)
    loop.call_later(.01, glib_update, main_context, loop)

if sys.platform == "win32":
    from asyncio.windows_events import ProactorEventLoop
    loop = ProactorEventLoop()
    asyncio.set_event_loop(loop)
else:
    loop = asyncio.SelectorEventLoop()
    asyncio.set_event_loop(loop)

# This is just a fake GTK application here, you should create your own; see
# http://python-gtk-3-tutorial.readthedocs.io/en/latest/application.html
my_gtk_app = Gtk.Application()

try:
    main_context = GLib.MainContext.default()
    asyncio.async(start(my_gtk_app))
    glib_update(main_context, loop)
    loop.run_forever()
finally:
    loop.close()
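A caveat for readers on newer interpreters: asyncio.async() only parses on Python < 3.7 (async later became a keyword) and was deprecated in favour of asyncio.ensure_future() from Python 3.4.4 on, so the scheduling line above would become:

asyncio.ensure_future(start(my_gtk_app))  # replaces asyncio.async() on Python >= 3.4.4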

Twisted XMLRPC proxy

I'd like to make an XMLRPC proxy with a balancer using Twisted.
[XMLRPC Server 1_1 8.8.8.8:8000] <--> [                         ]
                                      [  Proxy example.com:8000 ] <--- Client
[XMLRPC Server 1_2 9.9.9.9:8000] <--> [                         ]
So there are two XMLRPC instances which expose the same methods. I need an XMLRPC proxy between these instances and the clients. One more thing: this proxy should also accept JSON calls (something like http://example.com:8000/RPC2 and http://example.com:8000/JSON).
Right now I am trying to implement the XMLRPC proxy calls. I can't get the answer back to the client, although sendLine() is being called.
import argparse

from twisted.internet import protocol, reactor, defer, threads
from twisted.web import xmlrpc
from twisted.internet.task import LoopingCall
from twisted.internet.defer import DeferredQueue, Deferred, inlineCallbacks
from twisted.protocols.basic import LineReceiver

import configfile
from bcsxmlrpc import xmlrpc_request_parser, xmlrpc_marshal
from customlogging import logging

logging.getLogger().setLevel(logging.DEBUG)

class ProxyClient(xmlrpc.Proxy):
    def __init__(self, proxy_uri, user, timeout=30.0):
        self.proxy_uri = proxy_uri
        xmlrpc.Proxy.__init__(self, url=proxy_uri, user=user, connectTimeout=timeout)

    @inlineCallbacks
    def call_api(self, name, *args):
        logging.debug(u"Calling API: %s" % name)
        result = yield self.callRemote(name, *args)
        proxy_pool.add(self.proxy_uri)
        defer.returnValue(result)

class Request(object):
    def __init__(self, method, params, deferred):
        self.method = method
        self.params = params
        self.deferred = deferred

class ProxyServer(LineReceiver):
    def dataReceived(self, data):
        logging.pr(data)
        params, method = xmlrpc_request_parser(data)  # got method name and arguments
        d = Deferred()
        d.addCallbacks(self._send_reply, self._log_error)
        logging.debug(u"%s%s added to queue" % (method, params))
        queue.put(Request(method, params, d))

    def _send_reply(self, result):
        logging.ps(result)
        self.sendLine(str(result))

    def _log_error(self, error):
        logging.error(error)

    def connectionMade(self):
        logging.info(u"New client connected")

    def connectionLost(self, reason):
        logging.info(u"Client connection lost: %s" % reason.getErrorMessage())

class ProxyServerFactory(protocol.Factory):
    protocol = ProxyServer

    def buildProtocol(self, addr):
        return ProxyServer()

@inlineCallbacks
def _queue_execute_job():
    if queue.pending and proxy_pool:
        proxy = proxy_pool.pop()
        request = yield queue.get()
        result = yield ProxyClient(proxy, "").call_api(request.method, *list(request.params))
        request.deferred.callback(result)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run configuration")
    parser.add_argument('--config', help=u"Configuration file name/path")
    config = configfile.ProxyConfig(parser.parse_args().config)

    global proxy_pool
    proxy_pool = set()
    for proxy_server in config.servers:
        proxy_pool.add(proxy_server)

    global queue
    queue = DeferredQueue()

    lc2 = LoopingCall(_queue_execute_job)
    lc2.start(1)

    logging.info(u"Starting Proxy at port %s" % config.port)
    reactor.listenTCP(config.port, ProxyServerFactory())
    reactor.run()