Tornado's AsyncHTTPTestCase is not available outside self.fetch

I have an AsyncHTTPTestCase and I want to access it from methods besides self.fetch. Specifically, I have a SockJS handler that I want a sockjs-client to attach to for my tests.
I've discovered that even though self.get_url('/foo') returns a valid url, that url does not respond to anything except for self.fetch(). What gives?
Is this just not possible with AsyncHttpTestCase? What is the best pattern for doing this?
Here's tests.py
import urllib2

from tornado.httpclient import AsyncHTTPClient
import tornado.testing
from tornado.testing import AsyncTestCase, AsyncHTTPTestCase

from app import DebugApp


class TestDebug(AsyncHTTPTestCase):
    def get_app(self):
        return DebugApp()

    def test_foo(self):
        response = self.fetch('/foo')
        print response.body
        assert response.body == 'derp'

    def test_foo_from_urllib(self):
        response = urllib2.urlopen(self.get_url('/foo'), None, 2)
        print response
        assert response.body == 'derp'

    def runTest(self):
        pass
and app.py
import tornado.httpserver
import tornado.ioloop
import tornado.web
from tornado.options import options


class FooHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("derp")


url_patterns = [
    (r"/foo", FooHandler),
]


class DebugApp(tornado.web.Application):
    def __init__(self):
        tornado.web.Application.__init__(self, url_patterns, debug=True, xsrf_cookies=False)


def main():
    app = DebugApp()
    http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(6006)
    tornado.ioloop.IOLoop.instance().start()


if __name__ == "__main__":
    main()
and runtest.py
#!/usr/bin/env python
import unittest
from os import path
import sys

import tornado.testing

PROJECT_PATH = path.dirname(path.abspath(__file__))
sys.path.append(PROJECT_PATH)


def all():
    suite = unittest.defaultTestLoader.discover('./', 'tests.py', path.dirname(path.abspath(__file__)))
    print suite
    return suite


if __name__ == '__main__':
    # Print a nice message so that we can differentiate between test runs
    print ''
    print '%s %s' % ('Debug app', '0.1.0')
    print '\033[92m' + '-------------- Running Test Suite --------------' + '\033[0m'
    print ''
    tornado.testing.main()

The problem is that the IOLoop is not running when you call urlopen (and it cannot be, because urlopen is a blocking function). The server can only respond while the IOLoop is running, so you must run it (with @tornado.testing.gen_test, self.wait(), or indirectly via methods like self.fetch()), and you can only interact with the server via non-blocking functions. If you must use blocking functions like urlopen, run them in a separate thread.
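For illustration, here is a minimal sketch (not from the original answer) of pushing the blocking urllib2 call onto a worker thread while self.wait() keeps the IOLoop running; the class and test names are just illustrative:

import threading
import urllib2

from tornado.testing import AsyncHTTPTestCase

from app import DebugApp


class TestDebugBlockingClient(AsyncHTTPTestCase):
    def get_app(self):
        return DebugApp()

    def test_foo_from_urllib_in_thread(self):
        result = {}

        def worker():
            # The blocking call runs off the IOLoop thread, so the server can respond.
            resp = urllib2.urlopen(self.get_url('/foo'), None, 2)
            result['body'] = resp.read()
            # add_callback is thread-safe; self.stop() lets self.wait() return.
            self.io_loop.add_callback(self.stop)

        threading.Thread(target=worker).start()
        self.wait()  # runs the IOLoop until the worker calls self.stop()
        assert result['body'] == 'derp'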

Related

pytest-factoryboy: registering a factory with a custom name gives "fixture not found" error

Using pytest-factoryboy in my conftest.py:

import pytest
from pytest_factoryboy import register
from rest_framework.test import APIClient

from apps.api.factories import leadfactory, userfactory


@pytest.fixture
def client():
    return APIClient()


@pytest.fixture
def staff(db):
    return userfactory.StaffFactory()


register(leadfactory.OpenLeadFactory, "lead1")
pytest_lead.py
def test_leads_get(self, client, staff, lead1):
    client.login(username=staff.username, password="staff")
    url = reverse("lead_list")
    response = client.get(url)
    assert response.status_code == 200
After running pytest I'm getting this error:
________________________________________________ ERROR at setup of TestLeads.test_leads_get ________________________________________________
file C:\Users\suraj\Desktop\ShwetaSoftwares\Working\git\udimagic_project\apps\api\pytest\pytest_leads.py, line 38
def test_leads_get(self, client, staff, lead1):
file C:\Users\suraj\AppData\Local\Programs\Python\Python39\lib\site-packages\pytest_factoryboy\fixture.py, line 350
def model_fixture(request: SubRequest, factory_name: str) -> Any:
file C:\Users\suraj\AppData\Local\Programs\Python\Python39\lib\site-packages\pytest_factoryboy\fixture.py, line 503
def subfactory_fixture(request: SubRequest, factory_class: FactoryType) -> Any:
E fixture 'custom_user__first_name' not found
> available fixtures: _dj_autoclear_mailbox, _django_clear_site_cache, _django_db_helper, _django_db_marker, _django_set_urlconf, _django_setup_unittest, _fail_for_invalid_template_variable, _live_server_helper, _session_faker, _template_string_if_invalid_marker, admin_client, admin_user, affiliate, async_client, async_rf, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, client, db, django_assert_max_num_queries, django_assert_num_queries, django_capture_on_commit_callbacks, django_db_blocker, django_db_createdb, django_db_keepdb, django_db_modify_db_settings, django_db_modify_db_settings_parallel_suffix, django_db_modify_db_settings_tox_suffix, django_db_modify_db_settings_xdist_suffix, django_db_reset_sequences, django_db_serialized_rollback, django_db_setup, django_db_use_migrations, django_mail_dnsname, django_mail_patch_dns, django_test_environment, django_user_model, django_username_field, doctest_namespace, factoryboy_request, faker, lead1, lead1__addresscountry, lead1__addressstate, lead1__billing_name, lead1__created_at, lead1__created_by, lead1__description, lead1__email, lead1__first_name, lead1__lead_status, lead1__mobile, lead1__owner, lead_data, live_server, mailoutbox, monkeypatch, normal, open_lead_factory, partner1, partner2, pytestconfig, record_property, record_testsuite_property, record_xml_attribute, recwarn, rf, settings, staff, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, transactional_db
> use 'pytest --fixtures [testpath]' for help on them.
When I use it without a custom name, it works:

register(leadfactory.OpenLeadFactory)

Where am I going wrong when using a custom name?

How can I remove messages from an ActiveMQ queue using Python?

I have an ActiveMQ queue which has several messages that were sent using persistent set to true. When I create a subscriber in Python to read the queue, I get all of the messages in the queue. The next time I open the subscriber, I get all of the same messages. I adjusted the code that writes to the queue to set persistent to false, but the message remains in the queue. Have I neglected to send an acknowledgement?
The code is written using Python 2.7 because that's what our customer is using. I'd love to upgrade them, but I don't have the time.
Here's the script that reads the queue:
import socket
import threading
import xml.etree.ElementTree as etree
from xml.dom import minidom  # for pretty printing
# import SampleXML
import sys
import os
import math
import time
from time import monotonic
import string
import stomp  # for queue support
import platform


class ConnectionListener(stomp.ConnectionListener):
    def __init__(self, connection):
        self.connection = connection
        print("Listener created")

    def on_message(self, message):
        print("Received message with body " + message.body)


class Reader:
    def __init__(self):
        pass

    def ConnectToQueue(self):
        # For Production
        user = os.getenv("ACTIVEMQ_USER") or "worthington"
        # user = os.getenv("ACTIVEMQ_USER") or "worthington_test"
        password = os.getenv("ACTIVEMQ_PASSWORD") or "level3"
        host = os.getenv("ACTIVEMQ_HOST") or "localhost"
        port = os.getenv("ACTIVEMQ_PORT") or 61613
        # destination = sys.argv[1:2] or ["/topic/event"]
        # destination = destination[0]
        dest = "from_entec_test"
        # For Production
        # dest = "from_entec"

        try:
            conn = stomp.Connection10(host_and_ports=[(host, port)])
            conn.set_listener('message', ConnectionListener(conn))
            # conn.start()
            # subscribe_id = '-'.join(map(str, (platform.node(), os.getppid(), os.getpid())))
            conn.connect(login=user, passcode=password)
            subscribe_id = "Queue Test Listener"
            conn.subscribe(destination=dest, id=subscribe_id, ack='client-individual')
            conn.unsubscribe(id=subscribe_id)
            conn.disconnect()
        except Exception as error:
            reason = str(error)
            print("Exception when reading data from queue: " + str(error))
            pass


if __name__ == "__main__":
    try:
        UploadData = Reader()
        UploadData.ConnectToQueue()
        print("Reader finished.")
    except Exception as Value:
        reason = str(Value)
        pass
And here's the code that writes to it:
import socket
import threading
import xml.etree.ElementTree as etree
from xml.dom import minidom  # for pretty printing
# import SampleXML
import sys
import os
import math
import time
from time import monotonic
import string
import stomp  # for queue support
import platform


class ConnectionListener(stomp.ConnectionListener):
    def __init__(self, connection):
        self.connection = connection
        print "Listener created"

    def on_message(self, message):
        print "Received message with body " + message.body


class UploadData:
    def __init__(self):
        pass

    def ConnectToQueue(self):
        # For Production
        user = os.getenv("ACTIVEMQ_USER") or "worthington"
        # user = os.getenv("ACTIVEMQ_USER") or "worthington_test"
        password = os.getenv("ACTIVEMQ_PASSWORD") or "level3"
        host = os.getenv("ACTIVEMQ_HOST") or "localhost"
        port = os.getenv("ACTIVEMQ_PORT") or 61613
        # destination = sys.argv[1:2] or ["/topic/event"]
        # destination = destination[0]
        dest = "from_entec_test"
        # For Production
        # dest = "from_entec"

        try:
            conn = stomp.Connection10(host_and_ports=[(host, port)])
            # conn.start()
            # subscribe_id = '-'.join(map(str, (platform.node(), os.getppid(), os.getpid())))
            subscribe_id = "Queue Test Listener"
            conn.connect(login=user, passcode=password)
            message = "This is a test message."
            conn.send(dest, message, persistent='true')
            print "Sent message containing: " + message
            conn.disconnect()
        except Exception, error:
            reason = str(error)
            print "Exception when writing data to queue: " + str(error)
            pass


if __name__ == "__main__":
    try:
        UploadData = UploadData()
        UploadData.ConnectToQueue()
    except Exception, Value:
        reason = str(Value)
        print "Main routine exception: " + str(Value)
        pass
I'm not very familiar with Python STOMP clients, but from the code you appear to be subscribing using the 'client-individual' acknowledgement mode of STOMP, which means that for each message you receive you must send an ACK frame back with the message id so that the remote can mark it as consumed. Since you are not doing that, the messages will not be removed from the queue.
As an alternative, you can use the 'auto' acknowledgement mode, which marks messages as consumed as soon as the broker dispatches them. To understand the STOMP subscription model, refer to the STOMP specification.
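For illustration, a minimal sketch (not part of the original answer) of both options with stomp.py; the exact ack() signature and header names vary between stomp.py versions and STOMP protocol levels, so treat them as assumptions to check against your client's documentation:

import stomp


class AckingListener(stomp.ConnectionListener):
    def __init__(self, connection):
        self.connection = connection

    def on_message(self, frame):
        print("Received message with body " + frame.body)
        # With ack='client-individual', acknowledge each message explicitly so the
        # broker can remove it from the queue. STOMP 1.0 only needs the message-id;
        # STOMP 1.1+ also requires the subscription id.
        self.connection.ack(frame.headers['message-id'])


# Alternatively, let the broker mark messages as consumed on dispatch:
# conn.subscribe(destination=dest, id=subscribe_id, ack='auto')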

Django REST framework test code: self.client.delete problem

from rest_framework import status, response
from rest_framework.test import APITestCase

from lots.models import Lot


class LotsTestCase(APITestCase):
    def setUp(self) -> None:
        self.lot = Lot.objects.create(
            name="1",
            address="Dont Know",
            phone_num="010-4451-2211",
            latitude=127.12,
            longitude=352.123,
            basic_rate=20000,
            additional_rate=2000,
            partnership=False,
            section_count=3,
        )

    def test_delete(self):
        response = self.client.delete(f'api/lots/{self.lot["name"]}')
        # response = self.client.delete(f'/api/users/{self.users[0].pk}')
        # url = reverse(f'/api/lots/{self.lot}', kwargs={'pk': self.lot.pk})
        # self.client.delete(url)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(self.lot.objects.filter(pk=self.lot.pk.count()))
I have problems with the test code above. Why doesn't it work? I know it has to do with calling dictionary values but I just can't figure it out. Thanks for your help.
Lot.objects.create(...) returns a Lot instance, so you access the name with self.lot.name.
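As a rough sketch of the corrected test (the '/api/lots/...' path is an assumption that depends on your URLconf, and the final assertion is rewritten to compare a count):

def test_delete(self):
    # self.lot is a model instance, not a dict, so use attribute access.
    response = self.client.delete(f'/api/lots/{self.lot.name}')
    self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
    # Query the model class, not the instance, and compare against a count.
    self.assertEqual(Lot.objects.filter(pk=self.lot.pk).count(), 0)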

Scrapy Splash won't execute Lua script

I have run across an issue in which my Lua script refuses to execute. The response returned from the SplashRequest call seems to be an HTML body, while I'm expecting a document title. I am assuming that the Lua script is never being called, as it seems to have no apparent effect on the response. I have dug through the documentation a lot and can't quite figure out what is missing here. Does anyone have any suggestions?
from urlparse import urljoin

import scrapy
from scrapy_splash import SplashRequest

GOOGLE_BASE_URL = 'https://www.google.com/'
GOOGLE_QUERY_PARAMETERS = '#q={query}'
GOOGLE_SEARCH_URL = urljoin(GOOGLE_BASE_URL, GOOGLE_QUERY_PARAMETERS)
GOOGLE_SEARCH_QUERY = 'example search query'

LUA_SCRIPT = """
function main(splash)
    assert(splash:go(splash.args.url))
    return splash:evaljs("document.title")
end
"""

SCRAPY_CRAWLER_NAME = 'google_crawler'
SCRAPY_SPLASH_ENDPOINT = 'render.html'
SCRAPY_ARGS = {
    'lua_source': LUA_SCRIPT
}


def get_search_url(query):
    return GOOGLE_SEARCH_URL.format(query=query)


class GoogleCrawler(scrapy.Spider):
    name = SCRAPY_CRAWLER_NAME
    search_url = get_search_url(GOOGLE_SEARCH_QUERY)

    def start_requests(self):
        response = SplashRequest(self.search_url,
                                 self.parse, endpoint=SCRAPY_SPLASH_ENDPOINT, args=SCRAPY_ARGS)
        yield response

    def parse(self, response):
        doc_title = response.body_as_unicode()
        print doc_title
The 'endpoint' argument of SplashRequest must be 'execute' in order to execute a Lua script; it is 'render.html' in the example.
LUA_SCRIPT = """
function main(splash)
    assert(splash:go(splash.args.url))
    return {title = splash:evaljs("document.title")}
end
"""

def start_requests(self):
    yield SplashRequest(self.search_url, self.parse, endpoint='execute', args=SCRAPY_ARGS)
You can recover the value with response.data['title']
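For completeness, a minimal sketch of the matching parse callback (not in the original answer), assuming the Lua table above is returned via endpoint='execute':

def parse(self, response):
    # With endpoint='execute', the table returned by main() is exposed
    # as a dict on response.data.
    doc_title = response.data['title']
    print(doc_title)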

How can messages be retained in a Flask website?

I'm new to Flask and trying to understand how to retain variable values. To do this, I'm trying to write a small application that can accept message inputs, add them to a list and then print out that list. My code isn't working and I'm not sure why. I would appreciate guidance on this problem (and on any other obvious problems).
from flask import Flask
from flask import request
from flask import redirect


class Server(Flask):
    def __init__(self, *args, **kwargs):
        super(Server, self).__init__(*args, **kwargs)
        self.messages = []


server = Server(__name__)


@server.route('/')
def form():
    return "messages: " + server.messages


@server.route("/input_message")
def input_message():
    return '<form action="/store_message" method="GET"><input name="input1"><input type="submit" value="enter message"></form>'


@server.route("/store_message")
def store_message():
    server.messages.append(request.args.get('input1', ''))
    return redirect("http://127.0.0.1:5000", code=302)


if __name__ == "__main__":
    server.run(
        host="127.0.0.1",
        port="5000"
    )
In your example server.messages is a list, but you're treating it like a string, so "messages: " + server.messages will raise a TypeError. You want something like "messages: " + ", ".join(server.messages).
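A minimal sketch of the corrected view, using the same app object as above:

@server.route('/')
def form():
    # Join the stored messages into a single string before concatenating.
    return "messages: " + ", ".join(server.messages)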