I'm trying to use pytest on Jython, and I'm getting stuck right at the beginning.
I've successfully installed the pytest package with easy_install:
$ ./jython easy_install pytest
When I try to run the example from this page, things go wrong: I receive an extremely long failure report, like the one below. Does anybody have any idea why this is happening?
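For reference, the example from that page is pytest's minimal getting-started test, reconstructed here (it matches the assertion shown in the failure report below):

# content of test_sample.py
def func(x):
    return x + 1

def test_answer():
    assert func(3) == 5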
$ py.test-jython
============================= test session starts ==============================
platform java1.6.0_37 -- Python 2.5.3 -- pytest-2.3.2
collected 1 items
test_sample.py F
=================================== FAILURES ===================================
_________________ test_answer __________________
    def test_answer():
        assert func(3) == 5

test_sample.py:5:

self = AssertionError()

    def __init__(self, *args):
        BuiltinAssertionError.__init__(self, *args)
        if args:
            try:
                self.msg = str(args[0])
            except py.builtin._sysex:
                raise
            except:
                self.msg = "<[broken __repr__] %s at %0xd>" %(
                    args[0].__class__, id(args[0]))
        else:
            f = py.code.Frame(sys._getframe(1))
            try:
                source = f.code.fullsource
                if source is not None:
                    try:
                        source = source.getstatement(f.lineno, assertion=True)
                    except IndexError:
                        source = None
                    else:
                        source = str(source.deindent()).strip()
            except py.error.ENOENT:
                source = None
                # this can also occur during reinterpretation, when the
                # co_filename is set to "<run>".
            if source:
                self.msg = reinterpret(source, f, should_fail=True)
../jython2.5.3/Lib/site-packages/pytest-2.3.2-py2.5.egg/_pytest/assertion/reinterpret.py:32:
source = 'assert func(3) == 5', frame =
should_fail = True
    def interpret(source, frame, should_fail=False):
        mod = ast.parse(source)
        visitor = DebugInterpreter(frame)
        try:
            visitor.visit(mod)
../jython2.5.3/Lib/site-packages/pytest-2.3.2-py2.5.egg/_pytest/assertion/newinterpret.py:49:
...
self = <_pytest.assertion.newinterpret.DebugInterpreter object at 0x4>
name = Name
    def visit_Name(self, name):
        explanation, result = self.generic_visit(name)
../jython2.5.3/Lib/site-packages/pytest-2.3.2-py2.5.egg/_pytest/assertion/newinterpret.py:147:
self = <_pytest.assertion.newinterpret.DebugInterpreter object at 0x4>
node = Name
    def generic_visit(self, node):
        # Fallback when we don't have a special implementation.
        if _is_ast_expr(node):
            mod = ast.Expression(node)
            co = self._compile(mod)
            try:
                result = self.frame.eval(co)
            except Exception:
                raise Failure()
            explanation = self.frame.repr(result)
            return explanation, result
        elif _is_ast_stmt(node):
            mod = ast.Module([node])
            co = self._compile(mod, "exec")
            try:
                self.frame.exec_(co)
            except Exception:
                raise Failure()
            return None, None
        else:
            raise AssertionError("can't handle %s" %(node,))
E       AssertionError: can't handle Name
../jython2.5.3/Lib/site-packages/pytest-2.3.2-py2.5.egg/_pytest/assertion/newinterpret.py:134: AssertionError
=========================== 1 failed in 0.55 seconds ===========================
Pytest has a workaround for Jython's incomplete AST implementation, see issue1479. I just extended the workaround on the pytest side to work on Jython 2.5.3. You can install a development candidate of pytest with:
pip install -i http://pypi.testrun.org -U pytest
You should then get at least version 2.3.4.dev1 from "py.test-jython --version", and assertions should work with Jython 2.5.3.
Currently pytest does not support Jython 2.5.3; it works only on Jython 2.5.1.
I'm using the tf.Print op in a Jupyter notebook. It works as required, but will only print the output to the console, without printing in the notebook. Is there any way to get around this?
An example would be the following (in a notebook):
import tensorflow as tf
a = tf.constant(1.0)
a = tf.Print(a, [a], 'hi')
sess = tf.Session()
a.eval(session=sess)
That code will print 'hi[1]' in the console, but nothing in the notebook.
Update, Feb 3, 2017
I've wrapped this into the memory_util package. Example usage:
# install memory util
import urllib.request
response = urllib.request.urlopen("https://raw.githubusercontent.com/yaroslavvb/memory_util/master/memory_util.py")
open("memory_util.py", "wb").write(response.read())

import memory_util

sess = tf.Session()
a = tf.random_uniform((1000,))
b = tf.random_uniform((1000,))
c = a + b
with memory_util.capture_stderr() as stderr:
    sess.run(c.op)

print(stderr.getvalue())
**Old stuff**
You could reuse FD redirector from IPython core. (idea from Mark Sandler)
import os
import sys

STDOUT = 1
STDERR = 2

class FDRedirector(object):
    """ Class to redirect output (stdout or stderr) at the OS level using
        file descriptors.
    """

    def __init__(self, fd=STDOUT):
        """ fd is the file descriptor of the output you want to capture.
            It can be STDOUT or STDERR.
        """
        self.fd = fd
        self.started = False
        self.piper = None
        self.pipew = None

    def start(self):
        """ Set up the redirection.
        """
        if not self.started:
            self.oldhandle = os.dup(self.fd)
            self.piper, self.pipew = os.pipe()
            os.dup2(self.pipew, self.fd)
            os.close(self.pipew)
            self.started = True

    def flush(self):
        """ Flush the captured output, similar to the flush method of any
            stream.
        """
        if self.fd == STDOUT:
            sys.stdout.flush()
        elif self.fd == STDERR:
            sys.stderr.flush()

    def stop(self):
        """ Unset the redirection and return the captured output.
        """
        if self.started:
            self.flush()
            os.dup2(self.oldhandle, self.fd)
            os.close(self.oldhandle)
            f = os.fdopen(self.piper, 'r')
            output = f.read()
            f.close()
            self.started = False
            return output
        else:
            return ''

    def getvalue(self):
        """ Return the output captured since the last getvalue, or the
            start of the redirection.
        """
        output = self.stop()
        self.start()
        return output

import tensorflow as tf

x = tf.constant([1, 2, 3])
a = tf.Print(x, [x])
redirect = FDRedirector(STDERR)
sess = tf.InteractiveSession()
redirect.start()
a.eval()
print "Result"
print redirect.stop()
I ran into the same problem and got around it by using a function like this in my notebooks:
import numpy as np
import tensorflow as tf

def tf_print(tensor, transform=None):
    # Insert a custom python operation into the graph that does nothing but print a tensor's value
    def print_tensor(x):
        # x is typically a numpy array here, so you could do anything you want with it,
        # but adding a transformation of some kind usually makes the output more digestible
        print(x if transform is None else transform(x))
        return x
    log_op = tf.py_func(print_tensor, [tensor], [tensor.dtype])[0]
    with tf.control_dependencies([log_op]):
        res = tf.identity(tensor)
    # Return the given tensor
    return res

# Now define a tensor and use the tf_print function much like the tf.identity function
tensor = tf_print(tf.random_normal([100, 100]), transform=lambda x: [np.min(x), np.max(x)])

# This will print the transformed version of the tensor's actual value
# (which was summarized to just the min and max for brevity)
sess = tf.InteractiveSession()
sess.run([tensor])
sess.close()
FYI, using a logger instead of calling "print" in my custom function worked wonders for me, as stdout is often buffered by Jupyter and not shown before "Loss is NaN"-type errors, which were the whole reason for using that function in the first place in my case.
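As a minimal sketch of that logger-based variant (the logger name and configuration are my assumptions, not part of the original comment):

import logging

import numpy as np
import tensorflow as tf

# Assumed setup: a dedicated logger whose handler writes straight to stderr,
# which Jupyter surfaces per record instead of buffering like stdout.
logger = logging.getLogger("tf_debug")
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())

def tf_log(tensor, transform=None):
    # Same tf.py_func trick as tf_print above, but routed through the logger.
    def log_tensor(x):
        logger.info(x if transform is None else transform(x))
        return x
    log_op = tf.py_func(log_tensor, [tensor], [tensor.dtype])[0]
    with tf.control_dependencies([log_op]):
        return tf.identity(tensor)

# Usage mirrors the tf_print example above:
tensor = tf_log(tf.random_normal([100, 100]), transform=lambda x: [np.min(x), np.max(x)])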
You can check the terminal where you launched the Jupyter notebook to see the message:
import tensorflow as tf
tf.InteractiveSession()
a = tf.constant(1)
b = tf.constant(2)
opt = a + b
opt = tf.Print(opt, [opt], message="1 + 2 = ")
opt.eval()
In the terminal, I can see:
2018-01-02 23:38:07.691808: I tensorflow/core/kernels/logging_ops.cc:79] 1 + 2 = [3]
A simple way; I have tried it in regular Python, but not Jupyter yet:
import os
import sys

os.dup2(sys.stdout.fileno(), 1)
os.dup2(sys.stdout.fileno(), 2)
An explanation is here: In python, how to capture the stdout from a c++ shared library to a variable
The issue I faced was that one can't run a session inside a TensorFlow graph function, as during training or evaluation.
That's why the options of using sess.run(opt) or opt.eval() were not a solution for me.
The best option was to use tf.Print() and redirect the logging to an external file.
I did this using a temporary file, which I transferred to a regular file like this:
import os
import sys
import tempfile

STDERR = 2

class captured:
    def __init__(self, fd=STDERR):
        self.fd = fd
        self.prevfd = None

    def __enter__(self):
        t = tempfile.NamedTemporaryFile()
        self.prevfd = os.dup(self.fd)
        os.dup2(t.fileno(), self.fd)
        return t

    def __exit__(self, exc_type, exc_value, traceback):
        os.dup2(self.prevfd, self.fd)

with captured(fd=STDERR) as tmp:
    ...
    classifier.evaluate(input_fn=input_fn, steps=100)

with open('log.txt', 'w') as f:
    print(open(tmp.name).read(), file=f)
And then in my evaluation I do:
a = tf.constant(1)
a = tf.Print(a, [a], message="a: ")
I want to write a chat demo with Tornado and Redis. I use Redis subscribe, but what I wrote doesn't work. When I run the code, iTerm outputs:
listening 8000
GroupChat here
getMsg here
None
None
And when I PUBLISH testc helloword in redis-cli, iTerm outputs:
[I 150401 18:30:57 web:1825] 304 GET /groupchat?key=testc (127.0.0.1) 2.40ms
Message(kind=u'message', channel=u'testc', body=u'helloword', pattern=u'testc')
I just want to get the Message in GroupChat.get, but I get None. Can anyone help me?
The GroupChat code is here:
class GroupChat(tornado.web.RequestHandler):
    def initialize(self):
        print 'GroupChat here'
        self.c = tornadoredis.Client(host=CONFIG['REDIS_HOST'], port=CONFIG['REDIS_PORT'], password=CONFIG['REDIS_AUTH'])
        self.channelMsgModel = channelMsgModel(self.c)

    @tornado.gen.coroutine
    def get(self):
        try:
            key = self.get_argument('key')
            info = yield self.channelMsgModel.getMsg(key)
            print info
            self.finish(info)
        except Exception, e:
            print e
The channelMsgModel code is here:
import tornado.gen

class channelMsgModel:
    timeout = 10

    def __init__(self, redisobj):
        self.redisobj = redisobj

    @tornado.gen.coroutine
    def getMsg(self, key):
        print 'getMsg here'
        yield tornado.gen.Task(self.redisobj.subscribe, key)
        info = self.redisobj.listen(self.on_message)
        print info
        raise tornado.gen.Return(info)

    def on_message(self, msg):
        if msg.kind == 'message':
            print msg
            return msg
        elif msg.kind == 'unsubscribe':
            self.redisobj.disconnect()
            # raise tornado.gen.Return(False)
Use a toro.Queue (which will be included in Tornado itself in the upcoming version 4.2):
import toro
from tornado import gen

class channelMsgModel:
    def __init__(self, redisobj):
        self.redisobj = redisobj
        self.queue = toro.Queue()

    @gen.coroutine
    def getMsg(self, key):
        yield gen.Task(self.redisobj.subscribe, key)
        self.redisobj.listen(self.on_message)
        info = yield self.queue.get()
        raise gen.Return(info)

    def on_message(self, msg):
        if msg.kind == 'message':
            self.queue.put_nowait(msg)
        elif msg.kind == 'unsubscribe':
            self.redisobj.disconnect()
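For what it's worth, on Tornado 4.2+ the same pattern should work with the built-in tornado.queues.Queue in place of toro (a sketch under that assumption; the rest of the code is unchanged):

from tornado import gen
from tornado.queues import Queue

class channelMsgModel:
    def __init__(self, redisobj):
        self.redisobj = redisobj
        self.queue = Queue()  # tornado.queues.Queue mirrors toro.Queue's API here

    @gen.coroutine
    def getMsg(self, key):
        yield gen.Task(self.redisobj.subscribe, key)
        self.redisobj.listen(self.on_message)
        info = yield self.queue.get()
        raise gen.Return(info)

    def on_message(self, msg):
        if msg.kind == 'message':
            self.queue.put_nowait(msg)
        elif msg.kind == 'unsubscribe':
            self.redisobj.disconnect()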
In Grinder we'd like to add customized statistics:
grinder.statistics.registerSummaryExpression("connTimeout", "userLong0")
grinder.statistics.forCurrentTest.addLong("userLong0", 1)
It seems to be successful, as we can see the customized field in the Grinder out file.
The problem is that the value of that statistic is always 0.
Here is the complete script, implemented in Jython:
# -*- coding: utf-8 -*-
from net.grinder.script.Grinder import grinder
from net.grinder.script import Test
from com.netease.cloud.ndir.performance import Query
from com.netease.cloud.ndir.performance import QueryReturnCode

def writeToFile(text):
    filename = "response.log"
    file = open(filename, "a")
    file.write(str(text) + "\n")
    file.close()

ndir_client = grinder.getProperties().getProperty("ndirClient")
query_file = grinder.getProperties().getProperty("queryFile")
request = Query("grinder.properties", query_file)

grinder.statistics.registerSummaryExpression("connTimeout", "userLong0")
grinder.statistics.registerSummaryExpression("readTimeout", "userLong1")
grinder.statistics.registerSummaryExpression("code!=200", "userLong2")
grinder.statistics.registerSummaryExpression("docs=[]", "userLong3")
grinder.statistics.registerSummaryExpression("unknown", "userLong4")

class TestRunner:
    def __init__(self):
        grinder.statistics.delayReports = True

    def initialSleep(self):
        sleepTime = grinder.threadNumber * 20  # per thread
        grinder.sleep(sleepTime, 0)

    def query(self):
        if ndir_client == "true":
            query = request.randomQueryByNdirClient
        else:
            query = request.randomQueryByHttpGet
        try:
            result = query()
        except:
            writeToFile("exception")
            grinder.statistics.forCurrentTest.addLong("userLong4", 1)
            grinder.getStatistics().getForCurrentTest().setSuccess(False)
            return
        if result == 0:
            grinder.getStatistics().getForCurrentTest().setSuccess(True)
            return
        elif result == 1:
            grinder.statistics.forCurrentTest.addLong("userLong0", 1)
            grinder.getStatistics().getForCurrentTest().setSuccess(False)
            return
        elif result == 2:
            grinder.statistics.forCurrentTest.addLong("userLong1", 1)
            grinder.getStatistics().getForCurrentTest().setSuccess(False)
            return
        elif result == 3:
            grinder.statistics.forCurrentTest.addLong("userLong2", 1)
            grinder.getStatistics().getForCurrentTest().setSuccess(False)
            return
        elif result == 4:
            grinder.statistics.forCurrentTest.addLong("userLong3", 1)
            grinder.getStatistics().getForCurrentTest().setSuccess(True)
            return
        else:
            grinder.statistics.forCurrentTest.addLong("userLong4", 1)
            grinder.getStatistics().getForCurrentTest().setSuccess(False)
            return

    request = Test(120, 'query').wrap(query)

    def __call__(self):
        if grinder.runNumber == 0:
            self.initialSleep()
        self.request(self)
I suspect the problem is that you are marking tests as failed but expecting the statistics to appear in the summary: only successful tests are accumulated into the summary statistics.
Try registering data log expressions as well:
grinder.statistics.registerDataLogExpression("connTimeout", "userLong0")
grinder.statistics.registerDataLogExpression("readTimeout", "userLong1")
grinder.statistics.registerDataLogExpression("code!=200", "userLong2")
grinder.statistics.registerDataLogExpression("docs=[]", "userLong3")
grinder.statistics.registerDataLogExpression("unknown", "userLong4")
Then you'll at least see the values in the data log file of the worker process.
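To illustrate the first point with a hedged sketch based on the script above: a custom counter only shows up in the summary if the test that records it stays marked successful:

# Record the custom statistic but leave the test successful, so the value
# is accumulated into the summary statistics.
grinder.statistics.forCurrentTest.addLong("userLong0", 1)
grinder.getStatistics().getForCurrentTest().setSuccess(True)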
I am working on OpenStack and I want to monitor the virtual machines' CPU usage. For that I want to find their PIDs through the parent (central) OpenStack instance.
I used
ps aux | grep
and I did receive output. However, I want to confirm whether this is the correct PID. Is there any way I can check this?
Or is there any other way to find the PIDs of the virtual machines?
Update: this command does not work. It gives me a PID which always changes; it's not constant.
Thank you
Well, libvirt has some interfaces for this. Here's some Python that extracts that data into data structures for you:
#!/usr/bin/env python
# Modules
import subprocess
import traceback
import commands
import signal
import time
import sys
import re
import os
import getopt
import pprint

try:
    import libvirt
except:
    print "no libvirt detected"
    sys.exit(0)

from xml.dom.minidom import parseString

global instances
global virt_conn
global tick
global virt_exist

def virtstats():
    global virt_exist
    global virt_conn
    global instances
    cpu_stats = []
    if virt_exist == True:
        if virt_conn == None:
            print 'Failed to open connection to the hypervisor'
            virt_exist = False
    if virt_exist == True:
        virt_info = virt_conn.getInfo()
        for x in range(0, virt_info[2]):
            cpu_stats.append(virt_conn.getCPUStats(x, 0))
        virt_capabilities = virt_conn.getCapabilities()
        domcpustats = 0
        # domcpustats = virDomain::GetcpuSTATS()
        totmem = 0
        totvcpu = 0
        totcount = 0
        vcpu_stats = []
        for id in virt_conn.listDomainsID():
            dom = virt_conn.lookupByID(id)
            totvcpu += dom.maxVcpus()
            vcpu_stats.append(dom.vcpus())
            totmem += dom.maxMemory()
            totcount += 1
        dom = parseString(virt_capabilities)
        xmlTag = dom.getElementsByTagName('model')[0].toxml()
        xmlData = xmlTag.replace('<model>', '').replace('</model>', '')
        for info in virt_info:
            print info
        for stat in cpu_stats:
            print "cpu %s" % stat
        for vstat in vcpu_stats:
            print "vcpu:\n"
            pprint.pprint(vstat)
        print "CPU ( %s ) Use - %s vCPUS ( %s logical processors )" % (xmlData, totvcpu, virt_info[2])
    sys.exit(0)

def main():
    try:
        global virt_conn
        global virt_exist
        virt_conn = libvirt.openReadOnly(None)
        virt_exist = True
    except:
        virt_exist = False
        print "OK: not a compute node"
        sys.exit(0)
    virtstats()

if __name__ == "__main__":
    main()
Now, what you get from this in terms of usage is CPU time.
The vcpu blocks are basically in this layout (sketched concretely below):
1st: vCPU number, starting from 0.
2nd: vCPU state:
0: offline
1: running
2: blocked on resource
3rd: CPU time used, in nanoseconds.
4th: real CPU number.
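To make that layout concrete, here is the shape of what dom.vcpus() returns in the script above (a sketch; the values are illustrative, not real output):

# dom.vcpus() returns a pair: per-vCPU info tuples and per-vCPU pinning maps.
# ([(0, 1, 123456789L, 3),       # (vCPU number, state, CPU time in ns, real CPU)
#   (1, 1, 987654321L, 0)],
#  [(True, False, True, True),   # which physical CPUs vCPU 0 may run on
#   (True, True, True, False)])  # which physical CPUs vCPU 1 may run on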
The CPU blocks are obvious once you realize that's what's going on under the hood in libvirt.
Hope that helps!
By using libvirt, Python, lxml, and lsof you can recover the PID if your virtual instance (domain) has a display output set (VNC, Spice, ...):
Retrieve the display port.
Retrieve the PID from the opened display port.
Here is the code:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from subprocess import check_output

from lxml import etree
import libvirt

def get_port_from_XML(xml_desc):
    tree = etree.fromstring(xml_desc)
    ports = tree.xpath('//graphics/@port')
    if len(ports):
        return ports[0]
    return None

def get_pid_from_listen_port(port):
    if port is None:
        return ''
    return check_output(['lsof', '-i:%s' % port, '-t']).replace('\n', '')

conn = libvirt.openReadOnly('')
if conn is None:
    print 'Failed to open connection to the hypervisor'
    sys.exit(1)

for domain_id in conn.listDomainsID():
    domain_instance = conn.lookupByID(domain_id)
    name = domain_instance.name()
    xml_desc = domain_instance.XMLDesc(0)
    port = get_port_from_XML(xml_desc)
    pid = get_pid_from_listen_port(port)
    print '%s (port:%s) (pid:%s)' % (name, port, pid)
grep "79d87652-8c8e-4afa-8c13-32fbcbf98e76" --include=libvirt.xml /path/to/nova/instances -r -A 2 | grep "<name" | cut -d " " -f 3
allows to find "instance-" which can be mapped to ps aux output of "-name" parameter. so you can map openstack instance id to pid.
The simplest way is to use cgroups. On Ubuntu:
cat /sys/fs/cgroup/cpuset/libvirt/qemu/<machine-name>/tasks
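If you'd rather read that from Python, a minimal sketch (assumptions: the Ubuntu cgroup layout shown above, and a machine name matching the libvirt domain name):

def qemu_task_pids(machine_name):
    # Ubuntu cgroup layout from the command above; the path differs per distro.
    path = "/sys/fs/cgroup/cpuset/libvirt/qemu/%s/tasks" % machine_name
    with open(path) as f:
        return [int(line) for line in f if line.strip()]

# e.g. qemu_task_pids("instance-00000042")  # hypothetical domain name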
I'm trying to migrate from Plone 3.3.5 to Plone 4.0.7, and I'm stuck on the step that converts all FileFields to BlobFields.
The Plone upgrade script successfully converts all native FileFields, but I have several custom AT-based classes which have to be converted manually. I've tried two ways of doing the conversion, both of which lead to the same error:
Using schemaextender, as outlined in the Plone migration guide and a source code example.
Renaming all FileFields to blob fields and then running this script:
from AccessControl.SecurityManagement import newSecurityManager
from AccessControl import getSecurityManager
from Products.CMFCore.utils import getToolByName
from zope.app.component.hooks import setSite
from Products.contentmigration.migrator import BaseInlineMigrator
from Products.contentmigration.walker import CustomQueryWalker
from plone.app.blob.field import BlobField
import transaction

admin = app.acl_users.getUserById("admin")
newSecurityManager(None, admin)
portal = app.plone
setSite(portal)

def find_all_types_fields(portal_catalog, type_instance_to_search):
    output = {}
    searched = []
    for k in portal_catalog():
        kobj = k.getObject()
        if kobj.__class__.__name__ in searched:
            continue
        searched.append(kobj.__class__.__name__)
        for field in kobj.schema.fields():
            if isinstance(field, type_instance_to_search):
                if kobj.__class__.__name__ in output:
                    output[kobj.__class__.__name__].append(field.__name__)
                else:
                    output[kobj.__class__.__name__] = [field.__name__]
    return output

def produce_migrator(field_map):
    source_class = field_map.keys()[0]
    fields = {}
    for x in field_map.values()[0]:
        fields[x] = None

    class FileBlobMigrator(BaseInlineMigrator):
        '''Migrating ExtensionBlobField (which is still a FileField) to BlobField'''
        src_portal_type = source_class
        src_meta_type = source_class
        fields_map = fields

        def migrate_data(self):
            '''Unfinished'''
            for k in self.fields_map.keys():
                if k in self.obj.schema.keys():
                    print("***converting attribute:", k)
                    field = self.obj.getField(k).get(self.obj)
                    mutator = self.obj.getField(k).getMutator(self.obj)
                    mutator(field)

        def last_migrate_reindex(self):
            '''Unfinished'''
            self.obj.reindexObject()

    return FileBlobMigrator

def consume_migrator(portal_catalog, migrator):
    walker = CustomQueryWalker(portal_catalog, migrator, full_transaction=True)
    transaction.savepoint(optimistic=True)
    walker_status = walker.go()
    return walker.getOutput()

def migrate_blobs(catalog, migrate_type):
    all_fields = find_all_types_fields(catalog, migrate_type)
    for k in [{k: all_fields[k]} for k in all_fields]:
        migrator = produce_migrator(k)
        print consume_migrator(catalog, migrator)

catalog = getToolByName(portal, 'portal_catalog')
migrate_blobs(catalog, BlobField)
The problem occurs on the self.obj.reindexObject() line, where I receive the following traceback:
2011-08-09 17:21:12 ERROR Zope.UnIndex KeywordIndex: unindex_object could not remove documentId -1945041983 from index object_provides. This should not happen.
Traceback (most recent call last):
File "/home/alex/projects/plone4/eggs/Zope2-2.12.18-py2.6-linux-x86_64.egg/Products/PluginIndexes/common/UnIndex.py", line 166, in removeForwardIndexEntry indexRow.remove(documentId)
KeyError: -1945041983
> /home/alex/projects/plone4/eggs/Zope2-2.12.18-py2.6-linux-x86_64.egg/Products/PluginIndexes/common/UnIndex.py(192)removeForwardIndexEntry()
191 str(documentId), str(self.id)),
--> 192 exc_info=sys.exc_info())
193 else:
If I remove the line that triggers reindexing, the conversion completes successfully, but if I try to manually reindex the catalog later, every object that has been converted can no longer be found, and I'm a bit at a loss for what to do now.
The site has LinguaPlone installed; maybe it has something to do with this?
One option would be to run the migration without the reindexObject() call and do a "Clear and Rebuild" in the catalog's ZMI Advanced tab after migrating.
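If you'd rather do that from the migration script than the ZMI, this sketch should be equivalent (clearFindAndRebuild is the method behind that ZMI button):

from Products.CMFCore.utils import getToolByName

# Clear the catalog and re-traverse the site, reindexing every object.
catalog = getToolByName(portal, 'portal_catalog')
catalog.clearFindAndRebuild()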