How can I create an OpenCL Context in the Blender BGE (Python 3.3) with GL interop?
When I create the context this way:
import sys
import pyopencl as cl
from OpenGL import platform as gl_platform

# self.platform and device are set elsewhere in the class this snippet comes from
ctx_props = cl.context_properties
props = []
if sys.platform == "linux":
    from OpenGL import GLX
    props.append((ctx_props.PLATFORM, self.platform))
    props.append((ctx_props.GL_CONTEXT_KHR, GLX.glXGetCurrentContext()))
    props.append((ctx_props.GLX_DISPLAY_KHR, GLX.glXGetCurrentDisplay()))
elif sys.platform == "win32":
    from OpenGL import WGL
    props.append((ctx_props.PLATFORM, self.platform))
    props.append((ctx_props.GL_CONTEXT_KHR, gl_platform.GetCurrentContext()))
    props.append((ctx_props.WGL_HDC_KHR, WGL.wglGetCurrentDC()))
elif sys.platform == "darwin":
    props.append((ctx_props.CONTEXT_PROPERTY_USE_CGL_SHAREGROUP_APPLE,
                  cl.get_apple_cgl_share_group()))

if sys.platform == "darwin":
    ctx = cl.Context(properties=props, devices=[])
else:
    try:
        ctx = cl.Context(properties=props)
    except:
        ctx = cl.Context(properties=props, devices=[device])
Blender crashes.
I use Blender 2.65 and pyopencl from git, on Ubuntu 12.10.
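For reference, pyopencl ships a helper that assembles these GL-interop properties for the current platform. Here is a minimal sketch modelled on pyopencl's own GL-interop example; it assumes a current GL context exists on the calling thread, which may not hold inside the BGE:
import sys
import pyopencl as cl
from pyopencl.tools import get_gl_sharing_context_properties

platform = cl.get_platforms()[0]
if sys.platform == "darwin":
    # On OS X the CGL share group property is enough; let CL pick the devices.
    ctx = cl.Context(properties=get_gl_sharing_context_properties(),
                     devices=[])
else:
    # On Linux/Windows the platform must be named alongside the GL handles.
    ctx = cl.Context(properties=[(cl.context_properties.PLATFORM, platform)]
                     + get_gl_sharing_context_properties())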
I am trying to write code for a drone flying to multiple waypoints, where the drone must not continue to the next waypoint until it sees a red colour on the camera.
Because the cv2 camera loop and the drone control run at the same time, my code was very laggy, so I tried to restructure it with the multiprocessing module. When I run the new code, the multiprocessing part doesn't work: it skips almost all of my code and goes straight to RTL mode.
from inspect import ArgInfo
from dronekit import connect, VehicleMode, LocationGlobalRelative
from pymavlink import mavutil
from numpy import loadtxt, array
from time import sleep
import sys
import cv2
import numpy as np
import multiprocessing

cap = cv2.VideoCapture(0)
hsv_a = np.array([198, 255, 255])
hsv_b = np.array([158, 68, 137])
treshold = 150
lat = [-35.3629722, -35.3629064, -35.3634361, -35.3638474]
lon = [149.1649709, 149.1655721, 149.1657331, 149.1639733]

#vehicle = connect('udp:127.0.0.1:14551',wait_ready=True)
vehicle = connect('udp:127.0.0.1:14551',wait_ready=True)

def arm_and_takeoff(aTargetAltitude):  # arming and takeoff function
    print("Basic pre-arm checks")
    # Don't let the user try to arm until autopilot is ready
    while not(vehicle.is_armable):
        print(" Waiting for vehicle to initialise...")
        sleep(1)
    print("Arming motors")
    # Copter should arm in GUIDED mode
    vehicle.mode = VehicleMode("GUIDED")
    vehicle.armed = True
    while not(vehicle.armed):
        print(" Waiting for arming...")
        sleep(1)
    print("Taking off!")
    vehicle.simple_takeoff(aTargetAltitude)
    while True:
        print(" Altitude: ", vehicle.location.global_relative_frame.alt)
        # Break and return from function just below target altitude.
        if (vehicle.location.global_relative_frame.alt >= aTargetAltitude*0.95):
            print("Reached target altitude")
            break
        sleep(1)

def dist(a, z):  # a = start, z = end
    d_lat = (a.lat-z.lat)**2
    d_long = (a.lon-z.lon)**2
    jarak = (d_lat+d_long)**0.5
    return jarak

def gerak_drone():
    for i in range(0, len(lat)):
        print(i)
        wp = LocationGlobalRelative(lat[i], lon[i], 2)
        vehicle.simple_goto(wp)
        sleep(1)
        while (dist(vehicle.location.global_relative_frame, wp) >= 0.0001):
            print(str(round(dist(vehicle.location.global_relative_frame, wp)*100000, 2)))
        while True:
            _, frame = cap.read()
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            mask = cv2.inRange(hsv, hsv_b, hsv_a)
            cv2.imshow("warna", mask)
            cv2.imshow("hitamPutih", gray)
            cv2.imshow("apa", frame)
            print(cv2.countNonZero(mask))
            if cv2.waitKey(500) == 27 or cv2.countNonZero(mask) > treshold:
                break

if __name__ == "_main_":
    altitude = 2
    lat_distance = 1
    lon_distance = 1
    p1 = multiprocessing.Process(target=arm_and_takeoff, args=(altitude))
    p2 = multiprocessing.Process(target=dist, args=(lat_distance, lon_distance))
    p3 = multiprocessing.Process(target=gerak_drone)
    p1.start()
    p2.start()
    p3.start()
    p1.join()
    p2.join()
    p3.join()
    print("Coming back")
    vehicle.mode = VehicleMode("RTL")
    sleep(20)
    vehicle.mode = VehicleMode("LAND")
Here is my terminal result
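One way to restructure this is sketched below, under the assumption that only the camera work needs to leave the main process; the color_watcher helper and the Event-based design are illustrative, not part of the original code. The main process keeps the only reference to vehicle and drives the waypoint loop, while a child process owns the camera and signals through a multiprocessing.Event:
import multiprocessing
import cv2
import numpy as np

def color_watcher(red_seen, hsv_lo, hsv_hi, threshold):
    # Owns the camera in its own process; sets the event while enough
    # pixels fall inside the HSV range and clears it otherwise.
    cap = cv2.VideoCapture(0)
    while True:
        ok, frame = cap.read()
        if not ok:
            continue
        mask = cv2.inRange(cv2.cvtColor(frame, cv2.COLOR_BGR2HSV), hsv_lo, hsv_hi)
        if cv2.countNonZero(mask) > threshold:
            red_seen.set()
        else:
            red_seen.clear()

if __name__ == "__main__":
    red_seen = multiprocessing.Event()
    watcher = multiprocessing.Process(
        target=color_watcher,
        args=(red_seen, np.array([158, 68, 137]), np.array([198, 255, 255]), 150),
        daemon=True)
    watcher.start()
    # The waypoint loop stays in the main process and keeps the only
    # reference to vehicle; at each waypoint it simply blocks on
    #     red_seen.wait()
    # before calling vehicle.simple_goto() for the next point.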
To start with, I am not a developer but an automation engineer who has worked a bit with Java, Python, C#, C++ and C.
I am trying to make a prototype that takes pictures and stores them, triggered by a digital pin on the board. At the moment I can take pictures using a switch, but it is really slow (around 3 seconds per image).
My complete system is going to be like this:
A product passes by on a conveyor and a photo cell triggers the board to take an image and store it. If an operator removes a product (because of bad quality), the image is stored in a different folder.
I started with the snapshot tool shipped with Mendel and have tried to get rid of the overhead, but the GStreamer pipeline code confuses me a lot.
If someone could help me understand the supplied code, or show me how to write a minimal solution for taking an image, I would be grateful :)
I have tried to understand and use project-teachable and examples-camera from Google Coral (https://github.com/google-coral), but with no luck. I have had the best results with the snapshot tool that uses snapshot.py, referenced here: https://coral.withgoogle.com/docs/camera/datasheet/#snapshot-tool
from periphery import GPIO
import time
import argparse
import contextlib
import fcntl
import os
import select
import sys
import termios
import threading
import gi

gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')

from functools import partial
from gi.repository import GLib, GObject, Gst, GstBase
from PIL import Image

GObject.threads_init()
Gst.init(None)

WIDTH = 2592
HEIGHT = 1944

FILENAME_PREFIX = 'img'
FILENAME_SUFFIX = '.png'
AF_SYSFS_NODE = '/sys/module/ov5645_camera_mipi_v2/parameters/ov5645_af'
CAMERA_INIT_QUERY_SYSFS_NODE = '/sys/module/ov5645_camera_mipi_v2/parameters/ov5645_initialized'
HDMI_SYSFS_NODE = '/sys/class/drm/card0/card0-HDMI-A-1/status'

# No of initial frames to throw away before camera has stabilized
SCRAP_FRAMES = 1

SRC_WIDTH = 2592
SRC_HEIGHT = 1944
SRC_RATE = '15/1'
SRC_ELEMENT = 'v4l2src'

SINK_WIDTH = 2592
SINK_HEIGHT = 1944
SINK_ELEMENT = ('appsink name=appsink sync=false emit-signals=true '
                'max-buffers=1 drop=true')

SCREEN_SINK = 'glimagesink sync=false'
FAKE_SINK = 'fakesink sync=false'

SRC_CAPS = 'video/x-raw,format=YUY2,width={width},height={height},framerate={rate}'
SINK_CAPS = 'video/x-raw,format=RGB,width={width},height={height}'

LEAKY_Q = 'queue max-size-buffers=1 leaky=downstream'

PIPELINE = '''
    {src_element} ! {src_caps} ! {leaky_q} ! tee name=t
        t. ! {leaky_q} ! {screen_sink}
        t. ! {leaky_q} ! videoconvert ! {sink_caps} ! {sink_element}
'''

def on_bus_message(bus, message, loop):
    t = message.type
    if t == Gst.MessageType.EOS:
        loop.quit()
    elif t == Gst.MessageType.WARNING:
        err, debug = message.parse_warning()
        sys.stderr.write('Warning: %s: %s\n' % (err, debug))
    elif t == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        sys.stderr.write('Error: %s: %s\n' % (err, debug))
        loop.quit()
    return True

def on_new_sample(sink, snapinfo):
    if not snapinfo.save_frame():
        # Throw away the frame
        return Gst.FlowReturn.OK
    sample = sink.emit('pull-sample')
    buf = sample.get_buffer()
    result, mapinfo = buf.map(Gst.MapFlags.READ)
    if result:
        imgfile = snapinfo.get_filename()
        caps = sample.get_caps()
        width = WIDTH
        height = HEIGHT
        img = Image.frombytes('RGB', (width, height), mapinfo.data, 'raw')
        img.save(imgfile)
        img.close()
    buf.unmap(mapinfo)
    return Gst.FlowReturn.OK

def run_pipeline(snapinfo):
    src_caps = SRC_CAPS.format(width=SRC_WIDTH, height=SRC_HEIGHT, rate=SRC_RATE)
    sink_caps = SINK_CAPS.format(width=SINK_WIDTH, height=SINK_HEIGHT)
    screen_sink = FAKE_SINK
    pipeline = PIPELINE.format(
        leaky_q=LEAKY_Q,
        src_element=SRC_ELEMENT,
        src_caps=src_caps,
        sink_caps=sink_caps,
        sink_element=SINK_ELEMENT,
        screen_sink=screen_sink)
    pipeline = Gst.parse_launch(pipeline)
    appsink = pipeline.get_by_name('appsink')
    appsink.connect('new-sample', partial(on_new_sample, snapinfo=snapinfo))
    loop = GObject.MainLoop()
    # Set up a pipeline bus watch to catch errors.
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message', on_bus_message, loop)
    # Connect the loop to the snaphelper
    snapinfo.connect_loop(loop)
    # Run pipeline.
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # Clean up.
    pipeline.set_state(Gst.State.NULL)
    while GLib.MainContext.default().iteration(False):
        pass

class SnapHelper:
    def __init__(self, sysfs, prefix='img', oneshot=True, suffix='jpg'):
        self.prefix = prefix
        self.oneshot = oneshot
        self.suffix = suffix
        self.snap_it = oneshot
        self.num = 0
        self.scrapframes = SCRAP_FRAMES
        self.sysfs = sysfs

    def get_filename(self):
        while True:
            filename = self.prefix + str(self.num).zfill(4) + '.' + self.suffix
            self.num = self.num + 1
            if not os.path.exists(filename):
                break
        return filename

    #def check_af(self):
    #    try:
    #        self.sysfs.seek(0)
    #        v = self.sysfs.read()
    #        if int(v) != 0x10:
    #            print('NO Focus')
    #    except:
    #        pass

    #def refocus(self):
    #    try:
    #        self.sysfs.write('1')
    #        self.sysfs.flush()
    #    except:
    #        pass

    def save_frame(self):
        # We always want to throw away the initial frames to let the
        # camera stabilize. This seemed empirically to be the right number
        # when running on desktop.
        if self.scrapframes > 0:
            self.scrapframes = self.scrapframes - 1
            return False
        if self.snap_it:
            self.snap_it = False
            retval = True
        else:
            retval = False
        if self.oneshot:
            self.loop.quit()
        return retval

    def connect_loop(self, loop):
        self.loop = loop

def take_picture(snap):
    start_time = int(round(time.time()))
    run_pipeline(snap)
    print(time.time() - start_time)

def main():
    button = GPIO(138, "in")
    last_state = False
    with open(AF_SYSFS_NODE, 'w+') as sysfs:
        snap = SnapHelper(sysfs, 'test', 'oneshot', 'jpg')
        sysfs.write('2')
        while 1:
            button_state = button.read()
            if (button_state == True and last_state == False):
                snap = SnapHelper(sysfs, 'test', 'oneshot', 'jpg')
                take_picture(snap)
            last_state = button_state

if __name__ == "__main__":
    main()
    sys.exit()
The output is what I expect, but it is slow.
I switched to a USB-webcam and used the pygame library instead.
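For reference, a minimal pygame capture along those lines might look like this (the device index and resolution are assumptions, not values from the original setup):
import pygame
import pygame.camera

pygame.camera.init()
cams = pygame.camera.list_cameras()              # e.g. ['/dev/video0']
cam = pygame.camera.Camera(cams[0], (640, 480))
cam.start()
frame = cam.get_image()                          # returns a pygame.Surface
pygame.image.save(frame, 'img0001.jpg')
cam.stop()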
How do I refer to the text entry widget's input in a subprocess.call() in Python GTK? This is an app for calling a bioinformatics tool from PyGTK:
#!/usr/bin/env python
import pygtk
pygtk.require('2.0')
import gtk
import subprocess

class EntryExample:
    def enter_callback(self, widget, entry):
        entry_text = entry.get_text()
        print "Entry contents: %s\n" % entry_text

    def entry_toggle_editable(self, checkbutton, entry):
        entry.set_editable(checkbutton.get_active())

    def entry_toggle_visibility(self, checkbutton, entry):
        entry.set_visibility(checkbutton.get_active())

    def __init__(self):
        # create a new window
        window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        window.set_size_request(200, 100)
        window.set_title("GTK Entry")
        window.connect("delete_event", lambda w, e: gtk.main_quit())

        vbox = gtk.VBox(False, 0)
        window.add(vbox)
        vbox.show()

        entry = gtk.Entry()
        entry.set_max_length(50)
        entry.connect("activate", self.enter_callback, entry)
        entry.set_text("Insert")
        entry.insert_text(" SRA accession number", len(entry.get_text()))
        entry.select_region(0, len(entry.get_text()))
        vbox.pack_start(entry, True, True, 0)
        entry.show()

        hbox = gtk.HBox(False, 0)
        vbox.add(hbox)
        hbox.show()

        # Create a new button for running Linux Shell script
        buttonscript = gtk.Button(label="Download", stock=None)
        # Connect the "clicked" signal of the button to the function
        buttonscript.connect("clicked", runlinuxshell)
        vbox.pack_start(buttonscript, True, True, 0)
        buttonscript.set_flags(gtk.CAN_DEFAULT)
        buttonscript.grab_default()
        buttonscript.show()

        button = gtk.Button(stock=gtk.STOCK_CLOSE)
        button.connect("clicked", lambda w: gtk.main_quit())
        vbox.pack_start(button, True, True, 0)
        button.set_flags(gtk.CAN_DEFAULT)
        button.grab_default()
        button.show()
        window.show()

def runlinuxshell():
    subprocess.call('$i=len(entry.get_text()) # Error is here
    echo $i
    ./fastq-dump --split-files $i -v')

def main():
    gtk.main()
    return 0

if __name__ == "__main__":
    EntryExample()
    main()
How do I pass text input from a widget into the subprocess.call()?
Is there any good example of how to call bioinformatics Linux tools from PyGTK?
Disclaimer: the sample uses PyGObject with introspection, not PyGTK, which has been deprecated for years and should not be used in new code.
Disclaimer 2: the sample can be greatly improved, to say the least; it is just an adaptation of your original script.
You would probably do something like the following:
import gi
from gi.repository import Gtk
import subprocess

class EntryExample:
    def __init__(self):
        window = Gtk.Window()
        window.set_size_request(200, 100)
        window.set_title("GTK Entry")
        window.connect("delete_event", Gtk.main_quit)
        vbox = Gtk.VBox(False, 0)
        window.add(vbox)
        self.entry = Gtk.Entry()
        self.entry.set_max_length(50)
        self.entry.set_text("SRA accession number")
        vbox.pack_start(self.entry, True, True, 0)
        buttonscript = Gtk.Button(label="Download", stock=None)
        buttonscript.connect("clicked", self.runlinuxshell)
        vbox.pack_start(buttonscript, True, True, 0)
        button = Gtk.Button(stock=Gtk.STOCK_CLOSE)
        button.connect("clicked", Gtk.main_quit)
        vbox.pack_start(button, True, True, 0)
        window.show_all()

    def runlinuxshell(self, widget):
        mylen = len(self.entry.get_text())
        # Here you will execute your subprocess with mylen

    def main(self):
        Gtk.main()

if __name__ == "__main__":
    sub = EntryExample()
    sub.main()
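To actually run the tool, one option is sketched below; it is meant to drop into the class above, and the fastq-dump path and flags are simply carried over from the original script. Reading the entry text and passing it as an argument list avoids any shell quoting:
    def runlinuxshell(self, widget):
        accession = self.entry.get_text().strip()
        # Argument-list form: no shell involved, no string interpolation.
        subprocess.call(['./fastq-dump', '--split-files', '-v', accession])
Note that subprocess.call() blocks the GTK main loop until the tool finishes; for long downloads it would be better to run the call from a worker thread or via GLib's asynchronous spawn facilities.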
Recently I wanted to try some open-source solvers instead of CPLEX. I found that PICOS + ZIBopt may be a good choice. However, I can hardly find any instructions on how to make ZIBopt work with Python under Windows. I downloaded the Windows libraries (.dll files) of SCIP, and I tried to install python-zibopt with the command "python setup.py install". The error "blockmemshell/memory.h: no such file" always popped up. I suspect this is because my compiler (VS120COMNTOOL) doesn't find the SCIP solver. Is there any chance I can make SCIP work under Windows now?
Have you had a look at the current Python interface of SCIP 3.1.0? It uses the library from the SCIP Optimization Suite, so you don't have to link another LP solver to SCIP.
On Windows, please try this modified setup.py file:
import sys, os, readline, glob, platform
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize

BASEDIR = os.path.dirname(os.path.abspath(__file__))
BASEDIR = os.path.dirname(BASEDIR)
BASEDIR = os.path.dirname(BASEDIR)
INCLUDEDIR = os.path.join(BASEDIR, 'src')
BASEDIR = os.path.dirname(BASEDIR)

# identify compiler version
prefix = "MSC v."
i = sys.version.find(prefix)
if i == -1:
    raise Exception('cannot determine compiler version')
i = i + len(prefix)
s, rest = sys.version[i:].split(" ", 1)
majorVersion = int(s[:-2]) - 6
minorVersion = int(s[2:3]) / 10.0

if platform.architecture()[0].find('64') >= 0:
    LIBDIR = os.path.join(BASEDIR, 'vc'+str(majorVersion), 'scip_spx', 'x64', 'Release')
else:
    LIBDIR = os.path.join(BASEDIR, 'vc'+str(majorVersion), 'scip_spx', 'Release')

print('BASEDIR=' + BASEDIR)
print('INCLUDEDIR=' + INCLUDEDIR)
print('LIBDIR=' + LIBDIR)

def complete(text, state):
    return (glob.glob(text+'*')+[None])[state]

readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(complete)

libscipopt = 'lib/libscipopt.so'
includescip = 'include/scip'

ext_modules = []
ext_modules += [Extension('pyscipopt.scip', [os.path.join('pyscipopt', 'scip.pyx')],
                          #extra_compile_args=['-g', '-O0', '-UNDEBUG'],
                          include_dirs=[INCLUDEDIR],
                          library_dirs=[LIBDIR],
                          #runtime_library_dirs=[os.path.abspath('lib')],
                          libraries=['spx', 'scip_spx'])]
                          #libraries=['scipopt', 'readline', 'z', 'gmp', 'ncurses', 'm'])]

setup(
    name = 'pyscipopt',
    version = '0.1',
    description = 'wrapper for SCIP in Python',
    author = 'Zuse Institute Berlin',
    author_email = 'scip@zib.de',
    license = 'MIT',
    cmdclass = {'build_ext' : build_ext},
    ext_modules = ext_modules,
    packages = ['pyscipopt']
)
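Once the extension builds, a quick sanity check is to solve a tiny model. The sketch below uses the method names of the present-day pyscipopt interface, which may differ from the 0.1 wrapper built by this setup.py:
from pyscipopt import Model

model = Model("example")
x = model.addVar("x", vtype="I")          # integer variable
y = model.addVar("y")                     # continuous variable (default)
model.addCons(2*x + y <= 10)
model.setObjective(x + 3*y, "maximize")
model.optimize()
print(model.getVal(x), model.getVal(y))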
I see that PDF viewers like Okular and Evince are able to display the index (table of contents) of a PDF document (book) very well, with a link to every section.
How can they do that? They use the poppler library; how could I extract that index with poppler, or in general?
My attempt below only gets the first level (recursion is needed to go deeper):
toc = document->toc();
QDomElement docElem = toc->documentElement();
QDomNode n = docElem.firstChild();
while (!n.isNull()) {
    QDomElement e = n.toElement(); // try to convert the node to an element.
    if (!e.isNull()) {
        qDebug("elem %s\n", qPrintable(e.tagName())); // the node really is an element.
    }
    n = n.nextSibling();
}
Here is a demo of how to do this with poppler in Python:
import poppler

def walk_index(iterp, doc):
    while iterp.next():
        link = iterp.get_action()
        s = doc.find_dest(link.dest.named_dest)
        print link.title, ' ', doc.get_page(s.page_num).get_label()
        child = iterp.get_child()
        if child:
            walk_index(child, doc)

def main():
    uri = ("file:///" + path_to_pdf)
    doc = poppler.document_new_from_file(uri, None)
    iterp = poppler.IndexIter(doc)
    link = iterp.get_action()
    s = doc.find_dest(link.dest.named_dest)
    print link.title, ' ', doc.get_page(s.page_num).get_label()
    walk_index(iterp, doc)
    return 0

if __name__ == '__main__':
    main()
The python-poppler library is obsolete; here is how to do it with GObject introspection:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# walk the table of contents and print titles and pages
import sys
from gi.repository import Poppler

def walk_index(iterp, doc):
    while iterp.next():
        link = iterp.get_action()
        dest = doc.find_dest(link.goto_dest.dest.named_dest)
        s = doc.get_page(dest.page_num - 1)
        print link.goto_dest.title, dest.page_num, s.get_label()
        child = iterp.get_child()
        if child:
            walk_index(child, doc)

def main():
    uri = ("file:///" + sys.argv[1])
    doc = Poppler.Document.new_from_file(uri, None)
    iterp = Poppler.IndexIter.new(doc)
    link = iterp.get_action()
    dest = doc.find_dest(link.goto_dest.dest.named_dest)
    s = doc.get_page(dest.page_num - 1)
    print link.goto_dest.title, dest.page_num, s.get_label()
    walk_index(iterp, doc)
    return 0

if __name__ == '__main__':
    main()
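Save the script and run it under Python 2 (it uses print statements) against a PDF path, for example (walk_toc.py is just a placeholder name for the file above):
python2 walk_toc.py /path/to/book.pdf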