Multiprocessing in DroneKit not working - python-multiprocessing

I am trying to write a script that flies a drone to multiple waypoints, where the drone is not allowed to continue to the next waypoint until the camera sees the color red.
Because the OpenCV camera loop and the drone control run at the same time, my code is very laggy, so I tried to modify it using multiprocessing. When I run the new code, the multiprocessing doesn't work: it skips almost all of my code and goes straight to RTL mode.
from inspect import ArgInfo
from dronekit import connect, VehicleMode, LocationGlobalRelative
from pymavlink import mavutil
from numpy import loadtxt, array
from time import sleep
import sys
import cv2
import numpy as np
import multiprocessing

cap = cv2.VideoCapture(0)
hsv_a = np.array([198, 255, 255])
hsv_b = np.array([158, 68, 137])
treshold = 150
lat = [-35.3629722, -35.3629064, -35.3634361, -35.3638474]
lon = [149.1649709, 149.1655721, 149.1657331, 149.1639733]

#vehicle = connect('udp:127.0.0.1:14551',wait_ready=True)
vehicle = connect('udp:127.0.0.1:14551', wait_ready=True)

def arm_and_takeoff(aTargetAltitude):  # arming and takeoff function
    print("Basic pre-arm checks")
    # Don't let the user try to arm until autopilot is ready
    while not vehicle.is_armable:
        print(" Waiting for vehicle to initialise...")
        sleep(1)
    print("Arming motors")
    # Copter should arm in GUIDED mode
    vehicle.mode = VehicleMode("GUIDED")
    vehicle.armed = True
    while not vehicle.armed:
        print(" Waiting for arming...")
        sleep(1)
    print("Taking off!")
    vehicle.simple_takeoff(aTargetAltitude)
    while True:
        print(" Altitude: ", vehicle.location.global_relative_frame.alt)
        # Break and return from function just below target altitude.
        if vehicle.location.global_relative_frame.alt >= aTargetAltitude * 0.95:
            print("Reached target altitude")
            break
        sleep(1)

def dist(a, z):  # a = start, z = end; returns the distance ('jarak') in degrees
    d_lat = (a.lat - z.lat)**2
    d_long = (a.lon - z.lon)**2
    jarak = (d_lat + d_long)**0.5
    return jarak

def gerak_drone():  # fly through the waypoints ('gerak' = move)
    for i in range(0, len(lat)):
        print(i)
        wp = LocationGlobalRelative(lat[i], lon[i], 2)
        vehicle.simple_goto(wp)
        sleep(1)
        while dist(vehicle.location.global_relative_frame, wp) >= 0.0001:
            print(str(round(dist(vehicle.location.global_relative_frame, wp) * 100000, 2)))
        while True:
            _, frame = cap.read()
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            mask = cv2.inRange(hsv, hsv_b, hsv_a)
            cv2.imshow("warna", mask)
            cv2.imshow("hitamPutih", gray)
            cv2.imshow("apa", frame)
            print(cv2.countNonZero(mask))
            if cv2.waitKey(500) == 27 or cv2.countNonZero(mask) > treshold:
                break

if __name__ == "_main_":
    altitude = 2
    lat_distance = 1
    lon_distance = 1
    p1 = multiprocessing.Process(target=arm_and_takeoff, args=(altitude))
    p2 = multiprocessing.Process(target=dist, args=(lat_distance, lon_distance))
    p3 = multiprocessing.Process(target=gerak_drone)
    p1.start()
    p2.start()
    p3.start()
    p1.join()
    p2.join()
    p3.join()

print("Coming back")
vehicle.mode = VehicleMode("RTL")
sleep(20)
vehicle.mode = VehicleMode("LAND")
Here is my terminal result
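
Two problems are visible in the posted code before the multiprocessing design even comes into play: the guard is spelled if __name__ == "_main_": with single underscores, so the block that starts the processes never runs and execution falls straight through to the RTL/LAND lines at the bottom, and args=(altitude) is not a tuple (it needs a trailing comma, args=(altitude,)). Beyond that, each worker process ends up with its own copy of the module state, so the DroneKit vehicle object is not genuinely shared between them. Below is a minimal sketch of one safer split, with only the camera in a worker process and a single flag shared; the HSV bounds and threshold mirror the values above, and this is an illustration, not a tested fix.

# Sketch: camera detection in a worker process; only an Event flag is shared.
# HSV bounds and threshold mirror the question's values; illustrative only.
import multiprocessing
from time import sleep

import cv2
import numpy as np

HSV_LO = np.array([158, 68, 137])
HSV_HI = np.array([198, 255, 255])
THRESHOLD = 150

def camera_worker(red_seen):
    cap = cv2.VideoCapture(0)              # open the camera inside the worker
    while True:
        ok, frame = cap.read()
        if not ok:
            continue
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, HSV_LO, HSV_HI)
        if cv2.countNonZero(mask) > THRESHOLD:
            red_seen.set()                 # tell the flight loop that red is visible
        else:
            red_seen.clear()

if __name__ == "__main__":                 # note: double underscores
    red_seen = multiprocessing.Event()
    worker = multiprocessing.Process(target=camera_worker, args=(red_seen,), daemon=True)
    worker.start()
    # The DroneKit connection and the waypoint loop stay in the main process;
    # before moving on to the next waypoint, the flight loop would wait with:
    #     while not red_seen.is_set():
    #         sleep(0.1)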

Related

How to extract only the skeleton, without the video, in MediaPipe?

import cv2
import mediapipe as mp
import numpy as np
import sys

mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose
mp_drawing_styles = mp.solutions.drawing_styles
# min_tracking_confidence = 1 for higher accuracy
pose = mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=1)

# Import video and set codec
cap = cv2.VideoCapture(sys.argv[1])
# print("cap :", cap.shape)
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
if cap.isOpened() == False:
    print("Error opening video stream or file")
    raise TypeError

frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
outdir, inputflnm = sys.argv[1][:sys.argv[1].rfind(
    '/')+1], sys.argv[1][sys.argv[1].rfind('/')+1:]
inflnm, inflext = inputflnm.split('.')
out_filename = f'{outdir}{inflnm}_annotated.{inflext}'
# out = cv2.VideoWriter(out_filename, cv2.VideoWriter_fourcc(
#     'M', 'J', 'P', 'G'), 10, (frame_width, frame_height))
out = cv2.VideoWriter(out_filename, fourcc, 30, (frame_width, frame_height))

while cap.isOpened():
    ret, image = cap.read()
    if not ret:
        break
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image.flags.writeable = False
    results = pose.process(image)  # core
    image.flags.writeable = True
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    # Render detections
    mp_drawing.draw_landmarks(
        image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
    out.write(image)
    mp_drawing.plot_landmarks(
        results.pose_world_landmarks, mp_pose.POSE_CONNECTIONS)

pose.close()
cap.release()
out.release()
Hello,
I would like to extract only the skeleton, not the skeleton drawn on top of the video.
I changed the MediaPipe sample code to use an input video instead of the original image, and that worked: the result is the video with the skeleton drawn on it.
Now I want to see only the skeleton, without the video.
I tried to remove the video but could not.
I would appreciate any help!
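
One way to get a skeleton-only output (my sketch, not from the original post) is to keep running the detection on the video frame, but draw the landmarks onto a black canvas of the same size and write that canvas to the output file instead of the frame. It reuses cap, pose, out, mp_drawing and mp_pose exactly as set up in the code above.

# Sketch: replace the body of the while-loop above so the landmarks are drawn
# on a black canvas (same size as the frame) instead of on the video.
while cap.isOpened():
    ret, image = cap.read()
    if not ret:
        break
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    results = pose.process(rgb)

    canvas = np.zeros_like(image)          # black background, same size as the frame
    if results.pose_landmarks:
        mp_drawing.draw_landmarks(
            canvas, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
    out.write(canvas)                      # skeleton only, no video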

Is there a way to have multiple rectangles displayed at the same time using visual.Rect in PsychoPy?

I'm trying to create stimuli that consist of 100 small lines in the centre of the screen, with orientations sampled from a Gaussian distribution (please see the image link below):
Orientation stimuli
I've managed to achieve something that almost fits the bill, but this code only works in isolation:
from psychopy import visual, core, event
import numpy as np
from numpy.random import random
import random

Lines = visual.Rect(
    win=win, name='Lines',
    width=(0.015, 0.0025)[0], height=(0.015, 0.0025)[1],
    lineWidth=1, lineColor=[1,1,1], lineColorSpace='rgb',
    fillColor=[1,1,1], fillColorSpace='rgb',
    opacity=1, depth=-2.0, interpolate=True)

lines_hpos = np.random.uniform(-0.49, 0.49, 100)
mu = 315
sigma = 15
for i in range(100):
    Lines.pos = [lines_hpos[i], np.random.uniform(-0.49, 0.49)]
    Lines.ori = random.gauss(mu, sigma)
I've tried to manipulate this code so that I can integrate it into the full experiment I'm designing in PsychoPy's experiment builder. I run the below code in the experiment builder's coding window calling 'gdist' and 'loc' as values for the 'Orientation' and 'Position' of the rectangles, respectively:
import random

gdist = []
loc = []
lines_hpos = np.random.uniform(-0.49, 0.49, 100)
mu = 90
sigma = 20
for i in range(100):
    rloc = [lines_hpos[i], np.random.uniform(-0.49, 0.49)]
    loc.append(rloc)
    gauss = random.gauss(mu, sigma)
    gdist.append(gauss)
When I attempt to run the experiment, I get an error return and the experiment fails to start:
File "C:\Users\r02mj20\AppData\Local\PsychoPy3\lib\site-packages\psychopy\visual\image.py", line 238, in __del__
File "C:\Users\r02mj20\AppData\Local\PsychoPy3\lib\site-packages\pyglet\gl\lib.py", line 97, in errcheck
ImportError: sys.meta_path is None, Python is likely shutting down
I'm assuming this has something to do with pyglet not liking the idea of there being 100 rectangles all at once (side note: the script works fine if range(1)). If anyone has any suggestions for how I might fix or work around this problem, I'd be eternally grateful.
I don't see any problem with this idea, except that you would be better off using visual.Line instead of Rect, and your units of measure are not described. The key to preserving video memory is BufferImageStim, by the way:
from psychopy import visual, core, event, monitors
from psychopy.iohub.client import launchHubServer
import random
import numpy as np

MU = 315; SIGMA = 15
num_lines = 100

io = launchHubServer(iohub_config_name='iohub_config.yaml')
display = io.devices.display
mon = monitors.Monitor(name=display.getPsychopyMonitorName())

win = visual.Window([640, 480], units='pix', viewScale=1.0,
                    monitor=mon, winType='pyglet',
                    fullScr=False, waitBlanking=True, useFBO=True, useLights=False,
                    allowStencil=False, allowGui=True,
                    screen=display.getIndex(), colorSpace='rgb255', color=[128, 128, 128],
                    name='my_win01')

rects = []
lines_hpos = np.random.uniform(-0.49, 0.49, num_lines)
for i in range(num_lines):
    line_rect = visual.Rect(win=win, size=(0.001, 1.0), units='norm',
                            pos=(0, 0), lineWidth=1, lineColor=[1, 1, 1], fillColor=[1, 1, 1], opacity=1, depth=-2.0,
                            name='lines_rect', interpolate=True, autoLog=False, autoDraw=False)
    line_rect.pos = [lines_hpos[i], np.random.uniform(-0.49, 0.49)]
    line_rect.ori = random.gauss(MU, SIGMA)
    rects.append(line_rect)

rect_buffer = visual.BufferImageStim(win, buffer='back', stim=rects, sqPower2=False, interpolate=False, name='rect-buffer', autoLog=True)
rect_buffer.draw()
win.flip()
event.waitKeys()
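
For reference, here is the same idea sketched with visual.Line instead of Rect, as suggested above. This is my illustration, not part of the original answer; it assumes the win created earlier, uses norm units, and HALF_LEN is an arbitrary choice for the line half-length.

# Sketch: the same field of oriented lines built from visual.Line objects.
# Assumes `win`, `visual`, `np`, `random` and `event` from the code above.
MU, SIGMA, NUM_LINES, HALF_LEN = 315, 15, 100, 0.01

lines = []
for _ in range(NUM_LINES):
    line = visual.Line(win=win, units='norm',
                       start=(0, -HALF_LEN), end=(0, HALF_LEN),
                       lineColor=[1, 1, 1], lineWidth=1, autoDraw=False)
    line.pos = (np.random.uniform(-0.49, 0.49), np.random.uniform(-0.49, 0.49))
    line.ori = random.gauss(MU, SIGMA)     # rotation around the line's own centre
    lines.append(line)

# Draw them each frame, or bake them into a BufferImageStim as in the answer:
line_buffer = visual.BufferImageStim(win, buffer='back', stim=lines)
line_buffer.draw()
win.flip()
event.waitKeys()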

Trying to take pictures with the Coral camera on the Coral Edge TPU dev board, but it is really slow

To start with, I am not a developer, but an automation engineer who has worked a bit with Java, Python, C#, C++ and C.
I am trying to make a prototype that takes pictures and stores them, triggered by a digital pin on the board. At the moment I can take pictures using a switch, but it is really slow (around 3 seconds per image).
My complete system is going to be like this:
A product passes by on a conveyor and a photo cell triggers the board to take an image and store it. If an operator removes a product (because of bad quality), the image is stored in a different folder.
I started with the snapshot function shipped with Mendel and have tried to get rid of the overhead, but the GStreamer and pipeline stuff confuses me a lot.
If someone could help me understand the supplied code, or show me how to write a minimalistic solution to take an image, I would be grateful :)
I have tried to understand and use project-teachable and examples-camera from Google Coral (https://github.com/google-coral), but with no luck. I have had the best luck with the snapshot tool that uses snapshot.py, referenced here: https://coral.withgoogle.com/docs/camera/datasheet/#snapshot-tool
from periphery import GPIO
import time
import argparse
import contextlib
import fcntl
import os
import select
import sys
import termios
import threading

import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
from functools import partial
from gi.repository import GLib, GObject, Gst, GstBase
from PIL import Image

GObject.threads_init()
Gst.init(None)

WIDTH = 2592
HEIGHT = 1944
FILENAME_PREFIX = 'img'
FILENAME_SUFFIX = '.png'
AF_SYSFS_NODE = '/sys/module/ov5645_camera_mipi_v2/parameters/ov5645_af'
CAMERA_INIT_QUERY_SYSFS_NODE = '/sys/module/ov5645_camera_mipi_v2/parameters/ov5645_initialized'
HDMI_SYSFS_NODE = '/sys/class/drm/card0/card0-HDMI-A-1/status'
# No of initial frames to throw away before camera has stabilized
SCRAP_FRAMES = 1

SRC_WIDTH = 2592
SRC_HEIGHT = 1944
SRC_RATE = '15/1'
SRC_ELEMENT = 'v4l2src'

SINK_WIDTH = 2592
SINK_HEIGHT = 1944
SINK_ELEMENT = ('appsink name=appsink sync=false emit-signals=true '
                'max-buffers=1 drop=true')

SCREEN_SINK = 'glimagesink sync=false'
FAKE_SINK = 'fakesink sync=false'

SRC_CAPS = 'video/x-raw,format=YUY2,width={width},height={height},framerate={rate}'
SINK_CAPS = 'video/x-raw,format=RGB,width={width},height={height}'
LEAKY_Q = 'queue max-size-buffers=1 leaky=downstream'

PIPELINE = '''
{src_element} ! {src_caps} ! {leaky_q} ! tee name=t
t. ! {leaky_q} ! {screen_sink}
t. ! {leaky_q} ! videoconvert ! {sink_caps} ! {sink_element}
'''

def on_bus_message(bus, message, loop):
    t = message.type
    if t == Gst.MessageType.EOS:
        loop.quit()
    elif t == Gst.MessageType.WARNING:
        err, debug = message.parse_warning()
        sys.stderr.write('Warning: %s: %s\n' % (err, debug))
    elif t == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        sys.stderr.write('Error: %s: %s\n' % (err, debug))
        loop.quit()
    return True

def on_new_sample(sink, snapinfo):
    if not snapinfo.save_frame():
        # Throw away the frame
        return Gst.FlowReturn.OK
    sample = sink.emit('pull-sample')
    buf = sample.get_buffer()
    result, mapinfo = buf.map(Gst.MapFlags.READ)
    if result:
        imgfile = snapinfo.get_filename()
        caps = sample.get_caps()
        width = WIDTH
        height = HEIGHT
        img = Image.frombytes('RGB', (width, height), mapinfo.data, 'raw')
        img.save(imgfile)
        img.close()
    buf.unmap(mapinfo)
    return Gst.FlowReturn.OK

def run_pipeline(snapinfo):
    src_caps = SRC_CAPS.format(width=SRC_WIDTH, height=SRC_HEIGHT, rate=SRC_RATE)
    sink_caps = SINK_CAPS.format(width=SINK_WIDTH, height=SINK_HEIGHT)
    screen_sink = FAKE_SINK
    pipeline = PIPELINE.format(
        leaky_q=LEAKY_Q,
        src_element=SRC_ELEMENT,
        src_caps=src_caps,
        sink_caps=sink_caps,
        sink_element=SINK_ELEMENT,
        screen_sink=screen_sink)
    pipeline = Gst.parse_launch(pipeline)
    appsink = pipeline.get_by_name('appsink')
    appsink.connect('new-sample', partial(on_new_sample, snapinfo=snapinfo))
    loop = GObject.MainLoop()
    # Set up a pipeline bus watch to catch errors.
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message', on_bus_message, loop)
    # Connect the loop to the snaphelper
    snapinfo.connect_loop(loop)
    # Run pipeline.
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # Clean up.
    pipeline.set_state(Gst.State.NULL)
    while GLib.MainContext.default().iteration(False):
        pass

class SnapHelper:
    def __init__(self, sysfs, prefix='img', oneshot=True, suffix='jpg'):
        self.prefix = prefix
        self.oneshot = oneshot
        self.suffix = suffix
        self.snap_it = oneshot
        self.num = 0
        self.scrapframes = SCRAP_FRAMES
        self.sysfs = sysfs

    def get_filename(self):
        while True:
            filename = self.prefix + str(self.num).zfill(4) + '.' + self.suffix
            self.num = self.num + 1
            if not os.path.exists(filename):
                break
        return filename

    # def check_af(self):
    #     try:
    #         self.sysfs.seek(0)
    #         v = self.sysfs.read()
    #         if int(v) != 0x10:
    #             print('NO Focus')
    #     except:
    #         pass

    # def refocus(self):
    #     try:
    #         self.sysfs.write('1')
    #         self.sysfs.flush()
    #     except:
    #         pass

    def save_frame(self):
        # We always want to throw away the initial frames to let the
        # camera stabilize. This seemed empirically to be the right number
        # when running on desktop.
        if self.scrapframes > 0:
            self.scrapframes = self.scrapframes - 1
            return False
        if self.snap_it:
            self.snap_it = False
            retval = True
        else:
            retval = False
        if self.oneshot:
            self.loop.quit()
        return retval

    def connect_loop(self, loop):
        self.loop = loop

def take_picture(snap):
    start_time = int(round(time.time()))
    run_pipeline(snap)
    print(time.time() - start_time)

def main():
    button = GPIO(138, "in")
    last_state = False
    with open(AF_SYSFS_NODE, 'w+') as sysfs:
        snap = SnapHelper(sysfs, 'test', 'oneshot', 'jpg')
        sysfs.write('2')
        while 1:
            button_state = button.read()
            if button_state == True and last_state == False:
                snap = SnapHelper(sysfs, 'test', 'oneshot', 'jpg')
                take_picture(snap)
            last_state = button_state

if __name__ == "__main__":
    main()
    sys.exit()
The output is what I expect, but it is slow.
I switched to a USB-webcam and used the pygame library instead.
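
For reference, the pygame approach mentioned above looks roughly like this (my sketch; the device index and resolution are assumptions, not values from the original post):

# Sketch: grab one frame from a USB webcam with pygame.camera.
# The device chosen and the resolution are assumptions; adjust to your camera.
import pygame
import pygame.camera

pygame.camera.init()
cams = pygame.camera.list_cameras()          # e.g. ['/dev/video0', ...]
cam = pygame.camera.Camera(cams[0], (1280, 720))
cam.start()
img = cam.get_image()                        # returns a pygame.Surface
pygame.image.save(img, "snapshot.jpg")
cam.stop()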

How to make a crosshair mouse tracker on a PlotWidget() promoted in designer-qt5

I am trying to add a crosshair to my pyqtgraph interactive plots, which are embedded in a PyQt5 GUI built with designer-qt5. I found working
code in the pyqtgraph "examples"; a simplified WORKING example is posted below. Now I want the same behaviour, but the problem seems to be that in the designer I promoted a
QGraphicsView() to a pg.PlotWidget instead of a pg.GraphicsWindow(). The code does not work for me because my p1 is a "pyqtgraph.widgets.PlotWidget.PlotWidget" object, while in the example p1 is a
"pyqtgraph.graphicsItems.PlotItem.PlotItem.PlotItem" object.
So what should I do to make this example work for me?
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
from pyqtgraph.Point import Point

pg.setConfigOption('background', '#ffffff')
pg.setConfigOption('foreground', 'k')
pg.setConfigOptions(antialias=True)

app = QtGui.QApplication([])
win = pg.GraphicsWindow()
win.setWindowTitle('pyqtgraph example: crosshair')
label = pg.LabelItem(justify='right')
win.addItem(label)
p1 = win.addPlot(row=1, col=0)
p1.setAutoVisible(y=True)

# create numpy arrays
# make the numbers large to show that the xrange shows data from 10000 to all the way 0
data1 = 10000 + 15000 * pg.gaussianFilter(np.random.random(size=10000), 10) + 3000 * np.random.random(size=10000)
p1.plot(data1, pen="r")

# cross hair
vLine = pg.InfiniteLine(angle=90, movable=False)
hLine = pg.InfiniteLine(angle=0, movable=False)
p1.addItem(vLine, ignoreBounds=True)
p1.addItem(hLine, ignoreBounds=True)
vb = p1.vb
print(p1)
print(vb)

def mouseMoved(evt):
    pos = evt[0]  ## using signal proxy turns original arguments into a tuple
    if p1.sceneBoundingRect().contains(pos):
        mousePoint = vb.mapSceneToView(pos)
        index = int(mousePoint.x())
        if index > 0 and index < len(data1):
            label.setText("<span style='font-size: 12pt'>x=%0.1f, <span style='color: green'>y2=%0.1f</span>" % (mousePoint.x(), data1[index]))
        vLine.setPos(mousePoint.x())
        hLine.setPos(mousePoint.y())

proxy = pg.SignalProxy(p1.scene().sigMouseMoved, rateLimit=60, slot=mouseMoved)
#p1.scene().sigMouseMoved.connect(mouseMoved)

## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
I am very sorry for the noise!!! I fixed it myself!
The important part was:
plot_wg.proxy = proxy
Very simple...
Below is the function, which should work for any PlotWidget:
def cross_hair(self, plot_wg, log=False):
    global fit
    ################### TEST cross hair ############
    vLine = pg.InfiniteLine(angle=90, movable=False)  #, pos=0)
    hLine = pg.InfiniteLine(angle=0, movable=False)   #, pos=2450000)
    plot_wg.addItem(vLine, ignoreBounds=True)
    plot_wg.addItem(hLine, ignoreBounds=True)
    vb = plot_wg.getViewBox()
    label = pg.TextItem()
    plot_wg.addItem(label)

    def mouseMoved(evt):
        pos = evt[0]  ## using signal proxy turns original arguments into a tuple
        if plot_wg.sceneBoundingRect().contains(pos):
            mousePoint = vb.mapSceneToView(pos)
            if log == True:
                label.setText("x=%0.3f, y1=%0.3f" % (10**mousePoint.x(), mousePoint.y()))
            else:
                label.setText("x=%0.3f, y1=%0.3f" % (mousePoint.x(), mousePoint.y()))
            vLine.setPos(mousePoint.x())
            hLine.setPos(mousePoint.y())
            #print(mousePoint.x(), mousePoint.y())

    plot_wg.getViewBox().setAutoVisible(y=True)
    proxy = pg.SignalProxy(plot_wg.scene().sigMouseMoved, rateLimit=60, slot=mouseMoved)
    # keep a reference to the proxy on the widget so it is not garbage-collected
    plot_wg.proxy = proxy
    ################### TEST cross hair ############
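
As a usage sketch (my illustration; Ui_MainWindow and graphicsView are assumed names from the generated .ui module, not from the original post), the function can be called once after setupUi with the promoted widget:

# Hypothetical usage in the Designer-generated GUI class; the names
# Ui_MainWindow and graphicsView are assumptions.
from PyQt5 import QtWidgets

class MainWindow(QtWidgets.QMainWindow):
    def __init__(self):
        super().__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # self.ui.graphicsView is the QGraphicsView promoted to pg.PlotWidget
        cross_hair(self, self.ui.graphicsView, log=False)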

Why does OpenCV's meanshift tracking algorithm only track the object the first time?

I am running the meanshift tracking algorithm to track objects in a live stream (with a webcam) in OpenCV. However, the algorithm only works the first time it is run and does not work when I run the program again unless I restart my computer. Why is this so?
Algorithm taken from: https://docs.opencv.org/trunk/db/df8/tutorial_py_meanshift.html
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

# take first frame of the video
ret, frame = cap.read()

# setup initial location of window
r, h, c, w = 250, 90, 400, 125  # simply hardcoded the values
track_window = (c, r, w, h)

# set up the ROI for tracking
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

# Setup the termination criteria, either 10 iteration or move by atleast 1 pt
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

while(1):
    ret, frame = cap.read()
    if ret == True:
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
        # apply meanshift to get the new location
        ret, track_window = cv2.meanShift(dst, track_window, term_crit)
        # Draw it on image
        x, y, w, h = track_window
        img2 = cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
        cv2.imshow('img2', img2)
        k = cv2.waitKey(60) & 0xff
        if k == 27:
            break
        else:
            cv2.imwrite(chr(k)+".jpg", img2)
    else:
        break

cv2.destroyAllWindows()
cap.release()
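
One thing worth checking (my suggestion, not a confirmed diagnosis from the original post): the first cap.read() is never checked, so if the webcam returns an empty frame on a cold start the ROI histogram is built from garbage and the tracker has nothing meaningful to follow on later runs. A minimal sketch of a guarded start-up, using the same hardcoded window as above:

# Sketch: discard a few warm-up frames and verify the first frame before
# building the ROI histogram. Values mirror the question's hardcoded window.
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
if not cap.isOpened():
    raise RuntimeError("could not open the webcam")

# let the camera warm up and make sure we actually got a frame
for _ in range(10):
    ret, frame = cap.read()
if not ret or frame is None:
    cap.release()
    raise RuntimeError("webcam returned no frame")

r, h, c, w = 250, 90, 400, 125
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
# ... continue exactly as in the question, and always call cap.release() at the end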