Posting volume down/up events using Core Graphics [duplicate] - objective-c

Is there a way to emulate key presses of the media keys (volume up/down, play, pause, prev, next) on common Apple notebooks?
How?

It took some time and many hacks to figure out (experimenting with ctypes, the native IOKit interface, Quartz and Cocoa), but this now seems like an easy solution:
#!/usr/bin/python
import Quartz

# NSEvent.h
NSSystemDefined = 14

# hidsystem/ev_keymap.h
NX_KEYTYPE_SOUND_UP = 0
NX_KEYTYPE_SOUND_DOWN = 1
NX_KEYTYPE_PLAY = 16
NX_KEYTYPE_NEXT = 17
NX_KEYTYPE_PREVIOUS = 18
NX_KEYTYPE_FAST = 19
NX_KEYTYPE_REWIND = 20

def HIDPostAuxKey(key):
    def doKey(down):
        ev = Quartz.NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(
            NSSystemDefined,  # type
            (0, 0),  # location
            0xa00 if down else 0xb00,  # flags
            0,  # timestamp
            0,  # window
            0,  # ctx
            8,  # subtype
            (key << 16) | ((0xa if down else 0xb) << 8),  # data1
            -1  # data2
        )
        cev = ev.CGEvent()
        Quartz.CGEventPost(0, cev)
    doKey(True)
    doKey(False)

for _ in range(10):
    HIDPostAuxKey(NX_KEYTYPE_SOUND_UP)
HIDPostAuxKey(NX_KEYTYPE_PLAY)
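For reference, data1 packs the key code into the top 16 bits and the key state into bits 8-15, where 0xA means key-down and 0xB means key-up (my reading of the code above; easy to sanity-check in a Python shell):

    >>> hex((NX_KEYTYPE_SOUND_UP << 16) | (0xa << 8))  # volume-up, pressed
    '0xa00'
    >>> hex((NX_KEYTYPE_PLAY << 16) | (0xb << 8))      # play, released
    '0x100b00'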
(While I needed this in Python for now, the question was not really Python-specific, and of course you can easily translate this to any other language, esp. ObjC.)

Swift 5 / macOS 10.14.4 / Xcode 10.2

@IBAction func mediaPressed(_ sender: AnyObject) {
    let NX_KEYTYPE_SOUND_UP: UInt32 = 0
    let NX_KEYTYPE_SOUND_DOWN: UInt32 = 1
    let NX_KEYTYPE_PLAY: UInt32 = 16
    let NX_KEYTYPE_NEXT: UInt32 = 17
    let NX_KEYTYPE_PREVIOUS: UInt32 = 18
    let NX_KEYTYPE_FAST: UInt32 = 19
    let NX_KEYTYPE_REWIND: UInt32 = 20

    func HIDPostAuxKey(key: UInt32) {
        func doKey(down: Bool) {
            let flags = NSEvent.ModifierFlags(rawValue: (down ? 0xa00 : 0xb00))
            let data1 = Int((key << 16) | (down ? 0xa00 : 0xb00))
            let ev = NSEvent.otherEvent(with: NSEvent.EventType.systemDefined,
                                        location: NSPoint(x: 0, y: 0),
                                        modifierFlags: flags,
                                        timestamp: 0,
                                        windowNumber: 0,
                                        context: nil,
                                        subtype: 8,
                                        data1: data1,
                                        data2: -1
            )
            let cev = ev?.cgEvent
            cev?.post(tap: CGEventTapLocation.cghidEventTap)
        }
        doKey(down: true)
        doKey(down: false)
    }

    for _ in 1...10 {
        HIDPostAuxKey(key: NX_KEYTYPE_SOUND_UP)
    }
    HIDPostAuxKey(key: NX_KEYTYPE_PLAY)
}

Thank you Albert for that! I expanded on your script a bit to make it an executable that could in turn be called by Quicksilver or another launcher/trigger handler.
#!/usr/bin/python
# CLI program to control the media keys on OS X. Used to emulate the media keys on a keyboard with no such keys.
# Easiest used in combination with launcher/trigger software such as Quicksilver.
# Main part taken from http://stackoverflow.com/questions/11045814/emulate-media-key-press-on-mac
# Glue to make it into a CLI program by Fredrik Wallner http://www.wallner.nu/fredrik/

import Quartz
import sys

# NSEvent.h
NSSystemDefined = 14

# hidsystem/ev_keymap.h
NX_KEYTYPE_SOUND_UP = 0
NX_KEYTYPE_SOUND_DOWN = 1
NX_KEYTYPE_PLAY = 16
NX_KEYTYPE_NEXT = 17
NX_KEYTYPE_PREVIOUS = 18
NX_KEYTYPE_FAST = 19
NX_KEYTYPE_REWIND = 20

supportedcmds = {'playpause': NX_KEYTYPE_PLAY, 'next': NX_KEYTYPE_NEXT, 'prev': NX_KEYTYPE_PREVIOUS, 'volup': NX_KEYTYPE_SOUND_UP, 'voldown': NX_KEYTYPE_SOUND_DOWN}

def HIDPostAuxKey(key):
    def doKey(down):
        ev = Quartz.NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(
            NSSystemDefined,  # type
            (0, 0),  # location
            0xa00 if down else 0xb00,  # flags
            0,  # timestamp
            0,  # window
            0,  # ctx
            8,  # subtype
            (key << 16) | ((0xa if down else 0xb) << 8),  # data1
            -1  # data2
        )
        cev = ev.CGEvent()
        Quartz.CGEventPost(0, cev)
    doKey(True)
    doKey(False)

if __name__ == "__main__":
    try:
        command = sys.argv[1]
        assert(command in supportedcmds)
        HIDPostAuxKey(supportedcmds[command])
    except (IndexError, AssertionError):
        print "Usage: %s command" % (sys.argv[0],)
        print "\tSupported commands are %s" % supportedcmds.keys()
The script can be found at https://gist.github.com/4078034
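Saved as, say, mediakeys.py (the filename is up to you), it can then be wired to hotkeys or launcher triggers with invocations like:

    python mediakeys.py volup
    python mediakeys.py playpause

Calling it with a missing or unknown command prints the usage line along with the supported commands.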

Related

I am not able to render 2D images of 3D point cloud

I am trying to render 2D images of point clouds from different viewpoints and save them as images.
I found code online which does the same thing, but for meshes. I tweaked it a little to import the 3D point cloud, but the code does not work and gives back black images. Please help me with this. I am open to using another library too, if you know a solution. I just want to render the 2D images. Thank you.
Code:
import os.path
import math
import sys
import bpy  # missing from the original paste; this script runs inside Blender

C = bpy.context
D = bpy.data
scene = D.scenes['Scene']

# cameras: a list of camera positions
# a camera position is defined by two parameters: (theta, phi),
# where we fix the "r" of (r, theta, phi) in spherical coordinate system.
# 5 orientations: front, right, back, left, top
cameras = [
    (60, 0), (60, 90), (60, 180), (60, 270),
    (0, 0)
]
# 12 orientations around the object with 30-deg elevation
# cameras = [(60, i) for i in range(0, 360, 30)]

render_setting = scene.render

# output image size = (W, H)
w = 500
h = 500
render_setting.resolution_x = w
render_setting.resolution_y = h


def main():
    argv = sys.argv
    argv = argv[argv.index('--') + 1:]
    if len(argv) != 2:
        print('phong.py args: <3d mesh path> <image dir>')
        exit(-1)
    model = argv[0]
    image_dir = argv[1]
    # blender has no native support for off files
    # install_off_addon()
    # init_camera()
    fix_camera_to_origin()
    do_model(model, image_dir)


def install_off_addon():
    try:
        # bpy.ops.preferences.addon_install(
        #     overwrite=False,
        #     filepath=os.path.dirname(__file__) +
        #     '/blender-off-addon/import_off.py'
        # )
        bpy.ops.preferences.addon_enable(module='import_off')
    except Exception as e:
        print(e)
        print("""Import blender-off-addon failed.
Did you pull the blender-off-addon submodule?
$ git submodule update --recursive --remote
""")
        exit(-1)


def init_camera():
    cam = D.objects['Camera']
    # select the camera object
    scene.objects.active = cam
    cam.select = True
    # set the rendering mode to orthogonal and scale
    C.object.data.type = 'ORTHO'
    C.object.data.ortho_scale = 2.


def fix_camera_to_origin():
    origin_name = 'Origin'
    # create origin
    try:
        origin = D.objects[origin_name]
    except KeyError:
        bpy.ops.object.empty_add(type='SPHERE')
        D.objects['Empty'].name = origin_name
        origin = D.objects[origin_name]
    origin.location = (0, 0, 0)
    cam = D.objects['Camera']
    # scene.objects.active = cam
    # cam.select = True
    if 'Track To' not in cam.constraints:
        bpy.ops.object.constraint_add(type='TRACK_TO')
    cam.constraints['Track To'].target = origin
    cam.constraints['Track To'].track_axis = 'TRACK_NEGATIVE_Z'
    cam.constraints['Track To'].up_axis = 'UP_Y'


def do_model(path, image_dir):
    name = load_model(path)
    center_model(name)
    normalize_model(name)
    image_subdir = os.path.join(image_dir, name)
    for i, c in enumerate(cameras):
        move_camera(c)
        render()
        save(image_subdir, '%s.%d' % (name, i))
    # delete_model(name)


def load_model(path):
    d = os.path.dirname(path)
    ext = path.split('.')[-1]
    name = os.path.basename(path).split('.')[0]
    # handle weird object naming by Blender for stl files
    if ext == 'stl':
        name = name.title().replace('_', ' ')
    if name not in D.objects:
        print('loading :' + name)
        if ext == 'stl':
            bpy.ops.import_mesh.stl(filepath=path, directory=d,
                                    filter_glob='*.stl')
        elif ext == 'off':
            bpy.ops.import_mesh.off(filepath=path, filter_glob='*.off')
        elif ext == 'obj':
            bpy.ops.import_scene.obj(filepath=path, filter_glob='*.obj')
        else:
            bpy.ops.import_mesh.ply(filepath=path, filter_glob='*.ply')
    return name


def delete_model(name):
    for ob in scene.objects:
        if ob.type == 'MESH' and ob.name.startswith(name):
            ob.select = True
        else:
            ob.select = False
    bpy.ops.object.delete()


def center_model(name):
    bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
    D.objects[name].location = (0, 0, 0)


def normalize_model(name):
    obj = D.objects[name]
    dim = obj.dimensions
    print('original dim:' + str(dim))
    if max(dim) > 0:
        dim = dim / max(dim)
    obj.dimensions = dim
    print('new dim:' + str(dim))


def move_camera(coord):
    def deg2rad(deg):
        return deg * math.pi / 180.

    r = 3.
    theta, phi = deg2rad(coord[0]), deg2rad(coord[1])
    loc_x = r * math.sin(theta) * math.cos(phi)
    loc_y = r * math.sin(theta) * math.sin(phi)
    loc_z = r * math.cos(theta)
    D.objects['Camera'].location = (loc_x, loc_y, loc_z)


def render():
    bpy.ops.render.render()


def save(image_dir, name):
    path = os.path.join(image_dir, name + '.png')
    D.images['Render Result'].save_render(filepath=path)
    print('save to ' + path)


if __name__ == '__main__':
    main()
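For anyone reproducing this: a script like the above is normally run through Blender's own bundled Python, with the script's arguments placed after the -- separator (which is why main() slices sys.argv at '--'). Assuming Blender's default startup file (which provides the 'Scene' scene and 'Camera' object the script looks up), something like:

    blender --background --python phong.py -- model.ply ./images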

Pybullet on colab, cannot connect X server

I am using rl-baselines3-zoo to run DDPG with my custom env on Colab. After I used the show-video function from that zoo repo, it said it cannot connect to the X server. It works fine on other built-in envs, so I guess it's my env's problem. Please, I need some help...
I set everything up following the zoo's tutorials.
Traceback:
pybullet build time: Jul 12 2021 20:46:20
/usr/local/lib/python3.7/dist-packages/gym/logger.py:30: UserWarning:
WARN: Box bound precision lowered by casting to float32
startThreads creating 1 threads.
starting thread 0
started thread 0
argc=2
argv[0] = --unused
argv[1] = --start_demo_name=Physics Server
ExampleBrowserThreadFunc started
X11 functions dynamically loaded using dlopen/dlsym OK!
X11 functions dynamically loaded using dlopen/dlsym OK!
Creating context
Created GL 3.3 context
Direct GLX rendering context obtained
Making context current
GL_VENDOR=VMware, Inc.
GL_RENDERER=llvmpipe (LLVM 10.0.0, 256 bits)
GL_VERSION=3.3 (Core Profile) Mesa 20.0.8
GL_SHADING_LANGUAGE_VERSION=3.30
pthread_getconcurrency()=0
Version = 3.3 (Core Profile) Mesa 20.0.8
Vendor = VMware, Inc.
Renderer = llvmpipe (LLVM 10.0.0, 256 bits)
b3Printf: Selected demo: Physics Server
startThreads creating 1 threads.
starting thread 0
started thread 0
MotionThreadFunc thread started
ven = VMware, Inc.
ven = VMware, Inc.
Wrapping the env in a VecTransposeImage.
tcmalloc: large alloc 3276800000 bytes == 0x556b03bda000 # 0x7f7cad04a001 0x7f7caa3f554f 0x7f7caa445b58 0x7f7caa449b17 0x7f7caa4e8203 0x556a81194d54 0x556a81194a50 0x556a81209105 0x556a812037ad 0x556a81196c9f 0x556a811d7d79 0x556a811d4cc4 0x556a81196ea1 0x556a81205bb5 0x556a8119630a 0x556a812087f0 0x556a812037ad 0x556a811963ea 0x556a8120460e 0x556a812034ae 0x556a811963ea 0x556a8120532a 0x556a812034ae 0x556a812031b3 0x556a81201660 0x556a81194b59 0x556a81194a50 0x556a81208453 0x556a812034ae 0x556a811963ea 0x556a812043b5
tcmalloc: large alloc 3276800000 bytes == 0x556bc78da000 # 0x7f7cad04a001 0x7f7caa3f554f 0x7f7caa445b58 0x7f7caa449b17 0x7f7caa4e8203 0x556a81194d54 0x556a81194a50 0x556a81209105 0x556a812037ad 0x556a81196c9f 0x556a811d7d79 0x556a811d4cc4 0x556a81196ea1 0x556a81205bb5 0x556a8119630a 0x556a812087f0 0x556a812037ad 0x556a811963ea 0x556a8120460e 0x556a812034ae 0x556a811963ea 0x556a8120532a 0x556a812034ae 0x556a812031b3 0x556a81201660 0x556a81194b59 0x556a81194a50 0x556a81208453 0x556a812034ae 0x556a811963ea 0x556a812043b5
/content/gdrive/My Drive/hsr/rl-baselines3-zoo/logs/ddpg/FoodHuntingHSR-v0_3/videos/final-model-ddpg-FoodHuntingHSR-v0-step-0-to-step-200.mp4
/usr/local/lib/python3.7/dist-packages/gym/logger.py:30: UserWarning:
WARN: Tried to pass invalid video frame, marking as broken: Your frame has data type float32, but we require uint8 (i.e. RGB values from 0-255).
Saving video to /content/gdrive/My Drive/hsr/rl-baselines3-zoo/logs/ddpg/FoodHuntingHSR-v0_3/videos/final-model-ddpg-FoodHuntingHSR-v0-step-0-to-step-200.mp4
numActiveThreads = 0
stopping threads
destroy semaphore
semaphore destroyed
Thread with taskId 0 exiting
Thread TERMINATED
destroy main semaphore
main semaphore destroyed
finished
numActiveThreads = 0
btShutDownExampleBrowser stopping threads
Thread with taskId 0 exiting
Thread TERMINATED
destroy semaphore
semaphore destroyed
destroy main semaphore
main semaphore destroyed
Exception ignored in: <function VecVideoRecorder.__del__ at 0x7f7c2b5cc200>
Traceback (most recent call last):
  File "/content/gdrive/My Drive/hsr/stable-baselines3/stable_baselines3/common/vec_env/vec_video_recorder.py", line 114, in __del__
  File "/content/gdrive/My Drive/hsr/stable-baselines3/stable_baselines3/common/vec_env/vec_video_recorder.py", line 110, in close
  File "/content/gdrive/My Drive/hsr/stable-baselines3/stable_baselines3/common/vec_env/base_vec_env.py", line 278, in close
  File "/content/gdrive/My Drive/hsr/stable-baselines3/stable_baselines3/common/vec_env/dummy_vec_env.py", line 67, in close
  File "/content/gdrive/My Drive/hsr/stable-baselines3/stable_baselines3/common/monitor.py", line 113, in close
  File "/usr/local/lib/python3.7/dist-packages/gym/core.py", line 243, in close
  File "/usr/local/lib/python3.7/dist-packages/gym/core.py", line 243, in close
  File "/content/gdrive/My Drive/hsr/PyLIS/gym-foodhunting/gym_foodhunting/foodhunting/gym_foodhunting.py", line 538, in close
pybullet.error: Not connected to physics server
# (Imports reconstructed here for completeness; in my file, R2D2 and the other
# robot models are defined alongside this class in gym_foodhunting.py.)
import gym
import numpy as np
import pybullet as p
import pybullet_data
from gym.utils import seeding

class FoodHuntingEnv(gym.Env):
    metadata = {'render.modes': ['human', 'rgb_array']}

    GRAVITY = -10.0
    BULLET_STEPS = 120  # p.setTimeStep(1.0 / 240.0), so 1 gym step == 0.5 sec.

    def __init__(self, render=False, robot_model=R2D2, max_steps=500, num_foods=3, num_fakes=0, object_size=1.0, object_radius_scale=1.0, object_radius_offset=1.0, object_angle_scale=1.0):
        """Initialize environment."""
        ### gym variables
        self.observation_space = robot_model.getObservationSpace()  # classmethod
        self.action_space = robot_model.getActionSpace()  # classmethod
        self.reward_range = (-1.0, 1.0)
        self.seed()
        ### pybullet settings
        self.ifrender = render
        self.physicsClient = p.connect(p.GUI if self.ifrender else p.DIRECT)
        p.setAdditionalSearchPath(pybullet_data.getDataPath())
        ### env variables
        self.robot_model = robot_model
        self.max_steps = max_steps
        self.num_foods = num_foods
        self.num_fakes = num_fakes
        self.object_size = object_size
        self.object_radius_scale = object_radius_scale
        self.object_radius_offset = object_radius_offset
        self.object_angle_scale = object_angle_scale
        self.plane_id = None
        self.robot = None
        self.object_ids = []
        ### episode variables
        self.steps = 0
        self.episode_rewards = 0.0

    def close(self):
        """Close environment."""
        p.disconnect(self.physicsClient)

    def reset(self):
        """Reset environment."""
        self.steps = 0
        self.episode_rewards = 0
        p.resetSimulation()
        # p.setTimeStep(1.0 / 240.0)
        p.setGravity(0, 0, self.GRAVITY)
        self.plane_id = p.loadURDF('plane.urdf')
        self.robot = self.robot_model()
        self.object_ids = []
        for i, (pos, orn) in enumerate(self._generateObjectPositions(num=(self.num_foods+self.num_fakes), radius_scale=self.object_radius_scale, radius_offset=self.object_radius_offset, angle_scale=self.object_angle_scale)):
            if i < self.num_foods:
                urdfPath = 'food_sphere.urdf'
            else:
                urdfPath = 'food_cube.urdf'
            object_id = p.loadURDF(urdfPath, pos, orn, globalScaling=self.object_size)
            self.object_ids.append(object_id)
        for i in range(self.BULLET_STEPS):
            p.stepSimulation()
        obs = self._getObservation()
        #self.robot.printAllJointInfo()
        return obs

    def step(self, action):
        """Apply action to environment, then return observation and reward."""
        self.steps += 1
        self.robot.setAction(action)
        reward = -1.0 * float(self.num_foods) / float(self.max_steps)  # so agent needs to eat foods quickly
        for i in range(self.BULLET_STEPS):
            p.stepSimulation()
            reward += self._getReward()
        self.episode_rewards += reward
        obs = self._getObservation()
        done = self._isDone()
        pos, orn = self.robot.getPositionAndOrientation()
        info = {'steps': self.steps, 'pos': pos, 'orn': orn}
        if done:
            info['episode'] = {'r': self.episode_rewards, 'l': self.steps}
            # print(self.episode_rewards, self.steps)
        #print(self.robot.getBaseRollPosition(), self.robot.getTorsoLiftPosition(), self.robot.getHeadPosition(), self.robot.getArmPosition(), self.robot.getWristPosition(), self.robot.getGripperPosition())  # for HSR debug
        #print(self.robot.getHeadPosition(), self.robot.getGripperPosition())  # for R2D2 debug
        return obs, reward, done, info

    def render(self, mode='human', close=False):
        """This is a dummy function. This environment cannot control rendering timing."""
        if mode != 'rgb_array':
            return np.array([])
        return self._getObservation()

    def seed(self, seed=None):
        """Set random seed."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def _getReward(self):
        """Detect contact points and return reward."""
        reward = 0
        contacted_object_ids = [object_id for object_id in self.object_ids if self.robot.isContact(object_id)]
        for object_id in contacted_object_ids:
            reward += 1 if self._isFood(object_id) else -1
            p.removeBody(object_id)
            self.object_ids.remove(object_id)
        return reward

    def _getObservation(self):
        """Get observation."""
        obs = self.robot.getObservation()
        return obs

    def _isFood(self, object_id):
        """Check if object_id is a food."""
        baseLink, urdfPath = p.getBodyInfo(object_id)
        return urdfPath == b'food_sphere.urdf'  # otherwise, fake

    def _isDone(self):
        """Check if episode is done."""
        available_object_ids = [object_id for object_id in self.object_ids if self._isFood(object_id)]
        return self.steps >= self.max_steps or len(available_object_ids) <= 0

    def _generateObjectPositions(self, num=1, retry=100, radius_scale=1.0, radius_offset=1.0, angle_scale=1.0, angle_offset=0.5*np.pi, z=1.5, near_distance=0.5):
        """Generate food positions randomly."""
        def genPos():
            r = radius_scale * self.np_random.rand() + radius_offset
            a = -np.pi * angle_scale + angle_offset
            b = np.pi * angle_scale + angle_offset
            ang = (b - a) * self.np_random.rand() + a
            return np.array([r * np.sin(ang), r * np.cos(ang), z])

        def isNear(pos, poss):
            for pos_other, orn_other in poss:
                if np.linalg.norm(pos_other - pos) < near_distance:
                    return True
            return False

        def genPosRetry(poss):
            for i in range(retry):
                pos = genPos()
                if not isNear(pos, poss):
                    return pos
            return genPos()

        poss = []
        for i in range(num):
            pos = genPosRetry(poss)
            orn = p.getQuaternionFromEuler([0.0, 0.0, 2.0*np.pi*self.np_random.rand()])
            poss.append((pos, orn))
        return poss
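Note the shape of the failure in the traceback: close() is reached a second time from VecVideoRecorder.__del__, after the physics client is already gone, which is exactly when p.disconnect raises "Not connected to physics server". A minimal sketch of a close() that tolerates that, assuming the double close is indeed the trigger:

    def close(self):
        """Close environment; safe to call after the client is gone (sketch)."""
        if p.isConnected(self.physicsClient):
            p.disconnect(self.physicsClient)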

Make USB device visible with different vendor and product ID

I'm looking for a way to make a USB device show up as if it has different vendor and product IDs. I'm trying to make a proprietary piece of software to work with a USB device that should be supported but gets rejected solely because of its ID.
The software is for Windows, but I can run it in a VM in Linux. So I'll be fine with either approach, whatever works:
Changing USB ID in Linux
Changing USB ID in Windows
Making Qemu (or perhaps some other equivalent) change USB ID in passthrough
There may be a simpler way to do this, but I was faced with a similar problem and was able to create a process by which I could change the device descriptor information for development purposes. The process is summarized in the following steps:
First, configure a static IP address for your Raspberry Pi and configure your PC ethernet TCP/IPv4 settings so you are able to communicate with your Raspberry Pi over the LAN connection.
Download the VirtualHere Raspberry Pi server and the client software for your PC from the VirtualHere website. The trial version will work for this use case.
Move the VirtualHere server software to your Raspberry Pi. In order to run the USB server you need to change the privileges of the file with $ sudo chmod +x vhusbdarm, then run it with $ sudo ./vhusbdarm.
Run the client software on your local machine. You will see that the client detects the USB device on the USB device server at <Your Raspberry Pi IP address>:7575. Connecting to the device at this point gives no advantage and mimics a direct connection.
Run the Python file below, which was modified from a solution I found here, but utilizes Scapy's sniff to capture the incoming packets before forwarding the raw data. The original script in the linked solution should work fine as well. In the script you can see that I used port 12345.
#!/usr/bin/env python
from scapy.all import *
import socket
import threading
import select
from queue import Queue

main_queue = Queue()
terminateAll = False

class ClientThread(threading.Thread):
    def __init__(self, clientSocket, targetHost, targetPort):
        threading.Thread.__init__(self)
        self.__clientSocket = clientSocket
        self.__targetHost = targetHost
        self.__targetPort = targetPort

    def run(self):
        print("Client Thread started")
        self.__clientSocket.setblocking(0)
        targetHostSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        targetHostSocket.connect((self.__targetHost, self.__targetPort))
        targetHostSocket.setblocking(0)
        clientData = b""
        targetHostData = b""
        terminate = False
        while not terminate and not terminateAll:
            inputs = [self.__clientSocket, targetHostSocket]
            outputs = []
            if len(clientData) > 0:
                outputs.append(self.__clientSocket)
            if len(targetHostData) > 0:
                outputs.append(targetHostSocket)
            try:
                inputsReady, outputsReady, errorsReady = select.select(inputs, outputs, [], 1.0)
            except Exception as e:
                print(e)
                break
            for inp in inputsReady:
                if inp == self.__clientSocket:
                    data = None  # so a recv() exception can't leave data unbound
                    try:
                        data = self.__clientSocket.recv(4096)
                        #print(data)
                        #data = b""
                        #while not main_queue.empty():
                        #    data += main_queue.get()
                    except Exception as e:
                        print(e)
                    if data != None:
                        if len(data) > 0:
                            targetHostData += data
                        #else:
                        #    terminate = True
                elif inp == targetHostSocket:
                    data = None
                    try:
                        data = b""
                        while not main_queue.empty():
                            data += main_queue.get()
                    except Exception as e:
                        print(e)
                    if data != None:
                        if len(data) > 0:
                            clientData += data
            for out in outputsReady:
                if out == self.__clientSocket and len(clientData) > 0:
                    #pck = Ether(clientData)
                    #pck.show()
                    bytesWritten = self.__clientSocket.send(clientData)
                    if bytesWritten > 0:
                        clientData = clientData[bytesWritten:]
                elif out == targetHostSocket and len(targetHostData) > 0:
                    #pck = Ether(targetHostData)
                    #pck.show()
                    bytesWritten = targetHostSocket.send(targetHostData)
                    if bytesWritten > 0:
                        targetHostData = targetHostData[bytesWritten:]
        self.__clientSocket.close()
        targetHostSocket.close()
        print("ClientThread terminating")

def handle_sniff(pck):
    if IP in pck:
        if pck[IP].src == "192.168.1.48":
            if Raw in pck:
                payload = pck[Raw].load
                if b'\x12\x01\x00\x01\x00\x00\x00\x08$\x07\x04\x00\x88#\x01\x02\x00\x01' in payload:
                    payload = payload.replace(b'\x00\x08$\x07\x04\x00\x88#\x01\x02\x00', b'\x00\x08$\x07\x04\x00\x88\x15\x01\x02\x00')
                print(payload)
                main_queue.put(payload)

if __name__ == '__main__':
    localHost = "localhost"
    localPort = 12345
    targetHost = "192.168.1.12"
    targetPort = 7575
    serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    serverSocket.bind((localHost, localPort))
    serverSocket.listen(5)
    print("Waiting for client...")
    while True:
        try:
            clientSocket, address = serverSocket.accept()
        except KeyboardInterrupt:
            print("\nTerminating...")
            terminateAll = True
            break
        print("starting client")
        ClientThread(clientSocket, targetHost, targetPort).start()
        sniff(iface="Ethernet", prn=lambda pck: handle_sniff(pck))
    serverSocket.close()
Once the script is running, configure the VirtualHere client to listen for USB servers at localhost:12345. The handle_sniff function is where the USB device descriptor information gets changed. Once connected, you should be able to double-click on the USB device in the dropdown tree, and you will see the USB data begin to be printed in your Python console.
In the above example I changed the device bcdDevice bytes of the USB Descriptor.
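(For reference, that matched payload is just a standard 18-byte USB device descriptor. A hypothetical helper, not part of the proxy script, to decode it and confirm which field you are patching:

    import struct

    desc = b'\x12\x01\x00\x01\x00\x00\x00\x08$\x07\x04\x00\x88#\x01\x02\x00\x01'
    fields = struct.unpack('<BBHBBBBHHHBBBB', desc)  # little-endian descriptor layout
    idVendor, idProduct, bcdDevice = fields[7], fields[8], fields[9]
    print(hex(idVendor), hex(idProduct), hex(bcdDevice))  # 0x724 0x4 0x2388

Patching byte 13 from 0x23 ('#') to 0x15 is what changes bcdDevice from 0x2388 to 0x1588.)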
Another helpful script I used to identify the packet that contained the information I was targeting is below. I modified a script I found in this solution. It is modified to print the raw data along with the unpacked device descriptor information, which can then be searched for in the TCP raw data to identify which bytes to replace.
#!/usr/bin/env python
from __future__ import print_function
import argparse
import string
import struct
import sys

import win32api
import win32file
import pywintypes

BUFF = b""

def CTL_CODE(DeviceType, Function, Method, Access):
    return (DeviceType << 16) | (Access << 14) | (Function << 2) | Method

def USB_CTL(id):
    # CTL_CODE(FILE_DEVICE_USB, (id), METHOD_BUFFERED, FILE_ANY_ACCESS)
    return CTL_CODE(0x22, id, 0, 0)

IOCTL_USB_GET_ROOT_HUB_NAME = USB_CTL(258)  # HCD_GET_ROOT_HUB_NAME
IOCTL_USB_GET_NODE_INFORMATION = USB_CTL(258)  # USB_GET_NODE_INFORMATION
IOCTL_USB_GET_NODE_CONNECTION_INFORMATION = USB_CTL(259)  # USB_GET_NODE_CONNECTION_INFORMATION
IOCTL_USB_GET_NODE_CONNECTION_DRIVERKEY_NAME = USB_CTL(264)  # USB_GET_NODE_CONNECTION_DRIVERKEY_NAME
IOCTL_USB_GET_NODE_CONNECTION_NAME = USB_CTL(261)  # USB_GET_NODE_CONNECTION_NAME
IOCTL_USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION = USB_CTL(260)  # USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION

USB_CONFIGURATION_DESCRIPTOR_TYPE = 2
USB_STRING_DESCRIPTOR_TYPE = 3
USB_INTERFACE_DESCRIPTOR_TYPE = 4
MAXIMUM_USB_STRING_LENGTH = 255

def open_dev(name):
    try:
        handle = win32file.CreateFile(name,
                                      win32file.GENERIC_WRITE,
                                      win32file.FILE_SHARE_WRITE,
                                      None,
                                      win32file.OPEN_EXISTING,
                                      0,
                                      None)
    except pywintypes.error as e:
        return None
    return handle

def get_root_hub_name(handle):
    buf = win32file.DeviceIoControl(handle,
                                    IOCTL_USB_GET_ROOT_HUB_NAME,
                                    None,
                                    6,
                                    None)
    act_len, _ = struct.unpack('LH', buf)
    buf = win32file.DeviceIoControl(handle,
                                    IOCTL_USB_GET_ROOT_HUB_NAME,
                                    None,
                                    act_len,
                                    None)
    return buf[4:].decode('utf-16le')

def get_driverkey_name(handle, index):
    key_name = bytes(chr(index) + '\0'*9, 'utf-8')
    try:
        buf = win32file.DeviceIoControl(handle,
                                        IOCTL_USB_GET_NODE_CONNECTION_DRIVERKEY_NAME,
                                        key_name,
                                        10,
                                        None)
    except pywintypes.error as e:
        print(e.strerror, index)
        sys.exit(1)
    _, act_len, _ = struct.unpack('LLH', buf)
    buf = win32file.DeviceIoControl(handle,
                                    IOCTL_USB_GET_NODE_CONNECTION_DRIVERKEY_NAME,
                                    key_name,
                                    act_len,
                                    None)
    return buf[8:].decode('utf-16le')

def get_ext_hub_name(handle, index):
    hub_name = bytes(chr(index) + '\0'*9, 'utf-8')  # bytes, for consistency with get_driverkey_name
    buf = win32file.DeviceIoControl(handle,
                                    IOCTL_USB_GET_NODE_CONNECTION_NAME,
                                    hub_name,
                                    10,
                                    None)
    _, act_len, _ = struct.unpack('LLH', buf)
    buf = win32file.DeviceIoControl(handle,
                                    IOCTL_USB_GET_NODE_CONNECTION_NAME,
                                    hub_name,
                                    act_len,
                                    None)
    return buf[8:].decode('utf-16le')

def get_str_desc(handle, conn_idx, str_idx):
    req = struct.pack('LBBHHH',
                      conn_idx,
                      0,
                      0,
                      (USB_STRING_DESCRIPTOR_TYPE << 8) | str_idx,
                      win32api.GetSystemDefaultLangID(),
                      MAXIMUM_USB_STRING_LENGTH)
    try:
        buf = win32file.DeviceIoControl(handle,
                                        IOCTL_USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION,
                                        req,
                                        12 + MAXIMUM_USB_STRING_LENGTH,
                                        None)
    except pywintypes.error as e:
        return 'ERROR: no String Descriptor for index {}'.format(str_idx)
    if len(buf) > 16:
        return buf[14:].decode('utf-16le')
    return ''

def exam_hub(name, verbose, level):
    handle = open_dev(r'\\.\{}'.format(name))
    if not handle:
        print('Failed to open device {}'.format(name))
        return
    buf = win32file.DeviceIoControl(handle,
                                    IOCTL_USB_GET_NODE_INFORMATION,
                                    None,
                                    76,
                                    None)
    print_hub_ports(handle, buf[6], verbose, level)  # buf[6] is already an int on Python 3
    handle.close()

def print_str_or_hex(to_be_print):
    if all(c in string.printable for c in to_be_print):
        print('"{}"'.format(to_be_print))
        return
    print('Hex: ', end='')
    for x in to_be_print:
        print('{:02x} '.format(ord(x)), end='')
    print('')

def print_hub_ports(handle, num_ports, verbose, level):
    print(handle, num_ports, verbose, level)
    for idx in range(1, num_ports + 1):
        info = bytes(chr(idx) + '\0'*34, 'utf-8')
        try:
            buf = win32file.DeviceIoControl(handle,
                                            IOCTL_USB_GET_NODE_CONNECTION_INFORMATION,
                                            info,
                                            34 + 11*30,
                                            None)
        except pywintypes.error as e:
            print(e)
            print(e.winerror, e.funcname, e.strerror)
            return
        print(buf)
        _, vid, pid, vers, manu, prod, seri, _, ishub, _, stat = struct.unpack('=12sHHHBBB3s?6sL', buf[:35])
        if ishub:
            if verbose:
                print('{} [Port{}] {}'.format(' '*level, idx, 'USB Hub'))
            exam_hub(get_ext_hub_name(handle, idx), verbose, level)
        elif stat == 0 and verbose:
            print('{} [Port{}] {}'.format(' '*level, idx, 'NoDeviceConnected'))
        elif stat == 1:
            if verbose or (manu != 0 or prod != 0 or seri != 0):
                print('{} [Port{}] {}'.format(' '*level, idx, get_driverkey_name(handle, idx)))
                print('{} Vendor ID: 0x{:04X}'.format(' '*level, vid))
                print('{} Product ID: 0x{:04X}'.format(' '*level, pid))
                print('{} Device BCD: 0x{:04X}'.format(' '*level, vers))
                print(vers)
                if manu != 0:
                    print('{} Manufacturer (0x{:x}) -> '.format(' '*level, manu), end='')
                    print_str_or_hex(get_str_desc(handle, idx, manu))
                if prod != 0:
                    print('{} Product (0x{:x}) -> '.format(' '*level, prod), end='')
                    print_str_or_hex(get_str_desc(handle, idx, prod))
                if seri != 0:
                    print('{} Serial No (0x{:x}) -> '.format(' '*level, seri), end='')
                    print_str_or_hex(get_str_desc(handle, idx, seri))

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', action='store_true',
                        help="Increase output verbosity.")
    args = parser.parse_args()
    for i in range(10):
        name = r"\\.\HCD{}".format(i)
        handle = open_dev(name)
        if not handle:
            continue
        root = get_root_hub_name(handle)
        print('{}RootHub: {}'.format('\n' if i != 0 else '', root))
        dev_name = r'\\.\{}'.format(root)
        dev_handle = open_dev(dev_name)
        if not dev_handle:
            print('Failed to open device {}'.format(dev_name))
            continue
        buf = win32file.DeviceIoControl(dev_handle,
                                        IOCTL_USB_GET_NODE_INFORMATION,
                                        None,
                                        76,
                                        None)
        global BUFF
        BUFF = buf
        print_hub_ports(dev_handle, buf[6], args.verbose, 0)
        dev_handle.close()
        handle.close()

if __name__ == '__main__':
    main()
P.S. This is also really helpful for filtering and modifying any of the USB data being transferred, not just the device descriptor.

Trying to take pictures with Coral camera with Coral edgeTPU dev board but it is really slow

To start with, I am not a developer, but a mere automation engineer who has worked a bit with coding in Java, Python, C#, C++ and C.
I am trying to make a prototype that takes pictures and stores them, triggered by a digital pin on the board. At the moment I can take pictures using a switch, but it is really slow (around 3 seconds per image).
My complete system is going to be like this:
A product passes by on a conveyor and a photo cell triggers the board to take an image and store it. If an operator removes a product (because of bad quality), the image is stored in a different folder.
I started with the snapshot function shipped with Mendel and have tried to get rid of the overhead, but the GStreamer pipeline stuff confuses me a lot.
If someone could help me understand the supplied code, or show how to write a minimalistic solution to take an image, I would be grateful :)
I have tried to understand and use project-teachable and examples-camera from Google Coral (https://github.com/google-coral), but with no luck. I have had the best luck with the snapshot tool that uses snapshot.py, referenced here: https://coral.withgoogle.com/docs/camera/datasheet/#snapshot-tool
from periphery import GPIO
import time
import argparse
import contextlib
import fcntl
import os
import select
import sys
import termios
import threading

import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
from functools import partial
from gi.repository import GLib, GObject, Gst, GstBase
from PIL import Image

GObject.threads_init()
Gst.init(None)

WIDTH = 2592
HEIGHT = 1944
FILENAME_PREFIX = 'img'
FILENAME_SUFFIX = '.png'
AF_SYSFS_NODE = '/sys/module/ov5645_camera_mipi_v2/parameters/ov5645_af'
CAMERA_INIT_QUERY_SYSFS_NODE = '/sys/module/ov5645_camera_mipi_v2/parameters/ov5645_initialized'
HDMI_SYSFS_NODE = '/sys/class/drm/card0/card0-HDMI-A-1/status'

# No of initial frames to throw away before camera has stabilized
SCRAP_FRAMES = 1

SRC_WIDTH = 2592
SRC_HEIGHT = 1944
SRC_RATE = '15/1'
SRC_ELEMENT = 'v4l2src'

SINK_WIDTH = 2592
SINK_HEIGHT = 1944
SINK_ELEMENT = ('appsink name=appsink sync=false emit-signals=true '
                'max-buffers=1 drop=true')

SCREEN_SINK = 'glimagesink sync=false'
FAKE_SINK = 'fakesink sync=false'

SRC_CAPS = 'video/x-raw,format=YUY2,width={width},height={height},framerate={rate}'
SINK_CAPS = 'video/x-raw,format=RGB,width={width},height={height}'
LEAKY_Q = 'queue max-size-buffers=1 leaky=downstream'

# The tee splits the camera stream into two branches: one goes to a screen
# sink (or a fakesink), the other through videoconvert into the appsink,
# which is what hands frames to the Python callback below.
PIPELINE = '''
{src_element} ! {src_caps} ! {leaky_q} ! tee name=t
    t. ! {leaky_q} ! {screen_sink}
    t. ! {leaky_q} ! videoconvert ! {sink_caps} ! {sink_element}
'''

def on_bus_message(bus, message, loop):
    t = message.type
    if t == Gst.MessageType.EOS:
        loop.quit()
    elif t == Gst.MessageType.WARNING:
        err, debug = message.parse_warning()
        sys.stderr.write('Warning: %s: %s\n' % (err, debug))
    elif t == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        sys.stderr.write('Error: %s: %s\n' % (err, debug))
        loop.quit()
    return True

def on_new_sample(sink, snapinfo):
    if not snapinfo.save_frame():
        # Throw away the frame
        return Gst.FlowReturn.OK
    sample = sink.emit('pull-sample')
    buf = sample.get_buffer()
    result, mapinfo = buf.map(Gst.MapFlags.READ)
    if result:
        imgfile = snapinfo.get_filename()
        caps = sample.get_caps()
        width = WIDTH
        height = HEIGHT
        img = Image.frombytes('RGB', (width, height), mapinfo.data, 'raw')
        img.save(imgfile)
        img.close()
    buf.unmap(mapinfo)
    return Gst.FlowReturn.OK

def run_pipeline(snapinfo):
    src_caps = SRC_CAPS.format(width=SRC_WIDTH, height=SRC_HEIGHT, rate=SRC_RATE)
    sink_caps = SINK_CAPS.format(width=SINK_WIDTH, height=SINK_HEIGHT)
    screen_sink = FAKE_SINK
    pipeline = PIPELINE.format(
        leaky_q=LEAKY_Q,
        src_element=SRC_ELEMENT,
        src_caps=src_caps,
        sink_caps=sink_caps,
        sink_element=SINK_ELEMENT,
        screen_sink=screen_sink)
    pipeline = Gst.parse_launch(pipeline)
    appsink = pipeline.get_by_name('appsink')
    appsink.connect('new-sample', partial(on_new_sample, snapinfo=snapinfo))
    loop = GObject.MainLoop()
    # Set up a pipeline bus watch to catch errors.
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message', on_bus_message, loop)
    # Connect the loop to the snaphelper
    snapinfo.connect_loop(loop)
    # Run pipeline.
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # Clean up.
    pipeline.set_state(Gst.State.NULL)
    while GLib.MainContext.default().iteration(False):
        pass

class SnapHelper:
    def __init__(self, sysfs, prefix='img', oneshot=True, suffix='jpg'):
        self.prefix = prefix
        self.oneshot = oneshot
        self.suffix = suffix
        self.snap_it = oneshot
        self.num = 0
        self.scrapframes = SCRAP_FRAMES
        self.sysfs = sysfs

    def get_filename(self):
        while True:
            filename = self.prefix + str(self.num).zfill(4) + '.' + self.suffix
            self.num = self.num + 1
            if not os.path.exists(filename):
                break
        return filename

    #def check_af(self):
    #    try:
    #        self.sysfs.seek(0)
    #        v = self.sysfs.read()
    #        if int(v) != 0x10:
    #            print('NO Focus')
    #    except:
    #        pass

    #def refocus(self):
    #    try:
    #        self.sysfs.write('1')
    #        self.sysfs.flush()
    #    except:
    #        pass

    def save_frame(self):
        # We always want to throw away the initial frames to let the
        # camera stabilize. This seemed empirically to be the right number
        # when running on desktop.
        if self.scrapframes > 0:
            self.scrapframes = self.scrapframes - 1
            return False
        if self.snap_it:
            self.snap_it = False
            retval = True
        else:
            retval = False
        if self.oneshot:
            self.loop.quit()
        return retval

    def connect_loop(self, loop):
        self.loop = loop

def take_picture(snap):
    start_time = int(round(time.time()))
    run_pipeline(snap)
    print(time.time() - start_time)

def main():
    button = GPIO(138, "in")
    last_state = False
    with open(AF_SYSFS_NODE, 'w+') as sysfs:
        snap = SnapHelper(sysfs, 'test', 'oneshot', 'jpg')
        sysfs.write('2')
        while 1:
            button_state = button.read()
            if button_state == True and last_state == False:
                snap = SnapHelper(sysfs, 'test', 'oneshot', 'jpg')
                take_picture(snap)
            last_state = button_state

if __name__ == "__main__":
    main()
    sys.exit()
The output is what I expect, but it is slow.
I switched to a USB webcam and used the pygame library instead.
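For completeness, a minimal sketch of the pygame route (the device index and resolution are assumptions; pygame.camera is a somewhat experimental module that wraps v4l2 on Linux):

    import pygame
    import pygame.camera

    pygame.camera.init()
    cams = pygame.camera.list_cameras()            # e.g. ['/dev/video0']
    cam = pygame.camera.Camera(cams[0], (1280, 720))
    cam.start()
    img = cam.get_image()                          # returns a pygame.Surface
    pygame.image.save(img, 'img.png')
    cam.stop()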

Can this PyGame code run 60fps for >40 critters?

In my other question, some of the posters asked to see the code and suggested I make a new question. As requested, here is most of the code I'm using. I've removed the Vector class, simply because it's a lot of code. It's well-understood math that I got from someone else (https://gist.github.com/mcleonard/5351452), and cProfile didn't have much to say about any of the functions there. I've provided a link in the code, if you want to make this runnable.
This code should run if you paste the vector class where indicated in the code.
The problem is, once I get above 20 critters, the framerate drops rapidly from 60fps to 11fps around 50 critters.
Please excuse the spaghetti-code. Much of this is diagnostic kludging or pre-code that I intend to either remove, or turn into a behavior (instead of a hard-coded value).
This app is basically composed of 4 objects.
A Vector object provides abstracted vector operations.
A Heat Block is able to track its own "heat" level, increase it and decrease it. It can also draw itself.
A Heat Map is composed of heat blocks which are tiled across the screen. When given coordinates, it can choose the block that those coordinates fall within.
A Critter has many features that make it able to wander around the screen, bump off of the walls and other critters, choose a new random direction, and die.
The main loop iterates through each critter in the "flock" and updates its "condition" (whether or not it's "dying"), its location, its orientation, and the heat block on which it is currently standing. The loop also iterates over each heat block to let it "cool down."
Then the main loop asks the heat map to draw itself, and then each critter in the flock to draw itself.
import pygame
from pygame import gfxdraw
import pygame.locals
import os
import math
import random
import time
(I got a nice vector class from someone else. It's large, and mostly likely not the problem.)
(INSERT CONTENTS OF VECTOR.PY FROM https://gist.github.com/mcleonard/5351452 HERE)
pygame.init()
#some global constants
BLUE = (0, 0, 255)
WHITE = (255,255,255)
diagnostic = False
SPAWN_TIME = 1 #number of seconds between creating new critters
FLOCK_LIMIT = 30 #number of critters at which the flock begins being culled
GUIDs = [0] #list of guaranteed unique IDs for identifying each critter
# Set the position of the OS window
position = (30, 30)
os.environ['SDL_VIDEO_WINDOW_POS'] = str(position[0]) + "," + str(position[1])
# Set the position, width and height of the screen [width, height]
size_x = 1000
size_y = 500
size = (size_x, size_y)
FRAMERATE = 60
SECS_FOR_DYING = 1
screen = pygame.display.set_mode(size)
screen.set_alpha(None)
pygame.display.set_caption("My Game")
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
def random_float(lower, upper):
    num = random.randint(lower*1000, upper*1000)
    return num/1000

def new_GUID():
    num = GUIDs[-1]
    num = num + 1
    while num in GUIDs:
        num += 1
    GUIDs.append(num)
    return num
class HeatBlock:
    def __init__(self, _tlx, _tly, h, w):
        self.tlx = int(_tlx)
        self.tly = int(_tly)
        self.height = int(h)+1
        self.width = int(w)
        self.heat = 255.0
        self.registered = False

    def register_tresspasser(self):
        self.registered = True
        self.heat = max(self.heat - 1, 0)

    def cool_down(self):
        if not self.registered:
            self.heat = min(self.heat + 0.1, 255)
        self.registered = False

    def hb_draw_self(self):
        screen.fill((255, int(self.heat), int(self.heat)), [self.tlx, self.tly, self.width, self.height])

class HeatMap:
    def __init__(self, _h, _v):
        self.h_freq = _h  #horizontal frequency
        self.h_rez = size_x/self.h_freq  #horizontal resolution
        self.v_freq = _v  #vertical frequency
        self.v_rez = size_y/self.v_freq  #vertical resolution
        self.blocks = []

    def make_map(self):
        h_size = size_x/self.h_freq
        v_size = size_y/self.v_freq
        for h_count in range(0, self.h_freq):
            TLx = h_count * h_size  #TopLeft corner, x
            col = []
            for v_count in range(0, self.v_freq):
                TLy = v_count * v_size  #TopLeft corner, y
                col.append(HeatBlock(TLx, TLy, v_size, h_size))
            self.blocks.append(col)

    def hm_draw_self(self):
        for col in self.blocks:
            for block in col:
                block.cool_down()
                block.hb_draw_self()

    def register(self, x, y):
        #convert the given coordinates of the trespasser into a col/row block index
        col = max(int(math.floor(x / self.h_rez)), 0)
        row = max(int(math.floor(y / self.v_rez)), 0)
        self.blocks[col][row].register_tresspasser()

class Critter:
    def __init__(self):
        self.color = (random.randint(1, 200), random.randint(1, 200), random.randint(1, 200))
        self.linear_speed = random_float(20, 100)
        self.radius = int(round(10 * (100/self.linear_speed)))
        self.angular_speed = random_float(0.1, 2)
        self.x = int(random.randint(self.radius*2, size_x - (self.radius*2)))
        self.y = int(random.randint(self.radius*2, size_y - (self.radius*2)))
        self.orientation = Vector(0, 1).rotate(random.randint(-180, 180))
        self.sensor = Vector(0, 20)
        self.sensor_length = 20
        self.new_orientation = self.orientation
        self.draw_bounds = False
        self.GUID = new_GUID()
        self.condition = 0  #0 = alive, [1-fps] = dying, >fps = dead
        self.delete_me = False

    def c_draw_self(self):
        #if we're alive and not dying, draw our normal self
        if self.condition == 0:
            #diagnostic
            if self.draw_bounds:
                pygame.gfxdraw.rectangle(screen, [int(self.x), int(self.y), 1, 1], BLUE)
                temp = self.orientation * (self.linear_speed * 20)
                pygame.gfxdraw.line(screen, int(self.x), int(self.y), int(self.x + temp[0]), int(self.y + temp[1]), BLUE)
            #if there's a new orientation, match it gradually
            temp = self.new_orientation * self.linear_speed
            #draw my body
            pygame.gfxdraw.aacircle(screen, int(self.x), int(self.y), self.radius, self.color)
            #draw a line indicating my new direction
            pygame.gfxdraw.line(screen, int(self.x), int(self.y), int(self.x + temp[0]), int(self.y + temp[1]), BLUE)
            #draw my sensor (a line pointing forward)
            self.sensor = self.orientation.normalize() * self.sensor_length
            pygame.gfxdraw.line(screen, int(self.x), int(self.y), int(self.x + self.sensor[0]), int(self.y + self.sensor[1]), BLUE)
        #otherwise we're dying, draw our dying animation
        elif 1 <= self.condition <= FRAMERATE*SECS_FOR_DYING:
            #draw some lines in a spinning circle
            for num in range(0, 10):
                line = Vector(0, 1).rotate((num*(360/10))+(self.condition*23))
                line = line*self.radius
                pygame.gfxdraw.line(screen, int(self.x), int(self.y), int(self.x+line[0]), int(self.y+line[1]), self.color)

    def print_self(self):
        #diagnostic
        print("==============")
        print("radius:", self.radius)
        print("color:", self.color)
        print("linear_speed:", self.linear_speed)
        print("angular_speed:", self.angular_speed)
        print("x:", self.x)
        print("y:", int(self.y))
        print("orientation:", self.orientation)

    def avoid_others(self, _flock):
        for _critter in _flock:
            #if the critter isn't ME...
            if _critter.GUID != self.GUID and _critter.condition == 0:
                #and it's touching me...
                if self.x - _critter.x <= self.radius + _critter.radius:
                    me = Vector(self.x, int(self.y))
                    other_guy = Vector(_critter.x, _critter.y)
                    distance = me - other_guy
                    #give me a new orientation that's away from the other guy
                    if distance.norm() <= ((self.radius) + (_critter.radius)):
                        new_direction = me - other_guy
                        self.orientation = self.new_orientation = new_direction.normalize()

    def update_location(self, elapsed):
        boundary = '?'
        while boundary != 'X':
            boundary = self.out_of_bounds()
            if boundary == 'N':
                self.orientation = self.new_orientation = Vector(0, 1).rotate(random.randint(-20, 20))
                self.y = (self.radius) + 2
            elif boundary == 'S':
                self.orientation = self.new_orientation = Vector(0, -1).rotate(random.randint(-20, 20))
                self.y = (size_y - (self.radius)) - 2
            elif boundary == 'E':
                self.orientation = self.new_orientation = Vector(-1, 0).rotate(random.randint(-20, 20))
                self.x = (size_x - (self.radius)) - 2
            elif boundary == 'W':
                self.orientation = self.new_orientation = Vector(1, 0).rotate(random.randint(-20, 20))
                self.x = (self.radius) + 2
        point = Vector(self.x, self.y)
        self.x, self.y = (point + (self.orientation * (self.linear_speed*(elapsed/1000))))
        boundary = self.out_of_bounds()

    def update_orientation(self, elapsed):
        #randomly choose a new direction, from time to time
        if random.randint(0, 100) > 98:
            self.choose_new_orientation()
        difference = self.orientation.argument() - self.new_orientation.argument()
        self.orientation = self.orientation.rotate((difference * (self.angular_speed*(elapsed/1000))))

    def still_alive(self, elapsed):
        return_value = True  #I am still alive
        if self.condition == 0:
            return_value = True
        elif self.condition <= FRAMERATE*SECS_FOR_DYING:
            self.condition = self.condition + (elapsed/17)
            return_value = True
        if self.condition > FRAMERATE*SECS_FOR_DYING:
            return_value = False
        return return_value

    def choose_new_orientation(self):
        if self.new_orientation:
            if (self.orientation.argument() - self.new_orientation.argument()) < 5:
                rotation = random.randint(-300, 300)
                self.new_orientation = self.orientation.rotate(rotation)

    def out_of_bounds(self):
        if self.x >= (size_x - (self.radius)):
            return 'E'
        elif self.y >= (size_y - (self.radius)):
            return 'S'
        elif self.x <= (0 + (self.radius)):
            return 'W'
        elif self.y <= (0 + (self.radius)):
            return 'N'
        else:
            return 'X'
# -------- Main Program Loop -----------
# generate critters
flock = [Critter()]
# generate heat map
heatMap = HeatMap(60, 40)
heatMap.make_map()
# set some settings
last_spawn = time.clock()
run_time = time.perf_counter()
frame_count = 0
max_time = 0
ms_elapsed = 1
avg_fps = [1]
# Loop until the user clicks the close button.
done = False

while not done:
    # --- Main event loop only processes one event
    frame_count = frame_count + 1
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True

    # --- Game logic should go here
    #check if it's time to make another critter
    if time.clock() - last_spawn > SPAWN_TIME:
        flock.append(Critter())
        last_spawn = time.clock()

    if len(flock) >= FLOCK_LIMIT:
        #if we're over the flock limit, cull the herd
        counter = FLOCK_LIMIT
        for critter in flock[0:len(flock)-FLOCK_LIMIT]:
            #this code allows a critter to be "dying" for a while, to play an animation
            if critter.condition == 0:
                critter.condition = 1
            elif not critter.still_alive(ms_elapsed):
                critter.delete_me = True
        counter = 0
        #delete all the critters that have finished dying
        while counter < len(flock):
            if flock[counter].delete_me:
                del flock[counter]
            else:
                counter = counter+1

    #----loop on all critters once, doing all functions for each critter
    for critter in flock:
        if critter.condition == 0:
            critter.avoid_others(flock)
        if critter.condition == 0:
            heatMap.register(critter.x, critter.y)
        critter.update_location(ms_elapsed)
        critter.update_orientation(ms_elapsed)
        if diagnostic:
            critter.print_self()
    #----alternately, loop for each function. Speed seems to be similar either way
    #for critter in flock:
    #    if critter.condition == 0:
    #        critter.update_location(ms_elapsed)
    #for critter in flock:
    #    if critter.condition == 0:
    #        critter.update_orientation(ms_elapsed)

    # --- Screen-clearing code goes here
    # Here, we clear the screen to white. Don't put other drawing commands
    screen.fill(WHITE)

    # --- Drawing code should go here
    #draw the heat_map
    heatMap.hm_draw_self()
    for critter in flock:
        critter.c_draw_self()
    #draw the framerate
    myfont = pygame.font.SysFont("monospace", 15)
    #average the framerate over 60 frames
    temp = sum(avg_fps)/float(len(avg_fps))
    text = str(round(((1/temp)*1000), 0))+"FPS | "+str(len(flock))+" Critters"
    label = myfont.render(text, 1, (0, 0, 0))
    screen.blit(label, (5, 5))

    # --- Go ahead and update the screen with what we've drawn.
    pygame.display.update()

    # --- Limit to 60 frames per second
    #only run for 30 seconds
    if time.perf_counter()-run_time >= 30:
        done = True
    #limit to 60fps
    #add this frame's time to the list
    avg_fps.append(ms_elapsed)
    #remove any old frames
    while len(avg_fps) > 60:
        del avg_fps[0]
    ms_elapsed = clock.tick(FRAMERATE)
    #track longest frame
    if ms_elapsed > max_time:
        max_time = ms_elapsed

#print some stats once the program is finished
print("Count:", frame_count)
print("Max time since last flip:", str(max_time)+"ms")
print("Total Time:", str(int(time.perf_counter()-run_time))+"s")
print("Average time for a flip:", str(int(((time.perf_counter()-run_time)/frame_count)*1000))+"ms")
# Close the window and quit.
pygame.quit()
One thing you can already do to improve the performance is to use pygame.math.Vector2 instead of your Vector class, because it's implemented in C and therefore faster. Before I switched to pygame's vector class, I could have ~50 critters on the screen before the frame rate dropped below 60, and after the change up to ~100.
pygame.math.Vector2 doesn't have that argument method, so you need to extract it from the class and turn it into a function:
def argument(vec):
    """Returns the argument of the vector, the angle clockwise from +y."""
    arg_in_rad = math.acos(pygame.math.Vector2(0, 1) * vec / vec.length())  # * is the dot product for Vector2
    arg_in_deg = math.degrees(arg_in_rad)
    if vec.x < 0:
        return 360 - arg_in_deg
    else:
        return arg_in_deg
And change .norm() to .length() everywhere in the program.
Also, define the font object (myfont) before the while loop. That's only a minor improvement, but every frame counts.
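In other words (sketch):

    myfont = pygame.font.SysFont("monospace", 15)  # build the font object once, outside the loop
    while not done:
        ...
        label = myfont.render(text, 1, (0, 0, 0))  # only the per-frame render stays inside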
Another change that made a significant improvement was to streamline my collision-detection algorithm.
Formerly, I had been looping through every critter in the flock and measuring the distance between it and every other critter in the flock. If that distance was small enough, I did something. That's n^2 checks (at 50 critters, that's 2,500 distance checks per frame, or 150,000 per second at 60fps), which is not awesome.
I'd thought about using a quadtree, but it didn't seem efficient to rebalance the whole tree every frame, because it will change every time a critter moves.
Well, I finally actually tried it, and it turns out that building a brand-new quadtree at the beginning of each frame is actually plenty fast. Once I have the tree, I pass it to the avoidance function where I just extract an intersection of any of the critters in that tree within a bounding box I care about. Then I just iterate on those neighbors to measure distances and update directions and whatnot.
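The structure is simple enough to sketch. This is an illustrative minimal point quadtree, not my exact code (the names and the MAX_ITEMS threshold are made up):

    class Quadtree:
        """Point quadtree, rebuilt from scratch each frame."""
        MAX_ITEMS = 4

        def __init__(self, x, y, w, h):
            self.bounds = (x, y, w, h)
            self.items = []       # (x, y, payload) tuples
            self.children = None  # four sub-quadrants once split

        def insert(self, x, y, payload):
            bx, by, bw, bh = self.bounds
            if not (bx <= x < bx + bw and by <= y < by + bh):
                return False  # point doesn't belong in this quadrant
            if self.children is None:
                if len(self.items) < self.MAX_ITEMS:
                    self.items.append((x, y, payload))
                    return True
                # too crowded: split into quadrants and push existing items down
                hw, hh = bw / 2, bh / 2
                self.children = [Quadtree(bx, by, hw, hh),
                                 Quadtree(bx + hw, by, hw, hh),
                                 Quadtree(bx, by + hh, hw, hh),
                                 Quadtree(bx + hw, by + hh, hw, hh)]
                old, self.items = self.items, []
                for ox, oy, op in old:
                    any(c.insert(ox, oy, op) for c in self.children)
            return any(c.insert(x, y, payload) for c in self.children)

        def query(self, x0, y0, x1, y1, found=None):
            """Collect payloads whose points fall inside the box (x0, y0)-(x1, y1)."""
            if found is None:
                found = []
            bx, by, bw, bh = self.bounds
            if x1 < bx or x0 >= bx + bw or y1 < by or y0 >= by + bh:
                return found  # the box misses this quadrant entirely
            for ix, iy, payload in self.items:
                if x0 <= ix <= x1 and y0 <= iy <= y1:
                    found.append(payload)
            if self.children:
                for c in self.children:
                    c.query(x0, y0, x1, y1, found)
            return found

Used roughly like this, per frame:

    tree = Quadtree(0, 0, size_x, size_y)
    for critter in flock:
        tree.insert(critter.x, critter.y, critter)
    # then inside avoid_others, only measure distances to nearby critters:
    # r = self.radius * 4
    # for other in tree.query(self.x - r, self.y - r, self.x + r, self.y + r): ...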
Now I'm up to 150 or so critters before I start dropping frames (up from 40).
So the moral of the story is, trust evidence instead of intuition.