How do I stop the download progress output from printing a new line on every update? I want it to stay on a single line.
Here's the code:
import datetime
import time

import libtorrent as lt

# handle (the torrent handle) and begin (the start timestamp) are set
# earlier in the script.
while handle.status().state != lt.torrent_status.seeding:
    s = handle.status()
    state_str = ['queued', 'checking', 'downloading metadata',
                 'downloading', 'finished', 'seeding', 'allocating']
    # note: labels are matched to the fields here (UP uses upload_rate,
    # DOWN uses download_rate, and num_peers counts peers, not seeds)
    print('%.2f%% ... (UP: %.1f kB/s DOWN: %.1f kB/s peers: %d) %s' %
          (s.progress * 100, s.upload_rate / 1000, s.download_rate / 1000,
           s.num_peers, state_str[s.state]))
    time.sleep(5)

end = time.time()
print(handle.name(), "COMPLETE")
print("Elapsed Time:", int((end - begin) // 60), "min :", int((end - begin) % 60), "sec")
print(datetime.datetime.now())
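For reference, a minimal sketch of one way to keep the status on a single line in Python 3: print a carriage return instead of a newline and flush, then emit a real newline once the loop exits. This reuses handle, lt and state_str from the code above.

while handle.status().state != lt.torrent_status.seeding:
    s = handle.status()
    line = '%.2f%% (UP: %.1f kB/s DOWN: %.1f kB/s peers: %d) %s' % (
        s.progress * 100, s.upload_rate / 1000, s.download_rate / 1000,
        s.num_peers, state_str[s.state])
    # end='\r' returns the cursor to the start of the line instead of
    # advancing; ljust pads so a shorter status fully overwrites a longer one
    print(line.ljust(79), end='\r', flush=True)
    time.sleep(5)
print()  # move past the status line once seeding starts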
I am using rl-baselines-zoo 3 to run DDPG with my custom env on Colab. After I used the show-video function in that zoo repo, it said it cannot connect to the server. It works fine with the built-in envs, so I guess the problem is my env. Please, I need some help.
I set everything up following zoo's tutorials.
Traceback:
pybullet build time: Jul 12 2021 20:46:20
/usr/local/lib/python3.7/dist-packages/gym/logger.py:30: UserWarning:
WARN: Box bound precision lowered by casting to float32
startThreads creating 1 threads.
starting thread 0
started thread 0
argc=2
argv[0] = --unused
argv[1] = --start_demo_name=Physics Server
ExampleBrowserThreadFunc started
X11 functions dynamically loaded using dlopen/dlsym OK!
X11 functions dynamically loaded using dlopen/dlsym OK!
Creating context
Created GL 3.3 context
Direct GLX rendering context obtained
Making context current
GL_VENDOR=VMware, Inc.
GL_RENDERER=llvmpipe (LLVM 10.0.0, 256 bits)
GL_VERSION=3.3 (Core Profile) Mesa 20.0.8
GL_SHADING_LANGUAGE_VERSION=3.30
pthread_getconcurrency()=0
Version = 3.3 (Core Profile) Mesa 20.0.8
Vendor = VMware, Inc.
Renderer = llvmpipe (LLVM 10.0.0, 256 bits)
b3Printf: Selected demo: Physics Server
startThreads creating 1 threads.
starting thread 0
started thread 0
MotionThreadFunc thread started
ven = VMware, Inc.
ven = VMware, Inc.
Wrapping the env in a VecTransposeImage.
tcmalloc: large alloc 3276800000 bytes == 0x556b03bda000 # 0x7f7cad04a001 0x7f7caa3f554f 0x7f7caa445b58 0x7f7caa449b17 0x7f7caa4e8203 0x556a81194d54 0x556a81194a50 0x556a81209105 0x556a812037ad 0x556a81196c9f 0x556a811d7d79 0x556a811d4cc4 0x556a81196ea1 0x556a81205bb5 0x556a8119630a 0x556a812087f0 0x556a812037ad 0x556a811963ea 0x556a8120460e 0x556a812034ae 0x556a811963ea 0x556a8120532a 0x556a812034ae 0x556a812031b3 0x556a81201660 0x556a81194b59 0x556a81194a50 0x556a81208453 0x556a812034ae 0x556a811963ea 0x556a812043b5
tcmalloc: large alloc 3276800000 bytes == 0x556bc78da000 # 0x7f7cad04a001 0x7f7caa3f554f 0x7f7caa445b58 0x7f7caa449b17 0x7f7caa4e8203 0x556a81194d54 0x556a81194a50 0x556a81209105 0x556a812037ad 0x556a81196c9f 0x556a811d7d79 0x556a811d4cc4 0x556a81196ea1 0x556a81205bb5 0x556a8119630a 0x556a812087f0 0x556a812037ad 0x556a811963ea 0x556a8120460e 0x556a812034ae 0x556a811963ea 0x556a8120532a 0x556a812034ae 0x556a812031b3 0x556a81201660 0x556a81194b59 0x556a81194a50 0x556a81208453 0x556a812034ae 0x556a811963ea 0x556a812043b5
/content/gdrive/My Drive/hsr/rl-baselines3-zoo/logs/ddpg/FoodHuntingHSR-v0_3/videos/final-model-ddpg-FoodHuntingHSR-v0-step-0-to-step-200.mp4
/usr/local/lib/python3.7/dist-packages/gym/logger.py:30: UserWarning:
WARN: Tried to pass invalid video frame, marking as broken: Your frame has data type float32, but we require uint8 (i.e. RGB values from 0-255).
Saving video to /content/gdrive/My Drive/hsr/rl-baselines3-zoo/logs/ddpg/FoodHuntingHSR-v0_3/videos/final-model-ddpg-FoodHuntingHSR-v0-step-0-to-step-200.mp4
numActiveThreads = 0
stopping threads
destroy semaphore
semaphore destroyed
Thread with taskId 0 exiting
Thread TERMINATED
destroy main semaphore
main semaphore destroyed
finished
numActiveThreads = 0
btShutDownExampleBrowser stopping threads
Thread with taskId 0 exiting
Thread TERMINATED
destroy semaphore
semaphore destroyed
destroy main semaphore
main semaphore destroyed
Exception ignored in: <function VecVideoRecorder.__del__ at 0x7f7c2b5cc200>
Traceback (most recent call last):
File "/content/gdrive/My Drive/hsr/stable-baselines3/stable_baselines3/common/vec_env/vec_video_recorder.py", line 114, in __del__
File "/content/gdrive/My Drive/hsr/stable-baselines3/stable_baselines3/common/vec_env/vec_video_recorder.py", line 110, in close
File "/content/gdrive/My Drive/hsr/stable-baselines3/stable_baselines3/common/vec_env/base_vec_env.py", line 278, in close
File "/content/gdrive/My Drive/hsr/stable-baselines3/stable_baselines3/common/vec_env/dummy_vec_env.py", line 67, in close
File "/content/gdrive/My Drive/hsr/stable-baselines3/stable_baselines3/common/monitor.py", line 113, in close
File "/usr/local/lib/python3.7/dist-packages/gym/core.py", line 243, in close
File "/usr/local/lib/python3.7/dist-packages/gym/core.py", line 243, in close
File "/content/gdrive/My Drive/hsr/PyLIS/gym-foodhunting/gym_foodhunting/foodhunting/gym_foodhunting.py", line 538, in close
pybullet.error: Not connected to physics server
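Here is the env code from gym_foodhunting.py that the traceback ends in: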
import gym
import numpy as np
import pybullet as p
import pybullet_data
from gym.utils import seeding

# R2D2 is the project's robot-model class, defined elsewhere in this file.


class FoodHuntingEnv(gym.Env):
    metadata = {'render.modes': ['human', 'rgb_array']}

    GRAVITY = -10.0
    BULLET_STEPS = 120  # p.setTimeStep(1.0 / 240.0), so 1 gym step == 0.5 sec.

    def __init__(self, render=False, robot_model=R2D2, max_steps=500, num_foods=3, num_fakes=0, object_size=1.0, object_radius_scale=1.0, object_radius_offset=1.0, object_angle_scale=1.0):
        """Initialize environment.
        """
        ### gym variables
        self.observation_space = robot_model.getObservationSpace()  # classmethod
        self.action_space = robot_model.getActionSpace()  # classmethod
        self.reward_range = (-1.0, 1.0)
        self.seed()
        ### pybullet settings
        self.ifrender = render
        self.physicsClient = p.connect(p.GUI if self.ifrender else p.DIRECT)
        p.setAdditionalSearchPath(pybullet_data.getDataPath())
        ### env variables
        self.robot_model = robot_model
        self.max_steps = max_steps
        self.num_foods = num_foods
        self.num_fakes = num_fakes
        self.object_size = object_size
        self.object_radius_scale = object_radius_scale
        self.object_radius_offset = object_radius_offset
        self.object_angle_scale = object_angle_scale
        self.plane_id = None
        self.robot = None
        self.object_ids = []
        ### episode variables
        self.steps = 0
        self.episode_rewards = 0.0

    def close(self):
        """Close environment.
        """
        p.disconnect(self.physicsClient)

    def reset(self):
        """Reset environment.
        """
        self.steps = 0
        self.episode_rewards = 0
        p.resetSimulation()
        # p.setTimeStep(1.0 / 240.0)
        p.setGravity(0, 0, self.GRAVITY)
        self.plane_id = p.loadURDF('plane.urdf')
        self.robot = self.robot_model()
        self.object_ids = []
        for i, (pos, orn) in enumerate(self._generateObjectPositions(num=(self.num_foods + self.num_fakes), radius_scale=self.object_radius_scale, radius_offset=self.object_radius_offset, angle_scale=self.object_angle_scale)):
            if i < self.num_foods:
                urdfPath = 'food_sphere.urdf'
            else:
                urdfPath = 'food_cube.urdf'
            object_id = p.loadURDF(urdfPath, pos, orn, globalScaling=self.object_size)
            self.object_ids.append(object_id)
        for i in range(self.BULLET_STEPS):
            p.stepSimulation()
        obs = self._getObservation()
        #print('reset laile')
        #self.robot.printAllJointInfo()
        return obs

    def step(self, action):
        """Apply action to environment, then return observation and reward.
        """
        self.steps += 1
        self.robot.setAction(action)
        reward = -1.0 * float(self.num_foods) / float(self.max_steps)  # so agent needs to eat foods quickly
        for i in range(self.BULLET_STEPS):
            p.stepSimulation()
            reward += self._getReward()
        self.episode_rewards += reward
        obs = self._getObservation()
        done = self._isDone()
        pos, orn = self.robot.getPositionAndOrientation()
        info = {'steps': self.steps, 'pos': pos, 'orn': orn}
        if done:
            #print('Done laile')
            info['episode'] = {'r': self.episode_rewards, 'l': self.steps}
            # print(self.episode_rewards, self.steps)
            #print(self.robot.getBaseRollPosition(), self.robot.getTorsoLiftPosition(), self.robot.getHeadPosition(), self.robot.getArmPosition(), self.robot.getWristPosition(), self.robot.getGripperPosition()) # for HSR debug
            #print(self.robot.getHeadPosition(), self.robot.getGripperPosition()) # for R2D2 debug
        return obs, reward, done, info

    def render(self, mode='human', close=False):
        """This is a dummy function. This environment cannot control rendering timing.
        """
        if mode != 'rgb_array':
            return np.array([])
        return self._getObservation()

    def seed(self, seed=None):
        """Set random seed.
        """
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def _getReward(self):
        """Detect contact points and return reward.
        """
        reward = 0
        contacted_object_ids = [object_id for object_id in self.object_ids if self.robot.isContact(object_id)]
        for object_id in contacted_object_ids:
            reward += 1 if self._isFood(object_id) else -1
            p.removeBody(object_id)
            self.object_ids.remove(object_id)
        return reward

    def _getObservation(self):
        """Get observation.
        """
        obs = self.robot.getObservation()
        return obs

    def _isFood(self, object_id):
        """Check if object_id is a food.
        """
        baseLink, urdfPath = p.getBodyInfo(object_id)
        return urdfPath == b'food_sphere.urdf'  # otherwise, fake

    def _isDone(self):
        """Check if episode is done.
        """
        #print(self.object_ids,'self')
        available_object_ids = [object_id for object_id in self.object_ids if self._isFood(object_id)]
        #print(available_object_ids)
        return self.steps >= self.max_steps or len(available_object_ids) <= 0

    def _generateObjectPositions(self, num=1, retry=100, radius_scale=1.0, radius_offset=1.0, angle_scale=1.0, angle_offset=0.5*np.pi, z=1.5, near_distance=0.5):
        """Generate food positions randomly.
        """
        def genPos():
            r = radius_scale * self.np_random.rand() + radius_offset
            a = -np.pi * angle_scale + angle_offset
            b = np.pi * angle_scale + angle_offset
            ang = (b - a) * self.np_random.rand() + a
            return np.array([r * np.sin(ang), r * np.cos(ang), z])

        def isNear(pos, poss):
            for pos_, orn_ in poss:  # renamed from (p, o) so the pybullet alias p is not shadowed
                if np.linalg.norm(pos_ - pos) < near_distance:
                    return True
            return False

        def genPosRetry(poss):
            for i in range(retry):
                pos = genPos()
                if not isNear(pos, poss):
                    return pos
            return genPos()

        poss = []
        for i in range(num):
            pos = genPosRetry(poss)
            orn = p.getQuaternionFromEuler([0.0, 0.0, 2.0 * np.pi * self.np_random.rand()])
            poss.append((pos, orn))
        return poss
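From the traceback it looks like close() runs a second time via VecVideoRecorder.__del__ after the client is already disconnected, and the gym warning complains about float32 frames where uint8 is required. A sketch of defensive versions I could try, assuming pybullet's isConnected() and that my observation is float RGB in [0, 1] (that range is an assumption):

    def close(self):
        """Close environment; tolerate being called twice."""
        # isConnected returns False once this client has disconnected
        if p.isConnected(self.physicsClient):
            p.disconnect(self.physicsClient)

    def render(self, mode='human', close=False):
        """Return an rgb_array frame as uint8, which video recorders require."""
        if mode != 'rgb_array':
            return np.array([])
        frame = self._getObservation()
        # assumption: the observation is float RGB in [0, 1]; rescale accordingly
        return (np.clip(frame, 0.0, 1.0) * 255).astype(np.uint8)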
Background
I'm using source code from TensorFlow's Object Detection API, as well as Firebase's MLInterpreter. I'm trying to stick closely to the prescribed steps in the documentation. During training I can see on TensorBoard that the model is training properly, but somehow I am not exporting and wiring things up correctly for inference. Here are the details:
Commands I used, from training through the .tflite file
First, I submit the training job using an ssd_mobilenet_v1 config file. The config file is more or less the one TensorFlow provides by default; I have only modified the class count and the bucket name.
gcloud ml-engine jobs submit training `whoami`_<JOB_NAME>_`date +%m_%d_%Y_%H_%M_%S` \
--runtime-version 1.12 \
--job-dir=gs://<BUCKET_NAME>/model_dir \
--packages dist/object_detection-0.1.tar.gz,slim/dist/slim-0.1.tar.gz,/tmp/pycocotools/pycocotools-2.0.tar.gz \
--module-name object_detection.model_main \
--region us-central1 \
--config object_detection/samples/cloud/cloud.yml \
-- \
--model_dir=gs://<BUCKET_NAME>/model_dir \
--pipeline_config_path=gs://<BUCKET_NAME>/data/ssd_mobilenet_v1.config
Then I export the tflite_graph.pb file:
python models/research/object_detection/export_tflite_ssd_graph.py \
--input_type image_tensor \
--pipeline_config_path ssd_mobilenet_v1.config \
--trained_checkpoint_prefix model.ckpt-264012 \
--output_directory exported_tflite
Great, at this point I have tflite_graph.pb, and need to get from there to the actual .tflite file:
tflite_convert \
--output_file=model.tflite \
--graph_def_file=exported_tflite/tflite_graph.pb \
--input_arrays=normalized_input_image_tensor \
--output_arrays=TFLite_Detection_PostProcess \
--input_shapes=1,300,300,3 \
--allow_custom_ops
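Before wiring this into Swift, it may help to confirm what the converted model actually expects. A quick check with the standard TF Lite Python interpreter (tf.lite API) prints every input and output tensor's name, shape and dtype, which is what the ioOptions below have to match:

import tensorflow as tf

# Load the converted model and inspect its tensors.
interpreter = tf.lite.Interpreter(model_path='model.tflite')
interpreter.allocate_tensors()

for detail in interpreter.get_input_details():
    print('input:', detail['name'], detail['shape'], detail['dtype'])
for detail in interpreter.get_output_details():
    print('output:', detail['name'], detail['shape'], detail['dtype'])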
Performing inference with Swift and Firebase
I'd like to eventually use AVFoundation to capture images from the camera, but to keep this readable I'll post just the relevant parts of the code.
Here's where the model is initialized and the ioOptions are set. I found a comment at the top of export_tflite_ssd_graph.py (used above) that I used to determine the ioOptions, but I'm still not convinced I configured them properly:
guard let modelPath = Bundle.main.path(forResource: "model", ofType: "tflite") else {
    self.interpreter = nil
    super.init()
    return
}
let localModel = CustomLocalModel(modelPath: modelPath)
self.interpreter = ModelInterpreter.modelInterpreter(localModel: localModel)
do {
    try self.ioOptions.setInputFormat(index: 0, type: .float32, dimensions: [1, 300, 300, 3])
    try self.ioOptions.setOutputFormat(index: 0, type: .float32, dimensions: [1, 10, 4])
    try self.ioOptions.setOutputFormat(index: 1, type: .float32, dimensions: [1, 10])
    try self.ioOptions.setOutputFormat(index: 2, type: .float32, dimensions: [1, 10])
    try self.ioOptions.setOutputFormat(index: 3, type: .float32, dimensions: [1])
} catch let error as NSError {
    print("Failed to set input or output format with error: \(error.localizedDescription)")
}
After setting things up, I use the following lines to perform inference later on. Basically, I convert the data buffer to a CGImage, do some resizing, and then repack the RGB values into a buffer I can pass to the model for inference:
// Draw the image in a context
guard let context = CGContext(
    data: nil,
    width: image.width, height: image.height,
    bitsPerComponent: 8, bytesPerRow: image.width * 4,
    space: CGColorSpaceCreateDeviceRGB(),
    bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue
) else {
    return
}
context.draw(image, in: CGRect(x: 0, y: 0, width: image.width, height: image.height))
guard let imageData = context.data else { return }

// "image" is now a CGImage
let inputs = ModelInputs()
var inputData = Data()
do {
    for row in 0 ..< 300 {
        for col in 0 ..< 300 {
            // row-major indexing: row * width + col (the original
            // col * width + row transposed the image)
            let offset = 4 * (row * context.width + col)
            // (Ignore offset 0, the unused alpha channel)
            let red = imageData.load(fromByteOffset: offset + 1, as: UInt8.self)
            let green = imageData.load(fromByteOffset: offset + 2, as: UInt8.self)
            let blue = imageData.load(fromByteOffset: offset + 3, as: UInt8.self)
            var normalizedRed = Float32(red) / 255.0
            var normalizedGreen = Float32(green) / 255.0
            var normalizedBlue = Float32(blue) / 255.0
            // Append normalized values to Data object in RGB order.
            let elementSize = MemoryLayout.size(ofValue: normalizedRed)
            var bytes = [UInt8](repeating: 0, count: elementSize)
            memcpy(&bytes, &normalizedRed, elementSize)
            inputData.append(&bytes, count: elementSize)
            memcpy(&bytes, &normalizedGreen, elementSize)
            inputData.append(&bytes, count: elementSize)
            memcpy(&bytes, &normalizedBlue, elementSize)
            inputData.append(&bytes, count: elementSize)
        }
    }
    try inputs.addInput(inputData)
} catch let error {
    print("Failed to add input: \(error)")
}

guard let interpret = self.interpreter else { return }
print("Running interpreter")
interpret.run(inputs: inputs, options: self.ioOptions) { outputs, error in
    guard error == nil, let outputs = outputs else { return }
    do {
        try print(outputs.output(index: 1))
        try print(outputs.output(index: 2))
        ...
    } catch let error {
        print(error)
    }
}
Problem / Question
After a few hours of trying to get the data into a format that doesn't throw errors, I finally get an output.
The problem is that the output probabilities are really low and the classes are almost never correct. I know my model has better accuracy than this, so I suspect I've done something wrong between producing the checkpoint files and actually running inference on the .tflite file.
Can anybody who has worked with object detection see where I may have gone off course?
I am using Scrapy along with scrapyrt to create APIs. My application has around 20 spiders. We are using an NFS server for load balancing in production. Unfortunately, the application is using 40% of memory and more.
"stats": {
"downloader/request_bytes": 12033,
"downloader/request_count": 5,
"downloader/request_method_count/GET": 4,
"downloader/request_method_count/POST": 1,
"downloader/response_bytes": 20165,
"downloader/response_count": 5,
"downloader/response_status_count/200": 3,
"downloader/response_status_count/302": 1,
"downloader/response_status_count/404": 1,
"finish_reason": "finished",
"finish_time": "2019-05-23 06:05:04",
"item_scraped_count": 1,
"log_count/DEBUG": 35,
"log_count/INFO": 20,
"memusage/max": 3399057408,
"memusage/startup": 3399057408,
"request_depth_max": 2,
"response_received_count": 4,
"scheduler/dequeued": 4,
"scheduler/dequeued/memory": 4,
"scheduler/enqueued": 4,
"scheduler/enqueued/memory": 4,
"start_time": "2019-05-23 06:05:01"
}
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
14500 root 20 0 4999116 3.190g 7184 S 0.3 40.9 103:34.01 scrapyrt
I followed the Scrapy memory-leak documentation and removed the meta attribute from the request, but memory is still increasing.
import json
import os

import scrapy
from scrapy import Request, Spider

# The GARUDA_* constants, SITE, LANGUAGE, TODAY_DATE, CRYTO_COMMAND_URL and
# get_cookies() are project helpers defined elsewhere.


class GarudaRetrieveBookingSpider(Spider):
    """get the pax, flight and fare details"""
    name = "garuda_retrieve_booking"
    meta = dict()
    formdata = dict()
    start_url = ''
    booking_code = ''
    output_dict = {'schedule_detail': [], 'pax_details': [], 'reservation_name': '', 'fare_details': {}}
    pax_count = 0
    adult_count = 0
    child_count = 0
    infant_count = 0
    ticket_list_adt_child = []
    ticket_list_inf = []
    # this variable is created to save rt command response data to pass in next call if there are no tickets
    rt_response = ''

    def start_requests(self):
        """
        :return: Request object
        """
        post_data = self.data
        garuda_session_id = post_data['parameter']['jSessionId']
        post_data["command"] = "IG"
        file_path = os.path.join(GARUDA_SESSION_FILES_PATH, TODAY_DATE, garuda_session_id + "_session.txt")
        session_data = get_cookies(self, file_path)
        self.start_url = GARUDA_KEEP_ALIVE_URL.format(session_id=session_data["jSessionId"], site=SITE, lang=LANGUAGE)
        self.meta = {"session_data": session_data, "post_data": post_data}
        return [Request(self.start_url, self.parse, errback=self.errback_httpbin)]

    def parse(self, response):
        """
        :param response:
        :return: FormRequest
        description: submit IG command
        """
        self.log("\n\nparse response: {}\n\n".format(response.text))
        if response.status != 200:
            error_message = 'parse method failed.'
            return {"status": False, "error_message": error_message}
        session_data = self.meta["session_data"]
        command = self.meta["post_data"]["command"]
        # override the command with current command
        session_data["tasks"][0]["command"]["command"] = command
        self.formdata = {
            "data": json.dumps(session_data)
        }
        yield scrapy.FormRequest(CRYTO_COMMAND_URL, formdata=self.formdata,
                                 callback=self.ig_command_response, errback=self.errback_httpbin)
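To see which objects are actually piling up between scrapyrt calls, the same memory-leak docs point at Scrapy's trackref utility; a minimal check (prefs() is its shortcut in the telnet console):

from scrapy.utils.trackref import print_live_refs

# Prints live counts of Request, Response, Item and Spider objects,
# so you can see which class keeps growing between calls.
print_live_refs()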
I need to write some code for work that telnets into a device, runs a command, and exits the telnet session.
I keep getting this error:
File "python", line 54
    tn.close()
             ^
SyntaxError: unexpected EOF while parsing
What am I doing wrong? I'm new to coding.
Cheers
import time
import telnetlib

tn_username = "xxxx"
tn_password = "xxxxxxx"

#Globals:
CACHE_DATA = {}
SNIPPET_NAME = 'xxxxxx: xxxxxx'
FAILED_COUNT = 0
COLLECTION_PROBLEM = False
TELNET_PORT = 23
TELNET_TIMEOUT = 2
FAILED_ITEMS = []

self.logger.ui_debug('************** %s: Starting *******************' % (SNIPPET_NAME))

#start timer
start_time = time.time()

try:
    #connect to telnet; Telnet must be instantiated, not just referenced.
    #device_ip is a placeholder here - substitute the device's address.
    tn = telnetlib.Telnet(device_ip, TELNET_PORT, TELNET_TIMEOUT)
    tn.read_until("login: ")
    tn.write(tn_username + "\n")
    tn.read_until("Password: ")
    tn.write(tn_password + "\n")
    for obj_oid in result_handler.oids:
        ##obj_name = result_handler[obj_oid]['name']
        try:
            #run oid as CLI call from result_handler
            tn.write(obj_oid + "\r")
            rawdata = tn.read_until("Welcome to the Tesira Text Protocol Server...", TELNET_TIMEOUT)
            if rawdata:
                result_handler[obj_oid] = [(0, "Collection Ok")]
                CACHE_DATA[obj_oid] = rawdata.strip()
            else:
                FAILED_COUNT += 1
                result_handler[obj_oid] = [(0, "Failed: No data found")]
                FAILED_ITEMS.append(obj_oid)
        except:
            FAILED_ITEMS.append(obj_oid)
            result_handler[obj_oid] = [(0, 'Failed: Collection: %s' % obj_oid)]
            FAILED_COUNT += 1
    #save job time for perf graph
    CACHE_DATA['biamp'] = round(time.time() - start_time, 4)
    #gracefully quit the telnet session so as to not leave any defunct processes on the host device.
    tn.write("bye\r")
    tn.close()
except Exception:
    #the outer try was missing its except clause, which is what produced
    #"SyntaxError: unexpected EOF while parsing" at tn.close()
    COLLECTION_PROBLEM = True
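Side note: the snippet above passes str to telnetlib. If this ever runs under Python 3, telnetlib exchanges bytes, so the reads and writes would need encoding, along these lines:

# Under Python 3, telnetlib exchanges bytes rather than str:
tn.read_until(b"login: ", TELNET_TIMEOUT)
tn.write(tn_username.encode('ascii') + b"\n")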
I'm attempting to use Conda Accelerate to speed up some data preprocessing, but initial benchmarks indicate either I'm not using it correctly or it has no effect on FFT and linear-algebra execution times in NumPy and librosa. Re-reading the literature: does this mean I'm supposed to decorate and recode every ndarray operation, as in the batch-matmul example for NumbaPro? I'd assumed I could simply install it and NumPy would get faster, but that doesn't appear to be the case.
Benchmarks and code are below. I've installed accelerate via conda install accelerate and also imported it for good measure.
Thanks!
Result - negligible difference before and after conda install accelerate
Total time was 25.356
Total load time was 1.6743
Total math time was 22.1599
Total save time was 1.5139
Total stft math time was 12.9219
Total other numpy math time was 9.1886
Relevant code:
import time
import cPickle

import librosa
import numpy as np

# signal_dir, v0_list, v1_list, frame_size, step_size, fs, data, all_info,
# write_name and t0 are defined earlier in the script.

loads, maths, saves = [], [], []
stfts, nps = [], []
# now we have a dict of all source files grouped by voice
for i in range(30):
    v0_fn = v0_list[i]
    v1_fn = v1_list[i]
    tl0 = time.time()
    # Process v0 & v1 file
    v0_fn = signal_dir + v0_fn
    v0, fs_s = librosa.load(v0_fn, sr=None)
    v1_fn = signal_dir + v1_fn
    v1, fs_s = librosa.load(v1_fn, sr=None)
    tl1 = time.time()
    loads.append((tl1 - tl0))
    mix = v0 + v1
    # Capture the magnitude and phase of signal and signal + noise
    tm0 = time.time()
    v0_stft = librosa.stft(v0, int(frame_size * fs), int(step_size * fs)).transpose()
    tm1 = time.time()
    v0_mag = (v0_stft.real**2 + v0_stft.imag**2)**0.5
    v0_pha = np.arctan2(v0_stft.imag, v0_stft.real)
    v0_rtheta = np.stack((v0_mag, v0_pha), axis=0)
    tm2 = time.time()
    v1_stft = librosa.stft(v1, int(frame_size * fs), int(step_size * fs)).transpose()
    tm3 = time.time()
    v1_mag = (v1_stft.real**2 + v1_stft.imag**2)**0.5
    v1_pha = np.arctan2(v1_stft.imag, v1_stft.real)
    v1_rtheta = np.stack((v1_mag, v1_pha), axis=0)
    tm4 = time.time()
    mix_stft = librosa.stft(mix, int(frame_size * fs), int(step_size * fs)).transpose()
    tm5 = time.time()
    mix_mag = (mix_stft.real**2 + mix_stft.imag**2)**0.5
    mix_pha = np.arctan2(mix_stft.imag, mix_stft.real)
    mix_rtheta = np.stack((mix_mag, mix_pha), axis=0)
    tm6 = time.time()
    stfts += [tm1 - tm0, tm3 - tm2, tm5 - tm4]
    nps += [tm2 - tm1, tm4 - tm3, tm6 - tm5]
    data['sig_rtheta'] = v0_rtheta
    data['noi_rtheta'] = v1_rtheta
    data['mix_rtheta'] = mix_rtheta
    tl2 = time.time()
    maths.append(tl2 - tl1)
    with open(write_name, 'w') as f:
        cPickle.dump(all_info, f, protocol=-1)
    tl3 = time.time()
    saves.append(tl3 - tl2)

t1 = time.time()
print 'Total time was %.3f' % (t1 - t0)
print 'Total load time was %.4f' % np.sum(loads)
print 'Total math time was %.4f' % np.sum(maths)
print 'Total save time was %.4f' % np.sum(saves)
print 'Total stft math was %.4f' % np.sum(stfts)
print 'Total other numpy math time was %.4f' % np.sum(nps)
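For reference, here is a minimal sketch of the decorate-and-recode approach the NumbaPro batch-matmul example describes, applied to the magnitude step above. This uses plain numba's @vectorize (not an Accelerate-specific API), and magnitude is a name I made up:

import math

import numpy as np
from numba import vectorize

@vectorize(['float32(float32, float32)'], target='parallel')
def magnitude(re, im):
    # elementwise |re + i*im|, compiled and fanned out across CPU cores
    return math.sqrt(re * re + im * im)

# hypothetical usage against one of the STFT results above:
# v0_mag = magnitude(v0_stft.real.astype(np.float32),
#                    v0_stft.imag.astype(np.float32))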